VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMR0/GMMR0.cpp@ 32264

Last change on this file since 32264 was 31383, checked in by vboxsync, 14 years ago

Fixed incorrect global module unregistration in the win64 case.

1/* $Id: GMMR0.cpp 31383 2010-08-05 08:22:44Z vboxsync $ */
2/** @file
3 * GMM - Global Memory Manager.
4 */
5
6/*
7 * Copyright (C) 2007 Oracle Corporation
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 */
17
18
19/** @page pg_gmm GMM - The Global Memory Manager
20 *
21 * As the name indicates, this component is responsible for global memory
22 * management. Currently only guest RAM is allocated from the GMM, but this
23 * may change to include shadow page tables and other bits later.
24 *
25 * Guest RAM is managed as individual pages, but allocated from the host OS
26 * in chunks for reasons of portability / efficiency. To minimize the memory
27 * footprint all tracking structures must be as small as possible without
28 * unnecessary performance penalties.
29 *
30 * The allocation chunks have a fixed size, defined at compile time
31 * by the #GMM_CHUNK_SIZE \#define.
32 *
33 * Each chunk is given a unique ID. Each page also has a unique ID. The
34 * relationship between the two IDs is:
35 * @code
36 * GMM_CHUNK_SHIFT = log2(GMM_CHUNK_SIZE / PAGE_SIZE);
37 * idPage = (idChunk << GMM_CHUNK_SHIFT) | iPage;
38 * @endcode
39 * Where iPage is the index of the page within the chunk. This ID scheme
40 * permits efficient chunk and page lookup, but it relies on the chunk size
41 * to be set at compile time. The chunks are organized in an AVL tree with their
42 * IDs being the keys.
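 *
 * As an illustration (a sketch, not code lifted from below), going from a
 * page ID back to its chunk and page index is simply the reverse of the
 * formula above:
 * @code
 * idChunk = idPage >> GMM_CHUNK_SHIFT;
 * iPage   = idPage & ((1 << GMM_CHUNK_SHIFT) - 1);
 * @endcode
 * This is essentially what gmmR0GetChunk and gmmR0GetPage do further down,
 * using GMM_CHUNKID_SHIFT and GMM_PAGEID_IDX_MASK for the shift and mask.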
43 *
44 * The physical address of each page in an allocation chunk is maintained by
45 * the #RTR0MEMOBJ and obtained using #RTR0MemObjGetPagePhysAddr. There is no
46 * need to duplicate this information (it'd cost 8 bytes per page if we did).
47 *
48 * So what do we need to track per page? Most importantly we need to know
49 * which state the page is in:
50 * - Private - Allocated for (eventually) backing one particular VM page.
51 * - Shared - Readonly page that is used by one or more VMs and treated
52 * as COW by PGM.
53 * - Free - Not used by anyone.
54 *
55 * For the page replacement operations (sharing, defragmenting and freeing)
56 * to be somewhat efficient, private pages need to be associated with a
57 * particular page in a particular VM.
58 *
59 * Tracking the usage of shared pages is impractical and expensive, so we'll
60 * settle for a reference counting system instead.
61 *
62 * Free pages will be chained on LIFOs.
63 *
64 * On 64-bit systems we will use a 64-bit bitfield per page, while on 32-bit
65 * systems a 32-bit bitfield will have to suffice because of address space
66 * limitations. The #GMMPAGE structure shows the details.
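 *
 * As a rough illustration (a sketch, not code from this file; GCPhys and hGVM
 * stand for whatever guest physical address and VM handle are at hand),
 * marking a 64-bit GMMPAGE entry as a private page would look something like:
 * @code
 * pPage->u               = 0;                        // clear all fields
 * pPage->Private.pfn     = GCPhys >> PAGE_SHIFT;     // guest page frame number
 * pPage->Private.hGVM    = hGVM;                     // owning VM handle
 * pPage->Private.u2State = GMM_PAGE_STATE_PRIVATE;   // 2-bit state field
 * @endcode
 * The real code doing this lives in gmmR0AllocatePage further down.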
67 *
68 *
69 * @section sec_gmm_alloc_strat Page Allocation Strategy
70 *
71 * The strategy for allocating pages has to take fragmentation and shared
72 * pages into account, or we may end up with 2000 chunks with only
73 * a few pages in each. Shared pages cannot easily be reallocated because
74 * of the inaccurate usage accounting (see above). Private pages can be
75 * reallocated by a defragmentation thread in the same manner that sharing
76 * is done.
77 *
78 * The first approach is to manage the free pages in two sets depending on
79 * whether they are mainly for the allocation of shared or private pages.
80 * In the initial implementation there will be almost no possibility for
81 * mixing shared and private pages in the same chunk (only if we're really
82 * stressed on memory), but when we implement forking of VMs and have to
83 * deal with lots of COW pages it'll start getting kind of interesting.
84 *
85 * The sets are lists of chunks with approximately the same number of
86 * free pages. Say the chunk size is 1MB, meaning 256 pages, and a set
87 * consists of 16 lists. So, the first list will contain the chunks with
88 * 1-16 free pages, the second covers 17-32, and so on. The chunks will be
89 * moved between the lists as pages are freed up or allocated.
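 *
 * Put differently, a chunk with cFree free pages sits in list number
 * (cFree - 1) >> GMM_CHUNK_FREE_SET_SHIFT of its set. A sketch of the
 * indexing done by gmmR0LinkChunk / gmmR0UnlinkChunk further down:
 * @code
 * iList = (pChunk->cFree - 1) >> GMM_CHUNK_FREE_SET_SHIFT;
 * pChunk->pFreeNext    = pSet->apLists[iList];
 * pSet->apLists[iList] = pChunk;   // plus the pFreePrev fixup and cFreePages accounting
 * @endcode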
90 *
91 *
92 * @section sec_gmm_costs Costs
93 *
94 * The per page cost in kernel space is 32 bits plus whatever the RTR0MEMOBJ
95 * entails. In addition there is the chunk cost of approximately
96 * (sizeof(RTR0MEMOBJ) + sizeof(GMMCHUNK)) / 2^GMM_CHUNK_SHIFT bytes per page.
97 *
98 * On Windows the per page #RTR0MEMOBJ cost is 32 bits on 32-bit Windows
99 * and 64 bits on 64-bit Windows (a PFN_NUMBER in the MDL). So, 64 bits per page.
100 * The cost on Linux is identical, but there it's because of sizeof(struct page *).
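 *
 * A back-of-the-envelope example (approximate figures, assuming a 64-bit
 * host and the current 2 MB chunk size, i.e. 512 pages per chunk):
 * @code
 * sizeof(GMMPAGE)                  =  8 bytes   // see the AssertCompileSize below
 * PFN tracking in the RTR0MEMOBJ   ~  8 bytes   // PFN_NUMBER / struct page *
 * per-page share of chunk overhead <  1 byte    // (RTR0MEMOBJ + GMMCHUNK bookkeeping) / 512
 * total                            ~ 17 bytes   // roughly 0.4% of a 4 KB page
 * @endcode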
101 *
102 *
103 * @section sec_gmm_legacy Legacy Mode for Non-Tier-1 Platforms
104 *
105 * In legacy mode the page source is locked user pages and not
106 * #RTR0MemObjAllocPhysNC, this means that a page can only be allocated
107 * by the VM that locked it. We will make no attempt at implementing
108 * page sharing on these systems, just do enough to make it all work.
109 *
110 *
111 * @subsection sub_gmm_locking Serializing
112 *
113 * One simple fast mutex will be employed in the initial implementation, not
114 * two as mentioned in @ref subsec_pgmPhys_Serializing.
115 *
116 * @see @ref subsec_pgmPhys_Serializing
117 *
118 *
119 * @section sec_gmm_overcommit Memory Over-Commitment Management
120 *
121 * The GVM will have to do the system wide memory over-commitment
122 * management. My current ideas are:
123 * - Per VM over-commitment policy that indicates how much to initially commit
124 * to it and what to do in an out-of-memory situation.
125 * - Prevent overtaxing the host.
126 *
127 * There are some challenges here, the main ones are configurability and
128 * security. Should we for instance permit anyone to request 100% memory
129 * commitment? Who should be allowed to do runtime adjustments of the
130 * config? And how do we prevent these settings from being lost when the last
131 * VM process exits? The solution is probably to have an optional root
132 * daemon that will keep VMMR0.r0 in memory and enable the security measures.
133 *
134 *
135 *
136 * @section sec_gmm_numa NUMA
137 *
138 * NUMA considerations will be designed and implemented a bit later.
139 *
140 * The preliminary guess is that we will have to try to allocate memory as
141 * close as possible to the CPUs the VM is executed on (EMT and additional CPU
142 * threads), which means it's mostly about allocation and sharing policies.
143 * Both the scheduler and allocator interfaces will have to supply some NUMA info,
144 * and we'll need a way to calculate access costs.
145 *
146 */
147
148
149/*******************************************************************************
150* Header Files *
151*******************************************************************************/
152#define LOG_GROUP LOG_GROUP_GMM
153#include <VBox/vm.h>
154#include <VBox/gmm.h>
155#include "GMMR0Internal.h"
156#include <VBox/gvm.h>
157#include <VBox/pgm.h>
158#include <VBox/log.h>
159#include <VBox/param.h>
160#include <VBox/err.h>
161#include <iprt/asm.h>
162#include <iprt/avl.h>
163#include <iprt/mem.h>
164#include <iprt/memobj.h>
165#include <iprt/semaphore.h>
166#include <iprt/string.h>
167
168
169/*******************************************************************************
170* Structures and Typedefs *
171*******************************************************************************/
172/** Pointer to set of free chunks. */
173typedef struct GMMCHUNKFREESET *PGMMCHUNKFREESET;
174
175/** Pointer to a GMM allocation chunk. */
176typedef struct GMMCHUNK *PGMMCHUNK;
177
178/**
179 * The per-page tracking structure employed by the GMM.
180 *
181 * On 32-bit hosts some trickery is necessary to compress all
182 * the information into 32 bits. When the fSharedFree member is set,
183 * the 30th bit decides whether it's a free page or not.
184 *
185 * Because of the different layout on 32-bit and 64-bit hosts, macros
186 * are used to get and set some of the data.
187 */
188typedef union GMMPAGE
189{
190#if HC_ARCH_BITS == 64
191 /** Unsigned integer view. */
192 uint64_t u;
193
194 /** The common view. */
195 struct GMMPAGECOMMON
196 {
197 uint32_t uStuff1 : 32;
198 uint32_t uStuff2 : 30;
199 /** The page state. */
200 uint32_t u2State : 2;
201 } Common;
202
203 /** The view of a private page. */
204 struct GMMPAGEPRIVATE
205 {
206 /** The guest page frame number. (Max addressable: 2 ^ 44 - 16) */
207 uint32_t pfn;
208 /** The GVM handle. (64K VMs) */
209 uint32_t hGVM : 16;
210 /** Reserved. */
211 uint32_t u16Reserved : 14;
212 /** The page state. */
213 uint32_t u2State : 2;
214 } Private;
215
216 /** The view of a shared page. */
217 struct GMMPAGESHARED
218 {
219 /** The host page frame number. (Max addressable: 2 ^ 44 - 16) */
220 uint32_t pfn;
221 /** The reference count (64K VMs). */
222 uint32_t cRefs : 16;
223 /** Reserved. Checksum or something? Two hGVMs for forking? */
224 uint32_t u14Reserved : 14;
225 /** The page state. */
226 uint32_t u2State : 2;
227 } Shared;
228
229 /** The view of a free page. */
230 struct GMMPAGEFREE
231 {
232 /** The index of the next page in the free list. UINT16_MAX is NIL. */
233 uint16_t iNext;
234 /** Reserved. Checksum or something? */
235 uint16_t u16Reserved0;
236 /** Reserved. Checksum or something? */
237 uint32_t u30Reserved1 : 30;
238 /** The page state. */
239 uint32_t u2State : 2;
240 } Free;
241
242#else /* 32-bit */
243 /** Unsigned integer view. */
244 uint32_t u;
245
246 /** The common view. */
247 struct GMMPAGECOMMON
248 {
249 uint32_t uStuff : 30;
250 /** The page state. */
251 uint32_t u2State : 2;
252 } Common;
253
254 /** The view of a private page. */
255 struct GMMPAGEPRIVATE
256 {
257 /** The guest page frame number. (Max addressable: 2 ^ 36) */
258 uint32_t pfn : 24;
259 /** The GVM handle. (127 VMs) */
260 uint32_t hGVM : 7;
261 /** The top page state bit, MBZ. */
262 uint32_t fZero : 1;
263 } Private;
264
265 /** The view of a shared page. */
266 struct GMMPAGESHARED
267 {
268 /** The reference count. */
269 uint32_t cRefs : 30;
270 /** The page state. */
271 uint32_t u2State : 2;
272 } Shared;
273
274 /** The view of a free page. */
275 struct GMMPAGEFREE
276 {
277 /** The index of the next page in the free list. UINT16_MAX is NIL. */
278 uint32_t iNext : 16;
279 /** Reserved. Checksum or something? */
280 uint32_t u14Reserved : 14;
281 /** The page state. */
282 uint32_t u2State : 2;
283 } Free;
284#endif
285} GMMPAGE;
286AssertCompileSize(GMMPAGE, sizeof(RTHCUINTPTR));
287/** Pointer to a GMMPAGE. */
288typedef GMMPAGE *PGMMPAGE;
289
290
291/** @name The Page States.
292 * @{ */
293/** A private page. */
294#define GMM_PAGE_STATE_PRIVATE 0
295/** A private page - alternative value used on the 32-bit implementation.
296 * This will never be used on 64-bit hosts. */
297#define GMM_PAGE_STATE_PRIVATE_32 1
298/** A shared page. */
299#define GMM_PAGE_STATE_SHARED 2
300/** A free page. */
301#define GMM_PAGE_STATE_FREE 3
302/** @} */
303
304
305/** @def GMM_PAGE_IS_PRIVATE
306 *
307 * @returns true if private, false if not.
308 * @param pPage The GMM page.
309 */
310#if HC_ARCH_BITS == 64
311# define GMM_PAGE_IS_PRIVATE(pPage) ( (pPage)->Common.u2State == GMM_PAGE_STATE_PRIVATE )
312#else
313# define GMM_PAGE_IS_PRIVATE(pPage) ( (pPage)->Private.fZero == 0 )
314#endif
315
316/** @def GMM_PAGE_IS_SHARED
317 *
318 * @returns true if shared, false if not.
319 * @param pPage The GMM page.
320 */
321#define GMM_PAGE_IS_SHARED(pPage) ( (pPage)->Common.u2State == GMM_PAGE_STATE_SHARED )
322
323/** @def GMM_PAGE_IS_FREE
324 *
325 * @returns true if free, false if not.
326 * @param pPage The GMM page.
327 */
328#define GMM_PAGE_IS_FREE(pPage) ( (pPage)->Common.u2State == GMM_PAGE_STATE_FREE )
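
/*
 * Illustrative sketch (not part of the original source): how the three state
 * checks above combine to classify a page.  gmmR0PageStateName is a
 * hypothetical helper, not an existing GMM function.
 */
#if 0
DECLINLINE(const char *) gmmR0PageStateName(PGMMPAGE pPage)
{
    if (GMM_PAGE_IS_PRIVATE(pPage))
        return "private";
    if (GMM_PAGE_IS_SHARED(pPage))
        return "shared";
    if (GMM_PAGE_IS_FREE(pPage))
        return "free";
    return "bogus";
}
#endif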
329
330/** @def GMM_PAGE_PFN_LAST
331 * The last valid guest pfn range.
332 * @remark Some of the values outside the range have special meanings,
333 * see GMM_PAGE_PFN_UNSHAREABLE.
334 */
335#if HC_ARCH_BITS == 64
336# define GMM_PAGE_PFN_LAST UINT32_C(0xfffffff0)
337#else
338# define GMM_PAGE_PFN_LAST UINT32_C(0x00fffff0)
339#endif
340AssertCompile(GMM_PAGE_PFN_LAST == (GMM_GCPHYS_LAST >> PAGE_SHIFT));
341
342/** @def GMM_PAGE_PFN_UNSHAREABLE
343 * Indicates that this page isn't used for normal guest memory and thus isn't shareable.
344 */
345#if HC_ARCH_BITS == 64
346# define GMM_PAGE_PFN_UNSHAREABLE UINT32_C(0xfffffff1)
347#else
348# define GMM_PAGE_PFN_UNSHAREABLE UINT32_C(0x00fffff1)
349#endif
350AssertCompile(GMM_PAGE_PFN_UNSHAREABLE == (GMM_GCPHYS_UNSHAREABLE >> PAGE_SHIFT));
351
352
353/**
354 * A GMM allocation chunk ring-3 mapping record.
355 *
356 * This should really be associated with a session and not a VM, but
357 * it's simpler to associate it with a VM and clean it up when the VM object
358 * is destroyed.
359 */
360typedef struct GMMCHUNKMAP
361{
362 /** The mapping object. */
363 RTR0MEMOBJ MapObj;
364 /** The VM owning the mapping. */
365 PGVM pGVM;
366} GMMCHUNKMAP;
367/** Pointer to a GMM allocation chunk mapping. */
368typedef struct GMMCHUNKMAP *PGMMCHUNKMAP;
369
370typedef enum GMMCHUNKTYPE
371{
372 GMMCHUNKTYPE_INVALID = 0,
373 GMMCHUNKTYPE_NON_CONTINUOUS = 1, /* 4 kb pages */
374 GMMCHUNKTYPE_CONTINUOUS = 2, /* one 2 MB continuous physical range. */
375 GMMCHUNKTYPE_32BIT_HACK = 0x7fffffff
376} GMMCHUNKTYPE;
377
378
379/**
380 * A GMM allocation chunk.
381 */
382typedef struct GMMCHUNK
383{
384 /** The AVL node core.
385 * The Key is the chunk ID. */
386 AVLU32NODECORE Core;
387 /** The memory object.
388 * Either from RTR0MemObjAllocPhysNC or RTR0MemObjLockUser depending on
389 * what the host can dish up. */
390 RTR0MEMOBJ MemObj;
391 /** Pointer to the next chunk in the free list. */
392 PGMMCHUNK pFreeNext;
393 /** Pointer to the previous chunk in the free list. */
394 PGMMCHUNK pFreePrev;
395 /** Pointer to the free set this chunk belongs to. NULL for
396 * chunks with no free pages. */
397 PGMMCHUNKFREESET pSet;
398 /** Pointer to an array of mappings. */
399 PGMMCHUNKMAP paMappings;
400 /** The number of mappings. */
401 uint16_t cMappings;
402 /** The head of the list of free pages. UINT16_MAX is the NIL value. */
403 uint16_t iFreeHead;
404 /** The number of free pages. */
405 uint16_t cFree;
406 /** The GVM handle of the VM that first allocated pages from this chunk, this
407 * is used as a preference when there are several chunks to choose from.
408 * When in bound memory mode this isn't a preference any longer. */
409 uint16_t hGVM;
410 /** The number of private pages. */
411 uint16_t cPrivate;
412 /** The number of shared pages. */
413 uint16_t cShared;
414 /** Chunk type */
415 GMMCHUNKTYPE enmType;
416 /** The pages. */
417 GMMPAGE aPages[GMM_CHUNK_SIZE >> PAGE_SHIFT];
418} GMMCHUNK;
419
420
421/**
422 * An allocation chunk TLB entry.
423 */
424typedef struct GMMCHUNKTLBE
425{
426 /** The chunk id. */
427 uint32_t idChunk;
428 /** Pointer to the chunk. */
429 PGMMCHUNK pChunk;
430} GMMCHUNKTLBE;
431/** Pointer to an allocation chunk TLB entry. */
432typedef GMMCHUNKTLBE *PGMMCHUNKTLBE;
433
434
435/** The number of entries in the allocation chunk TLB. */
436#define GMM_CHUNKTLB_ENTRIES 32
437/** Gets the TLB entry index for the given Chunk ID. */
438#define GMM_CHUNKTLB_IDX(idChunk) ( (idChunk) & (GMM_CHUNKTLB_ENTRIES - 1) )
439
440/**
441 * An allocation chunk TLB.
442 */
443typedef struct GMMCHUNKTLB
444{
445 /** The TLB entries. */
446 GMMCHUNKTLBE aEntries[GMM_CHUNKTLB_ENTRIES];
447} GMMCHUNKTLB;
448/** Pointer to an allocation chunk TLB. */
449typedef GMMCHUNKTLB *PGMMCHUNKTLB;
450
451
452/** The GMMCHUNK::cFree shift count. */
453#define GMM_CHUNK_FREE_SET_SHIFT 4
454/** The GMMCHUNK::cFree mask for use when considering relinking a chunk. */
455#define GMM_CHUNK_FREE_SET_MASK 15
456/** The number of lists in a set. */
457#define GMM_CHUNK_FREE_SET_LISTS (GMM_CHUNK_NUM_PAGES >> GMM_CHUNK_FREE_SET_SHIFT)
458
459/**
460 * A set of free chunks.
461 */
462typedef struct GMMCHUNKFREESET
463{
464 /** The number of free pages in the set. */
465 uint64_t cFreePages;
466 /** Chunks ordered by increasing number of free pages. */
467 PGMMCHUNK apLists[GMM_CHUNK_FREE_SET_LISTS];
468} GMMCHUNKFREESET;
469
470
471/**
472 * The GMM instance data.
473 */
474typedef struct GMM
475{
476 /** Magic / eye catcher. GMM_MAGIC */
477 uint32_t u32Magic;
478 /** The fast mutex protecting the GMM.
479 * More fine grained locking can be implemented later if necessary. */
480 RTSEMFASTMUTEX Mtx;
481 /** The chunk tree. */
482 PAVLU32NODECORE pChunks;
483 /** The chunk TLB. */
484 GMMCHUNKTLB ChunkTLB;
485 /** The private free set. */
486 GMMCHUNKFREESET Private;
487 /** The shared free set. */
488 GMMCHUNKFREESET Shared;
489
490 /** Shared module tree (global). */
491 /** @todo separate trees for distinctly different guest OSes. */
492 PAVLGCPTRNODECORE pGlobalSharedModuleTree;
493
494 /** The maximum number of pages we're allowed to allocate.
495 * @gcfgm 64-bit GMM/MaxPages Direct.
496 * @gcfgm 32-bit GMM/PctPages Relative to the number of host pages. */
497 uint64_t cMaxPages;
498 /** The number of pages that have been reserved.
499 * The deal is that cReservedPages - cOverCommittedPages <= cMaxPages. */
500 uint64_t cReservedPages;
501 /** The number of pages that we have over-committed in reservations. */
502 uint64_t cOverCommittedPages;
503 /** The number of actually allocated (committed if you like) pages. */
504 uint64_t cAllocatedPages;
505 /** The number of pages that are shared. A subset of cAllocatedPages. */
506 uint64_t cSharedPages;
507 /** The number of pages that are actually shared between VMs. */
508 uint64_t cDuplicatePages;
509 /** The number of pages that are shared that have been left behind by
510 * VMs not doing proper cleanups. */
511 uint64_t cLeftBehindSharedPages;
512 /** The number of allocation chunks.
513 * (The number of pages we've allocated from the host can be derived from this.) */
514 uint32_t cChunks;
515 /** The number of current ballooned pages. */
516 uint64_t cBalloonedPages;
517
518 /** The legacy allocation mode indicator.
519 * This is determined at initialization time. */
520 bool fLegacyAllocationMode;
521 /** The bound memory mode indicator.
522 * When set, the memory will be bound to a specific VM and never
523 * shared. This is always set if fLegacyAllocationMode is set.
524 * (Also determined at initialization time.) */
525 bool fBoundMemoryMode;
526 /** The number of registered VMs. */
527 uint16_t cRegisteredVMs;
528
529 /** The previously allocated Chunk ID.
530 * Used as a hint to avoid scanning the whole bitmap. */
531 uint32_t idChunkPrev;
532 /** Chunk ID allocation bitmap.
533 * Bits of allocated IDs are set, free ones are clear.
534 * The NIL id (0) is marked allocated. */
535 uint32_t bmChunkId[(GMM_CHUNKID_LAST + 1 + 31) / 32];
536} GMM;
537/** Pointer to the GMM instance. */
538typedef GMM *PGMM;
539
540/** The value of GMM::u32Magic (Katsuhiro Otomo). */
541#define GMM_MAGIC 0x19540414
542
543
544/*******************************************************************************
545* Global Variables *
546*******************************************************************************/
547/** Pointer to the GMM instance data. */
548static PGMM g_pGMM = NULL;
549
550/** Macro for obtaining and validating the g_pGMM pointer.
551 * On failure it will return from the invoking function with the specified return value.
552 *
553 * @param pGMM The name of the pGMM variable.
554 * @param rc The return value on failure. Use VERR_INTERNAL_ERROR for
555 * VBox status codes.
556 */
557#define GMM_GET_VALID_INSTANCE(pGMM, rc) \
558 do { \
559 (pGMM) = g_pGMM; \
560 AssertPtrReturn((pGMM), (rc)); \
561 AssertMsgReturn((pGMM)->u32Magic == GMM_MAGIC, ("%p - %#x\n", (pGMM), (pGMM)->u32Magic), (rc)); \
562 } while (0)
563
564/** Macro for obtaining and validating the g_pGMM pointer, void function variant.
565 * On failure it will return from the invoking function.
566 *
567 * @param pGMM The name of the pGMM variable.
568 */
569#define GMM_GET_VALID_INSTANCE_VOID(pGMM) \
570 do { \
571 (pGMM) = g_pGMM; \
572 AssertPtrReturnVoid((pGMM)); \
573 AssertMsgReturnVoid((pGMM)->u32Magic == GMM_MAGIC, ("%p - %#x\n", (pGMM), (pGMM)->u32Magic)); \
574 } while (0)
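
/*
 * Illustrative sketch (not part of the original source): the typical shape of
 * a GMMR0 entry point using the instance getter above.  gmmR0SomeOperation is
 * a hypothetical function, not an existing GMM API.
 */
#if 0
GMMR0DECL(int) gmmR0SomeOperation(void)
{
    PGMM pGMM;
    GMM_GET_VALID_INSTANCE(pGMM, VERR_INTERNAL_ERROR);

    int rc = RTSemFastMutexRequest(pGMM->Mtx);
    AssertRCReturn(rc, rc);
    /* ... manipulate the GMM instance data ... */
    RTSemFastMutexRelease(pGMM->Mtx);
    return VINF_SUCCESS;
}
#endif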
575
576
577/** @def GMM_CHECK_SANITY_UPON_ENTERING
578 * Checks the sanity of the GMM instance data before making changes.
579 *
580 * This macro is a stub by default and must be enabled manually in the code.
581 *
582 * @returns true if sane, false if not.
583 * @param pGMM The name of the pGMM variable.
584 */
585#if defined(VBOX_STRICT) && 0
586# define GMM_CHECK_SANITY_UPON_ENTERING(pGMM) (gmmR0SanityCheck((pGMM), __PRETTY_FUNCTION__, __LINE__) == 0)
587#else
588# define GMM_CHECK_SANITY_UPON_ENTERING(pGMM) (true)
589#endif
590
591/** @def GMM_CHECK_SANITY_UPON_LEAVING
592 * Checks the sanity of the GMM instance data after making changes.
593 *
594 * This macro is a stub by default and must be enabled manually in the code.
595 *
596 * @returns true if sane, false if not.
597 * @param pGMM The name of the pGMM variable.
598 */
599#if defined(VBOX_STRICT) && 0
600# define GMM_CHECK_SANITY_UPON_LEAVING(pGMM) (gmmR0SanityCheck((pGMM), __PRETTY_FUNCTION__, __LINE__) == 0)
601#else
602# define GMM_CHECK_SANITY_UPON_LEAVING(pGMM) (true)
603#endif
604
605/** @def GMM_CHECK_SANITY_IN_LOOPS
606 * Checks the sanity of the GMM instance in the allocation loops.
607 *
608 * This macro is a stub by default and must be enabled manually in the code.
609 *
610 * @returns true if sane, false if not.
611 * @param pGMM The name of the pGMM variable.
612 */
613#if defined(VBOX_STRICT) && 0
614# define GMM_CHECK_SANITY_IN_LOOPS(pGMM) (gmmR0SanityCheck((pGMM), __PRETTY_FUNCTION__, __LINE__) == 0)
615#else
616# define GMM_CHECK_SANITY_IN_LOOPS(pGMM) (true)
617#endif
618
619
620/*******************************************************************************
621* Internal Functions *
622*******************************************************************************/
623static DECLCALLBACK(int) gmmR0TermDestroyChunk(PAVLU32NODECORE pNode, void *pvGMM);
624static DECLCALLBACK(int) gmmR0CleanupVMScanChunk(PAVLU32NODECORE pNode, void *pvGMM);
625static DECLCALLBACK(int) gmmR0CleanupSharedModule(PAVLGCPTRNODECORE pNode, void *pvGVM);
626/*static*/ DECLCALLBACK(int) gmmR0CleanupVMDestroyChunk(PAVLU32NODECORE pNode, void *pvGVM);
627DECLINLINE(void) gmmR0LinkChunk(PGMMCHUNK pChunk, PGMMCHUNKFREESET pSet);
628DECLINLINE(void) gmmR0UnlinkChunk(PGMMCHUNK pChunk);
629static uint32_t gmmR0SanityCheck(PGMM pGMM, const char *pszFunction, unsigned uLineNo);
630static void gmmR0FreeChunk(PGMM pGMM, PGVM pGVM, PGMMCHUNK pChunk);
631static void gmmR0FreeSharedPage(PGMM pGMM, uint32_t idPage, PGMMPAGE pPage);
632static int gmmR0UnmapChunk(PGMM pGMM, PGVM pGVM, PGMMCHUNK pChunk);
633
634
635
636/**
637 * Initializes the GMM component.
638 *
639 * This is called when the VMMR0.r0 module is loaded and protected by the
640 * loader semaphore.
641 *
642 * @returns VBox status code.
643 */
644GMMR0DECL(int) GMMR0Init(void)
645{
646 LogFlow(("GMMInit:\n"));
647
648 /*
649 * Allocate the instance data and the lock(s).
650 */
651 PGMM pGMM = (PGMM)RTMemAllocZ(sizeof(*pGMM));
652 if (!pGMM)
653 return VERR_NO_MEMORY;
654 pGMM->u32Magic = GMM_MAGIC;
655 for (unsigned i = 0; i < RT_ELEMENTS(pGMM->ChunkTLB.aEntries); i++)
656 pGMM->ChunkTLB.aEntries[i].idChunk = NIL_GMM_CHUNKID;
657 ASMBitSet(&pGMM->bmChunkId[0], NIL_GMM_CHUNKID);
658
659 int rc = RTSemFastMutexCreate(&pGMM->Mtx);
660 if (RT_SUCCESS(rc))
661 {
662 /*
663 * Check and see if RTR0MemObjAllocPhysNC works.
664 */
665#if 0 /* later, see #3170. */
666 RTR0MEMOBJ MemObj;
667 rc = RTR0MemObjAllocPhysNC(&MemObj, _64K, NIL_RTHCPHYS);
668 if (RT_SUCCESS(rc))
669 {
670 rc = RTR0MemObjFree(MemObj, true);
671 AssertRC(rc);
672 }
673 else if (rc == VERR_NOT_SUPPORTED)
674 pGMM->fLegacyAllocationMode = pGMM->fBoundMemoryMode = true;
675 else
676 SUPR0Printf("GMMR0Init: RTR0MemObjAllocPhysNC(,64K,Any) -> %d!\n", rc);
677#else
678# if defined(RT_OS_WINDOWS) || (defined(RT_OS_SOLARIS) && ARCH_BITS == 64) || defined(RT_OS_LINUX) || defined(RT_OS_FREEBSD)
679 pGMM->fLegacyAllocationMode = false;
680# if ARCH_BITS == 32
681 /* Don't reuse possibly partial chunks because of the virtual address space limitation. */
682 pGMM->fBoundMemoryMode = true;
683# else
684 pGMM->fBoundMemoryMode = false;
685# endif
686# else
687 pGMM->fLegacyAllocationMode = true;
688 pGMM->fBoundMemoryMode = true;
689# endif
690#endif
691
692 /*
693 * Query system page count and guess a reasonable cMaxPages value.
694 */
695 pGMM->cMaxPages = UINT32_MAX; /** @todo IPRT function for query ram size and such. */
696
697 g_pGMM = pGMM;
698 LogFlow(("GMMInit: pGMM=%p fLegacyAllocationMode=%RTbool fBoundMemoryMode=%RTbool\n", pGMM, pGMM->fLegacyAllocationMode, pGMM->fBoundMemoryMode));
699 return VINF_SUCCESS;
700 }
701
702 RTMemFree(pGMM);
703 SUPR0Printf("GMMR0Init: failed! rc=%d\n", rc);
704 return rc;
705}
706
707
708/**
709 * Terminates the GMM component.
710 */
711GMMR0DECL(void) GMMR0Term(void)
712{
713 LogFlow(("GMMTerm:\n"));
714
715 /*
716 * Take care / be paranoid...
717 */
718 PGMM pGMM = g_pGMM;
719 if (!VALID_PTR(pGMM))
720 return;
721 if (pGMM->u32Magic != GMM_MAGIC)
722 {
723 SUPR0Printf("GMMR0Term: u32Magic=%#x\n", pGMM->u32Magic);
724 return;
725 }
726
727 /*
728 * Undo what init did and free all the resources we've acquired.
729 */
730 /* Destroy the fundamentals. */
731 g_pGMM = NULL;
732 pGMM->u32Magic++;
733 RTSemFastMutexDestroy(pGMM->Mtx);
734 pGMM->Mtx = NIL_RTSEMFASTMUTEX;
735
736 /* free any chunks still hanging around. */
737 RTAvlU32Destroy(&pGMM->pChunks, gmmR0TermDestroyChunk, pGMM);
738
739 /* finally the instance data itself. */
740 RTMemFree(pGMM);
741 LogFlow(("GMMTerm: done\n"));
742}
743
744
745/**
746 * RTAvlU32Destroy callback.
747 *
748 * @returns 0
749 * @param pNode The node to destroy.
750 * @param pvGMM The GMM handle.
751 */
752static DECLCALLBACK(int) gmmR0TermDestroyChunk(PAVLU32NODECORE pNode, void *pvGMM)
753{
754 PGMMCHUNK pChunk = (PGMMCHUNK)pNode;
755
756 if (pChunk->cFree != (GMM_CHUNK_SIZE >> PAGE_SHIFT))
757 SUPR0Printf("GMMR0Term: %p/%#x: cFree=%d cPrivate=%d cShared=%d cMappings=%d\n", pChunk,
758 pChunk->Core.Key, pChunk->cFree, pChunk->cPrivate, pChunk->cShared, pChunk->cMappings);
759
760 int rc = RTR0MemObjFree(pChunk->MemObj, true /* fFreeMappings */);
761 if (RT_FAILURE(rc))
762 {
763 SUPR0Printf("GMMR0Term: %p/%#x: RTR0MemObjFree(%p,true) -> %d (cMappings=%d)\n", pChunk,
764 pChunk->Core.Key, pChunk->MemObj, rc, pChunk->cMappings);
765 AssertRC(rc);
766 }
767 pChunk->MemObj = NIL_RTR0MEMOBJ;
768
769 RTMemFree(pChunk->paMappings);
770 pChunk->paMappings = NULL;
771
772 RTMemFree(pChunk);
773 NOREF(pvGMM);
774 return 0;
775}
776
777
778/**
779 * Initializes the per-VM data for the GMM.
780 *
781 * This is called from within the GVMM lock (from GVMMR0CreateVM)
782 * and should only initialize the data members so GMMR0CleanupVM
783 * can deal with them. We reserve no memory or anything here;
784 * that's done later in GMMR0InitVM.
785 *
786 * @param pGVM Pointer to the Global VM structure.
787 */
788GMMR0DECL(void) GMMR0InitPerVMData(PGVM pGVM)
789{
790 AssertCompile(RT_SIZEOFMEMB(GVM,gmm.s) <= RT_SIZEOFMEMB(GVM,gmm.padding));
791
792 pGVM->gmm.s.enmPolicy = GMMOCPOLICY_INVALID;
793 pGVM->gmm.s.enmPriority = GMMPRIORITY_INVALID;
794 pGVM->gmm.s.fMayAllocate = false;
795}
796
797
798/**
799 * Cleans up when a VM is terminating.
800 *
801 * @param pGVM Pointer to the Global VM structure.
802 */
803GMMR0DECL(void) GMMR0CleanupVM(PGVM pGVM)
804{
805 LogFlow(("GMMR0CleanupVM: pGVM=%p:{.pVM=%p, .hSelf=%#x}\n", pGVM, pGVM->pVM, pGVM->hSelf));
806
807 PGMM pGMM;
808 GMM_GET_VALID_INSTANCE_VOID(pGMM);
809
810 int rc = RTSemFastMutexRequest(pGMM->Mtx);
811 AssertRC(rc);
812 GMM_CHECK_SANITY_UPON_ENTERING(pGMM);
813
814#ifdef VBOX_WITH_PAGE_SHARING
815 /* Clean up all registered shared modules. */
816 RTAvlGCPtrDestroy(&pGVM->gmm.s.pSharedModuleTree, gmmR0CleanupSharedModule, pGVM);
817#endif
818
819 /*
820 * The policy is 'INVALID' until the initial reservation
821 * request has been serviced.
822 */
823 if ( pGVM->gmm.s.enmPolicy > GMMOCPOLICY_INVALID
824 && pGVM->gmm.s.enmPolicy < GMMOCPOLICY_END)
825 {
826 /*
827 * If it's the last VM around, we can skip walking all the chunks looking
828 * for the pages owned by this VM and instead flush the whole shebang.
829 *
830 * This takes care of the eventuality that a VM has left shared page
831 * references behind (shouldn't happen of course, but you never know).
832 */
833 Assert(pGMM->cRegisteredVMs);
834 pGMM->cRegisteredVMs--;
835#if 0 /* disabled so it won't hide bugs. */
836 if (!pGMM->cRegisteredVMs)
837 {
838 RTAvlU32Destroy(&pGMM->pChunks, gmmR0CleanupVMDestroyChunk, pGMM);
839
840 for (unsigned i = 0; i < RT_ELEMENTS(pGMM->ChunkTLB.aEntries); i++)
841 {
842 pGMM->ChunkTLB.aEntries[i].idChunk = NIL_GMM_CHUNKID;
843 pGMM->ChunkTLB.aEntries[i].pChunk = NULL;
844 }
845
846 memset(&pGMM->Private, 0, sizeof(pGMM->Private));
847 memset(&pGMM->Shared, 0, sizeof(pGMM->Shared));
848
849 memset(&pGMM->bmChunkId[0], 0, sizeof(pGMM->bmChunkId));
850 ASMBitSet(&pGMM->bmChunkId[0], NIL_GMM_CHUNKID);
851
852 pGMM->cReservedPages = 0;
853 pGMM->cOverCommittedPages = 0;
854 pGMM->cAllocatedPages = 0;
855 pGMM->cSharedPages = 0;
856 pGMM->cDuplicatePages = 0;
857 pGMM->cLeftBehindSharedPages = 0;
858 pGMM->cChunks = 0;
859 pGMM->cBalloonedPages = 0;
860 }
861 else
862#endif
863 {
864 /*
865 * Walk the entire pool looking for pages that belong to this VM
866 * and left over mappings. (This'll only catch private pages, shared
867 * pages will be 'left behind'.)
868 */
869 /** @todo this might be kind of expensive with a lot of VMs and
870 * memory hanging around... */
871 uint64_t cPrivatePages = pGVM->gmm.s.cPrivatePages; /* save */
872 RTAvlU32DoWithAll(&pGMM->pChunks, true /* fFromLeft */, gmmR0CleanupVMScanChunk, pGVM);
873 if (pGVM->gmm.s.cPrivatePages)
874 SUPR0Printf("GMMR0CleanupVM: hGVM=%#x has %#x private pages that cannot be found!\n", pGVM->hSelf, pGVM->gmm.s.cPrivatePages);
875 pGMM->cAllocatedPages -= cPrivatePages;
876
877 /* free empty chunks. */
878 if (cPrivatePages)
879 {
880 PGMMCHUNK pCur = pGMM->Private.apLists[RT_ELEMENTS(pGMM->Private.apLists) - 1];
881 while (pCur)
882 {
883 PGMMCHUNK pNext = pCur->pFreeNext;
884 if ( pCur->cFree == GMM_CHUNK_NUM_PAGES
885 && ( !pGMM->fBoundMemoryMode
886 || pCur->hGVM == pGVM->hSelf))
887 gmmR0FreeChunk(pGMM, pGVM, pCur);
888 pCur = pNext;
889 }
890 }
891
892 /* account for shared pages that weren't freed. */
893 if (pGVM->gmm.s.cSharedPages)
894 {
895 Assert(pGMM->cSharedPages >= pGVM->gmm.s.cSharedPages);
896 SUPR0Printf("GMMR0CleanupVM: hGVM=%#x left %#x shared pages behind!\n", pGVM->hSelf, pGVM->gmm.s.cSharedPages);
897 pGMM->cLeftBehindSharedPages += pGVM->gmm.s.cSharedPages;
898 }
899
900 /* Clean up balloon statistics in case the VM process crashed. */
901 Assert(pGMM->cBalloonedPages >= pGVM->gmm.s.cBalloonedPages);
902 pGMM->cBalloonedPages -= pGVM->gmm.s.cBalloonedPages;
903
904 /*
905 * Update the over-commitment management statistics.
906 */
907 pGMM->cReservedPages -= pGVM->gmm.s.Reserved.cBasePages
908 + pGVM->gmm.s.Reserved.cFixedPages
909 + pGVM->gmm.s.Reserved.cShadowPages;
910 switch (pGVM->gmm.s.enmPolicy)
911 {
912 case GMMOCPOLICY_NO_OC:
913 break;
914 default:
915 /** @todo Update GMM->cOverCommittedPages */
916 break;
917 }
918 }
919 }
920
921 /* zap the GVM data. */
922 pGVM->gmm.s.enmPolicy = GMMOCPOLICY_INVALID;
923 pGVM->gmm.s.enmPriority = GMMPRIORITY_INVALID;
924 pGVM->gmm.s.fMayAllocate = false;
925
926 GMM_CHECK_SANITY_UPON_LEAVING(pGMM);
927 RTSemFastMutexRelease(pGMM->Mtx);
928
929 LogFlow(("GMMR0CleanupVM: returns\n"));
930}
931
932
933/**
934 * RTAvlU32DoWithAll callback.
935 *
936 * @returns 0
937 * @param pNode The node to search.
938 * @param pvGVM Pointer to the shared VM structure.
939 */
940static DECLCALLBACK(int) gmmR0CleanupVMScanChunk(PAVLU32NODECORE pNode, void *pvGVM)
941{
942 PGMMCHUNK pChunk = (PGMMCHUNK)pNode;
943 PGVM pGVM = (PGVM)pvGVM;
944
945 /*
946 * Look for pages belonging to the VM.
947 * (Perform some internal checks while we're scanning.)
948 */
949#ifndef VBOX_STRICT
950 if (pChunk->cFree != (GMM_CHUNK_SIZE >> PAGE_SHIFT))
951#endif
952 {
953 unsigned cPrivate = 0;
954 unsigned cShared = 0;
955 unsigned cFree = 0;
956
957 gmmR0UnlinkChunk(pChunk); /* avoiding cFreePages updates. */
958
959 uint16_t hGVM = pGVM->hSelf;
960 unsigned iPage = (GMM_CHUNK_SIZE >> PAGE_SHIFT);
961 while (iPage-- > 0)
962 if (GMM_PAGE_IS_PRIVATE(&pChunk->aPages[iPage]))
963 {
964 if (pChunk->aPages[iPage].Private.hGVM == hGVM)
965 {
966 /*
967 * Free the page.
968 *
969 * The reason for not using gmmR0FreePrivatePage here is that we
970 * must *not* cause the chunk to be freed from under us - we're in
971 * an AVL tree walk here.
972 */
973 pChunk->aPages[iPage].u = 0;
974 pChunk->aPages[iPage].Free.iNext = pChunk->iFreeHead;
975 pChunk->aPages[iPage].Free.u2State = GMM_PAGE_STATE_FREE;
976 pChunk->iFreeHead = iPage;
977 pChunk->cPrivate--;
978 pChunk->cFree++;
979 pGVM->gmm.s.cPrivatePages--;
980 cFree++;
981 }
982 else
983 cPrivate++;
984 }
985 else if (GMM_PAGE_IS_FREE(&pChunk->aPages[iPage]))
986 cFree++;
987 else
988 cShared++;
989
990 gmmR0LinkChunk(pChunk, pChunk->cShared ? &g_pGMM->Shared : &g_pGMM->Private);
991
992 /*
993 * Did it add up?
994 */
995 if (RT_UNLIKELY( pChunk->cFree != cFree
996 || pChunk->cPrivate != cPrivate
997 || pChunk->cShared != cShared))
998 {
999 SUPR0Printf("gmmR0CleanupVMScanChunk: Chunk %p/%#x has bogus stats - free=%d/%d private=%d/%d shared=%d/%d\n",
1000 pChunk, pChunk->Core.Key, pChunk->cFree, cFree, pChunk->cPrivate, cPrivate, pChunk->cShared, cShared);
1001 pChunk->cFree = cFree;
1002 pChunk->cPrivate = cPrivate;
1003 pChunk->cShared = cShared;
1004 }
1005 }
1006
1007 /*
1008 * Look for the mapping belonging to the terminating VM.
1009 */
1010 for (unsigned i = 0; i < pChunk->cMappings; i++)
1011 if (pChunk->paMappings[i].pGVM == pGVM)
1012 {
1013 RTR0MEMOBJ MemObj = pChunk->paMappings[i].MapObj;
1014
1015 pChunk->cMappings--;
1016 if (i < pChunk->cMappings)
1017 pChunk->paMappings[i] = pChunk->paMappings[pChunk->cMappings];
1018 pChunk->paMappings[pChunk->cMappings].pGVM = NULL;
1019 pChunk->paMappings[pChunk->cMappings].MapObj = NIL_RTR0MEMOBJ;
1020
1021 int rc = RTR0MemObjFree(MemObj, false /* fFreeMappings (NA) */);
1022 if (RT_FAILURE(rc))
1023 {
1024 SUPR0Printf("gmmR0CleanupVMScanChunk: %p/%#x: mapping #%x: RTR0MemObjFree(%p,false) -> %d \n",
1025 pChunk, pChunk->Core.Key, i, MemObj, rc);
1026 AssertRC(rc);
1027 }
1028 break;
1029 }
1030
1031 /*
1032 * If not in bound memory mode, we should reset the hGVM field
1033 * if it has our handle in it.
1034 */
1035 if (pChunk->hGVM == pGVM->hSelf)
1036 {
1037 if (!g_pGMM->fBoundMemoryMode)
1038 pChunk->hGVM = NIL_GVM_HANDLE;
1039 else if (pChunk->cFree != GMM_CHUNK_NUM_PAGES)
1040 {
1041 SUPR0Printf("gmmR0CleanupVMScanChunk: %p/%#x: cFree=%#x - it should be 0 in bound mode!\n",
1042 pChunk, pChunk->Core.Key, pChunk->cFree);
1043 AssertMsgFailed(("%p/%#x: cFree=%#x - it should be 0 in bound mode!\n", pChunk, pChunk->Core.Key, pChunk->cFree));
1044
1045 gmmR0UnlinkChunk(pChunk);
1046 pChunk->cFree = GMM_CHUNK_NUM_PAGES;
1047 gmmR0LinkChunk(pChunk, pChunk->cShared ? &g_pGMM->Shared : &g_pGMM->Private);
1048 }
1049 }
1050
1051 return 0;
1052}
1053
1054
1055/**
1056 * RTAvlU32Destroy callback for GMMR0CleanupVM.
1057 *
1058 * @returns 0
1059 * @param pNode The node (allocation chunk) to destroy.
1060 * @param pvGVM Pointer to the shared VM structure.
1061 */
1062/*static*/ DECLCALLBACK(int) gmmR0CleanupVMDestroyChunk(PAVLU32NODECORE pNode, void *pvGVM)
1063{
1064 PGMMCHUNK pChunk = (PGMMCHUNK)pNode;
1065 PGVM pGVM = (PGVM)pvGVM;
1066
1067 for (unsigned i = 0; i < pChunk->cMappings; i++)
1068 {
1069 if (pChunk->paMappings[i].pGVM != pGVM)
1070 SUPR0Printf("gmmR0CleanupVMDestroyChunk: %p/%#x: mapping #%x: pGVM=%p expected %p\n", pChunk,
1071 pChunk->Core.Key, i, pChunk->paMappings[i].pGVM, pGVM);
1072 int rc = RTR0MemObjFree(pChunk->paMappings[i].MapObj, false /* fFreeMappings (NA) */);
1073 if (RT_FAILURE(rc))
1074 {
1075 SUPR0Printf("gmmR0CleanupVMDestroyChunk: %p/%#x: mapping #%x: RTR0MemObjFree(%p,false) -> %d \n", pChunk,
1076 pChunk->Core.Key, i, pChunk->paMappings[i].MapObj, rc);
1077 AssertRC(rc);
1078 }
1079 }
1080
1081 int rc = RTR0MemObjFree(pChunk->MemObj, true /* fFreeMappings */);
1082 if (RT_FAILURE(rc))
1083 {
1084 SUPR0Printf("gmmR0CleanupVMDestroyChunk: %p/%#x: RTR0MemObjFree(%p,true) -> %d (cMappings=%d)\n", pChunk,
1085 pChunk->Core.Key, pChunk->MemObj, rc, pChunk->cMappings);
1086 AssertRC(rc);
1087 }
1088 pChunk->MemObj = NIL_RTR0MEMOBJ;
1089
1090 RTMemFree(pChunk->paMappings);
1091 pChunk->paMappings = NULL;
1092
1093 RTMemFree(pChunk);
1094 return 0;
1095}
1096
1097
1098/**
1099 * The initial resource reservations.
1100 *
1101 * This will make memory reservations according to policy and priority. If there aren't
1102 * sufficient resources available to sustain the VM this function will fail and all
1103 * future allocation requests will fail as well.
1104 *
1105 * These are just the initial reservations made very very early during the VM creation
1106 * process and will be adjusted later in the GMMR0UpdateReservation call after the
1107 * ring-3 init has completed.
1108 *
1109 * @returns VBox status code.
1110 * @retval VERR_GMM_MEMORY_RESERVATION_DECLINED
1111 * @retval VERR_GMM_
1112 *
1113 * @param pVM Pointer to the shared VM structure.
1114 * @param idCpu VCPU id
1115 * @param cBasePages The number of pages that may be allocated for the base RAM and ROMs.
1116 * This does not include MMIO2 and similar.
1117 * @param cShadowPages The number of pages that may be allocated for shadow paging structures.
1118 * @param cFixedPages The number of pages that may be allocated for fixed objects like the
1119 * hyper heap, MMIO2 and similar.
1120 * @param enmPolicy The OC policy to use on this VM.
1121 * @param enmPriority The priority in an out-of-memory situation.
1122 *
1123 * @thread The creator thread / EMT.
1124 */
1125GMMR0DECL(int) GMMR0InitialReservation(PVM pVM, VMCPUID idCpu, uint64_t cBasePages, uint32_t cShadowPages, uint32_t cFixedPages,
1126 GMMOCPOLICY enmPolicy, GMMPRIORITY enmPriority)
1127{
1128 LogFlow(("GMMR0InitialReservation: pVM=%p cBasePages=%#llx cShadowPages=%#x cFixedPages=%#x enmPolicy=%d enmPriority=%d\n",
1129 pVM, cBasePages, cShadowPages, cFixedPages, enmPolicy, enmPriority));
1130
1131 /*
1132 * Validate, get basics and take the semaphore.
1133 */
1134 PGMM pGMM;
1135 GMM_GET_VALID_INSTANCE(pGMM, VERR_INTERNAL_ERROR);
1136 PGVM pGVM;
1137 int rc = GVMMR0ByVMAndEMT(pVM, idCpu, &pGVM);
1138 if (RT_FAILURE(rc))
1139 return rc;
1140
1141 AssertReturn(cBasePages, VERR_INVALID_PARAMETER);
1142 AssertReturn(cShadowPages, VERR_INVALID_PARAMETER);
1143 AssertReturn(cFixedPages, VERR_INVALID_PARAMETER);
1144 AssertReturn(enmPolicy > GMMOCPOLICY_INVALID && enmPolicy < GMMOCPOLICY_END, VERR_INVALID_PARAMETER);
1145 AssertReturn(enmPriority > GMMPRIORITY_INVALID && enmPriority < GMMPRIORITY_END, VERR_INVALID_PARAMETER);
1146
1147 rc = RTSemFastMutexRequest(pGMM->Mtx);
1148 AssertRC(rc);
1149 if (GMM_CHECK_SANITY_UPON_ENTERING(pGMM))
1150 {
1151 if ( !pGVM->gmm.s.Reserved.cBasePages
1152 && !pGVM->gmm.s.Reserved.cFixedPages
1153 && !pGVM->gmm.s.Reserved.cShadowPages)
1154 {
1155 /*
1156 * Check if we can accommodate this.
1157 */
1158 /* ... later ... */
1159 if (RT_SUCCESS(rc))
1160 {
1161 /*
1162 * Update the records.
1163 */
1164 pGVM->gmm.s.Reserved.cBasePages = cBasePages;
1165 pGVM->gmm.s.Reserved.cFixedPages = cFixedPages;
1166 pGVM->gmm.s.Reserved.cShadowPages = cShadowPages;
1167 pGVM->gmm.s.enmPolicy = enmPolicy;
1168 pGVM->gmm.s.enmPriority = enmPriority;
1169 pGVM->gmm.s.fMayAllocate = true;
1170
1171 pGMM->cReservedPages += cBasePages + cFixedPages + cShadowPages;
1172 pGMM->cRegisteredVMs++;
1173 }
1174 }
1175 else
1176 rc = VERR_WRONG_ORDER;
1177 GMM_CHECK_SANITY_UPON_LEAVING(pGMM);
1178 }
1179 else
1180 rc = VERR_INTERNAL_ERROR_5;
1181 RTSemFastMutexRelease(pGMM->Mtx);
1182 LogFlow(("GMMR0InitialReservation: returns %Rrc\n", rc));
1183 return rc;
1184}
1185
1186
1187/**
1188 * VMMR0 request wrapper for GMMR0InitialReservation.
1189 *
1190 * @returns see GMMR0InitialReservation.
1191 * @param pVM Pointer to the shared VM structure.
1192 * @param idCpu VCPU id
1193 * @param pReq The request packet.
1194 */
1195GMMR0DECL(int) GMMR0InitialReservationReq(PVM pVM, VMCPUID idCpu, PGMMINITIALRESERVATIONREQ pReq)
1196{
1197 /*
1198 * Validate input and pass it on.
1199 */
1200 AssertPtrReturn(pVM, VERR_INVALID_POINTER);
1201 AssertPtrReturn(pReq, VERR_INVALID_POINTER);
1202 AssertMsgReturn(pReq->Hdr.cbReq == sizeof(*pReq), ("%#x != %#x\n", pReq->Hdr.cbReq, sizeof(*pReq)), VERR_INVALID_PARAMETER);
1203
1204 return GMMR0InitialReservation(pVM, idCpu, pReq->cBasePages, pReq->cShadowPages, pReq->cFixedPages, pReq->enmPolicy, pReq->enmPriority);
1205}
1206
1207
1208/**
1209 * This updates the memory reservation with the additional MMIO2 and ROM pages.
1210 *
1211 * @returns VBox status code.
1212 * @retval VERR_GMM_MEMORY_RESERVATION_DECLINED
1213 *
1214 * @param pVM Pointer to the shared VM structure.
1215 * @param idCpu VCPU id
1216 * @param cBasePages The number of pages that may be allocated for the base RAM and ROMs.
1217 * This does not include MMIO2 and similar.
1218 * @param cShadowPages The number of pages that may be allocated for shadow paging structures.
1219 * @param cFixedPages The number of pages that may be allocated for fixed objects like the
1220 * hyper heap, MMIO2 and similar.
1221 *
1222 * @thread EMT.
1223 */
1224GMMR0DECL(int) GMMR0UpdateReservation(PVM pVM, VMCPUID idCpu, uint64_t cBasePages, uint32_t cShadowPages, uint32_t cFixedPages)
1225{
1226 LogFlow(("GMMR0UpdateReservation: pVM=%p cBasePages=%#llx cShadowPages=%#x cFixedPages=%#x\n",
1227 pVM, cBasePages, cShadowPages, cFixedPages));
1228
1229 /*
1230 * Validate, get basics and take the semaphore.
1231 */
1232 PGMM pGMM;
1233 GMM_GET_VALID_INSTANCE(pGMM, VERR_INTERNAL_ERROR);
1234 PGVM pGVM;
1235 int rc = GVMMR0ByVMAndEMT(pVM, idCpu, &pGVM);
1236 if (RT_FAILURE(rc))
1237 return rc;
1238
1239 AssertReturn(cBasePages, VERR_INVALID_PARAMETER);
1240 AssertReturn(cShadowPages, VERR_INVALID_PARAMETER);
1241 AssertReturn(cFixedPages, VERR_INVALID_PARAMETER);
1242
1243 rc = RTSemFastMutexRequest(pGMM->Mtx);
1244 AssertRC(rc);
1245 if (GMM_CHECK_SANITY_UPON_ENTERING(pGMM))
1246 {
1247 if ( pGVM->gmm.s.Reserved.cBasePages
1248 && pGVM->gmm.s.Reserved.cFixedPages
1249 && pGVM->gmm.s.Reserved.cShadowPages)
1250 {
1251 /*
1252 * Check if we can accommodate this.
1253 */
1254 /* ... later ... */
1255 if (RT_SUCCESS(rc))
1256 {
1257 /*
1258 * Update the records.
1259 */
1260 pGMM->cReservedPages -= pGVM->gmm.s.Reserved.cBasePages
1261 + pGVM->gmm.s.Reserved.cFixedPages
1262 + pGVM->gmm.s.Reserved.cShadowPages;
1263 pGMM->cReservedPages += cBasePages + cFixedPages + cShadowPages;
1264
1265 pGVM->gmm.s.Reserved.cBasePages = cBasePages;
1266 pGVM->gmm.s.Reserved.cFixedPages = cFixedPages;
1267 pGVM->gmm.s.Reserved.cShadowPages = cShadowPages;
1268 }
1269 }
1270 else
1271 rc = VERR_WRONG_ORDER;
1272 GMM_CHECK_SANITY_UPON_LEAVING(pGMM);
1273 }
1274 else
1275 rc = VERR_INTERNAL_ERROR_5;
1276 RTSemFastMutexRelease(pGMM->Mtx);
1277 LogFlow(("GMMR0UpdateReservation: returns %Rrc\n", rc));
1278 return rc;
1279}
1280
1281
1282/**
1283 * VMMR0 request wrapper for GMMR0UpdateReservation.
1284 *
1285 * @returns see GMMR0UpdateReservation.
1286 * @param pVM Pointer to the shared VM structure.
1287 * @param idCpu VCPU id
1288 * @param pReq The request packet.
1289 */
1290GMMR0DECL(int) GMMR0UpdateReservationReq(PVM pVM, VMCPUID idCpu, PGMMUPDATERESERVATIONREQ pReq)
1291{
1292 /*
1293 * Validate input and pass it on.
1294 */
1295 AssertPtrReturn(pVM, VERR_INVALID_POINTER);
1296 AssertPtrReturn(pReq, VERR_INVALID_POINTER);
1297 AssertMsgReturn(pReq->Hdr.cbReq == sizeof(*pReq), ("%#x != %#x\n", pReq->Hdr.cbReq, sizeof(*pReq)), VERR_INVALID_PARAMETER);
1298
1299 return GMMR0UpdateReservation(pVM, idCpu, pReq->cBasePages, pReq->cShadowPages, pReq->cFixedPages);
1300}
1301
1302
1303/**
1304 * Performs sanity checks on a free set.
1305 *
1306 * @returns Error count.
1307 *
1308 * @param pGMM Pointer to the GMM instance.
1309 * @param pSet Pointer to the set.
1310 * @param pszSetName The set name.
1311 * @param pszFunction The function from which it was called.
1312 * @param uLineNo The line number.
1313 */
1314static uint32_t gmmR0SanityCheckSet(PGMM pGMM, PGMMCHUNKFREESET pSet, const char *pszSetName,
1315 const char *pszFunction, unsigned uLineNo)
1316{
1317 uint32_t cErrors = 0;
1318
1319 /*
1320 * Count the free pages in all the chunks and match it against pSet->cFreePages.
1321 */
1322 uint32_t cPages = 0;
1323 for (unsigned i = 0; i < RT_ELEMENTS(pSet->apLists); i++)
1324 {
1325 for (PGMMCHUNK pCur = pSet->apLists[i]; pCur; pCur = pCur->pFreeNext)
1326 {
1327 /** @todo check that the chunk is hashed into the right set. */
1328 cPages += pCur->cFree;
1329 }
1330 }
1331 if (RT_UNLIKELY(cPages != pSet->cFreePages))
1332 {
1333 SUPR0Printf("GMM insanity: found %#x pages in the %s set, expected %#x. (%s, line %u)\n",
1334 cPages, pszSetName, pSet->cFreePages, pszFunction, uLineNo);
1335 cErrors++;
1336 }
1337
1338 return cErrors;
1339}
1340
1341
1342/**
1343 * Performs some sanity checks on the GMM while owning the lock.
1344 *
1345 * @returns Error count.
1346 *
1347 * @param pGMM Pointer to the GMM instance.
1348 * @param pszFunction The function from which it is called.
1349 * @param uLineNo The line number.
1350 */
1351static uint32_t gmmR0SanityCheck(PGMM pGMM, const char *pszFunction, unsigned uLineNo)
1352{
1353 uint32_t cErrors = 0;
1354
1355 cErrors += gmmR0SanityCheckSet(pGMM, &pGMM->Private, "private", pszFunction, uLineNo);
1356 cErrors += gmmR0SanityCheckSet(pGMM, &pGMM->Shared, "shared", pszFunction, uLineNo);
1357 /** @todo add more sanity checks. */
1358
1359 return cErrors;
1360}
1361
1362
1363/**
1364 * Looks up a chunk in the tree and fills in the TLB entry for it.
1365 *
1366 * This is not expected to fail and will bitch if it does.
1367 *
1368 * @returns Pointer to the allocation chunk, NULL if not found.
1369 * @param pGMM Pointer to the GMM instance.
1370 * @param idChunk The ID of the chunk to find.
1371 * @param pTlbe Pointer to the TLB entry.
1372 */
1373static PGMMCHUNK gmmR0GetChunkSlow(PGMM pGMM, uint32_t idChunk, PGMMCHUNKTLBE pTlbe)
1374{
1375 PGMMCHUNK pChunk = (PGMMCHUNK)RTAvlU32Get(&pGMM->pChunks, idChunk);
1376 AssertMsgReturn(pChunk, ("Chunk %#x not found!\n", idChunk), NULL);
1377 pTlbe->idChunk = idChunk;
1378 pTlbe->pChunk = pChunk;
1379 return pChunk;
1380}
1381
1382
1383/**
1384 * Finds an allocation chunk.
1385 *
1386 * This is not expected to fail and will bitch if it does.
1387 *
1388 * @returns Pointer to the allocation chunk, NULL if not found.
1389 * @param pGMM Pointer to the GMM instance.
1390 * @param idChunk The ID of the chunk to find.
1391 */
1392DECLINLINE(PGMMCHUNK) gmmR0GetChunk(PGMM pGMM, uint32_t idChunk)
1393{
1394 /*
1395 * Do a TLB lookup, branch if not in the TLB.
1396 */
1397 PGMMCHUNKTLBE pTlbe = &pGMM->ChunkTLB.aEntries[GMM_CHUNKTLB_IDX(idChunk)];
1398 if ( pTlbe->idChunk != idChunk
1399 || !pTlbe->pChunk)
1400 return gmmR0GetChunkSlow(pGMM, idChunk, pTlbe);
1401 return pTlbe->pChunk;
1402}
1403
1404
1405/**
1406 * Finds a page.
1407 *
1408 * This is not expected to fail and will bitch if it does.
1409 *
1410 * @returns Pointer to the page, NULL if not found.
1411 * @param pGMM Pointer to the GMM instance.
1412 * @param idPage The ID of the page to find.
1413 */
1414DECLINLINE(PGMMPAGE) gmmR0GetPage(PGMM pGMM, uint32_t idPage)
1415{
1416 PGMMCHUNK pChunk = gmmR0GetChunk(pGMM, idPage >> GMM_CHUNKID_SHIFT);
1417 if (RT_LIKELY(pChunk))
1418 return &pChunk->aPages[idPage & GMM_PAGEID_IDX_MASK];
1419 return NULL;
1420}
1421
1422
1423/**
1424 * Unlinks the chunk from the free list it's currently on (if any).
1425 *
1426 * @param pChunk The allocation chunk.
1427 */
1428DECLINLINE(void) gmmR0UnlinkChunk(PGMMCHUNK pChunk)
1429{
1430 PGMMCHUNKFREESET pSet = pChunk->pSet;
1431 if (RT_LIKELY(pSet))
1432 {
1433 pSet->cFreePages -= pChunk->cFree;
1434
1435 PGMMCHUNK pPrev = pChunk->pFreePrev;
1436 PGMMCHUNK pNext = pChunk->pFreeNext;
1437 if (pPrev)
1438 pPrev->pFreeNext = pNext;
1439 else
1440 pSet->apLists[(pChunk->cFree - 1) >> GMM_CHUNK_FREE_SET_SHIFT] = pNext;
1441 if (pNext)
1442 pNext->pFreePrev = pPrev;
1443
1444 pChunk->pSet = NULL;
1445 pChunk->pFreeNext = NULL;
1446 pChunk->pFreePrev = NULL;
1447 }
1448 else
1449 {
1450 Assert(!pChunk->pFreeNext);
1451 Assert(!pChunk->pFreePrev);
1452 Assert(!pChunk->cFree);
1453 }
1454}
1455
1456
1457/**
1458 * Links the chunk onto the appropriate free list in the specified free set.
1459 *
1460 * If no free entries, it's not linked into any list.
1461 *
1462 * @param pChunk The allocation chunk.
1463 * @param pSet The free set.
1464 */
1465DECLINLINE(void) gmmR0LinkChunk(PGMMCHUNK pChunk, PGMMCHUNKFREESET pSet)
1466{
1467 Assert(!pChunk->pSet);
1468 Assert(!pChunk->pFreeNext);
1469 Assert(!pChunk->pFreePrev);
1470
1471 if (pChunk->cFree > 0)
1472 {
1473 pChunk->pSet = pSet;
1474 pChunk->pFreePrev = NULL;
1475 unsigned iList = (pChunk->cFree - 1) >> GMM_CHUNK_FREE_SET_SHIFT;
1476 pChunk->pFreeNext = pSet->apLists[iList];
1477 if (pChunk->pFreeNext)
1478 pChunk->pFreeNext->pFreePrev = pChunk;
1479 pSet->apLists[iList] = pChunk;
1480
1481 pSet->cFreePages += pChunk->cFree;
1482 }
1483}
1484
1485
1486/**
1487 * Frees a Chunk ID.
1488 *
1489 * @param pGMM Pointer to the GMM instance.
1490 * @param idChunk The Chunk ID to free.
1491 */
1492static void gmmR0FreeChunkId(PGMM pGMM, uint32_t idChunk)
1493{
1494 AssertReturnVoid(idChunk != NIL_GMM_CHUNKID);
1495 AssertMsg(ASMBitTest(&pGMM->bmChunkId[0], idChunk), ("%#x\n", idChunk));
1496 ASMAtomicBitClear(&pGMM->bmChunkId[0], idChunk);
1497}
1498
1499
1500/**
1501 * Allocates a new Chunk ID.
1502 *
1503 * @returns The Chunk ID.
1504 * @param pGMM Pointer to the GMM instance.
1505 */
1506static uint32_t gmmR0AllocateChunkId(PGMM pGMM)
1507{
1508 AssertCompile(!((GMM_CHUNKID_LAST + 1) & 31)); /* must be a multiple of 32 */
1509 AssertCompile(NIL_GMM_CHUNKID == 0);
1510
1511 /*
1512 * Try the next sequential one.
1513 */
1514 int32_t idChunk = ++pGMM->idChunkPrev;
1515#if 0 /* test the fallback first */
1516 if ( idChunk <= GMM_CHUNKID_LAST
1517 && idChunk > NIL_GMM_CHUNKID
1518 && !ASMAtomicBitTestAndSet(&pGMM->bmChunkId[0], idChunk))
1519 return idChunk;
1520#endif
1521
1522 /*
1523 * Scan sequentially from the last one.
1524 */
1525 if ( (uint32_t)idChunk < GMM_CHUNKID_LAST
1526 && idChunk > NIL_GMM_CHUNKID)
1527 {
1528 idChunk = ASMBitNextClear(&pGMM->bmChunkId[0], GMM_CHUNKID_LAST + 1, idChunk);
1529 if (idChunk > NIL_GMM_CHUNKID)
1530 {
1531 AssertMsgReturn(!ASMAtomicBitTestAndSet(&pGMM->bmChunkId[0], idChunk), ("%#x\n", idChunk), NIL_GMM_CHUNKID);
1532 return pGMM->idChunkPrev = idChunk;
1533 }
1534 }
1535
1536 /*
1537 * Ok, scan from the start.
1538 * We're not racing anyone, so there is no need to expect failures or have restart loops.
1539 */
1540 idChunk = ASMBitFirstClear(&pGMM->bmChunkId[0], GMM_CHUNKID_LAST + 1);
1541 AssertMsgReturn(idChunk > NIL_GMM_CHUNKID, ("%#x\n", idChunk), NIL_GMM_CHUNKID);
1542 AssertMsgReturn(!ASMAtomicBitTestAndSet(&pGMM->bmChunkId[0], idChunk), ("%#x\n", idChunk), NIL_GMM_CHUNKID);
1543
1544 return pGMM->idChunkPrev = idChunk;
1545}
1546
1547
1548/**
1549 * Registers a new chunk of memory.
1550 *
1551 * This is called by both gmmR0AllocateOneChunk and GMMR0SeedChunk. The caller
1552 * must own the global lock.
1553 *
1554 * @returns VBox status code.
1555 * @param pGMM Pointer to the GMM instance.
1556 * @param pSet Pointer to the set.
1557 * @param MemObj The memory object for the chunk.
1558 * @param hGVM The affinity of the chunk. NIL_GVM_HANDLE for no
1559 * affinity.
1560 * @param enmChunkType Chunk type (continuous or non-continuous)
1561 * @param ppChunk Chunk address (out)
1562 */
1563static int gmmR0RegisterChunk(PGMM pGMM, PGMMCHUNKFREESET pSet, RTR0MEMOBJ MemObj, uint16_t hGVM, GMMCHUNKTYPE enmChunkType, PGMMCHUNK *ppChunk = NULL)
1564{
1565 Assert(hGVM != NIL_GVM_HANDLE || pGMM->fBoundMemoryMode);
1566
1567 int rc;
1568 PGMMCHUNK pChunk = (PGMMCHUNK)RTMemAllocZ(sizeof(*pChunk));
1569 if (pChunk)
1570 {
1571 /*
1572 * Initialize it.
1573 */
1574 pChunk->MemObj = MemObj;
1575 pChunk->cFree = GMM_CHUNK_NUM_PAGES;
1576 pChunk->hGVM = hGVM;
1577 pChunk->iFreeHead = 0;
1578 pChunk->enmType = enmChunkType;
1579 for (unsigned iPage = 0; iPage < RT_ELEMENTS(pChunk->aPages) - 1; iPage++)
1580 {
1581 pChunk->aPages[iPage].Free.u2State = GMM_PAGE_STATE_FREE;
1582 pChunk->aPages[iPage].Free.iNext = iPage + 1;
1583 }
1584 pChunk->aPages[RT_ELEMENTS(pChunk->aPages) - 1].Free.u2State = GMM_PAGE_STATE_FREE;
1585 pChunk->aPages[RT_ELEMENTS(pChunk->aPages) - 1].Free.iNext = UINT16_MAX;
1586
1587 /*
1588 * Allocate a Chunk ID and insert it into the tree.
1589 * This has to be done behind the mutex of course.
1590 */
1591 if (GMM_CHECK_SANITY_UPON_ENTERING(pGMM))
1592 {
1593 pChunk->Core.Key = gmmR0AllocateChunkId(pGMM);
1594 if ( pChunk->Core.Key != NIL_GMM_CHUNKID
1595 && pChunk->Core.Key <= GMM_CHUNKID_LAST
1596 && RTAvlU32Insert(&pGMM->pChunks, &pChunk->Core))
1597 {
1598 pGMM->cChunks++;
1599 gmmR0LinkChunk(pChunk, pSet);
1600 LogFlow(("gmmR0RegisterChunk: pChunk=%p id=%#x cChunks=%d\n", pChunk, pChunk->Core.Key, pGMM->cChunks));
1601
1602 if (ppChunk)
1603 *ppChunk = pChunk;
1604
1605 GMM_CHECK_SANITY_UPON_LEAVING(pGMM);
1606 return VINF_SUCCESS;
1607 }
1608
1609 /* bail out */
1610 rc = VERR_INTERNAL_ERROR;
1611 }
1612 else
1613 rc = VERR_INTERNAL_ERROR_5;
1614
1615 RTMemFree(pChunk);
1616 }
1617 else
1618 rc = VERR_NO_MEMORY;
1619 return rc;
1620}
1621
1622
1623/**
1624 * Allocate one new chunk and add it to the specified free set.
1625 *
1626 * @returns VBox status code.
1627 * @param pGMM Pointer to the GMM instance.
1628 * @param pSet Pointer to the set.
1629 * @param hGVM The affinity of the new chunk.
1630 * @param enmChunkType Chunk type (continuous or non-continuous)
1631 * @param ppChunk Chunk address (out)
1632 *
1633 * @remarks Called without owning the mutex.
1634 */
1635static int gmmR0AllocateOneChunk(PGMM pGMM, PGMMCHUNKFREESET pSet, uint16_t hGVM, GMMCHUNKTYPE enmChunkType, PGMMCHUNK *ppChunk = NULL)
1636{
1637 /*
1638 * Allocate the memory.
1639 */
1640 RTR0MEMOBJ MemObj;
1641 int rc;
1642
1643 AssertCompile(GMM_CHUNK_SIZE == _2M);
1644 AssertReturn(enmChunkType == GMMCHUNKTYPE_NON_CONTINUOUS || enmChunkType == GMMCHUNKTYPE_CONTINUOUS, VERR_INVALID_PARAMETER);
1645
1646 /* Leave the lock temporarily as the allocation might take long. */
1647 RTSemFastMutexRelease(pGMM->Mtx);
1648 if (enmChunkType == GMMCHUNKTYPE_NON_CONTINUOUS)
1649 rc = RTR0MemObjAllocPhysNC(&MemObj, GMM_CHUNK_SIZE, NIL_RTHCPHYS);
1650 else
1651 rc = RTR0MemObjAllocPhysEx(&MemObj, GMM_CHUNK_SIZE, NIL_RTHCPHYS, GMM_CHUNK_SIZE);
1652
1653 /* Grab the lock again. */
1654 int rc2 = RTSemFastMutexRequest(pGMM->Mtx);
1655 AssertRCReturn(rc2, rc2);
1656
1657 if (RT_SUCCESS(rc))
1658 {
1659 rc = gmmR0RegisterChunk(pGMM, pSet, MemObj, hGVM, enmChunkType, ppChunk);
1660 if (RT_FAILURE(rc))
1661 RTR0MemObjFree(MemObj, false /* fFreeMappings */);
1662 }
1663 /** @todo Check that RTR0MemObjAllocPhysNC always returns VERR_NO_MEMORY on
1664 * allocation failure. */
1665 return rc;
1666}
1667
1668
1669/**
1670 * Attempts to allocate more pages until the requested amount is met.
1671 *
1672 * @returns VBox status code.
1673 * @param pGMM Pointer to the GMM instance data.
1674 * @param pGVM The calling VM.
1675 * @param pSet Pointer to the free set to grow.
1676 * @param cPages The number of pages needed.
1677 *
1678 * @remarks Called owning the mutex, but will leave it temporarily while
1679 * allocating the memory!
1680 */
1681static int gmmR0AllocateMoreChunks(PGMM pGMM, PGVM pGVM, PGMMCHUNKFREESET pSet, uint32_t cPages)
1682{
1683 Assert(!pGMM->fLegacyAllocationMode);
1684
1685 if (!GMM_CHECK_SANITY_IN_LOOPS(pGMM))
1686 return VERR_INTERNAL_ERROR_4;
1687
1688 if (!pGMM->fBoundMemoryMode)
1689 {
1690 /*
1691 * Try to steal free chunks from the other set first. (Only take 100% free chunks.)
1692 */
1693 PGMMCHUNKFREESET pOtherSet = pSet == &pGMM->Private ? &pGMM->Shared : &pGMM->Private;
1694 while ( pSet->cFreePages < cPages
1695 && pOtherSet->cFreePages >= GMM_CHUNK_NUM_PAGES)
1696 {
1697 PGMMCHUNK pChunk = pOtherSet->apLists[GMM_CHUNK_FREE_SET_LISTS - 1];
1698 while (pChunk && pChunk->cFree != GMM_CHUNK_NUM_PAGES)
1699 pChunk = pChunk->pFreeNext;
1700 if (!pChunk)
1701 break;
1702
1703 gmmR0UnlinkChunk(pChunk);
1704 gmmR0LinkChunk(pChunk, pSet);
1705 }
1706
1707 /*
1708 * If we still need more pages, allocate new chunks.
1709 * Note! We will leave the mutex while doing the allocation.
1710 */
1711 while (pSet->cFreePages < cPages)
1712 {
1713 int rc = gmmR0AllocateOneChunk(pGMM, pSet, pGVM->hSelf, GMMCHUNKTYPE_NON_CONTINUOUS);
1714 if (RT_FAILURE(rc))
1715 return rc;
1716 if (!GMM_CHECK_SANITY_UPON_ENTERING(pGMM))
1717 return VERR_INTERNAL_ERROR_5;
1718 }
1719 }
1720 else
1721 {
1722 /*
1723 * The memory is bound to the VM allocating it, so we have to count
1724 * the free pages carefully as well as making sure we brand them with
1725 * our VM handle.
1726 *
1727 * Note! We will leave the mutex while doing the allocation.
1728 */
1729 uint16_t const hGVM = pGVM->hSelf;
1730 for (;;)
1731 {
1732 /* Count and see if we've reached the goal. */
1733 uint32_t cPagesFound = 0;
1734 for (unsigned i = 0; i < RT_ELEMENTS(pSet->apLists); i++)
1735 for (PGMMCHUNK pCur = pSet->apLists[i]; pCur; pCur = pCur->pFreeNext)
1736 if (pCur->hGVM == hGVM)
1737 {
1738 cPagesFound += pCur->cFree;
1739 if (cPagesFound >= cPages)
1740 break;
1741 }
1742 if (cPagesFound >= cPages)
1743 break;
1744
1745 /* Allocate more. */
1746 int rc = gmmR0AllocateOneChunk(pGMM, pSet, hGVM, GMMCHUNKTYPE_NON_CONTINUOUS);
1747 if (RT_FAILURE(rc))
1748 return rc;
1749 if (!GMM_CHECK_SANITY_UPON_ENTERING(pGMM))
1750 return VERR_INTERNAL_ERROR_5;
1751 }
1752 }
1753
1754 return VINF_SUCCESS;
1755}
1756
1757
1758/**
1759 * Allocates one private page.
1760 *
1761 * Worker for gmmR0AllocatePages.
1762 *
1763 * @param pGMM Pointer to the GMM instance data.
1764 * @param hGVM The GVM handle of the VM requesting memory.
1765 * @param pChunk The chunk to allocate it from.
1766 * @param pPageDesc The page descriptor.
1767 */
1768static void gmmR0AllocatePage(PGMM pGMM, uint32_t hGVM, PGMMCHUNK pChunk, PGMMPAGEDESC pPageDesc)
1769{
1770 /* update the chunk stats. */
1771 if (pChunk->hGVM == NIL_GVM_HANDLE)
1772 pChunk->hGVM = hGVM;
1773 Assert(pChunk->cFree);
1774 pChunk->cFree--;
1775 pChunk->cPrivate++;
1776
1777 /* unlink the first free page. */
1778 const uint32_t iPage = pChunk->iFreeHead;
1779 AssertReleaseMsg(iPage < RT_ELEMENTS(pChunk->aPages), ("%d\n", iPage));
1780 PGMMPAGE pPage = &pChunk->aPages[iPage];
1781 Assert(GMM_PAGE_IS_FREE(pPage));
1782 pChunk->iFreeHead = pPage->Free.iNext;
1783 Log3(("A pPage=%p iPage=%#x/%#x u2State=%d iFreeHead=%#x iNext=%#x\n",
1784 pPage, iPage, (pChunk->Core.Key << GMM_CHUNKID_SHIFT) | iPage,
1785 pPage->Common.u2State, pChunk->iFreeHead, pPage->Free.iNext));
1786
1787 /* make the page private. */
1788 pPage->u = 0;
1789 AssertCompile(GMM_PAGE_STATE_PRIVATE == 0);
1790 pPage->Private.hGVM = hGVM;
1791 AssertCompile(NIL_RTHCPHYS >= GMM_GCPHYS_LAST);
1792 AssertCompile(GMM_GCPHYS_UNSHAREABLE >= GMM_GCPHYS_LAST);
1793 if (pPageDesc->HCPhysGCPhys <= GMM_GCPHYS_LAST)
1794 pPage->Private.pfn = pPageDesc->HCPhysGCPhys >> PAGE_SHIFT;
1795 else
1796 pPage->Private.pfn = GMM_PAGE_PFN_UNSHAREABLE; /* unshareable / unassigned - same thing. */
1797
1798 /* update the page descriptor. */
1799 pPageDesc->HCPhysGCPhys = RTR0MemObjGetPagePhysAddr(pChunk->MemObj, iPage);
1800 Assert(pPageDesc->HCPhysGCPhys != NIL_RTHCPHYS);
1801 pPageDesc->idPage = (pChunk->Core.Key << GMM_CHUNKID_SHIFT) | iPage;
1802 pPageDesc->idSharedPage = NIL_GMM_PAGEID;
1803}
1804
1805
1806/**
1807 * Common worker for GMMR0AllocateHandyPages and GMMR0AllocatePages.
1808 *
1809 * @returns VBox status code:
1810 * @retval VINF_SUCCESS on success.
1811 * @retval VERR_GMM_SEED_ME if seeding via GMMR0SeedChunk or
1812 * gmmR0AllocateMoreChunks is necessary.
1813 * @retval VERR_GMM_HIT_GLOBAL_LIMIT if we've exhausted the available pages.
1814 * @retval VERR_GMM_HIT_VM_ACCOUNT_LIMIT if we've hit the VM account limit,
1815 * that is we're trying to allocate more than we've reserved.
1816 *
1817 * @param pGMM Pointer to the GMM instance data.
1818 * @param pGVM Pointer to the shared VM structure.
1819 * @param cPages The number of pages to allocate.
1820 * @param paPages Pointer to the page descriptors.
1821 * See GMMPAGEDESC for details on what is expected on input.
1822 * @param enmAccount The account to charge.
1823 */
1824static int gmmR0AllocatePages(PGMM pGMM, PGVM pGVM, uint32_t cPages, PGMMPAGEDESC paPages, GMMACCOUNT enmAccount)
1825{
1826 /*
1827 * Check allocation limits.
1828 */
1829 if (RT_UNLIKELY(pGMM->cAllocatedPages + cPages > pGMM->cMaxPages))
1830 return VERR_GMM_HIT_GLOBAL_LIMIT;
1831
1832 switch (enmAccount)
1833 {
1834 case GMMACCOUNT_BASE:
1835 if (RT_UNLIKELY(pGVM->gmm.s.Allocated.cBasePages + pGVM->gmm.s.cBalloonedPages + cPages > pGVM->gmm.s.Reserved.cBasePages))
1836 {
1837 Log(("gmmR0AllocatePages:Base: Reserved=%#llx Allocated+Ballooned+Requested=%#llx+%#llx+%#x!\n",
1838 pGVM->gmm.s.Reserved.cBasePages, pGVM->gmm.s.Allocated.cBasePages, pGVM->gmm.s.cBalloonedPages, cPages));
1839 return VERR_GMM_HIT_VM_ACCOUNT_LIMIT;
1840 }
1841 break;
1842 case GMMACCOUNT_SHADOW:
1843 if (RT_UNLIKELY(pGVM->gmm.s.Allocated.cShadowPages + cPages > pGVM->gmm.s.Reserved.cShadowPages))
1844 {
1845 Log(("gmmR0AllocatePages:Shadow: Reserved=%#llx Allocated+Requested=%#llx+%#x!\n",
1846 pGVM->gmm.s.Reserved.cShadowPages, pGVM->gmm.s.Allocated.cShadowPages, cPages));
1847 return VERR_GMM_HIT_VM_ACCOUNT_LIMIT;
1848 }
1849 break;
1850 case GMMACCOUNT_FIXED:
1851 if (RT_UNLIKELY(pGVM->gmm.s.Allocated.cFixedPages + cPages > pGVM->gmm.s.Reserved.cFixedPages))
1852 {
1853 Log(("gmmR0AllocatePages:Fixed: Reserved=%#llx Allocated+Requested=%#llx+%#x!\n",
1854 pGVM->gmm.s.Reserved.cFixedPages, pGVM->gmm.s.Allocated.cFixedPages, cPages));
1855 return VERR_GMM_HIT_VM_ACCOUNT_LIMIT;
1856 }
1857 break;
1858 default:
1859 AssertMsgFailedReturn(("enmAccount=%d\n", enmAccount), VERR_INTERNAL_ERROR);
1860 }
1861
1862 /*
1863 * Check if we need to allocate more memory or not. In bound memory mode this
1864 * is a bit of extra work, but it's easier to do it upfront than to bail out later.
1865 */
1866 PGMMCHUNKFREESET pSet = &pGMM->Private;
1867 if (pSet->cFreePages < cPages)
1868 return VERR_GMM_SEED_ME;
1869 if (pGMM->fBoundMemoryMode)
1870 {
1871 uint16_t hGVM = pGVM->hSelf;
1872 uint32_t cPagesFound = 0;
1873 for (unsigned i = 0; i < RT_ELEMENTS(pSet->apLists); i++)
1874 for (PGMMCHUNK pCur = pSet->apLists[i]; pCur; pCur = pCur->pFreeNext)
1875 if (pCur->hGVM == hGVM)
1876 {
1877 cPagesFound += pCur->cFree;
1878 if (cPagesFound >= cPages)
1879 break;
1880 }
1881 if (cPagesFound < cPages)
1882 return VERR_GMM_SEED_ME;
1883 }
1884
1885 /*
1886 * Pick the pages.
1887 * Try to make some effort to keep VMs sharing private chunks.
1888 */
1889 uint16_t hGVM = pGVM->hSelf;
1890 uint32_t iPage = 0;
1891
1892 /* first round, pick from chunks with an affinity to the VM. */
1893 for (unsigned i = 0; i < RT_ELEMENTS(pSet->apLists) && iPage < cPages; i++)
1894 {
1895 PGMMCHUNK pCurFree = NULL;
1896 PGMMCHUNK pCur = pSet->apLists[i];
1897 while (pCur && iPage < cPages)
1898 {
1899 PGMMCHUNK pNext = pCur->pFreeNext;
1900
1901 if ( pCur->hGVM == hGVM
1902 && pCur->cFree < GMM_CHUNK_NUM_PAGES)
1903 {
1904 gmmR0UnlinkChunk(pCur);
1905 for (; pCur->cFree && iPage < cPages; iPage++)
1906 gmmR0AllocatePage(pGMM, hGVM, pCur, &paPages[iPage]);
1907 gmmR0LinkChunk(pCur, pSet);
1908 }
1909
1910 pCur = pNext;
1911 }
1912 }
1913
1914 if (iPage < cPages)
1915 {
1916 /* second round, pick pages from the 100% empty chunks we just skipped above. */
1917 PGMMCHUNK pCurFree = NULL;
1918 PGMMCHUNK pCur = pSet->apLists[RT_ELEMENTS(pSet->apLists) - 1];
1919 while (pCur && iPage < cPages)
1920 {
1921 PGMMCHUNK pNext = pCur->pFreeNext;
1922
1923 if ( pCur->cFree == GMM_CHUNK_NUM_PAGES
1924 && ( pCur->hGVM == hGVM
1925 || !pGMM->fBoundMemoryMode))
1926 {
1927 gmmR0UnlinkChunk(pCur);
1928 for (; pCur->cFree && iPage < cPages; iPage++)
1929 gmmR0AllocatePage(pGMM, hGVM, pCur, &paPages[iPage]);
1930 gmmR0LinkChunk(pCur, pSet);
1931 }
1932
1933 pCur = pNext;
1934 }
1935 }
1936
1937 if ( iPage < cPages
1938 && !pGMM->fBoundMemoryMode)
1939 {
1940 /* third round, disregard affinity. */
1941 unsigned i = RT_ELEMENTS(pSet->apLists);
1942 while (i-- > 0 && iPage < cPages)
1943 {
1944 PGMMCHUNK pCurFree = NULL;
1945 PGMMCHUNK pCur = pSet->apLists[i];
1946 while (pCur && iPage < cPages)
1947 {
1948 PGMMCHUNK pNext = pCur->pFreeNext;
1949
1950 if ( pCur->cFree > GMM_CHUNK_NUM_PAGES / 2
1951 && cPages >= GMM_CHUNK_NUM_PAGES / 2)
1952 pCur->hGVM = hGVM; /* change chunk affinity */
1953
1954 gmmR0UnlinkChunk(pCur);
1955 for (; pCur->cFree && iPage < cPages; iPage++)
1956 gmmR0AllocatePage(pGMM, hGVM, pCur, &paPages[iPage]);
1957 gmmR0LinkChunk(pCur, pSet);
1958
1959 pCur = pNext;
1960 }
1961 }
1962 }
1963
1964 /*
1965 * Update the account.
1966 */
1967 switch (enmAccount)
1968 {
1969 case GMMACCOUNT_BASE: pGVM->gmm.s.Allocated.cBasePages += iPage; break;
1970 case GMMACCOUNT_SHADOW: pGVM->gmm.s.Allocated.cShadowPages += iPage; break;
1971 case GMMACCOUNT_FIXED: pGVM->gmm.s.Allocated.cFixedPages += iPage; break;
1972 default:
1973 AssertMsgFailedReturn(("enmAccount=%d\n", enmAccount), VERR_INTERNAL_ERROR);
1974 }
1975 pGVM->gmm.s.cPrivatePages += iPage;
1976 pGMM->cAllocatedPages += iPage;
1977
1978 AssertMsgReturn(iPage == cPages, ("%u != %u\n", iPage, cPages), VERR_INTERNAL_ERROR);
1979
1980 /*
1981 * Check if we've reached some threshold and should kick one or two VMs and tell
1982 * them to inflate their balloons a bit more... later.
1983 */
1984
1985 return VINF_SUCCESS;
1986}
1987
1988
1989/**
1990 * Updates the previous allocations and allocates more pages.
1991 *
1992 * The handy pages are always taken from the 'base' memory account.
1993 * The allocated pages are not cleared and will contain random garbage.
1994 *
1995 * @returns VBox status code:
1996 * @retval VINF_SUCCESS on success.
1997 * @retval VERR_NOT_OWNER if the caller is not an EMT.
1998 * @retval VERR_GMM_PAGE_NOT_FOUND if one of the pages to update wasn't found.
1999 * @retval VERR_GMM_PAGE_NOT_PRIVATE if one of the pages to update wasn't a
2000 * private page.
2001 * @retval VERR_GMM_PAGE_NOT_SHARED if one of the pages to update wasn't a
2002 * shared page.
2003 * @retval VERR_GMM_NOT_PAGE_OWNER if one of the pages to be updated wasn't
2004 * owned by the VM.
2005 * @retval VERR_GMM_SEED_ME if seeding via GMMR0SeedChunk is necessary.
2006 * @retval VERR_GMM_HIT_GLOBAL_LIMIT if we've exhausted the available pages.
2007 * @retval VERR_GMM_HIT_VM_ACCOUNT_LIMIT if we've hit the VM account limit,
2008 * that is we're trying to allocate more than we've reserved.
2009 *
2010 * @param pVM Pointer to the shared VM structure.
2011 * @param idCpu VCPU id
2012 * @param cPagesToUpdate The number of pages to update (starting from the head).
2013 * @param cPagesToAlloc The number of pages to allocate (starting from the head).
2014 * @param paPages The array of page descriptors.
2015 * See GMMPAGEDESC for details on what is expected on input.
2016 * @thread EMT.
2017 */
2018GMMR0DECL(int) GMMR0AllocateHandyPages(PVM pVM, VMCPUID idCpu, uint32_t cPagesToUpdate, uint32_t cPagesToAlloc, PGMMPAGEDESC paPages)
2019{
2020 LogFlow(("GMMR0AllocateHandyPages: pVM=%p cPagesToUpdate=%#x cPagesToAlloc=%#x paPages=%p\n",
2021 pVM, cPagesToUpdate, cPagesToAlloc, paPages));
2022
2023 /*
2024 * Validate, get basics and take the semaphore.
2025 * (This is a relatively busy path, so make predictions where possible.)
2026 */
2027 PGMM pGMM;
2028 GMM_GET_VALID_INSTANCE(pGMM, VERR_INTERNAL_ERROR);
2029 PGVM pGVM;
2030 int rc = GVMMR0ByVMAndEMT(pVM, idCpu, &pGVM);
2031 if (RT_FAILURE(rc))
2032 return rc;
2033
2034 AssertPtrReturn(paPages, VERR_INVALID_PARAMETER);
2035 AssertMsgReturn( (cPagesToUpdate && cPagesToUpdate < 1024)
2036 || (cPagesToAlloc && cPagesToAlloc < 1024),
2037 ("cPagesToUpdate=%#x cPagesToAlloc=%#x\n", cPagesToUpdate, cPagesToAlloc),
2038 VERR_INVALID_PARAMETER);
2039
2040 unsigned iPage = 0;
2041 for (; iPage < cPagesToUpdate; iPage++)
2042 {
2043 AssertMsgReturn( ( paPages[iPage].HCPhysGCPhys <= GMM_GCPHYS_LAST
2044 && !(paPages[iPage].HCPhysGCPhys & PAGE_OFFSET_MASK))
2045 || paPages[iPage].HCPhysGCPhys == NIL_RTHCPHYS
2046 || paPages[iPage].HCPhysGCPhys == GMM_GCPHYS_UNSHAREABLE,
2047 ("#%#x: %RHp\n", iPage, paPages[iPage].HCPhysGCPhys),
2048 VERR_INVALID_PARAMETER);
2049 AssertMsgReturn( paPages[iPage].idPage <= GMM_PAGEID_LAST
2050 /*|| paPages[iPage].idPage == NIL_GMM_PAGEID*/,
2051 ("#%#x: %#x\n", iPage, paPages[iPage].idPage), VERR_INVALID_PARAMETER);
2052 AssertMsgReturn( paPages[iPage].idSharedPage <= GMM_PAGEID_LAST
2053 /*|| paPages[iPage].idSharedPage == NIL_GMM_PAGEID*/,
2054 ("#%#x: %#x\n", iPage, paPages[iPage].idSharedPage), VERR_INVALID_PARAMETER);
2055 }
2056
2057 for (; iPage < cPagesToAlloc; iPage++)
2058 {
2059 AssertMsgReturn(paPages[iPage].HCPhysGCPhys == NIL_RTHCPHYS, ("#%#x: %RHp\n", iPage, paPages[iPage].HCPhysGCPhys), VERR_INVALID_PARAMETER);
2060 AssertMsgReturn(paPages[iPage].idPage == NIL_GMM_PAGEID, ("#%#x: %#x\n", iPage, paPages[iPage].idPage), VERR_INVALID_PARAMETER);
2061 AssertMsgReturn(paPages[iPage].idSharedPage == NIL_GMM_PAGEID, ("#%#x: %#x\n", iPage, paPages[iPage].idSharedPage), VERR_INVALID_PARAMETER);
2062 }
2063
2064 rc = RTSemFastMutexRequest(pGMM->Mtx);
2065 AssertRC(rc);
2066 if (GMM_CHECK_SANITY_UPON_ENTERING(pGMM))
2067 {
2068 /* No allocations before the initial reservation has been made! */
2069 if (RT_LIKELY( pGVM->gmm.s.Reserved.cBasePages
2070 && pGVM->gmm.s.Reserved.cFixedPages
2071 && pGVM->gmm.s.Reserved.cShadowPages))
2072 {
2073 /*
2074 * Perform the updates.
2075 * Stop on the first error.
2076 */
2077 for (iPage = 0; iPage < cPagesToUpdate; iPage++)
2078 {
2079 if (paPages[iPage].idPage != NIL_GMM_PAGEID)
2080 {
2081 PGMMPAGE pPage = gmmR0GetPage(pGMM, paPages[iPage].idPage);
2082 if (RT_LIKELY(pPage))
2083 {
2084 if (RT_LIKELY(GMM_PAGE_IS_PRIVATE(pPage)))
2085 {
2086 if (RT_LIKELY(pPage->Private.hGVM == pGVM->hSelf))
2087 {
2088 AssertCompile(NIL_RTHCPHYS > GMM_GCPHYS_LAST && GMM_GCPHYS_UNSHAREABLE > GMM_GCPHYS_LAST);
2089 if (RT_LIKELY(paPages[iPage].HCPhysGCPhys <= GMM_GCPHYS_LAST))
2090 pPage->Private.pfn = paPages[iPage].HCPhysGCPhys >> PAGE_SHIFT;
2091 else if (paPages[iPage].HCPhysGCPhys == GMM_GCPHYS_UNSHAREABLE)
2092 pPage->Private.pfn = GMM_PAGE_PFN_UNSHAREABLE;
2093 /* else: NIL_RTHCPHYS nothing */
2094
2095 paPages[iPage].idPage = NIL_GMM_PAGEID;
2096 paPages[iPage].HCPhysGCPhys = NIL_RTHCPHYS;
2097 }
2098 else
2099 {
2100 Log(("GMMR0AllocateHandyPages: #%#x/%#x: Not owner! hGVM=%#x hSelf=%#x\n",
2101 iPage, paPages[iPage].idPage, pPage->Private.hGVM, pGVM->hSelf));
2102 rc = VERR_GMM_NOT_PAGE_OWNER;
2103 break;
2104 }
2105 }
2106 else
2107 {
2108 Log(("GMMR0AllocateHandyPages: #%#x/%#x: Not private! %.*Rhxs (type %d)\n", iPage, paPages[iPage].idPage, sizeof(*pPage), pPage, pPage->Common.u2State));
2109 rc = VERR_GMM_PAGE_NOT_PRIVATE;
2110 break;
2111 }
2112 }
2113 else
2114 {
2115 Log(("GMMR0AllocateHandyPages: #%#x/%#x: Not found! (private)\n", iPage, paPages[iPage].idPage));
2116 rc = VERR_GMM_PAGE_NOT_FOUND;
2117 break;
2118 }
2119 }
2120
2121 if (paPages[iPage].idSharedPage != NIL_GMM_PAGEID)
2122 {
2123 PGMMPAGE pPage = gmmR0GetPage(pGMM, paPages[iPage].idSharedPage);
2124 if (RT_LIKELY(pPage))
2125 {
2126 if (RT_LIKELY(GMM_PAGE_IS_SHARED(pPage)))
2127 {
2128 AssertCompile(NIL_RTHCPHYS > GMM_GCPHYS_LAST && GMM_GCPHYS_UNSHAREABLE > GMM_GCPHYS_LAST);
2129 Assert(pPage->Shared.cRefs);
2130 Assert(pGVM->gmm.s.cSharedPages);
2131 Assert(pGVM->gmm.s.Allocated.cBasePages);
2132
2133 Log(("GMMR0AllocateHandyPages: free shared page %x cRefs=%d\n", paPages[iPage].idSharedPage, pPage->Shared.cRefs));
2134 pGVM->gmm.s.cSharedPages--;
2135 pGVM->gmm.s.Allocated.cBasePages--;
2136 if (!--pPage->Shared.cRefs)
2137 {
2138 gmmR0FreeSharedPage(pGMM, paPages[iPage].idSharedPage, pPage);
2139 }
2140 else
2141 {
2142 Assert(pGMM->cDuplicatePages);
2143 pGMM->cDuplicatePages--;
2144 }
2145
2146 paPages[iPage].idSharedPage = NIL_GMM_PAGEID;
2147 }
2148 else
2149 {
2150 Log(("GMMR0AllocateHandyPages: #%#x/%#x: Not shared!\n", iPage, paPages[iPage].idSharedPage));
2151 rc = VERR_GMM_PAGE_NOT_SHARED;
2152 break;
2153 }
2154 }
2155 else
2156 {
2157 Log(("GMMR0AllocateHandyPages: #%#x/%#x: Not found! (shared)\n", iPage, paPages[iPage].idSharedPage));
2158 rc = VERR_GMM_PAGE_NOT_FOUND;
2159 break;
2160 }
2161 }
2162 }
2163
2164 /*
2165 * Join paths with GMMR0AllocatePages for the allocation.
2166 * Note! gmmR0AllocateMoreChunks may leave the protection of the mutex!
2167 */
2168 while (RT_SUCCESS(rc))
2169 {
2170 rc = gmmR0AllocatePages(pGMM, pGVM, cPagesToAlloc, paPages, GMMACCOUNT_BASE);
2171 if ( rc != VERR_GMM_SEED_ME
2172 || pGMM->fLegacyAllocationMode)
2173 break;
2174 rc = gmmR0AllocateMoreChunks(pGMM, pGVM, &pGMM->Private, cPagesToAlloc);
2175 }
2176 }
2177 else
2178 rc = VERR_WRONG_ORDER;
2179 GMM_CHECK_SANITY_UPON_LEAVING(pGMM);
2180 }
2181 else
2182 rc = VERR_INTERNAL_ERROR_5;
2183 RTSemFastMutexRelease(pGMM->Mtx);
2184 LogFlow(("GMMR0AllocateHandyPages: returns %Rrc\n", rc));
2185 return rc;
2186}
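
/**
 * Usage sketch (illustrative only): how a caller might prepare the page
 * descriptor array before calling GMMR0AllocateHandyPages. The helper names
 * are hypothetical; only the GMMPAGEDESC members and the input rules enforced
 * by the validation above are taken from this file.
 *
 * @code
 *  // Entry that only requests a new page: everything set to the NIL values.
 *  static void exampleInitAllocEntry(PGMMPAGEDESC pDesc)
 *  {
 *      pDesc->HCPhysGCPhys = NIL_RTHCPHYS;
 *      pDesc->idPage       = NIL_GMM_PAGEID;
 *      pDesc->idSharedPage = NIL_GMM_PAGEID;
 *  }
 *
 *  // Entry that reports the (new) guest physical address of an existing
 *  // private page; GCPhys must be page aligned and <= GMM_GCPHYS_LAST, or
 *  // NIL_RTHCPHYS / GMM_GCPHYS_UNSHAREABLE if the page is unassigned.
 *  static void exampleInitUpdateEntry(PGMMPAGEDESC pDesc, uint32_t idPage, RTHCPHYS GCPhys)
 *  {
 *      pDesc->HCPhysGCPhys = GCPhys;
 *      pDesc->idPage       = idPage;
 *      pDesc->idSharedPage = NIL_GMM_PAGEID;   // or a shared page id to be released
 *  }
 * @endcode
 */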
2187
2188
2189/**
2190 * Allocate one or more pages.
2191 *
2192 * This is typically used for ROMs and MMIO2 (VRAM) during VM creation.
2193 * The allocated pages are not cleared and will contain random garbage.
2194 *
2195 * @returns VBox status code:
2196 * @retval VINF_SUCCESS on success.
2197 * @retval VERR_NOT_OWNER if the caller is not an EMT.
2198 * @retval VERR_GMM_SEED_ME if seeding via GMMR0SeedChunk is necessary.
2199 * @retval VERR_GMM_HIT_GLOBAL_LIMIT if we've exhausted the available pages.
2200 * @retval VERR_GMM_HIT_VM_ACCOUNT_LIMIT if we've hit the VM account limit,
2201 * that is we're trying to allocate more than we've reserved.
2202 *
2203 * @param pVM Pointer to the shared VM structure.
2204 * @param idCpu VCPU id
2205 * @param cPages The number of pages to allocate.
2206 * @param paPages Pointer to the page descriptors.
2207 * See GMMPAGEDESC for details on what is expected on input.
2208 * @param enmAccount The account to charge.
2209 *
2210 * @thread EMT.
2211 */
2212GMMR0DECL(int) GMMR0AllocatePages(PVM pVM, VMCPUID idCpu, uint32_t cPages, PGMMPAGEDESC paPages, GMMACCOUNT enmAccount)
2213{
2214 LogFlow(("GMMR0AllocatePages: pVM=%p cPages=%#x paPages=%p enmAccount=%d\n", pVM, cPages, paPages, enmAccount));
2215
2216 /*
2217 * Validate, get basics and take the semaphore.
2218 */
2219 PGMM pGMM;
2220 GMM_GET_VALID_INSTANCE(pGMM, VERR_INTERNAL_ERROR);
2221 PGVM pGVM;
2222 int rc = GVMMR0ByVMAndEMT(pVM, idCpu, &pGVM);
2223 if (RT_FAILURE(rc))
2224 return rc;
2225
2226 AssertPtrReturn(paPages, VERR_INVALID_PARAMETER);
2227 AssertMsgReturn(enmAccount > GMMACCOUNT_INVALID && enmAccount < GMMACCOUNT_END, ("%d\n", enmAccount), VERR_INVALID_PARAMETER);
2228 AssertMsgReturn(cPages > 0 && cPages < RT_BIT(32 - PAGE_SHIFT), ("%#x\n", cPages), VERR_INVALID_PARAMETER);
2229
2230 for (unsigned iPage = 0; iPage < cPages; iPage++)
2231 {
2232 AssertMsgReturn( paPages[iPage].HCPhysGCPhys == NIL_RTHCPHYS
2233 || paPages[iPage].HCPhysGCPhys == GMM_GCPHYS_UNSHAREABLE
2234 || ( enmAccount == GMMACCOUNT_BASE
2235 && paPages[iPage].HCPhysGCPhys <= GMM_GCPHYS_LAST
2236 && !(paPages[iPage].HCPhysGCPhys & PAGE_OFFSET_MASK)),
2237 ("#%#x: %RHp enmAccount=%d\n", iPage, paPages[iPage].HCPhysGCPhys, enmAccount),
2238 VERR_INVALID_PARAMETER);
2239 AssertMsgReturn(paPages[iPage].idPage == NIL_GMM_PAGEID, ("#%#x: %#x\n", iPage, paPages[iPage].idPage), VERR_INVALID_PARAMETER);
2240 AssertMsgReturn(paPages[iPage].idSharedPage == NIL_GMM_PAGEID, ("#%#x: %#x\n", iPage, paPages[iPage].idSharedPage), VERR_INVALID_PARAMETER);
2241 }
2242
2243 rc = RTSemFastMutexRequest(pGMM->Mtx);
2244 AssertRC(rc);
2245 if (GMM_CHECK_SANITY_UPON_ENTERING(pGMM))
2246 {
2247
2248 /* No allocations before the initial reservation has been made! */
2249 if (RT_LIKELY( pGVM->gmm.s.Reserved.cBasePages
2250 && pGVM->gmm.s.Reserved.cFixedPages
2251 && pGVM->gmm.s.Reserved.cShadowPages))
2252 {
2253 /*
2254 * gmmR0AllocatePages seed loop.
2255 * Note! gmmR0AllocateMoreChunks may leave the protection of the mutex!
2256 */
2257 while (RT_SUCCESS(rc))
2258 {
2259 rc = gmmR0AllocatePages(pGMM, pGVM, cPages, paPages, enmAccount);
2260 if ( rc != VERR_GMM_SEED_ME
2261 || pGMM->fLegacyAllocationMode)
2262 break;
2263 rc = gmmR0AllocateMoreChunks(pGMM, pGVM, &pGMM->Private, cPages);
2264 }
2265 }
2266 else
2267 rc = VERR_WRONG_ORDER;
2268 GMM_CHECK_SANITY_UPON_LEAVING(pGMM);
2269 }
2270 else
2271 rc = VERR_INTERNAL_ERROR_5;
2272 RTSemFastMutexRelease(pGMM->Mtx);
2273 LogFlow(("GMMR0AllocatePages: returns %Rrc\n", rc));
2274 return rc;
2275}
2276
2277
2278/**
2279 * VMMR0 request wrapper for GMMR0AllocatePages.
2280 *
2281 * @returns see GMMR0AllocatePages.
2282 * @param pVM Pointer to the shared VM structure.
2283 * @param idCpu VCPU id
2284 * @param pReq The request packet.
2285 */
2286GMMR0DECL(int) GMMR0AllocatePagesReq(PVM pVM, VMCPUID idCpu, PGMMALLOCATEPAGESREQ pReq)
2287{
2288 /*
2289 * Validate input and pass it on.
2290 */
2291 AssertPtrReturn(pVM, VERR_INVALID_POINTER);
2292 AssertPtrReturn(pReq, VERR_INVALID_POINTER);
2293 AssertMsgReturn(pReq->Hdr.cbReq >= RT_UOFFSETOF(GMMALLOCATEPAGESREQ, aPages[0]),
2294 ("%#x < %#x\n", pReq->Hdr.cbReq, RT_UOFFSETOF(GMMALLOCATEPAGESREQ, aPages[0])),
2295 VERR_INVALID_PARAMETER);
2296 AssertMsgReturn(pReq->Hdr.cbReq == RT_UOFFSETOF(GMMALLOCATEPAGESREQ, aPages[pReq->cPages]),
2297 ("%#x != %#x\n", pReq->Hdr.cbReq, RT_UOFFSETOF(GMMALLOCATEPAGESREQ, aPages[pReq->cPages])),
2298 VERR_INVALID_PARAMETER);
2299
2300 return GMMR0AllocatePages(pVM, idCpu, pReq->cPages, &pReq->aPages[0], pReq->enmAccount);
2301}
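
/**
 * Usage sketch (illustrative only): building the variable sized request that
 * the size checks above expect, i.e. the request ends right after the last
 * page descriptor. The helper name is hypothetical, error paths are minimal,
 * and the remaining request header setup (normally done by the VMMR0 request
 * plumbing) is omitted.
 *
 * @code
 *  static int exampleAllocBaseRam(PVM pVM, VMCPUID idCpu, uint32_t cPages)
 *  {
 *      uint32_t             cbReq = (uint32_t)RT_UOFFSETOF(GMMALLOCATEPAGESREQ, aPages[cPages]);
 *      PGMMALLOCATEPAGESREQ pReq  = (PGMMALLOCATEPAGESREQ)RTMemAllocZ(cbReq);
 *      if (!pReq)
 *          return VERR_NO_MEMORY;
 *
 *      pReq->Hdr.cbReq  = cbReq;
 *      pReq->cPages     = cPages;
 *      pReq->enmAccount = GMMACCOUNT_BASE;
 *      for (uint32_t i = 0; i < cPages; i++)
 *      {
 *          pReq->aPages[i].HCPhysGCPhys = NIL_RTHCPHYS;    // no preassigned guest address
 *          pReq->aPages[i].idPage       = NIL_GMM_PAGEID;
 *          pReq->aPages[i].idSharedPage = NIL_GMM_PAGEID;
 *      }
 *
 *      int rc = GMMR0AllocatePagesReq(pVM, idCpu, pReq);   // normally reached via VMMR0
 *      // on success, aPages[i].idPage and aPages[i].HCPhysGCPhys describe the new pages
 *      RTMemFree(pReq);
 *      return rc;
 *  }
 * @endcode
 */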
2302
2303/**
2304 * Allocate a large page to represent guest RAM
2305 *
2306 * The allocated pages are not cleared and will contain random garbage.
2307 *
2308 * @returns VBox status code:
2309 * @retval VINF_SUCCESS on success.
2310 * @retval VERR_NOT_OWNER if the caller is not an EMT.
2311 * @retval VERR_GMM_SEED_ME if seeding via GMMR0SeedChunk is necessary.
2312 * @retval VERR_GMM_HIT_GLOBAL_LIMIT if we've exhausted the available pages.
2313 * @retval VERR_GMM_HIT_VM_ACCOUNT_LIMIT if we've hit the VM account limit,
2314 * that is we're trying to allocate more than we've reserved.
2315 * @returns see GMMR0AllocatePages.
2316 * @param pVM Pointer to the shared VM structure.
2317 * @param idCpu VCPU id
2318 * @param cbPage Large page size (must be GMM_CHUNK_SIZE)
 * @param pIdPage Where to return the page id of the large page (the id of its first page)
 * @param pHCPhys Where to return the host physical address of the large page
2319 */
2320GMMR0DECL(int) GMMR0AllocateLargePage(PVM pVM, VMCPUID idCpu, uint32_t cbPage, uint32_t *pIdPage, RTHCPHYS *pHCPhys)
2321{
2322 LogFlow(("GMMR0AllocateLargePage: pVM=%p cbPage=%x\n", pVM, cbPage));
2323
2324 AssertReturn(cbPage == GMM_CHUNK_SIZE, VERR_INVALID_PARAMETER);
2325 AssertPtrReturn(pIdPage, VERR_INVALID_PARAMETER);
2326 AssertPtrReturn(pHCPhys, VERR_INVALID_PARAMETER);
2327
2328 /*
2329 * Validate, get basics and take the semaphore.
2330 */
2331 PGMM pGMM;
2332 GMM_GET_VALID_INSTANCE(pGMM, VERR_INTERNAL_ERROR);
2333 PGVM pGVM;
2334 int rc = GVMMR0ByVMAndEMT(pVM, idCpu, &pGVM);
2335 if (RT_FAILURE(rc))
2336 return rc;
2337
2338 /* Not supported in legacy mode where we allocate the memory in ring 3 and lock it in ring 0. */
2339 if (pGMM->fLegacyAllocationMode)
2340 return VERR_NOT_SUPPORTED;
2341
2342 *pHCPhys = NIL_RTHCPHYS;
2343 *pIdPage = NIL_GMM_PAGEID;
2344
2345 rc = RTSemFastMutexRequest(pGMM->Mtx);
2346 AssertRCReturn(rc, rc);
2347 if (GMM_CHECK_SANITY_UPON_ENTERING(pGMM))
2348 {
2349 const unsigned cPages = (GMM_CHUNK_SIZE >> PAGE_SHIFT);
2350 PGMMCHUNK pChunk;
2351 GMMPAGEDESC PageDesc;
2352
2353 if (RT_UNLIKELY(pGVM->gmm.s.Allocated.cBasePages + pGVM->gmm.s.cBalloonedPages + cPages > pGVM->gmm.s.Reserved.cBasePages))
2354 {
2355 Log(("GMMR0AllocateLargePage: Reserved=%#llx Allocated+Requested=%#llx+%#x!\n",
2356 pGVM->gmm.s.Reserved.cBasePages, pGVM->gmm.s.Allocated.cBasePages, cPages));
2357 RTSemFastMutexRelease(pGMM->Mtx);
2358 return VERR_GMM_HIT_VM_ACCOUNT_LIMIT;
2359 }
2360
2361 /* Allocate a new continuous chunk. */
2362 rc = gmmR0AllocateOneChunk(pGMM, &pGMM->Private, pGVM->hSelf, GMMCHUNKTYPE_CONTINUOUS, &pChunk);
2363 if (RT_FAILURE(rc))
2364 {
2365 RTSemFastMutexRelease(pGMM->Mtx);
2366 return rc;
2367 }
2368
2369 /* Unlink the new chunk from the free list. */
2370 gmmR0UnlinkChunk(pChunk);
2371
2372 /* Allocate all pages. */
2373 gmmR0AllocatePage(pGMM, pGVM->hSelf, pChunk, &PageDesc);
2374 /* Return the first page as we'll use the whole chunk as one big page. */
2375 *pIdPage = PageDesc.idPage;
2376 *pHCPhys = PageDesc.HCPhysGCPhys;
2377
2378 for (unsigned i = 1; i < cPages; i++)
2379 gmmR0AllocatePage(pGMM, pGVM->hSelf, pChunk, &PageDesc);
2380
2381 /* Update accounting. */
2382 pGVM->gmm.s.Allocated.cBasePages += cPages;
2383 pGVM->gmm.s.cPrivatePages += cPages;
2384 pGMM->cAllocatedPages += cPages;
2385
2386 gmmR0LinkChunk(pChunk, &pGMM->Private);
2387 }
2388 else
2389 rc = VERR_INTERNAL_ERROR_5;
2390
2391 RTSemFastMutexRelease(pGMM->Mtx);
2392 LogFlow(("GMMR0AllocateLargePage: returns %Rrc\n", rc));
2393 return rc;
2394}
2395
2396
2397/**
2398 * Free a large page
2399 *
2400 * @returns VBox status code:
2401 * @param pVM Pointer to the shared VM structure.
2402 * @param idCpu VCPU id
2403 * @param idPage Large page id
2404 */
2405GMMR0DECL(int) GMMR0FreeLargePage(PVM pVM, VMCPUID idCpu, uint32_t idPage)
2406{
2407 LogFlow(("GMMR0FreeLargePage: pVM=%p idPage=%x\n", pVM, idPage));
2408
2409 /*
2410 * Validate, get basics and take the semaphore.
2411 */
2412 PGMM pGMM;
2413 GMM_GET_VALID_INSTANCE(pGMM, VERR_INTERNAL_ERROR);
2414 PGVM pGVM;
2415 int rc = GVMMR0ByVMAndEMT(pVM, idCpu, &pGVM);
2416 if (RT_FAILURE(rc))
2417 return rc;
2418
2419 /* Not supported in legacy mode where we allocate the memory in ring 3 and lock it in ring 0. */
2420 if (pGMM->fLegacyAllocationMode)
2421 return VERR_NOT_SUPPORTED;
2422
2423 rc = RTSemFastMutexRequest(pGMM->Mtx);
2424 AssertRC(rc);
2425 if (GMM_CHECK_SANITY_UPON_ENTERING(pGMM))
2426 {
2427 const unsigned cPages = (GMM_CHUNK_SIZE >> PAGE_SHIFT);
2428
2429 if (RT_UNLIKELY(pGVM->gmm.s.Allocated.cBasePages < cPages))
2430 {
2431 Log(("GMMR0FreeLargePage: allocated=%#llx cPages=%#x!\n", pGVM->gmm.s.Allocated.cBasePages, cPages));
2432 RTSemFastMutexRelease(pGMM->Mtx);
2433 return VERR_GMM_ATTEMPT_TO_FREE_TOO_MUCH;
2434 }
2435
2436 PGMMPAGE pPage = gmmR0GetPage(pGMM, idPage);
2437 if ( RT_LIKELY(pPage)
2438 && RT_LIKELY(GMM_PAGE_IS_PRIVATE(pPage)))
2439 {
2440 PGMMCHUNK pChunk = gmmR0GetChunk(pGMM, idPage >> GMM_CHUNKID_SHIFT);
2441 Assert(pChunk);
2442 Assert(pChunk->cFree < GMM_CHUNK_NUM_PAGES);
2443 Assert(pChunk->cPrivate > 0);
2444
2445 /* Release the memory immediately. */
2446 gmmR0FreeChunk(pGMM, NULL, pChunk);
2447
2448 /* Update accounting. */
2449 pGVM->gmm.s.Allocated.cBasePages -= cPages;
2450 pGVM->gmm.s.cPrivatePages -= cPages;
2451 pGMM->cAllocatedPages -= cPages;
2452 }
2453 else
2454 rc = VERR_GMM_PAGE_NOT_FOUND;
2455 }
2456 else
2457 rc = VERR_INTERNAL_ERROR_5;
2458
2459 RTSemFastMutexRelease(pGMM->Mtx);
2460 LogFlow(("GMMR0FreeLargePage: returns %Rrc\n", rc));
2461 return rc;
2462}
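
/**
 * Usage sketch (illustrative only): allocating and releasing a 2 MB large
 * page using the two functions above. Only the signatures and constants
 * visible in this file are assumed; error handling is minimal.
 *
 * @code
 *  static int exampleLargePageRoundTrip(PVM pVM, VMCPUID idCpu)
 *  {
 *      uint32_t idPage = NIL_GMM_PAGEID;
 *      RTHCPHYS HCPhys = NIL_RTHCPHYS;
 *      int rc = GMMR0AllocateLargePage(pVM, idCpu, GMM_CHUNK_SIZE, &idPage, &HCPhys);
 *      if (RT_SUCCESS(rc))
 *      {
 *          // ... map HCPhys into the guest as a single large page ...
 *          rc = GMMR0FreeLargePage(pVM, idCpu, idPage);
 *      }
 *      return rc;
 *  }
 * @endcode
 */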
2463
2464
2465/**
2466 * VMMR0 request wrapper for GMMR0FreeLargePage.
2467 *
2468 * @returns see GMMR0FreeLargePage.
2469 * @param pVM Pointer to the shared VM structure.
2470 * @param idCpu VCPU id
2471 * @param pReq The request packet.
2472 */
2473GMMR0DECL(int) GMMR0FreeLargePageReq(PVM pVM, VMCPUID idCpu, PGMMFREELARGEPAGEREQ pReq)
2474{
2475 /*
2476 * Validate input and pass it on.
2477 */
2478 AssertPtrReturn(pVM, VERR_INVALID_POINTER);
2479 AssertPtrReturn(pReq, VERR_INVALID_POINTER);
2480 AssertMsgReturn(pReq->Hdr.cbReq == sizeof(GMMFREELARGEPAGEREQ),
2481 ("%#x != %#x\n", pReq->Hdr.cbReq, sizeof(GMMFREELARGEPAGEREQ)),
2482 VERR_INVALID_PARAMETER);
2483
2484 return GMMR0FreeLargePage(pVM, idCpu, pReq->idPage);
2485}
2486
2487/**
2488 * Frees a chunk, giving it back to the host OS.
2489 *
2490 * @param pGMM Pointer to the GMM instance.
2491 * @param pGVM This is set when called from GMMR0CleanupVM so we can
2492 * unmap and free the chunk in one go.
2493 * @param pChunk The chunk to free.
2494 */
2495static void gmmR0FreeChunk(PGMM pGMM, PGVM pGVM, PGMMCHUNK pChunk)
2496{
2497 Assert(pChunk->Core.Key != NIL_GMM_CHUNKID);
2498
2499 /*
2500 * Cleanup hack! Unmap the chunk from the caller's address space.
2501 */
2502 if ( pChunk->cMappings
2503 && pGVM)
2504 gmmR0UnmapChunk(pGMM, pGVM, pChunk);
2505
2506 /*
2507 * If there are current mappings of the chunk, then request the
2508 * VMs to unmap them. Reposition the chunk in the free list so
2509 * it won't be a likely candidate for allocations.
2510 */
2511 if (pChunk->cMappings)
2512 {
2513 /** @todo R0 -> VM request */
2514 /* The chunk can be owned by more than one VM if fBoundMemoryMode is false! */
2515 Log(("gmmR0FreeChunk: chunk still has %d mappings; don't free!\n", pChunk->cMappings));
2516 }
2517 else
2518 {
2519 /*
2520 * Try free the memory object.
2521 */
2522 int rc = RTR0MemObjFree(pChunk->MemObj, false /* fFreeMappings */);
2523 if (RT_SUCCESS(rc))
2524 {
2525 pChunk->MemObj = NIL_RTR0MEMOBJ;
2526
2527 /*
2528 * Unlink it from everywhere.
2529 */
2530 gmmR0UnlinkChunk(pChunk);
2531
2532 PAVLU32NODECORE pCore = RTAvlU32Remove(&pGMM->pChunks, pChunk->Core.Key);
2533 Assert(pCore == &pChunk->Core); NOREF(pCore);
2534
2535 PGMMCHUNKTLBE pTlbe = &pGMM->ChunkTLB.aEntries[GMM_CHUNKTLB_IDX(pChunk->Core.Key)];
2536 if (pTlbe->pChunk == pChunk)
2537 {
2538 pTlbe->idChunk = NIL_GMM_CHUNKID;
2539 pTlbe->pChunk = NULL;
2540 }
2541
2542 Assert(pGMM->cChunks > 0);
2543 pGMM->cChunks--;
2544
2545 /*
2546 * Free the Chunk ID and struct.
2547 */
2548 gmmR0FreeChunkId(pGMM, pChunk->Core.Key);
2549 pChunk->Core.Key = NIL_GMM_CHUNKID;
2550
2551 RTMemFree(pChunk->paMappings);
2552 pChunk->paMappings = NULL;
2553
2554 RTMemFree(pChunk);
2555 }
2556 else
2557 AssertRC(rc);
2558 }
2559}
2560
2561
2562/**
2563 * Free page worker.
2564 *
2565 * The caller does all the statistic decrementing, we do all the incrementing.
2566 *
2567 * @param pGMM Pointer to the GMM instance data.
2568 * @param pChunk Pointer to the chunk this page belongs to.
2569 * @param idPage The Page ID.
2570 * @param pPage Pointer to the page.
2571 */
2572static void gmmR0FreePageWorker(PGMM pGMM, PGMMCHUNK pChunk, uint32_t idPage, PGMMPAGE pPage)
2573{
2574 Log3(("F pPage=%p iPage=%#x/%#x u2State=%d iFreeHead=%#x\n",
2575 pPage, pPage - &pChunk->aPages[0], idPage, pPage->Common.u2State, pChunk->iFreeHead)); NOREF(idPage);
2576
2577 /*
2578 * Put the page on the free list.
2579 */
2580 pPage->u = 0;
2581 pPage->Free.u2State = GMM_PAGE_STATE_FREE;
2582 Assert(pChunk->iFreeHead < RT_ELEMENTS(pChunk->aPages) || pChunk->iFreeHead == UINT16_MAX);
2583 pPage->Free.iNext = pChunk->iFreeHead;
2584 pChunk->iFreeHead = pPage - &pChunk->aPages[0];
2585
2586 /*
2587 * Update statistics (the cShared/cPrivate stats are up to date already),
2588 * and relink the chunk if necessary.
2589 */
2590 if ((pChunk->cFree & GMM_CHUNK_FREE_SET_MASK) == 0)
2591 {
2592 gmmR0UnlinkChunk(pChunk);
2593 pChunk->cFree++;
2594 gmmR0LinkChunk(pChunk, pChunk->cShared ? &pGMM->Shared : &pGMM->Private);
2595 }
2596 else
2597 {
2598 pChunk->cFree++;
2599 pChunk->pSet->cFreePages++;
2600
2601 /*
2602 * If the chunk becomes empty, consider giving memory back to the host OS.
2603 *
2604 * The current strategy is to try to give it back if there are other chunks
2605 * in this free list, meaning if there are at least 240 free pages in this
2606 * category. Note that since there are probably mappings of the chunk,
2607 * it won't be freed up instantly, which probably screws up this logic
2608 * a bit...
2609 */
2610 if (RT_UNLIKELY( pChunk->cFree == GMM_CHUNK_NUM_PAGES
2611 && pChunk->pFreeNext
2612 && pChunk->pFreePrev
2613 && !pGMM->fLegacyAllocationMode))
2614 gmmR0FreeChunk(pGMM, NULL, pChunk);
2615 }
2616}
2617
2618
2619/**
2620 * Frees a shared page, the page is known to exist and be valid and such.
2621 *
2622 * @param pGMM Pointer to the GMM instance.
2623 * @param idPage The Page ID
2624 * @param pPage The page structure.
2625 */
2626DECLINLINE(void) gmmR0FreeSharedPage(PGMM pGMM, uint32_t idPage, PGMMPAGE pPage)
2627{
2628 PGMMCHUNK pChunk = gmmR0GetChunk(pGMM, idPage >> GMM_CHUNKID_SHIFT);
2629 Assert(pChunk);
2630 Assert(pChunk->cFree < GMM_CHUNK_NUM_PAGES);
2631 Assert(pChunk->cShared > 0);
2632 Assert(pGMM->cSharedPages > 0);
2633 Assert(pGMM->cAllocatedPages > 0);
2634 Assert(!pPage->Shared.cRefs);
2635
2636 pChunk->cShared--;
2637 pGMM->cAllocatedPages--;
2638 pGMM->cSharedPages--;
2639 gmmR0FreePageWorker(pGMM, pChunk, idPage, pPage);
2640}
2641
2642#ifdef VBOX_WITH_PAGE_SHARING
2643/**
2644 * Converts a private page to a shared page, the page is known to exist and be valid and such.
2645 *
2646 * @param pGMM Pointer to the GMM instance.
2647 * @param pGVM Pointer to the GVM instance.
2648 * @param HCPhys Host physical address
2649 * @param idPage The Page ID
2650 * @param pPage The page structure.
2651 */
2652DECLINLINE(void) gmmR0ConvertToSharedPage(PGMM pGMM, PGVM pGVM, RTHCPHYS HCPhys, uint32_t idPage, PGMMPAGE pPage)
2653{
2654 PGMMCHUNK pChunk = gmmR0GetChunk(pGMM, idPage >> GMM_CHUNKID_SHIFT);
2655 Assert(pChunk);
2656 Assert(pChunk->cFree < GMM_CHUNK_NUM_PAGES);
2657 Assert(GMM_PAGE_IS_PRIVATE(pPage));
2658
2659 pChunk->cPrivate--;
2660 pChunk->cShared++;
2661
2662 pGMM->cSharedPages++;
2663
2664 pGVM->gmm.s.cSharedPages++;
2665 pGVM->gmm.s.cPrivatePages--;
2666
2667 /* Modify the page structure. */
2668 pPage->Shared.pfn = (uint32_t)(uint64_t)(HCPhys >> PAGE_SHIFT);
2669 pPage->Shared.cRefs = 1;
2670 pPage->Common.u2State = GMM_PAGE_STATE_SHARED;
2671}
2672
2673/**
2674 * Increase the use count of a shared page, the page is known to exist and be valid and such.
2675 *
2676 * @param pGMM Pointer to the GMM instance.
2677 * @param pGVM Pointer to the GVM instance.
2678 * @param pPage The page structure.
2679 */
2680DECLINLINE(void) gmmR0UseSharedPage(PGMM pGMM, PGVM pGVM, PGMMPAGE pPage)
2681{
2682 Assert(pGMM->cSharedPages > 0);
2683 Assert(pGMM->cAllocatedPages > 0);
2684
2685 pGMM->cDuplicatePages++;
2686
2687 pPage->Shared.cRefs++;
2688 pGVM->gmm.s.cSharedPages++;
2689 pGVM->gmm.s.Allocated.cBasePages++;
2690}
2691#endif
2692
2693/**
2694 * Frees a private page, the page is known to exist and be valid and such.
2695 *
2696 * @param pGMM Pointer to the GMM instance.
2697 * @param idPage The Page ID
2698 * @param pPage The page structure.
2699 */
2700DECLINLINE(void) gmmR0FreePrivatePage(PGMM pGMM, uint32_t idPage, PGMMPAGE pPage)
2701{
2702 PGMMCHUNK pChunk = gmmR0GetChunk(pGMM, idPage >> GMM_CHUNKID_SHIFT);
2703 Assert(pChunk);
2704 Assert(pChunk->cFree < GMM_CHUNK_NUM_PAGES);
2705 Assert(pChunk->cPrivate > 0);
2706 Assert(pGMM->cAllocatedPages > 0);
2707
2708 pChunk->cPrivate--;
2709 pGMM->cAllocatedPages--;
2710 gmmR0FreePageWorker(pGMM, pChunk, idPage, pPage);
2711}
2712
2713/**
2714 * Common worker for GMMR0FreePages and GMMR0BalloonedPages.
2715 *
2716 * @returns VBox status code:
2717 * @retval xxx
2718 *
2719 * @param pGMM Pointer to the GMM instance data.
2720 * @param pGVM Pointer to the shared VM structure.
2721 * @param cPages The number of pages to free.
2722 * @param paPages Pointer to the page descriptors.
2723 * @param enmAccount The account this relates to.
2724 */
2725static int gmmR0FreePages(PGMM pGMM, PGVM pGVM, uint32_t cPages, PGMMFREEPAGEDESC paPages, GMMACCOUNT enmAccount)
2726{
2727 /*
2728 * Check that the request isn't impossible wrt to the account status.
2729 */
2730 switch (enmAccount)
2731 {
2732 case GMMACCOUNT_BASE:
2733 if (RT_UNLIKELY(pGVM->gmm.s.Allocated.cBasePages < cPages))
2734 {
2735 Log(("gmmR0FreePages: allocated=%#llx cPages=%#x!\n", pGVM->gmm.s.Allocated.cBasePages, cPages));
2736 return VERR_GMM_ATTEMPT_TO_FREE_TOO_MUCH;
2737 }
2738 break;
2739 case GMMACCOUNT_SHADOW:
2740 if (RT_UNLIKELY(pGVM->gmm.s.Allocated.cShadowPages < cPages))
2741 {
2742 Log(("gmmR0FreePages: allocated=%#llx cPages=%#x!\n", pGVM->gmm.s.Allocated.cShadowPages, cPages));
2743 return VERR_GMM_ATTEMPT_TO_FREE_TOO_MUCH;
2744 }
2745 break;
2746 case GMMACCOUNT_FIXED:
2747 if (RT_UNLIKELY(pGVM->gmm.s.Allocated.cFixedPages < cPages))
2748 {
2749 Log(("gmmR0FreePages: allocated=%#llx cPages=%#x!\n", pGVM->gmm.s.Allocated.cFixedPages, cPages));
2750 return VERR_GMM_ATTEMPT_TO_FREE_TOO_MUCH;
2751 }
2752 break;
2753 default:
2754 AssertMsgFailedReturn(("enmAccount=%d\n", enmAccount), VERR_INTERNAL_ERROR);
2755 }
2756
2757 /*
2758 * Walk the descriptors and free the pages.
2759 *
2760 * Statistics (except the account) are being updated as we go along,
2761 * unlike the alloc code. Also, stop on the first error.
2762 */
2763 int rc = VINF_SUCCESS;
2764 uint32_t iPage;
2765 for (iPage = 0; iPage < cPages; iPage++)
2766 {
2767 uint32_t idPage = paPages[iPage].idPage;
2768 PGMMPAGE pPage = gmmR0GetPage(pGMM, idPage);
2769 if (RT_LIKELY(pPage))
2770 {
2771 if (RT_LIKELY(GMM_PAGE_IS_PRIVATE(pPage)))
2772 {
2773 if (RT_LIKELY(pPage->Private.hGVM == pGVM->hSelf))
2774 {
2775 Assert(pGVM->gmm.s.cPrivatePages);
2776 pGVM->gmm.s.cPrivatePages--;
2777 gmmR0FreePrivatePage(pGMM, idPage, pPage);
2778 }
2779 else
2780 {
2781 Log(("gmmR0AllocatePages: #%#x/%#x: not owner! hGVM=%#x hSelf=%#x\n", iPage, idPage,
2782 pPage->Private.hGVM, pGVM->hSelf));
2783 rc = VERR_GMM_NOT_PAGE_OWNER;
2784 break;
2785 }
2786 }
2787 else if (RT_LIKELY(GMM_PAGE_IS_SHARED(pPage)))
2788 {
2789 Assert(pGVM->gmm.s.cSharedPages);
2790 pGVM->gmm.s.cSharedPages--;
2791 Assert(pPage->Shared.cRefs);
2792 if (!--pPage->Shared.cRefs)
2793 {
2794 gmmR0FreeSharedPage(pGMM, idPage, pPage);
2795 }
2796 else
2797 {
2798 Assert(pGMM->cDuplicatePages);
2799 pGMM->cDuplicatePages--;
2800 }
2801 }
2802 else
2803 {
2804 Log(("gmmR0FreePages: #%#x/%#x: already free!\n", iPage, idPage));
2805 rc = VERR_GMM_PAGE_ALREADY_FREE;
2806 break;
2807 }
2808 }
2809 else
2810 {
2811 Log(("gmmR0FreePages: #%#x/%#x: not found!\n", iPage, idPage));
2812 rc = VERR_GMM_PAGE_NOT_FOUND;
2813 break;
2814 }
2815 paPages[iPage].idPage = NIL_GMM_PAGEID;
2816 }
2817
2818 /*
2819 * Update the account.
2820 */
2821 switch (enmAccount)
2822 {
2823 case GMMACCOUNT_BASE: pGVM->gmm.s.Allocated.cBasePages -= iPage; break;
2824 case GMMACCOUNT_SHADOW: pGVM->gmm.s.Allocated.cShadowPages -= iPage; break;
2825 case GMMACCOUNT_FIXED: pGVM->gmm.s.Allocated.cFixedPages -= iPage; break;
2826 default:
2827 AssertMsgFailedReturn(("enmAccount=%d\n", enmAccount), VERR_INTERNAL_ERROR);
2828 }
2829
2830 /*
2831 * Any threshold stuff to be done here?
2832 */
2833
2834 return rc;
2835}
2836
2837
2838/**
2839 * Free one or more pages.
2840 *
2841 * This is typically used at reset time or power off.
2842 *
2843 * @returns VBox status code:
2844 * @retval xxx
2845 *
2846 * @param pVM Pointer to the shared VM structure.
2847 * @param idCpu VCPU id
2848 * @param cPages The number of pages to free.
2849 * @param paPages Pointer to the page descriptors containing the Page IDs for each page.
2850 * @param enmAccount The account this relates to.
2851 * @thread EMT.
2852 */
2853GMMR0DECL(int) GMMR0FreePages(PVM pVM, VMCPUID idCpu, uint32_t cPages, PGMMFREEPAGEDESC paPages, GMMACCOUNT enmAccount)
2854{
2855 LogFlow(("GMMR0FreePages: pVM=%p cPages=%#x paPages=%p enmAccount=%d\n", pVM, cPages, paPages, enmAccount));
2856
2857 /*
2858 * Validate input and get the basics.
2859 */
2860 PGMM pGMM;
2861 GMM_GET_VALID_INSTANCE(pGMM, VERR_INTERNAL_ERROR);
2862 PGVM pGVM;
2863 int rc = GVMMR0ByVMAndEMT(pVM, idCpu, &pGVM);
2864 if (RT_FAILURE(rc))
2865 return rc;
2866
2867 AssertPtrReturn(paPages, VERR_INVALID_PARAMETER);
2868 AssertMsgReturn(enmAccount > GMMACCOUNT_INVALID && enmAccount < GMMACCOUNT_END, ("%d\n", enmAccount), VERR_INVALID_PARAMETER);
2869 AssertMsgReturn(cPages > 0 && cPages < RT_BIT(32 - PAGE_SHIFT), ("%#x\n", cPages), VERR_INVALID_PARAMETER);
2870
2871 for (unsigned iPage = 0; iPage < cPages; iPage++)
2872 AssertMsgReturn( paPages[iPage].idPage <= GMM_PAGEID_LAST
2873 /*|| paPages[iPage].idPage == NIL_GMM_PAGEID*/,
2874 ("#%#x: %#x\n", iPage, paPages[iPage].idPage), VERR_INVALID_PARAMETER);
2875
2876 /*
2877 * Take the semaphore and call the worker function.
2878 */
2879 rc = RTSemFastMutexRequest(pGMM->Mtx);
2880 AssertRC(rc);
2881 if (GMM_CHECK_SANITY_UPON_ENTERING(pGMM))
2882 {
2883 rc = gmmR0FreePages(pGMM, pGVM, cPages, paPages, enmAccount);
2884 GMM_CHECK_SANITY_UPON_LEAVING(pGMM);
2885 }
2886 else
2887 rc = VERR_INTERNAL_ERROR_5;
2888 RTSemFastMutexRelease(pGMM->Mtx);
2889 LogFlow(("GMMR0FreePages: returns %Rrc\n", rc));
2890 return rc;
2891}
2892
2893
2894/**
2895 * VMMR0 request wrapper for GMMR0FreePages.
2896 *
2897 * @returns see GMMR0FreePages.
2898 * @param pVM Pointer to the shared VM structure.
2899 * @param idCpu VCPU id
2900 * @param pReq The request packet.
2901 */
2902GMMR0DECL(int) GMMR0FreePagesReq(PVM pVM, VMCPUID idCpu, PGMMFREEPAGESREQ pReq)
2903{
2904 /*
2905 * Validate input and pass it on.
2906 */
2907 AssertPtrReturn(pVM, VERR_INVALID_POINTER);
2908 AssertPtrReturn(pReq, VERR_INVALID_POINTER);
2909 AssertMsgReturn(pReq->Hdr.cbReq >= RT_UOFFSETOF(GMMFREEPAGESREQ, aPages[0]),
2910 ("%#x < %#x\n", pReq->Hdr.cbReq, RT_UOFFSETOF(GMMFREEPAGESREQ, aPages[0])),
2911 VERR_INVALID_PARAMETER);
2912 AssertMsgReturn(pReq->Hdr.cbReq == RT_UOFFSETOF(GMMFREEPAGESREQ, aPages[pReq->cPages]),
2913 ("%#x != %#x\n", pReq->Hdr.cbReq, RT_UOFFSETOF(GMMFREEPAGESREQ, aPages[pReq->cPages])),
2914 VERR_INVALID_PARAMETER);
2915
2916 return GMMR0FreePages(pVM, idCpu, pReq->cPages, &pReq->aPages[0], pReq->enmAccount);
2917}
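
/**
 * Usage sketch (illustrative only): building a free request for pages that
 * were previously handed out by GMMR0AllocatePages. Only the idPage member of
 * the free page descriptor is filled in, matching what gmmR0FreePages reads;
 * the helper name and the paIds parameter are hypothetical.
 *
 * @code
 *  static int exampleFreeBaseRam(PVM pVM, VMCPUID idCpu, uint32_t const *paIds, uint32_t cPages)
 *  {
 *      uint32_t         cbReq = (uint32_t)RT_UOFFSETOF(GMMFREEPAGESREQ, aPages[cPages]);
 *      PGMMFREEPAGESREQ pReq  = (PGMMFREEPAGESREQ)RTMemAllocZ(cbReq);
 *      if (!pReq)
 *          return VERR_NO_MEMORY;
 *
 *      pReq->Hdr.cbReq  = cbReq;
 *      pReq->cPages     = cPages;
 *      pReq->enmAccount = GMMACCOUNT_BASE;
 *      for (uint32_t i = 0; i < cPages; i++)
 *          pReq->aPages[i].idPage = paIds[i];
 *
 *      int rc = GMMR0FreePagesReq(pVM, idCpu, pReq);       // normally reached via VMMR0
 *      RTMemFree(pReq);
 *      return rc;
 *  }
 * @endcode
 */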
2918
2919
2920/**
2921 * Report back on a memory ballooning request.
2922 *
2923 * The request may or may not have been initiated by the GMM. If it was initiated
2924 * by the GMM it is important that this function is called even if no pages were
2925 * ballooned.
2926 *
2927 * @returns VBox status code:
2928 * @retval VERR_GMM_ATTEMPT_TO_FREE_TOO_MUCH
2929 * @retval VERR_GMM_ATTEMPT_TO_DEFLATE_TOO_MUCH
2930 * @retval VERR_GMM_OVERCOMMITED_TRY_AGAIN_IN_A_BIT - reset condition
2931 * indicating that we won't necessarily have sufficient RAM to boot
2932 * the VM again and that it should pause until this changes (we'll try
2933 * balloon some other VM). (For standard deflate we have little choice
2934 * but to hope the VM won't use the memory that was returned to it.)
2935 *
2936 * @param pVM Pointer to the shared VM structure.
2937 * @param idCpu VCPU id
2938 * @param enmAction Inflate/deflate/reset
2939 * @param cBalloonedPages The number of pages that was ballooned.
2940 *
2941 * @thread EMT.
2942 */
2943GMMR0DECL(int) GMMR0BalloonedPages(PVM pVM, VMCPUID idCpu, GMMBALLOONACTION enmAction, uint32_t cBalloonedPages)
2944{
2945 LogFlow(("GMMR0BalloonedPages: pVM=%p enmAction=%d cBalloonedPages=%#x\n",
2946 pVM, enmAction, cBalloonedPages));
2947
2948 AssertMsgReturn(cBalloonedPages < RT_BIT(32 - PAGE_SHIFT), ("%#x\n", cBalloonedPages), VERR_INVALID_PARAMETER);
2949
2950 /*
2951 * Validate input and get the basics.
2952 */
2953 PGMM pGMM;
2954 GMM_GET_VALID_INSTANCE(pGMM, VERR_INTERNAL_ERROR);
2955 PGVM pGVM;
2956 int rc = GVMMR0ByVMAndEMT(pVM, idCpu, &pGVM);
2957 if (RT_FAILURE(rc))
2958 return rc;
2959
2960 /*
2961 * Take the semaphore and do some more validations.
2962 */
2963 rc = RTSemFastMutexRequest(pGMM->Mtx);
2964 AssertRC(rc);
2965 if (GMM_CHECK_SANITY_UPON_ENTERING(pGMM))
2966 {
2967 switch (enmAction)
2968 {
2969 case GMMBALLOONACTION_INFLATE:
2970 {
2971 if (RT_LIKELY(pGVM->gmm.s.Allocated.cBasePages + pGVM->gmm.s.cBalloonedPages + cBalloonedPages <= pGVM->gmm.s.Reserved.cBasePages))
2972 {
2973 /*
2974 * Record the ballooned memory.
2975 */
2976 pGMM->cBalloonedPages += cBalloonedPages;
2977 if (pGVM->gmm.s.cReqBalloonedPages)
2978 {
2979 /* Code path never taken so far. It might be interesting in the future to request ballooned memory from guests in low memory conditions. */
2980 AssertFailed();
2981
2982 pGVM->gmm.s.cBalloonedPages += cBalloonedPages;
2983 pGVM->gmm.s.cReqActuallyBalloonedPages += cBalloonedPages;
2984 Log(("GMMR0BalloonedPages: +%#x - Global=%#llx / VM: Total=%#llx Req=%#llx Actual=%#llx (pending)\n", cBalloonedPages,
2985 pGMM->cBalloonedPages, pGVM->gmm.s.cBalloonedPages, pGVM->gmm.s.cReqBalloonedPages, pGVM->gmm.s.cReqActuallyBalloonedPages));
2986 }
2987 else
2988 {
2989 pGVM->gmm.s.cBalloonedPages += cBalloonedPages;
2990 Log(("GMMR0BalloonedPages: +%#x - Global=%#llx / VM: Total=%#llx (user)\n",
2991 cBalloonedPages, pGMM->cBalloonedPages, pGVM->gmm.s.cBalloonedPages));
2992 }
2993 }
2994 else
2995 rc = VERR_GMM_ATTEMPT_TO_FREE_TOO_MUCH;
2996 break;
2997 }
2998
2999 case GMMBALLOONACTION_DEFLATE:
3000 {
3001 /* Deflate. */
3002 if (pGVM->gmm.s.cBalloonedPages >= cBalloonedPages)
3003 {
3004 /*
3005 * Record the ballooned memory.
3006 */
3007 Assert(pGMM->cBalloonedPages >= cBalloonedPages);
3008 pGMM->cBalloonedPages -= cBalloonedPages;
3009 pGVM->gmm.s.cBalloonedPages -= cBalloonedPages;
3010 if (pGVM->gmm.s.cReqDeflatePages)
3011 {
3012 AssertFailed(); /* This path is for later. */
3013 Log(("GMMR0BalloonedPages: -%#x - Global=%#llx / VM: Total=%#llx Req=%#llx\n",
3014 cBalloonedPages, pGMM->cBalloonedPages, pGVM->gmm.s.cBalloonedPages, pGVM->gmm.s.cReqDeflatePages));
3015
3016 /*
3017 * Anything we need to do here now when the request has been completed?
3018 */
3019 pGVM->gmm.s.cReqDeflatePages = 0;
3020 }
3021 else
3022 Log(("GMMR0BalloonedPages: -%#x - Global=%#llx / VM: Total=%#llx (user)\n",
3023 cBalloonedPages, pGMM->cBalloonedPages, pGVM->gmm.s.cBalloonedPages));
3024 }
3025 else
3026 rc = VERR_GMM_ATTEMPT_TO_DEFLATE_TOO_MUCH;
3027 break;
3028 }
3029
3030 case GMMBALLOONACTION_RESET:
3031 {
3032 /* Reset to an empty balloon. */
3033 Assert(pGMM->cBalloonedPages >= pGVM->gmm.s.cBalloonedPages);
3034
3035 pGMM->cBalloonedPages -= pGVM->gmm.s.cBalloonedPages;
3036 pGVM->gmm.s.cBalloonedPages = 0;
3037 break;
3038 }
3039
3040 default:
3041 rc = VERR_INVALID_PARAMETER;
3042 break;
3043 }
3044 GMM_CHECK_SANITY_UPON_LEAVING(pGMM);
3045 }
3046 else
3047 rc = VERR_INTERNAL_ERROR_5;
3048
3049 RTSemFastMutexRelease(pGMM->Mtx);
3050 LogFlow(("GMMR0BalloonedPages: returns %Rrc\n", rc));
3051 return rc;
3052}
3053
3054
3055/**
3056 * VMMR0 request wrapper for GMMR0BalloonedPages.
3057 *
3058 * @returns see GMMR0BalloonedPages.
3059 * @param pVM Pointer to the shared VM structure.
3060 * @param idCpu VCPU id
3061 * @param pReq The request packet.
3062 */
3063GMMR0DECL(int) GMMR0BalloonedPagesReq(PVM pVM, VMCPUID idCpu, PGMMBALLOONEDPAGESREQ pReq)
3064{
3065 /*
3066 * Validate input and pass it on.
3067 */
3068 AssertPtrReturn(pVM, VERR_INVALID_POINTER);
3069 AssertPtrReturn(pReq, VERR_INVALID_POINTER);
3070 AssertMsgReturn(pReq->Hdr.cbReq == sizeof(GMMBALLOONEDPAGESREQ),
3071 ("%#x < %#x\n", pReq->Hdr.cbReq, sizeof(GMMBALLOONEDPAGESREQ)),
3072 VERR_INVALID_PARAMETER);
3073
3074 return GMMR0BalloonedPages(pVM, idCpu, pReq->enmAction, pReq->cBalloonedPages);
3075}
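
/**
 * Usage sketch (illustrative only): reporting balloon changes with the action
 * codes handled above. The page counts are made up for the example, and pVM /
 * idCpu are assumed to come from the calling EMT's context; real callers go
 * through the request wrapper.
 *
 * @code
 *  // The guest gave up 1024 pages to the balloon:
 *  int rc = GMMR0BalloonedPages(pVM, idCpu, GMMBALLOONACTION_INFLATE, 1024);
 *
 *  // The guest later reclaimed 256 of them:
 *  if (RT_SUCCESS(rc))
 *      rc = GMMR0BalloonedPages(pVM, idCpu, GMMBALLOONACTION_DEFLATE, 256);
 *
 *  // VM reset: drop the whole balloon record (the page count is not used here).
 *  if (RT_SUCCESS(rc))
 *      rc = GMMR0BalloonedPages(pVM, idCpu, GMMBALLOONACTION_RESET, 0);
 * @endcode
 */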
3076
3077/**
3078 * Return memory statistics for the hypervisor
3079 *
3080 * @returns VBox status code:
3081 * @param pVM Pointer to the shared VM structure.
3082 * @param pReq The request packet.
3083 */
3084GMMR0DECL(int) GMMR0QueryHypervisorMemoryStatsReq(PVM pVM, PGMMMEMSTATSREQ pReq)
3085{
3086 /*
3087 * Validate input and pass it on.
3088 */
3089 AssertPtrReturn(pVM, VERR_INVALID_POINTER);
3090 AssertPtrReturn(pReq, VERR_INVALID_POINTER);
3091 AssertMsgReturn(pReq->Hdr.cbReq == sizeof(GMMMEMSTATSREQ),
3092 ("%#x < %#x\n", pReq->Hdr.cbReq, sizeof(GMMMEMSTATSREQ)),
3093 VERR_INVALID_PARAMETER);
3094
3095 /*
3096 * Validate input and get the basics.
3097 */
3098 PGMM pGMM;
3099 GMM_GET_VALID_INSTANCE(pGMM, VERR_INTERNAL_ERROR);
3100 pReq->cAllocPages = pGMM->cAllocatedPages;
3101 pReq->cFreePages = (pGMM->cChunks << (GMM_CHUNK_SHIFT - PAGE_SHIFT)) - pGMM->cAllocatedPages;
3102 pReq->cBalloonedPages = pGMM->cBalloonedPages;
3103 pReq->cMaxPages = pGMM->cMaxPages;
3104 pReq->cSharedPages = pGMM->cDuplicatePages;
3105 GMM_CHECK_SANITY_UPON_LEAVING(pGMM);
3106
3107 return VINF_SUCCESS;
3108}
3109
3110/**
3111 * Return memory statistics for the VM
3112 *
3113 * @returns VBox status code:
3114 * @param pVM Pointer to the shared VM structure.
3115 * @param idCpu VCPU id
3116 * @param pReq The request packet.
3117 */
3118GMMR0DECL(int) GMMR0QueryMemoryStatsReq(PVM pVM, VMCPUID idCpu, PGMMMEMSTATSREQ pReq)
3119{
3120 /*
3121 * Validate input and pass it on.
3122 */
3123 AssertPtrReturn(pVM, VERR_INVALID_POINTER);
3124 AssertPtrReturn(pReq, VERR_INVALID_POINTER);
3125 AssertMsgReturn(pReq->Hdr.cbReq == sizeof(GMMMEMSTATSREQ),
3126 ("%#x < %#x\n", pReq->Hdr.cbReq, sizeof(GMMMEMSTATSREQ)),
3127 VERR_INVALID_PARAMETER);
3128
3129 /*
3130 * Validate input and get the basics.
3131 */
3132 PGMM pGMM;
3133 GMM_GET_VALID_INSTANCE(pGMM, VERR_INTERNAL_ERROR);
3134 PGVM pGVM;
3135 int rc = GVMMR0ByVMAndEMT(pVM, idCpu, &pGVM);
3136 if (RT_FAILURE(rc))
3137 return rc;
3138
3139 /*
3140 * Take the semaphore and do some more validations.
3141 */
3142 rc = RTSemFastMutexRequest(pGMM->Mtx);
3143 AssertRC(rc);
3144 if (GMM_CHECK_SANITY_UPON_ENTERING(pGMM))
3145 {
3146 pReq->cAllocPages = pGVM->gmm.s.Allocated.cBasePages;
3147 pReq->cBalloonedPages = pGVM->gmm.s.cBalloonedPages;
3148 pReq->cMaxPages = pGVM->gmm.s.Reserved.cBasePages;
3149 pReq->cFreePages = pReq->cMaxPages - pReq->cAllocPages;
3150 }
3151 else
3152 rc = VERR_INTERNAL_ERROR_5;
3153
3154 RTSemFastMutexRelease(pGMM->Mtx);
3155 LogFlow(("GMMR3QueryVMMemoryStats: returns %Rrc\n", rc));
3156 return rc;
3157}
3158
3159/**
3160 * Unmaps a chunk previously mapped into the address space of the current process.
3161 *
3162 * @returns VBox status code.
3163 * @param pGMM Pointer to the GMM instance data.
3164 * @param pGVM Pointer to the Global VM structure.
3165 * @param pChunk Pointer to the chunk to be unmapped.
3166 */
3167static int gmmR0UnmapChunk(PGMM pGMM, PGVM pGVM, PGMMCHUNK pChunk)
3168{
3169 if (!pGMM->fLegacyAllocationMode)
3170 {
3171 /*
3172 * Find the mapping and try unmapping it.
3173 */
3174 for (uint32_t i = 0; i < pChunk->cMappings; i++)
3175 {
3176 Assert(pChunk->paMappings[i].pGVM && pChunk->paMappings[i].MapObj != NIL_RTR0MEMOBJ);
3177 if (pChunk->paMappings[i].pGVM == pGVM)
3178 {
3179 /* unmap */
3180 int rc = RTR0MemObjFree(pChunk->paMappings[i].MapObj, false /* fFreeMappings (NA) */);
3181 if (RT_SUCCESS(rc))
3182 {
3183 /* update the record. */
3184 pChunk->cMappings--;
3185 if (i < pChunk->cMappings)
3186 pChunk->paMappings[i] = pChunk->paMappings[pChunk->cMappings];
3187 pChunk->paMappings[pChunk->cMappings].MapObj = NIL_RTR0MEMOBJ;
3188 pChunk->paMappings[pChunk->cMappings].pGVM = NULL;
3189 }
3190 return rc;
3191 }
3192 }
3193 }
3194 else if (pChunk->hGVM == pGVM->hSelf)
3195 return VINF_SUCCESS;
3196
3197 Log(("gmmR0UnmapChunk: Chunk %#x is not mapped into pGVM=%p/%#x\n", pChunk->Core.Key, pGVM, pGVM->hSelf));
3198 return VERR_GMM_CHUNK_NOT_MAPPED;
3199}
3200
3201
3202/**
3203 * Maps a chunk into the user address space of the current process.
3204 *
3205 * @returns VBox status code.
3206 * @param pGMM Pointer to the GMM instance data.
3207 * @param pGVM Pointer to the Global VM structure.
3208 * @param pChunk Pointer to the chunk to be mapped.
3209 * @param ppvR3 Where to store the ring-3 address of the mapping.
3210 * In the VERR_GMM_CHUNK_ALREADY_MAPPED case, this will
3211 * contain the address of the existing mapping.
3212 */
3213static int gmmR0MapChunk(PGMM pGMM, PGVM pGVM, PGMMCHUNK pChunk, PRTR3PTR ppvR3)
3214{
3215 /*
3216 * If we're in legacy mode this is simple.
3217 */
3218 if (pGMM->fLegacyAllocationMode)
3219 {
3220 if (pChunk->hGVM != pGVM->hSelf)
3221 {
3222 Log(("gmmR0MapChunk: chunk %#x is already mapped at %p!\n", pChunk->Core.Key, *ppvR3));
3223 return VERR_GMM_CHUNK_NOT_FOUND;
3224 }
3225
3226 *ppvR3 = RTR0MemObjAddressR3(pChunk->MemObj);
3227 return VINF_SUCCESS;
3228 }
3229
3230 /*
3231 * Check to see if the chunk is already mapped.
3232 */
3233 for (uint32_t i = 0; i < pChunk->cMappings; i++)
3234 {
3235 Assert(pChunk->paMappings[i].pGVM && pChunk->paMappings[i].MapObj != NIL_RTR0MEMOBJ);
3236 if (pChunk->paMappings[i].pGVM == pGVM)
3237 {
3238 *ppvR3 = RTR0MemObjAddressR3(pChunk->paMappings[i].MapObj);
3239 Log(("gmmR0MapChunk: chunk %#x is already mapped at %p!\n", pChunk->Core.Key, *ppvR3));
3240#ifdef VBOX_WITH_PAGE_SHARING
3241 /* The ring-3 chunk cache can be out of sync; don't fail. */
3242 return VINF_SUCCESS;
3243#else
3244 return VERR_GMM_CHUNK_ALREADY_MAPPED;
3245#endif
3246 }
3247 }
3248
3249 /*
3250 * Do the mapping.
3251 */
3252 RTR0MEMOBJ MapObj;
3253 int rc = RTR0MemObjMapUser(&MapObj, pChunk->MemObj, (RTR3PTR)-1, 0, RTMEM_PROT_READ | RTMEM_PROT_WRITE, NIL_RTR0PROCESS);
3254 if (RT_SUCCESS(rc))
3255 {
3256 /* reallocate the array? */
3257 if ((pChunk->cMappings & 1 /*7*/) == 0)
3258 {
3259 void *pvMappings = RTMemRealloc(pChunk->paMappings, (pChunk->cMappings + 2 /*8*/) * sizeof(pChunk->paMappings[0]));
3260 if (RT_UNLIKELY(!pvMappings))
3261 {
3262 rc = RTR0MemObjFree(MapObj, false /* fFreeMappings (NA) */);
3263 AssertRC(rc);
3264 return VERR_NO_MEMORY;
3265 }
3266 pChunk->paMappings = (PGMMCHUNKMAP)pvMappings;
3267 }
3268
3269 /* insert new entry */
3270 pChunk->paMappings[pChunk->cMappings].MapObj = MapObj;
3271 pChunk->paMappings[pChunk->cMappings].pGVM = pGVM;
3272 pChunk->cMappings++;
3273
3274 *ppvR3 = RTR0MemObjAddressR3(MapObj);
3275 }
3276
3277 return rc;
3278}
3279
3280/**
3281 * Check if a chunk is mapped into the specified VM
3282 *
3283 * @returns mapped yes/no
3284 * @param pGVM Pointer to the Global VM structure.
3285 * @param pChunk Pointer to the chunk to check.
3286 * @param ppvR3 Where to store the ring-3 address of the mapping (NULL if not mapped).
3287 */
3288static int gmmR0IsChunkMapped(PGVM pGVM, PGMMCHUNK pChunk, PRTR3PTR ppvR3)
3289{
3290 /*
3291 * Check to see if the chunk is already mapped.
3292 */
3293 for (uint32_t i = 0; i < pChunk->cMappings; i++)
3294 {
3295 Assert(pChunk->paMappings[i].pGVM && pChunk->paMappings[i].MapObj != NIL_RTR0MEMOBJ);
3296 if (pChunk->paMappings[i].pGVM == pGVM)
3297 {
3298 *ppvR3 = RTR0MemObjAddressR3(pChunk->paMappings[i].MapObj);
3299 return true;
3300 }
3301 }
3302 *ppvR3 = NULL;
3303 return false;
3304}
3305
3306/**
3307 * Map a chunk and/or unmap another chunk.
3308 *
3309 * The mapping and unmapping applies to the current process.
3310 *
3311 * This API does two things because it saves a kernel call per mapping when
3312 * the ring-3 mapping cache is full.
3313 *
3314 * @returns VBox status code.
3315 * @param pVM The VM.
3316 * @param idChunkMap The chunk to map. NIL_GMM_CHUNKID if nothing to map.
3317 * @param idChunkUnmap The chunk to unmap. NIL_GMM_CHUNKID if nothing to unmap.
3318 * @param ppvR3 Where to store the address of the mapped chunk. NULL is ok if nothing to map.
3319 * @thread EMT
3320 */
3321GMMR0DECL(int) GMMR0MapUnmapChunk(PVM pVM, uint32_t idChunkMap, uint32_t idChunkUnmap, PRTR3PTR ppvR3)
3322{
3323 LogFlow(("GMMR0MapUnmapChunk: pVM=%p idChunkMap=%#x idChunkUnmap=%#x ppvR3=%p\n",
3324 pVM, idChunkMap, idChunkUnmap, ppvR3));
3325
3326 /*
3327 * Validate input and get the basics.
3328 */
3329 PGMM pGMM;
3330 GMM_GET_VALID_INSTANCE(pGMM, VERR_INTERNAL_ERROR);
3331 PGVM pGVM;
3332 int rc = GVMMR0ByVM(pVM, &pGVM);
3333 if (RT_FAILURE(rc))
3334 return rc;
3335
3336 AssertCompile(NIL_GMM_CHUNKID == 0);
3337 AssertMsgReturn(idChunkMap <= GMM_CHUNKID_LAST, ("%#x\n", idChunkMap), VERR_INVALID_PARAMETER);
3338 AssertMsgReturn(idChunkUnmap <= GMM_CHUNKID_LAST, ("%#x\n", idChunkUnmap), VERR_INVALID_PARAMETER);
3339
3340 if ( idChunkMap == NIL_GMM_CHUNKID
3341 && idChunkUnmap == NIL_GMM_CHUNKID)
3342 return VERR_INVALID_PARAMETER;
3343
3344 if (idChunkMap != NIL_GMM_CHUNKID)
3345 {
3346 AssertPtrReturn(ppvR3, VERR_INVALID_POINTER);
3347 *ppvR3 = NIL_RTR3PTR;
3348 }
3349
3350 /*
3351 * Take the semaphore and do the work.
3352 *
3353 * The unmapping is done last since it's easier to undo a mapping than
3354 * to undo an unmapping. The ring-3 mapping cache cannot be so big
3355 * that it pushes the user virtual address space to within a chunk of
3356 * its limits, so no problem here.
3357 */
3358 rc = RTSemFastMutexRequest(pGMM->Mtx);
3359 AssertRC(rc);
3360 if (GMM_CHECK_SANITY_UPON_ENTERING(pGMM))
3361 {
3362 PGMMCHUNK pMap = NULL;
3363 if (idChunkMap != NIL_GMM_CHUNKID)
3364 {
3365 pMap = gmmR0GetChunk(pGMM, idChunkMap);
3366 if (RT_LIKELY(pMap))
3367 rc = gmmR0MapChunk(pGMM, pGVM, pMap, ppvR3);
3368 else
3369 {
3370 Log(("GMMR0MapUnmapChunk: idChunkMap=%#x\n", idChunkMap));
3371 rc = VERR_GMM_CHUNK_NOT_FOUND;
3372 }
3373 }
3374
3375 if ( idChunkUnmap != NIL_GMM_CHUNKID
3376 && RT_SUCCESS(rc))
3377 {
3378 PGMMCHUNK pUnmap = gmmR0GetChunk(pGMM, idChunkUnmap);
3379 if (RT_LIKELY(pUnmap))
3380 rc = gmmR0UnmapChunk(pGMM, pGVM, pUnmap);
3381 else
3382 {
3383 Log(("GMMR0MapUnmapChunk: idChunkUnmap=%#x\n", idChunkUnmap));
3384 rc = VERR_GMM_CHUNK_NOT_FOUND;
3385 }
3386
3387 if (RT_FAILURE(rc) && pMap)
3388 gmmR0UnmapChunk(pGMM, pGVM, pMap);
3389 }
3390
3391 GMM_CHECK_SANITY_UPON_LEAVING(pGMM);
3392 }
3393 else
3394 rc = VERR_INTERNAL_ERROR_5;
3395 RTSemFastMutexRelease(pGMM->Mtx);
3396
3397 LogFlow(("GMMR0MapUnmapChunk: returns %Rrc\n", rc));
3398 return rc;
3399}
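
/**
 * Usage sketch (illustrative only) of the map-one/unmap-one pattern this API
 * is meant for: mapping the chunk the ring-3 cache is faulting in while
 * unmapping the chunk it just evicted, in a single ring-0 call.  The
 * variables pVM, idChunkNew and idChunkOld are placeholders; real calls
 * arrive through GMMR0MapUnmapChunkReq below.
 *
 * @code
 *  RTR3PTR pvR3 = NIL_RTR3PTR;
 *  int rc = GMMR0MapUnmapChunk(pVM, idChunkNew, idChunkOld, &pvR3);
 *  if (RT_SUCCESS(rc))
 *  {
 *      // pvR3 now points at the ring-3 mapping of idChunkNew, and idChunkOld
 *      // (unless it was NIL_GMM_CHUNKID) has been unmapped from this process.
 *  }
 * @endcode
 */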
3400
3401
3402/**
3403 * VMMR0 request wrapper for GMMR0MapUnmapChunk.
3404 *
3405 * @returns see GMMR0MapUnmapChunk.
3406 * @param pVM Pointer to the shared VM structure.
3407 * @param pReq The request packet.
3408 */
3409GMMR0DECL(int) GMMR0MapUnmapChunkReq(PVM pVM, PGMMMAPUNMAPCHUNKREQ pReq)
3410{
3411 /*
3412 * Validate input and pass it on.
3413 */
3414 AssertPtrReturn(pVM, VERR_INVALID_POINTER);
3415 AssertPtrReturn(pReq, VERR_INVALID_POINTER);
3416 AssertMsgReturn(pReq->Hdr.cbReq == sizeof(*pReq), ("%#x != %#x\n", pReq->Hdr.cbReq, sizeof(*pReq)), VERR_INVALID_PARAMETER);
3417
3418 return GMMR0MapUnmapChunk(pVM, pReq->idChunkMap, pReq->idChunkUnmap, &pReq->pvR3);
3419}
3420
3421
3422/**
3423 * Legacy mode API for supplying pages.
3424 *
3425 * The specified user address points to an allocation-chunk-sized block that
3426 * will be locked down and used by the GMM when the VM asks for pages.
3427 *
3428 * @returns VBox status code.
3429 * @param pVM The VM.
3430 * @param idCpu VCPU id
3431 * @param pvR3 Pointer to the chunk size memory block to lock down.
3432 */
3433GMMR0DECL(int) GMMR0SeedChunk(PVM pVM, VMCPUID idCpu, RTR3PTR pvR3)
3434{
3435 /*
3436 * Validate input and get the basics.
3437 */
3438 PGMM pGMM;
3439 GMM_GET_VALID_INSTANCE(pGMM, VERR_INTERNAL_ERROR);
3440 PGVM pGVM;
3441 int rc = GVMMR0ByVMAndEMT(pVM, idCpu, &pGVM);
3442 if (RT_FAILURE(rc))
3443 return rc;
3444
3445 AssertPtrReturn(pvR3, VERR_INVALID_POINTER);
3446 AssertReturn(!(PAGE_OFFSET_MASK & pvR3), VERR_INVALID_POINTER);
3447
3448 if (!pGMM->fLegacyAllocationMode)
3449 {
3450 Log(("GMMR0SeedChunk: not in legacy allocation mode!\n"));
3451 return VERR_NOT_SUPPORTED;
3452 }
3453
3454 /*
3455 * Lock the memory before taking the semaphore.
3456 */
3457 RTR0MEMOBJ MemObj;
3458 rc = RTR0MemObjLockUser(&MemObj, pvR3, GMM_CHUNK_SIZE, RTMEM_PROT_READ | RTMEM_PROT_WRITE, NIL_RTR0PROCESS);
3459 if (RT_SUCCESS(rc))
3460 {
3461 /* Grab the lock. */
3462 rc = RTSemFastMutexRequest(pGMM->Mtx);
3463 AssertRC(rc);
3464 if (RT_SUCCESS(rc))
3465 {
3466 /*
3467 * Add a new chunk with our hGVM.
3468 */
3469 rc = gmmR0RegisterChunk(pGMM, &pGMM->Private, MemObj, pGVM->hSelf, GMMCHUNKTYPE_NON_CONTINUOUS);
3470 RTSemFastMutexRelease(pGMM->Mtx);
3471 }
3472
3473 if (RT_FAILURE(rc))
3474 RTR0MemObjFree(MemObj, false /* fFreeMappings */);
3475 }
3476
3477 LogFlow(("GMMR0SeedChunk: rc=%d (pvR3=%p)\n", rc, pvR3));
3478 return rc;
3479}
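
/**
 * Usage sketch (illustrative only) of the constraints this entry point puts
 * on its caller: the block must be exactly GMM_CHUNK_SIZE bytes, page
 * aligned, and owned by the calling EMT's process.  The RTMemPageAlloc call
 * is a placeholder for whatever ring-3 mechanism really supplies the memory,
 * and in practice the request travels through the VMMR0 call path rather
 * than being a direct function call.
 *
 * @code
 *  int   rc     = VERR_NO_MEMORY;
 *  void *pvSeed = RTMemPageAlloc(GMM_CHUNK_SIZE);      // placeholder ring-3 allocation, page aligned
 *  if (pvSeed)
 *      rc = GMMR0SeedChunk(pVM, idCpu, (RTR3PTR)pvSeed);
 * @endcode
 */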
3480
3481typedef struct
3482{
3483 PAVLGCPTRNODECORE pNode;
3484 char *pszModuleName;
3485 char *pszVersion;
3486 VBOXOSFAMILY enmGuestOS;
3487} GMMFINDMODULEBYNAME, *PGMMFINDMODULEBYNAME;
3488
3489/**
3490 * Tree enumeration callback for finding identical modules by name and version
3491 */
3492DECLCALLBACK(int) gmmR0CheckForIdenticalModule(PAVLGCPTRNODECORE pNode, void *pvUser)
3493{
3494 PGMMFINDMODULEBYNAME pInfo = (PGMMFINDMODULEBYNAME)pvUser;
3495 PGMMSHAREDMODULE pModule = (PGMMSHAREDMODULE)pNode;
3496
3497 if ( pInfo
3498 && pInfo->enmGuestOS == pModule->enmGuestOS
3499 /** @todo replace with RTStrNCmp */
3500 && !strcmp(pModule->szName, pInfo->pszModuleName)
3501 && !strcmp(pModule->szVersion, pInfo->pszVersion))
3502 {
3503 pInfo->pNode = pNode;
3504 return 1; /* stop search */
3505 }
3506 return 0;
3507}
3508
3509
3510/**
3511 * Registers a new shared module for the VM
3512 *
3513 * @returns VBox status code.
3514 * @param pVM VM handle
3515 * @param idCpu VCPU id
3516 * @param enmGuestOS Guest OS type
3517 * @param pszModuleName Module name
3518 * @param pszVersion Module version
3519 * @param GCBaseAddr Module base address
3520 * @param cbModule Module size
3521 * @param cRegions Number of shared region descriptors
3522 * @param pRegions Shared region(s)
3523 */
3524GMMR0DECL(int) GMMR0RegisterSharedModule(PVM pVM, VMCPUID idCpu, VBOXOSFAMILY enmGuestOS, char *pszModuleName, char *pszVersion, RTGCPTR GCBaseAddr, uint32_t cbModule,
3525 unsigned cRegions, VMMDEVSHAREDREGIONDESC *pRegions)
3526{
3527#ifdef VBOX_WITH_PAGE_SHARING
3528 /*
3529 * Validate input and get the basics.
3530 */
3531 PGMM pGMM;
3532 GMM_GET_VALID_INSTANCE(pGMM, VERR_INTERNAL_ERROR);
3533 PGVM pGVM;
3534 int rc = GVMMR0ByVMAndEMT(pVM, idCpu, &pGVM);
3535 if (RT_FAILURE(rc))
3536 return rc;
3537
3538 Log(("GMMR0RegisterSharedModule %s %s base %RGv size %x\n", pszModuleName, pszVersion, GCBaseAddr, cbModule));
3539
3540 /*
3541 * Take the semaphore and do some more validations.
3542 */
3543 rc = RTSemFastMutexRequest(pGMM->Mtx);
3544 AssertRC(rc);
3545 if (GMM_CHECK_SANITY_UPON_ENTERING(pGMM))
3546 {
3547 bool fNewModule = false;
3548
3549 /* Check if this module is already locally registered. */
3550 PGMMSHAREDMODULEPERVM pRecVM = (PGMMSHAREDMODULEPERVM)RTAvlGCPtrGet(&pGVM->gmm.s.pSharedModuleTree, GCBaseAddr);
3551 if (!pRecVM)
3552 {
3553 pRecVM = (PGMMSHAREDMODULEPERVM)RTMemAllocZ(RT_OFFSETOF(GMMSHAREDMODULEPERVM, aRegions[cRegions]));
3554 if (!pRecVM)
3555 {
3556 AssertFailed();
3557 rc = VERR_NO_MEMORY;
3558 goto end;
3559 }
3560 pRecVM->Core.Key = GCBaseAddr;
3561 pRecVM->cRegions = cRegions;
3562
3563 /* Save the region data as they can differ between VMs (address space scrambling or simply different loading order) */
3564 for (unsigned i = 0; i < cRegions; i++)
3565 {
3566 pRecVM->aRegions[i].GCRegionAddr = pRegions[i].GCRegionAddr;
3567 pRecVM->aRegions[i].cbRegion = RT_ALIGN_T(pRegions[i].cbRegion, PAGE_SIZE, uint32_t);
3568 pRecVM->aRegions[i].u32Alignment = 0;
3569 pRecVM->aRegions[i].paHCPhysPageID = NULL; /* unused */
3570 }
3571
3572 bool ret = RTAvlGCPtrInsert(&pGVM->gmm.s.pSharedModuleTree, &pRecVM->Core);
3573 Assert(ret);
3574
3575 Log(("GMMR0RegisterSharedModule: new local module %s\n", pszModuleName));
3576 fNewModule = true;
3577 }
3578 else
3579 rc = VINF_PGM_SHARED_MODULE_ALREADY_REGISTERED;
3580
3581 /* Check if this module is already globally registered. */
3582 PGMMSHAREDMODULE pGlobalModule = (PGMMSHAREDMODULE)RTAvlGCPtrGet(&pGMM->pGlobalSharedModuleTree, GCBaseAddr);
3583 if ( !pGlobalModule
3584 && enmGuestOS == VBOXOSFAMILY_Windows64)
3585 {
3586 /* Two identical copies of e.g. Win7 x64 will typically not have the same virtual address space layout for DLLs or kernel modules.
3587 * Try to find identical binaries based on name and version instead.
3588 */
3589 GMMFINDMODULEBYNAME Info;
3590
3591 Info.pNode = NULL;
3592 Info.pszVersion = pszVersion;
3593 Info.pszModuleName = pszModuleName;
3594 Info.enmGuestOS = enmGuestOS;
3595
3596 Log(("Try to find identical module %s\n", pszModuleName));
3597 int ret = RTAvlGCPtrDoWithAll(&pGMM->pGlobalSharedModuleTree, true /* fFromLeft */, gmmR0CheckForIdenticalModule, &Info);
3598 if (ret == 1)
3599 {
3600 Assert(Info.pNode);
3601 pGlobalModule = (PGMMSHAREDMODULE)Info.pNode;
3602 Log(("Found identical module at %RGv\n", pGlobalModule->Core.Key));
3603 }
3604 }
3605
3606 if (!pGlobalModule)
3607 {
3608 Assert(fNewModule);
3609 Assert(!pRecVM->fCollision);
3610
3611 pGlobalModule = (PGMMSHAREDMODULE)RTMemAllocZ(RT_OFFSETOF(GMMSHAREDMODULE, aRegions[cRegions]));
3612 if (!pGlobalModule)
3613 {
3614 AssertFailed();
3615 rc = VERR_NO_MEMORY;
3616 goto end;
3617 }
3618
3619 pGlobalModule->Core.Key = GCBaseAddr;
3620 pGlobalModule->cbModule = cbModule;
3621 /* Input limit already safe; no need to check again. */
3622 /** @todo replace with RTStrCopy */
3623 strcpy(pGlobalModule->szName, pszModuleName);
3624 strcpy(pGlobalModule->szVersion, pszVersion);
3625
3626 pGlobalModule->enmGuestOS = enmGuestOS;
3627 pGlobalModule->cRegions = cRegions;
3628
3629 for (unsigned i = 0; i < cRegions; i++)
3630 {
3631 Log(("New region %d base=%RGv size %x\n", i, pRegions[i].GCRegionAddr, pRegions[i].cbRegion));
3632 pGlobalModule->aRegions[i].GCRegionAddr = pRegions[i].GCRegionAddr;
3633 pGlobalModule->aRegions[i].cbRegion = RT_ALIGN_T(pRegions[i].cbRegion, PAGE_SIZE, uint32_t);
3634 pGlobalModule->aRegions[i].u32Alignment = 0;
3635 pGlobalModule->aRegions[i].paHCPhysPageID = NULL; /* uninitialized. */
3636 }
3637
3638 /* Save reference. */
3639 pRecVM->pGlobalModule = pGlobalModule;
3640 pRecVM->fCollision = false;
3641 pGlobalModule->cUsers++;
3642 rc = VINF_SUCCESS;
3643
3644 bool ret = RTAvlGCPtrInsert(&pGMM->pGlobalSharedModuleTree, &pGlobalModule->Core);
3645 Assert(ret);
3646
3647 Log(("GMMR0RegisterSharedModule: new global module %s\n", pszModuleName));
3648 }
3649 else
3650 {
3651 Assert(pGlobalModule->cUsers > 0);
3652
3653 /* Make sure the name and version are identical. */
3654 /** @todo replace with RTStrNCmp */
3655 if ( !strcmp(pGlobalModule->szName, pszModuleName)
3656 && !strcmp(pGlobalModule->szVersion, pszVersion))
3657 {
3658 /* Save reference. */
3659 pRecVM->pGlobalModule = pGlobalModule;
3660 if ( fNewModule
3661 || pRecVM->fCollision == true) /* colliding module unregistered and new one registered since the last check */
3662 {
3663 pGlobalModule->cUsers++;
3664 Log(("GMMR0RegisterSharedModule: using existing module %s cUser=%d!\n", pszModuleName, pGlobalModule->cUsers));
3665 }
3666 pRecVM->fCollision = false;
3667 rc = VINF_SUCCESS;
3668 }
3669 else
3670 {
3671 Log(("GMMR0RegisterSharedModule: module %s collision!\n", pszModuleName));
3672 pRecVM->fCollision = true;
3673 rc = VINF_PGM_SHARED_MODULE_COLLISION;
3674 goto end;
3675 }
3676 }
3677
3678 GMM_CHECK_SANITY_UPON_LEAVING(pGMM);
3679 }
3680 else
3681 rc = VERR_INTERNAL_ERROR_5;
3682
3683end:
3684 RTSemFastMutexRelease(pGMM->Mtx);
3685 return rc;
3686#else
3687 return VERR_NOT_IMPLEMENTED;
3688#endif
3689}
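
/**
 * Registration sketch (illustrative only).  The module name, version, base
 * address and single region below are invented values; real registrations
 * are built from guest data and arrive via GMMR0RegisterSharedModuleReq
 * below.  Only the field names GCRegionAddr and cbRegion are taken from the
 * code above.
 *
 * @code
 *  VMMDEVSHAREDREGIONDESC aRegions[1];
 *  aRegions[0].GCRegionAddr = GCBaseAddr;      // e.g. the module's text section (placeholder)
 *  aRegions[0].cbRegion     = cbModule;        // rounded up to PAGE_SIZE by the GMM
 *
 *  int rc = GMMR0RegisterSharedModule(pVM, idCpu, VBOXOSFAMILY_Windows64,
 *                                     "ntdll.dll", "6.1.7600.16385",
 *                                     GCBaseAddr, cbModule, 1, &aRegions[0]);
 *  // rc is typically VINF_SUCCESS, VINF_PGM_SHARED_MODULE_ALREADY_REGISTERED
 *  // or VINF_PGM_SHARED_MODULE_COLLISION on the paths seen above.
 * @endcode
 */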
3690
3691
3692/**
3693 * VMMR0 request wrapper for GMMR0RegisterSharedModule.
3694 *
3695 * @returns see GMMR0RegisterSharedModule.
3696 * @param pVM Pointer to the shared VM structure.
3697 * @param idCpu VCPU id
3698 * @param pReq The request packet.
3699 */
3700GMMR0DECL(int) GMMR0RegisterSharedModuleReq(PVM pVM, VMCPUID idCpu, PGMMREGISTERSHAREDMODULEREQ pReq)
3701{
3702 /*
3703 * Validate input and pass it on.
3704 */
3705 AssertPtrReturn(pVM, VERR_INVALID_POINTER);
3706 AssertPtrReturn(pReq, VERR_INVALID_POINTER);
3707 AssertMsgReturn(pReq->Hdr.cbReq >= sizeof(*pReq) && pReq->Hdr.cbReq == RT_UOFFSETOF(GMMREGISTERSHAREDMODULEREQ, aRegions[pReq->cRegions]), ("%#x != %#x\n", pReq->Hdr.cbReq, sizeof(*pReq)), VERR_INVALID_PARAMETER);
3708
3709 /* Pass back return code in the request packet to preserve informational codes. (VMMR3CallR0 chokes on them) */
3710 pReq->rc = GMMR0RegisterSharedModule(pVM, idCpu, pReq->enmGuestOS, pReq->szName, pReq->szVersion, pReq->GCBaseAddr, pReq->cbModule, pReq->cRegions, pReq->aRegions);
3711 return VINF_SUCCESS;
3712}
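
/**
 * Because the interesting status is returned in pReq->rc rather than as the
 * function's return value, a ring-3 caller is expected to inspect the packet
 * after the call.  A rough, illustrative sketch only; the VMMR3CallR0
 * plumbing is omitted:
 *
 * @code
 *  if (pReq->rc == VINF_PGM_SHARED_MODULE_COLLISION)
 *  {
 *      // a different module is already registered globally at this base address
 *  }
 *  else if (pReq->rc == VINF_PGM_SHARED_MODULE_ALREADY_REGISTERED)
 *  {
 *      // this VM had already registered the module; nothing more to do
 *  }
 *  else
 *      AssertRC(pReq->rc);
 * @endcode
 */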
3713
3714/**
3715 * Unregisters a shared module for the VM
3716 *
3717 * @returns VBox status code.
3718 * @param pVM VM handle
3719 * @param idCpu VCPU id
3720 * @param pszModuleName Module name
3721 * @param pszVersion Module version
3722 * @param GCBaseAddr Module base address
3723 * @param cbModule Module size
3724 */
3725GMMR0DECL(int) GMMR0UnregisterSharedModule(PVM pVM, VMCPUID idCpu, char *pszModuleName, char *pszVersion, RTGCPTR GCBaseAddr, uint32_t cbModule)
3726{
3727#ifdef VBOX_WITH_PAGE_SHARING
3728 /*
3729 * Validate input and get the basics.
3730 */
3731 PGMM pGMM;
3732 GMM_GET_VALID_INSTANCE(pGMM, VERR_INTERNAL_ERROR);
3733 PGVM pGVM;
3734 int rc = GVMMR0ByVMAndEMT(pVM, idCpu, &pGVM);
3735 if (RT_FAILURE(rc))
3736 return rc;
3737
3738 Log(("GMMR0UnregisterSharedModule %s %s base=%RGv size %x\n", pszModuleName, pszVersion, GCBaseAddr, cbModule));
3739
3740 /*
3741 * Take the semaphore and do some more validations.
3742 */
3743 rc = RTSemFastMutexRequest(pGMM->Mtx);
3744 AssertRC(rc);
3745 if (GMM_CHECK_SANITY_UPON_ENTERING(pGMM))
3746 {
3747 PGMMSHAREDMODULEPERVM pRecVM = (PGMMSHAREDMODULEPERVM)RTAvlGCPtrGet(&pGVM->gmm.s.pSharedModuleTree, GCBaseAddr);
3748 if (pRecVM)
3749 {
3750 /* Remove reference to global shared module. */
3751 if (!pRecVM->fCollision)
3752 {
3753 PGMMSHAREDMODULE pRec = pRecVM->pGlobalModule;
3754 Assert(pRec);
3755
3756 if (pRec) /* paranoia */
3757 {
3758 Assert(pRec->cUsers);
3759 pRec->cUsers--;
3760 if (pRec->cUsers == 0)
3761 {
3762 /* Free the ranges, but leave the pages intact as there might still be references; they will be cleared by the COW mechanism. */
3763 for (unsigned i = 0; i < pRec->cRegions; i++)
3764 if (pRec->aRegions[i].paHCPhysPageID)
3765 RTMemFree(pRec->aRegions[i].paHCPhysPageID);
3766
3767 Assert(pRec->Core.Key == GCBaseAddr || pRec->enmGuestOS == VBOXOSFAMILY_Windows64);
3768 Assert(pRec->cRegions == pRecVM->cRegions);
3769#ifdef VBOX_STRICT
3770 for (unsigned i = 0; i < pRecVM->cRegions; i++)
3771 {
3772 Assert(pRecVM->aRegions[i].GCRegionAddr == pRec->aRegions[i].GCRegionAddr);
3773 Assert(pRecVM->aRegions[i].cbRegion == pRec->aRegions[i].cbRegion);
3774 }
3775#endif
3776
3777 /* Remove from the tree and free memory. */
3778 RTAvlGCPtrRemove(&pGMM->pGlobalSharedModuleTree, pRec->Core.Key);
3779 RTMemFree(pRec);
3780 }
3781 }
3782 else
3783 rc = VERR_PGM_SHARED_MODULE_REGISTRATION_INCONSISTENCY;
3784 }
3785 else
3786 Assert(!pRecVM->pGlobalModule);
3787
3788 /* Remove from the tree and free memory. */
3789 RTAvlGCPtrRemove(&pGVM->gmm.s.pSharedModuleTree, GCBaseAddr);
3790 RTMemFree(pRecVM);
3791 }
3792 else
3793 rc = VERR_PGM_SHARED_MODULE_NOT_FOUND;
3794
3795 GMM_CHECK_SANITY_UPON_LEAVING(pGMM);
3796 }
3797 else
3798 rc = VERR_INTERNAL_ERROR_5;
3799
3800 RTSemFastMutexRelease(pGMM->Mtx);
3801 return rc;
3802#else
3803 return VERR_NOT_IMPLEMENTED;
3804#endif
3805}
3806
3807/**
3808 * VMMR0 request wrapper for GMMR0UnregisterSharedModule.
3809 *
3810 * @returns see GMMR0UnregisterSharedModule.
3811 * @param pVM Pointer to the shared VM structure.
3812 * @param idCpu VCPU id
3813 * @param pReq The request packet.
3814 */
3815GMMR0DECL(int) GMMR0UnregisterSharedModuleReq(PVM pVM, VMCPUID idCpu, PGMMUNREGISTERSHAREDMODULEREQ pReq)
3816{
3817 /*
3818 * Validate input and pass it on.
3819 */
3820 AssertPtrReturn(pVM, VERR_INVALID_POINTER);
3821 AssertPtrReturn(pReq, VERR_INVALID_POINTER);
3822 AssertMsgReturn(pReq->Hdr.cbReq == sizeof(*pReq), ("%#x != %#x\n", pReq->Hdr.cbReq, sizeof(*pReq)), VERR_INVALID_PARAMETER);
3823
3824 return GMMR0UnregisterSharedModule(pVM, idCpu, pReq->szName, pReq->szVersion, pReq->GCBaseAddr, pReq->cbModule);
3825}
3826
3827
3828#ifdef VBOX_WITH_PAGE_SHARING
3829/**
3830 * Checks the specified shared module page for changes.
3831 *
3832 * Performs the following tasks:
3833 * - If a shared page is new, then it changes the GMM page type to shared and
3834 * returns it in the pPageDesc descriptor.
3835 * - If a shared page already exists, then it checks if the VM page is
3836 * identical and if so frees the VM page and returns the shared page in
3837 * the pPageDesc descriptor.
3838 *
3839 * @remarks ASSUMES the caller has acquired the GMM semaphore!!
3840 *
3841 * @returns VBox status code.
3843 * @param pGVM Pointer to the GVM instance data.
3844 * @param pModule Module description
3845 * @param idxRegion Region index
3846 * @param idxPage Page index
3847 * @param pPageDesc Page descriptor
3848 */
3849GMMR0DECL(int) GMMR0SharedModuleCheckPage(PGVM pGVM, PGMMSHAREDMODULE pModule, unsigned idxRegion, unsigned idxPage,
3850 PGMMSHAREDPAGEDESC pPageDesc)
3851{
3852 int rc = VINF_SUCCESS;
3853 PGMM pGMM;
3854 GMM_GET_VALID_INSTANCE(pGMM, VERR_INTERNAL_ERROR);
3855 AssertReturn(idxRegion < pModule->cRegions, VERR_INVALID_PARAMETER);
3856
3857 unsigned cPages = pModule->aRegions[idxRegion].cbRegion >> PAGE_SHIFT;
3858 AssertReturn(idxPage < cPages, VERR_INVALID_PARAMETER);
3859
3860 LogFlow(("GMMR0SharedModuleCheckPage %s base %RGv region %d idxPage %d\n", pModule->szName, pModule->Core.Key, idxRegion, idxPage));
3861
3862 PGMMSHAREDREGIONDESC pGlobalRegion = &pModule->aRegions[idxRegion];
3863 if (!pGlobalRegion->paHCPhysPageID)
3864 {
3865 /* First time; create a page descriptor array. */
3866 Log(("Allocate page descriptor array for %d pages\n", cPages));
3867 pGlobalRegion->paHCPhysPageID = (uint32_t *)RTMemAlloc(cPages * sizeof(*pGlobalRegion->paHCPhysPageID));
3868 if (!pGlobalRegion->paHCPhysPageID)
3869 {
3870 AssertFailed();
3871 rc = VERR_NO_MEMORY;
3872 goto end;
3873 }
3874 /* Invalidate all descriptors. */
3875 for (unsigned i = 0; i < cPages; i++)
3876 pGlobalRegion->paHCPhysPageID[i] = NIL_GMM_PAGEID;
3877 }
3878
3879 /* Is this the first time we've seen this shared page? */
3880 if (pGlobalRegion->paHCPhysPageID[idxPage] == NIL_GMM_PAGEID)
3881 {
3882new_shared_page:
3883 Log(("New shared page guest %RGp host %RHp\n", pPageDesc->GCPhys, pPageDesc->HCPhys));
3884
3885 /* Easy case: just change the internal page type. */
3886 PGMMPAGE pPage = gmmR0GetPage(pGMM, pPageDesc->uHCPhysPageId);
3887 if (!pPage)
3888 {
3889 Log(("GMMR0SharedModuleCheckPage: Invalid idPage=%#x #1 (GCPhys=%RGp HCPhys=%RHp idxRegion=%#x idxPage=%#x)\n",
3890 pPageDesc->uHCPhysPageId, pPageDesc->GCPhys, pPageDesc->HCPhys, idxRegion, idxPage));
3891 AssertFailed();
3892 rc = VERR_PGM_PHYS_INVALID_PAGE_ID;
3893 goto end;
3894 }
3895
3896 AssertMsg(pPageDesc->GCPhys == (pPage->Private.pfn << 12), ("desc %RGp gmm %RGp\n", pPageDesc->GCPhys, (pPage->Private.pfn << 12)));
3897
3898 gmmR0ConvertToSharedPage(pGMM, pGVM, pPageDesc->HCPhys, pPageDesc->uHCPhysPageId, pPage);
3899
3900 /* Keep track of these references. */
3901 pGlobalRegion->paHCPhysPageID[idxPage] = pPageDesc->uHCPhysPageId;
3902 }
3903 else
3904 {
3905 uint8_t *pbLocalPage, *pbSharedPage;
3906 uint8_t *pbChunk;
3907 PGMMCHUNK pChunk;
3908
3909 Assert(pPageDesc->uHCPhysPageId != pGlobalRegion->paHCPhysPageID[idxPage]);
3910
3911 Log(("Replace existing page guest %RGp host %RHp id %x -> id %x\n", pPageDesc->GCPhys, pPageDesc->HCPhys, pPageDesc->uHCPhysPageId, pGlobalRegion->paHCPhysPageID[idxPage]));
3912
3913 /* Get the shared page source. */
3914 PGMMPAGE pPage = gmmR0GetPage(pGMM, pGlobalRegion->paHCPhysPageID[idxPage]);
3915 if (!pPage)
3916 {
3917 Log(("GMMR0SharedModuleCheckPage: Invalid idPage=%#x #2 (idxRegion=%#x idxPage=%#x)\n",
3918 pPageDesc->uHCPhysPageId, idxRegion, idxPage));
3919 AssertFailed();
3920 rc = VERR_PGM_PHYS_INVALID_PAGE_ID;
3921 goto end;
3922 }
3923 if (pPage->Common.u2State != GMM_PAGE_STATE_SHARED)
3924 {
3925 /* Page was freed at some point; invalidate this entry. */
3926 /** @todo this isn't really bullet proof. */
3927 Log(("Old shared page was freed -> create a new one\n"));
3928 pGlobalRegion->paHCPhysPageID[idxPage] = NIL_GMM_PAGEID;
3929 goto new_shared_page; /* ugly goto */
3930 }
3931
3932 Log(("Replace existing page guest host %RHp -> %RHp\n", pPageDesc->HCPhys, ((uint64_t)pPage->Shared.pfn) << PAGE_SHIFT));
3933
3934 /* Calculate the virtual address of the local page. */
3935 pChunk = gmmR0GetChunk(pGMM, pPageDesc->uHCPhysPageId >> GMM_CHUNKID_SHIFT);
3936 if (pChunk)
3937 {
3938 if (!gmmR0IsChunkMapped(pGVM, pChunk, (PRTR3PTR)&pbChunk))
3939 {
3940 Log(("GMMR0SharedModuleCheckPage: Invalid idPage=%#x #3\n", pPageDesc->uHCPhysPageId));
3941 AssertFailed();
3942 rc = VERR_PGM_PHYS_INVALID_PAGE_ID;
3943 goto end;
3944 }
3945 pbLocalPage = pbChunk + ((pPageDesc->uHCPhysPageId & GMM_PAGEID_IDX_MASK) << PAGE_SHIFT);
3946 }
3947 else
3948 {
3949 Log(("GMMR0SharedModuleCheckPage: Invalid idPage=%#x #4\n", pPageDesc->uHCPhysPageId));
3950 AssertFailed();
3951 rc = VERR_PGM_PHYS_INVALID_PAGE_ID;
3952 goto end;
3953 }
3954
3955 /* Calculate the virtual address of the shared page. */
3956 pChunk = gmmR0GetChunk(pGMM, pGlobalRegion->paHCPhysPageID[idxPage] >> GMM_CHUNKID_SHIFT);
3957 Assert(pChunk); /* can't fail as gmmR0GetPage succeeded. */
3958
3959 /* Get the virtual address of the physical page; map the chunk into the VM process if not already done. */
3960 if (!gmmR0IsChunkMapped(pGVM, pChunk, (PRTR3PTR)&pbChunk))
3961 {
3962 Log(("Map chunk into process!\n"));
3963 rc = gmmR0MapChunk(pGMM, pGVM, pChunk, (PRTR3PTR)&pbChunk);
3964 if (rc != VINF_SUCCESS)
3965 {
3966 AssertRC(rc);
3967 goto end;
3968 }
3969 }
3970 pbSharedPage = pbChunk + ((pGlobalRegion->paHCPhysPageID[idxPage] & GMM_PAGEID_IDX_MASK) << PAGE_SHIFT);
3971
3972 /** @todo write ASMMemComparePage. */
3973 if (memcmp(pbSharedPage, pbLocalPage, PAGE_SIZE))
3974 {
3975 Log(("Unexpected differences found between local and shared page; skip\n"));
3976 /* Signal to the caller that this one hasn't changed. */
3977 pPageDesc->uHCPhysPageId = NIL_GMM_PAGEID;
3978 goto end;
3979 }
3980
3981 /* Free the old local page. */
3982 GMMFREEPAGEDESC PageDesc;
3983
3984 PageDesc.idPage = pPageDesc->uHCPhysPageId;
3985 rc = gmmR0FreePages(pGMM, pGVM, 1, &PageDesc, GMMACCOUNT_BASE);
3986 AssertRCReturn(rc, rc);
3987
3988 gmmR0UseSharedPage(pGMM, pGVM, pPage);
3989
3990 /* Pass along the new physical address & page id. */
3991 pPageDesc->HCPhys = ((uint64_t)pPage->Shared.pfn) << PAGE_SHIFT;
3992 pPageDesc->uHCPhysPageId = pGlobalRegion->paHCPhysPageID[idxPage];
3993 }
3994end:
3995 return rc;
3996}
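
/**
 * A hedged sketch of how the PGM-side caller (PGMR0SharedModuleCheck, see
 * GMMR0CheckSharedModules below) is expected to drive this function for one
 * page of a region.  Only the descriptor fields used above are relied upon;
 * GCPhysPage, HCPhysPage and idPage are placeholders, and the final PGM
 * update step is outside the GMM and only indicated by a comment.
 *
 * @code
 *  GMMSHAREDPAGEDESC PageDesc;
 *  PageDesc.GCPhys        = GCPhysPage;        // guest physical address of the candidate page
 *  PageDesc.HCPhys        = HCPhysPage;        // host physical address currently backing it
 *  PageDesc.uHCPhysPageId = idPage;            // GMM page id currently backing it
 *
 *  int rc = GMMR0SharedModuleCheckPage(pGVM, pModule, idxRegion, idxPage, &PageDesc);
 *  if (    RT_SUCCESS(rc)
 *      &&  PageDesc.uHCPhysPageId != NIL_GMM_PAGEID)
 *  {
 *      // The page is (now) shared: PGM remaps the guest page to PageDesc.HCPhys
 *      // and PageDesc.uHCPhysPageId.
 *  }
 * @endcode
 */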
3997
3998/**
3999 * RTAvlGCPtrDestroy callback.
4000 *
4001 * @returns 0
4002 * @param pNode The node to destroy.
4003 * @param pvGVM The GVM handle.
4004 */
4005static DECLCALLBACK(int) gmmR0CleanupSharedModule(PAVLGCPTRNODECORE pNode, void *pvGVM)
4006{
4007 PGVM pGVM = (PGVM)pvGVM;
4008 PGMMSHAREDMODULEPERVM pRecVM = (PGMMSHAREDMODULEPERVM)pNode;
4009 PGMM pGMM;
4010 GMM_GET_VALID_INSTANCE(pGMM, VERR_INTERNAL_ERROR);
4011
4012 Assert(pRecVM->pGlobalModule || pRecVM->fCollision);
4013 if (pRecVM->pGlobalModule)
4014 {
4015 PGMMSHAREDMODULE pRec = pRecVM->pGlobalModule;
4016 Assert(pRec);
4017 Assert(pRec->cUsers);
4018
4019 Log(("gmmR0CleanupSharedModule: %s %s cUsers=%d\n", pRec->szName, pRec->szVersion, pRec->cUsers));
4020 pRec->cUsers--;
4021 if (pRec->cUsers == 0)
4022 {
4023 for (unsigned i = 0; i < pRec->cRegions; i++)
4024 if (pRec->aRegions[i].paHCPhysPageID)
4025 RTMemFree(pRec->aRegions[i].paHCPhysPageID);
4026
4027 /* Remove from the tree and free memory. */
4028 RTAvlGCPtrRemove(&pGMM->pGlobalSharedModuleTree, pRec->Core.Key);
4029 RTMemFree(pRec);
4030 }
4031 }
4032 RTMemFree(pRecVM);
4033 return 0;
4034}
4035#endif
4036
4037/**
4038 * Removes all shared modules for the specified VM
4039 *
4040 * @returns VBox status code.
4041 * @param pVM VM handle
4042 * @param idCpu VCPU id
4043 */
4044GMMR0DECL(int) GMMR0ResetSharedModules(PVM pVM, VMCPUID idCpu)
4045{
4046#ifdef VBOX_WITH_PAGE_SHARING
4047 /*
4048 * Validate input and get the basics.
4049 */
4050 PGMM pGMM;
4051 GMM_GET_VALID_INSTANCE(pGMM, VERR_INTERNAL_ERROR);
4052 PGVM pGVM;
4053 int rc = GVMMR0ByVMAndEMT(pVM, idCpu, &pGVM);
4054 if (RT_FAILURE(rc))
4055 return rc;
4056
4057 /*
4058 * Take the semaphore and do some more validations.
4059 */
4060 rc = RTSemFastMutexRequest(pGMM->Mtx);
4061 AssertRC(rc);
4062 if (GMM_CHECK_SANITY_UPON_ENTERING(pGMM))
4063 {
4064 Log(("GMMR0ResetSharedModules\n"));
4065 RTAvlGCPtrDestroy(&pGVM->gmm.s.pSharedModuleTree, gmmR0CleanupSharedModule, pGVM);
4066
4067 rc = VINF_SUCCESS;
4068 GMM_CHECK_SANITY_UPON_LEAVING(pGMM);
4069 }
4070 else
4071 rc = VERR_INTERNAL_ERROR_5;
4072
4073 RTSemFastMutexRelease(pGMM->Mtx);
4074 return rc;
4075#else
4076 return VERR_NOT_IMPLEMENTED;
4077#endif
4078}
4079
4080#ifdef VBOX_WITH_PAGE_SHARING
4081typedef struct
4082{
4083 PGVM pGVM;
4084 VMCPUID idCpu;
4085 int rc;
4086} GMMCHECKSHAREDMODULEINFO, *PGMMCHECKSHAREDMODULEINFO;
4087
4088/**
4089 * Tree enumeration callback for checking a shared module.
4090 */
4091DECLCALLBACK(int) gmmR0CheckSharedModule(PAVLGCPTRNODECORE pNode, void *pvUser)
4092{
4093 PGMMCHECKSHAREDMODULEINFO pInfo = (PGMMCHECKSHAREDMODULEINFO)pvUser;
4094 PGMMSHAREDMODULEPERVM pLocalModule = (PGMMSHAREDMODULEPERVM)pNode;
4095 PGMMSHAREDMODULE pGlobalModule = pLocalModule->pGlobalModule;
4096
4097 if ( !pLocalModule->fCollision
4098 && pGlobalModule)
4099 {
4100 Log(("gmmR0CheckSharedModule: check %s %s base=%RGv size=%x collision=%d\n", pGlobalModule->szName, pGlobalModule->szVersion, pGlobalModule->Core.Key, pGlobalModule->cbModule, pLocalModule->fCollision));
4101 pInfo->rc = PGMR0SharedModuleCheck(pInfo->pGVM->pVM, pInfo->pGVM, pInfo->idCpu, pGlobalModule, pLocalModule->cRegions, pLocalModule->aRegions);
4102 if (RT_FAILURE(pInfo->rc))
4103 return 1; /* stop enumeration. */
4104 }
4105 return 0;
4106}
4107#endif
4108
4109#ifdef DEBUG_sandervl
4110/**
4111 * Setup for a GMMR0CheckSharedModules call (to allow log flush jumps back to ring 3)
4112 *
4113 * @returns VBox status code.
4114 * @param pVM VM handle
4115 */
4116GMMR0DECL(int) GMMR0CheckSharedModulesStart(PVM pVM)
4117{
4118 /*
4119 * Validate input and get the basics.
4120 */
4121 PGMM pGMM;
4122 GMM_GET_VALID_INSTANCE(pGMM, VERR_INTERNAL_ERROR);
4123
4124 /*
4125 * Take the semaphore and do some more validations.
4126 */
4127 int rc = RTSemFastMutexRequest(pGMM->Mtx);
4128 AssertRC(rc);
4129 if (!GMM_CHECK_SANITY_UPON_ENTERING(pGMM))
4130 rc = VERR_INTERNAL_ERROR_5;
4131 else
4132 rc = VINF_SUCCESS;
4133
4134 return rc;
4135}
4136
4137/**
4138 * Clean up after a GMMR0CheckSharedModules call (to allow log flush jumps back to ring 3)
4139 *
4140 * @returns VBox status code.
4141 * @param pVM VM handle
4142 */
4143GMMR0DECL(int) GMMR0CheckSharedModulesEnd(PVM pVM)
4144{
4145 /*
4146 * Validate input and get the basics.
4147 */
4148 PGMM pGMM;
4149 GMM_GET_VALID_INSTANCE(pGMM, VERR_INTERNAL_ERROR);
4150
4151 RTSemFastMutexRelease(pGMM->Mtx);
4152 return VINF_SUCCESS;
4153}
4154#endif
4155
4156/**
4157 * Check all shared modules for the specified VM
4158 *
4159 * @returns VBox status code.
4160 * @param pVM VM handle
4161 * @param pVCpu VMCPU handle
4162 */
4163GMMR0DECL(int) GMMR0CheckSharedModules(PVM pVM, PVMCPU pVCpu)
4164{
4165#ifdef VBOX_WITH_PAGE_SHARING
4166 /*
4167 * Validate input and get the basics.
4168 */
4169 PGMM pGMM;
4170 GMM_GET_VALID_INSTANCE(pGMM, VERR_INTERNAL_ERROR);
4171 PGVM pGVM;
4172 int rc = GVMMR0ByVMAndEMT(pVM, pVCpu->idCpu, &pGVM);
4173 if (RT_FAILURE(rc))
4174 return rc;
4175
4176# ifndef DEBUG_sandervl
4177 /*
4178 * Take the semaphore and do some more validations.
4179 */
4180 rc = RTSemFastMutexRequest(pGMM->Mtx);
4181 AssertRC(rc);
4182# endif
4183 if (GMM_CHECK_SANITY_UPON_ENTERING(pGMM))
4184 {
4185 GMMCHECKSHAREDMODULEINFO Info;
4186
4187 Log(("GMMR0CheckSharedModules\n"));
4188 Info.pGVM = pGVM;
4189 Info.idCpu = pVCpu->idCpu;
4190 Info.rc = VINF_SUCCESS;
4191
4192 RTAvlGCPtrDoWithAll(&pGVM->gmm.s.pSharedModuleTree, true /* fFromLeft */, gmmR0CheckSharedModule, &Info);
4193
4194 rc = Info.rc;
4195
4196 Log(("GMMR0CheckSharedModules done!\n"));
4197
4198 GMM_CHECK_SANITY_UPON_LEAVING(pGMM);
4199 }
4200 else
4201 rc = VERR_INTERNAL_ERROR_5;
4202
4203# ifndef DEBUG_sandervl
4204 RTSemFastMutexRelease(pGMM->Mtx);
4205# endif
4206 return rc;
4207#else
4208 return VERR_NOT_IMPLEMENTED;
4209#endif
4210}
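
/**
 * In DEBUG_sandervl builds the GMM mutex is taken by
 * GMMR0CheckSharedModulesStart and released by GMMR0CheckSharedModulesEnd
 * instead of inside this function, so that log flushes can jump back to
 * ring 3 while the check runs.  A rough sketch of the resulting calling
 * sequence (illustrative only; the real sequencing lives in the VMMR0
 * dispatcher):
 *
 * @code
 * #ifdef DEBUG_sandervl
 *  int rc = GMMR0CheckSharedModulesStart(pVM);
 *  if (RT_SUCCESS(rc))
 *  {
 *      rc = GMMR0CheckSharedModules(pVM, pVCpu);   // runs without retaking the mutex
 *      GMMR0CheckSharedModulesEnd(pVM);
 *  }
 * #else
 *  int rc = GMMR0CheckSharedModules(pVM, pVCpu);   // takes and releases the mutex itself
 * #endif
 * @endcode
 */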
4211
4212#if defined(VBOX_STRICT) && HC_ARCH_BITS == 64
4213typedef struct
4214{
4215 PGVM pGVM;
4216 PGMM pGMM;
4217 uint8_t *pSourcePage;
4218 bool fFoundDuplicate;
4219} GMMFINDDUPPAGEINFO, *PGMMFINDDUPPAGEINFO;
4220
4221/**
4222 * RTAvlU32DoWithAll callback.
4223 *
4224 * @returns 0
4225 * @param pNode The node to search.
4226 * @param pvInfo Pointer to the input parameters
4227 */
4228static DECLCALLBACK(int) gmmR0FindDupPageInChunk(PAVLU32NODECORE pNode, void *pvInfo)
4229{
4230 PGMMCHUNK pChunk = (PGMMCHUNK)pNode;
4231 PGMMFINDDUPPAGEINFO pInfo = (PGMMFINDDUPPAGEINFO)pvInfo;
4232 PGVM pGVM = pInfo->pGVM;
4233 PGMM pGMM = pInfo->pGMM;
4234 uint8_t *pbChunk;
4235
4236 /* Only take chunks not mapped into this VM process; not entirely correct. */
4237 if (!gmmR0IsChunkMapped(pGVM, pChunk, (PRTR3PTR)&pbChunk))
4238 {
4239 int rc = gmmR0MapChunk(pGMM, pGVM, pChunk, (PRTR3PTR)&pbChunk);
4240 if (rc != VINF_SUCCESS)
4241 goto end;
4242
4243 /*
4244 * Look for duplicate pages
4245 */
4246 unsigned iPage = (GMM_CHUNK_SIZE >> PAGE_SHIFT);
4247 while (iPage-- > 0)
4248 {
4249 if (GMM_PAGE_IS_PRIVATE(&pChunk->aPages[iPage]))
4250 {
4251 uint8_t *pbDestPage = pbChunk + (iPage << PAGE_SHIFT);
4252
4253 if (!memcmp(pInfo->pSourcePage, pbDestPage, PAGE_SIZE))
4254 {
4255 pInfo->fFoundDuplicate = true;
4256 break;
4257 }
4258 }
4259 }
4260 gmmR0UnmapChunk(pGMM, pGVM, pChunk);
4261 }
4262end:
4263 if (pInfo->fFoundDuplicate)
4264 return 1; /* stop search */
4265 else
4266 return 0;
4267}
4268
4269/**
4270 * Find a duplicate of the specified page in other active VMs
4271 *
4272 * @returns VBox status code.
4273 * @param pVM VM handle
4274 * @param pReq Request packet
4275 */
4276GMMR0DECL(int) GMMR0FindDuplicatePageReq(PVM pVM, PGMMFINDDUPLICATEPAGEREQ pReq)
4277{
4278 /*
4279 * Validate input and pass it on.
4280 */
4281 AssertPtrReturn(pVM, VERR_INVALID_POINTER);
4282 AssertPtrReturn(pReq, VERR_INVALID_POINTER);
4283 AssertMsgReturn(pReq->Hdr.cbReq == sizeof(*pReq), ("%#x != %#x\n", pReq->Hdr.cbReq, sizeof(*pReq)), VERR_INVALID_PARAMETER);
4284
4285 PGMM pGMM;
4286 GMM_GET_VALID_INSTANCE(pGMM, VERR_INTERNAL_ERROR);
4287
4288 /*
4289 * Take the semaphore and do some more validations.
4290 */
4291 int rc = RTSemFastMutexRequest(pGMM->Mtx);
4292 AssertRC(rc);
4293 if (GMM_CHECK_SANITY_UPON_ENTERING(pGMM))
4294 {
4295 PGVM pGVM;
4296 rc = GVMMR0ByVM(pVM, &pGVM);
4297 if (RT_FAILURE(rc))
4298 goto end;
4299
4300 uint8_t *pbChunk;
4301 PGMMCHUNK pChunk = gmmR0GetChunk(pGMM, pReq->idPage >> GMM_CHUNKID_SHIFT);
4302 if (!pChunk)
4303 {
4304 AssertFailed();
4305 goto end;
4306 }
4307
4308 if (!gmmR0IsChunkMapped(pGVM, pChunk, (PRTR3PTR)&pbChunk))
4309 {
4310 AssertFailed();
4311 goto end;
4312 }
4313
4314 uint8_t *pbSourcePage = pbChunk + ((pReq->idPage & GMM_PAGEID_IDX_MASK) << PAGE_SHIFT);
4315
4316 PGMMPAGE pPage = gmmR0GetPage(pGMM, pReq->idPage);
4317 if (!pPage)
4318 {
4319 AssertFailed();
4320 rc = VERR_PGM_PHYS_INVALID_PAGE_ID;
4321 goto end;
4322 }
4323 GMMFINDDUPPAGEINFO Info;
4324
4325 Info.pGVM = pGVM;
4326 Info.pGMM = pGMM;
4327 Info.pSourcePage = pbSourcePage;
4328 Info.fFoundDuplicate = false;
4329 RTAvlU32DoWithAll(&pGMM->pChunks, true /* fFromLeft */, gmmR0FindDupPageInChunk, &Info);
4330
4331 pReq->fDuplicate = Info.fFoundDuplicate;
4332 }
4333 else
4334 rc = VERR_INTERNAL_ERROR_5;
4335
4336end:
4337 RTSemFastMutexRelease(pGMM->Mtx);
4338 return rc;
4339}
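
/**
 * An illustrative sketch of filling in the request packet validated above
 * (strict 64-bit builds only).  The request header magic initialisation is
 * omitted because its exact form is not shown in this file, and idPage is a
 * placeholder for the page id being investigated.
 *
 * @code
 *  GMMFINDDUPLICATEPAGEREQ Req;
 *  Req.Hdr.cbReq  = sizeof(Req);
 *  Req.idPage     = idPage;                    // page to look for duplicates of
 *  Req.fDuplicate = false;
 *
 *  int rc = GMMR0FindDuplicatePageReq(pVM, &Req);
 *  if (RT_SUCCESS(rc) && Req.fDuplicate)
 *      Log(("Page %#x has an identical copy in another mapped chunk\n", idPage));
 * @endcode
 */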
4340
4341#endif /* VBOX_STRICT && HC_ARCH_BITS == 64 */
4342