VirtualBox

source: vbox/trunk/src/VBox/VMM/GMM.cpp@ 28549

Last change on this file since 28549 was 28434, checked in by vboxsync, 15 years ago

*: whitespace cleanups by scm and two manually picked nits.

  • Property svn:eol-style set to native
  • Property svn:keywords set to Id
File size: 13.1 KB
Line 
1/* $Id: GMM.cpp 28434 2010-04-17 18:08:28Z vboxsync $ */
2/** @file
3 * GMM - Global Memory Manager, ring-3 request wrappers.
4 */
5
6/*
7 * Copyright (C) 2008 Sun Microsystems, Inc.
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 *
17 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa
18 * Clara, CA 95054 USA or visit http://www.sun.com if you need
19 * additional information or have any questions.
20 */
21
22
23/*******************************************************************************
24* Header Files *
25*******************************************************************************/
26#define LOG_GROUP LOG_GROUP_GMM
27#include <VBox/gmm.h>
28#include <VBox/vmm.h>
29#include <VBox/vm.h>
30#include <VBox/sup.h>
31#include <VBox/err.h>
32#include <VBox/param.h>
33
34#include <iprt/assert.h>
35#include <VBox/log.h>
36#include <iprt/mem.h>
37#include <iprt/string.h>
38
39
40/**
41 * @see GMMR0InitialReservation
42 */
43GMMR3DECL(int) GMMR3InitialReservation(PVM pVM, uint64_t cBasePages, uint32_t cShadowPages, uint32_t cFixedPages,
44 GMMOCPOLICY enmPolicy, GMMPRIORITY enmPriority)
45{
46 GMMINITIALRESERVATIONREQ Req;
47 Req.Hdr.u32Magic = SUPVMMR0REQHDR_MAGIC;
48 Req.Hdr.cbReq = sizeof(Req);
49 Req.cBasePages = cBasePages;
50 Req.cShadowPages = cShadowPages;
51 Req.cFixedPages = cFixedPages;
52 Req.enmPolicy = enmPolicy;
53 Req.enmPriority = enmPriority;
54 return VMMR3CallR0(pVM, VMMR0_DO_GMM_INITIAL_RESERVATION, 0, &Req.Hdr);
55}
56
57
58/**
59 * @see GMMR0UpdateReservation
60 */
61GMMR3DECL(int) GMMR3UpdateReservation(PVM pVM, uint64_t cBasePages, uint32_t cShadowPages, uint32_t cFixedPages)
62{
63 GMMUPDATERESERVATIONREQ Req;
64 Req.Hdr.u32Magic = SUPVMMR0REQHDR_MAGIC;
65 Req.Hdr.cbReq = sizeof(Req);
66 Req.cBasePages = cBasePages;
67 Req.cShadowPages = cShadowPages;
68 Req.cFixedPages = cFixedPages;
69 return VMMR3CallR0(pVM, VMMR0_DO_GMM_UPDATE_RESERVATION, 0, &Req.Hdr);
70}
71
72
73/**
74 * Prepares a GMMR0AllocatePages request.
75 *
76 * @returns VINF_SUCCESS or VERR_NO_TMP_MEMORY.
77 * @param pVM Pointer to the shared VM structure.
78 * @param[out] ppReq Where to store the pointer to the request packet.
79 * @param cPages The number of pages that's to be allocated.
80 * @param enmAccount The account to charge.
81 */
82GMMR3DECL(int) GMMR3AllocatePagesPrepare(PVM pVM, PGMMALLOCATEPAGESREQ *ppReq, uint32_t cPages, GMMACCOUNT enmAccount)
83{
84 uint32_t cb = RT_OFFSETOF(GMMALLOCATEPAGESREQ, aPages[cPages]);
85 PGMMALLOCATEPAGESREQ pReq = (PGMMALLOCATEPAGESREQ)RTMemTmpAllocZ(cb);
86 if (!pReq)
87 return VERR_NO_TMP_MEMORY;
88
89 pReq->Hdr.u32Magic = SUPVMMR0REQHDR_MAGIC;
90 pReq->Hdr.cbReq = cb;
91 pReq->enmAccount = enmAccount;
92 pReq->cPages = cPages;
93 NOREF(pVM);
94 *ppReq = pReq;
95 return VINF_SUCCESS;
96}
97
98
/**
 * Performs a GMMR0AllocatePages request.
 * This will call VMSetError on failure.
 *
 * Loops until ring-0 either satisfies the allocation or fails with a
 * non-retryable status.  A VERR_GMM_SEED_ME return is answered by
 * allocating one GMM chunk in ring-3 and handing it to ring-0, then
 * retrying the allocation.
 *
 * @returns VBox status code.
 * @param   pVM     Pointer to the shared VM structure.
 * @param   pReq    Pointer to the request (returned by GMMR3AllocatePagesPrepare).
 */
GMMR3DECL(int) GMMR3AllocatePagesPerform(PVM pVM, PGMMALLOCATEPAGESREQ pReq)
{
    for (unsigned i = 0; ; i++)
    {
        /* Forward the request to ring-0. */
        int rc = VMMR3CallR0(pVM, VMMR0_DO_GMM_ALLOCATE_PAGES, 0, &pReq->Hdr);
        if (RT_SUCCESS(rc))
        {
#ifdef LOG_ENABLED
            for (uint32_t iPage = 0; iPage < pReq->cPages; iPage++)
                Log3(("GMMR3AllocatePagesPerform: idPage=%#x HCPhys=%RHp\n",
                      pReq->aPages[iPage].idPage, pReq->aPages[iPage].HCPhysGCPhys));
#endif
            return rc;
        }
        /* Only VERR_GMM_SEED_ME is retryable; any other failure becomes a VM error. */
        if (rc != VERR_GMM_SEED_ME)
            return VMSetError(pVM, rc, RT_SRC_POS,
                              N_("GMMR0AllocatePages failed to allocate %u pages"),
                              pReq->cPages);
        /* One seeded chunk per iteration; needing more seedings than pages
           requested would mean ring-0 isn't making progress. */
        Assert(i < pReq->cPages);

        /*
         * Seed another chunk.
         */
        void *pvChunk;
        rc = SUPR3PageAlloc(GMM_CHUNK_SIZE >> PAGE_SHIFT, &pvChunk);
        if (RT_FAILURE(rc))
            return VMSetError(pVM, rc, RT_SRC_POS,
                              N_("Out of memory (SUPR3PageAlloc) seeding a %u pages allocation request"),
                              pReq->cPages);

        /* Hand the chunk to ring-0.  NOTE(review): pvChunk is not freed on
           failure here — presumably ownership may already have transferred to
           ring-0 at that point; confirm against GMMR0SeedChunk. */
        rc = VMMR3CallR0(pVM, VMMR0_DO_GMM_SEED_CHUNK, (uintptr_t)pvChunk, NULL);
        if (RT_FAILURE(rc))
            return VMSetError(pVM, rc, RT_SRC_POS, N_("GMM seeding failed"));
    }
}
142
143
144/**
145 * Cleans up a GMMR0AllocatePages request.
146 * @param pReq Pointer to the request (returned by GMMR3AllocatePagesPrepare).
147 */
148GMMR3DECL(void) GMMR3AllocatePagesCleanup(PGMMALLOCATEPAGESREQ pReq)
149{
150 RTMemTmpFree(pReq);
151}
152
153
154/**
155 * Prepares a GMMR0FreePages request.
156 *
157 * @returns VINF_SUCCESS or VERR_NO_TMP_MEMORY.
158 * @param pVM Pointer to the shared VM structure.
159 * @param[out] ppReq Where to store the pointer to the request packet.
160 * @param cPages The number of pages that's to be freed.
161 * @param enmAccount The account to charge.
162 */
163GMMR3DECL(int) GMMR3FreePagesPrepare(PVM pVM, PGMMFREEPAGESREQ *ppReq, uint32_t cPages, GMMACCOUNT enmAccount)
164{
165 uint32_t cb = RT_OFFSETOF(GMMFREEPAGESREQ, aPages[cPages]);
166 PGMMFREEPAGESREQ pReq = (PGMMFREEPAGESREQ)RTMemTmpAllocZ(cb);
167 if (!pReq)
168 return VERR_NO_TMP_MEMORY;
169
170 pReq->Hdr.u32Magic = SUPVMMR0REQHDR_MAGIC;
171 pReq->Hdr.cbReq = cb;
172 pReq->enmAccount = enmAccount;
173 pReq->cPages = cPages;
174 NOREF(pVM);
175 *ppReq = pReq;
176 return VINF_SUCCESS;
177}
178
179
180/**
181 * Re-prepares a GMMR0FreePages request.
182 *
183 * @returns VINF_SUCCESS or VERR_NO_TMP_MEMORY.
184 * @param pVM Pointer to the shared VM structure.
185 * @param pReq A request buffer previously returned by
186 * GMMR3FreePagesPrepare().
187 * @param cPages The number of pages originally passed to
188 * GMMR3FreePagesPrepare().
189 * @param enmAccount The account to charge.
190 */
191GMMR3DECL(void) GMMR3FreePagesRePrep(PVM pVM, PGMMFREEPAGESREQ pReq, uint32_t cPages, GMMACCOUNT enmAccount)
192{
193 Assert(pReq->Hdr.u32Magic == SUPVMMR0REQHDR_MAGIC);
194 pReq->Hdr.cbReq = RT_OFFSETOF(GMMFREEPAGESREQ, aPages[cPages]);
195 pReq->enmAccount = enmAccount;
196 pReq->cPages = cPages;
197 NOREF(pVM);
198}
199
200
201/**
202 * Performs a GMMR0FreePages request.
203 * This will call VMSetError on failure.
204 *
205 * @returns VBox status code.
206 * @param pVM Pointer to the shared VM structure.
207 * @param pReq Pointer to the request (returned by GMMR3FreePagesPrepare).
208 * @param cActualPages The number of pages actually freed.
209 */
210GMMR3DECL(int) GMMR3FreePagesPerform(PVM pVM, PGMMFREEPAGESREQ pReq, uint32_t cActualPages)
211{
212 /*
213 * Adjust the request if we ended up with fewer pages than anticipated.
214 */
215 if (cActualPages != pReq->cPages)
216 {
217 AssertReturn(cActualPages < pReq->cPages, VERR_INTERNAL_ERROR);
218 if (!cActualPages)
219 return VINF_SUCCESS;
220 pReq->cPages = cActualPages;
221 pReq->Hdr.cbReq = RT_OFFSETOF(GMMFREEPAGESREQ, aPages[cActualPages]);
222 }
223
224 /*
225 * Do the job.
226 */
227 int rc = VMMR3CallR0(pVM, VMMR0_DO_GMM_FREE_PAGES, 0, &pReq->Hdr);
228 if (RT_SUCCESS(rc))
229 return rc;
230 AssertRC(rc);
231 return VMSetError(pVM, rc, RT_SRC_POS,
232 N_("GMMR0FreePages failed to free %u pages"),
233 pReq->cPages);
234}
235
236
237/**
238 * Cleans up a GMMR0FreePages request.
239 * @param pReq Pointer to the request (returned by GMMR3FreePagesPrepare).
240 */
241GMMR3DECL(void) GMMR3FreePagesCleanup(PGMMFREEPAGESREQ pReq)
242{
243 RTMemTmpFree(pReq);
244}
245
246
247/**
248 * Frees allocated pages, for bailing out on failure.
249 *
250 * This will not call VMSetError on failure but will use AssertLogRel instead.
251 *
252 * @param pVM Pointer to the shared VM structure.
253 * @param pAllocReq The allocation request to undo.
254 */
255GMMR3DECL(void) GMMR3FreeAllocatedPages(PVM pVM, GMMALLOCATEPAGESREQ const *pAllocReq)
256{
257 uint32_t cb = RT_OFFSETOF(GMMFREEPAGESREQ, aPages[pAllocReq->cPages]);
258 PGMMFREEPAGESREQ pReq = (PGMMFREEPAGESREQ)RTMemTmpAllocZ(cb);
259 AssertLogRelReturnVoid(pReq);
260
261 pReq->Hdr.u32Magic = SUPVMMR0REQHDR_MAGIC;
262 pReq->Hdr.cbReq = cb;
263 pReq->enmAccount = pAllocReq->enmAccount;
264 pReq->cPages = pAllocReq->cPages;
265 uint32_t iPage = pAllocReq->cPages;
266 while (iPage-- > 0)
267 {
268 Assert(pAllocReq->aPages[iPage].idPage != NIL_GMM_PAGEID);
269 pReq->aPages[iPage].idPage = pAllocReq->aPages[iPage].idPage;
270 }
271
272 int rc = VMMR3CallR0(pVM, VMMR0_DO_GMM_FREE_PAGES, 0, &pReq->Hdr);
273 AssertLogRelRC(rc);
274
275 RTMemTmpFree(pReq);
276}
277
278
279/**
280 * @see GMMR0BalloonedPages
281 */
282GMMR3DECL(int) GMMR3BalloonedPages(PVM pVM, GMMBALLOONACTION enmAction, uint32_t cBalloonedPages)
283{
284 GMMBALLOONEDPAGESREQ Req;
285 Req.Hdr.u32Magic = SUPVMMR0REQHDR_MAGIC;
286 Req.Hdr.cbReq = sizeof(Req);
287 Req.enmAction = enmAction;
288 Req.cBalloonedPages = cBalloonedPages;
289
290 return VMMR3CallR0(pVM, VMMR0_DO_GMM_BALLOONED_PAGES, 0, &Req.Hdr);
291}
292
293/**
294 * @see GMMR0QueryVMMMemoryStatsReq
295 */
296GMMR3DECL(int) GMMR3QueryVMMMemoryStats(PVM pVM, uint64_t *pcTotalAllocPages, uint64_t *pcTotalFreePages, uint64_t *pcTotalBalloonPages)
297{
298 GMMMEMSTATSREQ Req;
299 Req.Hdr.u32Magic = SUPVMMR0REQHDR_MAGIC;
300 Req.Hdr.cbReq = sizeof(Req);
301 Req.cAllocPages = 0;
302 Req.cFreePages = 0;
303 Req.cBalloonedPages = 0;
304
305 *pcTotalAllocPages = 0;
306 *pcTotalFreePages = 0;
307 *pcTotalBalloonPages = 0;
308
309 /* Must be callable from any thread, so can't use VMMR3CallR0. */
310 int rc = SUPR3CallVMMR0Ex(pVM->pVMR0, 0, VMMR0_DO_GMM_QUERY_VMM_MEM_STATS, 0, &Req.Hdr);
311 if (rc == VINF_SUCCESS)
312 {
313 *pcTotalAllocPages = Req.cAllocPages;
314 *pcTotalFreePages = Req.cFreePages;
315 *pcTotalBalloonPages = Req.cBalloonedPages;
316 }
317 return rc;
318}
319
320/**
321 * @see GMMR0MapUnmapChunk
322 */
323GMMR3DECL(int) GMMR3MapUnmapChunk(PVM pVM, uint32_t idChunkMap, uint32_t idChunkUnmap, PRTR3PTR ppvR3)
324{
325 GMMMAPUNMAPCHUNKREQ Req;
326 Req.Hdr.u32Magic = SUPVMMR0REQHDR_MAGIC;
327 Req.Hdr.cbReq = sizeof(Req);
328 Req.idChunkMap = idChunkMap;
329 Req.idChunkUnmap = idChunkUnmap;
330 Req.pvR3 = NULL;
331 int rc = VMMR3CallR0(pVM, VMMR0_DO_GMM_MAP_UNMAP_CHUNK, 0, &Req.Hdr);
332 if (RT_SUCCESS(rc) && ppvR3)
333 *ppvR3 = Req.pvR3;
334 return rc;
335}
336
337/**
338 * @see GMMR0FreeLargePage
339 */
340GMMR3DECL(int) GMMR3FreeLargePage(PVM pVM, uint32_t idPage)
341{
342 GMMFREELARGEPAGEREQ Req;
343 Req.Hdr.u32Magic = SUPVMMR0REQHDR_MAGIC;
344 Req.Hdr.cbReq = sizeof(Req);
345 Req.idPage = idPage;
346 return VMMR3CallR0(pVM, VMMR0_DO_GMM_FREE_LARGE_PAGE, 0, &Req.Hdr);
347}
348
349/**
350 * @see GMMR0SeedChunk
351 */
352GMMR3DECL(int) GMMR3SeedChunk(PVM pVM, RTR3PTR pvR3)
353{
354 return VMMR3CallR0(pVM, VMMR0_DO_GMM_SEED_CHUNK, (uintptr_t)pvR3, NULL);
355}
356
357
358/**
359 * @see GMMR0RegisterSharedModule
360 */
361GMMR3DECL(int) GMMR3RegisterSharedModule(PVM pVM, char *pszModuleName, char *pszVersion, RTGCPTR GCBaseAddr, uint32_t cbModule,
362 unsigned cRegions, VMMDEVSHAREDREGIONDESC *pRegions)
363{
364 PGMMREGISTERSHAREDMODULEREQ pReq;
365 int rc;
366
367 /* Sanity check. */
368 AssertReturn(cRegions < VMMDEVSHAREDREGIONDESC_MAX, VERR_INVALID_PARAMETER);
369
370 pReq = (PGMMREGISTERSHAREDMODULEREQ)RTMemAllocZ(RT_OFFSETOF(GMMREGISTERSHAREDMODULEREQ, aRegions[cRegions]));
371 AssertReturn(pReq, VERR_NO_MEMORY);
372
373 pReq->Hdr.u32Magic = SUPVMMR0REQHDR_MAGIC;
374 pReq->Hdr.cbReq = sizeof(*pReq);
375 pReq->GCBaseAddr = GCBaseAddr;
376 pReq->cbModule = cbModule;
377 pReq->cRegions = cRegions;
378 for (unsigned i = 0; i < cRegions; i++)
379 pReq->aRegions[i] = pRegions[i];
380
381 if ( RTStrCopy(pReq->szName, sizeof(pReq->szName), pszModuleName) != VINF_SUCCESS
382 || RTStrCopy(pReq->szVersion, sizeof(pReq->szVersion), pszVersion) != VINF_SUCCESS)
383 {
384 rc = VERR_BUFFER_OVERFLOW;
385 goto end;
386 }
387
388 rc = VMMR3CallR0(pVM, VMMR0_DO_GMM_REGISTER_SHARED_MODULE, 0, &pReq->Hdr);
389end:
390 RTMemFree(pReq);
391 return rc;
392}
393
394/**
395 * @see GMMR0RegisterSharedModule
396 */
397GMMR3DECL(int) GMMR3UnregisterSharedModule(PVM pVM, char *pszModuleName, char *pszVersion, RTGCPTR GCBaseAddr, uint32_t cbModule)
398{
399 GMMUNREGISTERSHAREDMODULEREQ Req;
400 Req.Hdr.u32Magic = SUPVMMR0REQHDR_MAGIC;
401 Req.Hdr.cbReq = sizeof(Req);
402
403 Req.GCBaseAddr = GCBaseAddr;
404 Req.cbModule = cbModule;
405
406 if ( RTStrCopy(Req.szName, sizeof(Req.szName), pszModuleName) != VINF_SUCCESS
407 || RTStrCopy(Req.szVersion, sizeof(Req.szVersion), pszVersion) != VINF_SUCCESS)
408 {
409 return VERR_BUFFER_OVERFLOW;
410 }
411
412 return VMMR3CallR0(pVM, VMMR0_DO_GMM_UNREGISTER_SHARED_MODULE, 0, &Req.Hdr);
413}
414
415/**
416 * @see GMMR0CheckSharedModules
417 */
418GMMR3DECL(int) GMMR3CheckSharedModules(PVM pVM)
419{
420 return VMMR3CallR0(pVM, VMMR0_DO_GMM_CHECK_SHARED_MODULES, 0, NULL);
421}
Note: See TracBrowser for help on using the repository browser.

© 2024 Oracle Support Privacy / Do Not Sell My Info Terms of Use Trademark Policy Automated Access Etiquette