VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMR0/PGMR0.cpp@29437

Last change on this file since 29437 was 29424, checked in by vboxsync, 15 years ago

Shared paging updates

  • Property svn:eol-style set to native
  • Property svn:keywords set to Id
File size: 19.1 KB
/* $Id: PGMR0.cpp 29424 2010-05-12 15:11:09Z vboxsync $ */
/** @file
 * PGM - Page Manager and Monitor, Ring-0.
 */

/*
 * Copyright (C) 2007 Oracle Corporation
 *
 * This file is part of VirtualBox Open Source Edition (OSE), as
 * available from http://www.virtualbox.org. This file is free software;
 * you can redistribute it and/or modify it under the terms of the GNU
 * General Public License (GPL) as published by the Free Software
 * Foundation, in version 2 as it comes in the "COPYING" file of the
 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
 */

/*******************************************************************************
*   Header Files                                                               *
*******************************************************************************/
#define LOG_GROUP LOG_GROUP_PGM
#include <VBox/pgm.h>
#include <VBox/gmm.h>
#include "../PGMInternal.h"
#include <VBox/vm.h>
#include "../PGMInline.h"
#include <VBox/log.h>
#include <VBox/err.h>
#include <iprt/assert.h>
#include <iprt/mem.h>

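/* PGMR0Bth.h is included once per shadow paging mode below; each pass
   redefines PGM_BTH_NAME so the same template header declares a
   mode-specific set of ring-0 handlers (32-bit, PAE, AMD64, EPT) that
   PGMR0Trap0eHandlerNestedPaging dispatches to further down. */
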
RT_C_DECLS_BEGIN
#define PGM_BTH_NAME(name) PGM_BTH_NAME_32BIT_PROT(name)
#include "PGMR0Bth.h"
#undef PGM_BTH_NAME

#define PGM_BTH_NAME(name) PGM_BTH_NAME_PAE_PROT(name)
#include "PGMR0Bth.h"
#undef PGM_BTH_NAME

#define PGM_BTH_NAME(name) PGM_BTH_NAME_AMD64_PROT(name)
#include "PGMR0Bth.h"
#undef PGM_BTH_NAME

#define PGM_BTH_NAME(name) PGM_BTH_NAME_EPT_PROT(name)
#include "PGMR0Bth.h"
#undef PGM_BTH_NAME

RT_C_DECLS_END


/**
 * Worker function for PGMR3PhysAllocateHandyPages and pgmPhysEnsureHandyPage.
 *
 * @returns The following VBox status codes.
 * @retval  VINF_SUCCESS on success. FF cleared.
 * @retval  VINF_EM_NO_MEMORY if we're out of memory. The FF is set in this case.
 *
 * @param   pVM     The VM handle.
 * @param   pVCpu   The VMCPU handle.
 *
 * @remarks Must be called from within the PGM critical section. The caller
 *          must clear the new pages.
 */
VMMR0DECL(int) PGMR0PhysAllocateHandyPages(PVM pVM, PVMCPU pVCpu)
{
    Assert(PDMCritSectIsOwnerEx(&pVM->pgm.s.CritSect, pVCpu->idCpu));

    /*
     * Check for error injection.
     */
    if (RT_UNLIKELY(pVM->pgm.s.fErrInjHandyPages))
        return VERR_NO_MEMORY;

    /*
     * Try to allocate a full set of handy pages.
     */
    uint32_t iFirst = pVM->pgm.s.cHandyPages;
    AssertReturn(iFirst <= RT_ELEMENTS(pVM->pgm.s.aHandyPages), VERR_INTERNAL_ERROR);
    uint32_t cPages = RT_ELEMENTS(pVM->pgm.s.aHandyPages) - iFirst;
    if (!cPages)
        return VINF_SUCCESS;
    int rc = GMMR0AllocateHandyPages(pVM, pVCpu->idCpu, cPages, cPages, &pVM->pgm.s.aHandyPages[iFirst]);
    if (RT_SUCCESS(rc))
    {
        for (uint32_t i = 0; i < RT_ELEMENTS(pVM->pgm.s.aHandyPages); i++)
        {
            Assert(pVM->pgm.s.aHandyPages[i].idPage != NIL_GMM_PAGEID);
            Assert(pVM->pgm.s.aHandyPages[i].idPage <= GMM_PAGEID_LAST);
            Assert(pVM->pgm.s.aHandyPages[i].idSharedPage == NIL_GMM_PAGEID);
            Assert(pVM->pgm.s.aHandyPages[i].HCPhysGCPhys != NIL_RTHCPHYS);
            Assert(!(pVM->pgm.s.aHandyPages[i].HCPhysGCPhys & ~X86_PTE_PAE_PG_MASK));
        }

        pVM->pgm.s.cHandyPages = RT_ELEMENTS(pVM->pgm.s.aHandyPages);
    }
    else if (rc != VERR_GMM_SEED_ME)
    {
        if (   (   rc == VERR_GMM_HIT_GLOBAL_LIMIT
                || rc == VERR_GMM_HIT_VM_ACCOUNT_LIMIT)
            && iFirst < PGM_HANDY_PAGES_MIN)
        {

#ifdef VBOX_STRICT
            /* We're ASSUMING that GMM has updated all the entries before failing us. */
            uint32_t i;
            for (i = iFirst; i < RT_ELEMENTS(pVM->pgm.s.aHandyPages); i++)
            {
                Assert(pVM->pgm.s.aHandyPages[i].idPage == NIL_GMM_PAGEID);
                Assert(pVM->pgm.s.aHandyPages[i].idSharedPage == NIL_GMM_PAGEID);
                Assert(pVM->pgm.s.aHandyPages[i].HCPhysGCPhys == NIL_RTHCPHYS);
            }
#endif

            /*
             * Reduce the number of pages until we hit the minimum limit.
             */
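            /* Back off by quartering the request each round (cPages >>= 2),
               clamping so that iFirst + cPages never drops below
               PGM_HANDY_PAGES_MIN. */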
            do
            {
                cPages >>= 2;
                if (cPages + iFirst < PGM_HANDY_PAGES_MIN)
                    cPages = PGM_HANDY_PAGES_MIN - iFirst;
                rc = GMMR0AllocateHandyPages(pVM, pVCpu->idCpu, cPages, cPages, &pVM->pgm.s.aHandyPages[iFirst]);
            } while (   (   rc == VERR_GMM_HIT_GLOBAL_LIMIT
                         || rc == VERR_GMM_HIT_VM_ACCOUNT_LIMIT)
                     && cPages + iFirst > PGM_HANDY_PAGES_MIN);
            if (RT_SUCCESS(rc))
            {
#ifdef VBOX_STRICT
                i = iFirst + cPages;
                while (i-- > 0)
                {
                    Assert(pVM->pgm.s.aHandyPages[i].idPage != NIL_GMM_PAGEID);
                    Assert(pVM->pgm.s.aHandyPages[i].idPage <= GMM_PAGEID_LAST);
                    Assert(pVM->pgm.s.aHandyPages[i].idSharedPage == NIL_GMM_PAGEID);
                    Assert(pVM->pgm.s.aHandyPages[i].HCPhysGCPhys != NIL_RTHCPHYS);
                    Assert(!(pVM->pgm.s.aHandyPages[i].HCPhysGCPhys & ~X86_PTE_PAE_PG_MASK));
                }

                for (i = cPages + iFirst; i < RT_ELEMENTS(pVM->pgm.s.aHandyPages); i++)
                {
                    Assert(pVM->pgm.s.aHandyPages[i].idPage == NIL_GMM_PAGEID);
                    Assert(pVM->pgm.s.aHandyPages[i].idSharedPage == NIL_GMM_PAGEID);
                    Assert(pVM->pgm.s.aHandyPages[i].HCPhysGCPhys == NIL_RTHCPHYS);
                }
#endif

                pVM->pgm.s.cHandyPages = iFirst + cPages;
            }
        }

        if (RT_FAILURE(rc) && rc != VERR_GMM_SEED_ME)
        {
            LogRel(("PGMR0PhysAllocateHandyPages: rc=%Rrc iFirst=%d cPages=%d\n", rc, iFirst, cPages));
            VM_FF_SET(pVM, VM_FF_PGM_NO_MEMORY);
        }
    }


    LogFlow(("PGMR0PhysAllocateHandyPages: cPages=%d rc=%Rrc\n", cPages, rc));
    return rc;
}
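
/*
 * Illustrative sketch (not part of the original file): ring-3 reaches the
 * worker above through a VMMR0 call. Assuming the
 * VMMR0_DO_PGM_ALLOCATE_HANDY_PAGES operation of this era, the ring-3 side
 * looks roughly like:
 *
 *     int rc = VMMR3CallR0(pVM, VMMR0_DO_PGM_ALLOCATE_HANDY_PAGES, 0, NULL);
 *     if (rc == VERR_GMM_SEED_ME)
 *     {
 *         // hypothetical: allocate a chunk, hand it to GMM, then retry
 *     }
 *
 * VERR_GMM_SEED_ME passes through unlogged above precisely because the
 * caller is expected to seed GMM with a new memory chunk and retry.
 */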

/**
 * Worker function for PGMR3PhysAllocateLargeHandyPage.
 *
 * @returns The following VBox status codes.
 * @retval  VINF_SUCCESS on success.
 * @retval  VINF_EM_NO_MEMORY if we're out of memory.
 *
 * @param   pVM     The VM handle.
 * @param   pVCpu   The VMCPU handle.
 *
 * @remarks Must be called from within the PGM critical section. The caller
 *          must clear the new pages.
 */
VMMR0DECL(int) PGMR0PhysAllocateLargeHandyPage(PVM pVM, PVMCPU pVCpu)
{
    Assert(PDMCritSectIsOwnerEx(&pVM->pgm.s.CritSect, pVCpu->idCpu));

    Assert(!pVM->pgm.s.cLargeHandyPages);
    int rc = GMMR0AllocateLargePage(pVM, pVCpu->idCpu, _2M, &pVM->pgm.s.aLargeHandyPage[0].idPage, &pVM->pgm.s.aLargeHandyPage[0].HCPhysGCPhys);
    if (RT_SUCCESS(rc))
        pVM->pgm.s.cLargeHandyPages = 1;

    return rc;
}
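
/*
 * Illustrative sketch (not part of the original file): the worker above
 * parks one contiguous 2 MB page in aLargeHandyPage[0] for ring-3 to map
 * and clear. Assuming the VMMR0_DO_PGM_ALLOCATE_LARGE_HANDY_PAGE operation
 * of this era, the ring-3 side would be roughly:
 *
 *     int rc = VMMR3CallR0(pVM, VMMR0_DO_PGM_ALLOCATE_LARGE_HANDY_PAGE, 0, NULL);
 */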

/**
 * #PF Handler for nested paging.
 *
 * @returns VBox status code (appropriate for trap handling and GC return).
 * @param   pVM                 VM Handle.
 * @param   pVCpu               VMCPU Handle.
 * @param   enmShwPagingMode    Paging mode for the nested page tables.
 * @param   uErr                The trap error code.
 * @param   pRegFrame           Trap register frame.
 * @param   pvFault             The fault address.
 */
VMMR0DECL(int) PGMR0Trap0eHandlerNestedPaging(PVM pVM, PVMCPU pVCpu, PGMMODE enmShwPagingMode, RTGCUINT uErr, PCPUMCTXCORE pRegFrame, RTGCPHYS pvFault)
{
    int rc;

    LogFlow(("PGMTrap0eHandler: uErr=%RGx pvFault=%RGp eip=%RGv\n", uErr, pvFault, (RTGCPTR)pRegFrame->rip));
    STAM_PROFILE_START(&pVCpu->pgm.s.StatRZTrap0e, a);
    STAM_STATS({ pVCpu->pgm.s.CTX_SUFF(pStatTrap0eAttribution) = NULL; } );

    /* AMD uses the host's paging mode; Intel has a single mode (EPT). */
    AssertMsg(enmShwPagingMode == PGMMODE_32_BIT || enmShwPagingMode == PGMMODE_PAE || enmShwPagingMode == PGMMODE_PAE_NX || enmShwPagingMode == PGMMODE_AMD64 || enmShwPagingMode == PGMMODE_AMD64_NX || enmShwPagingMode == PGMMODE_EPT, ("enmShwPagingMode=%d\n", enmShwPagingMode));

#ifdef VBOX_WITH_STATISTICS
    /*
     * Error code stats.
     */
    if (uErr & X86_TRAP_PF_US)
    {
        if (!(uErr & X86_TRAP_PF_P))
        {
            if (uErr & X86_TRAP_PF_RW)
                STAM_COUNTER_INC(&pVCpu->pgm.s.StatRZTrap0eUSNotPresentWrite);
            else
                STAM_COUNTER_INC(&pVCpu->pgm.s.StatRZTrap0eUSNotPresentRead);
        }
        else if (uErr & X86_TRAP_PF_RW)
            STAM_COUNTER_INC(&pVCpu->pgm.s.StatRZTrap0eUSWrite);
        else if (uErr & X86_TRAP_PF_RSVD)
            STAM_COUNTER_INC(&pVCpu->pgm.s.StatRZTrap0eUSReserved);
        else if (uErr & X86_TRAP_PF_ID)
            STAM_COUNTER_INC(&pVCpu->pgm.s.StatRZTrap0eUSNXE);
        else
            STAM_COUNTER_INC(&pVCpu->pgm.s.StatRZTrap0eUSRead);
    }
    else
    {   /* Supervisor */
        if (!(uErr & X86_TRAP_PF_P))
        {
            if (uErr & X86_TRAP_PF_RW)
                STAM_COUNTER_INC(&pVCpu->pgm.s.StatRZTrap0eSVNotPresentWrite);
            else
                STAM_COUNTER_INC(&pVCpu->pgm.s.StatRZTrap0eSVNotPresentRead);
        }
        else if (uErr & X86_TRAP_PF_RW)
            STAM_COUNTER_INC(&pVCpu->pgm.s.StatRZTrap0eSVWrite);
        else if (uErr & X86_TRAP_PF_ID)
            STAM_COUNTER_INC(&pVCpu->pgm.s.StatRZTrap0eSNXE);
        else if (uErr & X86_TRAP_PF_RSVD)
            STAM_COUNTER_INC(&pVCpu->pgm.s.StatRZTrap0eSVReserved);
    }
#endif

    /*
     * Call the worker.
     *
     * We pretend the guest is in protected mode without paging, so we can use existing code to build the
     * nested page tables.
     */
    bool fLockTaken = false;
    switch (enmShwPagingMode)
    {
        case PGMMODE_32_BIT:
            rc = PGM_BTH_NAME_32BIT_PROT(Trap0eHandler)(pVCpu, uErr, pRegFrame, pvFault, &fLockTaken);
            break;
        case PGMMODE_PAE:
        case PGMMODE_PAE_NX:
            rc = PGM_BTH_NAME_PAE_PROT(Trap0eHandler)(pVCpu, uErr, pRegFrame, pvFault, &fLockTaken);
            break;
        case PGMMODE_AMD64:
        case PGMMODE_AMD64_NX:
            rc = PGM_BTH_NAME_AMD64_PROT(Trap0eHandler)(pVCpu, uErr, pRegFrame, pvFault, &fLockTaken);
            break;
        case PGMMODE_EPT:
            rc = PGM_BTH_NAME_EPT_PROT(Trap0eHandler)(pVCpu, uErr, pRegFrame, pvFault, &fLockTaken);
            break;
        default:
            AssertFailed();
            rc = VERR_INVALID_PARAMETER;
            break;
    }
    if (fLockTaken)
    {
        Assert(PGMIsLockOwner(pVM));
        pgmUnlock(pVM);
    }
    if (rc == VINF_PGM_SYNCPAGE_MODIFIED_PDE)
        rc = VINF_SUCCESS;
    else
    /* Note: hack alert for difficult to reproduce problem. */
    if (   rc == VERR_PAGE_NOT_PRESENT                  /* SMP only ; disassembly might fail. */
        || rc == VERR_PAGE_TABLE_NOT_PRESENT            /* seen with UNI & SMP */
        || rc == VERR_PAGE_DIRECTORY_PTR_NOT_PRESENT    /* seen with SMP */
        || rc == VERR_PAGE_MAP_LEVEL4_NOT_PRESENT)      /* precaution */
    {
        Log(("WARNING: Unexpected VERR_PAGE_TABLE_NOT_PRESENT (%d) for page fault at %RGp error code %x (rip=%RGv)\n", rc, pvFault, uErr, pRegFrame->rip));
        /* Some kind of inconsistency in the SMP case; it's safe to just execute the instruction again; not sure about single VCPU VMs though. */
        rc = VINF_SUCCESS;
    }

    STAM_STATS({ if (!pVCpu->pgm.s.CTX_SUFF(pStatTrap0eAttribution))
                     pVCpu->pgm.s.CTX_SUFF(pStatTrap0eAttribution) = &pVCpu->pgm.s.StatRZTrap0eTime2Misc; });
    STAM_PROFILE_STOP_EX(&pVCpu->pgm.s.StatRZTrap0e, pVCpu->pgm.s.CTX_SUFF(pStatTrap0eAttribution), a);
    return rc;
}
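
/*
 * Illustrative sketch (not part of the original file): this handler is
 * reached from the hardware-assisted execution loop when the CPU reports a
 * nested page fault / EPT violation. A VT-x exit path would call it roughly
 * like this (the local variable names are hypothetical):
 *
 *     rc = PGMR0Trap0eHandlerNestedPaging(pVM, pVCpu, PGMMODE_EPT, errCode,
 *                                         CPUMCTX2CORE(pCtx), GCPhysFault);
 *
 * On AMD-V the shadow paging mode passed in is the host paging mode rather
 * than PGMMODE_EPT, matching the AssertMsg at the top of the function.
 */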

#ifdef VBOX_WITH_PAGE_SHARING
/**
 * Check a registered module for shared page changes.
 *
 * @returns The following VBox status codes.
 *
 * @param   pVM         The VM handle.
 * @param   idCpu       VCPU id.
 * @param   pModule     Module description.
 * @param   pGVM        Pointer to the GVM instance data.
 */
VMMR0DECL(int) PGMR0SharedModuleCheckRegion(PVM pVM, VMCPUID idCpu, PGMMSHAREDMODULE pModule, PGVM pGVM)
{
    int                 rc = VINF_SUCCESS;
    PGMMSHAREDPAGEDESC  paPageDesc = NULL;
    uint32_t            cbPreviousRegion = 0;
    bool                fFlushTLBs = false;
    PVMCPU              pVCpu = &pVM->aCpus[idCpu];

    Log(("PGMR0SharedModuleCheck: check %s %s base=%RGv size=%x\n", pModule->szName, pModule->szVersion, pModule->Core.Key, pModule->cbModule));

    pgmLock(pVM);

    /* Check every region of the shared module. */
    for (unsigned i = 0; i < pModule->cRegions; i++)
    {
        Assert((pModule->aRegions[i].cbRegion & 0xfff) == 0);
        Assert((pModule->aRegions[i].GCRegionAddr & 0xfff) == 0);

        RTGCPTR  GCRegion = pModule->aRegions[i].GCRegionAddr;
        unsigned cbRegion = pModule->aRegions[i].cbRegion & ~0xfff;
        unsigned idxPage  = 0;
        bool     fValidChanges = false;

        if (cbPreviousRegion < cbRegion)
        {
            if (paPageDesc)
                RTMemFree(paPageDesc);

            paPageDesc = (PGMMSHAREDPAGEDESC)RTMemAlloc((cbRegion >> PAGE_SHIFT) * sizeof(*paPageDesc));
            if (!paPageDesc)
            {
                AssertFailed();
                rc = VERR_NO_MEMORY;
                goto end;
            }
            cbPreviousRegion = cbRegion;
        }

        while (cbRegion)
        {
            RTGCPHYS GCPhys;
            uint64_t fFlags;

            rc = PGMGstGetPage(pVCpu, GCRegion, &fFlags, &GCPhys);
            if (    rc == VINF_SUCCESS
                &&  !(fFlags & X86_PTE_RW)) /* important as we make assumptions about this below! */
            {
                PPGMPAGE pPage = pgmPhysGetPage(&pVM->pgm.s, GCPhys);
                if (    pPage
                    &&  !PGM_PAGE_IS_SHARED(pPage))
                {
                    fValidChanges = true;
                    paPageDesc[idxPage].uHCPhysPageId = PGM_PAGE_GET_PAGEID(pPage);
                    paPageDesc[idxPage].HCPhys        = PGM_PAGE_GET_HCPHYS(pPage);
                    paPageDesc[idxPage].GCPhys        = GCPhys;
                }
                else
                    paPageDesc[idxPage].uHCPhysPageId = NIL_GMM_PAGEID;
            }
            else
                paPageDesc[idxPage].uHCPhysPageId = NIL_GMM_PAGEID;

            idxPage++;
            GCRegion += PAGE_SIZE;
            cbRegion -= PAGE_SIZE;
        }

        if (fValidChanges)
        {
            rc = GMMR0SharedModuleCheckRange(pGVM, pModule, i, idxPage, paPageDesc);
            AssertRC(rc);
            if (RT_FAILURE(rc))
                break;

            for (unsigned i = 0; i < idxPage; i++)
            {
                /* Any change for this page? */
                if (paPageDesc[i].uHCPhysPageId != NIL_GMM_PAGEID)
                {
                    /** @todo maybe cache these to prevent the nth lookup. */
                    PPGMPAGE pPage = pgmPhysGetPage(&pVM->pgm.s, paPageDesc[i].GCPhys);
                    if (!pPage)
                    {
                        /* Should never happen. */
                        AssertFailed();
                        rc = VERR_PGM_PHYS_INVALID_PAGE_ID;
                        goto end;
                    }
                    Assert(!PGM_PAGE_IS_SHARED(pPage));

                    Log(("PGMR0SharedModuleCheck: shared page gc phys %RGp host %RHp->%RHp\n", paPageDesc[i].GCPhys, PGM_PAGE_GET_HCPHYS(pPage), paPageDesc[i].HCPhys));
                    if (paPageDesc[i].HCPhys != PGM_PAGE_GET_HCPHYS(pPage))
                    {
                        bool fFlush = false;

                        /* Page was replaced by an existing shared version of it; clear all references first. */
                        rc = pgmPoolTrackUpdateGCPhys(pVM, paPageDesc[i].GCPhys, pPage, true /* clear the entries */, &fFlush);
                        if (RT_FAILURE(rc))
                        {
                            AssertRC(rc);
                            goto end;
                        }
                        Assert(rc == VINF_SUCCESS || (VMCPU_FF_ISSET(pVCpu, VMCPU_FF_PGM_SYNC_CR3) && (pVCpu->pgm.s.fSyncFlags & PGM_SYNC_CLEAR_PGM_POOL)));
                        if (rc == VINF_SUCCESS)
                            fFlushTLBs |= fFlush;

                        /* Update the physical address and page id now. */
                        PGM_PAGE_SET_HCPHYS(pPage, paPageDesc[i].HCPhys);
                        PGM_PAGE_SET_PAGEID(pPage, paPageDesc[i].uHCPhysPageId);

                        /* Invalidate page map TLB entry for this page too. */
                        PGMPhysInvalidatePageMapTLBEntry(pVM, paPageDesc[i].GCPhys);
                    }
                    /* else nothing changed (== this page is now a shared page), so no need to flush anything. */

                    pVM->pgm.s.cSharedPages++;
                    pVM->pgm.s.cPrivatePages--;
                    PGM_PAGE_SET_STATE(pPage, PGM_PAGE_STATE_SHARED);
                }
            }
        }
        else
            rc = VINF_SUCCESS; /* nothing to do. */
    }

end:
    pgmUnlock(pVM);
    if (fFlushTLBs)
        PGM_INVL_ALL_VCPU_TLBS(pVM);

    if (paPageDesc)
        RTMemFree(paPageDesc);

    return rc;
}
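
/*
 * Summary of the flow above: phase one walks each region under the PGM lock
 * and collects candidate pages (read-only, not yet shared) into paPageDesc;
 * phase two has GMMR0SharedModuleCheckRange match those candidates against
 * the module's reference pages, after which every matched private page is
 * swapped for the shared copy: pool references are cleared, the HCPhys and
 * page id are updated, and the page map TLB entry is invalidated.
 */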
#endif

#if 0
/**
 * Shared module registration helper (called on the way out).
 *
 * @param   pVM     The VM handle.
 * @param   pReq    Registration request info.
 */
static DECLCALLBACK(void) pgmR3SharedModuleRegisterHelper(PVM pVM, PGMMREGISTERSHAREDMODULEREQ pReq)
{
    int rc;

    rc = GMMR3RegisterSharedModule(pVM, pReq);
    Assert(rc == VINF_SUCCESS || rc == VINF_PGM_SHARED_MODULE_COLLISION || rc == VINF_PGM_SHARED_MODULE_ALREADY_REGISTERED);
    if (rc == VINF_PGM_SHARED_MODULE_ALREADY_REGISTERED)
    {
        PVMCPU   pVCpu = VMMGetCpu(pVM);
        unsigned cFlushedPages = 0;

        /** @todo count copy-on-write actions in the trap handler so we don't have to check everything all the time! */

        /* Count the number of shared pages that were changed (copy-on-write). */
        for (unsigned i = 0; i < pReq->cRegions; i++)
        {
            Assert((pReq->aRegions[i].cbRegion & 0xfff) == 0);
            Assert((pReq->aRegions[i].GCRegionAddr & 0xfff) == 0);

            RTGCPTR  GCRegion = pReq->aRegions[i].GCRegionAddr;
            uint32_t cbRegion = pReq->aRegions[i].cbRegion & ~0xfff;

            while (cbRegion)
            {
                RTGCPHYS GCPhys;
                uint64_t fFlags;

                rc = PGMGstGetPage(pVCpu, GCRegion, &fFlags, &GCPhys);
                if (    rc == VINF_SUCCESS
                    &&  !(fFlags & X86_PTE_RW))
                {
                    PPGMPAGE pPage = pgmPhysGetPage(&pVM->pgm.s, GCPhys);
                    if (    pPage
                        &&  !PGM_PAGE_IS_SHARED(pPage))
                    {
                        cFlushedPages++;
                    }
                }

                GCRegion += PAGE_SIZE;
                cbRegion -= PAGE_SIZE;
            }
        }

        if (cFlushedPages > 32)
            rc = VINF_SUCCESS;  /* force recheck below */
    }
    /* Full (re)check needed? */
    if (rc == VINF_SUCCESS)
    {
        pReq->Hdr.u32Magic = SUPVMMR0REQHDR_MAGIC;
        pReq->Hdr.cbReq = RT_OFFSETOF(GMMREGISTERSHAREDMODULEREQ, aRegions[pReq->cRegions]);

        /* We must stall other VCPUs as we'd otherwise have to send IPI flush commands for every single change we make. */
        rc = VMMR3EmtRendezvous(pVM, VMMEMTRENDEZVOUS_FLAGS_TYPE_ONCE, pgmR3SharedModuleRegRendezvous, pReq);
        AssertRC(rc);
    }
    RTMemFree(pReq);
    return;
}
#endif