Timestamp:
    Nov 6, 2021 3:21:57 PM (3 years ago)
Location:
    trunk
Files:
    9 edited
Legend:
    ' '  unmodified
    '+'  added
    '-'  removed
trunk/include/VBox/err.h (r92170 → r92248)

@@ -2067 +2067 @@
  * @{
  */
-/** The GMM is out of pages and needs to be give another chunk of user memory that
- * it can lock down and borrow pages from. */
-#define VERR_GMM_SEED_ME (-3800)
 /** Unable to allocate more pages from the host system. */
 #define VERR_GMM_OUT_OF_MEMORY (-3801)
trunk/include/VBox/vmm/gmm.h (r82989 → r92248)

@@ -415 +415 @@
 GMMR0DECL(int) GMMR0BalloonedPages(PGVM pGVM, VMCPUID idCpu, GMMBALLOONACTION enmAction, uint32_t cBalloonedPages);
 GMMR0DECL(int) GMMR0MapUnmapChunk(PGVM pGVM, uint32_t idChunkMap, uint32_t idChunkUnmap, PRTR3PTR ppvR3);
-GMMR0DECL(int) GMMR0SeedChunk(PGVM pGVM, VMCPUID idCpu, RTR3PTR pvR3);
 GMMR0DECL(int) GMMR0PageIdToVirt(PGVM pGVM, uint32_t idPage, void **ppv);
 GMMR0DECL(int) GMMR0RegisterSharedModule(PGVM pGVM, VMCPUID idCpu, VBOXOSFAMILY enmGuestOS, char *pszModuleName, …
@@ -791 +790 @@
 GMMR3DECL(int) GMMR3FreeLargePage(PVM pVM, uint32_t idPage);
 GMMR3DECL(int) GMMR3MapUnmapChunk(PVM pVM, uint32_t idChunkMap, uint32_t idChunkUnmap, PRTR3PTR ppvR3);
-GMMR3DECL(int) GMMR3SeedChunk(PVM pVM, RTR3PTR pvR3);
 GMMR3DECL(int) GMMR3QueryHypervisorMemoryStats(PVM pVM, uint64_t *pcTotalAllocPages, uint64_t *pcTotalFreePages, uint64_t *pcTotalBalloonPages, uint64_t *puTotalBalloonSize);
 GMMR3DECL(int) GMMR3QueryMemoryStats(PVM pVM, uint64_t *pcAllocPages, uint64_t *pcMaxPages, uint64_t *pcBalloonPages);
trunk/include/VBox/vmm/vmm.h (r92200 → r92248)

@@ -318 +318 @@
     /** Call GMMR0MapUnmapChunk(). */
     VMMR0_DO_GMM_MAP_UNMAP_CHUNK,
-    /** Call GMMR0SeedChunk(). */
-    VMMR0_DO_GMM_SEED_CHUNK,
     /** Call GMMR0RegisterSharedModule. */
     VMMR0_DO_GMM_REGISTER_SHARED_MODULE,
trunk/src/VBox/VMM/VMMR0/GMMR0.cpp (r92202 → r92248)

@@ -192 +192 @@
 #if defined(RT_OS_WINDOWS) || defined(RT_OS_DARWIN) || defined(DOXYGEN_RUNNING)
 # define VBOX_USE_CRIT_SECT_FOR_GIANT
-#endif
-
-#if defined(VBOX_WITH_LINEAR_HOST_PHYS_MEM) && !defined(RT_OS_DARWIN) && 0
-/** Enable the legacy mode code (will be dropped soon). */
-# define GMM_WITH_LEGACY_MODE
 #endif
@@ -465 +460 @@
 /** Indicates that the chunk is a large page (2MB). */
 #define GMM_CHUNK_FLAGS_LARGE_PAGE UINT16_C(0x0001)
-#ifdef GMM_WITH_LEGACY_MODE
-/** Indicates that the chunk was locked rather than allocated directly. */
-# define GMM_CHUNK_FLAGS_SEEDED UINT16_C(0x0002)
-#endif
 /** @} */
@@ -579 +570 @@
     uint64_t            cBalloonedPages;
 
-#ifndef GMM_WITH_LEGACY_MODE
-# ifdef VBOX_WITH_LINEAR_HOST_PHYS_MEM
+#ifdef VBOX_WITH_LINEAR_HOST_PHYS_MEM
     /** Whether #RTR0MemObjAllocPhysNC works. */
     bool                fHasWorkingAllocPhysNC;
-# else
+#else
     bool                fPadding;
-# endif
-#else
-    /** The legacy allocation mode indicator.
-     * This is determined at initialization time. */
-    bool                fLegacyAllocationMode;
 #endif
     /** The bound memory mode indicator. …
@@ -829 +814 @@
     if (RT_SUCCESS(rc))
     {
-#ifndef GMM_WITH_LEGACY_MODE
         /*
          * Figure out how we're going to allocate stuff (only applicable to
…
          */
         pGMM->fBoundMemoryMode = false;
-# ifdef VBOX_WITH_LINEAR_HOST_PHYS_MEM
+#ifdef VBOX_WITH_LINEAR_HOST_PHYS_MEM
         pGMM->fHasWorkingAllocPhysNC = false;
…
             SUPR0Printf("GMMR0Init: Warning! RTR0MemObjAllocPhysNC(, %u, NIL_RTHCPHYS) -> %d!\n", GMM_CHUNK_SIZE, rc);
 # endif
-#else /* GMM_WITH_LEGACY_MODE */
-        /*
-         * Check and see if RTR0MemObjAllocPhysNC works.
-         */
-# if 0 /* later, see @bufref{3170}. */
-        RTR0MEMOBJ MemObj;
-        rc = RTR0MemObjAllocPhysNC(&MemObj, _64K, NIL_RTHCPHYS);
-        if (RT_SUCCESS(rc))
-        {
-            rc = RTR0MemObjFree(MemObj, true);
-            AssertRC(rc);
-        }
-        else if (rc == VERR_NOT_SUPPORTED)
-            pGMM->fLegacyAllocationMode = pGMM->fBoundMemoryMode = true;
-        else
-            SUPR0Printf("GMMR0Init: RTR0MemObjAllocPhysNC(,64K,Any) -> %d!\n", rc);
-# else
-#  if defined(RT_OS_WINDOWS) || (defined(RT_OS_SOLARIS) && ARCH_BITS == 64) || defined(RT_OS_LINUX) || defined(RT_OS_FREEBSD)
-        pGMM->fLegacyAllocationMode = false;
-#   if ARCH_BITS == 32
-        /* Don't reuse possibly partial chunks because of the virtual
-           address space limitation. */
-        pGMM->fBoundMemoryMode = true;
-#   else
-        pGMM->fBoundMemoryMode = false;
-#   endif
-#  else
-        pGMM->fLegacyAllocationMode = true;
-        pGMM->fBoundMemoryMode = true;
-#  endif
-# endif
-#endif /* GMM_WITH_LEGACY_MODE */
@@ -894 +846 @@
 
         g_pGMM = pGMM;
-#ifdef GMM_WITH_LEGACY_MODE
-        LogFlow(("GMMInit: pGMM=%p fLegacyAllocationMode=%RTbool fBoundMemoryMode=%RTbool\n", pGMM, pGMM->fLegacyAllocationMode, pGMM->fBoundMemoryMode));
-#elif defined(VBOX_WITH_LINEAR_HOST_PHYS_MEM)
+#ifdef VBOX_WITH_LINEAR_HOST_PHYS_MEM
         LogFlow(("GMMInit: pGMM=%p fBoundMemoryMode=%RTbool fHasWorkingAllocPhysNC=%RTbool\n", pGMM, pGMM->fBoundMemoryMode, pGMM->fHasWorkingAllocPhysNC));
 #else
@@ -2213 +2163 @@
  * Registers a new chunk of memory.
  *
- * This is called by both gmmR0AllocateOneChunk and GMMR0SeedChunk.
+ * This is called by gmmR0AllocateOneChunk.
  *
  * @returns VBox status code.  On success, the giant GMM lock will be held, the …
@@ -2235 +2185 @@
     Assert(pGMM->hMtxOwner != RTThreadNativeSelf());
     Assert(hGVM != NIL_GVM_HANDLE || pGMM->fBoundMemoryMode);
-#ifdef GMM_WITH_LEGACY_MODE
-    Assert(fChunkFlags == 0 || fChunkFlags == GMM_CHUNK_FLAGS_LARGE_PAGE || fChunkFlags == GMM_CHUNK_FLAGS_SEEDED);
-#else
     Assert(fChunkFlags == 0 || fChunkFlags == GMM_CHUNK_FLAGS_LARGE_PAGE);
-#endif
 
 #ifndef VBOX_WITH_LINEAR_HOST_PHYS_MEM
     /*
      * Get a ring-0 mapping of the object.
      */
-# ifdef GMM_WITH_LEGACY_MODE
-    uint8_t *pbMapping = !(fChunkFlags & GMM_CHUNK_FLAGS_SEEDED) ? (uint8_t *)RTR0MemObjAddress(hMemObj) : NULL;
-# else
     uint8_t *pbMapping = (uint8_t *)RTR0MemObjAddress(hMemObj);
-# endif
     if (!pbMapping)
     {
@@ -2360 +2302 @@
 
     RTR0MEMOBJ hMemObj;
-#ifndef GMM_WITH_LEGACY_MODE
     int rc;
-# ifdef VBOX_WITH_LINEAR_HOST_PHYS_MEM
+#ifdef VBOX_WITH_LINEAR_HOST_PHYS_MEM
     if (pGMM->fHasWorkingAllocPhysNC)
         rc = RTR0MemObjAllocPhysNC(&hMemObj, GMM_CHUNK_SIZE, NIL_RTHCPHYS);
     else
-# endif
+#endif
         rc = RTR0MemObjAllocPage(&hMemObj, GMM_CHUNK_SIZE, false /*fExecutable*/);
-#else
-    int rc = RTR0MemObjAllocPhysNC(&hMemObj, GMM_CHUNK_SIZE, NIL_RTHCPHYS);
-#endif
     if (RT_SUCCESS(rc))
     {
@@ -2656 +2594 @@
  * @returns VBox status code:
  * @retval VINF_SUCCESS on success.
- * @retval VERR_GMM_SEED_ME if seeding via GMMR0SeedChunk or
- *         gmmR0AllocateMoreChunks is necessary.
  * @retval VERR_GMM_HIT_GLOBAL_LIMIT if we've exhausted the available pages.
  * @retval VERR_GMM_HIT_VM_ACCOUNT_LIMIT if we've hit the VM account limit, …
@@ -2721 +2657 @@
     }
 
-#ifdef GMM_WITH_LEGACY_MODE
-    /*
-     * If we're in legacy memory mode, it's easy to figure if we have
-     * sufficient number of pages up-front.
-     */
-    if (   pGMM->fLegacyAllocationMode
-        && pGVM->gmm.s.Private.cFreePages < cPages)
-    {
-        Assert(pGMM->fBoundMemoryMode);
-        return VERR_GMM_SEED_ME;
-    }
-#endif
-
     /*
      * Update the accounts before we proceed because we might be leaving the
…
     pGVM->gmm.s.Stats.cPrivatePages += cPages;
     pGMM->cAllocatedPages += cPages;
-
-#ifdef GMM_WITH_LEGACY_MODE
-    /*
-     * Part two of it's-easy-in-legacy-memory-mode.
-     */
-    if (pGMM->fLegacyAllocationMode)
-    {
-        uint32_t iPage = gmmR0AllocatePagesInBoundMode(pGVM, 0, cPages, paPages);
-        AssertReleaseReturn(iPage == cPages, VERR_GMM_ALLOC_PAGES_IPE);
-        return VINF_SUCCESS;
-    }
-#endif
@@ -2903 +2814 @@
  * @retval VERR_GMM_NOT_PAGE_OWNER if one of the pages to be updated wasn't
  *         owned by the VM.
- * @retval VERR_GMM_SEED_ME if seeding via GMMR0SeedChunk is necessary.
  * @retval VERR_GMM_HIT_GLOBAL_LIMIT if we've exhausted the available pages.
  * @retval VERR_GMM_HIT_VM_ACCOUNT_LIMIT if we've hit the VM account limit, …
@@ -3100 +3010 @@
  * @retval VINF_SUCCESS on success.
  * @retval VERR_NOT_OWNER if the caller is not an EMT.
- * @retval VERR_GMM_SEED_ME if seeding via GMMR0SeedChunk is necessary.
  * @retval VERR_GMM_HIT_GLOBAL_LIMIT if we've exhausted the available pages.
  * @retval VERR_GMM_HIT_VM_ACCOUNT_LIMIT if we've hit the VM account limit, …
@@ -3227 +3136 @@
     if (RT_FAILURE(rc))
         return rc;
-
-#ifdef GMM_WITH_LEGACY_MODE
-//    /* Not supported in legacy mode where we allocate the memory in ring 3 and lock it in ring 0. */
-//    if (pGMM->fLegacyAllocationMode)
-//        return VERR_NOT_SUPPORTED;
-#endif
 
     *pHCPhys = NIL_RTHCPHYS;
@@ -3331 +3234 @@
         return rc;
 
-#ifdef GMM_WITH_LEGACY_MODE
-//    /* Not supported in legacy mode where we allocate the memory in ring 3 and lock it in ring 0. */
-//    if (pGMM->fLegacyAllocationMode)
-//        return VERR_NOT_SUPPORTED;
-#endif
-
     gmmR0MutexAcquire(pGMM);
     if (GMM_CHECK_SANITY_UPON_ENTERING(pGMM))
@@ -3478 +3375 @@
      * This shouldn't happen, so screw lock contention...
      */
-    if (   pChunk->cMappingsX
-#ifdef GMM_WITH_LEGACY_MODE
-        && (!pGMM->fLegacyAllocationMode || (pChunk->fFlags & GMM_CHUNK_FLAGS_LARGE_PAGE))
-#endif
-        && pGVM)
+    if (pChunk->cMappingsX && pGVM)
         gmmR0UnmapChunkLocked(pGMM, pGVM, pChunk);
@@ -3622 +3515 @@
         || pChunk->pFreePrev == NULL /** @todo this is probably misfiring, see reset... */))
     { /* likely */ }
-#ifdef GMM_WITH_LEGACY_MODE
-    else if (RT_LIKELY(pGMM->fLegacyAllocationMode && !(pChunk->fFlags & GMM_CHUNK_FLAGS_LARGE_PAGE)))
-    { /* likely */ }
-#endif
     else
         gmmR0FreeChunk(pGMM, NULL, pChunk, false);
@@ -4150 +4039 @@
 {
     RT_NOREF_PV(pGMM);
-#ifdef GMM_WITH_LEGACY_MODE
-    Assert(!pGMM->fLegacyAllocationMode || (pChunk->fFlags & GMM_CHUNK_FLAGS_LARGE_PAGE));
-#endif
 
     /*
@@ -4198 +4084 @@
 static int gmmR0UnmapChunk(PGMM pGMM, PGVM pGVM, PGMMCHUNK pChunk, bool fRelaxedSem)
 {
-#ifdef GMM_WITH_LEGACY_MODE
-    if (!pGMM->fLegacyAllocationMode || (pChunk->fFlags & GMM_CHUNK_FLAGS_LARGE_PAGE))
-    {
-#endif
-        /*
-         * Lock the chunk and if possible leave the giant GMM lock.
-         */
-        GMMR0CHUNKMTXSTATE MtxState;
-        int rc = gmmR0ChunkMutexAcquire(&MtxState, pGMM, pChunk,
-                                        fRelaxedSem ? GMMR0CHUNK_MTX_RETAKE_GIANT : GMMR0CHUNK_MTX_KEEP_GIANT);
-        if (RT_SUCCESS(rc))
-        {
-            rc = gmmR0UnmapChunkLocked(pGMM, pGVM, pChunk);
-            gmmR0ChunkMutexRelease(&MtxState, pChunk);
-        }
-        return rc;
-#ifdef GMM_WITH_LEGACY_MODE
-    }
-
-    if (pChunk->hGVM == pGVM->hSelf)
-        return VINF_SUCCESS;
-
-    Log(("gmmR0UnmapChunk: Chunk %#x is not mapped into pGVM=%p/%#x (legacy)\n", pChunk->Core.Key, pGVM, pGVM->hSelf));
-    return VERR_GMM_CHUNK_NOT_MAPPED;
-#endif
+    /*
+     * Lock the chunk and if possible leave the giant GMM lock.
+     */
+    GMMR0CHUNKMTXSTATE MtxState;
+    int rc = gmmR0ChunkMutexAcquire(&MtxState, pGMM, pChunk,
+                                    fRelaxedSem ? GMMR0CHUNK_MTX_RETAKE_GIANT : GMMR0CHUNK_MTX_KEEP_GIANT);
+    if (RT_SUCCESS(rc))
+    {
+        rc = gmmR0UnmapChunkLocked(pGMM, pGVM, pChunk);
+        gmmR0ChunkMutexRelease(&MtxState, pChunk);
+    }
+    return rc;
 }
@@ -4239 +4112 @@
 static int gmmR0MapChunkLocked(PGMM pGMM, PGVM pGVM, PGMMCHUNK pChunk, PRTR3PTR ppvR3)
 {
-#ifdef GMM_WITH_LEGACY_MODE
-    /*
-     * If we're in legacy mode this is simple.
-     */
-    if (pGMM->fLegacyAllocationMode && !(pChunk->fFlags & GMM_CHUNK_FLAGS_LARGE_PAGE))
-    {
-        if (pChunk->hGVM != pGVM->hSelf)
-        {
-            Log(("gmmR0MapChunk: chunk %#x is already mapped at %p!\n", pChunk->Core.Key, *ppvR3));
-            return VERR_GMM_CHUNK_NOT_FOUND;
-        }
-
-        *ppvR3 = RTR0MemObjAddressR3(pChunk->hMemObj);
-        return VINF_SUCCESS;
-    }
-#else
     RT_NOREF(pGMM);
-#endif
 
     /*
@@ -4497 +4353 @@
 
     return GMMR0MapUnmapChunk(pGVM, pReq->idChunkMap, pReq->idChunkUnmap, &pReq->pvR3);
-}
-
-
-/**
- * Legacy mode API for supplying pages.
- *
- * The specified user address points to a allocation chunk sized block that
- * will be locked down and used by the GMM when the GM asks for pages.
- *
- * @returns VBox status code.
- * @param   pGVM    The global (ring-0) VM structure.
- * @param   idCpu   The VCPU id.
- * @param   pvR3    Pointer to the chunk size memory block to lock down.
- */
-GMMR0DECL(int) GMMR0SeedChunk(PGVM pGVM, VMCPUID idCpu, RTR3PTR pvR3)
-{
-#ifdef GMM_WITH_LEGACY_MODE
-    /*
-     * Validate input and get the basics.
-     */
-    PGMM pGMM;
-    GMM_GET_VALID_INSTANCE(pGMM, VERR_GMM_INSTANCE);
-    int rc = GVMMR0ValidateGVMandEMT(pGVM, idCpu);
-    if (RT_FAILURE(rc))
-        return rc;
-
-    AssertPtrReturn(pvR3, VERR_INVALID_POINTER);
-    AssertReturn(!(PAGE_OFFSET_MASK & pvR3), VERR_INVALID_POINTER);
-
-    if (!pGMM->fLegacyAllocationMode)
-    {
-        Log(("GMMR0SeedChunk: not in legacy allocation mode!\n"));
-        return VERR_NOT_SUPPORTED;
-    }
-
-    /*
-     * Lock the memory and add it as new chunk with our hGVM.
-     * (The GMM locking is done inside gmmR0RegisterChunk.)
-     */
-    RTR0MEMOBJ hMemObj;
-    rc = RTR0MemObjLockUser(&hMemObj, pvR3, GMM_CHUNK_SIZE, RTMEM_PROT_READ | RTMEM_PROT_WRITE, NIL_RTR0PROCESS);
-    if (RT_SUCCESS(rc))
-    {
-        rc = gmmR0RegisterChunk(pGMM, &pGVM->gmm.s.Private, hMemObj, pGVM->hSelf, pGVM->pSession, GMM_CHUNK_FLAGS_SEEDED, NULL);
-        if (RT_SUCCESS(rc))
-            gmmR0MutexRelease(pGMM);
-        else
-            RTR0MemObjFree(hMemObj, true /* fFreeMappings */);
-    }
-
-    LogFlow(("GMMR0SeedChunk: rc=%d (pvR3=%p)\n", rc, pvR3));
-    return rc;
-#else
-    RT_NOREF(pGVM, idCpu, pvR3);
-    return VERR_NOT_SUPPORTED;
-#endif
 }
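With GMM_WITH_LEGACY_MODE gone, only one chunk-allocation strategy remains in GMMR0.cpp. The following is a minimal sketch of the post-change allocation site, assembled from the added lines of the hunk at old line 2360; the enclosing function and the registration step are abridged here, not quoted verbatim:

    RTR0MEMOBJ hMemObj;
    int        rc;
#ifdef VBOX_WITH_LINEAR_HOST_PHYS_MEM
    /* Prefer a physically non-contiguous ring-0 allocation when GMMR0Init
       determined that the host supports it... */
    if (pGMM->fHasWorkingAllocPhysNC)
        rc = RTR0MemObjAllocPhysNC(&hMemObj, GMM_CHUNK_SIZE, NIL_RTHCPHYS);
    else
#endif
        /* ...otherwise fall back to plain page-level allocation. */
        rc = RTR0MemObjAllocPage(&hMemObj, GMM_CHUNK_SIZE, false /*fExecutable*/);
    if (RT_SUCCESS(rc))
    {
        /* ... hand the memory object on to gmmR0RegisterChunk() ... */
    }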
trunk/src/VBox/VMM/VMMR0/PGMR0.cpp (r92157 → r92248)

@@ -177 +177 @@
             pGVM->pgm.s.cHandyPages = RT_ELEMENTS(pGVM->pgm.s.aHandyPages);
         }
-        else if (rc != VERR_GMM_SEED_ME)
+        else
         {
             if (   (   rc == VERR_GMM_HIT_GLOBAL_LIMIT
…
@@ -232 +232 @@
         }
 
-        if (RT_FAILURE(rc) && rc != VERR_GMM_SEED_ME)
+        if (RT_FAILURE(rc))
         {
             LogRel(("PGMR0PhysAllocateHandyPages: rc=%Rrc iFirst=%d cPages=%d\n", rc, iFirst, cPages));
…
         }
     }
-
 
     LogFlow(("PGMR0PhysAllocateHandyPages: cPages=%d rc=%Rrc\n", cPages, rc));
trunk/src/VBox/VMM/VMMR0/VMMR0.cpp (r92200 → r92248)

@@ -1960 +1960 @@
             break;
 
-        case VMMR0_DO_GMM_SEED_CHUNK:
-            if (pReqHdr)
-                return VERR_INVALID_PARAMETER;
-            rc = GMMR0SeedChunk(pGVM, idCpu, (RTR3PTR)u64Arg);
-            break;
-
         case VMMR0_DO_GMM_REGISTER_SHARED_MODULE:
             if (idCpu == NIL_VMCPUID)
trunk/src/VBox/VMM/VMMR3/GMM.cpp (r82968 → r92248)

@@ -104 +104 @@
 GMMR3DECL(int) GMMR3AllocatePagesPerform(PVM pVM, PGMMALLOCATEPAGESREQ pReq)
 {
-    for (unsigned i = 0; ; i++)
+    int rc = VMMR3CallR0(pVM, VMMR0_DO_GMM_ALLOCATE_PAGES, 0, &pReq->Hdr);
+    if (RT_SUCCESS(rc))
     {
-        int rc = VMMR3CallR0(pVM, VMMR0_DO_GMM_ALLOCATE_PAGES, 0, &pReq->Hdr);
-        if (RT_SUCCESS(rc))
-        {
 #ifdef LOG_ENABLED
-            for (uint32_t iPage = 0; iPage < pReq->cPages; iPage++)
-                Log3(("GMMR3AllocatePagesPerform: idPage=%#x HCPhys=%RHp\n",
-                      pReq->aPages[iPage].idPage, pReq->aPages[iPage].HCPhysGCPhys));
+        for (uint32_t iPage = 0; iPage < pReq->cPages; iPage++)
+            Log3(("GMMR3AllocatePagesPerform: idPage=%#x HCPhys=%RHp\n",
+                  pReq->aPages[iPage].idPage, pReq->aPages[iPage].HCPhysGCPhys));
 #endif
-            return rc;
-        }
-        if (rc != VERR_GMM_SEED_ME)
-            return VMSetError(pVM, rc, RT_SRC_POS,
-                              N_("GMMR0AllocatePages failed to allocate %u pages"),
-                              pReq->cPages);
-        Assert(i < pReq->cPages);
-
-        /*
-         * Seed another chunk.
-         */
-        void *pvChunk;
-        rc = SUPR3PageAlloc(GMM_CHUNK_SIZE >> PAGE_SHIFT, &pvChunk);
-        if (RT_FAILURE(rc))
-            return VMSetError(pVM, rc, RT_SRC_POS,
-                              N_("Out of memory (SUPR3PageAlloc) seeding a %u pages allocation request"),
-                              pReq->cPages);
-
-        rc = VMMR3CallR0(pVM, VMMR0_DO_GMM_SEED_CHUNK, (uintptr_t)pvChunk, NULL);
-        if (RT_FAILURE(rc))
-            return VMSetError(pVM, rc, RT_SRC_POS, N_("GMM seeding failed"));
+        return rc;
     }
+    return VMSetError(pVM, rc, RT_SRC_POS, N_("GMMR0AllocatePages failed to allocate %u pages"), pReq->cPages);
 }
@@ -379 +358 @@
 
 /**
- * @see GMMR0SeedChunk
- */
-GMMR3DECL(int) GMMR3SeedChunk(PVM pVM, RTR3PTR pvR3)
-{
-    return VMMR3CallR0(pVM, VMMR0_DO_GMM_SEED_CHUNK, (uintptr_t)pvR3, NULL);
-}
-
-
-/**
  * @see GMMR0RegisterSharedModule
  */
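Reassembling the added lines of the first hunk, GMMR3AllocatePagesPerform now reads as below; with the seed-and-retry loop gone, a ring-0 allocation failure is turned into a VM error straight away (a reconstruction from the diff, shown for readability):

GMMR3DECL(int) GMMR3AllocatePagesPerform(PVM pVM, PGMMALLOCATEPAGESREQ pReq)
{
    /* Single ring-0 call; no VERR_GMM_SEED_ME handling anymore. */
    int rc = VMMR3CallR0(pVM, VMMR0_DO_GMM_ALLOCATE_PAGES, 0, &pReq->Hdr);
    if (RT_SUCCESS(rc))
    {
#ifdef LOG_ENABLED
        for (uint32_t iPage = 0; iPage < pReq->cPages; iPage++)
            Log3(("GMMR3AllocatePagesPerform: idPage=%#x HCPhys=%RHp\n",
                  pReq->aPages[iPage].idPage, pReq->aPages[iPage].HCPhysGCPhys));
#endif
        return rc;
    }
    return VMSetError(pVM, rc, RT_SRC_POS, N_("GMMR0AllocatePages failed to allocate %u pages"), pReq->cPages);
}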
trunk/src/VBox/VMM/VMMR3/PGM.cpp (r92186 → r92248)

@@ -251 +251 @@
  * @section sec_pgmPhys_Definitions  Definitions
  *
- * Allocation chunk - A RTR0MemObjAllocPhysNC object and the tracking
- *                    machinery associated with it.
+ * Allocation chunk - A RTR0MemObjAllocPhysNC or RTR0MemObjAllocPhys allocate
+ *                    memory object and the tracking machinery associated with it.
@@ -581 +581 @@
  *      -# Do the read/write according to monitoring flags and everything.
  *      -# Leave the critsect.
- *
- *
- * @section sec_pgmPhys_Fallback  Fallback
- *
- * Current all the "second tier" hosts will not support the RTR0MemObjAllocPhysNC
- * API and thus require a fallback.
- *
- * So, when RTR0MemObjAllocPhysNC returns VERR_NOT_SUPPORTED the page allocator
- * will return to the ring-3 caller (and later ring-0) and asking it to seed
- * the page allocator with some fresh pages (VERR_GMM_SEED_ME). Ring-3 will
- * then perform an SUPR3PageAlloc(cbChunk >> PAGE_SHIFT) call and make a
- * "SeededAllocPages" call to ring-0.
- *
- * The first time ring-0 sees the VERR_NOT_SUPPORTED failure it will disable
- * all page sharing (zero page detection will continue). It will also force
- * all allocations to come from the VM which seeded the page. Both these
- * measures are taken to make sure that there will never be any need for
- * mapping anything into ring-3 - everything will be mapped already.
- *
- * Whether we'll continue to use the current MM locked memory management
- * for this I don't quite know (I'd prefer not to and just ditch that all
- * together), we'll see what's simplest to do.
- *
  *
  *
trunk/src/VBox/VMM/VMMR3/PGMPhys.cpp (r92218 → r92248)

@@ -5962 +5962 @@
     int rcSeed  = VINF_SUCCESS;
     int rc = VMMR3CallR0(pVM, VMMR0_DO_PGM_ALLOCATE_HANDY_PAGES, 0, NULL);
-    while (rc == VERR_GMM_SEED_ME)
-    {
-        void *pvChunk;
-        rcAlloc = rc = SUPR3PageAlloc(GMM_CHUNK_SIZE >> PAGE_SHIFT, &pvChunk);
-        if (RT_SUCCESS(rc))
-        {
-            rcSeed = rc = VMMR3CallR0(pVM, VMMR0_DO_GMM_SEED_CHUNK, (uintptr_t)pvChunk, NULL);
-            if (RT_FAILURE(rc))
-                SUPR3PageFree(pvChunk, GMM_CHUNK_SIZE >> PAGE_SHIFT);
-        }
-        if (RT_SUCCESS(rc))
-            rc = VMMR3CallR0(pVM, VMMR0_DO_PGM_ALLOCATE_HANDY_PAGES, 0, NULL);
-    }
-
     /** @todo we should split this up into an allocate and flush operation. sometimes you want to flush and not allocate more (which will trigger the vm account limit error) */
     if (   rc == VERR_GMM_HIT_VM_ACCOUNT_LIMIT
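The ring-3 handy-page path shrinks the same way: the SUPR3PageAlloc/VMMR0_DO_GMM_SEED_CHUNK retry loop disappears and a single ring-0 call remains. A sketch of the surviving code, with the pre-existing error handling abridged:

    int rc = VMMR3CallR0(pVM, VMMR0_DO_PGM_ALLOCATE_HANDY_PAGES, 0, NULL);
    if (rc == VERR_GMM_HIT_VM_ACCOUNT_LIMIT)
    {
        /* ... error handling unchanged by this commit ... */
    }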