VirtualBox

Changeset 92248 in vbox for trunk


Timestamp: Nov 6, 2021 3:21:57 PM
Author: vboxsync
Message: VMM/GMM: Removed all the legacy mode code (disabled everywhere since r146982). bugref:10093

Location: trunk
Files: 9 edited

Legend: unchanged context lines carry no prefix, removed lines are prefixed with "-", added lines with "+", and lines containing only "…" mark elided, unchanged code between hunks.
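The mechanism retired here was a ring-3/ring-0 handshake: when ring 0 could not allocate chunk memory itself, it returned VERR_GMM_SEED_ME and ring 3 donated a chunk-sized block for ring 0 to lock down. The sketch below condenses that loop from the ring-3 code removed in the GMM.cpp and PGMPhys.cpp hunks further down (a reconstruction for readability, not a verbatim extract; error handling trimmed):

    /* Legacy seeding loop, ring-3 side (condensed from the removed code). */
    int rc = VMMR3CallR0(pVM, VMMR0_DO_GMM_ALLOCATE_PAGES, 0, &pReq->Hdr);
    while (rc == VERR_GMM_SEED_ME)
    {
        void *pvChunk;                      /* chunk-sized ring-3 block */
        rc = SUPR3PageAlloc(GMM_CHUNK_SIZE >> PAGE_SHIFT, &pvChunk);
        if (RT_SUCCESS(rc))
            /* Hand the block to ring 0 to lock down and carve pages from. */
            rc = VMMR3CallR0(pVM, VMMR0_DO_GMM_SEED_CHUNK, (uintptr_t)pvChunk, NULL);
        if (RT_SUCCESS(rc))
            /* Retry the allocation that ran dry. */
            rc = VMMR3CallR0(pVM, VMMR0_DO_GMM_ALLOCATE_PAGES, 0, &pReq->Hdr);
    }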
  • trunk/include/VBox/err.h (r92170 → r92248)

       * @{
       */
    - /** The GMM is out of pages and needs to be give another chunk of user memory that
    -  * it can lock down and borrow pages from. */
    - #define VERR_GMM_SEED_ME                            (-3800)
      /** Unable to allocate more pages from the host system. */
      #define VERR_GMM_OUT_OF_MEMORY                      (-3801)
  • trunk/include/VBox/vmm/gmm.h (r82989 → r92248)

      GMMR0DECL(int)  GMMR0BalloonedPages(PGVM pGVM, VMCPUID idCpu, GMMBALLOONACTION enmAction, uint32_t cBalloonedPages);
      GMMR0DECL(int)  GMMR0MapUnmapChunk(PGVM pGVM, uint32_t idChunkMap, uint32_t idChunkUnmap, PRTR3PTR ppvR3);
    - GMMR0DECL(int)  GMMR0SeedChunk(PGVM pGVM, VMCPUID idCpu, RTR3PTR pvR3);
      GMMR0DECL(int)  GMMR0PageIdToVirt(PGVM pGVM, uint32_t idPage, void **ppv);
      GMMR0DECL(int)  GMMR0RegisterSharedModule(PGVM pGVM, VMCPUID idCpu, VBOXOSFAMILY enmGuestOS, char *pszModuleName,

    …

      GMMR3DECL(int)  GMMR3FreeLargePage(PVM pVM,  uint32_t idPage);
      GMMR3DECL(int)  GMMR3MapUnmapChunk(PVM pVM, uint32_t idChunkMap, uint32_t idChunkUnmap, PRTR3PTR ppvR3);
    - GMMR3DECL(int)  GMMR3SeedChunk(PVM pVM, RTR3PTR pvR3);
      GMMR3DECL(int)  GMMR3QueryHypervisorMemoryStats(PVM pVM, uint64_t *pcTotalAllocPages, uint64_t *pcTotalFreePages, uint64_t *pcTotalBalloonPages, uint64_t *puTotalBalloonSize);
      GMMR3DECL(int)  GMMR3QueryMemoryStats(PVM pVM, uint64_t *pcAllocPages, uint64_t *pcMaxPages, uint64_t *pcBalloonPages);
  • trunk/include/VBox/vmm/vmm.h (r92200 → r92248)

          /** Call GMMR0MapUnmapChunk(). */
          VMMR0_DO_GMM_MAP_UNMAP_CHUNK,
    -     /** Call GMMR0SeedChunk(). */
    -     VMMR0_DO_GMM_SEED_CHUNK,
          /** Call GMMR0RegisterSharedModule. */
          VMMR0_DO_GMM_REGISTER_SHARED_MODULE,
  • trunk/src/VBox/VMM/VMMR0/GMMR0.cpp (r92202 → r92248)

      #if defined(RT_OS_WINDOWS) || defined(RT_OS_DARWIN) || defined(DOXYGEN_RUNNING)
      # define VBOX_USE_CRIT_SECT_FOR_GIANT
    - #endif
    -
    - #if defined(VBOX_WITH_LINEAR_HOST_PHYS_MEM) && !defined(RT_OS_DARWIN) && 0
    - /** Enable the legacy mode code (will be dropped soon). */
    - # define GMM_WITH_LEGACY_MODE
      #endif

    …

      /** Indicates that the chunk is a large page (2MB). */
      #define GMM_CHUNK_FLAGS_LARGE_PAGE  UINT16_C(0x0001)
    - #ifdef GMM_WITH_LEGACY_MODE
    - /** Indicates that the chunk was locked rather than allocated directly. */
    - # define GMM_CHUNK_FLAGS_SEEDED     UINT16_C(0x0002)
    - #endif
      /** @}  */

    …

          uint64_t            cBalloonedPages;

    - #ifndef GMM_WITH_LEGACY_MODE
    - # ifdef VBOX_WITH_LINEAR_HOST_PHYS_MEM
    + #ifdef VBOX_WITH_LINEAR_HOST_PHYS_MEM
          /** Whether #RTR0MemObjAllocPhysNC works.   */
          bool                fHasWorkingAllocPhysNC;
    - # else
    + #else
          bool                fPadding;
    - # endif
    - #else
    -     /** The legacy allocation mode indicator.
    -      * This is determined at initialization time. */
    -     bool                fLegacyAllocationMode;
      #endif
          /** The bound memory mode indicator.

    …

              if (RT_SUCCESS(rc))
              {
    - #ifndef GMM_WITH_LEGACY_MODE
                  /*
                   * Figure out how we're going to allocate stuff (only applicable to
    …
                   */
                  pGMM->fBoundMemoryMode = false;
    - # ifdef VBOX_WITH_LINEAR_HOST_PHYS_MEM
    + #ifdef VBOX_WITH_LINEAR_HOST_PHYS_MEM
                  pGMM->fHasWorkingAllocPhysNC = false;

    …

                      SUPR0Printf("GMMR0Init: Warning! RTR0MemObjAllocPhysNC(, %u, NIL_RTHCPHYS) -> %d!\n", GMM_CHUNK_SIZE, rc);
      # endif
    - #else /* GMM_WITH_LEGACY_MODE */
    -             /*
    -              * Check and see if RTR0MemObjAllocPhysNC works.
    -              */
    - # if 0 /* later, see @bufref{3170}. */
    -             RTR0MEMOBJ MemObj;
    -             rc = RTR0MemObjAllocPhysNC(&MemObj, _64K, NIL_RTHCPHYS);
    -             if (RT_SUCCESS(rc))
    -             {
    -                 rc = RTR0MemObjFree(MemObj, true);
    -                 AssertRC(rc);
    -             }
    -             else if (rc == VERR_NOT_SUPPORTED)
    -                 pGMM->fLegacyAllocationMode = pGMM->fBoundMemoryMode = true;
    -             else
    -                 SUPR0Printf("GMMR0Init: RTR0MemObjAllocPhysNC(,64K,Any) -> %d!\n", rc);
    - # else
    - #  if defined(RT_OS_WINDOWS) || (defined(RT_OS_SOLARIS) && ARCH_BITS == 64) || defined(RT_OS_LINUX) || defined(RT_OS_FREEBSD)
    -             pGMM->fLegacyAllocationMode = false;
    - #   if ARCH_BITS == 32
    -             /* Don't reuse possibly partial chunks because of the virtual
    -                address space limitation. */
    -             pGMM->fBoundMemoryMode      = true;
    -   #   else
    -             pGMM->fBoundMemoryMode      = false;
    - #   endif
    - #  else
    -             pGMM->fLegacyAllocationMode = true;
    -             pGMM->fBoundMemoryMode      = true;
    - #  endif
    - # endif
    - #endif /* GMM_WITH_LEGACY_MODE */

                  /*

    …

                  g_pGMM = pGMM;
    - #ifdef GMM_WITH_LEGACY_MODE
    -             LogFlow(("GMMInit: pGMM=%p fLegacyAllocationMode=%RTbool fBoundMemoryMode=%RTbool\n", pGMM, pGMM->fLegacyAllocationMode, pGMM->fBoundMemoryMode));
    - #elif defined(VBOX_WITH_LINEAR_HOST_PHYS_MEM)
    + #ifdef VBOX_WITH_LINEAR_HOST_PHYS_MEM
                  LogFlow(("GMMInit: pGMM=%p fBoundMemoryMode=%RTbool fHasWorkingAllocPhysNC=%RTbool\n", pGMM, pGMM->fBoundMemoryMode, pGMM->fHasWorkingAllocPhysNC));
      #else

    …

       * Registers a new chunk of memory.
       *
    -  * This is called by both gmmR0AllocateOneChunk and GMMR0SeedChunk.
    +  * This is called by gmmR0AllocateOneChunk.
       *
       * @returns VBox status code.  On success, the giant GMM lock will be held, the

    …

          Assert(pGMM->hMtxOwner != RTThreadNativeSelf());
          Assert(hGVM != NIL_GVM_HANDLE || pGMM->fBoundMemoryMode);
    - #ifdef GMM_WITH_LEGACY_MODE
    -     Assert(fChunkFlags == 0 || fChunkFlags == GMM_CHUNK_FLAGS_LARGE_PAGE || fChunkFlags == GMM_CHUNK_FLAGS_SEEDED);
    - #else
          Assert(fChunkFlags == 0 || fChunkFlags == GMM_CHUNK_FLAGS_LARGE_PAGE);
    - #endif

      #ifndef VBOX_WITH_LINEAR_HOST_PHYS_MEM

    …

           * Get a ring-0 mapping of the object.
           */
    - # ifdef GMM_WITH_LEGACY_MODE
    -     uint8_t *pbMapping = !(fChunkFlags & GMM_CHUNK_FLAGS_SEEDED) ? (uint8_t *)RTR0MemObjAddress(hMemObj) : NULL;
    - # else
          uint8_t *pbMapping = (uint8_t *)RTR0MemObjAddress(hMemObj);
    - # endif
          if (!pbMapping)
          {

    …

          RTR0MEMOBJ hMemObj;
    - #ifndef GMM_WITH_LEGACY_MODE
          int rc;
    - # ifdef VBOX_WITH_LINEAR_HOST_PHYS_MEM
    + #ifdef VBOX_WITH_LINEAR_HOST_PHYS_MEM
          if (pGMM->fHasWorkingAllocPhysNC)
              rc = RTR0MemObjAllocPhysNC(&hMemObj, GMM_CHUNK_SIZE, NIL_RTHCPHYS);
          else
    - # endif
    + #endif
              rc = RTR0MemObjAllocPage(&hMemObj, GMM_CHUNK_SIZE, false /*fExecutable*/);
    - #else
    -     int rc = RTR0MemObjAllocPhysNC(&hMemObj, GMM_CHUNK_SIZE, NIL_RTHCPHYS);
    - #endif
          if (RT_SUCCESS(rc))
          {

    …

       * @returns VBox status code:
       * @retval  VINF_SUCCESS on success.
    -  * @retval  VERR_GMM_SEED_ME if seeding via GMMR0SeedChunk or
    -  *          gmmR0AllocateMoreChunks is necessary.
       * @retval  VERR_GMM_HIT_GLOBAL_LIMIT if we've exhausted the available pages.
       * @retval  VERR_GMM_HIT_VM_ACCOUNT_LIMIT if we've hit the VM account limit,

    …

          }

    - #ifdef GMM_WITH_LEGACY_MODE
    -     /*
    -      * If we're in legacy memory mode, it's easy to figure if we have
    -      * sufficient number of pages up-front.
    -      */
    -     if (   pGMM->fLegacyAllocationMode
    -         && pGVM->gmm.s.Private.cFreePages < cPages)
    -     {
    -         Assert(pGMM->fBoundMemoryMode);
    -         return VERR_GMM_SEED_ME;
    -     }
    - #endif
    -
          /*
           * Update the accounts before we proceed because we might be leaving the

    …

          pGVM->gmm.s.Stats.cPrivatePages += cPages;
          pGMM->cAllocatedPages           += cPages;
    -
    - #ifdef GMM_WITH_LEGACY_MODE
    -     /*
    -      * Part two of it's-easy-in-legacy-memory-mode.
    -      */
    -     if (pGMM->fLegacyAllocationMode)
    -     {
    -         uint32_t iPage = gmmR0AllocatePagesInBoundMode(pGVM, 0, cPages, paPages);
    -         AssertReleaseReturn(iPage == cPages, VERR_GMM_ALLOC_PAGES_IPE);
    -         return VINF_SUCCESS;
    -     }
    - #endif

          /*

    …

       * @retval  VERR_GMM_NOT_PAGE_OWNER if one of the pages to be updated wasn't
       *          owned by the VM.
    -  * @retval  VERR_GMM_SEED_ME if seeding via GMMR0SeedChunk is necessary.
       * @retval  VERR_GMM_HIT_GLOBAL_LIMIT if we've exhausted the available pages.
       * @retval  VERR_GMM_HIT_VM_ACCOUNT_LIMIT if we've hit the VM account limit,

    …

       * @retval  VINF_SUCCESS on success.
       * @retval  VERR_NOT_OWNER if the caller is not an EMT.
    -  * @retval  VERR_GMM_SEED_ME if seeding via GMMR0SeedChunk is necessary.
       * @retval  VERR_GMM_HIT_GLOBAL_LIMIT if we've exhausted the available pages.
       * @retval  VERR_GMM_HIT_VM_ACCOUNT_LIMIT if we've hit the VM account limit,

    …

          if (RT_FAILURE(rc))
              return rc;
    -
    - #ifdef GMM_WITH_LEGACY_MODE
    -     // /* Not supported in legacy mode where we allocate the memory in ring 3 and lock it in ring 0. */
    -     // if (pGMM->fLegacyAllocationMode)
    -     //     return VERR_NOT_SUPPORTED;
    - #endif

          *pHCPhys = NIL_RTHCPHYS;

    …

              return rc;

    - #ifdef GMM_WITH_LEGACY_MODE
    -     // /* Not supported in legacy mode where we allocate the memory in ring 3 and lock it in ring 0. */
    -     // if (pGMM->fLegacyAllocationMode)
    -     //     return VERR_NOT_SUPPORTED;
    - #endif
    -
          gmmR0MutexAcquire(pGMM);
          if (GMM_CHECK_SANITY_UPON_ENTERING(pGMM))

    …

           * This shouldn't happen, so screw lock contention...
           */
    -     if (    pChunk->cMappingsX
    - #ifdef GMM_WITH_LEGACY_MODE
    -         &&  (!pGMM->fLegacyAllocationMode || (pChunk->fFlags & GMM_CHUNK_FLAGS_LARGE_PAGE))
    - #endif
    -         &&  pGVM)
    +     if (pChunk->cMappingsX && pGVM)
              gmmR0UnmapChunkLocked(pGMM, pGVM, pChunk);

    …

                    || pChunk->pFreePrev == NULL /** @todo this is probably misfiring, see reset... */))
          { /* likely */ }
    - #ifdef GMM_WITH_LEGACY_MODE
    -     else if (RT_LIKELY(pGMM->fLegacyAllocationMode && !(pChunk->fFlags & GMM_CHUNK_FLAGS_LARGE_PAGE)))
    -     { /* likely */ }
    - #endif
          else
              gmmR0FreeChunk(pGMM, NULL, pChunk, false);

    …

      {
          RT_NOREF_PV(pGMM);
    - #ifdef GMM_WITH_LEGACY_MODE
    -     Assert(!pGMM->fLegacyAllocationMode || (pChunk->fFlags & GMM_CHUNK_FLAGS_LARGE_PAGE));
    - #endif

          /*

    …

      static int gmmR0UnmapChunk(PGMM pGMM, PGVM pGVM, PGMMCHUNK pChunk, bool fRelaxedSem)
      {
    - #ifdef GMM_WITH_LEGACY_MODE
    -     if (!pGMM->fLegacyAllocationMode || (pChunk->fFlags & GMM_CHUNK_FLAGS_LARGE_PAGE))
    -     {
    - #endif
    -         /*
    -          * Lock the chunk and if possible leave the giant GMM lock.
    -          */
    -         GMMR0CHUNKMTXSTATE MtxState;
    -         int rc = gmmR0ChunkMutexAcquire(&MtxState, pGMM, pChunk,
    -                                         fRelaxedSem ? GMMR0CHUNK_MTX_RETAKE_GIANT : GMMR0CHUNK_MTX_KEEP_GIANT);
    -         if (RT_SUCCESS(rc))
    -         {
    -             rc = gmmR0UnmapChunkLocked(pGMM, pGVM, pChunk);
    -             gmmR0ChunkMutexRelease(&MtxState, pChunk);
    -         }
    -         return rc;
    - #ifdef GMM_WITH_LEGACY_MODE
    -     }
    -
    -     if (pChunk->hGVM == pGVM->hSelf)
    -         return VINF_SUCCESS;
    -
    -     Log(("gmmR0UnmapChunk: Chunk %#x is not mapped into pGVM=%p/%#x (legacy)\n", pChunk->Core.Key, pGVM, pGVM->hSelf));
    -     return VERR_GMM_CHUNK_NOT_MAPPED;
    - #endif
    +     /*
    +      * Lock the chunk and if possible leave the giant GMM lock.
    +      */
    +     GMMR0CHUNKMTXSTATE MtxState;
    +     int rc = gmmR0ChunkMutexAcquire(&MtxState, pGMM, pChunk,
    +                                     fRelaxedSem ? GMMR0CHUNK_MTX_RETAKE_GIANT : GMMR0CHUNK_MTX_KEEP_GIANT);
    +     if (RT_SUCCESS(rc))
    +     {
    +         rc = gmmR0UnmapChunkLocked(pGMM, pGVM, pChunk);
    +         gmmR0ChunkMutexRelease(&MtxState, pChunk);
    +     }
    +     return rc;
      }

    …

      static int gmmR0MapChunkLocked(PGMM pGMM, PGVM pGVM, PGMMCHUNK pChunk, PRTR3PTR ppvR3)
      {
    - #ifdef GMM_WITH_LEGACY_MODE
    -     /*
    -      * If we're in legacy mode this is simple.
    -      */
    -     if (pGMM->fLegacyAllocationMode && !(pChunk->fFlags & GMM_CHUNK_FLAGS_LARGE_PAGE))
    -     {
    -         if (pChunk->hGVM != pGVM->hSelf)
    -         {
    -             Log(("gmmR0MapChunk: chunk %#x is already mapped at %p!\n", pChunk->Core.Key, *ppvR3));
    -             return VERR_GMM_CHUNK_NOT_FOUND;
    -         }
    -
    -         *ppvR3 = RTR0MemObjAddressR3(pChunk->hMemObj);
    -         return VINF_SUCCESS;
    -     }
    - #else
          RT_NOREF(pGMM);
    - #endif

          /*

    …

          return GMMR0MapUnmapChunk(pGVM, pReq->idChunkMap, pReq->idChunkUnmap, &pReq->pvR3);
    - }
    -
    -
    - /**
    -  * Legacy mode API for supplying pages.
    -  *
    -  * The specified user address points to a allocation chunk sized block that
    -  * will be locked down and used by the GMM when the GM asks for pages.
    -  *
    -  * @returns VBox status code.
    -  * @param   pGVM        The global (ring-0) VM structure.
    -  * @param   idCpu       The VCPU id.
    -  * @param   pvR3        Pointer to the chunk size memory block to lock down.
    -  */
    - GMMR0DECL(int) GMMR0SeedChunk(PGVM pGVM, VMCPUID idCpu, RTR3PTR pvR3)
    - {
    - #ifdef GMM_WITH_LEGACY_MODE
    -     /*
    -      * Validate input and get the basics.
    -      */
    -     PGMM pGMM;
    -     GMM_GET_VALID_INSTANCE(pGMM, VERR_GMM_INSTANCE);
    -     int rc = GVMMR0ValidateGVMandEMT(pGVM, idCpu);
    -     if (RT_FAILURE(rc))
    -         return rc;
    -
    -     AssertPtrReturn(pvR3, VERR_INVALID_POINTER);
    -     AssertReturn(!(PAGE_OFFSET_MASK & pvR3), VERR_INVALID_POINTER);
    -
    -     if (!pGMM->fLegacyAllocationMode)
    -     {
    -         Log(("GMMR0SeedChunk: not in legacy allocation mode!\n"));
    -         return VERR_NOT_SUPPORTED;
    -     }
    -
    -     /*
    -      * Lock the memory and add it as new chunk with our hGVM.
    -      * (The GMM locking is done inside gmmR0RegisterChunk.)
    -      */
    -     RTR0MEMOBJ hMemObj;
    -     rc = RTR0MemObjLockUser(&hMemObj, pvR3, GMM_CHUNK_SIZE, RTMEM_PROT_READ | RTMEM_PROT_WRITE, NIL_RTR0PROCESS);
    -     if (RT_SUCCESS(rc))
    -     {
    -         rc = gmmR0RegisterChunk(pGMM, &pGVM->gmm.s.Private, hMemObj, pGVM->hSelf, pGVM->pSession, GMM_CHUNK_FLAGS_SEEDED, NULL);
    -         if (RT_SUCCESS(rc))
    -             gmmR0MutexRelease(pGMM);
    -         else
    -             RTR0MemObjFree(hMemObj, true /* fFreeMappings */);
    -     }
    -
    -     LogFlow(("GMMR0SeedChunk: rc=%d (pvR3=%p)\n", rc, pvR3));
    -     return rc;
    - #else
    -     RT_NOREF(pGVM, idCpu, pvR3);
    -     return VERR_NOT_SUPPORTED;
    - #endif
      }

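With the legacy branches gone, the chunk allocation in the hunk above reduces to a single decision between two host allocators. A condensed view of the surviving path, reassembled from the context and added lines (the chunk registration via gmmR0RegisterChunk and the error handling that follow are elided):

    RTR0MEMOBJ hMemObj;
    int rc;
    #ifdef VBOX_WITH_LINEAR_HOST_PHYS_MEM
    /* Prefer non-contiguous physical memory where RTR0MemObjAllocPhysNC works. */
    if (pGMM->fHasWorkingAllocPhysNC)
        rc = RTR0MemObjAllocPhysNC(&hMemObj, GMM_CHUNK_SIZE, NIL_RTHCPHYS);
    else
    #endif
        rc = RTR0MemObjAllocPage(&hMemObj, GMM_CHUNK_SIZE, false /*fExecutable*/);
    /* On success the new chunk is registered with the GMM. */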
  • trunk/src/VBox/VMM/VMMR0/PGMR0.cpp (r92157 → r92248)

              pGVM->pgm.s.cHandyPages = RT_ELEMENTS(pGVM->pgm.s.aHandyPages);
          }
    -     else if (rc != VERR_GMM_SEED_ME)
    +     else
          {
              if (    (   rc == VERR_GMM_HIT_GLOBAL_LIMIT

    …

              }

    -         if (RT_FAILURE(rc) && rc != VERR_GMM_SEED_ME)
    +         if (RT_FAILURE(rc))
              {
                  LogRel(("PGMR0PhysAllocateHandyPages: rc=%Rrc iFirst=%d cPages=%d\n", rc, iFirst, cPages));

    …

              }
          }
    -

          LogFlow(("PGMR0PhysAllocateHandyPages: cPages=%d rc=%Rrc\n", cPages, rc));
  • trunk/src/VBox/VMM/VMMR0/VMMR0.cpp (r92200 → r92248)

                  break;

    -         case VMMR0_DO_GMM_SEED_CHUNK:
    -             if (pReqHdr)
    -                 return VERR_INVALID_PARAMETER;
    -             rc = GMMR0SeedChunk(pGVM, idCpu, (RTR3PTR)u64Arg);
    -             break;
    -
              case VMMR0_DO_GMM_REGISTER_SHARED_MODULE:
                  if (idCpu == NIL_VMCPUID)
  • trunk/src/VBox/VMM/VMMR3/GMM.cpp (r82968 → r92248)

      GMMR3DECL(int) GMMR3AllocatePagesPerform(PVM pVM, PGMMALLOCATEPAGESREQ pReq)
      {
    -     for (unsigned i = 0; ; i++)
    +     int rc = VMMR3CallR0(pVM, VMMR0_DO_GMM_ALLOCATE_PAGES, 0, &pReq->Hdr);
    +     if (RT_SUCCESS(rc))
          {
    -         int rc = VMMR3CallR0(pVM, VMMR0_DO_GMM_ALLOCATE_PAGES, 0, &pReq->Hdr);
    -         if (RT_SUCCESS(rc))
    -         {
      #ifdef LOG_ENABLED
    -             for (uint32_t iPage = 0; iPage < pReq->cPages; iPage++)
    -                 Log3(("GMMR3AllocatePagesPerform: idPage=%#x HCPhys=%RHp\n",
    -                       pReq->aPages[iPage].idPage, pReq->aPages[iPage].HCPhysGCPhys));
    +         for (uint32_t iPage = 0; iPage < pReq->cPages; iPage++)
    +             Log3(("GMMR3AllocatePagesPerform: idPage=%#x HCPhys=%RHp\n",
    +                   pReq->aPages[iPage].idPage, pReq->aPages[iPage].HCPhysGCPhys));
      #endif
    -             return rc;
    -         }
    -         if (rc != VERR_GMM_SEED_ME)
    -             return VMSetError(pVM, rc, RT_SRC_POS,
    -                               N_("GMMR0AllocatePages failed to allocate %u pages"),
    -                               pReq->cPages);
    -         Assert(i < pReq->cPages);
    -
    -         /*
    -          * Seed another chunk.
    -          */
    -         void *pvChunk;
    -         rc = SUPR3PageAlloc(GMM_CHUNK_SIZE >> PAGE_SHIFT, &pvChunk);
    -         if (RT_FAILURE(rc))
    -             return VMSetError(pVM, rc, RT_SRC_POS,
    -                               N_("Out of memory (SUPR3PageAlloc) seeding a %u pages allocation request"),
    -                               pReq->cPages);
    -
    -         rc = VMMR3CallR0(pVM, VMMR0_DO_GMM_SEED_CHUNK, (uintptr_t)pvChunk, NULL);
    -         if (RT_FAILURE(rc))
    -             return VMSetError(pVM, rc, RT_SRC_POS, N_("GMM seeding failed"));
    +         return rc;
          }
    +     return VMSetError(pVM, rc, RT_SRC_POS, N_("GMMR0AllocatePages failed to allocate %u pages"), pReq->cPages);
      }

    …

      /**
    -  * @see GMMR0SeedChunk
    -  */
    - GMMR3DECL(int)  GMMR3SeedChunk(PVM pVM, RTR3PTR pvR3)
    - {
    -     return VMMR3CallR0(pVM, VMMR0_DO_GMM_SEED_CHUNK, (uintptr_t)pvR3, NULL);
    - }
    -
    -
    - /**
       * @see GMMR0RegisterSharedModule
       */
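Pieced together from the added lines in the first hunk above, the post-change GMMR3AllocatePagesPerform reads approximately as follows (reassembled here for readability, not an authoritative listing):

    GMMR3DECL(int) GMMR3AllocatePagesPerform(PVM pVM, PGMMALLOCATEPAGESREQ pReq)
    {
        /* A single ring-0 call replaces the old seed-and-retry loop. */
        int rc = VMMR3CallR0(pVM, VMMR0_DO_GMM_ALLOCATE_PAGES, 0, &pReq->Hdr);
        if (RT_SUCCESS(rc))
        {
    #ifdef LOG_ENABLED
            for (uint32_t iPage = 0; iPage < pReq->cPages; iPage++)
                Log3(("GMMR3AllocatePagesPerform: idPage=%#x HCPhys=%RHp\n",
                      pReq->aPages[iPage].idPage, pReq->aPages[iPage].HCPhysGCPhys));
    #endif
            return rc;
        }
        return VMSetError(pVM, rc, RT_SRC_POS,
                          N_("GMMR0AllocatePages failed to allocate %u pages"), pReq->cPages);
    }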
  • trunk/src/VBox/VMM/VMMR3/PGM.cpp (r92186 → r92248)

       * @section sec_pgmPhys_Definitions       Definitions
       *
    -  * Allocation chunk - A RTR0MemObjAllocPhysNC object and the tracking
    -  * machinery associated with it.
    +  * Allocation chunk - A RTR0MemObjAllocPhysNC or RTR0MemObjAllocPhys allocate
    +  * memory object and the tracking machinery associated with it.
       *
       *

    …

       *      -# Do the read/write according to monitoring flags and everything.
       *      -# Leave the critsect.
    -  *
    -  *
    -  * @section sec_pgmPhys_Fallback            Fallback
    -  *
    -  * Current all the "second tier" hosts will not support the RTR0MemObjAllocPhysNC
    -  * API and thus require a fallback.
    -  *
    -  * So, when RTR0MemObjAllocPhysNC returns VERR_NOT_SUPPORTED the page allocator
    -  * will return to the ring-3 caller (and later ring-0) and asking it to seed
    -  * the page allocator with some fresh pages (VERR_GMM_SEED_ME). Ring-3 will
    -  * then perform an SUPR3PageAlloc(cbChunk >> PAGE_SHIFT) call and make a
    -  * "SeededAllocPages" call to ring-0.
    -  *
    -  * The first time ring-0 sees the VERR_NOT_SUPPORTED failure it will disable
    -  * all page sharing (zero page detection will continue). It will also force
    -  * all allocations to come from the VM which seeded the page. Both these
    -  * measures are taken to make sure that there will never be any need for
    -  * mapping anything into ring-3 - everything will be mapped already.
    -  *
    -  * Whether we'll continue to use the current MM locked memory management
    -  * for this I don't quite know (I'd prefer not to and just ditch that all
    -  * together), we'll see what's simplest to do.
    -  *
       *
       *
  • trunk/src/VBox/VMM/VMMR3/PGMPhys.cpp (r92218 → r92248)

          int rcSeed  = VINF_SUCCESS;
          int rc = VMMR3CallR0(pVM, VMMR0_DO_PGM_ALLOCATE_HANDY_PAGES, 0, NULL);
    -     while (rc == VERR_GMM_SEED_ME)
    -     {
    -         void *pvChunk;
    -         rcAlloc = rc = SUPR3PageAlloc(GMM_CHUNK_SIZE >> PAGE_SHIFT, &pvChunk);
    -         if (RT_SUCCESS(rc))
    -         {
    -             rcSeed = rc = VMMR3CallR0(pVM, VMMR0_DO_GMM_SEED_CHUNK, (uintptr_t)pvChunk, NULL);
    -             if (RT_FAILURE(rc))
    -                 SUPR3PageFree(pvChunk, GMM_CHUNK_SIZE >> PAGE_SHIFT);
    -         }
    -         if (RT_SUCCESS(rc))
    -             rc = VMMR3CallR0(pVM, VMMR0_DO_PGM_ALLOCATE_HANDY_PAGES, 0, NULL);
    -     }
    -
          /** @todo we should split this up into an allocate and flush operation. sometimes you want to flush and not allocate more (which will trigger the vm account limit error) */
          if (    rc == VERR_GMM_HIT_VM_ACCOUNT_LIMIT