VirtualBox

Changeset 37251 in vbox


Timestamp: May 30, 2011 10:54:45 AM
Author: vboxsync
Message: GMMR0: Removed unused code and moved some functions around.
File: 1 edited

Legend:

Unmodified: shown with both the r37250 and r37251 line numbers
Added: shown with only an r37251 line number
Removed: shown with only an r37250 line number
  • trunk/src/VBox/VMM/VMMR0/GMMR0.cpp

    r37250 r37251  
    588 588
    589 589
    590 /**
    591  * Page allocation strategy sketches.
    592  */
    593 typedef struct GMMR0ALLOCPAGESTRATEGY
    594 {
    595     uint32_t cTries;
    596 #if 0
    597     typedef enum GMMR0ALLOCPAGESTRATEGY
    598     {
    599         kGMMR0AllocPageStrategy_Invalid = 0,
    600         kGMMR0AllocPageStrategy_VM,
    601         kGMMR0AllocPageStrategy_NumaNode,
    602         kGMMR0AllocPageStrategy_AnythingGoes,
    603         kGMMR0AllocPageStrategy_End
    604     } GMMR0ALLOCPAGESTRATEGY;
    605 #endif
    606 } GMMR0ALLOCPAGESTRATEGY;
    607 /** Pointer to a page allocation strategy structure. */
    608 typedef GMMR0ALLOCPAGESTRATEGY *PGMMR0ALLOCPAGESTRATEGY;
    609 
    610 
    611 590 /*******************************************************************************
    612 591 *   Global Variables                                                           *
     
    1877 1856
    1878 1857
    1879 
    1880 1858 /**
    1881 1859  * Frees a Chunk ID.
     
    1937 1915
    1938 1916    return pGMM->idChunkPrev = idChunk;
     1917}
     1918
     1919
     1920/**
     1921 * Allocates one private page.
     1922 *
     1923 * Worker for gmmR0AllocatePages.
     1924 *
     1925 * @param   pGMM        Pointer to the GMM instance data.
     1926 * @param   hGVM        The GVM handle of the VM requesting memory.
     1927 * @param   pChunk      The chunk to allocate it from.
     1928 * @param   pPageDesc   The page descriptor.
     1929 */
     1930static void gmmR0AllocatePage(PGMM pGMM, uint32_t hGVM, PGMMCHUNK pChunk, PGMMPAGEDESC pPageDesc)
     1931{
     1932    /* update the chunk stats. */
     1933    if (pChunk->hGVM == NIL_GVM_HANDLE)
     1934        pChunk->hGVM = hGVM;
     1935    Assert(pChunk->cFree);
     1936    pChunk->cFree--;
     1937    pChunk->cPrivate++;
     1938
     1939    /* unlink the first free page. */
     1940    const uint32_t iPage = pChunk->iFreeHead;
     1941    AssertReleaseMsg(iPage < RT_ELEMENTS(pChunk->aPages), ("%d\n", iPage));
     1942    PGMMPAGE pPage = &pChunk->aPages[iPage];
     1943    Assert(GMM_PAGE_IS_FREE(pPage));
     1944    pChunk->iFreeHead = pPage->Free.iNext;
     1945    Log3(("A pPage=%p iPage=%#x/%#x u2State=%d iFreeHead=%#x iNext=%#x\n",
     1946          pPage, iPage, (pChunk->Core.Key << GMM_CHUNKID_SHIFT) | iPage,
     1947          pPage->Common.u2State, pChunk->iFreeHead, pPage->Free.iNext));
     1948
     1949    /* make the page private. */
     1950    pPage->u = 0;
     1951    AssertCompile(GMM_PAGE_STATE_PRIVATE == 0);
     1952    pPage->Private.hGVM = hGVM;
     1953    AssertCompile(NIL_RTHCPHYS >= GMM_GCPHYS_LAST);
     1954    AssertCompile(GMM_GCPHYS_UNSHAREABLE >= GMM_GCPHYS_LAST);
     1955    if (pPageDesc->HCPhysGCPhys <= GMM_GCPHYS_LAST)
     1956        pPage->Private.pfn = pPageDesc->HCPhysGCPhys >> PAGE_SHIFT;
     1957    else
     1958        pPage->Private.pfn = GMM_PAGE_PFN_UNSHAREABLE; /* unshareable / unassigned - same thing. */
     1959
     1960    /* update the page descriptor. */
     1961    pPageDesc->HCPhysGCPhys = RTR0MemObjGetPagePhysAddr(pChunk->hMemObj, iPage);
     1962    Assert(pPageDesc->HCPhysGCPhys != NIL_RTHCPHYS);
     1963    pPageDesc->idPage = (pChunk->Core.Key << GMM_CHUNKID_SHIFT) | iPage;
     1964    pPageDesc->idSharedPage = NIL_GMM_PAGEID;
     1965}
     1966
     1967
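
The worker above is a textbook intrusive free-list pop: take the head index from iFreeHead, advance the head to Free.iNext, and brand the page with the allocating VM's handle. A minimal user-land sketch of the same technique, using simplified hypothetical types (Chunk, Page) in place of the real GMMCHUNK/GMMPAGE:

    #include <assert.h>
    #include <stdint.h>
    #include <stdio.h>

    #define CHUNK_NUM_PAGES 8        /* the real chunks hold 512 pages (2MB / 4KB) */
    #define NIL_HANDLE      UINT16_MAX
    #define NIL_INDEX       UINT32_MAX

    typedef struct Page
    {
        uint32_t iFreeNext;          /* index of the next free page, NIL_INDEX at the tail */
        uint16_t hOwner;             /* owning VM handle once allocated */
        uint8_t  fFree;
    } Page;

    typedef struct Chunk
    {
        Page     aPages[CHUNK_NUM_PAGES];
        uint32_t iFreeHead;          /* head of the intrusive free list */
        uint32_t cFree;
        uint16_t hGVM;               /* chunk affinity: the first VM to allocate from it */
    } Chunk;

    /* Pop the first free page, mark it private to hGVM, and return its index. */
    static uint32_t chunkAllocPage(Chunk *pChunk, uint16_t hGVM)
    {
        if (pChunk->hGVM == NIL_HANDLE)  /* the first allocation sets the affinity */
            pChunk->hGVM = hGVM;
        assert(pChunk->cFree > 0);
        pChunk->cFree--;

        uint32_t const iPage = pChunk->iFreeHead;   /* unlink the list head */
        assert(iPage < CHUNK_NUM_PAGES);
        Page *pPage = &pChunk->aPages[iPage];
        assert(pPage->fFree);
        pChunk->iFreeHead = pPage->iFreeNext;

        pPage->fFree  = 0;                          /* make the page private */
        pPage->hOwner = hGVM;
        return iPage;
    }

    int main(void)
    {
        Chunk chunk = { .iFreeHead = 0, .cFree = CHUNK_NUM_PAGES, .hGVM = NIL_HANDLE };
        for (uint32_t i = 0; i < CHUNK_NUM_PAGES; i++)
            chunk.aPages[i] = (Page){ .iFreeNext = i + 1 < CHUNK_NUM_PAGES ? i + 1 : NIL_INDEX,
                                      .hOwner = NIL_HANDLE, .fFree = 1 };

        printf("page %u -> VM 7\n", (unsigned)chunkAllocPage(&chunk, 7));
        printf("page %u -> VM 7\n", (unsigned)chunkAllocPage(&chunk, 7));
        printf("affinity=%u cFree=%u\n", (unsigned)chunk.hGVM, (unsigned)chunk.cFree);
        return 0;
    }
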
     1968/**
     1969 * Picks the free pages from a chunk.
     1970 *
     1971 * @returns The new page descriptor table index.
     1972 * @param   pGMM                Pointer to the GMM instance data.
     1973 * @param   hGVM                The VM handle.
     1974 * @param   pChunk              The chunk.
     1975 * @param   iPage               The current page descriptor table index.
     1976 * @param   cPages              The total number of pages to allocate.
     1977  * @param   paPages             The page descriptor table (input + output).
     1978 */
     1979static uint32_t gmmR0AllocatePagesFromChunk(PGMM pGMM, uint16_t const hGVM, PGMMCHUNK pChunk, uint32_t iPage, uint32_t cPages,
     1980                                            PGMMPAGEDESC paPages)
     1981{
     1982    PGMMCHUNKFREESET pSet = pChunk->pSet; Assert(pSet);
     1983    gmmR0UnlinkChunk(pChunk);
     1984
     1985    for (; pChunk->cFree && iPage < cPages; iPage++)
     1986        gmmR0AllocatePage(pGMM, hGVM, pChunk, &paPages[iPage]);
     1987
     1988    gmmR0LinkChunk(pChunk, pSet);
     1989    return iPage;
    1939 1990 }
    1940 1991
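
gmmR0AllocatePagesFromChunk unlinks the chunk from its free set before consuming pages and relinks it afterwards, because a set's lists are keyed by how many free pages a chunk has left, and the allocation changes that. A sketch of the unlink/allocate/relink discipline over bucketed free lists; the bucket width of 4 pages is an arbitrary stand-in for the real GMMCHUNKFREESET layout:

    #include <stdio.h>

    #define NUM_BUCKETS 4            /* bucket i holds chunks with cFree in [i*4, i*4+3] */

    typedef struct Chunk
    {
        unsigned      cFree;
        struct Chunk *pFreeNext;
        struct Chunk *pFreePrev;
    } Chunk;

    typedef struct FreeSet
    {
        Chunk   *apLists[NUM_BUCKETS];
        unsigned cFreePages;
    } FreeSet;

    static unsigned bucketOf(unsigned cFree) { return cFree / 4 < NUM_BUCKETS ? cFree / 4 : NUM_BUCKETS - 1; }

    static void unlinkChunk(FreeSet *pSet, Chunk *pChunk)
    {
        unsigned i = bucketOf(pChunk->cFree);
        if (pChunk->pFreePrev) pChunk->pFreePrev->pFreeNext = pChunk->pFreeNext;
        else                   pSet->apLists[i]             = pChunk->pFreeNext;
        if (pChunk->pFreeNext) pChunk->pFreeNext->pFreePrev = pChunk->pFreePrev;
        pChunk->pFreeNext = pChunk->pFreePrev = NULL;
        pSet->cFreePages -= pChunk->cFree;
    }

    static void linkChunk(FreeSet *pSet, Chunk *pChunk)
    {
        unsigned i = bucketOf(pChunk->cFree);    /* the bucket may differ from before */
        pChunk->pFreeNext = pSet->apLists[i];
        if (pChunk->pFreeNext) pChunk->pFreeNext->pFreePrev = pChunk;
        pSet->apLists[i] = pChunk;
        pSet->cFreePages += pChunk->cFree;
    }

    /* Allocate up to cWanted pages from one chunk: unlink, consume, relink. */
    static unsigned allocFromChunk(FreeSet *pSet, Chunk *pChunk, unsigned cWanted)
    {
        unlinkChunk(pSet, pChunk);
        unsigned cDone = 0;
        while (pChunk->cFree && cDone < cWanted) { pChunk->cFree--; cDone++; }
        linkChunk(pSet, pChunk);                 /* lands in the bucket for its new cFree */
        return cDone;
    }

    int main(void)
    {
        FreeSet set   = { { NULL }, 0 };
        Chunk   chunk = { .cFree = 15 };
        linkChunk(&set, &chunk);
        printf("allocated %u, chunk now in bucket %u, set has %u free pages\n",
               allocFromChunk(&set, &chunk, 6), bucketOf(chunk.cFree), set.cFreePages);
        return 0;
    }
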
     
    2026 2077        rc = VERR_NO_MEMORY;
    2027 2078    return rc;
    2028 }
    2029 
    2030 
    2031 /**
    2032  * Allocate one new chunk and add it to the specified free set.
    2033  *
    2034  * @returns VBox status code.
    2035  * @param   pGMM            Pointer to the GMM instance.
    2036  * @param   pSet            Pointer to the set.
    2037  * @param   hGVM            The affinity of the new chunk.
    2038  *
     2039  * @remarks The giant mutex will be temporarily abandoned during the allocation.
    2040  */
    2041 static int gmmR0AllocateOneChunk(PGMM pGMM, PGMMCHUNKFREESET pSet, uint16_t hGVM)
    2042 {
    2043     /*
    2044      * Allocate the memory.
    2045      *
    2046      * Note! We leave the giant GMM lock temporarily as the allocation might
    2047      *       take a long time. gmmR0RegisterChunk reacquires it (ugly).
    2048      */
    2049     gmmR0MutexRelease(pGMM);
    2050 
    2051     RTR0MEMOBJ hMemObj;
    2052     int rc = RTR0MemObjAllocPhysNC(&hMemObj, GMM_CHUNK_SIZE, NIL_RTHCPHYS);
    2053     if (RT_SUCCESS(rc))
    2054     {
    2055         rc = gmmR0RegisterChunk(pGMM, pSet, hMemObj, hGVM, 0 /*fChunkFlags*/, NULL);
    2056         if (RT_SUCCESS(rc))
    2057             return rc;
    2058 
    2059         RTR0MemObjFree(hMemObj, false /* fFreeMappings */);
    2060     }
    2061 
    2062     int rc2 = gmmR0MutexAcquire(pGMM);
    2063     AssertRCReturn(rc2, RT_FAILURE(rc) ? rc : rc2);
    2064     return rc;
    2065 }
    2066 
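
The removed gmmR0AllocateOneChunk shows the pattern its @remarks warns about: release the giant mutex around the slow physical allocation, let the registration helper reacquire it, and on failure relock before returning, preferring the primary error code. A small pthreads sketch of that discipline; slowAllocate and registerChunk are stand-ins, not the real RTR0MemObj/GMM API:

    #include <errno.h>
    #include <pthread.h>
    #include <stdio.h>
    #include <stdlib.h>

    static pthread_mutex_t g_GiantLock = PTHREAD_MUTEX_INITIALIZER;

    /* Stand-in for RTR0MemObjAllocPhysNC: a slow allocation we must not hold the lock across. */
    static int slowAllocate(void **ppv)
    {
        *ppv = malloc(2 * 1024 * 1024);          /* one 2MB "chunk" */
        return *ppv ? 0 : ENOMEM;
    }

    /* Stand-in for gmmR0RegisterChunk: returns holding the lock on success. */
    static int registerChunk(void *pv)
    {
        int rc = pthread_mutex_lock(&g_GiantLock);
        if (rc)
            return rc;                           /* lock not held on failure */
        (void)pv;                                /* ... link pv into the free set here ... */
        return 0;
    }

    /* Caller holds g_GiantLock on entry and on exit, success or failure. */
    static int allocateOneChunk(void)
    {
        pthread_mutex_unlock(&g_GiantLock);      /* leave the giant lock: allocation is slow */

        void *pv = NULL;
        int rc = slowAllocate(&pv);
        if (rc == 0)
        {
            rc = registerChunk(pv);              /* reacquires the lock on success */
            if (rc == 0)
                return 0;                        /* success: the lock is held again */
            free(pv);                            /* registration failed without the lock held */
        }

        int rc2 = pthread_mutex_lock(&g_GiantLock);  /* relock before reporting failure */
        return rc ? rc : rc2;                        /* prefer the primary error, as the original does */
    }

    int main(void)
    {
        pthread_mutex_lock(&g_GiantLock);
        printf("allocateOneChunk -> %d\n", allocateOneChunk());
        pthread_mutex_unlock(&g_GiantLock);
        return 0;
    }
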
    2067 
    2068 /**
    2069  * Attempts to allocate more pages until the requested amount is met.
    2070  *
    2071  * @returns VBox status code.
    2072  * @param   pGMM        Pointer to the GMM instance data.
    2073  * @param   pGVM        The calling VM.
    2074  * @param   pSet        Pointer to the free set to grow.
    2075  * @param   cPages      The number of pages needed.
    2076  * @param   pStrategy   Pointer to the allocation strategy data.  This is input
    2077  *                      and output.
    2078  *
    2079  * @remarks Called owning the mutex, but will leave it temporarily while
    2080  *          allocating the memory!
    2081  */
    2082 static int gmmR0AllocateMoreChunks(PGMM pGMM, PGVM pGVM, PGMMCHUNKFREESET pSet, uint32_t cPages,
    2083                                    PGMMR0ALLOCPAGESTRATEGY pStrategy)
    2084 {
    2085     Assert(!pGMM->fLegacyAllocationMode);
    2086 
    2087     if (!GMM_CHECK_SANITY_IN_LOOPS(pGMM))
    2088         return VERR_INTERNAL_ERROR_4;
    2089 
    2090     if (!pGMM->fBoundMemoryMode)
    2091     {
    2092         /*
     2093          * Try to steal free chunks from the other set first. (Only take 100% free chunks.)
    2094          */
    2095         PGMMCHUNKFREESET pOtherSet = pSet == &pGMM->PrivateX ? &pGMM->Shared : &pGMM->PrivateX;
    2096         while (     pSet->cFreePages < cPages
    2097                &&   pOtherSet->cFreePages >= GMM_CHUNK_NUM_PAGES)
    2098         {
    2099             PGMMCHUNK pChunk = pOtherSet->apLists[GMM_CHUNK_FREE_SET_UNUSED_LIST];
    2100             if (!pChunk)
    2101                 break;
    2102             Assert(pChunk->cFree != GMM_CHUNK_NUM_PAGES);
    2103 
    2104             gmmR0UnlinkChunk(pChunk);
    2105             gmmR0LinkChunk(pChunk, pSet);
    2106         }
    2107 
    2108         /*
     2109          * If we still need more pages, allocate new chunks.
     2110          * Note! We will leave the mutex while doing the allocation.
    2111          */
    2112         while (pSet->cFreePages < cPages)
    2113         {
    2114             int rc = gmmR0AllocateOneChunk(pGMM, pSet, pGVM->hSelf);
    2115             if (RT_FAILURE(rc))
    2116                 return rc;
    2117             if (!GMM_CHECK_SANITY_UPON_ENTERING(pGMM))
    2118                 return VERR_INTERNAL_ERROR_5;
    2119         }
    2120     }
    2121     else
    2122     {
    2123         /*
    2124          * The memory is bound to the VM allocating it, so we have to count
    2125          * the free pages carefully as well as making sure we brand them with
    2126          * our VM handle.
    2127          *
     2128          * Note! We will leave the mutex while doing the allocation.
    2129          */
    2130         uint16_t const hGVM = pGVM->hSelf;
    2131         for (;;)
    2132         {
    2133             /* Count and see if we've reached the goal. */
    2134             uint32_t cPagesFound = 0;
    2135             for (unsigned i = 0; i < RT_ELEMENTS(pSet->apLists); i++)
    2136                 for (PGMMCHUNK pCur = pSet->apLists[i]; pCur; pCur = pCur->pFreeNext)
    2137                     if (pCur->hGVM == hGVM)
    2138                     {
    2139                         cPagesFound += pCur->cFree;
    2140                         if (cPagesFound >= cPages)
    2141                             break;
    2142                     }
    2143             if (cPagesFound >= cPages)
    2144                 break;
    2145 
    2146             /* Allocate more. */
    2147             int rc = gmmR0AllocateOneChunk(pGMM, pSet, hGVM);
    2148             if (RT_FAILURE(rc))
    2149                 return rc;
    2150             if (!GMM_CHECK_SANITY_UPON_ENTERING(pGMM))
    2151                 return VERR_INTERNAL_ERROR_5;
    2152         }
    2153     }
    2154 
    2155     return VINF_SUCCESS;
    2156 }
    2157 
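
The removed gmmR0AllocateMoreChunks grows a free set in two stages when memory is not bound to a single VM: first it steals wholly free chunks from the other set, then it allocates fresh chunks until the requested page count is covered. A simplified sketch of that grow loop, tracking only the 100% free chunks of each set:

    #include <stdio.h>
    #include <stdlib.h>

    #define CHUNK_NUM_PAGES 512      /* pages per chunk, as in GMM (2MB / 4KB) */

    typedef struct Chunk   { unsigned cFree; struct Chunk *pNext; } Chunk;
    typedef struct FreeSet { Chunk *pUnused; unsigned cFreePages; } FreeSet;  /* pUnused: 100% free chunks */

    static Chunk *popUnused(FreeSet *pSet)
    {
        Chunk *p = pSet->pUnused;
        if (p) { pSet->pUnused = p->pNext; pSet->cFreePages -= p->cFree; }
        return p;
    }

    static void pushUnused(FreeSet *pSet, Chunk *p)
    {
        p->pNext = pSet->pUnused; pSet->pUnused = p; pSet->cFreePages += p->cFree;
    }

    static Chunk *allocateNewChunk(void)     /* stand-in for gmmR0AllocateOneChunk */
    {
        Chunk *p = malloc(sizeof(*p));
        if (p) { p->cFree = CHUNK_NUM_PAGES; p->pNext = NULL; }
        return p;
    }

    /* Grow pSet until it holds at least cPages free pages: steal wholly free
       chunks from pOther first, then fall back to allocating fresh chunks. */
    static int growSet(FreeSet *pSet, FreeSet *pOther, unsigned cPages)
    {
        while (pSet->cFreePages < cPages && pOther->pUnused)   /* only steal 100% free chunks */
            pushUnused(pSet, popUnused(pOther));

        while (pSet->cFreePages < cPages)
        {
            Chunk *p = allocateNewChunk();
            if (!p)
                return -1;                                     /* out of memory */
            pushUnused(pSet, p);
        }
        return 0;
    }

    int main(void)
    {
        FreeSet privSet = { NULL, 0 }, sharedSet = { NULL, 0 };
        Chunk   idle = { CHUNK_NUM_PAGES, NULL };
        pushUnused(&sharedSet, &idle);                         /* one idle chunk in the other set */

        if (growSet(&privSet, &sharedSet, 600) == 0)
            printf("private set now has %u free pages\n", privSet.cFreePages);
        return 0;
    }
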
    2158 
    2159 /**
    2160  * Allocates one private page.
    2161  *
    2162  * Worker for gmmR0AllocatePages.
    2163  *
    2164  * @param   pGMM        Pointer to the GMM instance data.
    2165  * @param   hGVM        The GVM handle of the VM requesting memory.
    2166  * @param   pChunk      The chunk to allocate it from.
    2167  * @param   pPageDesc   The page descriptor.
    2168  */
    2169 static void gmmR0AllocatePage(PGMM pGMM, uint32_t hGVM, PGMMCHUNK pChunk, PGMMPAGEDESC pPageDesc)
    2170 {
    2171     /* update the chunk stats. */
    2172     if (pChunk->hGVM == NIL_GVM_HANDLE)
    2173         pChunk->hGVM = hGVM;
    2174     Assert(pChunk->cFree);
    2175     pChunk->cFree--;
    2176     pChunk->cPrivate++;
    2177 
    2178     /* unlink the first free page. */
    2179     const uint32_t iPage = pChunk->iFreeHead;
    2180     AssertReleaseMsg(iPage < RT_ELEMENTS(pChunk->aPages), ("%d\n", iPage));
    2181     PGMMPAGE pPage = &pChunk->aPages[iPage];
    2182     Assert(GMM_PAGE_IS_FREE(pPage));
    2183     pChunk->iFreeHead = pPage->Free.iNext;
    2184     Log3(("A pPage=%p iPage=%#x/%#x u2State=%d iFreeHead=%#x iNext=%#x\n",
    2185           pPage, iPage, (pChunk->Core.Key << GMM_CHUNKID_SHIFT) | iPage,
    2186           pPage->Common.u2State, pChunk->iFreeHead, pPage->Free.iNext));
    2187 
    2188     /* make the page private. */
    2189     pPage->u = 0;
    2190     AssertCompile(GMM_PAGE_STATE_PRIVATE == 0);
    2191     pPage->Private.hGVM = hGVM;
    2192     AssertCompile(NIL_RTHCPHYS >= GMM_GCPHYS_LAST);
    2193     AssertCompile(GMM_GCPHYS_UNSHAREABLE >= GMM_GCPHYS_LAST);
    2194     if (pPageDesc->HCPhysGCPhys <= GMM_GCPHYS_LAST)
    2195         pPage->Private.pfn = pPageDesc->HCPhysGCPhys >> PAGE_SHIFT;
    2196     else
    2197         pPage->Private.pfn = GMM_PAGE_PFN_UNSHAREABLE; /* unshareable / unassigned - same thing. */
    2198 
    2199     /* update the page descriptor. */
    2200     pPageDesc->HCPhysGCPhys = RTR0MemObjGetPagePhysAddr(pChunk->hMemObj, iPage);
    2201     Assert(pPageDesc->HCPhysGCPhys != NIL_RTHCPHYS);
    2202     pPageDesc->idPage = (pChunk->Core.Key << GMM_CHUNKID_SHIFT) | iPage;
    2203     pPageDesc->idSharedPage = NIL_GMM_PAGEID;
    2204 }
    2205 
    2206 #if 0 /* the old allocator */
    2207 
    2208 /**
    2209  * Common worker for GMMR0AllocateHandyPages and GMMR0AllocatePages.
    2210  *
    2211  * @returns VBox status code:
    2212  * @retval  VINF_SUCCESS on success.
    2213  * @retval  VERR_GMM_SEED_ME if seeding via GMMR0SeedChunk or
    2214  *          gmmR0AllocateMoreChunks is necessary.
    2215  * @retval  VERR_GMM_HIT_GLOBAL_LIMIT if we've exhausted the available pages.
    2216  * @retval  VERR_GMM_HIT_VM_ACCOUNT_LIMIT if we've hit the VM account limit,
     2217  *          that is, we're trying to allocate more than we've reserved.
    2218  *
    2219  * @param   pGMM                Pointer to the GMM instance data.
    2220  * @param   pGVM                Pointer to the shared VM structure.
    2221  * @param   cPages              The number of pages to allocate.
    2222  * @param   paPages             Pointer to the page descriptors.
    2223  *                              See GMMPAGEDESC for details on what is expected on input.
    2224  * @param   enmAccount          The account to charge.
    2225  * @param   pStrategy           Pointer to the allocation strategy data.  This
    2226  *                              is input and output.
    2227  */
    2228 static int gmmR0AllocatePages(PGMM pGMM, PGVM pGVM, uint32_t cPages, PGMMPAGEDESC paPages, GMMACCOUNT enmAccount,
    2229                               PGMMR0ALLOCPAGESTRATEGY pStrategy)
    2230 {
    2231     /*
    2232      * Check allocation limits.
    2233      */
    2234     if (RT_UNLIKELY(pGMM->cAllocatedPages + cPages > pGMM->cMaxPages))
    2235         return VERR_GMM_HIT_GLOBAL_LIMIT;
    2236 
    2237     switch (enmAccount)
    2238     {
    2239         case GMMACCOUNT_BASE:
    2240             if (RT_UNLIKELY(  pGVM->gmm.s.Allocated.cBasePages + pGVM->gmm.s.cBalloonedPages + cPages
    2241                             > pGVM->gmm.s.Reserved.cBasePages))
    2242             {
    2243                 Log(("gmmR0AllocatePages:Base: Reserved=%#llx Allocated+Ballooned+Requested=%#llx+%#llx+%#x!\n",
    2244                      pGVM->gmm.s.Reserved.cBasePages, pGVM->gmm.s.Allocated.cBasePages, pGVM->gmm.s.cBalloonedPages, cPages));
    2245                 return VERR_GMM_HIT_VM_ACCOUNT_LIMIT;
    2246             }
    2247             break;
    2248         case GMMACCOUNT_SHADOW:
    2249             if (RT_UNLIKELY(pGVM->gmm.s.Allocated.cShadowPages + cPages > pGVM->gmm.s.Reserved.cShadowPages))
    2250             {
    2251                 Log(("gmmR0AllocatePages:Shadow: Reserved=%#llx Allocated+Requested=%#llx+%#x!\n",
    2252                      pGVM->gmm.s.Reserved.cShadowPages, pGVM->gmm.s.Allocated.cShadowPages, cPages));
    2253                 return VERR_GMM_HIT_VM_ACCOUNT_LIMIT;
    2254             }
    2255             break;
    2256         case GMMACCOUNT_FIXED:
    2257             if (RT_UNLIKELY(pGVM->gmm.s.Allocated.cFixedPages + cPages > pGVM->gmm.s.Reserved.cFixedPages))
    2258             {
    2259                 Log(("gmmR0AllocatePages:Fixed: Reserved=%#llx Allocated+Requested=%#llx+%#x!\n",
    2260                      pGVM->gmm.s.Reserved.cFixedPages, pGVM->gmm.s.Allocated.cFixedPages, cPages));
    2261                 return VERR_GMM_HIT_VM_ACCOUNT_LIMIT;
    2262             }
    2263             break;
    2264         default:
    2265             AssertMsgFailedReturn(("enmAccount=%d\n", enmAccount), VERR_INTERNAL_ERROR);
    2266     }
    2267 
    2268     /*
    2269      * Check if we need to allocate more memory or not.  In bound memory mode this
     2270      * is a bit of extra work, but it's easier to do it upfront than to bail out later.
    2271      */
    2272     PGMMCHUNKFREESET pSet = pGMM->fBoundMemoryMode ? &pGVM->gmm.s.Private : &pGMM->PrivateX;
    2273     if (pSet->cFreePages < cPages)
    2274         return VERR_GMM_SEED_ME;
    2275 
    2276 /** @todo Rewrite this to use the page array for storing chunk IDs and other
     2277 *        state info needed to avoid the multipass silliness. */
    2278     if (pGMM->fBoundMemoryMode)
    2279     {
    2280         uint16_t hGVM = pGVM->hSelf;
    2281         uint32_t cPagesFound = 0;
    2282         for (unsigned i = 0; i < RT_ELEMENTS(pSet->apLists); i++)
    2283             for (PGMMCHUNK pCur = pSet->apLists[i]; pCur; pCur = pCur->pFreeNext)
    2284                 if (pCur->hGVM == hGVM)
    2285                 {
    2286                     cPagesFound += pCur->cFree;
    2287                     if (cPagesFound >= cPages)
    2288                         break;
    2289                 }
    2290         if (cPagesFound < cPages)
    2291             return VERR_GMM_SEED_ME;
    2292     }
    2293 
    2294     /*
    2295      * Pick the pages.
     2296      * Try to make some effort to keep VMs sharing private chunks.
    2297      */
    2298     uint16_t hGVM = pGVM->hSelf;
    2299     uint32_t iPage = 0;
    2300 
    2301     /* first round, pick from chunks with an affinity to the VM. */
    2302     for (unsigned i = 0; i < GMM_CHUNK_FREE_SET_UNUSED_LIST && iPage < cPages; i++)
    2303     {
    2304         PGMMCHUNK pCurFree = NULL;
    2305         PGMMCHUNK pCur = pSet->apLists[i];
    2306         while (pCur && iPage < cPages)
    2307         {
    2308             PGMMCHUNK pNext = pCur->pFreeNext;
    2309 
    2310             if (    pCur->hGVM == hGVM
    2311                 &&  pCur->cFree < GMM_CHUNK_NUM_PAGES)
    2312             {
    2313                 gmmR0UnlinkChunk(pCur);
    2314                 for (; pCur->cFree && iPage < cPages; iPage++)
    2315                     gmmR0AllocatePage(pGMM, hGVM, pCur, &paPages[iPage]);
    2316                 gmmR0LinkChunk(pCur, pSet);
    2317             }
    2318 
    2319             pCur = pNext;
    2320         }
    2321     }
    2322 
    2323     if (iPage < cPages)
    2324     {
    2325         /* second round, pick pages from the 100% empty chunks we just skipped above. */
    2326         PGMMCHUNK pCurFree = NULL;
    2327         PGMMCHUNK pCur = pSet->apLists[GMM_CHUNK_FREE_SET_UNUSED_LIST];
    2328         while (pCur && iPage < cPages)
    2329         {
    2330             PGMMCHUNK pNext = pCur->pFreeNext;
    2331             Assert(pCur->cFree == GMM_CHUNK_NUM_PAGES);
    2332 
    2333             if (   pCur->hGVM == hGVM
    2334                 || !pGMM->fBoundMemoryMode)
    2335             {
    2336                 gmmR0UnlinkChunk(pCur);
    2337                 for (; pCur->cFree && iPage < cPages; iPage++)
    2338                     gmmR0AllocatePage(pGMM, hGVM, pCur, &paPages[iPage]);
    2339                 gmmR0LinkChunk(pCur, pSet);
    2340             }
    2341 
    2342             pCur = pNext;
    2343         }
    2344     }
    2345 
    2346     if (    iPage < cPages
    2347         &&  !pGMM->fBoundMemoryMode)
    2348     {
    2349         /* third round, disregard affinity. */
    2350         unsigned i = RT_ELEMENTS(pSet->apLists);
    2351         while (i-- > 0 && iPage < cPages)
    2352         {
    2353             PGMMCHUNK pCurFree = NULL;
    2354             PGMMCHUNK pCur = pSet->apLists[i];
    2355             while (pCur && iPage < cPages)
    2356             {
    2357                 PGMMCHUNK pNext = pCur->pFreeNext;
    2358 
    2359                 if (    pCur->cFree >  GMM_CHUNK_NUM_PAGES / 2
    2360                     &&  cPages      >= GMM_CHUNK_NUM_PAGES / 2)
    2361                     pCur->hGVM = hGVM; /* change chunk affinity */
    2362 
    2363                 gmmR0UnlinkChunk(pCur);
    2364                 for (; pCur->cFree && iPage < cPages; iPage++)
    2365                     gmmR0AllocatePage(pGMM, hGVM, pCur, &paPages[iPage]);
    2366                 gmmR0LinkChunk(pCur, pSet);
    2367 
    2368                 pCur = pNext;
    2369             }
    2370         }
    2371     }
    2372 
    2373     /*
    2374      * Update the account.
    2375      */
    2376     switch (enmAccount)
    2377     {
    2378         case GMMACCOUNT_BASE:   pGVM->gmm.s.Allocated.cBasePages   += iPage; break;
    2379         case GMMACCOUNT_SHADOW: pGVM->gmm.s.Allocated.cShadowPages += iPage; break;
    2380         case GMMACCOUNT_FIXED:  pGVM->gmm.s.Allocated.cFixedPages  += iPage; break;
    2381         default:
    2382             AssertMsgFailedReturn(("enmAccount=%d\n", enmAccount), VERR_INTERNAL_ERROR);
    2383     }
    2384     pGVM->gmm.s.cPrivatePages += iPage;
    2385     pGMM->cAllocatedPages     += iPage;
    2386 
    2387     AssertMsgReturn(iPage == cPages, ("%u != %u\n", iPage, cPages), VERR_INTERNAL_ERROR);
    2388 
    2389     /*
    2390      * Check if we've reached some threshold and should kick one or two VMs and tell
    2391      * them to inflate their balloons a bit more... later.
    2392      */
    2393 
    2394     return VINF_SUCCESS;
    2395 }
    2396 
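
The disabled allocator above picks pages in up to three rounds: VM-affine, partially used chunks first; then wholly free chunks; finally, affinity disregarded, anything with pages left. A condensed sketch of that round structure over a flat chunk array, with a simplified re-branding rule (the real third round only re-brands chunks that are more than half free when the request is large enough):

    #include <stdio.h>

    #define NUM_CHUNKS      4
    #define CHUNK_NUM_PAGES 8

    typedef struct Chunk { unsigned hGVM; unsigned cFree; } Chunk;

    /* Take up to cWanted pages, preferring chunks already bound to hGVM,
       then wholly free chunks, and only then anybody else's leftovers. */
    static unsigned allocRounds(Chunk *paChunks, unsigned cChunks, unsigned hGVM, unsigned cWanted)
    {
        unsigned cDone = 0;
        for (int round = 0; round < 3 && cDone < cWanted; round++)
            for (unsigned i = 0; i < cChunks && cDone < cWanted; i++)
            {
                Chunk *p = &paChunks[i];
                int fPick = round == 0 ? p->hGVM == hGVM && p->cFree < CHUNK_NUM_PAGES /* affine, partly used */
                          : round == 1 ? p->cFree == CHUNK_NUM_PAGES                   /* 100% free: claim it */
                          :              p->cFree != 0;                                /* last resort: anything */
                if (!fPick)
                    continue;
                if (round > 0)
                    p->hGVM = hGVM;              /* (re)brand the chunk's affinity (simplified) */
                while (p->cFree && cDone < cWanted) { p->cFree--; cDone++; }
            }
        return cDone;
    }

    int main(void)
    {
        Chunk aChunks[NUM_CHUNKS] = { { 1, 2 }, { 2, 3 }, { 1, 8 }, { 2, 8 } };
        unsigned cGot = allocRounds(aChunks, NUM_CHUNKS, 1, 6);
        printf("VM 1 got %u pages; chunk2 affinity=%u cFree=%u\n",
               cGot, aChunks[2].hGVM, aChunks[2].cFree);
        return 0;
    }
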
    2397 
    2398 /**
     2399  * Determines the initial page allocation strategy and initializes the data
    2400  * structure.
    2401  *
    2402  * @param   pGMM                Pointer to the GMM instance data.
    2403  * @param   pGVM                Pointer to the shared VM structure.
    2404  * @param   pStrategy           The data structure to initialize.
    2405  */
    2406 static void gmmR0AllocatePagesInitStrategy(PGMM pGMM, PGVM pGVM, PGMMR0ALLOCPAGESTRATEGY pStrategy)
    2407 {
    2408     pStrategy->cTries = 0;
    2409 }
    2410 
    2411 #endif /* old allocator */
    2412 
    2413 
    2414 /**
    2415  * Picks the free pages from a chunk.
    2416  *
    2417  * @returns The new page descriptor table index.
    2418  * @param   pGMM                Pointer to the GMM instance data.
    2419  * @param   hGVM                The VM handle.
    2420  * @param   pChunk              The chunk.
    2421  * @param   iPage               The current page descriptor table index.
    2422  * @param   cPages              The total number of pages to allocate.
     2423  * @param   paPages             The page descriptor table (input + output).
    2424  */
    2425 static uint32_t gmmR0AllocatePagesFromChunk(PGMM pGMM, uint16_t const hGVM, PGMMCHUNK pChunk, uint32_t iPage, uint32_t cPages,
    2426                                             PGMMPAGEDESC paPages)
    2427 {
    2428     PGMMCHUNKFREESET pSet = pChunk->pSet; Assert(pSet);
    2429     gmmR0UnlinkChunk(pChunk);
    2430 
    2431     for (; pChunk->cFree && iPage < cPages; iPage++)
    2432         gmmR0AllocatePage(pGMM, hGVM, pChunk, &paPages[iPage]);
    2433 
    2434     gmmR0LinkChunk(pChunk, pSet);
    2435     return iPage;
    2436 2079 }
    2437 2080
     
    3085 2728             * Note! gmmR0AllocateMoreChunks may leave the protection of the mutex!
    3086 2729             */
    3087 #if 1
    3088 2730            rc = gmmR0AllocatePagesNew(pGMM, pGVM, cPagesToAlloc, paPages, GMMACCOUNT_BASE);
    3089 #else
    3090             GMMR0ALLOCPAGESTRATEGY Strategy;
    3091             gmmR0AllocatePagesInitStrategy(pGMM, pGVM, &Strategy);
    3092             while (RT_SUCCESS(rc))
    3093             {
    3094                 rc = gmmR0AllocatePages(pGMM, pGVM, cPagesToAlloc, paPages, GMMACCOUNT_BASE, &Strategy);
    3095                 if (    rc != VERR_GMM_SEED_ME
    3096                     ||  pGMM->fLegacyAllocationMode)
    3097                     break;
    3098                 rc = gmmR0AllocateMoreChunks(pGMM, pGVM, &pGMM->PrivateX, cPagesToAlloc, &Strategy);
    3099             }
    3100 #endif
    3101 2731        }
    31022732        else
     
    3174 2804                      &&  pGVM->gmm.s.Reserved.cFixedPages
    3175 2805                      &&  pGVM->gmm.s.Reserved.cShadowPages))
    3176         {
    3177 #if 1
    3178 2806            rc = gmmR0AllocatePagesNew(pGMM, pGVM, cPages, paPages, enmAccount);
    3179 #else
    3180             /*
    3181              * gmmR0AllocatePages seed loop.
    3182              * Note! gmmR0AllocateMoreChunks may leave the protection of the mutex!
    3183              */
    3184             GMMR0ALLOCPAGESTRATEGY Strategy;
    3185             gmmR0AllocatePagesInitStrategy(pGMM, pGVM, &Strategy);
    3186             while (RT_SUCCESS(rc))
    3187             {
    3188                 rc = gmmR0AllocatePages(pGMM, pGVM, cPages, paPages, enmAccount, &Strategy);
    3189                 if (    rc != VERR_GMM_SEED_ME
    3190                     ||  pGMM->fLegacyAllocationMode)
    3191                     break;
    3192                 rc = gmmR0AllocateMoreChunks(pGMM, pGVM, &pGMM->PrivateX, cPages, &Strategy);
    3193             }
    3194 #endif
    3195         }
    3196 2807        else
    3197 2808            rc = VERR_WRONG_ORDER;
     
    3615 3226 }
    3616 3227
    3617 #ifdef VBOX_WITH_PAGE_SHARING  /** @todo move this away from here, this has nothing to do with the free() code. */
    3618 
    3619 /**
     3620  * Converts a private page to a shared page; the page is known to exist and be valid.
    3621  *
    3622  * @param   pGMM        Pointer to the GMM instance.
    3623  * @param   pGVM        Pointer to the GVM instance.
    3624  * @param   HCPhys      Host physical address
    3625  * @param   idPage      The Page ID
    3626  * @param   pPage       The page structure.
    3627  */
    3628 DECLINLINE(void) gmmR0ConvertToSharedPage(PGMM pGMM, PGVM pGVM, RTHCPHYS HCPhys, uint32_t idPage, PGMMPAGE pPage)
    3629 {
    3630     PGMMCHUNK pChunk = gmmR0GetChunk(pGMM, idPage >> GMM_CHUNKID_SHIFT);
    3631     Assert(pChunk);
    3632     Assert(pChunk->cFree < GMM_CHUNK_NUM_PAGES);
    3633     Assert(GMM_PAGE_IS_PRIVATE(pPage));
    3634 
    3635     pChunk->cPrivate--;
    3636     pChunk->cShared++;
    3637 
    3638     pGMM->cSharedPages++;
    3639 
    3640     pGVM->gmm.s.cSharedPages++;
    3641     pGVM->gmm.s.cPrivatePages--;
    3642 
    3643     /* Modify the page structure. */
    3644     pPage->Shared.pfn     = (uint32_t)(uint64_t)(HCPhys >> PAGE_SHIFT);
    3645     pPage->Shared.cRefs   = 1;
    3646     pPage->Common.u2State = GMM_PAGE_STATE_SHARED;
    3647 }
    3648 
    3649 
    3650 /**
     3651  * Increases the use count of a shared page; the page is known to exist and be valid.
    3652  *
    3653  * @param   pGMM        Pointer to the GMM instance.
    3654  * @param   pGVM        Pointer to the GVM instance.
    3655  * @param   pPage       The page structure.
    3656  */
    3657 DECLINLINE(void) gmmR0UseSharedPage(PGMM pGMM, PGVM pGVM, PGMMPAGE pPage)
    3658 {
    3659     Assert(pGMM->cSharedPages > 0);
    3660     Assert(pGMM->cAllocatedPages > 0);
    3661 
    3662     pGMM->cDuplicatePages++;
    3663 
    3664     pPage->Shared.cRefs++;
    3665     pGVM->gmm.s.cSharedPages++;
    3666     pGVM->gmm.s.Allocated.cBasePages++;
    3667 }
    3668 
    3669 #endif /* VBOX_WITH_PAGE_SHARING */
    3670 3228
    3671 3229 /**
     
    4774 4332 }
    4775 4333
     4334
    4776 4335 /**
    4777 4336  * Unregisters a shared module for the VM
     
    4866 4425 }
    4867 4426
     4427
    4868 4428 /**
    4869 4429  * VMMR0 request wrapper for GMMR0UnregisterSharedModule.
     
    4887 4447
    4888 4448 #ifdef VBOX_WITH_PAGE_SHARING
     4449
     4450/**
      4451 * Increases the use count of a shared page; the page is known to exist and be valid.
     4452 *
     4453 * @param   pGMM        Pointer to the GMM instance.
     4454 * @param   pGVM        Pointer to the GVM instance.
     4455 * @param   pPage       The page structure.
     4456 */
     4457DECLINLINE(void) gmmR0UseSharedPage(PGMM pGMM, PGVM pGVM, PGMMPAGE pPage)
     4458{
     4459    Assert(pGMM->cSharedPages > 0);
     4460    Assert(pGMM->cAllocatedPages > 0);
     4461
     4462    pGMM->cDuplicatePages++;
     4463
     4464    pPage->Shared.cRefs++;
     4465    pGVM->gmm.s.cSharedPages++;
     4466    pGVM->gmm.s.Allocated.cBasePages++;
     4467}
     4468
     4469
     4470/**
      4471 * Converts a private page to a shared page; the page is known to exist and be valid.
     4472 *
     4473 * @param   pGMM        Pointer to the GMM instance.
     4474 * @param   pGVM        Pointer to the GVM instance.
     4475 * @param   HCPhys      Host physical address
     4476 * @param   idPage      The Page ID
     4477 * @param   pPage       The page structure.
     4478 */
     4479DECLINLINE(void) gmmR0ConvertToSharedPage(PGMM pGMM, PGVM pGVM, RTHCPHYS HCPhys, uint32_t idPage, PGMMPAGE pPage)
     4480{
     4481    PGMMCHUNK pChunk = gmmR0GetChunk(pGMM, idPage >> GMM_CHUNKID_SHIFT);
     4482    Assert(pChunk);
     4483    Assert(pChunk->cFree < GMM_CHUNK_NUM_PAGES);
     4484    Assert(GMM_PAGE_IS_PRIVATE(pPage));
     4485
     4486    pChunk->cPrivate--;
     4487    pChunk->cShared++;
     4488
     4489    pGMM->cSharedPages++;
     4490
     4491    pGVM->gmm.s.cSharedPages++;
     4492    pGVM->gmm.s.cPrivatePages--;
     4493
     4494    /* Modify the page structure. */
     4495    pPage->Shared.pfn     = (uint32_t)(uint64_t)(HCPhys >> PAGE_SHIFT);
     4496    pPage->Shared.cRefs   = 1;
     4497    pPage->Common.u2State = GMM_PAGE_STATE_SHARED;
     4498}
     4499
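
Taken together, gmmR0ConvertToSharedPage and gmmR0UseSharedPage implement the sharing hand-off: the first VM's private copy becomes the shared master with cRefs = 1, and each further VM that matches the page takes another reference while cDuplicatePages counts the host pages saved. A self-contained sketch with simplified types:

    #include <assert.h>
    #include <stdint.h>
    #include <stdio.h>

    enum PageState { PAGE_PRIVATE, PAGE_SHARED };

    typedef struct Page
    {
        enum PageState enmState;
        uint32_t       cRefs;        /* meaningful only while shared */
        uint64_t       pfn;          /* host page frame number */
    } Page;

    typedef struct Counters { uint64_t cPrivatePages, cSharedPages, cDuplicatePages; } Counters;

    /* The first VM turns its private copy into the shared master copy (cRefs = 1). */
    static void convertToShared(Counters *pStats, Page *pPage, uint64_t HCPhys)
    {
        assert(pPage->enmState == PAGE_PRIVATE);
        pStats->cPrivatePages--;
        pStats->cSharedPages++;
        pPage->pfn      = HCPhys >> 12;          /* PAGE_SHIFT */
        pPage->cRefs    = 1;
        pPage->enmState = PAGE_SHARED;
    }

    /* Every further VM that matches the page just takes another reference. */
    static void useSharedPage(Counters *pStats, Page *pPage)
    {
        assert(pPage->enmState == PAGE_SHARED);
        pStats->cDuplicatePages++;               /* one host page now backs one more guest page */
        pPage->cRefs++;
    }

    int main(void)
    {
        Counters stats = { 1, 0, 0 };
        Page     page  = { PAGE_PRIVATE, 0, 0 };

        convertToShared(&stats, &page, 0x123456000ULL);
        useSharedPage(&stats, &page);            /* a second VM maps the same content */

        printf("state=%d cRefs=%u shared=%llu duplicates=%llu\n",
               (int)page.enmState, (unsigned)page.cRefs,
               (unsigned long long)stats.cSharedPages,
               (unsigned long long)stats.cDuplicatePages);
        return 0;
    }
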
    4889 4500
    4890 4501 /**