Changeset 37247 in vbox for trunk/src/VBox
Timestamp:
    May 30, 2011 10:02:05 AM
File:
    1 edited
trunk/src/VBox/VMM/VMMR0/GMMR0.cpp (r37242 → r37247)
In the internal function declarations, gmmR0FreePrivatePage is added and gmmR0FreeSharedPage becomes DECLINLINE:

 *   Internal Functions                                                        *
 *******************************************************************************/
 static DECLCALLBACK(int) gmmR0TermDestroyChunk(PAVLU32NODECORE pNode, void *pvGMM);
 static bool              gmmR0CleanupVMScanChunk(PGMM pGMM, PGVM pGVM, PGMMCHUNK pChunk);
 DECLINLINE(void)         gmmR0UnlinkChunk(PGMMCHUNK pChunk);
 DECLINLINE(void)         gmmR0LinkChunk(PGMMCHUNK pChunk, PGMMCHUNKFREESET pSet);
 DECLINLINE(void)         gmmR0SelectSetAndLinkChunk(PGMM pGMM, PGVM pGVM, PGMMCHUNK pChunk);
 static uint32_t          gmmR0SanityCheck(PGMM pGMM, const char *pszFunction, unsigned uLineNo);
 static bool              gmmR0FreeChunk(PGMM pGMM, PGVM pGVM, PGMMCHUNK pChunk, bool fRelaxedSem);
-static void              gmmR0FreeSharedPage(PGMM pGMM, PGVM pGVM, uint32_t idPage, PGMMPAGE pPage);
+DECLINLINE(void)         gmmR0FreePrivatePage(PGMM pGMM, PGVM pGVM, uint32_t idPage, PGMMPAGE pPage);
+DECLINLINE(void)         gmmR0FreeSharedPage(PGMM pGMM, PGVM pGVM, uint32_t idPage, PGMMPAGE pPage);
 static int               gmmR0UnmapChunkLocked(PGMM pGMM, PGVM pGVM, PGMMCHUNK pChunk);
 static void              gmmR0SharedModuleCleanup(PGMM pGMM, PGVM pGVM);

…

The old allocator is compiled out:

 }

+#if 0 /* the old allocator */

 /**

…

 }

+#endif /* old allocator */

The replacement allocator follows; everything from here to the end of gmmR0AllocatePagesNew is new in r37247:

/**
 * Picks the free pages from a chunk.
 *
 * @returns The new page descriptor table index.
 * @param   pGMM        Pointer to the GMM instance data.
 * @param   hGVM        The VM handle.
 * @param   pChunk      The chunk.
 * @param   iPage       The current page descriptor table index.
 * @param   cPages      The total number of pages to allocate.
 * @param   paPages     The page descriptor table (input + output).
 */
static uint32_t gmmR0AllocatePagesFromChunk(PGMM pGMM, uint16_t const hGVM, PGMMCHUNK pChunk, uint32_t iPage, uint32_t cPages,
                                            PGMMPAGEDESC paPages)
{
    PGMMCHUNKFREESET pSet = pChunk->pSet;
    Assert(pSet);
    gmmR0UnlinkChunk(pChunk);

    for (; pChunk->cFree && iPage < cPages; iPage++)
        gmmR0AllocatePage(pGMM, hGVM, pChunk, &paPages[iPage]);

    gmmR0LinkChunk(pChunk, pSet);
    return iPage;
}
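Why the unlink/relink bracket: free chunks sit in per-set lists bucketed by their free-page count, so a chunk has to leave its list before gmmR0AllocatePage() changes pChunk->cFree, and rejoin whichever bucket matches the new count afterwards. The standalone sketch below illustrates only that pattern; Chunk, FreeSet, bucketOf and the bucket math are simplified stand-ins for illustration, not the real GMM structures.

#include <stddef.h>
#include <stdint.h>

#define NUM_BUCKETS 8

typedef struct Chunk
{
    struct Chunk  *pFreeNext;    /* next chunk in the same bucket */
    struct Chunk **ppFreePrev;   /* the pointer that points at us, for O(1) unlink */
    uint32_t       cFree;        /* free pages left in this chunk */
} Chunk;

typedef struct FreeSet
{
    Chunk *apLists[NUM_BUCKETS]; /* chunks bucketed by cFree */
} FreeSet;

/* Map a free-page count to a bucket index (log2-style, purely illustrative). */
static unsigned bucketOf(uint32_t cFree)
{
    unsigned iBucket = 0;
    while ((cFree >>= 1) && iBucket < NUM_BUCKETS - 1)
        iBucket++;
    return iBucket;
}

/* Unlink must happen before cFree changes, or the chunk sits in the wrong bucket. */
static void unlinkChunk(Chunk *pChunk)
{
    if (pChunk->ppFreePrev)
    {
        *pChunk->ppFreePrev = pChunk->pFreeNext;
        if (pChunk->pFreeNext)
            pChunk->pFreeNext->ppFreePrev = pChunk->ppFreePrev;
        pChunk->pFreeNext  = NULL;
        pChunk->ppFreePrev = NULL;
    }
}

/* Relink picks the bucket matching the updated cFree. */
static void linkChunk(FreeSet *pSet, Chunk *pChunk)
{
    Chunk **ppHead = &pSet->apLists[bucketOf(pChunk->cFree)];
    pChunk->pFreeNext = *ppHead;
    if (*ppHead)
        (*ppHead)->ppFreePrev = &pChunk->pFreeNext;
    pChunk->ppFreePrev = ppHead;
    *ppHead = pChunk;
}

int main(void)
{
    FreeSet Set = { { NULL } };
    Chunk   C   = { NULL, NULL, 37 };

    linkChunk(&Set, &C);    /* goes into bucket bucketOf(37) */
    unlinkChunk(&C);        /* must precede any change of cFree */
    C.cFree -= 20;          /* pretend we handed out 20 pages */
    linkChunk(&Set, &C);    /* re-files the chunk under its new bucket */
    return 0;
}

The ppFreePrev back-pointer is what makes the unlink O(1) no matter which bucket the chunk currently lives in.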
/**
 * Allocates a new chunk, immediately picks the requested pages from it, and
 * adds what's remaining to the specified free set.
 *
 * @note    This will leave the giant mutex while allocating the new chunk!
 *
 * @returns VBox status code.
 * @param   pGMM        Pointer to the GMM instance data.
 * @param   pGVM        Pointer to the kernel-only VM instance data.
 * @param   pSet        Pointer to the free set.
 * @param   cPages      The number of pages requested.
 * @param   paPages     The page descriptor table (input + output).
 * @param   piPage      The pointer to the page descriptor table index variable.
 *                      This will be updated.
 */
static int gmmR0AllocateChunkNew(PGMM pGMM, PGVM pGVM, PGMMCHUNKFREESET pSet, uint32_t cPages,
                                 PGMMPAGEDESC paPages, uint32_t *piPage)
{
    gmmR0MutexRelease(pGMM);

    RTR0MEMOBJ hMemObj;
    int rc = RTR0MemObjAllocPhysNC(&hMemObj, GMM_CHUNK_SIZE, NIL_RTHCPHYS);
    if (RT_SUCCESS(rc))
    {
        /** @todo Duplicate gmmR0RegisterChunk here so we can avoid chaining up the
         *        free pages first and then unchaining them right afterwards.  Instead
         *        do as much work as possible without holding the giant lock. */
        PGMMCHUNK pChunk;
        rc = gmmR0RegisterChunk(pGMM, pSet, hMemObj, pGVM->hSelf, 0 /*fChunkFlags*/, &pChunk);
        if (RT_SUCCESS(rc))
        {
            *piPage = gmmR0AllocatePagesFromChunk(pGMM, pGVM->hSelf, pChunk, *piPage, cPages, paPages);
            return VINF_SUCCESS;
        }

        /* bail out */
        RTR0MemObjFree(hMemObj, false /* fFreeMappings */);
    }

    int rc2 = gmmR0MutexAcquire(pGMM);
    AssertRCReturn(rc2, RT_FAILURE(rc) ? rc : rc2);
    return rc;
}


/**
 * As a last resort we'll pick any page we can get.
 *
 * @returns The new page descriptor table index.
 * @param   pGMM        Pointer to the GMM instance data.
 * @param   pGVM        Pointer to the global VM structure.
 * @param   pSet        The set to pick from.
 * @param   iPage       The current page descriptor table index.
 * @param   cPages      The total number of pages to allocate.
 * @param   paPages     The page descriptor table (input + output).
 */
static uint32_t gmmR0AllocatePagesIndiscriminately(PGMM pGMM, PGVM pGVM, PGMMCHUNKFREESET pSet,
                                                   uint32_t iPage, uint32_t cPages, PGMMPAGEDESC paPages)
{
    unsigned iList = RT_ELEMENTS(pSet->apLists);
    while (iList-- > 0)
    {
        PGMMCHUNK pChunk = pSet->apLists[iList];
        while (pChunk)
        {
            PGMMCHUNK pNext = pChunk->pFreeNext;

            iPage = gmmR0AllocatePagesFromChunk(pGMM, pGVM->hSelf, pChunk, iPage, cPages, paPages);
            if (iPage >= cPages)
                return iPage;

            pChunk = pNext;
        }
    }
    return iPage;
}


/**
 * Pick pages from empty chunks on the same NUMA node.
 *
 * @returns The new page descriptor table index.
 * @param   pGMM        Pointer to the GMM instance data.
 * @param   pGVM        Pointer to the global VM structure.
 * @param   pSet        The set to pick from.
 * @param   iPage       The current page descriptor table index.
 * @param   cPages      The total number of pages to allocate.
 * @param   paPages     The page descriptor table (input + output).
 */
static uint32_t gmmR0AllocatePagesFromEmptyChunksOnSameNode(PGMM pGMM, PGVM pGVM, PGMMCHUNKFREESET pSet,
                                                            uint32_t iPage, uint32_t cPages, PGMMPAGEDESC paPages)
{
    PGMMCHUNK pChunk = pSet->apLists[GMM_CHUNK_FREE_SET_UNUSED_LIST];
    if (pChunk)
    {
        uint16_t const idNumaNode = gmmR0GetCurrentNumaNodeId();
        while (pChunk)
        {
            PGMMCHUNK pNext = pChunk->pFreeNext;

            if (pChunk->idNumaNode == idNumaNode)
            {
                pChunk->hGVM = pGVM->hSelf;
                iPage = gmmR0AllocatePagesFromChunk(pGMM, pGVM->hSelf, pChunk, iPage, cPages, paPages);
                if (iPage >= cPages)
                {
                    pGVM->gmm.s.idLastChunkHint = pChunk->cFree ? pChunk->Core.Key : NIL_GMM_CHUNKID;
                    return iPage;
                }
            }

            pChunk = pNext;
        }
    }
    return iPage;
}
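gmmR0AllocateChunkNew is the one path that deliberately drops the giant GMM mutex, since RTR0MemObjAllocPhysNC can be slow. The sketch below shows the same drop/reacquire shape transposed to pthreads so it is self-contained; allocChunkDroppingLock and slowAllocChunk are hypothetical names, and the 2 MiB chunk size is an assumption about GMM_CHUNK_SIZE not shown in this changeset.

#include <pthread.h>
#include <stdlib.h>

static pthread_mutex_t g_GiantMtx = PTHREAD_MUTEX_INITIALIZER;

/* Placeholder for a slow physical-memory allocation like RTR0MemObjAllocPhysNC. */
static void *slowAllocChunk(size_t cbChunk)
{
    return malloc(cbChunk);
}

/* Hypothetical helper mirroring the shape of gmmR0AllocateChunkNew. */
static int allocChunkDroppingLock(void **ppvChunk, size_t cbChunk)
{
    /* Caller holds g_GiantMtx.  Drop it so other threads can allocate and
       free pages while we wait on the (potentially slow) allocator. */
    pthread_mutex_unlock(&g_GiantMtx);

    void *pvChunk = slowAllocChunk(cbChunk);

    /* Take the lock back before touching shared state again.  Every decision
       made before the unlock is stale now and must be re-derived; this is why
       the callers of gmmR0AllocateChunkNew re-check iPage against cPages in a
       loop after each call. */
    pthread_mutex_lock(&g_GiantMtx);

    if (!pvChunk)
        return -1;  /* stand-in for VERR_NO_MEMORY */
    *ppvChunk = pvChunk;
    return 0;
}

int main(void)
{
    void *pvChunk = NULL;
    pthread_mutex_lock(&g_GiantMtx);
    int rc = allocChunkDroppingLock(&pvChunk, 1 << 21 /* assumed 2 MiB chunk */);
    pthread_mutex_unlock(&g_GiantMtx);
    free(pvChunk);
    return rc;
}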
/**
 * Pick pages from non-empty chunks on the same NUMA node.
 *
 * @returns The new page descriptor table index.
 * @param   pGMM        Pointer to the GMM instance data.
 * @param   pGVM        Pointer to the global VM structure.
 * @param   pSet        The set to pick from.
 * @param   iPage       The current page descriptor table index.
 * @param   cPages      The total number of pages to allocate.
 * @param   paPages     The page descriptor table (input + output).
 */
static uint32_t gmmR0AllocatePagesFromSameNode(PGMM pGMM, PGVM pGVM, PGMMCHUNKFREESET pSet,
                                               uint32_t iPage, uint32_t cPages, PGMMPAGEDESC paPages)
{
    /** @todo start by picking from chunks with about the right size first? */
    uint16_t const idNumaNode = gmmR0GetCurrentNumaNodeId();
    unsigned       iList      = GMM_CHUNK_FREE_SET_UNUSED_LIST;
    while (iList-- > 0)
    {
        PGMMCHUNK pChunk = pSet->apLists[iList];
        while (pChunk)
        {
            PGMMCHUNK pNext = pChunk->pFreeNext;

            if (pChunk->idNumaNode == idNumaNode)
            {
                iPage = gmmR0AllocatePagesFromChunk(pGMM, pGVM->hSelf, pChunk, iPage, cPages, paPages);
                if (iPage >= cPages)
                {
                    pGVM->gmm.s.idLastChunkHint = pChunk->cFree ? pChunk->Core.Key : NIL_GMM_CHUNKID;
                    return iPage;
                }
            }

            pChunk = pNext;
        }
    }
    return iPage;
}


/**
 * Pick pages that are in chunks already associated with the VM.
 *
 * @returns The new page descriptor table index.
 * @param   pGMM        Pointer to the GMM instance data.
 * @param   pGVM        Pointer to the global VM structure.
 * @param   pSet        The set to pick from.
 * @param   iPage       The current page descriptor table index.
 * @param   cPages      The total number of pages to allocate.
 * @param   paPages     The page descriptor table (input + output).
 */
static uint32_t gmmR0AllocatePagesAssociatedWithVM(PGMM pGMM, PGVM pGVM, PGMMCHUNKFREESET pSet,
                                                   uint32_t iPage, uint32_t cPages, PGMMPAGEDESC paPages)
{
    uint16_t const hGVM = pGVM->hSelf;

    /* Hint. */
    if (pGVM->gmm.s.idLastChunkHint != NIL_GMM_CHUNKID)
    {
        PGMMCHUNK pChunk = gmmR0GetChunk(pGMM, pGVM->gmm.s.idLastChunkHint);
        if (pChunk && pChunk->cFree)
        {
            iPage = gmmR0AllocatePagesFromChunk(pGMM, hGVM, pChunk, iPage, cPages, paPages);
            if (iPage >= cPages)
                return iPage;
        }
    }

    /* Scan. */
    for (unsigned iList = 0; iList < RT_ELEMENTS(pSet->apLists); iList++)
    {
        PGMMCHUNK pChunk = pSet->apLists[iList];
        while (pChunk)
        {
            PGMMCHUNK pNext = pChunk->pFreeNext;

            if (pChunk->hGVM == hGVM)
            {
                iPage = gmmR0AllocatePagesFromChunk(pGMM, hGVM, pChunk, iPage, cPages, paPages);
                if (iPage >= cPages)
                {
                    pGVM->gmm.s.idLastChunkHint = pChunk->cFree ? pChunk->Core.Key : NIL_GMM_CHUNKID;
                    return iPage;
                }
            }

            pChunk = pNext;
        }
    }
    return iPage;
}
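idLastChunkHint is a one-entry cache: whenever a scan ends on a chunk that still has pages left, its ID is remembered, and the next allocation tries that chunk before walking any free lists. The runnable toy below shows the hint-first/scan-second shape; every type and helper in it is invented for the example.

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

#define NIL_CHUNKID 0xffffffffu
#define NUM_CHUNKS  4u

typedef struct Chunk { uint32_t idChunk; uint32_t cFree; } Chunk;

static Chunk g_aChunks[NUM_CHUNKS] = { { 0, 0 }, { 1, 12 }, { 2, 0 }, { 3, 500 } };
static uint32_t g_idLastChunkHint  = NIL_CHUNKID;

static Chunk *lookupChunk(uint32_t idChunk)  /* stands in for gmmR0GetChunk */
{
    return idChunk < NUM_CHUNKS ? &g_aChunks[idChunk] : NULL;
}

static uint32_t takePages(Chunk *pChunk, uint32_t cWanted)
{
    uint32_t cTaken = pChunk->cFree < cWanted ? pChunk->cFree : cWanted;
    pChunk->cFree -= cTaken;
    return cTaken;
}

static uint32_t allocPages(uint32_t cWanted)
{
    uint32_t cTaken = 0;

    /* Hint first: skips the full scan when the same VM allocates repeatedly. */
    if (g_idLastChunkHint != NIL_CHUNKID)
    {
        Chunk *pChunk = lookupChunk(g_idLastChunkHint);
        if (pChunk && pChunk->cFree)
            cTaken = takePages(pChunk, cWanted);
    }

    /* Full scan for the remainder; refresh the hint the way the real code
       does, remembering a chunk only if it still has free pages afterwards. */
    for (uint32_t i = 0; i < NUM_CHUNKS && cTaken < cWanted; i++)
        if (g_aChunks[i].cFree)
        {
            cTaken += takePages(&g_aChunks[i], cWanted - cTaken);
            g_idLastChunkHint = g_aChunks[i].cFree ? g_aChunks[i].idChunk : NIL_CHUNKID;
        }
    return cTaken;
}

int main(void)
{
    printf("got %" PRIu32 " pages, hint now %#" PRIx32 "\n", allocPages(20), g_idLastChunkHint);
    printf("got %" PRIu32 " pages, hint now %#" PRIx32 "\n", allocPages(20), g_idLastChunkHint);
    return 0;
}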
/**
 * Pick pages in bound memory mode.
 *
 * @returns The new page descriptor table index.
 * @param   pGMM        Pointer to the GMM instance data.
 * @param   pGVM        Pointer to the global VM structure.
 * @param   iPage       The current page descriptor table index.
 * @param   cPages      The total number of pages to allocate.
 * @param   paPages     The page descriptor table (input + output).
 */
static uint32_t gmmR0AllocatePagesInBoundMode(PGMM pGMM, PGVM pGVM, uint32_t iPage, uint32_t cPages, PGMMPAGEDESC paPages)
{
    for (unsigned iList = 0; iList < RT_ELEMENTS(pGVM->gmm.s.Private.apLists); iList++)
    {
        PGMMCHUNK pChunk = pGVM->gmm.s.Private.apLists[iList];
        while (pChunk)
        {
            Assert(pChunk->hGVM == pGVM->hSelf);
            PGMMCHUNK pNext = pChunk->pFreeNext;
            iPage = gmmR0AllocatePagesFromChunk(pGMM, pGVM->hSelf, pChunk, iPage, cPages, paPages);
            if (iPage >= cPages)
                return iPage;
            pChunk = pNext;
        }
    }
    return iPage;
}


/**
 * Checks if we should start picking pages from chunks of other VMs.
 *
 * @returns @c true if we should, @c false if we should first try to allocate
 *          more chunks.
 */
static bool gmmR0ShouldAllocatePagesInOtherChunks(PGVM pGVM)
{
    /*
     * Don't allocate a new chunk if we're close to hitting the reservation limit.
     */
    uint64_t cPgReserved  = pGVM->gmm.s.Reserved.cBasePages
                          + pGVM->gmm.s.Reserved.cFixedPages
                          - pGVM->gmm.s.cBalloonedPages
                          /** @todo what about shared pages? */;
    uint64_t cPgAllocated = pGVM->gmm.s.Allocated.cBasePages
                          + pGVM->gmm.s.Allocated.cFixedPages;
    uint64_t cPgDelta     = cPgReserved - cPgAllocated;
    if (cPgDelta < GMM_CHUNK_NUM_PAGES * 4)
        return true;
    /** @todo make the threshold configurable, also test the code to see if
     *        this ever kicks in (we might be reserving too much or something). */

    /*
     * Check how close we are to the max memory limit and how many fragments
     * there are?...
     */
    /** @todo */

    return false;
}
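Worked numbers for that heuristic: assuming the usual 2 MiB GMM chunk of 512 × 4 KiB pages (the definition of GMM_CHUNK_NUM_PAGES is not shown in this changeset), GMM_CHUNK_NUM_PAGES * 4 is 2048 pages, roughly 8 MiB. Once a VM's unallocated reservation drops below that, a brand-new chunk would mostly go to waste, so the allocator prefers nibbling at other VMs' partially used chunks:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Assumed geometry: 2 MiB chunks of 4 KiB pages => 512 pages per chunk. */
#define CHUNK_NUM_PAGES 512u

static bool shouldPickFromOtherChunks(uint64_t cPgReserved, uint64_t cPgAllocated)
{
    /* Less than 4 chunks' worth (~8 MiB) of headroom left: stop claiming
       whole new chunks and start using other VMs' partially used ones. */
    return cPgReserved - cPgAllocated < CHUNK_NUM_PAGES * 4;
}

int main(void)
{
    /* 1 GiB reserved = 262144 pages; 260500 already allocated. */
    printf("%d\n", shouldPickFromOtherChunks(262144, 260500)); /* 1644 < 2048  -> 1 */
    printf("%d\n", shouldPickFromOtherChunks(262144, 200000)); /* 62144 >= 2048 -> 0 */
    return 0;
}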
/**
 * Common worker for GMMR0AllocateHandyPages and GMMR0AllocatePages.
 *
 * @returns VBox status code:
 * @retval  VINF_SUCCESS on success.
 * @retval  VERR_GMM_SEED_ME if seeding via GMMR0SeedChunk or
 *          gmmR0AllocateMoreChunks is necessary.
 * @retval  VERR_GMM_HIT_GLOBAL_LIMIT if we've exhausted the available pages.
 * @retval  VERR_GMM_HIT_VM_ACCOUNT_LIMIT if we've hit the VM account limit,
 *          that is, we're trying to allocate more than we've reserved.
 *
 * @param   pGMM        Pointer to the GMM instance data.
 * @param   pGVM        Pointer to the shared VM structure.
 * @param   cPages      The number of pages to allocate.
 * @param   paPages     Pointer to the page descriptors.  See GMMPAGEDESC for
 *                      details on what is expected on input.
 * @param   enmAccount  The account to charge.
 *
 * @remarks Caller takes the giant GMM lock.
 */
static int gmmR0AllocatePagesNew(PGMM pGMM, PGVM pGVM, uint32_t cPages, PGMMPAGEDESC paPages, GMMACCOUNT enmAccount)
{
    Assert(pGMM->hMtxOwner == RTThreadNativeSelf());

    /*
     * Check allocation limits.
     */
    if (RT_UNLIKELY(pGMM->cAllocatedPages + cPages > pGMM->cMaxPages))
        return VERR_GMM_HIT_GLOBAL_LIMIT;

    switch (enmAccount)
    {
        case GMMACCOUNT_BASE:
            if (RT_UNLIKELY(   pGVM->gmm.s.Allocated.cBasePages + pGVM->gmm.s.cBalloonedPages + cPages
                            >  pGVM->gmm.s.Reserved.cBasePages))
            {
                Log(("gmmR0AllocatePages:Base: Reserved=%#llx Allocated+Ballooned+Requested=%#llx+%#llx+%#x!\n",
                     pGVM->gmm.s.Reserved.cBasePages, pGVM->gmm.s.Allocated.cBasePages, pGVM->gmm.s.cBalloonedPages, cPages));
                return VERR_GMM_HIT_VM_ACCOUNT_LIMIT;
            }
            break;
        case GMMACCOUNT_SHADOW:
            if (RT_UNLIKELY(pGVM->gmm.s.Allocated.cShadowPages + cPages > pGVM->gmm.s.Reserved.cShadowPages))
            {
                Log(("gmmR0AllocatePages:Shadow: Reserved=%#x Allocated+Requested=%#x+%#x!\n",
                     pGVM->gmm.s.Reserved.cShadowPages, pGVM->gmm.s.Allocated.cShadowPages, cPages));
                return VERR_GMM_HIT_VM_ACCOUNT_LIMIT;
            }
            break;
        case GMMACCOUNT_FIXED:
            if (RT_UNLIKELY(pGVM->gmm.s.Allocated.cFixedPages + cPages > pGVM->gmm.s.Reserved.cFixedPages))
            {
                Log(("gmmR0AllocatePages:Fixed: Reserved=%#x Allocated+Requested=%#x+%#x!\n",
                     pGVM->gmm.s.Reserved.cFixedPages, pGVM->gmm.s.Allocated.cFixedPages, cPages));
                return VERR_GMM_HIT_VM_ACCOUNT_LIMIT;
            }
            break;
        default:
            AssertMsgFailedReturn(("enmAccount=%d\n", enmAccount), VERR_INTERNAL_ERROR);
    }

    /*
     * If we're in legacy memory mode, it's easy to figure out up-front
     * whether we have a sufficient number of pages.
     */
    if (   pGMM->fLegacyAllocationMode
        && pGVM->gmm.s.Private.cFreePages < cPages)
    {
        Assert(pGMM->fBoundMemoryMode);
        return VERR_GMM_SEED_ME;
    }

    /*
     * Update the accounts before we proceed because we might be leaving the
     * protection of the global mutex and thus run the risk of permitting
     * too much memory to be allocated.
     */
    switch (enmAccount)
    {
        case GMMACCOUNT_BASE:   pGVM->gmm.s.Allocated.cBasePages   += cPages; break;
        case GMMACCOUNT_SHADOW: pGVM->gmm.s.Allocated.cShadowPages += cPages; break;
        case GMMACCOUNT_FIXED:  pGVM->gmm.s.Allocated.cFixedPages  += cPages; break;
        default:                AssertMsgFailedReturn(("enmAccount=%d\n", enmAccount), VERR_INTERNAL_ERROR);
    }
    pGVM->gmm.s.cPrivatePages += cPages;
    pGMM->cAllocatedPages     += cPages;

    /*
     * Part two of it's-easy-in-legacy-memory-mode.
     */
    uint32_t iPage = 0;
    if (pGMM->fLegacyAllocationMode)
    {
        iPage = gmmR0AllocatePagesInBoundMode(pGMM, pGVM, iPage, cPages, paPages);
        AssertReleaseReturn(iPage == cPages, VERR_INTERNAL_ERROR_3);
        return VINF_SUCCESS;
    }

    /*
     * Bound mode is also relatively straightforward.
     */
    int rc = VINF_SUCCESS;
    if (pGMM->fBoundMemoryMode)
    {
        iPage = gmmR0AllocatePagesInBoundMode(pGMM, pGVM, iPage, cPages, paPages);
        if (iPage < cPages)
            do
                rc = gmmR0AllocateChunkNew(pGMM, pGVM, &pGVM->gmm.s.Private, cPages, paPages, &iPage);
            while (iPage < cPages && RT_SUCCESS(rc));
    }
    /*
     * Shared mode is trickier as we should try to achieve the same locality as
     * in bound mode, but smartly make use of non-full chunks allocated by
     * other VMs if we're low on memory.
     */
    else
    {
        /* Pick the most optimal pages first. */
        iPage = gmmR0AllocatePagesAssociatedWithVM(pGMM, pGVM, &pGMM->PrivateX, iPage, cPages, paPages);
        if (iPage < cPages)
        {
            /* Maybe we should try getting pages from chunks "belonging" to
               other VMs before allocating more chunks? */
            if (gmmR0ShouldAllocatePagesInOtherChunks(pGVM))
                iPage = gmmR0AllocatePagesFromSameNode(pGMM, pGVM, &pGMM->PrivateX, iPage, cPages, paPages);

            /* Allocate memory from empty chunks. */
            if (iPage < cPages)
                iPage = gmmR0AllocatePagesFromEmptyChunksOnSameNode(pGMM, pGVM, &pGMM->PrivateX, iPage, cPages, paPages);

            /* Grab empty shared chunks. */
            if (iPage < cPages)
                iPage = gmmR0AllocatePagesFromEmptyChunksOnSameNode(pGMM, pGVM, &pGMM->Shared, iPage, cPages, paPages);

            /*
             * Ok, try allocate new chunks.
             */
            if (iPage < cPages)
            {
                do
                    rc = gmmR0AllocateChunkNew(pGMM, pGVM, &pGMM->PrivateX, cPages, paPages, &iPage);
                while (iPage < cPages && RT_SUCCESS(rc));

                /* If the host is out of memory, take whatever we can get. */
                if (   rc == VERR_NO_MEMORY
                    && pGMM->PrivateX.cFreePages + pGMM->Shared.cFreePages >= cPages - iPage)
                {
                    iPage = gmmR0AllocatePagesIndiscriminately(pGMM, pGVM, &pGMM->PrivateX, iPage, cPages, paPages);
                    if (iPage < cPages)
                        iPage = gmmR0AllocatePagesIndiscriminately(pGMM, pGVM, &pGMM->Shared, iPage, cPages, paPages);
                    AssertRelease(iPage == cPages);
                    rc = VINF_SUCCESS;
                }
            }
        }
    }

    /*
     * Clean up on failure.  Since this is bound to be a low-memory condition
     * we will give back any empty chunks that might be hanging around.
     */
    if (RT_FAILURE(rc))
    {
        /* Update the statistics. */
        pGVM->gmm.s.cPrivatePages -= cPages;
        pGMM->cAllocatedPages     -= cPages - iPage;
        switch (enmAccount)
        {
            case GMMACCOUNT_BASE:   pGVM->gmm.s.Allocated.cBasePages   -= cPages; break;
            case GMMACCOUNT_SHADOW: pGVM->gmm.s.Allocated.cShadowPages -= cPages; break;
            case GMMACCOUNT_FIXED:  pGVM->gmm.s.Allocated.cFixedPages  -= cPages; break;
            default:                AssertMsgFailedReturn(("enmAccount=%d\n", enmAccount), VERR_INTERNAL_ERROR);
        }

        /* Release the pages. */
        while (iPage-- > 0)
        {
            uint32_t idPage = paPages[iPage].idPage;
            PGMMPAGE pPage  = gmmR0GetPage(pGMM, idPage);
            if (RT_LIKELY(pPage))
            {
                Assert(GMM_PAGE_IS_PRIVATE(pPage));
                Assert(pPage->Private.hGVM == pGVM->hSelf);
                gmmR0FreePrivatePage(pGMM, pGVM, idPage, pPage);
            }
            else
                AssertMsgFailed(("idPage=%#x\n", idPage));
        }

        /* Free empty chunks. */
        /** @todo */
    }
    return rc;
}
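Note the ordering above: the accounts are charged before any allocation happens, because gmmR0AllocateChunkNew may drop the giant lock midway, and a racing caller must already see the pages as spoken for. On failure, the charge is refunded and any pages handed out so far are released. A self-contained sketch of that charge/roll-back shape, using a single hypothetical account instead of the BASE/SHADOW/FIXED triple:

#include <stdint.h>
#include <stdio.h>

typedef struct Account { uint64_t cAllocated, cReserved; } Account;

/* Stand-in allocator that "succeeds" for half the pages and then fails. */
static int tryAllocate(uint32_t *piDone, uint32_t cPages)
{
    *piDone = cPages / 2;
    return -2;                            /* stand-in for VERR_NO_MEMORY */
}

static void freeOnePage(uint32_t iPage) { (void)iPage; }

static int allocateCharged(Account *pAcct, uint32_t cPages)
{
    if (pAcct->cAllocated + cPages > pAcct->cReserved)
        return -1;                        /* VERR_GMM_HIT_VM_ACCOUNT_LIMIT */

    /* Charge up-front: once the lock can be dropped, these pages must
       already count against the reservation or the VM could over-commit. */
    pAcct->cAllocated += cPages;

    uint32_t iDone = 0;
    int rc = tryAllocate(&iDone, cPages);
    if (rc != 0)
    {
        pAcct->cAllocated -= cPages;      /* refund the full charge... */
        while (iDone-- > 0)               /* ...and give back partial results */
            freeOnePage(iDone);
    }
    return rc;
}

int main(void)
{
    Account Acct = { 100, 1000 };
    int rc = allocateCharged(&Acct, 64);
    printf("rc=%d cAllocated=%llu (charge rolled back)\n", rc, (unsigned long long)Acct.cAllocated);
    return 0;
}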
At the call sites the new code path is switched on.  In GMMR0AllocateHandyPages:

      * Note! gmmR0AllocateMoreChunks may leave the protection of the mutex!
      */
-#if 0
+#if 1
     rc = gmmR0AllocatePagesNew(pGMM, pGVM, cPagesToAlloc, paPages, GMMACCOUNT_BASE);
 #else

…

And in GMMR0AllocatePages, where the old seed loop is likewise compiled out:

             && pGVM->gmm.s.Reserved.cShadowPages))
     {
+#if 1
+        rc = gmmR0AllocatePagesNew(pGMM, pGVM, cPages, paPages, enmAccount);
+#else
         /*
          * gmmR0AllocatePages seed loop.

…

             rc = gmmR0AllocateMoreChunks(pGMM, pGVM, &pGMM->PrivateX, cPages, &Strategy);
         }
+#endif
     }
     else
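Taken together, the new shared-mode allocator tries its strategies in a fixed order. The toy program below mirrors only that ordering; the pick* callbacks just pretend to find a few pages each:

#include <stdint.h>
#include <stdio.h>

typedef uint32_t (*PFNPICK)(uint32_t iPage, uint32_t cPages);

static uint32_t pickAssociated(uint32_t iPage, uint32_t cPages) { (void)cPages; return iPage + 3; }
static uint32_t pickSameNode(uint32_t iPage, uint32_t cPages)   { (void)cPages; return iPage + 2; }
static uint32_t pickEmptyNode(uint32_t iPage, uint32_t cPages)  { (void)cPages; return iPage + 1; }
static uint32_t pickNewChunks(uint32_t iPage, uint32_t cPages)  { (void)cPages; return iPage + 1; }
static uint32_t pickAnything(uint32_t iPage, uint32_t cPages)   { return cPages; }

int main(void)
{
    static const struct { const char *pszWhat; PFNPICK pfn; } s_aOrder[] =
    {
        { "chunks already associated with this VM (hint first)",           pickAssociated },
        { "non-empty chunks on the same NUMA node (if low on reservation)", pickSameNode },
        { "empty chunks on the same NUMA node (private, then shared set)",  pickEmptyNode },
        { "freshly allocated chunks (drops the giant lock)",                pickNewChunks },
        { "anything at all, if the host is out of memory",                  pickAnything },
    };
    uint32_t const cPages = 8;
    uint32_t iPage = 0;
    for (unsigned i = 0; i < sizeof(s_aOrder) / sizeof(s_aOrder[0]) && iPage < cPages; i++)
    {
        iPage = s_aOrder[i].pfn(iPage, cPages);
        printf("%u/%u pages after trying %s\n", iPage, cPages, s_aOrder[i].pszWhat);
    }
    return 0;
}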