Timestamp:
    Oct 2, 2007 9:17:01 AM
svn:sync-xref-src-repo-rev:
    25000
Location:
    trunk/src/VBox/VMM/VMMR0
Files:
    3 edited
Legend:
    ' ' unmodified
    '+' added
    '-' removed
trunk/src/VBox/VMM/VMMR0/GMMR0.cpp (r5123 → r5135)
  * (The number of pages we've allocated from the host can be derived from this.) */
 uint32_t        cChunks;
+/** The number of currently ballooned pages. */
+uint64_t        cBalloonedPages;
 
 /** The legacy mode indicator.
…
 static DECLCALLBACK(int) gmmR0TermDestroyChunk(PAVLU32NODECORE pNode, void *pvGMM);
 static DECLCALLBACK(int) gmmR0FreeVMPagesInChunk(PAVLU32NODECORE pNode, void *pvhGVM);
+static void gmmR0FreeSharedPage(PGMM pGMM, uint32_t idPage, PGMMPAGE pPage);
…
  * Finds an allocation chunk.
  *
+ * This is not expected to fail and will bitch if it does.
+ *
  * @returns Pointer to the allocation chunk, NULL if not found.
  * @param   pGMM        Pointer to the GMM instance.
…
  * Finds a page.
  *
+ * This is not expected to fail and will bitch if it does.
+ *
  * @returns Pointer to the page, NULL if not found.
  * @param   pGMM        Pointer to the GMM instance.
…
 DECLINLINE(PGMMPAGE) gmmR0GetPage(PGMM pGMM, uint32_t idPage)
 {
+    /* must log pages that aren't found. */
     return NULL;
 }
+
 
 
 /**
…
 
     return pGMM->idChunkPrev = idChunk;
-}
-
-
-/**
- * Frees a chunk, giving it back to the host OS.
- *
- * @param   pGMM        Pointer to the GMM instance.
- * @param   pChunk      The chunk to free.
- */
-static void gmmR0FreeChunk(PGMM pGMM, PGMMCHUNK pChunk)
-{
-    /*
-     * If there are current mappings of the chunk, then request the
-     * VMs to unmap them. Reposition the chunk in the free list so
-     * it won't be a likely candidate for allocations.
-     */
-    if (pChunk->cMappings)
-    {
-        /** @todo R0 -> VM request */
-
-    }
-    else
-    {
-        /*
-         * Try free the memory object.
-         */
-        int rc = RTR0MemObjFree(pChunk->MemObj, false /* fFreeMappings */);
-        if (RT_SUCCESS(rc))
-        {
-            pChunk->MemObj = NIL_RTR0MEMOBJ;
-
-            /*
-             * Unlink it from everywhere.
-             */
-            gmmR0UnlinkChunk(pChunk);
-
-            PAVLU32NODECORE pCore = RTAvlU32Remove(&pGMM->pChunks, pChunk->Core.Key);
-            Assert(pCore == &pChunk->Core); NOREF(pCore);
-
-            PGMMCHUNKTLBE pTlbe = &pGMM->ChunkTLB.aEntries[GMM_CHUNKTLB_IDX(pCore->Key)];
-            if (pTlbe->pChunk == pChunk)
-            {
-                pTlbe->idChunk = NIL_GMM_CHUNKID;
-                pTlbe->pChunk = NULL;
-            }
-
-            Assert(pGMM->cChunks > 0);
-            pGMM->cChunks--;
-
-            /*
-             * Free the Chunk ID and struct.
-             */
-            gmmR0FreeChunkId(pGMM, pChunk->Core.Key);
-            pChunk->Core.Key = NIL_GMM_CHUNKID;
-
-            RTMemFree(pChunk->paMappings);
-            pChunk->paMappings = NULL;
-
-            RTMemFree(pChunk);
-        }
-        else
-            AssertRC(rc);
-    }
-}
-
-
-/**
- * Free page worker.
- *
- * The caller does all the statistic decrementing, we do all the incrementing.
- *
- * @param   pGMM        Pointer to the GMM instance data.
- * @param   pChunk      Pointer to the chunk this page belongs to.
- * @param   pPage       Pointer to the page.
- */
-static void gmmR0FreePageWorker(PGMM pGMM, PGMMCHUNK pChunk, PGMMPAGE pPage)
-{
-    /*
-     * Put the page on the free list.
-     */
-    pPage->u = 0;
-    pPage->Free.u2State = GMM_PAGE_STATE_FREE;
-    Assert(pChunk->iFreeHead < RT_ELEMENTS(pChunk->aPages) || pChunk->iFreeHead == UINT16_MAX);
-    pPage->Free.iNext = pChunk->iFreeHead;
-    pChunk->iFreeHead = pPage - &pChunk->aPages[0];
-
-    /*
-     * Update statistics (the cShared/cPrivate stats are up to date already),
-     * and relink the chunk if necessary.
-     */
-    if ((pChunk->cFree & GMM_CHUNK_FREE_SET_MASK) == 0)
-    {
-        gmmR0UnlinkChunk(pChunk);
-        pChunk->cFree++;
-        gmmR0LinkChunk(pChunk, pChunk->cShared ? &pGMM->Shared : &pGMM->Private);
-    }
-    else
-    {
-        pChunk->cFree++;
-        pChunk->pSet->cPages++;
-
-        /*
-         * If the chunk becomes empty, consider giving memory back to the host OS.
-         *
-         * The current strategy is to try give it back if there are other chunks
-         * in this free list, meaning if there are at least 240 free pages in this
-         * category. Note that since there are probably mappings of the chunk,
-         * it won't be freed up instantly, which probably screws up this logic
-         * a bit...
-         */
-        if (RT_UNLIKELY(   pChunk->cFree == GMM_CHUNK_NUM_PAGES
-                        && pChunk->pFreeNext
-                        && pChunk->pFreePrev))
-            gmmR0FreeChunk(pGMM, pChunk);
-    }
-}
-
-
-/**
- * Frees a shared page, the page is known to exist and be valid and such.
- *
- * @param   pGMM        Pointer to the GMM instance.
- * @param   idPage      The Page ID.
- * @param   pPage       The page structure.
- */
-static void gmmR0FreeSharedPage(PGMM pGMM, uint32_t idPage, PGMMPAGE pPage)
-{
-    PGMMCHUNK pChunk = gmmR0GetChunk(pGMM, idPage >> GMM_CHUNKID_SHIFT);
-    Assert(pChunk);
-    Assert(pChunk->cFree < GMM_CHUNK_NUM_PAGES);
-    Assert(pChunk->cShared > 0);
-    Assert(pGMM->cSharedPages > 0);
-    Assert(pGMM->cAllocatedPages > 0);
-
-    pChunk->cShared--;
-    pGMM->cAllocatedPages--;
-    pGMM->cSharedPages--;
-    gmmR0FreePageWorker(pGMM, pChunk, pPage);
 }
…
         case GMMACCOUNT_BASE:
             if (RT_UNLIKELY(pGVM->gmm.s.Allocated.cBasePages + cPages > pGVM->gmm.s.Reserved.cBasePages))
+            {
+                Log(("gmmR0AllocatePages: Reserved=%#llx Allocated+Requested=%#llx+%#x!\n",
+                     pGVM->gmm.s.Reserved.cBasePages, pGVM->gmm.s.Allocated.cBasePages, cPages));
                 return VERR_GMM_HIT_VM_ACCOUNT_LIMIT;
+            }
             break;
         case GMMACCOUNT_SHADOW:
             if (RT_UNLIKELY(pGVM->gmm.s.Allocated.cShadowPages + cPages > pGVM->gmm.s.Reserved.cShadowPages))
+            {
+                Log(("gmmR0AllocatePages: Reserved=%#llx Allocated+Requested=%#llx+%#x!\n",
+                     pGVM->gmm.s.Reserved.cShadowPages, pGVM->gmm.s.Allocated.cShadowPages, cPages));
                 return VERR_GMM_HIT_VM_ACCOUNT_LIMIT;
+            }
             break;
         case GMMACCOUNT_FIXED:
             if (RT_UNLIKELY(pGVM->gmm.s.Allocated.cFixedPages + cPages > pGVM->gmm.s.Reserved.cFixedPages))
+            {
+                Log(("gmmR0AllocatePages: Reserved=%#llx Allocated+Requested=%#llx+%#x!\n",
+                     pGVM->gmm.s.Reserved.cFixedPages, pGVM->gmm.s.Allocated.cFixedPages, cPages));
                 return VERR_GMM_HIT_VM_ACCOUNT_LIMIT;
+            }
             break;
         default:
…
     switch (enmAccount)
     {
-        case GMMACCOUNT_BASE:   pGVM->gmm.s.Allocated.cBasePages += iPage; break;
+        case GMMACCOUNT_BASE:   pGVM->gmm.s.Allocated.cBasePages   += iPage; break;
         case GMMACCOUNT_SHADOW: pGVM->gmm.s.Allocated.cShadowPages += iPage; break;
-        case GMMACCOUNT_FIXED:  pGVM->gmm.s.Allocated.cFixedPages += iPage; break;
+        case GMMACCOUNT_FIXED:  pGVM->gmm.s.Allocated.cFixedPages  += iPage; break;
         default:
             AssertMsgFailedReturn(("enmAccount=%d\n", enmAccount), VERR_INTERNAL_ERROR);
     }
     pGVM->gmm.s.cPrivatePages += iPage;
-    pGMM->cAllocatedPages += iPage;
+    pGMM->cAllocatedPages     += iPage;
 
     AssertMsgReturn(iPage == cPages, ("%d != %d\n", iPage, cPages), VERR_INTERNAL_ERROR);
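The new Log statements make the account-limit policy easy to see: an allocation fails with VERR_GMM_HIT_VM_ACCOUNT_LIMIT as soon as Allocated + Requested would exceed the reservation for the selected account. Below is a minimal standalone model of that check; ACCOUNT, PAGECOUNTS and checkAccountLimit are simplified stand-ins for illustration, not the real GMM structures.

#include <stdint.h>
#include <stdio.h>

/* Simplified stand-ins for the real GMM account bookkeeping. */
typedef enum { ACCOUNT_BASE, ACCOUNT_SHADOW, ACCOUNT_FIXED } ACCOUNT;
typedef struct { uint64_t cBasePages, cShadowPages, cFixedPages; } PAGECOUNTS;

/* Returns 0 on success, -1 if the allocation would exceed the reservation. */
static int checkAccountLimit(const PAGECOUNTS *pAllocated, const PAGECOUNTS *pReserved,
                             uint32_t cPages, ACCOUNT enmAccount)
{
    uint64_t cAllocated, cReserved;
    switch (enmAccount)
    {
        case ACCOUNT_BASE:   cAllocated = pAllocated->cBasePages;   cReserved = pReserved->cBasePages;   break;
        case ACCOUNT_SHADOW: cAllocated = pAllocated->cShadowPages; cReserved = pReserved->cShadowPages; break;
        case ACCOUNT_FIXED:  cAllocated = pAllocated->cFixedPages;  cReserved = pReserved->cFixedPages;  break;
        default: return -1;
    }
    if (cAllocated + cPages > cReserved)
    {
        printf("checkAccountLimit: Reserved=%#llx Allocated+Requested=%#llx+%#x!\n",
               (unsigned long long)cReserved, (unsigned long long)cAllocated, cPages);
        return -1;
    }
    return 0;
}

int main(void)
{
    PAGECOUNTS Allocated = { 1000, 0, 0 };
    PAGECOUNTS Reserved  = { 1024, 128, 64 };
    printf("base +16: %d\n", checkAccountLimit(&Allocated, &Reserved, 16, ACCOUNT_BASE)); /* ok */
    printf("base +64: %d\n", checkAccountLimit(&Allocated, &Reserved, 64, ACCOUNT_BASE)); /* limit hit */
    return 0;
}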
…
 GMMR0DECL(int) GMMR0AllocateHandyPages(PVM pVM, uint32_t cPagesToUpdate, uint32_t cPagesToAlloc, PGMMPAGEDESC paPages)
 {
+    LogFlow(("GMMR0AllocateHandyPages: pVM=%p cPagesToUpdate=%#x cPagesToAlloc=%#x paPages=%p\n",
+             pVM, cPagesToUpdate, cPagesToAlloc, paPages));
+
     /*
      * Validate, get basics and take the semaphore.
…
     /*
      * Perform the updates.
+     * Stop on the first error.
      */
     for (iPage = 0; iPage < cPagesToUpdate; iPage++)
…
             if (RT_LIKELY(GMM_PAGE_IS_PRIVATE(pPage)))
             {
-                AssertCompile(NIL_RTHCPHYS > GMM_GCPHYS_END && GMM_GCPHYS_UNSHAREABLE > GMM_GCPHYS_END);
-                if (RT_LIKELY(paPages[iPage].HCPhysGCPhys < GMM_GCPHYS_END))
-                    pPage->Private.pfn = paPages[iPage].HCPhysGCPhys >> PAGE_SHIFT;
-                else if (paPages[iPage].HCPhysGCPhys == GMM_GCPHYS_UNSHAREABLE)
-                    pPage->Private.pfn = GMM_PAGE_PFN_UNSHAREABLE;
-                /* else: NIL_RTHCPHYS nothing */
-
-                paPages[iPage].idPage = NIL_GMM_PAGEID;
-                paPages[iPage].HCPhysGCPhys = NIL_RTHCPHYS;
+                if (RT_LIKELY(pPage->Private.hGVM == pGVM->hSelf))
+                {
+                    AssertCompile(NIL_RTHCPHYS > GMM_GCPHYS_END && GMM_GCPHYS_UNSHAREABLE > GMM_GCPHYS_END);
+                    if (RT_LIKELY(paPages[iPage].HCPhysGCPhys < GMM_GCPHYS_END))
+                        pPage->Private.pfn = paPages[iPage].HCPhysGCPhys >> PAGE_SHIFT;
+                    else if (paPages[iPage].HCPhysGCPhys == GMM_GCPHYS_UNSHAREABLE)
+                        pPage->Private.pfn = GMM_PAGE_PFN_UNSHAREABLE;
+                    /* else: NIL_RTHCPHYS nothing */
+
+                    paPages[iPage].idPage = NIL_GMM_PAGEID;
+                    paPages[iPage].HCPhysGCPhys = NIL_RTHCPHYS;
+                }
+                else
+                {
+                    Log(("GMMR0AllocateHandyPages: #%#x/%#x: Not owner! hGVM=%#x hSelf=%#x\n",
+                         iPage, paPages[iPage].idPage, pPage->Private.hGVM, pGVM->hSelf));
+                    rc = VERR_GMM_NOT_PAGE_OWNER;
+                    break;
+                }
             }
             else
+            {
+                Log(("GMMR0AllocateHandyPages: #%#x/%#x: Not private!\n", iPage, paPages[iPage].idPage));
                 rc = VERR_GMM_PAGE_NOT_PRIVATE;
+                break;
+            }
         }
         else
+        {
+            Log(("GMMR0AllocateHandyPages: #%#x/%#x: Not found! (private)\n", iPage, paPages[iPage].idPage));
             rc = VERR_GMM_PAGE_NOT_FOUND;
+            break;
+        }
     }
…
             AssertCompile(NIL_RTHCPHYS > GMM_GCPHYS_END && GMM_GCPHYS_UNSHAREABLE > GMM_GCPHYS_END);
             Assert(pPage->Shared.cRefs);
+            Assert(pGVM->gmm.s.cSharedPages);
+            Assert(pGVM->gmm.s.Allocated.cBasePages);
+
+            pGVM->gmm.s.cSharedPages--;
+            pGVM->gmm.s.Allocated.cBasePages--;
             if (!--pPage->Shared.cRefs)
-            {
-                Assert(pGVM->gmm.s.cSharedPages);
-                pGVM->gmm.s.cSharedPages--;
-                Assert(pGVM->gmm.s.Allocated.cBasePages);
-                pGVM->gmm.s.Allocated.cBasePages--;
                 gmmR0FreeSharedPage(pGMM, paPages[iPage].idSharedPage, pPage);
-            }
 
             paPages[iPage].idSharedPage = NIL_GMM_PAGEID;
         }
         else
+        {
+            Log(("GMMR0AllocateHandyPages: #%#x/%#x: Not shared!\n", iPage, paPages[iPage].idSharedPage));
             rc = VERR_GMM_PAGE_NOT_SHARED;
+            break;
+        }
     }
     else
+    {
+        Log(("GMMR0AllocateHandyPages: #%#x/%#x: Not found! (shared)\n", iPage, paPages[iPage].idSharedPage));
         rc = VERR_GMM_PAGE_NOT_FOUND;
+        break;
+    }
         }
     }
 
     /*
-     * And the allocation.
+     * Join paths with GMMR0AllocatePages for the allocation.
      */
     if (RT_SUCCESS(rc))
…
     RTSemFastMutexRelease(pGMM->Mtx);
-    LogFlow(("GMMR0UpdateReservation: returns %Rrc\n", rc));
+    LogFlow(("GMMR0AllocateHandyPages: returns %Rrc\n", rc));
     return rc;
 }
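GMMR0AllocateHandyPages now verifies ownership before updating a private page and stops on the first error. From the caller's side, each descriptor either reports where a previously allocated page was mapped or requests a fresh page. Here is a hypothetical sketch of how a ring-3 caller might fill the descriptors; MYPAGEDESC, the helper and the nil constants are stand-ins for the real GMMPAGEDESC contract, not VBox code.

#include <stdint.h>

/* Simplified stand-in for GMMPAGEDESC; the real structure lives in VBox/gmm.h. */
#define MY_NIL_PAGEID  UINT32_MAX
#define MY_NIL_HCPHYS  UINT64_MAX

typedef struct
{
    uint64_t HCPhysGCPhys;  /* in: guest physical address; out: host physical address */
    uint32_t idPage;        /* in: page to update;         out: newly allocated page id */
    uint32_t idSharedPage;  /* in: shared page to release, if any */
} MYPAGEDESC;

/* Prepare one "update" entry and one "alloc" entry the way a caller might
 * before issuing the handy-pages request (hypothetical helper). */
static void prepareHandyPages(MYPAGEDESC *paPages, uint32_t idExisting, uint64_t GCPhysExisting)
{
    /* Entry 0: tell GMM where the guest mapped a previously allocated private page. */
    paPages[0].idPage       = idExisting;
    paPages[0].HCPhysGCPhys = GCPhysExisting;
    paPages[0].idSharedPage = MY_NIL_PAGEID;   /* no shared page being replaced */

    /* Entry 1: request one fresh page; the ids are filled in on return. */
    paPages[1].idPage       = MY_NIL_PAGEID;
    paPages[1].HCPhysGCPhys = MY_NIL_HCPHYS;
    paPages[1].idSharedPage = MY_NIL_PAGEID;
}

int main(void)
{
    MYPAGEDESC aPages[2];
    prepareHandyPages(aPages, 42 /* idExisting */, 0x100000 /* 1MB guest physical */);
    return aPages[0].idPage == 42 ? 0 : 1;
}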
…
 
 /**
+ * Frees a chunk, giving it back to the host OS.
+ *
+ * @param   pGMM        Pointer to the GMM instance.
+ * @param   pChunk      The chunk to free.
+ */
+static void gmmR0FreeChunk(PGMM pGMM, PGMMCHUNK pChunk)
+{
+    /*
+     * If there are current mappings of the chunk, then request the
+     * VMs to unmap them. Reposition the chunk in the free list so
+     * it won't be a likely candidate for allocations.
+     */
+    if (pChunk->cMappings)
+    {
+        /** @todo R0 -> VM request */
+
+    }
+    else
+    {
+        /*
+         * Try free the memory object.
+         */
+        int rc = RTR0MemObjFree(pChunk->MemObj, false /* fFreeMappings */);
+        if (RT_SUCCESS(rc))
+        {
+            pChunk->MemObj = NIL_RTR0MEMOBJ;
+
+            /*
+             * Unlink it from everywhere.
+             */
+            gmmR0UnlinkChunk(pChunk);
+
+            PAVLU32NODECORE pCore = RTAvlU32Remove(&pGMM->pChunks, pChunk->Core.Key);
+            Assert(pCore == &pChunk->Core); NOREF(pCore);
+
+            PGMMCHUNKTLBE pTlbe = &pGMM->ChunkTLB.aEntries[GMM_CHUNKTLB_IDX(pCore->Key)];
+            if (pTlbe->pChunk == pChunk)
+            {
+                pTlbe->idChunk = NIL_GMM_CHUNKID;
+                pTlbe->pChunk = NULL;
+            }
+
+            Assert(pGMM->cChunks > 0);
+            pGMM->cChunks--;
+
+            /*
+             * Free the Chunk ID and struct.
+             */
+            gmmR0FreeChunkId(pGMM, pChunk->Core.Key);
+            pChunk->Core.Key = NIL_GMM_CHUNKID;
+
+            RTMemFree(pChunk->paMappings);
+            pChunk->paMappings = NULL;
+
+            RTMemFree(pChunk);
+        }
+        else
+            AssertRC(rc);
+    }
+}
+
+
+/**
+ * Free page worker.
+ *
+ * The caller does all the statistic decrementing, we do all the incrementing.
+ *
+ * @param   pGMM        Pointer to the GMM instance data.
+ * @param   pChunk      Pointer to the chunk this page belongs to.
+ * @param   pPage       Pointer to the page.
+ */
+static void gmmR0FreePageWorker(PGMM pGMM, PGMMCHUNK pChunk, PGMMPAGE pPage)
+{
+    /*
+     * Put the page on the free list.
+     */
+    pPage->u = 0;
+    pPage->Free.u2State = GMM_PAGE_STATE_FREE;
+    Assert(pChunk->iFreeHead < RT_ELEMENTS(pChunk->aPages) || pChunk->iFreeHead == UINT16_MAX);
+    pPage->Free.iNext = pChunk->iFreeHead;
+    pChunk->iFreeHead = pPage - &pChunk->aPages[0];
+
+    /*
+     * Update statistics (the cShared/cPrivate stats are up to date already),
+     * and relink the chunk if necessary.
+     */
+    if ((pChunk->cFree & GMM_CHUNK_FREE_SET_MASK) == 0)
+    {
+        gmmR0UnlinkChunk(pChunk);
+        pChunk->cFree++;
+        gmmR0LinkChunk(pChunk, pChunk->cShared ? &pGMM->Shared : &pGMM->Private);
+    }
+    else
+    {
+        pChunk->cFree++;
+        pChunk->pSet->cPages++;
+
+        /*
+         * If the chunk becomes empty, consider giving memory back to the host OS.
+         *
+         * The current strategy is to try give it back if there are other chunks
+         * in this free list, meaning if there are at least 240 free pages in this
+         * category. Note that since there are probably mappings of the chunk,
+         * it won't be freed up instantly, which probably screws up this logic
+         * a bit...
+         */
+        if (RT_UNLIKELY(   pChunk->cFree == GMM_CHUNK_NUM_PAGES
+                        && pChunk->pFreeNext
+                        && pChunk->pFreePrev))
+            gmmR0FreeChunk(pGMM, pChunk);
+    }
+}
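gmmR0FreePageWorker pushes freed pages onto a chunk-internal LIFO list linked by 16-bit array indices, with UINT16_MAX as the end-of-list marker. A self-contained model of that technique follows; CHUNK and PAGE are stand-in types, the real GMMCHUNK layout differs.

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

#define NUM_PAGES 512
#define NIL_INDEX UINT16_MAX

typedef struct { uint16_t iNext; uint8_t fFree; } PAGE;
typedef struct { PAGE aPages[NUM_PAGES]; uint16_t iFreeHead; uint16_t cFree; } CHUNK;

static void chunkFreePage(CHUNK *pChunk, PAGE *pPage)
{
    /* Push onto the LIFO free list, the same pattern as gmmR0FreePageWorker. */
    pPage->fFree = 1;
    assert(pChunk->iFreeHead < NUM_PAGES || pChunk->iFreeHead == NIL_INDEX);
    pPage->iNext = pChunk->iFreeHead;
    pChunk->iFreeHead = (uint16_t)(pPage - &pChunk->aPages[0]);
    pChunk->cFree++;
}

int main(void)
{
    static CHUNK Chunk;
    Chunk.iFreeHead = NIL_INDEX;
    chunkFreePage(&Chunk, &Chunk.aPages[7]);
    chunkFreePage(&Chunk, &Chunk.aPages[3]);
    printf("head=%u next=%u cFree=%u\n", (unsigned)Chunk.iFreeHead,
           (unsigned)Chunk.aPages[Chunk.iFreeHead].iNext, (unsigned)Chunk.cFree); /* head=3 next=7 cFree=2 */
    return 0;
}

Index links instead of pointers keep the per-page overhead at two bytes and survive the chunk being mapped at different addresses, which is presumably why GMM uses them.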
+
+
+/**
+ * Frees a shared page, the page is known to exist and be valid and such.
+ *
+ * @param   pGMM        Pointer to the GMM instance.
+ * @param   idPage      The Page ID.
+ * @param   pPage       The page structure.
+ */
+DECLINLINE(void) gmmR0FreeSharedPage(PGMM pGMM, uint32_t idPage, PGMMPAGE pPage)
+{
+    PGMMCHUNK pChunk = gmmR0GetChunk(pGMM, idPage >> GMM_CHUNKID_SHIFT);
+    Assert(pChunk);
+    Assert(pChunk->cFree < GMM_CHUNK_NUM_PAGES);
+    Assert(pChunk->cShared > 0);
+    Assert(pGMM->cSharedPages > 0);
+    Assert(pGMM->cAllocatedPages > 0);
+    Assert(!pPage->Shared.cRefs);
+
+    pChunk->cShared--;
+    pGMM->cAllocatedPages--;
+    pGMM->cSharedPages--;
+    gmmR0FreePageWorker(pGMM, pChunk, pPage);
+}
+
+
+/**
+ * Frees a private page, the page is known to exist and be valid and such.
+ *
+ * @param   pGMM        Pointer to the GMM instance.
+ * @param   idPage      The Page ID.
+ * @param   pPage       The page structure.
+ */
+DECLINLINE(void) gmmR0FreePrivatePage(PGMM pGMM, uint32_t idPage, PGMMPAGE pPage)
+{
+    PGMMCHUNK pChunk = gmmR0GetChunk(pGMM, idPage >> GMM_CHUNKID_SHIFT);
+    Assert(pChunk);
+    Assert(pChunk->cFree < GMM_CHUNK_NUM_PAGES);
+    Assert(pChunk->cPrivate > 0);
+    Assert(pGMM->cAllocatedPages > 0);
+
+    pChunk->cPrivate--;
+    pGMM->cAllocatedPages--;
+    gmmR0FreePageWorker(pGMM, pChunk, pPage);
+}
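gmmR0FreeSharedPage is only reached once the last reference is gone (note the new Assert(!pPage->Shared.cRefs)); callers drop a reference first and free only when the count hits zero. A tiny sketch of that pattern, with SHAREDPAGE and releaseSharedRef as stand-ins:

#include <stdint.h>
#include <stdio.h>

typedef struct { uint32_t cRefs; } SHAREDPAGE;

/* Returns non-zero when the caller must actually free the page; the caller
 * has already adjusted its own per-VM statistics, as in the diff above. */
static int releaseSharedRef(SHAREDPAGE *pPage)
{
    return --pPage->cRefs == 0;
}

int main(void)
{
    SHAREDPAGE Page = { 2 };
    printf("first release frees: %d\n", releaseSharedRef(&Page));  /* 0 */
    printf("second release frees: %d\n", releaseSharedRef(&Page)); /* 1 */
    return 0;
}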
+
+
+/**
+ * Common worker for GMMR0FreePages and GMMR0BalloonedPages.
+ *
+ * @returns VBox status code:
+ * @retval  xxx
+ *
+ * @param   pGMM        Pointer to the GMM instance data.
+ * @param   pGVM        Pointer to the shared VM structure.
+ * @param   cPages      The number of pages to free.
+ * @param   paPages     Pointer to the page descriptors.
+ * @param   enmAccount  The account this relates to.
+ */
+static int gmmR0FreePages(PGMM pGMM, PGVM pGVM, uint32_t cPages, PGMMFREEPAGEDESC paPages, GMMACCOUNT enmAccount)
+{
+    /*
+     * Check that the request isn't impossible wrt the account status.
+     */
+    switch (enmAccount)
+    {
+        case GMMACCOUNT_BASE:
+            if (RT_UNLIKELY(pGVM->gmm.s.Allocated.cBasePages < cPages))
+            {
+                Log(("gmmR0FreePages: allocated=%#llx cPages=%#x!\n", pGVM->gmm.s.Allocated.cBasePages, cPages));
+                return VERR_GMM_ATTEMPT_TO_FREE_TOO_MUCH;
+            }
+            break;
+        case GMMACCOUNT_SHADOW:
+            if (RT_UNLIKELY(pGVM->gmm.s.Allocated.cShadowPages < cPages))
+            {
+                Log(("gmmR0FreePages: allocated=%#llx cPages=%#x!\n", pGVM->gmm.s.Allocated.cShadowPages, cPages));
+                return VERR_GMM_ATTEMPT_TO_FREE_TOO_MUCH;
+            }
+            break;
+        case GMMACCOUNT_FIXED:
+            if (RT_UNLIKELY(pGVM->gmm.s.Allocated.cFixedPages < cPages))
+            {
+                Log(("gmmR0FreePages: allocated=%#llx cPages=%#x!\n", pGVM->gmm.s.Allocated.cFixedPages, cPages));
+                return VERR_GMM_ATTEMPT_TO_FREE_TOO_MUCH;
+            }
+            break;
+        default:
+            AssertMsgFailedReturn(("enmAccount=%d\n", enmAccount), VERR_INTERNAL_ERROR);
+    }
+
+    /*
+     * Walk the descriptors and free the pages.
+     *
+     * Statistics (except the account) are being updated as we go along,
+     * unlike in the allocation code. Also, stop on the first error.
+     */
+    int rc = VINF_SUCCESS;
+    uint32_t iPage;
+    for (iPage = 0; iPage < cPages; iPage++)
+    {
+        uint32_t idPage = paPages[iPage].idPage;
+        PGMMPAGE pPage = gmmR0GetPage(pGMM, idPage);
+        if (RT_LIKELY(pPage))
+        {
+            if (RT_LIKELY(GMM_PAGE_IS_PRIVATE(pPage)))
+            {
+                if (RT_LIKELY(pPage->Private.hGVM == pGVM->hSelf))
+                {
+                    Assert(pGVM->gmm.s.cPrivatePages);
+                    pGVM->gmm.s.cPrivatePages--;
+                    gmmR0FreePrivatePage(pGMM, idPage, pPage);
+                }
+                else
+                {
+                    Log(("gmmR0FreePages: #%#x/%#x: not owner! hGVM=%#x hSelf=%#x\n", iPage, idPage,
+                         pPage->Private.hGVM, pGVM->hSelf));
+                    rc = VERR_GMM_NOT_PAGE_OWNER;
+                    break;
+                }
+            }
+            else if (RT_LIKELY(GMM_PAGE_IS_SHARED(pPage)))
+            {
+                Assert(pGVM->gmm.s.cSharedPages);
+                pGVM->gmm.s.cSharedPages--;
+                Assert(pPage->Shared.cRefs);
+                if (!--pPage->Shared.cRefs)
+                    gmmR0FreeSharedPage(pGMM, idPage, pPage);
+            }
+            else
+            {
+                Log(("gmmR0FreePages: #%#x/%#x: already free!\n", iPage, idPage));
+                rc = VERR_GMM_PAGE_ALREADY_FREE;
+                break;
+            }
+        }
+        else
+        {
+            Log(("gmmR0FreePages: #%#x/%#x: not found!\n", iPage, idPage));
+            rc = VERR_GMM_PAGE_NOT_FOUND;
+            break;
+        }
+        paPages[iPage].idPage = NIL_GMM_PAGEID;
+    }
+
+    /*
+     * Update the account.
+     */
+    switch (enmAccount)
+    {
+        case GMMACCOUNT_BASE:   pGVM->gmm.s.Allocated.cBasePages   -= iPage; break;
+        case GMMACCOUNT_SHADOW: pGVM->gmm.s.Allocated.cShadowPages -= iPage; break;
+        case GMMACCOUNT_FIXED:  pGVM->gmm.s.Allocated.cFixedPages  -= iPage; break;
+        default:
+            AssertMsgFailedReturn(("enmAccount=%d\n", enmAccount), VERR_INTERNAL_ERROR);
+    }
+
+    /*
+     * Any threshold stuff to be done here?
+     */
+
+    return rc;
+}
+
+
+/**
  * Free one or more pages.
  *
…
 GMMR0DECL(int) GMMR0FreePages(PVM pVM, uint32_t cPages, PGMMFREEPAGEDESC paPages, GMMACCOUNT enmAccount)
 {
-    return VERR_NOT_IMPLEMENTED;
+    LogFlow(("GMMR0FreePages: pVM=%p cPages=%#x paPages=%p enmAccount=%d\n", pVM, cPages, paPages, enmAccount));
+
+    /*
+     * Validate input and get the basics.
+     */
+    PGMM pGMM;
+    GMM_GET_VALID_INSTANCE(pGMM, VERR_INTERNAL_ERROR);
+    PGVM pGVM = GVMMR0ByVM(pVM);
+    if (!pGVM)
+        return VERR_INVALID_PARAMETER;
+    if (pGVM->hEMT != RTThreadNativeSelf())
+        return VERR_NOT_OWNER;
+
+    AssertPtrReturn(paPages, VERR_INVALID_PARAMETER);
+    AssertMsgReturn(enmAccount > GMMACCOUNT_INVALID && enmAccount < GMMACCOUNT_END, ("%d\n", enmAccount), VERR_INVALID_PARAMETER);
+    AssertMsgReturn(cPages > 0 && cPages < RT_BIT(32 - PAGE_SHIFT), ("%#x\n", cPages), VERR_INVALID_PARAMETER);
+
+    for (unsigned iPage = 0; iPage < cPages; iPage++)
+        AssertMsgReturn(    paPages[iPage].idPage <= GMM_PAGEID_LAST
+                        /*|| paPages[iPage].idPage == NIL_GMM_PAGEID*/,
+                        ("#%#x: %#x\n", iPage, paPages[iPage].idPage), VERR_INVALID_PARAMETER);
+
+    /*
+     * Take the semaphore and call the worker function.
+     */
+    int rc = RTSemFastMutexRequest(pGMM->Mtx);
+    AssertRC(rc);
+
+    rc = gmmR0FreePages(pGMM, pGVM, cPages, paPages, enmAccount);
+
+    RTSemFastMutexRelease(pGMM->Mtx);
+    LogFlow(("GMMR0FreePages: returns %Rrc\n", rc));
+    return rc;
 }
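GMMR0FreePages validates every page id against GMM_PAGEID_LAST before taking the mutex, so a malformed request is rejected without touching shared state. A hypothetical caller-side sketch of that pre-validation; MYFREEPAGEDESC and MY_PAGEID_LAST stand in for the real GMMFREEPAGEDESC type and GMM_PAGEID_LAST value.

#include <stdint.h>
#include <stdio.h>

#define MY_PAGEID_LAST 0x00fffffeU   /* assumed value for illustration */

typedef struct { uint32_t idPage; } MYFREEPAGEDESC;

/* Mirror of the pre-mutex validation loop in GMMR0FreePages. */
static int validateFreeRequest(const MYFREEPAGEDESC *paPages, uint32_t cPages)
{
    if (!cPages)
        return -1;
    for (uint32_t iPage = 0; iPage < cPages; iPage++)
        if (paPages[iPage].idPage > MY_PAGEID_LAST)
        {
            printf("#%#x: bad page id %#x\n", iPage, paPages[iPage].idPage);
            return -1;
        }
    return 0;
}

int main(void)
{
    MYFREEPAGEDESC aPages[2] = { { 0x1234 }, { 0x5678 } };
    printf("validate: %d\n", validateFreeRequest(aPages, 2));
    return 0;
}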
…
 
 /**
- * Report ballooned pages, optionally together with the pages to free.
- *
- * The pages to be freed are always base (RAM) pages.
+ * Report back on a memory ballooning request.
+ *
+ * The request may or may not have been initiated by the GMM. If it was initiated
+ * by the GMM it is important that this function is called even if no pages were
+ * ballooned.
+ *
+ * Since the whole purpose of ballooning is to free up guest RAM pages, this API
+ * may also be given a set of related pages to be freed. These pages are assumed
+ * to be on the base account.
  *
  * @returns VBox status code:
…
  * @param   cPagesToFree    The number of pages to be freed.
  * @param   paPages         Pointer to the page descriptors for the pages to be freed.
+ * @param   fCompleted      Indicates whether the ballooning request was completed (true) or
+ *                          if there are more pages to come (false). If the ballooning was not
+ *                          triggered by the GMM, don't set this.
  * @thread  EMT.
  */
-GMMR0DECL(int) GMMR0BalloonedPages(PVM pVM, uint32_t cBalloonedPages, uint32_t cPagesToFree, PGMMFREEPAGEDESC paPages)
-{
-    return VERR_NOT_IMPLEMENTED;
+GMMR0DECL(int) GMMR0BalloonedPages(PVM pVM, uint32_t cBalloonedPages, uint32_t cPagesToFree, PGMMFREEPAGEDESC paPages, bool fCompleted)
+{
+    LogFlow(("GMMR0BalloonedPages: pVM=%p cBalloonedPages=%#x cPagesToFree=%#x paPages=%p fCompleted=%RTbool\n",
+             pVM, cBalloonedPages, cPagesToFree, paPages, fCompleted));
+
+    /*
+     * Validate input and get the basics.
+     */
+    PGMM pGMM;
+    GMM_GET_VALID_INSTANCE(pGMM, VERR_INTERNAL_ERROR);
+    PGVM pGVM = GVMMR0ByVM(pVM);
+    if (!pGVM)
+        return VERR_INVALID_PARAMETER;
+    if (pGVM->hEMT != RTThreadNativeSelf())
+        return VERR_NOT_OWNER;
+
+    AssertPtrReturn(paPages, VERR_INVALID_PARAMETER);
+    AssertMsgReturn(cBalloonedPages < RT_BIT(32 - PAGE_SHIFT), ("%#x\n", cBalloonedPages), VERR_INVALID_PARAMETER);
+    AssertMsgReturn(cPagesToFree <= cBalloonedPages, ("%#x\n", cPagesToFree), VERR_INVALID_PARAMETER);
+
+    for (unsigned iPage = 0; iPage < cPagesToFree; iPage++)
+        AssertMsgReturn(    paPages[iPage].idPage <= GMM_PAGEID_LAST
+                        /*|| paPages[iPage].idPage == NIL_GMM_PAGEID*/,
+                        ("#%#x: %#x\n", iPage, paPages[iPage].idPage), VERR_INVALID_PARAMETER);
+
+    /*
+     * Take the semaphore and do some more validations.
+     */
+    int rc = RTSemFastMutexRequest(pGMM->Mtx);
+    AssertRC(rc);
+    if (pGVM->gmm.s.Allocated.cBasePages >= cPagesToFree)
+    {
+        /*
+         * Record the ballooned memory.
+         */
+        pGMM->cBalloonedPages += cBalloonedPages;
+        if (pGVM->gmm.s.cReqBalloonedPages)
+        {
+            pGVM->gmm.s.cBalloonedPages += cBalloonedPages;
+            pGVM->gmm.s.cReqActuallyBalloonedPages += cBalloonedPages;
+            if (fCompleted)
+            {
+                Log(("GMMR0BalloonedPages: +%#x - Global=%#llx / VM: Total=%#llx Req=%#llx Actual=%#llx (completed)\n", cBalloonedPages,
+                     pGMM->cBalloonedPages, pGVM->gmm.s.cBalloonedPages, pGVM->gmm.s.cReqBalloonedPages, pGVM->gmm.s.cReqActuallyBalloonedPages));
+
+                /*
+                 * Anything we need to do here now that the request has been completed?
+                 */
+                pGVM->gmm.s.cReqBalloonedPages = 0;
+            }
+            else
+                Log(("GMMR0BalloonedPages: +%#x - Global=%#llx / VM: Total=%#llx Req=%#llx Actual=%#llx (pending)\n", cBalloonedPages,
+                     pGMM->cBalloonedPages, pGVM->gmm.s.cBalloonedPages, pGVM->gmm.s.cReqBalloonedPages, pGVM->gmm.s.cReqActuallyBalloonedPages));
+        }
+        else
+        {
+            pGVM->gmm.s.cBalloonedPages += cBalloonedPages;
+            Log(("GMMR0BalloonedPages: +%#x - Global=%#llx / VM: Total=%#llx (user)\n",
+                 cBalloonedPages, pGMM->cBalloonedPages, pGVM->gmm.s.cBalloonedPages));
+        }
+
+        /*
+         * Any pages to free?
+         */
+        if (cPagesToFree)
+            rc = gmmR0FreePages(pGMM, pGVM, cPagesToFree, paPages, GMMACCOUNT_BASE);
+    }
+    else
+    {
+        rc = VERR_GMM_ATTEMPT_TO_FREE_TOO_MUCH;
+    }
+
+    RTSemFastMutexRelease(pGMM->Mtx);
+    LogFlow(("GMMR0BalloonedPages: returns %Rrc\n", rc));
+    return rc;
 }
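The fCompleted flag drives the request bookkeeping: while cReqBalloonedPages is non-zero a GMM-initiated request is open, each report accumulates into cReqActuallyBalloonedPages, and completion clears the request counter without resetting the actual count. A standalone model of that accounting; BALLOONSTATE is a stand-in, with field names borrowed from GMMR0Internal.h.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

typedef struct
{
    uint64_t cBalloonedPages;             /* total currently ballooned */
    uint64_t cReqBalloonedPages;          /* open GMM-initiated request, 0 = none */
    uint64_t cReqActuallyBalloonedPages;  /* progress against the request */
} BALLOONSTATE;

static void recordBallooned(BALLOONSTATE *pState, uint32_t cPages, bool fCompleted)
{
    pState->cBalloonedPages += cPages;
    if (pState->cReqBalloonedPages)
    {
        pState->cReqActuallyBalloonedPages += cPages;
        if (fCompleted)
            pState->cReqBalloonedPages = 0; /* request done; keep the actual count */
    }
}

int main(void)
{
    BALLOONSTATE State = { 0, 256, 0 };   /* GMM asked the guest for 256 pages */
    recordBallooned(&State, 128, false);  /* guest reports progress */
    recordBallooned(&State, 128, true);   /* and completion */
    printf("ballooned=%llu req=%llu actual=%llu\n",
           (unsigned long long)State.cBalloonedPages,
           (unsigned long long)State.cReqBalloonedPages,
           (unsigned long long)State.cReqActuallyBalloonedPages);
    return 0;
}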
…
                     VERR_INVALID_PARAMETER);
 
-    return GMMR0BalloonedPages(pVM, pReq->cBalloonedPages, pReq->cPagesToFree, &pReq->aPages[0]);
+    return GMMR0BalloonedPages(pVM, pReq->cBalloonedPages, pReq->cPagesToFree, &pReq->aPages[0], pReq->fCompleted);
+}
+
+
+/**
+ * Report balloon deflating.
+ *
+ * @returns VBox status code:
+ * @retval  xxx
+ *
+ * @param   pVM         Pointer to the shared VM structure.
+ * @param   cPages      The number of pages that were let out of the balloon.
+ * @thread  EMT.
+ */
+GMMR0DECL(int) GMMR0DeflatedBalloon(PVM pVM, uint32_t cPages)
+{
+    LogFlow(("GMMR0DeflatedBalloon: pVM=%p cPages=%#x\n", pVM, cPages));
+
+    /*
+     * Validate input and get the basics.
+     */
+    PGMM pGMM;
+    GMM_GET_VALID_INSTANCE(pGMM, VERR_INTERNAL_ERROR);
+    PGVM pGVM = GVMMR0ByVM(pVM);
+    if (!pGVM)
+        return VERR_INVALID_PARAMETER;
+    if (pGVM->hEMT != RTThreadNativeSelf())
+        return VERR_NOT_OWNER;
+
+    AssertMsgReturn(cPages < RT_BIT(32 - PAGE_SHIFT), ("%#x\n", cPages), VERR_INVALID_PARAMETER);
+
+    /*
+     * Take the semaphore and do some more validations.
+     */
+    int rc = RTSemFastMutexRequest(pGMM->Mtx);
+    AssertRC(rc);
+
+    if (pGVM->gmm.s.cBalloonedPages >= cPages)
+    {
+        Assert(pGMM->cBalloonedPages >= pGVM->gmm.s.cBalloonedPages);
+
+        /*
+         * Record it.
+         */
+        pGMM->cBalloonedPages -= cPages;
+        pGVM->gmm.s.cBalloonedPages -= cPages;
+        if (pGVM->gmm.s.cReqDeflatePages)
+        {
+            Log(("GMMR0DeflatedBalloon: -%#x - Global=%#llx / VM: Total=%#llx Req=%#llx\n", cPages,
+                 pGMM->cBalloonedPages, pGVM->gmm.s.cBalloonedPages, pGVM->gmm.s.cReqDeflatePages));
+
+            /*
+             * Anything we need to do here now that the request has been completed?
+             */
+            pGVM->gmm.s.cReqDeflatePages = 0;
+        }
+        else
+        {
+            Log(("GMMR0DeflatedBalloon: -%#x - Global=%#llx / VM: Total=%#llx\n", cPages,
+                 pGMM->cBalloonedPages, pGVM->gmm.s.cBalloonedPages));
+        }
+    }
+    else
+    {
+        Log(("GMMR0DeflatedBalloon: cBalloonedPages=%#llx cPages=%#x\n", pGVM->gmm.s.cBalloonedPages, cPages));
+        rc = VERR_GMM_ATTEMPT_TO_DEFLATE_TOO_MUCH;
+    }
+
+    RTSemFastMutexRelease(pGMM->Mtx);
+    LogFlow(("GMMR0DeflatedBalloon: returns %Rrc\n", rc));
+    return rc;
 }
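GMMR0DeflatedBalloon refuses to deflate more pages than are currently ballooned; otherwise the unsigned counters would wrap around. A minimal model of that guard (recordDeflated is illustrative, not VBox code):

#include <stdint.h>
#include <stdio.h>

/* Returns 0 on success, -1 on the equivalent of VERR_GMM_ATTEMPT_TO_DEFLATE_TOO_MUCH. */
static int recordDeflated(uint64_t *pcBalloonedPages, uint32_t cPages)
{
    if (*pcBalloonedPages < cPages)
        return -1;                 /* would underflow the counter */
    *pcBalloonedPages -= cPages;
    return 0;
}

int main(void)
{
    uint64_t cBallooned = 100;
    printf("deflate 60: %d (left %llu)\n", recordDeflated(&cBallooned, 60), (unsigned long long)cBallooned);
    printf("deflate 60: %d (left %llu)\n", recordDeflated(&cBallooned, 60), (unsigned long long)cBallooned);
    return 0;
}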
trunk/src/VBox/VMM/VMMR0/GMMR0Internal.h (r5086 → r5135)
     /** The max number of pages that can be ballooned. */
     uint64_t        cMaxBalloonedPages;
-    /** The number of pages we've currently requested the guest to give us. */
+    /** The number of pages we've currently requested the guest to give us.
+     * This is 0 if no pages are currently requested. */
     uint64_t        cReqBalloonedPages;
+    /** The number of pages the guest has given us in response to the request.
+     * This is not reset when the request completes and may be used in later decisions. */
+    uint64_t        cReqActuallyBalloonedPages;
+    /** The number of pages we've currently requested the guest to take back. */
+    uint64_t        cReqDeflatePages;
     /** Whether ballooning is enabled or not. */
     bool            fBallooningEnabled;
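A couple of invariants these fields suggest, written out as a standalone sketch; note that "inflate and deflate requests are mutually exclusive" is a plausible assumption from the API shape, not something the source states, and BALLOONACCT is a stand-in struct.

#include <assert.h>
#include <stdint.h>

typedef struct
{
    uint64_t cMaxBalloonedPages;
    uint64_t cReqBalloonedPages;
    uint64_t cReqActuallyBalloonedPages;
    uint64_t cReqDeflatePages;
    uint64_t cBalloonedPages;
} BALLOONACCT;

static void checkBalloonInvariants(const BALLOONACCT *p)
{
    /* Never balloon beyond the configured maximum. */
    assert(p->cBalloonedPages <= p->cMaxBalloonedPages);
    /* Assumed: an inflate request and a deflate request are never open at once. */
    assert(!p->cReqBalloonedPages || !p->cReqDeflatePages);
}

int main(void)
{
    BALLOONACCT Acct = { 1024, 256, 128, 0, 128 };
    checkBalloonInvariants(&Acct);
    return 0;
}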
trunk/src/VBox/VMM/VMMR0/VMMR0.cpp (r5106 → r5135)
         case VMMR0_DO_GMM_BALLOONED_PAGES:
             return GMMR0BalloonedPagesReq(pVM, (PGMMBALLOONEDPAGESREQ)pReqHdr);
+        case VMMR0_DO_GMM_DEFLATED_BALLOON:
+            return GMMR0DeflatedBalloon(pVM, (uint32_t)u64Arg);
 
         case VMMR0_DO_GMM_MAP_UNMAP_CHUNK:
…
         case VMMR0_DO_VMMR0_INIT:
         case VMMR0_DO_VMMR0_TERM:
+        case VMMR0_DO_GMM_INITIAL_RESERVATION:
+        case VMMR0_DO_GMM_UPDATE_RESERVATION:
+        case VMMR0_DO_GMM_ALLOCATE_PAGES:
+        case VMMR0_DO_GMM_FREE_PAGES:
+        case VMMR0_DO_GMM_BALLOONED_PAGES:
+        case VMMR0_DO_GMM_DEFLATED_BALLOON:
+        case VMMR0_DO_GMM_MAP_UNMAP_CHUNK:
+        case VMMR0_DO_GMM_SEED_CHUNK:
         {
+            /** @todo validate this EMT claim... GVM knows. */
             VMMR0ENTRYEXARGS Args;
             Args.pVM = pVM;
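The new dispatcher case follows the file's existing convention: simple operations pass a scalar in u64Arg (here the deflated page count), while larger ones pass a request packet via pReqHdr. A standalone model of that convention; OPERATION and dispatch are stand-ins for the real VMMR0OPERATION handling.

#include <stdint.h>
#include <stdio.h>

typedef enum { DO_GMM_BALLOONED_PAGES, DO_GMM_DEFLATED_BALLOON } OPERATION;

static int dispatch(OPERATION enmOperation, void *pReqHdr, uint64_t u64Arg)
{
    switch (enmOperation)
    {
        case DO_GMM_BALLOONED_PAGES:
            /* Variable-sized input: a request packet carries the descriptors. */
            printf("ballooned-pages request packet at %p\n", pReqHdr);
            return 0;
        case DO_GMM_DEFLATED_BALLOON:
            /* Scalar input: the page count travels in u64Arg, truncated to 32 bits. */
            printf("deflate %u pages\n", (unsigned)(uint32_t)u64Arg);
            return 0;
        default:
            return -1;
    }
}

int main(void)
{
    dispatch(DO_GMM_DEFLATED_BALLOON, NULL, 42);
    return 0;
}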