Timestamp:
    Mar 5, 2009 8:17:00 PM
Location:
    trunk/src/VBox/VMM
Files:
    7 edited
Legend:
    lines prefixed with + were added, lines prefixed with - were removed,
    unprefixed lines are unmodified context
trunk/src/VBox/VMM/PGMInternal.h (r17371 → r17421)

 int pgmPhysPageMakeWritable(PVM pVM, PPGMPAGE pPage, RTGCPHYS GCPhys);
 int pgmPhysPageMap(PVM pVM, PPGMPAGE pPage, RTGCPHYS GCPhys, PPPGMPAGEMAP ppMap, void **ppv);
+int pgmPhysPageMapByPageID(PVM pVM, uint32_t idPage, RTHCPHYS HCPhys, void **ppv);
 int pgmPhysGCPhys2CCPtrInternal(PVM pVM, PPGMPAGE pPage, RTGCPHYS GCPhys, void **ppv);
 int pgmPhysGCPhys2CCPtrInternalReadOnly(PVM pVM, PPGMPAGE pPage, RTGCPHYS GCPhys, const void **ppv);
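The new prototype takes a GMM page ID rather than a guest-physical address; the PGMAllPhys.cpp change below shows that such an ID packs a chunk ID and an in-chunk page index into one 32-bit value (idPage >> GMM_CHUNKID_SHIFT and idPage & GMM_PAGEID_IDX_MASK). A minimal stand-alone sketch of that decomposition, using made-up field widths in place of the real GMM constants:

    /* Sketch only: how a GMM page ID splits into (chunk, page) parts.
     * The widths below are illustrative assumptions, not VBox's real layout. */
    #include <stdint.h>
    #include <stdio.h>

    #define SKETCH_CHUNKID_SHIFT    12                  /* assumed index width */
    #define SKETCH_PAGEID_IDX_MASK  UINT32_C(0x00000fff)
    #define SKETCH_PAGE_SHIFT       12                  /* 4KB pages */

    int main(void)
    {
        uint32_t idPage  = UINT32_C(0x00042007);
        uint32_t idChunk = idPage >> SKETCH_CHUNKID_SHIFT;    /* which mapping chunk */
        uint32_t iPage   = idPage & SKETCH_PAGEID_IDX_MASK;   /* index within the chunk */
        printf("chunk %#x, page %#x, byte offset %#x\n",
               (unsigned)idChunk, (unsigned)iPage,
               (unsigned)(iPage << SKETCH_PAGE_SHIFT));       /* offset into the chunk mapping */
        return 0;
    }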
trunk/src/VBox/VMM/PGMPhys.cpp (r17371 → r17421)

 {
     pgmLock(pVM);
+
+    /*
+     * Allocate more pages, noting down the index of the first new page.
+     */
+    uint32_t iClear = pVM->pgm.s.cHandyPages;
     int rc = VMMR3CallR0(pVM, VMMR0_DO_PGM_ALLOCATE_HANDY_PAGES, 0, NULL);
     if (rc == VERR_GMM_SEED_ME)
…
         }
     }
+
+    /*
+     * Clear the pages.
+     */
+    if (RT_SUCCESS(rc))
+    {
+        while (iClear < pVM->pgm.s.cHandyPages)
+        {
+            PGMMPAGEDESC pPage = &pVM->pgm.s.aHandyPages[iClear];
+            void *pv;
+            rc = pgmPhysPageMapByPageID(pVM, pPage->idPage, pPage->HCPhysGCPhys, &pv);
+            AssertLogRelMsgBreak(RT_SUCCESS(rc), ("idPage=%#x HCPhysGCPhys=%RHp rc=%Rrc", pPage->idPage, pPage->HCPhysGCPhys, rc));
+            ASMMemZeroPage(pv);
+            iClear++;
+        }
+    }
+
+    /** @todo Do proper VERR_EM_NO_MEMORY reporting. */
     pgmUnlock(pVM);
     Assert(rc == VINF_SUCCESS || rc == VINF_EM_NO_MEMORY);
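The shape of this change is worth noting: the function records the current fill level of aHandyPages in iClear before calling ring-0 (which, per the GMMR0.cpp docs below, returns uncleared pages), then zeroes only the entries the call appended. A stand-alone sketch of the same watermark idiom, with invented names rather than VBox APIs:

    /* Sketch: remember the old fill level, then post-process only the new tail.
     * refill() stands in for the ring-0 allocation call, memset() for
     * ASMMemZeroPage(); none of these names are VBox APIs. */
    #include <stddef.h>
    #include <string.h>

    #define CAPACITY         32
    #define SKETCH_PAGE_SIZE 4096

    static unsigned char g_aPages[CAPACITY][SKETCH_PAGE_SIZE]; /* like aHandyPages */
    static size_t        g_cPages;                             /* like cHandyPages */

    /* Stand-in for the ring-0 call: tops the array up without clearing. */
    static int refill(void)
    {
        g_cPages = CAPACITY;
        return 0;
    }

    static int refillAndClear(void)
    {
        size_t iClear = g_cPages;          /* watermark: first entry refill() adds */
        int rc = refill();
        if (rc == 0)
            for (; iClear < g_cPages; iClear++)
                memset(g_aPages[iClear], 0, SKETCH_PAGE_SIZE);
        return rc;
    }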
trunk/src/VBox/VMM/PGMPool.cpp (r17140 → r17421)

     }
     pPage->Core.Key = MMPage2Phys(pVM, pPage->pvPageR3);
-    LogFlow(("PGMR3PoolGrow: insert page %RHp\n", pPage->Core.Key));
     pPage->GCPhys = NIL_RTGCPHYS;
     pPage->enmKind = PGMPOOLKIND_FREE;
     pPage->idx = pPage - &pPool->aPages[0];
+    LogFlow(("PGMR3PoolGrow: insert page #%#x - %RHp\n", pPage->idx, pPage->Core.Key));
     pPage->iNext = pPool->iFreeHead;
 #ifdef PGMPOOL_WITH_USER_TRACKING
trunk/src/VBox/VMM/VMMAll/PGMAllPhys.cpp (r17372 → r17421)

      * actual VM RAM commitment, that's too much work for now.
      */
+    Assert(PDMCritSectIsOwner(&pVM->pgm.s.CritSect));
     Assert(pVM->pgm.s.cHandyPages <= RT_ELEMENTS(pVM->pgm.s.aHandyPages));
     if (    !pVM->pgm.s.cHandyPages
…
         )
     {
-        Log(("PGM: cHandyPages=%u out of %u -> allocate more\n", pVM->pgm.s.cHandyPages - 1 <= RT_ELEMENTS(pVM->pgm.s.aHandyPages)));
+        Log(("PGM: cHandyPages=%u out of %u -> allocate more\n", pVM->pgm.s.cHandyPages, RT_ELEMENTS(pVM->pgm.s.aHandyPages)));
 #ifdef IN_RING3
-        int rc = VMMR3CallR0(pVM, VMMR0_DO_PGM_ALLOCATE_HANDY_PAGES, 0, NULL);
+        int rc = PGMR3PhysAllocateHandyPages(pVM);
 #elif defined(IN_RING0)
-        /** @todo call PGMR0PhysAllocateHandyPages directly - need to make sure we can call kernel code first and deal with the seeding fallback. */
         int rc = VMMR0CallHost(pVM, VMMCALLHOST_PGM_ALLOCATE_HANDY_PAGES, 0);
 #else
…
     if (pVM->pgm.s.cHandyPages - 1 <= RT_ELEMENTS(pVM->pgm.s.aHandyPages) / 2)
     {
-        Log(("PGM: VM_FF_TO_R3 - cHandyPages=%u out of %u\n", pVM->pgm.s.cHandyPages - 1 <= RT_ELEMENTS(pVM->pgm.s.aHandyPages)));
+        Log(("PGM: VM_FF_TO_R3 - cHandyPages=%u out of %u\n", pVM->pgm.s.cHandyPages - 1, RT_ELEMENTS(pVM->pgm.s.aHandyPages)));
         VM_FF_SET(pVM, VM_FF_TO_R3);
     }
…
         return rc;
     }
+    Assert(PDMCritSectIsOwner(&pVM->pgm.s.CritSect));
     AssertMsg(PGM_PAGE_IS_ZERO(pPage) || PGM_PAGE_IS_SHARED(pPage), ("%d %RGp\n", PGM_PAGE_GET_STATE(pPage), GCPhys));
     Assert(!PGM_PAGE_IS_MMIO(pPage));
…
         STAM_COUNTER_INC(&pVM->pgm.s.CTX_MID_Z(Stat,PageReplaceShared));
         pVM->pgm.s.cSharedPages--;
-        /** @todo err.. what about copying the page content? */
+        AssertMsgFailed(("TODO: copy shared page content")); /** @todo err.. what about copying the page content? */
     }
     else
     {
…
         STAM_COUNTER_INC(&pVM->pgm.s.StatRZPageReplaceZero);
         pVM->pgm.s.cZeroPages--;
-        /** @todo verify that the handy page is zero! */
+        Assert(pVM->pgm.s.aHandyPages[iHandyPage].idSharedPage == NIL_GMM_PAGEID);
     }
…
 
 /**
+ * Internal usage: Map the page specified by its GMM ID.
+ *
+ * This is similar to pgmPhysPageMap.
+ *
+ * @returns VBox status code.
+ *
+ * @param   pVM     The VM handle.
+ * @param   idPage  The Page ID.
+ * @param   HCPhys  The physical address (for RC).
+ * @param   ppv     Where to store the mapping address.
+ *
+ * @remarks Called from within the PGM critical section.
+ */
+int pgmPhysPageMapByPageID(PVM pVM, uint32_t idPage, RTHCPHYS HCPhys, void **ppv)
+{
+    /*
+     * Validation.
+     */
+    Assert(PDMCritSectIsOwner(&pVM->pgm.s.CritSect));
+    AssertReturn(HCPhys && !(HCPhys & PAGE_OFFSET_MASK), VERR_INVALID_PARAMETER);
+    const uint32_t idChunk = idPage >> GMM_CHUNKID_SHIFT;
+    AssertReturn(idChunk != NIL_GMM_CHUNKID, VERR_INVALID_PARAMETER);
+
+#ifdef IN_RC
+    /*
+     * Map it by HCPhys.
+     */
+    return PGMDynMapHCPage(pVM, HCPhys, ppv);
+
+#elif defined(VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0)
+    /*
+     * Map it by HCPhys.
+     */
+    return pgmR0DynMapHCPageInlined(pVM, HCPhys, ppv);
+
+#else
+    /*
+     * Find/make Chunk TLB entry for the mapping chunk.
+     */
+    PPGMCHUNKR3MAP pMap;
+    PPGMCHUNKR3MAPTLBE pTlbe = &pVM->pgm.s.ChunkR3Map.Tlb.aEntries[PGM_CHUNKR3MAPTLB_IDX(idChunk)];
+    if (pTlbe->idChunk == idChunk)
+    {
+        STAM_COUNTER_INC(&pVM->pgm.s.CTX_MID_Z(Stat,ChunkR3MapTlbHits));
+        pMap = pTlbe->pChunk;
+    }
+    else
+    {
+        STAM_COUNTER_INC(&pVM->pgm.s.CTX_MID_Z(Stat,ChunkR3MapTlbMisses));
+
+        /*
+         * Find the chunk, map it if necessary.
+         */
+        pMap = (PPGMCHUNKR3MAP)RTAvlU32Get(&pVM->pgm.s.ChunkR3Map.pTree, idChunk);
+        if (!pMap)
+        {
+# ifdef IN_RING0
+            int rc = VMMR0CallHost(pVM, VMMCALLHOST_PGM_MAP_CHUNK, idChunk);
+            AssertRCReturn(rc, rc);
+            pMap = (PPGMCHUNKR3MAP)RTAvlU32Get(&pVM->pgm.s.ChunkR3Map.pTree, idChunk);
+            Assert(pMap);
+# else
+            int rc = pgmR3PhysChunkMap(pVM, idChunk, &pMap);
+            if (RT_FAILURE(rc))
+                return rc;
+# endif
+        }
+
+        /*
+         * Enter it into the Chunk TLB.
+         */
+        pTlbe->idChunk = idChunk;
+        pTlbe->pChunk = pMap;
+        pMap->iAge = 0;
+    }
+
+    *ppv = (uint8_t *)pMap->pv + ((idPage & GMM_PAGEID_IDX_MASK) << PAGE_SHIFT);
+    return VINF_SUCCESS;
+#endif
+}
+
+
+/**
  * Maps a page into the current virtual address space so it can be accessed.
  *
…
 int pgmPhysPageMap(PVM pVM, PPGMPAGE pPage, RTGCPHYS GCPhys, PPPGMPAGEMAP ppMap, void **ppv)
 {
+    Assert(PDMCritSectIsOwner(&pVM->pgm.s.CritSect));
+
 #if defined(IN_RC) || defined(VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0)
     /*
…
     AssertFailed();
 }
-#endif
 
…
 VMMDECL(void) PGMPhysRead(PVM pVM, RTGCPHYS GCPhys, void *pvBuf, size_t cbRead)
 {
-#ifdef IN_RING3
-    bool fGrabbedLock = false;
-#endif
-
-    AssertMsg(cbRead > 0, ("don't even think about reading zero bytes!\n"));
-    if (cbRead == 0)
-        return;
-
+    AssertMsgReturnVoid(cbRead > 0, ("don't even think about reading zero bytes!\n"));
     LogFlow(("PGMPhysRead: %RGp %d\n", GCPhys, cbRead));
 
-#ifdef IN_RING3
-    if (!VM_IS_EMT(pVM))
-    {
-        pgmLock(pVM);
-        fGrabbedLock = true;
-    }
-#endif
+    pgmLock(pVM);
 
     /*
      * Copy loop on ram ranges.
…
                 unsigned iPage = off >> PAGE_SHIFT;
                 PPGMPAGE pPage = &pRam->aPages[iPage];
-#ifdef VBOX_WITH_NEW_PHYS_CODE
                 size_t cb = PAGE_SIZE - (off & PAGE_OFFSET_MASK);
+                if (cb > cbRead)
+                    cb = cbRead;
 
                 /*
…
                              pRam->GCPhys + off, pPage, rc));
                 }
+
+                /* next page */
                 if (cb >= cbRead)
-                    goto l_End;
-
-#else /* old code */
+                {
+                    pgmUnlock(pVM);
+                    return;
+                }
+                cbRead -= cb;
+                off    += cb;
+                pvBuf   = (char *)pvBuf + cb;
+            } /* walk pages in ram range. */
+
+            GCPhys = pRam->GCPhysLast + 1;
+        }
+        else
+        {
+            LogFlow(("PGMPhysRead: Unassigned %RGp size=%u\n", GCPhys, cbRead));
+
+            /*
+             * Unassigned address space.
+             */
+            if (!pRam)
+                break;
+            size_t cb = pRam->GCPhys - GCPhys;
+            if (cb >= cbRead)
+            {
+#if 0 /** @todo enable this later. */
+                memset(pvBuf, 0xff, cbRead);
+#else
+                memset(pvBuf, 0, cbRead);
+#endif
+                break;
+            }
+
+#if 0 /** @todo enable this later. */
+            memset(pvBuf, 0xff, cb);
+#else
+            memset(pvBuf, 0, cb);
+#endif
+            cbRead -= cb;
+            pvBuf   = (char *)pvBuf + cb;
+            GCPhys += cb;
+        }
+    } /* Ram range walk */
+
+    pgmUnlock(pVM);
+}
+
+#else /* Old PGMPhysRead */
+
+/**
+ * Read physical memory.
+ *
+ * This API respects access handlers and MMIO. Use PGMPhysSimpleReadGCPhys() if you
+ * want to ignore those.
+ *
+ * @param   pVM         VM Handle.
+ * @param   GCPhys      Physical address start reading from.
+ * @param   pvBuf       Where to put the read bits.
+ * @param   cbRead      How many bytes to read.
+ */
+VMMDECL(void) PGMPhysRead(PVM pVM, RTGCPHYS GCPhys, void *pvBuf, size_t cbRead)
+{
+#ifdef IN_RING3
+    bool fGrabbedLock = false;
+#endif
+
+    AssertMsg(cbRead > 0, ("don't even think about reading zero bytes!\n"));
+    if (cbRead == 0)
+        return;
+
+    LogFlow(("PGMPhysRead: %RGp %d\n", GCPhys, cbRead));
+
+#ifdef IN_RING3
+    if (!VM_IS_EMT(pVM))
+    {
+        pgmLock(pVM);
+        fGrabbedLock = true;
+    }
+#endif
+
+    /*
+     * Copy loop on ram ranges.
+     */
+    PPGMRAMRANGE pRam = pVM->pgm.s.CTX_SUFF(pRamRanges);
+    for (;;)
+    {
+        /* Find range. */
+        while (pRam && GCPhys > pRam->GCPhysLast)
+            pRam = pRam->CTX_SUFF(pNext);
+        /* Inside range or not? */
+        if (pRam && GCPhys >= pRam->GCPhys)
+        {
+            /*
+             * Must work our way thru this page by page.
+             */
+            RTGCPHYS off = GCPhys - pRam->GCPhys;
+            while (off < pRam->cb)
+            {
+                unsigned iPage = off >> PAGE_SHIFT;
+                PPGMPAGE pPage = &pRam->aPages[iPage];
                 size_t cb;
 
…
                 }
             }
-#endif /* old code */
 
             cbRead -= cb;
…
         }
     }
 
+#endif /* Old PGMPhysRead */
 
+#ifdef VBOX_WITH_NEW_PHYS_CODE
 
 /**
  * Deals with writing to a page with one or more WRITE or ALL access handlers.
…
     AssertFailed();
 }
-#endif
 
…
 VMMDECL(void) PGMPhysWrite(PVM pVM, RTGCPHYS GCPhys, const void *pvBuf, size_t cbWrite)
 {
-#ifdef IN_RING3
-    bool fGrabbedLock = false;
-#endif
-
     AssertMsg(!pVM->pgm.s.fNoMorePhysWrites, ("Calling PGMPhysWrite after pgmR3Save()!\n"));
-    AssertMsg(cbWrite > 0, ("don't even think about writing zero bytes!\n"));
-    if (cbWrite == 0)
-        return;
-
+    AssertMsgReturnVoid(cbWrite > 0, ("don't even think about writing zero bytes!\n"));
     LogFlow(("PGMPhysWrite: %RGp %d\n", GCPhys, cbWrite));
 
-#ifdef IN_RING3
-    if (!VM_IS_EMT(pVM))
-    {
-        pgmLock(pVM);
-        fGrabbedLock = true;
-    }
-#endif
+    pgmLock(pVM);
+
     /*
      * Copy loop on ram ranges.
…
                 RTGCPTR iPage = off >> PAGE_SHIFT;
                 PPGMPAGE pPage = &pRam->aPages[iPage];
-#ifdef VBOX_WITH_NEW_PHYS_CODE
                 size_t cb = PAGE_SIZE - (off & PAGE_OFFSET_MASK);
+                if (cb > cbWrite)
+                    cb = cbWrite;
 
                 /*
…
                              pRam->GCPhys + off, pPage, rc));
                 }
+
+                /* next page */
                 if (cb >= cbWrite)
-                    goto l_End;
-
-
-#else /* old code */
+                {
+                    pgmUnlock(pVM);
+                    return;
+                }
+
+                cbWrite -= cb;
+                off     += cb;
+                pvBuf    = (const char *)pvBuf + cb;
+            } /* walk pages in ram range */
+
+            GCPhys = pRam->GCPhysLast + 1;
+        }
+        else
+        {
+            /*
+             * Unassigned address space, skip it.
+             */
+            if (!pRam)
+                break;
+            size_t cb = pRam->GCPhys - GCPhys;
+            if (cb >= cbWrite)
+                break;
+            cbWrite -= cb;
+            pvBuf    = (const char *)pvBuf + cb;
+            GCPhys  += cb;
+        }
+    } /* Ram range walk */
+
+    pgmUnlock(pVM);
+}
+
+#else /* Old PGMPhysWrite */
+
+/**
+ * Write to physical memory.
+ *
+ * This API respects access handlers and MMIO. Use PGMPhysSimpleWriteGCPhys() if you
+ * want to ignore those.
+ *
+ * @param   pVM         VM Handle.
+ * @param   GCPhys      Physical address to write to.
+ * @param   pvBuf       What to write.
+ * @param   cbWrite     How many bytes to write.
+ */
+VMMDECL(void) PGMPhysWrite(PVM pVM, RTGCPHYS GCPhys, const void *pvBuf, size_t cbWrite)
+{
+#ifdef IN_RING3
+    bool fGrabbedLock = false;
+#endif
+
+    AssertMsg(!pVM->pgm.s.fNoMorePhysWrites, ("Calling PGMPhysWrite after pgmR3Save()!\n"));
+    AssertMsg(cbWrite > 0, ("don't even think about writing zero bytes!\n"));
+    if (cbWrite == 0)
+        return;
+
+    LogFlow(("PGMPhysWrite: %RGp %d\n", GCPhys, cbWrite));
+
+#ifdef IN_RING3
+    if (!VM_IS_EMT(pVM))
+    {
+        pgmLock(pVM);
+        fGrabbedLock = true;
+    }
+#endif
+    /*
+     * Copy loop on ram ranges.
+     */
+    PPGMRAMRANGE pRam = pVM->pgm.s.CTX_SUFF(pRamRanges);
+    for (;;)
+    {
+        /* Find range. */
+        while (pRam && GCPhys > pRam->GCPhysLast)
+            pRam = pRam->CTX_SUFF(pNext);
+        /* Inside range or not? */
+        if (pRam && GCPhys >= pRam->GCPhys)
+        {
+            /*
+             * Must work our way thru this page by page.
+             */
+            RTGCPTR off = GCPhys - pRam->GCPhys;
+            while (off < pRam->cb)
+            {
+                RTGCPTR iPage = off >> PAGE_SHIFT;
+                PPGMPAGE pPage = &pRam->aPages[iPage];
 
                 /* Physical chunk in dynamically allocated range not present? */
…
                 }
             }
-#endif /* old code */
 
             cbWrite -= cb;
…
         }
     }
 
+#endif /* Old PGMPhysWrite */
 
 /**
trunk/src/VBox/VMM/VMMR0/GMMR0.cpp (r17371 → r17421)

  *
  * @returns VBox status code:
- * @retval  xxx
+ * @retval  VINF_SUCCESS on success.
+ * @retval  VERR_GMM_SEED_ME if seeding via GMMR0SeedChunk is necessary.
+ * @retval  VERR_GMM_HIT_GLOBAL_LIMIT if we've exhausted the available pages.
+ * @retval  VERR_GMM_HIT_VM_ACCOUNT_LIMIT if we've hit the VM account limit,
+ *          that is, we're trying to allocate more than we've reserved.
  *
  * @param   pGMM    Pointer to the GMM instance data.
…
  *
  * The handy pages are always taken from the 'base' memory account.
+ * The allocated pages are not cleared and will contain random garbage.
  *
  * @returns VBox status code:
- * @retval  xxx
+ * @retval  VINF_SUCCESS on success.
+ * @retval  VERR_NOT_OWNER if the caller is not an EMT.
+ * @retval  VERR_GMM_PAGE_NOT_FOUND if one of the pages to update wasn't found.
+ * @retval  VERR_GMM_PAGE_NOT_PRIVATE if one of the pages to update wasn't a
+ *          private page.
+ * @retval  VERR_GMM_PAGE_NOT_SHARED if one of the pages to update wasn't a
+ *          shared page.
+ * @retval  VERR_GMM_NOT_PAGE_OWNER if one of the pages to be updated wasn't
+ *          owned by the VM.
+ * @retval  VERR_GMM_SEED_ME if seeding via GMMR0SeedChunk is necessary.
+ * @retval  VERR_GMM_HIT_GLOBAL_LIMIT if we've exhausted the available pages.
+ * @retval  VERR_GMM_HIT_VM_ACCOUNT_LIMIT if we've hit the VM account limit,
+ *          that is, we're trying to allocate more than we've reserved.
  *
  * @param   pVM     Pointer to the shared VM structure.
…
  *
  * This is typically used for ROMs and MMIO2 (VRAM) during VM creation.
+ * The allocated pages are not cleared and will contain random garbage.
  *
  * @returns VBox status code:
- * @retval  xxx
+ * @retval  VINF_SUCCESS on success.
+ * @retval  VERR_NOT_OWNER if the caller is not an EMT.
+ * @retval  VERR_GMM_SEED_ME if seeding via GMMR0SeedChunk is necessary.
+ * @retval  VERR_GMM_HIT_GLOBAL_LIMIT if we've exhausted the available pages.
+ * @retval  VERR_GMM_HIT_VM_ACCOUNT_LIMIT if we've hit the VM account limit,
+ *          that is, we're trying to allocate more than we've reserved.
  *
  * @param   pVM     Pointer to the shared VM structure.
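With the @retval lists spelled out, a caller can dispatch on the interesting failures rather than treating every non-success as fatal; the VERR_GMM_SEED_ME handling in PGMPhys.cpp above is the existing example. A hedged sketch of such a dispatcher, assuming the usual VBox/VMM headers are available; allocateAndSeedChunk() is a hypothetical helper, not a VBox API:

    /* Sketch: turn the documented GMMR0AllocateHandyPages() status codes into
     * caller actions.  allocateAndSeedChunk() is hypothetical. */
    static int allocateAndSeedChunk(PVM pVM); /* hypothetical, not a VBox API */

    static int handleHandyPageStatus(PVM pVM, int rc)
    {
        switch (rc)
        {
            case VINF_SUCCESS:
                return VINF_SUCCESS;

            case VERR_GMM_SEED_ME:
                /* The GMM wants a fresh chunk of host memory; seed and retry. */
                return allocateAndSeedChunk(pVM);

            case VERR_GMM_HIT_GLOBAL_LIMIT:
            case VERR_GMM_HIT_VM_ACCOUNT_LIMIT:
                /* Out of budget: surface as an out-of-memory condition. */
                return VERR_EM_NO_MEMORY;

            default:
                return rc;
        }
    }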
trunk/src/VBox/VMM/VMMR0/PGMR0.cpp (r13824 → r17421)

  * @param   pVM     The VM handle.
  *
- * @remarks Must be called from within the PGM critical section.
+ * @remarks Must be called from within the PGM critical section. The caller
+ *          must clear the new pages.
  */
 VMMR0DECL(int) PGMR0PhysAllocateHandyPages(PVM pVM)
 {
-    return VERR_NOT_IMPLEMENTED;
+    Assert(PDMCritSectIsOwner(&pVM->pgm.s.CritSect));
+
+    uint32_t iFirst = pVM->pgm.s.cHandyPages;
+    AssertMsgReturn(iFirst <= RT_ELEMENTS(pVM->pgm.s.aHandyPages), ("%d", iFirst), VERR_INTERNAL_ERROR);
+    uint32_t cPages = RT_ELEMENTS(pVM->pgm.s.aHandyPages) - iFirst;
+    if (!cPages)
+        return VINF_SUCCESS;
+
+    int rc = GMMR0AllocateHandyPages(pVM, cPages, cPages, &pVM->pgm.s.aHandyPages[iFirst]);
+    if (RT_SUCCESS(rc))
+    {
+        for (uint32_t i = 0; i < RT_ELEMENTS(pVM->pgm.s.aHandyPages); i++)
+        {
+            Assert(pVM->pgm.s.aHandyPages[i].idPage != NIL_GMM_PAGEID);
+            Assert(pVM->pgm.s.aHandyPages[i].idPage <= GMM_PAGEID_LAST);
+            Assert(pVM->pgm.s.aHandyPages[i].idSharedPage == NIL_GMM_PAGEID);
+            Assert(pVM->pgm.s.aHandyPages[i].HCPhysGCPhys != NIL_RTHCPHYS);
+            Assert(!(pVM->pgm.s.aHandyPages[i].HCPhysGCPhys & ~X86_PTE_PAE_PG_MASK));
+        }
+
+        pVM->pgm.s.cHandyPages = RT_ELEMENTS(pVM->pgm.s.aHandyPages);
+    }
+
+    LogFlow(("PGMR0PhysAllocateHandyPages: cPages=%d rc=%Rrc\n", cPages, rc));
+    return rc;
 }
trunk/src/VBox/VMM/VMMR0/VMMR0.cpp (r17305 → r17421)

      */
     if (uSvnRev != VMMGetSvnRev())
+    {
+        LogRel(("VMMR0InitVM: Revision mismatch, r3=%d r0=%d\n", uSvnRev, VMMGetSvnRev()));
         return VERR_VERSION_MISMATCH;
+    }
     if (    !VALID_PTR(pVM)
         ||  pVM->pVMR0 != pVM)