- Timestamp:
- Nov 23, 2008 10:49:36 PM
- File:
- 1 edited
Legend:
- Unmodified (shown without a prefix)
- Added (prefixed with +)
- Removed (prefixed with -)
trunk/src/VBox/VMM/VMMR0/PGMR0DynMap.cpp
--- trunk/src/VBox/VMM/VMMR0/PGMR0DynMap.cpp (r14493)
+++ trunk/src/VBox/VMM/VMMR0/PGMR0DynMap.cpp (r14494)

 /** The max size of the mapping cache (in pages). */
 #define PGMR0DYNMAP_MAX_PAGES       ((8*_1M) >> PAGE_SHIFT)
-/** The max segment size. */
-/** @todo #define PGMR0DYNMAP_SEG_MAX_PAGES (_1M >> PAGE_SHIFT) */
+/** The small segment size that is adopted on out-of-memory conditions with a
+ * single big segment. */
+#define PGMR0DYNMAP_SMALL_SEG_PAGES 128
 /** The number of pages we reserve per CPU. */
 #define PGMR0DYNMAP_PAGES_PER_CPU   64
…
     /** The current load. */
     uint32_t                    cLoad;
-    /** The max load.
+    /** The max load ever.
      * This is maintained to get trigger adding of more mapping space. */
     uint32_t                    cMaxLoad;
…
 static void pgmR0DynMapReleasePage(PPGMR0DYNMAP pThis, uint32_t iPage, uint32_t cRefs);
 static int  pgmR0DynMapSetup(PPGMR0DYNMAP pThis);
-static int  pgmR0DynMapGrow(PPGMR0DYNMAP pThis);
+static int  pgmR0DynMapExpand(PPGMR0DYNMAP pThis);
 static void pgmR0DynMapTearDown(PPGMR0DYNMAP pThis);
…
      */
     VMCPUID idCpu = pVM->cCPUs;
+    AssertReturn(idCpu > 0 && idCpu <= VMCPU_MAX_CPU_COUNT, VERR_INTERNAL_ERROR);
     while (idCpu-- > 0)
     {
…
     /*
-     * Reference and if necessary setup or grow the cache.
+     * Reference and if necessary setup or expand the cache.
      */
     PPGMR0DYNMAP pThis = g_pPGMR0DynMap;
…
         rc = pgmR0DynMapSetup(pThis);
     else if (pThis->cMaxLoad > PGMR0DYNMAP_CALC_OVERLOAD(pThis->cPages))
-        rc = pgmR0DynMapGrow(pThis);
-    if (RT_FAILURE(rc))
+        rc = pgmR0DynMapExpand(pThis);
+    if (RT_SUCCESS(rc))
+        pVM->pgm.s.pvR0DynMapUsed = pThis;
+    else
         pThis->cUsers--;
…
         while (j-- > 0)
         {
-            int32_t cRefs = pSet->aEntries[j].cRefs;
+            int32_t  cRefs = pSet->aEntries[j].cRefs;
             uint32_t iPage = pSet->aEntries[j].iPage;
             LogRel(("PGMR0DynMapTermVM: %d dangling refs to %#x\n", cRefs, iPage));
…
                 pgmR0DynMapReleasePage(pThis, iPage, cRefs);
             else
-                AssertMsgFailed(("cRefs=%d iPage=%#x cPages=%u\n", cRefs, iPage, pThis->cPages));
+                AssertLogRelMsgFailed(("cRefs=%d iPage=%#x cPages=%u\n", cRefs, iPage, pThis->cPages));

             pSet->aEntries[j].iPage = UINT16_MAX;
…
         pSet->cEntries = PGMMAPSET_CLOSED;
     }
+    else
+        AssertMsg(j == PGMMAPSET_CLOSED, ("cEntries=%#x\n", j));

     j = RT_ELEMENTS(pSet->aEntries);
…
     }
     else
-        AssertMsgFailed(("pvR0DynMapUsed=%p pThis=%p\n", pVM->pgm.s.pvR0DynMapUsed, pThis));
+        AssertLogRelMsgFailed(("pvR0DynMapUsed=%p pThis=%p\n", pVM->pgm.s.pvR0DynMapUsed, pThis));

     RTSemFastMutexRelease(pThis->hInitLock);
…
  * @returns Number of pages.
  * @param   pThis       The dynamic mapping cache instance.
- */
-static uint32_t pgmR0DynMapCalcNewSize(PPGMR0DYNMAP pThis)
-{
-    /*
-     * cCpus * PGMR0DYNMAP_PAGES_PER_CPU.
-     */
-    RTCPUID  cCpus  = RTMpGetCount();
-    uint32_t cPages = cCpus * PGMR0DYNMAP_PAGES_PER_CPU;
+ * @param   pcMinPages  The minimal size in pages.
+ */
+static uint32_t pgmR0DynMapCalcNewSize(PPGMR0DYNMAP pThis, uint32_t *pcMinPages)
+{
+    Assert(pThis->cPages <= PGMR0DYNMAP_MAX_PAGES);
+
+    /* cCpus * PGMR0DYNMAP_PAGES_PER_CPU (/2). */
+    RTCPUID  cCpus     = RTMpGetCount();
+    uint32_t cPages    = cCpus * PGMR0DYNMAP_PAGES_PER_CPU;
+    uint32_t cMinPages = cCpus * (PGMR0DYNMAP_PAGES_PER_CPU / 2);

     /* adjust against cMaxLoad. */
…
         cPages += PGMR0DYNMAP_PAGES_PER_CPU;

-    /* adjust against max size. */
+    if (pThis->cMaxLoad > cMinPages)
+        cMinPages = pThis->cMaxLoad;
+
+    /* adjust against max and current size. */
+    if (cPages < pThis->cPages)
+        cPages = pThis->cPages;
     if (cPages > PGMR0DYNMAP_MAX_PAGES)
         cPages = PGMR0DYNMAP_MAX_PAGES;

+    if (cMinPages < pThis->cPages)
+        cMinPages = pThis->cPages;
+    if (cMinPages > PGMR0DYNMAP_MAX_PAGES)
+        cMinPages = PGMR0DYNMAP_MAX_PAGES;
+
+    *pcMinPages = cMinPages;
     return cPages;
 }
…
  * and restarting from CR3 for every change.
  *
- * @returns VBox status code, VERR_TRY_AGAIN if we changed any mappings and had
+ * @returns VBox status code, VINF_TRY_AGAIN if we changed any mappings and had
  *          to re-enable interrupts.
  * @param   pThis       The dynamic mapping cache instance.
…
                                      PPGMR0DYNMAPSEG pSeg, uint32_t cMaxPTs, void **ppvPTE)
 {
+    Assert(!(ASMGetFlags() & X86_EFL_IF));
+
     void *pvEntry = NULL;
     X86PGPAEUINT uEntry = ASMGetCR3();
…
         if (RT_SUCCESS(rc))
         {
-            rc = RTR0MemObjMapKernel(&pPgLvl->a[i].hMapObj, pPgLvl->a[i].hMemObj, &pPgLvl->a[i].u.pv, 0, RTMEM_PROT_WRITE | RTMEM_PROT_READ);
+            rc = RTR0MemObjMapKernel(&pPgLvl->a[i].hMapObj, pPgLvl->a[i].hMemObj, &pPgLvl->a[i].u.pv, 0 /* cbAlignment */,
+                                     RTMEM_PROT_WRITE | RTMEM_PROT_READ);
             if (RT_SUCCESS(rc))
             {
…
                 pSeg->ahMemObjPTs[pSeg->cPTs++] = pPgLvl->a[i].hMemObj;
                 ASMIntDisable();
-                return VERR_TRY_AGAIN;
+                return VINF_TRY_AGAIN;
             }
…
             pPgLvl->a[i].hMemObj = NIL_RTR0MEMOBJ;
             pPgLvl->a[i].HCPhys  = NIL_RTHCPHYS;
-            AssertReturn(rc != VERR_TRY_AGAIN, VERR_INTERNAL_ERROR);
             return rc;
         }
…
             rc = pgmR0DynMapPagingArrayMapPte(pThis, &PgLvl, pbPage, pSeg, cMaxPTs,
                                               &pThis->paPages[iPage].uPte.pv);
-        while (rc == VERR_TRY_AGAIN);
+        while (rc == VINF_TRY_AGAIN);
         if (RT_FAILURE(rc))
             break;
-        rc = VINF_SUCCESS;

         /* Save the PTE. */
…
      * Calc the size and add a segment of that size.
      */
-    uint32_t cPages = pgmR0DynMapCalcNewSize(pThis);
+    uint32_t cMinPages;
+    uint32_t cPages = pgmR0DynMapCalcNewSize(pThis, &cMinPages);
     AssertReturn(cPages, VERR_INTERNAL_ERROR);
-    return pgmR0DynMapAddSeg(pThis, cPages);
+    int rc = pgmR0DynMapAddSeg(pThis, cPages);
+    if (rc == VERR_NO_MEMORY)
+    {
+        /*
+         * Try adding smaller segments.
+         */
+        do
+            rc = pgmR0DynMapAddSeg(pThis, PGMR0DYNMAP_SMALL_SEG_PAGES);
+        while (RT_SUCCESS(rc) && pThis->cPages < cPages);
+        if (rc == VERR_NO_MEMORY && pThis->cPages >= cMinPages)
+            rc = VINF_SUCCESS;
+        if (rc == VERR_NO_MEMORY)
+        {
+            if (pThis->cPages)
+                pgmR0DynMapTearDown(pThis);
+            rc = VERR_PGM_DYNMAP_SETUP_ERROR;
+        }
+    }
+    Assert(ASMGetFlags() & X86_EFL_IF);
+    return rc;
 }
…
  * @param   pThis       The dynamic mapping cache instance.
  */
-static int pgmR0DynMapGrow(PPGMR0DYNMAP pThis)
+static int pgmR0DynMapExpand(PPGMR0DYNMAP pThis)
 {
     /*
      * Calc the new target size and add a segment of the appropriate size.
      */
-    uint32_t cPages = pgmR0DynMapCalcNewSize(pThis);
+    uint32_t cMinPages;
+    uint32_t cPages = pgmR0DynMapCalcNewSize(pThis, &cMinPages);
     if (pThis->cPages >= cPages)
         return VINF_SUCCESS;

     uint32_t cAdd = cPages - pThis->cPages;
-    return pgmR0DynMapAddSeg(pThis, cAdd);
+    int rc = pgmR0DynMapAddSeg(pThis, cAdd);
+    if (rc == VERR_NO_MEMORY)
+    {
+        /*
+         * Try adding smaller segments.
+         */
+        do
+            rc = pgmR0DynMapAddSeg(pThis, PGMR0DYNMAP_SMALL_SEG_PAGES);
+        while (RT_SUCCESS(rc) && pThis->cPages < cPages);
+        if (rc == VERR_NO_MEMORY && pThis->cPages >= cMinPages)
+            rc = VINF_SUCCESS;
+        if (rc == VERR_NO_MEMORY)
+            rc = VERR_PGM_DYNMAP_EXPAND_ERROR;
+    }
+    Assert(ASMGetFlags() & X86_EFL_IF);
+    return rc;
 }
…
         pSeg->ahMemObjPTs[iPT] = NIL_RTR0MEMOBJ;
     }
-    rc = RTR0MemObjFree(pSeg->hMemObj, …
+    rc = RTR0MemObjFree(pSeg->hMemObj, true /* fFreeMappings */); AssertRC(rc);
     pSeg->hMemObj = NIL_RTR0MEMOBJ;
     pSeg->pNext = NULL;
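The substance of this changeset is twofold: pgmR0DynMapCalcNewSize now reports both a target size and a minimal acceptable size (half the per-CPU reservation, raised to the observed peak load), and both pgmR0DynMapSetup and the renamed pgmR0DynMapExpand fall back to PGMR0DYNMAP_SMALL_SEG_PAGES-sized segments when a single big segment cannot be allocated, succeeding as long as the minimum is reached. Below is a minimal, self-contained sketch of that allocation pattern, not VirtualBox code: cache_t, calc_new_size, add_seg, expand, the 3/4 overload bound, and the simulated memory pool are all hypothetical stand-ins for the pgmR0DynMap* functions, constants, and IPRT status codes.

    /* Sketch of the size-calculation and small-segment fallback patterns
     * from r14494.  All names are hypothetical stand-ins. */
    #include <inttypes.h>
    #include <stdio.h>

    #define PAGES_PER_CPU   64    /* stand-in for PGMR0DYNMAP_PAGES_PER_CPU */
    #define MAX_PAGES       2048  /* stand-in for PGMR0DYNMAP_MAX_PAGES */
    #define SMALL_SEG_PAGES 128   /* stand-in for PGMR0DYNMAP_SMALL_SEG_PAGES */

    typedef struct cache
    {
        uint32_t cPages;          /* pages currently backing the cache */
        uint32_t cMaxLoad;        /* highest number of pages ever in use */
    } cache_t;

    /* Pattern 1: derive a target size and a minimum acceptable size from the
     * CPU count and the observed peak load, clamped so the cache never
     * shrinks and never exceeds the hard cap. */
    static uint32_t calc_new_size(const cache_t *pThis, uint32_t cCpus, uint32_t *pcMinPages)
    {
        uint32_t cPages    = cCpus * PAGES_PER_CPU;
        uint32_t cMinPages = cCpus * (PAGES_PER_CPU / 2);

        /* Grow the target while the peak load would overload it (the real
         * code uses PGMR0DYNMAP_CALC_OVERLOAD; 3/4 is an assumed bound). */
        while (pThis->cMaxLoad > cPages * 3 / 4)
            cPages += PAGES_PER_CPU;
        if (pThis->cMaxLoad > cMinPages)
            cMinPages = pThis->cMaxLoad;

        if (cPages < pThis->cPages)
            cPages = pThis->cPages;
        if (cPages > MAX_PAGES)
            cPages = MAX_PAGES;

        if (cMinPages < pThis->cPages)
            cMinPages = pThis->cPages;
        if (cMinPages > MAX_PAGES)
            cMinPages = MAX_PAGES;

        *pcMinPages = cMinPages;
        return cPages;
    }

    /* Hypothetical segment allocator drawing from a simulated pool so the
     * out-of-memory path can be exercised; returns 0 on success and -1 on
     * OOM (stand-ins for VINF_SUCCESS and VERR_NO_MEMORY). */
    static uint32_t g_cPagesAvail = 640;

    static int add_seg(cache_t *pThis, uint32_t cPages)
    {
        if (cPages > g_cPagesAvail)
            return -1;
        g_cPagesAvail -= cPages;
        pThis->cPages += cPages;
        return 0;
    }

    /* Pattern 2: try one big segment first; on OOM fall back to small
     * segments and accept the result if the minimum size was reached. */
    static int expand(cache_t *pThis, uint32_t cCpus)
    {
        uint32_t cMinPages;
        uint32_t cPages = calc_new_size(pThis, cCpus, &cMinPages);
        if (pThis->cPages >= cPages)
            return 0;

        int rc = add_seg(pThis, cPages - pThis->cPages);
        if (rc != 0)
        {
            do
                rc = add_seg(pThis, SMALL_SEG_PAGES);
            while (rc == 0 && pThis->cPages < cPages);
            if (rc != 0 && pThis->cPages >= cMinPages)
                rc = 0; /* good enough: the minimum was met */
        }
        return rc;
    }

    int main(void)
    {
        cache_t cache = { 0, 0 };
        /* 16 CPUs: target 1024 pages, minimum 512; only 640 are available,
         * so the big allocation fails and five 128-page segments succeed. */
        int rc = expand(&cache, 16);
        printf("rc=%d cPages=%" PRIu32 " (minimum was 512)\n", rc, cache.cPages);
        return rc == 0 ? 0 : 1;
    }

A side note on the VERR_TRY_AGAIN to VINF_TRY_AGAIN rename visible in the diff: IPRT status codes are negative for errors (VERR_*) and non-negative for success and informational statuses (VINF_*), so returning VINF_TRY_AGAIN lets the "mappings changed, interrupts were re-enabled, restart the walk" outcome pass RT_SUCCESS() checks while the do/while retry loop shown above handles it explicitly; the removed AssertReturn(rc != VERR_TRY_AGAIN, ...) guard becomes unnecessary for the same reason.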