Timestamp: Nov 21, 2008 7:23:59 PM
File: 1 edited
Legend: unchanged context lines are shown unprefixed; added lines are prefixed with "+", removed lines with "-".
trunk/src/VBox/VMM/VMMR0/PGMR0DynMap.cpp
--- PGMR0DynMap.cpp (r14404)
+++ PGMR0DynMap.cpp (r14484)

 
 /*******************************************************************************
+*   Defined Constants And Macros                                               *
+*******************************************************************************/
+/** The max size of the mapping cache (in pages). */
+#define PGMR0DYNMAP_MAX_PAGES               ((8*_1M) >> PAGE_SHIFT)
+/* * The max segment size. */
+/** @todo #define PGMR0DYNMAP_SEG_MAX_PAGES   (_1M >> PAGE_SHIFT) */
+/** The number of pages we reserve per CPU. */
+#define PGMR0DYNMAP_PAGES_PER_CPU           64
+/** Calcs the overload threshold. Current set at 50%. */
+#define PGMR0DYNMAP_CALC_OVERLOAD(cPages)   ((cPages) / 2)
+
+
+/*******************************************************************************
 *   Structures and Typedefs                                                    *
 *******************************************************************************/
…
     /** The memory object for the virtual address range that we're abusing. */
     RTR0MEMOBJ                  hMemObj;
-    /** The memory object for the page tables. */
-    RTR0MEMOBJ                  hMemObjPT;
     /** The start page in the cache. (I.e. index into the arrays.) */
-    uint32_t                    iPage;
+    uint16_t                    iPage;
     /** The number of pages this segment contributes. */
-    uint32_t                    cPages;
+    uint16_t                    cPages;
+    /** The number of page tables. */
+    uint16_t                    cPTs;
+    /** The memory objects for the page tables. */
+    RTR0MEMOBJ                  ahMemObjPT[1];
 } PGMR0DYNMAPSEG;
 /** Pointer to a ring-0 dynamic mapping cache segment. */
…
     /** List of segments. */
     PPGMR0DYNMAPSEG             pSegHead;
+    /** The paging mode. */
+    SUPPAGINGMODE               enmPgMode;
 } PGMR0DYNMAP;
 /** Pointer to the ring-0 dynamic mapping cache */
…
     AssertLogRelReturn(pThis, VERR_NO_MEMORY);
     int             rc = VINF_SUCCESS;
-    SUPPAGINGMODE   enmMode = SUPR0GetPagingMode();
-    switch (enmMode)
+    pThis->enmPgMode = SUPR0GetPagingMode();
+    switch (pThis->enmPgMode)
     {
         case SUPPAGINGMODE_32_BIT:
…
     if (pThis->cUsers == 1)
         rc = pgmR0DynMapSetup(pThis);
-    else if (pThis->cMaxLoad > pThis->cPages / 2)
+    else if (pThis->cMaxLoad > PGMR0DYNMAP_CALC_OVERLOAD(pThis->cPages))
         rc = pgmR0DynMapGrow(pThis);
     if (RT_FAILURE(rc))
…
 
 /**
- * Called by PGMR0DynMapInitVM under the init lock.
+ * Calculate the new cache size based on cMaxLoad statistics.
+ *
+ * @returns Number of pages.
+ * @param   pThis       The dynamic mapping cache instance.
+ */
+static uint32_t pgmR0DynMapCalcNewSize(PPGMR0DYNMAP pThis)
+{
+    /*
+     * cCpus * PGMR0DYNMAP_PAGES_PER_CPU.
+     */
+    RTCPUID  cCpus  = RTMpGetCount();
+    uint32_t cPages = cCpus * PGMR0DYNMAP_PAGES_PER_CPU;
+
+    /* adjust against cMaxLoad. */
+    AssertMsg(pThis->cMaxLoad <= PGMR0DYNMAP_MAX_PAGES, ("%#x\n", pThis->cMaxLoad));
+    if (pThis->cMaxLoad > PGMR0DYNMAP_MAX_PAGES)
+        pThis->cMaxLoad = 0;
+
+    while (pThis->cMaxLoad < PGMR0DYNMAP_CALC_OVERLOAD(cPages))
+        cPages += PGMR0DYNMAP_PAGES_PER_CPU;
+
+    /* adjust against max size. */
+    if (cPages > PGMR0DYNMAP_MAX_PAGES)
+        cPages = PGMR0DYNMAP_MAX_PAGES;
+
+    return cPages;
+}
+
+
+/**
+ * Adds a new segment of the specified size.
  *
  * @returns VBox status code.
  * @param   pThis       The dynamic mapping cache instance.
- */
-static int pgmR0DynMapSetup(PPGMR0DYNMAP pThis)
-{
-    return VINF_SUCCESS;
+ * @param   cPages      The size of the new segment, give as a page count.
+ */
+static int pgmR0DynMapAddSeg(PPGMR0DYNMAP pThis, uint32_t cPages)
+{
+#if 0
+    int rc2;
+
+    /*
+     * Do the array rellocation first.
+     * (Too lazy to clean these up on failure.)
+     */
+    void *pv = RTMemRealloc(pThis->paPages, sizeof(pThis->paPages[0]) * (pThis->cPages + cPages));
+    if (!pv)
+        return VERR_NO_MEMORY;
+    pThis->paPages = (PPGMR0DYNMAPENTRY)pv;
+
+    pv = RTMemRealloc(pThis->pvSavedPTEs, (pThis->fLegacyMode ? sizeof(X86PGUINT) : sizeof(X86PGPAEUINT)) * (pThis->cPages + cPages));
+    if (!pv)
+        return VERR_NO_MEMORY;
+    pThis->pvSavedPTEs = pv;
+
+    /*
+     * Allocate the segment structure and pages memory.
+     */
+    uint32_t cPTs = cPages / (pThis->fLegacyMode ? X86_PG_ENTRIES : X86_PG_PAE_ENTRIES) + 2;
+    PPGMR0DYNMAPSEG pSeg = RTMemAllocZ(RT_UOFFSETOF(PGMR0DYNMAPSEG, ahMemObjPTs[cPTs]));
+    if (!pSeg)
+        return VERR_NO_MEMORY;
+    pSeg->pNext  = NULL;
+    pSeg->cPages = cPages;
+    pSeg->iPage  = pThis->cPages;
+    pSeg->cPTs   = 0;
+    int rc = RTR0MemObjAllocPage(&pSeg->hMemObj, cPages << PAGE_SHIFT, false);
+    if (RT_SUCCESS(rc))
+    {
+        /*
+         * Walk the paging hierarchy and map the relevant page tables.
+         */
+        uint8_t    *pbPage = RTR0MemObjAddress(pSeg->hMemObj);
+        AssertMsg(VALID_PTR(pbPage) && !((uintptr_t)pbPage & PAGE_OFFSET_MASK), ("%p\n", pbPage));
+        uint32_t    iPage = pThis->cPages;
+        uint32_t    iEndPage = iPage + cPages;
+        struct
+        {
+            RTHCPHYS    HCPhys;     /**< The entry that's currently mapped */
+            RTHCPHYS    fPhysMask;  /**< Mask for extracting HCPhys from uEntry. */
+            RTR0MEMOBJ  hMemObj;
+            RTR0MEMOBJ  hMapObj;
+            uint64_t    fPtrMask;
+            uint32_t    fPtrShift;
+            uint64_t    fAndMask;
+            uint64_t    fResMask;
+            union
+            {
+                void   *pv;
+            } u;
+        } a[4];
+        RTCCUINTREG cr4 = ASMGetCR4();
+        uint32_t    cLevels;
+        switch (pThis->enmPgMode)
+        {
+            case SUPPAGINGMODE_32_BIT:
+            case SUPPAGINGMODE_32_BIT_GLOBAL:
+                cLevels = 2;
+                a[0].fAndMask  = X86_PDE_P | X86_PDE_RW | (cr4 & X86_CR4_PSE ? X86_PDE_PS : 0);
+                a[0].fResMask  = X86_PDE_P | X86_PDE_RW;
+                a[0].fPtrMask  = X86_PD_MASK;
+                a[0].fPtrShift = X86_PD_SHIFT;
+                a[1].fAndMask  = X86_PTE_P | X86_PTE_RW;
+                a[1].fResMask  = X86_PTE_P | X86_PTE_RW;
+                a[1].fPtrMask  = X86_PT_MASK;
+                a[1].fPtrShift = X86_PT_SHIFT;
+                break;
+
+            case SUPPAGINGMODE_PAE:
+            case SUPPAGINGMODE_PAE_GLOBAL:
+            case SUPPAGINGMODE_PAE_NX:
+            case SUPPAGINGMODE_PAE_GLOBAL_NX:
+                cLevels = 3;
+                a[0].fAndMask  = X86_PDPE_P;
+                a[0].fResMask  = X86_PDPE_P;
+                a[0].fPtrMask  = X86_PDPT_MASK_PAE;
+                a[0].fPtrShift = X86_PDPT_SHIFT;
+                a[1].fAndMask  = X86_PDE_P | X86_PDE_RW | (cr4 & X86_CR4_PSE ? X86_PDE_PS : 0);
+                a[1].fResMask  = X86_PDE_P | X86_PDE_RW;
+                a[1].fPtrMask  = X86_PD_MASK;
+                a[1].fPtrShift = X86_PD_SHIFT;
+                a[2].fAndMask  = X86_PTE_P | X86_PTE_RW;
+                a[2].fResMask  = X86_PTE_P | X86_PTE_RW;
+                a[2].fPtrMask  = X86_PT_MASK;
+                a[2].fPtrShift = X86_PT_SHIFT;
+                break;
+
+            case SUPPAGINGMODE_AMD64:
+            case SUPPAGINGMODE_AMD64_GLOBAL:
+            case SUPPAGINGMODE_AMD64_NX:
+            case SUPPAGINGMODE_AMD64_GLOBAL_NX:
+                cLevels = 3;
+                a[0].fAndMask  = X86_PML4E_P | X86_PML4E_RW;
+                a[0].fResMask  = X86_PML4E_P | X86_PML4E_RW;
+                a[0].fPtrMask  = X86_PML4_MASK;
+                a[0].fPtrShift = X86_PML4_SHIFT;
+                a[1].fAndMask  = X86_PDPE_P | X86_PDPE_RW /** @todo check for X86_PDPT_PS support. */;
+                a[1].fResMask  = X86_PDPE_P | X86_PDPE_RW;
+                a[1].fPtrMask  = X86_PDPT_MASK_AMD64;
+                a[1].fPtrShift = X86_PDPT_SHIFT;
+                a[2].fAndMask  = X86_PDE_P | X86_PDE_RW | (cr4 & X86_CR4_PSE ? X86_PDE_PS : 0);
+                a[2].fResMask  = X86_PDE_P | X86_PDE_RW;
+                a[2].fPtrMask  = X86_PD_MASK;
+                a[2].fPtrShift = X86_PD_SHIFT;
+                a[3].fAndMask  = X86_PTE_P | X86_PTE_RW;
+                a[3].fResMask  = X86_PTE_P | X86_PTE_RW;
+                a[3].fPtrMask  = X86_PT_MASK;
+                a[3].fPtrShift = X86_PT_SHIFT;
+                break;
+            default:
+                cLevels = 0;
+                break;
+        }
+        for (uint32_t i = 0; i < RT_ELEMENTS(a); i++)
+        {
+            a[i].HCPhys = NIL_RTHCPHYS;
+            a[i].hMapObj = a[i].hMemObj = NIL_RTR0MEMOBJ;
+            a[i].u.pv = NULL;
+        }
+
+        for (; iPage < iEndPage && RT_SUCCESS(rc); iPage++, pbPage += PAGE_SIZE)
+        {
+            /* Initialize it */
+            pThis->paPages[iPage].HCPhys = NIL_RTHCPHYS;
+            pThis->paPages[iPage].pvPage = pbPage;
+            pThis->paPages[iPage].cRefs  = 0;
+            pThis->paPages[iPage].uPte.pPae = NULL;
+            RTCpuSetFill(&pThis->paPages[iPage].PendingSet);
+
+            /*
+             * Map its page table.
+             *
+             * This is a bit ASSUMPTIVE, it should really do a clean run thru
+             * the tables everything something was mapped and disable preemption
+             * or/and interrupts.
+             */
+            X86PGPAEUINT uEntry = ASMGetCR3();
+            for (unsigned i = 0; i < cLevels && RT_SUCCESS(rc); i++)
+            {
+                RTHCPHYS HCPhys = uEntry & a[i].fPhysMask;
+                if (a[i].HCPhys != HCPhys)
+                {
+                    if (i + 1 != cLevels)
+                    {
+                        RTR0MemObjFree(a[i].hMemObj, true /* fFreeMappings */);
+                        a[i].hMemObj = a[i].hMapObj = NIL_RTR0MEMOBJ;
+                    }
+                    rc = RTR0MemObjEnterPhys(&a[i].hMemObj, HCPhys, PAGE_SIZE);
+                    if (RT_SUCCESS(rc))
+                        rc = RTR0MemObjMapKernel(&a[i].hMapObj, a[i].hMemObj, &a[i].u.pv, 0, RTMEM_PROT_WRITE | RTMEM_PROT_READ);
+                    if (RT_FAILURE(rc))
+                        break;
+                }
+
+            }
+
+
+        } /* for each page */
+
+        for (iPage = 0; i < cLevels; )
+
+        rc2 = RTR0MemObjFree(hMemObjCR3, true /* fFreeMappings */); AssertRC(rc2);
+
+        rc2 = RTR0MemObjFree(pSeg->hMemObj, true /* fFreeMappings */); AssertRC(rc2);
+    }
+    RTMemFree(pSeg);
+    return rc;
+#else
+    return VERR_NOT_IMPLEMENTED;
+#endif
 }
 
…
  * @param   pThis       The dynamic mapping cache instance.
  */
+static int pgmR0DynMapSetup(PPGMR0DYNMAP pThis)
+{
+    /*
+     * Calc the size and add a segment of that size.
+     */
+    uint32_t cPages = pgmR0DynMapCalcNewSize(pThis);
+    AssertReturn(cPages, VERR_INTERNAL_ERROR);
+    return pgmR0DynMapAddSeg(pThis, cPages);
+}
+
+
+/**
+ * Called by PGMR0DynMapInitVM under the init lock.
+ *
+ * @returns VBox status code.
+ * @param   pThis       The dynamic mapping cache instance.
+ */
 static int pgmR0DynMapGrow(PPGMR0DYNMAP pThis)
 {
-    return VINF_SUCCESS;
+    /*
+     * Calc the new target size and add a segment of the appropriate size.
651 */ 652 uint32_t cPages = pgmR0DynMapCalcNewSize(pThis); 653 if (pThis->cPages >= cPages) 654 return VINF_SUCCESS; 655 656 uint32_t cAdd = cPages - pThis->cPages; 657 return pgmR0DynMapAddSeg(pThis, cAdd); 414 658 } 415 659 … … 493 737 pThis->pSegHead = pSeg->pNext; 494 738 495 int rc; 496 rc = RTR0MemObjFree(pSeg->hMemObjPT, true /* fFreeMappings */); AssertRC(rc); 497 pSeg->hMemObjPT = NIL_RTR0MEMOBJ; 739 uint32_t iPT = pSeg->cPTs; 740 while (iPT-- > 0) 741 { 742 rc = RTR0MemObjFree(pSeg->ahMemObjPT[iPT], true /* fFreeMappings */); AssertRC(rc); 743 pSeg->ahMemObjPT[iPT] = NIL_RTR0MEMOBJ; 744 } 498 745 rc = RTR0MemObjFree(pSeg->hMemObj, true /* fFreeMappings */); AssertRC(rc); 499 746 pSeg->hMemObj = NIL_RTR0MEMOBJ; 500 747 pSeg->pNext = NULL; 501 pSeg->iPage = UINT 32_MAX;748 pSeg->iPage = UINT16_MAX; 502 749 pSeg->cPages = 0; 750 pSeg->cPTs = 0; 503 751 RTMemFree(pSeg); 504 752 }