Timestamp:
    Apr 5, 2009 1:47:09 PM
Location:
    trunk/src/VBox/VMM
Files:
    4 edited (MM.cpp, MMHyper.cpp, MMInternal.h, testcase/tstVMStructGC.cpp)
trunk/src/VBox/VMM/MM.cpp (r18665 → r18718)

 
     /*
-     * Release locked memory.
-     * (Associated record are released by the heap.)
-     */
-    PMMLOCKEDMEM pLockedMem = pVM->mm.s.pLockedMem;
-    while (pLockedMem)
-    {
-        int rc = SUPPageUnlock(pLockedMem->pv);
-        AssertMsgRC(rc, ("SUPPageUnlock(%p) -> rc=%d\n", pLockedMem->pv, rc));
-        switch (pLockedMem->eType)
-        {
-            case MM_LOCKED_TYPE_HYPER:
-                rc = SUPPageFree(pLockedMem->pv, pLockedMem->cb >> PAGE_SHIFT);
-                AssertMsgRC(rc, ("SUPPageFree(%p) -> rc=%d\n", pLockedMem->pv, rc));
-                break;
-            case MM_LOCKED_TYPE_HYPER_NOFREE:
-            case MM_LOCKED_TYPE_HYPER_PAGES:
-            case MM_LOCKED_TYPE_PHYS:
-                /* nothing to do. */
-                break;
-        }
-        /* next */
-        pLockedMem = pLockedMem->pNext;
-    }
-
-    /*
      * Zero stuff to detect after termination use of the MM interface
      */
     pVM->mm.s.offLookupHyper = NIL_OFFSET;
-    pVM->mm.s.pLockedMem     = NULL;
     pVM->mm.s.pHyperHeapR3   = NULL;        /* freed above. */
     pVM->mm.s.pHyperHeapR0   = NIL_RTR0PTR; /* freed above. */
…
 
 /**
- * Locks physical memory which backs a virtual memory range (HC) adding
- * the required records to the pLockedMem list.
- *
- * @returns VBox status code.
- * @param   pVM             The VM handle.
- * @param   pv              Pointer to memory range which shall be locked down.
- *                          This pointer is page aligned.
- * @param   cb              Size of memory range (in bytes). This size is page aligned.
- * @param   eType           Memory type.
- * @param   ppLockedMem     Where to store the pointer to the created locked memory record.
- *                          This is optional, pass NULL if not used.
- * @param   fSilentFailure  Don't raise an error when unsuccessful. Upper layer with deal with it.
- */
-int mmR3LockMem(PVM pVM, void *pv, size_t cb, MMLOCKEDTYPE eType, PMMLOCKEDMEM *ppLockedMem, bool fSilentFailure)
-{
-    Assert(RT_ALIGN_P(pv, PAGE_SIZE) == pv);
-    Assert(RT_ALIGN_Z(cb, PAGE_SIZE) == cb);
-
-    if (ppLockedMem)
-        *ppLockedMem = NULL;
-
-    /*
-     * Allocate locked mem structure.
-     */
-    unsigned cPages = (unsigned)(cb >> PAGE_SHIFT);
-    AssertReturn(cPages == (cb >> PAGE_SHIFT), VERR_OUT_OF_RANGE);
-    PMMLOCKEDMEM pLockedMem = (PMMLOCKEDMEM)MMR3HeapAlloc(pVM, MM_TAG_MM, RT_OFFSETOF(MMLOCKEDMEM, aPhysPages[cPages]));
-    if (!pLockedMem)
-        return VERR_NO_MEMORY;
-    pLockedMem->pv    = pv;
-    pLockedMem->cb    = cb;
-    pLockedMem->eType = eType;
-    memset(&pLockedMem->u, 0, sizeof(pLockedMem->u));
-
-    /*
-     * Lock the memory.
-     */
-    int rc = SUPPageLock(pv, cPages, &pLockedMem->aPhysPages[0]);
-    if (RT_SUCCESS(rc))
-    {
-        /*
-         * Setup the reserved field.
-         */
-        PSUPPAGE pPhysPage = &pLockedMem->aPhysPages[0];
-        for (unsigned c = cPages; c > 0; c--, pPhysPage++)
-            pPhysPage->uReserved = (RTHCUINTPTR)pLockedMem;
-
-        /*
-         * Insert into the list.
-         *
-         * ASSUME no protected needed here as only one thread in the system can possibly
-         * be doing this. No other threads will walk this list either we assume.
-         */
-        pLockedMem->pNext = pVM->mm.s.pLockedMem;
-        pVM->mm.s.pLockedMem = pLockedMem;
-        /* Set return value. */
-        if (ppLockedMem)
-            *ppLockedMem = pLockedMem;
-    }
-    else
-    {
-        AssertMsgFailed(("SUPPageLock failed with rc=%d\n", rc));
-        MMR3HeapFree(pLockedMem);
-        if (!fSilentFailure)
-            rc = VMSetError(pVM, rc, RT_SRC_POS, N_("Failed to lock %d bytes of host memory (out of memory)"), cb);
-    }
-
-    return rc;
-}
-
-
-/**
- * Maps a part of or an entire locked memory region into the guest context.
- *
- * @returns VBox status.
- *          God knows what happens if we fail...
- * @param   pVM         VM handle.
- * @param   pLockedMem  Locked memory structure.
- * @param   Addr        GC Address where to start the mapping.
- * @param   iPage       Page number in the locked memory region.
- * @param   cPages      Number of pages to map.
- * @param   fFlags      See the fFlags argument of PGR3Map().
- */
-int mmR3MapLocked(PVM pVM, PMMLOCKEDMEM pLockedMem, RTGCPTR Addr, unsigned iPage, size_t cPages, unsigned fFlags)
-{
-    /*
-     * Adjust ~0 argument
-     */
-    if (cPages == ~(size_t)0)
-        cPages = (pLockedMem->cb >> PAGE_SHIFT) - iPage;
-    Assert(cPages != ~0U);
-    /* no incorrect arguments are accepted */
-    Assert(RT_ALIGN_GCPT(Addr, PAGE_SIZE, RTGCPTR) == Addr);
-    AssertMsg(iPage < (pLockedMem->cb >> PAGE_SHIFT), ("never even think about giving me a bad iPage(=%d)\n", iPage));
-    AssertMsg(iPage + cPages <= (pLockedMem->cb >> PAGE_SHIFT), ("never even think about giving me a bad cPages(=%d)\n", cPages));
-
-    /*
-     * Map the pages.
-     */
-    PSUPPAGE pPhysPage = &pLockedMem->aPhysPages[iPage];
-    while (cPages)
-    {
-        RTHCPHYS HCPhys = pPhysPage->Phys;
-        int rc = PGMMap(pVM, Addr, HCPhys, PAGE_SIZE, fFlags);
-        if (RT_FAILURE(rc))
-        {
-            /** @todo how the hell can we do a proper bailout here. */
-            return rc;
-        }
-
-        /* next */
-        cPages--;
-        iPage++;
-        pPhysPage++;
-        Addr += PAGE_SIZE;
-    }
-
-    return VINF_SUCCESS;
-}
-
-
-/**
  * Convert HC Physical address to HC Virtual address.
  *
…
 
     /*
-     * Iterate the locked memory - very slow.
+     * Iterate thru the lookup records for HMA.
      */
     uint32_t off = HCPhys & PAGE_OFFSET_MASK;
     HCPhys &= X86_PTE_PAE_PG_MASK;
-    for (PMMLOCKEDMEM pCur = pVM->mm.s.pLockedMem; pCur; pCur = pCur->pNext)
-    {
-        size_t iPage = pCur->cb >> PAGE_SHIFT;
-        while (iPage-- > 0)
-            if ((pCur->aPhysPages[iPage].Phys & X86_PTE_PAE_PG_MASK) == HCPhys)
+    PMMLOOKUPHYPER pCur = (PMMLOOKUPHYPER)((uint8_t *)pVM->mm.s.CTX_SUFF(pHyperHeap) + pVM->mm.s.offLookupHyper);
+    for (;;)
+    {
+        switch (pCur->enmType)
+        {
+            case MMLOOKUPHYPERTYPE_LOCKED:
             {
-                *ppv = (char *)pCur->pv + (iPage << PAGE_SHIFT) + off;
-                return VINF_SUCCESS;
+                PCRTHCPHYS paHCPhysPages = pCur->u.Locked.paHCPhysPages;
+                size_t     iPage         = pCur->cb >> PAGE_SHIFT;
+                while (iPage-- > 0)
+                    if (paHCPhysPages[iPage] == HCPhys)
+                    {
+                        *ppv = (char *)pCur->u.Locked.pvR3 + (iPage << PAGE_SHIFT) + off;
+                        return VINF_SUCCESS;
+                    }
+                break;
             }
+
+            case MMLOOKUPHYPERTYPE_HCPHYS:
+                if (pCur->u.HCPhys.HCPhys - HCPhys < pCur->cb)
+                {
+                    *ppv = (uint8_t *)pCur->u.HCPhys.pvR3 + pCur->u.HCPhys.HCPhys - HCPhys + off;
+                    return VINF_SUCCESS;
+                }
+                break;
+
+            case MMLOOKUPHYPERTYPE_GCPHYS:  /* (for now we'll not allow these kind of conversions) */
+            case MMLOOKUPHYPERTYPE_MMIO2:
+            case MMLOOKUPHYPERTYPE_DYNAMIC:
+                break;
+
+            default:
+                AssertMsgFailed(("enmType=%d\n", pCur->enmType));
+                break;
+        }
+
+        /* next */
+        if (pCur->offNext == (int32_t)NIL_OFFSET)
+            break;
+        pCur = (PMMLOOKUPHYPER)((uint8_t *)pCur + pCur->offNext);
     }
     /* give up */
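The rewritten MMR3HCPhys2HCVirt above no longer scans a pointer-linked pLockedMem list; it walks the hypervisor-area lookup records, which are chained by byte offsets (offNext) inside the hyper heap and terminated by NIL_OFFSET. Below is a minimal standalone sketch of that offset-linked traversal pattern; the types, the MY_NIL_OFFSET marker, and the visitor callback are illustrative stand-ins, not the real MMLOOKUPHYPER definitions from MMInternal.h.

    #include <stdint.h>

    #define MY_NIL_OFFSET INT32_MAX          /* illustrative stand-in for NIL_OFFSET */

    /* Illustrative stand-in for a lookup record stored inside a heap block. */
    typedef struct LOOKUPREC
    {
        int32_t  offNext;                    /* byte offset from this record to the next, or MY_NIL_OFFSET */
        uint32_t cb;                         /* size of the range described by this record */
        /* ... type-specific payload omitted ... */
    } LOOKUPREC;

    /* Visit every record in a chain linked by relative byte offsets. */
    static void walkLookupRecords(uint8_t *pbHeap, int32_t offFirst,
                                  void (*pfnVisit)(LOOKUPREC *pRec, void *pvUser), void *pvUser)
    {
        LOOKUPREC *pCur = (LOOKUPREC *)(pbHeap + offFirst);
        for (;;)
        {
            pfnVisit(pCur, pvUser);
            if (pCur->offNext == MY_NIL_OFFSET)                     /* end of chain */
                break;
            pCur = (LOOKUPREC *)((uint8_t *)pCur + pCur->offNext);  /* offset is relative to the current record */
        }
    }

Chaining by relative offsets rather than pointers keeps the records position-independent, which fits the way the hyper heap is reached here through CTX_SUFF(pHyperHeap) from more than one context.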
trunk/src/VBox/VMM/MMHyper.cpp (r18430 → r18718)

     {
         RTGCPTR  GCPtr  = pVM->mm.s.pvHyperAreaGC + pLookup->off;
-        unsigned cPages = pLookup->cb >> PAGE_SHIFT;
+        uint32_t cPages = pLookup->cb >> PAGE_SHIFT;
         switch (pLookup->enmType)
         {
             case MMLOOKUPHYPERTYPE_LOCKED:
-                rc = mmR3MapLocked(pVM, pLookup->u.Locked.pLockedMem, GCPtr, 0, cPages, 0);
-                break;
+            {
+                PCRTHCPHYS paHCPhysPages = pLookup->u.Locked.paHCPhysPages;
+                for (uint32_t i = 0; i < cPages; i++)
+                {
+                    rc = PGMMap(pVM, GCPtr + (i << PAGE_SHIFT), paHCPhysPages[i], PAGE_SIZE, 0);
+                    AssertRCReturn(rc, rc);
+                }
+                break;
+            }
 
             case MMLOOKUPHYPERTYPE_HCPHYS:
…
             {
                 const RTGCPHYS GCPhys = pLookup->u.GCPhys.GCPhys;
-                const size_t   cb     = pLookup->cb;
-                for (unsigned off = 0; off < cb; off += PAGE_SIZE)
+                const uint32_t cb     = pLookup->cb;
+                for (uint32_t off = 0; off < cb; off += PAGE_SIZE)
                 {
                     RTHCPHYS HCPhys;
…
     {
         /*
-         * Create a locked memory record and tell PGM about this.
+         * Copy the physical page addresses and tell PGM about them.
          */
-        PMMLOCKEDMEM pLockedMem = (PMMLOCKEDMEM)MMR3HeapAlloc(pVM, MM_TAG_MM, RT_OFFSETOF(MMLOCKEDMEM, aPhysPages[cPages]));
-        if (pLockedMem)
-        {
-            pLockedMem->pv    = pvR3;
-            pLockedMem->cb    = cPages << PAGE_SHIFT;
-            pLockedMem->eType = MM_LOCKED_TYPE_HYPER_PAGES;
-            memset(&pLockedMem->u, 0, sizeof(pLockedMem->u));
+        PRTHCPHYS paHCPhysPages = (PRTHCPHYS)MMR3HeapAlloc(pVM, MM_TAG_MM, sizeof(RTHCPHYS) * cPages);
+        if (paHCPhysPages)
+        {
             for (size_t i = 0; i < cPages; i++)
             {
                 AssertReleaseReturn(paPages[i].Phys != 0 && paPages[i].Phys != NIL_RTHCPHYS && !(paPages[i].Phys & PAGE_OFFSET_MASK), VERR_INTERNAL_ERROR);
-                pLockedMem->aPhysPages[i].Phys      = paPages[i].Phys;
-                pLockedMem->aPhysPages[i].uReserved = (RTHCUINTPTR)pLockedMem;
+                paHCPhysPages[i] = paPages[i].Phys;
             }
 
-            /* map the stuff into guest address space. */
             if (pVM->mm.s.fPGMInitialized)
-                rc = mmR3MapLocked(pVM, pLockedMem, GCPtr, 0, ~(size_t)0, 0);
+            {
+                for (size_t i = 0; i < cPages; i++)
+                {
+                    rc = PGMMap(pVM, GCPtr + (i << PAGE_SHIFT), paHCPhysPages[i], PAGE_SIZE, 0);
+                    AssertRCReturn(rc, rc);
+                }
+            }
             if (RT_SUCCESS(rc))
             {
                 pLookup->enmType = MMLOOKUPHYPERTYPE_LOCKED;
-                pLookup->u.Locked.pvR3       = pvR3;
-                pLookup->u.Locked.pvR0       = pvR0;
-                pLookup->u.Locked.pLockedMem = pLockedMem;
+                pLookup->u.Locked.pvR3          = pvR3;
+                pLookup->u.Locked.pvR0          = pvR0;
+                pLookup->u.Locked.paHCPhysPages = paHCPhysPages;
 
                 /* done. */
…
                 unsigned off = (uint8_t *)pvR3 - (uint8_t *)pLookup->u.Locked.pvR3;
                 if (off < pLookup->cb)
-                    return (pLookup->u.Locked.pLockedMem->aPhysPages[off >> PAGE_SHIFT].Phys & X86_PTE_PAE_PG_MASK) | (off & PAGE_OFFSET_MASK);
+                    return pLookup->u.Locked.paHCPhysPages[off >> PAGE_SHIFT] | (off & PAGE_OFFSET_MASK);
                 break;
             }
…
                                 pLookup->u.Locked.pvR3,
                                 pLookup->u.Locked.pvR0,
-                                sizeof(RTHCPTR) * 2,
-                                pLookup->u.Locked.pLockedMem->eType == MM_LOCKED_TYPE_HYPER_NOFREE ? "nofree"
-                                : pLookup->u.Locked.pLockedMem->eType == MM_LOCKED_TYPE_HYPER       ? "autofree"
-                                : pLookup->u.Locked.pLockedMem->eType == MM_LOCKED_TYPE_HYPER_PAGES ? "pages"
-                                : pLookup->u.Locked.pLockedMem->eType == MM_LOCKED_TYPE_PHYS        ? "gstphys"
-                                : "??",
+                                sizeof(RTHCPTR) * 2, "",
                                 pLookup->pszDesc);
                 break;
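With mmR3MapLocked gone, MMHyper.cpp now maps a locked range by looping over the per-page host-physical addresses and calling PGMMap once per page. The sketch below shows that per-page loop in isolation; mapOnePage-style callback, type names, and the error convention are simplified stand-ins rather than the real PGMMap signature.

    #include <stddef.h>
    #include <stdint.h>

    #define MY_PAGE_SHIFT 12                 /* 4 KiB pages, as PAGE_SIZE/PAGE_SHIFT imply */

    typedef uint64_t HOSTPHYS;               /* stand-in for RTHCPHYS */
    typedef uint64_t GUESTPTR;               /* stand-in for RTGCPTR  */

    /* Hypothetical single-page mapping primitive standing in for PGMMap(). */
    typedef int (*PFNMAPPAGE)(GUESTPTR GCPtr, HOSTPHYS HCPhys, uint32_t fFlags, void *pvUser);

    /* Map cPages consecutive guest pages onto the given host-physical pages,
     * stopping at the first failure much like the AssertRCReturn() in the change. */
    static int mapPageArray(GUESTPTR GCPtrBase, const HOSTPHYS *paHCPhysPages, size_t cPages,
                            PFNMAPPAGE pfnMapPage, void *pvUser)
    {
        for (size_t i = 0; i < cPages; i++)
        {
            int rc = pfnMapPage(GCPtrBase + ((GUESTPTR)i << MY_PAGE_SHIFT),
                                paHCPhysPages[i], 0 /*fFlags*/, pvUser);
            if (rc != 0)
                return rc;                   /* propagate the first error */
        }
        return 0;
    }

Compared with the old interface, the caller now owns the page array directly, so there is no need for a variable-length MMLOCKEDMEM record or the uReserved back-pointer that used to tie each SUPPAGE entry to it.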
trunk/src/VBox/VMM/MMInternal.h (r18665 → r18718)

 
 
-
-/**
- * Type of memory that's locked.
- */
-typedef enum MMLOCKEDTYPE
-{
-    /** Hypervisor: Ring-3 memory locked by MM. */
-    MM_LOCKED_TYPE_HYPER,
-    /** Hypervisor: Ring-3 memory locked by MM that shouldn't be freed up. */
-    MM_LOCKED_TYPE_HYPER_NOFREE,
-    /** Hypervisor: Pre-locked ring-3 pages. */
-    MM_LOCKED_TYPE_HYPER_PAGES,
-    /** Guest: Physical VM memory (RAM & MMIO2). */
-    MM_LOCKED_TYPE_PHYS
-} MMLOCKEDTYPE;
-/** Pointer to memory type. */
-typedef MMLOCKEDTYPE *PMMLOCKEDTYPE;
-
-
-/**
- * Converts a SUPPAGE pointer to a MMLOCKEDMEM pointer.
- * @returns Pointer to the MMLOCKEDMEM record the range is associated with.
- * @param   pSupPage    Pointer to SUPPAGE structure managed by MM.
- */
-#define MM_SUPRANGE_TO_MMLOCKEDMEM(pSupPage) ((PMMLOCKEDMEM)pSupPage->uReserved)
-
-
-/**
- * Locked memory record.
- */
-typedef struct MMLOCKEDMEM
-{
-    /** Address (host mapping). */
-    void               *pv;
-    /** Size. */
-    size_t              cb;
-    /** Next record. */
-    struct MMLOCKEDMEM *pNext;
-    /** Record type. */
-    MMLOCKEDTYPE        eType;
-    /** Type specific data. */
-    union
-    {
-        /** Data for MM_LOCKED_TYPE_HYPER, MM_LOCKED_TYPE_HYPER_NOFREE and MM_LOCKED_TYPE_HYPER_PAGES. */
-        struct
-        {
-            unsigned    uNothing;
-        } hyper;
-
-        /** Data for MM_LOCKED_TYPE_PHYS. */
-        struct
-        {
-            /** The GC physical address.
-             * (Assuming that this is a linear range of GC physical pages.)
-             */
-            RTGCPHYS    GCPhys;
-        } phys;
-    } u;
-
-    /** Physical Page Array. (Variable length.)
-     * The uReserved field contains pointer to the MMLOCKMEM record.
-     * Use the macro MM_SUPPAGE_TO_MMLOCKEDMEM() to convert.
-     *
-     * For MM_LOCKED_TYPE_PHYS the low 12 bits of the pvPhys member
-     * are bits (MM_RAM_FLAGS_*) and not part of the physical address.
-     */
-    SUPPAGE             aPhysPages[1];
-} MMLOCKEDMEM;
-/** Pointer to locked memory. */
-typedef MMLOCKEDMEM *PMMLOCKEDMEM;
-
-
 /**
  * Hypervisor memory mapping type.
…
         /** Host context ring-0 pointer. Optional. */
         RTR0PTR     pvR0;
-        /** Pointer to the locked mem record. */
-        R3PTRTYPE(PMMLOCKEDMEM) pLockedMem;
+        /** Pointer to an array containing the physical address of each page. */
+        R3PTRTYPE(PRTHCPHYS) paHCPhysPages;
     } Locked;
 
…
     /** Page pool pages in low memory R3 Ptr. */
     R3PTRTYPE(PMMPAGEPOOL)      pPagePoolLowR3;
-    /** List of memory locks. (HC only) */
-    R3PTRTYPE(PMMLOCKEDMEM)     pLockedMem;
 
     /** Pointer to the dummy page.
…
 int  mmR3HyperInitPaging(PVM pVM);
 
-int  mmR3LockMem(PVM pVM, void *pv, size_t cb, MMLOCKEDTYPE eType, PMMLOCKEDMEM *ppLockedMem, bool fSilentFailure);
-int  mmR3MapLocked(PVM pVM, PMMLOCKEDMEM pLockedMem, RTGCPTR Addr, unsigned iPage, size_t cPages, unsigned fFlags);
-
 const char *mmR3GetTagName(MMTAG enmTag);
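The header change is the heart of the cleanup: the MMLOCKEDTYPE enum, the MM_SUPRANGE_TO_MMLOCKEDMEM macro, and the variable-length MMLOCKEDMEM record all disappear, and the Locked lookup variant instead stores a flat array of host physical page addresses. A simplified mirror of the resulting record is sketched below; field order, widths, and any fields not visible in this diff are assumptions, not the exact MMLOOKUPHYPER layout.

    #include <stdint.h>

    typedef uint64_t HOSTPHYS;               /* stand-in for RTHCPHYS */

    typedef enum LOOKUPTYPE                  /* illustrative subset of MMLOOKUPHYPERTYPE_* */
    {
        LOOKUPTYPE_LOCKED = 1,
        LOOKUPTYPE_HCPHYS,
        LOOKUPTYPE_GCPHYS,
        LOOKUPTYPE_MMIO2,
        LOOKUPTYPE_DYNAMIC
    } LOOKUPTYPE;

    typedef struct LOOKUPHYPER
    {
        int32_t     offNext;                 /* offset to the next record, or a NIL marker */
        uint32_t    off;                     /* offset of the mapping within the hypervisor area */
        uint32_t    cb;                      /* size of the mapping */
        LOOKUPTYPE  enmType;
        union
        {
            struct
            {
                void       *pvR3;            /* ring-3 address of the locked range */
                uintptr_t   pvR0;            /* ring-0 address, optional */
                HOSTPHYS   *paHCPhysPages;   /* one physical address per page (replaces pLockedMem) */
            } Locked;
            struct
            {
                void       *pvR3;
                HOSTPHYS    HCPhys;
            } HCPhys;
        } u;
        const char *pszDesc;                 /* description printed by the info handler */
    } LOOKUPHYPER;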
trunk/src/VBox/VMM/testcase/tstVMStructGC.cpp (r18665 → r18718)

     GEN_CHECK_OFF(MM, pHyperHeapR3);
     GEN_CHECK_OFF(MM, pHyperHeapR0);
-    GEN_CHECK_OFF(MM, pLockedMem);
     GEN_CHECK_OFF(MM, pPagePoolR3);
     GEN_CHECK_OFF(MM, pPagePoolLowR3);
…
     GEN_CHECK_OFF(MMLOOKUPHYPER, u.Locked.pvR3);
     GEN_CHECK_OFF(MMLOOKUPHYPER, u.Locked.pvR0);
-    GEN_CHECK_OFF(MMLOOKUPHYPER, u.Locked.pLockedMem);
+    GEN_CHECK_OFF(MMLOOKUPHYPER, u.Locked.paHCPhysPages);
     GEN_CHECK_OFF(MMLOOKUPHYPER, u.HCPhys.pvR3);
     GEN_CHECK_OFF(MMLOOKUPHYPER, u.HCPhys.HCPhys);
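The testcase only needs its offset checks brought in line with the renamed member. The idea behind GEN_CHECK_OFF is an offsetof()-based dump that lets structure layouts be compared across differently built contexts; a minimal standalone illustration of that technique is sketched below. It is not the actual GEN_CHECK_OFF macro, which emits data consumed by the build, and the SAMPLE structure is purely hypothetical.

    #include <stddef.h>
    #include <stdint.h>
    #include <stdio.h>

    /* Print a member's offset so the output of two builds can simply be diffed. */
    #define DUMP_OFF(type, member) \
        printf("%-16s %-20s %u\n", #type, #member, (unsigned)offsetof(type, member))

    typedef struct SAMPLE
    {
        void     *pvR3;
        uint64_t *paHCPhysPages;
    } SAMPLE;

    int main(void)
    {
        DUMP_OFF(SAMPLE, pvR3);
        DUMP_OFF(SAMPLE, paHCPhysPages);
        return 0;
    }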