Changeset 4689 in vbox for trunk/src/VBox
Timestamp: Sep 11, 2007 9:18:54 AM
Location:  trunk/src/VBox/VMM
Files:     4 edited
trunk/src/VBox/VMM/PGMInternal.h
Diff against r4665.

Added after the PGMPHYSCACHE definition:

/** Pointer to an allocation chunk ring-3 mapping. */
typedef struct PGMCHUNKR3MAP *PPGMCHUNKR3MAP;
/** Pointer to an allocation chunk ring-3 mapping pointer. */
typedef PPGMCHUNKR3MAP *PPPGMCHUNKR3MAP;

/**
 * Ring-3 tracking structure for an allocation chunk ring-3 mapping.
 *
 * The primary tree (Core) uses the chunk id as key.
 * The secondary tree (AgeCore) is used for ageing and uses the ageing sequence number as key.
 */
typedef struct PGMCHUNKR3MAP
{
    /** The key is the chunk id. */
    AVLU32NODECORE          Core;
    /** The key is the ageing sequence number. */
    AVLLU32NODECORE         AgeCore;
    /** The current age. */
    uint32_t                iAge;
    /** The current reference count. */
    uint32_t volatile       cRefs;
    /** The current permanent reference count. */
    uint32_t volatile       cPermRefs;
    /** The mapping address. */
    void                   *pv;
} PGMCHUNKR3MAP;

/**
 * Allocation chunk ring-3 mapping TLB entry.
 */
typedef struct PGMCHUNKR3MAPTLBE
{
    /** The chunk id. */
    uint32_t                    idChunk;
#if HC_ARCH_BITS == 64
    uint32_t                    u32Padding; /**< alignment padding. */
#endif
    /** The chunk map. */
    HCPTRTYPE(PPGMCHUNKR3MAP)   pChunk;
} PGMCHUNKR3MAPTLBE;
/** Pointer to an allocation chunk ring-3 mapping TLB entry. */
typedef PGMCHUNKR3MAPTLBE *PPGMCHUNKR3MAPTLBE;

/** The number of TLB entries in PGMCHUNKR3MAPTLB. */
#define PGMCHUNKR3MAPTLB_ENTRIES 32

/**
 * Allocation chunk ring-3 mapping TLB.
 *
 * @remarks We use a TLB to speed up lookups by avoiding walking the AVL tree.
 *          At first glance this might look odd, since AVL trees are supposed
 *          to give the most optimal lookup times of all trees due to their
 *          balancing. However, take a tree with 1023 nodes in it: that is
 *          10 levels, so most searches have to go down 9 levels before they
 *          find what they want, which isn't fast compared to a TLB hit. Add
 *          cache misses and the branch prediction problems trees suffer
 *          from, and that is why we put TLBs in front of most of the trees.
 *
 * @todo    Generalize this TLB + AVL stuff; it shouldn't be all that
 *          difficult when we switch to inlined AVL trees (from kStuff).
 */
typedef struct PGMCHUNKR3MAPTLB
{
    /** The TLB entries. */
    PGMCHUNKR3MAPTLBE   aEntries[PGMCHUNKR3MAPTLB_ENTRIES];
} PGMCHUNKR3MAPTLB;

/**
 * Ring-3 guest page mapping TLB entry.
 * @remarks used in ring-0 as well at the moment.
 */
typedef struct PGMPAGER3MAPTLBE
{
    /** The page id. */
    uint32_t                    idPage;
#if HC_ARCH_BITS == 64
    uint32_t                    u32Padding; /**< alignment padding. */
#endif
    /** The guest page. */
    HCPTRTYPE(PPGMPAGE)         pPage;
    /** Pointer to the page mapping tracking structure, PGMCHUNKR3MAP. */
    HCPTRTYPE(PPGMCHUNKR3MAP)   pMap;
    /** The address. */
    HCPTRTYPE(void *)           pv;
} PGMPAGER3MAPTLBE;
/** Pointer to an entry in the HC physical TLB. */
typedef PGMPAGER3MAPTLBE *PPGMPAGER3MAPTLBE;

/** The number of entries in the ring-3 guest page mapping TLB. */
#define PGMPAGER3MAPTLB_ENTRIES 64

/**
 * Ring-3 guest page mapping TLB.
 * @remarks used in ring-0 as well at the moment.
 */
typedef struct PGMPAGER3MAPTLB
{
    /** The TLB entries. */
    PGMPAGER3MAPTLBE    aEntries[PGMPAGER3MAPTLB_ENTRIES];
} PGMPAGER3MAPTLB;
/** Pointer to the ring-3 guest page mapping TLB. */
typedef PGMPAGER3MAPTLB *PPGMPAGER3MAPTLB;

Added to the PGM instance data, after the pgmphyswritecache member and before the release statistics:

    /**
     * Data associated with managing the ring-3 mappings of the allocation chunks.
     */
    struct
    {
        /** The chunk tree, ordered by chunk id. */
        HCPTRTYPE(PAVLU32NODECORE)  pTree;
        /** The chunk mapping TLB. */
        PGMCHUNKR3MAPTLB            Tlb;
        /** The number of mapped chunks. */
        uint32_t                    c;
        /** The maximum number of mapped chunks.
         * @cfgm PGM/MaxRing3Chunks */
        uint32_t                    cMax;
        /** The chunk age tree, ordered by ageing sequence number. */
        HCPTRTYPE(PAVLLU32NODECORE) pAgeTree;
        /** The current time (ageing sequence number). */
        uint32_t                    iNow;
        /** Number of pgmR3PhysChunkFindUnmapCandidate calls left until the next ageing. */
        uint32_t                    AgeingCountdown;
    } ChunkR3Map;

    /**
     * The page mapping TLB for ring-3 and (for the time being) ring-0.
     */
    PGMPAGER3MAPTLB                 PhysTlbHC;
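The @remarks above motivate putting a small direct-mapped TLB in front of each AVL tree. The changeset only adds the data structures; a minimal sketch of the intended lookup pattern, where the helper name and the trivial modulo hash are assumptions and only the structures and the IPRT tree lookup RTAvlU32Get come from the source:

/* Sketch of a TLB-in-front-of-AVL lookup; not code from this changeset. */
DECLINLINE(PPGMCHUNKR3MAP) pgmChunkR3MapLookupSketch(PPGM pPGM, uint32_t idChunk)
{
    /* One direct-mapped slot per hashed chunk id - a hit avoids the tree walk. */
    PPGMCHUNKR3MAPTLBE pTlbe = &pPGM->ChunkR3Map.Tlb.aEntries[idChunk % PGMCHUNKR3MAPTLB_ENTRIES];
    if (    pTlbe->idChunk == idChunk
        &&  pTlbe->pChunk)
        return pTlbe->pChunk;

    /* TLB miss: fall back to the AVL tree keyed by chunk id ... */
    PPGMCHUNKR3MAP pChunk = (PPGMCHUNKR3MAP)RTAvlU32Get(&pPGM->ChunkR3Map.pTree, idChunk);
    if (pChunk)
    {
        /* ... and refill the slot so the next lookup of this chunk hits. */
        pTlbe->idChunk = idChunk;
        pTlbe->pChunk  = pChunk;
    }
    return pChunk;
}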
trunk/src/VBox/VMM/PGMPhys.cpp
Diff against r4620.

Added near the top of the file (the PGMR3PhysRead/Write template instantiations were moved up here):

/*
 * PGMR3PhysReadByte/Word/Dword
 * PGMR3PhysWriteByte/Word/Dword
 */

#define PGMPHYSFN_READNAME  PGMR3PhysReadByte
#define PGMPHYSFN_WRITENAME PGMR3PhysWriteByte
#define PGMPHYS_DATASIZE    1
#define PGMPHYS_DATATYPE    uint8_t
#include "PGMPhys.h"

#define PGMPHYSFN_READNAME  PGMR3PhysReadWord
#define PGMPHYSFN_WRITENAME PGMR3PhysWriteWord
#define PGMPHYS_DATASIZE    2
#define PGMPHYS_DATATYPE    uint16_t
#include "PGMPhys.h"

#define PGMPHYSFN_READNAME  PGMR3PhysReadDword
#define PGMPHYSFN_WRITENAME PGMR3PhysWriteDword
#define PGMPHYS_DATASIZE    4
#define PGMPHYS_DATATYPE    uint32_t
#include "PGMPhys.h"

Further down, the old copy of that template block was removed and the ring-3 chunk mapping cache code was added in its place:

/**
 * Tree enumeration callback for dealing with age rollover.
 * It will perform a simple compression of the current age.
 */
static DECLCALLBACK(int) pgmR3PhysChunkAgeingRolloverCallback(PAVLU32NODECORE pNode, void *pvUser)
{
    /* Age compression - ASSUMES iNow == 4. */
    PPGMCHUNKR3MAP pChunk = (PPGMCHUNKR3MAP)pNode;
    if (pChunk->iAge >= UINT32_C(0xffffff00))
        pChunk->iAge = 3;
    else if (pChunk->iAge >= UINT32_C(0xfffff000))
        pChunk->iAge = 2;
    else if (pChunk->iAge)
        pChunk->iAge = 1;
    else /* iAge = 0 */
        pChunk->iAge = 4;

    /* reinsert */
    PVM pVM = (PVM)pvUser;
    RTAvllU32Remove(&pVM->pgm.s.ChunkR3Map.pAgeTree, pChunk->AgeCore.Key);
    pChunk->AgeCore.Key = pChunk->iAge;
    RTAvllU32Insert(&pVM->pgm.s.ChunkR3Map.pAgeTree, &pChunk->AgeCore);
    return 0;
}


/**
 * Tree enumeration callback that updates the chunks that have
 * been used since the last ageing.
 */
static DECLCALLBACK(int) pgmR3PhysChunkAgeingCallback(PAVLU32NODECORE pNode, void *pvUser)
{
    PPGMCHUNKR3MAP pChunk = (PPGMCHUNKR3MAP)pNode;
    if (!pChunk->iAge)
    {
        PVM pVM = (PVM)pvUser;
        RTAvllU32Remove(&pVM->pgm.s.ChunkR3Map.pAgeTree, pChunk->AgeCore.Key);
        pChunk->AgeCore.Key = pChunk->iAge = pVM->pgm.s.ChunkR3Map.iNow;
        RTAvllU32Insert(&pVM->pgm.s.ChunkR3Map.pAgeTree, &pChunk->AgeCore);
    }

    return 0;
}


/**
 * Performs ageing of the ring-3 chunk mappings.
 *
 * @param   pVM     The VM handle.
 */
PGMR3DECL(void) PGMR3PhysChunkAgeing(PVM pVM)
{
    pVM->pgm.s.ChunkR3Map.AgeingCountdown = RT_MIN(pVM->pgm.s.ChunkR3Map.cMax / 4, 1024);
    pVM->pgm.s.ChunkR3Map.iNow++;
    if (pVM->pgm.s.ChunkR3Map.iNow == 0)
    {
        pVM->pgm.s.ChunkR3Map.iNow = 4;
        RTAvlU32DoWithAll(&pVM->pgm.s.ChunkR3Map.pTree, true /*fFromLeft*/, pgmR3PhysChunkAgeingRolloverCallback, pVM);
    }
    else
        RTAvlU32DoWithAll(&pVM->pgm.s.ChunkR3Map.pTree, true /*fFromLeft*/, pgmR3PhysChunkAgeingCallback, pVM);
}
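Taken together, iNow and iAge implement a cheap approximate-LRU clock: a chunk's iAge is zeroed whenever it is used, the periodic ageing pass stamps every such chunk with the current iNow, and the rollover callback compresses ancient stamps back into the 1-4 range when the 32-bit counter wraps. The zeroing side is not part of this changeset; a minimal sketch of what it would look like, with the helper name assumed:

/* Sketch only: marking a chunk as "just used". The next PGMR3PhysChunkAgeing
 * pass re-keys it in the age tree with the current iNow, so the leftmost node
 * of pAgeTree remains the mapping that has gone unused the longest. */
DECLINLINE(void) pgmChunkR3MapTouchSketch(PPGMCHUNKR3MAP pChunk)
{
    pChunk->iAge = 0;
}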
/**
 * The structure passed in the pvUser argument of pgmR3PhysChunkUnmapCandidateCallback().
 */
typedef struct PGMR3PHYSCHUNKUNMAPCB
{
    PVM             pVM;    /**< The VM handle. */
    PPGMCHUNKR3MAP  pChunk; /**< The chunk to unmap. */
} PGMR3PHYSCHUNKUNMAPCB, *PPGMR3PHYSCHUNKUNMAPCB;


/**
 * Callback used to find the mapping that's been unused for
 * the longest time.
 */
static DECLCALLBACK(int) pgmR3PhysChunkUnmapCandidateCallback(PAVLLU32NODECORE pNode, void *pvUser)
{
    do
    {
        PPGMCHUNKR3MAP pChunk = (PPGMCHUNKR3MAP)((uint8_t *)pNode - RT_OFFSETOF(PGMCHUNKR3MAP, AgeCore));
        if (    pChunk->iAge
            &&  !pChunk->cRefs)
        {
            /*
             * Check that it's not in any of the TLBs.
             */
            PVM pVM = ((PPGMR3PHYSCHUNKUNMAPCB)pvUser)->pVM;
            for (unsigned i = 0; i < RT_ELEMENTS(pVM->pgm.s.ChunkR3Map.Tlb.aEntries); i++)
                if (pVM->pgm.s.ChunkR3Map.Tlb.aEntries[i].pChunk == pChunk)
                {
                    pChunk = NULL;
                    break;
                }
            if (pChunk)
                for (unsigned i = 0; i < RT_ELEMENTS(pVM->pgm.s.PhysTlbHC.aEntries); i++)
                    if (pVM->pgm.s.PhysTlbHC.aEntries[i].pMap == pChunk)
                    {
                        pChunk = NULL;
                        break;
                    }
            if (pChunk)
            {
                ((PPGMR3PHYSCHUNKUNMAPCB)pvUser)->pChunk = pChunk;
                return 1; /* done */
            }
        }

        /* next with the same age - this version of the AVL API doesn't enumerate the list, so we have to do it. */
        pNode = pNode->pList;
    } while (pNode);
    return 0;
}


/**
 * Finds a good candidate for unmapping when the ring-3 mapping cache is full.
 *
 * The candidate will not be part of any TLBs, so no need to flush
 * anything afterwards.
 *
 * @returns Chunk id.
 * @param   pVM     The VM handle.
 */
static int32_t pgmR3PhysChunkFindUnmapCandidate(PVM pVM)
{
    /*
     * Do tree ageing first?
     */
    if (pVM->pgm.s.ChunkR3Map.AgeingCountdown-- == 0)
        PGMR3PhysChunkAgeing(pVM);

    /*
     * Enumerate the age tree starting with the left most node.
     */
    PGMR3PHYSCHUNKUNMAPCB Args;
    Args.pVM    = pVM;
    Args.pChunk = NULL;
    if (RTAvllU32DoWithAll(&pVM->pgm.s.ChunkR3Map.pAgeTree, true /*fFromLeft*/, pgmR3PhysChunkUnmapCandidateCallback, &Args))
        return Args.pChunk->Core.Key;
    return INT32_MAX;
}
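Two details of the candidate search are easy to miss. The age tree is a "long" AVL tree (AVLLU32), so every chunk stamped with the same age shares one tree position and the duplicates are chained through pList, which is why the callback walks that list itself. And because AgeCore sits in the middle of PGMCHUNKR3MAP, the callback recovers the owning structure with container-of pointer arithmetic. A sketch of that idiom, where the macro name is illustrative and only RT_OFFSETOF comes from the source:

/* Illustrative container-of helper matching the pointer arithmetic in
 * pgmR3PhysChunkUnmapCandidateCallback(); the macro name is an assumption. */
#define PGMCHUNKR3MAP_FROM_AGECORE(pAgeNode) \
    ((PPGMCHUNKR3MAP)((uint8_t *)(pAgeNode) - RT_OFFSETOF(PGMCHUNKR3MAP, AgeCore)))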
#define VMMR0_DO_PGM_MAP_CHUNK 0 // later
/**
 * Argument package for the VMMR0_DO_PGM_MAP_CHUNK request.
 */
typedef struct PGMMAPCHUNKREQ
{
    /** The chunk to map, UINT32_MAX if unmap only. (IN) */
    uint32_t    idChunkMap;
    /** The chunk to unmap, UINT32_MAX if map only. (IN) */
    uint32_t    idChunkUnmap;
    /** Where the mapping address is returned. (OUT) */
    RTR3PTR     pvR3;
} PGMMAPCHUNKREQ;


/**
 * Maps the given chunk into the ring-3 mapping cache.
 *
 * This will call ring-0.
 *
 * @returns VBox status code.
 * @param   pVM     The VM handle.
 * @param   idChunk The chunk in question.
 * @param   ppChunk Where to store the chunk tracking structure.
 *
 * @remarks Called from within the PGM critical section.
 */
int pgmR3PhysChunkMap(PVM pVM, uint32_t idChunk, PPPGMCHUNKR3MAP ppChunk)
{
    int rc;

    /*
     * Allocate a new tracking structure first.
     */
#if 0 /* for later when we've got a separate mapping method for ring-0. */
    PPGMCHUNKR3MAP pChunk = (PPGMCHUNKR3MAP)MMR3HeapAlloc(pVM, MM_TAG_PGM_CHUNK_MAPPING, sizeof(*pChunk));
    AssertReturn(pChunk, VERR_NO_MEMORY);
#else
    PPGMCHUNKR3MAP pChunk;
    rc = MMHyperAlloc(pVM, sizeof(*pChunk), 0, MM_TAG_PGM_CHUNK_MAPPING, (void **)&pChunk);
    AssertRCReturn(rc, rc);
#endif
    pChunk->Core.Key    = idChunk;
    pChunk->AgeCore.Key = pVM->pgm.s.ChunkR3Map.iNow;
    pChunk->iAge        = 0;
    pChunk->cRefs       = 0;
    pChunk->cPermRefs   = 0;
    pChunk->pv          = NULL;

    /*
     * Request the ring-0 part to map the chunk in question and if
     * necessary unmap another one to make space in the mapping cache.
     */
    PGMMAPCHUNKREQ Req;
    Req.pvR3         = NULL;
    Req.idChunkMap   = idChunk;
    Req.idChunkUnmap = INT32_MAX;
    if (pVM->pgm.s.ChunkR3Map.c >= pVM->pgm.s.ChunkR3Map.cMax)
        Req.idChunkUnmap = pgmR3PhysChunkFindUnmapCandidate(pVM);
    /** @todo SUPCallVMMR0Ex needs to support in+out or similar. */
    rc = SUPCallVMMR0Ex(pVM->pVMR0, VMMR0_DO_PGM_MAP_CHUNK, &Req, sizeof(Req));
    if (VBOX_SUCCESS(rc))
    {
        /*
         * Update the tree.
         */
        /* insert the new one. */
        AssertPtr(Req.pvR3);
        pChunk->pv = Req.pvR3;
        bool fRc = RTAvlU32Insert(&pVM->pgm.s.ChunkR3Map.pTree, &pChunk->Core);
        AssertRelease(fRc);
        pVM->pgm.s.ChunkR3Map.c++;

        fRc = RTAvllU32Insert(&pVM->pgm.s.ChunkR3Map.pAgeTree, &pChunk->AgeCore);
        AssertRelease(fRc);

        /* remove the unmapped one. */
        if (Req.idChunkUnmap != INT32_MAX)
        {
            PPGMCHUNKR3MAP pUnmappedChunk = (PPGMCHUNKR3MAP)RTAvlU32Remove(&pVM->pgm.s.ChunkR3Map.pTree, Req.idChunkUnmap);
            AssertRelease(pUnmappedChunk);
            pUnmappedChunk->pv       = NULL;
            pUnmappedChunk->Core.Key = UINT32_MAX;
#if 0 /* for later when we've got a separate mapping method for ring-0. */
            MMR3HeapFree(pUnmappedChunk);
#else
            MMHyperFree(pVM, pUnmappedChunk);
#endif
            pVM->pgm.s.ChunkR3Map.c--;
        }
    }
    else
    {
        AssertRC(rc);
#if 0 /* for later when we've got a separate mapping method for ring-0. */
        MMR3HeapFree(pChunk);
#else
        MMHyperFree(pVM, pChunk);
#endif
        pChunk = NULL;
    }

    *ppChunk = pChunk;
    return rc;
}


/**
 * For VMMCALLHOST_PGM_MAP_CHUNK, considered internal.
 *
 * @returns see pgmR3PhysChunkMap.
 * @param   pVM     The VM handle.
 * @param   idChunk The chunk to map.
 */
PDMR3DECL(int) PGMR3PhysChunkMap(PVM pVM, uint32_t idChunk)
{
    PPGMCHUNKR3MAP pChunk;
    return pgmR3PhysChunkMap(pVM, idChunk, &pChunk);
}
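PGMMAPCHUNKREQ packs the map id and the eviction id into one request so ring-0 can unmap the cache's oldest chunk and map the new one in a single call, avoiding a second ring transition when the cache is full. From the caller's perspective the machinery reduces to an on-demand mapping lookup; a simplified sketch of how a caller such as pgmPhysPageMap() is expected to use it, where the helper itself is an illustration and not code from the changeset:

/* Sketch only: map a chunk on demand and hand back its ring-3 base address.
 * TLB refilling and reference counting are omitted; cf. pgmPhysPageMap(). */
static int sketchGetChunkMapping(PVM pVM, uint32_t idChunk, void **ppv)
{
    PPGMCHUNKR3MAP pChunk;
    int rc = pgmR3PhysChunkMap(pVM, idChunk, &pChunk); /* may unmap the oldest chunk to make room */
    if (VBOX_SUCCESS(rc))
        *ppv = pChunk->pv; /* ring-3 address the chunk is mapped at */
    return rc;
}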
trunk/src/VBox/VMM/VMM.cpp
Diff against r4186.

Added to the VMMCALLHOST dispatcher switch:

            /*
             * Maps a page allocation chunk into ring-3 so ring-0 can use it.
             */
            case VMMCALLHOST_PGM_MAP_CHUNK:
            {
                pVM->vmm.s.rcCallHost = PGMR3PhysChunkMap(pVM, pVM->vmm.s.u64CallHostArg);
                break;
            }

#ifndef NEW_PHYS_CODE
            case VMMCALLHOST_PGM_RAM_GROW_RANGE:
            {
                pVM->vmm.s.rcCallHost = PGM3PhysGrowRange(pVM, pVM->vmm.s.u64CallHostArg);
                break;
            }
#endif

The VMMCALLHOST_PGM_RAM_GROW_RANGE case was moved up to this spot (and put under #ifndef NEW_PHYS_CODE); its old copy, which sat after the REMR3ReplayHandlerNotifications case, was removed:

            case VMMCALLHOST_PGM_RAM_GROW_RANGE:
            {
                pVM->vmm.s.rcCallHost = PGM3PhysGrowRange(pVM, pVM->vmm.s.u64CallHostArg);
                break;
            }
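This dispatcher is the ring-3 end of the "call host" mechanism: ring-0 or guest-context code that needs a service only ring-3 can perform records an operation code and a single 64-bit argument, then unwinds back to the ring-3 loop, which lands in the switch above. A generic sketch of the requesting side; only VMMCALLHOST_PGM_MAP_CHUNK, u64CallHostArg, and rcCallHost appear in the changeset, and the unwinding itself is handled elsewhere in VMM:

/* Hypothetical sketch of the ring-0 side of a call-host request; the real
 * unwinding machinery lives in VMMR0 and is not part of this changeset. */
static int sketchR0MapChunkRequest(PVM pVM, uint32_t idChunk)
{
    pVM->vmm.s.u64CallHostArg = idChunk;  /* the single argument ring-3 receives */
    /* ... record VMMCALLHOST_PGM_MAP_CHUNK and unwind to the ring-3 loop ... */
    /* ring-3 runs the dispatcher above and stores the status: */
    return pVM->vmm.s.rcCallHost;
}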
trunk/src/VBox/VMM/VMMAll/PGMAllPhys.cpp
Diff against r4679.

Removed: the entire #ifdef IN_RING3 block (old lines 188-410) holding the first draft of the chunk mapping cache. Its contents - pgmR3PhysChunkAgeingRolloverCallback, pgmR3PhysChunkAgeingCallback, PGMR3PhysChunkAgeing, PGMR3PHYSCHUNKUNMAPCB, pgmR3PhysChunkUnmapCandidateCallback, pgmR3PhysChunkFindUnmapCandidate, and pgmR3PhysChunkMap - were rewritten and moved to PGMPhys.cpp (see above). The notable differences from the removed draft:

- The type and member names changed from PGMCHUNKR3MAPPING / R3ChunkTlb / R3ChunkMap to PGMCHUNKR3MAP / ChunkR3Map / PhysTlbHC.
- The rollover callback now handles the iAge == 0 case and reinserts the node into the age tree; the draft only compressed the age value.
- PGMR3PhysChunkAgeing now resets iNow to 4 on wraparound (the draft used 20) and runs either the rollover pass or the normal ageing pass, not both.
- Several outright bugs in the draft went with it: an "idChunck" typo, member accesses missing the pgm.s. indirection, an MMHyperAlloc call with the wrong signature, and pUnmappedChunk->Key instead of ->Core.Key.

In pgmPhysPageMap(), the ring-3 chunk mapping call was updated:

-    int rc = pgmR3PhysChunkMap(pVM, idChunk, &pChunk);
+    int rc = PGMR3PhysChunkMap(pVM, idChunk, &pChunk);

Finally, an unused local variable was removed from two functions further down:

-    RTGCUINTPTR offset;