Changeset 4738 in vbox for trunk/src/VBox
Timestamp: Sep 12, 2007, 4:00:54 PM
Location: trunk/src/VBox/VMM
Files: 1 added, 9 edited
trunk/src/VBox/VMM/EM.cpp
(modified: r4551 → r4738)

     }

+    /*
+     * Allocate handy pages (just in case the above actions have consumed some pages).
+     */
+    if (VM_FF_ISSET(pVM, VM_FF_PGM_NEED_HANDY_PAGES))
+    {
+        int rc = PGMR3PhysAllocateHandyPages(pVM);
+        if (VBOX_FAILURE(rc))
+            return rc;
+    }
+
     return VINF_SUCCESS;
 }
…
     if (VM_FF_ISPENDING(pVM, VM_FF_HIGH_PRIORITY_POST_MASK))
         rc = emR3HighPriorityPostForcedActions(pVM, rc);
-
-#ifdef PGM_CACHE_VERY_STRICT
-    /*
-     * Page manager cache checks.
-     */
-    if (    rc == VINF_EM_RAW_INTERRUPT
-        ||  rc == VINF_EM_RAW_GUEST_TRAP
-        ||  rc == VINF_IOM_HC_IOPORT_READ
-        ||  rc == VINF_IOM_HC_IOPORT_WRITE
-        //||  rc == VINF_PATM_PATCH_INT3
-       )
-        pgmCacheCheckPD(pVM, pCtx->cr0, pCtx->cr3, pCtx->cr4);
-#endif

 #ifdef VBOX_STRICT
…
     }

+    /*
+     * Allocate handy pages.
+     */
+    if (VM_FF_ISSET(pVM, VM_FF_PGM_NEED_HANDY_PAGES))
+    {
+        rc2 = PGMR3PhysAllocateHandyPages(pVM);
+        UPDATE_RC();
+    }
+
     /*
      * Debugger Facility request.
…
 #endif
     /* check that we got them all */
-    Assert(!(VM_FF_HIGH_PRIORITY_PRE_MASK & ~(VM_FF_TIMER | VM_FF_INTERRUPT_APIC | VM_FF_INTERRUPT_PIC | VM_FF_DBGF | VM_FF_PGM_SYNC_CR3 | VM_FF_PGM_SYNC_CR3_NON_GLOBAL | VM_FF_SELM_SYNC_TSS | VM_FF_TRPM_SYNC_IDT | VM_FF_SELM_SYNC_GDT | VM_FF_SELM_SYNC_LDT | VM_FF_TERMINATE | VM_FF_DEBUG_SUSPEND | VM_FF_INHIBIT_INTERRUPTS)));
+    Assert(!(VM_FF_HIGH_PRIORITY_PRE_MASK & ~(VM_FF_TIMER | VM_FF_INTERRUPT_APIC | VM_FF_INTERRUPT_PIC | VM_FF_DBGF | VM_FF_PGM_SYNC_CR3 | VM_FF_PGM_SYNC_CR3_NON_GLOBAL | VM_FF_SELM_SYNC_TSS | VM_FF_TRPM_SYNC_IDT | VM_FF_SELM_SYNC_GDT | VM_FF_SELM_SYNC_LDT | VM_FF_TERMINATE | VM_FF_DEBUG_SUSPEND | VM_FF_INHIBIT_INTERRUPTS | VM_FF_PGM_NEED_HANDY_PAGES)));
 }
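These hunks wire the new VM_FF_PGM_NEED_HANDY_PAGES force-action flag into the execution-manager loop: the high-priority post pass tops up the pool right after actions that may have consumed pages, and the forced-action processing services the flag via PGMR3PhysAllocateHandyPages. As a rough illustration of the force-flag pattern itself, here is a minimal self-contained sketch; the vmff_* helpers, the flag's bit position, and VMSKETCH are hypothetical stand-ins, not VirtualBox APIs.

/* A minimal sketch of the force-action-flag pattern, assuming hypothetical
 * vmff_* helpers and flag values; this is not the VirtualBox API. */
#include <stdint.h>
#include <stdio.h>

#define VMFF_PGM_NEED_HANDY_PAGES  (1u << 3)   /* hypothetical bit position */

typedef struct VMSKETCH { uint32_t fForcedActions; } VMSKETCH;

static void vmff_set(VMSKETCH *pVM, uint32_t f)    { pVM->fForcedActions |= f; }
static int  vmff_isset(VMSKETCH *pVM, uint32_t f)  { return (pVM->fForcedActions & f) != 0; }
static void vmff_clear(VMSKETCH *pVM, uint32_t f)  { pVM->fForcedActions &= ~f; }

/* Stand-in for PGMR3PhysAllocateHandyPages: refill, then clear the flag. */
static int allocate_handy_pages(VMSKETCH *pVM)
{
    /* ... refill the per-VM handy-page array here ... */
    vmff_clear(pVM, VMFF_PGM_NEED_HANDY_PAGES);
    return 0;                                   /* VINF_SUCCESS */
}

int main(void)
{
    VMSKETCH vm = { 0 };
    vmff_set(&vm, VMFF_PGM_NEED_HANDY_PAGES);        /* raised by the allocator */
    if (vmff_isset(&vm, VMFF_PGM_NEED_HANDY_PAGES))  /* serviced by the EM loop */
        allocate_handy_pages(&vm);
    printf("pending flags: %#x\n", (unsigned)vm.fForcedActions);
    return 0;
}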
trunk/src/VBox/VMM/Makefile.kmk
(modified: r4665 → r4738)

         VMMR0/TRPMR0A.asm \
         VMMR0/PDMR0Device.cpp \
+        VMMR0/PGMR0.cpp \
         VMMAll/EMAll.cpp \
         VMMAll/EMAllA.asm \
trunk/src/VBox/VMM/PGM.cpp
(modified: r4714 → r4738)

  * So, when RTR0MemObjAllocPhysNC returns VERR_NOT_SUPPORTED the page allocator
  * will return to the ring-3 caller (and later ring-0) and asking it to seed
- * the page allocator with some fresh pages (VERR_GVM_SEED_ME). Ring-3 will
+ * the page allocator with some fresh pages (VERR_GMM_SEED_ME). Ring-3 will
  * then perform an SUPPageAlloc(cbChunk >> PAGE_SHIFT) call and make a
  * "SeededAllocPages" call to ring-0.
…
     STAM_REG(pVM, &pPGM->StatChunkR3MapTlbHits,   STAMTYPE_COUNTER, "/PGM/ChunkR3Map/TlbHits",   STAMUNIT_OCCURENCES, "TLB hits.");
     STAM_REG(pVM, &pPGM->StatChunkR3MapTlbMisses, STAMTYPE_COUNTER, "/PGM/ChunkR3Map/TlbMisses", STAMUNIT_OCCURENCES, "TLB misses.");
+    STAM_REG(pVM, &pPGM->StatPageReplaceShared,   STAMTYPE_COUNTER, "/PGM/Page/ReplacedShared",  STAMUNIT_OCCURENCES, "Times a shared page was replaced.");
+    STAM_REG(pVM, &pPGM->StatPageReplaceZero,     STAMTYPE_COUNTER, "/PGM/Page/ReplacedZero",    STAMUNIT_OCCURENCES, "Times the zero page was replaced.");
+    STAM_REG(pVM, &pPGM->StatPageHandyAllocs,     STAMTYPE_COUNTER, "/PGM/Page/HandyAllocs",     STAMUNIT_OCCURENCES, "Number of times we've allocated more handy pages.");
+    STAM_REG(pVM, &pPGM->cAllPages,               STAMTYPE_U32,     "/PGM/Page/cAllPages",       STAMUNIT_OCCURENCES, "The total number of pages.");
+    STAM_REG(pVM, &pPGM->cPrivatePages,           STAMTYPE_U32,     "/PGM/Page/cPrivatePages",   STAMUNIT_OCCURENCES, "The number of private pages.");
+    STAM_REG(pVM, &pPGM->cSharedPages,            STAMTYPE_U32,     "/PGM/Page/cSharedPages",    STAMUNIT_OCCURENCES, "The number of shared pages.");
+    STAM_REG(pVM, &pPGM->cZeroPages,              STAMTYPE_U32,     "/PGM/Page/cZeroPages",      STAMUNIT_OCCURENCES, "The number of zero backed pages.");

 #ifdef PGMPOOL_WITH_GCPHYS_TRACKING
trunk/src/VBox/VMM/PGMInternal.h
(modified: r4714 → r4738)

 /** The chunk shift. (2^20 = 1 MB) */
-#define GPM_CHUNK_SHIFT                 20
+#define GMM_CHUNK_SHIFT                 20
 /** The allocation chunk size. */
-#define GPM_CHUNK_SIZE                  (1U << GPM_CHUNK_SIZE_LOG2)
+#define GMM_CHUNK_SIZE                  (1U << GMM_CHUNK_SHIFT)
 /** The shift factor for converting a page id into a chunk id. */
-#define GPM_CHUNKID_SHIFT               (GPM_CHUNK_SHIFT - PAGE_SHIFT)
+#define GMM_CHUNKID_SHIFT               (GMM_CHUNK_SHIFT - PAGE_SHIFT)
 /** The NIL Chunk ID value. */
-#define NIL_GPM_CHUNKID                 0
+#define NIL_GMM_CHUNKID                 0
 /** The NIL Page ID value. */
-#define NIL_GPM_PAGEID                  0
+#define NIL_GMM_PAGEID                  0

 /**
  * Get the Page ID.
- * @returns The Page ID; NIL_GPM_PAGEID if it's a ZERO page.
+ * @returns The Page ID; NIL_GMM_PAGEID if it's a ZERO page.
  * @param   pPage       Pointer to the physical guest page tracking structure.
  */
…
 /**
  * Get the Chunk ID.
- * @returns The Chunk ID; NIL_GPM_CHUNKID if it's a ZERO page.
+ * @returns The Chunk ID; NIL_GMM_CHUNKID if it's a ZERO page.
  * @param   pPage       Pointer to the physical guest page tracking structure.
  */
-#define PGM_PAGE_GET_CHUNKID(pPage)     ( (pPage)->idPage >> GPM_CHUNKID_SHIFT )
+#define PGM_PAGE_GET_CHUNKID(pPage)     ( (pPage)->idPage >> GMM_CHUNKID_SHIFT )
 /* later:
-#if GPM_CHUNKID_SHIFT == 12
+#if GMM_CHUNKID_SHIFT == 12
 # define PGM_PAGE_GET_CHUNKID(pPage)   ( (uint32_t)((pPage)->HCPhys >> 48) )
-#elif GPM_CHUNKID_SHIFT > 12
-# define PGM_PAGE_GET_CHUNKID(pPage)   ( (uint32_t)((pPage)->HCPhys >> (48 + (GPM_CHUNKID_SHIFT - 12)) )
-#elif GPM_CHUNKID_SHIFT < 12
-# define PGM_PAGE_GET_CHUNKID(pPage)   ( ( (uint32_t)((pPage)->HCPhys >> 48) << (12 - GPM_CHUNKID_SHIFT) ) \
-                                       | ( (uint32_t)((pPage)->HCPhys & 0xfff) >> GPM_CHUNKID_SHIFT ) )
+#elif GMM_CHUNKID_SHIFT > 12
+# define PGM_PAGE_GET_CHUNKID(pPage)   ( (uint32_t)((pPage)->HCPhys >> (48 + (GMM_CHUNKID_SHIFT - 12)) )
+#elif GMM_CHUNKID_SHIFT < 12
+# define PGM_PAGE_GET_CHUNKID(pPage)   ( ( (uint32_t)((pPage)->HCPhys >> 48) << (12 - GMM_CHUNKID_SHIFT) ) \
+                                       | ( (uint32_t)((pPage)->HCPhys & 0xfff) >> GMM_CHUNKID_SHIFT ) )
 #else
-# error "GPM_CHUNKID_SHIFT isn't defined or something."
+# error "GMM_CHUNKID_SHIFT isn't defined or something."
 #endif
 */
…
  * @param   pPage       Pointer to the physical guest page tracking structure.
  */
-#define PGM_PAGE_GET_PAGE_IN_CHUNK(pPage)   ( (pPage)->idPage & (RT_BIT_32(GPM_CHUNKID_SHIFT) - 1) )
+#define PGM_PAGE_GET_PAGE_IN_CHUNK(pPage)   ( (pPage)->idPage & (RT_BIT_32(GMM_CHUNKID_SHIFT) - 1) )
 /* later:
-#if GPM_CHUNKID_SHIFT <= 12
-# define PGM_PAGE_GET_PAGE_IN_CHUNK(pPage)  ( (uint32_t)((pPage)->HCPhys & (RT_BIT_32(GPM_CHUNKID_SHIFT) - 1)) )
+#if GMM_CHUNKID_SHIFT <= 12
+# define PGM_PAGE_GET_PAGE_IN_CHUNK(pPage)  ( (uint32_t)((pPage)->HCPhys & (RT_BIT_32(GMM_CHUNKID_SHIFT) - 1)) )
 #else
 # define PGM_PAGE_GET_PAGE_IN_CHUNK(pPage)  ( (uint32_t)((pPage)->HCPhys & 0xfff) \
-                                            | ( (uint32_t)((pPage)->HCPhys >> 48) & (RT_BIT_32(GPM_CHUNKID_SHIFT - 12) - 1) ) )
+                                            | ( (uint32_t)((pPage)->HCPhys >> 48) & (RT_BIT_32(GMM_CHUNKID_SHIFT - 12) - 1) ) )
 #endif
 */
…
  */
 #define PGM_PAGE_IS_ZERO(pPage)         ( (pPage)->u2State == PGM_PAGE_STATE_ZERO )
+
+/**
+ * Checks if the page is backed by a SHARED page.
+ * @returns true/false.
+ * @param   pPage       Pointer to the physical guest page tracking structure.
+ */
+#define PGM_PAGE_IS_SHARED(pPage)       ( (pPage)->u2State == PGM_PAGE_STATE_SHARED )
…
     /** The GC mapping of the zero page. */
     RTGCPTR                         pvZeroPgGC;
-#if GC_ARCH_BITS == 32
+#if GC_ARCH_BITS != 32
     uint32_t                        u32ZeroAlignment;   /**< Alignment padding. */
 #endif
     /** @}*/

+    /** The number of handy pages. */
+    uint32_t                        cHandyPages;
+    /**
+     * Array of handy pages.
+     *
+     * This array is used in a two-way communication between pgmPhysAllocPage
+     * and GMMR0AllocateHandyPages, with PGMR3PhysAllocateHandyPages serving as
+     * an intermediary.
+     *
+     * The size of this array is important, see pgmPhysEnsureHandyPage for details.
+     * (The current size of 32 pages means 128 KB of memory.)
+     */
+    struct
+    {
+        /** The host physical address before pgmPhysAllocPage uses it,
+         * and the guest physical address afterwards.
+         * This is NIL_RTHCPHYS if the array entry isn't valid.
+         * ASSUMES: sizeof(RTHCPHYS) >= sizeof(RTGCPHYS). */
+        RTHCPHYS                    HCPhysGCPhys;
+        /** The Page ID.
+         * This is NIL_GMM_PAGEID if the array entry isn't valid. */
+        uint32_t                    idPage;
+        /** The Page ID of the shared page that pgmPhysAllocPage replaced.
+         * This is NIL_GMM_PAGEID if no shared page was replaced. */
+        uint32_t                    idSharedPage;
+    } aHandyPages[32];

     /** @name Release Statistics
      * @{ */
+    uint32_t                        cAllPages;          /**< The total number of pages. (Should be Private + Shared + Zero.) */
+    uint32_t                        cPrivatePages;      /**< The number of private pages. */
+    uint32_t                        cSharedPages;       /**< The number of shared pages. */
+    uint32_t                        cZeroPages;         /**< The number of zero backed pages. */
     /** The number of times the guest has switched mode since last reset or statistics reset. */
     STAMCOUNTER                     cGuestModeChanges;
…
     /** Ring-3/0 chunk mapper TLB misses. */
     STAMCOUNTER                     StatChunkR3MapTlbMisses;
+    /** Times a shared page has been replaced by a private one. */
+    STAMCOUNTER                     StatPageReplaceShared;
+    /** Times the zero page has been replaced by a private one. */
+    STAMCOUNTER                     StatPageReplaceZero;
+    /** The number of times we've executed GMMR3AllocateHandyPages. */
+    STAMCOUNTER                     StatPageHandyAllocs;

     /** Allocated mbs of guest ram */
…
 int             pgmPhysPageLoadIntoTlb(PPGM pPGM, RTGCPHYS GCPhys);
 #ifdef IN_RING3
+int             pgmR3PhysChunkMap(PVM pVM, uint32_t idChunk, PPPGMCHUNKR3MAP ppChunk);
+#ifndef NEW_PHYS_CODE
 int             pgmr3PhysGrowRange(PVM pVM, RTGCPHYS GCPhys);
-int             pgmR3PhysChunkMap(PVM pVM, uint32_t idChunk, PPPGMCHUNKR3MAP ppChunk);
+#endif

 int             pgmR3PoolInit(PVM pVM);
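With PAGE_SHIFT at 12 (4 KB pages), the renamed constants make GMM_CHUNKID_SHIFT come out to 8, so each 1 MB chunk holds 256 pages and a page id splits into a chunk id in the high bits and a page-in-chunk index in the low 8 bits. A small standalone worked example of that arithmetic, assuming PAGE_SHIFT == 12:

/* Worked example of the page-id/chunk-id arithmetic defined above;
 * standalone, assuming PAGE_SHIFT == 12 (4 KB pages). */
#include <stdint.h>
#include <stdio.h>

#define PAGE_SHIFT        12
#define GMM_CHUNK_SHIFT   20                              /* 2^20 = 1 MB chunks */
#define GMM_CHUNKID_SHIFT (GMM_CHUNK_SHIFT - PAGE_SHIFT)  /* 8 -> 256 pages/chunk */

int main(void)
{
    uint32_t idPage       = 0x12345;                              /* arbitrary page id */
    uint32_t idChunk      = idPage >> GMM_CHUNKID_SHIFT;              /* 0x123 */
    uint32_t iPageInChunk = idPage & ((1u << GMM_CHUNKID_SHIFT) - 1); /* 0x45  */
    printf("page id %#x -> chunk %#x, page %u of %u\n",
           (unsigned)idPage, (unsigned)idChunk,
           (unsigned)iPageInChunk, 1u << GMM_CHUNKID_SHIFT);
    return 0;
}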
trunk/src/VBox/VMM/PGMPhys.cpp
(modified: r4713 → r4738)

 }

+#ifndef NEW_PHYS_CODE

 /**
…
 }

+#endif /* !NEW_PHYS_CODE */

 /**
…
-#define VMMR0_DO_PGM_MAP_CHUNK  0 // later
-/**
- * Argument package for the VMMR0_DO_PGM_MAP_CHUNK request.
- */
-typedef struct PGMMAPCHUNKREQ
+/**
+ * Argument package for the VMMR0_DO_GMM_MAP_UNMAP_CHUNK request.
+ */
+typedef struct GMMMAPUNMAPCHUNKREQ
 {
     /** The chunk to map, UINT32_MAX if unmap only. (IN) */
…
     /** Where the mapping address is returned. (OUT) */
     RTR3PTR pvR3;
-} PGMMAPCHUNKREQ;
+} GMMMAPUNMAPCHUNKREQ;
…
      * necessary unmap another one to make space in the mapping cache.
      */
-    PGMMAPCHUNKREQ Req;
+    GMMMAPUNMAPCHUNKREQ Req;
     Req.pvR3 = NULL;
     Req.idChunkMap = idChunk;
…
     Req.idChunkUnmap = pgmR3PhysChunkFindUnmapCandidate(pVM);
     /** @todo SUPCallVMMR0Ex needs to support in+out or similar. */
-    rc = SUPCallVMMR0Ex(pVM->pVMR0, VMMR0_DO_PGM_MAP_CHUNK, &Req, sizeof(Req));
+    rc = SUPCallVMMR0Ex(pVM->pVMR0, VMMR0_DO_GMM_MAP_UNMAP_CHUNK, &Req, sizeof(Req));
     if (VBOX_SUCCESS(rc))
     {
…
     for (unsigned i = 0; i < RT_ELEMENTS(pVM->pgm.s.ChunkR3Map.Tlb.aEntries); i++)
     {
-        pVM->pgm.s.ChunkR3Map.Tlb.aEntries[i].idChunk = NIL_GPM_CHUNKID;
+        pVM->pgm.s.ChunkR3Map.Tlb.aEntries[i].idChunk = NIL_GMM_CHUNKID;
         pVM->pgm.s.ChunkR3Map.Tlb.aEntries[i].pChunk = NULL;
     }
…

+/**
+ * Response to VM_FF_PGM_NEED_HANDY_PAGES and VMMCALLHOST_PGM_ALLOCATE_HANDY_PAGES.
+ *
+ * @returns The following VBox status codes.
+ * @retval  VINF_SUCCESS on success. FF cleared.
+ * @retval  VINF_EM_NO_MEMORY if we're out of memory. The FF is not cleared in this case.
+ *
+ * @param   pVM         The VM handle.
+ */
+PDMR3DECL(int) PGMR3PhysAllocateHandyPages(PVM pVM)
+{
+    pgmLock(pVM);
+    int rc = SUPCallVMMR0Ex(pVM->pVMR0, VMMR0_DO_PGM_ALLOCATE_HANDY_PAGES, NULL, 0);
+    if (rc == VERR_GMM_SEED_ME)
+    {
+        void *pvChunk;
+        rc = SUPPageAlloc(GMM_CHUNK_SIZE >> PAGE_SHIFT, &pvChunk);
+        if (VBOX_SUCCESS(rc))
+            rc = SUPCallVMMR0Ex(pVM->pVMR0, VMMR0_DO_GMM_SEED_CHUNK, pvChunk, 0);
+        if (VBOX_FAILURE(rc))
+        {
+            LogRel(("PGM: GMM Seeding failed, rc=%Vrc\n", rc));
+            rc = VINF_EM_NO_MEMORY;
+        }
+    }
+    pgmUnlock(pVM);
+    Assert(rc == VINF_SUCCESS || rc == VINF_EM_NO_MEMORY);
+    return rc;
+}
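The new PGMR3PhysAllocateHandyPages implements the seeding fallback described in the PGM.cpp comment above: when ring-0 cannot allocate host pages itself (VERR_GMM_SEED_ME), ring-3 allocates an entire chunk of memory and hands it down. A simplified model of that control flow follows; alloc_handy_r0 and seed_chunk_r0 are hypothetical stand-ins for the VMMR0 calls, and malloc stands in for SUPPageAlloc.

/* A simplified model of the ring-3 seeding fallback; all names here are
 * hypothetical stand-ins, not the VirtualBox API. */
#include <stdio.h>
#include <stdlib.h>

#define ERR_SEED_ME   (-1)          /* stand-in for VERR_GMM_SEED_ME */
#define ERR_NO_MEMORY (-2)          /* stand-in for VINF_EM_NO_MEMORY */
#define CHUNK_SIZE    (1u << 20)    /* 1 MB, as GMM_CHUNK_SIZE above */

static int g_fSeeded = 0;

/* Ring-0 side: fails until ring-3 has seeded it with memory. */
static int alloc_handy_r0(void)          { return g_fSeeded ? 0 : ERR_SEED_ME; }
static int seed_chunk_r0(void *pvChunk)  { (void)pvChunk; g_fSeeded = 1; return 0; }

static int allocate_handy_pages(void)
{
    int rc = alloc_handy_r0();
    if (rc == ERR_SEED_ME)          /* ring-0 wants ring-3 memory */
    {
        void *pvChunk = malloc(CHUNK_SIZE);
        rc = pvChunk ? seed_chunk_r0(pvChunk) : ERR_NO_MEMORY;
        /* After a successful seed the force-action flag stays pending, so
           the caller simply comes back and the allocation succeeds then. */
    }
    return rc;
}

int main(void)
{
    printf("first call:  rc=%d\n", allocate_handy_pages());
    printf("second call: rc=%d\n", allocate_handy_pages());
    return 0;
}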
trunk/src/VBox/VMM/VMM.cpp
(modified: r4689 → r4738)

             break;
         }

+        /*
+         * Allocates more handy pages.
+         */
+        case VMMCALLHOST_PGM_ALLOCATE_HANDY_PAGES:
+        {
+            pVM->vmm.s.rcCallHost = PGMR3PhysAllocateHandyPages(pVM);
+            break;
+        }
+
 #ifndef NEW_PHYS_CODE
trunk/src/VBox/VMM/VMMAll/PGMAllPhys.cpp
(modified: r4714 → r4738)

 #include <VBox/vmm.h>
 #include <VBox/iom.h>
+#include <VBox/rem.h>
 #include "PGMInternal.h"
 #include <VBox/vm.h>
…

+/**
+ * Makes sure that there is at least one handy page ready for use.
+ *
+ * This will also take the appropriate actions when reaching water-marks.
+ *
+ * @returns The following VBox status codes.
+ * @retval  VINF_SUCCESS on success.
+ * @retval  VERR_EM_NO_MEMORY if we're really out of memory.
+ *
+ * @param   pVM         The VM handle.
+ *
+ * @remarks Must be called from within the PGM critical section. It may
+ *          nip back to ring-3/0 in some cases.
+ */
+static int pgmPhysEnsureHandyPage(PVM pVM)
+{
+    /** @remarks
+     * low-water mark logic for R0 & GC:
+     *      - 75%: Set FF.
+     *      - 50%: Force return to ring-3 ASAP.
+     *
+     * For ring-3 there is a little problem wrt the recompiler, so:
+     *      - 75%: Set FF.
+     *      - 50%: Try to allocate pages; on failure we'll force REM to quit ASAP.
+     *
+     * The basic idea is that we should be able to get out of any situation with
+     * only 50% of handy pages remaining.
+     *
+     * At the moment we'll not adjust the number of handy pages relative to the
+     * actual VM RAM commitment, that's too much work for now.
+     */
+    Assert(pVM->pgm.s.cHandyPages <= RT_ELEMENTS(pVM->pgm.s.aHandyPages));
+    if (    !pVM->pgm.s.cHandyPages
+#ifdef IN_RING3
+        ||  pVM->pgm.s.cHandyPages - 1 <= RT_ELEMENTS(pVM->pgm.s.aHandyPages) / 2 /* 50% */
+#endif
+       )
+    {
+        Log(("PGM: cHandyPages=%u out of %u -> allocate more\n", pVM->pgm.s.cHandyPages, RT_ELEMENTS(pVM->pgm.s.aHandyPages)));
+#ifdef IN_RING3
+        int rc = SUPCallVMMR0Ex(pVM->pVMR0, VMMR0_DO_PGM_ALLOCATE_HANDY_PAGES, NULL, 0);
+#elif defined(IN_RING0)
+        /** @todo call PGMR0PhysAllocateHandyPages directly - need to make sure we can call kernel code first and deal with the seeding fallback. */
+        int rc = VMMR0CallHost(pVM, VMMCALLHOST_PGM_ALLOCATE_HANDY_PAGES, 0);
+#else
+        int rc = VMMGCCallHost(pVM, VMMCALLHOST_PGM_ALLOCATE_HANDY_PAGES, 0);
+#endif
+        if (RT_UNLIKELY(rc != VINF_SUCCESS))
+        {
+            Assert(rc == VINF_EM_NO_MEMORY);
+            if (!pVM->pgm.s.cHandyPages)
+            {
+                LogRel(("PGM: no more handy pages!\n"));
+                return VERR_EM_NO_MEMORY;
+            }
+            Assert(VM_FF_ISSET(pVM, VM_FF_PGM_NEED_HANDY_PAGES));
+#ifdef IN_RING3
+            REMR3NotifyFF(pVM);
+#else
+            VM_FF_SET(pVM, VM_FF_TO_R3);
+#endif
+        }
+        Assert(pVM->pgm.s.cHandyPages <= RT_ELEMENTS(pVM->pgm.s.aHandyPages));
+    }
+    else if (pVM->pgm.s.cHandyPages - 1 <= (RT_ELEMENTS(pVM->pgm.s.aHandyPages) / 4) * 3) /* 75% */
+    {
+        VM_FF_SET(pVM, VM_FF_PGM_NEED_HANDY_PAGES);
+#ifndef IN_RING3
+        if (pVM->pgm.s.cHandyPages - 1 <= RT_ELEMENTS(pVM->pgm.s.aHandyPages) / 2)
+        {
+            Log(("PGM: VM_FF_TO_R3 - cHandyPages=%u out of %u\n", pVM->pgm.s.cHandyPages, RT_ELEMENTS(pVM->pgm.s.aHandyPages)));
+            VM_FF_SET(pVM, VM_FF_TO_R3);
+        }
+#endif
+    }
+
+    return VINF_SUCCESS;
+}
+
+
 /**
  * Replace a zero or shared page with new page that we can write to.
  *
- * @returns VBox status.
- * @todo Define the return values and propagate them up the call tree..
+ * @returns The following VBox status codes.
+ * @retval  VINF_SUCCESS on success, pPage is modified.
+ * @retval  VERR_EM_NO_MEMORY if we're totally out of memory.
+ *
+ * @todo    Propagate VERR_EM_NO_MEMORY up the call tree.
  *
  * @param   pVM         The VM address.
- * @param   pPage       The physical page tracking structure.
+ * @param   pPage       The physical page tracking structure. This will
+ *                      be modified on success.
  * @param   GCPhys      The address of the page.
  *
- * @remarks Called from within the PGM critical section.
+ * @remarks Must be called from within the PGM critical section. It may
+ *          nip back to ring-3/0 in some cases.
+ *
+ * @remarks This function shouldn't really fail, however if it does
+ *          it probably means we've screwed up the size of the amount
+ *          and/or the low-water mark of handy pages. Or, that some
+ *          device I/O is causing a lot of pages to be allocated
+ *          while the host is in a low-memory condition.
  */
 int pgmPhysAllocPage(PVM pVM, PPGMPAGE pPage, RTGCPHYS GCPhys)
 {
-    return VERR_NOT_IMPLEMENTED;
+    /*
+     * Ensure that we've got a page handy, take it and use it.
+     */
+    int rc = pgmPhysEnsureHandyPage(pVM);
+    if (VBOX_FAILURE(rc))
+    {
+        Assert(rc == VERR_EM_NO_MEMORY);
+        return rc;
+    }
+    AssertMsg(PGM_PAGE_IS_ZERO(pPage) || PGM_PAGE_IS_SHARED(pPage), ("%d %RGp\n", PGM_PAGE_GET_STATE(pPage), GCPhys));
+    Assert(!PGM_PAGE_IS_RESERVED(pPage));
+    Assert(!PGM_PAGE_IS_MMIO(pPage));
+
+    uint32_t iHandyPage = --pVM->pgm.s.cHandyPages;
+    Assert(iHandyPage < RT_ELEMENTS(pVM->pgm.s.aHandyPages));
+    Assert(pVM->pgm.s.aHandyPages[iHandyPage].HCPhysGCPhys != NIL_RTHCPHYS);
+    Assert(!(pVM->pgm.s.aHandyPages[iHandyPage].HCPhysGCPhys & ~X86_PTE_PAE_PG_MASK));
+    Assert(pVM->pgm.s.aHandyPages[iHandyPage].idPage != NIL_GMM_PAGEID);
+    Assert(pVM->pgm.s.aHandyPages[iHandyPage].idSharedPage == NIL_GMM_PAGEID);
+
+    /*
+     * There are one or two actions to be taken the next time we allocate handy pages:
+     *      - Tell the GMM (global memory manager) what the page is being used for.
+     *        (Speeds up replacement operations - sharing and defragmenting.)
+     *      - If the current backing is shared, it must be freed.
+     */
+    const RTHCPHYS HCPhys = pVM->pgm.s.aHandyPages[iHandyPage].HCPhysGCPhys;
+    pVM->pgm.s.aHandyPages[iHandyPage].HCPhysGCPhys = GCPhys;
+
+    if (PGM_PAGE_IS_SHARED(pPage))
+    {
+        pVM->pgm.s.aHandyPages[iHandyPage].idSharedPage = PGM_PAGE_GET_PAGEID(pPage);
+        Assert(PGM_PAGE_GET_PAGEID(pPage) != NIL_GMM_PAGEID);
+        VM_FF_SET(pVM, VM_FF_PGM_NEED_HANDY_PAGES);
+
+        Log2(("PGM: Replaced shared page %#x at %RGp with %#x / %RHp\n", PGM_PAGE_GET_PAGEID(pPage),
+              GCPhys, pVM->pgm.s.aHandyPages[iHandyPage].idPage, HCPhys));
+        STAM_COUNTER_INC(&pVM->pgm.s.StatPageReplaceShared);
+        pVM->pgm.s.cSharedPages--;
+    }
+    else
+    {
+        Log2(("PGM: Replaced zero page %RGp with %#x / %RHp\n", GCPhys, pVM->pgm.s.aHandyPages[iHandyPage].idPage, HCPhys));
+        STAM_COUNTER_INC(&pVM->pgm.s.StatPageReplaceZero);
+        pVM->pgm.s.cZeroPages--;
+    }
+
+    /*
+     * Do the PGMPAGE modifications.
+     */
+    pVM->pgm.s.cPrivatePages++;
+    PGM_PAGE_SET_HCPHYS(pPage, HCPhys);
+    PGM_PAGE_SET_PAGEID(pPage, pVM->pgm.s.aHandyPages[iHandyPage].idPage);
+    PGM_PAGE_SET_STATE(pPage, PGM_PAGE_STATE_ALLOCATED);
+
+    return VINF_SUCCESS;
 }
…
         pMap = pTlbe->pChunk;
     }
-    else if (idChunk != NIL_GPM_CHUNKID)
+    else if (idChunk != NIL_GMM_CHUNKID)
     {
         STAM_COUNTER_INC(&pVM->pgm.s.StatChunkR3MapTlbMisses);
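pgmPhysEnsureHandyPage's water-marks are expressed against the 32-entry array: at 75% remaining it raises VM_FF_PGM_NEED_HANDY_PAGES, and in R0/GC at 50% it additionally forces a return to ring-3. Below is a standalone sketch of that policy (the R0/GC variant; names and the enum are simplified stand-ins).

/* A standalone sketch of the R0/GC low-water-mark policy described in
 * pgmPhysEnsureHandyPage above, for a 32-entry handy-page array. */
#include <stdio.h>

#define HANDY_PAGES 32

enum action { ACT_NONE, ACT_SET_FF, ACT_FORCE_R3 };

static enum action watermark(unsigned cHandyPages)
{
    if (cHandyPages - 1 <= HANDY_PAGES / 2)         /* 50%: return to ring-3 ASAP */
        return ACT_FORCE_R3;
    if (cHandyPages - 1 <= (HANDY_PAGES / 4) * 3)   /* 75%: raise the FF, keep going */
        return ACT_SET_FF;
    return ACT_NONE;
}

int main(void)
{
    for (unsigned c = HANDY_PAGES; c >= 1; c--)
        printf("%2u handy pages left -> %s\n", c,
               watermark(c) == ACT_FORCE_R3 ? "force ring-3"
             : watermark(c) == ACT_SET_FF   ? "set FF" : "ok");
    return 0;
}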
trunk/src/VBox/VMM/VMMR0/VMMR0.cpp
(modified: r4250 → r4738)

         }

+        /*
+         * PGM wrappers.
+         */
+        case VMMR0_DO_PGM_ALLOCATE_HANDY_PAGES:
+            return PGMR0PhysAllocateHandyPages(pVM);
+
+#if 0
+        /*
+         * GMM wrappers
+         */
+        case VMMR0_DO_GMM_ALLOCATE_PAGES:
+            return GMMR0AllocatePages(pVM, ...);
+        case VMMR0_DO_GMM_FREE_PAGES:
+            return GMMR0FreePages(pVM, ...);
+        case VMMR0_DO_GMM_MAP_UNMAP_CHUNK:
+            return GMMR0FreeMapUnmapChunk(pVM, ...);
+        case VMMR0_DO_GMM_SEED_CHUNK:
+            return GMMR0SeedChunk(pVM, (RTR3PTR)pvArg);
+#endif
+
+
 #ifdef VBOX_WITH_INTERNAL_NETWORKING
         /*
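The VMMR0 entry point dispatches these operation codes as a switch over an enum, returning the handler's status code directly. A minimal sketch of that dispatch pattern follows; the enum values and handler names here are hypothetical stand-ins, not the real VMMR0 interface.

/* A minimal sketch of the operation-dispatch pattern in the ring-0 entry
 * point; the enum values and handlers are hypothetical stand-ins. */
#include <stdio.h>

enum r0op { R0OP_PGM_ALLOCATE_HANDY_PAGES, R0OP_GMM_SEED_CHUNK };

static int pgm_r0_allocate_handy_pages(void) { return 0; /* VINF_SUCCESS */ }
static int gmm_r0_seed_chunk(void *pvR3)     { (void)pvR3; return 0; }

static int vmmr0_entry(enum r0op enmOp, void *pvArg)
{
    switch (enmOp)
    {
        case R0OP_PGM_ALLOCATE_HANDY_PAGES:
            return pgm_r0_allocate_handy_pages();
        case R0OP_GMM_SEED_CHUNK:
            return gmm_r0_seed_chunk(pvArg);
        default:
            return -1; /* VERR_NOT_SUPPORTED */
    }
}

int main(void)
{
    printf("rc=%d\n", vmmr0_entry(R0OP_PGM_ALLOCATE_HANDY_PAGES, NULL));
    return 0;
}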
trunk/src/VBox/VMM/testcase/tstVMStructGC.cpp
(modified: r4714 → r4738)

     GEN_CHECK_OFF(PGM, pvZeroPgR0);
     GEN_CHECK_OFF(PGM, pvZeroPgGC);
+    GEN_CHECK_OFF(PGM, cHandyPages);
+    GEN_CHECK_OFF(PGM, aHandyPages);
+    GEN_CHECK_OFF(PGM, aHandyPages[1]);
+    GEN_CHECK_OFF(PGM, aHandyPages[1].HCPhysGCPhys);
+    GEN_CHECK_OFF(PGM, aHandyPages[1].idPage);
+    GEN_CHECK_OFF(PGM, aHandyPages[1].idSharedPage);
+    GEN_CHECK_OFF(PGM, cAllPages);
+    GEN_CHECK_OFF(PGM, cPrivatePages);
+    GEN_CHECK_OFF(PGM, cSharedPages);
+    GEN_CHECK_OFF(PGM, cZeroPages);
+    GEN_CHECK_OFF(PGM, cGuestModeChanges);

     GEN_CHECK_SIZE(PGMMAPPING);
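The testcase emits these GEN_CHECK_OFF records so the structure offsets seen by the GC (guest context) compiler can be compared against the host build; the new entries cover the handy-page array and the page counters. A minimal sketch of the offsetof-based technique, using a simplified stand-in structure and macro (CHECK_OFF and PGMSKETCH are not the real testcase names):

/* A minimal sketch of cross-build structure layout checking; comparing the
 * output of two differently targeted builds catches layout mismatches. */
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

typedef struct PGMSKETCH
{
    uint32_t cHandyPages;
    struct { uint64_t HCPhysGCPhys; uint32_t idPage; uint32_t idSharedPage; } aHandyPages[32];
    uint32_t cAllPages, cPrivatePages, cSharedPages, cZeroPages;
} PGMSKETCH;

/* Print one offset record for later comparison. */
#define CHECK_OFF(type, member) \
    printf("%-12s %-28s %u\n", #type, #member, (unsigned)offsetof(type, member))

int main(void)
{
    CHECK_OFF(PGMSKETCH, cHandyPages);
    CHECK_OFF(PGMSKETCH, aHandyPages);
    CHECK_OFF(PGMSKETCH, aHandyPages[1].idSharedPage);
    CHECK_OFF(PGMSKETCH, cZeroPages);
    return 0;
}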