- Timestamp: Oct 23, 2008 4:49:58 PM
- svn:sync-xref-src-repo-rev: 38400
- Location: trunk/src/VBox/VMM
- Files: 2 edited
trunk/src/VBox/VMM/PGMPool.cpp (r13135 → r13543)

@@ -26 +26 @@
  * should allow us to skip most of the global flushes now following access
  * handler changes. The main expense is flushing shadow pages.
- * -# Limit the pool size (currently it's kind of limitless IIRC).
- * -# Allocate shadow pages from GC. Currently we're allocating at SyncCR3 time.
+ * -# Limit the pool size if necessary (default is kind of limitless).
+ * -# Allocate shadow pages from RC. We use to only do this in SyncCR3.
  * -# Required for 64-bit guests.
  * -# Combining the PD cache and page pool in order to simplify caching.

@@ -34 +34 @@
  * @section sec_pgm_pool_outline Design Outline
  *
- * The shadow page pool tracks pages used for shadowing paging structures (i.e. page
- * tables, page directory, page directory pointer table and page map level-4). Each
- * page in the pool has an unique identifier. This identifier is used to link a guest
- * physical page to a shadow PT. The identifier is a non-zero value and has a
- * relativly low max value - say 14 bits. This makes it possible to fit it into the
- * upper bits of the of the aHCPhys entries in the ram range.
- *
- * By restricting host physical memory to the first 48 bits (which is the announced
- * physical memory range of the K8L chip (scheduled for 2008)), we can safely use the
- * upper 16 bits for shadow page ID and reference counting.
- *
- * Now, it's possible for a page to be aliased, i.e. mapped by more than one PT or
- * PD. This is solved by creating a list of physical cross reference extents when
- * ever this happens. Each node in the list (extent) is can contain 3 page pool
- * indexes. The list it self is chained using indexes into the paPhysExt array.
+ * The shadow page pool tracks pages used for shadowing paging structures (i.e.
+ * page tables, page directory, page directory pointer table and page map
+ * level-4). Each page in the pool has an unique identifier. This identifier is
+ * used to link a guest physical page to a shadow PT. The identifier is a
+ * non-zero value and has a relativly low max value - say 14 bits. This makes it
+ * possible to fit it into the upper bits of the of the aHCPhys entries in the
+ * ram range.
+ *
+ * By restricting host physical memory to the first 48 bits (which is the
+ * announced physical memory range of the K8L chip (scheduled for 2008)), we
+ * can safely use the upper 16 bits for shadow page ID and reference counting.
+ *
+ * Update: The 48 bit assumption will be lifted with the new physical memory
+ * management (PGMPAGE), so we won't have any trouble when someone stuffs 2TB
+ * into a box in some years.
+ *
+ * Now, it's possible for a page to be aliased, i.e. mapped by more than one PT
+ * or PD. This is solved by creating a list of physical cross reference extents
+ * when ever this happens. Each node in the list (extent) is can contain 3 page
+ * pool indexes. The list it self is chained using indexes into the paPhysExt
+ * array.
  *
  *

@@ -78 +84 @@
  * @section sec_pgm_pool_impl Implementation
  *
- * The pool will take pages from the MM page pool. The tracking data (attributes,
- * bitmaps and so on) are allocated from the hypervisor heap. The pool content can
- * be accessed both by using the page id and the physical address (HC). The former
- * is managed by means of an array, the latter by an offset based AVL tree.
+ * The pool will take pages from the MM page pool. The tracking data
+ * (attributes, bitmaps and so on) are allocated from the hypervisor heap. The
+ * pool content can be accessed both by using the page id and the physical
+ * address (HC). The former is managed by means of an array, the latter by an
+ * offset based AVL tree.
  *
  * Flushing of a pool page means that we iterate the content (we know what kind

@@ -125 +132 @@
      */
     PCFGMNODE pCfg = CFGMR3GetChild(CFGMR3GetRoot(pVM), "/PGM/Pool");
+
+    /** @cfgm{/PGM/Pool/MaxPages, uint16_t, #pages, 16, 0x3fff, 1024}
+     * The max size of the shadow page pool in pages. The pool will grow dynamically
+     * up to this limit.
+     */
     uint16_t cMaxPages;
-    int rc = CFGMR3QueryU16(pCfg, "MaxPages", &cMaxPages);
-    if (rc == VERR_CFGM_VALUE_NOT_FOUND || rc == VERR_CFGM_NO_PARENT)
-        cMaxPages = 4*_1M >> PAGE_SHIFT;
-    else if (VBOX_FAILURE(rc))
-        AssertRCReturn(rc, rc);
-    else
-        AssertMsgReturn(cMaxPages <= PGMPOOL_IDX_LAST && cMaxPages >= RT_ALIGN(PGMPOOL_IDX_FIRST, 16),
-                        ("cMaxPages=%u (%#x)\n", cMaxPages, cMaxPages), VERR_INVALID_PARAMETER);
+    int rc = CFGMR3QueryU16Def(pCfg, "MaxPages", &cMaxPages, 4*_1M >> PAGE_SHIFT);
+    AssertLogRelRCReturn(rc, rc);
+    AssertLogRelMsgReturn(cMaxPages <= PGMPOOL_IDX_LAST && cMaxPages >= RT_ALIGN(PGMPOOL_IDX_FIRST, 16),
+                          ("cMaxPages=%u (%#x)\n", cMaxPages, cMaxPages), VERR_INVALID_PARAMETER);
     cMaxPages = RT_ALIGN(cMaxPages, 16);

+    /** @cfgm{/PGM/Pool/MaxUsers, uint16_t, #users, MaxUsers, 32K, MaxPages*2}
+     * The max number of shadow page user tracking records. Each shadow page has
+     * zero of other shadow pages (or CR3s) that references it, or uses it if you
+     * like. The structures describing these relationships are allocated from a
+     * fixed sized pool. This configuration variable defines the pool size.
+     */
     uint16_t cMaxUsers;
-    rc = CFGMR3QueryU16(pCfg, "MaxUsers", &cMaxUsers);
-    if (rc == VERR_CFGM_VALUE_NOT_FOUND || rc == VERR_CFGM_NO_PARENT)
-        cMaxUsers = cMaxPages * 2;
-    else if (VBOX_FAILURE(rc))
-        AssertRCReturn(rc, rc);
-    else
-        AssertMsgReturn(cMaxUsers >= cMaxPages && cMaxPages <= _32K,
-                        ("cMaxUsers=%u (%#x)\n", cMaxUsers, cMaxUsers), VERR_INVALID_PARAMETER);
-
+    rc = CFGMR3QueryU16Def(pCfg, "MaxUsers", &cMaxUsers, cMaxPages * 2);
+    AssertLogRelRCReturn(rc, rc);
+    AssertLogRelMsgReturn(cMaxUsers >= cMaxPages && cMaxPages <= _32K,
+                          ("cMaxUsers=%u (%#x)\n", cMaxUsers, cMaxUsers), VERR_INVALID_PARAMETER);
+
+    /** @cfgm{/PGM/Pool/MaxPhysExts, uint16_t, #extents, 16, MaxPages * 2, MAX(MaxPages*2,0x3fff)}
+     * The max number of extents for tracking aliased guest pages.
+     */
     uint16_t cMaxPhysExts;
-    rc = CFGMR3QueryU16(pCfg, "MaxPhysExts", &cMaxPhysExts);
-    if (rc == VERR_CFGM_VALUE_NOT_FOUND || rc == VERR_CFGM_NO_PARENT)
-        cMaxPhysExts = RT_MAX(cMaxPages * 2, PGMPOOL_IDX_LAST);
-    else if (VBOX_FAILURE(rc))
-        AssertRCReturn(rc, rc);
-    else
-        AssertMsgReturn(cMaxPhysExts >= 16 && cMaxPages <= PGMPOOL_IDX_LAST,
-                        ("cMaxPhysExts=%u (%#x)\n", cMaxPhysExts, cMaxUsers), VERR_INVALID_PARAMETER);
-
+    rc = CFGMR3QueryU16Def(pCfg, "MaxPhysExts", &cMaxPhysExts, RT_MAX(cMaxPages * 2, PGMPOOL_IDX_LAST));
+    AssertLogRelRCReturn(rc, rc);
+    AssertLogRelMsgReturn(cMaxPhysExts >= 16 && cMaxPages <= PGMPOOL_IDX_LAST,
+                          ("cMaxPhysExts=%u (%#x)\n", cMaxPhysExts, cMaxPhysExts), VERR_INVALID_PARAMETER);
+
+    /** @cfgm{/PGM/Pool/ChacheEnabled, bool, true}
+     * Enables or disabling caching of shadow pages. Chaching means that we will try
+     * reuse shadow pages instead of recreating them everything SyncCR3, SyncPT or
+     * SyncPage requests one. When reusing a shadow page, we can save time
+     * reconstructing it and it's children.
+     */
     bool fCacheEnabled;
-    rc = CFGMR3QueryBool(pCfg, "CacheEnabled", &fCacheEnabled);
-    if (rc == VERR_CFGM_VALUE_NOT_FOUND || rc == VERR_CFGM_NO_PARENT)
-        fCacheEnabled = true;
-    else if (VBOX_FAILURE(rc))
-        AssertRCReturn(rc, rc);
+    rc = CFGMR3QueryBoolDef(pCfg, "CacheEnabled", &fCacheEnabled, true);
+    AssertLogRelRCReturn(rc, rc);

     Log(("pgmR3PoolInit: cMaxPages=%#RX16 cMaxUsers=%#RX16 cMaxPhysExts=%#RX16 fCacheEnable=%RTbool\n",
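The design outline quoted above describes the physical cross reference extents only in prose. As a rough illustration (not the actual VirtualBox structures; the type, macro and function names here are made up), an index-chained extent array along the lines described there could look like this:

/* Illustrative sketch only; the real PGM pool structures differ. It mimics the
 * scheme described above: each extent can hold up to three page pool indexes
 * and the list is chained by index into the paPhysExt array, not by pointer. */
#include <stdint.h>
#include <stdio.h>

#define MY_NIL_IDX  UINT16_MAX                  /* hypothetical end-of-chain / unused-slot marker */

typedef struct MYPHYSEXT
{
    uint16_t aidx[3];                           /* up to 3 shadow page pool indexes aliasing the page */
    uint16_t iNext;                             /* index of the next extent in paPhysExt, or MY_NIL_IDX */
} MYPHYSEXT;

/* Walks one extent chain and prints every shadow page index that maps the guest page. */
static void myDumpPhysExtChain(const MYPHYSEXT *paPhysExt, uint16_t iFirst)
{
    for (uint16_t i = iFirst; i != MY_NIL_IDX; i = paPhysExt[i].iNext)
        for (unsigned j = 0; j < 3; j++)
            if (paPhysExt[i].aidx[j] != MY_NIL_IDX)
                printf("shadow page #%u\n", (unsigned)paPhysExt[i].aidx[j]);
}

int main(void)
{
    /* A guest page aliased by four shadow pages needs two extents. */
    MYPHYSEXT aPhysExt[2] =
    {
        { { 5, 9, 12 }, 1 },                    /* extent 0 chains to extent 1 */
        { { 14, MY_NIL_IDX, MY_NIL_IDX }, MY_NIL_IDX }
    };
    myDumpPhysExtChain(aPhysExt, 0);
    return 0;
}

One reason to chain by a small index rather than by pointer is that the same array can be mapped and walked from different address spaces (ring-3, ring-0, raw-mode context) without pointer fix-ups, and the index fits in the spare bits mentioned in the comment.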
trunk/src/VBox/VMM/VMMAll/PGMAllPool.cpp (r13202 → r13543)

@@ -475 +475 @@
             }
 #ifdef PGMPOOL_INVALIDATE_UPPER_SHADOW_TABLE_ENTRIES
-            /* causes trouble when the guest uses a PDE to refer to the whole page table level structure. (invalidate here; faults later on when it tries
-             * to change the page table entries
-             * -> recheck; probably only applies to the GC case
+            /*
+             * Causes trouble when the guest uses a PDE to refer to the whole page table level
+             * structure. (Invalidate here; faults later on when it tries to change the page
+             * table entries -> recheck; probably only applies to the RC case.)
              */
             else

@@ -509 +510 @@
             }
 #ifdef PGMPOOL_INVALIDATE_UPPER_SHADOW_TABLE_ENTRIES
-            else
-            if (uShw.pPDPae->a[iShw2].n.u1Present)
+            else if (uShw.pPDPae->a[iShw2].n.u1Present)
             {
                 LogFlow(("pgmPoolMonitorChainChanging: pae pd iShw2=%#x: %RX64 -> freeing it!\n", iShw2, uShw.pPDPae->a[iShw2].u));

@@ -527 +527 @@
         case PGMPOOLKIND_ROOT_PDPT:
         {
-            /* Hopefully this doesn't happen very often:
+            /*
+             * Hopefully this doesn't happen very often:
              * - touching unused parts of the page
              * - messing with the bits of pd pointers without changing the physical address

@@ -614 +615 @@
         case PGMPOOLKIND_64BIT_PDPT_FOR_64BIT_PDPT:
         {
-            /* Hopefully this doesn't happen very often:
+            /*
+             * Hopefully this doesn't happen very often:
              * - messing with the bits of pd pointers without changing the physical address
              */

@@ -645 +647 @@
         case PGMPOOLKIND_64BIT_PML4_FOR_64BIT_PML4:
         {
-            /* Hopefully this doesn't happen very often:
+            /*
+             * Hopefully this doesn't happen very often:
              * - messing with the bits of pd pointers without changing the physical address
              */

@@ -744 +747 @@
 {
 #ifndef IN_GC
+    /** @todo could make this general, faulting close to rsp should be safe reuse heuristic. */
     if (    HWACCMHasPendingIrq(pVM)
         &&  (pRegFrame->rsp - pvFault) < 32)

@@ -752 +756 @@
     }
 #else
-    NOREF(pVM);
+    NOREF(pVM); NOREF(pvFault);
 #endif

@@ -1680 +1684 @@
 }

-
-#ifdef PGMPOOL_WITH_MIXED_PT_CR3
+# ifdef PGMPOOL_WITH_MIXED_PT_CR3
+
 /**
  * Set or clear the fCR3Mix attribute in a chain of monitored pages.

@@ -1800 +1804 @@
     return rc;
 }
-#endif /* PGMPOOL_WITH_MIXED_PT_CR3 */

+# endif /* PGMPOOL_WITH_MIXED_PT_CR3 */

 /**

@@ -3103 +3109 @@
         const unsigned iPage = off >> PAGE_SHIFT;
         Assert(PGM_PAGE_GET_HCPHYS(&pRam->aPages[iPage]));
+#ifdef LOG_ENABLED
         RTHCPHYS HCPhysPage = PGM_PAGE_GET_HCPHYS(&pRam->aPages[iPage]);
         Log(("pgmPoolTracDerefGCPhys %VHp vs %VHp\n", HCPhysPage, HCPhys));
+#endif
         if (PGM_PAGE_GET_HCPHYS(&pRam->aPages[iPage]) == HCPhys)
         {

The remaining hunks in this file are whitespace-only: they normalize blank lines around function separators and around the #ifdef/#endif markers for PGMPOOL_WITH_CACHE, PGMPOOL_WITH_MONITORING, PGMPOOL_WITH_USER_TRACKING and PGMPOOL_WITH_GCPHYS_TRACKING, touching the blocks documented as "Inserts a page into the GCPhys hash table", "Looks for pages sharing the monitor", "Enabled write monitoring of a guest page", "Handle SyncCR3 pool tasks", "Frees up at least one user entry", "Scans one shadow page table for mappings of a physical page", "Allocates a new physical cross reference extent", "Clear references to guest physical memory" and the "Clear references to shadowed pages in ..." helpers; no code lines change in those hunks.
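The new @todo in the PGMAllPool.cpp hunk above ("faulting close to rsp should be safe reuse heuristic") refers to the check right below it, which treats a write fault landing within 32 bytes of the guest stack pointer, while an interrupt is pending, as the guest reusing the monitored page for its stack rather than as a page table update. A minimal sketch of just the distance test (the function name and standalone form are made up for illustration; the real code additionally requires HWACCMHasPendingIrq(pVM) and takes rsp from the trap register frame):

#include <stdbool.h>
#include <stdint.h>

/* Illustrative helper, not the actual VMM code: a write fault at or just below
 * the current guest rsp most likely comes from stack pushes (e.g. the CPU
 * delivering a pending interrupt), so the monitored page is assumed to be
 * reused as ordinary data rather than as a paging structure. */
static bool myFaultLooksLikeStackPush(uint64_t uGuestRsp, uint64_t uFaultAddr)
{
    /* Unsigned wrap-around keeps faults above rsp far outside the window. */
    return uGuestRsp - uFaultAddr < 32;
}

Presumably the point of detecting reuse here is to flush the shadow page and drop the write monitoring instead of interpreting every push of the interrupt frame one by one.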