Timestamp:
    Sep 22, 2007 12:02:40 AM
File:
    1 edited
Legend:
    (no prefix)  Unmodified
    -            Removed (present in r4953 only)
    +            Added (present in r4978 only)
trunk/src/VBox/VMM/PGMInternal.h
r4953 → r4978

… lines 468-477 …

/**
 * A Physical Guest Page tracking structure.
 *
 * The format of this structure is complicated because we have to fit a lot
 * of information into as few bits as possible. The format is also subject
 * to change (there is one coming up soon), which means that we'll be
 * using PGM_PAGE_GET_* and PGM_PAGE_SET_* macros for all accesses to the
 * structure.

… lines 509-513 …

#define PGM_PAGE_STATE_ALLOCATED 1
/** An allocated page that's being monitored for writes.
 * The shadow page table mappings are read-only. When a write occurs, the
 * fWrittenTo member is set, the page remapped as read-write and the state
 * moved back to allocated. */

… lines 515-519 …

/** The page is shared, aka. copy-on-write.
 * This is a page that's shared with other VMs. */
#define PGM_PAGE_STATE_SHARED 3
/** @} */

… lines 577-581 …

 */
#define PGM_PAGE_SET_PAGEID(pPage, _idPage) do { (pPage)->idPage = (_idPage); } while (0)
/* later:
#define PGM_PAGE_SET_PAGEID(pPage, _idPage) do { (pPage)->HCPhys = (((pPage)->HCPhys) & UINT64_C(0x0000fffffffff000)) \
                                                                 | ((_idPage) & 0xfff) \

… lines 724-728 …

/**
 * Ring-3 tracking structure for an allocation chunk ring-3 mapping.
 *
 * The primary tree (Core) uses the chunk id as key.
 * The secondary tree (AgeCore) is used for ageing and uses the ageing sequence number as key.

… lines 753-757 …

#if HC_ARCH_BITS == 64
    uint32_t u32Padding; /**< alignment padding. */
#endif
    /** The chunk map. */
    R3R0PTRTYPE(PPGMCHUNKR3MAP) volatile pChunk;

… lines 760-764 …

typedef PGMCHUNKR3MAPTLBE *PPGMCHUNKR3MAPTLBE;

/** The number of TLB entries in PGMCHUNKR3MAPTLB.
 * @remark Must be a power of two value. */
#define PGM_CHUNKR3MAPTLB_ENTRIES 32

… lines 766-774 …

/**
 * Allocation chunk ring-3 mapping TLB.
 *
 * @remarks We use a TLB to speed up lookups by avoiding walking the AVL.
 *          At first glance this might look kinda odd since AVL trees are
 *          supposed to give the most optimal lookup times of all trees
 *          due to their balancing. However, take a tree with 1023 nodes
 *          in it, that's 10 levels, meaning that most searches have to go
 *          down 9 levels before they find what they want. This isn't fast

… lines 776-781 …

 *          and of course the problem with trees and branch prediction.
 *          This is why we use TLBs in front of most of the trees.
 *
 * @todo Generalize this TLB + AVL stuff, shouldn't be all that
 *       difficult when we switch to inlined AVL trees (from kStuff).
 */

… lines 804-808 …

#if HC_ARCH_BITS == 64
    uint32_t u32Padding; /**< alignment padding. */
#endif
    /** The guest page. */
    R3R0PTRTYPE(PPGMPAGE) volatile pPage;

… lines 816-823 …

/** The number of entries in the ring-3 guest page mapping TLB.
 * @remarks The value must be a power of two. */
#define PGM_PAGER3MAPTLB_ENTRIES 64

/**
 * Ring-3 guest page mapping TLB.

… lines 833-837 …

/**
 * Calculates the index of the TLB entry for the specified guest page.
 * @returns Physical TLB index.
 * @param GCPhys The guest physical address.

… lines 840-849 …

/** @name Context neutral page mapper TLB.
 *
 * Hoping to avoid some code and bug duplication, parts of the GCxxx->CCPtr
 * code are written in a kind of context-neutral way. Time will show whether
 * this actually makes sense or not...
 *
 * @{ */
/** @typedef PPGMPAGEMAPTLB

… lines 868-872 …

// typedef PPGMPAGEGCMAPTLBE *PPPGMPAGEMAPTLBE;
# define PGM_PAGEMAPTLB_ENTRIES PGM_PAGEGCMAPTLB_ENTRIES
# define PGM_PAGEMAPTLB_IDX(GCPhys) PGM_PAGEGCMAPTLB_IDX(GCPhys)
typedef void * PPGMPAGEMAP;
typedef void ** PPPGMPAGEMAP;

… lines 876-880 …

// typedef PPGMPAGER0MAPTLBE *PPPGMPAGEMAPTLBE;
//# define PGM_PAGEMAPTLB_ENTRIES PGM_PAGER0MAPTLB_ENTRIES
//# define PGM_PAGEMAPTLB_IDX(GCPhys) PGM_PAGER0MAPTLB_IDX(GCPhys)
// typedef PPGMCHUNKR0MAP PPGMPAGEMAP;
// typedef PPPGMCHUNKR0MAP PPPGMPAGEMAP;

… lines 884-891 …

typedef PPGMPAGER3MAPTLBE *PPPGMPAGEMAPTLBE;
# define PGM_PAGEMAPTLB_ENTRIES PGM_PAGER3MAPTLB_ENTRIES
# define PGM_PAGEMAPTLB_IDX(GCPhys) PGM_PAGER3MAPTLB_IDX(GCPhys)
typedef PPGMCHUNKR3MAP PPGMPAGEMAP;
typedef PPPGMCHUNKR3MAP PPPGMPAGEMAP;
#endif
/** @} */

… lines 1815-1822 …

    PGMPHYSCACHE pgmphyswritecache;

    /**
     * Data associated with managing the ring-3 mappings of the allocation chunks.
     */
    struct
    {
        /** The chunk tree, ordered by chunk id. */

… lines 1826-1830 …

        /** The number of mapped chunks. */
        uint32_t c;
        /** The maximum number of mapped chunks.
         * @cfgm PGM/MaxRing3Chunks */
        uint32_t cMax;

… lines 1837-1841 …

    } ChunkR3Map;

    /**
     * The page mapping TLB for ring-3 and (for the time being) ring-0.
     */

… lines 1846-1854 …

    /** The host physical address of the zero page. */
    RTHCPHYS HCPhysZeroPg;
    /** The ring-3 mapping of the zero page. */
    RTR3PTR pvZeroPgR3;
    /** The ring-0 mapping of the zero page. */
    RTR0PTR pvZeroPgR0;
    /** The GC mapping of the zero page. */
    RTGCPTR pvZeroPgGC;
#if GC_ARCH_BITS != 32

… lines 1859-1873 …

    /** The number of handy pages. */
    uint32_t cHandyPages;
    /**
     * Array of handy pages.
     *
     * This array is used in a two-way communication between pgmPhysAllocPage
     * and GMMR0AllocateHandyPages, with PGMR3PhysAllocateHandyPages serving as
     * an intermediary.
     *
     * The size of this array is important, see pgmPhysEnsureHandyPage for details.
     * (The current size of 32 pages means 128 KB of memory.)
     */
    struct
    {
        /** The host physical address before pgmPhysAllocPage uses it,

… lines 1885-1889 …

    /** @name Release Statistics
     * @{ */
    uint32_t cAllPages;     /**< The total number of pages. (Should be Private + Shared + Zero.) */
    uint32_t cPrivatePages; /**< The number of private pages. */

… lines 2253-2257 …

 * @returns Pointer to the page on success.
 * @returns NULL on a VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS condition.
 *
 * @param pPGM PGM handle.
 * @param GCPhys The GC physical address.

… lines 2280-2290 …

/**
 * Gets the PGMPAGE structure for a guest page.
 *
 * Old Phys code: Will make sure the page is present.
 *
 * @returns VBox status code.
 * @retval VINF_SUCCESS and a valid *ppPage on success.
 * @retval VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS if the address isn't valid.
 *
 * @param pPGM PGM handle.
 * @param GCPhys The GC physical address.

… lines 2332-2336 …

        Assert(rc == VINF_SUCCESS);
    }
#endif
    return VINF_SUCCESS;
}

… lines 2341-2351 …

/**
 * Gets the PGMPAGE structure for a guest page.
 *
 * Old Phys code: Will make sure the page is present.
 *
 * @returns VBox status code.
 * @retval VINF_SUCCESS and a valid *ppPage on success.
 * @retval VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS if the address isn't valid.
 *
 * @param pPGM PGM handle.
 * @param GCPhys The GC physical address.

… lines 2399-2403 …

        Assert(rc == VINF_SUCCESS);
    }
#endif
    return VINF_SUCCESS;
}

… lines 2409-2413 …

 * @returns Pointer to the page on success.
 * @returns NULL on a VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS condition.
 *
 * @param pPGM PGM handle.
 * @param GCPhys The GC physical address.

… lines 2443-2447 …

 * @returns Pointer to the page on success.
 * @returns NULL on a VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS condition.
 *
 * @param pPGM PGM handle.
 * @param GCPhys The GC physical address.

… lines 2494-2498 …

    }
#endif
    return VINF_SUCCESS;
}

… lines 2506-2511 …

 * @param GCPhys The GC physical address.
 * @param pHCPhys Where to store the corresponding HC physical address.
 *
 * @deprecated Doesn't deal with zero, shared or write monitored pages.
 *             Avoid when writing new code!
 */

… lines 2533-2537 …

 * @param ppTlbe Where to store the pointer to the TLB entry.
 */

DECLINLINE(int) pgmPhysPageQueryTlbe(PPGM pPGM, RTGCPHYS GCPhys, PPPGMPAGEMAPTLBE ppTlbe)
{

… lines 2559-2563 …

 * @param GCPhys The GC physical address.
 * @param pHCPtr Where to store the corresponding HC virtual address.
 *
 * @deprecated This will be eliminated by PGMPhysGCPhys2CCPtr.
 */

… lines 2599-2603 …

 * @param GCPhys The GC physical address.
 * @param pHCPtr Where to store the corresponding HC virtual address.
 *
 * @deprecated This will be eliminated. Don't use it.
 */

… lines 2619-2623 …

#endif
    if (rc != VINF_SUCCESS)
    {
        *pHCPtr = 0; /* GCC crap */
        return rc;

… lines 2645-2650 …

 * @param pHCPtr Where to store the corresponding HC virtual address.
 * @param pHCPhys Where to store the HC Physical address and its flags.
 *
 * @deprecated Will go away or be changed. Only user is MapCR3. MapCR3 will have to do ring-3
 *             and ring-0 locking of the CR3 in a lazy fashion, I fear... or perhaps not. We'll see.
 */

… lines 2872-2879 (r4953) → 2872-2878 (r4978) …

 *
 * @returns The ram flags.
- * @param pVM The VM handle.
 * @param pCur The physical handler in question.
 */
-DECLINLINE(unsigned) pgmHandlerPhysicalCalcFlags(PVM pVM, PPGMPHYSHANDLER pCur)
+DECLINLINE(unsigned) pgmHandlerPhysicalCalcFlags(PPGMPHYSHANDLER pCur)
{
    switch (pCur->enmType)

… lines 2898-2906 (r4953) → 2897-2905 (r4978) …

 * Clears one physical page of a virtual handler
 *
- * @param pVM The VM handle.
+ * @param pPGM Pointer to the PGM instance.
 * @param pCur Virtual handler structure
 * @param iPage Physical page index
 */
-DECLINLINE(void) pgmHandlerVirtualClearPage(PVM pVM, PPGM pPGM, PPGMVIRTHANDLER pCur, unsigned iPage)
+DECLINLINE(void) pgmHandlerVirtualClearPage(PPGM pPGM, PPGMVIRTHANDLER pCur, unsigned iPage)
{
    const PPGMPHYS2VIRTHANDLER pPhys2Virt = &pCur->aPhysToVirt[iPage];

… lines 3001-3009 (r4953) → 3000-3007 (r4978) …

 *
 * @returns Pointer to the shadow page structure.
- * @param pVM The VM handle.
 * @param pPool The pool.
 * @param HCPhys The HC physical address of the shadow page.
 */
-DECLINLINE(PPGMPOOLPAGE) pgmPoolGetPage(PVM pVM, PPGMPOOL pPool, RTHCPHYS HCPhys)
+DECLINLINE(PPGMPOOLPAGE) pgmPoolGetPage(PPGMPOOL pPool, RTHCPHYS HCPhys)
{
    /*

… lines 3020-3028 (r4953) → 3018-3025 (r4978) …

 *
 * @returns Pointer to the shadow page structure.
- * @param pVM The VM handle.
 * @param pPool The pool.
 * @param idx The pool page index.
 */
-DECLINLINE(PPGMPOOLPAGE) pgmPoolGetPageByIdx(PVM pVM, PPGMPOOL pPool, unsigned idx)
+DECLINLINE(PPGMPOOLPAGE) pgmPoolGetPageByIdx(PPGMPOOL pPool, unsigned idx)
{
    AssertFatalMsg(idx >= PGMPOOL_IDX_FIRST && idx < pPool->cCurPages, ("idx=%d\n", idx));
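The comment touched in the first hunk (lines 470-477) explains why all access to PGMPAGE goes through PGM_PAGE_GET_*/PGM_PAGE_SET_* macros: the structure layout is about to change, so callers must not touch the fields directly. Below is a minimal sketch of that insulation idea; the EXPAGE struct and the getter macro are invented for illustration (only PGM_PAGE_SET_PAGEID appears in the diff), not the real PGMPAGE definitions.

#include <stdint.h>

/* Illustrative stand-in for a tracking structure whose layout may change;
 * this is not the real PGMPAGE. */
typedef struct EXPAGE
{
    uint64_t HCPhys;   /* host physical address, with spare bits */
    uint32_t idPage;   /* page id, currently a dedicated field */
} EXPAGE;

/* Today: plain field access, same style as the PGM_PAGE_SET_PAGEID shown in
 * the diff above.  If idPage later moves into spare bits of HCPhys, only
 * these two macros need to change, never the callers. */
#define EXPAGE_SET_PAGEID(pPage, _idPage) do { (pPage)->idPage = (_idPage); } while (0)
#define EXPAGE_GET_PAGEID(pPage)          ((pPage)->idPage)

static uint32_t exExample(EXPAGE *pPage)
{
    EXPAGE_SET_PAGEID(pPage, 42);
    return EXPAGE_GET_PAGEID(pPage);
}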
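The @remarks block around lines 766-781 documents why PGM keeps a small TLB in front of its AVL trees, and the power-of-two remark on PGM_CHUNKR3MAPTLB_ENTRIES / PGM_PAGER3MAPTLB_ENTRIES exists so the entry index can be computed with a cheap mask. Here is a self-contained sketch of that pattern with invented names and a stubbed-out tree lookup; the real PGMCHUNKR3MAPTLB/PGMPAGER3MAPTLB code differs in types, locking and invalidation.

#include <stdint.h>
#include <stddef.h>

#define EX_TLB_ENTRIES  32                              /* must be a power of two */
#define EX_TLB_IDX(id)  ((id) & (EX_TLB_ENTRIES - 1))   /* cheap hash: low bits of the key */

typedef struct EXTLBE
{
    uint32_t  id;       /* key cached in this entry */
    void     *pvMap;    /* cached lookup result */
} EXTLBE;

static EXTLBE g_aTlb[EX_TLB_ENTRIES];

/* Stand-in for the O(log n) AVL walk the comment is talking about. */
static void *exSlowTreeLookup(uint32_t id)
{
    (void)id;
    return NULL;
}

static void *exLookup(uint32_t id)
{
    EXTLBE *pTlbe = &g_aTlb[EX_TLB_IDX(id)];
    if (pTlbe->id == id && pTlbe->pvMap)
        return pTlbe->pvMap;                  /* hit: one compare, no tree walk */
    void *pvMap = exSlowTreeLookup(id);       /* miss: pay for the tree walk once */
    if (pvMap)
    {
        pTlbe->id    = id;                    /* remember it for next time */
        pTlbe->pvMap = pvMap;
    }
    return pvMap;
}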
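The only non-whitespace change in this revision is the removal of the unused pVM parameter from the four inline helpers in the last hunks (pgmHandlerPhysicalCalcFlags, pgmHandlerVirtualClearPage, pgmPoolGetPage, pgmPoolGetPageByIdx). A compile-only illustration of what that means for a caller follows; the typedefs are stand-ins, not the real VirtualBox types.

/* Stand-in forward declarations so the fragment compiles on its own. */
typedef struct PGMPOOL     PGMPOOL,     *PPGMPOOL;
typedef struct PGMPOOLPAGE PGMPOOLPAGE, *PPGMPOOLPAGE;
typedef unsigned long long RTHCPHYS;

/* r4953: PPGMPOOLPAGE pgmPoolGetPage(PVM pVM, PPGMPOOL pPool, RTHCPHYS HCPhys); */
PPGMPOOLPAGE pgmPoolGetPage(PPGMPOOL pPool, RTHCPHYS HCPhys);   /* r4978 signature */

static PPGMPOOLPAGE exCaller(PPGMPOOL pPool, RTHCPHYS HCPhys)
{
    /* Before r4978 the VM handle had to be threaded through as well:
     *     return pgmPoolGetPage(pVM, pPool, HCPhys);                  */
    return pgmPoolGetPage(pPool, HCPhys);    /* now only what the helper actually uses */
}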