Changeset 13060 in vbox for trunk/src/VBox/VMM/VMMAll

- Timestamp: Oct 8, 2008, 7:42:06 AM
- svn:sync-xref-src-repo-rev: 37532
- Location: trunk/src/VBox/VMM/VMMAll
- Files: 5 edited
trunk/src/VBox/VMM/VMMAll/PGMAll.cpp (r13019 → r13060)

Lines 787, 839, 875, 979 and 1021 — the context-suffix accessor for the shadow page pool is renamed:

-    PPGMPOOL pPool = pPGM->CTXSUFF(pPool);
+    PPGMPOOL pPool = pPGM->CTX_SUFF(pPool);

Added after line 1751 (new lines 1752–1921), just ahead of the unchanged #ifdef VBOX_STRICT section — the dynamic page-mapping API, now built both for the raw-mode guest context (IN_GC) and for the VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0 ring-0 configuration:

#if defined(IN_GC) || defined(VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0)

/**
 * Temporarily maps one guest page specified by GC physical address.
 * These pages must have a physical mapping in HC, i.e. they cannot be MMIO pages.
 *
 * Be WARNED that the dynamic page mapping area is small, 8 pages, thus the space is
 * reused after 8 mappings (or perhaps a few more if you score with the cache).
 *
 * @returns VBox status.
 * @param   pVM     VM handle.
 * @param   GCPhys  GC Physical address of the page.
 * @param   ppv     Where to store the address of the mapping.
 */
VMMDECL(int) PGMDynMapGCPage(PVM pVM, RTGCPHYS GCPhys, void **ppv)
{
    AssertMsg(!(GCPhys & PAGE_OFFSET_MASK), ("GCPhys=%VGp\n", GCPhys));

    /*
     * Get the ram range.
     */
    PPGMRAMRANGE pRam = pVM->pgm.s.CTX_SUFF(pRamRanges);
    while (pRam && GCPhys - pRam->GCPhys >= pRam->cb)
        pRam = pRam->CTX_SUFF(pNext);
    if (!pRam)
    {
        AssertMsgFailed(("Invalid physical address %VGp!\n", GCPhys));
        return VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS;
    }

    /*
     * Pass it on to PGMDynMapHCPage.
     */
    RTHCPHYS HCPhys = PGM_PAGE_GET_HCPHYS(&pRam->aPages[(GCPhys - pRam->GCPhys) >> PAGE_SHIFT]);
    //Log(("PGMDynMapGCPage: GCPhys=%VGp HCPhys=%VHp\n", GCPhys, HCPhys));
    return PGMDynMapHCPage(pVM, HCPhys, ppv);
}


/**
 * Temporarily maps one guest page specified by unaligned GC physical address.
 * These pages must have a physical mapping in HC, i.e. they cannot be MMIO pages.
 *
 * Be WARNED that the dynamic page mapping area is small, 8 pages, thus the space is
 * reused after 8 mappings (or perhaps a few more if you score with the cache).
 *
 * The caller is aware that only the specified page is mapped and that really bad things
 * will happen if writing beyond the page!
 *
 * @returns VBox status.
 * @param   pVM     VM handle.
 * @param   GCPhys  GC Physical address within the page to be mapped.
 * @param   ppv     Where to store the address of the mapping address corresponding to GCPhys.
 */
VMMDECL(int) PGMDynMapGCPageOff(PVM pVM, RTGCPHYS GCPhys, void **ppv)
{
    /*
     * Get the ram range.
     */
    PPGMRAMRANGE pRam = pVM->pgm.s.CTX_SUFF(pRamRanges);
    while (pRam && GCPhys - pRam->GCPhys >= pRam->cb)
        pRam = pRam->CTX_SUFF(pNext);
    if (!pRam)
    {
        AssertMsgFailed(("Invalid physical address %VGp!\n", GCPhys));
        return VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS;
    }

    /*
     * Pass it on to PGMDynMapHCPageOff.
     */
    RTHCPHYS HCPhys = PGM_PAGE_GET_HCPHYS(&pRam->aPages[(GCPhys - pRam->GCPhys) >> PAGE_SHIFT]);
    return PGMDynMapHCPageOff(pVM, HCPhys, ppv);
}


/**
 * Temporarily maps one host page specified by HC physical address.
 *
 * Be WARNED that the dynamic page mapping area is small, 8 pages, thus the space is
 * reused after 8 mappings (or perhaps a few more if you score with the cache).
 *
 * @returns VBox status.
 * @param   pVM     VM handle.
 * @param   HCPhys  HC Physical address of the page.
 * @param   ppv     Where to store the address of the mapping.
 */
VMMDECL(int) PGMDynMapHCPage(PVM pVM, RTHCPHYS HCPhys, void **ppv)
{
    AssertMsg(!(HCPhys & PAGE_OFFSET_MASK), ("HCPhys=%VHp\n", HCPhys));
# ifdef IN_GC

    /*
     * Check the cache.
     */
    register unsigned iCache;
    if (    pVM->pgm.s.aHCPhysDynPageMapCache[iCache = 0] == HCPhys
        ||  pVM->pgm.s.aHCPhysDynPageMapCache[iCache = 1] == HCPhys
        ||  pVM->pgm.s.aHCPhysDynPageMapCache[iCache = 2] == HCPhys
        ||  pVM->pgm.s.aHCPhysDynPageMapCache[iCache = 3] == HCPhys)
    {
        static const uint8_t au8Trans[MM_HYPER_DYNAMIC_SIZE >> PAGE_SHIFT][RT_ELEMENTS(pVM->pgm.s.aHCPhysDynPageMapCache)] =
        {
            { 0, 5, 6, 7 },
            { 0, 1, 6, 7 },
            { 0, 1, 2, 7 },
            { 0, 1, 2, 3 },
            { 4, 1, 2, 3 },
            { 4, 5, 2, 3 },
            { 4, 5, 6, 3 },
            { 4, 5, 6, 7 },
        };
        Assert(RT_ELEMENTS(au8Trans) == 8);
        Assert(RT_ELEMENTS(au8Trans[0]) == 4);
        int iPage = au8Trans[pVM->pgm.s.iDynPageMapLast][iCache];
        void *pv = pVM->pgm.s.pbDynPageMapBaseGC + (iPage << PAGE_SHIFT);
        *ppv = pv;
        STAM_COUNTER_INC(&pVM->pgm.s.StatDynMapCacheHits);
        //Log(("PGMGCDynMapHCPage: HCPhys=%VHp pv=%VGv iPage=%d iCache=%d\n", HCPhys, pv, iPage, iCache));
        return VINF_SUCCESS;
    }
    Assert(RT_ELEMENTS(pVM->pgm.s.aHCPhysDynPageMapCache) == 4);
    STAM_COUNTER_INC(&pVM->pgm.s.StatDynMapCacheMisses);

    /*
     * Update the page tables.
     */
    register unsigned iPage = pVM->pgm.s.iDynPageMapLast;
    pVM->pgm.s.iDynPageMapLast = iPage = (iPage + 1) & ((MM_HYPER_DYNAMIC_SIZE >> PAGE_SHIFT) - 1);
    Assert((MM_HYPER_DYNAMIC_SIZE >> PAGE_SHIFT) == 8);

    pVM->pgm.s.aHCPhysDynPageMapCache[iPage & (RT_ELEMENTS(pVM->pgm.s.aHCPhysDynPageMapCache) - 1)] = HCPhys;
    pVM->pgm.s.paDynPageMap32BitPTEsGC[iPage].u = (uint32_t)HCPhys | X86_PTE_P | X86_PTE_A | X86_PTE_D;
    pVM->pgm.s.paDynPageMapPaePTEsGC[iPage].u   = HCPhys | X86_PTE_P | X86_PTE_A | X86_PTE_D;

    void *pv = pVM->pgm.s.pbDynPageMapBaseGC + (iPage << PAGE_SHIFT);
    *ppv = pv;
    ASMInvalidatePage(pv);
    Log4(("PGMGCDynMapHCPage: HCPhys=%VHp pv=%VGv iPage=%d\n", HCPhys, pv, iPage));
    return VINF_SUCCESS;

# else  /* VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0 */
    AssertFailed();
    return VERR_NOT_IMPLEMENTED;
# endif /* VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0 */
}


/**
 * Temporarily maps one host page specified by HC physical address, returning
 * pointer within the page.
 *
 * Be WARNED that the dynamic page mapping area is small, 8 pages, thus the space is
 * reused after 8 mappings (or perhaps a few more if you score with the cache).
 *
 * @returns VBox status.
 * @param   pVM     VM handle.
 * @param   HCPhys  HC Physical address of the page.
 * @param   ppv     Where to store the address corresponding to HCPhys.
 */
VMMDECL(int) PGMDynMapHCPageOff(PVM pVM, RTHCPHYS HCPhys, void **ppv)
{
    int rc = PGMDynMapHCPage(pVM, HCPhys, ppv);
    if (RT_SUCCESS(rc))
        *ppv = (void *)((uintptr_t)*ppv | (HCPhys & PAGE_OFFSET_MASK));
    return rc;
}

#endif /* IN_GC || VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0 */
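The doc comments above warn twice that the dynamic mapping area is only eight pages, recycled round-robin via iDynPageMapLast. A minimal standalone sketch of that recycling policy (plain C; DYNMAP, dynMapPage and the constants are invented for illustration, not VirtualBox code) shows why a caller must not rely on more than eight live mappings at a time:

    #include <assert.h>
    #include <stdint.h>
    #include <stdio.h>

    #define NUM_DYN_PAGES 8           /* MM_HYPER_DYNAMIC_SIZE >> PAGE_SHIFT in the real code */

    typedef struct DYNMAP
    {
        uint64_t aHCPhys[NUM_DYN_PAGES];  /* which physical page each slot currently maps */
        unsigned iLast;                   /* last slot handed out */
    } DYNMAP;

    /* Hand out the next slot round-robin, like the iDynPageMapLast update above. */
    static unsigned dynMapPage(DYNMAP *pMap, uint64_t HCPhys)
    {
        pMap->iLast = (pMap->iLast + 1) & (NUM_DYN_PAGES - 1);
        pMap->aHCPhys[pMap->iLast] = HCPhys;
        return pMap->iLast;
    }

    int main(void)
    {
        DYNMAP Map = { { 0 }, NUM_DYN_PAGES - 1 };
        unsigned iFirst = dynMapPage(&Map, 0x1000);      /* takes slot 0 */
        for (uint64_t i = 1; i < NUM_DYN_PAGES; i++)
            dynMapPage(&Map, 0x1000 + i * 0x1000);       /* slots 1..7 */
        unsigned iNinth = dynMapPage(&Map, 0x9000);      /* wraps: reuses slot 0 */
        assert(iNinth == iFirst);
        printf("ninth mapping reused slot %u; the first mapping is now stale\n", iNinth);
        return 0;
    }

The real PGMDynMapHCPage additionally keeps a four-entry HCPhys cache in front of the ring, which is why the comment says "or perhaps a few more if you score with the cache".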
trunk/src/VBox/VMM/VMMAll/PGMAllBth.h (r13046 → r13060)

Lines 121–128 — in Trap0eHandler, the trace for a not-present guest page directory is split by guest paging mode instead of always reporting a PML4 index:

     if (!pPDSrc)
     {
-        LogFlow(("Trap0eHandler: guest PDPTR %d not present CR3=%VGp\n", (pvFault >> X86_PML4_SHIFT) & X86_PML4_MASK, (CPUMGetGuestCR3(pVM) & X86_CR3_PAGE_MASK)));
+# if PGM_GST_TYPE == PGM_TYPE_AMD64 && GC_ARCH_BITS == 64
+        LogFlow(("Trap0eHandler: guest PML4 %d not present CR3=%VGp\n", (int)(((RTGCUINTPTR)pvFault >> X86_PML4_SHIFT) & X86_PML4_MASK), CPUMGetGuestCR3(pVM) & X86_CR3_PAGE_MASK));
+# else
+        LogFlow(("Trap0eHandler: guest iPDSrc=%u not present CR3=%VGp\n", iPDSrc, CPUMGetGuestCR3(pVM) & X86_CR3_PAGE_MASK));
+# endif
         STAM_STATS({ pVM->pgm.s.CTXSUFF(pStatTrap0eAttribution) = &pVM->pgm.s.StatTrap0eGuestTrap; });
         TRPMSetErrorCode(pVM, uErr);

New lines 993, 1091, 1318, 1332, 1373, 1811, 3146 and 3666 — the CTXSUFF → CTX_SUFF rename applied to every pool access in this file, both the pool pointer fetches:

-    PPGMPOOL pPool = pVM->pgm.s.CTXSUFF(pPool);
+    PPGMPOOL pPool = pVM->pgm.s.CTX_SUFF(pPool);

and the present-page counters:

-    pVM->pgm.s.CTXSUFF(pPool)->cPresent--;
+    pVM->pgm.s.CTX_SUFF(pPool)->cPresent--;
-    pVM->pgm.s.CTXSUFF(pPool)->cPresent++;
+    pVM->pgm.s.CTX_SUFF(pPool)->cPresent++;
trunk/src/VBox/VMM/VMMAll/PGMAllGst.h (r13046 → r13060)

Lines 488, 566, 616, 642, 663, 676, 712, 730 and 740 — the same CTXSUFF → CTX_SUFF rename, covering the pool pointer fetches in the AMD64 guest paths:

-    PPGMPOOL pPool = pVM->pgm.s.CTXSUFF(pPool);
+    PPGMPOOL pPool = pVM->pgm.s.CTX_SUFF(pPool);

and all the pgmPoolMonitorMonitorCR3 / pgmPoolMonitorUnmonitorCR3 call sites, e.g.:

-    rc = pgmPoolMonitorMonitorCR3(pVM->pgm.s.CTXSUFF(pPool), PGMPOOL_IDX_PDPT, GCPhysCR3);
+    rc = pgmPoolMonitorMonitorCR3(pVM->pgm.s.CTX_SUFF(pPool), PGMPOOL_IDX_PDPT, GCPhysCR3);
-    rc = pgmPoolMonitorUnmonitorCR3(pVM->pgm.s.CTXSUFF(pPool), PGMPOOL_IDX_PDPT);
+    rc = pgmPoolMonitorUnmonitorCR3(pVM->pgm.s.CTX_SUFF(pPool), PGMPOOL_IDX_PDPT);
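Most of the churn in this file and the two above is the mechanical CTXSUFF → CTX_SUFF rename. The idiom behind both macros: VMM data lives at different addresses in ring-3, ring-0 and the raw-mode guest context, so structures carry one pointer per context and a macro pastes the suffix for whichever context the file is being compiled for. A simplified sketch of the pattern (the field names and the exact macro definition here are illustrative; the real ones live in the VBox headers):

    /* One logical pointer, three context-specific fields (simplified). */
    typedef struct PGMPOOLDEMO
    {
        void *pPoolR3;   /* ring-3 address */
        void *pPoolR0;   /* ring-0 address */
        void *pPoolGC;   /* raw-mode guest-context address */
    } PGMPOOLDEMO;

    /* Pick the suffix for the context this translation unit is built for. */
    #if defined(IN_RING3)
    # define CTX_SUFF(name) name##R3
    #elif defined(IN_RING0)
    # define CTX_SUFF(name) name##R0
    #else /* raw-mode guest context */
    # define CTX_SUFF(name) name##GC
    #endif

    /* pVM->pgm.s.CTX_SUFF(pPool) thus expands to pPoolR3, pPoolR0 or pPoolGC. */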
trunk/src/VBox/VMM/VMMAll/PGMAllPhys.cpp (r13046 → r13060)

pgmPhysPageMap (lines 493–500) — compile the dynamic-mapping path for the darwin ring-0 build too, and drop the GC prefix from the call:

-#ifdef IN_GC
+#if defined(IN_GC) || defined(VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0)
     /*
-     * Just some sketchy GC code.
+     * Just some sketchy GC/R0-darwin code.
      */
     *ppMap = NULL;
     RTHCPHYS HCPhys = PGM_PAGE_GET_HCPHYS(pPage);
     Assert(HCPhys != pVM->pgm.s.HCPhysZeroPg);
-    return PGMGCDynMapHCPage(pVM, HCPhys, ppv);
+    return PGMDynMapHCPage(pVM, HCPhys, ppv);

Lines 559 and 615 — the ring-3 physical TLB loader is excluded from the darwin ring-0 build as well:

-#ifndef IN_GC
+#if !defined(IN_GC) && !defined(VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0)
 /**
  * Load a guest page into the ring-3 physical TLB.
 ...
-#endif /* !IN_GC */
+#endif /* !IN_GC && !VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0 */

Lines 646–648 (VBOX_WITH_NEW_PHYS_CODE path):

-#ifdef IN_GC
-    /* Until a physical TLB is implemented for GC, let PGMGCDynMapGCPageEx handle it. */
-    return PGMGCDynMapGCPageEx(pVM, GCPhys, ppv);
+# if defined(IN_GC) && defined(VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0)
+    /* Until a physical TLB is implemented for GC or/and R0-darwin, let PGMDynMapGCPageEx handle it. */
+    return PGMDynMapGCPageOff(pVM, GCPhys, ppv);

Lines 700–702 (the temporary fallback code):

-# ifdef IN_GC
-    return PGMGCDynMapGCPageEx(pVM, GCPhys, ppv);
+# if defined(IN_GC) || defined(VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0)
+    return PGMDynMapGCPageOff(pVM, GCPhys, ppv);
 # else
     return PGMPhysGCPhys2HCPtr(pVM, GCPhys, 1, ppv);

Line 819:

-#ifdef IN_GC
+#if defined(IN_GC) || defined(VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0)
     /* currently nothing to do here. */

pgmPhysCacheAdd (lines 1110–1125) — the host pointer parameter is renamed from pbHC to pbR3:

-static void pgmPhysCacheAdd(PVM pVM, PGMPHYSCACHE *pCache, RTGCPHYS GCPhys, uint8_t *pbHC)
+static void pgmPhysCacheAdd(PVM pVM, PGMPHYSCACHE *pCache, RTGCPHYS GCPhys, uint8_t *pbR3)
 ...
-    pbHC = (uint8_t *)PAGE_ADDRESS(pbHC);
+    pbR3 = (uint8_t *)PAGE_ADDRESS(pbR3);
 ...
-    pCache->Entry[iCacheIndex].pbHC = pbHC;
+    pCache->Entry[iCacheIndex].pbR3 = pbR3;

Read and write handlers — three sites in the read path (lines 1222–1224, 1270–1272, 1299–1301) and four in the write path (1522–1524, 1566–1568, 1610–1612, 1638–1640) lose their "@todo @bugref{3202}: R0 too" markers and switch from the two-step map-then-add-offset sequence to a single PGMDynMapHCPageOff call; e.g. in the read path:

-#ifdef IN_GC /** @todo @bugref{3202}: R0 too */
+#if defined(IN_GC) || defined(VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0)
     void *pvSrc = NULL;
-    PGMGCDynMapHCPage(pVM, PGM_PAGE_GET_HCPHYS(pPage), &pvSrc);
-    pvSrc = (char *)pvSrc + (off & PAGE_OFFSET_MASK);
+    PGMDynMapHCPageOff(pVM, PGM_PAGE_GET_HCPHYS(pPage) + (off & PAGE_OFFSET_MASK), &pvSrc);
 #else
     void *pvSrc = PGMRAMRANGE_GETHCPTR(pRam, off)

and identically with pvDst in the write path.

Line 1719:

-#ifndef IN_GC /* Ring 0 & 3 only */ /** @todo @bugref{1865,3202}: this'll be fun! */
+#if !defined(IN_GC) && !defined(VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0) /* Ring 0 & 3 only */ /** @todo @bugref{1865,3202}: this'll be fun! */
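The handler hunks above replace "map the aligned page, then add the sub-page offset" with one PGMDynMapHCPageOff call taking PGM_PAGE_GET_HCPHYS(pPage) + (off & PAGE_OFFSET_MASK). The two forms yield the same pointer because the mapping handed back is page-aligned, so OR-ing the offset in (as the PGMAll.cpp hunk does) equals adding it. A small self-contained check of that arithmetic (mapPage and mapPageOff are invented stand-ins, and the masking inside mapPageOff is my assumption about how the mapper ends up with an aligned page address; this is not VirtualBox code):

    #include <assert.h>
    #include <stdint.h>

    #define PAGE_OFFSET_MASK 0xfffu

    /* Stand-in for the real page mapper: returns a page-aligned virtual address. */
    static uintptr_t mapPage(uint64_t HCPhysPage)
    {
        (void)HCPhysPage;
        return 0x10000u;                       /* pretend mapping address */
    }

    /* Model of the Off variant: map the page, then OR the sub-page offset back in. */
    static uintptr_t mapPageOff(uint64_t HCPhys)
    {
        uintptr_t uVirt = mapPage(HCPhys & ~(uint64_t)PAGE_OFFSET_MASK);
        return uVirt | (uintptr_t)(HCPhys & PAGE_OFFSET_MASK);
    }

    int main(void)
    {
        uint64_t HCPhysPage = 0x7f000, off = 0x123;
        uintptr_t uOld = mapPage(HCPhysPage) + off;       /* old two-step call sites */
        uintptr_t uNew = mapPageOff(HCPhysPage + off);    /* new single-call sites */
        assert(uOld == uNew);
        return 0;
    }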
trunk/src/VBox/VMM/VMMAll/PGMAllPool.cpp (r13042 → r13060)

Lines 89–164 — pgmGCPoolMapPage is renamed pgmPoolMapPage and compiled for the darwin ring-0 build as well; the general-page path switches to the context-neutral pool pointer and mapper, and an R0 branch is added for the special root pages:

-#ifdef IN_GC
+#if defined(IN_GC) || defined(VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0)
 /**
  * Maps a pool page into the current context.
  *
  * @returns Pointer to the mapping.
  * @param   pVM     The VM handle.
  * @param   pPage   The page to map.
  */
-void *pgmGCPoolMapPage(PVM pVM, PPGMPOOLPAGE pPage)
+void *pgmPoolMapPage(PVM pVM, PPGMPOOLPAGE pPage)
 {
     /* general pages. */
     if (pPage->idx >= PGMPOOL_IDX_FIRST)
     {
-        Assert(pPage->idx < pVM->pgm.s.pPoolGC->cCurPages);
+        Assert(pPage->idx < pVM->pgm.s.CTX_SUFF(pPool)->cCurPages);
         void *pv;
-        int rc = PGMGCDynMapHCPage(pVM, pPage->Core.Key, &pv);
+        int rc = PGMDynMapHCPage(pVM, pPage->Core.Key, &pv);
         AssertReleaseRC(rc);
         return pv;
     }

     /* special pages. */
+# ifdef IN_GC
     switch (pPage->idx)
     {
         ...                    (the GC cases, lines 111–128, are unchanged)
     }
-}
-#endif /* IN_GC */
+# else  /* VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0 */
+    RTHCPHYS HCPhys;
+    switch (pPage->idx)
+    {
+        case PGMPOOL_IDX_PD:
+            HCPhys = pVM->pgm.s.HCPhys32BitPD;
+            break;
+        case PGMPOOL_IDX_PAE_PD:
+        case PGMPOOL_IDX_PAE_PD_0:
+            HCPhys = pVM->pgm.s.aHCPhysPaePDs[0];
+            break;
+        case PGMPOOL_IDX_PAE_PD_1:
+            HCPhys = pVM->pgm.s.aHCPhysPaePDs[1];
+            break;
+        case PGMPOOL_IDX_PAE_PD_2:
+            HCPhys = pVM->pgm.s.aHCPhysPaePDs[2];
+            break;
+        case PGMPOOL_IDX_PAE_PD_3:
+            HCPhys = pVM->pgm.s.aHCPhysPaePDs[3];
+            break;
+        case PGMPOOL_IDX_PDPT:
+            HCPhys = pVM->pgm.s.HCPhysPaePDPT;
+            break;
+        default:
+            AssertReleaseMsgFailed(("Invalid index %d\n", pPage->idx));
+            return NULL;
+    }
+    void *pv;
+    int rc = PGMDynMapHCPage(pVM, pPage->Core.Key, &pv);
+    AssertReleaseRC(rc);
+    return pv;
+# endif /* VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0 */
+}
+#endif /* IN_GC || VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0 */

Lines 247–255 — the fault-address helper gains a matching branch for the darwin ring-0 build:

 #ifdef IN_GC
     return (const void *)((RTGCUINTPTR)pvFault & ~(RTGCUINTPTR)(cbEntry - 1));
+
+#elif defined(VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0)
+    void *pvRet;
+    int rc = PGMDynMapGCPageOff(pPool->pVMHC, GCPhysFault & ~(RTGCPHYS)(cbEntry - 1), &pvRet);
+    AssertFatalRCSuccess(rc);
+    return pvRet;

 #elif defined(IN_RING0)

New lines 992–993, 1025, 1047, 1068, 1878, 1907, 2417, 2505, 2523, 2570, 2806, 2829, 2847, 2878, 3917, 4040, 4056 and 4075 — the CTXSUFF → CTX_SUFF rename for every pVM->pgm.s pool access in pgmPoolAccessHandler, the clear/flush routines, the phys-ext tracking functions, pgmPoolAlloc, pgmPoolFree, pgmPoolGetPageByHCPhys and pgmPoolFlushAll. Note that only the pool accessor changes; the StatMonitor suffix macro stays, e.g.:

-    STAM_PROFILE_START(&pVM->pgm.s.CTXSUFF(pPool)->CTXSUFF(StatMonitor), a);
-    PPGMPOOL pPool = pVM->pgm.s.CTXSUFF(pPool);
+    STAM_PROFILE_START(&pVM->pgm.s.CTX_SUFF(pPool)->CTXSUFF(StatMonitor), a);
+    PPGMPOOL pPool = pVM->pgm.s.CTX_SUFF(pPool);
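pgmPoolMapPage (née pgmGCPoolMapPage) applies the same context-dispatch idea at the function level: general pool pages go through the dynamic mapper keyed by pPage->Core.Key, while the handful of fixed paging roots (the 32-bit PD, the four PAE PDs, the PDPT) are special-cased per context. A minimal sketch of that shape (all names invented for illustration; these are not the real signatures):

    #include <stdio.h>

    typedef struct POOLPAGEDEMO { int idx; unsigned long Key; } POOLPAGEDEMO;
    #define POOL_IDX_FIRST 16   /* indices below this are the fixed root pages */

    /* Invented stand-ins for the per-context mapping mechanisms. */
    static void *dynMapDemo(unsigned long Key)  { return (void *)Key; }
    static void *fixedRootDemo(int idx)         { return (void *)(0x1000ul + (unsigned long)idx); }

    /* Shape of pgmPoolMapPage: dynamic mapping for general pages,
     * a per-index special case for the fixed roots. */
    static void *poolMapPageDemo(POOLPAGEDEMO *pPage)
    {
        if (pPage->idx >= POOL_IDX_FIRST)
            return dynMapDemo(pPage->Key);
        return fixedRootDemo(pPage->idx);
    }

    int main(void)
    {
        POOLPAGEDEMO Root = { 1, 0 }, General = { 42, 0xabc000 };
        printf("root=%p general=%p\n", poolMapPageDemo(&Root), poolMapPageDemo(&General));
        return 0;
    }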