Timestamp: Apr 1, 2008 9:18:10 AM
Location: trunk/src/VBox/VMM
Files: 7 edited
Legend: unmodified lines are shown as plain context; lines removed in this changeset are prefixed with "-", added lines with "+", and "…" marks a run of elided unchanged lines.
trunk/src/VBox/VMM/PGM.cpp
r7629 → r7676

  #define PGM_SHW_NAME_R0_STR(name) PGM_SHW_NAME_R0_AMD64_STR(name)
  #include "PGMShw.h"

- /* Guest - real mode */
- #define PGM_GST_TYPE PGM_TYPE_REAL
- #define PGM_GST_NAME(name) PGM_GST_NAME_REAL(name)
- #define PGM_GST_NAME_GC_STR(name) PGM_GST_NAME_GC_REAL_STR(name)
- #define PGM_GST_NAME_R0_STR(name) PGM_GST_NAME_R0_REAL_STR(name)
- #define PGM_BTH_NAME(name) PGM_BTH_NAME_AMD64_REAL(name)
- #define PGM_BTH_NAME_GC_STR(name) PGM_BTH_NAME_GC_AMD64_REAL_STR(name)
- #define PGM_BTH_NAME_R0_STR(name) PGM_BTH_NAME_R0_AMD64_REAL_STR(name)
- #define BTH_PGMPOOLKIND_PT_FOR_PT PGMPOOLKIND_PAE_PT_FOR_PHYS
- #include "PGMBth.h"
- #undef BTH_PGMPOOLKIND_PT_FOR_PT
- #undef PGM_BTH_NAME
- #undef PGM_BTH_NAME_GC_STR
- #undef PGM_BTH_NAME_R0_STR
- #undef PGM_GST_TYPE
- #undef PGM_GST_NAME
- #undef PGM_GST_NAME_GC_STR
- #undef PGM_GST_NAME_R0_STR
-
- /* Guest - protected mode */
- #define PGM_GST_TYPE PGM_TYPE_PROT
- #define PGM_GST_NAME(name) PGM_GST_NAME_PROT(name)
- #define PGM_GST_NAME_GC_STR(name) PGM_GST_NAME_GC_PROT_STR(name)
- #define PGM_GST_NAME_R0_STR(name) PGM_GST_NAME_R0_PROT_STR(name)
- #define PGM_BTH_NAME(name) PGM_BTH_NAME_AMD64_PROT(name)
- #define PGM_BTH_NAME_GC_STR(name) PGM_BTH_NAME_GC_AMD64_PROT_STR(name)
- #define PGM_BTH_NAME_R0_STR(name) PGM_BTH_NAME_R0_AMD64_PROT_STR(name)
- #define BTH_PGMPOOLKIND_PT_FOR_PT PGMPOOLKIND_PAE_PT_FOR_PHYS
- #include "PGMBth.h"
- #undef BTH_PGMPOOLKIND_PT_FOR_PT
- #undef PGM_BTH_NAME
- #undef PGM_BTH_NAME_GC_STR
- #undef PGM_BTH_NAME_R0_STR
- #undef PGM_GST_TYPE
- #undef PGM_GST_NAME
- #undef PGM_GST_NAME_GC_STR
- #undef PGM_GST_NAME_R0_STR

  /* Guest - AMD64 mode */
…
  rc = PGM_SHW_NAME_AMD64(InitData)( pVM, pModeData, fResolveGCAndR0); AssertRCReturn(rc, rc);
  rc = PGM_GST_NAME_REAL(InitData)( pVM, pModeData, fResolveGCAndR0); AssertRCReturn(rc, rc);
- rc = PGM_BTH_NAME_AMD64_REAL(InitData)( pVM, pModeData, fResolveGCAndR0); AssertRCReturn(rc, rc);

  pModeData = &pVM->pgm.s.paModeData[pgmModeDataIndex(PGM_TYPE_AMD64, PGM_TYPE_PROT)];
…
  rc = PGM_SHW_NAME_AMD64(InitData)( pVM, pModeData, fResolveGCAndR0); AssertRCReturn(rc, rc);
  rc = PGM_GST_NAME_PROT(InitData)( pVM, pModeData, fResolveGCAndR0); AssertRCReturn(rc, rc);
- rc = PGM_BTH_NAME_AMD64_PROT(InitData)( pVM, pModeData, fResolveGCAndR0); AssertRCReturn(rc, rc);

  pModeData = &pVM->pgm.s.paModeData[pgmModeDataIndex(PGM_TYPE_AMD64, PGM_TYPE_AMD64)];
…
  rc = PGM_SHW_NAME_AMD64(InitData)( pVM, pModeData, fResolveGCAndR0); AssertRCReturn(rc, rc);
  rc = PGM_GST_NAME_AMD64(InitData)( pVM, pModeData, fResolveGCAndR0); AssertRCReturn(rc, rc);
- rc = PGM_BTH_NAME_AMD64_AMD64(InitData)(pVM, pModeData, fResolveGCAndR0); AssertRCReturn(rc, rc);

  return VINF_SUCCESS;
…
  case PGMMODE_AMD64:
  case PGMMODE_AMD64_NX:
-     rc2 = PGM_BTH_NAME_AMD64_REAL(Enter)(pVM, NIL_RTGCPHYS);
+     rc2 = PGM_BTH_NAME_PAE_REAL(Enter)(pVM, NIL_RTGCPHYS);
      break;
  default: AssertFailed(); break;
…
  case PGMMODE_AMD64:
  case PGMMODE_AMD64_NX:
-     rc2 = PGM_BTH_NAME_AMD64_PROT(Enter)(pVM, NIL_RTGCPHYS);
+     rc2 = PGM_BTH_NAME_PAE_PROT(Enter)(pVM, NIL_RTGCPHYS);
      break;
  default: AssertFailed(); break;
…
  int rc = VINF_SUCCESS;
- const unsigned c = fLongMode ? ELEMENTS(pPDPTR->a) : 4;
+ const unsigned c = fLongMode ? ELEMENTS(pPDPTR->a) : X86_PG_PAE_PDPTE_ENTRIES;
  for (unsigned i = 0; i < c; i++)
  {
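The blocks removed above are instances of the include-as-template trick used throughout PGM: a shared implementation file (PGMBth.h here, PGMAllBth.h below) is #included once per guest/shadow pairing, with macros such as PGM_GST_TYPE and PGM_BTH_NAME(name) selecting the specialization and mangling the generated function names. A minimal, self-contained sketch of the pattern (the file name bth_template.h, the type values, and the function body are invented for illustration):

    /* bth_template.h - hypothetical stand-in for PGMBth.h; it expects the
     * includer to define GST_TYPE and BTH_NAME(name), and stamps out one
     * copy of each function per inclusion. */
    int BTH_NAME(SyncCR3)(void)
    {
    #if GST_TYPE == 1   /* real-mode guest: nothing to walk */
        return 0;
    #else
        return GST_TYPE; /* placeholder work */
    #endif
    }

    /* main.c - instantiate the template twice, the way PGM.cpp does for
     * every supported guest/shadow combination. */
    #include <stdio.h>

    #define GST_TYPE 1
    #define BTH_NAME(name) Bth_PAE_Real_##name
    #include "bth_template.h"
    #undef BTH_NAME
    #undef GST_TYPE

    #define GST_TYPE 2
    #define BTH_NAME(name) Bth_PAE_Prot_##name
    #include "bth_template.h"
    #undef BTH_NAME
    #undef GST_TYPE

    int main(void)
    {
        printf("%d %d\n", Bth_PAE_Real_SyncCR3(), Bth_PAE_Prot_SyncCR3());
        return 0;
    }

Deleting the AMD64_REAL and AMD64_PROT instantiations therefore removes the corresponding pgmBthAMD64Real* / pgmBthAMD64Prot* function sets from the build entirely.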
trunk/src/VBox/VMM/PGMInternal.h
r7642 → r7676

  #define PGM_BTH_NAME_PAE_32BIT(name) PGM_CTX(pgm,BthPAE32Bit##name)
  #define PGM_BTH_NAME_PAE_PAE(name) PGM_CTX(pgm,BthPAEPAE##name)
- #define PGM_BTH_NAME_AMD64_REAL(name) PGM_CTX(pgm,BthAMD64Real##name)
- #define PGM_BTH_NAME_AMD64_PROT(name) PGM_CTX(pgm,BthAMD64Prot##name)
  #define PGM_BTH_NAME_AMD64_AMD64(name) PGM_CTX(pgm,BthAMD64AMD64##name)
  #define PGM_BTH_NAME_GC_32BIT_REAL_STR(name) "pgmGCBth32BitReal" #name
…
  #define PGM_BTH_NAME_GC_PAE_32BIT_STR(name) "pgmGCBthPAE32Bit" #name
  #define PGM_BTH_NAME_GC_PAE_PAE_STR(name) "pgmGCBthPAEPAE" #name
- #define PGM_BTH_NAME_GC_AMD64_REAL_STR(name) "pgmGCBthAMD64Real" #name
- #define PGM_BTH_NAME_GC_AMD64_PROT_STR(name) "pgmGCBthAMD64Prot" #name
  #define PGM_BTH_NAME_GC_AMD64_AMD64_STR(name) "pgmGCBthAMD64AMD64" #name
  #define PGM_BTH_NAME_R0_32BIT_REAL_STR(name) "pgmR0Bth32BitReal" #name
…
  #define PGM_BTH_NAME_R0_PAE_32BIT_STR(name) "pgmR0BthPAE32Bit" #name
  #define PGM_BTH_NAME_R0_PAE_PAE_STR(name) "pgmR0BthPAEPAE" #name
- #define PGM_BTH_NAME_R0_AMD64_REAL_STR(name) "pgmR0BthAMD64Real" #name
- #define PGM_BTH_NAME_R0_AMD64_PROT_STR(name) "pgmR0BthAMD64Prot" #name
  #define PGM_BTH_NAME_R0_AMD64_AMD64_STR(name) "pgmR0BthAMD64AMD64" #name
  #define PGM_BTH_DECL(type, name) PGM_CTX_DECL(type) PGM_BTH_NAME(name)
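Each guest/shadow combination gets both a token-pasting macro (for direct calls) and _GC_/_R0_ string macros, because the guest-context and ring-0 entry points are resolved by symbol name rather than linked directly. A hypothetical reduction of why the two forms must be kept in lockstep (the simplified macro names and the by-name lookup comment are assumptions, not the real VMM API):

    #include <stdio.h>

    #define BTH_NAME_PAE_PAE(name)        pgmBthPAEPAE##name     /* token  */
    #define BTH_NAME_GC_PAE_PAE_STR(name) "pgmGCBthPAEPAE" #name /* string */

    static int pgmBthPAEPAESyncCR3(void) { return 0; }

    int main(void)
    {
        int (*pfn)(void) = BTH_NAME_PAE_PAE(SyncCR3);  /* linked directly */
        /* The string form would be handed to a by-name symbol lookup
         * when resolving the GC/R0 images: */
        printf("resolve \"%s\" -> %d\n",
               BTH_NAME_GC_PAE_PAE_STR(SyncCR3), pfn());
        return 0;
    }

Removing the AMD64_REAL/AMD64_PROT rows keeps all three name tables consistent with the instantiations that still exist.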
trunk/src/VBox/VMM/VMMAll/PGMAll.cpp
r7629 → r7676

  #include "PGMAllShw.h"

- /* Guest - real mode */
- #define PGM_GST_TYPE PGM_TYPE_REAL
- #define PGM_GST_NAME(name) PGM_GST_NAME_REAL(name)
- #define PGM_BTH_NAME(name) PGM_BTH_NAME_AMD64_REAL(name)
- #define BTH_PGMPOOLKIND_PT_FOR_PT PGMPOOLKIND_PAE_PT_FOR_PHYS
- #include "PGMAllBth.h"
- #undef BTH_PGMPOOLKIND_PT_FOR_PT
- #undef PGM_BTH_NAME
- #undef PGM_GST_NAME
- #undef PGM_GST_TYPE
-
- /* Guest - protected mode */
- #define PGM_GST_TYPE PGM_TYPE_PROT
- #define PGM_GST_NAME(name) PGM_GST_NAME_PROT(name)
- #define PGM_BTH_NAME(name) PGM_BTH_NAME_AMD64_PROT(name)
- #define BTH_PGMPOOLKIND_PT_FOR_PT PGMPOOLKIND_PAE_PT_FOR_PHYS
- #include "PGMAllBth.h"
- #undef BTH_PGMPOOLKIND_PT_FOR_PT
- #undef PGM_BTH_NAME
- #undef PGM_GST_TYPE
- #undef PGM_GST_NAME

  /* Guest - AMD64 mode */
  #define PGM_GST_TYPE PGM_TYPE_AMD64
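With these instantiations gone, a real-mode or protected-mode guest runs on the PAE shadow instantiations instead (see the switch to PGM_BTH_NAME_PAE_REAL/PGM_BTH_NAME_PAE_PROT in PGM.cpp above). The pairing rule that the new #error guards in PGMAllBth.h spell out can be summarized as follows (the enum values and helper are illustrative, not VBox API):

    enum GstMode { GST_REAL, GST_PROT, GST_32BIT, GST_PAE, GST_AMD64 };
    enum ShwMode { SHW_32BIT, SHW_PAE, SHW_AMD64 };

    /* Which shadow modes may back a given guest mode after this change. */
    static int isValidPairing(enum GstMode gst, enum ShwMode shw)
    {
        switch (gst)
        {
            case GST_REAL:
            case GST_PROT:   /* unpaged guests: 32-bit or PAE shadow */
            case GST_32BIT:  /* 32-bit paged guests: same rule */
                return shw == SHW_32BIT || shw == SHW_PAE;
            case GST_PAE:    /* PAE guest implies PAE shadow */
                return shw == SHW_PAE;
            case GST_AMD64:  /* AMD64 guest implies AMD64 shadow, and vice versa */
                return shw == SHW_AMD64;
        }
        return 0;
    }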
trunk/src/VBox/VMM/VMMAll/PGMAllBth.h
r7668 → r7676

+ /* Filter out some illegal combinations of guest and shadow paging, so we can remove redundant checks inside functions. */
+ #if PGM_GST_TYPE == PGM_TYPE_PAE && PGM_SHW_TYPE != PGM_TYPE_PAE
+ # error "Invalid combination; PAE guest implies PAE shadow"
+ #endif
+
+ #if (PGM_GST_TYPE == PGM_TYPE_REAL || PGM_GST_TYPE == PGM_TYPE_PROT) \
+     && !(PGM_SHW_TYPE == PGM_TYPE_32BIT || PGM_SHW_TYPE == PGM_TYPE_PAE)
+ # error "Invalid combination; real or protected mode without paging implies 32 bits or PAE shadow paging."
+ #endif
+
+ #if (PGM_GST_TYPE == PGM_TYPE_32BIT || PGM_GST_TYPE == PGM_TYPE_PAE) \
+     && !(PGM_SHW_TYPE == PGM_TYPE_32BIT || PGM_SHW_TYPE == PGM_TYPE_PAE)
+ # error "Invalid combination; 32 bits guest paging or PAE implies 32 bits or PAE shadow paging."
+ #endif
+
+ #if (PGM_GST_TYPE == PGM_TYPE_AMD64 && PGM_SHW_TYPE != PGM_TYPE_AMD64) \
+     || (PGM_SHW_TYPE == PGM_TYPE_AMD64 && PGM_GST_TYPE != PGM_TYPE_AMD64)
+ # error "Invalid combination; AMD64 guest implies AMD64 shadow and vice versa"
+ #endif

  /**
   * #PF Handler for raw-mode guest execution.
…
  #if (PGM_GST_TYPE == PGM_TYPE_32BIT || PGM_GST_TYPE == PGM_TYPE_REAL || PGM_GST_TYPE == PGM_TYPE_PROT || PGM_GST_TYPE == PGM_TYPE_PAE) && PGM_SHW_TYPE != PGM_TYPE_AMD64
-
- # if PGM_SHW_TYPE != PGM_TYPE_32BIT && PGM_SHW_TYPE != PGM_TYPE_PAE
- # error "32-bit guest mode is only implemented for 32-bit and PAE shadow modes."
- # endif

  # if PGM_SHW_TYPE == PGM_TYPE_PAE && PGM_GST_TYPE != PGM_TYPE_PAE
…
  LogFlow(("InvalidatePage %x\n", GCPtrPage));
- # if PGM_SHW_TYPE == PGM_TYPE_32BIT || PGM_SHW_TYPE == PGM_TYPE_PAE
  /*
   * Get the shadow PD entry and skip out if this PD isn't present.
…
  return rc;

- # elif PGM_SHW_TYPE == PGM_TYPE_AMD64
- # error "Guest 32-bit mode and shadow AMD64 mode doesn't add up!"
- # endif
- return VINF_SUCCESS;
-
  #elif PGM_GST_TYPE == PGM_TYPE_AMD64
- # if PGM_SHW_TYPE == PGM_TYPE_AMD64
  //# error not implemented
  return VERR_INTERNAL_ERROR;

- # else /* PGM_SHW_TYPE != PGM_TYPE_AMD64 */
- # error "Guest AMD64 mode, but not the shadow mode - that can't be right!"
- # endif /* PGM_SHW_TYPE != PGM_TYPE_AMD64 */
-
  #else /* guest real and protected mode */
- /* There's no such thing when paging is disabled. */
+ /* There's no such thing as InvalidatePage when paging is disabled, so just ignore. */
  return VINF_SUCCESS;
  #endif
…
  #if PGM_GST_TYPE == PGM_TYPE_32BIT \
      || PGM_GST_TYPE == PGM_TYPE_PAE
-
- # if PGM_SHW_TYPE != PGM_TYPE_32BIT && PGM_SHW_TYPE != PGM_TYPE_PAE
- # error "Invalid shadow mode for 32-bit guest mode!"
- # endif

  /*
…
      || PGM_GST_TYPE == PGM_TYPE_PAE

- # if PGM_SHW_TYPE != PGM_TYPE_32BIT && PGM_SHW_TYPE != PGM_TYPE_PAE
- # error "Invalid shadow mode for 32-bit guest mode!"
- # endif
-
  /*
   * Validate input a little bit.
…
          GCPhys = PdeSrc.u & GST_PDE_PG_MASK;
  # if PGM_SHW_TYPE == PGM_TYPE_PAE && PGM_GST_TYPE == PGM_TYPE_32BIT
+         /* Select the right PDE as we're emulating a 4kb page table with 2 shadow page tables. */
          GCPhys |= (iPDDst & 1) * (PAGE_SIZE / 2);
  # endif
…
          GCPhys = PdeSrc.u & GST_PDE_BIG_PG_MASK;
  # if PGM_SHW_TYPE == PGM_TYPE_PAE && PGM_GST_TYPE == PGM_TYPE_32BIT
-         GCPhys |= GCPtrPage & RT_BIT(X86_PAGE_2M_SHIFT);
+         /* Select the right PDE as we're emulating a 4MB page directory with two 2 MB shadow PDEs.*/
+         GCPhys |= GCPtrPage & (1 << X86_PD_PAE_SHIFT);
  # endif
      rc = pgmPoolAlloc(pVM, GCPhys, BTH_PGMPOOLKIND_PT_FOR_BIG, SHW_POOL_ROOT_IDX, iPDDst, &pShwPage);
…
      PdeDst.u = pShwPage->Core.Key
               | (PdeSrc.u & ~(X86_PDE_PAE_PG_MASK | X86_PDE_AVL_MASK | X86_PDE_PCD | X86_PDE_PWT | X86_PDE_PS | X86_PDE4M_G | X86_PDE4M_D));
- # ifdef PGM_SYNC_DIRTY_BIT /* (see explanation and assumtions further down.) */
+ # ifdef PGM_SYNC_DIRTY_BIT /* (see explanation and assumptions further down.) */
      if (!PdeSrc.b.u1Dirty && PdeSrc.b.u1Write)
      {
          PdeDst.b.u1Write = 0;
      }
  # endif
  }
  *pPdeDst = PdeDst;
…
  #else /* PGM_GST_TYPE != PGM_TYPE_32BIT */
  AssertReleaseMsgFailed(("Shw=%d Gst=%d is not implemented!\n", PGM_GST_TYPE, PGM_SHW_TYPE));
  STAM_PROFILE_STOP(&pVM->pgm.s.CTXMID(Stat,SyncPT), a);
…
  PGM_BTH_DECL(int, PrefetchPage)(PVM pVM, RTGCUINTPTR GCPtrPage)
  {
- #if (PGM_GST_TYPE == PGM_TYPE_32BIT || PGM_GST_TYPE == PGM_TYPE_REAL || PGM_GST_TYPE == PGM_TYPE_PROT) && PGM_SHW_TYPE != PGM_TYPE_AMD64
-
- # if PGM_SHW_TYPE != PGM_TYPE_32BIT && PGM_SHW_TYPE != PGM_TYPE_PAE
- # error "Invalid shadow mode for 32-bit guest mode!"
- # endif
-
+ #if (PGM_GST_TYPE == PGM_TYPE_32BIT || PGM_GST_TYPE == PGM_TYPE_REAL || PGM_GST_TYPE == PGM_TYPE_PROT || PGM_GST_TYPE == PGM_TYPE_PAE) && PGM_SHW_TYPE != PGM_TYPE_AMD64
  /*
   * Check that all Guest levels thru the PDE are present, getting the
…
  PGSTPD pPDSrc = CTXSUFF(pVM->pgm.s.pGuestPD);
  # else /* PAE */
  unsigned iPDSrc;
  PGSTPD pPDSrc = pgmGstGetPaePDPtr(&pVM->pgm.s, GCPtrPage, &iPDSrc);
  # endif
+ const GSTPDE PdeSrc = pPDSrc->a[iPDSrc];
  # else
  PGSTPD pPDSrc = NULL;
  const unsigned iPDSrc = 0;
- # endif
-
- # if PGM_WITH_PAGING(PGM_GST_TYPE)
- const GSTPDE PdeSrc = pPDSrc->a[iPDSrc];
- # else
  GSTPDE PdeSrc;
+
  PdeSrc.au32[0] = 0; /* faked so we don't have to #ifdef everything */
  PdeSrc.n.u1Present = 1;
…
  #if (PGM_GST_TYPE == PGM_TYPE_32BIT || PGM_GST_TYPE == PGM_TYPE_REAL || PGM_GST_TYPE == PGM_TYPE_PROT || PGM_GST_TYPE == PGM_TYPE_PAE) && PGM_SHW_TYPE != PGM_TYPE_AMD64

- # if PGM_SHW_TYPE != PGM_TYPE_32BIT && PGM_SHW_TYPE != PGM_TYPE_PAE
- # error "Invalid shadow mode for 32-bit guest mode!"
- # endif
-
  # ifndef IN_RING0
  if (!(fPage & X86_PTE_US))
  {
…
      CSAMMarkPage(pVM, (RTGCPTR)GCPtrPage, true);
  }
  # endif
  /*
   * Get guest PD and index.
…
- #if PGM_GST_TYPE == PGM_TYPE_32BIT
+ #if PGM_GST_TYPE == PGM_TYPE_32BIT || PGM_GST_TYPE == PGM_TYPE_PAE
  # if PGM_SHW_TYPE == PGM_TYPE_32BIT || PGM_SHW_TYPE == PGM_TYPE_PAE
  /**
…
  PGM_BTH_DECL(int, SyncCR3)(PVM pVM, uint32_t cr0, uint32_t cr3, uint32_t cr4, bool fGlobal)
  {
- #if PGM_GST_TYPE == PGM_TYPE_32BIT
- # if PGM_SHW_TYPE == PGM_TYPE_32BIT || PGM_SHW_TYPE == PGM_TYPE_PAE
  if (VM_FF_ISSET(pVM, VM_FF_PGM_SYNC_CR3))
      fGlobal = true; /* Change this CR3 reload to be a global one. */
- # endif
- #endif

  /*
…
  MY_STAM_COUNTER_INC(fGlobal ? &pVM->pgm.s.CTXMID(Stat,SyncCR3Global) : &pVM->pgm.s.CTXMID(Stat,SyncCR3NotGlobal));

- #if PGM_GST_TYPE == PGM_TYPE_32BIT
- # if PGM_SHW_TYPE == PGM_TYPE_32BIT || PGM_SHW_TYPE == PGM_TYPE_PAE
+ #if PGM_GST_TYPE == PGM_TYPE_32BIT || PGM_GST_TYPE == PGM_TYPE_PAE
  /*
   * Get page directory addresses.
…
  # endif

  # if PGM_GST_TYPE == PGM_TYPE_32BIT
  PGSTPD pPDSrc = CTXSUFF(pVM->pgm.s.pGuestPD);
- # else /* PAE */
- PGSTPD pPDSrc = pgmGstGetPaePDPtr(&pVM->pgm.s, 0);
- # endif
-
  Assert(pPDSrc);
  # ifndef IN_GC
  Assert(MMPhysGCPhys2HCVirt(pVM, (RTGCPHYS)(cr3 & GST_CR3_PAGE_MASK), sizeof(*pPDSrc)) == pPDSrc);
  # endif
+ # endif
…
      iPdNoMapping = ~0U;
  }

- for (unsigned iPD = 0; iPD < ELEMENTS(pPDSrc->a); iPD++)
+ # if PGM_GST_TYPE == PGM_TYPE_PAE
+ for (unsigned iPDPTRE = 0; iPDPTRE < X86_PG_PAE_PDPTE_ENTRIES; iPDPTRE++)
+ # elif PGM_GST_TYPE == PGM_TYPE_AMD64
+ for (unsigned iPDPTRE = 0; iPDPTRE < X86_PG_AMD64_PDPTE_ENTRIES; iPDPTRE++)
+ # endif
  {
+ # if PGM_GST_TYPE == PGM_TYPE_PAE || PGM_GST_TYPE == PGM_TYPE_AMD64
+     unsigned iPDSrc;
+ # if PGM_SHW_TYPE == PGM_TYPE_PAE
+     PX86PDPAE pPDPAE = pVM->pgm.s.CTXMID(ap,PaePDs)[iPDPTRE * X86_PG_PAE_ENTRIES];
+ # else
+     AssertFailed(); /* @todo */
+     PX86PDPE pPDPAE = pVM->pgm.s.CTXMID(ap,PaePDs)[iPDPTRE * X86_PG_AMD64_ENTRIES];
+ # endif
+     PX86PDEPAE pPDEDst = &pPDPAE->a[0];
+     PGSTPD pPDSrc = pgmGstGetPaePDPtr(&pVM->pgm.s, iPDPTRE << X86_PDPTR_SHIFT, &iPDSrc);
+
+     if (pPDSrc == NULL)
+     {
+         /* PDPTR not present */
+         pVM->pgm.s.CTXMID(p,PaePDPTR)->a[iPDPTRE].n.u1Present = 0;
+         continue;
+     }
+ # endif /* if PGM_GST_TYPE == PGM_TYPE_PAE || PGM_GST_TYPE == PGM_TYPE_AMD64 */
+     for (unsigned iPD = 0; iPD < ELEMENTS(pPDSrc->a); iPD++)
+     {
  # if PGM_SHW_TYPE == PGM_TYPE_32BIT
          Assert(&pVM->pgm.s.CTXMID(p,32BitPD)->a[iPD] == pPDEDst);
  # else
          Assert(&pVM->pgm.s.CTXMID(ap,PaePDs)[iPD * 2 / 512]->a[iPD * 2 % 512] == pPDEDst);
  # endif
          register GSTPDE PdeSrc = pPDSrc->a[iPD];
          if (    PdeSrc.n.u1Present
              && (PdeSrc.n.u1User || fRawR0Enabled))
          {
  # if PGM_GST_TYPE == PGM_TYPE_32BIT
              /*
               * Check for conflicts with GC mappings.
               */
              if (iPD == iPdNoMapping)
              {
                  if (pVM->pgm.s.fMappingsFixed)
                  {
                      /* It's fixed, just skip the mapping. */
                      const unsigned cPTs = pMapping->cPTs;
                      iPD += cPTs - 1;
                      pPDEDst += cPTs + (PGM_SHW_TYPE != PGM_TYPE_32BIT) * cPTs;
                      pMapping = pMapping->CTXALLSUFF(pNext);
                      iPdNoMapping = pMapping ? pMapping->GCPtr >> X86_PD_SHIFT : ~0U;
                      continue;
                  }
  # ifdef IN_RING3
                  int rc = pgmR3SyncPTResolveConflict(pVM, pMapping, pPDSrc, iPD);
                  if (VBOX_FAILURE(rc))
                      return rc;

                  /*
                   * Update iPdNoMapping and pMapping.
                   */
                  pMapping = pVM->pgm.s.pMappingsR3;
                  while (pMapping && pMapping->GCPtr < (iPD << X86_PD_SHIFT))
                      pMapping = pMapping->pNextR3;
                  iPdNoMapping = pMapping ? pMapping->GCPtr >> X86_PD_SHIFT : ~0U;
  # else
                  LogFlow(("SyncCR3: detected conflict -> VINF_PGM_SYNC_CR3\n"));
                  return VINF_PGM_SYNC_CR3;
  # endif
              }
  # else /* PGM_GST_TYPE == PGM_TYPE_32BIT */
              /* PAE and AMD64 modes are hardware accelerated only, so there are no mappings. */
              Assert(iPD != iPdNoMapping);
  # endif /* PGM_GST_TYPE == PGM_TYPE_32BIT */
              /*
               * Sync page directory entry.
               *
               * The current approach is to allocated the page table but to set
               * the entry to not-present and postpone the page table synching till
               * it's actually used.
               */
- # if PGM_SHW_TYPE == PGM_TYPE_32BIT
-             const unsigned iPdShw = iPD; NOREF(iPdShw);
- # else
-             for (unsigned i = 0, iPdShw = iPD * 2; i < 2; i++, iPdShw++) /* pray that the compiler unrolls this */
- # endif
+ # if PGM_SHW_TYPE == PGM_TYPE_PAE && PGM_GST_TYPE == PGM_TYPE_32BIT
+             for (unsigned i = 0, iPdShw = iPD * 2; i < 2; i++, iPdShw++) /* pray that the compiler unrolls this */
+ # else
+             const unsigned iPdShw = iPD; NOREF(iPdShw);
+ # endif
              {
                  SHWPDE PdeDst = *pPDEDst;
                  if (PdeDst.n.u1Present)
                  {
                      PPGMPOOLPAGE pShwPage = pgmPoolGetPage(pPool, PdeDst.u & SHW_PDE_PG_MASK);
                      RTGCPHYS GCPhys;
                      if (    !PdeSrc.b.u1Size
                          || !(cr4 & X86_CR4_PSE))
                      {
                          GCPhys = PdeSrc.u & GST_PDE_PG_MASK;
  # if PGM_SHW_TYPE == PGM_TYPE_PAE && PGM_GST_TYPE == PGM_TYPE_32BIT
+                         /* Select the right PDE as we're emulating a 4kb page table with 2 shadow page tables. */
                          GCPhys |= i * (PAGE_SIZE / 2);
  # endif
                      }
                      else
                      {
                          GCPhys = PdeSrc.u & GST_PDE_BIG_PG_MASK;
  # if PGM_SHW_TYPE == PGM_TYPE_PAE && PGM_GST_TYPE == PGM_TYPE_32BIT
+                         /* Select the right PDE as we're emulating a 4MB page directory with two 2 MB shadow PDEs.*/
                          GCPhys |= i * X86_PAGE_2M_SIZE;
  # endif
                      }

                      if (    pShwPage->GCPhys == GCPhys
                          &&  pShwPage->enmKind == PGM_BTH_NAME(CalcPageKind)(&PdeSrc, cr4)
                          &&  (   pShwPage->fCached
                               || (   !fGlobal
                                   && (   false
  # ifdef PGM_SKIP_GLOBAL_PAGEDIRS_ON_NONGLOBAL_FLUSH
                                       || (   (PdeSrc.u & (X86_PDE4M_PS | X86_PDE4M_G)) == (X86_PDE4M_PS | X86_PDE4M_G)
                                           && (cr4 & (X86_CR4_PGE | X86_CR4_PSE)) == (X86_CR4_PGE | X86_CR4_PSE)) /* global 2/4MB page. */
                                       || (   !pShwPage->fSeenNonGlobal
                                           && (cr4 & X86_CR4_PGE))
  # endif
                                      )
                                  )
                              )
                          &&  (   (PdeSrc.u & (X86_PDE_US | X86_PDE_RW)) == (PdeDst.u & (X86_PDE_US | X86_PDE_RW))
                               || (   (cr4 & X86_CR4_PSE)
                                   &&     ((PdeSrc.u & (X86_PDE_US | X86_PDE4M_PS | X86_PDE4M_D)) | PGM_PDFLAGS_TRACK_DIRTY)
                                       == ((PdeDst.u & (X86_PDE_US | X86_PDE_RW | PGM_PDFLAGS_TRACK_DIRTY)) | X86_PDE4M_PS))
                              )
                         )
                      {
  # ifdef VBOX_WITH_STATISTICS
                          if (    !fGlobal
                              &&  (PdeSrc.u & (X86_PDE4M_PS | X86_PDE4M_G)) == (X86_PDE4M_PS | X86_PDE4M_G)
                              &&  (cr4 & (X86_CR4_PGE | X86_CR4_PSE)) == (X86_CR4_PGE | X86_CR4_PSE))
                              MY_STAM_COUNTER_INC(&pVM->pgm.s.CTXMID(Stat,SyncCR3DstSkippedGlobalPD));
                          else if (!fGlobal && !pShwPage->fSeenNonGlobal && (cr4 & X86_CR4_PGE))
                              MY_STAM_COUNTER_INC(&pVM->pgm.s.CTXMID(Stat,SyncCR3DstSkippedGlobalPT));
                          else
                              MY_STAM_COUNTER_INC(&pVM->pgm.s.CTXMID(Stat,SyncCR3DstCacheHit));
  # endif /* VBOX_WITH_STATISTICS */
                          /** @todo a replacement strategy isn't really needed unless we're using a very small pool < 512 pages.
                           * The whole ageing stuff should be put in yet another set of #ifdefs. For now, let's just skip it. */
                          //# ifdef PGMPOOL_WITH_CACHE
                          //    pgmPoolCacheUsed(pPool, pShwPage);
                          //# endif
                      }
                      else
                      {
                          pgmPoolFreeByPage(pPool, pShwPage, SHW_POOL_ROOT_IDX, iPdShw);
                          pPDEDst->u = 0;
                          MY_STAM_COUNTER_INC(&pVM->pgm.s.CTXMID(Stat,SyncCR3DstFreed));
                      }
                  }
                  else
                      MY_STAM_COUNTER_INC(&pVM->pgm.s.CTXMID(Stat,SyncCR3DstNotPresent));
                  pPDEDst++;
              }
          }
          else if (iPD != iPdNoMapping)
          {
              /*
               * Check if there is any page directory to mark not present here.
               */
  # if PGM_SHW_TYPE == PGM_TYPE_32BIT
              const unsigned iPdShw = iPD; NOREF(iPdShw);
  # else
              for (unsigned i = 0, iPdShw = iPD * 2; i < 2; i++, iPdShw++) /* pray that the compiler unrolls this */
  # endif
              {
                  if (pPDEDst->n.u1Present)
                  {
                      pgmPoolFreeByPage(pPool, pgmPoolGetPage(pPool, pPDEDst->u & SHW_PDE_PG_MASK), SHW_POOL_ROOT_IDX, iPdShw);
                      pPDEDst->u = 0;
                      MY_STAM_COUNTER_INC(&pVM->pgm.s.CTXMID(Stat,SyncCR3DstFreedSrcNP));
                  }
                  pPDEDst++;
              }
          }
          else
          {
  # if PGM_GST_TYPE == PGM_TYPE_32BIT
              Assert(pgmMapAreMappingsEnabled(&pVM->pgm.s));
              const unsigned cPTs = pMapping->cPTs;
              if (pVM->pgm.s.fMappingsFixed)
              {
                  /* It's fixed, just skip the mapping. */
                  pMapping = pMapping->CTXALLSUFF(pNext);
                  iPdNoMapping = pMapping ? pMapping->GCPtr >> X86_PD_SHIFT : ~0U;
              }
              else
              {
                  /*
                   * Check for conflicts for subsequent pagetables
                   * and advance to the next mapping.
                   */
                  iPdNoMapping = ~0U;
                  unsigned iPT = cPTs;
                  while (iPT-- > 1)
                  {
                      if (    pPDSrc->a[iPD + iPT].n.u1Present
                          &&  (pPDSrc->a[iPD + iPT].n.u1User || fRawR0Enabled))
                      {
  # ifdef IN_RING3
                          int rc = pgmR3SyncPTResolveConflict(pVM, pMapping, pPDSrc, iPD);
                          if (VBOX_FAILURE(rc))
                              return rc;

                          /*
                           * Update iPdNoMapping and pMapping.
                           */
                          pMapping = pVM->pgm.s.CTXALLSUFF(pMappings);
                          while (pMapping && pMapping->GCPtr < (iPD << X86_PD_SHIFT))
                              pMapping = pMapping->CTXALLSUFF(pNext);
                          iPdNoMapping = pMapping ? pMapping->GCPtr >> X86_PD_SHIFT : ~0U;
                          break;
  # else
                          LogFlow(("SyncCR3: detected conflict -> VINF_PGM_SYNC_CR3\n"));
                          return VINF_PGM_SYNC_CR3;
  # endif
                      }
                  }
                  if (iPdNoMapping == ~0U && pMapping)
                  {
                      pMapping = pMapping->CTXALLSUFF(pNext);
                      if (pMapping)
                          iPdNoMapping = pMapping->GCPtr >> X86_PD_SHIFT;
                  }
              }

              /* advance. */
              iPD += cPTs - 1;
              pPDEDst += cPTs + (PGM_SHW_TYPE != PGM_TYPE_32BIT) * cPTs;
  # else /* PGM_GST_TYPE == PGM_TYPE_32BIT */
              /* PAE and AMD64 modes are hardware accelerated only, so there are no mappings. */
…
  # endif
          }
      } /* for iPD */
+ } /* for each PDPTE (PAE) */
- # elif PGM_SHW_TYPE == PGM_TYPE_AMD64
- # error "Guest 32-bit mode and shadow AMD64 mode doesn't add up!"
- # endif

  return VINF_SUCCESS;

- #elif PGM_GST_TYPE == PGM_TYPE_PAE
- # if PGM_SHW_TYPE == PGM_TYPE_PAE
- //# error not implemented
- return VERR_INTERNAL_ERROR;
-
- # else /* PGM_SHW_TYPE != PGM_TYPE_AMD64 */
- # error "Guest PAE mode, but not the shadow mode ; 32bit - maybe, but amd64 no."
- # endif /* PGM_SHW_TYPE != PGM_TYPE_AMD64 */

  #elif PGM_GST_TYPE == PGM_TYPE_AMD64
…
  #else /* guest real and protected mode */
-
  return VINF_SUCCESS;
  #endif
…
  #if PGM_GST_TYPE == PGM_TYPE_32BIT
-
- # if PGM_SHW_TYPE != PGM_TYPE_32BIT && PGM_SHW_TYPE != PGM_TYPE_PAE
- # error "Invalid shadow mode for 32-bit guest paging."
- # endif
-
  PPGM pPGM = &pVM->pgm.s;
  RTHCPHYS HCPhysShw; /* page address derived from the shadow page tables. */
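The new outer loop in SyncCR3 walks the four PAE page-directory-pointer entries, marks any PDPTE whose page directory is absent as not-present and skips it, and only then runs the familiar per-PDE loop. The index arithmetic it relies on, shown with locally defined constants that mirror (but are not) the real X86_* macros:

    #include <stdio.h>
    #include <stdint.h>

    #define PDPTR_SHIFT        30    /* cf. X86_PDPTR_SHIFT */
    #define PD_PAE_SHIFT       21    /* cf. X86_PD_PAE_SHIFT */
    #define PD_PAE_MASK        0x1ff
    #define PAE_PDPTE_ENTRIES  4     /* cf. X86_PG_PAE_PDPTE_ENTRIES */
    #define PAE_PD_ENTRIES     512   /* cf. X86_PG_PAE_ENTRIES */

    int main(void)
    {
        uint32_t GCPtr = 0xc0400000; /* sample guest address */
        unsigned iPdpte = (GCPtr >> PDPTR_SHIFT) & (PAE_PDPTE_ENTRIES - 1);
        unsigned iPd    = (GCPtr >> PD_PAE_SHIFT) & PD_PAE_MASK;
        printf("PDPTE %u, PDE %u (of %u PDEs total)\n",
               iPdpte, iPd, PAE_PDPTE_ENTRIES * PAE_PD_ENTRIES);
        return 0; /* prints: PDPTE 3, PDE 2 (of 2048 PDEs total) */
    }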
trunk/src/VBox/VMM/VMMAll/PGMAllGst.h
r7666 → r7676

  # define GST_PD_SHIFT X86_PD_PAE_SHIFT
  # define GST_PD_MASK X86_PD_PAE_MASK
- # define GST_TOTAL_PD_ENTRIES (X86_PG_PAE_ENTRIES*4)
+ # if PGM_GST_TYPE == PGM_TYPE_PAE
+ #  define GST_TOTAL_PD_ENTRIES (X86_PG_PAE_ENTRIES * X86_PG_PAE_PDPTE_ENTRIES)
+ # else
+ #  define GST_TOTAL_PD_ENTRIES (X86_PG_AMD64_ENTRIES * X86_PG_AMD64_PDPTE_ENTRIES)
+ # endif
  # define GST_PTE_PG_MASK X86_PTE_PAE_PG_MASK
  # define GST_PT_SHIFT X86_PT_PAE_SHIFT
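The split definition makes the totals explicit: a PAE guest has 4 PDPTEs of 512 PDEs each, while an AMD64 guest has 512 PDPT entries of 512 PDEs each (per page-directory-pointer table, under a single PML4 entry; that framing is our reading of the constants). A two-line check of the arithmetic:

    #include <stdio.h>

    int main(void)
    {
        printf("PAE:   %d PDEs\n", 512 * 4);    /* 2048 */
        printf("AMD64: %d PDEs\n", 512 * 512);  /* 262144, per PDPT */
        return 0;
    }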
trunk/src/VBox/VMM/VMMAll/PGMAllPool.cpp
r6829 → r7676

  case PGMPOOLKIND_ROOT_PAE_PD:
-     for (unsigned iPage = 0; iPage < X86_PG_PAE_ENTRIES * 4; iPage++)
+     for (unsigned iPage = 0; iPage < X86_PG_PAE_ENTRIES * X86_PG_PAE_PDPTE_ENTRIES; iPage++)
          if ((u.pau64[iPage] & (PGM_PDFLAGS_MAPPING | X86_PDE_P)) == X86_PDE_P)
              u.pau64[iPage] = 0;
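The loop clears only plain present PDEs and leaves hypervisor-mapping entries alone; the multiplier change merely spells the 2048-entry bound with the new constant. A minimal reproduction of the bit test (the flag value below is illustrative only; the real PGM_PDFLAGS_MAPPING is defined in PGMInternal.h):

    #include <assert.h>
    #include <stdint.h>

    #define X86_PDE_P           1u      /* present bit */
    #define PGM_PDFLAGS_MAPPING 0x400u  /* illustrative value, not the real one */

    static int shouldClear(uint64_t u)
    {
        return (u & (PGM_PDFLAGS_MAPPING | X86_PDE_P)) == X86_PDE_P;
    }

    int main(void)
    {
        assert( shouldClear(X86_PDE_P));                       /* plain PDE  */
        assert(!shouldClear(X86_PDE_P | PGM_PDFLAGS_MAPPING)); /* mapping    */
        assert(!shouldClear(0));                               /* not present */
        return 0;
    }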
trunk/src/VBox/VMM/VMMAll/PGMAllShw.h
r5999 → r7676

  # define SHW_PDPTR_SHIFT X86_PDPTR_SHIFT
  # define SHW_PDPTR_MASK X86_PDPTR_MASK_32
- # define SHW_TOTAL_PD_ENTRIES (X86_PG_PAE_ENTRIES*4)
+ # define SHW_TOTAL_PD_ENTRIES (X86_PG_PAE_ENTRIES*X86_PG_PAE_PDPTE_ENTRIES)
  # define SHW_POOL_ROOT_IDX PGMPOOL_IDX_PAE_PD
  #endif