Changeset 13060 in vbox

Timestamp: Oct 8, 2008 7:42:06 AM (16 years ago)
svn:sync-xref-src-repo-rev: 37532
Location: trunk
Files: 14 edited
trunk/include/VBox/pgm.h
r13045 → r13060

@@ -438 +438 @@
 #endif /* VBOX_STRICT */

+#if defined(IN_GC) || defined(VBOX_WITH_2X_4GB_ADDR_SPACE)
+VMMDECL(int)    PGMDynMapGCPage(PVM pVM, RTGCPHYS GCPhys, void **ppv);
+VMMDECL(int)    PGMDynMapGCPageOff(PVM pVM, RTGCPHYS GCPhys, void **ppv);
+VMMDECL(int)    PGMDynMapHCPage(PVM pVM, RTHCPHYS HCPhys, void **ppv);
+VMMDECL(int)    PGMDynMapHCPageOff(PVM pVM, RTHCPHYS HCPhys, void **ppv);
+#endif
+
 #ifdef IN_GC

@@ -444 +451 @@
  * @{
  */
-VMMRCDECL(int)  PGMGCDynMapGCPage(PVM pVM, RTGCPHYS GCPhys, void **ppv);
-VMMRCDECL(int)  PGMGCDynMapGCPageEx(PVM pVM, RTGCPHYS GCPhys, void **ppv);
-VMMRCDECL(int)  PGMGCDynMapHCPage(PVM pVM, RTHCPHYS HCPhys, void **ppv);
 VMMRCDECL(int)  PGMGCInvalidatePage(PVM pVM, RTGCPTR GCPtrPage);
 /** @} */

@@ -459 +463 @@
 VMMR0DECL(int)  PGMR0PhysAllocateHandyPages(PVM pVM);
 VMMR0DECL(int)  PGMR0Trap0eHandlerNestedPaging(PVM pVM, PGMMODE enmShwPagingMode, RTGCUINT uErr, PCPUMCTXCORE pRegFrame, RTGCPHYS pvFault);
-# ifdef VBOX_WITH_2X_4GB_ADDR_SPACE
-VMMR0DECL(int)  PGMR0DynMapGCPage(PVM pVM, RTGCPHYS GCPhys, void **ppv);
-VMMR0DECL(int)  PGMR0DynMapGCPageEx(PVM pVM, RTGCPHYS GCPhys, void **ppv);
-VMMR0DECL(int)  PGMR0DynMapHCPage(PVM pVM, RTHCPHYS HCPhys, void **ppv);
-# endif
 /** @} */
 #endif /* IN_RING0 */
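Editorial note: the header change collapses the context-specific entry points (raw-mode PGMGCDynMap*, darwin ring-0 PGMR0DynMap*) into one context-agnostic PGMDynMap* family. A minimal caller sketch, not part of the changeset (the GCPhys masking and logging shown are just the usual VBox idioms):

    /* Hypothetical caller: peek at the first dword of a guest page from either
     * raw-mode or darwin ring-0. The mapping is transient: the 8-page dynamic
     * area recycles slots, so use the pointer immediately and never cache it. */
    void *pv;
    int rc = PGMDynMapGCPage(pVM, GCPhys & ~(RTGCPHYS)PAGE_OFFSET_MASK, &pv);
    if (RT_SUCCESS(rc))
        LogFlow(("first dword of the page: %#x\n", *(uint32_t *)pv));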
trunk/src/VBox/VMM/Makefile.kmk
r13038 → r13060

@@ -357 +357 @@
  VMMR0_DEFS += VBOX_WITH_IDT_PATCHING
 endif
-VMMR0_DEFS.darwin = VBOX_WITH_2X_4GB_ADDR_SPACE VBOX_WITH_2X_4GB_ADDR_SPACE_IN_0
+VMMR0_DEFS.darwin = VBOX_WITH_2X_4GB_ADDR_SPACE VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0
 ifeq ($(VBOX_LDR_FMT),pe)
  VMMR0_LDFLAGS = -Entry:VMMR0EntryEx
trunk/src/VBox/VMM/PGM.cpp
r13045 → r13060

@@ -3237 +3237 @@
      * @todo A20 setting
      */
-    if (    pVM->pgm.s.CTXSUFF(pPool)
+    if (    pVM->pgm.s.CTX_SUFF(pPool)
         &&  !HWACCMIsNestedPagingActive(pVM)
         &&  PGMMODE_WITH_PAGING(pVM->pgm.s.enmGuestMode) != PGMMODE_WITH_PAGING(enmGuestMode))
trunk/src/VBox/VMM/PGMInternal.h
r13047 → r13060

@@ -220 +220 @@
  * @remark  There is no need to assert on the result.
  */
-#ifdef IN_GC
-# define PGM_HCPHYS_2_PTR(pVM, HCPhys, ppv) PGMGCDynMapHCPage(pVM, HCPhys, (void **)(ppv))
-#elif defined(VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0)
-# define PGM_HCPHYS_2_PTR(pVM, HCPhys, ppv) PGMR0DynMapHCPage(pVM, HCPhys, (void **)(ppv))
+#if defined(IN_GC) || defined(VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0)
+# define PGM_HCPHYS_2_PTR(pVM, HCPhys, ppv) PGMDynMapHCPage(pVM, HCPhys, (void **)(ppv))
 #else
 # define PGM_HCPHYS_2_PTR(pVM, HCPhys, ppv) MMPagePhys2PageEx(pVM, HCPhys, (void **)(ppv))

@@ -240 +238 @@
  * @remark  There is no need to assert on the result.
  */
-#ifdef IN_GC
-# define PGM_GCPHYS_2_PTR(pVM, GCPhys, ppv) PGMGCDynMapGCPage(pVM, GCPhys, (void **)(ppv))
-#elif defined(VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0)
-# define PGM_GCPHYS_2_PTR(pVM, GCPhys, ppv) PGMR0DynMapGCPage(pVM, GCPhys, (void **)(ppv))
+#if defined(IN_GC) || defined(VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0)
+# define PGM_GCPHYS_2_PTR(pVM, GCPhys, ppv) PGMDynMapGCPage(pVM, GCPhys, (void **)(ppv))
 #else
 # define PGM_GCPHYS_2_PTR(pVM, GCPhys, ppv) PGMPhysGCPhys2HCPtr(pVM, GCPhys, 1 /* one page only */, (void **)(ppv)) /** @todo this isn't asserting, use PGMRamGCPhys2HCPtr! */

@@ -260 +256 @@
  * @remark  There is no need to assert on the result.
  */
-#ifdef IN_GC
-# define PGM_GCPHYS_2_PTR_EX(pVM, GCPhys, ppv) PGMGCDynMapGCPageEx(pVM, GCPhys, (void **)(ppv))
-#elif defined(VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0)
-# define PGM_GCPHYS_2_PTR_EX(pVM, GCPhys, ppv) PGMR0DynMapGCPageEx(pVM, GCPhys, (void **)(ppv))
+#if defined(IN_GC) || defined(VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0)
+# define PGM_GCPHYS_2_PTR_EX(pVM, GCPhys, ppv) PGMDynMapGCPageOff(pVM, GCPhys, (void **)(ppv))
 #else
 # define PGM_GCPHYS_2_PTR_EX(pVM, GCPhys, ppv) PGMPhysGCPhys2HCPtr(pVM, GCPhys, 1 /* one page only */, (void **)(ppv)) /** @todo this isn't asserting, use PGMRamGCPhys2HCPtr! */

@@ -927 +921 @@
     uint32_t                        u32Alignment; /**< alignment. */
 #ifndef VBOX_WITH_NEW_PHYS_CODE
-    /** HC virtual lookup ranges for chunks - R3/R0 Ptr.
-     * Currently only used with MM_RAM_FLAGS_DYNAMIC_ALLOC ranges. */
+    /** R3 virtual lookup ranges for chunks.
+     * Currently only used with MM_RAM_FLAGS_DYNAMIC_ALLOC ranges.
+     * @remarks This is occationally accessed from ring-0!! (not darwin) */
+# ifdef VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0
+    R3PTRTYPE(PRTR3UINTPTR)         paChunkR3Ptrs;
+# else
     R3R0PTRTYPE(PRTR3UINTPTR)       paChunkR3Ptrs;
+# endif
 #endif
     /** Start of the HC mapping of the range. This is only used for MMIO2. */

@@ -995 +994 @@
     /** Pointer to the next range - R0. */
     R0PTRTYPE(struct PGMROMRANGE *) pNextR0;
-    /** Pointer to the next range - GC. */
-    RCPTRTYPE(struct PGMROMRANGE *) pNextGC;
+    /** Pointer to the next range - RC. */
+    RCPTRTYPE(struct PGMROMRANGE *) pNextRC;
     /** Pointer alignment */
     RTRCPTR                         GCPtrAlignment;

@@ -1062 +1061 @@

-/** @todo r=bird: fix typename. */
 /**
  * PGMPhysRead/Write cache entry
  */
-typedef struct PGMPHYSCACHE_ENTRY
+typedef struct PGMPHYSCACHEENTRY
 {
-    /** HC pointer to physical page*/
-    R3PTRTYPE(uint8_t *)            pbHC;
+    /** R3 pointer to physical page. */
+    R3PTRTYPE(uint8_t *)            pbR3;
     /** GC Physical address for cache entry */
     RTGCPHYS                        GCPhys;

@@ -1075 +1073 @@
     RTGCPHYS                        u32Padding0; /**< alignment padding. */
 #endif
-} PGMPHYSCACHE_ENTRY;
+} PGMPHYSCACHEENTRY;

@@ -1085 +1083 @@
     uint64_t                        aEntries;
     /** Cache entries */
-    PGMPHYSCACHE_ENTRY              Entry[PGM_MAX_PHYSCACHE_ENTRIES];
+    PGMPHYSCACHEENTRY               Entry[PGM_MAX_PHYSCACHE_ENTRIES];
 } PGMPHYSCACHE;

@@ -1127 +1125 @@
 #endif
     /** The chunk map. */
+#ifdef VBOX_WITH_2X_4GB_ADDR_SPACE
+    R3PTRTYPE(PPGMCHUNKR3MAP) volatile  pChunk;
+#else
     R3R0PTRTYPE(PPGMCHUNKR3MAP) volatile pChunk;
+#endif
 } PGMCHUNKR3MAPTLBE;

@@ -1173 +1175 @@
 {
     /** Address of the page. */
     RTGCPHYS volatile               GCPhys;
     /** The guest page. */
-    R3R0PTRTYPE(PPGMPAGE) volatile  pPage;
+#ifdef VBOX_WITH_2X_4GB_ADDR_SPACE
+    R3PTRTYPE(PPGMPAGE) volatile    pPage;
+#else
+    R3R0PTRTYPE(PPGMPAGE) volatile  pPage;
+#endif
     /** Pointer to the page mapping tracking structure, PGMCHUNKR3MAP. */
+#ifdef VBOX_WITH_2X_4GB_ADDR_SPACE
+    R3PTRTYPE(PPGMCHUNKR3MAP) volatile pMap;
+#else
     R3R0PTRTYPE(PPGMCHUNKR3MAP) volatile pMap;
+#endif
     /** The address */
-    R3R0PTRTYPE(void *) volatile    pv;
+#ifdef VBOX_WITH_2X_4GB_ADDR_SPACE
+    R3PTRTYPE(void *) volatile      pv;
+#else
+    R3R0PTRTYPE(void *) volatile    pv;
+#endif
 #if HC_ARCH_BITS == 32
     uint32_t                        u32Padding; /**< alignment padding. */
 #endif
 } PGMPAGER3MAPTLBE;

@@ -1405 +1419 @@
     AVLOHCPHYSNODECORE  Core;
     /** Pointer to the HC mapping of the page. */
+#ifdef VBOX_WITH_2X_4GB_ADDR_SPACE
+    R3PTRTYPE(void *)   pvPageHC;
+#else
     R3R0PTRTYPE(void *) pvPageHC;
+#endif
     /** The guest physical address. */
 #if HC_ARCH_BITS == 32 && GC_ARCH_BITS == 64

@@ -1658 +1676 @@
  * @remark  There is no need to assert on the result.
  */
-#ifdef IN_GC
-# define PGMPOOL_PAGE_2_PTR(pVM, pPage)     pgmGCPoolMapPage((pVM), (pPage))
+#if defined(IN_GC) || defined(VBOX_WITH_2X_4GB_ADDR_SPACE)
+# define PGMPOOL_PAGE_2_PTR(pVM, pPage)     pgmPoolMapPage((pVM), (pPage))
 #else
 # define PGMPOOL_PAGE_2_PTR(pVM, pPage)     ((pPage)->pvPageHC)

@@ -1955 +1973 @@
  * @{ */
     /** The guest's page directory, HC pointer. */
+#if 0///@todo def VBOX_WITH_2X_4GB_ADDR_SPACE
+    R3PTRTYPE(PX86PD)           pGuestPDHC;
+#else
     R3R0PTRTYPE(PX86PD)         pGuestPDHC;
+#endif
     /** The guest's page directory, static GC mapping. */
     RCPTRTYPE(PX86PD)           pGuestPDGC;

@@ -1965 +1987 @@
     RCPTRTYPE(PX86PDPT)         pGstPaePDPTGC;
     /** The guest's page directory pointer table, HC pointer. */
+#if 0///@todo def VBOX_WITH_2X_4GB_ADDR_SPACE
+    R3PTRTYPE(PX86PDPT)         pGstPaePDPTHC;
+#else
     R3R0PTRTYPE(PX86PDPT)       pGstPaePDPTHC;
+#endif
     /** The guest's page directories, HC pointers.
      * These are individual pointers and don't have to be adjecent.
      * These don't have to be up-to-date - use pgmGstGetPaePD() to access them. */
+#if 0///@todo def VBOX_WITH_2X_4GB_ADDR_SPACE
+    R3PTRTYPE(PX86PDPAE)        apGstPaePDsHC[4];
+#else
     R3R0PTRTYPE(PX86PDPAE)      apGstPaePDsHC[4];
+#endif
     /** The guest's page directories, static GC mapping.
      * Unlike the HC array the first entry can be accessed as a 2048 entry PD.

@@ -1983 +2013 @@
  * @{ */
     /** The guest's page directory pointer table, HC pointer. */
+#if 0///@todo def VBOX_WITH_2X_4GB_ADDR_SPACE
     R3R0PTRTYPE(PX86PML4)       pGstPaePML4HC;
+#else
+    R3R0PTRTYPE(PX86PML4)       pGstPaePML4HC;
+#endif
     /** @} */

@@ -1989 +2023 @@
  * @{ */
     /** The 32-Bit PD - HC Ptr. */
+#if 0///@todo def VBOX_WITH_2X_4GB_ADDR_SPACE
+    R3PTRTYPE(PX86PD)           pHC32BitPD;
+#else
     R3R0PTRTYPE(PX86PD)         pHC32BitPD;
+#endif
     /** The 32-Bit PD - GC Ptr. */
     RCPTRTYPE(PX86PD)           pGC32BitPD;

@@ -2004 +2042 @@
      * Even though these are 4 pointers, what they point at is a single table.
      * Thus, it's possible to walk the 2048 entries starting where apHCPaePDs[0] points. */
+#if 0///@todo def VBOX_WITH_2X_4GB_ADDR_SPACE
+    R3PTRTYPE(PX86PDPAE)        apHCPaePDs[4];
+#else
     R3R0PTRTYPE(PX86PDPAE)      apHCPaePDs[4];
+#endif
     /** The four PDs for the low 4GB - GC Ptr.
      * Same kind of mapping as apHCPaePDs. */

@@ -2026 +2068 @@
 #endif
     /** The Page Map Level 4 table - HC Ptr. */
+#if 0///@todo def VBOX_WITH_2X_4GB_ADDR_SPACE
+    R3PTRTYPE(PX86PML4)         pHCPaePML4;
+#else
     R3R0PTRTYPE(PX86PML4)       pHCPaePML4;
+#endif
     /** The Physical Address (HC) of the Page Map Level 4 table. */
     RTHCPHYS                    HCPhysPaePML4;
     /** The pgm pool page descriptor for the current active CR3. */
+#if 0///@todo def VBOX_WITH_2X_4GB_ADDR_SPACE
+    R3PTRTYPE(PPGMPOOLPAGE)     pHCShwAmd64CR3;
+#else
     R3R0PTRTYPE(PPGMPOOLPAGE)   pHCShwAmd64CR3;
+#endif

     /** @}*/

@@ -2037 +2087 @@
  * @{ */
     /** Root table; format depends on the host paging mode (AMD-V) or EPT */
+#if 0///@todo def VBOX_WITH_2X_4GB_ADDR_SPACE
+    R3PTRTYPE(void *)           pHCNestedRoot;
+#else
     R3R0PTRTYPE(void *)         pHCNestedRoot;
+#endif
     /** The Physical Address (HC) of the nested paging root. */
     RTHCPHYS                    HCPhysNestedRoot;

@@ -2148 +2202 @@
     R3PTRTYPE(PPGMROMRANGE)     pRomRangesR3;
     /** R0 pointer corresponding to PGM::pRomRangesR3. */
-    R0PTRTYPE(PPGMRAMRANGE)     pRomRangesR0;
-    /** GC pointer corresponding to PGM::pRomRangesR3. */
-    RCPTRTYPE(PPGMRAMRANGE)     pRomRangesGC;
+    R0PTRTYPE(PPGMROMRANGE)     pRomRangesR0;
+    /** RC pointer corresponding to PGM::pRomRangesR3. */
+    RCPTRTYPE(PPGMROMRANGE)     pRomRangesRC;
     /** Alignment padding. */
     RTRCPTR                     GCPtrPadding2;

@@ -2247 +2301 @@
     PDMCRITSECT                 CritSect;

-    /** Shadow Page Pool - HC Ptr. */
-    R3R0PTRTYPE(PPGMPOOL)       pPoolHC;
-    /** Shadow Page Pool - GC Ptr. */
-    RCPTRTYPE(PPGMPOOL)         pPoolGC;
+    /** Shadow Page Pool - R3 Ptr. */
+    R3PTRTYPE(PPGMPOOL)         pPoolR3;
+    /** Shadow Page Pool - R0 Ptr. */
+    R0PTRTYPE(PPGMPOOL)         pPoolR0;
+    /** Shadow Page Pool - RC Ptr. */
+    RCPTRTYPE(PPGMPOOL)         pPoolRC;

     /** We're not in a state which permits writes to guest memory.

@@ -2270 +2326 @@
 {
     /** The chunk tree, ordered by chunk id. */
-    R3R0PTRTYPE(PAVLU32NODECORE) pTree;
+#if 0///@todo def VBOX_WITH_2X_4GB_ADDR_SPACE
+    R3PTRTYPE(PAVLU32NODECORE)  pTree;
+#else
+    R3R0PTRTYPE(PAVLU32NODECORE) pTree;
+#endif
     /** The chunk mapping TLB. */
     PGMCHUNKR3MAPTLB            Tlb;

@@ -2664 +2724 @@

 #endif /* IN_RING3 */
-#ifdef IN_GC
-void           *pgmGCPoolMapPage(PVM pVM, PPGMPOOLPAGE pPage);
+#if defined(IN_GC) || defined(VBOX_WITH_2X_4GB_ADDR_SPACE)
+void           *pgmPoolMapPage(PVM pVM, PPGMPOOLPAGE pPage);
 #endif
 int             pgmPoolAlloc(PVM pVM, RTGCPHYS GCPhys, PGMPOOLKIND enmKind, uint16_t iUser, uint32_t iUserTable, PPPGMPOOLPAGE ppPage);

@@ -3031 +3091 @@
 #endif /* !IN_GC */

-#if !defined(IN_GC) /** @todo && !defined(VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0) */
+#if !defined(IN_GC) && !defined(VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0)

 # ifndef VBOX_WITH_NEW_PHYS_CODE

@@ -3151 +3211 @@
 #if defined(IN_GC) || defined(VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0) /* ASSUMES only MapCR3 usage. */
     PRTR3UINTPTR paChunkR3Ptrs = (PRTR3UINTPTR)MMHyperR3ToCC(PGM2VM(pPGM), pRam->paChunkR3Ptrs);
-    *pHCPtr = paChunkR3Ptrs[idx] + (off & PGM_DYNAMIC_CHUNK_OFFSET_MASK);
+    *pHCPtr = (RTHCPTR)(paChunkR3Ptrs[idx] + (off & PGM_DYNAMIC_CHUNK_OFFSET_MASK));
 #else
     *pHCPtr = (RTHCPTR)(pRam->paChunkR3Ptrs[idx] + (off & PGM_DYNAMIC_CHUNK_OFFSET_MASK));
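Editorial note: the macro hunks above are the consumer side of the rename. PGM_HCPHYS_2_PTR, PGM_GCPHYS_2_PTR and PGM_GCPHYS_2_PTR_EX now funnel into the shared PGMDynMap* implementation whenever the build lacks a permanent ring-3 mapping (raw-mode, or darwin's split 4GB ring-0). A hypothetical use site, assuming HCPhysPT holds the host-physical address of a page table and iPte is a valid index:

    /* Illustrative only: turn a host-physical page-table address into a pointer
     * valid in whatever context this file is compiled for, then read one PTE.
     * Per the @remark in the header, asserting on the result is optional. */
    PX86PT pPT;
    int rc = PGM_HCPHYS_2_PTR(pVM, HCPhysPT, &pPT);
    AssertRCReturn(rc, rc);
    X86PGUINT const uPte = pPT->a[iPte].u;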
trunk/src/VBox/VMM/PGMPhys.cpp
r13042 → r13060

@@ -1340 +1340 @@
         pRomNew->pNextR3 = pRom;
         pRomNew->pNextR0 = pRom ? MMHyperCCToR0(pVM, pRom) : NIL_RTR0PTR;
-        pRomNew->pNextGC = pRom ? MMHyperCCToRC(pVM, pRom) : NIL_RTRCPTR;
+        pRomNew->pNextRC = pRom ? MMHyperCCToRC(pVM, pRom) : NIL_RTRCPTR;

         if (pRomPrev)

@@ -1346 +1346 @@
             pRomPrev->pNextR3 = pRomNew;
             pRomPrev->pNextR0 = MMHyperCCToR0(pVM, pRomNew);
-            pRomPrev->pNextGC = MMHyperCCToRC(pVM, pRomNew);
+            pRomPrev->pNextRC = MMHyperCCToRC(pVM, pRomNew);
         }
         else

@@ -1352 +1352 @@
             pVM->pgm.s.pRomRangesR3 = pRomNew;
             pVM->pgm.s.pRomRangesR0 = MMHyperCCToR0(pVM, pRomNew);
-            pVM->pgm.s.pRomRangesGC = MMHyperCCToRC(pVM, pRomNew);
+            pVM->pgm.s.pRomRangesRC = MMHyperCCToRC(pVM, pRomNew);
         }
trunk/src/VBox/VMM/PGMPhysRWTmpl.h
r12989 → r13060

@@ -52 +52 @@
     {
         RTGCPHYS off = GCPhys - pVM->pgm.s.pgmphysreadcache.Entry[iCacheIndex].GCPhys;
-        return *(PGMPHYS_DATATYPE *)(pVM->pgm.s.pgmphysreadcache.Entry[iCacheIndex].pbHC + off);
+        return *(PGMPHYS_DATATYPE *)(pVM->pgm.s.pgmphysreadcache.Entry[iCacheIndex].pbR3 + off);
     }
 #endif /* PGM_PHYSMEMACCESS_CACHING */

@@ -94 +94 @@
     {
         RTGCPHYS off = GCPhys - pVM->pgm.s.pgmphyswritecache.Entry[iCacheIndex].GCPhys;
-        *(PGMPHYS_DATATYPE *)(pVM->pgm.s.pgmphyswritecache.Entry[iCacheIndex].pbHC + off) = val;
+        *(PGMPHYS_DATATYPE *)(pVM->pgm.s.pgmphyswritecache.Entry[iCacheIndex].pbR3 + off) = val;
         return;
     }
trunk/src/VBox/VMM/PGMPool.cpp
r13042 → r13060

@@ -180 +180 @@
     if (VBOX_FAILURE(rc))
         return rc;
-    pVM->pgm.s.pPoolHC = pPool;
-    pVM->pgm.s.pPoolGC = MMHyperHC2GC(pVM, pPool);
+    pVM->pgm.s.pPoolR3 = pPool;
+    pVM->pgm.s.pPoolR0 = MMHyperR3ToR0(pVM, pPool);
+    pVM->pgm.s.pPoolRC = MMHyperR3ToRC(pVM, pPool);

     /*

@@ -380 +381 @@
 void pgmR3PoolRelocate(PVM pVM)
 {
-    pVM->pgm.s.pPoolGC = MMHyperHC2GC(pVM, pVM->pgm.s.pPoolHC);
-    pVM->pgm.s.pPoolHC->pVMGC = pVM->pVMGC;
+    pVM->pgm.s.pPoolRC = MMHyperR3ToRC(pVM, pVM->pgm.s.pPoolR3);
+    pVM->pgm.s.pPoolR3->pVMGC = pVM->pVMRC;
 #ifdef PGMPOOL_WITH_USER_TRACKING
-    pVM->pgm.s.pPoolHC->paUsersGC = MMHyperHC2GC(pVM, pVM->pgm.s.pPoolHC->paUsersHC);
+    pVM->pgm.s.pPoolR3->paUsersGC = MMHyperHC2GC(pVM, pVM->pgm.s.pPoolR3->paUsersHC);
 #endif
 #ifdef PGMPOOL_WITH_GCPHYS_TRACKING
-    pVM->pgm.s.pPoolHC->paPhysExtsGC = MMHyperHC2GC(pVM, pVM->pgm.s.pPoolHC->paPhysExtsHC);
+    pVM->pgm.s.pPoolR3->paPhysExtsGC = MMHyperHC2GC(pVM, pVM->pgm.s.pPoolR3->paPhysExtsHC);
 #endif
 #ifdef PGMPOOL_WITH_MONITORING
-    int rc = PDMR3LdrGetSymbolRC(pVM, NULL, "pgmPoolAccessHandler", &pVM->pgm.s.pPoolHC->pfnAccessHandlerRC);
+    int rc = PDMR3LdrGetSymbolRC(pVM, NULL, "pgmPoolAccessHandler", &pVM->pgm.s.pPoolR3->pfnAccessHandlerRC);
     AssertReleaseRC(rc);
     /* init order hack. */
-    if (!pVM->pgm.s.pPoolHC->pfnAccessHandlerR0)
-    {
-        rc = PDMR3LdrGetSymbolR0(pVM, NULL, "pgmPoolAccessHandler", &pVM->pgm.s.pPoolHC->pfnAccessHandlerR0);
+    if (!pVM->pgm.s.pPoolR3->pfnAccessHandlerR0)
+    {
+        rc = PDMR3LdrGetSymbolR0(pVM, NULL, "pgmPoolAccessHandler", &pVM->pgm.s.pPoolR3->pfnAccessHandlerR0);
         AssertReleaseRC(rc);
     }

@@ -423 +424 @@
 VMMR3DECL(int) PGMR3PoolGrow(PVM pVM)
 {
-    PPGMPOOL pPool = pVM->pgm.s.pPoolHC;
+    PPGMPOOL pPool = pVM->pgm.s.pPoolR3;
     AssertReturn(pPool->cCurPages < pPool->cMaxPages, VERR_INTERNAL_ERROR);

@@ -510 +511 @@
 static DECLCALLBACK(int) pgmR3PoolAccessHandler(PVM pVM, RTGCPHYS GCPhys, void *pvPhys, void *pvBuf, size_t cbBuf, PGMACCESSTYPE enmAccessType, void *pvUser)
 {
-    STAM_PROFILE_START(&pVM->pgm.s.pPoolHC->StatMonitorHC, a);
-    PPGMPOOL pPool = pVM->pgm.s.pPoolHC;
+    STAM_PROFILE_START(&pVM->pgm.s.pPoolR3->StatMonitorHC, a);
+    PPGMPOOL pPool = pVM->pgm.s.pPoolR3;
     PPGMPOOLPAGE pPage = (PPGMPOOLPAGE)pvUser;
     LogFlow(("pgmR3PoolAccessHandler: GCPhys=%VGp %p:{.Core=%RHp, .idx=%d, .GCPhys=%RGp, .enmType=%d}\n",
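Editorial note: the pool pointer becomes an R3/R0/RC triple so each context can reach it via CTX_SUFF. A sketch of the pattern, drawn from the hunks above and assuming MMHyperAlloc's usual (cb, uAlignment, enmTag, ppv) signature:

    /* One hyper-heap allocation, three context-specific addresses derived
     * from it. Only the RC address moves on relocation, which is why
     * pgmR3PoolRelocate() recomputes pPoolRC but leaves pPoolR3/pPoolR0 alone. */
    PPGMPOOL pPool;
    int rc = MMHyperAlloc(pVM, cbPool, 0, MM_TAG_PGM_POOL, (void **)&pPool);
    if (RT_SUCCESS(rc))
    {
        pVM->pgm.s.pPoolR3 = pPool;                     /* ring-3 address   */
        pVM->pgm.s.pPoolR0 = MMHyperR3ToR0(pVM, pPool); /* ring-0 address   */
        pVM->pgm.s.pPoolRC = MMHyperR3ToRC(pVM, pPool); /* raw-mode address */
    }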
trunk/src/VBox/VMM/VMMAll/PGMAll.cpp
r13019 → r13060

@@ -785 +785 @@
 {
     PPGM pPGM = &pVM->pgm.s;
-    PPGMPOOL pPool = pPGM->CTXSUFF(pPool);
+    PPGMPOOL pPool = pPGM->CTX_SUFF(pPool);
     PPGMPOOLPAGE pShwPage;
     int rc;

@@ -837 +837 @@
 {
     PPGM pPGM = &pVM->pgm.s;
-    PPGMPOOL pPool = pPGM->CTXSUFF(pPool);
+    PPGMPOOL pPool = pPGM->CTX_SUFF(pPool);
     PPGMPOOLPAGE pShwPage;

@@ -873 +873 @@
     PPGM pPGM = &pVM->pgm.s;
     const unsigned iPml4e = (GCPtr >> X86_PML4_SHIFT) & X86_PML4_MASK;
-    PPGMPOOL pPool = pPGM->CTXSUFF(pPool);
+    PPGMPOOL pPool = pPGM->CTX_SUFF(pPool);
     PX86PML4E pPml4e;
     PPGMPOOLPAGE pShwPage;

@@ -977 +977 @@
     PPGM pPGM = &pVM->pgm.s;
     const unsigned iPml4e = (GCPtr >> X86_PML4_SHIFT) & X86_PML4_MASK;
-    PPGMPOOL pPool = pPGM->CTXSUFF(pPool);
+    PPGMPOOL pPool = pPGM->CTX_SUFF(pPool);
     PX86PML4E pPml4e;
     PPGMPOOLPAGE pShwPage;

@@ -1019 +1019 @@
     PPGM pPGM = &pVM->pgm.s;
     const unsigned iPml4e = (GCPtr >> EPT_PML4_SHIFT) & EPT_PML4_MASK;
-    PPGMPOOL pPool = pPGM->CTXSUFF(pPool);
+    PPGMPOOL pPool = pPGM->CTX_SUFF(pPool);
     PEPTPML4 pPml4 = (PEPTPML4)pPGM->pHCNestedRoot;
     PEPTPML4E pPml4e;

@@ -1750 +1750 @@
 }

+#if defined(IN_GC) || defined(VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0)
+
+/**
+ * Temporarily maps one guest page specified by GC physical address.
+ * These pages must have a physical mapping in HC, i.e. they cannot be MMIO pages.
+ *
+ * Be WARNED that the dynamic page mapping area is small, 8 pages, thus the space is
+ * reused after 8 mappings (or perhaps a few more if you score with the cache).
+ *
+ * @returns VBox status.
+ * @param   pVM     VM handle.
+ * @param   GCPhys  GC Physical address of the page.
+ * @param   ppv     Where to store the address of the mapping.
+ */
+VMMDECL(int) PGMDynMapGCPage(PVM pVM, RTGCPHYS GCPhys, void **ppv)
+{
+    AssertMsg(!(GCPhys & PAGE_OFFSET_MASK), ("GCPhys=%VGp\n", GCPhys));
+
+    /*
+     * Get the ram range.
+     */
+    PPGMRAMRANGE pRam = pVM->pgm.s.CTX_SUFF(pRamRanges);
+    while (pRam && GCPhys - pRam->GCPhys >= pRam->cb)
+        pRam = pRam->CTX_SUFF(pNext);
+    if (!pRam)
+    {
+        AssertMsgFailed(("Invalid physical address %VGp!\n", GCPhys));
+        return VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS;
+    }
+
+    /*
+     * Pass it on to PGMDynMapHCPage.
+     */
+    RTHCPHYS HCPhys = PGM_PAGE_GET_HCPHYS(&pRam->aPages[(GCPhys - pRam->GCPhys) >> PAGE_SHIFT]);
+    //Log(("PGMDynMapGCPage: GCPhys=%VGp HCPhys=%VHp\n", GCPhys, HCPhys));
+    return PGMDynMapHCPage(pVM, HCPhys, ppv);
+}
+
+
+/**
+ * Temporarily maps one guest page specified by unaligned GC physical address.
+ * These pages must have a physical mapping in HC, i.e. they cannot be MMIO pages.
+ *
+ * Be WARNED that the dynamic page mapping area is small, 8 pages, thus the space is
+ * reused after 8 mappings (or perhaps a few more if you score with the cache).
+ *
+ * The caller is aware that only the speicifed page is mapped and that really bad things
+ * will happen if writing beyond the page!
+ *
+ * @returns VBox status.
+ * @param   pVM     VM handle.
+ * @param   GCPhys  GC Physical address within the page to be mapped.
+ * @param   ppv     Where to store the address of the mapping address corresponding to GCPhys.
+ */
+VMMDECL(int) PGMDynMapGCPageOff(PVM pVM, RTGCPHYS GCPhys, void **ppv)
+{
+    /*
+     * Get the ram range.
+     */
+    PPGMRAMRANGE pRam = pVM->pgm.s.CTX_SUFF(pRamRanges);
+    while (pRam && GCPhys - pRam->GCPhys >= pRam->cb)
+        pRam = pRam->CTX_SUFF(pNext);
+    if (!pRam)
+    {
+        AssertMsgFailed(("Invalid physical address %VGp!\n", GCPhys));
+        return VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS;
+    }
+
+    /*
+     * Pass it on to PGMDynMapHCPageOff.
+     */
+    RTHCPHYS HCPhys = PGM_PAGE_GET_HCPHYS(&pRam->aPages[(GCPhys - pRam->GCPhys) >> PAGE_SHIFT]);
+    return PGMDynMapHCPageOff(pVM, HCPhys, ppv);
+}
+
+
+/**
+ * Temporarily maps one host page specified by HC physical address.
+ *
+ * Be WARNED that the dynamic page mapping area is small, 8 pages, thus the space is
+ * reused after 8 mappings (or perhaps a few more if you score with the cache).
+ *
+ * @returns VBox status.
+ * @param   pVM     VM handle.
+ * @param   HCPhys  HC Physical address of the page.
+ * @param   ppv     Where to store the address of the mapping.
+ */
+VMMDECL(int) PGMDynMapHCPage(PVM pVM, RTHCPHYS HCPhys, void **ppv)
+{
+    AssertMsg(!(HCPhys & PAGE_OFFSET_MASK), ("HCPhys=%VHp\n", HCPhys));
+# ifdef IN_GC
+
+    /*
+     * Check the cache.
+     */
+    register unsigned iCache;
+    if (    pVM->pgm.s.aHCPhysDynPageMapCache[iCache = 0] == HCPhys
+        ||  pVM->pgm.s.aHCPhysDynPageMapCache[iCache = 1] == HCPhys
+        ||  pVM->pgm.s.aHCPhysDynPageMapCache[iCache = 2] == HCPhys
+        ||  pVM->pgm.s.aHCPhysDynPageMapCache[iCache = 3] == HCPhys)
+    {
+        static const uint8_t au8Trans[MM_HYPER_DYNAMIC_SIZE >> PAGE_SHIFT][RT_ELEMENTS(pVM->pgm.s.aHCPhysDynPageMapCache)] =
+        {
+            { 0, 5, 6, 7 },
+            { 0, 1, 6, 7 },
+            { 0, 1, 2, 7 },
+            { 0, 1, 2, 3 },
+            { 4, 1, 2, 3 },
+            { 4, 5, 2, 3 },
+            { 4, 5, 6, 3 },
+            { 4, 5, 6, 7 },
+        };
+        Assert(RT_ELEMENTS(au8Trans) == 8);
+        Assert(RT_ELEMENTS(au8Trans[0]) == 4);
+        int iPage = au8Trans[pVM->pgm.s.iDynPageMapLast][iCache];
+        void *pv = pVM->pgm.s.pbDynPageMapBaseGC + (iPage << PAGE_SHIFT);
+        *ppv = pv;
+        STAM_COUNTER_INC(&pVM->pgm.s.StatDynMapCacheHits);
+        //Log(("PGMGCDynMapHCPage: HCPhys=%VHp pv=%VGv iPage=%d iCache=%d\n", HCPhys, pv, iPage, iCache));
+        return VINF_SUCCESS;
+    }
+    Assert(RT_ELEMENTS(pVM->pgm.s.aHCPhysDynPageMapCache) == 4);
+    STAM_COUNTER_INC(&pVM->pgm.s.StatDynMapCacheMisses);
+
+    /*
+     * Update the page tables.
+     */
+    register unsigned iPage = pVM->pgm.s.iDynPageMapLast;
+    pVM->pgm.s.iDynPageMapLast = iPage = (iPage + 1) & ((MM_HYPER_DYNAMIC_SIZE >> PAGE_SHIFT) - 1);
+    Assert((MM_HYPER_DYNAMIC_SIZE >> PAGE_SHIFT) == 8);
+
+    pVM->pgm.s.aHCPhysDynPageMapCache[iPage & (RT_ELEMENTS(pVM->pgm.s.aHCPhysDynPageMapCache) - 1)] = HCPhys;
+    pVM->pgm.s.paDynPageMap32BitPTEsGC[iPage].u = (uint32_t)HCPhys | X86_PTE_P | X86_PTE_A | X86_PTE_D;
+    pVM->pgm.s.paDynPageMapPaePTEsGC[iPage].u   = HCPhys | X86_PTE_P | X86_PTE_A | X86_PTE_D;
+
+    void *pv = pVM->pgm.s.pbDynPageMapBaseGC + (iPage << PAGE_SHIFT);
+    *ppv = pv;
+    ASMInvalidatePage(pv);
+    Log4(("PGMGCDynMapHCPage: HCPhys=%VHp pv=%VGv iPage=%d\n", HCPhys, pv, iPage));
+    return VINF_SUCCESS;
+
+#else /* VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0 */
+    AssertFailed();
+    return VERR_NOT_IMPLEMENTED;
+#endif /* VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0 */
+}
+
+
+/**
+ * Temporarily maps one host page specified by HC physical address, returning
+ * pointer within the page.
+ *
+ * Be WARNED that the dynamic page mapping area is small, 8 pages, thus the space is
+ * reused after 8 mappings (or perhaps a few more if you score with the cache).
+ *
+ * @returns VBox status.
+ * @param   pVM     VM handle.
+ * @param   HCPhys  HC Physical address of the page.
+ * @param   ppv     Where to store the address corresponding to HCPhys.
+ */
+VMMDECL(int) PGMDynMapHCPageOff(PVM pVM, RTHCPHYS HCPhys, void **ppv)
+{
+    int rc = PGMDynMapHCPage(pVM, HCPhys, ppv);
+    if (RT_SUCCESS(rc))
+        *ppv = (void *)((uintptr_t)*ppv | (HCPhys & PAGE_OFFSET_MASK));
+    return rc;
+}
+
+#endif /* IN_GC || VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0 */

 #ifdef VBOX_STRICT
trunk/src/VBox/VMM/VMMAll/PGMAllBth.h
r13046 → r13060

@@ -121 +121 @@
     if (!pPDSrc)
     {
-        LogFlow(("Trap0eHandler: guest PDPTR %d not present CR3=%VGp\n", (pvFault >> X86_PML4_SHIFT) & X86_PML4_MASK, (CPUMGetGuestCR3(pVM) & X86_CR3_PAGE_MASK)));
+# if PGM_GST_TYPE == PGM_TYPE_AMD64 && GC_ARCH_BITS == 64
+        LogFlow(("Trap0eHandler: guest PML4 %d not present CR3=%VGp\n", (int)(((RTGCUINTPTR)pvFault >> X86_PML4_SHIFT) & X86_PML4_MASK), CPUMGetGuestCR3(pVM) & X86_CR3_PAGE_MASK));
+# else
+        LogFlow(("Trap0eHandler: guest iPDSrc=%u not present CR3=%VGp\n", iPDSrc, CPUMGetGuestCR3(pVM) & X86_CR3_PAGE_MASK));
+# endif
         STAM_STATS({ pVM->pgm.s.CTXSUFF(pStatTrap0eAttribution) = &pVM->pgm.s.StatTrap0eGuestTrap; });
         TRPMSetErrorCode(pVM, uErr);

@@ -987 +991 @@

 # if PGM_GST_TYPE == PGM_TYPE_AMD64
-    PPGMPOOL pPool = pVM->pgm.s.CTXSUFF(pPool);
+    PPGMPOOL pPool = pVM->pgm.s.CTX_SUFF(pPool);

     /* Fetch the pgm pool shadow descriptor. */

@@ -1085 +1089 @@
     PX86PDPAE pPDPAE = pVM->pgm.s.CTXMID(ap,PaePDs)[0]; /* root of the 2048 PDE array */
     PX86PDEPAE pPDEDst = &pPDPAE->a[iPdpte * X86_PG_PAE_ENTRIES];
-    PPGMPOOL pPool = pVM->pgm.s.CTXSUFF(pPool);
+    PPGMPOOL pPool = pVM->pgm.s.CTX_SUFF(pPool);

     Assert(!(CTXSUFF(pVM->pgm.s.pGstPaePDPT)->a[iPdpte].n.u1Present));

@@ -1312 +1316 @@
         if (PGM_PAGE_GET_HCPHYS(&pRam->aPages[iPage]) == HCPhys)
         {
-            PPGMPOOL pPool = pVM->pgm.s.CTXSUFF(pPool);
+            PPGMPOOL pPool = pVM->pgm.s.CTX_SUFF(pPool);
             pgmTrackDerefGCPhys(pPool, pShwPage, &pRam->aPages[iPage]);
             pShwPage->cPresent--;

@@ -1326 +1330 @@
 # else /* !PGMPOOL_WITH_GCPHYS_TRACKING */
         pShwPage->cPresent--;
-        pVM->pgm.s.CTXSUFF(pPool)->cPresent--;
+        pVM->pgm.s.CTX_SUFF(pPool)->cPresent--;
 # endif /* !PGMPOOL_WITH_GCPHYS_TRACKING */
     }

@@ -1367 +1371 @@

         /* update statistics. */
-        pVM->pgm.s.CTXSUFF(pPool)->cPresent++;
+        pVM->pgm.s.CTX_SUFF(pPool)->cPresent++;
         pShwPage->cPresent++;
         if (pShwPage->iFirstPresent > iPTDst)

@@ -1805 +1809 @@
          * Yea, I'm lazy.
          */
-        PPGMPOOL pPool = pVM->pgm.s.CTXSUFF(pPool);
+        PPGMPOOL pPool = pVM->pgm.s.CTX_SUFF(pPool);
 # if PGM_GST_TYPE == PGM_TYPE_AMD64
         pgmPoolFreeByPage(pPool, pShwPage, pShwPde->idx, iPDDst);

@@ -3140 +3144 @@
     unsigned iPdNoMapping;
     const bool fRawR0Enabled = EMIsRawRing0Enabled(pVM);
-    PPGMPOOL pPool = pVM->pgm.s.CTXSUFF(pPool);
+    PPGMPOOL pPool = pVM->pgm.s.CTX_SUFF(pPool);

     /* Only check mappings if they are supposed to be put into the shadow page table. */

@@ -3660 +3664 @@

 # if PGM_GST_TYPE == PGM_TYPE_AMD64 || PGM_GST_TYPE == PGM_TYPE_PAE
-    PPGMPOOL pPool = pVM->pgm.s.CTXSUFF(pPool);
+    PPGMPOOL pPool = pVM->pgm.s.CTX_SUFF(pPool);
 # endif
trunk/src/VBox/VMM/VMMAll/PGMAllGst.h
r13046 → r13060

@@ -486 +486 @@
     }
 # elif PGM_GST_TYPE == PGM_TYPE_AMD64
-    PPGMPOOL pPool = pVM->pgm.s.CTXSUFF(pPool);
+    PPGMPOOL pPool = pVM->pgm.s.CTX_SUFF(pPool);

     pVM->pgm.s.pGstPaePML4HC = (R3R0PTRTYPE(PX86PML4))HCPtrGuestCR3;

@@ -564 +564 @@
     if (pVM->pgm.s.pHCShwAmd64CR3)
     {
-        PPGMPOOL pPool = pVM->pgm.s.CTXSUFF(pPool);
+        PPGMPOOL pPool = pVM->pgm.s.CTX_SUFF(pPool);
         pgmPoolFreeByPage(pPool, pVM->pgm.s.pHCShwAmd64CR3, PGMPOOL_IDX_AMD64_CR3, pVM->pgm.s.pHCShwAmd64CR3->GCPhys >> PAGE_SHIFT);
         pVM->pgm.s.pHCShwAmd64CR3 = NULL;

@@ -614 +614 @@
                                              pVM->pgm.s.pszR3GstWriteHandlerCR3);
 # else  /* PGMPOOL_WITH_MIXED_PT_CR3 */
-            rc = pgmPoolMonitorMonitorCR3(pVM->pgm.s.CTXSUFF(pPool),
+            rc = pgmPoolMonitorMonitorCR3(pVM->pgm.s.CTX_SUFF(pPool),
                                           pVM->pgm.s.enmShadowMode == PGMMODE_PAE
                                           || pVM->pgm.s.enmShadowMode == PGMMODE_PAE_NX

@@ -640 +640 @@
             if (pVM->pgm.s.GCPhysGstCR3Monitored != GCPhysCR3)
             {
-                rc = pgmPoolMonitorMonitorCR3(pVM->pgm.s.CTXSUFF(pPool), PGMPOOL_IDX_PDPT, GCPhysCR3);
+                rc = pgmPoolMonitorMonitorCR3(pVM->pgm.s.CTX_SUFF(pPool), PGMPOOL_IDX_PDPT, GCPhysCR3);
                 if (VBOX_FAILURE(rc))
                 {

@@ -661 +661 @@
                     Assert(pVM->pgm.s.enmShadowMode == PGMMODE_PAE || pVM->pgm.s.enmShadowMode == PGMMODE_PAE_NX);

-                    rc = pgmPoolMonitorMonitorCR3(pVM->pgm.s.CTXSUFF(pPool), PGMPOOL_IDX_PAE_PD_0 + i, GCPhys);
+                    rc = pgmPoolMonitorMonitorCR3(pVM->pgm.s.CTX_SUFF(pPool), PGMPOOL_IDX_PAE_PD_0 + i, GCPhys);
                 }

@@ -674 +674 @@
                 else if (pVM->pgm.s.aGCPhysGstPaePDsMonitored[i] != NIL_RTGCPHYS)
                 {
-                    rc = pgmPoolMonitorUnmonitorCR3(pVM->pgm.s.CTXSUFF(pPool), PGMPOOL_IDX_PAE_PD_0 + i);
+                    rc = pgmPoolMonitorUnmonitorCR3(pVM->pgm.s.CTX_SUFF(pPool), PGMPOOL_IDX_PAE_PD_0 + i);
                     AssertRC(rc);
                     pVM->pgm.s.aGCPhysGstPaePDsMonitored[i] = NIL_RTGCPHYS;

@@ -710 +710 @@
             AssertRCReturn(rc, rc);
 # else  /* PGMPOOL_WITH_MIXED_PT_CR3 */
-            rc = pgmPoolMonitorUnmonitorCR3(pVM->pgm.s.CTXSUFF(pPool),
+            rc = pgmPoolMonitorUnmonitorCR3(pVM->pgm.s.CTX_SUFF(pPool),
                                             pVM->pgm.s.enmShadowMode == PGMMODE_PAE
                                             || pVM->pgm.s.enmShadowMode == PGMMODE_PAE_NX

@@ -728 +728 @@
             if (pVM->pgm.s.GCPhysGstCR3Monitored != NIL_RTGCPHYS)
             {
-                rc = pgmPoolMonitorUnmonitorCR3(pVM->pgm.s.CTXSUFF(pPool), PGMPOOL_IDX_PDPT);
+                rc = pgmPoolMonitorUnmonitorCR3(pVM->pgm.s.CTX_SUFF(pPool), PGMPOOL_IDX_PDPT);
                 AssertRC(rc);
             }

@@ -738 +738 @@
                 {
                     Assert(pVM->pgm.s.enmShadowMode == PGMMODE_PAE || pVM->pgm.s.enmShadowMode == PGMMODE_PAE_NX);
-                    int rc2 = pgmPoolMonitorUnmonitorCR3(pVM->pgm.s.CTXSUFF(pPool), PGMPOOL_IDX_PAE_PD_0 + i);
+                    int rc2 = pgmPoolMonitorUnmonitorCR3(pVM->pgm.s.CTX_SUFF(pPool), PGMPOOL_IDX_PAE_PD_0 + i);
                     AssertRC(rc2);
                     if (VBOX_FAILURE(rc2))
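Editorial note: the CTXSUFF → CTX_SUFF churn in this and the surrounding files tracks the switch from the old two-way HC/GC member suffixes to three-way R3/R0/RC suffixes. Roughly, as a simplified sketch of the cdefs.h machinery rather than a verbatim copy:

    /* Paste the current context's suffix onto a member name at preprocessing
     * time, so one source line compiles correctly in all three contexts. */
    #if defined(IN_RING3)
    # define CTX_SUFF(name)     name##R3
    #elif defined(IN_RING0)
    # define CTX_SUFF(name)     name##R0
    #else  /* raw-mode (guest) context */
    # define CTX_SUFF(name)     name##RC
    #endif

    /* So pVM->pgm.s.CTX_SUFF(pPool) resolves to pPoolR3, pPoolR0 or pPoolRC,
     * matching the new pool pointer triple introduced by this changeset. */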
trunk/src/VBox/VMM/VMMAll/PGMAllPhys.cpp
r13046 → r13060

@@ -491 +491 @@
 int pgmPhysPageMap(PVM pVM, PPGMPAGE pPage, RTGCPHYS GCPhys, PPPGMPAGEMAP ppMap, void **ppv)
 {
-#ifdef IN_GC
-    /*
-     * Just some sketchy GC code.
+#if defined(IN_GC) || defined(VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0)
+    /*
+     * Just some sketchy GC/R0-darwin code.
      */
     *ppMap = NULL;
     RTHCPHYS HCPhys = PGM_PAGE_GET_HCPHYS(pPage);
     Assert(HCPhys != pVM->pgm.s.HCPhysZeroPg);
-    return PGMGCDynMapHCPage(pVM, HCPhys, ppv);
+    return PGMDynMapHCPage(pVM, HCPhys, ppv);

 #else /* IN_RING3 || IN_RING0 */

@@ -559 +559 @@

-#ifndef IN_GC
+#if !defined(IN_GC) && !defined(VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0)
 /**
  * Load a guest page into the ring-3 physical TLB.

@@ -613 +613 @@
     return VINF_SUCCESS;
 }
-#endif /* !IN_GC */
+#endif /* !IN_GC && !VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0 */

@@ -644 +644 @@
 {
 #ifdef VBOX_WITH_NEW_PHYS_CODE
-#ifdef IN_GC
-    /* Until a physical TLB is implemented for GC, let PGMGCDynMapGCPageEx handle it. */
-    return PGMGCDynMapGCPageEx(pVM, GCPhys, ppv);
+# if defined(IN_GC) && defined(VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0)
+    /* Until a physical TLB is implemented for GC or/and R0-darwin, let PGMDynMapGCPageEx handle it. */
+    return PGMDynMapGCPageOff(pVM, GCPhys, ppv);
+
 #else
     int rc = pgmLock(pVM);

@@ -697 +698 @@
      * Temporary fallback code.
      */
-# ifdef IN_GC
-    return PGMGCDynMapGCPageEx(pVM, GCPhys, ppv);
+# if defined(IN_GC) || defined(VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0)
+    return PGMDynMapGCPageOff(pVM, GCPhys, ppv);
 # else
     return PGMPhysGCPhys2HCPtr(pVM, GCPhys, 1, ppv);

@@ -816 +817 @@
 {
 #ifdef VBOX_WITH_NEW_PHYS_CODE
-#ifdef IN_GC
+#if defined(IN_GC) || defined(VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0)
     /* currently nothing to do here. */
     /* --- postponed

@@ -1107 +1108 @@
  * @thread  EMT.
  */
-static void pgmPhysCacheAdd(PVM pVM, PGMPHYSCACHE *pCache, RTGCPHYS GCPhys, uint8_t *pbHC)
+static void pgmPhysCacheAdd(PVM pVM, PGMPHYSCACHE *pCache, RTGCPHYS GCPhys, uint8_t *pbR3)
 {
     uint32_t iCacheIndex;

@@ -1114 +1115 @@

     GCPhys = PHYS_PAGE_ADDRESS(GCPhys);
-    pbHC   = (uint8_t *)PAGE_ADDRESS(pbHC);
+    pbR3   = (uint8_t *)PAGE_ADDRESS(pbR3);

     iCacheIndex = ((GCPhys >> PAGE_SHIFT) & PGM_MAX_PHYSCACHE_ENTRIES_MASK);

@@ -1121 +1122 @@

     pCache->Entry[iCacheIndex].GCPhys = GCPhys;
-    pCache->Entry[iCacheIndex].pbHC   = pbHC;
+    pCache->Entry[iCacheIndex].pbR3   = pbR3;
 }
 #endif

@@ -1219 +1220 @@
                 if (rc == VINF_PGM_HANDLER_DO_DEFAULT)
                 {
-#ifdef IN_GC /** @todo @bugref{3202}: R0 too */
+#if defined(IN_GC) || defined(VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0)
                     void *pvSrc = NULL;
-                    PGMGCDynMapHCPage(pVM, PGM_PAGE_GET_HCPHYS(pPage), &pvSrc);
-                    pvSrc = (char *)pvSrc + (off & PAGE_OFFSET_MASK);
+                    PGMDynMapHCPageOff(pVM, PGM_PAGE_GET_HCPHYS(pPage) + (off & PAGE_OFFSET_MASK), &pvSrc);
 #else
                     void *pvSrc = PGMRAMRANGE_GETHCPTR(pRam, off)

@@ -1268 +1268 @@
                 if (rc == VINF_PGM_HANDLER_DO_DEFAULT)
                 {
-#ifdef IN_GC /** @todo @bugref{3202}: R0 too */
+#if defined(IN_GC) || defined(VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0)
                     void *pvSrc = NULL;
-                    PGMGCDynMapHCPage(pVM, PGM_PAGE_GET_HCPHYS(pPage), &pvSrc);
-                    pvSrc = (char *)pvSrc + (off & PAGE_OFFSET_MASK);
+                    PGMDynMapHCPageOff(pVM, PGM_PAGE_GET_HCPHYS(pPage) + (off & PAGE_OFFSET_MASK), &pvSrc);
 #else
                     void *pvSrc = PGMRAMRANGE_GETHCPTR(pRam, off)

@@ -1298 +1297 @@
             case MM_RAM_FLAGS_MMIO2: // MMIO2 isn't in the mask.
             {
-#ifdef IN_GC /** @todo @bugref{3202}: R0 too */
+#if defined(IN_GC) || defined(VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0)
                 void *pvSrc = NULL;
-                PGMGCDynMapHCPage(pVM, PGM_PAGE_GET_HCPHYS(pPage), &pvSrc);
-                pvSrc = (char *)pvSrc + (off & PAGE_OFFSET_MASK);
+                PGMDynMapHCPageOff(pVM, PGM_PAGE_GET_HCPHYS(pPage) + (off & PAGE_OFFSET_MASK), &pvSrc);
 #else
                 void *pvSrc = PGMRAMRANGE_GETHCPTR(pRam, off)

@@ -1522 +1520 @@
                 if (rc == VINF_PGM_HANDLER_DO_DEFAULT)
                 {
-#ifdef IN_GC /** @todo @bugref{3202}: R0 too */
+#if defined(IN_GC) || defined(VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0)
                     void *pvDst = NULL;
-                    PGMGCDynMapHCPage(pVM, PGM_PAGE_GET_HCPHYS(pPage), &pvDst);
-                    pvDst = (char *)pvDst + (off & PAGE_OFFSET_MASK);
+                    PGMDynMapHCPageOff(pVM, PGM_PAGE_GET_HCPHYS(pPage) + (off & PAGE_OFFSET_MASK), &pvDst);
 #else
                     void *pvDst = PGMRAMRANGE_GETHCPTR(pRam, off)

@@ -1567 +1564 @@
                 if (rc == VINF_PGM_HANDLER_DO_DEFAULT)
                 {
-#ifdef IN_GC /** @todo @bugref{3202}: R0 too */
+#if defined(IN_GC) || defined(VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0)
                     void *pvDst = NULL;
-                    PGMGCDynMapHCPage(pVM, PGM_PAGE_GET_HCPHYS(pPage), &pvDst);
-                    pvDst = (char *)pvDst + (off & PAGE_OFFSET_MASK);
+                    PGMDynMapHCPageOff(pVM, PGM_PAGE_GET_HCPHYS(pPage) + (off & PAGE_OFFSET_MASK), &pvDst);
 #else
                     void *pvDst = PGMRAMRANGE_GETHCPTR(pRam, off)

@@ -1612 +1608 @@
                 if (rc == VINF_PGM_HANDLER_DO_DEFAULT)
                 {
-#ifdef IN_GC /** @todo @bugref{3202}: R0 too */
+#if defined(IN_GC) || defined(VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0)
                     void *pvDst = NULL;
-                    PGMGCDynMapHCPage(pVM, PGM_PAGE_GET_HCPHYS(pPage), &pvDst);
-                    pvDst = (char *)pvDst + (off & PAGE_OFFSET_MASK);
+                    PGMDynMapHCPageOff(pVM, PGM_PAGE_GET_HCPHYS(pPage) + (off & PAGE_OFFSET_MASK), &pvDst);
 #else
                     void *pvDst = PGMRAMRANGE_GETHCPTR(pRam, off)

@@ -1641 +1636 @@
             case MM_RAM_FLAGS_ROM | MM_RAM_FLAGS_MMIO2: /* shadow rom */
             {
-#ifdef IN_GC /** @todo @bugref{3202}: R0 too */
+#if defined(IN_GC) || defined(VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0)
                 void *pvDst = NULL;
-                PGMGCDynMapHCPage(pVM, PGM_PAGE_GET_HCPHYS(pPage), &pvDst);
-                pvDst = (char *)pvDst + (off & PAGE_OFFSET_MASK);
+                PGMDynMapHCPageOff(pVM, PGM_PAGE_GET_HCPHYS(pPage) + (off & PAGE_OFFSET_MASK), &pvDst);
 #else
                 void *pvDst = PGMRAMRANGE_GETHCPTR(pRam, off)

@@ -1723 +1717 @@
 }

-#ifndef IN_GC /* Ring 0 & 3 only */ /** @todo @bugref{1865,3202}: this'll be fun! */
+#if !defined(IN_GC) && !defined(VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0) /* Ring 0 & 3 only */ /** @todo @bugref{1865,3202}: this'll be fun! */
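Editorial note: most of the read/write hunks above repeat one mechanical transformation: the two-step "map the aligned page, then offset the pointer by hand" sequence becomes a single PGMDynMapHCPageOff call that carries the byte offset in the low bits of the physical address. Side by side, for illustration:

    void *pvSrc = NULL;

    /* old, raw-mode-only: map the page-aligned address, offset afterwards */
    PGMGCDynMapHCPage(pVM, PGM_PAGE_GET_HCPHYS(pPage), &pvSrc);
    pvSrc = (char *)pvSrc + (off & PAGE_OFFSET_MASK);

    /* new, shared with darwin ring-0: the offset rides along in the address
     * and PGMDynMapHCPageOff ORs it back into the returned pointer */
    PGMDynMapHCPageOff(pVM, PGM_PAGE_GET_HCPHYS(pPage) + (off & PAGE_OFFSET_MASK), &pvSrc);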
trunk/src/VBox/VMM/VMMAll/PGMAllPool.cpp
r13042 → r13060

@@ -87 +87 @@


-#ifdef IN_GC
+#if defined(IN_GC) || defined(VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0)
 /**
  * Maps a pool page into the current context.

@@ -95 +95 @@
  * @param   pPage   The page to map.
  */
-void *pgmGCPoolMapPage(PVM pVM, PPGMPOOLPAGE pPage)
+void *pgmPoolMapPage(PVM pVM, PPGMPOOLPAGE pPage)
 {
     /* general pages. */
     if (pPage->idx >= PGMPOOL_IDX_FIRST)
     {
-        Assert(pPage->idx < pVM->pgm.s.pPoolGC->cCurPages);
+        Assert(pPage->idx < pVM->pgm.s.CTX_SUFF(pPool)->cCurPages);
         void *pv;
-        int rc = PGMGCDynMapHCPage(pVM, pPage->Core.Key, &pv);
+        int rc = PGMDynMapHCPage(pVM, pPage->Core.Key, &pv);
         AssertReleaseRC(rc);
         return pv;
     }

     /* special pages. */
+# ifdef IN_GC
     switch (pPage->idx)
     {
…
             return NULL;
     }
-}
-#endif /* IN_GC */
+# else  /* VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0 */
+    RTHCPHYS HCPhys;
+    switch (pPage->idx)
+    {
+        case PGMPOOL_IDX_PD:
+            HCPhys = pVM->pgm.s.HCPhys32BitPD;
+            break;
+        case PGMPOOL_IDX_PAE_PD:
+        case PGMPOOL_IDX_PAE_PD_0:
+            HCPhys = pVM->pgm.s.aHCPhysPaePDs[0];
+            break;
+        case PGMPOOL_IDX_PAE_PD_1:
+            HCPhys = pVM->pgm.s.aHCPhysPaePDs[1];
+            break;
+        case PGMPOOL_IDX_PAE_PD_2:
+            HCPhys = pVM->pgm.s.aHCPhysPaePDs[2];
+            break;
+        case PGMPOOL_IDX_PAE_PD_3:
+            HCPhys = pVM->pgm.s.aHCPhysPaePDs[3];
+            break;
+        case PGMPOOL_IDX_PDPT:
+            HCPhys = pVM->pgm.s.HCPhysPaePDPT;
+            break;
+        default:
+            AssertReleaseMsgFailed(("Invalid index %d\n", pPage->idx));
+            return NULL;
+    }
+    void *pv;
+    int rc = PGMDynMapHCPage(pVM, pPage->Core.Key, &pv);
+    AssertReleaseRC(rc);
+    return pv;
+# endif /* VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0 */
+}
+#endif /* IN_GC || VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0 */

@@ -213 +247 @@
 #ifdef IN_GC
     return (const void *)((RTGCUINTPTR)pvFault & ~(RTGCUINTPTR)(cbEntry - 1));
+
+#elif defined(VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0)
+    void *pvRet;
+    int rc = PGMDynMapGCPageOff(pPool->pVMHC, GCPhysFault & ~(RTGCPHYS)(cbEntry - 1), &pvRet);
+    AssertFatalRCSuccess(rc);
+    return pvRet;

 #elif defined(IN_RING0)

@@ -950 +990 @@
 DECLEXPORT(int) pgmPoolAccessHandler(PVM pVM, RTGCUINT uErrorCode, PCPUMCTXCORE pRegFrame, RTGCPTR pvFault, RTGCPHYS GCPhysFault, void *pvUser)
 {
-    STAM_PROFILE_START(&pVM->pgm.s.CTXSUFF(pPool)->CTXSUFF(StatMonitor), a);
-    PPGMPOOL pPool = pVM->pgm.s.CTXSUFF(pPool);
+    STAM_PROFILE_START(&pVM->pgm.s.CTX_SUFF(pPool)->CTXSUFF(StatMonitor), a);
+    PPGMPOOL pPool = pVM->pgm.s.CTX_SUFF(pPool);
     PPGMPOOLPAGE pPage = (PPGMPOOLPAGE)pvUser;
     LogFlow(("pgmPoolAccessHandler: pvFault=%VGv pPage=%p:{.idx=%d} GCPhysFault=%VGp\n", pvFault, pPage, pPage->idx, GCPhysFault));

@@ -983 +1023 @@
     {
         rc = pgmPoolAccessHandlerSimple(pVM, pPool, pPage, &Cpu, pRegFrame, GCPhysFault, pvFault);
-        STAM_PROFILE_STOP_EX(&pVM->pgm.s.CTXSUFF(pPool)->CTXSUFF(StatMonitor), &pPool->CTXMID(StatMonitor,Handled), a);
+        STAM_PROFILE_STOP_EX(&pVM->pgm.s.CTX_SUFF(pPool)->CTXSUFF(StatMonitor), &pPool->CTXMID(StatMonitor,Handled), a);
         return rc;
     }

@@ -1005 +1045 @@
         {
             rc = pgmPoolAccessHandlerSTOSD(pVM, pPool, pPage, &Cpu, pRegFrame, GCPhysFault, pvFault);
-            STAM_PROFILE_STOP_EX(&pVM->pgm.s.CTXSUFF(pPool)->CTXSUFF(StatMonitor), &pPool->CTXMID(StatMonitor,RepStosd), a);
+            STAM_PROFILE_STOP_EX(&pVM->pgm.s.CTX_SUFF(pPool)->CTXSUFF(StatMonitor), &pPool->CTXMID(StatMonitor,RepStosd), a);
             return rc;
         }

@@ -1026 +1066 @@
     if (rc == VINF_EM_RAW_EMULATE_INSTR && fReused)
         rc = VINF_SUCCESS;
-    STAM_PROFILE_STOP_EX(&pVM->pgm.s.CTXSUFF(pPool)->CTXSUFF(StatMonitor), &pPool->CTXMID(StatMonitor,FlushPage), a);
+    STAM_PROFILE_STOP_EX(&pVM->pgm.s.CTX_SUFF(pPool)->CTXSUFF(StatMonitor), &pPool->CTXMID(StatMonitor,FlushPage), a);
     return rc;
 }

@@ -1836 +1876 @@
 void pgmPoolMonitorModifiedClearAll(PVM pVM)
 {
-    PPGMPOOL pPool = pVM->pgm.s.CTXSUFF(pPool);
+    PPGMPOOL pPool = pVM->pgm.s.CTX_SUFF(pPool);
     LogFlow(("pgmPoolMonitorModifiedClearAll: cModifiedPages=%d\n", pPool->cModifiedPages));

@@ -1865 +1905 @@
 void pgmPoolClearAll(PVM pVM)
 {
-    PPGMPOOL pPool = pVM->pgm.s.CTXSUFF(pPool);
+    PPGMPOOL pPool = pVM->pgm.s.CTX_SUFF(pPool);
     STAM_PROFILE_START(&pPool->StatClearAll, c);
     LogFlow(("pgmPoolClearAll: cUsedPages=%d\n", pPool->cUsedPages));

@@ -2375 +2415 @@
 {
     LogFlow(("pgmPoolTrackFlushGCPhysPT: HCPhys=%RHp iShw=%d cRefs=%d\n", pPhysPage->HCPhys, iShw, cRefs));
-    PPGMPOOL pPool = pVM->pgm.s.CTXSUFF(pPool);
+    PPGMPOOL pPool = pVM->pgm.s.CTX_SUFF(pPool);

     /*

@@ -2463 +2503 @@
 void pgmPoolTrackFlushGCPhysPT(PVM pVM, PPGMPAGE pPhysPage, uint16_t iShw, uint16_t cRefs)
 {
-    PPGMPOOL pPool = pVM->pgm.s.CTXSUFF(pPool); NOREF(pPool);
+    PPGMPOOL pPool = pVM->pgm.s.CTX_SUFF(pPool); NOREF(pPool);
     LogFlow(("pgmPoolTrackFlushGCPhysPT: HCPhys=%RHp iShw=%d cRefs=%d\n", pPhysPage->HCPhys, iShw, cRefs));
     STAM_PROFILE_START(&pPool->StatTrackFlushGCPhysPT, f);

@@ -2481 +2521 @@
 void pgmPoolTrackFlushGCPhysPTs(PVM pVM, PPGMPAGE pPhysPage, uint16_t iPhysExt)
 {
-    PPGMPOOL pPool = pVM->pgm.s.CTXSUFF(pPool);
+    PPGMPOOL pPool = pVM->pgm.s.CTX_SUFF(pPool);
     STAM_PROFILE_START(&pPool->StatTrackFlushGCPhysPTs, f);
     LogFlow(("pgmPoolTrackFlushGCPhysPTs: HCPhys=%RHp iPhysExt\n", pPhysPage->HCPhys, iPhysExt));

@@ -2528 +2568 @@
 int pgmPoolTrackFlushGCPhysPTsSlow(PVM pVM, PPGMPAGE pPhysPage)
 {
-    PPGMPOOL pPool = pVM->pgm.s.CTXSUFF(pPool);
+    PPGMPOOL pPool = pVM->pgm.s.CTX_SUFF(pPool);
     STAM_PROFILE_START(&pPool->StatTrackFlushGCPhysPTsSlow, s);
     LogFlow(("pgmPoolTrackFlushGCPhysPTsSlow: cUsedPages=%d cPresent=%d HCPhys=%RHp\n",

@@ -2764 +2804 @@
 PPGMPOOLPHYSEXT pgmPoolTrackPhysExtAlloc(PVM pVM, uint16_t *piPhysExt)
 {
-    PPGMPOOL pPool = pVM->pgm.s.CTXSUFF(pPool);
+    PPGMPOOL pPool = pVM->pgm.s.CTX_SUFF(pPool);
     uint16_t iPhysExt = pPool->iPhysExtFreeHead;
     if (iPhysExt == NIL_PGMPOOL_PHYSEXT_INDEX)

@@ -2787 +2827 @@
 void pgmPoolTrackPhysExtFree(PVM pVM, uint16_t iPhysExt)
 {
-    PPGMPOOL pPool = pVM->pgm.s.CTXSUFF(pPool);
+    PPGMPOOL pPool = pVM->pgm.s.CTX_SUFF(pPool);
     Assert(iPhysExt < pPool->cMaxPhysExts);
     PPGMPOOLPHYSEXT pPhysExt = &pPool->CTXSUFF(paPhysExts)[iPhysExt];

@@ -2805 +2845 @@
 void pgmPoolTrackPhysExtFreeList(PVM pVM, uint16_t iPhysExt)
 {
-    PPGMPOOL pPool = pVM->pgm.s.CTXSUFF(pPool);
+    PPGMPOOL pPool = pVM->pgm.s.CTX_SUFF(pPool);

     const uint16_t iPhysExtStart = iPhysExt;

@@ -2836 +2876 @@
 static uint16_t pgmPoolTrackPhysExtInsert(PVM pVM, uint16_t iPhysExt, uint16_t iShwPT)
 {
-    PPGMPOOL pPool = pVM->pgm.s.CTXSUFF(pPool);
+    PPGMPOOL pPool = pVM->pgm.s.CTX_SUFF(pPool);
     PPGMPOOLPHYSEXT paPhysExts = pPool->CTXSUFF(paPhysExts);

@@ -3875 +3915 @@
 int pgmPoolAlloc(PVM pVM, RTGCPHYS GCPhys, PGMPOOLKIND enmKind, uint16_t iUser, uint32_t iUserTable, PPPGMPOOLPAGE ppPage)
 {
-    PPGMPOOL pPool = pVM->pgm.s.CTXSUFF(pPool);
+    PPGMPOOL pPool = pVM->pgm.s.CTX_SUFF(pPool);
     STAM_PROFILE_ADV_START(&pPool->StatAlloc, a);
     LogFlow(("pgmPoolAlloc: GCPhys=%VGp enmKind=%d iUser=%#x iUserTable=%#x\n", GCPhys, enmKind, iUser, iUserTable));

@@ -3998 +4038 @@
 {
     LogFlow(("pgmPoolFree: HCPhys=%VHp iUser=%#x iUserTable=%#x\n", HCPhys, iUser, iUserTable));
-    PPGMPOOL pPool = pVM->pgm.s.CTXSUFF(pPool);
+    PPGMPOOL pPool = pVM->pgm.s.CTX_SUFF(pPool);
     pgmPoolFreeByPage(pPool, pgmPoolGetPage(pPool, HCPhys), iUser, iUserTable);
 }

@@ -4014 +4054 @@
 {
     /** @todo profile this! */
-    PPGMPOOL pPool = pVM->pgm.s.CTXSUFF(pPool);
+    PPGMPOOL pPool = pVM->pgm.s.CTX_SUFF(pPool);
     PPGMPOOLPAGE pPage = pgmPoolGetPage(pPool, HCPhys);
     Log3(("pgmPoolGetPageByHCPhys: HCPhys=%VHp -> %p:{.idx=%d .GCPhys=%VGp .enmKind=%d}\n",

@@ -4033 +4073 @@
 {
     LogFlow(("pgmPoolFlushAll:\n"));
-    pgmPoolFlushAllInt(pVM->pgm.s.CTXSUFF(pPool));
-}
+    pgmPoolFlushAllInt(pVM->pgm.s.CTX_SUFF(pPool));
+}
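Editorial note: with pgmGCPoolMapPage generalised to pgmPoolMapPage, the PGMPOOL_PAGE_2_PTR macro from PGMInternal.h now behaves the same in raw-mode and darwin ring-0. A hypothetical consumer, assuming pShwPage is a pool page descriptor for a shadow PAE page table and iPte a valid index:

    /* Illustrative only: get a CPU-addressable view of a pool-managed shadow
     * page table and clear one entry. In ring-3 this is a plain pointer read
     * (pvPageHC); in GC/R0-darwin it goes through the dynamic mapping area,
     * so the pointer is short-lived. */
    PX86PTPAE pShwPT = (PX86PTPAE)PGMPOOL_PAGE_2_PTR(pVM, pShwPage);
    pShwPT->a[iPte].u = 0;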
trunk/src/VBox/VMM/VMMGC/PGMGC.cpp
r13035 → r13060

@@ -165 +165 @@


-/**
- * Temporarily maps one guest page specified by GC physical address.
- * These pages must have a physical mapping in HC, i.e. they cannot be MMIO pages.
- *
- * Be WARNED that the dynamic page mapping area is small, 8 pages, thus the space is
- * reused after 8 mappings (or perhaps a few more if you score with the cache).
- *
- * @returns VBox status.
- * @param   pVM     VM handle.
- * @param   GCPhys  GC Physical address of the page.
- * @param   ppv     Where to store the address of the mapping.
- */
-VMMRCDECL(int) PGMGCDynMapGCPage(PVM pVM, RTGCPHYS GCPhys, void **ppv)
-{
-    AssertMsg(!(GCPhys & PAGE_OFFSET_MASK), ("GCPhys=%VGp\n", GCPhys));
-
-    /*
-     * Get the ram range.
-     */
-    PPGMRAMRANGE pRam = pVM->pgm.s.pRamRangesRC;
-    while (pRam && GCPhys - pRam->GCPhys >= pRam->cb)
-        pRam = pRam->pNextRC;
-    if (!pRam)
-    {
-        AssertMsgFailed(("Invalid physical address %VGp!\n", GCPhys));
-        return VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS;
-    }
-
-    /*
-     * Pass it on to PGMGCDynMapHCPage.
-     */
-    RTHCPHYS HCPhys = PGM_PAGE_GET_HCPHYS(&pRam->aPages[(GCPhys - pRam->GCPhys) >> PAGE_SHIFT]);
-    //Log(("PGMGCDynMapGCPage: GCPhys=%VGp HCPhys=%VHp\n", GCPhys, HCPhys));
-    return PGMGCDynMapHCPage(pVM, HCPhys, ppv);
-}
-
-
-/**
- * Temporarily maps one guest page specified by unaligned GC physical address.
- * These pages must have a physical mapping in HC, i.e. they cannot be MMIO pages.
- *
- * Be WARNED that the dynamic page mapping area is small, 8 pages, thus the space is
- * reused after 8 mappings (or perhaps a few more if you score with the cache).
- *
- * The caller is aware that only the speicifed page is mapped and that really bad things
- * will happen if writing beyond the page!
- *
- * @returns VBox status.
- * @param   pVM     VM handle.
- * @param   GCPhys  GC Physical address within the page to be mapped.
- * @param   ppv     Where to store the address of the mapping address corresponding to GCPhys.
- */
-VMMRCDECL(int) PGMGCDynMapGCPageEx(PVM pVM, RTGCPHYS GCPhys, void **ppv)
-{
-    /*
-     * Get the ram range.
-     */
-    PPGMRAMRANGE pRam = pVM->pgm.s.pRamRangesRC;
-    while (pRam && GCPhys - pRam->GCPhys >= pRam->cb)
-        pRam = pRam->pNextRC;
-    if (!pRam)
-    {
-        AssertMsgFailed(("Invalid physical address %VGp!\n", GCPhys));
-        return VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS;
-    }
-
-    /*
-     * Pass it on to PGMGCDynMapHCPage.
-     */
-    RTHCPHYS HCPhys = PGM_PAGE_GET_HCPHYS(&pRam->aPages[(GCPhys - pRam->GCPhys) >> PAGE_SHIFT]);
-    int rc = PGMGCDynMapHCPage(pVM, HCPhys, ppv);
-    if (VBOX_SUCCESS(rc))
-        *ppv = (void *)((uintptr_t)*ppv | (GCPhys & PAGE_OFFSET_MASK));
-    return rc;
-}
-
-
-/**
- * Temporarily maps one host page specified by HC physical address.
- *
- * Be WARNED that the dynamic page mapping area is small, 8 pages, thus the space is
- * reused after 8 mappings (or perhaps a few more if you score with the cache).
- *
- * @returns VBox status.
- * @param   pVM     VM handle.
- * @param   HCPhys  HC Physical address of the page.
- * @param   ppv     Where to store the address of the mapping.
- */
-VMMRCDECL(int) PGMGCDynMapHCPage(PVM pVM, RTHCPHYS HCPhys, void **ppv)
-{
-    AssertMsg(!(HCPhys & PAGE_OFFSET_MASK), ("HCPhys=%VHp\n", HCPhys));
-
-    /*
-     * Check the cache.
-     */
-    register unsigned iCache;
-    if (    pVM->pgm.s.aHCPhysDynPageMapCache[iCache = 0] == HCPhys
-        ||  pVM->pgm.s.aHCPhysDynPageMapCache[iCache = 1] == HCPhys
-        ||  pVM->pgm.s.aHCPhysDynPageMapCache[iCache = 2] == HCPhys
-        ||  pVM->pgm.s.aHCPhysDynPageMapCache[iCache = 3] == HCPhys)
-    {
-        static const uint8_t au8Trans[MM_HYPER_DYNAMIC_SIZE >> PAGE_SHIFT][RT_ELEMENTS(pVM->pgm.s.aHCPhysDynPageMapCache)] =
-        {
-            { 0, 5, 6, 7 },
-            { 0, 1, 6, 7 },
-            { 0, 1, 2, 7 },
-            { 0, 1, 2, 3 },
-            { 4, 1, 2, 3 },
-            { 4, 5, 2, 3 },
-            { 4, 5, 6, 3 },
-            { 4, 5, 6, 7 },
-        };
-        Assert(RT_ELEMENTS(au8Trans) == 8);
-        Assert(RT_ELEMENTS(au8Trans[0]) == 4);
-        int iPage = au8Trans[pVM->pgm.s.iDynPageMapLast][iCache];
-        void *pv = pVM->pgm.s.pbDynPageMapBaseGC + (iPage << PAGE_SHIFT);
-        *ppv = pv;
-        STAM_COUNTER_INC(&pVM->pgm.s.StatDynMapCacheHits);
-        //Log(("PGMGCDynMapHCPage: HCPhys=%VHp pv=%VGv iPage=%d iCache=%d\n", HCPhys, pv, iPage, iCache));
-        return VINF_SUCCESS;
-    }
-    Assert(RT_ELEMENTS(pVM->pgm.s.aHCPhysDynPageMapCache) == 4);
-    STAM_COUNTER_INC(&pVM->pgm.s.StatDynMapCacheMisses);
-
-    /*
-     * Update the page tables.
-     */
-    register unsigned iPage = pVM->pgm.s.iDynPageMapLast;
-    pVM->pgm.s.iDynPageMapLast = iPage = (iPage + 1) & ((MM_HYPER_DYNAMIC_SIZE >> PAGE_SHIFT) - 1);
-    Assert((MM_HYPER_DYNAMIC_SIZE >> PAGE_SHIFT) == 8);
-
-    pVM->pgm.s.aHCPhysDynPageMapCache[iPage & (RT_ELEMENTS(pVM->pgm.s.aHCPhysDynPageMapCache) - 1)] = HCPhys;
-    pVM->pgm.s.paDynPageMap32BitPTEsGC[iPage].u = (uint32_t)HCPhys | X86_PTE_P | X86_PTE_A | X86_PTE_D;
-    pVM->pgm.s.paDynPageMapPaePTEsGC[iPage].u   = HCPhys | X86_PTE_P | X86_PTE_A | X86_PTE_D;
-
-    void *pv = pVM->pgm.s.pbDynPageMapBaseGC + (iPage << PAGE_SHIFT);
-    *ppv = pv;
-    ASMInvalidatePage(pv);
-    Log4(("PGMGCDynMapHCPage: HCPhys=%VHp pv=%VGv iPage=%d\n", HCPhys, pv, iPage));
-    return VINF_SUCCESS;
-}
-

 /**
trunk/src/VBox/VMM/testcase/tstVMStructGC.cpp
r13046 → r13060

@@ -452 +452 @@
     GEN_CHECK_OFF(PGM, pRomRangesR3);
     GEN_CHECK_OFF(PGM, pRomRangesR0);
-    GEN_CHECK_OFF(PGM, pRomRangesGC);
+    GEN_CHECK_OFF(PGM, pRomRangesRC);
     GEN_CHECK_OFF(PGM, cbRamSize);
     GEN_CHECK_OFF(PGM, pTreesHC);

@@ -479 +479 @@
     GEN_CHECK_OFF(PGM, fSyncFlags);
     GEN_CHECK_OFF(PGM, CritSect);
-#ifdef PGM_PD_CACHING_ENABLED
-    GEN_CHECK_OFF(PGM, pdcache);
-#endif
+    GEN_CHECK_OFF(PGM, pPoolR3);
+    GEN_CHECK_OFF(PGM, pPoolR0);
+    GEN_CHECK_OFF(PGM, pPoolRC);
+    GEN_CHECK_OFF(PGM, fNoMorePhysWrites);
+    GEN_CHECK_OFF(PGM, fPhysCacheFlushPending);
     GEN_CHECK_OFF(PGM, pgmphysreadcache);
     GEN_CHECK_OFF(PGM, pgmphyswritecache);

@@ -582 +584 @@
     GEN_CHECK_OFF(PGMROMRANGE, pNextR3);
     GEN_CHECK_OFF(PGMROMRANGE, pNextR0);
-    GEN_CHECK_OFF(PGMROMRANGE, pNextGC);
+    GEN_CHECK_OFF(PGMROMRANGE, pNextRC);
     GEN_CHECK_OFF(PGMROMRANGE, GCPhys);
     GEN_CHECK_OFF(PGMROMRANGE, GCPhysLast);
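Editorial note: tstVMStructGC exists to catch exactly the kind of member renames this changeset makes. It is built with the raw-mode context compiler and emits the offset of every listed member, which a host-context test then compares against its own layout. A rough sketch of the idea (an assumption about the mechanism; the real macros live in the testcase sources):

    #include <stdio.h>
    #include <stddef.h>

    /* Emit one offset check per member; the generated output is compiled into
     * the host-context test, so a mismatched struct layout between the GC and
     * HC compilers fails the build instead of corrupting the running VM. */
    #define GEN_CHECK_OFF(s, m) \
        printf("    CHECK_OFF(%s, %u, %s);\n", #s, (unsigned)offsetof(s, m), #m)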