Changeset 104840 in vbox for trunk/src/VBox/VMM/include
- Timestamp: Jun 5, 2024 12:59:51 AM
- Location: trunk/src/VBox/VMM/include
- Files: 2 edited
Legend:
- Unmodified lines: leading space
- Added lines: leading '+'
- Removed lines: leading '-'
trunk/src/VBox/VMM/include/IOMInternal.h
r103152 → r104840

@@ -275 +275 @@
     bool volatile fMapped;
     /** Set if there is an ring-0 entry too. */
-    bool fRing0;
+    bool fRing0 : 1;
     /** Set if there is an raw-mode entry too. */
-    bool fRawMode;
-    uint8_t bPadding;
+    bool fRawMode : 1;
+    bool fPadding : 6;
+    /** Pre-registered ad-hoc RAM range ID. */
+    uint16_t idRamRange;
     /** Same as the handle index. */
     uint16_t idxSelf;
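In this change the two flag bytes become one-bit fields, which frees the former padding byte so the new 16-bit idRamRange fits without growing the entry. Below is a stand-alone C++ sketch of that layout effect; EntryOld and EntryNew are hypothetical types, not the real IOM entry structures (which carry further members).

#include <cstdint>

// Hypothetical before/after layouts illustrating the IOMInternal.h change:
// turning the flag bytes into one-bit fields frees the old padding byte, so
// the new 16-bit idRamRange fits without growing the structure.
struct EntryOld
{
    bool volatile fMapped;
    bool          fRing0;
    bool          fRawMode;
    uint8_t       bPadding;
    uint16_t      idxSelf;
};

struct EntryNew
{
    bool volatile fMapped;
    bool          fRing0   : 1;
    bool          fRawMode : 1;
    bool          fPadding : 6;
    uint16_t      idRamRange;   // pre-registered ad-hoc RAM range ID
    uint16_t      idxSelf;
};

static_assert(sizeof(EntryNew) == sizeof(EntryOld),
              "same footprint on typical ABIs (6 bytes each)");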
trunk/src/VBox/VMM/include/PGMInternal.h
r100998 → r104840

@@ -114 +114 @@
 # define PGM_SYNC_NR_PAGES 8
 #endif
+
+/** Maximum number of RAM ranges.
+ * @note This can be increased to 4096 (at least when targeting x86). */
+#define PGM_MAX_RAM_RANGES 3072
+
+/** Maximum pages per RAM range.
+ *
+ * The PGMRAMRANGE structures for the high memory can get very big. There
+ * used to be some limitations on SUPR3PageAllocEx allocation sizes, so
+ * traditionally we limited this to 16MB chunks. These days we do ~64 MB
+ * chunks each covering 16GB of guest RAM, making sure each range is a
+ * multiple of 1GB to enable eager hosts to use 1GB pages for NEM mode.
+ *
+ * See also pgmPhysMmio2CalcChunkCount.
+ */
+#define PGM_MAX_PAGES_PER_RAM_RANGE _4M
+#if defined(X86_PD_PAE_SHIFT) && defined(AssertCompile)
+AssertCompile(RT_ALIGN_32(PGM_MAX_PAGES_PER_RAM_RANGE, X86_PD_PAE_SHIFT - X86_PAGE_SHIFT)); /* NEM large page requirement: 1GB pages. */
+#endif
+
+/** The maximum number of MMIO2 ranges. */
+#define PGM_MAX_MMIO2_RANGES 32
+/** The maximum number of pages in a MMIO2 PCI region.
+ *
+ * The memory for a MMIO2 PCI region is a single chunk of host virtual memory,
+ * but may be handled internally by PGM as a set of multiple MMIO2/RAM ranges,
+ * since PGM_MAX_PAGES_PER_RAM_RANGE is currently lower than this value (4 GiB
+ * vs 16 GiB).
+ */
+#define PGM_MAX_PAGES_PER_MMIO2_REGION _16M
+
+/** Maximum number of ROM ranges. */
+#define PGM_MAX_ROM_RANGES 16
+/** The maximum pages per ROM range.
+ * Currently 512K pages, or 2GB with 4K pages. */
+#define PGM_MAX_PAGES_PER_ROM_RANGE _512K
+AssertCompile(PGM_MAX_PAGES_PER_ROM_RANGE <= PGM_MAX_PAGES_PER_RAM_RANGE);
 
 /**

@@ -1289 +1326 @@
 
 /**
- * RAM range for GC Phys to HC Phys conversion.
- *
- * Can be used for HC Virt to GC Phys and HC Virt to HC Phys
- * conversions too, but we'll let MM handle that for now.
- *
- * This structure is used by linked lists in both GC and HC.
+ * RAM range lookup table entry.
+ */
+typedef union PGMRAMRANGELOOKUPENTRY
+{
+    RT_GCC_EXTENSION struct
+    {
+        /** Page aligned start address of the range, with page offset holding the ID. */
+        RTGCPHYS GCPhysFirstAndId;
+        /** The last address in the range (inclusive). Page aligned (-1). */
+        RTGCPHYS GCPhysLast;
+    };
+    /** Alternative 128-bit view for atomic updating. */
+    RTUINT128U volatile u128Volatile;
+    /** Alternative 128-bit view for atomic updating. */
+    RTUINT128U u128Normal;
+} PGMRAMRANGELOOKUPENTRY;
+/** Pointer to a lookup table entry. */
+typedef PGMRAMRANGELOOKUPENTRY *PPGMRAMRANGELOOKUPENTRY;
+
+/** Extracts the ID from PGMRAMRANGELOOKUPENTRY::GCPhysFirstAndId. */
+#define PGMRAMRANGELOOKUPENTRY_GET_ID(a_LookupEntry)    ((uint32_t)((a_LookupEntry).GCPhysFirstAndId & GUEST_PAGE_OFFSET_MASK))
+/** Extracts the GCPhysFirst from PGMRAMRANGELOOKUPENTRY::GCPhysFirstAndId. */
+#define PGMRAMRANGELOOKUPENTRY_GET_FIRST(a_LookupEntry) (((a_LookupEntry).GCPhysFirstAndId) & ~(RTGCPHYS)GUEST_PAGE_OFFSET_MASK)
+
+
+/**
+ * RAM range for GC Phys to HC Phys & R3 Ptr conversion.
+ *
+ * This structure is addressed via context specific pointer tables. Lookup is
+ * organized via the lookup table (PGMRAMRANGELOOKUPENTRY).
  */
 typedef struct PGMRAMRANGE

@@ -1300 +1361 @@
     /** Start of the range. Page aligned. */
     RTGCPHYS GCPhys;
-    /** Size of the range. (Page aligned of course). */
+    /** Size of the range. (Page aligned of course).
+     * Ring-0 duplicates this in a PGMR0PERVM::acRamRangePages (shifted by
+     * guest page size). */
     RTGCPHYS cb;
-    /** Pointer to the next RAM range - for R3. */
-    R3PTRTYPE(struct PGMRAMRANGE *) pNextR3;
-    /** Pointer to the next RAM range - for R0. */
-    R0PTRTYPE(struct PGMRAMRANGE *) pNextR0;
     /** PGM_RAM_RANGE_FLAGS_* flags. */
     uint32_t fFlags;

@@ -1313 +1372 @@
     RTGCPHYS GCPhysLast;
     /** Start of the HC mapping of the range. This is only used for MMIO2 and in NEM mode. */
-    R3PTRTYPE(void *) pvR3;
+    R3PTRTYPE(uint8_t *) pbR3;
+    /** The RAM range identifier (index into the pointer table). */
+    uint32_t idRange;
+#if HC_ARCH_BITS != 32
+    /** Padding to make aPage aligned on sizeof(PGMPAGE). */
+    uint32_t au32Alignment2[HC_ARCH_BITS == 32 ? 0 : 1];
+#endif
     /** Live save per page tracking data. */
     R3PTRTYPE(PPGMLIVESAVERAMPAGE) paLSPages;
     /** The range description. */
     R3PTRTYPE(const char *) pszDesc;
-    /** Pointer to self - R0 pointer. */
-    R0PTRTYPE(struct PGMRAMRANGE *) pSelfR0;
-
-    /** Pointer to the left search three node - ring-3 context. */
-    R3PTRTYPE(struct PGMRAMRANGE *) pLeftR3;
-    /** Pointer to the right search three node - ring-3 context. */
-    R3PTRTYPE(struct PGMRAMRANGE *) pRightR3;
-    /** Pointer to the left search three node - ring-0 context. */
-    R0PTRTYPE(struct PGMRAMRANGE *) pLeftR0;
-    /** Pointer to the right search three node - ring-0 context. */
-    R0PTRTYPE(struct PGMRAMRANGE *) pRightR0;
-
-    /** Padding to make aPage aligned on sizeof(PGMPAGE). */
-#if HC_ARCH_BITS == 32
-    uint32_t au32Alignment2[HC_ARCH_BITS == 32 ? 2 : 0];
-#endif
+
     /** Array of physical guest page tracking structures.
      * @note Number of entries is PGMRAMRANGE::cb / GUEST_PAGE_SIZE. */
     PGMPAGE aPages[1];
 } PGMRAMRANGE;
+AssertCompileMemberAlignment(PGMRAMRANGE, aPages, 16);
 /** Pointer to RAM range for GC Phys to HC Phys conversion. */
 typedef PGMRAMRANGE *PPGMRAMRANGE;

@@ -1343 +1394 @@
 /** @name PGMRAMRANGE::fFlags
  * @{ */
-/** The RAM range is floating around as an independent guest mapping. */
-#define PGM_RAM_RANGE_FLAGS_FLOATING        RT_BIT(20)
 /** Ad hoc RAM range for an ROM mapping. */
 #define PGM_RAM_RANGE_FLAGS_AD_HOC_ROM      RT_BIT(21)

@@ -1351 +1400 @@
 /** Ad hoc RAM range for an MMIO2 or pre-registered MMIO mapping. */
 #define PGM_RAM_RANGE_FLAGS_AD_HOC_MMIO_EX  RT_BIT(23)
+/** Valid RAM range flags. */
+#define PGM_RAM_RANGE_FLAGS_VALID_MASK      (PGM_RAM_RANGE_FLAGS_AD_HOC_ROM | PGM_RAM_RANGE_FLAGS_AD_HOC_MMIO | PGM_RAM_RANGE_FLAGS_AD_HOC_MMIO_EX)
 /** @} */

@@ -1377 +1428 @@
  */
 #define PGM_RAMRANGE_CALC_PAGE_R3PTR(a_pRam, a_GCPhysPage) \
-    ( (a_pRam)->pvR3 ? (R3PTRTYPE(uint8_t *))(a_pRam)->pvR3 + (a_GCPhysPage) - (a_pRam)->GCPhys : NULL )
+    ( (a_pRam)->pbR3 ? (a_pRam)->pbR3 + (a_GCPhysPage) - (a_pRam)->GCPhys : NULL )

@@ -1427 +1478 @@
 typedef struct PGMROMRANGE
 {
-    /** Pointer to the next range - R3. */
-    R3PTRTYPE(struct PGMROMRANGE *) pNextR3;
-    /** Pointer to the next range - R0. */
-    R0PTRTYPE(struct PGMROMRANGE *) pNextR0;
-    /** Pointer to the this range - R0. */
-    R0PTRTYPE(struct PGMROMRANGE *) pSelfR0;
     /** Address of the range. */
     RTGCPHYS GCPhys;

@@ -1443 +1488 @@
     /** The saved state range ID. */
     uint8_t idSavedState;
-    /** Alignment padding. */
-    uint8_t au8Alignment[2];
+    /** The ID of the associated RAM range. */
+#ifdef IN_RING0
+    volatile
+#endif
+    uint16_t idRamRange;
     /** The size bits pvOriginal points to. */
     uint32_t cbOriginal;

@@ -1460 +1508 @@
     R3PTRTYPE(uint8_t *) pbR3Alternate;
     RTR3PTR pvAlignment2;
+#else
+    RTR3PTR apvUnused[2];
 #endif
     /** The per page tracking structures. */

@@ -1524 +1574 @@
 typedef struct PGMREGMMIO2RANGE
 {
-    /** The owner of the range. (a device) */
+    /** The owner of the range (a device). */
     PPDMDEVINSR3 pDevInsR3;
     /** Pointer to the ring-3 mapping of the allocation. */
-    RTR3PTR pvR3;
-#ifndef VBOX_WITH_LINEAR_HOST_PHYS_MEM
-    /** Pointer to the ring-0 mapping of the allocation. */
-    RTR0PTR pvR0;
-#endif
-    /** Pointer to the next range - R3. */
-    R3PTRTYPE(struct PGMREGMMIO2RANGE *) pNextR3;
+    R3PTRTYPE(uint8_t *) pbR3;
     /** Flags (PGMREGMMIO2RANGE_F_XXX). */
     uint16_t fFlags;

@@ -1544 +1588 @@
     /** MMIO2 range identifier, for page IDs (PGMPAGE::s.idPage). */
     uint8_t idMmio2;
-    /** Alignment padding for putting the ram range on a PGMPAGE alignment boundary. */
-#ifndef VBOX_WITH_LINEAR_HOST_PHYS_MEM
-    uint8_t abAlignment[HC_ARCH_BITS == 32 ? 6 + 4 : 2];
-#else
-    uint8_t abAlignment[HC_ARCH_BITS == 32 ? 6 + 8 : 2 + 8];
-#endif
+    /** The ID of the associated RAM range. */
+#ifdef IN_RING0
+    volatile
+#endif
+    uint16_t idRamRange;
+    /** The mapping address if mapped, NIL_RTGCPHYS if not. */
+    RTGCPHYS GCPhys;
     /** The real size.
      * This may be larger than indicated by RamRange.cb if the range has been

@@ -1560 +1605 @@
     /** Live save per page tracking data for MMIO2. */
     R3PTRTYPE(PPGMLIVESAVEMMIO2PAGE) paLSPages;
-    /** The associated RAM range. */
-    PGMRAMRANGE RamRange;
+    RTR3PTR R3PtrPadding;
 } PGMREGMMIO2RANGE;
-AssertCompileMemberAlignment(PGMREGMMIO2RANGE, RamRange, 16);
 /** Pointer to a MMIO2 or pre-registered MMIO range. */
 typedef PGMREGMMIO2RANGE *PPGMREGMMIO2RANGE;

@@ -1588 +1631 @@
 
 
-/** @name Internal MMIO2 constants.
+/** @name Internal MMIO2 macros.
  * @{ */
-/** The maximum number of MMIO2 ranges. */
-#define PGM_MMIO2_MAX_RANGES                        32
-/** The maximum number of pages in a MMIO2 range. */
-#define PGM_MMIO2_MAX_PAGE_COUNT                    UINT32_C(0x01000000)
 /** Makes a MMIO2 page ID out of a MMIO2 range ID and page index number. */
 #define PGM_MMIO2_PAGEID_MAKE(a_idMmio2, a_iPage)   ( ((uint32_t)(a_idMmio2) << 24) | (uint32_t)(a_iPage) )

@@ -2948 +2987 @@
     uint8_t abMmioPg[RT_MAX(HOST_PAGE_SIZE, GUEST_PAGE_SIZE)];
 
+    /** @name RAM, MMIO2 and ROM ranges
+     * @{ */
+    /** The RAM range lookup table. */
+    PGMRAMRANGELOOKUPENTRY aRamRangeLookup[PGM_MAX_RAM_RANGES];
+    /** The ring-3 RAM range pointer table. */
+    R3PTRTYPE(PPGMRAMRANGE) apRamRanges[PGM_MAX_RAM_RANGES];
+    /** MMIO2 ranges. Indexed by idMmio2 minus 1. */
+    PGMREGMMIO2RANGE aMmio2Ranges[PGM_MAX_MMIO2_RANGES];
+    /** The ring-3 RAM range pointer table running parallel to aMmio2Ranges. */
+    R3PTRTYPE(PPGMRAMRANGE) apMmio2RamRanges[PGM_MAX_MMIO2_RANGES];
+    /** The ring-3 ROM range pointer table. */
+    R3PTRTYPE(PPGMROMRANGE) apRomRanges[PGM_MAX_ROM_RANGES];
+    /** Union of generation ID and lookup count. */
+    union PGMRAMRANGEGENANDLOOKUPCOUNT
+    {
+        /* Combined view of both the generation ID and the count for atomic updating/reading. */
+        uint64_t volatile u64Combined;
+        RT_GCC_EXTENSION struct
+        {
+            /** Generation ID for the RAM ranges.
+             * This member is incremented twice everytime a RAM range is mapped or
+             * unmapped, so odd numbers means aRamRangeLookup is being modified and even
+             * means the update has completed. */
+            uint32_t volatile idGeneration;
+            /** The number of active entries in aRamRangeLookup. */
+            uint32_t volatile cLookupEntries;
+        };
+    } RamRangeUnion;
+    /** The max RAM range ID (mirroring PGMR0PERVM::idRamRangeMax). */
+    uint32_t idRamRangeMax;
+    /** The number of MMIO2 ranges (serves as the next MMIO2 ID). */
+    uint8_t cMmio2Ranges;
+    /** The number of ROM ranges. */
+    uint8_t cRomRanges;
+    uint8_t abAlignment1[2];
+    /** @} */
+
     /** @name The zero page (abPagePg).
      * @{ */

@@ -3005 +3081 @@
      * Whether PCI passthrough is enabled. */
     bool fPciPassthrough;
-    /** The number of MMIO2 regions (serves as the next MMIO2 ID). */
-    uint8_t cMmio2Regions;
     /** Restore original ROM page content when resetting after loading state.
      * The flag is set by pgmR3LoadRomRanges and cleared at reset. This

@@ -3018 +3092 @@
     /** Alignment padding. */
 #ifndef VBOX_WITH_PGM_NEM_MODE
-    bool afAlignment3[1];
+    bool afAlignment2[2];
+#else
+    bool afAlignment2[1];
 #endif
     /** The host paging mode. (This is what SUPLib reports.) */
     SUPPAGINGMODE enmHostMode;
-    bool afAlignment3b[2];
-
-    /** Generation ID for the RAM ranges. This member is incremented everytime
-     * a RAM range is linked or unlinked. */
-    uint32_t volatile idRamRangesGen;
 
     /** Physical access handler type for ROM protection. */

@@ -3042 +3113 @@
     /** RAM range TLB for R3. */
     R3PTRTYPE(PPGMRAMRANGE) apRamRangesTlbR3[PGM_RAMRANGE_TLB_ENTRIES];
-    /** Pointer to the list of RAM ranges (Phys GC -> Phys HC conversion) - for R3.
-     * This is sorted by physical address and contains no overlapping ranges. */
-    R3PTRTYPE(PPGMRAMRANGE) pRamRangesXR3;
-    /** Root of the RAM range search tree for ring-3. */
-    R3PTRTYPE(PPGMRAMRANGE) pRamRangeTreeR3;
     /** Shadow Page Pool - R3 Ptr. */
     R3PTRTYPE(PPGMPOOL) pPoolR3;

@@ -3052 +3118 @@
      * This is sorted by physical address and contains no overlapping ranges. */
     R3PTRTYPE(PPGMROMRANGE) pRomRangesR3;
-    /** Pointer to the list of MMIO2 ranges - for R3.
-     * Registration order. */
-    R3PTRTYPE(PPGMREGMMIO2RANGE) pRegMmioRangesR3;
-    /** MMIO2 lookup array for ring-3. Indexed by idMmio2 minus 1. */
-    R3PTRTYPE(PPGMREGMMIO2RANGE) apMmio2RangesR3[PGM_MMIO2_MAX_RANGES];
 
     /** RAM range TLB for R0. */
     R0PTRTYPE(PPGMRAMRANGE) apRamRangesTlbR0[PGM_RAMRANGE_TLB_ENTRIES];
-    /** R0 pointer corresponding to PGM::pRamRangesXR3. */
-    R0PTRTYPE(PPGMRAMRANGE) pRamRangesXR0;
-    /** Root of the RAM range search tree for ring-0. */
-    R0PTRTYPE(PPGMRAMRANGE) pRamRangeTreeR0;
     /** Shadow Page Pool - R0 Ptr. */
     R0PTRTYPE(PPGMPOOL) pPoolR0;
     /** R0 pointer corresponding to PGM::pRomRangesR3. */
     R0PTRTYPE(PPGMROMRANGE) pRomRangesR0;
-    /** MMIO2 lookup array for ring-0. Indexed by idMmio2 minus 1. */
-    R0PTRTYPE(PPGMREGMMIO2RANGE) apMmio2RangesR0[PGM_MMIO2_MAX_RANGES];
 
     /** Hack: Number of deprecated page mapping locks taken by the current lock

@@ -3158 +3213 @@
     /** Number of repeated long allocation times. */
     uint32_t cLargePageLongAllocRepeats;
-    uint32_t uPadding5;
+    uint32_t uPadding4;
 
     /**

@@ -3254 +3309 @@
 #ifndef IN_TSTVMSTRUCTGC /* HACK */
 AssertCompileMemberAlignment(PGM, CritSectX, 8);
+AssertCompileMemberAlignment(PGM, CritSectX, 16);
+AssertCompileMemberAlignment(PGM, CritSectX, 32);
+AssertCompileMemberAlignment(PGM, CritSectX, 64);
 AssertCompileMemberAlignment(PGM, ChunkR3Map, 16);
-AssertCompileMemberAlignment(PGM, PhysTlbR3, 32); /** @todo 32 byte alignment! */
+AssertCompileMemberAlignment(PGM, PhysTlbR3, 8);
+AssertCompileMemberAlignment(PGM, PhysTlbR3, 16);
+AssertCompileMemberAlignment(PGM, PhysTlbR3, 32);
 AssertCompileMemberAlignment(PGM, PhysTlbR0, 32);
 AssertCompileMemberAlignment(PGM, HCPhysZeroPg, 8);

@@ -3688 +3748 @@
 typedef struct PGMR0PERVM
 {
+    /** @name RAM ranges
+     * @{ */
+    /** The ring-0 RAM range pointer table. */
+    R0PTRTYPE(PPGMRAMRANGE) apRamRanges[PGM_MAX_RAM_RANGES];
+    /** Trusted RAM range page counts running parallel to apRamRanges.
+     * This keeps the original page count when a range is reduced,
+     * only the PGMRAMRANGE::cb member is changed then. */
+    uint32_t acRamRangePages[PGM_MAX_RAM_RANGES];
+    /** The memory objects for the RAM ranges (parallel to apRamRanges). */
+    RTR0MEMOBJ ahRamRangeMemObjs[PGM_MAX_RAM_RANGES];
+    /** The ring-3 mapping objects for the RAM ranges (parallel to apRamRanges). */
+    RTR0MEMOBJ ahRamRangeMapObjs[PGM_MAX_RAM_RANGES];
+    /** The max RAM range ID (safe). */
+    uint32_t idRamRangeMax;
+    uint8_t abAlignment1[64 - sizeof(uint32_t)];
+    /** @} */
+
+    /** @name MMIO2 ranges
+     * @{ */
+    /** The ring-0 RAM range pointer table running parallel to aMmio2Ranges. */
+    R0PTRTYPE(PPGMRAMRANGE) apMmio2RamRanges[PGM_MAX_MMIO2_RANGES];
+    /** The memory objects for the MMIO2 backing memory (parallel to
+     * apMmio2RamRanges). */
+    RTR0MEMOBJ ahMmio2MemObjs[PGM_MAX_MMIO2_RANGES];
+    /** The ring-3 mapping objects for the MMIO2 backing memory (parallel
+     * to apMmio2RamRanges & ahMmio2MemObjs). */
+    RTR0MEMOBJ ahMmio2MapObjs[PGM_MAX_MMIO2_RANGES];
+    /** Trusted MMIO2 range sizes (count of guest pages).
+     * This keeps the original page count when a range is reduced,
+     * only the PGMRAMRANGE::cb member is changed then. */
+    uint32_t acMmio2RangePages[PGM_MAX_MMIO2_RANGES];
+#ifndef VBOX_WITH_LINEAR_HOST_PHYS_MEM
+    /** Pointer to the ring-0 mapping of the MMIO2 backings (parallel to
+     * apMmio2RamRanges). */
+    R0PTRTYPE(uint8_t *) apbMmio2Backing[PGM_MAX_MMIO2_RANGES];
+#endif
+    /** @} */
+
+    /** @name ROM ranges
+     * @{ */
+    /** The ring-0 ROM range pointer table. */
+    R0PTRTYPE(PPGMROMRANGE) apRomRanges[PGM_MAX_ROM_RANGES];
+    /** The memory objects for each ROM range (parallel to apRomRanges). */
+    RTR0MEMOBJ ahRomRangeMemObjs[PGM_MAX_ROM_RANGES];
+    /** The ring-3 mapping objects for each ROM range (parallel to apRomRanges
+     * & ahRamRangeMemObjs). */
+    RTR0MEMOBJ ahRomRangeMapObjs[PGM_MAX_ROM_RANGES];
+    /** Trusted ROM range sizes (count of guest pages). */
+    uint32_t acRomRangePages[PGM_MAX_ROM_RANGES];
+    /** @} */
+
     /** @name PGM Pool related stuff.
      * @{ */

@@ -3788 +3899 @@
 DECLCALLBACK(FNPGMRZPHYSPFHANDLER) pgmPhysMmio2WritePfHandler;
 #endif
+DECLHIDDEN(uint16_t) pgmPhysMmio2CalcChunkCount(RTGCPHYS cb, uint32_t *pcPagesPerChunk);
+DECLHIDDEN(int)      pgmPhysMmio2RegisterWorker(PVMCC pVM, uint32_t const cGuestPages, uint8_t const idMmio2,
+                                                const uint8_t cChunks, PPDMDEVINSR3 const pDevIns,
+                                                uint8_t const iSubDev, uint8_t const iRegion, uint32_t const fFlags);
+DECLHIDDEN(int)      pgmPhysMmio2DeregisterWorker(PVMCC pVM, uint8_t idMmio2, uint8_t cChunks, PPDMDEVINSR3 pDevIns);
 int             pgmPhysFreePage(PVM pVM, PGMMFREEPAGESREQ pReq, uint32_t *pcPendingPages, PPGMPAGE pPage, RTGCPHYS GCPhys,
                                 PGMPAGETYPE enmNewType);
+#ifdef VBOX_STRICT
+DECLHIDDEN(bool)     pgmPhysAssertRamRangesLocked(PVMCC pVM, bool fInUpdate, bool fRamRelaxed);
+#endif
 void            pgmPhysInvalidRamRangeTlbs(PVMCC pVM);
 void            pgmPhysInvalidatePageMapTLB(PVMCC pVM);
 void            pgmPhysInvalidatePageMapTLBEntry(PVMCC pVM, RTGCPHYS GCPhys);
-PPGMRAMRANGE    pgmPhysGetRangeSlow(PVM pVM, RTGCPHYS GCPhys);
-PPGMRAMRANGE    pgmPhysGetRangeAtOrAboveSlow(PVM pVM, RTGCPHYS GCPhys);
-PPGMPAGE        pgmPhysGetPageSlow(PVM pVM, RTGCPHYS GCPhys);
-int             pgmPhysGetPageExSlow(PVM pVM, RTGCPHYS GCPhys, PPPGMPAGE ppPage);
-int             pgmPhysGetPageAndRangeExSlow(PVM pVM, RTGCPHYS GCPhys, PPPGMPAGE ppPage, PPGMRAMRANGE *ppRam);
+PPGMRAMRANGE    pgmPhysGetRangeSlow(PVMCC pVM, RTGCPHYS GCPhys);
+PPGMRAMRANGE    pgmPhysGetRangeAtOrAboveSlow(PVMCC pVM, RTGCPHYS GCPhys);
+PPGMPAGE        pgmPhysGetPageSlow(PVMCC pVM, RTGCPHYS GCPhys);
+int             pgmPhysGetPageExSlow(PVMCC pVM, RTGCPHYS GCPhys, PPPGMPAGE ppPage);
+int             pgmPhysGetPageAndRangeExSlow(PVMCC pVM, RTGCPHYS GCPhys, PPPGMPAGE ppPage, PPGMRAMRANGE *ppRam);
+DECLHIDDEN(int) pgmPhysRamRangeAllocCommon(PVMCC pVM, uint32_t cPages, uint32_t fFlags, uint32_t *pidNewRange);
+DECLHIDDEN(int) pgmPhysRomRangeAllocCommon(PVMCC pVM, uint32_t cPages, uint8_t idRomRange, uint32_t fFlags);
 #ifdef VBOX_WITH_NATIVE_NEM
 void            pgmPhysSetNemStateForPages(PPGMPAGE paPages, RTGCPHYS cPages, uint8_t u2State);

@@ -3803 +3924 @@
 
 #ifdef IN_RING3
-void            pgmR3PhysRelinkRamRanges(PVM pVM);
 int             pgmR3PhysRamPreAllocate(PVM pVM);
 int             pgmR3PhysRamReset(PVM pVM);
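The new lookup scheme keeps the RAM range ID in the low, page-offset bits of the page-aligned GCPhysFirst value (which is why PGM_MAX_RAM_RANGES stays below 4096, the number of distinct values a 4 KiB page offset can hold), and it guards aRamRangeLookup with a generation counter that is bumped twice per map/unmap so readers can detect an in-flight update. The stand-alone C++ sketch below illustrates both ideas; the mask value, the helper names, and the simplified seqlock-style reader are illustrative assumptions, not the actual PGM code.

#include <atomic>
#include <cstdint>
#include <cstdio>

// Illustrative only, not the actual PGM code.  Assumes 4 KiB guest pages, so
// the page-offset bits of a page-aligned address can carry a range ID up to 4095.
static const uint64_t kGuestPageOffsetMask = 0xfff;

// Mirrors the idea behind PGMRAMRANGELOOKUPENTRY::GCPhysFirstAndId: the start
// address is page aligned, so its low bits are free to hold the range ID.
static uint64_t PackFirstAndId(uint64_t GCPhysFirst, uint32_t idRange)
{
    return (GCPhysFirst & ~kGuestPageOffsetMask) | (idRange & kGuestPageOffsetMask);
}

// Seqlock-style reader for data guarded by a generation counter that the
// writer bumps twice per update (odd while a modification is in flight):
// retry until the same even value is observed before and after the read.
static std::atomic<uint32_t> g_idGeneration{0};

static uint64_t ReadEntryConsistently(const uint64_t volatile *pEntry)
{
    uint32_t idGen;
    uint64_t uValue;
    do
    {
        idGen  = g_idGeneration.load(std::memory_order_acquire);
        uValue = *pEntry;
    } while ((idGen & 1) || idGen != g_idGeneration.load(std::memory_order_acquire));
    return uValue;
}

int main()
{
    uint64_t uEntry = PackFirstAndId(0x100000000ULL /* 4 GiB */, 42);
    std::printf("idRange=%u GCPhysFirst=%#llx\n",
                (unsigned)(uEntry & kGuestPageOffsetMask),
                (unsigned long long)(uEntry & ~kGuestPageOffsetMask));
    (void)ReadEntryConsistently(&uEntry); // trivially consistent: no writer here
    return 0;
}

The real lookup entry additionally exposes the 128-bit u128Volatile view so that GCPhysFirstAndId and GCPhysLast can be updated as a single unit, matching the "alternative 128-bit view for atomic updating" comment in the diff above.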