Changeset 91016 in vbox for trunk/src/VBox/VMM
Timestamp: Aug 31, 2021 1:23:53 AM (3 years ago)
Location: trunk/src/VBox/VMM
Files: 1 deleted, 15 edited
Legend: Unmodified / Added / Removed
trunk/src/VBox/VMM/VMMAll/IEMAll.cpp
r89974 r91016 1794 1794 } 1795 1795 1796 # if defined(IN_RING3) || (defined(IN_RING0) && !defined(VBOX_WITH_2X_4GB_ADDR_SPACE))1796 # if defined(IN_RING3) || defined(IN_RING0) /** @todo fixme */ 1797 1797 /* 1798 1798 * Try do a direct read using the pbMappingR3 pointer. -
trunk/src/VBox/VMM/VMMAll/MMAll.cpp
r87134 r91016 257 257 if (pLookup->u.Locked.pvR0) 258 258 return (RTR0PTR)((RTR0UINTPTR)pLookup->u.Locked.pvR0 + off); 259 #ifdef VBOX_WITH_2X_4GB_ADDR_SPACE260 AssertMsg(VM_IS_RAW_MODE_ENABLED(pVM), ("%s\n", R3STRING(pLookup->pszDesc)));261 #else262 259 AssertMsgFailed(("%s\n", R3STRING(pLookup->pszDesc))); NOREF(pVM); 263 #endif264 260 return NIL_RTR0PTR; 265 261 -
trunk/src/VBox/VMM/VMMAll/PGMAll.cpp
r91014 r91016 2315 2315 pVCpu->pgm.s.aGCPhysGstPaePDs[iPdpt] = NIL_RTGCPHYS; 2316 2316 pVCpu->pgm.s.apGstPaePDsR3[iPdpt] = NIL_RTR3PTR; 2317 # ifndef VBOX_WITH_2X_4GB_ADDR_SPACE2318 2317 pVCpu->pgm.s.apGstPaePDsR0[iPdpt] = NIL_RTR0PTR; 2319 # endif2320 2318 2321 2319 PGM_UNLOCK(pVM); … … 2407 2405 /* Force lazy remapping if it changed in any way. */ 2408 2406 pVCpu->pgm.s.apGstPaePDsR3[i] = 0; 2409 #ifndef VBOX_WITH_2X_4GB_ADDR_SPACE2410 2407 pVCpu->pgm.s.apGstPaePDsR0[i] = 0; 2411 #endif2412 2408 pVCpu->pgm.s.aGCPhysGstPaePDs[i] = NIL_RTGCPHYS; 2413 2409 } -
trunk/src/VBox/VMM/VMMAll/PGMAllBth.h
r91014 r91016 4372 4372 4373 4373 pVCpu->pgm.s.apGstPaePDsR3[i] = 0; 4374 # ifndef VBOX_WITH_2X_4GB_ADDR_SPACE4375 4374 pVCpu->pgm.s.apGstPaePDsR0[i] = 0; 4376 # endif4377 4375 pVCpu->pgm.s.aGCPhysGstPaePDs[i] = NIL_RTGCPHYS; 4378 4376 } … … 4491 4489 #if PGM_GST_TYPE == PGM_TYPE_32BIT 4492 4490 pVCpu->pgm.s.pGst32BitPdR3 = 0; 4493 # ifndef VBOX_WITH_2X_4GB_ADDR_SPACE4494 4491 pVCpu->pgm.s.pGst32BitPdR0 = 0; 4495 # endif4496 4492 4497 4493 #elif PGM_GST_TYPE == PGM_TYPE_PAE 4498 4494 pVCpu->pgm.s.pGstPaePdptR3 = 0; 4499 # ifndef VBOX_WITH_2X_4GB_ADDR_SPACE4500 4495 pVCpu->pgm.s.pGstPaePdptR0 = 0; 4501 # endif4502 4496 for (unsigned i = 0; i < X86_PG_PAE_PDPE_ENTRIES; i++) 4503 4497 { 4504 4498 pVCpu->pgm.s.apGstPaePDsR3[i] = 0; 4505 # ifndef VBOX_WITH_2X_4GB_ADDR_SPACE4506 4499 pVCpu->pgm.s.apGstPaePDsR0[i] = 0; 4507 # endif4508 4500 pVCpu->pgm.s.aGCPhysGstPaePDs[i] = NIL_RTGCPHYS; 4509 4501 } … … 4511 4503 #elif PGM_GST_TYPE == PGM_TYPE_AMD64 4512 4504 pVCpu->pgm.s.pGstAmd64Pml4R3 = 0; 4513 # ifndef VBOX_WITH_2X_4GB_ADDR_SPACE4514 4505 pVCpu->pgm.s.pGstAmd64Pml4R0 = 0; 4515 # endif4516 4506 4517 4507 #else /* prot/real mode stub */ -
trunk/src/VBox/VMM/VMMR0/HMR0.cpp
r90379 r91016 1450 1450 == (HM_CHANGED_HOST_CONTEXT | HM_CHANGED_SVM_HOST_GUEST_SHARED_STATE)); 1451 1451 1452 #ifdef VBOX_WITH_2X_4GB_ADDR_SPACE1453 AssertReturn(!VMMR0ThreadCtxHookIsEnabled(pVCpu), VERR_HM_IPE_5);1454 bool const fStartedSet = PGMR0DynMapStartOrMigrateAutoSet(pVCpu);1455 #endif1456 1457 1452 /* Keep track of the CPU owning the VMCS for debugging scheduling weirdness and ring-3 calls. */ 1458 1453 rc = g_HmR0Ops.pfnEnterSession(pVCpu); … … 1463 1458 rc = g_HmR0Ops.pfnExportHostState(pVCpu); 1464 1459 AssertMsgRCReturnStmt(rc, ("rc=%Rrc pVCpu=%p\n", rc, pVCpu), pVCpu->hmr0.s.idEnteredCpu = NIL_RTCPUID, rc); 1465 1466 #ifdef VBOX_WITH_2X_4GB_ADDR_SPACE1467 if (fStartedSet)1468 PGMRZDynMapReleaseAutoSet(pVCpu);1469 #endif1470 1460 } 1471 1461 return rc; … … 1557 1547 #endif 1558 1548 1559 #ifdef VBOX_WITH_2X_4GB_ADDR_SPACE1560 AssertReturn(!VMMR0ThreadCtxHookIsEnabled(pVCpu), VERR_HM_IPE_4);1561 Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));1562 PGMRZDynMapStartAutoSet(pVCpu);1563 #endif1564 1565 1549 VBOXSTRICTRC rcStrict = g_HmR0Ops.pfnRunGuestCode(pVCpu); 1566 1567 #ifdef VBOX_WITH_2X_4GB_ADDR_SPACE1568 PGMRZDynMapReleaseAutoSet(pVCpu);1569 #endif1570 1550 return VBOXSTRICTRC_VAL(rcStrict); 1571 1551 } -
trunk/src/VBox/VMM/VMMR0/PGMR0.cpp
r91014 r91016 90 90 VMMR0_INT_DECL(int) PGMR0InitVM(PGVM pGVM) 91 91 { 92 int rc = VINF_SUCCESS;93 #ifdef VBOX_WITH_2X_4GB_ADDR_SPACE94 rc = PGMR0DynMapInitVM(pGVM);95 #endif96 92 RT_NOREF(pGVM); 97 return rc; 93 /* Was used for DynMap init */ 94 return VINF_SUCCESS; 98 95 } 99 96 -
trunk/src/VBox/VMM/VMMR0/VMMR0.cpp
r91015 r91016 168 168 if (RT_SUCCESS(rc)) 169 169 { 170 #ifdef VBOX_WITH_2X_4GB_ADDR_SPACE 171 rc = PGMR0DynMapInit(); 172 #endif 170 rc = IntNetR0Init(); 173 171 if (RT_SUCCESS(rc)) 174 172 { 175 rc = IntNetR0Init(); 173 #ifdef VBOX_WITH_PCI_PASSTHROUGH 174 rc = PciRawR0Init(); 175 #endif 176 176 if (RT_SUCCESS(rc)) 177 177 { 178 #ifdef VBOX_WITH_PCI_PASSTHROUGH 179 rc = PciRawR0Init(); 180 #endif 178 rc = CPUMR0ModuleInit(); 181 179 if (RT_SUCCESS(rc)) 182 180 { 183 rc = CPUMR0ModuleInit(); 181 #ifdef VBOX_WITH_TRIPLE_FAULT_HACK 182 rc = vmmR0TripleFaultHackInit(); 184 183 if (RT_SUCCESS(rc)) 184 #endif 185 185 { 186 if (RT_SUCCESS(rc)) 187 { 188 LogFlow(("ModuleInit: returns success\n")); 189 return VINF_SUCCESS; 190 } 191 } 192 193 /* 194 * Bail out. 195 */ 186 196 #ifdef VBOX_WITH_TRIPLE_FAULT_HACK 187 rc = vmmR0TripleFaultHackInit(); 188 if (RT_SUCCESS(rc)) 189 #endif 190 { 191 if (RT_SUCCESS(rc)) 192 { 193 LogFlow(("ModuleInit: returns success\n")); 194 return VINF_SUCCESS; 195 } 196 } 197 198 /* 199 * Bail out. 
200 */ 201 #ifdef VBOX_WITH_TRIPLE_FAULT_HACK 202 vmmR0TripleFaultHackTerm(); 203 #endif 204 } 205 else 206 LogRel(("ModuleInit: CPUMR0ModuleInit -> %Rrc\n", rc)); 207 #ifdef VBOX_WITH_PCI_PASSTHROUGH 208 PciRawR0Term(); 197 vmmR0TripleFaultHackTerm(); 209 198 #endif 210 199 } 211 200 else 212 LogRel(("ModuleInit: PciRawR0Init -> %Rrc\n", rc)); 213 IntNetR0Term(); 201 LogRel(("ModuleInit: CPUMR0ModuleInit -> %Rrc\n", rc)); 202 #ifdef VBOX_WITH_PCI_PASSTHROUGH 203 PciRawR0Term(); 204 #endif 214 205 } 215 206 else 216 LogRel(("ModuleInit: IntNetR0Init -> %Rrc\n", rc)); 217 #ifdef VBOX_WITH_2X_4GB_ADDR_SPACE 218 PGMR0DynMapTerm(); 219 #endif 207 LogRel(("ModuleInit: PciRawR0Init -> %Rrc\n", rc)); 208 IntNetR0Term(); 220 209 } 221 210 else 222 LogRel(("ModuleInit: PGMR0DynMapInit -> %Rrc\n", rc));211 LogRel(("ModuleInit: IntNetR0Init -> %Rrc\n", rc)); 223 212 PGMDeregisterStringFormatTypes(); 224 213 } … … 271 260 * PGM (Darwin), HM and PciRaw global cleanup. 272 261 */ 273 #ifdef VBOX_WITH_2X_4GB_ADDR_SPACE274 PGMR0DynMapTerm();275 #endif276 262 #ifdef VBOX_WITH_PCI_PASSTHROUGH 277 263 PciRawR0Term(); … … 561 547 /** @todo I wish to call PGMR0PhysFlushHandyPages(pGVM, &pGVM->aCpus[idCpu]) 562 548 * here to make sure we don't leak any shared pages if we crash... */ 563 #ifdef VBOX_WITH_2X_4GB_ADDR_SPACE564 PGMR0DynMapTermVM(pGVM);565 #endif566 549 HMR0TermVM(pGVM); 567 550 } -
trunk/src/VBox/VMM/VMMR3/MMHyper.cpp
r90991 r91016 1006 1006 } 1007 1007 1008 #ifdef VBOX_WITH_2X_4GB_ADDR_SPACE1009 /*1010 * Set MMHYPER_AONR_FLAGS_KERNEL_MAPPING if we're in going to execute in ring-0.1011 */1012 if (VM_IS_HM_OR_NEM_ENABLED(pVM))1013 fFlags |= MMHYPER_AONR_FLAGS_KERNEL_MAPPING;1014 #endif1015 1016 1008 /* 1017 1009 * Validate alignment. -
trunk/src/VBox/VMM/VMMR3/PGM.cpp
r90992 r91016 799 799 pPGM->pGstPaePdptR3 = NULL; 800 800 pPGM->pGstAmd64Pml4R3 = NULL; 801 #ifndef VBOX_WITH_2X_4GB_ADDR_SPACE802 801 pPGM->pGst32BitPdR0 = NIL_RTR0PTR; 803 802 pPGM->pGstPaePdptR0 = NIL_RTR0PTR; 804 803 pPGM->pGstAmd64Pml4R0 = NIL_RTR0PTR; 805 #endif806 804 for (unsigned i = 0; i < RT_ELEMENTS(pVCpu->pgm.s.apGstPaePDsR3); i++) 807 805 { 808 806 pPGM->apGstPaePDsR3[i] = NULL; 809 #ifndef VBOX_WITH_2X_4GB_ADDR_SPACE810 807 pPGM->apGstPaePDsR0[i] = NIL_RTR0PTR; 811 #endif812 808 pPGM->aGCPhysGstPaePDs[i] = NIL_RTGCPHYS; 813 809 pPGM->aGstPaePdpeRegs[i].u = UINT64_MAX; -
trunk/src/VBox/VMM/VMMR3/PGMDbg.cpp
r90794 r91016 2562 2562 2563 2563 LOG_PGMCPU_MEMBER("p", pGst32BitPdR3); 2564 # ifndef VBOX_WITH_2X_4GB_ADDR_SPACE2565 2564 LOG_PGMCPU_MEMBER("p", pGst32BitPdR0); 2566 # endif2567 2565 LOG_PGMCPU_MEMBER("RRv", pGst32BitPdRC); 2568 2566 LOG_PGMCPU_MEMBER("#RX32", fGst32BitMbzBigPdeMask); … … 2570 2568 2571 2569 LOG_PGMCPU_MEMBER("p", pGstPaePdptR3); 2572 # ifndef VBOX_WITH_2X_4GB_ADDR_SPACE2573 2570 LOG_PGMCPU_MEMBER("p", pGstPaePdptR0); 2574 # endif2575 2571 LOG_PGMCPU_MEMBER("RRv", pGstPaePdptRC); 2576 2572 LOG_PGMCPU_MEMBER("p", apGstPaePDsR3[0]); … … 2578 2574 LOG_PGMCPU_MEMBER("p", apGstPaePDsR3[2]); 2579 2575 LOG_PGMCPU_MEMBER("p", apGstPaePDsR3[3]); 2580 # ifndef VBOX_WITH_2X_4GB_ADDR_SPACE2581 2576 LOG_PGMCPU_MEMBER("p", apGstPaePDsR0[0]); 2582 2577 LOG_PGMCPU_MEMBER("p", apGstPaePDsR0[1]); 2583 2578 LOG_PGMCPU_MEMBER("p", apGstPaePDsR0[2]); 2584 2579 LOG_PGMCPU_MEMBER("p", apGstPaePDsR0[3]); 2585 # endif2586 2580 LOG_PGMCPU_MEMBER("RRv", apGstPaePDsR0[0]); 2587 2581 LOG_PGMCPU_MEMBER("RRv", apGstPaePDsR0[1]); … … 2607 2601 2608 2602 LOG_PGMCPU_MEMBER("p", pGstAmd64Pml4R3); 2609 # ifndef VBOX_WITH_2X_4GB_ADDR_SPACE2610 2603 LOG_PGMCPU_MEMBER("p", pGstAmd64Pml4R0); 2611 # endif2612 2604 LOG_PGMCPU_MEMBER("#RX64", fGstAmd64MbzPteMask); 2613 2605 LOG_PGMCPU_MEMBER("#RX64", fGstAmd64MbzPdeMask); -
trunk/src/VBox/VMM/VMMR3/PGMPhys.cpp
r91014 r91016 4753 4753 pPGM->pGstPaePdptR3 = NULL; 4754 4754 pPGM->pGstAmd64Pml4R3 = NULL; 4755 #ifndef VBOX_WITH_2X_4GB_ADDR_SPACE4756 4755 pPGM->pGst32BitPdR0 = NIL_RTR0PTR; 4757 4756 pPGM->pGstPaePdptR0 = NIL_RTR0PTR; 4758 4757 pPGM->pGstAmd64Pml4R0 = NIL_RTR0PTR; 4759 #endif4760 4758 for (unsigned i = 0; i < RT_ELEMENTS(pPGM->apGstPaePDsR3); i++) 4761 4759 { 4762 pPGM->apGstPaePDsR3[i] = NULL; 4763 #ifndef VBOX_WITH_2X_4GB_ADDR_SPACE 4764 pPGM->apGstPaePDsR0[i] = NIL_RTR0PTR; 4765 #endif 4760 pPGM->apGstPaePDsR3[i] = NULL; 4761 pPGM->apGstPaePDsR0[i] = NIL_RTR0PTR; 4766 4762 } 4767 4763 -
trunk/src/VBox/VMM/include/IEMInternal.h
r89975 r91016 268 268 uint64_t GCPhys; 269 269 /** Pointer to the ring-3 mapping (possibly also valid in ring-0). */ 270 #ifdef VBOX_WITH_2X_4GB_ADDR_SPACE271 270 R3PTRTYPE(uint8_t *) pbMappingR3; 272 #else273 R3R0PTRTYPE(uint8_t *) pbMappingR3;274 #endif275 271 #if HC_ARCH_BITS == 32 276 272 uint32_t u32Padding1; -
trunk/src/VBox/VMM/include/PGMInternal.h
r91014 r91016 1738 1738 #define PGM_PAGER0MAPTLB_IDX(GCPhys) ( ((GCPhys) >> PAGE_SHIFT) & (PGM_PAGER0MAPTLB_ENTRIES - 1) ) 1739 1739 /** @} */ 1740 1741 1742 /**1743 * Raw-mode context dynamic mapping cache entry.1744 *1745 * Because of raw-mode context being reloctable and all relocations are applied1746 * in ring-3, this has to be defined here and be RC specific.1747 *1748 * @sa PGMRZDYNMAPENTRY, PGMR0DYNMAPENTRY.1749 */1750 typedef struct PGMRCDYNMAPENTRY1751 {1752 /** The physical address of the currently mapped page.1753 * This is duplicate for three reasons: cache locality, cache policy of the PT1754 * mappings and sanity checks. */1755 RTHCPHYS HCPhys;1756 /** Pointer to the page. */1757 RTRCPTR pvPage;1758 /** The number of references. */1759 int32_t volatile cRefs;1760 /** PTE pointer union. */1761 struct PGMRCDYNMAPENTRY_PPTE1762 {1763 /** PTE pointer, 32-bit legacy version. */1764 RCPTRTYPE(PX86PTE) pLegacy;1765 /** PTE pointer, PAE version. */1766 RCPTRTYPE(PX86PTEPAE) pPae;1767 } uPte;1768 } PGMRCDYNMAPENTRY;1769 /** Pointer to a dynamic mapping cache entry for the raw-mode context. */1770 typedef PGMRCDYNMAPENTRY *PPGMRCDYNMAPENTRY;1771 1772 1773 /**1774 * Dynamic mapping cache for the raw-mode context.1775 *1776 * This is initialized during VMMRC init based upon the pbDynPageMapBaseGC and1777 * paDynPageMap* PGM members. However, it has to be defined in PGMInternal.h1778 * so that we can perform relocations from PGMR3Relocate. This has the1779 * consequence that we must have separate ring-0 and raw-mode context versions1780 * of this struct even if they share the basic elements.1781 *1782 * @sa PPGMRZDYNMAP, PGMR0DYNMAP.1783 */1784 typedef struct PGMRCDYNMAP1785 {1786 /** The usual magic number / eye catcher (PGMRZDYNMAP_MAGIC). */1787 uint32_t u32Magic;1788 /** Array for tracking and managing the pages. */1789 RCPTRTYPE(PPGMRCDYNMAPENTRY) paPages;1790 /** The cache size given as a number of pages. 
*/1791 uint32_t cPages;1792 /** The current load.1793 * This does not include guard pages. */1794 uint32_t cLoad;1795 /** The max load ever.1796 * This is maintained to get trigger adding of more mapping space. */1797 uint32_t cMaxLoad;1798 /** The number of guard pages. */1799 uint32_t cGuardPages;1800 /** The number of users (protected by hInitLock). */1801 uint32_t cUsers;1802 } PGMRCDYNMAP;1803 /** Pointer to the dynamic cache for the raw-mode context. */1804 typedef PGMRCDYNMAP *PPGMRCDYNMAP;1805 1806 1807 /**1808 * Mapping cache usage set entry.1809 *1810 * @remarks 16-bit ints was chosen as the set is not expected to be used beyond1811 * the dynamic ring-0 and (to some extent) raw-mode context mapping1812 * cache. If it's extended to include ring-3, well, then something1813 * will have be changed here...1814 */1815 typedef struct PGMMAPSETENTRY1816 {1817 /** Pointer to the page. */1818 RTR0PTR pvPage;1819 /** The mapping cache index. */1820 uint16_t iPage;1821 /** The number of references.1822 * The max is UINT16_MAX - 1. */1823 uint16_t cRefs;1824 /** The number inlined references.1825 * The max is UINT16_MAX - 1. */1826 uint16_t cInlinedRefs;1827 /** Unreferences. */1828 uint16_t cUnrefs;1829 1830 #if HC_ARCH_BITS == 321831 uint32_t u32Alignment1;1832 #endif1833 /** The physical address for this entry. */1834 RTHCPHYS HCPhys;1835 } PGMMAPSETENTRY;1836 AssertCompileMemberOffset(PGMMAPSETENTRY, iPage, RT_MAX(sizeof(RTR0PTR), sizeof(RTRCPTR)));1837 AssertCompileMemberAlignment(PGMMAPSETENTRY, HCPhys, sizeof(RTHCPHYS));1838 /** Pointer to a mapping cache usage set entry. */1839 typedef PGMMAPSETENTRY *PPGMMAPSETENTRY;1840 1841 /**1842 * Mapping cache usage set.1843 *1844 * This is used in ring-0 and the raw-mode context to track dynamic mappings1845 * done during exits / traps. 
The set is1846 */1847 typedef struct PGMMAPSET1848 {1849 /** The number of occupied entries.1850 * This is PGMMAPSET_CLOSED if the set is closed and we're not supposed to do1851 * dynamic mappings. */1852 uint32_t cEntries;1853 /** The start of the current subset.1854 * This is UINT32_MAX if no subset is currently open. */1855 uint32_t iSubset;1856 /** The index of the current CPU, only valid if the set is open. */1857 int32_t iCpu;1858 uint32_t alignment;1859 /** The entries. */1860 PGMMAPSETENTRY aEntries[64];1861 /** HCPhys -> iEntry fast lookup table.1862 * Use PGMMAPSET_HASH for hashing.1863 * The entries may or may not be valid, check against cEntries. */1864 uint8_t aiHashTable[128];1865 } PGMMAPSET;1866 AssertCompileSizeAlignment(PGMMAPSET, 8);1867 /** Pointer to the mapping cache set. */1868 typedef PGMMAPSET *PPGMMAPSET;1869 1870 /** PGMMAPSET::cEntries value for a closed set. */1871 #define PGMMAPSET_CLOSED UINT32_C(0xdeadc0fe)1872 1873 /** Hash function for aiHashTable. */1874 #define PGMMAPSET_HASH(HCPhys) (((HCPhys) >> PAGE_SHIFT) & 127)1875 1740 1876 1741 … … 3662 3527 uint32_t uPadding0; /**< structure size alignment. */ 3663 3528 3664 #ifdef VBOX_WITH_2X_4GB_ADDR_SPACE3665 /** Automatically tracked physical memory mapping set.3666 * Ring-0 and strict raw-mode builds. */3667 PGMMAPSET AutoSet;3668 #endif3669 3670 3529 /** A20 gate mask. 3671 3530 * Our current approach to A20 emulation is to let REM do it and don't bother … … 3705 3564 /** The guest's page directory, R3 pointer. */ 3706 3565 R3PTRTYPE(PX86PD) pGst32BitPdR3; 3707 #ifndef VBOX_WITH_2X_4GB_ADDR_SPACE3708 3566 /** The guest's page directory, R0 pointer. */ 3709 3567 R0PTRTYPE(PX86PD) pGst32BitPdR0; 3710 #endif3711 3568 /** Mask containing the MBZ bits of a big page PDE. */ 3712 3569 uint32_t fGst32BitMbzBigPdeMask; … … 3721 3578 /** The guest's page directory pointer table, R3 pointer. 
*/ 3722 3579 R3PTRTYPE(PX86PDPT) pGstPaePdptR3; 3723 #ifndef VBOX_WITH_2X_4GB_ADDR_SPACE3724 3580 /** The guest's page directory pointer table, R0 pointer. */ 3725 3581 R0PTRTYPE(PX86PDPT) pGstPaePdptR0; 3726 #endif3727 3582 3728 3583 /** The guest's page directories, R3 pointers. … … 3732 3587 /** The guest's page directories, R0 pointers. 3733 3588 * Same restrictions as apGstPaePDsR3. */ 3734 #ifndef VBOX_WITH_2X_4GB_ADDR_SPACE3735 3589 R0PTRTYPE(PX86PDPAE) apGstPaePDsR0[4]; 3736 #endif3737 3590 /** The physical addresses of the guest page directories (PAE) pointed to by apGstPagePDsHC/GC. */ 3738 3591 RTGCPHYS aGCPhysGstPaePDs[4]; … … 3759 3612 /** The guest's page directory pointer table, R3 pointer. */ 3760 3613 R3PTRTYPE(PX86PML4) pGstAmd64Pml4R3; 3761 #ifndef VBOX_WITH_2X_4GB_ADDR_SPACE3762 3614 /** The guest's page directory pointer table, R0 pointer. */ 3763 3615 R0PTRTYPE(PX86PML4) pGstAmd64Pml4R0; 3764 #else3765 RTR0PTR alignment6b; /**< alignment equalizer. */3766 #endif3767 3616 /** Mask containing the MBZ PTE bits. */ 3768 3617 uint64_t fGstAmd64MbzPteMask; -
trunk/src/VBox/VMM/testcase/tstVMStruct.h
r90597 r91016 431 431 GEN_CHECK_OFF(MM, pPagePoolR3); 432 432 GEN_CHECK_OFF(MM, pPagePoolLowR3); 433 #ifndef VBOX_WITH_2X_4GB_ADDR_SPACE434 433 GEN_CHECK_OFF(MM, pPagePoolR0); 435 434 GEN_CHECK_OFF(MM, pPagePoolLowR0); 436 #endif437 435 GEN_CHECK_OFF(MM, pvDummyPage); 438 436 GEN_CHECK_OFF(MM, HCPhysDummyPage); … … 671 669 GEN_CHECK_OFF(PGMCPU, offVCpu); 672 670 GEN_CHECK_OFF(PGMCPU, offPGM); 673 #if defined(VBOX_WITH_2X_4GB_ADDR_SPACE) || defined(VBOX_WITH_RAW_MODE)674 GEN_CHECK_OFF(PGMCPU, AutoSet);675 #endif676 671 GEN_CHECK_OFF(PGMCPU, GCPhysA20Mask); 677 672 GEN_CHECK_OFF(PGMCPU, fA20Enabled); … … 682 677 GEN_CHECK_OFF(PGM, GCPtrCR3Mapping); 683 678 GEN_CHECK_OFF(PGMCPU, pGst32BitPdR3); 684 #ifndef VBOX_WITH_2X_4GB_ADDR_SPACE685 679 GEN_CHECK_OFF(PGMCPU, pGst32BitPdR0); 686 #endif687 680 GEN_CHECK_OFF(PGMCPU, pGst32BitPdRC); 688 681 GEN_CHECK_OFF(PGMCPU, pGstPaePdptR3); 689 #ifndef VBOX_WITH_2X_4GB_ADDR_SPACE690 682 GEN_CHECK_OFF(PGMCPU, pGstPaePdptR0); 691 #endif692 683 GEN_CHECK_OFF(PGMCPU, pGstPaePdptRC); 693 684 GEN_CHECK_OFF(PGMCPU, apGstPaePDsR3); 694 #ifndef VBOX_WITH_2X_4GB_ADDR_SPACE695 685 GEN_CHECK_OFF(PGMCPU, apGstPaePDsR0); 696 #endif697 686 GEN_CHECK_OFF(PGMCPU, apGstPaePDsRC); 698 687 GEN_CHECK_OFF(PGMCPU, aGCPhysGstPaePDs); -
trunk/src/VBox/VMM/testcase/tstVMStructSize.cpp
r87819 r91016 332 332 333 333 /* pgm */ 334 #if defined(VBOX_WITH_2X_4GB_ADDR_SPACE)335 CHECK_MEMBER_ALIGNMENT(PGMCPU, AutoSet, 8);336 #endif337 334 CHECK_MEMBER_ALIGNMENT(PGMCPU, GCPhysCR3, sizeof(RTGCPHYS)); 338 335 CHECK_MEMBER_ALIGNMENT(PGMCPU, aGCPhysGstPaePDs, sizeof(RTGCPHYS));
Note: See TracChangeset for help on using the changeset viewer.