VirtualBox

Changeset 87141 in vbox


Timestamp: Dec 29, 2020 7:12:45 PM (4 years ago)
Author: vboxsync
Message: VMM: Remove VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0 and the code it encloses as it is unused since the removal of x86 darwin support
Location: trunk
Files: 9 edited

Legend:

       Unmodified (no prefix)
    +  Added
    -  Removed
    …  Lines elided between hunks
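
The change is mechanical: wherever VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0 guarded a dynamic-mapping code path, the now-dead branch and its #if/#else scaffolding are deleted and only the other branch survives. As a minimal standalone illustration of that pattern (a sketch with invented DEMO_* names, not VirtualBox source; the real option selected a per-call mapping path for ring-0 environments that could not keep all guest RAM mapped):

    /* demo.c - hypothetical sketch.  DEMO_WITH_DYN_MAPPING plays the role of
       VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0.  Once no build can define the macro,
       the first branch is dead and the #ifdef collapses to the #else body,
       which is the shape of every hunk below. */
    #include <stddef.h>
    #include <stdint.h>

    #define DEMO_PAGE_SIZE 4096u

    static uint8_t g_abRam[16 * DEMO_PAGE_SIZE];    /* stand-in for guest RAM */

    #ifdef DEMO_WITH_DYN_MAPPING
    /* Dead branch: remap a small reusable window on every access (modelled
       here by copying the target page into the window). */
    static uint8_t g_abWindow[DEMO_PAGE_SIZE];
    static int demoPhysToPtr(uint64_t off, void **ppv)
    {
        uint64_t offPage = off & ~(uint64_t)(DEMO_PAGE_SIZE - 1);
        if (offPage >= sizeof(g_abRam))
            return -1;
        for (size_t i = 0; i < DEMO_PAGE_SIZE; i++)
            g_abWindow[i] = g_abRam[offPage + i];
        *ppv = &g_abWindow[off & (DEMO_PAGE_SIZE - 1)];
        return 0;
    }
    #else
    /* Surviving branch: all memory is permanently mapped, so translation is
       plain pointer arithmetic. */
    static int demoPhysToPtr(uint64_t off, void **ppv)
    {
        if (off >= sizeof(g_abRam))
            return -1;
        *ppv = &g_abRam[off];
        return 0;
    }
    #endif

    int main(void)
    {
        void *pv = NULL;
        return demoPhysToPtr(DEMO_PAGE_SIZE + 123, &pv); /* 0 on success */
    }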
  • trunk/include/VBox/types.h

    r86666 → r87141

     typedef struct PGMPAGEMAPLOCK
     {
    -#if defined(IN_RC) || defined(VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0)
    +#if defined(IN_RC)
         /** The locked page. */
         void       *pvPage;
  • trunk/include/VBox/vmm/pgm.h

    r85126 → r87141

     VMM_INT_DECL(int)   PGMPhysIemQueryAccess(PVMCC pVM, RTGCPHYS GCPhys, bool fWritable, bool fByPassHandlers);
     VMM_INT_DECL(int)   PGMPhysIemGCPhys2PtrNoLock(PVMCC pVM, PVMCPUCC pVCpu, RTGCPHYS GCPhys, uint64_t const volatile *puTlbPhysRev,
    -#if defined(IN_RC) || defined(VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0)
    +#if defined(IN_RC)
                                                    R3PTRTYPE(uint8_t *) *ppb,
     #else
  • trunk/src/VBox/VMM/VMMAll/PGMAll.cpp

    r86488 → r87141


    -#ifndef VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0
    -
     /**
      * Performs the lazy mapping of the 32-bit guest PD.
    …
     # else
             RTHCPTR     HCPtr       = NIL_RTHCPTR;
    -#  ifndef VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0
             rc = pgmPhysGCPhys2CCPtrInternalDepr(pVM, pPage, GCPhys, &HCPtr);
             AssertRC(rc);
    -#  endif
             if (RT_SUCCESS(rc))
             {
    …
         return rc;
     }
    -
    -#endif /* !VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0 */


    …
     }

    -#ifdef VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0
    -
    -/**
    - * Common worker for pgmRZDynMapGCPageOffInlined and pgmRZDynMapGCPageV2Inlined.
    - *
    - * @returns VBox status code.
    - * @param   pVM         The cross context VM structure.
    - * @param   pVCpu       The cross context virtual CPU structure of the calling EMT.
    - * @param   GCPhys      The guest physical address of the page to map.  The
    - *                      offset bits are not ignored.
    - * @param   ppv         Where to return the address corresponding to @a GCPhys.
    - * @param   SRC_POS     The source position of the caller (RT_SRC_POS).
    - */
    -int pgmRZDynMapGCPageCommon(PVM pVM, PVMCPU pVCpu, RTGCPHYS GCPhys, void **ppv RTLOG_COMMA_SRC_POS_DECL)
    -{
    -    pgmLock(pVM);
    -
    -    /*
    -     * Convert it to a writable page and it on to the dynamic mapper.
    -     */
    -    int rc;
    -    PPGMPAGE pPage = pgmPhysGetPage(pVM, GCPhys);
    -    if (RT_LIKELY(pPage))
    -    {
    -        rc = pgmPhysPageMakeWritable(pVM, pPage, GCPhys);
    -        if (RT_SUCCESS(rc))
    -        {
    -            void *pv;
    -            rc = pgmRZDynMapHCPageInlined(pVCpu, PGM_PAGE_GET_HCPHYS(pPage), &pv RTLOG_COMMA_SRC_POS_ARGS);
    -            if (RT_SUCCESS(rc))
    -                *ppv = (void *)((uintptr_t)pv | ((uintptr_t)GCPhys & PAGE_OFFSET_MASK));
    -        }
    -        else
    -            AssertRC(rc);
    -    }
    -    else
    -    {
    -        AssertMsgFailed(("Invalid physical address %RGp!\n", GCPhys));
    -        rc = VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS;
    -    }
    -
    -    pgmUnlock(pVM);
    -    return rc;
    -}
    -
    -#endif /* VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0 */
    +
     #if !defined(IN_R0) || defined(LOG_ENABLED)

  • trunk/src/VBox/VMM/VMMAll/PGMAllBth.h

    r86554 → r87141

         AssertReturn(pPageCR3, VERR_PGM_INVALID_CR3_ADDR);
         /** @todo this needs some reworking wrt. locking?  */
    -# ifdef VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0
    -    HCPtrGuestCR3 = NIL_RTHCPTR;
    -    int rc = VINF_SUCCESS;
    -# else
         int rc = pgmPhysGCPhys2CCPtrInternalDepr(pVM, pPageCR3, GCPhysCR3 & GST_CR3_PAGE_MASK, (void **)&HCPtrGuestCR3); /** @todo r=bird: This GCPhysCR3 masking isn't necessary. */
    -# endif
         pgmUnlock(pVM);
         if (RT_SUCCESS(rc))
    …
                     PPGMPAGE    pPage  = pgmPhysGetPage(pVM, GCPhys);
                     AssertReturn(pPage, VERR_PGM_INVALID_PDPE_ADDR);
    -#  ifdef VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0
    -                HCPtr = NIL_RTHCPTR;
    -                int rc2 = VINF_SUCCESS;
    -#  else
                     int rc2 = pgmPhysGCPhys2CCPtrInternalDepr(pVM, pPage, GCPhys, (void **)&HCPtr);
    -#  endif
                     pgmUnlock(pVM);
                     if (RT_SUCCESS(rc2))
  • trunk/src/VBox/VMM/VMMAll/PGMAllPhys.cpp

    r86473 → r87141

         AssertReturn(idChunk != NIL_GMM_CHUNKID, VERR_INVALID_PARAMETER);

    -#ifdef VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0
    -    /*
    -     * Map it by HCPhys.
    -     */
    -    return pgmRZDynMapHCPageInlined(VMMGetCpu(pVM), HCPhys, ppv  RTLOG_COMMA_SRC_POS);
    -
    -#elif defined(IN_RING0) && defined(VBOX_WITH_RAM_IN_KERNEL)
    +#if defined(IN_RING0) && defined(VBOX_WITH_RAM_IN_KERNEL)
     # ifdef VBOX_WITH_LINEAR_HOST_PHYS_MEM
         return SUPR0HCPhysToVirt(HCPhys & ~(RTHCPHYS)PAGE_OFFSET_MASK, ppv);
    …
         PGM_LOCK_ASSERT_OWNER(pVM);
         NOREF(GCPhys);
    -
    -#ifdef VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0
    -    /*
    -     * Just some sketchy GC/R0-darwin code.
    -     */
    -    *ppMap = NULL;
    -    RTHCPHYS HCPhys = PGM_PAGE_GET_HCPHYS(pPage);
    -    Assert(HCPhys != pVM->pgm.s.HCPhysZeroPg);
    -    pgmRZDynMapHCPageInlined(VMMGetCpu(pVM), HCPhys, ppv RTLOG_COMMA_SRC_POS);
    -    return VINF_SUCCESS;
    -
    -#else /* !VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0 */
    -

         /*
    …
         return VINF_SUCCESS;
     # endif /* !IN_RING0 || !VBOX_WITH_RAM_IN_KERNEL */
    -#endif /* !VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0 */
     }

    …
     }

    -#ifndef VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0

     /**
    …
     }

    -#endif /* !VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0 */

     /**
    …
          * Get the mapping address.
          */
    -#ifdef VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0
    -    void *pv;
    -    rc = pgmRZDynMapHCPageInlined(VMMGetCpu(pVM),
    -                                  PGM_PAGE_GET_HCPHYS(pPage),
    -                                  &pv
    -                                  RTLOG_COMMA_SRC_POS);
    -    if (RT_FAILURE(rc))
    -        return rc;
    -    *ppv = (void *)((uintptr_t)pv | (uintptr_t)(GCPhys & PAGE_OFFSET_MASK));
    -#else
         PPGMPAGEMAPTLBE pTlbe;
         rc = pgmPhysPageQueryTlbeWithPage(pVM, pPage, GCPhys, &pTlbe);
    …
             return rc;
         *ppv = (void *)((uintptr_t)pTlbe->pv | (uintptr_t)(GCPhys & PAGE_OFFSET_MASK));
    -#endif
         return VINF_SUCCESS;
     }

    -#ifndef VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0

     /**
    …
     }

    -#endif /* !VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0 */
    -

     /**
    …
          * Do the job.
          */
    -#ifdef VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0
    -    void *pv;
    -    PVMCPU pVCpu = VMMGetCpu(pVM);
    -    rc = pgmRZDynMapHCPageInlined(pVCpu,
    -                                  PGM_PAGE_GET_HCPHYS(pPage),
    -                                  &pv
    -                                  RTLOG_COMMA_SRC_POS);
    -    if (RT_FAILURE(rc))
    -        return rc;
    -    *ppv = (void *)((uintptr_t)pv | (uintptr_t)(GCPhys & PAGE_OFFSET_MASK));
    -    pLock->pvPage = pv;
    -    pLock->pVCpu  = pVCpu;
    -
    -#else
         PPGMPAGEMAPTLBE pTlbe;
         rc = pgmPhysPageQueryTlbeWithPage(pVM, pPage, GCPhys, &pTlbe);
    …
         pgmPhysPageMapLockForWriting(pVM, pPage, pTlbe, pLock);
         *ppv = (void *)((uintptr_t)pTlbe->pv | (uintptr_t)(GCPhys & PAGE_OFFSET_MASK));
    -#endif
         return VINF_SUCCESS;
     }
    …
          * Do the job.
          */
    -#ifdef VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0
    -    void *pv;
    -    PVMCPU pVCpu = VMMGetCpu(pVM);
    -    int rc = pgmRZDynMapHCPageInlined(pVCpu,
    -                                      PGM_PAGE_GET_HCPHYS(pPage),
    -                                      &pv
    -                                      RTLOG_COMMA_SRC_POS); /** @todo add a read only flag? */
    -    if (RT_FAILURE(rc))
    -        return rc;
    -    *ppv = (void *)((uintptr_t)pv | (uintptr_t)(GCPhys & PAGE_OFFSET_MASK));
    -    pLock->pvPage = pv;
    -    pLock->pVCpu  = pVCpu;
    -
    -#else
         PPGMPAGEMAPTLBE pTlbe;
         int rc = pgmPhysPageQueryTlbeWithPage(pVM, pPage, GCPhys, &pTlbe);
    …
         pgmPhysPageMapLockForReading(pVM, pPage, pTlbe, pLock);
         *ppv = (void *)((uintptr_t)pTlbe->pv | (uintptr_t)(GCPhys & PAGE_OFFSET_MASK));
    -#endif
         return VINF_SUCCESS;
     }
    …
         AssertRCReturn(rc, rc);

    -#ifdef VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0
    -    /*
    -     * Find the page and make sure it's writable.
    -     */
    -    PPGMPAGE pPage;
    -    rc = pgmPhysGetPageEx(pVM, GCPhys, &pPage);
    -    if (RT_SUCCESS(rc))
    -    {
    -        if (RT_UNLIKELY(PGM_PAGE_GET_STATE(pPage) != PGM_PAGE_STATE_ALLOCATED))
    -            rc = pgmPhysPageMakeWritable(pVM, pPage, GCPhys);
    -        if (RT_SUCCESS(rc))
    -        {
    -            AssertMsg(rc == VINF_SUCCESS || rc == VINF_PGM_SYNC_CR3 /* not returned */, ("%Rrc\n", rc));
    -
    -            PVMCPU pVCpu = VMMGetCpu(pVM);
    -            void  *pv;
    -            rc = pgmRZDynMapHCPageInlined(pVCpu,
    -                                          PGM_PAGE_GET_HCPHYS(pPage),
    -                                          &pv
    -                                          RTLOG_COMMA_SRC_POS);
    -            if (RT_SUCCESS(rc))
    -            {
    -                AssertRCSuccess(rc);
    -
    -                pv = (void *)((uintptr_t)pv | (uintptr_t)(GCPhys & PAGE_OFFSET_MASK));
    -                *ppv = pv;
    -                pLock->pvPage = pv;
    -                pLock->pVCpu  = pVCpu;
    -            }
    -        }
    -    }
    -
    -#else  /* !VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0 */
         /*
          * Query the Physical TLB entry for the page (may fail).
    …
         }

    -#endif /* !VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0 */
         pgmUnlock(pVM);
         return rc;
    …
         AssertRCReturn(rc, rc);

    -#ifdef VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0
    -    /*
    -     * Find the page and make sure it's readable.
    -     */
    -    PPGMPAGE pPage;
    -    rc = pgmPhysGetPageEx(pVM, GCPhys, &pPage);
    -    if (RT_SUCCESS(rc))
    -    {
    -        if (RT_UNLIKELY(PGM_PAGE_IS_MMIO_OR_SPECIAL_ALIAS(pPage)))
    -            rc = VERR_PGM_PHYS_PAGE_RESERVED;
    -        else
    -        {
    -            PVMCPU pVCpu = VMMGetCpu(pVM);
    -            void  *pv;
    -            rc = pgmRZDynMapHCPageInlined(pVCpu,
    -                                          PGM_PAGE_GET_HCPHYS(pPage),
    -                                          &pv
    -                                          RTLOG_COMMA_SRC_POS); /** @todo add a read only flag? */
    -            if (RT_SUCCESS(rc))
    -            {
    -                AssertRCSuccess(rc);
    -
    -                pv = (void *)((uintptr_t)pv | (uintptr_t)(GCPhys & PAGE_OFFSET_MASK));
    -                *ppv = pv;
    -                pLock->pvPage = pv;
    -                pLock->pVCpu  = pVCpu;
    -            }
    -        }
    -    }
    -
    -#else  /* !VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0 */
         /*
          * Query the Physical TLB entry for the page (may fail).
    …
         }

    -#endif /* !VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0 */
         pgmUnlock(pVM);
         return rc;
    …
     VMMDECL(void) PGMPhysReleasePageMappingLock(PVMCC pVM, PPGMPAGEMAPLOCK pLock)
     {
    -#ifdef VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0
    -    Assert(pLock->pvPage != NULL);
    -    Assert(pLock->pVCpu == VMMGetCpu(pVM)); RT_NOREF_PV(pVM);
    -    PGM_DYNMAP_UNUSED_HINT(pLock->pVCpu, pLock->pvPage);
    -    pLock->pVCpu  = NULL;
    -    pLock->pvPage = NULL;
    -
    -#else
     # if !defined(IN_RING0) || !defined(VBOX_WITH_RAM_IN_KERNEL)
         PPGMPAGEMAP pMap       = (PPGMPAGEMAP)pLock->pvMap;
    …
     # endif
         pgmUnlock(pVM);
    -#endif /* !VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0 */
     }

    …

         Log(("pgmPhysGCPhys2R3Ptr(,%RGp,): dont use this API!\n", GCPhys)); /** @todo eliminate this API! */
    -#ifdef VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0
    -    NOREF(pVM); NOREF(pR3Ptr); RT_NOREF_PV(GCPhys);
    -    AssertFailedReturn(VERR_NOT_IMPLEMENTED);
    -#else
         pgmLock(pVM);

    …
         Assert(rc <= VINF_SUCCESS);
         return rc;
    -#endif
    -}
    -
    -#if 0 /*def VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0 */
    -
    -/**
    - * Maps and locks a guest CR3 or PD (PAE) page.
    - *
    - * @returns VINF_SUCCESS on success.
    - * @returns VERR_PGM_PHYS_PAGE_RESERVED it it's a valid GC physical
    - *          page but has no physical backing.
    - * @returns VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS if it's not a valid
    - *          GC physical address.
    - * @returns VERR_PGM_GCPHYS_RANGE_CROSSES_BOUNDARY if the range crosses
    - *          a dynamic ram chunk boundary
    - *
    - * @param   pVM         The cross context VM structure.
    - * @param   GCPhys      The GC physical address to convert.
    - * @param   pR3Ptr      Where to store the R3 pointer on success.  This may or
    - *                      may not be valid in ring-0 depending on the
    - *                      VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0 build option.
    - *
    - * @remarks The caller must own the PGM lock.
    - */
    -int pgmPhysCr3ToHCPtr(PVM pVM, RTGCPHYS GCPhys, PRTR3PTR pR3Ptr)
    -{
    -
    -    PPGMRAMRANGE pRam;
    -    PPGMPAGE pPage;
    -    int rc = pgmPhysGetPageAndRangeEx(pVM, GCPhys, &pPage, &pRam);
    -    if (RT_SUCCESS(rc))
    -        rc = pgmPhysGCPhys2CCPtrInternalDepr(pVM, pPage, GCPhys, (void **)pR3Ptr);
    -    Assert(rc <= VINF_SUCCESS);
    -    return rc;
    -}
    -
    -
    -int pgmPhysCr3ToHCPtr(PVM pVM, RTGCPHYS GCPhys, PRTR3PTR pR3Ptr)
    -{
    -
    -}
    -
    -#endif
    +}
    +

     /**
    …
      */
     VMM_INT_DECL(int) PGMPhysIemGCPhys2PtrNoLock(PVMCC pVM, PVMCPUCC pVCpu, RTGCPHYS GCPhys, uint64_t const volatile *puTlbPhysRev,
    -#ifdef VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0
    -                                             R3PTRTYPE(uint8_t *) *ppb,
    -#else
                                                  R3R0PTRTYPE(uint8_t *) *ppb,
    -#endif
                                                  uint64_t *pfTlb)
     {
    …
                                 break;
                         }
    -#ifdef VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0
    -                    *pfTlb |= PGMIEMGCPHYS2PTR_F_NO_MAPPINGR3;
    -                    *ppb = NULL;
    -#else
    +
                         PPGMPAGEMAPTLBE pTlbe;
                         rc = pgmPhysPageQueryTlbeWithPage(pVM, pPage, GCPhys, &pTlbe);
                         AssertLogRelRCReturn(rc, rc);
                         *ppb = (uint8_t *)pTlbe->pv;
    -#endif
                     }
                     else if (PGM_PAGE_HAS_ACTIVE_ALL_HANDLERS(pPage))
    …
                                     break;
                             }
    -#ifdef VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0
    -                    *pfTlb |= PGMIEMGCPHYS2PTR_F_NO_MAPPINGR3;
    -                    *ppb = NULL;
    -#else
    +
                         PPGMPAGEMAPTLBE pTlbe;
                         rc = pgmPhysPageQueryTlbeWithPage(pVM, pPage, GCPhys, &pTlbe);
                         AssertLogRelRCReturn(rc, rc);
                         *ppb = (uint8_t *)pTlbe->pv;
    -#endif
                     }
                 }
    …
                     }

    -#ifdef VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0
    -            void *pv;
    -            rc = pgmRZDynMapHCPageInlined(pVCpu,
    -                                          PGM_PAGE_GET_HCPHYS(pPage),
    -                                          &pv
    -                                          RTLOG_COMMA_SRC_POS);
    -            if (RT_FAILURE(rc))
    -                return rc;
    -            *ppv = (void *)((uintptr_t)pv | (uintptr_t)(GCPhys & PAGE_OFFSET_MASK));
    -            pLock->pvPage = pv;
    -            pLock->pVCpu  = pVCpu;
    -
    -#else
                 /* Get a ring-3 mapping of the address. */
                 PPGMPAGEMAPTLBE pTlbe;
    …
                     pgmPhysPageMapLockForReading(pVM, pPage, pTlbe, pLock);
                 *ppv = (void *)((uintptr_t)pTlbe->pv | (uintptr_t)(GCPhys & PAGE_OFFSET_MASK));
    -#endif

                 Log6(("PGMPhysIemGCPhys2Ptr: GCPhys=%RGp rc=%Rrc pPage=%R[pgmpage] *ppv=%p\n", GCPhys, rc, pPage, *ppv));
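
In the hunks above, both the removed ring-0 darwin branch and the surviving TLB-based branch fill in a PGMPAGEMAPLOCK that the caller later hands to PGMPhysReleasePageMappingLock. A toy acquire/release pair sketching that contract (invented Demo* names; the real code tracks TLB entries and per-CPU state rather than a bare counter):

    #include <assert.h>
    #include <stdint.h>

    /* Hypothetical miniature of the PGMPAGEMAPLOCK idea: acquiring a mapping
       pins the page via a reference count; releasing drops the pin so the
       mapping may be recycled. */
    typedef struct DemoPageMapLock
    {
        void     *pvPage;   /* the pinned mapping, NULL when not held */
        uint32_t *pcRefs;   /* ref counter that keeps the mapping alive */
    } DemoPageMapLock;

    static void demoAcquirePage(void *pvPage, uint32_t *pcRefs, DemoPageMapLock *pLock)
    {
        (*pcRefs)++;                /* pin the page */
        pLock->pvPage = pvPage;
        pLock->pcRefs = pcRefs;
    }

    static void demoReleasePage(DemoPageMapLock *pLock)
    {
        assert(pLock->pvPage != NULL);
        (*pLock->pcRefs)--;         /* unpin; at zero the mapping may be evicted */
        pLock->pvPage = NULL;
        pLock->pcRefs = NULL;
    }

    int main(void)
    {
        static uint8_t s_abPage[4096];
        uint32_t cRefs = 0;
        DemoPageMapLock Lock;
        demoAcquirePage(s_abPage, &cRefs, &Lock);   /* like PGMPhysGCPhys2CCPtr */
        /* ... access the page while it is pinned ... */
        demoReleasePage(&Lock);                     /* like PGMPhysReleasePageMappingLock */
        return (int)cRefs;                          /* 0 again */
    }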
  • trunk/src/VBox/VMM/VMMAll/PGMAllPool.cpp

    r86489 → r87141

         while (pRegFrame->rcx)
         {
    -# ifdef VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0
    -        uint32_t iPrevSubset = PGMRZDynMapPushAutoSubset(pVCpu);
             pgmPoolMonitorChainChanging(pVCpu, pPool, pPage, GCPhysFault, NULL, uIncrement);
    -        PGMRZDynMapPopAutoSubset(pVCpu, iPrevSubset);
    -# else
    -        pgmPoolMonitorChainChanging(pVCpu, pPool, pPage, GCPhysFault, NULL, uIncrement);
    -# endif
             PGMPhysSimpleWriteGCPhys(pVM, GCPhysFault, &pRegFrame->rax, uIncrement);
             pu32           += uIncrement;
    …
          * Clear all the pages. ASSUMES that pvFault is readable.
          */
    -# ifdef VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0
    -    uint32_t iPrevSubset = PGMRZDynMapPushAutoSubset(pVCpu);
    -# endif
    -
         uint32_t cbWrite = DISGetParamSize(pDis, &pDis->Param1);
         if (cbWrite <= 8)
    …
                 pgmPoolMonitorChainChanging(pVCpu, pPool, pPage, GCPhysFault + off, NULL, RT_MIN(8, cbWrite - off));
         }
    -
    -# ifdef VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0
    -    PGMRZDynMapPopAutoSubset(pVCpu, iPrevSubset);
    -# endif

         /*
    …
         Log(("Flush dirty page %RGp cMods=%d\n", pPage->GCPhys, pPage->cModifications));

    -# ifdef VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0
    -    PVMCPU   pVCpu = VMMGetCpu(pVM);
    -    uint32_t iPrevSubset = PGMRZDynMapPushAutoSubset(pVCpu);
    -# endif
    -
         /* First write protect the page again to catch all write accesses. (before checking for changes -> SMP) */
         int rc = PGMHandlerPhysicalReset(pVM, pPage->GCPhys & PAGE_BASE_GC_MASK);
    …
         else
             Log(("Removed dirty page %RGp cMods=%d cChanges=%d\n", pPage->GCPhys, pPage->cModifications, cChanges));
    -
    -# ifdef VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0
    -    PGMRZDynMapPopAutoSubset(pVCpu, iPrevSubset);
    -# endif
     }

    …
             else
             {
    -# ifdef VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0 /** @todo we can drop this now. */
    -            /* Start a subset here because pgmPoolTrackFlushGCPhysPTsSlow and
    -               pgmPoolTrackFlushGCPhysPTs will/may kill the pool otherwise. */
    -            uint32_t iPrevSubset = PGMRZDynMapPushAutoSubset(pVCpu);
    -# endif
    -
                 if (PGMPOOL_TD_GET_CREFS(u16) != PGMPOOL_TD_CREFS_PHYSEXT)
                 {
    …
                     rc = pgmPoolTrackFlushGCPhysPTsSlow(pVM, pPhysPage);
                 *pfFlushTLBs = true;
    -
    -# ifdef VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0
    -            PGMRZDynMapPopAutoSubset(pVCpu, iPrevSubset);
    -# endif
             }
         }
    …
         }

    -#ifdef VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0
    -    /* Start a subset so we won't run out of mapping space. */
    -    PVMCPU pVCpu = VMMGetCpu(pVM);
    -    uint32_t iPrevSubset = PGMRZDynMapPushAutoSubset(pVCpu);
    -#endif
    -
         /*
          * Mark the page as being in need of an ASMMemZeroPage().
    …
          */
         pgmPoolCacheFlushPage(pPool, pPage);
    -
    -#ifdef VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0
    -    /* Heavy stuff done. */
    -    PGMRZDynMapPopAutoSubset(pVCpu, iPrevSubset);
    -#endif

         /*
  • trunk/src/VBox/VMM/VMMR0/HMVMXR0.cpp

    r87040 → r87141

             return VINF_EM_RESCHEDULE_REM;
         }
    -#endif
    -
    -#ifdef VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0
    -    PGMRZDynMapFlushAutoSet(pVCpu);
     #endif

  • trunk/src/VBox/VMM/include/PGMInline.h

    r86488 → r87141

     }

    -#ifdef VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0
    -
    -/**
    - * Inlined version of the ring-0 version of the host page mapping code
    - * that optimizes access to pages already in the set.
    - *
    - * @returns VINF_SUCCESS. Will bail out to ring-3 on failure.
    - * @param   pVCpu       The cross context virtual CPU structure.
    - * @param   HCPhys      The physical address of the page.
    - * @param   ppv         Where to store the mapping address.
    - * @param   SRC_POS     The source location of the caller.
    - */
    -DECLINLINE(int) pgmRZDynMapHCPageInlined(PVMCPUCC pVCpu, RTHCPHYS HCPhys, void **ppv RTLOG_COMMA_SRC_POS_DECL)
    -{
    -    PPGMMAPSET  pSet    = &pVCpu->pgm.s.AutoSet;
    -
    -    STAM_PROFILE_START(&pVCpu->pgm.s.CTX_SUFF(pStats)->StatRZDynMapHCPageInl, a);
    -    Assert(!(HCPhys & PAGE_OFFSET_MASK));
    -    Assert(pSet->cEntries <= RT_ELEMENTS(pSet->aEntries));
    -
    -    unsigned    iHash   = PGMMAPSET_HASH(HCPhys);
    -    unsigned    iEntry  = pSet->aiHashTable[iHash];
    -    if (    iEntry < pSet->cEntries
    -        &&  pSet->aEntries[iEntry].HCPhys == HCPhys
    -        &&  pSet->aEntries[iEntry].cInlinedRefs < UINT16_MAX - 1)
    -    {
    -        pSet->aEntries[iEntry].cInlinedRefs++;
    -        *ppv = pSet->aEntries[iEntry].pvPage;
    -        STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_SUFF(pStats)->StatRZDynMapHCPageInlHits);
    -    }
    -    else
    -    {
    -        STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_SUFF(pStats)->StatRZDynMapHCPageInlMisses);
    -        pgmRZDynMapHCPageCommon(pSet, HCPhys, ppv RTLOG_COMMA_SRC_POS_ARGS);
    -    }
    -
    -    STAM_PROFILE_STOP(&pVCpu->pgm.s.CTX_SUFF(pStats)->StatRZDynMapHCPageInl, a);
    -    return VINF_SUCCESS;
    -}
    -
    -
    -/**
    - * Inlined version of the guest page mapping code that optimizes access to pages
    - * already in the set.
    - *
    - * @returns VBox status code, see pgmRZDynMapGCPageCommon for details.
    - * @param   pVM         The cross context VM structure.
    - * @param   pVCpu       The cross context virtual CPU structure.
    - * @param   GCPhys      The guest physical address of the page.
    - * @param   ppv         Where to store the mapping address.
    - * @param   SRC_POS     The source location of the caller.
    - */
    -DECLINLINE(int) pgmRZDynMapGCPageV2Inlined(PVMCC pVM, PVMCPUCC pVCpu, RTGCPHYS GCPhys, void **ppv RTLOG_COMMA_SRC_POS_DECL)
    -{
    -    STAM_PROFILE_START(&pVCpu->pgm.s.CTX_SUFF(pStats)->StatRZDynMapGCPageInl, a);
    -    AssertMsg(!(GCPhys & PAGE_OFFSET_MASK), ("%RGp\n", GCPhys));
    -
    -    /*
    -     * Get the ram range.
    -     */
    -    PPGMRAMRANGE pRam = pVM->pgm.s.CTX_SUFF(apRamRangesTlb)[PGM_RAMRANGE_TLB_IDX(GCPhys)];
    -    RTGCPHYS off;
    -    if (   !pRam
    -        || (off = GCPhys - pRam->GCPhys) >= pRam->cb
    -        /** @todo   || page state stuff */
    -       )
    -    {
    -        /* This case is not counted into StatRZDynMapGCPageInl. */
    -        STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_SUFF(pStats)->StatRZDynMapGCPageInlRamMisses);
    -        return pgmRZDynMapGCPageCommon(pVM, pVCpu, GCPhys, ppv RTLOG_COMMA_SRC_POS_ARGS);
    -    }
    -
    -    RTHCPHYS HCPhys = PGM_PAGE_GET_HCPHYS(&pRam->aPages[off >> PAGE_SHIFT]);
    -    STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_SUFF(pStats)->StatRZDynMapGCPageInlRamHits);
    -
    -    /*
    -     * pgmRZDynMapHCPageInlined with out stats.
    -     */
    -    PPGMMAPSET pSet = &pVCpu->pgm.s.AutoSet;
    -    Assert(!(HCPhys & PAGE_OFFSET_MASK));
    -    Assert(pSet->cEntries <= RT_ELEMENTS(pSet->aEntries));
    -
    -    unsigned    iHash   = PGMMAPSET_HASH(HCPhys);
    -    unsigned    iEntry  = pSet->aiHashTable[iHash];
    -    if (    iEntry < pSet->cEntries
    -        &&  pSet->aEntries[iEntry].HCPhys == HCPhys
    -        &&  pSet->aEntries[iEntry].cInlinedRefs < UINT16_MAX - 1)
    -    {
    -        pSet->aEntries[iEntry].cInlinedRefs++;
    -        *ppv = pSet->aEntries[iEntry].pvPage;
    -        STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_SUFF(pStats)->StatRZDynMapGCPageInlHits);
    -    }
    -    else
    -    {
    -        STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_SUFF(pStats)->StatRZDynMapGCPageInlMisses);
    -        pgmRZDynMapHCPageCommon(pSet, HCPhys, ppv RTLOG_COMMA_SRC_POS_ARGS);
    -    }
    -
    -    STAM_PROFILE_STOP(&pVCpu->pgm.s.CTX_SUFF(pStats)->StatRZDynMapGCPageInl, a);
    -    return VINF_SUCCESS;
    -}
    -
    -
    -/**
    - * Inlined version of the ring-0 version of guest page mapping that optimizes
    - * access to pages already in the set.
    - *
    - * @returns VBox status code, see pgmRZDynMapGCPageCommon for details.
    - * @param   pVCpu       The cross context virtual CPU structure.
    - * @param   GCPhys      The guest physical address of the page.
    - * @param   ppv         Where to store the mapping address.
    - * @param   SRC_POS     The source location of the caller.
    - */
    -DECLINLINE(int) pgmRZDynMapGCPageInlined(PVMCPUCC pVCpu, RTGCPHYS GCPhys, void **ppv RTLOG_COMMA_SRC_POS_DECL)
    -{
    -    return pgmRZDynMapGCPageV2Inlined(pVCpu->CTX_SUFF(pVM), pVCpu, GCPhys, ppv RTLOG_COMMA_SRC_POS_ARGS);
    -}
    -
    -
    -/**
    - * Inlined version of the ring-0 version of the guest byte mapping code
    - * that optimizes access to pages already in the set.
    - *
    - * @returns VBox status code, see pgmRZDynMapGCPageCommon for details.
    - * @param   pVCpu       The cross context virtual CPU structure.
    - * @param   GCPhys      The guest physical address of the page.
    - * @param   ppv         Where to store the mapping address. The offset is
    - *                      preserved.
    - * @param   SRC_POS     The source location of the caller.
    - */
    -DECLINLINE(int) pgmRZDynMapGCPageOffInlined(PVMCPUCC pVCpu, RTGCPHYS GCPhys, void **ppv RTLOG_COMMA_SRC_POS_DECL)
    -{
    -    STAM_PROFILE_START(&pVCpu->pgm.s.StatRZDynMapGCPageInl, a);
    -
    -    /*
    -     * Get the ram range.
    -     */
    -    PVMCC             pVM  = pVCpu->CTX_SUFF(pVM);
    -    PPGMRAMRANGE    pRam = pVM->pgm.s.CTX_SUFF(apRamRangesTlb)[PGM_RAMRANGE_TLB_IDX(GCPhys)];
    -    RTGCPHYS        off;
    -    if (   !pRam
    -        || (off = GCPhys - pRam->GCPhys) >= pRam->cb
    -        /** @todo   || page state stuff */
    -       )
    -    {
    -        /* This case is not counted into StatRZDynMapGCPageInl. */
    -        STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_SUFF(pStats)->StatRZDynMapGCPageInlRamMisses);
    -        return pgmRZDynMapGCPageCommon(pVM, pVCpu, GCPhys, ppv RTLOG_COMMA_SRC_POS_ARGS);
    -    }
    -
    -    RTHCPHYS HCPhys = PGM_PAGE_GET_HCPHYS(&pRam->aPages[off >> PAGE_SHIFT]);
    -    STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_SUFF(pStats)->StatRZDynMapGCPageInlRamHits);
    -
    -    /*
    -     * pgmRZDynMapHCPageInlined with out stats.
    -     */
    -    PPGMMAPSET pSet = &pVCpu->pgm.s.AutoSet;
    -    Assert(!(HCPhys & PAGE_OFFSET_MASK));
    -    Assert(pSet->cEntries <= RT_ELEMENTS(pSet->aEntries));
    -
    -    unsigned    iHash   = PGMMAPSET_HASH(HCPhys);
    -    unsigned    iEntry  = pSet->aiHashTable[iHash];
    -    if (    iEntry < pSet->cEntries
    -        &&  pSet->aEntries[iEntry].HCPhys == HCPhys
    -        &&  pSet->aEntries[iEntry].cInlinedRefs < UINT16_MAX - 1)
    -    {
    -        STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_SUFF(pStats)->StatRZDynMapGCPageInlHits);
    -        pSet->aEntries[iEntry].cInlinedRefs++;
    -        *ppv = (void *)((uintptr_t)pSet->aEntries[iEntry].pvPage | (PAGE_OFFSET_MASK & (uintptr_t)GCPhys));
    -    }
    -    else
    -    {
    -        STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_SUFF(pStats)->StatRZDynMapGCPageInlMisses);
    -        pgmRZDynMapHCPageCommon(pSet, HCPhys, ppv RTLOG_COMMA_SRC_POS_ARGS);
    -        *ppv = (void *)((uintptr_t)*ppv | (PAGE_OFFSET_MASK & (uintptr_t)GCPhys));
    -    }
    -
    -    STAM_PROFILE_STOP(&pVCpu->pgm.s.CTX_SUFF(pStats)->StatRZDynMapGCPageInl, a);
    -    return VINF_SUCCESS;
    -}
    -
    -
    -/**
    - * Maps the page into current context (RC and maybe R0).
    - *
    - * @returns pointer to the mapping.
    - * @param   pVM         The cross context VM structure.
    - * @param   pPage       The page.
    - * @param   SRC_POS     The source location of the caller.
    - */
    -DECLINLINE(void *) pgmPoolMapPageInlined(PVMCC pVM, PPGMPOOLPAGE pPage RTLOG_COMMA_SRC_POS_DECL)
    -{
    -    if (pPage->idx >= PGMPOOL_IDX_FIRST)
    -    {
    -        Assert(pPage->idx < pVM->pgm.s.CTX_SUFF(pPool)->cCurPages);
    -        void *pv;
    -        pgmRZDynMapHCPageInlined(VMMGetCpu(pVM), pPage->Core.Key, &pv RTLOG_COMMA_SRC_POS_ARGS);
    -        return pv;
    -    }
    -    AssertFatalMsgFailed(("pgmPoolMapPageInlined invalid page index %x\n", pPage->idx));
    -}
    -
    -
    -/**
    - * Maps the page into current context (RC and maybe R0).
    - *
    - * @returns pointer to the mapping.
    - * @param   pVM         The cross context VM structure.
    - * @param   pVCpu       The cross context virtual CPU structure.
    - * @param   pPage       The page.
    - * @param   SRC_POS     The source location of the caller.
    - */
    -DECLINLINE(void *) pgmPoolMapPageV2Inlined(PVMCC pVM, PVMCPUCC pVCpu, PPGMPOOLPAGE pPage RTLOG_COMMA_SRC_POS_DECL)
    -{
    -    if (pPage->idx >= PGMPOOL_IDX_FIRST)
    -    {
    -        Assert(pPage->idx < pVM->pgm.s.CTX_SUFF(pPool)->cCurPages);
    -        void *pv;
    -        Assert(pVCpu == VMMGetCpu(pVM)); RT_NOREF_PV(pVM);
    -        pgmRZDynMapHCPageInlined(pVCpu, pPage->Core.Key, &pv RTLOG_COMMA_SRC_POS_ARGS);
    -        return pv;
    -    }
    -    AssertFatalMsgFailed(("pgmPoolMapPageV2Inlined invalid page index %x\n", pPage->idx));
    -}
    -
    -#endif /*  VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0 */

     /**
    …
             STAM_COUNTER_INC(&pVM->pgm.s.CTX_SUFF(pStats)->CTX_MID_Z(Stat,PageMapTlbHits));
             rc = VINF_SUCCESS;
    -#if 0 //def VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0
    -# ifdef IN_RING3
    -        if (pTlbe->pv == (void *)pVM->pgm.s.pvZeroPgR0)
    -# else
    -        if (pTlbe->pv == (void *)pVM->pgm.s.pvZeroPgR3)
    -# endif
    -            pTlbe->pv = pVM->pgm.s.CTX_SUFF(pvZeroPg);
    -#endif
             AssertPtr(pTlbe->pv);
    -#if defined(IN_RING3) || (!defined(VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0) && !defined(VBOX_WITH_RAM_IN_KERNEL))
    +#if defined(IN_RING3) || !defined(VBOX_WITH_RAM_IN_KERNEL)
             Assert(!pTlbe->pMap || RT_VALID_PTR(pTlbe->pMap->pv));
     #endif
    …
     DECLINLINE(int) pgmGstGet32bitPDPtrEx(PVMCPUCC pVCpu, PX86PD *ppPd)
     {
    -#ifdef VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0
    -    int rc = pgmRZDynMapGCPageInlined(pVCpu, pVCpu->pgm.s.GCPhysCR3, (void **)ppPd RTLOG_COMMA_SRC_POS);
    -    if (RT_FAILURE(rc))
    -    {
    -        *ppPd = NULL;
    -        return rc;
    -    }
    -#else
         *ppPd = pVCpu->pgm.s.CTX_SUFF(pGst32BitPd);
         if (RT_UNLIKELY(!*ppPd))
             return pgmGstLazyMap32BitPD(pVCpu, ppPd);
    -#endif
         return VINF_SUCCESS;
     }
    …
     DECLINLINE(PX86PD) pgmGstGet32bitPDPtr(PVMCPUCC pVCpu)
     {
    -#ifdef VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0
    -    PX86PD pGuestPD = NULL;
    -    int rc = pgmRZDynMapGCPageInlined(pVCpu, pVCpu->pgm.s.GCPhysCR3, (void **)&pGuestPD RTLOG_COMMA_SRC_POS);
    -    if (RT_FAILURE(rc))
    -    {
    -        AssertMsg(rc == VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS, ("%Rrc\n", rc));
    -        return NULL;
    -    }
    -#else
         PX86PD pGuestPD = pVCpu->pgm.s.CTX_SUFF(pGst32BitPd);
         if (RT_UNLIKELY(!pGuestPD))
    …
                 return NULL;
         }
    -#endif
         return pGuestPD;
     }
    …
     DECLINLINE(int) pgmGstGetPaePDPTPtrEx(PVMCPUCC pVCpu, PX86PDPT *ppPdpt)
     {
    -#ifdef VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0
    -    int rc = pgmRZDynMapGCPageOffInlined(pVCpu, pVCpu->pgm.s.GCPhysCR3, (void **)ppPdpt RTLOG_COMMA_SRC_POS);
    -    if (RT_FAILURE(rc))
    -    {
    -        *ppPdpt = NULL;
    -        return rc;
    -    }
    -#else
         *ppPdpt = pVCpu->pgm.s.CTX_SUFF(pGstPaePdpt);
         if (RT_UNLIKELY(!*ppPdpt))
             return pgmGstLazyMapPaePDPT(pVCpu, ppPdpt);
    -#endif
         return VINF_SUCCESS;
     }
    …
         AssertGCPtr32(GCPtr);

    -#ifdef VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0
    -    PX86PDPT pGuestPDPT = NULL;
    -    int rc = pgmRZDynMapGCPageOffInlined(pVCpu, pVCpu->pgm.s.GCPhysCR3, (void **)&pGuestPDPT RTLOG_COMMA_SRC_POS);
    -    AssertRCReturn(rc, NULL);
    -#else
         PX86PDPT pGuestPDPT = pVCpu->pgm.s.CTX_SUFF(pGstPaePdpt);
         if (RT_UNLIKELY(!pGuestPDPT))
    …
                 return NULL;
         }
    -#endif
         return &pGuestPDPT->a[(uint32_t)GCPtr >> X86_PDPT_SHIFT];
     }
    …
             {
                 const unsigned iPD = (GCPtr >> X86_PD_PAE_SHIFT) & X86_PD_PAE_MASK;
    -#ifdef VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0
    -            PX86PDPAE   pGuestPD = NULL;
    -            int rc = pgmRZDynMapGCPageInlined(pVCpu,
    -                                              pGuestPDPT->a[iPdpt].u & X86_PDPE_PG_MASK,
    -                                              (void **)&pGuestPD
    -                                              RTLOG_COMMA_SRC_POS);
    -            if (RT_SUCCESS(rc))
    -                return pGuestPD->a[iPD];
    -            AssertMsg(rc == VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS, ("%Rrc\n", rc));
    -#else
                 PX86PDPAE   pGuestPD = pVCpu->pgm.s.CTX_SUFF(apGstPaePDs)[iPdpt];
                 if (    !pGuestPD
    …
                 if (pGuestPD)
                     return pGuestPD->a[iPD];
    -#endif
             }
         }
    …

                 /* The PDE. */
    -#ifdef VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0
    -            PX86PDPAE   pGuestPD = NULL;
    -            int rc = pgmRZDynMapGCPageInlined(pVCpu,
    -                                              uPdpe & X86_PDPE_PG_MASK,
    -                                              (void **)&pGuestPD
    -                                              RTLOG_COMMA_SRC_POS);
    -            if (RT_FAILURE(rc))
    -            {
    -                AssertMsg(rc == VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS, ("%Rrc\n", rc));
    -                return NULL;
    -            }
    -#else
                 PX86PDPAE   pGuestPD = pVCpu->pgm.s.CTX_SUFF(apGstPaePDs)[iPdpt];
                 if (    !pGuestPD
                     ||  (uPdpe & X86_PDPE_PG_MASK) != pVCpu->pgm.s.aGCPhysGstPaePDs[iPdpt])
                     pgmGstLazyMapPaePD(pVCpu, iPdpt, &pGuestPD);
    -#endif
                 *piPD = (GCPtr >> X86_PD_PAE_SHIFT) & X86_PD_PAE_MASK;
                 return pGuestPD;
    …
     DECLINLINE(int) pgmGstGetLongModePML4PtrEx(PVMCPUCC pVCpu, PX86PML4 *ppPml4)
     {
    -#ifdef VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0
    -    int rc = pgmRZDynMapGCPageInlined(pVCpu, pVCpu->pgm.s.GCPhysCR3, (void **)ppPml4 RTLOG_COMMA_SRC_POS);
    -    if (RT_FAILURE(rc))
    -    {
    -        *ppPml4 = NULL;
    -        return rc;
    -    }
    -#else
         *ppPml4 = pVCpu->pgm.s.CTX_SUFF(pGstAmd64Pml4);
         if (RT_UNLIKELY(!*ppPml4))
             return pgmGstLazyMapPml4(pVCpu, ppPml4);
    -#endif
         return VINF_SUCCESS;
     }
    …
     DECLINLINE(PX86PML4E) pgmGstGetLongModePML4EPtr(PVMCPUCC pVCpu, unsigned int iPml4)
     {
    -#ifdef VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0
    -    PX86PML4 pGuestPml4;
    -    int rc = pgmRZDynMapGCPageInlined(pVCpu, pVCpu->pgm.s.GCPhysCR3, (void **)&pGuestPml4 RTLOG_COMMA_SRC_POS);
    -    AssertRCReturn(rc, NULL);
    -#else
         PX86PML4 pGuestPml4 = pVCpu->pgm.s.CTX_SUFF(pGstAmd64Pml4);
         if (pGuestPml4)
    …
              AssertRCReturn(rc, NULL);
         }
    -#endif
         return &pGuestPml4->a[iPml4];
     }
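
The removed pgmRZDynMapHCPageInlined above is the hot path of a small per-CPU mapping cache: hash the host-physical address, and on a hit reuse the existing mapping and bump its inlined reference count, otherwise fall back to the slow common path. A self-contained miniature of that lookup (invented names, a sketch rather than the VMM API; the slow path is stubbed out):

    #include <stdint.h>

    #define DEMO_SET_SIZE      64u
    #define DEMO_HASH(HCPhys)  ((unsigned)((HCPhys) >> 12) % DEMO_SET_SIZE)

    typedef struct DemoSetEntry
    {
        uint64_t HCPhys;    /* page-aligned host-physical address */
        void    *pvPage;    /* where the page is currently mapped */
        uint16_t cRefs;     /* reference count on this mapping */
    } DemoSetEntry;

    typedef struct DemoMapSet
    {
        unsigned     cEntries;
        unsigned     aiHashTable[DEMO_SET_SIZE];   /* hash -> entry index */
        DemoSetEntry aEntries[DEMO_SET_SIZE];
    } DemoMapSet;

    /* Slow path stub: a real implementation would create a new mapping and
       insert it into the set; here it simply fails. */
    static int demoMapSlow(DemoMapSet *pSet, uint64_t HCPhys, void **ppv)
    {
        (void)pSet; (void)HCPhys; *ppv = NULL;
        return -1;
    }

    static int demoMapPage(DemoMapSet *pSet, uint64_t HCPhys, void **ppv)
    {
        unsigned iEntry = pSet->aiHashTable[DEMO_HASH(HCPhys)];
        if (   iEntry < pSet->cEntries
            && pSet->aEntries[iEntry].HCPhys == HCPhys
            && pSet->aEntries[iEntry].cRefs < UINT16_MAX - 1)
        {
            pSet->aEntries[iEntry].cRefs++;         /* hit: reuse the mapping */
            *ppv = pSet->aEntries[iEntry].pvPage;
            return 0;
        }
        return demoMapSlow(pSet, HCPhys, ppv);      /* miss: take the slow path */
    }

    int main(void)
    {
        static DemoMapSet s_Set;                    /* zero-initialized, empty */
        void *pv;
        return demoMapPage(&s_Set, 0x1000, &pv) == -1 ? 0 : 1; /* miss expected */
    }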
  • trunk/src/VBox/VMM/include/PGMInternal.h

    r86466 → r87141

      * @remark  There is no need to assert on the result.
      */
    -#ifdef VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0
    -# define PGM_GCPHYS_2_PTR_V2(pVM, pVCpu, GCPhys, ppv) \
    -     pgmRZDynMapGCPageV2Inlined(pVM, pVCpu, GCPhys, (void **)(ppv) RTLOG_COMMA_SRC_POS)
    -#else
    -# define PGM_GCPHYS_2_PTR_V2(pVM, pVCpu, GCPhys, ppv) \
    -     pgmPhysGCPhys2R3Ptr(pVM, GCPhys, (PRTR3PTR)(ppv)) /** @todo this isn't asserting! */
    -#endif
    +#define PGM_GCPHYS_2_PTR_V2(pVM, pVCpu, GCPhys, ppv) \
    +    pgmPhysGCPhys2R3Ptr(pVM, GCPhys, (PRTR3PTR)(ppv)) /** @todo this isn't asserting! */

     /** @def PGM_GCPHYS_2_PTR
    …
      * @remark  There is no need to assert on the result.
      */
    -#ifdef VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0
    -# define PGM_GCPHYS_2_PTR_EX(pVM, GCPhys, ppv) \
    -     pgmRZDynMapGCPageOffInlined(VMMGetCpu(pVM), GCPhys, (void **)(ppv) RTLOG_COMMA_SRC_POS)
    -#else
    -# define PGM_GCPHYS_2_PTR_EX(pVM, GCPhys, ppv) \
    -     pgmPhysGCPhys2R3Ptr(pVM, GCPhys, (PRTR3PTR)(ppv)) /** @todo this isn't asserting! */
    -#endif
    +#define PGM_GCPHYS_2_PTR_EX(pVM, GCPhys, ppv) \
    +    pgmPhysGCPhys2R3Ptr(pVM, GCPhys, (PRTR3PTR)(ppv)) /** @todo this isn't asserting! */

     /** @def PGM_DYNMAP_UNUSED_HINT
    …
      * @param   pvPage  The pool page.
      */
    -#ifdef VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0
    -# ifdef LOG_ENABLED
    -#  define PGM_DYNMAP_UNUSED_HINT(pVCpu, pvPage)  pgmRZDynMapUnusedHint(pVCpu, pvPage, RT_SRC_POS)
    -# else
    -#  define PGM_DYNMAP_UNUSED_HINT(pVCpu, pvPage)  pgmRZDynMapUnusedHint(pVCpu, pvPage)
    -# endif
    -#else
    -# define PGM_DYNMAP_UNUSED_HINT(pVCpu, pvPage)  do {} while (0)
    -#endif
    +#define PGM_DYNMAP_UNUSED_HINT(pVCpu, pvPage)  do {} while (0)

     /** @def PGM_DYNMAP_UNUSED_HINT_VM
    …
      * @remark  There is no need to assert on the result.
      */
    -#ifdef VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0
    -# define PGMPOOL_PAGE_2_PTR(a_pVM, a_pPage)     pgmPoolMapPageInlined((a_pVM), (a_pPage) RTLOG_COMMA_SRC_POS)
    -#elif defined(VBOX_STRICT) || 1 /* temporarily going strict here */
    +#if defined(VBOX_STRICT) || 1 /* temporarily going strict here */
     # define PGMPOOL_PAGE_2_PTR(a_pVM, a_pPage)     pgmPoolMapPageStrict(a_pPage, __FUNCTION__)
     DECLINLINE(void *) pgmPoolMapPageStrict(PPGMPOOLPAGE a_pPage, const char *pszCaller)
    …
      * @remark  There is no need to assert on the result.
      */
    -#ifdef VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0
    -# define PGMPOOL_PAGE_2_PTR_V2(a_pVM, a_pVCpu, a_pPage)     pgmPoolMapPageV2Inlined((a_pVM), (a_pVCpu), (a_pPage) RTLOG_COMMA_SRC_POS)
    -#else
    -# define PGMPOOL_PAGE_2_PTR_V2(a_pVM, a_pVCpu, a_pPage)     PGMPOOL_PAGE_2_PTR((a_pVM), (a_pPage))
    -#endif
    +#define PGMPOOL_PAGE_2_PTR_V2(a_pVM, a_pVCpu, a_pPage)     PGMPOOL_PAGE_2_PTR((a_pVM), (a_pPage))


    …

     #endif /* IN_RING3 */
    -#ifdef VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0
    -int             pgmRZDynMapHCPageCommon(PPGMMAPSET pSet, RTHCPHYS HCPhys, void **ppv RTLOG_COMMA_SRC_POS_DECL);
    -int             pgmRZDynMapGCPageCommon(PVM pVM, PVMCPU pVCpu, RTGCPHYS GCPhys, void **ppv RTLOG_COMMA_SRC_POS_DECL);
    -# ifdef LOG_ENABLED
    -void            pgmRZDynMapUnusedHint(PVMCPU pVCpu, void *pvHint, RT_SRC_POS_DECL);
    -# else
    -void            pgmRZDynMapUnusedHint(PVMCPU pVCpu, void *pvHint);
    -# endif
    -#endif
     int             pgmPoolAlloc(PVMCC pVM, RTGCPHYS GCPhys, PGMPOOLKIND enmKind, PGMPOOLACCESS enmAccess, bool fA20Enabled,
                                  uint16_t iUser, uint32_t iUserTable, bool fLockPage, PPPGMPOOLPAGE ppPage);