VirtualBox

Changeset 17509 in vbox


Ignore:
Timestamp:
Mar 7, 2009 1:30:23 AM (16 years ago)
Author:
vboxsync
Message:

PGM: Moved the page pool PT flushing code in the access handler bits to where it belongs and called it pgmPoolTrackFlushGCPhys. Fixed a status code corruption bug in PGMR3PhysTlbGCPhys2Ptr (new phys). Made the lazy zero page replacement code work in the new code; it's disabled by default because it frequently requires flushing the shadow page pool, since the tracking code assumes the HCPhys of a PGMPAGE is unique and never shared.

Location:
trunk/src/VBox/VMM
Files:
7 edited

Legend:

Unmodified
Added
Removed
  • trunk/src/VBox/VMM/PGMInternal.h

    r17489 r17509  
    146146#ifdef VBOX_STRICT
    147147# define VBOX_STRICT_PGM_HANDLER_VIRTUAL
     148#endif
     149
     150#ifdef VBOX_WITH_NEW_PHYS_CODE
     151/** @def VBOX_WITH_NEW_LAZY_PAGE_ALLOC
     152 * Enables the experimental lazy page allocation code. */
     153/*# define VBOX_WITH_NEW_LAZY_PAGE_ALLOC */
    148154#endif
    149155/** @} */
     
    30773083void            pgmPoolClearAll(PVM pVM);
    30783084int             pgmPoolSyncCR3(PVM pVM);
     3085int             pgmPoolTrackFlushGCPhys(PVM pVM, PPGMPAGE pPhysPage, bool *pfFlushTLBs);
    30793086void            pgmPoolTrackFlushGCPhysPT(PVM pVM, PPGMPAGE pPhysPage, uint16_t iShw, uint16_t cRefs);
    30803087void            pgmPoolTrackFlushGCPhysPTs(PVM pVM, PPGMPAGE pPhysPage, uint16_t iPhysExt);
  • trunk/src/VBox/VMM/PGMPhys.cpp

    r17503 r17509  
    15521552                    return rc;
    15531553                }
     1554                AssertMsg(rc == VINF_SUCCESS || rc == VINF_PGM_SYNC_CR3 /* returned */, ("%Rrc\n", rc));
    15541555            }
    15551556
    15561557            void *pvDstPage;
    15571558            PPGMPAGEMAP pMapIgnored;
    1558             rc = pgmPhysPageMap(pVM, &pRomPage->Shadow, GCPhys & X86_PTE_PG_MASK, &pMapIgnored, &pvDstPage);
    1559             if (RT_SUCCESS(rc))
     1559            int rc2 = pgmPhysPageMap(pVM, &pRomPage->Shadow, GCPhys & X86_PTE_PG_MASK, &pMapIgnored, &pvDstPage);
     1560            if (RT_SUCCESS(rc2))
    15601561                memcpy((uint8_t *)pvDstPage + (GCPhys & PAGE_OFFSET_MASK), pvBuf, cbBuf);
     1562            else
     1563                rc = rc2;
    15611564
    15621565            pgmUnlock(pVM);
     
    26652668        if (RT_SUCCESS(rc))
    26662669        {
     2670            int rc2;
     2671
    26672672            /* Make sure what we return is writable. */
    26682673            if (fWritable && rc != VINF_PGM_PHYS_TLB_CATCH_WRITE)
     
    26742679                    case PGM_PAGE_STATE_SHARED:
    26752680                    case PGM_PAGE_STATE_WRITE_MONITORED:
    2676                         rc = pgmPhysPageMakeWritable(pVM, pPage, GCPhys & ~(RTGCPHYS)PAGE_OFFSET_MASK);
    2677                         AssertLogRelRCReturn(rc, rc);
     2681                        rc2 = pgmPhysPageMakeWritable(pVM, pPage, GCPhys & ~(RTGCPHYS)PAGE_OFFSET_MASK);
     2682                        AssertLogRelRCReturn(rc2, rc2);
    26782683                        break;
    26792684                }
     
    26812686            /* Get a ring-3 mapping of the address. */
    26822687            PPGMPAGER3MAPTLBE pTlbe;
    2683             int rc = pgmPhysPageQueryTlbe(&pVM->pgm.s, GCPhys, &pTlbe);
    2684             AssertLogRelRCReturn(rc, rc);
     2688            rc2 = pgmPhysPageQueryTlbe(&pVM->pgm.s, GCPhys, &pTlbe);
     2689            AssertLogRelRCReturn(rc2, rc2);
    26852690            *ppv = (void *)((uintptr_t)pTlbe->pv | (GCPhys & PAGE_OFFSET_MASK));
    26862691            /** @todo mapping/locking hell; this isn't horribly efficient since
  • trunk/src/VBox/VMM/VMMAll/PGMAll.cpp

    r17507 r17509  
    20902090        rc = VMMR0CallHost(pVM, VMMCALLHOST_PGM_LOCK, 0);
    20912091#endif
    2092     AssertRC(rc);
     2092    AssertMsg(rc == VINF_SUCCESS, ("%Rrc\n", rc));
    20932093    return rc;
    20942094}
  • trunk/src/VBox/VMM/VMMAll/PGMAllBth.h

    r17505 r17509  
    399399            PPGMPAGE pPage;
    400400            rc = pgmPhysGetPageEx(&pVM->pgm.s, GCPhys, &pPage);
    401             if (RT_SUCCESS(rc))
     401            if (RT_SUCCESS(rc)) /** just handle the failure immediately (it returns) and make things easier to read. */
    402402            {
    403403                if (   PGM_PAGE_HAS_ACTIVE_PHYSICAL_HANDLERS(pPage)
     
    641641            STAM_PROFILE_STOP(&pVM->pgm.s.StatRZTrap0eTimeHandlers, b);
    642642
    643 #  ifdef PGM_OUT_OF_SYNC_IN_GC
     643#  ifdef PGM_OUT_OF_SYNC_IN_GC /** @todo remove this bugger. */
    644644            /*
    645645             * We are here only if page is present in Guest page tables and
     
    755755                }
    756756            }
    757             else
     757            else /* uErr & X86_TRAP_PF_P: */
    758758            {
    759 #   ifdef VBOX_WITH_NEW_PHYS_CODE
    760759                /*
    761                  * Need to deal with these buggers somewhere...
    762                  */
    763                 if (    PGM_PAGE_GET_STATE(pPage) != PGM_PAGE_STATE_ALLOCATED
    764                     &&  (uErr & X86_TRAP_PF_RW))
    765                 {
    766                     Log(("PGM #PF: %RGp %R[pgmpage] uErr=%#x\n", GCPhys, pPage, uErr));
    767                     AssertMsgFailed(("PGM #PF: %RGp %R[pgmpage] uErr=%#x\n", GCPhys, pPage, uErr));
    768                 }
    769 #   endif /* VBOX_WITH_NEW_PHYS_CODE */
    770 
    771                 /*
    772                  * A side effect of not flushing global PDEs are out of sync pages due
     760                 * Write protected pages are made writable when the guest makes the first
     761                 * write to them. This happens for pages that are shared, write monitored
     762                 * and not yet allocated.
     763                 *
     764                 * Also, a side effect of not flushing global PDEs are out of sync pages due
    773765                 * to physical monitored regions, that are no longer valid.
    774                  * Assume for now it only applies to the read/write flag
     766                 * Assume for now it only applies to the read/write flag.
    775767                 */
    776768                if (RT_SUCCESS(rc) && (uErr & X86_TRAP_PF_RW))
    777769                {
     770#   ifdef VBOX_WITH_NEW_PHYS_CODE
     771                    if (PGM_PAGE_GET_STATE(pPage) != PGM_PAGE_STATE_ALLOCATED)
     772                    {
     773                        Log(("PGM #PF: Make writable: %RGp %R[pgmpage] pvFault=%RGp uErr=%#x\n",
     774                             GCPhys, pPage, pvFault, uErr));
     775                        rc = pgmPhysPageMakeWritableUnlocked(pVM, pPage, GCPhys);
     776                        if (rc != VINF_SUCCESS)
     777                        {
     778                            AssertMsg(rc == VINF_PGM_SYNC_CR3 || RT_FAILURE(rc), ("%Rrc\n", rc));
     779                            return rc;
     780                        }
     781                    }
     782                    /// @todo count the above case; else
     783#   endif /* VBOX_WITH_NEW_PHYS_CODE */
    778784                    if (uErr & X86_TRAP_PF_US)
    779785                        STAM_COUNTER_INC(&pVM->pgm.s.CTX_MID_Z(Stat,PageOutOfSyncUser));
     
    781787                        STAM_COUNTER_INC(&pVM->pgm.s.CTX_MID_Z(Stat,PageOutOfSyncSupervisor));
    782788
    783 
    784789                    /*
    785                      * Note: Do NOT use PGM_SYNC_NR_PAGES here. That only works if the page is not present, which is not true in this case.
     790                     * Note: Do NOT use PGM_SYNC_NR_PAGES here. That only works if the
     791                     *       page is not present, which is not true in this case.
    786792                     */
    787793                    rc = PGM_BTH_NAME(SyncPage)(pVM, PdeSrc, pvFault, 1, uErr);
     
    862868#  endif /* PGM_OUT_OF_SYNC_IN_GC */
    863869        }
    864         else
     870        else /* GCPhys == NIL_RTGCPHYS */
    865871        {
    866872            /*
     
    880886        }
    881887    }
     888    /* else: !present (guest) */
    882889
    883890
     
    14821489        {
    14831490#ifdef VBOX_WITH_NEW_PHYS_CODE
     1491# ifndef VBOX_WITH_NEW_LAZY_PAGE_ALLOC
    14841492            /* Try make the page writable if necessary. */
    14851493            if (    PteSrc.n.u1Write
     
    14901498                AssertRC(rc);
    14911499            }
     1500# endif
    14921501#endif
    14931502
     
    15741583            {
    15751584                PteDst.n.u1Write = 0;   /** @todo this isn't quite working yet. */
    1576                 Log3(("SyncPageWorker: write-protecting pPage=%R[pgmpage]at iPTDst=%d\n", pPage, iPTDst));
     1585                Log3(("SyncPageWorker: write-protecting %RGp pPage=%R[pgmpage]at iPTDst=%d\n", (RTGCPHYS)(PteSrc.u & X86_PTE_PAE_PG_MASK), pPage, iPTDst));
    15771586            }
    15781587#endif
     
    18641873                {
    18651874# ifdef VBOX_WITH_NEW_PHYS_CODE
     1875#  ifndef VBOX_WITH_NEW_LAZY_PAGE_ALLOC
    18661876                    /* Try make the page writable if necessary. */
    18671877                    if (    PdeSrc.n.u1Write
     
    18721882                        AssertRC(rc);
    18731883                    }
     1884#  endif
    18741885# endif
    18751886
     
    18991910                    {
    19001911                        PteDst.n.u1Write = 0;   /** @todo this isn't quite working yet... */
    1901                         Log3(("SyncPage: write-protecting pPage=%R[pgmpage] at %RGv\n", pPage, GCPtrPage));
     1912                        Log3(("SyncPage: write-protecting %RGp pPage=%R[pgmpage] at %RGv\n", GCPhys, pPage, GCPtrPage));
    19021913                    }
    19031914# endif
     
    27612772
    27622773# ifdef VBOX_WITH_NEW_PHYS_CODE
     2774#  ifndef VBOX_WITH_NEW_LAZY_PAGE_ALLOC
    27632775                        /* Try make the page writable if necessary. */
    27642776                        if (    PteDstBase.n.u1Write
     
    27692781                            AssertRCReturn(rc, rc);
    27702782                        }
     2783#  endif
    27712784# else  /* !VBOX_WITH_NEW_PHYS_CODE */
    27722785                        /* Make sure the RAM has already been allocated. */
     
    28162829                        {
    28172830                            PteDst.n.u1Write = 0;   /** @todo this isn't quite working yet... */
    2818                             Log3(("SyncPT: write-protecting pPage=%R[pgmpage] at %RGv\n", pPage, (RTGCPTR)(GCPtr | (iPTDst << SHW_PT_SHIFT))));
     2831                            Log3(("SyncPT: write-protecting %RGp pPage=%R[pgmpage] at %RGv\n", GCPhys, pPage, (RTGCPTR)(GCPtr | (iPTDst << SHW_PT_SHIFT))));
    28192832                        }
    28202833# endif
  • trunk/src/VBox/VMM/VMMAll/PGMAllHandler.cpp

    r17432 r17509  
    216216     */
    217217    bool            fFlushTLBs = false;
    218 #if defined(PGMPOOL_WITH_GCPHYS_TRACKING) || defined(PGMPOOL_WITH_CACHE)
    219218    int             rc = VINF_SUCCESS;
    220 #else
    221     const int       rc = VINF_PGM_GCPHYS_ALIASED;
    222 #endif
    223219    const unsigned  uState = pgmHandlerPhysicalCalcState(pCur);
    224220    RTUINT          cPages = pCur->cPages;
     
    248244            Assert(PGM_PAGE_GET_HCPHYS(pPage));
    249245
    250 #ifdef PGMPOOL_WITH_GCPHYS_TRACKING
    251             const uint16_t u16 = PGM_PAGE_GET_TRACKING(&pRam->aPages[i]);
    252             if (u16)
    253             {
    254 # ifdef VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0
    255                 /* Start a subset here because pgmPoolTrackFlushGCPhysPTsSlow and pgmPoolTrackFlushGCPhysPTs
    256                    will/may kill the pool otherwise. */
    257                 PVMCPU pVCpu = VMMGetCpu(pVM);
    258                 uint32_t iPrevSubset = PGMDynMapPushAutoSubset(pVCpu);
    259 # endif
    260                 if (PGMPOOL_TD_GET_CREFS(u16) != PGMPOOL_TD_CREFS_PHYSEXT)
    261                     pgmPoolTrackFlushGCPhysPT(pVM,
    262                                               pPage,
    263                                               PGMPOOL_TD_GET_IDX(u16),
    264                                               PGMPOOL_TD_GET_CREFS(u16));
    265                 else if (u16 != PGMPOOL_TD_MAKE(PGMPOOL_TD_CREFS_PHYSEXT, PGMPOOL_TD_IDX_OVERFLOWED))
    266                     pgmPoolTrackFlushGCPhysPTs(pVM, pPage, PGMPOOL_TD_GET_IDX(u16));
    267                 else
    268                     rc = pgmPoolTrackFlushGCPhysPTsSlow(pVM, pPage);
    269                 fFlushTLBs = true;
    270 
    271 #ifdef VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0
    272                 PGMDynMapPopAutoSubset(pVCpu, iPrevSubset);
    273 #endif
    274             }
    275 
    276 #elif defined(PGMPOOL_WITH_CACHE)
    277 # ifdef VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0
    278             /* Start a subset here because pgmPoolTrackFlushGCPhysPTsSlow kill the pool otherwise. */
    279             PVMCPU pVCpu = VMMGetCpu(pVM);
    280             uint32_t iPrevSubset = PGMDynMapPushAutoSubset(pVCpu);
    281 # endif
    282 
    283             rc = pgmPoolTrackFlushGCPhysPTsSlow(pVM, pPage);
    284             fFlushTLBs = true;
    285 
    286 # ifdef VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0
    287             PGMDynMapPopAutoSubset(pVCpu, iPrevSubset);
    288 # endif
    289 #endif
     246            int rc2 = pgmPoolTrackFlushGCPhys(pVM, pPage, &fFlushTLBs);
     247            if (rc2 != VINF_SUCCESS && rc == VINF_SUCCESS)
     248                rc = rc2;
    290249        }
    291250
     
    974933             * need to be copied over. The aliased page would have to be MMIO2 ofc, since
    975934             * RAM or ROM pages would require write sharing which is something we don't
    976              * intend to implement just yet... */
     935             * intend to implement just yet...
     936             */
     937
     938            /*
     939             * Note! This trick does only work reliably if the two pages are never ever
     940             *       mapped in the same page table. If they are the page pool code will
     941             *       be confused should either of them be flushed. See the special case
     942             *       of zero page aliasing mentioned in #3170.
     943             */
    977944
    978945            PPGMPAGE pPageRemap;
  • trunk/src/VBox/VMM/VMMAll/PGMAllPhys.cpp

    r17504 r17509  
    345345 * @returns The following VBox status codes.
    346346 * @retval  VINF_SUCCESS on success, pPage is modified.
     347 * @retval  VINF_PGM_SYNC_CR3 on success and a page pool flush is pending.
    347348 * @retval  VERR_EM_NO_MEMORY if we're totally out of memory.
    348349 *
     
    368369
    369370    /*
     371     * Prereqs.
     372     */
     373    Assert(PDMCritSectIsOwner(&pVM->pgm.s.CritSect));
     374    AssertMsg(PGM_PAGE_IS_ZERO(pPage) || PGM_PAGE_IS_SHARED(pPage), ("%R[pgmpage] %RGp\n", pPage, GCPhys));
     375    Assert(!PGM_PAGE_IS_MMIO(pPage));
     376
     377
     378    /*
     379     * Flush any shadow page table mappings of the page.
     380     * When VBOX_WITH_NEW_LAZY_PAGE_ALLOC isn't defined, there shouldn't be any.
     381     */
     382    bool fFlushTLBs = false;
     383    int rc = pgmPoolTrackFlushGCPhys(pVM, pPage, &fFlushTLBs);
     384    if (rc == VINF_SUCCESS)
     385        /* nothing */;
     386    else if (rc == VINF_PGM_GCPHYS_ALIASED)
     387    {
     388        pVM->pgm.s.fSyncFlags |= PGM_SYNC_CLEAR_PGM_POOL;
     389        VM_FF_SET(pVM, VM_FF_PGM_SYNC_CR3);
     390        rc = VINF_PGM_SYNC_CR3;
     391    }
     392    else
     393    {
     394        AssertRCReturn(rc, rc);
     395        AssertMsgFailedReturn(("%Rrc\n", rc), VERR_INTERNAL_ERROR);
     396    }
     397
     398    /*
    370399     * Ensure that we've got a page handy, take it and use it.
    371400     */
    372     int rc = pgmPhysEnsureHandyPage(pVM);
    373     if (RT_FAILURE(rc))
    374     {
    375         Assert(rc == VERR_EM_NO_MEMORY);
    376         return rc;
    377     }
     401    int rc2 = pgmPhysEnsureHandyPage(pVM);
     402    if (RT_FAILURE(rc2))
     403    {
     404        if (fFlushTLBs)
     405            PGM_INVL_GUEST_TLBS();
     406        Assert(rc2 == VERR_EM_NO_MEMORY);
     407        return rc2;
     408    }
     409    /* re-assert preconditions since pgmPhysEnsureHandyPage may do a context switch. */
    378410    Assert(PDMCritSectIsOwner(&pVM->pgm.s.CritSect));
    379     AssertMsg(PGM_PAGE_IS_ZERO(pPage) || PGM_PAGE_IS_SHARED(pPage), ("%d %RGp\n", PGM_PAGE_GET_STATE(pPage), GCPhys));
     411    AssertMsg(PGM_PAGE_IS_ZERO(pPage) || PGM_PAGE_IS_SHARED(pPage), ("%R[pgmpage] %RGp\n", pPage, GCPhys));
    380412    Assert(!PGM_PAGE_IS_MMIO(pPage));
    381413
     
    424456    PGM_PAGE_SET_STATE(pPage, PGM_PAGE_STATE_ALLOCATED);
    425457
    426     return VINF_SUCCESS;
     458    if (    fFlushTLBs
     459        &&  rc != VINF_PGM_GCPHYS_ALIASED)
     460        PGM_INVL_GUEST_TLBS();
     461    return rc;
    427462}
    428463
     
    433468 * @returns VBox status code.
    434469 * @retval  VINF_SUCCESS on success.
     470 * @retval  VINF_PGM_SYNC_CR3 on success and a page pool flush is pending.
    435471 * @retval  VERR_PGM_PHYS_PAGE_RESERVED if it's a valid page but has no physical backing.
    436472 *
     
    473509 * @returns VBox status code.
    474510 * @retval  VINF_SUCCESS on success.
     511 * @retval  VINF_PGM_SYNC_CR3 on success and a page pool flush is pending.
    475512 * @retval  VERR_PGM_PHYS_PAGE_RESERVED if it's a valid page but has no physical backing.
    476513 *
     
    824861        if (RT_FAILURE(rc))
    825862            return rc;
     863        AssertMsg(rc == VINF_SUCCESS || rc == VINF_PGM_SYNC_CR3 /* not returned */, ("%Rrc\n", rc));
    826864    }
    827865    Assert(PGM_PAGE_GET_HCPHYS(pPage) != 0);
     
    929967            pLock->u32Dummy = UINT32_MAX;
    930968#endif
     969            AssertMsg(rc == VINF_SUCCESS || rc == VINF_PGM_SYNC_CR3 /* not returned */, ("%Rrc\n", rc));
     970            rc = VINF_SUCCESS;
    931971        }
    932972    }
     
    952992            rc = pgmPhysPageMakeWritable(pVM, pPage, GCPhys);
    953993            if (RT_SUCCESS(rc))
     994            {
     995                AssertMsg(rc == VINF_SUCCESS || rc == VINF_PGM_SYNC_CR3 /* not returned */, ("%Rrc\n", rc));
    954996                rc = pgmPhysPageQueryTlbeWithPage(&pVM->pgm.s, pPage, GCPhys, &pTlbe);
     997            }
    955998        }
    956999        if (RT_SUCCESS(rc))
  • trunk/src/VBox/VMM/VMMAll/PGMAllPool.cpp

    r17500 r17509  
    29942994
    29952995/**
     2996 * Flushes all shadow page table mappings of the given guest page.
     2997 *
     2998 * This is typically called when the host page backing the guest one has been
     2999 * replaced or when the page protection was changed due to an access handler.
     3000 *
     3001 * @returns VBox status code.
     3002 * @retval  VINF_SUCCESS if all references has been successfully cleared.
     3003 * @retval  VINF_PGM_GCPHYS_ALIASED if we're better off with a CR3 sync and
     3004 *          a page pool cleaning.
     3005 *
     3006 * @param   pVM         The VM handle.
     3007 * @param   pPhysPage   The guest page in question.
     3008 * @param   pfFlushTLBs This is set to @a true if the shadow TLBs should be
     3009 *                      flushed, it is NOT touched if this isn't necessary.
     3010 *                      The caller MUST initialized this to @a false.
     3011 */
     3012int pgmPoolTrackFlushGCPhys(PVM pVM, PPGMPAGE pPhysPage, bool *pfFlushTLBs)
     3013{
     3014    int rc = VINF_SUCCESS;
     3015#ifdef PGMPOOL_WITH_GCPHYS_TRACKING
     3016    const uint16_t u16 = PGM_PAGE_GET_TRACKING(pPhysPage);
     3017    if (u16)
     3018    {
     3019# ifdef VBOX_WITH_NEW_PHYS_CODE
     3020        /*
     3021         * The zero page is currently screwing up the tracking and we'll
     3022         * have to flush the whole shebang. Unless VBOX_WITH_NEW_LAZY_PAGE_ALLOC
     3023         * is defined, zero pages won't normally be mapped. Some kind of solution
     3024         * will be needed for this problem of course, but it will have to wait...
     3025         */
     3026        if (PGM_PAGE_IS_ZERO(pPhysPage))
     3027            rc = VINF_PGM_GCPHYS_ALIASED;
     3028        else
     3029# endif
     3030        {
     3031# ifdef VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0
     3032            /* Start a subset here because pgmPoolTrackFlushGCPhysPTsSlow and
     3033               pgmPoolTrackFlushGCPhysPTs will/may kill the pool otherwise. */
     3034            PVMCPU pVCpu = VMMGetCpu(pVM);
     3035            uint32_t iPrevSubset = PGMDynMapPushAutoSubset(pVCpu);
     3036# endif
     3037
     3038            if (PGMPOOL_TD_GET_CREFS(u16) != PGMPOOL_TD_CREFS_PHYSEXT)
     3039                pgmPoolTrackFlushGCPhysPT(pVM,
     3040                                          pPhysPage,
     3041                                          PGMPOOL_TD_GET_IDX(u16),
     3042                                          PGMPOOL_TD_GET_CREFS(u16));
     3043            else if (u16 != PGMPOOL_TD_MAKE(PGMPOOL_TD_CREFS_PHYSEXT, PGMPOOL_TD_IDX_OVERFLOWED))
     3044                pgmPoolTrackFlushGCPhysPTs(pVM, pPhysPage, PGMPOOL_TD_GET_IDX(u16));
     3045            else
     3046                rc = pgmPoolTrackFlushGCPhysPTsSlow(pVM, pPhysPage);
     3047            *pfFlushTLBs = true;
     3048
     3049# ifdef VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0
     3050            PGMDynMapPopAutoSubset(pVCpu, iPrevSubset);
     3051# endif
     3052        }
     3053    }
     3054
     3055#elif defined(PGMPOOL_WITH_CACHE)
     3056# ifdef VBOX_WITH_NEW_PHYS_CODE
     3057    if (PGM_PAGE_IS_ZERO(pPhysPage))
     3058        rc = VINF_PGM_GCPHYS_ALIASED;
     3059    else
     3060# endif
     3061    {
     3062# ifdef VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0
     3063        /* Start a subset here because pgmPoolTrackFlushGCPhysPTsSlow kills the pool otherwise. */
     3064        PVMCPU pVCpu = VMMGetCpu(pVM);
     3065        uint32_t iPrevSubset = PGMDynMapPushAutoSubset(pVCpu);
     3066# endif
     3067        rc = pgmPoolTrackFlushGCPhysPTsSlow(pVM, pPhysPage);
     3068        if (rc == VINF_SUCCESS)
     3069            *pfFlushTLBs = true;
     3070    }
     3071
     3072# ifdef VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0
     3073    PGMDynMapPopAutoSubset(pVCpu, iPrevSubset);
     3074# endif
     3075
     3076#else
     3077    rc = VINF_PGM_GCPHYS_ALIASED;
     3078#endif
     3079
     3080    return rc;
     3081}
     3082
     3083
     3084/**
    29963085 * Scans all shadow page tables for mappings of a physical page.
    29973086 *
Note: See TracChangeset for help on using the changeset viewer.

© 2024 Oracle Support Privacy / Do Not Sell My Info Terms of Use Trademark Policy Automated Access Etiquette