VirtualBox

Changeset 22473 in vbox for trunk/src/VBox/VMM


Timestamp: Aug 26, 2009 2:51:50 PM
Author: vboxsync
Message: Disabled experiment with optimized dirty PTs.
Location: trunk/src/VBox/VMM
Files: 5 edited

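What the changeset amounts to: the controlling define PGMPOOL_WITH_OPTIMIZED_DIRTY_PT appears only in commented-out form in PGMInternal.h, so every block guarded by it in the diffs below (the dirty-PT bookkeeping in PGMPOOLPAGE and PGMPOOL, the pgmPoolAddDirtyPage/pgmPoolFlushDirtyPage/pgmPoolResetDirtyPages helpers, and their call sites) compiles out while staying in the tree. A minimal sketch of the guard pattern, reusing the define name but not the actual VirtualBox headers:

    /* With the define left commented out, the optimized dirty-PT members and
     * functions guarded by it are simply not compiled (and the guard is only
     * even considered outside the raw-mode context). */
    #ifndef IN_RC
    /*# define PGMPOOL_WITH_OPTIMIZED_DIRTY_PT*/   /* disabled in this changeset */
    #endif

    #ifdef PGMPOOL_WITH_OPTIMIZED_DIRTY_PT
    /* dirty-PT tracking fields, statistics and helpers would be compiled here */
    #endif
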
  • trunk/src/VBox/VMM/PGMInternal.h

    r22343 r22473  
    6565 */
    6666#define PGM_SKIP_GLOBAL_PAGEDIRS_ON_NONGLOBAL_FLUSH
     67
     68/**
     69 * Optimization for PAE page tables that are modified often
     70 */
     71#ifndef IN_RC
     72////# define PGMPOOL_WITH_OPTIMIZED_DIRTY_PT
     73#endif
    6774
    6875/**
     
    16621669     * It's a hack required because of REMR3NotifyHandlerPhysicalDeregister. */
    16631670    bool volatile       fReusedFlushPending;
     1671#ifdef PGMPOOL_WITH_OPTIMIZED_DIRTY_PT
      1672    /** Used to mark the page as dirty (write monitoring is temporarily off). */
     1673    bool                fDirty;
     1674#else
    16641675    bool                bPadding1;
     1676#endif
    16651677
    16661678    /** Used to indicate that this page can't be flushed. Important for cr3 root pages or shadow pae pd pages). */
    16671679    uint32_t            cLocked;
     1680#ifdef PGMPOOL_WITH_OPTIMIZED_DIRTY_PT
     1681    uint32_t            idxDirty;
     1682#else
    16681683    uint32_t            bPadding2;
     1684#endif
    16691685} PGMPOOLPAGE, *PPGMPOOLPAGE, **PPPGMPOOLPAGE;
    16701686/** Pointer to a const pool page. */
     
    17521768    /** The access handler description (HC ptr). */
    17531769    R3PTRTYPE(const char *)         pszAccessHandler;
     1770# ifdef PGMPOOL_WITH_OPTIMIZED_DIRTY_PT
     1771    /* Next available slot. */
     1772    uint32_t                    idxFreeDirtyPage;
     1773    /* Number of active dirty pages. */
     1774    uint32_t                    cDirtyPages;
     1775    /* Array of current dirty pgm pool page indices. */
     1776    uint16_t                    aIdxDirtyPages[8];
     1777    uint64_t                    aDirtyPages[8][512];
     1778# endif /* PGMPOOL_WITH_OPTIMIZED_DIRTY_PT */
    17541779#endif /* PGMPOOL_WITH_MONITORING */
    17551780    /** The number of pages currently in use. */
    17561781    uint16_t                    cUsedPages;
    17571782#ifdef VBOX_WITH_STATISTICS
    1758     /** The high wather mark for cUsedPages. */
     1783    /** The high water mark for cUsedPages. */
    17591784    uint16_t                    cUsedPagesHigh;
    17601785    uint32_t                    Alignment1;         /**< Align the next member on a 64-bit boundrary. */
     
    18311856    /** The number of times we're called in an async thread an need to flush. */
    18321857    STAMCOUNTER                 StatMonitorR3Async;
      1858    /** Times we've called pgmPoolResetDirtyPages (and there were dirty pages). */
     1859    STAMCOUNTER                 StatResetDirtyPages;
     1860    /** Times we've called pgmPoolAddDirtyPage. */
     1861    STAMCOUNTER                 StatDirtyPage;
     1862
    18331863    /** The high wather mark for cModifiedPages. */
    18341864    uint16_t                    cModifiedPagesHigh;
     
    30233053uint16_t        pgmPoolTrackPhysExtAddref(PVM pVM, uint16_t u16, uint16_t iShwPT);
    30243054void            pgmPoolTrackPhysExtDerefGCPhys(PPGMPOOL pPool, PPGMPOOLPAGE pPoolPage, PPGMPAGE pPhysPage);
     3055void            pgmPoolTracDerefGCPhysHint(PPGMPOOL pPool, PPGMPOOLPAGE pPage, RTHCPHYS HCPhys, RTGCPHYS GCPhysHint);
    30253056#ifdef PGMPOOL_WITH_MONITORING
    30263057void            pgmPoolMonitorChainChanging(PVMCPU pVCpu, PPGMPOOL pPool, PPGMPOOLPAGE pPage, RTGCPHYS GCPhysFault, CTXTYPE(RTGCPTR, RTHCPTR, RTGCPTR) pvAddress, PDISCPUSTATE pCpu);
     
    30283059void            pgmPoolMonitorModifiedInsert(PPGMPOOL pPool, PPGMPOOLPAGE pPage);
    30293060#endif
     3061
     3062void            pgmPoolAddDirtyPage(PVM pVM, PPGMPOOL pPool, PPGMPOOLPAGE pPage);
     3063void            pgmPoolResetDirtyPages(PVM pVM, bool fForceRemoval = false);
    30303064
    30313065int             pgmR3ExitShadowModeBeforePoolFlush(PVM pVM, PVMCPU pVCpu);
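
For orientation: the state added to PGMPOOL above is eight dirty slots, each caching a full PAE page table (512 entries of 64 bits, i.e. 4 KB per slot, 32 KB in total), plus a slot index in each PGMPOOLPAGE. A self-contained sketch of that layout under stand-in names (DIRTYTRACK, NIL_IDX and the helper are illustrative, not the VirtualBox API):

    #include <stdint.h>
    #include <stdio.h>

    #define DIRTY_SLOTS  8            /* matches RT_ELEMENTS(aIdxDirtyPages) in the diff */
    #define PTES_PER_PT  512          /* one PAE page table: 512 x 64-bit entries        */
    #define NIL_IDX      0xffffU      /* stand-in for NIL_PGMPOOL_IDX                    */

    typedef struct DIRTYTRACK
    {
        uint32_t idxFreeDirtyPage;                       /* next slot to try              */
        uint32_t cDirtyPages;                            /* number of active dirty slots  */
        uint16_t aIdxDirtyPages[DIRTY_SLOTS];            /* pool page index per slot      */
        uint64_t aDirtyPages[DIRTY_SLOTS][PTES_PER_PT];  /* cached guest PT copies        */
    } DIRTYTRACK;

    static void dirtyTrackInit(DIRTYTRACK *pTrack)
    {
        pTrack->idxFreeDirtyPage = 0;
        pTrack->cDirtyPages      = 0;
        for (unsigned i = 0; i < DIRTY_SLOTS; i++)       /* same reset loop as the diff   */
            pTrack->aIdxDirtyPages[i] = NIL_IDX;
    }

    int main(void)
    {
        DIRTYTRACK track;
        dirtyTrackInit(&track);
        printf("cached guest PTs: %d x %zu bytes\n",
               DIRTY_SLOTS, sizeof(track.aDirtyPages[0]));   /* 8 x 4096 bytes */
        return 0;
    }
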
  • trunk/src/VBox/VMM/PGMPool.cpp

    r22343 r22473  
    362362    STAM_REG(pVM, &pPool->cModifiedPages,               STAMTYPE_U16,       "/PGM/Pool/Monitor/cModifiedPages",     STAMUNIT_PAGES,          "The current cModifiedPages value.");
    363363    STAM_REG(pVM, &pPool->cModifiedPagesHigh,           STAMTYPE_U16_RESET, "/PGM/Pool/Monitor/cModifiedPagesHigh", STAMUNIT_PAGES,          "The high watermark for cModifiedPages.");
      364    STAM_REG(pVM, &pPool->StatResetDirtyPages,          STAMTYPE_COUNTER,   "/PGM/Pool/Monitor/Dirty/Resets",       STAMUNIT_OCCURENCES,     "Times we've called pgmPoolResetDirtyPages (and there were dirty pages).");
     365    STAM_REG(pVM, &pPool->StatDirtyPage,                STAMTYPE_COUNTER,   "/PGM/Pool/Monitor/Dirty/Pages",        STAMUNIT_OCCURENCES,     "Times we've called pgmPoolAddDirtyPage.");
     366   
    364367# endif
    365368# ifdef PGMPOOL_WITH_CACHE
  • trunk/src/VBox/VMM/VMMAll/PGMAll.cpp

    r21059 r22473  
    403403    PVM pVM = pVCpu->CTX_SUFF(pVM);
    404404
    405     LogFlow(("PGMTrap0eHandler: uErr=%RGu pvFault=%RGv eip=%04x:%RGv\n", uErr, pvFault, pRegFrame->cs, (RTGCPTR)pRegFrame->rip));
     405    Log(("PGMTrap0eHandler: uErr=%RGu pvFault=%RGv eip=%04x:%RGv\n", uErr, pvFault, pRegFrame->cs, (RTGCPTR)pRegFrame->rip));
    406406    STAM_PROFILE_START(&pVCpu->pgm.s.StatRZTrap0e, a);
    407407    STAM_STATS({ pVCpu->pgm.s.CTX_SUFF(pStatTrap0eAttribution) = NULL; } );
     
    17011701    else
    17021702    {
     1703        pgmLock(pVM);
     1704# ifdef PGMPOOL_WITH_OPTIMIZED_DIRTY_PT
     1705        pgmPoolResetDirtyPages(pVM);
     1706# endif
     1707        pgmUnlock(pVM);
    17031708        /*
    17041709         * Check if we have a pending update of the CR3 monitoring.
  • trunk/src/VBox/VMM/VMMAll/PGMAllBth.h

    r22327 r22473  
    967967
    968968    LogFlow(("InvalidatePage %RGv\n", GCPtrPage));
     969
     970# ifdef PGMPOOL_WITH_OPTIMIZED_DIRTY_PT
     971    if (pPool->cDirtyPages)
     972        pgmPoolResetDirtyPages(pVM);
     973# endif
     974
    969975    /*
    970976     * Get the shadow PD entry and skip out if this PD isn't present.
     
    14701476        PVM pVM = pVCpu->CTX_SUFF(pVM);
    14711477
     1478# if    defined(PGMPOOL_WITH_OPTIMIZED_DIRTY_PT)                            \
     1479     && PGM_WITH_PAGING(PGM_GST_TYPE, PGM_SHW_TYPE)                         \
     1480     && (PGM_GST_TYPE == PGM_TYPE_PAE || PGM_GST_TYPE == PGM_TYPE_AMD64)
     1481        if (pShwPage->fDirty)
     1482        {
     1483            PPGMPOOL pPool = pVM->pgm.s.CTX_SUFF(pPool);
     1484            PX86PTPAE pGstPT;
     1485           
     1486            pGstPT = (PX86PTPAE)&pPool->aDirtyPages[pShwPage->idxDirty][0];
     1487            pGstPT->a[iPTDst].u = PteSrc.u;
     1488        }
     1489# endif
    14721490        /*
    14731491         * Find the ram range.
     
    17111729
    17121730    PPGMPOOLPAGE    pShwPage = pgmPoolGetPage(pPool, PdeDst.u & SHW_PDE_PG_MASK);
     1731    Assert(pShwPage);
    17131732
    17141733# if PGM_GST_TYPE == PGM_TYPE_AMD64
     
    18381857                        const unsigned iPTDst = (GCPtrPage >> SHW_PT_SHIFT) & SHW_PT_MASK;
    18391858                        PGM_BTH_NAME(SyncPageWorker)(pVCpu, &pPTDst->a[iPTDst], PdeSrc, PteSrc, pShwPage, iPTDst);
    1840                         Log2(("SyncPage: 4K  %RGv PteSrc:{P=%d RW=%d U=%d raw=%08llx}%s\n",
     1859                        Log2(("SyncPage: 4K  %RGv PteSrc:{P=%d RW=%d U=%d raw=%08llx} PteDst=%08llx %s\n",
    18411860                              GCPtrPage, PteSrc.n.u1Present,
    18421861                              PteSrc.n.u1Write & PdeSrc.n.u1Write,
    18431862                              PteSrc.n.u1User & PdeSrc.n.u1User,
    18441863                              (uint64_t)PteSrc.u,
     1864                              (uint64_t)pPTDst->a[iPTDst].u,
    18451865                              pPTDst->a[iPTDst].u & PGM_PTFLAGS_TRACK_DIRTY ? " Track-Dirty" : ""));
    18461866                    }
     
    33953415
    33963416#if PGM_SHW_TYPE != PGM_TYPE_NESTED && PGM_SHW_TYPE != PGM_TYPE_EPT
     3417
     3418    pgmLock(pVM);
     3419# ifdef PGMPOOL_WITH_OPTIMIZED_DIRTY_PT
     3420    PPGMPOOL pPool = pVM->pgm.s.CTX_SUFF(pPool);
     3421    if (pPool->cDirtyPages)
     3422        pgmPoolResetDirtyPages(pVM);
     3423# endif
     3424
    33973425    /*
    33983426     * Update page access handlers.
     
    34073435    PGM_GST_NAME(HandlerVirtualUpdate)(pVM, cr4);
    34083436    STAM_PROFILE_STOP(&pVCpu->pgm.s.CTX_MID_Z(Stat,SyncCR3Handlers), h);
     3437    pgmUnlock(pVM);
    34093438#endif
    34103439
     
    43794408    pgmLock(pVM);
    43804409
     4410# ifdef PGMPOOL_WITH_OPTIMIZED_DIRTY_PT
     4411    if (pPool->cDirtyPages)
     4412        pgmPoolResetDirtyPages(pVM);
     4413# endif
     4414
    43814415    Assert(!(GCPhysCR3 >> (PAGE_SHIFT + 32)));
    43824416    rc = pgmPoolAlloc(pVM, GCPhysCR3 & GST_CR3_PAGE_MASK, BTH_PGMPOOLKIND_ROOT, SHW_POOL_ROOT_IDX, GCPhysCR3 >> PAGE_SHIFT, &pNewShwPageCR3, true /* lock page */);
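
Two patterns show up in PGMAllBth.h: structural operations (InvalidatePage, SyncCR3, MapCR3) flush any pending dirty pages first, and SyncPageWorker keeps the cached guest copy current. The latter matters because a dirty page's write monitoring is off; when SyncPage writes a shadow PTE directly, the corresponding guest PTE value is also stored into aDirtyPages so that the comparison done at flush time (pgmPoolTrackFlushPTPaePae, below) sees the entry as already up to date. A small continuation of the DIRTYTRACK sketch above, with stand-in names:

    /* Mirror a freshly synced guest PTE into the cached copy of a dirty page
     * table.  idxDirty is the page's slot (pPage->idxDirty in the diff) and
     * iPTDst the PTE index within the table. */
    static void dirtyTrackRecordSyncedPte(DIRTYTRACK *pTrack, uint32_t idxDirty,
                                          unsigned iPTDst, uint64_t uGstPte)
    {
        pTrack->aDirtyPages[idxDirty][iPTDst] = uGstPte;
    }
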
  • trunk/src/VBox/VMM/VMMAll/PGMAllPool.cpp

    r22349 r22473  
    777777 * Checks if a access could be a fork operation in progress.
    778778 *
    779  * Meaning, that the guest is setuping up the parent process for Copy-On-Write.
     779 * Meaning, that the guest is setting up the parent process for Copy-On-Write.
    780780 *
    781781 * @returns true if it's likly that we're forking, otherwise false.
     
    830830{
    831831#ifndef IN_RC
    832     /** @todo could make this general, faulting close to rsp should be safe reuse heuristic. */
     832    /** @todo could make this general, faulting close to rsp should be a safe reuse heuristic. */
    833833    if (   HWACCMHasPendingIrq(pVM)
    834834        && (pRegFrame->rsp - pvFault) < 32)
     
    885885            return false;
    886886    }
    887     if (    (pDis->param1.flags & USE_REG_GEN32)
     887    if (    (    (pDis->param1.flags & USE_REG_GEN32)
     888             ||  (pDis->param1.flags & USE_REG_GEN64))
    888889        &&  (pDis->param1.base.reg_gen == USE_REG_ESP))
    889890    {
     
    919920    /*
    920921     * Emulate the instruction (xp/w2k problem, requires pc/cr2/sp detection).
     922     * @todo: why is this necessary? an instruction restart would be sufficient, wouldn't it?
    921923     */
    922924    uint32_t cbWritten;
     
    11171119    PVMCPU          pVCpu = VMMGetCpu(pVM);
    11181120    unsigned        cMaxModifications;
     1121    bool            fForcedFlush = false;
    11191122
    11201123    LogFlow(("pgmPoolAccessHandler: pvFault=%RGv pPage=%p:{.idx=%d} GCPhysFault=%RGp\n", pvFault, pPage, pPage->idx, GCPhysFault));
     
    11441147     */
    11451148    Assert(pPage->iMonitoredPrev == NIL_PGMPOOL_IDX);
     1149#ifdef PGMPOOL_WITH_OPTIMIZED_DIRTY_PT
     1150    Assert(!pPage->fDirty);
     1151#endif
    11461152
    11471153    /* Maximum nr of modifications depends on the guest mode. */
     
    11661172        pPage->cLastAccessHandlerCount  = pVCpu->pgm.s.cPoolAccessHandler;
    11671173        if (pPage->cModifications > cMaxModifications)
     1174        {
    11681175            STAM_COUNTER_INC(&pPool->CTX_MID_Z(StatMonitor,FlushReinit));
    1169     }
     1176            fForcedFlush = true;
     1177        }
     1178    }
     1179
     1180    if (pPage->cModifications >= cMaxModifications)
     1181        Log(("Mod overflow %VGv cMods=%d (locked=%d type=%s)\n", pvFault, pPage->cModifications, pgmPoolIsPageLocked(&pVM->pgm.s, pPage), pgmPoolPoolKindToStr(pPage->enmKind)));
    11701182
    11711183    /*
     
    12661278    }
    12671279
     1280#ifdef PGMPOOL_WITH_OPTIMIZED_DIRTY_PT
     1281    /* E.g. Windows 7 x64 initializes page tables and touches some pages in the table during the process. This
     1282     * leads to pgm pool trashing and an excessive amount of write faults due to page monitoring.
     1283     */
     1284    if (    !fReused
     1285        &&  !fForcedFlush
     1286        &&  pPage->enmKind == PGMPOOLKIND_PAE_PT_FOR_PAE_PT
     1287        &&  pPage->cModifications >= cMaxModifications
     1288        &&  pPage->iModifiedNext == NIL_PGMPOOL_IDX
     1289        &&  pPage->iModifiedPrev == NIL_PGMPOOL_IDX)
     1290    {
     1291        Assert(!pgmPoolIsPageLocked(&pVM->pgm.s, pPage));
     1292        Assert(pPage->fDirty == false);
     1293
     1294        rc = PGMHandlerPhysicalPageTempOff(pVM, pPage->GCPhys, pPage->GCPhys);
     1295        if (rc == VINF_SUCCESS)
     1296        {
     1297            rc = PGMShwModifyPage(pVCpu, pvFault, 1, X86_PTE_RW, ~(uint64_t)X86_PTE_RW);
     1298            AssertMsg(rc == VINF_SUCCESS
     1299                      /* In the SMP case the page table might be removed while we wait for the PGM lock in the trap handler. */
     1300                      ||  rc == VERR_PAGE_TABLE_NOT_PRESENT
     1301                      ||  rc == VERR_PAGE_NOT_PRESENT,
     1302                      ("PGMShwModifyPage -> GCPtr=%RGv rc=%d\n", pvFault, rc));
     1303
     1304            pgmPoolAddDirtyPage(pVM, pPool, pPage);
     1305 
     1306            STAM_PROFILE_STOP(&pVM->pgm.s.CTX_SUFF(pPool)->CTX_SUFF_Z(StatMonitor), a);
     1307            pgmUnlock(pVM);
     1308            return rc;
     1309        }
     1310    }
     1311#endif /* PGMPOOL_WITH_OPTIMIZED_DIRTY_PT */
     1312
    12681313    /*
    12691314     * Not worth it, so flush it.
     
    12731318     * interpret then. This may be a bit risky, in which case
    12741319     * the reuse detection must be fixed.
    1275      */
     1320     */       
    12761321    rc = pgmPoolAccessHandlerFlush(pVM, pVCpu, pPool, pPage, pDis, pRegFrame, GCPhysFault, pvFault);
    12771322    if (rc == VINF_EM_RAW_EMULATE_INSTR && fReused)
     
    12831328
    12841329# endif /* !IN_RING3 */
     1330
     1331# ifdef PGMPOOL_WITH_OPTIMIZED_DIRTY_PT
     1332/**
     1333 * Clear references to guest physical memory in a PAE / PAE page table.
     1334 *
     1335 * @returns nr of changed PTEs
     1336 * @param   pPool       The pool.
     1337 * @param   pPage       The page.
     1338 * @param   pShwPT      The shadow page table (mapping of the page).
     1339 * @param   pGstPT      The guest page table.
      1340 * @param   pOldGstPT   The old cached guest page table.
     1341 */
     1342DECLINLINE(unsigned) pgmPoolTrackFlushPTPaePae(PPGMPOOL pPool, PPGMPOOLPAGE pPage, PX86PTPAE pShwPT, PCX86PTPAE pGstPT, PCX86PTPAE pOldGstPT)
     1343{
     1344    unsigned cChanged = 0;
     1345
     1346    for (unsigned i = 0; i < RT_ELEMENTS(pShwPT->a); i++)
     1347    {
     1348        if (pShwPT->a[i].n.u1Present)
     1349        {
      1350            /* If the old cached PTE is identical, then there's no need to flush the shadow copy. */
     1351            if ((pGstPT->a[i].u & X86_PTE_PAE_PG_MASK) == (pOldGstPT->a[i].u & X86_PTE_PAE_PG_MASK))
     1352            {
     1353#ifdef VBOX_STRICT
     1354                RTHCPHYS HCPhys = -1;
     1355                int rc = PGMPhysGCPhys2HCPhys(pPool->CTX_SUFF(pVM), pGstPT->a[i].u & X86_PTE_PAE_PG_MASK, &HCPhys);
     1356                AssertMsg(rc == VINF_SUCCESS && (pShwPT->a[i].u & X86_PTE_PAE_PG_MASK) == HCPhys, ("rc=%d guest %RGp %RHp vs %RHp\n", rc, pGstPT->a[i].u & X86_PTE_PAE_PG_MASK, (pShwPT->a[i].u & X86_PTE_PAE_PG_MASK), HCPhys));
     1357#endif
     1358                uint64_t uHostAttr  = pShwPT->a[i].u & (X86_PTE_P | X86_PTE_US | X86_PTE_A | X86_PTE_D | X86_PTE_G | X86_PTE_PAE_NX);
     1359                bool     fHostRW    = !!(pShwPT->a[i].u & X86_PTE_RW);
     1360                uint64_t uGuestAttr = pGstPT->a[i].u & (X86_PTE_P | X86_PTE_US | X86_PTE_A | X86_PTE_D | X86_PTE_G | X86_PTE_PAE_NX);
     1361                bool     fGuestRW   = !!(pGstPT->a[i].u & X86_PTE_RW);
     1362
     1363                if (    uHostAttr == uGuestAttr
     1364                    &&  fHostRW <= fGuestRW)
     1365                    continue;
     1366            }
     1367            cChanged++;
     1368            /* Something was changed, so flush it. */
     1369            Log4(("pgmPoolTrackDerefPTPaePae: i=%d pte=%RX32 hint=%RX32\n",
     1370                  i, pShwPT->a[i].u & X86_PTE_PAE_PG_MASK, pGstPT->a[i].u & X86_PTE_PAE_PG_MASK));
     1371            pgmPoolTracDerefGCPhysHint(pPool, pPage, pShwPT->a[i].u & X86_PTE_PAE_PG_MASK, pOldGstPT->a[i].u & X86_PTE_PAE_PG_MASK);
     1372            ASMAtomicWriteSize(&pShwPT->a[i].u, 0);
     1373        }
     1374    }
     1375    return cChanged;
     1376}
     1377
     1378
     1379/**
     1380 * Flush a dirty page
     1381 *
     1382 * @param   pVM             VM Handle.
     1383 * @param   pPool           The pool.
     1384 * @param   idxSlot         Dirty array slot index
     1385 * @param   fForceRemoval   Force removal from the dirty page list
     1386 */
     1387static void pgmPoolFlushDirtyPage(PVM pVM, PPGMPOOL pPool, unsigned idxSlot, bool fForceRemoval = false)
     1388{
     1389    PPGMPOOLPAGE pPage;
     1390    unsigned     idxPage;
     1391
     1392    Assert(idxSlot < RT_ELEMENTS(pPool->aIdxDirtyPages));
     1393    if (pPool->aIdxDirtyPages[idxSlot] == NIL_PGMPOOL_IDX)
     1394        return;
     1395
     1396    idxPage = pPool->aIdxDirtyPages[idxSlot];
     1397    AssertRelease(idxPage != NIL_PGMPOOL_IDX);
     1398    pPage = &pPool->aPages[idxPage];
     1399    Assert(pPage->idx == idxPage);
     1400    Assert(pPage->iModifiedNext == NIL_PGMPOOL_IDX && pPage->iModifiedPrev == NIL_PGMPOOL_IDX);
     1401
     1402    AssertMsg(pPage->fDirty, ("Page %RGp (slot=%d) not marked dirty!", pPage->GCPhys, idxSlot));
     1403    Log(("Flush dirty page %RGp cMods=%d\n", pPage->GCPhys, pPage->cModifications));
     1404
     1405    /* Flush those PTEs that have changed. */
     1406    STAM_PROFILE_START(&pPool->StatTrackDeref,a);
     1407    void *pvShw = PGMPOOL_PAGE_2_LOCKED_PTR(pPool->CTX_SUFF(pVM), pPage);
     1408    void *pvGst;
     1409    int rc = PGM_GCPHYS_2_PTR(pPool->CTX_SUFF(pVM), pPage->GCPhys, &pvGst); AssertReleaseRC(rc);
     1410    unsigned cChanges = pgmPoolTrackFlushPTPaePae(pPool, pPage, (PX86PTPAE)pvShw, (PCX86PTPAE)pvGst, (PCX86PTPAE)&pPool->aDirtyPages[idxSlot][0]);
     1411    STAM_PROFILE_STOP(&pPool->StatTrackDeref,a);
     1412
     1413    /** Note: we might want to consider keeping the dirty page active in case there were many changes. */
     1414
     1415    /* Write protect the page again to catch all write accesses. */
     1416    rc = PGMHandlerPhysicalReset(pVM, pPage->GCPhys);
     1417    Assert(rc == VINF_SUCCESS);
     1418    pPage->fDirty         = false;
     1419    pPage->fZeroed        = true;
     1420
     1421    /* This page is likely to be modified again, so reduce the nr of modifications just a bit here. */
     1422    Assert(pPage->cModifications);
     1423    if (cChanges < 4)
     1424        pPage->cModifications = 1;      /* must use > 0 here */
     1425    else
     1426        pPage->cModifications = RT_MAX(1, pPage->cModifications / 2);
     1427
     1428    STAM_COUNTER_INC(&pPool->StatResetDirtyPages);
     1429    if (pPool->cDirtyPages == RT_ELEMENTS(pPool->aIdxDirtyPages))
     1430        pPool->idxFreeDirtyPage = idxSlot;
     1431
     1432    pPool->cDirtyPages--;
     1433    pPool->aIdxDirtyPages[idxSlot] = NIL_PGMPOOL_IDX;
     1434    Assert(pPool->cDirtyPages <= RT_ELEMENTS(pPool->aIdxDirtyPages));
     1435    Log(("Removed dirty page %RGp cMods=%d\n", pPage->GCPhys, pPage->cModifications));
     1436}
     1437
     1438/**
     1439 * Add a new dirty page
     1440 *
     1441 * @param   pVM         VM Handle.
     1442 * @param   pPool       The pool.
     1443 * @param   pPage       The page.
     1444 */
     1445void pgmPoolAddDirtyPage(PVM pVM, PPGMPOOL pPool, PPGMPOOLPAGE pPage)
     1446{
     1447    unsigned idxFree;
     1448
     1449    Assert(PGMIsLocked(pVM));
     1450    AssertCompile(RT_ELEMENTS(pPool->aIdxDirtyPages) == 8 || RT_ELEMENTS(pPool->aIdxDirtyPages) == 16);
     1451
     1452    if (pPage->fDirty)
     1453        return;
     1454
     1455    idxFree = pPool->idxFreeDirtyPage;
     1456    Assert(idxFree < RT_ELEMENTS(pPool->aIdxDirtyPages));
     1457    Assert(pPage->iModifiedNext == NIL_PGMPOOL_IDX && pPage->iModifiedPrev == NIL_PGMPOOL_IDX);
     1458
     1459    if (pPool->cDirtyPages >= RT_ELEMENTS(pPool->aIdxDirtyPages))
     1460        pgmPoolFlushDirtyPage(pVM, pPool, idxFree, true /* force removal */);
     1461    Assert(pPool->cDirtyPages < RT_ELEMENTS(pPool->aIdxDirtyPages));
     1462    AssertMsg(pPool->aIdxDirtyPages[idxFree] == NIL_PGMPOOL_IDX, ("idxFree=%d cDirtyPages=%d\n", idxFree, pPool->cDirtyPages));
     1463
     1464    /* Make a copy of the guest page table as we require valid GCPhys addresses when removing
     1465     * references to physical pages. (the HCPhys linear lookup is *extremely* expensive!)
     1466     */
     1467    void *pvGst;
     1468    int rc = PGM_GCPHYS_2_PTR(pPool->CTX_SUFF(pVM), pPage->GCPhys, &pvGst); AssertReleaseRC(rc);
     1469    memcpy(&pPool->aDirtyPages[idxFree][0], pvGst, PAGE_SIZE);
     1470
     1471    STAM_COUNTER_INC(&pPool->StatDirtyPage);
     1472    Log(("Mark dirty page %RGp (slot=%d)\n", pPage->GCPhys, idxFree));
     1473    pPage->fDirty                  = true;
     1474    pPage->idxDirty                = idxFree;
     1475    pPool->aIdxDirtyPages[idxFree] = pPage->idx;
     1476    pPool->cDirtyPages++;
     1477
     1478    pPool->idxFreeDirtyPage        = (pPool->idxFreeDirtyPage + 1) & (RT_ELEMENTS(pPool->aIdxDirtyPages) - 1);
     1479    if (    pPool->cDirtyPages < RT_ELEMENTS(pPool->aIdxDirtyPages)
     1480        &&  pPool->aIdxDirtyPages[pPool->idxFreeDirtyPage] != NIL_PGMPOOL_IDX)
     1481    {
     1482        unsigned i;
     1483        for (i = 1; i < RT_ELEMENTS(pPool->aIdxDirtyPages); i++)
     1484        {
     1485            idxFree = (pPool->idxFreeDirtyPage + i) & (RT_ELEMENTS(pPool->aIdxDirtyPages) - 1);
     1486            if (pPool->aIdxDirtyPages[idxFree] == NIL_PGMPOOL_IDX)
     1487            {
     1488                pPool->idxFreeDirtyPage = idxFree;
     1489                break;
     1490            }
     1491        }
     1492        Assert(i != RT_ELEMENTS(pPool->aIdxDirtyPages));
     1493    }
     1494
     1495    Assert(pPool->cDirtyPages == RT_ELEMENTS(pPool->aIdxDirtyPages) || pPool->aIdxDirtyPages[pPool->idxFreeDirtyPage] == NIL_PGMPOOL_IDX);
     1496    return;
     1497}
     1498
     1499
     1500/**
     1501 * Reset all dirty pages by reinstating page monitoring.
     1502 *
     1503 * @param   pVM             VM Handle.
     1504 * @param   fForceRemoval   Force removal of all dirty pages
     1505 */
     1506void pgmPoolResetDirtyPages(PVM pVM, bool fForceRemoval)
     1507{
     1508    PPGMPOOL pPool = pVM->pgm.s.CTX_SUFF(pPool);
     1509    Assert(PGMIsLocked(pVM));
     1510    Assert(pPool->cDirtyPages <= RT_ELEMENTS(pPool->aIdxDirtyPages));
     1511
     1512    if (!pPool->cDirtyPages)
     1513        return;
     1514
     1515    Log(("pgmPoolResetDirtyPages\n"));
     1516    for (unsigned i = 0; i < RT_ELEMENTS(pPool->aIdxDirtyPages); i++)
     1517        pgmPoolFlushDirtyPage(pVM, pPool, i, fForceRemoval);
     1518
     1519    pPool->idxFreeDirtyPage = 0;
     1520    if (    pPool->cDirtyPages != RT_ELEMENTS(pPool->aIdxDirtyPages)
     1521        &&  pPool->aIdxDirtyPages[pPool->idxFreeDirtyPage] != NIL_PGMPOOL_IDX)
     1522    {
     1523        unsigned i;
     1524        for (i = 1; i < RT_ELEMENTS(pPool->aIdxDirtyPages); i++)
     1525        {
     1526            if (pPool->aIdxDirtyPages[i] == NIL_PGMPOOL_IDX)
     1527            {
     1528                pPool->idxFreeDirtyPage = i;
     1529                break;
     1530            }
     1531        }
     1532        AssertMsg(i != RT_ELEMENTS(pPool->aIdxDirtyPages), ("cDirtyPages %d", pPool->cDirtyPages));
     1533    }
     1534
     1535    Assert(pPool->aIdxDirtyPages[pPool->idxFreeDirtyPage] == NIL_PGMPOOL_IDX || pPool->cDirtyPages == RT_ELEMENTS(pPool->aIdxDirtyPages));
     1536    return;
     1537}
     1538# endif /* PGMPOOL_WITH_OPTIMIZED_DIRTY_PT */
    12851539#endif  /* PGMPOOL_WITH_MONITORING */
    12861540
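
The dereferencing helper above, pgmPoolTrackFlushPTPaePae, only touches entries that actually changed: for each present shadow PTE it compares the current guest PTE against the copy cached when the page was marked dirty, and skips the entry when the mapped physical address is unchanged, the mirrored attribute bits match, and the shadow is not more writable than the guest. A hedged restatement of that per-entry test with plain uint64_t values (mask constants written out, not the X86PTPAE union):

    #include <stdint.h>
    #include <stdbool.h>

    #define PTE_PG_MASK   UINT64_C(0x000ffffffffff000)             /* X86_PTE_PAE_PG_MASK */
    #define PTE_RW        UINT64_C(0x002)                          /* X86_PTE_RW          */
    #define PTE_ATTR_MASK (UINT64_C(0x165) | (UINT64_C(1) << 63))  /* P|US|A|D|G|NX       */

    /* Return true when the shadow entry must be dereferenced and cleared. */
    static bool pteNeedsFlush(uint64_t uShw, uint64_t uGst, uint64_t uOldGst)
    {
        if ((uGst & PTE_PG_MASK) != (uOldGst & PTE_PG_MASK))
            return true;                              /* guest changed the mapping      */
        if ((uShw & PTE_ATTR_MASK) != (uGst & PTE_ATTR_MASK))
            return true;                              /* attribute bits differ          */
        return (uShw & PTE_RW) && !(uGst & PTE_RW);   /* shadow writable, guest is not  */
    }
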
     
    18182072        Assert(pPageHead != pPage); Assert(pPageHead->iMonitoredNext != pPage->idx);
    18192073        Assert(pPageHead->iMonitoredPrev != pPage->idx);
     2074
     2075#ifdef PGMPOOL_WITH_OPTIMIZED_DIRTY_PT
     2076        if (pPageHead->fDirty)
     2077            pgmPoolFlushDirtyPage(pPool->CTX_SUFF(pVM), pPool, pPageHead->idxDirty, true /* force removal */);
     2078#endif
     2079
    18202080        pPage->iMonitoredPrev = pPageHead->idx;
    18212081        pPage->iMonitoredNext = pPageHead->iMonitoredNext;
     
    20242284 * @param   pVM     The VM handle.
    20252285 */
    2026 void pgmPoolMonitorModifiedClearAll(PVM pVM)
     2286static void pgmPoolMonitorModifiedClearAll(PVM pVM)
    20272287{
    20282288    pgmLock(pVM);
     
    20312291
    20322292    unsigned cPages = 0; NOREF(cPages);
     2293
     2294#ifdef PGMPOOL_WITH_OPTIMIZED_DIRTY_PT
     2295    pgmPoolResetDirtyPages(pVM, true /* force removal. */);
     2296#endif
     2297
    20332298    uint16_t idx = pPool->iModifiedHead;
    20342299    pPool->iModifiedHead = NIL_PGMPOOL_IDX;
     
    21712436    }
    21722437    paPhysExts[cMaxPhysExts - 1].iNext = NIL_PGMPOOL_PHYSEXT_INDEX;
     2438#endif
     2439
     2440#ifdef PGMPOOL_WITH_OPTIMIZED_DIRTY_PT
     2441    /* Clear all dirty pages. */
     2442    pPool->idxFreeDirtyPage = 0;
     2443    pPool->cDirtyPages      = 0;
     2444    for (unsigned i = 0; i < RT_ELEMENTS(pPool->aIdxDirtyPages); i++)
     2445        pPool->aIdxDirtyPages[i] = NIL_PGMPOOL_IDX;
    21732446#endif
    21742447
     
    39874260    pPage->fZeroed = false;
    39884261
     4262#ifdef PGMPOOL_WITH_OPTIMIZED_DIRTY_PT
     4263    if (pPage->fDirty)
     4264        pgmPoolFlushDirtyPage(pVM, pPool, pPage->idxDirty, true /* force removal */);
     4265#endif
     4266
    39894267#ifdef PGMPOOL_WITH_USER_TRACKING
    39904268    /*
     
    42114489    pPage->fMonitored = false;
    42124490    pPage->fCached = false;
     4491#ifdef PGMPOOL_WITH_OPTIMIZED_DIRTY_PT
     4492    pPage->fDirty = false;
     4493#endif
    42134494    pPage->fReusedFlushPending = false;
    42144495#ifdef PGMPOOL_WITH_MONITORING
     
    43734654        pPage->fSeenNonGlobal = false;
    43744655        pPage->fMonitored = false;
     4656#ifdef PGMPOOL_WITH_OPTIMIZED_DIRTY_PT
     4657        pPage->fDirty     = false;
     4658#endif
    43754659        pPage->fCached    = false;
    43764660        pPage->fReusedFlushPending = false;
     
    44494733    pPool->iAgeHead = NIL_PGMPOOL_IDX;
    44504734    pPool->iAgeTail = NIL_PGMPOOL_IDX;
     4735#endif
     4736
     4737#ifdef PGMPOOL_WITH_OPTIMIZED_DIRTY_PT
     4738    /* Clear all dirty pages. */
     4739    pPool->idxFreeDirtyPage = 0;
     4740    pPool->cDirtyPages      = 0;
     4741    for (unsigned i = 0; i < RT_ELEMENTS(pPool->aIdxDirtyPages); i++)
     4742        pPool->aIdxDirtyPages[i] = NIL_PGMPOOL_IDX;
    44514743#endif
    44524744
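
Finally, the slot management in pgmPoolAddDirtyPage relies on the slot count being a power of two (the AssertCompile accepts 8 or 16): after a page table is copied into the slot at idxFreeDirtyPage, the index is advanced with a wrap-around mask and, if the new slot is still occupied, the remaining slots are probed for a free one. A compact, self-contained restatement of that search using the DIRTYTRACK stand-in from above:

    /* Pick the next free dirty slot: wrap with a power-of-two mask, then probe
     * the remaining slots.  Returns DIRTY_SLOTS when every slot is occupied,
     * in which case the real code force-flushes an existing entry first. */
    static unsigned dirtyTrackNextFreeSlot(const DIRTYTRACK *pTrack)
    {
        for (unsigned i = 1; i <= DIRTY_SLOTS; i++)
        {
            unsigned idxTry = (pTrack->idxFreeDirtyPage + i) & (DIRTY_SLOTS - 1);
            if (pTrack->aIdxDirtyPages[idxTry] == NIL_IDX)
                return idxTry;
        }
        return DIRTY_SLOTS;
    }

With PGMPOOL_WITH_OPTIMIZED_DIRTY_PT commented out, none of this is active in r22473; the dirty-PT experiment is parked, not removed.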