VirtualBox

Changeset 92167 in vbox for trunk


Timestamp: Nov 1, 2021 2:11:32 PM (3 years ago)
Author: vboxsync
Message: VMM/PGMPhys.cpp: Rearranged the functions a little more by topic to make it easier to find stuff. bugref:10122
File: 1 edited

  • trunk/src/VBox/VMM/VMMR3/PGMPhys.cpp

    r92163 r92167  
    5555#define PGMPHYS_FREE_PAGE_BATCH_SIZE    128
    5656
     57
     58
     59/*********************************************************************************************************************************
     60*   Reading and Writing Guest Physical Memory                                                                                    *
     61*********************************************************************************************************************************/
    5762
    5863/*
     
    353358}
    354359
     360
     361/*********************************************************************************************************************************
     362*   Mapping Guest Physical Memory                                                                                                *
     363*********************************************************************************************************************************/
    355364
    356365/**
     
    877886
    878887
     888/**
     889 * Converts a GC physical address to a HC ring-3 pointer, with some
     890 * additional checks.
     891 *
     892 * @returns VBox status code.
     893 * @retval  VINF_SUCCESS on success.
     894 * @retval  VINF_PGM_PHYS_TLB_CATCH_WRITE and *ppv set if the page has a write
     895 *          access handler of some kind.
     896 * @retval  VERR_PGM_PHYS_TLB_CATCH_ALL if the page has a handler catching all
     897 *          accesses or is odd in any way.
     898 * @retval  VERR_PGM_PHYS_TLB_UNASSIGNED if the page doesn't exist.
     899 *
     900 * @param   pVM         The cross context VM structure.
     901 * @param   GCPhys      The GC physical address to convert.  Since this is only
     902 *                      used for filling the REM TLB, the A20 mask must be
     903 *                      applied before calling this API.
     904 * @param   fWritable   Whether write access is required.
     905 * @param   ppv         Where to store the pointer corresponding to GCPhys on
     906 *                      success.
     907 */
     908VMMR3DECL(int) PGMR3PhysTlbGCPhys2Ptr(PVM pVM, RTGCPHYS GCPhys, bool fWritable, void **ppv)
     909{
     910    PGM_LOCK_VOID(pVM);
     911    PGM_A20_ASSERT_MASKED(VMMGetCpu(pVM), GCPhys);
     912
     913    PPGMRAMRANGE pRam;
     914    PPGMPAGE pPage;
     915    int rc = pgmPhysGetPageAndRangeEx(pVM, GCPhys, &pPage, &pRam);
     916    if (RT_SUCCESS(rc))
     917    {
     918        if (PGM_PAGE_IS_BALLOONED(pPage))
     919            rc = VINF_PGM_PHYS_TLB_CATCH_WRITE;
     920        else if (!PGM_PAGE_HAS_ANY_HANDLERS(pPage))
     921            rc = VINF_SUCCESS;
     922        else
     923        {
     924            if (PGM_PAGE_HAS_ACTIVE_ALL_HANDLERS(pPage)) /* catches MMIO */
     925                rc = VERR_PGM_PHYS_TLB_CATCH_ALL;
     926            else if (PGM_PAGE_HAS_ACTIVE_HANDLERS(pPage))
     927            {
     928                /** @todo Handle TLB loads of virtual handlers so ./test.sh can be made to work
     929                 *        in -norawr0 mode. */
     930                if (fWritable)
     931                    rc = VINF_PGM_PHYS_TLB_CATCH_WRITE;
     932            }
     933            else
     934            {
     935            /* Temporarily disabled physical handler(s); since the recompiler
     936               doesn't get notified when it's reset, we'll have to pretend it's
     937               operating normally. */
     938                if (pgmHandlerPhysicalIsAll(pVM, GCPhys))
     939                    rc = VERR_PGM_PHYS_TLB_CATCH_ALL;
     940                else
     941                    rc = VINF_PGM_PHYS_TLB_CATCH_WRITE;
     942            }
     943        }
     944        if (RT_SUCCESS(rc))
     945        {
     946            int rc2;
     947
     948            /* Make sure what we return is writable. */
     949            if (fWritable)
     950                switch (PGM_PAGE_GET_STATE(pPage))
     951                {
     952                    case PGM_PAGE_STATE_ALLOCATED:
     953                        break;
     954                    case PGM_PAGE_STATE_BALLOONED:
     955                        AssertFailed();
     956                        break;
     957                    case PGM_PAGE_STATE_ZERO:
     958                    case PGM_PAGE_STATE_SHARED:
     959                        if (rc == VINF_PGM_PHYS_TLB_CATCH_WRITE)
     960                            break;
     961                        RT_FALL_THRU();
     962                    case PGM_PAGE_STATE_WRITE_MONITORED:
     963                        rc2 = pgmPhysPageMakeWritable(pVM, pPage, GCPhys & ~(RTGCPHYS)PAGE_OFFSET_MASK);
     964                        AssertLogRelRCReturn(rc2, rc2);
     965                        break;
     966                }
     967
     968            /* Get a ring-3 mapping of the address. */
     969            PPGMPAGER3MAPTLBE pTlbe;
     970            rc2 = pgmPhysPageQueryTlbe(pVM, GCPhys, &pTlbe);
     971            AssertLogRelRCReturn(rc2, rc2);
     972            *ppv = (void *)((uintptr_t)pTlbe->pv | (uintptr_t)(GCPhys & PAGE_OFFSET_MASK));
     973            /** @todo mapping/locking hell; this isn't horribly efficient since
     974             *        pgmPhysPageLoadIntoTlb will repeat the lookup we've done here. */
     975
     976            Log6(("PGMR3PhysTlbGCPhys2Ptr: GCPhys=%RGp rc=%Rrc pPage=%R[pgmpage] *ppv=%p\n", GCPhys, rc, pPage, *ppv));
     977        }
     978        else
     979            Log6(("PGMR3PhysTlbGCPhys2Ptr: GCPhys=%RGp rc=%Rrc pPage=%R[pgmpage]\n", GCPhys, rc, pPage));
     980
     981        /* else: handler catching all access, no pointer returned. */
     982    }
     983    else
     984        rc = VERR_PGM_PHYS_TLB_UNASSIGNED;
     985
     986    PGM_UNLOCK(pVM);
     987    return rc;
     988}
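As a usage note (an illustrative sketch, not part of this changeset): a hypothetical ring-3 caller could look as follows, assuming the usual VMM headers and a valid pVM, and remembering from the doc comment above that the A20 mask must already be applied to GCPhys.

    /* Hypothetical helper, for illustration only: resolve a guest physical
       address to a ring-3 pointer and read one byte through it. */
    static int readGuestByteExample(PVM pVM, RTGCPHYS GCPhys, uint8_t *pb)
    {
        void *pv = NULL;
        int rc = PGMR3PhysTlbGCPhys2Ptr(pVM, GCPhys, false /*fWritable*/, &pv);
        if (RT_SUCCESS(rc))  /* VINF_SUCCESS or VINF_PGM_PHYS_TLB_CATCH_WRITE: pv is usable. */
            *pb = *(uint8_t const *)pv;
        /* On VERR_PGM_PHYS_TLB_CATCH_ALL / VERR_PGM_PHYS_TLB_UNASSIGNED no pointer
           is returned and the access must go through the handler-aware paths. */
        return rc;
    }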
     989
     990
     991
     992/*********************************************************************************************************************************
     993*   RAM Range Management                                                                                                         *
     994*********************************************************************************************************************************/
     995
    879996#define MAKE_LEAF(a_pNode) \
    880997    do { \
     
    11511268
    11521269/**
    1153  * Frees a range of pages, replacing them with ZERO pages of the specified type.
    1154  *
    1155  * @returns VBox status code.
    1156  * @param   pVM         The cross context VM structure.
    1157  * @param   pRam        The RAM range in which the pages reside.
    1158  * @param   GCPhys      The address of the first page.
    1159  * @param   GCPhysLast  The address of the last page.
    1160  * @param   pvMmio2     Pointer to the ring-3 mapping of any MMIO2 memory that
    1161  *                      will replace the pages we're freeing up.
    1162  */
    1163 static int pgmR3PhysFreePageRange(PVM pVM, PPGMRAMRANGE pRam, RTGCPHYS GCPhys, RTGCPHYS GCPhysLast, void *pvMmio2)
    1164 {
    1165     PGM_LOCK_ASSERT_OWNER(pVM);
    1166 
    1167 #ifdef VBOX_WITH_PGM_NEM_MODE
    1168     /*
    1169      * In simplified memory mode we don't actually free the memory,
    1170      * we just unmap it and let NEM do any unlocking of it.
    1171      */
    1172     if (pVM->pgm.s.fNemMode)
    1173     {
    1174         Assert(VM_IS_NEM_ENABLED(pVM));
    1175         uint32_t const  fNemNotify = (pvMmio2 ? NEM_NOTIFY_PHYS_MMIO_EX_F_MMIO2 : 0) | NEM_NOTIFY_PHYS_MMIO_EX_F_REPLACE;
    1176         uint8_t         u2State    = 0; /* (We don't support UINT8_MAX here.) */
    1177         int rc = NEMR3NotifyPhysMmioExMapEarly(pVM, GCPhys, GCPhysLast - GCPhys + 1, fNemNotify,
    1178                                                pRam->pvR3 ? (uint8_t *)pRam->pvR3 + GCPhys - pRam->GCPhys : NULL,
    1179                                                pvMmio2, &u2State);
    1180         AssertLogRelRCReturn(rc, rc);
    1181 
    1182         /* Iterate the pages. */
    1183         PPGMPAGE pPageDst   = &pRam->aPages[(GCPhys - pRam->GCPhys) >> PAGE_SHIFT];
    1184         uint32_t cPagesLeft = ((GCPhysLast - GCPhys) >> PAGE_SHIFT) + 1;
    1185         while (cPagesLeft-- > 0)
    1186         {
    1187             rc = pgmPhysFreePage(pVM, NULL, NULL, pPageDst, GCPhys, PGMPAGETYPE_MMIO);
    1188             AssertLogRelRCReturn(rc, rc); /* We're done for if this goes wrong. */
    1189 
    1190             PGM_PAGE_SET_TYPE(pVM, pPageDst, PGMPAGETYPE_MMIO);
    1191             PGM_PAGE_SET_NEM_STATE(pPageDst, u2State);
    1192 
    1193             GCPhys += PAGE_SIZE;
    1194             pPageDst++;
    1195         }
    1196         return rc;
    1197     }
    1198 #else  /* !VBOX_WITH_PGM_NEM_MODE */
    1199     RT_NOREF(pvMmio2);
    1200 #endif /* !VBOX_WITH_PGM_NEM_MODE */
    1201 
    1202     /*
    1203      * Regular mode.
    1204      */
    1205     /* Prepare. */
    1206     uint32_t            cPendingPages = 0;
    1207     PGMMFREEPAGESREQ    pReq;
    1208     int rc = GMMR3FreePagesPrepare(pVM, &pReq, PGMPHYS_FREE_PAGE_BATCH_SIZE, GMMACCOUNT_BASE);
    1209     AssertLogRelRCReturn(rc, rc);
    1210 
    1211 #ifdef VBOX_WITH_NATIVE_NEM
    1212     /* Tell NEM up-front. */
    1213     uint8_t u2State = UINT8_MAX;
    1214     if (VM_IS_NEM_ENABLED(pVM))
    1215     {
    1216         uint32_t const fNemNotify = (pvMmio2 ? NEM_NOTIFY_PHYS_MMIO_EX_F_MMIO2 : 0) | NEM_NOTIFY_PHYS_MMIO_EX_F_REPLACE;
    1217         rc = NEMR3NotifyPhysMmioExMapEarly(pVM, GCPhys, GCPhysLast - GCPhys + 1, fNemNotify, NULL, pvMmio2, &u2State);
    1218         AssertLogRelRCReturnStmt(rc, GMMR3FreePagesCleanup(pReq), rc);
    1219     }
    1220 #endif
    1221 
    1222     /* Iterate the pages. */
    1223     PPGMPAGE pPageDst   = &pRam->aPages[(GCPhys - pRam->GCPhys) >> PAGE_SHIFT];
    1224     uint32_t cPagesLeft = ((GCPhysLast - GCPhys) >> PAGE_SHIFT) + 1;
    1225     while (cPagesLeft-- > 0)
    1226     {
    1227         rc = pgmPhysFreePage(pVM, pReq, &cPendingPages, pPageDst, GCPhys, PGMPAGETYPE_MMIO);
    1228         AssertLogRelRCReturn(rc, rc); /* We're done for if this goes wrong. */
    1229 
    1230         PGM_PAGE_SET_TYPE(pVM, pPageDst, PGMPAGETYPE_MMIO);
    1231 #ifdef VBOX_WITH_NATIVE_NEM
    1232         if (u2State != UINT8_MAX)
    1233             PGM_PAGE_SET_NEM_STATE(pPageDst, u2State);
    1234 #endif
    1235 
    1236         GCPhys += PAGE_SIZE;
    1237         pPageDst++;
    1238     }
    1239 
    1240     /* Finish pending and cleanup. */
    1241     if (cPendingPages)
    1242     {
    1243         rc = GMMR3FreePagesPerform(pVM, pReq, cPendingPages);
    1244         AssertLogRelRCReturn(rc, rc);
    1245     }
    1246     GMMR3FreePagesCleanup(pReq);
    1247 
    1248     return rc;
    1249 }
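A quick sanity check of the page arithmetic above: GCPhys and GCPhysLast address the first and the last page, so with 4 KiB pages (PAGE_SHIFT = 12) a hypothetical range GCPhys = 0x000A0000, GCPhysLast = 0x000BF000 yields cPagesLeft = ((0xBF000 - 0xA0000) >> 12) + 1 = 0x1F + 1 = 32 pages, i.e. the whole 128 KiB window; the + 1 accounts for GCPhysLast being inclusive.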
    1250 
    1251 #if HC_ARCH_BITS == 64 && (defined(RT_OS_WINDOWS) || defined(RT_OS_SOLARIS) || defined(RT_OS_LINUX) || defined(RT_OS_FREEBSD))
    1252 
    1253 /**
    1254  * Rendezvous callback used by PGMR3ChangeMemBalloon that changes the memory balloon size
    1255  *
    1256  * This is only called on one of the EMTs while the other ones are waiting for
    1257  * it to complete this function.
    1258  *
    1259  * @returns VINF_SUCCESS (VBox strict status code).
    1260  * @param   pVM         The cross context VM structure.
    1261  * @param   pVCpu       The cross context virtual CPU structure of the calling EMT. Unused.
    1262  * @param   pvUser      User parameter
    1263  */
    1264 static DECLCALLBACK(VBOXSTRICTRC) pgmR3PhysChangeMemBalloonRendezvous(PVM pVM, PVMCPU pVCpu, void *pvUser)
    1265 {
    1266     uintptr_t          *paUser          = (uintptr_t *)pvUser;
    1267     bool                fInflate        = !!paUser[0];
    1268     unsigned            cPages          = paUser[1];
    1269     RTGCPHYS           *paPhysPage      = (RTGCPHYS *)paUser[2];
    1270     uint32_t            cPendingPages   = 0;
    1271     PGMMFREEPAGESREQ    pReq;
    1272     int                 rc;
    1273 
    1274     Log(("pgmR3PhysChangeMemBalloonRendezvous: %s %x pages\n", (fInflate) ? "inflate" : "deflate", cPages));
    1275     PGM_LOCK_VOID(pVM);
    1276 
    1277     if (fInflate)
    1278     {
    1279         /* Flush the PGM pool cache as we might have stale references to pages that we just freed. */
    1280         pgmR3PoolClearAllRendezvous(pVM, pVCpu, NULL);
    1281 
    1282         /* Replace pages with ZERO pages. */
    1283         rc = GMMR3FreePagesPrepare(pVM, &pReq, PGMPHYS_FREE_PAGE_BATCH_SIZE, GMMACCOUNT_BASE);
    1284         if (RT_FAILURE(rc))
    1285         {
    1286             PGM_UNLOCK(pVM);
    1287             AssertLogRelRC(rc);
    1288             return rc;
    1289         }
    1290 
    1291         /* Iterate the pages. */
    1292         for (unsigned i = 0; i < cPages; i++)
    1293         {
    1294             PPGMPAGE pPage = pgmPhysGetPage(pVM, paPhysPage[i]);
    1295             if (    pPage == NULL
    1296                 ||  PGM_PAGE_GET_TYPE(pPage) != PGMPAGETYPE_RAM)
    1297             {
    1298                 Log(("pgmR3PhysChangeMemBalloonRendezvous: invalid physical page %RGp pPage->u3Type=%d\n", paPhysPage[i], pPage ? PGM_PAGE_GET_TYPE(pPage) : 0));
    1299                 break;
    1300             }
    1301 
    1302             LogFlow(("balloon page: %RGp\n", paPhysPage[i]));
    1303 
    1304             /* Flush the shadow PT if this page was previously used as a guest page table. */
    1305             pgmPoolFlushPageByGCPhys(pVM, paPhysPage[i]);
    1306 
    1307             rc = pgmPhysFreePage(pVM, pReq, &cPendingPages, pPage, paPhysPage[i], (PGMPAGETYPE)PGM_PAGE_GET_TYPE(pPage));
    1308             if (RT_FAILURE(rc))
    1309             {
    1310                 PGM_UNLOCK(pVM);
    1311                 AssertLogRelRC(rc);
    1312                 return rc;
    1313             }
    1314             Assert(PGM_PAGE_IS_ZERO(pPage));
    1315             PGM_PAGE_SET_STATE(pVM, pPage, PGM_PAGE_STATE_BALLOONED);
    1316         }
    1317 
    1318         if (cPendingPages)
    1319         {
    1320             rc = GMMR3FreePagesPerform(pVM, pReq, cPendingPages);
    1321             if (RT_FAILURE(rc))
    1322             {
    1323                 PGM_UNLOCK(pVM);
    1324                 AssertLogRelRC(rc);
    1325                 return rc;
    1326             }
    1327         }
    1328         GMMR3FreePagesCleanup(pReq);
    1329     }
    1330     else
    1331     {
    1332         /* Iterate the pages. */
    1333         for (unsigned i = 0; i < cPages; i++)
    1334         {
    1335             PPGMPAGE pPage = pgmPhysGetPage(pVM, paPhysPage[i]);
    1336             AssertBreak(pPage && PGM_PAGE_GET_TYPE(pPage) == PGMPAGETYPE_RAM);
    1337 
    1338             LogFlow(("Free ballooned page: %RGp\n", paPhysPage[i]));
    1339 
    1340             Assert(PGM_PAGE_IS_BALLOONED(pPage));
    1341 
    1342             /* Change back to zero page.  (NEM does not need to be informed.) */
    1343             PGM_PAGE_SET_STATE(pVM, pPage, PGM_PAGE_STATE_ZERO);
    1344         }
    1345 
    1346         /* Note that we currently do not map any ballooned pages in our shadow page tables, so no need to flush the pgm pool. */
    1347     }
    1348 
    1349     /* Notify GMM about the balloon change. */
    1350     rc = GMMR3BalloonedPages(pVM, (fInflate) ? GMMBALLOONACTION_INFLATE : GMMBALLOONACTION_DEFLATE, cPages);
    1351     if (RT_SUCCESS(rc))
    1352     {
    1353         if (!fInflate)
    1354         {
    1355             Assert(pVM->pgm.s.cBalloonedPages >= cPages);
    1356             pVM->pgm.s.cBalloonedPages -= cPages;
    1357         }
    1358         else
    1359             pVM->pgm.s.cBalloonedPages += cPages;
    1360     }
    1361 
    1362     PGM_UNLOCK(pVM);
    1363 
    1364     /* Flush the recompiler's TLB as well. */
    1365     for (VMCPUID i = 0; i < pVM->cCpus; i++)
    1366         CPUMSetChangedFlags(pVM->apCpusR3[i], CPUM_CHANGED_GLOBAL_TLB_FLUSH);
    1367 
    1368     AssertLogRelRC(rc);
    1369     return rc;
    1370 }
    1371 
    1372 
    1373 /**
    1374  * Inflate or deflate a memory balloon; rendezvous helper for PGMR3PhysChangeMemBalloon.
    1375  *
    1376  * @returns VBox status code.
    1377  * @param   pVM         The cross context VM structure.
    1378  * @param   fInflate    Inflate or deflate memory balloon
    1379  * @param   cPages      Number of pages to free
    1380  * @param   paPhysPage  Array of guest physical addresses
    1381  */
    1382 static DECLCALLBACK(void) pgmR3PhysChangeMemBalloonHelper(PVM pVM, bool fInflate, unsigned cPages, RTGCPHYS *paPhysPage)
    1383 {
    1384     uintptr_t paUser[3];
    1385 
    1386     paUser[0] = fInflate;
    1387     paUser[1] = cPages;
    1388     paUser[2] = (uintptr_t)paPhysPage;
    1389     int rc = VMMR3EmtRendezvous(pVM, VMMEMTRENDEZVOUS_FLAGS_TYPE_ONCE, pgmR3PhysChangeMemBalloonRendezvous, (void *)paUser);
    1390     AssertRC(rc);
    1391 
    1392     /* Made a copy in PGMR3PhysFreeRamPages; free it here. */
    1393     RTMemFree(paPhysPage);
    1394 }
    1395 
    1396 #endif /* 64-bit host && (Windows || Solaris || Linux || FreeBSD) */
    1397 
    1398 /**
    1399  * Inflate or deflate a memory balloon
    1400  *
    1401  * @returns VBox status code.
    1402  * @param   pVM         The cross context VM structure.
    1403  * @param   fInflate    Inflate or deflate memory balloon
    1404  * @param   cPages      Number of pages to free
    1405  * @param   paPhysPage  Array of guest physical addresses
    1406  */
    1407 VMMR3DECL(int) PGMR3PhysChangeMemBalloon(PVM pVM, bool fInflate, unsigned cPages, RTGCPHYS *paPhysPage)
    1408 {
    1409     /* This must match GMMR0Init; currently we only support memory ballooning on all 64-bit hosts except Mac OS X */
    1410 #if HC_ARCH_BITS == 64 && (defined(RT_OS_WINDOWS) || defined(RT_OS_SOLARIS) || defined(RT_OS_LINUX) || defined(RT_OS_FREEBSD))
    1411     int rc;
    1412 
    1413     /* Older additions (ancient non-functioning balloon code) pass wrong physical addresses. */
    1414     AssertReturn(!(paPhysPage[0] & 0xfff), VERR_INVALID_PARAMETER);
    1415 
    1416     /* We own the IOM lock here and could cause a deadlock by waiting for another VCPU that is blocking on the IOM lock.
    1417      * In the SMP case we post a request packet to postpone the job.
    1418      */
    1419     if (pVM->cCpus > 1)
    1420     {
    1421         unsigned cbPhysPage = cPages * sizeof(paPhysPage[0]);
    1422         RTGCPHYS *paPhysPageCopy = (RTGCPHYS *)RTMemAlloc(cbPhysPage);
    1423         AssertReturn(paPhysPageCopy, VERR_NO_MEMORY);
    1424 
    1425         memcpy(paPhysPageCopy, paPhysPage, cbPhysPage);
    1426 
    1427         rc = VMR3ReqCallNoWait(pVM, VMCPUID_ANY_QUEUE, (PFNRT)pgmR3PhysChangeMemBalloonHelper, 4, pVM, fInflate, cPages, paPhysPageCopy);
    1428         AssertRC(rc);
    1429     }
    1430     else
    1431     {
    1432         uintptr_t paUser[3];
    1433 
    1434         paUser[0] = fInflate;
    1435         paUser[1] = cPages;
    1436         paUser[2] = (uintptr_t)paPhysPage;
    1437         rc = VMMR3EmtRendezvous(pVM, VMMEMTRENDEZVOUS_FLAGS_TYPE_ONCE, pgmR3PhysChangeMemBalloonRendezvous, (void *)paUser);
    1438         AssertRC(rc);
    1439     }
    1440     return rc;
    1441 
    1442 #else
    1443     NOREF(pVM); NOREF(fInflate); NOREF(cPages); NOREF(paPhysPage);
    1444     return VERR_NOT_IMPLEMENTED;
    1445 #endif
    1446 }
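A hedged caller sketch (not in this changeset), assuming page-aligned entries in paPhysPage as the assertion above requires:

    /* Hypothetical sketch: inflate the balloon by cPages guest pages.  On SMP
       VMs the call is queued and paPhysPage is copied first, so the caller may
       reuse its array as soon as the function returns. */
    static int exampleInflateBalloon(PVM pVM, RTGCPHYS *paPhysPage, unsigned cPages)
    {
        return PGMR3PhysChangeMemBalloon(pVM, true /*fInflate*/, cPages, paPhysPage);
    }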
    1447 
    1448 
    1449 /**
    1450  * Rendezvous callback used by PGMR3WriteProtectRAM that write protects all
    1451  * physical RAM.
    1452  *
    1453  * This is only called on one of the EMTs while the other ones are waiting for
    1454  * it to complete this function.
    1455  *
    1456  * @returns VINF_SUCCESS (VBox strict status code).
    1457  * @param   pVM         The cross context VM structure.
    1458  * @param   pVCpu       The cross context virtual CPU structure of the calling EMT. Unused.
    1459  * @param   pvUser      User parameter, unused.
    1460  */
    1461 static DECLCALLBACK(VBOXSTRICTRC) pgmR3PhysWriteProtectRAMRendezvous(PVM pVM, PVMCPU pVCpu, void *pvUser)
    1462 {
    1463     int rc = VINF_SUCCESS;
    1464     NOREF(pvUser); NOREF(pVCpu);
    1465 
    1466     PGM_LOCK_VOID(pVM);
    1467 #ifdef PGMPOOL_WITH_OPTIMIZED_DIRTY_PT
    1468     pgmPoolResetDirtyPages(pVM);
    1469 #endif
    1470 
    1471     /** @todo pointless to write protect the physical page pointed to by RSP. */
    1472 
    1473     for (PPGMRAMRANGE pRam = pVM->pgm.s.CTX_SUFF(pRamRangesX);
    1474          pRam;
    1475          pRam = pRam->CTX_SUFF(pNext))
    1476     {
    1477         uint32_t cPages = pRam->cb >> PAGE_SHIFT;
    1478         for (uint32_t iPage = 0; iPage < cPages; iPage++)
    1479         {
    1480             PPGMPAGE    pPage = &pRam->aPages[iPage];
    1481             PGMPAGETYPE enmPageType = (PGMPAGETYPE)PGM_PAGE_GET_TYPE(pPage);
    1482 
    1483             if (    RT_LIKELY(enmPageType == PGMPAGETYPE_RAM)
    1484                 ||  enmPageType == PGMPAGETYPE_MMIO2)
    1485             {
    1486                 /*
    1487                  * A RAM page.
    1488                  */
    1489                 switch (PGM_PAGE_GET_STATE(pPage))
    1490                 {
    1491                     case PGM_PAGE_STATE_ALLOCATED:
    1492                         /** @todo Optimize this: Don't always re-enable write
    1493                          * monitoring if the page is known to be very busy. */
    1494                         if (PGM_PAGE_IS_WRITTEN_TO(pPage))
    1495                             PGM_PAGE_CLEAR_WRITTEN_TO(pVM, pPage);
    1496 
    1497                         pgmPhysPageWriteMonitor(pVM, pPage, pRam->GCPhys + ((RTGCPHYS)iPage << PAGE_SHIFT));
    1498                         break;
    1499 
    1500                     case PGM_PAGE_STATE_SHARED:
    1501                         AssertFailed();
    1502                         break;
    1503 
    1504                     case PGM_PAGE_STATE_WRITE_MONITORED:    /* nothing to change. */
    1505                     default:
    1506                         break;
    1507                 }
    1508             }
    1509         }
    1510     }
    1511     pgmR3PoolWriteProtectPages(pVM);
    1512     PGM_INVL_ALL_VCPU_TLBS(pVM);
    1513     for (VMCPUID idCpu = 0; idCpu < pVM->cCpus; idCpu++)
    1514         CPUMSetChangedFlags(pVM->apCpusR3[idCpu], CPUM_CHANGED_GLOBAL_TLB_FLUSH);
    1515 
    1516     PGM_UNLOCK(pVM);
    1517     return rc;
    1518 }
    1519 
    1520 /**
    1521  * Protect all physical RAM to monitor writes
    1522  *
    1523  * @returns VBox status code.
    1524  * @param   pVM         The cross context VM structure.
    1525  */
    1526 VMMR3DECL(int) PGMR3PhysWriteProtectRAM(PVM pVM)
    1527 {
    1528     VM_ASSERT_EMT_RETURN(pVM, VERR_VM_THREAD_NOT_EMT);
    1529 
    1530     int rc = VMMR3EmtRendezvous(pVM, VMMEMTRENDEZVOUS_FLAGS_TYPE_ONCE, pgmR3PhysWriteProtectRAMRendezvous, NULL);
    1531     AssertRC(rc);
    1532     return rc;
    1533 }
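Usage is deliberately trivial; a minimal sketch, assuming the caller is already on the EMT (the assertion above enforces this):

    /* Hypothetical sketch: arm write monitoring over all RAM, e.g. before a
       dirty-page tracking pass. */
    int rc = PGMR3PhysWriteProtectRAM(pVM);
    AssertRC(rc);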
    1534 
    1535 
    1536 /**
    15371270 * Gets the number of ram ranges.
    15381271 *
     
    15931326
    15941327
    1595 /**
    1596  * Query the amount of free memory inside VMMR0
     1328/*********************************************************************************************************************************
     1329*   RAM                                                                                                                          *
     1330*********************************************************************************************************************************/
     1331
     1332/**
     1333 * Frees the specified RAM page and replaces it with the ZERO page.
     1334 *
     1335 * This is used by ballooning, remapping MMIO2, RAM reset and state loading.
     1336 *
     1337 * @param   pVM             The cross context VM structure.
     1338 * @param   pReq            Pointer to the request.  This is NULL when doing a
     1339 *                          bulk free in NEM memory mode.
     1340 * @param   pcPendingPages  Where the number of pages waiting to be freed are
     1341 *                          kept.  This will normally be incremented.  This is
     1342 *                          NULL when doing a bulk free in NEM memory mode.
     1343 * @param   pPage           Pointer to the page structure.
     1344 * @param   GCPhys          The guest physical address of the page, if applicable.
     1345 * @param   enmNewType      New page type for NEM notification, since several
     1346 *                          callers will change the type upon successful return.
     1347 *
     1348 * @remarks The caller must own the PGM lock.
     1349 */
     1350int pgmPhysFreePage(PVM pVM, PGMMFREEPAGESREQ pReq, uint32_t *pcPendingPages, PPGMPAGE pPage, RTGCPHYS GCPhys,
     1351                    PGMPAGETYPE enmNewType)
     1352{
     1353    /*
     1354     * Assert sanity.
     1355     */
     1356    PGM_LOCK_ASSERT_OWNER(pVM);
     1357    if (RT_UNLIKELY(    PGM_PAGE_GET_TYPE(pPage) != PGMPAGETYPE_RAM
     1358                    &&  PGM_PAGE_GET_TYPE(pPage) != PGMPAGETYPE_ROM_SHADOW))
     1359    {
     1360        AssertMsgFailed(("GCPhys=%RGp pPage=%R[pgmpage]\n", GCPhys, pPage));
     1361        return VMSetError(pVM, VERR_PGM_PHYS_NOT_RAM, RT_SRC_POS, "GCPhys=%RGp type=%d", GCPhys, PGM_PAGE_GET_TYPE(pPage));
     1362    }
     1363
     1364    /** @todo What about ballooning of large pages??! */
     1365    Assert(   PGM_PAGE_GET_PDE_TYPE(pPage) != PGM_PAGE_PDE_TYPE_PDE
     1366           && PGM_PAGE_GET_PDE_TYPE(pPage) != PGM_PAGE_PDE_TYPE_PDE_DISABLED);
     1367
     1368    if (    PGM_PAGE_IS_ZERO(pPage)
     1369        ||  PGM_PAGE_IS_BALLOONED(pPage))
     1370        return VINF_SUCCESS;
     1371
     1372    const uint32_t idPage = PGM_PAGE_GET_PAGEID(pPage);
     1373    Log3(("pgmPhysFreePage: idPage=%#x GCPhys=%RGp pPage=%R[pgmpage]\n", idPage, GCPhys, pPage));
     1374    if (RT_UNLIKELY(!PGM_IS_IN_NEM_MODE(pVM)
     1375                    ?    idPage == NIL_GMM_PAGEID
     1376                      ||  idPage > GMM_PAGEID_LAST
     1377                      ||  PGM_PAGE_GET_CHUNKID(pPage) == NIL_GMM_CHUNKID
     1378                    :    idPage != NIL_GMM_PAGEID))
     1379    {
     1380        AssertMsgFailed(("GCPhys=%RGp pPage=%R[pgmpage]\n", GCPhys, pPage));
      1381        return VMSetError(pVM, VERR_PGM_PHYS_INVALID_PAGE_ID, RT_SRC_POS, "GCPhys=%RGp idPage=%#x", GCPhys, idPage);
     1382    }
     1383#ifdef VBOX_WITH_NATIVE_NEM
     1384    const RTHCPHYS HCPhysPrev = PGM_PAGE_GET_HCPHYS(pPage);
     1385#endif
     1386
     1387    /* update page count stats. */
     1388    if (PGM_PAGE_IS_SHARED(pPage))
     1389        pVM->pgm.s.cSharedPages--;
     1390    else
     1391        pVM->pgm.s.cPrivatePages--;
     1392    pVM->pgm.s.cZeroPages++;
     1393
     1394    /* Deal with write monitored pages. */
     1395    if (PGM_PAGE_GET_STATE(pPage) == PGM_PAGE_STATE_WRITE_MONITORED)
     1396    {
     1397        PGM_PAGE_SET_WRITTEN_TO(pVM, pPage);
     1398        pVM->pgm.s.cWrittenToPages++;
     1399    }
     1400
     1401    /*
     1402     * pPage = ZERO page.
     1403     */
     1404    PGM_PAGE_SET_HCPHYS(pVM, pPage, pVM->pgm.s.HCPhysZeroPg);
     1405    PGM_PAGE_SET_STATE(pVM, pPage, PGM_PAGE_STATE_ZERO);
     1406    PGM_PAGE_SET_PAGEID(pVM, pPage, NIL_GMM_PAGEID);
     1407    PGM_PAGE_SET_PDE_TYPE(pVM, pPage, PGM_PAGE_PDE_TYPE_DONTCARE);
     1408    PGM_PAGE_SET_PTE_INDEX(pVM, pPage, 0);
     1409    PGM_PAGE_SET_TRACKING(pVM, pPage, 0);
     1410
     1411    /* Flush physical page map TLB entry. */
     1412    pgmPhysInvalidatePageMapTLBEntry(pVM, GCPhys);
     1413
     1414#ifdef VBOX_WITH_PGM_NEM_MODE
     1415    /*
     1416     * Skip the rest if we're doing a bulk free in NEM memory mode.
     1417     */
     1418    if (!pReq)
     1419        return VINF_SUCCESS;
     1420    AssertLogRelReturn(!pVM->pgm.s.fNemMode, VERR_PGM_NOT_SUPPORTED_FOR_NEM_MODE);
     1421#endif
     1422
     1423#ifdef VBOX_WITH_NATIVE_NEM
     1424    /* Notify NEM. */
     1425    /** @todo Remove this one? */
     1426    if (VM_IS_NEM_ENABLED(pVM))
     1427    {
     1428        uint8_t u2State = PGM_PAGE_GET_NEM_STATE(pPage);
     1429        NEMHCNotifyPhysPageChanged(pVM, GCPhys, HCPhysPrev, pVM->pgm.s.HCPhysZeroPg, pVM->pgm.s.pvZeroPgR3,
     1430                                   pgmPhysPageCalcNemProtection(pPage, enmNewType), enmNewType, &u2State);
     1431        PGM_PAGE_SET_NEM_STATE(pPage, u2State);
     1432    }
     1433#else
     1434    RT_NOREF(enmNewType);
     1435#endif
     1436
     1437    /*
     1438     * Make sure it's not in the handy page array.
     1439     */
     1440    for (uint32_t i = pVM->pgm.s.cHandyPages; i < RT_ELEMENTS(pVM->pgm.s.aHandyPages); i++)
     1441    {
     1442        if (pVM->pgm.s.aHandyPages[i].idPage == idPage)
     1443        {
     1444            pVM->pgm.s.aHandyPages[i].idPage = NIL_GMM_PAGEID;
     1445            break;
     1446        }
     1447        if (pVM->pgm.s.aHandyPages[i].idSharedPage == idPage)
     1448        {
     1449            pVM->pgm.s.aHandyPages[i].idSharedPage = NIL_GMM_PAGEID;
     1450            break;
     1451        }
     1452    }
     1453
     1454    /*
     1455     * Push it onto the page array.
     1456     */
     1457    uint32_t iPage = *pcPendingPages;
     1458    Assert(iPage < PGMPHYS_FREE_PAGE_BATCH_SIZE);
     1459    *pcPendingPages += 1;
     1460
     1461    pReq->aPages[iPage].idPage = idPage;
     1462
     1463    if (iPage + 1 < PGMPHYS_FREE_PAGE_BATCH_SIZE)
     1464        return VINF_SUCCESS;
     1465
     1466    /*
     1467     * Flush the pages.
     1468     */
     1469    int rc = GMMR3FreePagesPerform(pVM, pReq, PGMPHYS_FREE_PAGE_BATCH_SIZE);
     1470    if (RT_SUCCESS(rc))
     1471    {
     1472        GMMR3FreePagesRePrep(pVM, pReq, PGMPHYS_FREE_PAGE_BATCH_SIZE, GMMACCOUNT_BASE);
     1473        *pcPendingPages = 0;
     1474    }
     1475    return rc;
     1476}
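In short, regular-mode callers follow a prepare/free/perform/cleanup pattern: GMMR3FreePagesPrepare sizes the request for PGMPHYS_FREE_PAGE_BATCH_SIZE (128) entries, each pgmPhysFreePage call appends one page ID and bumps *pcPendingPages, every 128th call flushes the batch to ring-0 via GMMR3FreePagesPerform and re-preps the request (resetting *pcPendingPages to zero), and the caller flushes any shorter tail itself before GMMR3FreePagesCleanup, exactly as pgmR3PhysFreePageRange does below.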
     1477
     1478
     1479/**
     1480 * Frees a range of pages, replacing them with ZERO pages of the specified type.
    15971481 *
    15981482 * @returns VBox status code.
    1599  * @param   pUVM                The user mode VM handle.
    1600  * @param   pcbAllocMem         Where to return the amount of memory allocated
    1601  *                              by VMs.
    1602  * @param   pcbFreeMem          Where to return the amount of memory that is
    1603  *                              allocated from the host but not currently used
    1604  *                              by any VMs.
    1605  * @param   pcbBallonedMem      Where to return the sum of memory that is
    1606  *                              currently ballooned by the VMs.
    1607  * @param   pcbSharedMem        Where to return the amount of memory that is
    1608  *                              currently shared.
    1609  */
    1610 VMMR3DECL(int) PGMR3QueryGlobalMemoryStats(PUVM pUVM, uint64_t *pcbAllocMem, uint64_t *pcbFreeMem,
    1611                                            uint64_t *pcbBallonedMem, uint64_t *pcbSharedMem)
    1612 {
    1613     UVM_ASSERT_VALID_EXT_RETURN(pUVM, VERR_INVALID_VM_HANDLE);
    1614     VM_ASSERT_VALID_EXT_RETURN(pUVM->pVM, VERR_INVALID_VM_HANDLE);
    1615 
    1616     uint64_t cAllocPages   = 0;
    1617     uint64_t cFreePages    = 0;
    1618     uint64_t cBalloonPages = 0;
    1619     uint64_t cSharedPages  = 0;
    1620     int rc = GMMR3QueryHypervisorMemoryStats(pUVM->pVM, &cAllocPages, &cFreePages, &cBalloonPages, &cSharedPages);
    1621     AssertRCReturn(rc, rc);
    1622 
    1623     if (pcbAllocMem)
    1624         *pcbAllocMem    = cAllocPages * _4K;
    1625 
    1626     if (pcbFreeMem)
    1627         *pcbFreeMem     = cFreePages * _4K;
    1628 
    1629     if (pcbBallonedMem)
    1630         *pcbBallonedMem = cBalloonPages * _4K;
    1631 
    1632     if (pcbSharedMem)
    1633         *pcbSharedMem   = cSharedPages * _4K;
    1634 
    1635     Log(("PGMR3QueryGlobalMemoryStats: all=%llx free=%llx ballooned=%llx shared=%llx\n",
    1636          cAllocPages, cFreePages, cBalloonPages, cSharedPages));
    1637     return VINF_SUCCESS;
    1638 }
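A hedged caller sketch for the two statistics queries (PGMR3QueryMemoryStats is documented just below), assuming a valid pUVM; any out parameter may be NULL when the caller doesn't need it:

    /* Hypothetical sketch: fetch host-global and per-VM memory statistics. */
    uint64_t cbAlloc = 0, cbFree = 0, cbBallooned = 0, cbShared = 0;
    int rc = PGMR3QueryGlobalMemoryStats(pUVM, &cbAlloc, &cbFree, &cbBallooned, &cbShared);
    AssertRC(rc);

    uint64_t cbTotal = 0, cbPrivate = 0, cbSharedVM = 0, cbZero = 0;
    rc = PGMR3QueryMemoryStats(pUVM, &cbTotal, &cbPrivate, &cbSharedVM, &cbZero);
    AssertRC(rc);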
    1639 
    1640 
    1641 /**
    1642  * Query memory stats for the VM.
    1643  *
    1644  * @returns VBox status code.
    1645  * @param   pUVM                The user mode VM handle.
    1646  * @param   pcbTotalMem         Where to return the total amount of memory the VM may
    1647  *                              possibly use.
    1648  * @param   pcbPrivateMem       Where to return the amount of private memory
    1649  *                              currently allocated.
    1650  * @param   pcbSharedMem        Where to return the amount of actually shared
    1651  *                              memory currently used by the VM.
    1652  * @param   pcbZeroMem          Where to return the amount of memory backed by
    1653  *                              zero pages.
    1654  *
    1655  * @remarks The total mem is normally larger than the sum of the three
    1656  *          components.  There are two reasons for this, first the amount of
    1657  *          shared memory is what we're sure is shared instead of what could
    1658  *          possibly be shared with someone.  Secondly, because the total may
    1659  *          include some pure MMIO pages that don't go into any of the three
    1660  *          sub-counts.
    1661  *
    1662  * @todo Why do we return reused shared pages instead of anything that could
    1663  *       potentially be shared?  Doesn't this mean the first VM gets a much
    1664  *       lower number of shared pages?
    1665  */
    1666 VMMR3DECL(int) PGMR3QueryMemoryStats(PUVM pUVM, uint64_t *pcbTotalMem, uint64_t *pcbPrivateMem,
    1667                                      uint64_t *pcbSharedMem, uint64_t *pcbZeroMem)
    1668 {
    1669     UVM_ASSERT_VALID_EXT_RETURN(pUVM, VERR_INVALID_VM_HANDLE);
    1670     PVM pVM = pUVM->pVM;
    1671     VM_ASSERT_VALID_EXT_RETURN(pVM, VERR_INVALID_VM_HANDLE);
    1672 
    1673     if (pcbTotalMem)
    1674         *pcbTotalMem    = (uint64_t)pVM->pgm.s.cAllPages            * PAGE_SIZE;
    1675 
    1676     if (pcbPrivateMem)
    1677         *pcbPrivateMem  = (uint64_t)pVM->pgm.s.cPrivatePages        * PAGE_SIZE;
    1678 
    1679     if (pcbSharedMem)
    1680         *pcbSharedMem   = (uint64_t)pVM->pgm.s.cReusedSharedPages   * PAGE_SIZE;
    1681 
    1682     if (pcbZeroMem)
    1683         *pcbZeroMem     = (uint64_t)pVM->pgm.s.cZeroPages           * PAGE_SIZE;
    1684 
    1685     Log(("PGMR3QueryMemoryStats: all=%x private=%x reused=%x zero=%x\n", pVM->pgm.s.cAllPages, pVM->pgm.s.cPrivatePages, pVM->pgm.s.cReusedSharedPages, pVM->pgm.s.cZeroPages));
    1686     return VINF_SUCCESS;
     1483 * @param   pVM         The cross context VM structure.
      1484 * @param   pRam        The RAM range in which the pages reside.
     1485 * @param   GCPhys      The address of the first page.
     1486 * @param   GCPhysLast  The address of the last page.
     1487 * @param   pvMmio2     Pointer to the ring-3 mapping of any MMIO2 memory that
     1488 *                      will replace the pages we're freeing up.
     1489 */
     1490static int pgmR3PhysFreePageRange(PVM pVM, PPGMRAMRANGE pRam, RTGCPHYS GCPhys, RTGCPHYS GCPhysLast, void *pvMmio2)
     1491{
     1492    PGM_LOCK_ASSERT_OWNER(pVM);
     1493
     1494#ifdef VBOX_WITH_PGM_NEM_MODE
     1495    /*
     1496     * In simplified memory mode we don't actually free the memory,
     1497     * we just unmap it and let NEM do any unlocking of it.
     1498     */
     1499    if (pVM->pgm.s.fNemMode)
     1500    {
     1501        Assert(VM_IS_NEM_ENABLED(pVM));
     1502        uint32_t const  fNemNotify = (pvMmio2 ? NEM_NOTIFY_PHYS_MMIO_EX_F_MMIO2 : 0) | NEM_NOTIFY_PHYS_MMIO_EX_F_REPLACE;
     1503        uint8_t         u2State    = 0; /* (We don't support UINT8_MAX here.) */
     1504        int rc = NEMR3NotifyPhysMmioExMapEarly(pVM, GCPhys, GCPhysLast - GCPhys + 1, fNemNotify,
     1505                                               pRam->pvR3 ? (uint8_t *)pRam->pvR3 + GCPhys - pRam->GCPhys : NULL,
     1506                                               pvMmio2, &u2State);
     1507        AssertLogRelRCReturn(rc, rc);
     1508
     1509        /* Iterate the pages. */
     1510        PPGMPAGE pPageDst   = &pRam->aPages[(GCPhys - pRam->GCPhys) >> PAGE_SHIFT];
     1511        uint32_t cPagesLeft = ((GCPhysLast - GCPhys) >> PAGE_SHIFT) + 1;
     1512        while (cPagesLeft-- > 0)
     1513        {
     1514            rc = pgmPhysFreePage(pVM, NULL, NULL, pPageDst, GCPhys, PGMPAGETYPE_MMIO);
     1515            AssertLogRelRCReturn(rc, rc); /* We're done for if this goes wrong. */
     1516
     1517            PGM_PAGE_SET_TYPE(pVM, pPageDst, PGMPAGETYPE_MMIO);
     1518            PGM_PAGE_SET_NEM_STATE(pPageDst, u2State);
     1519
     1520            GCPhys += PAGE_SIZE;
     1521            pPageDst++;
     1522        }
     1523        return rc;
     1524    }
     1525#else  /* !VBOX_WITH_PGM_NEM_MODE */
     1526    RT_NOREF(pvMmio2);
     1527#endif /* !VBOX_WITH_PGM_NEM_MODE */
     1528
     1529    /*
     1530     * Regular mode.
     1531     */
     1532    /* Prepare. */
     1533    uint32_t            cPendingPages = 0;
     1534    PGMMFREEPAGESREQ    pReq;
     1535    int rc = GMMR3FreePagesPrepare(pVM, &pReq, PGMPHYS_FREE_PAGE_BATCH_SIZE, GMMACCOUNT_BASE);
     1536    AssertLogRelRCReturn(rc, rc);
     1537
     1538#ifdef VBOX_WITH_NATIVE_NEM
     1539    /* Tell NEM up-front. */
     1540    uint8_t u2State = UINT8_MAX;
     1541    if (VM_IS_NEM_ENABLED(pVM))
     1542    {
     1543        uint32_t const fNemNotify = (pvMmio2 ? NEM_NOTIFY_PHYS_MMIO_EX_F_MMIO2 : 0) | NEM_NOTIFY_PHYS_MMIO_EX_F_REPLACE;
     1544        rc = NEMR3NotifyPhysMmioExMapEarly(pVM, GCPhys, GCPhysLast - GCPhys + 1, fNemNotify, NULL, pvMmio2, &u2State);
     1545        AssertLogRelRCReturnStmt(rc, GMMR3FreePagesCleanup(pReq), rc);
     1546    }
     1547#endif
     1548
     1549    /* Iterate the pages. */
     1550    PPGMPAGE pPageDst   = &pRam->aPages[(GCPhys - pRam->GCPhys) >> PAGE_SHIFT];
     1551    uint32_t cPagesLeft = ((GCPhysLast - GCPhys) >> PAGE_SHIFT) + 1;
     1552    while (cPagesLeft-- > 0)
     1553    {
     1554        rc = pgmPhysFreePage(pVM, pReq, &cPendingPages, pPageDst, GCPhys, PGMPAGETYPE_MMIO);
     1555        AssertLogRelRCReturn(rc, rc); /* We're done for if this goes wrong. */
     1556
     1557        PGM_PAGE_SET_TYPE(pVM, pPageDst, PGMPAGETYPE_MMIO);
     1558#ifdef VBOX_WITH_NATIVE_NEM
     1559        if (u2State != UINT8_MAX)
     1560            PGM_PAGE_SET_NEM_STATE(pPageDst, u2State);
     1561#endif
     1562
     1563        GCPhys += PAGE_SIZE;
     1564        pPageDst++;
     1565    }
     1566
     1567    /* Finish pending and cleanup. */
     1568    if (cPendingPages)
     1569    {
     1570        rc = GMMR3FreePagesPerform(pVM, pReq, cPendingPages);
     1571        AssertLogRelRCReturn(rc, rc);
     1572    }
     1573    GMMR3FreePagesCleanup(pReq);
     1574
     1575    return rc;
    16871576}
    16881577
     
    23482237}
    23492238
     2239
     2240
     2241/*********************************************************************************************************************************
     2242*   MMIO                                                                                                                         *
     2243*********************************************************************************************************************************/
    23502244
    23512245/**
     
    42314125}
    42324126
     4127
     4128
     4129/*********************************************************************************************************************************
     4130*   ROM                                                                                                                          *
     4131*********************************************************************************************************************************/
    42334132
    42344133/**
     
    50634962
    50644963
    5065 /**
    5066  * Sets the Address Gate 20 state.
    5067  *
    5068  * @param   pVCpu       The cross context virtual CPU structure.
    5069  * @param   fEnable     True if the gate should be enabled.
    5070  *                      False if the gate should be disabled.
    5071  */
    5072 VMMDECL(void) PGMR3PhysSetA20(PVMCPU pVCpu, bool fEnable)
    5073 {
    5074     LogFlow(("PGMR3PhysSetA20 %d (was %d)\n", fEnable, pVCpu->pgm.s.fA20Enabled));
    5075     if (pVCpu->pgm.s.fA20Enabled != fEnable)
    5076     {
    5077 #ifdef VBOX_WITH_NESTED_HWVIRT_VMX
    5078         PCCPUMCTX pCtx = CPUMQueryGuestCtxPtr(pVCpu);
    5079         if (   CPUMIsGuestInVmxRootMode(pCtx)
    5080             && !fEnable)
    5081         {
    5082             Log(("Cannot enter A20M mode while in VMX root mode\n"));
    5083             return;
    5084         }
     4964
     4965/*********************************************************************************************************************************
     4966*   Ballooning                                                                                                                   *
     4967*********************************************************************************************************************************/
     4968
     4969#if HC_ARCH_BITS == 64 && (defined(RT_OS_WINDOWS) || defined(RT_OS_SOLARIS) || defined(RT_OS_LINUX) || defined(RT_OS_FREEBSD))
     4970
     4971/**
     4972 * Rendezvous callback used by PGMR3ChangeMemBalloon that changes the memory balloon size
     4973 *
     4974 * This is only called on one of the EMTs while the other ones are waiting for
     4975 * it to complete this function.
     4976 *
     4977 * @returns VINF_SUCCESS (VBox strict status code).
     4978 * @param   pVM         The cross context VM structure.
     4979 * @param   pVCpu       The cross context virtual CPU structure of the calling EMT. Unused.
     4980 * @param   pvUser      User parameter
     4981 */
     4982static DECLCALLBACK(VBOXSTRICTRC) pgmR3PhysChangeMemBalloonRendezvous(PVM pVM, PVMCPU pVCpu, void *pvUser)
     4983{
     4984    uintptr_t          *paUser          = (uintptr_t *)pvUser;
     4985    bool                fInflate        = !!paUser[0];
     4986    unsigned            cPages          = paUser[1];
     4987    RTGCPHYS           *paPhysPage      = (RTGCPHYS *)paUser[2];
     4988    uint32_t            cPendingPages   = 0;
     4989    PGMMFREEPAGESREQ    pReq;
     4990    int                 rc;
     4991
     4992    Log(("pgmR3PhysChangeMemBalloonRendezvous: %s %x pages\n", (fInflate) ? "inflate" : "deflate", cPages));
     4993    PGM_LOCK_VOID(pVM);
     4994
     4995    if (fInflate)
     4996    {
     4997        /* Flush the PGM pool cache as we might have stale references to pages that we just freed. */
     4998        pgmR3PoolClearAllRendezvous(pVM, pVCpu, NULL);
     4999
     5000        /* Replace pages with ZERO pages. */
     5001        rc = GMMR3FreePagesPrepare(pVM, &pReq, PGMPHYS_FREE_PAGE_BATCH_SIZE, GMMACCOUNT_BASE);
     5002        if (RT_FAILURE(rc))
     5003        {
     5004            PGM_UNLOCK(pVM);
     5005            AssertLogRelRC(rc);
     5006            return rc;
     5007        }
     5008
     5009        /* Iterate the pages. */
     5010        for (unsigned i = 0; i < cPages; i++)
     5011        {
     5012            PPGMPAGE pPage = pgmPhysGetPage(pVM, paPhysPage[i]);
     5013            if (    pPage == NULL
     5014                ||  PGM_PAGE_GET_TYPE(pPage) != PGMPAGETYPE_RAM)
     5015            {
     5016                Log(("pgmR3PhysChangeMemBalloonRendezvous: invalid physical page %RGp pPage->u3Type=%d\n", paPhysPage[i], pPage ? PGM_PAGE_GET_TYPE(pPage) : 0));
     5017                break;
     5018            }
     5019
     5020            LogFlow(("balloon page: %RGp\n", paPhysPage[i]));
     5021
     5022            /* Flush the shadow PT if this page was previously used as a guest page table. */
     5023            pgmPoolFlushPageByGCPhys(pVM, paPhysPage[i]);
     5024
     5025            rc = pgmPhysFreePage(pVM, pReq, &cPendingPages, pPage, paPhysPage[i], (PGMPAGETYPE)PGM_PAGE_GET_TYPE(pPage));
     5026            if (RT_FAILURE(rc))
     5027            {
     5028                PGM_UNLOCK(pVM);
     5029                AssertLogRelRC(rc);
     5030                return rc;
     5031            }
     5032            Assert(PGM_PAGE_IS_ZERO(pPage));
     5033            PGM_PAGE_SET_STATE(pVM, pPage, PGM_PAGE_STATE_BALLOONED);
     5034        }
     5035
     5036        if (cPendingPages)
     5037        {
     5038            rc = GMMR3FreePagesPerform(pVM, pReq, cPendingPages);
     5039            if (RT_FAILURE(rc))
     5040            {
     5041                PGM_UNLOCK(pVM);
     5042                AssertLogRelRC(rc);
     5043                return rc;
     5044            }
     5045        }
     5046        GMMR3FreePagesCleanup(pReq);
     5047    }
     5048    else
     5049    {
     5050        /* Iterate the pages. */
     5051        for (unsigned i = 0; i < cPages; i++)
     5052        {
     5053            PPGMPAGE pPage = pgmPhysGetPage(pVM, paPhysPage[i]);
     5054            AssertBreak(pPage && PGM_PAGE_GET_TYPE(pPage) == PGMPAGETYPE_RAM);
     5055
     5056            LogFlow(("Free ballooned page: %RGp\n", paPhysPage[i]));
     5057
     5058            Assert(PGM_PAGE_IS_BALLOONED(pPage));
     5059
     5060            /* Change back to zero page.  (NEM does not need to be informed.) */
     5061            PGM_PAGE_SET_STATE(pVM, pPage, PGM_PAGE_STATE_ZERO);
     5062        }
     5063
     5064        /* Note that we currently do not map any ballooned pages in our shadow page tables, so no need to flush the pgm pool. */
     5065    }
     5066
     5067    /* Notify GMM about the balloon change. */
     5068    rc = GMMR3BalloonedPages(pVM, (fInflate) ? GMMBALLOONACTION_INFLATE : GMMBALLOONACTION_DEFLATE, cPages);
     5069    if (RT_SUCCESS(rc))
     5070    {
     5071        if (!fInflate)
     5072        {
     5073            Assert(pVM->pgm.s.cBalloonedPages >= cPages);
     5074            pVM->pgm.s.cBalloonedPages -= cPages;
     5075        }
     5076        else
     5077            pVM->pgm.s.cBalloonedPages += cPages;
     5078    }
     5079
     5080    PGM_UNLOCK(pVM);
     5081
     5082    /* Flush the recompiler's TLB as well. */
     5083    for (VMCPUID i = 0; i < pVM->cCpus; i++)
     5084        CPUMSetChangedFlags(pVM->apCpusR3[i], CPUM_CHANGED_GLOBAL_TLB_FLUSH);
     5085
     5086    AssertLogRelRC(rc);
     5087    return rc;
     5088}
     5089
     5090
     5091/**
      5092 * Inflate or deflate a memory balloon; rendezvous helper for PGMR3PhysChangeMemBalloon.
     5093 *
     5094 * @returns VBox status code.
     5095 * @param   pVM         The cross context VM structure.
     5096 * @param   fInflate    Inflate or deflate memory balloon
     5097 * @param   cPages      Number of pages to free
     5098 * @param   paPhysPage  Array of guest physical addresses
     5099 */
     5100static DECLCALLBACK(void) pgmR3PhysChangeMemBalloonHelper(PVM pVM, bool fInflate, unsigned cPages, RTGCPHYS *paPhysPage)
     5101{
     5102    uintptr_t paUser[3];
     5103
     5104    paUser[0] = fInflate;
     5105    paUser[1] = cPages;
     5106    paUser[2] = (uintptr_t)paPhysPage;
     5107    int rc = VMMR3EmtRendezvous(pVM, VMMEMTRENDEZVOUS_FLAGS_TYPE_ONCE, pgmR3PhysChangeMemBalloonRendezvous, (void *)paUser);
     5108    AssertRC(rc);
     5109
     5110    /* Made a copy in PGMR3PhysFreeRamPages; free it here. */
     5111    RTMemFree(paPhysPage);
     5112}
     5113
     5114#endif /* 64-bit host && (Windows || Solaris || Linux || FreeBSD) */
     5115
     5116/**
     5117 * Inflate or deflate a memory balloon
     5118 *
     5119 * @returns VBox status code.
     5120 * @param   pVM         The cross context VM structure.
     5121 * @param   fInflate    Inflate or deflate memory balloon
     5122 * @param   cPages      Number of pages to free
     5123 * @param   paPhysPage  Array of guest physical addresses
     5124 */
     5125VMMR3DECL(int) PGMR3PhysChangeMemBalloon(PVM pVM, bool fInflate, unsigned cPages, RTGCPHYS *paPhysPage)
     5126{
     5127    /* This must match GMMR0Init; currently we only support memory ballooning on all 64-bit hosts except Mac OS X */
     5128#if HC_ARCH_BITS == 64 && (defined(RT_OS_WINDOWS) || defined(RT_OS_SOLARIS) || defined(RT_OS_LINUX) || defined(RT_OS_FREEBSD))
     5129    int rc;
     5130
     5131    /* Older additions (ancient non-functioning balloon code) pass wrong physical addresses. */
     5132    AssertReturn(!(paPhysPage[0] & 0xfff), VERR_INVALID_PARAMETER);
     5133
     5134    /* We own the IOM lock here and could cause a deadlock by waiting for another VCPU that is blocking on the IOM lock.
     5135     * In the SMP case we post a request packet to postpone the job.
     5136     */
     5137    if (pVM->cCpus > 1)
     5138    {
     5139        unsigned cbPhysPage = cPages * sizeof(paPhysPage[0]);
     5140        RTGCPHYS *paPhysPageCopy = (RTGCPHYS *)RTMemAlloc(cbPhysPage);
     5141        AssertReturn(paPhysPageCopy, VERR_NO_MEMORY);
     5142
     5143        memcpy(paPhysPageCopy, paPhysPage, cbPhysPage);
     5144
     5145        rc = VMR3ReqCallNoWait(pVM, VMCPUID_ANY_QUEUE, (PFNRT)pgmR3PhysChangeMemBalloonHelper, 4, pVM, fInflate, cPages, paPhysPageCopy);
     5146        AssertRC(rc);
     5147    }
     5148    else
     5149    {
     5150        uintptr_t paUser[3];
     5151
     5152        paUser[0] = fInflate;
     5153        paUser[1] = cPages;
     5154        paUser[2] = (uintptr_t)paPhysPage;
     5155        rc = VMMR3EmtRendezvous(pVM, VMMEMTRENDEZVOUS_FLAGS_TYPE_ONCE, pgmR3PhysChangeMemBalloonRendezvous, (void *)paUser);
     5156        AssertRC(rc);
     5157    }
     5158    return rc;
     5159
     5160#else
     5161    NOREF(pVM); NOREF(fInflate); NOREF(cPages); NOREF(paPhysPage);
     5162    return VERR_NOT_IMPLEMENTED;
    50855163#endif
    5086         pVCpu->pgm.s.fA20Enabled = fEnable;
    5087         pVCpu->pgm.s.GCPhysA20Mask = ~((RTGCPHYS)!fEnable << 20);
    5088         if (VM_IS_NEM_ENABLED(pVCpu->CTX_SUFF(pVM)))
    5089             NEMR3NotifySetA20(pVCpu, fEnable);
    5090 #ifdef PGM_WITH_A20
    5091         VMCPU_FF_SET(pVCpu, VMCPU_FF_PGM_SYNC_CR3);
    5092         pgmR3RefreshShadowModeAfterA20Change(pVCpu);
    5093         HMFlushTlb(pVCpu);
     5164}
     5165
     5166
     5167/*********************************************************************************************************************************
     5168*   Write Monitoring                                                                                                             *
     5169*********************************************************************************************************************************/
     5170
     5171/**
     5172 * Rendezvous callback used by PGMR3WriteProtectRAM that write protects all
     5173 * physical RAM.
     5174 *
     5175 * This is only called on one of the EMTs while the other ones are waiting for
     5176 * it to complete this function.
     5177 *
     5178 * @returns VINF_SUCCESS (VBox strict status code).
     5179 * @param   pVM         The cross context VM structure.
     5180 * @param   pVCpu       The cross context virtual CPU structure of the calling EMT. Unused.
     5181 * @param   pvUser      User parameter, unused.
     5182 */
     5183static DECLCALLBACK(VBOXSTRICTRC) pgmR3PhysWriteProtectRAMRendezvous(PVM pVM, PVMCPU pVCpu, void *pvUser)
     5184{
     5185    int rc = VINF_SUCCESS;
     5186    NOREF(pvUser); NOREF(pVCpu);
     5187
     5188    PGM_LOCK_VOID(pVM);
     5189#ifdef PGMPOOL_WITH_OPTIMIZED_DIRTY_PT
     5190    pgmPoolResetDirtyPages(pVM);
    50945191#endif
    5095         IEMTlbInvalidateAllPhysical(pVCpu);
    5096         STAM_REL_COUNTER_INC(&pVCpu->pgm.s.cA20Changes);
    5097     }
    5098 }
    5099 
     5192
     5193    /** @todo pointless to write protect the physical page pointed to by RSP. */
     5194
     5195    for (PPGMRAMRANGE pRam = pVM->pgm.s.CTX_SUFF(pRamRangesX);
     5196         pRam;
     5197         pRam = pRam->CTX_SUFF(pNext))
     5198    {
     5199        uint32_t cPages = pRam->cb >> PAGE_SHIFT;
     5200        for (uint32_t iPage = 0; iPage < cPages; iPage++)
     5201        {
     5202            PPGMPAGE    pPage = &pRam->aPages[iPage];
     5203            PGMPAGETYPE enmPageType = (PGMPAGETYPE)PGM_PAGE_GET_TYPE(pPage);
     5204
     5205            if (    RT_LIKELY(enmPageType == PGMPAGETYPE_RAM)
     5206                ||  enmPageType == PGMPAGETYPE_MMIO2)
     5207            {
     5208                /*
     5209                 * A RAM page.
     5210                 */
     5211                switch (PGM_PAGE_GET_STATE(pPage))
     5212                {
     5213                    case PGM_PAGE_STATE_ALLOCATED:
     5214                        /** @todo Optimize this: Don't always re-enable write
     5215                         * monitoring if the page is known to be very busy. */
     5216                        if (PGM_PAGE_IS_WRITTEN_TO(pPage))
     5217                            PGM_PAGE_CLEAR_WRITTEN_TO(pVM, pPage);
     5218
     5219                        pgmPhysPageWriteMonitor(pVM, pPage, pRam->GCPhys + ((RTGCPHYS)iPage << PAGE_SHIFT));
     5220                        break;
     5221
     5222                    case PGM_PAGE_STATE_SHARED:
     5223                        AssertFailed();
     5224                        break;
     5225
     5226                    case PGM_PAGE_STATE_WRITE_MONITORED:    /* nothing to change. */
     5227                    default:
     5228                        break;
     5229                }
     5230            }
     5231        }
     5232    }
     5233    pgmR3PoolWriteProtectPages(pVM);
     5234    PGM_INVL_ALL_VCPU_TLBS(pVM);
     5235    for (VMCPUID idCpu = 0; idCpu < pVM->cCpus; idCpu++)
     5236        CPUMSetChangedFlags(pVM->apCpusR3[idCpu], CPUM_CHANGED_GLOBAL_TLB_FLUSH);
     5237
     5238    PGM_UNLOCK(pVM);
     5239    return rc;
     5240}
     5241
     5242/**
     5243 * Protect all physical RAM to monitor writes
     5244 *
     5245 * @returns VBox status code.
     5246 * @param   pVM         The cross context VM structure.
     5247 */
     5248VMMR3DECL(int) PGMR3PhysWriteProtectRAM(PVM pVM)
     5249{
     5250    VM_ASSERT_EMT_RETURN(pVM, VERR_VM_THREAD_NOT_EMT);
     5251
     5252    int rc = VMMR3EmtRendezvous(pVM, VMMEMTRENDEZVOUS_FLAGS_TYPE_ONCE, pgmR3PhysWriteProtectRAMRendezvous, NULL);
     5253    AssertRC(rc);
     5254    return rc;
     5255}
     5256
     5257
     5258/*********************************************************************************************************************************
     5259*   Stats.                                                                                                                       *
     5260*********************************************************************************************************************************/
     5261
     5262/**
     5263 * Query the amount of free memory inside VMMR0
     5264 *
     5265 * @returns VBox status code.
     5266 * @param   pUVM                The user mode VM handle.
     5267 * @param   pcbAllocMem         Where to return the amount of memory allocated
     5268 *                              by VMs.
     5269 * @param   pcbFreeMem          Where to return the amount of memory that is
     5270 *                              allocated from the host but not currently used
     5271 *                              by any VMs.
     5272 * @param   pcbBallonedMem      Where to return the total amount of memory
     5273 *                              currently ballooned by the VMs.
     5274 * @param   pcbSharedMem        Where to return the amount of memory that is
     5275 *                              currently shared.
     5276 */
     5277VMMR3DECL(int) PGMR3QueryGlobalMemoryStats(PUVM pUVM, uint64_t *pcbAllocMem, uint64_t *pcbFreeMem,
     5278                                           uint64_t *pcbBallonedMem, uint64_t *pcbSharedMem)
     5279{
     5280    UVM_ASSERT_VALID_EXT_RETURN(pUVM, VERR_INVALID_VM_HANDLE);
     5281    VM_ASSERT_VALID_EXT_RETURN(pUVM->pVM, VERR_INVALID_VM_HANDLE);
     5282
     5283    uint64_t cAllocPages   = 0;
     5284    uint64_t cFreePages    = 0;
     5285    uint64_t cBalloonPages = 0;
     5286    uint64_t cSharedPages  = 0;
     5287    int rc = GMMR3QueryHypervisorMemoryStats(pUVM->pVM, &cAllocPages, &cFreePages, &cBalloonPages, &cSharedPages);
     5288    AssertRCReturn(rc, rc);
     5289
     5290    if (pcbAllocMem)
     5291        *pcbAllocMem    = cAllocPages * _4K;
     5292
     5293    if (pcbFreeMem)
     5294        *pcbFreeMem     = cFreePages * _4K;
     5295
     5296    if (pcbBallonedMem)
     5297        *pcbBallonedMem = cBalloonPages * _4K;
     5298
     5299    if (pcbSharedMem)
     5300        *pcbSharedMem   = cSharedPages * _4K;
     5301
     5302    Log(("PGMR3QueryGlobalMemoryStats: all=%llx free=%llx ballooned=%llx shared=%llx\n",
     5303         cAllocPages, cFreePages, cBalloonPages, cSharedPages));
     5304    return VINF_SUCCESS;
     5305}
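
As the body above shows, all four output parameters are optional, so a caller interested in only one counter can pass NULL for the rest. A minimal, hypothetical usage sketch (logBalloonedMem is illustrative only, not part of this changeset):

    /* Hypothetical helper: log only the globally ballooned byte count. */
    static void logBalloonedMem(PUVM pUVM)
    {
        uint64_t cbBallooned = 0;
        int rc = PGMR3QueryGlobalMemoryStats(pUVM, NULL /*pcbAllocMem*/, NULL /*pcbFreeMem*/,
                                             &cbBallooned, NULL /*pcbSharedMem*/);
        if (RT_SUCCESS(rc))
            LogRel(("ballooned=%RU64 bytes\n", cbBallooned));
    }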
     5306
     5307
     5308/**
     5309 * Queries memory stats for the VM.
     5310 *
     5311 * @returns VBox status code.
     5312 * @param   pUVM                The user mode VM handle.
     5313 * @param   pcbTotalMem         Where to return the total amount of memory
     5314 *                              the VM may possibly use.
     5315 * @param   pcbPrivateMem       Where to return the amount of private memory
     5316 *                              currently allocated.
     5317 * @param   pcbSharedMem        Where to return the amount of actually shared
     5318 *                              memory currently used by the VM.
     5319 * @param   pcbZeroMem          Where to return the amount of memory backed by
     5320 *                              zero pages.
     5321 *
     5322 * @remarks The total memory is normally larger than the sum of the three
     5323 *          components.  There are two reasons for this: first, the amount of
     5324 *          shared memory reported is only what we are sure is shared, not what
     5325 *          could possibly be shared with someone.  Second, the total may
     5326 *          include some pure MMIO pages that don't go into any of the three
     5327 *          sub-counts.
     5328 *
     5329 * @todo Why do we return reused shared pages instead of anything that could
     5330 *       potentially be shared?  Doesn't this mean the first VM gets a much
     5331 *       lower number of shared pages?
     5332 */
     5333VMMR3DECL(int) PGMR3QueryMemoryStats(PUVM pUVM, uint64_t *pcbTotalMem, uint64_t *pcbPrivateMem,
     5334                                     uint64_t *pcbSharedMem, uint64_t *pcbZeroMem)
     5335{
     5336    UVM_ASSERT_VALID_EXT_RETURN(pUVM, VERR_INVALID_VM_HANDLE);
     5337    PVM pVM = pUVM->pVM;
     5338    VM_ASSERT_VALID_EXT_RETURN(pVM, VERR_INVALID_VM_HANDLE);
     5339
     5340    if (pcbTotalMem)
     5341        *pcbTotalMem    = (uint64_t)pVM->pgm.s.cAllPages            * PAGE_SIZE;
     5342
     5343    if (pcbPrivateMem)
     5344        *pcbPrivateMem  = (uint64_t)pVM->pgm.s.cPrivatePages        * PAGE_SIZE;
     5345
     5346    if (pcbSharedMem)
     5347        *pcbSharedMem   = (uint64_t)pVM->pgm.s.cReusedSharedPages   * PAGE_SIZE;
     5348
     5349    if (pcbZeroMem)
     5350        *pcbZeroMem     = (uint64_t)pVM->pgm.s.cZeroPages           * PAGE_SIZE;
     5351
     5352    Log(("PGMR3QueryMemoryStats: all=%x private=%x reused=%x zero=%x\n", pVM->pgm.s.cAllPages, pVM->pgm.s.cPrivatePages, pVM->pgm.s.cReusedSharedPages, pVM->pgm.s.cZeroPages));
     5353    return VINF_SUCCESS;
     5354}
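
A hypothetical caller pulling all four per-VM counters (printVmMemStats is illustrative only); per the remarks above, the total is expected to be at least the sum of the other three:

    /* Hypothetical helper: dump the per-VM memory statistics. */
    static void printVmMemStats(PUVM pUVM)
    {
        uint64_t cbTotal = 0, cbPrivate = 0, cbShared = 0, cbZero = 0;
        int rc = PGMR3QueryMemoryStats(pUVM, &cbTotal, &cbPrivate, &cbShared, &cbZero);
        if (RT_SUCCESS(rc))
        {
            LogRel(("total=%RU64 private=%RU64 reused-shared=%RU64 zero=%RU64\n",
                    cbTotal, cbPrivate, cbShared, cbZero));
            Assert(cbTotal >= cbPrivate + cbShared + cbZero);  /* per the @remarks */
        }
    }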
     5355
     5356
     5357
     5358/*********************************************************************************************************************************
     5359*   Chunk Mappings and Page Allocation                                                                                           *
     5360*********************************************************************************************************************************/
    51005361
    51015362/**
     
    57406001
    57416002
    5742 /**
    5743  * Frees the specified RAM page and replaces it with the ZERO page.
    5744  *
    5745  * This is used by ballooning, remapping MMIO2, RAM reset and state loading.
    5746  *
    5747  * @param   pVM             The cross context VM structure.
    5748  * @param   pReq            Pointer to the request.  This is NULL when doing a
    5749  *                          bulk free in NEM memory mode.
    5750  * @param   pcPendingPages  Where the number of pages waiting to be freed is
    5751  *                          kept.  This will normally be incremented.  This is
    5752  *                          NULL when doing a bulk free in NEM memory mode.
    5753  * @param   pPage           Pointer to the page structure.
    5754  * @param   GCPhys          The guest physical address of the page, if applicable.
    5755  * @param   enmNewType      New page type for NEM notification, since several
    5756  *                          callers will change the type upon successful return.
    5757  *
    5758  * @remarks The caller must own the PGM lock.
    5759  */
    5760 int pgmPhysFreePage(PVM pVM, PGMMFREEPAGESREQ pReq, uint32_t *pcPendingPages, PPGMPAGE pPage, RTGCPHYS GCPhys,
    5761                     PGMPAGETYPE enmNewType)
    5762 {
    5763     /*
    5764      * Assert sanity.
    5765      */
    5766     PGM_LOCK_ASSERT_OWNER(pVM);
    5767     if (RT_UNLIKELY(    PGM_PAGE_GET_TYPE(pPage) != PGMPAGETYPE_RAM
    5768                     &&  PGM_PAGE_GET_TYPE(pPage) != PGMPAGETYPE_ROM_SHADOW))
    5769     {
    5770         AssertMsgFailed(("GCPhys=%RGp pPage=%R[pgmpage]\n", GCPhys, pPage));
    5771         return VMSetError(pVM, VERR_PGM_PHYS_NOT_RAM, RT_SRC_POS, "GCPhys=%RGp type=%d", GCPhys, PGM_PAGE_GET_TYPE(pPage));
    5772     }
    5773 
    5774     /** @todo What about ballooning of large pages??! */
    5775     Assert(   PGM_PAGE_GET_PDE_TYPE(pPage) != PGM_PAGE_PDE_TYPE_PDE
    5776            && PGM_PAGE_GET_PDE_TYPE(pPage) != PGM_PAGE_PDE_TYPE_PDE_DISABLED);
    5777 
    5778     if (    PGM_PAGE_IS_ZERO(pPage)
    5779         ||  PGM_PAGE_IS_BALLOONED(pPage))
    5780         return VINF_SUCCESS;
    5781 
    5782     const uint32_t idPage = PGM_PAGE_GET_PAGEID(pPage);
    5783     Log3(("pgmPhysFreePage: idPage=%#x GCPhys=%RGp pPage=%R[pgmpage]\n", idPage, GCPhys, pPage));
    5784     if (RT_UNLIKELY(!PGM_IS_IN_NEM_MODE(pVM)
    5785                     ?    idPage == NIL_GMM_PAGEID
    5786                       ||  idPage > GMM_PAGEID_LAST
    5787                       ||  PGM_PAGE_GET_CHUNKID(pPage) == NIL_GMM_CHUNKID
    5788                     :    idPage != NIL_GMM_PAGEID))
    5789     {
    5790         AssertMsgFailed(("GCPhys=%RGp pPage=%R[pgmpage]\n", GCPhys, pPage));
    5791         return VMSetError(pVM, VERR_PGM_PHYS_INVALID_PAGE_ID, RT_SRC_POS, "GCPhys=%RGp idPage=%#x", GCPhys, idPage);
    5792     }
    5793 #ifdef VBOX_WITH_NATIVE_NEM
    5794     const RTHCPHYS HCPhysPrev = PGM_PAGE_GET_HCPHYS(pPage);
     6003/*********************************************************************************************************************************
     6004*   Other Stuff                                                                                                                  *
     6005*********************************************************************************************************************************/
     6006
     6007/**
     6008 * Sets the Address Gate 20 state.
     6009 *
     6010 * @param   pVCpu       The cross context virtual CPU structure.
     6011 * @param   fEnable     True if the gate should be enabled.
     6012 *                      False if the gate should be disabled.
     6013 */
     6014VMMDECL(void) PGMR3PhysSetA20(PVMCPU pVCpu, bool fEnable)
     6015{
     6016    LogFlow(("PGMR3PhysSetA20 %d (was %d)\n", fEnable, pVCpu->pgm.s.fA20Enabled));
     6017    if (pVCpu->pgm.s.fA20Enabled != fEnable)
     6018    {
     6019#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
     6020        PCCPUMCTX pCtx = CPUMQueryGuestCtxPtr(pVCpu);
     6021        if (   CPUMIsGuestInVmxRootMode(pCtx)
     6022            && !fEnable)
     6023        {
     6024            Log(("Cannot enter A20M mode while in VMX root mode\n"));
     6025            return;
     6026        }
    57956027#endif
    5796 
    5797     /* update page count stats. */
    5798     if (PGM_PAGE_IS_SHARED(pPage))
    5799         pVM->pgm.s.cSharedPages--;
    5800     else
    5801         pVM->pgm.s.cPrivatePages--;
    5802     pVM->pgm.s.cZeroPages++;
    5803 
    5804     /* Deal with write monitored pages. */
    5805     if (PGM_PAGE_GET_STATE(pPage) == PGM_PAGE_STATE_WRITE_MONITORED)
    5806     {
    5807         PGM_PAGE_SET_WRITTEN_TO(pVM, pPage);
    5808         pVM->pgm.s.cWrittenToPages++;
    5809     }
    5810 
    5811     /*
    5812      * pPage = ZERO page.
    5813      */
    5814     PGM_PAGE_SET_HCPHYS(pVM, pPage, pVM->pgm.s.HCPhysZeroPg);
    5815     PGM_PAGE_SET_STATE(pVM, pPage, PGM_PAGE_STATE_ZERO);
    5816     PGM_PAGE_SET_PAGEID(pVM, pPage, NIL_GMM_PAGEID);
    5817     PGM_PAGE_SET_PDE_TYPE(pVM, pPage, PGM_PAGE_PDE_TYPE_DONTCARE);
    5818     PGM_PAGE_SET_PTE_INDEX(pVM, pPage, 0);
    5819     PGM_PAGE_SET_TRACKING(pVM, pPage, 0);
    5820 
    5821     /* Flush physical page map TLB entry. */
    5822     pgmPhysInvalidatePageMapTLBEntry(pVM, GCPhys);
    5823 
    5824 #ifdef VBOX_WITH_PGM_NEM_MODE
    5825     /*
    5826      * Skip the rest if we're doing a bulk free in NEM memory mode.
    5827      */
    5828     if (!pReq)
    5829         return VINF_SUCCESS;
    5830     AssertLogRelReturn(!pVM->pgm.s.fNemMode, VERR_PGM_NOT_SUPPORTED_FOR_NEM_MODE);
     6028        pVCpu->pgm.s.fA20Enabled = fEnable;
     6029        pVCpu->pgm.s.GCPhysA20Mask = ~((RTGCPHYS)!fEnable << 20);
     6030        if (VM_IS_NEM_ENABLED(pVCpu->CTX_SUFF(pVM)))
     6031            NEMR3NotifySetA20(pVCpu, fEnable);
     6032#ifdef PGM_WITH_A20
     6033        VMCPU_FF_SET(pVCpu, VMCPU_FF_PGM_SYNC_CR3);
     6034        pgmR3RefreshShadowModeAfterA20Change(pVCpu);
     6035        HMFlushTlb(pVCpu);
    58316036#endif
    5832 
    5833 #ifdef VBOX_WITH_NATIVE_NEM
    5834     /* Notify NEM. */
    5835     /** @todo Remove this one? */
    5836     if (VM_IS_NEM_ENABLED(pVM))
    5837     {
    5838         uint8_t u2State = PGM_PAGE_GET_NEM_STATE(pPage);
    5839         NEMHCNotifyPhysPageChanged(pVM, GCPhys, HCPhysPrev, pVM->pgm.s.HCPhysZeroPg, pVM->pgm.s.pvZeroPgR3,
    5840                                    pgmPhysPageCalcNemProtection(pPage, enmNewType), enmNewType, &u2State);
    5841         PGM_PAGE_SET_NEM_STATE(pPage, u2State);
    5842     }
    5843 #else
    5844     RT_NOREF(enmNewType);
    5845 #endif
    5846 
    5847     /*
    5848      * Make sure it's not in the handy page array.
    5849      */
    5850     for (uint32_t i = pVM->pgm.s.cHandyPages; i < RT_ELEMENTS(pVM->pgm.s.aHandyPages); i++)
    5851     {
    5852         if (pVM->pgm.s.aHandyPages[i].idPage == idPage)
    5853         {
    5854             pVM->pgm.s.aHandyPages[i].idPage = NIL_GMM_PAGEID;
    5855             break;
    5856         }
    5857         if (pVM->pgm.s.aHandyPages[i].idSharedPage == idPage)
    5858         {
    5859             pVM->pgm.s.aHandyPages[i].idSharedPage = NIL_GMM_PAGEID;
    5860             break;
    5861         }
    5862     }
    5863 
    5864     /*
    5865      * Push it onto the page array.
    5866      */
    5867     uint32_t iPage = *pcPendingPages;
    5868     Assert(iPage < PGMPHYS_FREE_PAGE_BATCH_SIZE);
    5869     *pcPendingPages += 1;
    5870 
    5871     pReq->aPages[iPage].idPage = idPage;
    5872 
    5873     if (iPage + 1 < PGMPHYS_FREE_PAGE_BATCH_SIZE)
    5874         return VINF_SUCCESS;
    5875 
    5876     /*
    5877      * Flush the pages.
    5878      */
    5879     int rc = GMMR3FreePagesPerform(pVM, pReq, PGMPHYS_FREE_PAGE_BATCH_SIZE);
    5880     if (RT_SUCCESS(rc))
    5881     {
    5882         GMMR3FreePagesRePrep(pVM, pReq, PGMPHYS_FREE_PAGE_BATCH_SIZE, GMMACCOUNT_BASE);
    5883         *pcPendingPages = 0;
    5884     }
    5885     return rc;
    5886 }
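
The function above only queues a page in the request; the caller drives the batching. A rough sketch of the surrounding pattern, assuming the GMMR3FreePagesPrepare/Perform/Cleanup API is used the way other PGMPhys callers use it (freeWholeRamRange is hypothetical):

    /* Hypothetical sketch of the batch-free pattern around pgmPhysFreePage. */
    static int freeWholeRamRange(PVM pVM, PPGMRAMRANGE pRam)
    {
        PGMMFREEPAGESREQ pReq;
        uint32_t         cPendingPages = 0;
        int rc = GMMR3FreePagesPrepare(pVM, &pReq, PGMPHYS_FREE_PAGE_BATCH_SIZE, GMMACCOUNT_BASE);
        AssertLogRelRCReturn(rc, rc);

        uint32_t const cPages = pRam->cb >> PAGE_SHIFT;
        for (uint32_t iPage = 0; iPage < cPages && RT_SUCCESS(rc); iPage++)
            rc = pgmPhysFreePage(pVM, pReq, &cPendingPages, &pRam->aPages[iPage],
                                 pRam->GCPhys + ((RTGCPHYS)iPage << PAGE_SHIFT), PGMPAGETYPE_RAM);

        /* pgmPhysFreePage flushes full batches itself; only the remainder is left. */
        if (RT_SUCCESS(rc) && cPendingPages)
            rc = GMMR3FreePagesPerform(pVM, pReq, cPendingPages);
        GMMR3FreePagesCleanup(pReq);
        return rc;
    }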
    5887 
    5888 
    5889 /**
    5890  * Converts a GC physical address to a HC ring-3 pointer, with some
    5891  * additional checks.
    5892  *
    5893  * @returns VBox status code.
    5894  * @retval  VINF_SUCCESS on success.
    5895  * @retval  VINF_PGM_PHYS_TLB_CATCH_WRITE and *ppv set if the page has a write
    5896  *          access handler of some kind.
    5897  * @retval  VERR_PGM_PHYS_TLB_CATCH_ALL if the page has a handler catching all
    5898  *          accesses or is odd in any way.
    5899  * @retval  VERR_PGM_PHYS_TLB_UNASSIGNED if the page doesn't exist.
    5900  *
    5901  * @param   pVM         The cross context VM structure.
    5902  * @param   GCPhys      The GC physical address to convert.  Since this is only
    5903  *                      used for filling the REM TLB, the A20 mask must be
    5904  *                      applied before calling this API.
    5905  * @param   fWritable   Whether write access is required.
    5906  * @param   ppv         Where to store the pointer corresponding to GCPhys on
    5907  *                      success.
    5908  */
    5909 VMMR3DECL(int) PGMR3PhysTlbGCPhys2Ptr(PVM pVM, RTGCPHYS GCPhys, bool fWritable, void **ppv)
    5910 {
    5911     PGM_LOCK_VOID(pVM);
    5912     PGM_A20_ASSERT_MASKED(VMMGetCpu(pVM), GCPhys);
    5913 
    5914     PPGMRAMRANGE pRam;
    5915     PPGMPAGE pPage;
    5916     int rc = pgmPhysGetPageAndRangeEx(pVM, GCPhys, &pPage, &pRam);
    5917     if (RT_SUCCESS(rc))
    5918     {
    5919         if (PGM_PAGE_IS_BALLOONED(pPage))
    5920             rc = VINF_PGM_PHYS_TLB_CATCH_WRITE;
    5921         else if (!PGM_PAGE_HAS_ANY_HANDLERS(pPage))
    5922             rc = VINF_SUCCESS;
    5923         else
    5924         {
    5925             if (PGM_PAGE_HAS_ACTIVE_ALL_HANDLERS(pPage)) /* catches MMIO */
    5926                 rc = VERR_PGM_PHYS_TLB_CATCH_ALL;
    5927             else if (PGM_PAGE_HAS_ACTIVE_HANDLERS(pPage))
    5928             {
    5929                 /** @todo Handle TLB loads of virtual handlers so ./test.sh can be made to work
    5930                  *        in -norawr0 mode. */
    5931                 if (fWritable)
    5932                     rc = VINF_PGM_PHYS_TLB_CATCH_WRITE;
    5933             }
    5934             else
    5935             {
    5936                 /* Temporarily disabled physical handler(s): since the recompiler
    5937                    doesn't get notified when one is reset, we have to pretend it is
    5938                    operating normally. */
    5939                 if (pgmHandlerPhysicalIsAll(pVM, GCPhys))
    5940                     rc = VERR_PGM_PHYS_TLB_CATCH_ALL;
    5941                 else
    5942                     rc = VINF_PGM_PHYS_TLB_CATCH_WRITE;
    5943             }
    5944         }
    5945         if (RT_SUCCESS(rc))
    5946         {
    5947             int rc2;
    5948 
    5949             /* Make sure what we return is writable. */
    5950             if (fWritable)
    5951                 switch (PGM_PAGE_GET_STATE(pPage))
    5952                 {
    5953                     case PGM_PAGE_STATE_ALLOCATED:
    5954                         break;
    5955                     case PGM_PAGE_STATE_BALLOONED:
    5956                         AssertFailed();
    5957                         break;
    5958                     case PGM_PAGE_STATE_ZERO:
    5959                     case PGM_PAGE_STATE_SHARED:
    5960                         if (rc == VINF_PGM_PHYS_TLB_CATCH_WRITE)
    5961                             break;
    5962                         RT_FALL_THRU();
    5963                     case PGM_PAGE_STATE_WRITE_MONITORED:
    5964                         rc2 = pgmPhysPageMakeWritable(pVM, pPage, GCPhys & ~(RTGCPHYS)PAGE_OFFSET_MASK);
    5965                         AssertLogRelRCReturn(rc2, rc2);
    5966                         break;
    5967                 }
    5968 
    5969             /* Get a ring-3 mapping of the address. */
    5970             PPGMPAGER3MAPTLBE pTlbe;
    5971             rc2 = pgmPhysPageQueryTlbe(pVM, GCPhys, &pTlbe);
    5972             AssertLogRelRCReturn(rc2, rc2);
    5973             *ppv = (void *)((uintptr_t)pTlbe->pv | (uintptr_t)(GCPhys & PAGE_OFFSET_MASK));
    5974             /** @todo mapping/locking hell; this isn't horribly efficient since
    5975              *        pgmPhysPageLoadIntoTlb will repeat the lookup we've done here. */
    5976 
    5977             Log6(("PGMR3PhysTlbGCPhys2Ptr: GCPhys=%RGp rc=%Rrc pPage=%R[pgmpage] *ppv=%p\n", GCPhys, rc, pPage, *ppv));
    5978         }
    5979         else
    5980             Log6(("PGMR3PhysTlbGCPhys2Ptr: GCPhys=%RGp rc=%Rrc pPage=%R[pgmpage]\n", GCPhys, rc, pPage));
    5981 
    5982         /* else: handler catching all access, no pointer returned. */
    5983     }
    5984     else
    5985         rc = VERR_PGM_PHYS_TLB_UNASSIGNED;
    5986 
    5987     PGM_UNLOCK(pVM);
    5988     return rc;
    5989 }
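
To make the return-code contract above concrete, here is a small hypothetical helper (canWriteDirect is not in the source) that classifies the result for a write access:

    /* Hypothetical helper: may the caller scribble on the page directly? */
    static bool canWriteDirect(PVM pVM, RTGCPHYS GCPhys, void **ppv)
    {
        int rc = PGMR3PhysTlbGCPhys2Ptr(pVM, GCPhys, true /*fWritable*/, ppv);
        /* VINF_SUCCESS:                  *ppv is directly writable.
           VINF_PGM_PHYS_TLB_CATCH_WRITE: *ppv is valid, but writes must go
                                          through PGM so handlers fire.
           VERR_PGM_PHYS_TLB_CATCH_ALL /
           VERR_PGM_PHYS_TLB_UNASSIGNED:  no usable pointer at all. */
        return rc == VINF_SUCCESS;
    }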
    5990 
     6037        IEMTlbInvalidateAllPhysical(pVCpu);
     6038        STAM_REL_COUNTER_INC(&pVCpu->pgm.s.cA20Changes);
     6039    }
     6040}
     6041
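
As a footnote on the A20 mask expression used in PGMR3PhysSetA20, ~((RTGCPHYS)!fEnable << 20) yields an identity mask when the gate is enabled and clears only bit 20 when it is disabled, reproducing the 1 MiB wrap-around of real hardware. A standalone sketch (the RTGCPHYS typedef here is a simplified stand-in for the real one):

    #include <assert.h>
    #include <stdint.h>

    typedef uint64_t RTGCPHYS;   /* simplified stand-in for the real typedef */

    int main(void)
    {
        /* A20 disabled (fEnable == 0): bit 20 is forced to zero, so the
           1 MiB address aliases back to 0. */
        RTGCPHYS fMask = ~((RTGCPHYS)!0 << 20);
        assert((UINT64_C(0x100000) & fMask) == 0);

        /* A20 enabled (fEnable == 1): the mask is all ones and addresses
           pass through unchanged. */
        fMask = ~((RTGCPHYS)!1 << 20);
        assert((UINT64_C(0x100000) & fMask) == UINT64_C(0x100000));
        return 0;
    }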