VirtualBox

Changeset 38953 in vbox for trunk/src


Ignore:
Timestamp:
Oct 6, 2011 8:49:36 AM (14 years ago)
Author:
vboxsync
svn:sync-xref-src-repo-rev:
74324
Message:

PGM: Attempt at fixing the VERR_MAP_FAILED during state save problem on 32-bit hosts when assigning lots of memory to the guest. PGM should lock down guest RAM pages before use and release them afterwards like everyone else. Still quite some stuff left to do there, so I've devised a little hack for tracking unlocked mappings and using this as input when deciding to do async or sync chunk unmapping at save/load time. See xtracker #5912 and public ticket 7929.

Location:
trunk/src/VBox/VMM
Files:
12 edited

Legend:

Unmodified
Added
Removed
  • trunk/src/VBox/VMM/VMMAll/PGMAll.cpp

    r38707 r38953  
    14521452    {
    14531453        RTHCPTR HCPtrGuestCR3;
    1454         rc = pgmPhysGCPhys2CCPtrInternal(pVM, pPage, GCPhysCR3, (void **)&HCPtrGuestCR3);
     1454        rc = pgmPhysGCPhys2CCPtrInternalDepr(pVM, pPage, GCPhysCR3, (void **)&HCPtrGuestCR3);
    14551455        if (RT_SUCCESS(rc))
    14561456        {
     
    14941494    {
    14951495        RTHCPTR HCPtrGuestCR3;
    1496         rc = pgmPhysGCPhys2CCPtrInternal(pVM, pPage, GCPhysCR3, (void **)&HCPtrGuestCR3);
     1496        rc = pgmPhysGCPhys2CCPtrInternalDepr(pVM, pPage, GCPhysCR3, (void **)&HCPtrGuestCR3);
    14971497        if (RT_SUCCESS(rc))
    14981498        {
     
    15441544        RTHCPTR     HCPtr       = NIL_RTHCPTR;
    15451545#if !defined(IN_RC) && !defined(VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0)
    1546         rc = pgmPhysGCPhys2CCPtrInternal(pVM, pPage, GCPhys, &HCPtr);
     1546        rc = pgmPhysGCPhys2CCPtrInternalDepr(pVM, pPage, GCPhys, &HCPtr);
    15471547        AssertRC(rc);
    15481548#endif
     
    16041604    {
    16051605        RTHCPTR HCPtrGuestCR3;
    1606         rc = pgmPhysGCPhys2CCPtrInternal(pVM, pPage, GCPhysCR3, (void **)&HCPtrGuestCR3);
     1606        rc = pgmPhysGCPhys2CCPtrInternalDepr(pVM, pPage, GCPhysCR3, (void **)&HCPtrGuestCR3);
    16071607        if (RT_SUCCESS(rc))
    16081608        {
     
    23372337VMMDECL(bool) PGMIsLockOwner(PVM pVM)
    23382338{
    2339     return PDMCritSectIsOwner(&pVM->pgm.s.CritSect);
     2339    return PDMCritSectIsOwner(&pVM->pgm.s.CritSectX);
    23402340}
    23412341
     
    23652365int pgmLock(PVM pVM)
    23662366{
    2367     int rc = PDMCritSectEnter(&pVM->pgm.s.CritSect, VERR_SEM_BUSY);
     2367    int rc = PDMCritSectEnter(&pVM->pgm.s.CritSectX, VERR_SEM_BUSY);
    23682368#if defined(IN_RC) || defined(IN_RING0)
    23692369    if (rc == VERR_SEM_BUSY)
     
    23832383void pgmUnlock(PVM pVM)
    23842384{
    2385     PDMCritSectLeave(&pVM->pgm.s.CritSect);
     2385    uint32_t cDeprecatedPageLocks = pVM->pgm.s.cDeprecatedPageLocks;
     2386    pVM->pgm.s.cDeprecatedPageLocks = 0;
     2387    int rc = PDMCritSectLeave(&pVM->pgm.s.CritSectX);
     2388    if (rc == VINF_SEM_NESTED)
     2389        pVM->pgm.s.cDeprecatedPageLocks = cDeprecatedPageLocks;
    23862390}
    23872391
  • trunk/src/VBox/VMM/VMMAll/PGMAllBth.h

    r38707 r38953  
    44254425    int rc = VINF_SUCCESS;
    44264426# else
    4427     int rc = pgmPhysGCPhys2CCPtrInternal(pVM, pPageCR3, GCPhysCR3 & GST_CR3_PAGE_MASK, (void **)&HCPtrGuestCR3); /** @todo r=bird: This GCPhysCR3 masking isn't necessary. */
     4427    int rc = pgmPhysGCPhys2CCPtrInternalDepr(pVM, pPageCR3, GCPhysCR3 & GST_CR3_PAGE_MASK, (void **)&HCPtrGuestCR3); /** @todo r=bird: This GCPhysCR3 masking isn't necessary. */
    44284428# endif
    44294429    pgmUnlock(pVM);
     
    44734473                    int rc2 = VINF_SUCCESS;
    44744474#  else
    4475                     int rc2 = pgmPhysGCPhys2CCPtrInternal(pVM, pPage, GCPhys, (void **)&HCPtr);
     4475                    int rc2 = pgmPhysGCPhys2CCPtrInternalDepr(pVM, pPage, GCPhys, (void **)&HCPtr);
    44764476#  endif
    44774477                    pgmUnlock(pVM);
  • trunk/src/VBox/VMM/VMMAll/PGMAllPhys.cpp

    r37360 r38953  
    632632    pVM->pgm.s.aHandyPages[iHandyPage].HCPhysGCPhys = GCPhys & ~(RTGCPHYS)PAGE_OFFSET_MASK;
    633633
    634     const void *pvSharedPage = NULL;
    635 
     634    void *pvSharedPage = NULL;
    636635    if (PGM_PAGE_IS_SHARED(pPage))
    637636    {
     
    645644        pVM->pgm.s.cSharedPages--;
    646645
    647         /* Grab the address of the page so we can make a copy later on. */
    648         rc = pgmPhysGCPhys2CCPtrInternalReadOnly(pVM, pPage, GCPhys, &pvSharedPage);
     646        /* Grab the address of the page so we can make a copy later on. (safe) */
     647        rc = pgmPhysPageMap(pVM, pPage, GCPhys, &pvSharedPage);
    649648        AssertRC(rc);
    650649    }
     
    670669    {
    671670        /* Get the virtual address of the new page. */
    672         void *pvNewPage;
    673         rc = pgmPhysGCPhys2CCPtrInternal(pVM, pPage, GCPhys, &pvNewPage);
    674         AssertRC(rc);
    675         if (rc == VINF_SUCCESS)
    676         {
    677             /** @todo todo write ASMMemCopyPage */
    678             memcpy(pvNewPage, pvSharedPage, PAGE_SIZE);
     671        PGMPAGEMAPLOCK  PgMpLck;
     672        void           *pvNewPage;
     673        rc = pgmPhysGCPhys2CCPtrInternal(pVM, pPage, GCPhys, &pvNewPage, &PgMpLck); AssertRC(rc);
     674        if (RT_SUCCESS(rc))
     675        {
     676            memcpy(pvNewPage, pvSharedPage, PAGE_SIZE); /** @todo todo write ASMMemCopyPage */
     677            pgmPhysReleaseInternalPageMappingLock(pVM, &PgMpLck);
    679678        }
    680679    }
     
    922921 *
    923922 * @remarks Called from within the PGM critical section.  The mapping is only
    924  *          valid while your inside this section.
     923 *          valid while you are inside this section.
    925924 */
    926925int pgmPhysPageMapByPageID(PVM pVM, uint32_t idPage, RTHCPHYS HCPhys, void **ppv)
     
    11201119 *
    11211120 * @remarks Called from within the PGM critical section.  The mapping is only
    1122  *          valid while your inside this section.
     1121 *          valid while you are inside this section.
    11231122 */
    11241123int pgmPhysPageMakeWritableAndMap(PVM pVM, PPGMPAGE pPage, RTGCPHYS GCPhys, void **ppv)
     
    11561155 *
    11571156 * @remarks Called from within the PGM critical section.  The mapping is only
    1158  *          valid while your inside this section.
     1157 *          valid while you are inside this section.
    11591158 */
    11601159int pgmPhysPageMap(PVM pVM, PPGMPAGE pPage, RTGCPHYS GCPhys, void **ppv)
     
    11841183 *
    11851184 * @remarks Called from within the PGM critical section.  The mapping is only
    1186  *          valid while your inside this section.
     1185 *          valid while you are inside this section.
    11871186 */
    11881187int pgmPhysPageMapReadOnly(PVM pVM, PPGMPAGE pPage, RTGCPHYS GCPhys, void const **ppv)
     
    12921291 *
    12931292 * @internal
    1294  */
    1295 int pgmPhysGCPhys2CCPtrInternal(PVM pVM, PPGMPAGE pPage, RTGCPHYS GCPhys, void **ppv)
     1293 * @deprecated Use pgmPhysGCPhys2CCPtrInternalEx.
     1294 */
     1295int pgmPhysGCPhys2CCPtrInternalDepr(PVM pVM, PPGMPAGE pPage, RTGCPHYS GCPhys, void **ppv)
    12961296{
    12971297    int rc;
    12981298    AssertReturn(pPage, VERR_INTERNAL_ERROR);
    12991299    PGM_LOCK_ASSERT_OWNER(pVM);
     1300    pVM->pgm.s.cDeprecatedPageLocks++;
    13001301
    13011302    /*
     
    13331334}
    13341335
    1335 
    1336 /**
    1337  * Internal version of PGMPhysGCPhys2CCPtrReadOnly that expects the caller to
    1338  * own the PGM lock and therefore not need to lock the mapped page.
     1336#if !defined(IN_RC) && !defined(VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0)
     1337
     1338/**
     1339 * Locks a page mapping for writing.
     1340 *
     1341 * @param   pVM                 The VM handle.
     1342 * @param   pPage               The page.
     1343 * @param   pTlbe               The mapping TLB entry for the page.
     1344 * @param   pLock               The lock structure (output).
     1345 */
     1346DECLINLINE(void) pgmPhysPageMapLockForWriting(PVM pVM, PPGMPAGE pPage, PPGMPAGEMAPTLBE pTlbe, PPGMPAGEMAPLOCK pLock)
     1347{
     1348    PPGMPAGEMAP pMap = pTlbe->pMap;
     1349    if (pMap)
     1350        pMap->cRefs++;
     1351
     1352    unsigned cLocks = PGM_PAGE_GET_WRITE_LOCKS(pPage);
     1353    if (RT_LIKELY(cLocks < PGM_PAGE_MAX_LOCKS - 1))
     1354    {
     1355        if (cLocks == 0)
     1356            pVM->pgm.s.cWriteLockedPages++;
     1357        PGM_PAGE_INC_WRITE_LOCKS(pPage);
     1358    }
     1359    else if (cLocks != PGM_PAGE_MAX_LOCKS)
     1360    {
     1361        PGM_PAGE_INC_WRITE_LOCKS(pPage);
     1362        AssertMsgFailed(("%R[pgmpage] is entering permanent write locked state!\n", pPage));
     1363        if (pMap)
     1364            pMap->cRefs++; /* Extra ref to prevent it from going away. */
     1365    }
     1366
     1367    pLock->uPageAndType = (uintptr_t)pPage | PGMPAGEMAPLOCK_TYPE_WRITE;
     1368    pLock->pvMap = pMap;
     1369}
     1370
     1371/**
     1372 * Locks a page mapping for reading.
     1373 *
     1374 * @param   pVM                 The VM handle.
     1375 * @param   pPage               The page.
     1376 * @param   pTlbe               The mapping TLB entry for the page.
     1377 * @param   pLock               The lock structure (output).
     1378 */
     1379DECLINLINE(void) pgmPhysPageMapLockForReading(PVM pVM, PPGMPAGE pPage, PPGMPAGEMAPTLBE pTlbe, PPGMPAGEMAPLOCK pLock)
     1380{
     1381    PPGMPAGEMAP pMap = pTlbe->pMap;
     1382    if (pMap)
     1383        pMap->cRefs++;
     1384
     1385    unsigned cLocks = PGM_PAGE_GET_READ_LOCKS(pPage);
     1386    if (RT_LIKELY(cLocks < PGM_PAGE_MAX_LOCKS - 1))
     1387    {
     1388        if (cLocks == 0)
     1389            pVM->pgm.s.cReadLockedPages++;
     1390        PGM_PAGE_INC_READ_LOCKS(pPage);
     1391    }
     1392    else if (cLocks != PGM_PAGE_MAX_LOCKS)
     1393    {
     1394        PGM_PAGE_INC_READ_LOCKS(pPage);
     1395        AssertMsgFailed(("%R[pgmpage] is entering permanent read locked state!\n", pPage));
     1396        if (pMap)
     1397            pMap->cRefs++; /* Extra ref to prevent it from going away. */
     1398    }
     1399
     1400    pLock->uPageAndType = (uintptr_t)pPage | PGMPAGEMAPLOCK_TYPE_READ;
     1401    pLock->pvMap = pMap;
     1402}
     1403
     1404#endif /* !IN_RC && !VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0 */
     1405
     1406
     1407/**
     1408 * Internal version of PGMPhysGCPhys2CCPtr that expects the caller to
     1409 * own the PGM lock and have access to the page structure.
    13391410 *
    13401411 * @returns VBox status code.
     
    13471418 * @param   pPage       Pointer to the PGMPAGE structure for the page.
    13481419 * @param   ppv         Where to store the address corresponding to GCPhys.
     1420 * @param   pLock       Where to store the lock information that
     1421 *                      pgmPhysReleaseInternalPageMappingLock needs.
    13491422 *
    13501423 * @internal
    13511424 */
    1352 int pgmPhysGCPhys2CCPtrInternalReadOnly(PVM pVM, PPGMPAGE pPage, RTGCPHYS GCPhys, const void **ppv)
     1425int pgmPhysGCPhys2CCPtrInternal(PVM pVM, PPGMPAGE pPage, RTGCPHYS GCPhys, void **ppv, PPGMPAGEMAPLOCK pLock)
     1426{
     1427    int rc;
     1428    AssertReturn(pPage, VERR_INTERNAL_ERROR);
     1429    PGM_LOCK_ASSERT_OWNER(pVM);
     1430
     1431    /*
     1432     * Make sure the page is writable.
     1433     */
     1434    if (RT_UNLIKELY(PGM_PAGE_GET_STATE(pPage) != PGM_PAGE_STATE_ALLOCATED))
     1435    {
     1436        rc = pgmPhysPageMakeWritable(pVM, pPage, GCPhys);
     1437        if (RT_FAILURE(rc))
     1438            return rc;
     1439        AssertMsg(rc == VINF_SUCCESS || rc == VINF_PGM_SYNC_CR3 /* not returned */, ("%Rrc\n", rc));
     1440    }
     1441    Assert(PGM_PAGE_GET_HCPHYS(pPage) != 0);
     1442
     1443    /*
     1444     * Do the job.
     1445     */
     1446#if defined(IN_RC) || defined(VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0)
     1447    void *pv;
     1448    PVMCPU pVCpu = VMMGetCpu(pVM);
     1449    rc = pgmRZDynMapHCPageInlined(pVCpu,
     1450                                  PGM_PAGE_GET_HCPHYS(pPage),
     1451                                  &pv
     1452                                  RTLOG_COMMA_SRC_POS);
     1453    if (RT_FAILURE(rc))
     1454        return rc;
     1455    *ppv = (void *)((uintptr_t)pv | (uintptr_t)(GCPhys & PAGE_OFFSET_MASK));
     1456    pLock->pvPage = pv;
     1457    pLock->pVCpu  = pVCpu;
     1458
     1459#else
     1460    PPGMPAGEMAPTLBE pTlbe;
     1461    rc = pgmPhysPageQueryTlbeWithPage(pVM, pPage, GCPhys, &pTlbe);
     1462    if (RT_FAILURE(rc))
     1463        return rc;
     1464    pgmPhysPageMapLockForWriting(pVM, pPage, pTlbe, pLock);
     1465    *ppv = (void *)((uintptr_t)pTlbe->pv | (uintptr_t)(GCPhys & PAGE_OFFSET_MASK));
     1466#endif
     1467    return VINF_SUCCESS;
     1468}
     1469
     1470
     1471/**
     1472 * Internal version of PGMPhysGCPhys2CCPtrReadOnly that expects the caller to
     1473 * own the PGM lock and have access to the page structure.
     1474 *
     1475 * @returns VBox status code.
     1476 * @retval  VINF_SUCCESS on success.
      1477 * @retval  VERR_PGM_PHYS_PAGE_RESERVED if it's a valid page but has no physical backing.
     1478 * @retval  VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS if it's not a valid physical address.
     1479 *
     1480 * @param   pVM         The VM handle.
     1481 * @param   GCPhys      The guest physical address of the page that should be mapped.
     1482 * @param   pPage       Pointer to the PGMPAGE structure for the page.
     1483 * @param   ppv         Where to store the address corresponding to GCPhys.
     1484 * @param   pLock       Where to store the lock information that
     1485 *                      pgmPhysReleaseInternalPageMappingLock needs.
     1486 *
     1487 * @internal
     1488 */
     1489int pgmPhysGCPhys2CCPtrInternalReadOnly(PVM pVM, PPGMPAGE pPage, RTGCPHYS GCPhys, const void **ppv, PPGMPAGEMAPLOCK pLock)
    13531490{
    13541491    AssertReturn(pPage, VERR_INTERNAL_ERROR);
     
    13571494
    13581495    /*
    1359      * Get the mapping address.
     1496     * Do the job.
    13601497     */
    13611498#if defined(IN_RC) || defined(VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0)
    13621499    void *pv;
    1363     int rc = pgmRZDynMapHCPageInlined(VMMGetCpu(pVM),
     1500    PVMCPU pVCpu = VMMGetCpu(pVM);
     1501    int rc = pgmRZDynMapHCPageInlined(pVCpu,
    13641502                                      PGM_PAGE_GET_HCPHYS(pPage),
    13651503                                      &pv
     
    13681506        return rc;
    13691507    *ppv = (void *)((uintptr_t)pv | (uintptr_t)(GCPhys & PAGE_OFFSET_MASK));
     1508    pLock->pvPage = pv;
     1509    pLock->pVCpu  = pVCpu;
     1510
    13701511#else
    13711512    PPGMPAGEMAPTLBE pTlbe;
     
    13731514    if (RT_FAILURE(rc))
    13741515        return rc;
     1516    pgmPhysPageMapLockForReading(pVM, pPage, pTlbe, pLock);
    13751517    *ppv = (void *)((uintptr_t)pTlbe->pv | (uintptr_t)(GCPhys & PAGE_OFFSET_MASK));
    13761518#endif
     
    13821524 * Requests the mapping of a guest page into the current context.
    13831525 *
    1384  * This API should only be used for very short term, as it will consume
    1385  * scarse resources (R0 and GC) in the mapping cache. When you're done
    1386  * with the page, call PGMPhysReleasePageMappingLock() ASAP to release it.
      1526 * This API should only be used for very short term, as it will consume scarce
     1527 * resources (R0 and GC) in the mapping cache. When you're done with the page,
     1528 * call PGMPhysReleasePageMappingLock() ASAP to release it.
    13871529 *
    13881530 * This API will assume your intention is to write to the page, and will
     
    13961538 *
    13971539 * @param   pVM         The VM handle.
    1398  * @param   GCPhys      The guest physical address of the page that should be mapped.
     1540 * @param   GCPhys      The guest physical address of the page that should be
     1541 *                      mapped.
    13991542 * @param   ppv         Where to store the address corresponding to GCPhys.
    1400  * @param   pLock       Where to store the lock information that PGMPhysReleasePageMappingLock needs.
     1543 * @param   pLock       Where to store the lock information that
     1544 *                      PGMPhysReleasePageMappingLock needs.
    14011545 *
    14021546 * @remarks The caller is responsible for dealing with access handlers.
    14031547 * @todo    Add an informational return code for pages with access handlers?
    14041548 *
    1405  * @remark  Avoid calling this API from within critical sections (other than the
    1406  *          PGM one) because of the deadlock risk. External threads may need to
    1407  *          delegate jobs to the EMTs.
     1549 * @remark  Avoid calling this API from within critical sections (other than
     1550 *          the PGM one) because of the deadlock risk. External threads may
     1551 *          need to delegate jobs to the EMTs.
     1552 * @remarks Only one page is mapped!  Make no assumption about what's after or
     1553 *          before the returned page!
    14081554 * @thread  Any thread.
    14091555 */
     
    14461592
    14471593#else  /* IN_RING3 || IN_RING0 */
    1448     /* !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! */
    1449     /** @todo : This can be dangerous if abused for more than one page; the ring-3 mapping is only valid for ranges that do NOT cross a chunk boundary.   */
    1450     /* !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! */
    1451 
    14521594    /*
    14531595     * Query the Physical TLB entry for the page (may fail).
     
    14761618             * Now, just perform the locking and calculate the return address.
    14771619             */
    1478             PPGMPAGEMAP pMap = pTlbe->pMap;
    1479             if (pMap)
    1480                 pMap->cRefs++;
    1481 
    1482             unsigned cLocks = PGM_PAGE_GET_WRITE_LOCKS(pPage);
    1483             if (RT_LIKELY(cLocks < PGM_PAGE_MAX_LOCKS - 1))
    1484             {
    1485                 if (cLocks == 0)
    1486                     pVM->pgm.s.cWriteLockedPages++;
    1487                 PGM_PAGE_INC_WRITE_LOCKS(pPage);
    1488             }
    1489             else if (cLocks != PGM_PAGE_GET_WRITE_LOCKS(pPage))
    1490             {
    1491                 PGM_PAGE_INC_WRITE_LOCKS(pPage);
    1492                 AssertMsgFailed(("%RGp / %R[pgmpage] is entering permanent write locked state!\n", GCPhys, pPage));
    1493                 if (pMap)
    1494                     pMap->cRefs++; /* Extra ref to prevent it from going away. */
    1495             }
    1496 
     1620            pgmPhysPageMapLockForWriting(pVM, pPage, pTlbe, pLock);
    14971621            *ppv = (void *)((uintptr_t)pTlbe->pv | (uintptr_t)(GCPhys & PAGE_OFFSET_MASK));
    1498             pLock->uPageAndType = (uintptr_t)pPage | PGMPAGEMAPLOCK_TYPE_WRITE;
    1499             pLock->pvMap = pMap;
    15001622        }
    15011623    }
     
    15101632 * Requests the mapping of a guest page into the current context.
    15111633 *
    1512  * This API should only be used for very short term, as it will consume
    1513  * scarse resources (R0 and GC) in the mapping cache. When you're done
    1514  * with the page, call PGMPhysReleasePageMappingLock() ASAP to release it.
      1634 * This API should only be used for very short term, as it will consume scarce
     1635 * resources (R0 and GC) in the mapping cache.  When you're done with the page,
     1636 * call PGMPhysReleasePageMappingLock() ASAP to release it.
    15151637 *
    15161638 * @returns VBox status code.
     
    15201642 *
    15211643 * @param   pVM         The VM handle.
    1522  * @param   GCPhys      The guest physical address of the page that should be mapped.
     1644 * @param   GCPhys      The guest physical address of the page that should be
     1645 *                      mapped.
    15231646 * @param   ppv         Where to store the address corresponding to GCPhys.
    1524  * @param   pLock       Where to store the lock information that PGMPhysReleasePageMappingLock needs.
     1647 * @param   pLock       Where to store the lock information that
     1648 *                      PGMPhysReleasePageMappingLock needs.
    15251649 *
    15261650 * @remarks The caller is responsible for dealing with access handlers.
    15271651 * @todo    Add an informational return code for pages with access handlers?
    15281652 *
    1529  * @remark  Avoid calling this API from within critical sections (other than
     1653 * @remarks Avoid calling this API from within critical sections (other than
    15301654 *          the PGM one) because of the deadlock risk.
     1655 * @remarks Only one page is mapped!  Make no assumption about what's after or
     1656 *          before the returned page!
    15311657 * @thread  Any thread.
    15321658 */
     
    15671693
    15681694#else  /* IN_RING3 || IN_RING0 */
    1569 
    1570     /* !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! */
    1571     /** @todo : This can be dangerous if abused for more than one page; the ring-3 mapping is only valid for ranges that do NOT cross a chunk boundary.   */
    1572     /* !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! */
    1573 
    15741695    /*
    15751696     * Query the Physical TLB entry for the page (may fail).
     
    15881709             * Now, just perform the locking and calculate the return address.
    15891710             */
    1590             PPGMPAGEMAP pMap = pTlbe->pMap;
    1591             if (pMap)
    1592                 pMap->cRefs++;
    1593 
    1594             unsigned cLocks = PGM_PAGE_GET_READ_LOCKS(pPage);
    1595             if (RT_LIKELY(cLocks < PGM_PAGE_MAX_LOCKS - 1))
    1596             {
    1597                 if (cLocks == 0)
    1598                     pVM->pgm.s.cReadLockedPages++;
    1599                 PGM_PAGE_INC_READ_LOCKS(pPage);
    1600             }
    1601             else if (cLocks != PGM_PAGE_GET_READ_LOCKS(pPage))
    1602             {
    1603                 PGM_PAGE_INC_READ_LOCKS(pPage);
    1604                 AssertMsgFailed(("%RGp / %R[pgmpage] is entering permanent readonly locked state!\n", GCPhys, pPage));
    1605                 if (pMap)
    1606                     pMap->cRefs++; /* Extra ref to prevent it from going away. */
    1607             }
    1608 
     1711            pgmPhysPageMapLockForReading(pVM, pPage, pTlbe, pLock);
    16091712            *ppv = (void *)((uintptr_t)pTlbe->pv | (uintptr_t)(GCPhys & PAGE_OFFSET_MASK));
    1610             pLock->uPageAndType = (uintptr_t)pPage | PGMPAGEMAPLOCK_TYPE_READ;
    1611             pLock->pvMap = pMap;
    16121713        }
    16131714    }
     
    17641865    pgmUnlock(pVM);
    17651866#endif /* IN_RING3 */
     1867}
     1868
     1869
     1870/**
     1871 * Release the internal mapping of a guest page.
     1872 *
     1873 * This is the counter part of pgmPhysGCPhys2CCPtrInternalEx and
     1874 * pgmPhysGCPhys2CCPtrInternalReadOnly.
     1875 *
     1876 * @param   pVM         The VM handle.
     1877 * @param   pLock       The lock structure initialized by the mapping function.
     1878 *
     1879 * @remarks Caller must hold the PGM lock.
     1880 */
     1881void pgmPhysReleaseInternalPageMappingLock(PVM pVM, PPGMPAGEMAPLOCK pLock)
     1882{
     1883    PGM_LOCK_ASSERT_OWNER(pVM);
     1884    PGMPhysReleasePageMappingLock(pVM, pLock); /* lazy for now */
    17661885}
    17671886
     
    17801899 * @param   pVM         The VM handle.
    17811900 * @param   GCPhys      The GC physical address to convert.
    1782  * @param   cbRange     Physical range
    17831901 * @param   pR3Ptr      Where to store the R3 pointer on success.
    17841902 *
    17851903 * @deprecated  Avoid when possible!
    17861904 */
    1787 VMMDECL(int) PGMPhysGCPhys2R3Ptr(PVM pVM, RTGCPHYS GCPhys, RTUINT cbRange, PRTR3PTR pR3Ptr)
     1905int pgmPhysGCPhys2R3Ptr(PVM pVM, RTGCPHYS GCPhys, PRTR3PTR pR3Ptr)
    17881906{
    17891907/** @todo this is kind of hacky and needs some more work. */
     
    17921910#endif
    17931911
    1794     Log(("PGMPhysGCPhys2R3Ptr(,%RGp,%#x,): dont use this API!\n", GCPhys, cbRange)); /** @todo eliminate this API! */
     1912    Log(("pgmPhysGCPhys2R3Ptr(,%RGp,): dont use this API!\n", GCPhys)); /** @todo eliminate this API! */
    17951913#if defined(IN_RC) || defined(VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0)
    17961914    AssertFailedReturn(VERR_NOT_IMPLEMENTED);
     
    18021920    int rc = pgmPhysGetPageAndRangeEx(pVM, GCPhys, &pPage, &pRam);
    18031921    if (RT_SUCCESS(rc))
    1804         rc = pgmPhysGCPhys2CCPtrInternal(pVM, pPage, GCPhys, (void **)pR3Ptr);
     1922        rc = pgmPhysGCPhys2CCPtrInternalDepr(pVM, pPage, GCPhys, (void **)pR3Ptr);
    18051923
    18061924    pgmUnlock(pVM);
     
    18091927#endif
    18101928}
    1811 
    1812 
    1813 #ifdef VBOX_STRICT
    1814 /**
    1815  * PGMPhysGCPhys2R3Ptr convenience for use with assertions.
    1816  *
    1817  * @returns The R3Ptr, NIL_RTR3PTR on failure.
    1818  * @param   pVM         The VM handle.
    1819  * @param   GCPhys      The GC Physical address.
    1820  * @param   cbRange     Physical range.
    1821  *
    1822  * @deprecated  Avoid when possible.
    1823  */
    1824 VMMDECL(RTR3PTR) PGMPhysGCPhys2R3PtrAssert(PVM pVM, RTGCPHYS GCPhys, RTUINT cbRange)
    1825 {
    1826     RTR3PTR R3Ptr;
    1827     int rc = PGMPhysGCPhys2R3Ptr(pVM, GCPhys, cbRange, &R3Ptr);
    1828     if (RT_SUCCESS(rc))
    1829         return R3Ptr;
    1830     return NIL_RTR3PTR;
    1831 }
    1832 #endif /* VBOX_STRICT */
    18331929
    18341930
     
    19312027     * Whatever we do we need the source page, map it first.
    19322028     */
    1933     const void *pvSrc = NULL;
    1934     int         rc    = pgmPhysGCPhys2CCPtrInternalReadOnly(pVM, pPage, GCPhys, &pvSrc);
     2029    PGMPAGEMAPLOCK PgMpLck;
     2030    const void    *pvSrc = NULL;
     2031    int rc = pgmPhysGCPhys2CCPtrInternalReadOnly(pVM, pPage, GCPhys, &pvSrc, &PgMpLck);
    19352032    if (RT_FAILURE(rc))
    19362033    {
     
    19772074        /* In R0 and RC the callbacks cannot handle this context, so we'll fail. */
    19782075        //AssertReleaseMsgFailed(("Wrong API! GCPhys=%RGp cb=%#x\n", GCPhys, cb));
     2076        pgmPhysReleaseInternalPageMappingLock(pVM, &PgMpLck);
    19792077        return VERR_PGM_PHYS_WR_HIT_HANDLER;
    19802078#endif
     
    20182116        /* In R0 and RC the callbacks cannot handle this context, so we'll fail. */
    20192117        //AssertReleaseMsgFailed(("Wrong API! GCPhys=%RGp cb=%#x\n", GCPhys, cb));
     2118        pgmPhysReleaseInternalPageMappingLock(pVM, &PgMpLck);
    20202119        return VERR_PGM_PHYS_WR_HIT_HANDLER;
    20212120#endif
     
    20272126    if (rc == VINF_PGM_HANDLER_DO_DEFAULT)
    20282127        memcpy(pvBuf, pvSrc, cb);
     2128    pgmPhysReleaseInternalPageMappingLock(pVM, &PgMpLck);
    20292129    return rc;
    20302130}
     
    20942194                     * Get the pointer to the page.
    20952195                     */
    2096                     const void *pvSrc;
    2097                     int rc = pgmPhysGCPhys2CCPtrInternalReadOnly(pVM, pPage, pRam->GCPhys + off, &pvSrc);
     2196                    PGMPAGEMAPLOCK PgMpLck;
     2197                    const void    *pvSrc;
     2198                    int rc = pgmPhysGCPhys2CCPtrInternalReadOnly(pVM, pPage, pRam->GCPhys + off, &pvSrc, &PgMpLck);
    20982199                    if (RT_SUCCESS(rc))
     2200                    {
    20992201                        memcpy(pvBuf, pvSrc, cb);
     2202                        pgmPhysReleaseInternalPageMappingLock(pVM, &PgMpLck);
     2203                    }
    21002204                    else
    21012205                    {
     
    21642268static int pgmPhysWriteHandler(PVM pVM, PPGMPAGE pPage, RTGCPHYS GCPhys, void const *pvBuf, size_t cbWrite)
    21652269{
    2166     void *pvDst = NULL;
    2167     int rc;
     2270    PGMPAGEMAPLOCK  PgMpLck;
     2271    void           *pvDst = NULL;
     2272    int             rc;
    21682273
    21692274    /*
     
    21962301            Log5(("pgmPhysWriteHandler: GCPhys=%RGp cbRange=%#x pPage=%R[pgmpage] phys %s\n", GCPhys, cbRange, pPage, R3STRING(pCur->pszDesc) ));
    21972302            if (!PGM_PAGE_IS_MMIO(pPage))
    2198                 rc = pgmPhysGCPhys2CCPtrInternal(pVM, pPage, GCPhys, &pvDst);
     2303                rc = pgmPhysGCPhys2CCPtrInternal(pVM, pPage, GCPhys, &pvDst, &PgMpLck);
    21992304            else
    22002305                rc = VINF_SUCCESS;
     
    22172322                pCur = NULL; /* might not be valid anymore. */
    22182323# endif
    2219                 if (rc == VINF_PGM_HANDLER_DO_DEFAULT)
    2220                     memcpy(pvDst, pvBuf, cbRange);
     2324                if (rc == VINF_PGM_HANDLER_DO_DEFAULT && pvDst)
     2325                {
     2326                    if (pvDst)
     2327                        memcpy(pvDst, pvBuf, cbRange);
     2328                }
    22212329                else
    22222330                    AssertLogRelMsg(rc == VINF_SUCCESS || rc == VINF_PGM_HANDLER_DO_DEFAULT, ("rc=%Rrc GCPhys=%RGp pPage=%R[pgmpage] %s\n", rc, GCPhys, pPage, (pCur) ? pCur->pszDesc : ""));
     
    22262334                                             GCPhys, pPage, rc), rc);
    22272335            if (RT_LIKELY(cbRange == cbWrite))
     2336            {
     2337                if (pvBuf)
     2338                    pgmPhysReleaseInternalPageMappingLock(pVM, &PgMpLck);
    22282339                return VINF_SUCCESS;
     2340            }
    22292341
    22302342            /* more fun to be had below */
     
    22622374
    22632375            Log5(("pgmPhysWriteHandler: GCPhys=%RGp cbRange=%#x pPage=%R[pgmpage] virt %s\n", GCPhys, cbRange, pPage, R3STRING(pCur->pszDesc) ));
    2264             rc = pgmPhysGCPhys2CCPtrInternal(pVM, pPage, GCPhys, &pvDst);
     2376            rc = pgmPhysGCPhys2CCPtrInternal(pVM, pPage, GCPhys, &pvDst, &PgMpLck);
    22652377            if (RT_SUCCESS(rc))
    22662378            {
     
    22852397                                             GCPhys, pPage, rc), rc);
    22862398            if (RT_LIKELY(cbRange == cbWrite))
     2399            {
     2400                if (pvBuf)
     2401                    pgmPhysReleaseInternalPageMappingLock(pVM, &PgMpLck);
    22872402                return VINF_SUCCESS;
     2403            }
    22882404
    22892405            /* more fun to be had below */
     
    23042420    if (!pvDst)
    23052421    {
    2306         rc = pgmPhysGCPhys2CCPtrInternal(pVM, pPage, GCPhys, &pvDst);
     2422        rc = pgmPhysGCPhys2CCPtrInternal(pVM, pPage, GCPhys, &pvDst, &PgMpLck);
    23072423        AssertLogRelMsgReturn(RT_SUCCESS(rc),
    23082424                              ("pgmPhysGCPhys2CCPtrInternal failed on %RGp / %R[pgmpage] -> %Rrc\n",
     
    24342550            NOREF(cbRange);
    24352551            //AssertReleaseMsgFailed(("Wrong API! GCPhys=%RGp cbRange=%#x\n", GCPhys, cbRange));
     2552            pgmPhysReleaseInternalPageMappingLock(pVM, &PgMpLck);
    24362553            return VERR_PGM_PHYS_WR_HIT_HANDLER;
    24372554#endif
     
    24632580            NOREF(cbRange);
    24642581            //AssertReleaseMsgFailed(("Wrong API! GCPhys=%RGp cbRange=%#x\n", GCPhys, cbRange));
     2582            pgmPhysReleaseInternalPageMappingLock(pVM, &PgMpLck);
    24652583            return VERR_PGM_PHYS_WR_HIT_HANDLER;
    24662584#endif
     
    25192637            NOREF(cbRange);
    25202638            //AssertReleaseMsgFailed(("Wrong API! GCPhys=%RGp cbRange=%#x\n", GCPhys, cbRange));
     2639            pgmPhysReleaseInternalPageMappingLock(pVM, &PgMpLck);
    25212640            return VERR_PGM_PHYS_WR_HIT_HANDLER;
    25222641#endif
     
    25292648         */
    25302649        if (cbRange >= cbWrite)
     2650        {
     2651            pgmPhysReleaseInternalPageMappingLock(pVM, &PgMpLck);
    25312652            return VINF_SUCCESS;
     2653        }
    25322654
    25332655        cbWrite         -= cbRange;
     
    26082730                     * Get the pointer to the page.
    26092731                     */
    2610                     void *pvDst;
    2611                     int rc = pgmPhysGCPhys2CCPtrInternal(pVM, pPage, pRam->GCPhys + off, &pvDst);
     2732                    PGMPAGEMAPLOCK PgMpLck;
     2733                    void          *pvDst;
     2734                    int rc = pgmPhysGCPhys2CCPtrInternal(pVM, pPage, pRam->GCPhys + off, &pvDst, &PgMpLck);
    26122735                    if (RT_SUCCESS(rc))
    26132736                    {
    26142737                        Assert(!PGM_PAGE_IS_BALLOONED(pPage));
    26152738                        memcpy(pvDst, pvBuf, cb);
     2739                        pgmPhysReleaseInternalPageMappingLock(pVM, &PgMpLck);
    26162740                    }
    2617                     else
    26182741                    /* Ignore writes to ballooned pages. */
    2619                     if (!PGM_PAGE_IS_BALLOONED(pPage))
     2742                    else if (!PGM_PAGE_IS_BALLOONED(pPage))
    26202743                        AssertLogRelMsgFailed(("pgmPhysGCPhys2CCPtrInternal failed on %RGp / %R[pgmpage] -> %Rrc\n",
    26212744                                                pRam->GCPhys + off, pPage, rc));
     
    32793402        {
    32803403            /** @todo we should check reserved bits ... */
    3281             void *pvSrc;
    3282             rc = PGM_GCPHYS_2_PTR_V2(pVM, pVCpu, GCPhys, &pvSrc);
     3404            PGMPAGEMAPLOCK PgMpLck;
     3405            void const    *pvSrc;
     3406            rc = PGMPhysGCPhys2CCPtrReadOnly(pVM, GCPhys, &pvSrc, &PgMpLck);
    32833407            switch (rc)
    32843408            {
     
    32863410                    Log(("PGMPhysInterpretedRead: pvDst=%p pvSrc=%p cb=%d\n", pvDst, (uint8_t *)pvSrc + (GCPtrSrc & PAGE_OFFSET_MASK), cb));
    32873411                    memcpy(pvDst, (uint8_t *)pvSrc + (GCPtrSrc & PAGE_OFFSET_MASK), cb);
     3412                    PGMPhysReleasePageMappingLock(pVM, &PgMpLck);
    32883413                    break;
    32893414                case VERR_PGM_PHYS_PAGE_RESERVED:
    32903415                case VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS:
    3291                     memset(pvDst, 0, cb); /** @todo this is wrong, it should be 0xff */
     3416                    memset(pvDst, 0xff, cb);
    32923417                    break;
    32933418                default:
     3419                    Assert(RT_FAILURE_NP(rc));
    32943420                    return rc;
    32953421            }
     
    33213447            /** @todo we should check reserved bits ... */
    33223448            AssertMsgFailed(("cb=%d cb1=%d cb2=%d GCPtrSrc=%RGv\n", cb, cb1, cb2, GCPtrSrc));
    3323             void *pvSrc1;
    3324             rc = PGM_GCPHYS_2_PTR_V2(pVM, pVCpu, GCPhys1, &pvSrc1);
     3449            PGMPAGEMAPLOCK PgMpLck;
     3450            void const *pvSrc1;
     3451            rc = PGMPhysGCPhys2CCPtrReadOnly(pVM, GCPhys1, &pvSrc1, &PgMpLck);
    33253452            switch (rc)
    33263453            {
    33273454                case VINF_SUCCESS:
    33283455                    memcpy(pvDst, (uint8_t *)pvSrc1 + (GCPtrSrc & PAGE_OFFSET_MASK), cb1);
     3456                    PGMPhysReleasePageMappingLock(pVM, &PgMpLck);
    33293457                    break;
    33303458                case VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS:
    3331                     memset(pvDst, 0, cb1); /** @todo this is wrong, it should be 0xff */
     3459                    memset(pvDst, 0xff, cb1);
    33323460                    break;
    33333461                default:
     3462                    Assert(RT_FAILURE_NP(rc));
    33343463                    return rc;
    33353464            }
    33363465
    3337             void *pvSrc2;
    3338             rc = PGM_GCPHYS_2_PTR_V2(pVM, pVCpu, GCPhys2, &pvSrc2);
     3466            void const *pvSrc2;
     3467            rc = PGMPhysGCPhys2CCPtrReadOnly(pVM, GCPhys2, &pvSrc2, &PgMpLck);
    33393468            switch (rc)
    33403469            {
    33413470                case VINF_SUCCESS:
    33423471                    memcpy((uint8_t *)pvDst + cb1, pvSrc2, cb2);
     3472                    PGMPhysReleasePageMappingLock(pVM, &PgMpLck);
    33433473                    break;
    33443474                case VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS:
    3345                     memset((uint8_t *)pvDst + cb1, 0, cb2);  /** @todo this is wrong, it should be 0xff */
     3475                    memset((uint8_t *)pvDst + cb1, 0xff, cb2);
    33463476                    break;
    33473477                default:
     3478                    Assert(RT_FAILURE_NP(rc));
    33483479                    return rc;
    33493480            }
  • trunk/src/VBox/VMM/VMMR0/PGMR0.cpp

    r37950 r38953  
    6868VMMR0DECL(int) PGMR0PhysAllocateHandyPages(PVM pVM, PVMCPU pVCpu)
    6969{
    70     Assert(PDMCritSectIsOwnerEx(&pVM->pgm.s.CritSect, pVCpu));
     70    PGM_LOCK_ASSERT_OWNER_EX(pVM, pVCpu);
    7171
    7272    /*
     
    183183VMMR0DECL(int) PGMR0PhysAllocateLargeHandyPage(PVM pVM, PVMCPU pVCpu)
    184184{
    185     Assert(PDMCritSectIsOwnerEx(&pVM->pgm.s.CritSect, pVCpu));
    186 
     185    PGM_LOCK_ASSERT_OWNER_EX(pVM, pVCpu);
    187186    Assert(!pVM->pgm.s.cLargeHandyPages);
    188     int rc = GMMR0AllocateLargePage(pVM, pVCpu->idCpu, _2M, &pVM->pgm.s.aLargeHandyPage[0].idPage, &pVM->pgm.s.aLargeHandyPage[0].HCPhysGCPhys);
     187
     188    int rc = GMMR0AllocateLargePage(pVM, pVCpu->idCpu, _2M,
     189                                    &pVM->pgm.s.aLargeHandyPage[0].idPage,
     190                                    &pVM->pgm.s.aLargeHandyPage[0].HCPhysGCPhys);
    189191    if (RT_SUCCESS(rc))
    190192        pVM->pgm.s.cLargeHandyPages = 1;
  • trunk/src/VBox/VMM/VMMR3/PGM.cpp

    r38712 r38953  
    11861186    AssertCompile(sizeof(pVM->pgm.s) <= sizeof(pVM->pgm.padding));
    11871187    AssertCompile(sizeof(pVM->aCpus[0].pgm.s) <= sizeof(pVM->aCpus[0].pgm.padding));
    1188     AssertCompileMemberAlignment(PGM, CritSect, sizeof(uintptr_t));
     1188    AssertCompileMemberAlignment(PGM, CritSectX, sizeof(uintptr_t));
    11891189
    11901190    /*
     
    13421342     * Initialize the PGM critical section and flush the phys TLBs
    13431343     */
    1344     rc = PDMR3CritSectInit(pVM, &pVM->pgm.s.CritSect, RT_SRC_POS, "PGM");
     1344    rc = PDMR3CritSectInit(pVM, &pVM->pgm.s.CritSectX, RT_SRC_POS, "PGM");
    13451345    AssertRCReturn(rc, rc);
    13461346
     
    14541454
    14551455    /* Almost no cleanup necessary, MM frees all memory. */
    1456     PDMR3CritSectDelete(&pVM->pgm.s.CritSect);
     1456    PDMR3CritSectDelete(&pVM->pgm.s.CritSectX);
    14571457
    14581458    return rc;
     
    25782578
    25792579    PGMDeregisterStringFormatTypes();
    2580     return PDMR3CritSectDelete(&pVM->pgm.s.CritSect);
     2580    return PDMR3CritSectDelete(&pVM->pgm.s.CritSectX);
    25812581}
    25822582
     
    26892689     * Get page directory addresses.
    26902690     */
     2691    pgmLock(pVM);
    26912692    PX86PD     pPDSrc = pgmGstGet32bitPDPtr(pVCpu);
    26922693    Assert(pPDSrc);
    2693     Assert(PGMPhysGCPhys2R3PtrAssert(pVM, (RTGCPHYS)(CPUMGetGuestCR3(pVCpu) & X86_CR3_PAGE_MASK), sizeof(*pPDSrc)) == pPDSrc);
    26942694
    26952695    /*
     
    27152715        }
    27162716    }
     2717    pgmUnlock(pVM);
    27172718}
    27182719
     
    27262727VMMR3DECL(int) PGMR3LockCall(PVM pVM)
    27272728{
    2728     int rc = PDMR3CritSectEnterEx(&pVM->pgm.s.CritSect, true /* fHostCall */);
     2729    int rc = PDMR3CritSectEnterEx(&pVM->pgm.s.CritSectX, true /* fHostCall */);
    27292730    AssertRC(rc);
    27302731    return rc;
  • trunk/src/VBox/VMM/VMMR3/PGMPhys.cpp

    r38838 r38953  
    150150                 * Simple stuff, go ahead.
    151151                 */
    152                 size_t   cb    = PAGE_SIZE - (off & PAGE_OFFSET_MASK);
     152                size_t cb = PAGE_SIZE - (off & PAGE_OFFSET_MASK);
    153153                if (cb > cbRead)
    154154                    cb = cbRead;
    155                 const void *pvSrc;
    156                 int rc = pgmPhysGCPhys2CCPtrInternalReadOnly(pVM, pPage, pRam->GCPhys + off, &pvSrc);
     155                PGMPAGEMAPLOCK PgMpLck;
     156                const void    *pvSrc;
     157                int rc = pgmPhysGCPhys2CCPtrInternalReadOnly(pVM, pPage, pRam->GCPhys + off, &pvSrc, &PgMpLck);
    157158                if (RT_SUCCESS(rc))
     159                {
    158160                    memcpy(pvBuf, pvSrc, cb);
     161                    pgmPhysReleaseInternalPageMappingLock(pVM, &PgMpLck);
     162                }
    159163                else
    160164                {
     
    290294                 * Simple stuff, go ahead.
    291295                 */
    292                 size_t      cb    = PAGE_SIZE - (off & PAGE_OFFSET_MASK);
     296                size_t cb = PAGE_SIZE - (off & PAGE_OFFSET_MASK);
    293297                if (cb > cbWrite)
    294298                    cb = cbWrite;
    295                 void *pvDst;
    296                 int rc = pgmPhysGCPhys2CCPtrInternal(pVM, pPage, pRam->GCPhys + off, &pvDst);
     299                PGMPAGEMAPLOCK PgMpLck;
     300                void          *pvDst;
     301                int rc = pgmPhysGCPhys2CCPtrInternal(pVM, pPage, pRam->GCPhys + off, &pvDst, &PgMpLck);
    297302                if (RT_SUCCESS(rc))
     303                {
    298304                    memcpy(pvDst, pvBuf, cb);
     305                    pgmPhysReleaseInternalPageMappingLock(pVM, &PgMpLck);
     306                }
    299307                else
    300308                    AssertLogRelMsgFailed(("pgmPhysGCPhys2CCPtrInternal failed on %RGp / %R[pgmpage] -> %Rrc\n",
     
    39964004        pVM->pgm.s.cMappedChunks++;
    39974005
    3998         /* If we're running out of virtual address space, then we should unmap another chunk. */
     4006        /*
     4007         * If we're running out of virtual address space, then we should
     4008         * unmap another chunk.
     4009         *
     4010         * Currently, an unmap operation requires that all other virtual CPUs
     4011         * are idling and not by chance making use of the memory we're
     4012         * unmapping.  So, we create an async unmap operation here.
     4013         *
     4014         * Now, when creating or restoring a saved state this wont work very
     4015         * well since we may want to restore all guest RAM + a little something.
     4016         * So, we have to do the unmap synchronously.  Fortunately for us
     4017         * though, during these operations the other virtual CPUs are inactive
     4018         * and it should be safe to do this.
     4019         */
     4020        /** @todo Eventually we should lock all memory when used and do
     4021         *        map+unmap as one kernel call without any rendezvous or
     4022         *        other precautions. */
    39994023        if (pVM->pgm.s.ChunkR3Map.c >= pVM->pgm.s.ChunkR3Map.cMax)
    40004024        {
    4001             /* Postpone the unmap operation (which requires a rendezvous operation) as we own the PGM lock here. */
    4002             rc = VMR3ReqCallNoWait(pVM, VMCPUID_ANY_QUEUE, (PFNRT)pgmR3PhysUnmapChunk, 1, pVM);
    4003             AssertRC(rc);
     4025            switch (VMR3GetState(pVM))
     4026            {
     4027                case VMSTATE_LOADING:
     4028                case VMSTATE_SAVING:
     4029                {
     4030                    PVMCPU pVCpu = VMMGetCpu(pVM);
     4031                    if (   pVCpu
     4032                        && pVM->pgm.s.cDeprecatedPageLocks == 0)
     4033                    {
     4034                        pgmR3PhysUnmapChunkRendezvous(pVM, pVCpu, NULL);
     4035                        break;
     4036                    }
     4037                    /* fall thru */
     4038                }
     4039                default:
     4040                    rc = VMR3ReqCallNoWait(pVM, VMCPUID_ANY_QUEUE, (PFNRT)pgmR3PhysUnmapChunk, 1, pVM);
     4041                    AssertRC(rc);
     4042                    break;
     4043            }
    40044044        }
    40054045    }
  • trunk/src/VBox/VMM/VMMR3/PGMSavedState.cpp

    r38707 r38953  
    12431243static void pgmR3StateCalcCrc32ForRamPage(PVM pVM, PPGMRAMRANGE pCur, PPGMLIVESAVERAMPAGE paLSPages, uint32_t iPage)
    12441244{
    1245     RTGCPHYS    GCPhys = pCur->GCPhys + ((RTGCPHYS)iPage << PAGE_SHIFT);
    1246     void const *pvPage;
    1247     int rc = pgmPhysGCPhys2CCPtrInternalReadOnly(pVM, &pCur->aPages[iPage], GCPhys, &pvPage);
     1245    RTGCPHYS        GCPhys = pCur->GCPhys + ((RTGCPHYS)iPage << PAGE_SHIFT);
     1246    PGMPAGEMAPLOCK  PgMpLck;
     1247    void const     *pvPage;
     1248    int rc = pgmPhysGCPhys2CCPtrInternalReadOnly(pVM, &pCur->aPages[iPage], GCPhys, &pvPage, &PgMpLck);
    12481249    if (RT_SUCCESS(rc))
     1250    {
    12491251        paLSPages[iPage].u32Crc = RTCrc32(pvPage, PAGE_SIZE);
     1252        pgmPhysReleaseInternalPageMappingLock(pVM, &PgMpLck);
     1253    }
    12501254    else
    12511255        paLSPages[iPage].u32Crc = UINT32_MAX; /* Invalid */
     
    12901294    if (paLSPages[iPage].u32Crc != UINT32_MAX)
    12911295    {
    1292         RTGCPHYS    GCPhys = pCur->GCPhys + ((RTGCPHYS)iPage << PAGE_SHIFT);
    1293         void const *pvPage;
    1294         int rc = pgmPhysGCPhys2CCPtrInternalReadOnly(pVM, &pCur->aPages[iPage], GCPhys, &pvPage);
     1296        RTGCPHYS        GCPhys = pCur->GCPhys + ((RTGCPHYS)iPage << PAGE_SHIFT);
     1297        PGMPAGEMAPLOCK  PgMpLck;
     1298        void const     *pvPage;
     1299        int rc = pgmPhysGCPhys2CCPtrInternalReadOnly(pVM, &pCur->aPages[iPage], GCPhys, &pvPage, &PgMpLck);
    12951300        if (RT_SUCCESS(rc))
     1301        {
    12961302            pgmR3StateVerifyCrc32ForPage(pvPage, pCur, paLSPages, iPage, pszWhere);
     1303            pgmPhysReleaseInternalPageMappingLock(pVM, &PgMpLck);
     1304        }
    12971305    }
    12981306}
     
    13331341                        && (iPage & 0x7ff) == 0x100
    13341342#endif
    1335                         && PDMR3CritSectYield(&pVM->pgm.s.CritSect)
     1343                        && PDMR3CritSectYield(&pVM->pgm.s.CritSectX)
    13361344                        && pVM->pgm.s.idRamRangesGen != idRamRangesGen)
    13371345                    {
     
    15581566                    if (   uPass != SSM_PASS_FINAL
    15591567                        && (iPage & 0x7ff) == 0x100
    1560                         && PDMR3CritSectYield(&pVM->pgm.s.CritSect)
     1568                        && PDMR3CritSectYield(&pVM->pgm.s.CritSectX)
    15611569                        && pVM->pgm.s.idRamRangesGen != idRamRangesGen)
    15621570                    {
     
    16221630                         * SSM call may block).
    16231631                         */
    1624                         uint8_t     abPage[PAGE_SIZE];
    1625                         void const *pvPage;
    1626                         rc = pgmPhysGCPhys2CCPtrInternalReadOnly(pVM, pCurPage, GCPhys, &pvPage);
     1632                        uint8_t         abPage[PAGE_SIZE];
     1633                        PGMPAGEMAPLOCK  PgMpLck;
     1634                        void const     *pvPage;
     1635                        rc = pgmPhysGCPhys2CCPtrInternalReadOnly(pVM, pCurPage, GCPhys, &pvPage, &PgMpLck);
    16271636                        if (RT_SUCCESS(rc))
    16281637                        {
     
    16321641                                pgmR3StateVerifyCrc32ForPage(abPage, pCur, paLSPages, iPage, "save#3");
    16331642#endif
     1643                            pgmPhysReleaseInternalPageMappingLock(pVM, &PgMpLck);
    16341644                        }
    16351645                        pgmUnlock(pVM);
     
    22312241     * Load the page.
    22322242     */
    2233     void *pvPage;
    2234     int rc = pgmPhysGCPhys2CCPtrInternal(pVM, pPage, GCPhys, &pvPage);
     2243    PGMPAGEMAPLOCK PgMpLck;
     2244    void          *pvPage;
     2245    int rc = pgmPhysGCPhys2CCPtrInternal(pVM, pPage, GCPhys, &pvPage, &PgMpLck);
    22352246    if (RT_SUCCESS(rc))
     2247    {
    22362248        rc = SSMR3GetMem(pSSM, pvPage, PAGE_SIZE);
     2249        pgmPhysReleaseInternalPageMappingLock(pVM, &PgMpLck);
     2250    }
    22372251
    22382252    return rc;
     
    26772691                            || PGM_PAGE_GET_TYPE(pPage) == PGMPAGETYPE_ROM_SHADOW)
    26782692                        {
    2679                             void *pvDstPage;
    2680                             rc = pgmPhysGCPhys2CCPtrInternal(pVM, pPage, GCPhys, &pvDstPage);
     2693                            PGMPAGEMAPLOCK PgMpLck;
     2694                            void          *pvDstPage;
     2695                            rc = pgmPhysGCPhys2CCPtrInternal(pVM, pPage, GCPhys, &pvDstPage, &PgMpLck);
    26812696                            AssertLogRelMsgRCReturn(rc, ("GCPhys=%RGp %R[pgmpage] rc=%Rrc\n", GCPhys, pPage, rc), rc);
     2697
    26822698                            ASMMemZeroPage(pvDstPage);
     2699                            pgmPhysReleaseInternalPageMappingLock(pVM, &PgMpLck);
    26832700                        }
    26842701                        /* Free it only if it's not part of a previously
     
    27192736                    case PGM_STATE_REC_RAM_RAW:
    27202737                    {
    2721                         void *pvDstPage;
    2722                         rc = pgmPhysGCPhys2CCPtrInternal(pVM, pPage, GCPhys, &pvDstPage);
     2738                        PGMPAGEMAPLOCK PgMpLck;
     2739                        void          *pvDstPage;
     2740                        rc = pgmPhysGCPhys2CCPtrInternal(pVM, pPage, GCPhys, &pvDstPage, &PgMpLck);
    27232741                        AssertLogRelMsgRCReturn(rc, ("GCPhys=%RGp %R[pgmpage] rc=%Rrc\n", GCPhys, pPage, rc), rc);
    27242742                        rc = SSMR3GetMem(pSSM, pvDstPage, PAGE_SIZE);
     2743                        pgmPhysReleaseInternalPageMappingLock(pVM, &PgMpLck);
    27252744                        if (RT_FAILURE(rc))
    27262745                            return rc;
  • trunk/src/VBox/VMM/VMMR3/PGMSharedPage.cpp

    r36891 r38953  
    342342                    case PGM_PAGE_STATE_WRITE_MONITORED:
    343343                    {
    344                         const void *pvPage;
    345344                        /* Check if the page was allocated, but completely zero. */
    346                         int rc = pgmPhysGCPhys2CCPtrInternalReadOnly(pVM, pPage, GCPhys, &pvPage);
    347                         if (    rc == VINF_SUCCESS
     345                        PGMPAGEMAPLOCK PgMpLck;
     346                        const void    *pvPage;
     347                        int rc = pgmPhysGCPhys2CCPtrInternalReadOnly(pVM, pPage, GCPhys, &pvPage, &PgMpLck);
     348                        if (    RT_SUCCESS(rc)
    348349                            &&  ASMMemIsZeroPage(pvPage))
    349                         {
    350350                            cAllocZero++;
    351                         }
    352                         else
    353                         if (GMMR3IsDuplicatePage(pVM, PGM_PAGE_GET_PAGEID(pPage)))
     351                        else if (GMMR3IsDuplicatePage(pVM, PGM_PAGE_GET_PAGEID(pPage)))
    354352                            cDuplicate++;
    355353                        else
    356354                            cUnique++;
    357 
     355                        if (RT_SUCCESS(rc))
     356                            pgmPhysReleaseInternalPageMappingLock(pVM, &PgMpLck);
    358357                        break;
    359358                    }
  • trunk/src/VBox/VMM/include/PGMInternal.h

    r38708 r38953  
    257257#else
    258258# define PGM_GCPHYS_2_PTR_V2(pVM, pVCpu, GCPhys, ppv) \
    259      PGMPhysGCPhys2R3Ptr(pVM, GCPhys, 1 /* one page only */, (PRTR3PTR)(ppv)) /** @todo this isn't asserting, use PGMRamGCPhys2HCPtr! */
     259     pgmPhysGCPhys2R3Ptr(pVM, GCPhys, (PRTR3PTR)(ppv)) /** @todo this isn't asserting! */
    260260#endif
    261261
     
    305305#else
    306306# define PGM_GCPHYS_2_PTR_EX(pVM, GCPhys, ppv) \
    307      PGMPhysGCPhys2R3Ptr(pVM, GCPhys, 1 /* one page only */, (PRTR3PTR)(ppv)) /** @todo this isn't asserting, use PGMRamGCPhys2HCPtr! */
     307     pgmPhysGCPhys2R3Ptr(pVM, GCPhys, (PRTR3PTR)(ppv)) /** @todo this isn't asserting! */
    308308#endif
    309309
     
    31463146    /** The address of the ring-0 mapping cache if we're making use of it.  */
    31473147    RTR0PTR                         pvR0DynMapUsed;
    3148 #if HC_ARCH_BITS == 32
    3149     /** Alignment padding that makes the next member start on a 8 byte boundary. */
     3148
     3149    /** Hack: Number of deprecated page mapping locks taken by the current lock
     3150     *  owner via pgmPhysGCPhys2CCPtrInternalDepr. */
     3151    uint32_t                        cDeprecatedPageLocks;
     3152#if HC_ARCH_BITS == 64
     3153    /** Alignment padding.  */
    31503154    uint32_t                        u32Alignment2;
    31513155#endif
     3156
    31523157
    31533158    /** PGM critical section.
     
    31553160     * and the page flag updating (some of it anyway).
    31563161     */
    3157     PDMCRITSECT                     CritSect;
     3162    PDMCRITSECT                     CritSectX;
    31583163
    31593164    /**
     
    33413346AssertCompileMemberAlignment(PGM, GCPtrMappingFixed, sizeof(RTGCPTR));
    33423347AssertCompileMemberAlignment(PGM, HCPhysInterPD, 8);
    3343 AssertCompileMemberAlignment(PGM, CritSect, 8);
     3348AssertCompileMemberAlignment(PGM, CritSectX, 8);
    33443349AssertCompileMemberAlignment(PGM, ChunkR3Map, 8);
    33453350AssertCompileMemberAlignment(PGM, PhysTlbHC, 8);
     
    38543859 * @param   a_pVM           The VM handle.
    38553860 */
    3856 #define PGM_LOCK_ASSERT_OWNER(a_pVM)    Assert(PDMCritSectIsOwner(&(a_pVM)->pgm.s.CritSect))
     3861#define PGM_LOCK_ASSERT_OWNER(a_pVM)    Assert(PDMCritSectIsOwner(&(a_pVM)->pgm.s.CritSectX))
     3862/**
     3863 * Asserts that the caller owns the PDM lock.
     3864 * This is the internal variant of PGMIsLockOwner.
     3865 * @param   a_pVM           The VM handle.
     3866 * @param   a_pVCpu         The current CPU handle.
     3867 */
     3868#define PGM_LOCK_ASSERT_OWNER_EX(a_pVM, a_pVCpu)  Assert(PDMCritSectIsOwnerEx(&(a_pVM)->pgm.s.CritSectX, pVCpu))
    38573869
    38583870int             pgmR3MappingsFixInternal(PVM pVM, RTGCPTR GCPtrBase, uint32_t cb);
     
    38873899int             pgmPhysPageMapReadOnly(PVM pVM, PPGMPAGE pPage, RTGCPHYS GCPhys, void const **ppv);
    38883900int             pgmPhysPageMapByPageID(PVM pVM, uint32_t idPage, RTHCPHYS HCPhys, void **ppv);
    3889 int             pgmPhysGCPhys2CCPtrInternal(PVM pVM, PPGMPAGE pPage, RTGCPHYS GCPhys, void **ppv);
    3890 int             pgmPhysGCPhys2CCPtrInternalReadOnly(PVM pVM, PPGMPAGE pPage, RTGCPHYS GCPhys, const void **ppv);
     3901int             pgmPhysGCPhys2R3Ptr(PVM pVM, RTGCPHYS GCPhys, PRTR3PTR pR3Ptr);
     3902int             pgmPhysGCPhys2CCPtrInternalDepr(PVM pVM, PPGMPAGE pPage, RTGCPHYS GCPhys, void **ppv);
     3903int             pgmPhysGCPhys2CCPtrInternal(PVM pVM, PPGMPAGE pPage, RTGCPHYS GCPhys, void **ppv, PPGMPAGEMAPLOCK pLock);
     3904int             pgmPhysGCPhys2CCPtrInternalReadOnly(PVM pVM, PPGMPAGE pPage, RTGCPHYS GCPhys, const void **ppv, PPGMPAGEMAPLOCK pLock);
     3905void            pgmPhysReleaseInternalPageMappingLock(PVM pVM, PPGMPAGEMAPLOCK pLock);
    38913906VMMDECL(int)    pgmPhysHandlerRedirectToHC(PVM pVM, RTGCUINT uErrorCode, PCPUMCTXCORE pRegFrame, RTGCPTR pvFault, RTGCPHYS GCPhysFault, void *pvUser);
    38923907VMMDECL(int)    pgmPhysRomWriteHandler(PVM pVM, RTGCUINT uErrorCode, PCPUMCTXCORE pRegFrame, RTGCPTR pvFault, RTGCPHYS GCPhysFault, void *pvUser);
  • trunk/src/VBox/VMM/include/internal/pgm.h

    r35346 r38953  
    6565VMMDECL(int)        PGMPhysGCPtr2CCPtr(PVMCPU pVCpu, RTGCPTR GCPtr, void **ppv, PPGMPAGEMAPLOCK pLock);
    6666VMMDECL(int)        PGMPhysGCPtr2CCPtrReadOnly(PVMCPU pVCpu, RTGCPTR GCPtr, void const **ppv, PPGMPAGEMAPLOCK pLock);
    67 VMMDECL(int)        PGMPhysGCPhys2R3Ptr(PVM pVM, RTGCPHYS GCPhys, RTUINT cbRange, PRTR3PTR pR3Ptr);
    68 #ifdef VBOX_STRICT
    69 VMMDECL(RTR3PTR)    PGMPhysGCPhys2R3PtrAssert(PVM pVM, RTGCPHYS GCPhys, RTUINT cbRange);
    70 #endif
    7167VMMR3DECL(void)     PGMR3ResetNoMorePhysWritesFlag(PVM pVM);
    7268
  • trunk/src/VBox/VMM/testcase/tstVMStructRC.cpp

    r38837 r38953  
    599599    GEN_CHECK_OFF(PGMCPU, fA20Enabled);
    600600    GEN_CHECK_OFF(PGMCPU, fSyncFlags);
    601     GEN_CHECK_OFF(PGM, CritSect);
     601    GEN_CHECK_OFF(PGM, CritSectX);
    602602    GEN_CHECK_OFF(PGM, pPoolR3);
    603603    GEN_CHECK_OFF(PGM, pPoolR0);
  • trunk/src/VBox/VMM/testcase/tstVMStructSize.cpp

    r38489 r38953  
    203203    CHECK_PADDING_VM(64, vmm);
    204204    PRINT_OFFSET(VM, pgm);
    205     PRINT_OFFSET(VM, pgm.s.CritSect);
     205    PRINT_OFFSET(VM, pgm.s.CritSectX);
    206206    CHECK_PADDING_VM(64, pgm);
    207207    PRINT_OFFSET(VM, hwaccm);
     
    383383    CHECK_MEMBER_ALIGNMENT(IOM, CritSect, sizeof(uintptr_t));
    384384    CHECK_MEMBER_ALIGNMENT(EM, CritSectREM, sizeof(uintptr_t));
    385     CHECK_MEMBER_ALIGNMENT(PGM, CritSect, sizeof(uintptr_t));
     385    CHECK_MEMBER_ALIGNMENT(PGM, CritSectX, sizeof(uintptr_t));
    386386    CHECK_MEMBER_ALIGNMENT(PDM, CritSect, sizeof(uintptr_t));
    387387    CHECK_MEMBER_ALIGNMENT(MMHYPERHEAP, Lock, sizeof(uintptr_t));
Note: See TracChangeset for help on using the changeset viewer.

© 2025 Oracle Support Privacy / Do Not Sell My Info Terms of Use Trademark Policy Automated Access Etiquette