VirtualBox

Changeset 77241 in vbox for trunk/src/VBox/VMM/VMMR3


Ignore:
Timestamp:
Feb 10, 2019 10:30:33 PM (6 years ago)
Author:
vboxsync
Message:

VMM: Added device helpers for bulk page mapping locking. VMMDev will be using this for a new variation on the HGCM page list parameter type. bugref:9172

Location:
trunk/src/VBox/VMM/VMMR3
Files:
2 edited

Legend:

Unmodified
Added
Removed
  • trunk/src/VBox/VMM/VMMR3/PDMDevHlp.cpp

    r76553 r77241  
    961961
    962962    Log(("pdmR3DevHlp_PhysReleasePageMappingLock: caller='%s'/%d: returns void\n", pDevIns->pReg->szName, pDevIns->iInstance));
     963}
     964
     965
     966/** @interface_method_impl{PDMDEVHLPR3,pfnPhysBulkGCPhys2CCPtr} */
     967static DECLCALLBACK(int) pdmR3DevHlp_PhysBulkGCPhys2CCPtr(PPDMDEVINS pDevIns, uint32_t cPages, PCRTGCPHYS paGCPhysPages,
     968                                                          uint32_t fFlags, void **papvPages, PPGMPAGEMAPLOCK paLocks)
     969{
     970    PDMDEV_ASSERT_DEVINS(pDevIns);
     971    PVM pVM = pDevIns->Internal.s.pVMR3;
     972    LogFlow(("pdmR3DevHlp_PhysBulkGCPhys2CCPtr: caller='%s'/%d: cPages=%#x paGCPhysPages=%p (%RGp,..) fFlags=%#x papvPages=%p paLocks=%p\n",
     973             pDevIns->pReg->szName, pDevIns->iInstance, cPages, paGCPhysPages, paGCPhysPages[0], fFlags, papvPages, paLocks));
     974    AssertReturn(!fFlags, VERR_INVALID_PARAMETER);
     975    AssertReturn(cPages > 0, VERR_INVALID_PARAMETER);
     976
     977#if defined(VBOX_STRICT) && defined(PDM_DEVHLP_DEADLOCK_DETECTION)
     978    if (!VM_IS_EMT(pVM))
     979    {
     980        char szNames[128];
     981        uint32_t cLocks = PDMR3CritSectCountOwned(pVM, szNames, sizeof(szNames));
     982        AssertMsg(cLocks == 0, ("cLocks=%u %s\n", cLocks, szNames));
     983    }
     984#endif
     985
     986    int rc = PGMR3PhysBulkGCPhys2CCPtrExternal(pVM, cPages, paGCPhysPages, papvPages, paLocks);
     987
     988    Log(("pdmR3DevHlp_PhysBulkGCPhys2CCPtr: caller='%s'/%d: returns %Rrc\n", pDevIns->pReg->szName, pDevIns->iInstance, rc));
     989    return rc;
     990}
     991
     992
     993/** @interface_method_impl{PDMDEVHLPR3,pfnPhysBulkGCPhys2CCPtrReadOnly} */
     994static DECLCALLBACK(int) pdmR3DevHlp_PhysBulkGCPhys2CCPtrReadOnly(PPDMDEVINS pDevIns, uint32_t cPages, PCRTGCPHYS paGCPhysPages,
     995                                                                  uint32_t fFlags, const void **papvPages, PPGMPAGEMAPLOCK paLocks)
     996{
     997    PDMDEV_ASSERT_DEVINS(pDevIns);
     998    PVM pVM = pDevIns->Internal.s.pVMR3;
     999    LogFlow(("pdmR3DevHlp_PhysBulkGCPhys2CCPtrReadOnly: caller='%s'/%d: cPages=%#x paGCPhysPages=%p (%RGp,...) fFlags=%#x papvPages=%p paLocks=%p\n",
     1000             pDevIns->pReg->szName, pDevIns->iInstance, cPages, paGCPhysPages, paGCPhysPages[0], fFlags, papvPages, paLocks));
     1001    AssertReturn(!fFlags, VERR_INVALID_PARAMETER);
     1002    AssertReturn(cPages > 0, VERR_INVALID_PARAMETER);
     1003
     1004#if defined(VBOX_STRICT) && defined(PDM_DEVHLP_DEADLOCK_DETECTION)
     1005    if (!VM_IS_EMT(pVM))
     1006    {
     1007        char szNames[128];
     1008        uint32_t cLocks = PDMR3CritSectCountOwned(pVM, szNames, sizeof(szNames));
     1009        AssertMsg(cLocks == 0, ("cLocks=%u %s\n", cLocks, szNames));
     1010    }
     1011#endif
     1012
     1013    int rc = PGMR3PhysBulkGCPhys2CCPtrReadOnlyExternal(pVM, cPages, paGCPhysPages, papvPages, paLocks);
     1014
     1015    Log(("pdmR3DevHlp_PhysBulkGCPhys2CCPtrReadOnly: caller='%s'/%d: returns %Rrc\n", pDevIns->pReg->szName, pDevIns->iInstance, rc));
     1016    return rc;
     1017}
     1018
     1019
     1020/** @interface_method_impl{PDMDEVHLPR3,pfnPhysReleasePageMappingLocks} */
     1021static DECLCALLBACK(void) pdmR3DevHlp_PhysBulkReleasePageMappingLocks(PPDMDEVINS pDevIns, uint32_t cPages, PPGMPAGEMAPLOCK paLocks)
     1022{
     1023    PDMDEV_ASSERT_DEVINS(pDevIns);
     1024    PVM pVM = pDevIns->Internal.s.pVMR3;
     1025    LogFlow(("pdmR3DevHlp_PhysBulkReleasePageMappingLocks: caller='%s'/%d: cPages=%#x paLocks=%p\n",
     1026             pDevIns->pReg->szName, pDevIns->iInstance, cPages, paLocks));
     1027    Assert(cPages > 0);
     1028
     1029    PGMPhysBulkReleasePageMappingLocks(pVM, cPages, paLocks);
     1030
     1031    Log(("pdmR3DevHlp_PhysBulkReleasePageMappingLocks: caller='%s'/%d: returns void\n", pDevIns->pReg->szName, pDevIns->iInstance));
    9631032}
    9641033
     
    37263795    pdmR3DevHlp_VMGetSuspendReason,
    37273796    pdmR3DevHlp_VMGetResumeReason,
    3728     0,
    3729     0,
    3730     0,
     3797    pdmR3DevHlp_PhysBulkGCPhys2CCPtr,
     3798    pdmR3DevHlp_PhysBulkGCPhys2CCPtrReadOnly,
     3799    pdmR3DevHlp_PhysBulkReleasePageMappingLocks,
    37313800    0,
    37323801    0,
     
    39964065    pdmR3DevHlp_VMGetSuspendReason,
    39974066    pdmR3DevHlp_VMGetResumeReason,
    3998     0,
    3999     0,
    4000     0,
     4067    pdmR3DevHlp_PhysBulkGCPhys2CCPtr,
     4068    pdmR3DevHlp_PhysBulkGCPhys2CCPtrReadOnly,
     4069    pdmR3DevHlp_PhysBulkReleasePageMappingLocks,
    40014070    0,
    40024071    0,
  • trunk/src/VBox/VMM/VMMR3/PGMPhys.cpp

    r76993 r77241  
    471471                    &&  !PGM_PAGE_HAS_ACTIVE_HANDLERS(pPage)
    472472#ifdef PGMPOOL_WITH_OPTIMIZED_DIRTY_PT
    473                     &&  !pgmPoolIsDirtyPage(pVM, GCPhys)
     473                    &&  !pgmPoolIsDirtyPage(pVM, GCPhys) /** @todo we're very likely doing this twice. */
    474474#endif
    475475                   )
     
    591591
    592592    pgmUnlock(pVM);
     593    return rc;
     594}
     595
     596
     597/**
     598 * Requests the mapping of multiple guest page into ring-3, external threads.
     599 *
     600 * When you're done with the pages, call PGMPhysBulkReleasePageMappingLock()
     601 * ASAP to release them.
     602 *
     603 * This API will assume your intention is to write to the pages, and will
     604 * therefore replace shared and zero pages. If you do not intend to modify the
     605 * pages, use the PGMR3PhysBulkGCPhys2CCPtrReadOnlyExternal() API.
     606 *
     607 * @returns VBox status code.
     608 * @retval  VINF_SUCCESS on success.
     609 * @retval  VERR_PGM_PHYS_PAGE_RESERVED if any of the pages has no physical
     610 *          backing or if any of the pages the page has any active access
     611 *          handlers. The caller must fall back on using PGMR3PhysWriteExternal.
     612 * @retval  VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS if @a paGCPhysPages contains
     613 *          an invalid physical address.
     614 *
     615 * @param   pVM             The cross context VM structure.
     616 * @param   cPages          Number of pages to lock.
     617 * @param   paGCPhysPages   The guest physical address of the pages that
     618 *                          should be mapped (@a cPages entries).
     619 * @param   fFlags          Flags reserved for future use, MBZ.
     620 * @param   papvPages       Where to store the ring-3 mapping addresses
     621 *                          corresponding to @a paGCPhysPages.
     622 * @param   paLocks         Where to store the locking information that
     623 *                          pfnPhysBulkReleasePageMappingLock needs (@a cPages
     624 *                          in length).
     625 *
     626 * @remark  Avoid calling this API from within critical sections (other than the
     627 *          PGM one) because of the deadlock risk when we have to delegating the
     628 *          task to an EMT.
     629 * @thread  Any.
     630 */
     631VMMR3DECL(int) PGMR3PhysBulkGCPhys2CCPtrExternal(PVM pVM, uint32_t cPages, PCRTGCPHYS paGCPhysPages,
     632                                                 void **papvPages, PPGMPAGEMAPLOCK paLocks)
     633{
     634    Assert(cPages > 0);
     635    AssertPtr(papvPages);
     636    AssertPtr(paLocks);
     637
     638    Assert(VM_IS_EMT(pVM) || !PGMIsLockOwner(pVM));
     639
     640    int rc = pgmLock(pVM);
     641    AssertRCReturn(rc, rc);
     642
     643    /*
     644     * Lock the pages one by one.
     645     * The loop body is similar to PGMR3PhysGCPhys2CCPtrExternal.
     646     */
     647    int32_t  cNextYield = 128;
     648    uint32_t iPage;
     649    for (iPage = 0; iPage < cPages; iPage++)
     650    {
     651        if (--cNextYield > 0)
     652        { /* likely */ }
     653        else
     654        {
     655            pgmUnlock(pVM);
     656            ASMNopPause();
     657            pgmLock(pVM);
     658            cNextYield = 128;
     659        }
     660
     661        /*
     662         * Query the Physical TLB entry for the page (may fail).
     663         */
     664        PPGMPAGEMAPTLBE pTlbe;
     665        rc = pgmPhysPageQueryTlbe(pVM, paGCPhysPages[iPage], &pTlbe);
     666        if (RT_SUCCESS(rc))
     667        { }
     668        else
     669            break;
     670        PPGMPAGE pPage = pTlbe->pPage;
     671
     672        /*
     673         * No MMIO or active access handlers.
     674         */
     675        if (   !PGM_PAGE_IS_MMIO_OR_SPECIAL_ALIAS(pPage)
     676            && !PGM_PAGE_HAS_ACTIVE_HANDLERS(pPage))
     677        { }
     678        else
     679        {
     680            rc = VERR_PGM_PHYS_PAGE_RESERVED;
     681            break;
     682        }
     683
     684        /*
     685         * The page must be in the allocated state and not be a dirty pool page.
     686         * We can handle converting a write monitored page to an allocated one, but
     687         * anything more complicated must be delegated to an EMT.
     688         */
     689        bool fDelegateToEmt = false;
     690        if (PGM_PAGE_GET_STATE(pPage) == PGM_PAGE_STATE_ALLOCATED)
     691#ifdef PGMPOOL_WITH_OPTIMIZED_DIRTY_PT
     692            fDelegateToEmt = pgmPoolIsDirtyPage(pVM, paGCPhysPages[iPage]);
     693#else
     694            fDelegateToEmt = false;
     695#endif
     696        else if (PGM_PAGE_GET_STATE(pPage) == PGM_PAGE_STATE_WRITE_MONITORED)
     697        {
     698#ifdef PGMPOOL_WITH_OPTIMIZED_DIRTY_PT
     699            if (!pgmPoolIsDirtyPage(pVM, paGCPhysPages[iPage]))
     700                pgmPhysPageMakeWriteMonitoredWritable(pVM, pPage, paGCPhysPages[iPage]);
     701            else
     702                fDelegateToEmt = true;
     703#endif
     704        }
     705        else
     706            fDelegateToEmt = true;
     707        if (!fDelegateToEmt)
     708        { }
     709        else
     710        {
     711            /* We could do this delegation in bulk, but considered too much work vs gain. */
     712            pgmUnlock(pVM);
     713            rc = VMR3ReqPriorityCallWait(pVM, VMCPUID_ANY, (PFNRT)pgmR3PhysGCPhys2CCPtrDelegated, 4,
     714                                         pVM, paGCPhysPages[iPage], &papvPages[iPage], &paLocks[iPage]);
     715            pgmLock(pVM);
     716            if (RT_FAILURE(rc))
     717                break;
     718            cNextYield = 128;
     719        }
     720
     721        /*
     722         * Now, just perform the locking and address calculation.
     723         */
     724        PPGMPAGEMAP pMap = pTlbe->pMap;
     725        if (pMap)
     726            pMap->cRefs++;
     727
     728        unsigned cLocks = PGM_PAGE_GET_WRITE_LOCKS(pPage);
     729        if (RT_LIKELY(cLocks < PGM_PAGE_MAX_LOCKS - 1))
     730        {
     731            if (cLocks == 0)
     732                pVM->pgm.s.cWriteLockedPages++;
     733            PGM_PAGE_INC_WRITE_LOCKS(pPage);
     734        }
     735        else if (cLocks != PGM_PAGE_GET_WRITE_LOCKS(pPage))
     736        {
     737            PGM_PAGE_INC_WRITE_LOCKS(pPage);
     738            AssertMsgFailed(("%RGp / %R[pgmpage] is entering permanent write locked state!\n", paGCPhysPages[iPage], pPage));
     739            if (pMap)
     740                pMap->cRefs++; /* Extra ref to prevent it from going away. */
     741        }
     742
     743        papvPages[iPage] = (void *)((uintptr_t)pTlbe->pv | (uintptr_t)(paGCPhysPages[iPage] & PAGE_OFFSET_MASK));
     744        paLocks[iPage].uPageAndType = (uintptr_t)pPage | PGMPAGEMAPLOCK_TYPE_WRITE;
     745        paLocks[iPage].pvMap        = pMap;
     746    }
     747
     748    pgmUnlock(pVM);
     749
     750    /*
     751     * On failure we must unlock any pages we managed to get already.
     752     */
     753    if (RT_FAILURE(rc) && iPage > 0)
     754        PGMPhysBulkReleasePageMappingLocks(pVM, iPage, paLocks);
     755
     756    return rc;
     757}
     758
     759
     760/**
     761 * Requests the mapping of multiple guest page into ring-3, for reading only,
     762 * external threads.
     763 *
     764 * When you're done with the pages, call PGMPhysReleasePageMappingLock() ASAP
     765 * to release them.
     766 *
     767 * @returns VBox status code.
     768 * @retval  VINF_SUCCESS on success.
     769 * @retval  VERR_PGM_PHYS_PAGE_RESERVED if any of the pages has no physical
     770 *          backing or if any of the pages the page has an active ALL access
     771 *          handler. The caller must fall back on using PGMR3PhysWriteExternal.
     772 * @retval  VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS if @a paGCPhysPages contains
     773 *          an invalid physical address.
     774 *
     775 * @param   pVM             The cross context VM structure.
     776 * @param   cPages          Number of pages to lock.
     777 * @param   paGCPhysPages   The guest physical address of the pages that
     778 *                          should be mapped (@a cPages entries).
     779 * @param   fFlags          Flags reserved for future use, MBZ.
     780 * @param   papvPages       Where to store the ring-3 mapping addresses
     781 *                          corresponding to @a paGCPhysPages.
     782 * @param   paLocks         Where to store the lock information that
     783 *                          pfnPhysReleasePageMappingLock needs (@a cPages
     784 *                          in length).
     785 *
     786 * @remark  Avoid calling this API from within critical sections (other than
     787 *          the PGM one) because of the deadlock risk.
     788 * @thread  Any.
     789 */
     790VMMR3DECL(int) PGMR3PhysBulkGCPhys2CCPtrReadOnlyExternal(PVM pVM, uint32_t cPages, PCRTGCPHYS paGCPhysPages,
     791                                                         void const **papvPages, PPGMPAGEMAPLOCK paLocks)
     792{
     793    Assert(cPages > 0);
     794    AssertPtr(papvPages);
     795    AssertPtr(paLocks);
     796
     797    Assert(VM_IS_EMT(pVM) || !PGMIsLockOwner(pVM));
     798
     799    int rc = pgmLock(pVM);
     800    AssertRCReturn(rc, rc);
     801
     802    /*
     803     * Lock the pages one by one.
     804     * The loop body is similar to PGMR3PhysGCPhys2CCPtrReadOnlyExternal.
     805     */
     806    int32_t  cNextYield = 256;
     807    uint32_t iPage;
     808    for (iPage = 0; iPage < cPages; iPage++)
     809    {
     810        if (--cNextYield > 0)
     811        { /* likely */ }
     812        else
     813        {
     814            pgmUnlock(pVM);
     815            ASMNopPause();
     816            pgmLock(pVM);
     817            cNextYield = 256;
     818        }
     819
     820        /*
     821         * Query the Physical TLB entry for the page (may fail).
     822         */
     823        PPGMPAGEMAPTLBE pTlbe;
     824        rc = pgmPhysPageQueryTlbe(pVM, paGCPhysPages[iPage], &pTlbe);
     825        if (RT_SUCCESS(rc))
     826        { }
     827        else
     828            break;
     829        PPGMPAGE pPage = pTlbe->pPage;
     830
     831        /*
     832         * No MMIO or active all access handlers, everything else can be accessed.
     833         */
     834        if (   !PGM_PAGE_IS_MMIO_OR_SPECIAL_ALIAS(pPage)
     835            && !PGM_PAGE_HAS_ACTIVE_ALL_HANDLERS(pPage))
     836        { }
     837        else
     838        {
     839            rc = VERR_PGM_PHYS_PAGE_RESERVED;
     840            break;
     841        }
     842
     843        /*
     844         * Now, just perform the locking and address calculation.
     845         */
     846        PPGMPAGEMAP pMap = pTlbe->pMap;
     847        if (pMap)
     848            pMap->cRefs++;
     849
     850        unsigned cLocks = PGM_PAGE_GET_READ_LOCKS(pPage);
     851        if (RT_LIKELY(cLocks < PGM_PAGE_MAX_LOCKS - 1))
     852        {
     853            if (cLocks == 0)
     854                pVM->pgm.s.cReadLockedPages++;
     855            PGM_PAGE_INC_READ_LOCKS(pPage);
     856        }
     857        else if (cLocks != PGM_PAGE_GET_READ_LOCKS(pPage))
     858        {
     859            PGM_PAGE_INC_READ_LOCKS(pPage);
     860            AssertMsgFailed(("%RGp / %R[pgmpage] is entering permanent readonly locked state!\n", paGCPhysPages[iPage], pPage));
     861            if (pMap)
     862                pMap->cRefs++; /* Extra ref to prevent it from going away. */
     863        }
     864
     865        papvPages[iPage] = (void *)((uintptr_t)pTlbe->pv | (uintptr_t)(paGCPhysPages[iPage] & PAGE_OFFSET_MASK));
     866        paLocks[iPage].uPageAndType = (uintptr_t)pPage | PGMPAGEMAPLOCK_TYPE_READ;
     867        paLocks[iPage].pvMap        = pMap;
     868    }
     869
     870    pgmUnlock(pVM);
     871
     872    /*
     873     * On failure we must unlock any pages we managed to get already.
     874     */
     875    if (RT_FAILURE(rc) && iPage > 0)
     876        PGMPhysBulkReleasePageMappingLocks(pVM, iPage, paLocks);
     877
    593878    return rc;
    594879}
Note: See TracChangeset for help on using the changeset viewer.

© 2024 Oracle Support Privacy / Do Not Sell My Info Terms of Use Trademark Policy Automated Access Etiquette