VirtualBox

Changeset 77241 in vbox


Ignore:
Timestamp:
Feb 10, 2019 10:30:33 PM (6 years ago)
Author:
vboxsync
svn:sync-xref-src-repo-rev:
128748
Message:

VMM: Added dev helpers for bulk page mapping locking. VMMDev will be using this for a new variation on the HGCM page list parameter type. bugref:9172

Location:
trunk
Files:
5 edited

Legend:

Unmodified
Added
Removed
  • trunk/include/VBox/vmm/pdmdev.h

    r76585 r77241  
    18901890
    18911891/** Current PDMDEVHLPR3 version number. */
    1892 #define PDM_DEVHLPR3_VERSION                    PDM_VERSION_MAKE_PP(0xffe7, 22, 0)
     1892#define PDM_DEVHLPR3_VERSION                    PDM_VERSION_MAKE_PP(0xffe7, 22, 1)
    18931893
    18941894/**
     
    32953295    DECLR3CALLBACKMEMBER(VMRESUMEREASON, pfnVMGetResumeReason,(PPDMDEVINS pDevIns));
    32963296
     3297    /**
     3298     * Requests the mapping of multiple guest pages into ring-3.
     3299     *
     3300     * When you're done with the pages, call pfnPhysBulkReleasePageMappingLocks()
     3301     * ASAP to release them.
     3302     *
     3303     * This API will assume your intention is to write to the pages, and will
     3304     * therefore replace shared and zero pages. If you do not intend to modify the
     3305     * pages, use the pfnPhysBulkGCPhys2CCPtrReadOnly() API.
     3306     *
     3307     * @returns VBox status code.
     3308     * @retval  VINF_SUCCESS on success.
     3309     * @retval  VERR_PGM_PHYS_PAGE_RESERVED if any of the pages has no physical
     3310     *          backing or if any of the pages has any active access
     3311     *          handlers. The caller must fall back on using PGMR3PhysWriteExternal.
     3312     * @retval  VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS if @a paGCPhysPages contains
     3313     *          an invalid physical address.
     3314     *
     3315     * @param   pDevIns             The device instance.
     3316     * @param   cPages              Number of pages to lock.
     3317     * @param   paGCPhysPages       The guest physical address of the pages that
     3318     *                              should be mapped (@a cPages entries).
     3319     * @param   fFlags              Flags reserved for future use, MBZ.
     3320     * @param   papvPages           Where to store the ring-3 mapping addresses
     3321     *                              corresponding to @a paGCPhysPages.
     3322     * @param   paLocks             Where to store the locking information that
     3323     *                              pfnPhysBulkReleasePageMappingLocks needs (@a cPages
     3324     *                              in length).
     3325     *
     3326     * @remark  Avoid calling this API from within critical sections (other than the
     3327     *          PGM one) because of the deadlock risk when we have to delegate the
     3328     *          task to an EMT.
     3329     * @thread  Any.
     3330     * @since   6.0.6
     3331     */
     3332    DECLR3CALLBACKMEMBER(int, pfnPhysBulkGCPhys2CCPtr,(PPDMDEVINS pDevIns, uint32_t cPages, PCRTGCPHYS paGCPhysPages,
     3333                                                       uint32_t fFlags, void **papvPages, PPGMPAGEMAPLOCK paLocks));
     3334
     3335    /**
     3336     * Requests the mapping of multiple guest pages into ring-3, for reading only.
     3337     *
     3338     * When you're done with the pages, call pfnPhysBulkReleasePageMappingLocks()
     3339     * ASAP to release them.
     3340     *
     3341     * @returns VBox status code.
     3342     * @retval  VINF_SUCCESS on success.
     3343     * @retval  VERR_PGM_PHYS_PAGE_RESERVED if any of the pages has no physical
     3344     *          backing or if any of the pages has an active ALL access
     3345     *          handler. The caller must fall back on using PGMR3PhysWriteExternal.
     3346     * @retval  VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS if @a paGCPhysPages contains
     3347     *          an invalid physical address.
     3348     *
     3349     * @param   pDevIns             The device instance.
     3350     * @param   cPages              Number of pages to lock.
     3351     * @param   paGCPhysPages       The guest physical address of the pages that
     3352     *                              should be mapped (@a cPages entries).
     3353     * @param   fFlags              Flags reserved for future use, MBZ.
     3354     * @param   papvPages           Where to store the ring-3 mapping addresses
     3355     *                              corresponding to @a paGCPhysPages.
     3356     * @param   paLocks             Where to store the lock information that
     3357     *                              pfnPhysReleasePageMappingLock needs (@a cPages
     3358     *                              in length).
     3359     *
     3360     * @remark  Avoid calling this API from within critical sections.
     3361     * @thread  Any.
     3362     * @since   6.0.6
     3363     */
     3364    DECLR3CALLBACKMEMBER(int, pfnPhysBulkGCPhys2CCPtrReadOnly,(PPDMDEVINS pDevIns, uint32_t cPages, PCRTGCPHYS paGCPhysPages,
     3365                                                               uint32_t fFlags, void const **papvPages, PPGMPAGEMAPLOCK paLocks));
     3366
     3367    /**
     3368     * Release the mappings of multiple guest pages.
     3369     *
     3370     * This is the counterpart of pfnPhysBulkGCPhys2CCPtr and
     3371     * pfnPhysBulkGCPhys2CCPtrReadOnly.
     3372     *
     3373     * @param   pDevIns             The device instance.
     3374     * @param   cPages              Number of pages to unlock.
     3375     * @param   paLocks             The lock structures initialized by the mapping
     3376     *                              function (@a cPages in length).
     3377     * @thread  Any.
     3378     * @since   6.0.6
     3379     */
     3380    DECLR3CALLBACKMEMBER(void, pfnPhysBulkReleasePageMappingLocks,(PPDMDEVINS pDevIns, uint32_t cPages, PPGMPAGEMAPLOCK paLocks));
     3381
    32973382    /** Space reserved for future members.
    32983383     * @{ */
     
    33043389    DECLR3CALLBACKMEMBER(void, pfnReserved6,(void));
    33053390    DECLR3CALLBACKMEMBER(void, pfnReserved7,(void));
    3306     DECLR3CALLBACKMEMBER(void, pfnReserved8,(void));
    3307     DECLR3CALLBACKMEMBER(void, pfnReserved9,(void));
    3308     DECLR3CALLBACKMEMBER(void, pfnReserved10,(void));
     3391    //DECLR3CALLBACKMEMBER(void, pfnReserved8,(void));
     3392    //DECLR3CALLBACKMEMBER(void, pfnReserved9,(void));
     3393    //DECLR3CALLBACKMEMBER(void, pfnReserved10,(void));
    33093394    /** @} */
    33103395
     
    45994684{
    46004685    pDevIns->CTX_SUFF(pHlp)->pfnPhysReleasePageMappingLock(pDevIns, pLock);
     4686}
     4687
     4688/**
     4689 * @copydoc PDMDEVHLPR3::pfnPhysBulkGCPhys2CCPtr
     4690 */
     4691DECLINLINE(int) PDMDevHlpPhysBulkGCPhys2CCPtr(PPDMDEVINS pDevIns, uint32_t cPages, PCRTGCPHYS paGCPhysPages,
     4692                                              uint32_t fFlags, void **papvPages, PPGMPAGEMAPLOCK paLocks)
     4693{
     4694    return pDevIns->CTX_SUFF(pHlp)->pfnPhysBulkGCPhys2CCPtr(pDevIns, cPages, paGCPhysPages, fFlags, papvPages, paLocks);
     4695}
     4696
     4697/**
     4698 * @copydoc PDMDEVHLPR3::pfnPhysBulkGCPhys2CCPtrReadOnly
     4699 */
     4700DECLINLINE(int) PDMDevHlpPhysBulkGCPhys2CCPtrReadOnly(PPDMDEVINS pDevIns, uint32_t cPages, PCRTGCPHYS paGCPhysPages,
     4701                                                      uint32_t fFlags, void const **papvPages, PPGMPAGEMAPLOCK paLocks)
     4702{
     4703    return pDevIns->CTX_SUFF(pHlp)->pfnPhysBulkGCPhys2CCPtrReadOnly(pDevIns, cPages, paGCPhysPages, fFlags, papvPages, paLocks);
     4704}
     4705
     4706/**
     4707 * @copydoc PDMDEVHLPR3::pfnPhysBulkReleasePageMappingLocks
     4708 */
     4709DECLINLINE(void) PDMDevHlpPhysBulkReleasePageMappingLocks(PPDMDEVINS pDevIns, uint32_t cLocks, PPGMPAGEMAPLOCK paLocks)
     4710{
     4711    pDevIns->CTX_SUFF(pHlp)->pfnPhysBulkReleasePageMappingLocks(pDevIns, cLocks, paLocks);
    46014712}
    46024713
  • trunk/include/VBox/vmm/pgm.h

    r76585 r77241  
    558558VMMDECL(int)        PGMPhysGCPtr2GCPhys(PVMCPU pVCpu, RTGCPTR GCPtr, PRTGCPHYS pGCPhys);
    559559VMMDECL(void)       PGMPhysReleasePageMappingLock(PVM pVM, PPGMPAGEMAPLOCK pLock);
     560VMMDECL(void)       PGMPhysBulkReleasePageMappingLocks(PVM pVM, uint32_t cPages, PPGMPAGEMAPLOCK paLock);
    560561
    561562/** @def PGM_PHYS_RW_IS_SUCCESS
     
    918919VMMR3DECL(int)      PGMR3PhysGCPhys2CCPtrExternal(PVM pVM, RTGCPHYS GCPhys, void **ppv, PPGMPAGEMAPLOCK pLock);
    919920VMMR3DECL(int)      PGMR3PhysGCPhys2CCPtrReadOnlyExternal(PVM pVM, RTGCPHYS GCPhys, void const **ppv, PPGMPAGEMAPLOCK pLock);
     921VMMR3DECL(int)      PGMR3PhysBulkGCPhys2CCPtrExternal(PVM pVM, uint32_t cPages, PCRTGCPHYS paGCPhysPages,
     922                                                      void **papvPages, PPGMPAGEMAPLOCK paLocks);
     923VMMR3DECL(int)      PGMR3PhysBulkGCPhys2CCPtrReadOnlyExternal(PVM pVM, uint32_t cPages, PCRTGCPHYS paGCPhysPages,
     924                                                              void const **papvPages, PPGMPAGEMAPLOCK paLocks);
    920925VMMR3DECL(int)      PGMR3PhysChunkMap(PVM pVM, uint32_t idChunk);
    921926VMMR3DECL(void)     PGMR3PhysChunkInvalidateTLB(PVM pVM);
  • trunk/src/VBox/VMM/VMMAll/PGMAllPhys.cpp

    r76553 r77241  
    21262126#endif /* IN_RING3 */
    21272127}
     2128
     2129
     2130#ifdef IN_RING3
     2131/**
     2132 * Release the mapping of multiple guest pages.
     2133 *
     2134 * This is the counterpart to PGMR3PhysBulkGCPhys2CCPtrExternal() and
     2135 * PGMR3PhysBulkGCPhys2CCPtrReadOnlyExternal().
     2136 *
     2137 * @param   pVM         The cross context VM structure.
     2138 * @param   cPages      Number of pages to unlock.
     2139 * @param   paLocks     Array of lock structures initialized by the mapping
     2140 *                      function.
     2141 */
     2142VMMDECL(void) PGMPhysBulkReleasePageMappingLocks(PVM pVM, uint32_t cPages, PPGMPAGEMAPLOCK paLocks)
     2143{
     2144    Assert(cPages > 0);
     2145    bool const fWriteLock = (paLocks[0].uPageAndType & PGMPAGEMAPLOCK_TYPE_MASK) == PGMPAGEMAPLOCK_TYPE_WRITE;
     2146#ifdef VBOX_STRICT
     2147    for (uint32_t i = 1; i < cPages; i++)
     2148    {
     2149        Assert(fWriteLock == ((paLocks[i].uPageAndType & PGMPAGEMAPLOCK_TYPE_MASK) == PGMPAGEMAPLOCK_TYPE_WRITE));
     2150        AssertPtr(paLocks[i].uPageAndType);
     2151    }
     2152#endif
     2153
     2154    pgmLock(pVM);
     2155    if (fWriteLock)
     2156    {
     2157        /*
     2158         * Write locks:
     2159         */
     2160        for (uint32_t i = 0; i < cPages; i++)
     2161        {
     2162            PPGMPAGE pPage  = (PPGMPAGE)(paLocks[i].uPageAndType & ~PGMPAGEMAPLOCK_TYPE_MASK);
     2163            unsigned cLocks = PGM_PAGE_GET_WRITE_LOCKS(pPage);
     2164            Assert(cLocks > 0);
     2165            if (RT_LIKELY(cLocks > 0 && cLocks < PGM_PAGE_MAX_LOCKS))
     2166            {
     2167                if (cLocks == 1)
     2168                {
     2169                    Assert(pVM->pgm.s.cWriteLockedPages > 0);
     2170                    pVM->pgm.s.cWriteLockedPages--;
     2171                }
     2172                PGM_PAGE_DEC_WRITE_LOCKS(pPage);
     2173            }
     2174
     2175            if (PGM_PAGE_GET_STATE(pPage) != PGM_PAGE_STATE_WRITE_MONITORED)
     2176            { /* probably extremely likely */ }
     2177            else
     2178                pgmPhysPageMakeWriteMonitoredWritable(pVM, pPage, NIL_RTGCPHYS);
     2179
     2180            PPGMPAGEMAP pMap = (PPGMPAGEMAP)paLocks[i].pvMap;
     2181            if (pMap)
     2182            {
     2183                Assert(pMap->cRefs >= 1);
     2184                pMap->cRefs--;
     2185            }
     2186
     2187            /* Yield the lock: */
     2188            if ((i & 1023) == 1023)
     2189            {
     2190                pgmLock(pVM);
     2191                pgmUnlock(pVM);
     2192            }
     2193        }
     2194    }
     2195    else
     2196    {
     2197        /*
     2198         * Read locks:
     2199         */
     2200        for (uint32_t i = 0; i < cPages; i++)
     2201        {
     2202            PPGMPAGE pPage  = (PPGMPAGE)(paLocks[i].uPageAndType & ~PGMPAGEMAPLOCK_TYPE_MASK);
     2203            unsigned cLocks = PGM_PAGE_GET_READ_LOCKS(pPage);
     2204            Assert(cLocks > 0);
     2205            if (RT_LIKELY(cLocks > 0 && cLocks < PGM_PAGE_MAX_LOCKS))
     2206            {
     2207                if (cLocks == 1)
     2208                {
     2209                    Assert(pVM->pgm.s.cReadLockedPages > 0);
     2210                    pVM->pgm.s.cReadLockedPages--;
     2211                }
     2212                PGM_PAGE_DEC_READ_LOCKS(pPage);
     2213            }
     2214
     2215            PPGMPAGEMAP pMap = (PPGMPAGEMAP)paLocks[i].pvMap;
     2216            if (pMap)
     2217            {
     2218                Assert(pMap->cRefs >= 1);
     2219                pMap->cRefs--;
     2220            }
     2221
     2222            /* Yield the lock: */
     2223            if ((i & 1023) == 1023)
     2224            {
     2225                pgmLock(pVM);
     2226                pgmUnlock(pVM);
     2227            }
     2228        }
     2229    }
     2230    pgmUnlock(pVM);
     2231
     2232    RT_BZERO(paLocks, sizeof(paLocks[0]) * cPages);
     2233}
     2234#endif /* IN_RING3 */
    21282235
    21292236
  • trunk/src/VBox/VMM/VMMR3/PDMDevHlp.cpp

    r76553 r77241  
    961961
    962962    Log(("pdmR3DevHlp_PhysReleasePageMappingLock: caller='%s'/%d: returns void\n", pDevIns->pReg->szName, pDevIns->iInstance));
     963}
     964
     965
     966/** @interface_method_impl{PDMDEVHLPR3,pfnPhysBulkGCPhys2CCPtr} */
     967static DECLCALLBACK(int) pdmR3DevHlp_PhysBulkGCPhys2CCPtr(PPDMDEVINS pDevIns, uint32_t cPages, PCRTGCPHYS paGCPhysPages,
     968                                                          uint32_t fFlags, void **papvPages, PPGMPAGEMAPLOCK paLocks)
     969{
     970    PDMDEV_ASSERT_DEVINS(pDevIns);
     971    PVM pVM = pDevIns->Internal.s.pVMR3;
     972    LogFlow(("pdmR3DevHlp_PhysBulkGCPhys2CCPtr: caller='%s'/%d: cPages=%#x paGCPhysPages=%p (%RGp,..) fFlags=%#x papvPages=%p paLocks=%p\n",
     973             pDevIns->pReg->szName, pDevIns->iInstance, cPages, paGCPhysPages, paGCPhysPages[0], fFlags, papvPages, paLocks));
     974    AssertReturn(!fFlags, VERR_INVALID_PARAMETER);
     975    AssertReturn(cPages > 0, VERR_INVALID_PARAMETER);
     976
     977#if defined(VBOX_STRICT) && defined(PDM_DEVHLP_DEADLOCK_DETECTION)
     978    if (!VM_IS_EMT(pVM))
     979    {
     980        char szNames[128];
     981        uint32_t cLocks = PDMR3CritSectCountOwned(pVM, szNames, sizeof(szNames));
     982        AssertMsg(cLocks == 0, ("cLocks=%u %s\n", cLocks, szNames));
     983    }
     984#endif
     985
     986    int rc = PGMR3PhysBulkGCPhys2CCPtrExternal(pVM, cPages, paGCPhysPages, papvPages, paLocks);
     987
     988    Log(("pdmR3DevHlp_PhysBulkGCPhys2CCPtr: caller='%s'/%d: returns %Rrc\n", pDevIns->pReg->szName, pDevIns->iInstance, rc));
     989    return rc;
     990}
     991
     992
     993/** @interface_method_impl{PDMDEVHLPR3,pfnPhysBulkGCPhys2CCPtrReadOnly} */
     994static DECLCALLBACK(int) pdmR3DevHlp_PhysBulkGCPhys2CCPtrReadOnly(PPDMDEVINS pDevIns, uint32_t cPages, PCRTGCPHYS paGCPhysPages,
     995                                                                  uint32_t fFlags, const void **papvPages, PPGMPAGEMAPLOCK paLocks)
     996{
     997    PDMDEV_ASSERT_DEVINS(pDevIns);
     998    PVM pVM = pDevIns->Internal.s.pVMR3;
     999    LogFlow(("pdmR3DevHlp_PhysBulkGCPhys2CCPtrReadOnly: caller='%s'/%d: cPages=%#x paGCPhysPages=%p (%RGp,...) fFlags=%#x papvPages=%p paLocks=%p\n",
     1000             pDevIns->pReg->szName, pDevIns->iInstance, cPages, paGCPhysPages, paGCPhysPages[0], fFlags, papvPages, paLocks));
     1001    AssertReturn(!fFlags, VERR_INVALID_PARAMETER);
     1002    AssertReturn(cPages > 0, VERR_INVALID_PARAMETER);
     1003
     1004#if defined(VBOX_STRICT) && defined(PDM_DEVHLP_DEADLOCK_DETECTION)
     1005    if (!VM_IS_EMT(pVM))
     1006    {
     1007        char szNames[128];
     1008        uint32_t cLocks = PDMR3CritSectCountOwned(pVM, szNames, sizeof(szNames));
     1009        AssertMsg(cLocks == 0, ("cLocks=%u %s\n", cLocks, szNames));
     1010    }
     1011#endif
     1012
     1013    int rc = PGMR3PhysBulkGCPhys2CCPtrReadOnlyExternal(pVM, cPages, paGCPhysPages, papvPages, paLocks);
     1014
     1015    Log(("pdmR3DevHlp_PhysBulkGCPhys2CCPtrReadOnly: caller='%s'/%d: returns %Rrc\n", pDevIns->pReg->szName, pDevIns->iInstance, rc));
     1016    return rc;
     1017}
     1018
     1019
     1020/** @interface_method_impl{PDMDEVHLPR3,pfnPhysBulkReleasePageMappingLocks} */
     1021static DECLCALLBACK(void) pdmR3DevHlp_PhysBulkReleasePageMappingLocks(PPDMDEVINS pDevIns, uint32_t cPages, PPGMPAGEMAPLOCK paLocks)
     1022{
     1023    PDMDEV_ASSERT_DEVINS(pDevIns);
     1024    PVM pVM = pDevIns->Internal.s.pVMR3;
     1025    LogFlow(("pdmR3DevHlp_PhysBulkReleasePageMappingLocks: caller='%s'/%d: cPages=%#x paLocks=%p\n",
     1026             pDevIns->pReg->szName, pDevIns->iInstance, cPages, paLocks));
     1027    Assert(cPages > 0);
     1028
     1029    PGMPhysBulkReleasePageMappingLocks(pVM, cPages, paLocks);
     1030
     1031    Log(("pdmR3DevHlp_PhysBulkReleasePageMappingLocks: caller='%s'/%d: returns void\n", pDevIns->pReg->szName, pDevIns->iInstance));
    9631032}
    9641033
     
    37263795    pdmR3DevHlp_VMGetSuspendReason,
    37273796    pdmR3DevHlp_VMGetResumeReason,
    3728     0,
    3729     0,
    3730     0,
     3797    pdmR3DevHlp_PhysBulkGCPhys2CCPtr,
     3798    pdmR3DevHlp_PhysBulkGCPhys2CCPtrReadOnly,
     3799    pdmR3DevHlp_PhysBulkReleasePageMappingLocks,
    37313800    0,
    37323801    0,
     
    39964065    pdmR3DevHlp_VMGetSuspendReason,
    39974066    pdmR3DevHlp_VMGetResumeReason,
    3998     0,
    3999     0,
    4000     0,
     4067    pdmR3DevHlp_PhysBulkGCPhys2CCPtr,
     4068    pdmR3DevHlp_PhysBulkGCPhys2CCPtrReadOnly,
     4069    pdmR3DevHlp_PhysBulkReleasePageMappingLocks,
    40014070    0,
    40024071    0,
  • trunk/src/VBox/VMM/VMMR3/PGMPhys.cpp

    r76993 r77241  
    471471                    &&  !PGM_PAGE_HAS_ACTIVE_HANDLERS(pPage)
    472472#ifdef PGMPOOL_WITH_OPTIMIZED_DIRTY_PT
    473                     &&  !pgmPoolIsDirtyPage(pVM, GCPhys)
     473                    &&  !pgmPoolIsDirtyPage(pVM, GCPhys) /** @todo we're very likely doing this twice. */
    474474#endif
    475475                   )
     
    591591
    592592    pgmUnlock(pVM);
     593    return rc;
     594}
     595
     596
     597/**
     598 * Requests the mapping of multiple guest page into ring-3, external threads.
     599 *
     600 * When you're done with the pages, call PGMPhysBulkReleasePageMappingLocks()
     601 * ASAP to release them.
     602 *
     603 * This API will assume your intention is to write to the pages, and will
     604 * therefore replace shared and zero pages. If you do not intend to modify the
     605 * pages, use the PGMR3PhysBulkGCPhys2CCPtrReadOnlyExternal() API.
     606 *
     607 * @returns VBox status code.
     608 * @retval  VINF_SUCCESS on success.
     609 * @retval  VERR_PGM_PHYS_PAGE_RESERVED if any of the pages has no physical
     610 *          backing or if any of the pages has any active access
     611 *          handlers. The caller must fall back on using PGMR3PhysWriteExternal.
     612 * @retval  VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS if @a paGCPhysPages contains
     613 *          an invalid physical address.
     614 *
     615 * @param   pVM             The cross context VM structure.
     616 * @param   cPages          Number of pages to lock.
     617 * @param   paGCPhysPages   The guest physical address of the pages that
     618 *                          should be mapped (@a cPages entries).
     619 * @param   fFlags          Flags reserved for future use, MBZ.
     620 * @param   papvPages       Where to store the ring-3 mapping addresses
     621 *                          corresponding to @a paGCPhysPages.
     622 * @param   paLocks         Where to store the locking information that
     623 *                          pfnPhysBulkReleasePageMappingLock needs (@a cPages
     624 *                          in length).
     625 *
     626 * @remark  Avoid calling this API from within critical sections (other than the
     627 *          PGM one) because of the deadlock risk when we have to delegate the
     628 *          task to an EMT.
     629 * @thread  Any.
     630 */
     631VMMR3DECL(int) PGMR3PhysBulkGCPhys2CCPtrExternal(PVM pVM, uint32_t cPages, PCRTGCPHYS paGCPhysPages,
     632                                                 void **papvPages, PPGMPAGEMAPLOCK paLocks)
     633{
     634    Assert(cPages > 0);
     635    AssertPtr(papvPages);
     636    AssertPtr(paLocks);
     637
     638    Assert(VM_IS_EMT(pVM) || !PGMIsLockOwner(pVM));
     639
     640    int rc = pgmLock(pVM);
     641    AssertRCReturn(rc, rc);
     642
     643    /*
     644     * Lock the pages one by one.
     645     * The loop body is similar to PGMR3PhysGCPhys2CCPtrExternal.
     646     */
     647    int32_t  cNextYield = 128;
     648    uint32_t iPage;
     649    for (iPage = 0; iPage < cPages; iPage++)
     650    {
     651        if (--cNextYield > 0)
     652        { /* likely */ }
     653        else
     654        {
     655            pgmUnlock(pVM);
     656            ASMNopPause();
     657            pgmLock(pVM);
     658            cNextYield = 128;
     659        }
     660
     661        /*
     662         * Query the Physical TLB entry for the page (may fail).
     663         */
     664        PPGMPAGEMAPTLBE pTlbe;
     665        rc = pgmPhysPageQueryTlbe(pVM, paGCPhysPages[iPage], &pTlbe);
     666        if (RT_SUCCESS(rc))
     667        { }
     668        else
     669            break;
     670        PPGMPAGE pPage = pTlbe->pPage;
     671
     672        /*
     673         * No MMIO or active access handlers.
     674         */
     675        if (   !PGM_PAGE_IS_MMIO_OR_SPECIAL_ALIAS(pPage)
     676            && !PGM_PAGE_HAS_ACTIVE_HANDLERS(pPage))
     677        { }
     678        else
     679        {
     680            rc = VERR_PGM_PHYS_PAGE_RESERVED;
     681            break;
     682        }
     683
     684        /*
     685         * The page must be in the allocated state and not be a dirty pool page.
     686         * We can handle converting a write monitored page to an allocated one, but
     687         * anything more complicated must be delegated to an EMT.
     688         */
     689        bool fDelegateToEmt = false;
     690        if (PGM_PAGE_GET_STATE(pPage) == PGM_PAGE_STATE_ALLOCATED)
     691#ifdef PGMPOOL_WITH_OPTIMIZED_DIRTY_PT
     692            fDelegateToEmt = pgmPoolIsDirtyPage(pVM, paGCPhysPages[iPage]);
     693#else
     694            fDelegateToEmt = false;
     695#endif
     696        else if (PGM_PAGE_GET_STATE(pPage) == PGM_PAGE_STATE_WRITE_MONITORED)
     697        {
     698#ifdef PGMPOOL_WITH_OPTIMIZED_DIRTY_PT
     699            if (!pgmPoolIsDirtyPage(pVM, paGCPhysPages[iPage]))
     700                pgmPhysPageMakeWriteMonitoredWritable(pVM, pPage, paGCPhysPages[iPage]);
     701            else
     702                fDelegateToEmt = true;
     703#endif
     704        }
     705        else
     706            fDelegateToEmt = true;
     707        if (!fDelegateToEmt)
     708        { }
     709        else
     710        {
     711            /* We could do this delegation in bulk, but considered too much work vs gain. */
     712            pgmUnlock(pVM);
     713            rc = VMR3ReqPriorityCallWait(pVM, VMCPUID_ANY, (PFNRT)pgmR3PhysGCPhys2CCPtrDelegated, 4,
     714                                         pVM, paGCPhysPages[iPage], &papvPages[iPage], &paLocks[iPage]);
     715            pgmLock(pVM);
     716            if (RT_FAILURE(rc))
     717                break;
     718            cNextYield = 128;
     719        }
     720
     721        /*
     722         * Now, just perform the locking and address calculation.
     723         */
     724        PPGMPAGEMAP pMap = pTlbe->pMap;
     725        if (pMap)
     726            pMap->cRefs++;
     727
     728        unsigned cLocks = PGM_PAGE_GET_WRITE_LOCKS(pPage);
     729        if (RT_LIKELY(cLocks < PGM_PAGE_MAX_LOCKS - 1))
     730        {
     731            if (cLocks == 0)
     732                pVM->pgm.s.cWriteLockedPages++;
     733            PGM_PAGE_INC_WRITE_LOCKS(pPage);
     734        }
     735        else if (cLocks != PGM_PAGE_GET_WRITE_LOCKS(pPage))
     736        {
     737            PGM_PAGE_INC_WRITE_LOCKS(pPage);
     738            AssertMsgFailed(("%RGp / %R[pgmpage] is entering permanent write locked state!\n", paGCPhysPages[iPage], pPage));
     739            if (pMap)
     740                pMap->cRefs++; /* Extra ref to prevent it from going away. */
     741        }
     742
     743        papvPages[iPage] = (void *)((uintptr_t)pTlbe->pv | (uintptr_t)(paGCPhysPages[iPage] & PAGE_OFFSET_MASK));
     744        paLocks[iPage].uPageAndType = (uintptr_t)pPage | PGMPAGEMAPLOCK_TYPE_WRITE;
     745        paLocks[iPage].pvMap        = pMap;
     746    }
     747
     748    pgmUnlock(pVM);
     749
     750    /*
     751     * On failure we must unlock any pages we managed to get already.
     752     */
     753    if (RT_FAILURE(rc) && iPage > 0)
     754        PGMPhysBulkReleasePageMappingLocks(pVM, iPage, paLocks);
     755
     756    return rc;
     757}
     758
     759
     760/**
     761 * Requests the mapping of multiple guest pages into ring-3, for reading only,
     762 * external threads.
     763 *
     764 * When you're done with the pages, call PGMPhysReleasePageMappingLock() ASAP
     765 * to release them.
     766 *
     767 * @returns VBox status code.
     768 * @retval  VINF_SUCCESS on success.
     769 * @retval  VERR_PGM_PHYS_PAGE_RESERVED if any of the pages has no physical
     770 *          backing or if any of the pages has an active ALL access
     771 *          handler. The caller must fall back on using PGMR3PhysWriteExternal.
     772 * @retval  VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS if @a paGCPhysPages contains
     773 *          an invalid physical address.
     774 *
     775 * @param   pVM             The cross context VM structure.
     776 * @param   cPages          Number of pages to lock.
     777 * @param   paGCPhysPages   The guest physical address of the pages that
     778 *                          should be mapped (@a cPages entries).
     779 * @param   fFlags          Flags reserved for future use, MBZ.
     780 * @param   papvPages       Where to store the ring-3 mapping addresses
     781 *                          corresponding to @a paGCPhysPages.
     782 * @param   paLocks         Where to store the lock information that
     783 *                          pfnPhysReleasePageMappingLock needs (@a cPages
     784 *                          in length).
     785 *
     786 * @remark  Avoid calling this API from within critical sections (other than
     787 *          the PGM one) because of the deadlock risk.
     788 * @thread  Any.
     789 */
     790VMMR3DECL(int) PGMR3PhysBulkGCPhys2CCPtrReadOnlyExternal(PVM pVM, uint32_t cPages, PCRTGCPHYS paGCPhysPages,
     791                                                         void const **papvPages, PPGMPAGEMAPLOCK paLocks)
     792{
     793    Assert(cPages > 0);
     794    AssertPtr(papvPages);
     795    AssertPtr(paLocks);
     796
     797    Assert(VM_IS_EMT(pVM) || !PGMIsLockOwner(pVM));
     798
     799    int rc = pgmLock(pVM);
     800    AssertRCReturn(rc, rc);
     801
     802    /*
     803     * Lock the pages one by one.
     804     * The loop body is similar to PGMR3PhysGCPhys2CCPtrReadOnlyExternal.
     805     */
     806    int32_t  cNextYield = 256;
     807    uint32_t iPage;
     808    for (iPage = 0; iPage < cPages; iPage++)
     809    {
     810        if (--cNextYield > 0)
     811        { /* likely */ }
     812        else
     813        {
     814            pgmUnlock(pVM);
     815            ASMNopPause();
     816            pgmLock(pVM);
     817            cNextYield = 256;
     818        }
     819
     820        /*
     821         * Query the Physical TLB entry for the page (may fail).
     822         */
     823        PPGMPAGEMAPTLBE pTlbe;
     824        rc = pgmPhysPageQueryTlbe(pVM, paGCPhysPages[iPage], &pTlbe);
     825        if (RT_SUCCESS(rc))
     826        { }
     827        else
     828            break;
     829        PPGMPAGE pPage = pTlbe->pPage;
     830
     831        /*
     832         * No MMIO or active all access handlers, everything else can be accessed.
     833         */
     834        if (   !PGM_PAGE_IS_MMIO_OR_SPECIAL_ALIAS(pPage)
     835            && !PGM_PAGE_HAS_ACTIVE_ALL_HANDLERS(pPage))
     836        { }
     837        else
     838        {
     839            rc = VERR_PGM_PHYS_PAGE_RESERVED;
     840            break;
     841        }
     842
     843        /*
     844         * Now, just perform the locking and address calculation.
     845         */
     846        PPGMPAGEMAP pMap = pTlbe->pMap;
     847        if (pMap)
     848            pMap->cRefs++;
     849
     850        unsigned cLocks = PGM_PAGE_GET_READ_LOCKS(pPage);
     851        if (RT_LIKELY(cLocks < PGM_PAGE_MAX_LOCKS - 1))
     852        {
     853            if (cLocks == 0)
     854                pVM->pgm.s.cReadLockedPages++;
     855            PGM_PAGE_INC_READ_LOCKS(pPage);
     856        }
     857        else if (cLocks != PGM_PAGE_GET_READ_LOCKS(pPage))
     858        {
     859            PGM_PAGE_INC_READ_LOCKS(pPage);
     860            AssertMsgFailed(("%RGp / %R[pgmpage] is entering permanent readonly locked state!\n", paGCPhysPages[iPage], pPage));
     861            if (pMap)
     862                pMap->cRefs++; /* Extra ref to prevent it from going away. */
     863        }
     864
     865        papvPages[iPage] = (void *)((uintptr_t)pTlbe->pv | (uintptr_t)(paGCPhysPages[iPage] & PAGE_OFFSET_MASK));
     866        paLocks[iPage].uPageAndType = (uintptr_t)pPage | PGMPAGEMAPLOCK_TYPE_READ;
     867        paLocks[iPage].pvMap        = pMap;
     868    }
     869
     870    pgmUnlock(pVM);
     871
     872    /*
     873     * On failure we must unlock any pages we managed to get already.
     874     */
     875    if (RT_FAILURE(rc) && iPage > 0)
     876        PGMPhysBulkReleasePageMappingLocks(pVM, iPage, paLocks);
     877
    593878    return rc;
    594879}
Note: See TracChangeset for help on using the changeset viewer.

© 2025 Oracle Support Privacy / Do Not Sell My Info Terms of Use Trademark Policy Automated Access Etiquette