VirtualBox

Changeset 31402 in vbox for trunk/src/VBox/VMM/VMMAll


Timestamp:
Aug 5, 2010 12:28:18 PM
Author:
vboxsync
svn:sync-xref-src-repo-rev:
64465
Message:

PGM: Replaced the hazardous raw-mode context dynamic mapping code with the PGMR0DynMap code used by darwin/x86. This is a risky change, but it should pay off once stable by providing 100% certainty that dynamically mapped pages aren't reused behind our back (this has been observed in seemingly benign code paths recently).
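
The hazard the message refers to is easiest to see with a small standalone model (plain C; every name and the four-slot size below are invented for illustration, none of this is VirtualBox code): the raw-mode dynamic mapping area is a small ring of page slots recycled in round-robin order, so a pointer obtained earlier can silently come to refer to a different physical page once enough further mappings have been requested.

    /* Standalone model of a tiny round-robin "dynamic mapping" ring.
     * All names are invented for illustration; this is not VirtualBox code. */
    #include <stdio.h>
    #include <stdint.h>

    #define NUM_SLOTS 4                       /* the real ring used 16 slots of 4 KB each */

    static uint64_t g_aSlotPhys[NUM_SLOTS];   /* which "physical page" each slot maps */
    static unsigned g_iLast = 0;              /* last slot handed out */

    /* Map a physical page and return the slot index that now refers to it. */
    static unsigned DynMapPage(uint64_t Phys)
    {
        g_iLast = (g_iLast + 1) % NUM_SLOTS;  /* round-robin reuse, no locking */
        g_aSlotPhys[g_iLast] = Phys;
        return g_iLast;
    }

    int main(void)
    {
        unsigned iMine = DynMapPage(0x1000);  /* caller keeps this slot "mapped" */

        /* Unrelated code paths request more mappings than there are slots... */
        for (uint64_t Phys = 0x2000; Phys <= 0x5000; Phys += 0x1000)
            DynMapPage(Phys);

        /* ...and the caller's slot now points at someone else's page. */
        printf("slot %u: expected phys 0x1000, actually 0x%llx\n",
               iMine, (unsigned long long)g_aSlotPhys[iMine]);
        return 0;
    }

Per the message, switching raw-mode over to the PGMR0DynMap-based code is meant to rule out exactly this kind of silent recycling while a mapping is still in use.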

Location:
trunk/src/VBox/VMM/VMMAll
Files:
7 edited

  • trunk/src/VBox/VMM/VMMAll/MMAllPagePool.cpp

    r28800 r31402  
    3939
    4040
    41 #ifndef VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0
     41#if !defined(VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0) && !defined(IN_RC)
    4242
    4343/**
  • trunk/src/VBox/VMM/VMMAll/PGMAll.cpp

    r31170 r31402  
    936936        PGMPOOLKIND enmKind;
    937937
    938 # if defined(IN_RC)
    939         /* Make sure the dynamic pPdeDst mapping will not be reused during this function. */
    940         PGMDynLockHCPage(pVM, (uint8_t *)pPdpe);
    941 # endif
    942 
    943938        if (pVM->pgm.s.fNestedPaging || !CPUMIsGuestPagingEnabled(pVCpu))
    944939        {
     
    990985         */
    991986        ASMReloadCR3();
    992         PGMDynUnlockHCPage(pVM, (uint8_t *)pPdpe);
    993987# endif
     988        PGM_DYNMAP_UNUSED_HINT(pVCpu, pPdpe);
    994989    }
    995990    else
     
    15241519#endif /* !VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0 */
    15251520#if !defined(IN_RC) && !defined(VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0)
    1526 
    15271521/**
    15281522 * Performs the lazy mapping of the 32-bit guest PD.
     
    15631557    return rc;
    15641558}
    1565 
    15661559#endif
    15671560
     
    22722265#if defined(IN_RC) || defined(VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0)
    22732266
    2274 /** Common worker for PGMDynMapGCPage and PGMDynMapGCPageOff. */
    2275 DECLINLINE(int) pgmDynMapGCPageInternal(PVM pVM, RTGCPHYS GCPhys, void **ppv)
     2267/**
     2268 * Common worker for pgmRZDynMapGCPageOffInlined and pgmRZDynMapGCPageV2Inlined.
     2269 *
     2270 * @returns VBox status code.
     2271 * @param   pVM         The VM handle.
     2272 * @param   pVCpu       The current CPU.
     2273 * @param   GCPhys      The guest physical address of the page to map.  The
     2274 *                      offset bits are not ignored.
     2275 * @param   ppv         Where to return the address corresponding to @a GCPhys.
     2276 */
     2277int pgmRZDynMapGCPageCommon(PVM pVM, PVMCPU pVCpu, RTGCPHYS GCPhys, void **ppv RTLOG_COMMA_SRC_POS_DECL)
    22762278{
    22772279    pgmLock(pVM);
    22782280
    22792281    /*
    2280      * Convert it to a writable page and it on to PGMDynMapHCPage.
     2282     * Convert it to a writable page and it on to the dynamic mapper.
    22812283     */
    22822284    int rc;
     
    22872289        if (RT_SUCCESS(rc))
    22882290        {
    2289             //Log(("PGMDynMapGCPage: GCPhys=%RGp pPage=%R[pgmpage]\n", GCPhys, pPage));
    2290 #ifdef VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0
    2291             rc = pgmR0DynMapHCPageInlined(VMMGetCpu(pVM), PGM_PAGE_GET_HCPHYS(pPage), ppv);
    2292 #else
    2293             rc = PGMDynMapHCPage(pVM, PGM_PAGE_GET_HCPHYS(pPage), ppv);
    2294 #endif
     2291            void *pv;
     2292            rc = pgmRZDynMapHCPageInlined(pVCpu, PGM_PAGE_GET_HCPHYS(pPage), &pv RTLOG_COMMA_SRC_POS_ARGS);
     2293            if (RT_SUCCESS(rc))
     2294                *ppv = (void *)((uintptr_t)pv | ((uintptr_t)GCPhys & PAGE_OFFSET_MASK));
    22952295        }
    22962296        else
     
    23072307}
    23082308
    2309 /**
    2310  * Temporarily maps one guest page specified by GC physical address.
    2311  * These pages must have a physical mapping in HC, i.e. they cannot be MMIO pages.
    2312  *
    2313  * Be WARNED that the dynamic page mapping area is small, 8 pages, thus the space is
    2314  * reused after 8 mappings (or perhaps a few more if you score with the cache).
    2315  *
    2316  * @returns VBox status.
    2317  * @param   pVM         VM handle.
    2318  * @param   GCPhys      GC Physical address of the page.
    2319  * @param   ppv         Where to store the address of the mapping.
    2320  */
    2321 VMMDECL(int) PGMDynMapGCPage(PVM pVM, RTGCPHYS GCPhys, void **ppv)
    2322 {
    2323     AssertMsg(!(GCPhys & PAGE_OFFSET_MASK), ("GCPhys=%RGp\n", GCPhys));
    2324     return pgmDynMapGCPageInternal(pVM, GCPhys, ppv);
    2325 }
    2326 
    2327 
    2328 /**
    2329  * Temporarily maps one guest page specified by unaligned GC physical address.
    2330  * These pages must have a physical mapping in HC, i.e. they cannot be MMIO pages.
    2331  *
    2332  * Be WARNED that the dynamic page mapping area is small, 8 pages, thus the space is
    2333  * reused after 8 mappings (or perhaps a few more if you score with the cache).
    2334  *
    2335  * The caller is aware that only the speicifed page is mapped and that really bad things
    2336  * will happen if writing beyond the page!
    2337  *
    2338  * @returns VBox status.
    2339  * @param   pVM         VM handle.
    2340  * @param   GCPhys      GC Physical address within the page to be mapped.
    2341  * @param   ppv         Where to store the address of the mapping address corresponding to GCPhys.
    2342  */
    2343 VMMDECL(int) PGMDynMapGCPageOff(PVM pVM, RTGCPHYS GCPhys, void **ppv)
    2344 {
    2345     void *pv;
    2346     int rc = pgmDynMapGCPageInternal(pVM, GCPhys, &pv);
    2347     if (RT_SUCCESS(rc))
    2348     {
    2349         *ppv = (void *)((uintptr_t)pv | (uintptr_t)(GCPhys & PAGE_OFFSET_MASK));
    2350         return VINF_SUCCESS;
    2351     }
    2352     return rc;
    2353 }
    2354 
    2355 # ifdef IN_RC
    2356 
    2357 /**
    2358  * Temporarily maps one host page specified by HC physical address.
    2359  *
    2360  * Be WARNED that the dynamic page mapping area is small, 16 pages, thus the space is
    2361  * reused after 16 mappings (or perhaps a few more if you score with the cache).
    2362  *
    2363  * @returns VINF_SUCCESS, will bail out to ring-3 on failure.
    2364  * @param   pVM         VM handle.
    2365  * @param   HCPhys      HC Physical address of the page.
    2366  * @param   ppv         Where to store the address of the mapping. This is the
    2367  *                      address of the PAGE not the exact address corresponding
    2368  *                      to HCPhys. Use PGMDynMapHCPageOff if you care for the
    2369  *                      page offset.
    2370  */
    2371 VMMDECL(int) PGMDynMapHCPage(PVM pVM, RTHCPHYS HCPhys, void **ppv)
    2372 {
    2373     AssertMsg(!(HCPhys & PAGE_OFFSET_MASK), ("HCPhys=%RHp\n", HCPhys));
    2374 
    2375     /*
    2376      * Check the cache.
    2377      */
    2378     register unsigned iCache;
    2379     for (iCache = 0;iCache < RT_ELEMENTS(pVM->pgm.s.aHCPhysDynPageMapCache);iCache++)
    2380     {
    2381         static const uint8_t au8Trans[MM_HYPER_DYNAMIC_SIZE >> PAGE_SHIFT][RT_ELEMENTS(pVM->pgm.s.aHCPhysDynPageMapCache)] =
    2382         {
    2383             { 0,  9, 10, 11, 12, 13, 14, 15},
    2384             { 0,  1, 10, 11, 12, 13, 14, 15},
    2385             { 0,  1,  2, 11, 12, 13, 14, 15},
    2386             { 0,  1,  2,  3, 12, 13, 14, 15},
    2387             { 0,  1,  2,  3,  4, 13, 14, 15},
    2388             { 0,  1,  2,  3,  4,  5, 14, 15},
    2389             { 0,  1,  2,  3,  4,  5,  6, 15},
    2390             { 0,  1,  2,  3,  4,  5,  6,  7},
    2391             { 8,  1,  2,  3,  4,  5,  6,  7},
    2392             { 8,  9,  2,  3,  4,  5,  6,  7},
    2393             { 8,  9, 10,  3,  4,  5,  6,  7},
    2394             { 8,  9, 10, 11,  4,  5,  6,  7},
    2395             { 8,  9, 10, 11, 12,  5,  6,  7},
    2396             { 8,  9, 10, 11, 12, 13,  6,  7},
    2397             { 8,  9, 10, 11, 12, 13, 14,  7},
    2398             { 8,  9, 10, 11, 12, 13, 14, 15},
    2399         };
    2400         AssertCompile(RT_ELEMENTS(au8Trans) == 16);
    2401         AssertCompile(RT_ELEMENTS(au8Trans[0]) == 8);
    2402 
    2403         if (pVM->pgm.s.aHCPhysDynPageMapCache[iCache] == HCPhys)
    2404         {
    2405             int iPage = au8Trans[pVM->pgm.s.iDynPageMapLast][iCache];
    2406 
    2407             /* The cache can get out of sync with locked entries. (10 locked, 2 overwrites its cache position, last = 11, lookup 2 -> page 10 instead of 2) */
    2408             if ((pVM->pgm.s.paDynPageMap32BitPTEsGC[iPage].u & X86_PTE_PG_MASK) == HCPhys)
    2409             {
    2410                 void *pv = pVM->pgm.s.pbDynPageMapBaseGC + (iPage << PAGE_SHIFT);
    2411                 *ppv = pv;
    2412                 STAM_COUNTER_INC(&pVM->pgm.s.CTX_SUFF(pStats)->StatRCDynMapCacheHits);
    2413                 Log4(("PGMGCDynMapHCPage: HCPhys=%RHp pv=%p iPage=%d iCache=%d\n", HCPhys, pv, iPage, iCache));
    2414                 return VINF_SUCCESS;
    2415             }
    2416             LogFlow(("Out of sync entry %d\n", iPage));
    2417         }
    2418     }
    2419     AssertCompile(RT_ELEMENTS(pVM->pgm.s.aHCPhysDynPageMapCache) == 8);
    2420     AssertCompile((MM_HYPER_DYNAMIC_SIZE >> PAGE_SHIFT) == 16);
    2421     STAM_COUNTER_INC(&pVM->pgm.s.CTX_SUFF(pStats)->StatRCDynMapCacheMisses);
    2422 
    2423     /*
    2424      * Update the page tables.
    2425      */
    2426     unsigned iPage = pVM->pgm.s.iDynPageMapLast;
    2427     unsigned i;
    2428     for (i = 0; i < (MM_HYPER_DYNAMIC_SIZE >> PAGE_SHIFT); i++)
    2429     {
    2430         pVM->pgm.s.iDynPageMapLast = iPage = (iPage + 1) & ((MM_HYPER_DYNAMIC_SIZE >> PAGE_SHIFT) - 1);
    2431         if (!pVM->pgm.s.aLockedDynPageMapCache[iPage])
    2432             break;
    2433         iPage++;
    2434     }
    2435     AssertRelease(i != (MM_HYPER_DYNAMIC_SIZE >> PAGE_SHIFT));
    2436 
    2437     pVM->pgm.s.aHCPhysDynPageMapCache[iPage & (RT_ELEMENTS(pVM->pgm.s.aHCPhysDynPageMapCache) - 1)] = HCPhys;
    2438     pVM->pgm.s.paDynPageMap32BitPTEsGC[iPage].u = (uint32_t)HCPhys | X86_PTE_P | X86_PTE_A | X86_PTE_D;
    2439     pVM->pgm.s.paDynPageMapPaePTEsGC[iPage].u   =           HCPhys | X86_PTE_P | X86_PTE_A | X86_PTE_D;
    2440     pVM->pgm.s.aLockedDynPageMapCache[iPage]    = 0;
    2441 
    2442     void *pv = pVM->pgm.s.pbDynPageMapBaseGC + (iPage << PAGE_SHIFT);
    2443     *ppv = pv;
    2444     ASMInvalidatePage(pv);
    2445     Log4(("PGMGCDynMapHCPage: HCPhys=%RHp pv=%p iPage=%d\n", HCPhys, pv, iPage));
    2446     return VINF_SUCCESS;
    2447 }
    2448 
    2449 
    2450 /**
    2451  * Temporarily lock a dynamic page to prevent it from being reused.
    2452  *
    2453  * @param   pVM         VM handle.
    2454  * @param   GCPage      GC address of page
    2455  */
    2456 VMMDECL(void) PGMDynLockHCPage(PVM pVM, RCPTRTYPE(uint8_t *) GCPage)
    2457 {
    2458     unsigned iPage;
    2459 
    2460     Assert(GCPage >= pVM->pgm.s.pbDynPageMapBaseGC && GCPage < (pVM->pgm.s.pbDynPageMapBaseGC + MM_HYPER_DYNAMIC_SIZE));
    2461     iPage = ((uintptr_t)(GCPage - pVM->pgm.s.pbDynPageMapBaseGC)) >> PAGE_SHIFT;
    2462     ASMAtomicIncU32(&pVM->pgm.s.aLockedDynPageMapCache[iPage]);
    2463     Log4(("PGMDynLockHCPage %RRv iPage=%d\n", GCPage, iPage));
    2464 }
    2465 
    2466 
    2467 /**
    2468  * Unlock a dynamic page
    2469  *
    2470  * @param   pVM         VM handle.
    2471  * @param   GCPage      GC address of page
    2472  */
    2473 VMMDECL(void) PGMDynUnlockHCPage(PVM pVM, RCPTRTYPE(uint8_t *) GCPage)
    2474 {
    2475     unsigned iPage;
    2476 
    2477     AssertCompile(RT_ELEMENTS(pVM->pgm.s.aLockedDynPageMapCache) == 2* RT_ELEMENTS(pVM->pgm.s.aHCPhysDynPageMapCache));
    2478     AssertCompileMemberSize(VM, pgm.s.aLockedDynPageMapCache, sizeof(uint32_t) * (MM_HYPER_DYNAMIC_SIZE >> (PAGE_SHIFT)));
    2479 
    2480     Assert(GCPage >= pVM->pgm.s.pbDynPageMapBaseGC && GCPage < (pVM->pgm.s.pbDynPageMapBaseGC + MM_HYPER_DYNAMIC_SIZE));
    2481     iPage = ((uintptr_t)(GCPage - pVM->pgm.s.pbDynPageMapBaseGC)) >> PAGE_SHIFT;
    2482     Assert(pVM->pgm.s.aLockedDynPageMapCache[iPage]);
    2483     ASMAtomicDecU32(&pVM->pgm.s.aLockedDynPageMapCache[iPage]);
    2484     Log4(("PGMDynUnlockHCPage %RRv iPage=%d\n", GCPage, iPage));
    2485 }
    2486 
    2487 
    2488 #  ifdef VBOX_STRICT
    2489 /**
    2490  * Check for lock leaks.
    2491  *
    2492  * @param   pVM         VM handle.
    2493  */
    2494 VMMDECL(void) PGMDynCheckLocks(PVM pVM)
    2495 {
    2496     for (unsigned i = 0; i < RT_ELEMENTS(pVM->pgm.s.aLockedDynPageMapCache); i++)
    2497         Assert(!pVM->pgm.s.aLockedDynPageMapCache[i]);
    2498 }
    2499 #  endif /* VBOX_STRICT */
    2500 
    2501 # endif /* IN_RC */
    25022309#endif /* IN_RC || VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0 */
    2503 
    25042310#if !defined(IN_R0) || defined(LOG_ENABLED)
    25052311
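
The removed PGMDynMapHCPage / PGMDynLockHCPage / PGMDynUnlockHCPage code above guarded against such reuse with a per-slot lock counter (aLockedDynPageMapCache), and the slot-replacement loop skipped entries whose counter was non-zero. A minimal standalone sketch of that idea, with invented names and none of the cache-translation details, might look like this:

    /* Standalone sketch of a mapping ring that skips "locked" slots when
     * recycling, mirroring the removed aLockedDynPageMapCache logic.
     * All names are invented for illustration. */
    #include <assert.h>
    #include <stdint.h>
    #include <stdio.h>

    #define NUM_SLOTS 16

    static uint64_t g_aSlotPhys[NUM_SLOTS];
    static uint32_t g_acLocks[NUM_SLOTS];     /* per-slot lock counter */
    static unsigned g_iLast = 0;

    static unsigned DynMapPage(uint64_t Phys)
    {
        /* Advance round-robin, but refuse to evict a locked slot. */
        unsigned i;
        unsigned iSlot = g_iLast;
        for (i = 0; i < NUM_SLOTS; i++)
        {
            iSlot = (iSlot + 1) % NUM_SLOTS;
            if (!g_acLocks[iSlot])
                break;
        }
        assert(i < NUM_SLOTS);                /* every slot locked would be fatal */
        g_iLast = iSlot;
        g_aSlotPhys[iSlot] = Phys;
        return iSlot;
    }

    static void DynLockPage(unsigned iSlot)   { g_acLocks[iSlot]++; }
    static void DynUnlockPage(unsigned iSlot) { assert(g_acLocks[iSlot]); g_acLocks[iSlot]--; }

    int main(void)
    {
        unsigned iMine = DynMapPage(0x1000);
        DynLockPage(iMine);                   /* pin it while we work on it */

        for (uint64_t Phys = 0x2000; Phys < 0x2000 + 32 * 0x1000; Phys += 0x1000)
            DynMapPage(Phys);                 /* plenty of churn, the slot survives */

        printf("slot %u still maps 0x%llx\n", iMine, (unsigned long long)g_aSlotPhys[iMine]);
        DynUnlockPage(iMine);
        return 0;
    }

The protection is only as good as the callers who remember to take the lock, which fits the commit message's observation that reuse was seen in seemingly benign code paths.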
  • trunk/src/VBox/VMM/VMMAll/PGMAllBth.h

    r31207 r31402  
    383383    *pfLockTaken = false;
    384384
    385 # if defined(IN_RC) && defined(VBOX_STRICT)
    386     PGMDynCheckLocks(pVM);
    387 # endif
    388 
    389385# if  (   PGM_GST_TYPE == PGM_TYPE_32BIT || PGM_GST_TYPE == PGM_TYPE_REAL || PGM_GST_TYPE == PGM_TYPE_PROT \
    390386       || PGM_GST_TYPE == PGM_TYPE_PAE   || PGM_GST_TYPE == PGM_TYPE_AMD64) \
     
    433429    if (uErr & X86_TRAP_PF_RSVD)
    434430    {
     431/** @todo This is not complete code. take locks */
    435432        Assert(uErr & X86_TRAP_PF_P);
    436433        PPGMPAGE pPage;
     
    563560            return VINF_SUCCESS;
    564561        }
    565 #ifndef IN_RC
    566562        AssertMsg(GstWalk.Pde.u == GstWalk.pPde->u || GstWalk.pPte->u == GstWalk.pPde->u, ("%RX64 %RX64\n", (uint64_t)GstWalk.Pde.u, (uint64_t)GstWalk.pPde->u));
    567563        AssertMsg(GstWalk.Core.fBigPage || GstWalk.Pte.u == GstWalk.pPte->u, ("%RX64 %RX64\n", (uint64_t)GstWalk.Pte.u, (uint64_t)GstWalk.pPte->u));
    568 #else
    569         /* Ugly hack, proper fix is comming up later. */
    570         if (   !(GstWalk.Pde.u == GstWalk.pPde->u || GstWalk.pPte->u == GstWalk.pPde->u)
    571             || !(GstWalk.Core.fBigPage || GstWalk.Pte.u == GstWalk.pPte->u) )
    572         {
    573             rc = PGM_GST_NAME(Walk)(pVCpu, pvFault, &GstWalk);
    574             if (RT_FAILURE_NP(rc))
    575                 return VBOXSTRICTRC_TODO(PGM_BTH_NAME(Trap0eHandlerGuestFault)(pVCpu, &GstWalk, uErr));
    576         }
    577 #endif
    578564    }
    579565
     
    11481134    }
    11491135
    1150 # if defined(IN_RC)
    1151     /* Make sure the dynamic pPdeDst mapping will not be reused during this function. */
    1152     PGMDynLockHCPage(pVM, (uint8_t *)pPdeDst);
    1153 # endif
    1154 
    11551136    /*
    11561137     * Get the guest PD entry and calc big page.
     
    12951276                    LogFlow(("Skipping flush for big page containing %RGv (PD=%X .u=%RX64)-> nothing has changed!\n", GCPtrPage, iPDSrc, PdeSrc.u));
    12961277                    STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_SUFF(pStats)->CTX_MID_Z(Stat,InvalidatePage4MBPagesSkip));
    1297 # if defined(IN_RC)
    1298                     /* Make sure the dynamic pPdeDst mapping will not be reused during this function. */
    1299                     PGMDynUnlockHCPage(pVM, (uint8_t *)pPdeDst);
    1300 # endif
     1278                    PGM_DYNMAP_UNUSED_HINT(pVCpu, pPdeDst);
    13011279                    return VINF_SUCCESS;
    13021280                }
     
    13351313        }
    13361314    }
    1337 # if defined(IN_RC)
    1338     /* Make sure the dynamic pPdeDst mapping will not be reused during this function. */
    1339     PGMDynUnlockHCPage(pVM, (uint8_t *)pPdeDst);
    1340 # endif
     1315    PGM_DYNMAP_UNUSED_HINT(pVCpu, pPdeDst);
    13411316    return rc;
    13421317
     
    17851760    PPGMPOOLPAGE    pShwPde  = pgmPoolGetPage(pPool, pPdptDst->a[iPdpt].u & X86_PDPE_PG_MASK);
    17861761    Assert(pShwPde);
    1787 # endif
    1788 
    1789 # if defined(IN_RC)
    1790     /* Make sure the dynamic pPdeDst mapping will not be reused during this function. */
    1791     PGMDynLockHCPage(pVM, (uint8_t *)pPdeDst);
    17921762# endif
    17931763
     
    20211991                }
    20221992            }
    2023 # if defined(IN_RC)
    2024             /* Make sure the dynamic pPdeDst mapping will not be reused during this function. */
    2025             PGMDynUnlockHCPage(pVM, (uint8_t *)pPdeDst);
    2026 # endif
     1993            PGM_DYNMAP_UNUSED_HINT(pVCpu, pPdeDst);
    20271994            return VINF_SUCCESS;
    20281995        }
     
    20502017    ASMAtomicWriteSize(pPdeDst, 0);
    20512018
    2052 # if defined(IN_RC)
    2053     /* Make sure the dynamic pPdeDst mapping will not be reused during this function. */
    2054     PGMDynUnlockHCPage(pVM, (uint8_t *)pPdeDst);
    2055 # endif
     2019    PGM_DYNMAP_UNUSED_HINT(pVCpu, pPdeDst);
    20562020    PGM_INVL_VCPU_TLBS(pVCpu);
    20572021    return VINF_PGM_SYNCPAGE_MODIFIED_PDE;
     
    25642528    Assert(!PdeDst.n.u1Present); /* We're only supposed to call SyncPT on PDE!P and conflicts.*/
    25652529
    2566 # if defined(IN_RC)
    2567     /* Make sure the dynamic pPdeDst mapping will not be reused during this function. */
    2568     PGMDynLockHCPage(pVM, (uint8_t *)pPdeDst);
    2569 # endif
    2570 
    25712530    /*
    25722531     * Sync page directory entry.
     
    26462605            }
    26472606            ASMAtomicWriteSize(pPdeDst, PdeDst.u);
    2648 # if defined(IN_RC)
    2649             PGMDynUnlockHCPage(pVM, (uint8_t *)pPdeDst);
    2650 # endif
     2607            PGM_DYNMAP_UNUSED_HINT(pVCpu, pPdeDst);
    26512608            return VINF_SUCCESS;
    26522609        }
     
    26542611        {
    26552612            VMCPU_FF_SET(pVCpu, VMCPU_FF_PGM_SYNC_CR3);
    2656 # if defined(IN_RC)
    2657             PGMDynUnlockHCPage(pVM, (uint8_t *)pPdeDst);
    2658 # endif
     2613            PGM_DYNMAP_UNUSED_HINT(pVCpu, pPdeDst);
    26592614            return VINF_PGM_SYNC_CR3;
    26602615        }
     
    26872642                         | (PdeSrc.u & ~(GST_PDE_PG_MASK | X86_PDE_AVL_MASK | X86_PDE_PCD | X86_PDE_PWT | X86_PDE_PS | X86_PDE4M_G | X86_PDE4M_D));
    26882643                ASMAtomicWriteSize(pPdeDst, PdeDst.u);
    2689 # if defined(IN_RC)
    2690                 PGMDynUnlockHCPage(pVM, (uint8_t *)pPdeDst);
    2691 # endif
     2644                PGM_DYNMAP_UNUSED_HINT(pVCpu, pPdeDst);
    26922645
    26932646                /*
     
    27682721
    27692722            /**
    2770              * @todo It might be more efficient to sync only a part of the 4MB page (similar to what we do for 4kb PDs).
     2723             * @todo It might be more efficient to sync only a part of the 4MB
     2724             *       page (similar to what we do for 4KB PDs).
    27712725             */
    27722726
     
    27952749            }
    27962750            ASMAtomicWriteSize(pPdeDst, PdeDst.u);
    2797 # if defined(IN_RC)
    2798             PGMDynUnlockHCPage(pVM, (uint8_t *)pPdeDst);
    2799 # endif
     2751            PGM_DYNMAP_UNUSED_HINT(pVCpu, pPdeDst);
    28002752
    28012753            /*
     
    33913343# endif
    33923344
    3393 # if defined(IN_RC)
    3394     /* Make sure the dynamic pPdeDst mapping will not be reused during this function. */
    3395     PGMDynLockHCPage(pVM, (uint8_t *)pPdeDst);
    3396 # endif
    3397 
    33983345    if (!pPdeDst->n.u1Present)
    33993346    {
     
    34013348        if (rc != VINF_SUCCESS)
    34023349        {
    3403 # if defined(IN_RC)
    3404             /* Make sure the dynamic pPdeDst mapping will not be reused during this function. */
    3405             PGMDynUnlockHCPage(pVM, (uint8_t *)pPdeDst);
    3406 # endif
     3350            PGM_DYNMAP_UNUSED_HINT(pVCpu, pPdeDst);
    34073351            pgmUnlock(pVM);
    34083352            AssertRC(rc);
     
    34493393        }
    34503394    }
    3451 # if defined(IN_RC)
    3452     /* Make sure the dynamic pPdeDst mapping will not be reused during this function. */
    3453     PGMDynUnlockHCPage(pVM, (uint8_t *)pPdeDst);
    3454 # endif
     3395    PGM_DYNMAP_UNUSED_HINT(pVCpu, pPdeDst);
    34553396    pgmUnlock(pVM);
    34563397    return rc;
     
    43594300    AssertReturn(pPageCR3, VERR_INTERNAL_ERROR_2);
    43604301    HCPhysGuestCR3 = PGM_PAGE_GET_HCPHYS(pPageCR3);
    4361     /** @todo this needs some reworking wrt. locking.  */
     4302    /** @todo this needs some reworking wrt. locking?  */
    43624303# if defined(IN_RC) || defined(VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0)
    43634304    HCPtrGuestCR3 = NIL_RTHCPTR;
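
Throughout PGMAllBth.h the explicit PGMDynLockHCPage / PGMDynUnlockHCPage brackets are replaced with single PGM_DYNMAP_UNUSED_HINT calls. The hint is advisory rather than a hard unlock; one plausible standalone model of such a hint (invented names, not the actual macro) is a flag that marks a slot as the preferred victim for the next mapping:

    /* Sketch of an "unused hint": the caller tells the mapper that a mapping
     * will not be touched again, so its slot becomes the preferred victim for
     * reuse.  Names are invented for illustration. */
    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    #define NUM_SLOTS 4

    static uint64_t g_aSlotPhys[NUM_SLOTS];
    static bool     g_afUnused[NUM_SLOTS];    /* set by the hint */
    static unsigned g_iLast = 0;

    static unsigned DynMapPage(uint64_t Phys)
    {
        /* Prefer a slot hinted as unused before falling back to round-robin. */
        for (unsigned i = 0; i < NUM_SLOTS; i++)
            if (g_afUnused[i])
            {
                g_afUnused[i] = false;
                g_aSlotPhys[i] = Phys;
                return i;
            }
        g_iLast = (g_iLast + 1) % NUM_SLOTS;
        g_aSlotPhys[g_iLast] = Phys;
        return g_iLast;
    }

    static void DynMapUnusedHint(unsigned iSlot)
    {
        g_afUnused[iSlot] = true;             /* purely advisory, nothing is torn down */
    }

    int main(void)
    {
        unsigned iSlot = DynMapPage(0x1000);
        DynMapUnusedHint(iSlot);              /* done with it, recycle me first */
        unsigned iNext = DynMapPage(0x2000);
        printf("hinted slot %u reused for the next mapping: %s\n",
               iSlot, iNext == iSlot ? "yes" : "no");
        return 0;
    }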
  • trunk/src/VBox/VMM/VMMAll/PGMAllMap.cpp

    r31174 r31402  
    247247                PX86PD pShw32BitPd = pgmShwGet32BitPDPtr(pVCpu);
    248248                AssertFatal(pShw32BitPd);
    249 #ifdef IN_RC    /* Lock mapping to prevent it from being reused during pgmPoolFree. */
    250                 PGMDynLockHCPage(pVM, (uint8_t *)pShw32BitPd);
    251 #endif
     249
    252250                /* Free any previous user, unless it's us. */
    253251                Assert(   (pShw32BitPd->a[iNewPDE].u & (X86_PDE_P | PGM_PDFLAGS_MAPPING)) != (X86_PDE_P | PGM_PDFLAGS_MAPPING)
     
    260258                pShw32BitPd->a[iNewPDE].u = PGM_PDFLAGS_MAPPING | X86_PDE_P | X86_PDE_A | X86_PDE_RW | X86_PDE_US
    261259                                          | (uint32_t)pMap->aPTs[i].HCPhysPT;
    262 #ifdef IN_RC
    263                 /* Unlock dynamic mappings again. */
    264                 PGMDynUnlockHCPage(pVM, (uint8_t *)pShw32BitPd);
    265 #endif
     260                PGM_DYNMAP_UNUSED_HINT_VM(pVM, pShw32BitPd);
    266261                break;
    267262            }
     
    274269                PX86PDPT        pShwPdpt  = pgmShwGetPaePDPTPtr(pVCpu);
    275270                Assert(pShwPdpt);
    276 #ifdef IN_RC    /* Lock mapping to prevent it from being reused during pgmShwSyncPaePDPtr. */
    277                 PGMDynLockHCPage(pVM, (uint8_t *)pShwPdpt);
    278 #endif
    279271
    280272                /*
     
    302294                }
    303295                Assert(pShwPaePd);
    304 #ifdef IN_RC    /* Lock mapping to prevent it from being reused during pgmPoolFree. */
    305                 PGMDynLockHCPage(pVM, (uint8_t *)pShwPaePd);
    306 #endif
    307296
    308297                /*
     
    357346                pShwPdpt->a[iPdPt].u |= PGM_PLXFLAGS_MAPPING;
    358347
    359 #ifdef IN_RC
    360                 /* Unlock dynamic mappings again. */
    361                 PGMDynUnlockHCPage(pVM, (uint8_t *)pShwPaePd);
    362                 PGMDynUnlockHCPage(pVM, (uint8_t *)pShwPdpt);
    363 #endif
     348                PGM_DYNMAP_UNUSED_HINT_VM(pVM, pShwPaePd);
     349                PGM_DYNMAP_UNUSED_HINT_VM(pVM, pShwPdpt);
    364350                break;
    365351            }
     
    406392    if (    PGMGetGuestMode(pVCpu) >= PGMMODE_PAE
    407393        &&  pShwPageCR3 != pVCpu->pgm.s.CTX_SUFF(pShwPageCR3))
    408     {
    409394        pCurrentShwPdpt = pgmShwGetPaePDPTPtr(pVCpu);
    410 #ifdef IN_RC    /* Lock mapping to prevent it from being reused (currently not possible). */
    411         if (pCurrentShwPdpt)
    412             PGMDynLockHCPage(pVM, (uint8_t *)pCurrentShwPdpt);
    413 #endif
    414     }
    415395
    416396    unsigned i = pMap->cPTs;
     
    503483        }
    504484    }
    505 #ifdef IN_RC
    506     /* Unlock dynamic mappings again. */
    507     if (pCurrentShwPdpt)
    508         PGMDynUnlockHCPage(pVM, (uint8_t *)pCurrentShwPdpt);
    509 #endif
     485
     486    PGM_DYNMAP_UNUSED_HINT_VM(pVM, pCurrentShwPdpt);
    510487}
    511488#endif /* !IN_RING0 */
  • trunk/src/VBox/VMM/VMMAll/PGMAllPhys.cpp

    r31208 r31402  
    738738    AssertReturn(idChunk != NIL_GMM_CHUNKID, VERR_INVALID_PARAMETER);
    739739
    740 #ifdef IN_RC
     740#if defined(IN_RC) || defined(VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0)
    741741    /*
    742742     * Map it by HCPhys.
    743743     */
    744     return PGMDynMapHCPage(pVM, HCPhys, ppv);
    745 
    746 #elif defined(VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0)
    747     /*
    748      * Map it by HCPhys.
    749      */
    750     return pgmR0DynMapHCPageInlined(VMMGetCpu(pVM), HCPhys, ppv);
     744    return pgmRZDynMapHCPageInlined(VMMGetCpu(pVM), HCPhys, ppv  RTLOG_COMMA_SRC_POS);
    751745
    752746#else
     
    824818    RTHCPHYS HCPhys = PGM_PAGE_GET_HCPHYS(pPage);
    825819    Assert(HCPhys != pVM->pgm.s.HCPhysZeroPg);
    826 # ifdef VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0
    827     pgmR0DynMapHCPageInlined(VMMGetCpu(pVM), HCPhys, ppv);
    828 # else
    829     PGMDynMapHCPage(pVM, HCPhys, ppv);
    830 # endif
     820    pgmRZDynMapHCPageInlined(VMMGetCpu(pVM), HCPhys, ppv RTLOG_COMMA_SRC_POS);
    831821    return VINF_SUCCESS;
    832822
     
    11381128     */
    11391129#if defined(IN_RC) || defined(VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0)
    1140     *ppv = pgmDynMapHCPageOff(pVM, PGM_PAGE_GET_HCPHYS(pPage) | (GCPhys & PAGE_OFFSET_MASK));
     1130    *ppv = pgmRZDynMapHCPageOff(pVM, PGM_PAGE_GET_HCPHYS(pPage) | (GCPhys & PAGE_OFFSET_MASK) RTLOG_COMMA_SRC_POS);
    11411131#else
    11421132    PPGMPAGEMAPTLBE pTlbe;
     
    11761166     */
    11771167#if defined(IN_RC) || defined(VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0)
    1178     *ppv = pgmDynMapHCPageOff(pVM, PGM_PAGE_GET_HCPHYS(pPage) | (GCPhys & PAGE_OFFSET_MASK)); /** @todo add a read only flag? */
     1168    *ppv = pgmRZDynMapHCPageOff(pVM, PGM_PAGE_GET_HCPHYS(pPage) | (GCPhys & PAGE_OFFSET_MASK) RTLOG_COMMA_SRC_POS); /** @todo add a read only flag? */
    11791169#else
    11801170    PPGMPAGEMAPTLBE pTlbe;
     
    12341224        if (RT_SUCCESS(rc))
    12351225        {
    1236             *ppv = pgmDynMapHCPageOff(pVM, PGM_PAGE_GET_HCPHYS(pPage) | (GCPhys & PAGE_OFFSET_MASK)); /** @todo add a read only flag? */
     1226            *ppv = pgmRZDynMapHCPageOff(pVM, PGM_PAGE_GET_HCPHYS(pPage) | (GCPhys & PAGE_OFFSET_MASK) RTLOG_COMMA_SRC_POS); /** @todo add a read only flag? */
    12371227# if 0
    12381228            pLock->pvMap = 0;
     
    13451335        else
    13461336        {
    1347             *ppv = pgmDynMapHCPageOff(pVM, PGM_PAGE_GET_HCPHYS(pPage) | (GCPhys & PAGE_OFFSET_MASK)); /** @todo add a read only flag? */
     1337            *ppv = pgmRZDynMapHCPageOff(pVM, PGM_PAGE_GET_HCPHYS(pPage) | (GCPhys & PAGE_OFFSET_MASK) RTLOG_COMMA_SRC_POS); /** @todo add a read only flag? */
    13481338# if 0
    13491339            pLock->pvMap = 0;
     
    14931483    pLock->u32Dummy = 0;
    14941484
    1495 #else   /* IN_RING3 */
     1485#else
    14961486    PPGMPAGEMAP pMap       = (PPGMPAGEMAP)pLock->pvMap;
    14971487    PPGMPAGE    pPage      = (PPGMPAGE)(pLock->uPageAndType & ~PGMPAGEMAPLOCK_TYPE_MASK);
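
Several of the rewritten call sites in PGMAllPhys.cpp map a page and then fold the sub-page offset of GCPhys back into the returned pointer with a bitwise OR (see the pgmRZDynMapHCPageOff lines above). That works because the mapping address is page aligned, so its low offset bits are zero. A tiny standalone illustration, assuming 4 KB pages and invented addresses:

    /* Combining a page-aligned mapping address with the offset bits of a
     * physical address; assumes 4 KB pages.  Values are illustrative only. */
    #include <stdint.h>
    #include <stdio.h>

    #define PAGE_SHIFT        12
    #define PAGE_OFFSET_MASK  ((UINT64_C(1) << PAGE_SHIFT) - 1)

    int main(void)
    {
        uint64_t  GCPhys   = UINT64_C(0x12345678); /* guest-physical address */
        uintptr_t uMapping = 0xffffc000;           /* page-aligned mapping of that page */

        /* The low 12 bits of the mapping are zero, so OR-ing in the offset
         * yields the address of the exact byte within the mapped page. */
        uintptr_t uPtr = uMapping | (uintptr_t)(GCPhys & PAGE_OFFSET_MASK);

        printf("mapped byte at %#lx (offset %#llx)\n",
               (unsigned long)uPtr, (unsigned long long)(GCPhys & PAGE_OFFSET_MASK));
        return 0;
    }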
  • trunk/src/VBox/VMM/VMMAll/PGMAllPool.cpp

    r31170 r31402  
    8888}
    8989
    90 /** @def PGMPOOL_PAGE_2_LOCKED_PTR
    91  * Maps a pool page pool into the current context and lock it (RC only).
    92  *
    93  * @returns VBox status code.
    94  * @param   pVM     The VM handle.
    95  * @param   pPage   The pool page.
    96  *
    97  * @remark  In RC this uses PGMGCDynMapHCPage(), so it will consume of the
    98  *          small page window employeed by that function. Be careful.
    99  * @remark  There is no need to assert on the result.
    100  */
    101 #if defined(IN_RC)
    102 DECLINLINE(void *) PGMPOOL_PAGE_2_LOCKED_PTR(PVM pVM, PPGMPOOLPAGE pPage)
    103 {
    104     void *pv = pgmPoolMapPageInlined(pVM, pPage);
    105 
    106     /* Make sure the dynamic mapping will not be reused. */
    107     if (pv)
    108         PGMDynLockHCPage(pVM, (uint8_t *)pv);
    109 
    110     return pv;
    111 }
    112 #else
    113 # define PGMPOOL_PAGE_2_LOCKED_PTR(pVM, pPage)  PGMPOOL_PAGE_2_PTR(pVM, pPage)
    114 #endif
    115 
    116 /** @def PGMPOOL_UNLOCK_PTR
    117  * Unlock a previously locked dynamic caching (RC only).
    118  *
    119  * @returns VBox status code.
    120  * @param   pVM     The VM handle.
    121  * @param   pPage   The pool page.
    122  *
    123  * @remark  In RC this uses PGMGCDynMapHCPage(), so it will consume of the
    124  *          small page window employeed by that function. Be careful.
    125  * @remark  There is no need to assert on the result.
    126  */
    127 #if defined(IN_RC)
    128 DECLINLINE(void) PGMPOOL_UNLOCK_PTR(PVM pVM, void *pvPage)
    129 {
    130     if (pvPage)
    131         PGMDynUnlockHCPage(pVM, (uint8_t *)pvPage);
    132 }
    133 #else
    134 # define PGMPOOL_UNLOCK_PTR(pVM, pPage)  do {} while (0)
    135 #endif
    136 
    13790
    13891/**
     
    247200            {
    248201                STAM_COUNTER_INC(&pPool->CTX_MID_Z(StatMonitor,FaultPT));
    249                 uShw.pv = PGMPOOL_PAGE_2_LOCKED_PTR(pVM, pPage);
     202                uShw.pv = PGMPOOL_PAGE_2_PTR(pVM, pPage);
    250203                const unsigned iShw = off / sizeof(X86PTE);
    251204                LogFlow(("PGMPOOLKIND_32BIT_PT_FOR_32BIT_PT iShw=%x\n", iShw));
     
    270223            {
    271224                STAM_COUNTER_INC(&pPool->CTX_MID_Z(StatMonitor,FaultPT));
    272                 uShw.pv = PGMPOOL_PAGE_2_LOCKED_PTR(pVM, pPage);
     225                uShw.pv = PGMPOOL_PAGE_2_PTR(pVM, pPage);
    273226                if (!((off ^ pPage->GCPhys) & (PAGE_SIZE / 2)))
    274227                {
     
    300253                unsigned iShwPdpt = iGst / 256;
    301254                unsigned iShw     = (iGst % 256) * 2;
    302                 uShw.pv = PGMPOOL_PAGE_2_LOCKED_PTR(pVM, pPage);
     255                uShw.pv = PGMPOOL_PAGE_2_PTR(pVM, pPage);
    303256
    304257                LogFlow(("pgmPoolMonitorChainChanging PAE for 32 bits: iGst=%x iShw=%x idx = %d page idx=%d\n", iGst, iShw, iShwPdpt, pPage->enmKind - PGMPOOLKIND_PAE_PD0_FOR_32BIT_PD));
     
    363316            case PGMPOOLKIND_PAE_PT_FOR_PAE_PT:
    364317            {
    365                 uShw.pv = PGMPOOL_PAGE_2_LOCKED_PTR(pVM, pPage);
     318                uShw.pv = PGMPOOL_PAGE_2_PTR(pVM, pPage);
    366319                const unsigned iShw = off / sizeof(X86PTEPAE);
    367320                STAM_COUNTER_INC(&pPool->CTX_MID_Z(StatMonitor,FaultPT));
     
    409362            case PGMPOOLKIND_32BIT_PD:
    410363            {
    411                 uShw.pv = PGMPOOL_PAGE_2_LOCKED_PTR(pVM, pPage);
     364                uShw.pv = PGMPOOL_PAGE_2_PTR(pVM, pPage);
    412365                const unsigned iShw = off / sizeof(X86PTE);         // ASSUMING 32-bit guest paging!
    413366
     
    489442            case PGMPOOLKIND_PAE_PD_FOR_PAE_PD:
    490443            {
    491                 uShw.pv = PGMPOOL_PAGE_2_LOCKED_PTR(pVM, pPage);
     444                uShw.pv = PGMPOOL_PAGE_2_PTR(pVM, pPage);
    492445                const unsigned iShw = off / sizeof(X86PDEPAE);
    493446                STAM_COUNTER_INC(&pPool->CTX_MID_Z(StatMonitor,FaultPD));
     
    566519                const unsigned offPdpt = GCPhysFault - pPage->GCPhys;
    567520
    568                 uShw.pv = PGMPOOL_PAGE_2_LOCKED_PTR(pVM, pPage);
     521                uShw.pv = PGMPOOL_PAGE_2_PTR(pVM, pPage);
    569522                const unsigned iShw = offPdpt / sizeof(X86PDPE);
    570523                if (iShw < X86_PG_PAE_PDPE_ENTRIES)          /* don't use RT_ELEMENTS(uShw.pPDPT->a), because that's for long mode only */
     
    633586            {
    634587                STAM_COUNTER_INC(&pPool->CTX_MID_Z(StatMonitor,FaultPD));
    635                 uShw.pv = PGMPOOL_PAGE_2_LOCKED_PTR(pVM, pPage);
     588                uShw.pv = PGMPOOL_PAGE_2_PTR(pVM, pPage);
    636589                const unsigned iShw = off / sizeof(X86PDEPAE);
    637590                Assert(!(uShw.pPDPae->a[iShw].u & PGM_PDFLAGS_MAPPING));
     
    673626                 * - messing with the bits of pd pointers without changing the physical address
    674627                 */
    675                 uShw.pv = PGMPOOL_PAGE_2_LOCKED_PTR(pVM, pPage);
     628                uShw.pv = PGMPOOL_PAGE_2_PTR(pVM, pPage);
    676629                const unsigned iShw = off / sizeof(X86PDPE);
    677630                if (uShw.pPDPT->a[iShw].n.u1Present)
     
    703656                 * - messing with the bits of pd pointers without changing the physical address
    704657                 */
    705                 uShw.pv = PGMPOOL_PAGE_2_LOCKED_PTR(pVM, pPage);
     658                uShw.pv = PGMPOOL_PAGE_2_PTR(pVM, pPage);
    706659                const unsigned iShw = off / sizeof(X86PDPE);
    707660                if (uShw.pPML4->a[iShw].n.u1Present)
     
    730683                AssertFatalMsgFailed(("enmKind=%d\n", pPage->enmKind));
    731684        }
    732         PGMPOOL_UNLOCK_PTR(pVM, uShw.pv);
     685        PGM_DYNMAP_UNUSED_HINT_VM(pVM, uShw.pv);
    733686
    734687        /* next */
     
    960913    while (pRegFrame->rcx)
    961914    {
    962 #ifdef VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0
    963         uint32_t iPrevSubset = PGMDynMapPushAutoSubset(pVCpu);
     915#if defined(VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0) || defined(IN_RC)
     916        uint32_t iPrevSubset = PGMRZDynMapPushAutoSubset(pVCpu);
    964917        pgmPoolMonitorChainChanging(pVCpu, pPool, pPage, GCPhysFault, (RTGCPTR)pu32, uIncrement);
    965         PGMDynMapPopAutoSubset(pVCpu, iPrevSubset);
     918        PGMRZDynMapPopAutoSubset(pVCpu, iPrevSubset);
    966919#else
    967920        pgmPoolMonitorChainChanging(pVCpu, pPool, pPage, GCPhysFault, (RTGCPTR)pu32, uIncrement);
     
    1012965     * Clear all the pages. ASSUMES that pvFault is readable.
    1013966     */
    1014 #ifdef VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0
    1015     uint32_t    iPrevSubset = PGMDynMapPushAutoSubset(pVCpu);
     967#if defined(VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0) || defined(IN_RC)
     968    uint32_t    iPrevSubset = PGMRZDynMapPushAutoSubset(pVCpu);
    1016969    pgmPoolMonitorChainChanging(pVCpu, pPool, pPage, GCPhysFault, pvFault, DISGetParamSize(pDis, &pDis->param1));
    1017     PGMDynMapPopAutoSubset(pVCpu, iPrevSubset);
     970    PGMRZDynMapPopAutoSubset(pVCpu, iPrevSubset);
    1018971#else
    1019972    pgmPoolMonitorChainChanging(pVCpu, pPool, pPage, GCPhysFault, pvFault, DISGetParamSize(pDis, &pDis->param1));
     
    11131066    if (pPage->enmKind == PGMPOOLKIND_PAE_PT_FOR_PAE_PT)
    11141067    {
    1115         void *pvShw = PGMPOOL_PAGE_2_LOCKED_PTR(pPool->CTX_SUFF(pVM), pPage);
     1068        void *pvShw = PGMPOOL_PAGE_2_PTR(pPool->CTX_SUFF(pVM), pPage);
    11161069        void *pvGst;
    11171070        int rc = PGM_GCPHYS_2_PTR(pPool->CTX_SUFF(pVM), pPage->GCPhys, &pvGst); AssertReleaseRC(rc);
     
    14211374                    if (pTempPage->enmKind == PGMPOOLKIND_PAE_PT_FOR_PAE_PT)
    14221375                    {
    1423                         PX86PTPAE pShwPT2 = (PX86PTPAE)PGMPOOL_PAGE_2_LOCKED_PTR(pPool->CTX_SUFF(pVM), pTempPage);
     1376                        PX86PTPAE pShwPT2 = (PX86PTPAE)PGMPOOL_PAGE_2_PTR(pPool->CTX_SUFF(pVM), pTempPage);
    14241377
    14251378                        for (unsigned j = 0; j < RT_ELEMENTS(pShwPT->a); j++)
     
    15391492    pPage->fDirty = false;
    15401493
    1541 #ifdef VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0
    1542     uint32_t iPrevSubset = PGMDynMapPushAutoSubset(VMMGetCpu(pVM));
     1494#if defined(VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0) || defined(IN_RC)
     1495    PVMCPU   pVCpu = VMMGetCpu(pVM);
     1496    uint32_t iPrevSubset = PGMRZDynMapPushAutoSubset(pVCpu);
    15431497#endif
    15441498
     
    15571511    /* Flush those PTEs that have changed. */
    15581512    STAM_PROFILE_START(&pPool->StatTrackDeref,a);
    1559     void *pvShw = PGMPOOL_PAGE_2_LOCKED_PTR(pPool->CTX_SUFF(pVM), pPage);
     1513    void *pvShw = PGMPOOL_PAGE_2_PTR(pPool->CTX_SUFF(pVM), pPage);
    15601514    void *pvGst;
    15611515    bool  fFlush;
     
    15891543        Log(("Removed dirty page %RGp cMods=%d cChanges=%d\n", pPage->GCPhys, pPage->cModifications, cChanges));
    15901544
    1591 #ifdef VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0
    1592     PGMDynMapPopAutoSubset(VMMGetCpu(pVM), iPrevSubset);
     1545#if defined(VBOX_WITH_2X_4GB_ADDR_SPACE_R0) || defined(IN_RC)
     1546    PGMRZDynMapPopAutoSubset(pVCpu, iPrevSubset);
    15931547#endif
    15941548}
     
    16271581     * references to physical pages. (the HCPhys linear lookup is *extremely* expensive!)
    16281582     */
    1629     void *pvShw = PGMPOOL_PAGE_2_LOCKED_PTR(pPool->CTX_SUFF(pVM), pPage);
     1583    void *pvShw = PGMPOOL_PAGE_2_PTR(pPool->CTX_SUFF(pVM), pPage);
    16301584    void *pvGst;
    16311585    int rc = PGM_GCPHYS_2_PTR(pPool->CTX_SUFF(pVM), pPage->GCPhys, &pvGst); AssertReleaseRC(rc);
     
    33523306        else
    33533307        {
    3354 # ifdef VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0
     3308# if defined(VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0) || defined(IN_RC)
    33553309            /* Start a subset here because pgmPoolTrackFlushGCPhysPTsSlow and
    33563310               pgmPoolTrackFlushGCPhysPTs will/may kill the pool otherwise. */
    3357             uint32_t iPrevSubset = PGMDynMapPushAutoSubset(pVCpu);
     3311            uint32_t iPrevSubset = PGMRZDynMapPushAutoSubset(pVCpu);
    33583312# endif
    33593313
     
    33703324            *pfFlushTLBs = true;
    33713325
    3372 # ifdef VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0
    3373             PGMDynMapPopAutoSubset(pVCpu, iPrevSubset);
     3326# if defined(VBOX_WITH_2X_4GB_ADDR_SPACE_R0) || defined(IN_RC)
     3327            PGMRZDynMapPopAutoSubset(pVCpu, iPrevSubset);
    33743328# endif
    33753329        }
     
    36633617            AssertFatalMsgFailed(("enmKind=%d iUser=%#x iUserTable=%#x\n", pUserPage->enmKind, pUser->iUser, pUser->iUserTable));
    36643618    }
     3619    PGM_DYNMAP_UNUSED_HINT_VM(pPool->CTX_SUFF(pVM), u.pau64);
    36653620}
    36663621
     
    44354390     * Map the shadow page and take action according to the page kind.
    44364391     */
    4437     void *pvShw = PGMPOOL_PAGE_2_LOCKED_PTR(pPool->CTX_SUFF(pVM), pPage);
     4392    void *pvShw = PGMPOOL_PAGE_2_PTR(pPool->CTX_SUFF(pVM), pPage);
    44384393    switch (pPage->enmKind)
    44394394    {
     
    45394494    STAM_PROFILE_STOP(&pPool->StatZeroPage, z);
    45404495    pPage->fZeroed = true;
    4541     PGMPOOL_UNLOCK_PTR(pPool->CTX_SUFF(pVM), pvShw);
     4496    PGM_DYNMAP_UNUSED_HINT_VM(pPool->CTX_SUFF(pVM), pvShw);
    45424497    Assert(!pPage->cPresent);
    45434498}
     
    45964551    }
    45974552
    4598 #ifdef VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0
     4553#if defined(VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0) || defined(IN_RC)
    45994554    /* Start a subset so we won't run out of mapping space. */
    46004555    PVMCPU pVCpu = VMMGetCpu(pVM);
    4601     uint32_t iPrevSubset = PGMDynMapPushAutoSubset(pVCpu);
     4556    uint32_t iPrevSubset = PGMRZDynMapPushAutoSubset(pVCpu);
    46024557#endif
    46034558
     
    46294584    pgmPoolCacheFlushPage(pPool, pPage);
    46304585
    4631 #ifdef VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0
     4586#if defined(VBOX_WITH_2X_4GB_ADDR_SPACE_R0) || defined(IN_RC)
    46324587    /* Heavy stuff done. */
    4633     PGMDynMapPopAutoSubset(pVCpu, iPrevSubset);
     4588    PGMRZDynMapPopAutoSubset(pVCpu, iPrevSubset);
    46344589#endif
    46354590
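
The PGMRZDynMapPushAutoSubset / PGMRZDynMapPopAutoSubset pairs introduced in PGMAllPool.cpp bracket heavy work so that all mappings created inside the bracket can be released in one go afterwards. A standalone watermark-style sketch of that pattern (invented names; the real API is internal to the VMM) follows:

    /* Watermark-style "subset" over a growing set of mappings: push records the
     * current count, pop releases everything created after the push.
     * Names are invented for illustration. */
    #include <stdint.h>
    #include <stdio.h>

    #define MAX_ENTRIES 64

    static uint64_t g_aEntries[MAX_ENTRIES];
    static uint32_t g_cEntries = 0;

    static void MapPage(uint64_t Phys)        /* adds one entry to the set */
    {
        if (g_cEntries < MAX_ENTRIES)
            g_aEntries[g_cEntries++] = Phys;
    }

    static uint32_t PushAutoSubset(void)      /* remember the watermark */
    {
        return g_cEntries;
    }

    static void PopAutoSubset(uint32_t iPrev) /* drop everything above it */
    {
        g_cEntries = iPrev;
    }

    int main(void)
    {
        MapPage(0x1000);                      /* long-lived mapping */

        uint32_t iPrev = PushAutoSubset();
        for (uint64_t Phys = 0x2000; Phys < 0x2000 + 8 * 0x1000; Phys += 0x1000)
            MapPage(Phys);                    /* heavy, temporary work */
        PopAutoSubset(iPrev);                 /* all temporaries released at once */

        printf("%u mapping(s) left\n", g_cEntries);   /* prints: 1 mapping(s) left */
        return 0;
    }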
  • trunk/src/VBox/VMM/VMMAll/TRPMAll.cpp

    r30493 r31402  
    693693                        STAM_PROFILE_ADV_STOP(&pVM->trpm.s.aStatGCTraps[iOrgTrap], o);
    694694
    695                     CPUMGCCallGuestTrapHandler(pRegFrame, GuestIdte.Gen.u16SegSel | 1, pVM->trpm.s.aGuestTrapHandler[iGate], eflags.u32, ss_r0, (RTRCPTR)esp_r0);
     695                    PGMRZDynMapReleaseAutoSet(pVCpu);
     696                    CPUMGCCallGuestTrapHandler(pRegFrame, GuestIdte.Gen.u16SegSel | 1, pVM->trpm.s.aGuestTrapHandler[iGate],
     697                                               eflags.u32, ss_r0, (RTRCPTR)esp_r0);
    696698                    /* does not return */
    697699#else