VirtualBox

Changeset 30326 in vbox for trunk/src/VBox/VMM/VMMAll


Timestamp:
Jun 21, 2010 12:35:33 PM
Author:
vboxsync
Message:

PGM: PGMShwModifyPage/PGMShwSetPage -> PGMShwMakePageWritable, PGMShwMakePageReadonly & PGMShwMakePageNotPresent and made the low level worker make the page writable before setting the X86_PTE_RW bit. PGMR3PhysTlbGCPhys2Ptr should make write monitored pages writable (?). PGMDynMapGCPage and PGMDynMapGCPageOff must make the pages writable and take the PGM lock.

Location:
trunk/src/VBox/VMM/VMMAll
Files:
3 edited
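
The commit replaces the general-purpose PGMShwSetPage/PGMShwModifyPage entry points with intent-named, single-page wrappers that carry an operation hint. A minimal caller-side sketch of the migration, using only names that appear in the diffs below; the surrounding locals (pVCpu, pvFault, GCPtr, rc) are assumed to be the usual fault-handler context and are illustrative only:

    /* Before r30326: "make this shadow PTE writable" expressed as a flags/mask pair. */
    rc = PGMShwModifyPage(pVCpu, pvFault, 1 /* cb */, X86_PTE_RW, ~(uint64_t)X86_PTE_RW);

    /* After r30326: state the intent and pass a PGM_MK_PG_XXX hint so the low-level
     * worker can make the backing physical page writable (shared, zero and
     * write-monitored pages) before it sets X86_PTE_RW in the shadow PTE. */
    rc = PGMShwMakePageWritable(pVCpu, pvFault, PGM_MK_PG_IS_WRITE_FAULT);

    /* The other two wrappers added in PGMAll.cpp follow the same pattern;
     * passing 0 for fOpFlags is allowed by the worker's assertion. */
    rc = PGMShwMakePageReadonly(pVCpu, GCPtr, 0 /* fOpFlags */);
    rc = PGMShwMakePageNotPresent(pVCpu, GCPtr, 0 /* fOpFlags */);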

Legend:

    Lines prefixed with '-' were removed and lines prefixed with '+' were added;
    unprefixed lines are unchanged context. A line containing only '…' marks
    elided, unchanged code between hunks.
  • trunk/src/VBox/VMM/VMMAll/PGMAll.cpp  (r29250 → r30326)

     
     /**
    - * Sets (replaces) the page flags for a range of pages in the shadow context.
    - *
    - * @returns VBox status.
    - * @param   pVCpu       VMCPU handle.
    - * @param   GCPtr       The address of the first page.
    - * @param   cb          The size of the range in bytes.
    - * @param   fFlags      Page flags X86_PTE_*, excluding the page mask of course.
    - * @remark  You must use PGMMapSetPage() for pages in a mapping.
    - */
    -VMMDECL(int) PGMShwSetPage(PVMCPU pVCpu, RTGCPTR GCPtr, size_t cb, uint64_t fFlags)
    -{
    -    return PGMShwModifyPage(pVCpu, GCPtr, cb, fFlags, 0);
    -}
    -
    -
    -/**
      * Modify page flags for a range of pages in the shadow context.
      *
    …
      * @param   pVCpu       VMCPU handle.
      * @param   GCPtr       Virtual address of the first page in the range.
    - * @param   cb          Size (in bytes) of the range to apply the modification to.
      * @param   fFlags      The OR  mask - page flags X86_PTE_*, excluding the page mask of course.
      * @param   fMask       The AND mask - page flags X86_PTE_*.
      *                      Be very CAREFUL when ~'ing constants which could be 32-bit!
    + * @param   fOpFlags    A combination of the PGM_MK_PK_XXX flags.
      * @remark  You must use PGMMapModifyPage() for pages in a mapping.
      */
    -VMMDECL(int) PGMShwModifyPage(PVMCPU pVCpu, RTGCPTR GCPtr, size_t cb, uint64_t fFlags, uint64_t fMask)
    +DECLINLINE(int) pdmShwModifyPage(PVMCPU pVCpu, RTGCPTR GCPtr, uint64_t fFlags, uint64_t fMask, uint32_t fOpFlags)
     {
         AssertMsg(!(fFlags & X86_PTE_PAE_PG_MASK), ("fFlags=%#llx\n", fFlags));
    -    Assert(cb);
    -
    -    /*
    -     * Align the input.
    -     */
    -    cb     += GCPtr & PAGE_OFFSET_MASK;
    -    cb      = RT_ALIGN_Z(cb, PAGE_SIZE);
    -    GCPtr   = (GCPtr & PAGE_BASE_GC_MASK); /** @todo this ain't necessary, right... */
    -
    -    /*
    -     * Call worker.
    -     */
    +    Assert(!(fOpFlags & ~(PGM_MK_PG_IS_MMIO2 | PGM_MK_PG_IS_WRITE_FAULT)));
    +
    +    GCPtr &= PAGE_BASE_GC_MASK; /** @todo this ain't necessary, right... */
    +
         PVM pVM = pVCpu->CTX_SUFF(pVM);
         pgmLock(pVM);
    -    int rc = PGM_SHW_PFN(ModifyPage, pVCpu)(pVCpu, GCPtr, cb, fFlags, fMask);
    +    int rc = PGM_SHW_PFN(ModifyPage, pVCpu)(pVCpu, GCPtr, PAGE_SIZE, fFlags, fMask, fOpFlags);
         pgmUnlock(pVM);
         return rc;
     }
    +
    +
    +/**
    + * Changing the page flags for a single page in the shadow page tables so as to
    + * make it read-only.
    + *
    + * @returns VBox status code.
    + * @param   pVCpu       VMCPU handle.
    + * @param   GCPtr       Virtual address of the first page in the range.
    + * @param   fOpFlags    A combination of the PGM_MK_PK_XXX flags.
    + */
    +VMMDECL(int) PGMShwMakePageReadonly(PVMCPU pVCpu, RTGCPTR GCPtr, uint32_t fOpFlags)
    +{
    +    return pdmShwModifyPage(pVCpu, GCPtr, 0, ~(uint64_t)X86_PTE_RW, fOpFlags);
    +}
    +
    +
    +/**
    + * Changing the page flags for a single page in the shadow page tables so as to
    + * make it writable.
    + *
    + * The call must know with 101% certainty that the guest page tables maps this
    + * as writable too.  This function will deal shared, zero and write monitored
    + * pages.
    + *
    + * @returns VBox status code.
    + * @param   pVCpu       VMCPU handle.
    + * @param   GCPtr       Virtual address of the first page in the range.
    + * @param   fMmio2      Set if it is an MMIO2 page.
    + * @param   fOpFlags    A combination of the PGM_MK_PK_XXX flags.
    + */
    +VMMDECL(int) PGMShwMakePageWritable(PVMCPU pVCpu, RTGCPTR GCPtr, uint32_t fOpFlags)
    +{
    +    return pdmShwModifyPage(pVCpu, GCPtr, X86_PTE_RW, ~(uint64_t)0, fOpFlags);
    +}
    +
    +
    +/**
    + * Changing the page flags for a single page in the shadow page tables so as to
    + * make it not present.
    + *
    + * @returns VBox status code.
    + * @param   pVCpu       VMCPU handle.
    + * @param   GCPtr       Virtual address of the first page in the range.
    + * @param   fOpFlags    A combination of the PGM_MK_PG_XXX flags.
    + */
    +VMMDECL(int) PGMShwMakePageNotPresent(PVMCPU pVCpu, RTGCPTR GCPtr, uint32_t fOpFlags)
    +{
    +    return pdmShwModifyPage(pVCpu, GCPtr, 0, 0, fOpFlags);
    +}
    +
     
     /**
    …
     #if defined(IN_RC) || defined(VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0)
     
    +/** Common worker for PGMDynMapGCPage and PGMDynMapGCPageOff. */
    +DECLINLINE(int) pgmDynMapGCPageInternal(PVM pVM, RTGCPHYS GCPhys, void **ppv)
    +{
    +    pgmLock(pVM);
    +
    +    /*
    +     * Convert it to a writable page and it on to PGMDynMapHCPage.
    +     */
    +    int rc;
    +    PPGMPAGE pPage = pgmPhysGetPage(&pVM->pgm.s, GCPhys);
    +    if (RT_LIKELY(pPage))
    +    {
    +        rc = pgmPhysPageMakeWritable(pVM, pPage, GCPhys);
    +        if (RT_SUCCESS(rc))
    +        {
    +            //Log(("PGMDynMapGCPage: GCPhys=%RGp pPage=%R[pgmpage]\n", GCPhys, pPage));
    +#ifdef VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0
    +            rc = pgmR0DynMapHCPageInlined(&pVM->pgm.s, PGM_PAGE_GET_HCPHYS(pPage), ppv);
    +#else
    +            rc = PGMDynMapHCPage(pVM, PGM_PAGE_GET_HCPHYS(pPage), ppv);
    +#endif
    +        }
    +        else
    +            AssertRC(rc);
    +    }
    +    else
    +    {
    +        AssertMsgFailed(("Invalid physical address %RGp!\n", GCPhys));
    +        rc = VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS;
    +    }
    +
    +    pgmUnlock(pVM);
    +    return rc;
    +}
    +
     /**
      * Temporarily maps one guest page specified by GC physical address.
    …
     {
         AssertMsg(!(GCPhys & PAGE_OFFSET_MASK), ("GCPhys=%RGp\n", GCPhys));
    -
    -    /*
    -     * Get the ram range.
    -     */
    -    PPGMRAMRANGE pRam = pVM->pgm.s.CTX_SUFF(pRamRanges);
    -    while (pRam && GCPhys - pRam->GCPhys >= pRam->cb)
    -        pRam = pRam->CTX_SUFF(pNext);
    -    if (!pRam)
    -    {
    -        AssertMsgFailed(("Invalid physical address %RGp!\n", GCPhys));
    -        return VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS;
    -    }
    -
    -    /*
    -     * Pass it on to PGMDynMapHCPage.
    -     */
    -    RTHCPHYS HCPhys = PGM_PAGE_GET_HCPHYS(&pRam->aPages[(GCPhys - pRam->GCPhys) >> PAGE_SHIFT]);
    -    //Log(("PGMDynMapGCPage: GCPhys=%RGp HCPhys=%RHp\n", GCPhys, HCPhys));
    -#ifdef VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0
    -    pgmR0DynMapHCPageInlined(&pVM->pgm.s, HCPhys, ppv);
    -#else
    -    PGMDynMapHCPage(pVM, HCPhys, ppv);
    -#endif
    -    return VINF_SUCCESS;
    +    return pgmDynMapGCPageInternal(pVM, GCPhys, ppv);
     }
     
    …
     VMMDECL(int) PGMDynMapGCPageOff(PVM pVM, RTGCPHYS GCPhys, void **ppv)
     {
    -    /*
    -     * Get the ram range.
    -     */
    -    PPGMRAMRANGE pRam = pVM->pgm.s.CTX_SUFF(pRamRanges);
    -    while (pRam && GCPhys - pRam->GCPhys >= pRam->cb)
    -        pRam = pRam->CTX_SUFF(pNext);
    -    if (!pRam)
    -    {
    -        AssertMsgFailed(("Invalid physical address %RGp!\n", GCPhys));
    -        return VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS;
    -    }
    -
    -    /*
    -     * Pass it on to PGMDynMapHCPage.
    -     */
    -    RTHCPHYS HCPhys = PGM_PAGE_GET_HCPHYS(&pRam->aPages[(GCPhys - pRam->GCPhys) >> PAGE_SHIFT]);
    -#ifdef VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0
    -    pgmR0DynMapHCPageInlined(&pVM->pgm.s, HCPhys, ppv);
    -#else
    -    PGMDynMapHCPage(pVM, HCPhys, ppv);
    -#endif
    -    *ppv = (void *)((uintptr_t)*ppv | (uintptr_t)(GCPhys & PAGE_OFFSET_MASK));
    -    return VINF_SUCCESS;
    +    void *pv;
    +    int rc = pgmDynMapGCPageInternal(pVM, GCPhys, &pv);
    +    if (RT_SUCCESS(rc))
    +    {
    +        *ppv = (void *)((uintptr_t)pv | (uintptr_t)(GCPhys & PAGE_OFFSET_MASK));
    +        return VINF_SUCCESS;
    +    }
    +    return rc;
     }
     
    …
     VMMDECL(void) PGMDynCheckLocks(PVM pVM)
     {
    -    for (unsigned i=0;i<RT_ELEMENTS(pVM->pgm.s.aLockedDynPageMapCache);i++)
    +    for (unsigned i = 0; i < RT_ELEMENTS(pVM->pgm.s.aLockedDynPageMapCache); i++)
             Assert(!pVM->pgm.s.aLockedDynPageMapCache[i]);
     }
    …
      * @copydoc FNRTSTRFORMATTYPE */
     static DECLCALLBACK(size_t) pgmFormatTypeHandlerPage(PFNRTSTROUTPUT pfnOutput, void *pvArgOutput,
    -                                                     const char *pszType, void const *pvValue,
    -                                                     int cchWidth, int cchPrecision, unsigned fFlags,
    -                                                     void *pvUser)
    +                                                     const char *pszType, void const *pvValue,
    +                                                     int cchWidth, int cchPrecision, unsigned fFlags,
    +                                                     void *pvUser)
     {
         size_t    cch;
    …
     
     #endif /* !IN_R0 || LOG_ENABLED */
    -
     
     /**
  • trunk/src/VBox/VMM/VMMAll/PGMAllPool.cpp  (r30301 → r30326)

    …
                 if (rc == VINF_SUCCESS)
                 {
    -                rc = PGMShwModifyPage(pVCpu, pvFault, 1, X86_PTE_RW, ~(uint64_t)X86_PTE_RW);
    +                rc = PGMShwMakePageWritable(pVCpu, pvFault, PGM_MK_PG_IS_WRITE_FAULT);
                     AssertMsg(rc == VINF_SUCCESS
                             /* In the SMP case the page table might be removed while we wait for the PGM lock in the trap handler. */
  • trunk/src/VBox/VMM/VMMAll/PGMAllShw.h  (r28800 → r30326)

     
     /*
    - * Copyright (C) 2006-2007 Oracle Corporation
    + * Copyright (C) 2006-2010 Oracle Corporation
      *
      * This file is part of VirtualBox Open Source Edition (OSE), as
    …
     RT_C_DECLS_BEGIN
     PGM_SHW_DECL(int, GetPage)(PVMCPU pVCpu, RTGCUINTPTR GCPtr, uint64_t *pfFlags, PRTHCPHYS pHCPhys);
    -PGM_SHW_DECL(int, ModifyPage)(PVMCPU pVCpu, RTGCUINTPTR GCPtr, size_t cbPages, uint64_t fFlags, uint64_t fMask);
    +PGM_SHW_DECL(int, ModifyPage)(PVMCPU pVCpu, RTGCUINTPTR GCPtr, size_t cbPages, uint64_t fFlags, uint64_t fMask, uint32_t fOpFlags);
     RT_C_DECLS_END
     
    …
      * @param   fMask       The AND mask - page flags X86_PTE_*.
      *                      Be extremely CAREFUL with ~'ing values because they can be 32-bit!
    + * @param   fOpFlags    A combination of the PGM_MK_PK_XXX flags.
      * @remark  You must use PGMMapModifyPage() for pages in a mapping.
      */
    -PGM_SHW_DECL(int, ModifyPage)(PVMCPU pVCpu, RTGCUINTPTR GCPtr, size_t cb, uint64_t fFlags, uint64_t fMask)
    +PGM_SHW_DECL(int, ModifyPage)(PVMCPU pVCpu, RTGCUINTPTR GCPtr, size_t cb, uint64_t fFlags, uint64_t fMask, uint32_t fOpFlags)
     {
     # if PGM_SHW_TYPE == PGM_TYPE_NESTED
    …
                 if (pPT->a[iPTE].n.u1Present)
                 {
    -                SHWPTE Pte;
    -
    -                Pte.u = (pPT->a[iPTE].u & (fMask | SHW_PTE_PG_MASK)) | (fFlags & ~SHW_PTE_PG_MASK);
    -                ASMAtomicWriteSize(&pPT->a[iPTE], Pte.u);
    -                Assert(pPT->a[iPTE].n.u1Present);
    +                SHWPTE const    OrgPte = pPT->a[iPTE];
    +                SHWPTE          NewPte;
    +
    +                NewPte.u = (OrgPte.u & (fMask | SHW_PTE_PG_MASK)) | (fFlags & ~SHW_PTE_PG_MASK);
    +                Assert(NewPte.n.u1Present);
    +                if (!NewPte.n.u1Present)
    +                {
    +                    /** @todo Some CSAM code path might end up here and upset
    +                     *  the page pool. */
    +                    AssertFailed();
    +                }
    +                else if (   NewPte.n.u1Write
    +                         && !OrgPte.n.u1Write
    +                         && !(fOpFlags & PGM_MK_PG_IS_MMIO2) )
    +                {
    +                    /** @todo Optimize \#PF handling by caching data.  We can
    +                     *        then use this when PGM_MK_PG_IS_WRITE_FAULT is
    +                     *        set instead of resolving the guest physical
    +                     *        address yet again. */
    +                    RTGCPHYS GCPhys;
    +                    uint64_t fGstPte;
    +                    rc = PGMGstGetPage(pVCpu, GCPtr, &fGstPte, &GCPhys);
    +                    AssertRC(rc);
    +                    if (RT_SUCCESS(rc))
    +                    {
    +                        Assert(fGstPte & X86_PTE_RW);
    +                        PPGMPAGE pPage = pgmPhysGetPage(&pVCpu->CTX_SUFF(pVM)->pgm.s, GCPhys);
    +                        Assert(pPage);
    +                        if (pPage)
    +                        {
    +                            rc = pgmPhysPageMakeWritable(pVM, pPage, GCPhys);
    +                            AssertRCReturn(rc, rc);
    +                            Log(("%s: pgmPhysPageMakeWritable on %RGv / %RGp %R[pgmpage]\n", __PRETTY_FUNCTION__, GCPtr, GCPhys, pPage));
    +                        }
    +                    }
    +                }
    +
    +                ASMAtomicWriteSize(&pPT->a[iPTE], NewPte.u);
     # if PGM_SHW_TYPE == PGM_TYPE_EPT
                     HWACCMInvalidatePhysPage(pVM, (RTGCPHYS)GCPtr);
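
On the dynamic-mapping side, PGMDynMapGCPage and PGMDynMapGCPageOff now both funnel through the shared pgmDynMapGCPageInternal worker added in PGMAll.cpp, which takes the PGM lock and calls pgmPhysPageMakeWritable() before handing the page to the dynamic mapper. A brief caller-side sketch inside the IN_RC / VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0 context shown above; the locals (pVM, GCPhysPage, GCPhys, rc, pvPage, pv) are assumed and purely illustrative:

    /* Case 1: GCPhysPage is page aligned (PGMDynMapGCPage asserts this).  The
     * returned pointer is now safe to write through, even for pages that were
     * shared, zero or write monitored before the call. */
    void *pvPage;
    rc = PGMDynMapGCPage(pVM, GCPhysPage, &pvPage);

    /* Case 2: arbitrary guest physical address.  Same worker; on success the
     * byte offset within the page is OR'ed back into the mapping address. */
    void *pv;
    rc = PGMDynMapGCPageOff(pVM, GCPhys, &pv);
    if (RT_SUCCESS(rc))
    {
        /* pv points at the mapped page plus (GCPhys & PAGE_OFFSET_MASK). */
    }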