VirtualBox

Changeset 30326 in vbox for trunk/src


Timestamp: Jun 21, 2010 12:35:33 PM (15 years ago)
Author: vboxsync
Message:

PGM: PGMShwModifyPage/PGMShwSetPage -> PGMShwMakePageWritable, PGMShwMakePageReadonly & PGMShwMakePageNotPresent and made the low level worker make the page writable before setting the X86_PTE_RW bit. PGMR3PhysTlbGCPhys2Ptr should make write monitored pages writable (?). PGMDynMapGCPage and PGMDynMapGCPageOff must make the pages writable and take the PGM lock.

Location: trunk/src/VBox
Files: 16 edited
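
The three new entry points are thin wrappers over the old flags/mask update: writable ORs in X86_PTE_RW with an all-ones mask, read-only keeps everything except X86_PTE_RW, and not-present clears both the OR flags and the mask while the page-frame bits survive. The standalone C sketch below is illustrative only, not VirtualBox code; the uint64_t PTE, the local bit constants and the shwModifyPte() helper are simplified assumptions standing in for the real definitions.

    /*
     * Illustrative sketch only -- NOT VirtualBox code.  Models how the new
     * PGMShwMakePageWritable/Readonly/NotPresent wrappers reduce to the old
     * PGMShwModifyPage-style (fFlags OR, fMask AND) update of one shadow PTE.
     */
    #include <stdint.h>
    #include <stdio.h>

    #define X86_PTE_P        UINT64_C(0x0000000000000001)  /* present bit (simplified) */
    #define X86_PTE_RW       UINT64_C(0x0000000000000002)  /* read/write bit (simplified) */
    #define SHW_PTE_PG_MASK  UINT64_C(0x000ffffffffff000)  /* page-frame bits, always preserved */

    /* Core of the old ModifyPage semantics: AND with fMask, OR in fFlags, keep the frame. */
    static uint64_t shwModifyPte(uint64_t uPte, uint64_t fFlags, uint64_t fMask)
    {
        return (uPte & (fMask | SHW_PTE_PG_MASK)) | (fFlags & ~SHW_PTE_PG_MASK);
    }

    /* PGMShwMakePageWritable:   fFlags = X86_PTE_RW, fMask = ~0. */
    static uint64_t makeWritable(uint64_t uPte)   { return shwModifyPte(uPte, X86_PTE_RW, ~UINT64_C(0)); }
    /* PGMShwMakePageReadonly:   fFlags = 0,          fMask = ~X86_PTE_RW. */
    static uint64_t makeReadonly(uint64_t uPte)   { return shwModifyPte(uPte, 0, ~X86_PTE_RW); }
    /* PGMShwMakePageNotPresent: fFlags = 0,          fMask = 0 (frame bits survive, flags do not). */
    static uint64_t makeNotPresent(uint64_t uPte) { return shwModifyPte(uPte, 0, 0); }

    int main(void)
    {
        uint64_t uPte = UINT64_C(0x12345000) | X86_PTE_P;           /* mapped read-only */
        printf("writable    %#llx\n", (unsigned long long)makeWritable(uPte));
        printf("readonly    %#llx\n", (unsigned long long)makeReadonly(uPte | X86_PTE_RW));
        printf("not present %#llx\n", (unsigned long long)makeNotPresent(uPte));
        return 0;
    }

In the hunks below, call sites that previously passed (X86_PTE_RW, ~X86_PTE_RW), (0, ~X86_PTE_RW) or used PGMShwSetPage are rewritten to the corresponding wrapper, with the new fOpFlags argument (PGM_MK_PG_IS_MMIO2, PGM_MK_PG_IS_WRITE_FAULT) carrying extra context down to the low-level worker.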

  • trunk/src/VBox/Devices/Graphics/DevVGA.cpp

    r28951 r30326  
    38983898    {
    38993899#ifndef IN_RING3
    3900         rc = PGMShwModifyPage(PDMDevHlpGetVMCPU(pThis->CTX_SUFF(pDevIns)), GCPtr, 1, X86_PTE_RW, ~(uint64_t)X86_PTE_RW);
     3900        rc = PGMShwMakePageWritable(PDMDevHlpGetVMCPU(pThis->CTX_SUFF(pDevIns)), GCPtr,
     3901                                    PGM_MK_PG_IS_MMIO2 | PGM_MK_PG_IS_WRITE_FAULT);
    39013902        PDMCritSectLeave(&pThis->lock);
    39023903        AssertMsgReturn(    rc == VINF_SUCCESS
     
    39063907                        ("PGMShwModifyPage -> GCPtr=%RGv rc=%d\n", GCPtr, rc),
    39073908                        rc);
    3908         return VINF_SUCCESS;
    39093909#else /* IN_RING3 : We don't have any virtual page address of the access here. */
    39103910        PDMCritSectLeave(&pThis->lock);
    39113911        Assert(GCPtr == 0);
     3912#endif
    39123913        return VINF_SUCCESS;
    3913 #endif
    3914     }
    3915     else
    3916     {
    3917         PDMCritSectLeave(&pThis->lock);
    3918         AssertMsgFailed(("PGMHandlerPhysicalPageTempOff -> rc=%d\n", rc));
    3919     }
     3914    }
     3915
     3916    PDMCritSectLeave(&pThis->lock);
     3917    AssertMsgFailed(("PGMHandlerPhysicalPageTempOff -> rc=%d\n", rc));
    39203918    return rc;
    39213919}
  • trunk/src/VBox/VMM/PATM/CSAM.cpp

    r28800 r30326  
    17941794        AssertRC(rc);
    17951795
    1796         rc = PGMShwModifyPage(pVCpu, GCPtr, 1, 0, ~(uint64_t)X86_PTE_RW);
     1796        rc = PGMShwMakePageReadonly(pVCpu, GCPtr, 0 /*fFlags*/);
    17971797        Assert(rc == VINF_SUCCESS || rc == VERR_PAGE_NOT_PRESENT || rc == VERR_PAGE_TABLE_NOT_PRESENT);
    17981798
     
    19061906        AssertRC(rc);
    19071907
    1908         rc = PGMShwModifyPage(pVCpu, pPageAddrGC, 1, 0, ~(uint64_t)X86_PTE_RW);
     1908        rc = PGMShwMakePageReadonly(pVCpu, pPageAddrGC, 0 /*fFlags*/);
    19091909        Assert(rc == VINF_SUCCESS || rc == VERR_PAGE_NOT_PRESENT || rc == VERR_PAGE_TABLE_NOT_PRESENT);
    19101910
     
    19281928
    19291929        /* Make sure it's readonly. Page invalidation may have modified the attributes. */
    1930         rc = PGMShwModifyPage(pVCpu, pPageAddrGC, 1, 0, ~(uint64_t)X86_PTE_RW);
     1930        rc = PGMShwMakePageReadonly(pVCpu, pPageAddrGC, 0 /*fFlags*/);
    19311931        Assert(rc == VINF_SUCCESS || rc == VERR_PAGE_NOT_PRESENT || rc == VERR_PAGE_TABLE_NOT_PRESENT);
    19321932    }
     
    19501950        AssertRC(rc);
    19511951        /* The page was changed behind our back. It won't be made read-only until the next SyncCR3, so force it here. */
    1952         rc = PGMShwModifyPage(pVCpu, pPageAddrGC, 1, 0, ~(uint64_t)X86_PTE_RW);
     1952        rc = PGMShwMakePageReadonly(pVCpu, pPageAddrGC, 0 /*fFlags*/);
    19531953        Assert(rc == VINF_SUCCESS || rc == VERR_PAGE_NOT_PRESENT || rc == VERR_PAGE_TABLE_NOT_PRESENT);
    19541954    }
     
    23302330
    23312331        /* Enable write protection again. (use the fault address as it might be an alias) */
    2332         rc = PGMShwModifyPage(pVCpu, pVM->csam.s.pvDirtyFaultPage[i], 1, 0, ~(uint64_t)X86_PTE_RW);
     2332        rc = PGMShwMakePageReadonly(pVCpu, pVM->csam.s.pvDirtyFaultPage[i], 0 /*fFlags*/);
    23332333        Assert(rc == VINF_SUCCESS || rc == VERR_PAGE_NOT_PRESENT || rc == VERR_PAGE_TABLE_NOT_PRESENT);
    23342334
     
    23742374
    23752375        Log(("csamR3FlushCodePages: %RRv\n", GCPtr));
    2376         PGMShwSetPage(pVCpu, GCPtr, 1, 0);
     2376        PGMShwMakePageNotPresent(pVCpu, GCPtr, 0 /*fFlags*/);
    23772377        /* Resync the page to make sure instruction fetch will fault */
    23782378        CSAMMarkPage(pVM, GCPtr, false);
  • trunk/src/VBox/VMM/PATM/PATM.cpp

    r28800 r30326  
    61366136
    61376137            /* Typical pushf (most patches)/push (call patch) trap because of a monitored page. */
    6138             rc = PGMShwModifyPage(pVCpu, pCtx->esp, 1, X86_PTE_RW, ~(uint64_t)X86_PTE_RW);
     6138            rc = PGMShwMakePageWritable(pVCpu, pCtx->esp, 0 /*fFlags*/);
    61396139            AssertMsgRC(rc, ("PGMShwModifyPage -> rc=%Rrc\n", rc));
    61406140            if (rc == VINF_SUCCESS)
  • trunk/src/VBox/VMM/PATM/VMMGC/CSAMGC.cpp

    r29250 r30326  
    8686         * Make this particular page R/W.
    8787         */
    88         rc = PGMShwModifyPage(pVCpu, pvFault, 1, X86_PTE_RW, ~(uint64_t)X86_PTE_RW);
     88        rc = PGMShwMakePageWritable(pVCpu, pvFault, PGM_MK_PG_IS_WRITE_FAULT);
    8989        AssertMsgRC(rc, ("PGMShwModifyPage -> rc=%Rrc\n", rc));
    9090        ASMInvalidatePage((void *)(uintptr_t)pvFault);
     
    127127     */
    128128    Log(("CSAMGCCodePageWriteHandler: enabled r/w for page %RGv\n", pvFault));
    129     rc = PGMShwModifyPage(pVCpu, pvFault, 1, X86_PTE_RW, ~(uint64_t)X86_PTE_RW);
     129    rc = PGMShwMakePageWritable(pVCpu, pvFault, PGM_MK_PG_IS_WRITE_FAULT);
    130130    AssertMsgRC(rc, ("PGMShwModifyPage -> rc=%Rrc\n", rc));
    131131    ASMInvalidatePage((void *)(uintptr_t)pvFault);
  • trunk/src/VBox/VMM/PATM/VMMGC/PATMGC.cpp

    r28800 r30326  
    101101#ifdef LOG_ENABLED
    102102    if (pPatchPage)
    103         Log(("PATMIsWriteToPatchPage: Found page %RRv for write to %RRv %d bytes (page low:high %RRv:%RRv\n", pPatchPage->Core.Key, GCPtr, cbWrite, pPatchPage->pLowestAddrGC, pPatchPage->pHighestAddrGC));
     103        Log(("PATMGCHandleWriteToPatchPage: Found page %RRv for write to %RRv %d bytes (page low:high %RRv:%RRv\n", pPatchPage->Core.Key, GCPtr, cbWrite, pPatchPage->pLowestAddrGC, pPatchPage->pHighestAddrGC));
    104104#endif
    105105
  • trunk/src/VBox/VMM/PGMHandler.cpp

    r28800 r30326  
    55
    66/*
    7  * Copyright (C) 2006-2007 Oracle Corporation
     7 * Copyright (C) 2006-2010 Oracle Corporation
    88 *
    99 * This file is part of VirtualBox Open Source Edition (OSE), as
  • trunk/src/VBox/VMM/PGMInternal.h

    r30301 r30326  
    233233# define PGM_HCPHYS_2_PTR(pVM, HCPhys, ppv) \
    234234     MMPagePhys2PageEx(pVM, HCPhys, (void **)(ppv))
    235 #endif
    236 
    237 /** @def PGM_HCPHYS_2_PTR_BY_PGM
    238  * Maps a HC physical page pool address to a virtual address.
    239  *
    240  * @returns VBox status code.
    241  * @param   pPGM    The PGM instance data.
    242  * @param   HCPhys  The HC physical address to map to a virtual one.
    243  * @param   ppv     Where to store the virtual address. No need to cast this.
    244  *
    245  * @remark  In GC this uses PGMGCDynMapHCPage(), so it will consume of the
    246  *          small page window employeed by that function. Be careful.
    247  * @remark  There is no need to assert on the result.
    248  */
    249 #ifdef VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0
    250 # define PGM_HCPHYS_2_PTR_BY_PGM(pPGM, HCPhys, ppv) \
    251      pgmR0DynMapHCPageInlined(pPGM, HCPhys, (void **)(ppv))
    252 #else
    253 # define PGM_HCPHYS_2_PTR_BY_PGM(pPGM, HCPhys, ppv) \
    254      PGM_HCPHYS_2_PTR(PGM2VM(pPGM), HCPhys, (void **)(ppv))
    255235#endif
    256236
     
    24152395    DECLR3CALLBACKMEMBER(int,       pfnR3ShwExit,(PVMCPU pVCpu));
    24162396    DECLR3CALLBACKMEMBER(int,       pfnR3ShwGetPage,(PVMCPU pVCpu, RTGCPTR GCPtr, uint64_t *pfFlags, PRTHCPHYS pHCPhys));
    2417     DECLR3CALLBACKMEMBER(int,       pfnR3ShwModifyPage,(PVMCPU pVCpu, RTGCPTR GCPtr, size_t cbPages, uint64_t fFlags, uint64_t fMask));
     2397    DECLR3CALLBACKMEMBER(int,       pfnR3ShwModifyPage,(PVMCPU pVCpu, RTGCPTR GCPtr, size_t cbPages, uint64_t fFlags, uint64_t fMask, uint32_t fOpFlags));
    24182398
    24192399    DECLRCCALLBACKMEMBER(int,       pfnRCShwGetPage,(PVMCPU pVCpu, RTGCPTR GCPtr, uint64_t *pfFlags, PRTHCPHYS pHCPhys));
    2420     DECLRCCALLBACKMEMBER(int,       pfnRCShwModifyPage,(PVMCPU pVCpu, RTGCPTR GCPtr, size_t cbPages, uint64_t fFlags, uint64_t fMask));
     2400    DECLRCCALLBACKMEMBER(int,       pfnRCShwModifyPage,(PVMCPU pVCpu, RTGCPTR GCPtr, size_t cbPages, uint64_t fFlags, uint64_t fMask, uint32_t fOpFlags));
    24212401
    24222402    DECLR0CALLBACKMEMBER(int,       pfnR0ShwGetPage,(PVMCPU pVCpu, RTGCPTR GCPtr, uint64_t *pfFlags, PRTHCPHYS pHCPhys));
    2423     DECLR0CALLBACKMEMBER(int,       pfnR0ShwModifyPage,(PVMCPU pVCpu, RTGCPTR GCPtr, size_t cbPages, uint64_t fFlags, uint64_t fMask));
     2403    DECLR0CALLBACKMEMBER(int,       pfnR0ShwModifyPage,(PVMCPU pVCpu, RTGCPTR GCPtr, size_t cbPages, uint64_t fFlags, uint64_t fMask, uint32_t fOpFlags));
    24242404    /** @} */
    24252405
     
    30493029    DECLR3CALLBACKMEMBER(int,       pfnR3ShwExit,(PVMCPU pVCpu));
    30503030    DECLR3CALLBACKMEMBER(int,       pfnR3ShwGetPage,(PVMCPU pVCpu, RTGCPTR GCPtr, uint64_t *pfFlags, PRTHCPHYS pHCPhys));
    3051     DECLR3CALLBACKMEMBER(int,       pfnR3ShwModifyPage,(PVMCPU pVCpu, RTGCPTR GCPtr, size_t cbPages, uint64_t fFlags, uint64_t fMask));
     3031    DECLR3CALLBACKMEMBER(int,       pfnR3ShwModifyPage,(PVMCPU pVCpu, RTGCPTR GCPtr, size_t cbPages, uint64_t fFlags, uint64_t fMask, uint32_t fOpFlags));
    30523032
    30533033    DECLRCCALLBACKMEMBER(int,       pfnRCShwGetPage,(PVMCPU pVCpu, RTGCPTR GCPtr, uint64_t *pfFlags, PRTHCPHYS pHCPhys));
    3054     DECLRCCALLBACKMEMBER(int,       pfnRCShwModifyPage,(PVMCPU pVCpu, RTGCPTR GCPtr, size_t cbPages, uint64_t fFlags, uint64_t fMask));
     3034    DECLRCCALLBACKMEMBER(int,       pfnRCShwModifyPage,(PVMCPU pVCpu, RTGCPTR GCPtr, size_t cbPages, uint64_t fFlags, uint64_t fMask, uint32_t fOpFlags));
    30553035
    30563036    DECLR0CALLBACKMEMBER(int,       pfnR0ShwGetPage,(PVMCPU pVCpu, RTGCPTR GCPtr, uint64_t *pfFlags, PRTHCPHYS pHCPhys));
    3057     DECLR0CALLBACKMEMBER(int,       pfnR0ShwModifyPage,(PVMCPU pVCpu, RTGCPTR GCPtr, size_t cbPages, uint64_t fFlags, uint64_t fMask));
     3037    DECLR0CALLBACKMEMBER(int,       pfnR0ShwModifyPage,(PVMCPU pVCpu, RTGCPTR GCPtr, size_t cbPages, uint64_t fFlags, uint64_t fMask, uint32_t fOpFlags));
    30583038
    30593039    /** @} */
  • trunk/src/VBox/VMM/PGMPhys.cpp

    r30236 r30326  
    38643864
    38653865            /* Make sure what we return is writable. */
    3866             if (fWritable && rc != VINF_PGM_PHYS_TLB_CATCH_WRITE)
     3866            if (fWritable)
    38673867                switch (PGM_PAGE_GET_STATE(pPage))
    38683868                {
     
    38743874                    case PGM_PAGE_STATE_ZERO:
    38753875                    case PGM_PAGE_STATE_SHARED:
     3876                        if (rc == VINF_PGM_PHYS_TLB_CATCH_WRITE)
     3877                            break;
    38763878                    case PGM_PAGE_STATE_WRITE_MONITORED:
    38773879                        rc2 = pgmPhysPageMakeWritable(pVM, pPage, GCPhys & ~(RTGCPHYS)PAGE_OFFSET_MASK);
  • trunk/src/VBox/VMM/PGMPool.cpp

    r30301 r30326  
    606606
    607607    pgmLock(pVM);
    608     Log(("pgmR3PoolClearAllRendezvous: cUsedPages=%d\n", pPool->cUsedPages));
     608    Log(("pgmR3PoolClearAllRendezvous: cUsedPages=%d fpvFlushRemTbl=%RTbool\n", pPool->cUsedPages, !!fpvFlushRemTbl));
    609609
    610610    /*
  • trunk/src/VBox/VMM/PGMSavedState.cpp

    r30302 r30326  
    12371237 * @param   iPage               The page index.
    12381238 */
    1239 static void pgmR3StateVerifyCrc32ForPage(void const *pvPage, PPGMRAMRANGE pCur, PPGMLIVESAVERAMPAGE paLSPages, uint32_t iPage)
     1239static void pgmR3StateVerifyCrc32ForPage(void const *pvPage, PPGMRAMRANGE pCur, PPGMLIVESAVERAMPAGE paLSPages, uint32_t iPage, const  char *pszWhere)
    12401240{
    12411241    if (paLSPages[iPage].u32Crc != UINT32_MAX)
    12421242    {
    12431243        uint32_t u32Crc = RTCrc32(pvPage, PAGE_SIZE);
    1244         Assert((!PGM_PAGE_IS_ZERO(&pCur->aPages[iPage]) && !PGM_PAGE_IS_BALLOONED(&pCur->aPages[iPage])) || u32Crc == PGM_STATE_CRC32_ZERO_PAGE);
     1244        Assert(   (   !PGM_PAGE_IS_ZERO(&pCur->aPages[iPage])
     1245                   && !PGM_PAGE_IS_BALLOONED(&pCur->aPages[iPage]))
     1246               || u32Crc == PGM_STATE_CRC32_ZERO_PAGE);
    12451247        AssertMsg(paLSPages[iPage].u32Crc == u32Crc,
    1246                   ("%08x != %08x for %RGp %R[pgmpage]\n", paLSPages[iPage].u32Crc, u32Crc,
    1247                    pCur->GCPhys + ((RTGCPHYS)iPage << PAGE_SHIFT), &pCur->aPages[iPage]));
     1248                  ("%08x != %08x for %RGp %R[pgmpage] %s\n", paLSPages[iPage].u32Crc, u32Crc,
     1249                   pCur->GCPhys + ((RTGCPHYS)iPage << PAGE_SHIFT), &pCur->aPages[iPage], pszWhere));
    12481250    }
    12491251}
     
    12591261 * @param   iPage               The page index.
    12601262 */
    1261 static void pgmR3StateVerifyCrc32ForRamPage(PVM pVM, PPGMRAMRANGE pCur, PPGMLIVESAVERAMPAGE paLSPages, uint32_t iPage)
     1263static void pgmR3StateVerifyCrc32ForRamPage(PVM pVM, PPGMRAMRANGE pCur, PPGMLIVESAVERAMPAGE paLSPages, uint32_t iPage, const char *pszWhere)
    12621264{
    12631265    if (paLSPages[iPage].u32Crc != UINT32_MAX)
     
    12671269        int rc = pgmPhysGCPhys2CCPtrInternalReadOnly(pVM, &pCur->aPages[iPage], GCPhys, &pvPage);
    12681270        if (RT_SUCCESS(rc))
    1269             pgmR3StateVerifyCrc32ForPage(pvPage, pCur, paLSPages, iPage);
     1271            pgmR3StateVerifyCrc32ForPage(pvPage, pCur, paLSPages, iPage, pszWhere);
    12701272    }
    12711273}
     
    13701372                                        pgmR3StateCalcCrc32ForRamPage(pVM, pCur, paLSPages, iPage);
    13711373                                    else
    1372                                         pgmR3StateVerifyCrc32ForRamPage(pVM, pCur, paLSPages, iPage);
     1374                                        pgmR3StateVerifyCrc32ForRamPage(pVM, pCur, paLSPages, iPage, "scan");
    13731375#endif
    13741376                                    paLSPages[iPage].fWriteMonitoredJustNow = 0;
     
    15681570#ifdef PGMLIVESAVERAMPAGE_WITH_CRC32
    15691571                            if (PGM_PAGE_GET_TYPE(&pCur->aPages[iPage]) != PGMPAGETYPE_RAM)
    1570                                 pgmR3StateVerifyCrc32ForRamPage(pVM, pCur, paLSPages, iPage);
     1572                                pgmR3StateVerifyCrc32ForRamPage(pVM, pCur, paLSPages, iPage, "save#1");
    15711573#endif
    15721574                            continue;
     
    15971599#ifdef PGMLIVESAVERAMPAGE_WITH_CRC32
    15981600                            if (paLSPages)
    1599                                 pgmR3StateVerifyCrc32ForPage(abPage, pCur, paLSPages, iPage);
     1601                                pgmR3StateVerifyCrc32ForPage(abPage, pCur, paLSPages, iPage, "save#3");
    16001602#endif
    16011603                        }
     
    16191621#ifdef PGMLIVESAVERAMPAGE_WITH_CRC32
    16201622                        if (paLSPages)
    1621                             pgmR3StateVerifyCrc32ForRamPage(pVM, pCur, paLSPages, iPage);
     1623                            pgmR3StateVerifyCrc32ForRamPage(pVM, pCur, paLSPages, iPage, "save#2");
    16221624#endif
    16231625                        pgmUnlock(pVM);
  • trunk/src/VBox/VMM/PGMShw.h

    r28800 r30326  
    123123/* all */
    124124PGM_SHW_DECL(int, GetPage)(PVMCPU pVCpu, RTGCPTR GCPtr, uint64_t *pfFlags, PRTHCPHYS pHCPhys);
    125 PGM_SHW_DECL(int, ModifyPage)(PVMCPU pVCpu, RTGCPTR GCPtr, size_t cb, uint64_t fFlags, uint64_t fMask);
     125PGM_SHW_DECL(int, ModifyPage)(PVMCPU pVCpu, RTGCPTR GCPtr, size_t cb, uint64_t fFlags, uint64_t fMask, uint32_t fOpFlags);
    126126RT_C_DECLS_END
    127127
  • trunk/src/VBox/VMM/VMMAll/PGMAll.cpp

    r29250 r30326  
    825825
    826826/**
    827  * Sets (replaces) the page flags for a range of pages in the shadow context.
    828  *
    829  * @returns VBox status.
    830  * @param   pVCpu       VMCPU handle.
    831  * @param   GCPtr       The address of the first page.
    832  * @param   cb          The size of the range in bytes.
    833  * @param   fFlags      Page flags X86_PTE_*, excluding the page mask of course.
    834  * @remark  You must use PGMMapSetPage() for pages in a mapping.
    835  */
    836 VMMDECL(int) PGMShwSetPage(PVMCPU pVCpu, RTGCPTR GCPtr, size_t cb, uint64_t fFlags)
    837 {
    838     return PGMShwModifyPage(pVCpu, GCPtr, cb, fFlags, 0);
    839 }
    840 
    841 
    842 /**
    843827 * Modify page flags for a range of pages in the shadow context.
    844828 *
     
    848832 * @param   pVCpu       VMCPU handle.
    849833 * @param   GCPtr       Virtual address of the first page in the range.
    850  * @param   cb          Size (in bytes) of the range to apply the modification to.
    851834 * @param   fFlags      The OR  mask - page flags X86_PTE_*, excluding the page mask of course.
    852835 * @param   fMask       The AND mask - page flags X86_PTE_*.
    853836 *                      Be very CAREFUL when ~'ing constants which could be 32-bit!
      837 * @param   fOpFlags    A combination of the PGM_MK_PG_XXX flags.
    854838 * @remark  You must use PGMMapModifyPage() for pages in a mapping.
    855839 */
    856 VMMDECL(int) PGMShwModifyPage(PVMCPU pVCpu, RTGCPTR GCPtr, size_t cb, uint64_t fFlags, uint64_t fMask)
     840DECLINLINE(int) pdmShwModifyPage(PVMCPU pVCpu, RTGCPTR GCPtr, uint64_t fFlags, uint64_t fMask, uint32_t fOpFlags)
    857841{
    858842    AssertMsg(!(fFlags & X86_PTE_PAE_PG_MASK), ("fFlags=%#llx\n", fFlags));
    859     Assert(cb);
    860 
    861     /*
    862      * Align the input.
    863      */
    864     cb     += GCPtr & PAGE_OFFSET_MASK;
    865     cb      = RT_ALIGN_Z(cb, PAGE_SIZE);
    866     GCPtr   = (GCPtr & PAGE_BASE_GC_MASK); /** @todo this ain't necessary, right... */
    867 
    868     /*
    869      * Call worker.
    870      */
     843    Assert(!(fOpFlags & ~(PGM_MK_PG_IS_MMIO2 | PGM_MK_PG_IS_WRITE_FAULT)));
     844
     845    GCPtr &= PAGE_BASE_GC_MASK; /** @todo this ain't necessary, right... */
     846
    871847    PVM pVM = pVCpu->CTX_SUFF(pVM);
    872848    pgmLock(pVM);
    873     int rc = PGM_SHW_PFN(ModifyPage, pVCpu)(pVCpu, GCPtr, cb, fFlags, fMask);
     849    int rc = PGM_SHW_PFN(ModifyPage, pVCpu)(pVCpu, GCPtr, PAGE_SIZE, fFlags, fMask, fOpFlags);
    874850    pgmUnlock(pVM);
    875851    return rc;
    876852}
     853
     854
     855/**
      856 * Changes the page flags for a single page in the shadow page tables so as to
     857 * make it read-only.
     858 *
     859 * @returns VBox status code.
     860 * @param   pVCpu       VMCPU handle.
     861 * @param   GCPtr       Virtual address of the first page in the range.
      862 * @param   fOpFlags    A combination of the PGM_MK_PG_XXX flags.
     863 */
     864VMMDECL(int) PGMShwMakePageReadonly(PVMCPU pVCpu, RTGCPTR GCPtr, uint32_t fOpFlags)
     865{
     866    return pdmShwModifyPage(pVCpu, GCPtr, 0, ~(uint64_t)X86_PTE_RW, fOpFlags);
     867}
     868
     869
     870/**
      871 * Changes the page flags for a single page in the shadow page tables so as to
     872 * make it writable.
     873 *
      874 * The caller must know with 101% certainty that the guest page tables map this
      875 * as writable too.  This function will deal with shared, zero and write
      876 * monitored pages.
     877 *
     878 * @returns VBox status code.
     879 * @param   pVCpu       VMCPU handle.
     880 * @param   GCPtr       Virtual address of the first page in the range.
     881 * @param   fMmio2      Set if it is an MMIO2 page.
      882 * @param   fOpFlags    A combination of the PGM_MK_PG_XXX flags.
     883 */
     884VMMDECL(int) PGMShwMakePageWritable(PVMCPU pVCpu, RTGCPTR GCPtr, uint32_t fOpFlags)
     885{
     886    return pdmShwModifyPage(pVCpu, GCPtr, X86_PTE_RW, ~(uint64_t)0, fOpFlags);
     887}
     888
     889
     890/**
      891 * Changes the page flags for a single page in the shadow page tables so as to
     892 * make it not present.
     893 *
     894 * @returns VBox status code.
     895 * @param   pVCpu       VMCPU handle.
     896 * @param   GCPtr       Virtual address of the first page in the range.
     897 * @param   fOpFlags    A combination of the PGM_MK_PG_XXX flags.
     898 */
     899VMMDECL(int) PGMShwMakePageNotPresent(PVMCPU pVCpu, RTGCPTR GCPtr, uint32_t fOpFlags)
     900{
     901    return pdmShwModifyPage(pVCpu, GCPtr, 0, 0, fOpFlags);
     902}
     903
    877904
    878905/**
     
    21562183#if defined(IN_RC) || defined(VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0)
    21572184
     2185/** Common worker for PGMDynMapGCPage and PGMDynMapGCPageOff. */
     2186DECLINLINE(int) pgmDynMapGCPageInternal(PVM pVM, RTGCPHYS GCPhys, void **ppv)
     2187{
     2188    pgmLock(pVM);
     2189
     2190    /*
      2191     * Convert it to a writable page and pass it on to PGMDynMapHCPage.
     2192     */
     2193    int rc;
     2194    PPGMPAGE pPage = pgmPhysGetPage(&pVM->pgm.s, GCPhys);
     2195    if (RT_LIKELY(pPage))
     2196    {
     2197        rc = pgmPhysPageMakeWritable(pVM, pPage, GCPhys);
     2198        if (RT_SUCCESS(rc))
     2199        {
     2200            //Log(("PGMDynMapGCPage: GCPhys=%RGp pPage=%R[pgmpage]\n", GCPhys, pPage));
     2201#ifdef VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0
     2202            rc = pgmR0DynMapHCPageInlined(&pVM->pgm.s, PGM_PAGE_GET_HCPHYS(pPage), ppv);
     2203#else
     2204            rc = PGMDynMapHCPage(pVM, PGM_PAGE_GET_HCPHYS(pPage), ppv);
     2205#endif
     2206        }
     2207        else
     2208            AssertRC(rc);
     2209    }
     2210    else
     2211    {
     2212        AssertMsgFailed(("Invalid physical address %RGp!\n", GCPhys));
     2213        rc = VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS;
     2214    }
     2215
     2216    pgmUnlock(pVM);
     2217    return rc;
     2218}
     2219
    21582220/**
    21592221 * Temporarily maps one guest page specified by GC physical address.
     
    21712233{
    21722234    AssertMsg(!(GCPhys & PAGE_OFFSET_MASK), ("GCPhys=%RGp\n", GCPhys));
    2173 
    2174     /*
    2175      * Get the ram range.
    2176      */
    2177     PPGMRAMRANGE pRam = pVM->pgm.s.CTX_SUFF(pRamRanges);
    2178     while (pRam && GCPhys - pRam->GCPhys >= pRam->cb)
    2179         pRam = pRam->CTX_SUFF(pNext);
    2180     if (!pRam)
    2181     {
    2182         AssertMsgFailed(("Invalid physical address %RGp!\n", GCPhys));
    2183         return VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS;
    2184     }
    2185 
    2186     /*
    2187      * Pass it on to PGMDynMapHCPage.
    2188      */
    2189     RTHCPHYS HCPhys = PGM_PAGE_GET_HCPHYS(&pRam->aPages[(GCPhys - pRam->GCPhys) >> PAGE_SHIFT]);
    2190     //Log(("PGMDynMapGCPage: GCPhys=%RGp HCPhys=%RHp\n", GCPhys, HCPhys));
    2191 #ifdef VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0
    2192     pgmR0DynMapHCPageInlined(&pVM->pgm.s, HCPhys, ppv);
    2193 #else
    2194     PGMDynMapHCPage(pVM, HCPhys, ppv);
    2195 #endif
    2196     return VINF_SUCCESS;
     2235    return pgmDynMapGCPageInternal(pVM, GCPhys, ppv);
    21972236}
    21982237
     
    22152254VMMDECL(int) PGMDynMapGCPageOff(PVM pVM, RTGCPHYS GCPhys, void **ppv)
    22162255{
    2217     /*
    2218      * Get the ram range.
    2219      */
    2220     PPGMRAMRANGE pRam = pVM->pgm.s.CTX_SUFF(pRamRanges);
    2221     while (pRam && GCPhys - pRam->GCPhys >= pRam->cb)
    2222         pRam = pRam->CTX_SUFF(pNext);
    2223     if (!pRam)
    2224     {
    2225         AssertMsgFailed(("Invalid physical address %RGp!\n", GCPhys));
    2226         return VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS;
    2227     }
    2228 
    2229     /*
    2230      * Pass it on to PGMDynMapHCPage.
    2231      */
    2232     RTHCPHYS HCPhys = PGM_PAGE_GET_HCPHYS(&pRam->aPages[(GCPhys - pRam->GCPhys) >> PAGE_SHIFT]);
    2233 #ifdef VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0
    2234     pgmR0DynMapHCPageInlined(&pVM->pgm.s, HCPhys, ppv);
    2235 #else
    2236     PGMDynMapHCPage(pVM, HCPhys, ppv);
    2237 #endif
    2238     *ppv = (void *)((uintptr_t)*ppv | (uintptr_t)(GCPhys & PAGE_OFFSET_MASK));
    2239     return VINF_SUCCESS;
     2256    void *pv;
     2257    int rc = pgmDynMapGCPageInternal(pVM, GCPhys, &pv);
     2258    if (RT_SUCCESS(rc))
     2259    {
     2260        *ppv = (void *)((uintptr_t)pv | (uintptr_t)(GCPhys & PAGE_OFFSET_MASK));
     2261        return VINF_SUCCESS;
     2262    }
     2263    return rc;
    22402264}
    22412265
     
    23812405VMMDECL(void) PGMDynCheckLocks(PVM pVM)
    23822406{
    2383     for (unsigned i=0;i<RT_ELEMENTS(pVM->pgm.s.aLockedDynPageMapCache);i++)
     2407    for (unsigned i = 0; i < RT_ELEMENTS(pVM->pgm.s.aLockedDynPageMapCache); i++)
    23842408        Assert(!pVM->pgm.s.aLockedDynPageMapCache[i]);
    23852409}
     
    23942418 * @copydoc FNRTSTRFORMATTYPE */
    23952419static DECLCALLBACK(size_t) pgmFormatTypeHandlerPage(PFNRTSTROUTPUT pfnOutput, void *pvArgOutput,
    2396                                                     const char *pszType, void const *pvValue,
    2397                                                     int cchWidth, int cchPrecision, unsigned fFlags,
    2398                                                     void *pvUser)
     2420                                                     const char *pszType, void const *pvValue,
     2421                                                     int cchWidth, int cchPrecision, unsigned fFlags,
     2422                                                     void *pvUser)
    23992423{
    24002424    size_t    cch;
     
    24902514
    24912515#endif /* !IN_R0 || LOG_ENABLED */
    2492 
    24932516
    24942517/**
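
PGMDynMapGCPage and PGMDynMapGCPageOff now share a locked worker (pgmDynMapGCPageInternal) that looks the page up, makes it writable and only then maps it; the Off variant merely ORs the byte offset back into the page-aligned pointer. The standalone sketch below is illustrative only, not VirtualBox code; mapAlignedPage() and the static mapping window are hypothetical stand-ins for the real lookup/make-writable/dynamic-map sequence, and only the offset handling is modelled faithfully.

    /*
     * Illustrative sketch only -- NOT VirtualBox code.  mapAlignedPage() stands
     * in for pgmDynMapGCPageInternal's locked lookup + make-writable + map step;
     * mapPageWithOffset() mirrors the PGMDynMapGCPageOff-style offset handling.
     */
    #include <stdint.h>
    #include <stdio.h>

    #define PAGE_SIZE        0x1000u
    #define PAGE_OFFSET_MASK (PAGE_SIZE - 1)

    /* Pretend mapping window; page aligned so OR-ing the offset equals adding it. */
    static _Alignas(PAGE_SIZE) unsigned char g_abMappingWindow[PAGE_SIZE];

    /* Hypothetical worker: map the page containing GCPhys and return its base address. */
    static int mapAlignedPage(uint64_t GCPhys, void **ppv)
    {
        (void)GCPhys;               /* the real worker resolves and maps the page here */
        *ppv = g_abMappingWindow;
        return 0;                   /* VINF_SUCCESS */
    }

    /* PGMDynMapGCPageOff-style wrapper: keep the byte offset within the page. */
    static int mapPageWithOffset(uint64_t GCPhys, void **ppv)
    {
        void *pv;
        int   rc = mapAlignedPage(GCPhys & ~(uint64_t)PAGE_OFFSET_MASK, &pv);
        if (rc == 0)
            *ppv = (void *)((uintptr_t)pv | (uintptr_t)(GCPhys & PAGE_OFFSET_MASK));
        return rc;
    }

    int main(void)
    {
        void *pv = NULL;
        if (mapPageWithOffset(UINT64_C(0x12345678), &pv) == 0)
            printf("mapped at %p (offset %#x)\n", pv, 0x12345678u & PAGE_OFFSET_MASK);
        return 0;
    }
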
  • trunk/src/VBox/VMM/VMMAll/PGMAllPool.cpp

    r30301 r30326  
    13321332            if (rc == VINF_SUCCESS)
    13331333            {
    1334                 rc = PGMShwModifyPage(pVCpu, pvFault, 1, X86_PTE_RW, ~(uint64_t)X86_PTE_RW);
     1334                rc = PGMShwMakePageWritable(pVCpu, pvFault, PGM_MK_PG_IS_WRITE_FAULT);
    13351335                AssertMsg(rc == VINF_SUCCESS
    13361336                        /* In the SMP case the page table might be removed while we wait for the PGM lock in the trap handler. */
  • trunk/src/VBox/VMM/VMMAll/PGMAllShw.h

    r28800 r30326  
    55
    66/*
    7  * Copyright (C) 2006-2007 Oracle Corporation
     7 * Copyright (C) 2006-2010 Oracle Corporation
    88 *
    99 * This file is part of VirtualBox Open Source Edition (OSE), as
     
    118118RT_C_DECLS_BEGIN
    119119PGM_SHW_DECL(int, GetPage)(PVMCPU pVCpu, RTGCUINTPTR GCPtr, uint64_t *pfFlags, PRTHCPHYS pHCPhys);
    120 PGM_SHW_DECL(int, ModifyPage)(PVMCPU pVCpu, RTGCUINTPTR GCPtr, size_t cbPages, uint64_t fFlags, uint64_t fMask);
     120PGM_SHW_DECL(int, ModifyPage)(PVMCPU pVCpu, RTGCUINTPTR GCPtr, size_t cbPages, uint64_t fFlags, uint64_t fMask, uint32_t fOpFlags);
    121121RT_C_DECLS_END
    122122
     
    272272 * @param   fMask       The AND mask - page flags X86_PTE_*.
    273273 *                      Be extremely CAREFUL with ~'ing values because they can be 32-bit!
      274 * @param   fOpFlags    A combination of the PGM_MK_PG_XXX flags.
    274275 * @remark  You must use PGMMapModifyPage() for pages in a mapping.
    275276 */
    276 PGM_SHW_DECL(int, ModifyPage)(PVMCPU pVCpu, RTGCUINTPTR GCPtr, size_t cb, uint64_t fFlags, uint64_t fMask)
     277PGM_SHW_DECL(int, ModifyPage)(PVMCPU pVCpu, RTGCUINTPTR GCPtr, size_t cb, uint64_t fFlags, uint64_t fMask, uint32_t fOpFlags)
    277278{
    278279# if PGM_SHW_TYPE == PGM_TYPE_NESTED
     
    353354            if (pPT->a[iPTE].n.u1Present)
    354355            {
    355                 SHWPTE Pte;
    356 
    357                 Pte.u = (pPT->a[iPTE].u & (fMask | SHW_PTE_PG_MASK)) | (fFlags & ~SHW_PTE_PG_MASK);
    358                 ASMAtomicWriteSize(&pPT->a[iPTE], Pte.u);
    359                 Assert(pPT->a[iPTE].n.u1Present);
     356                SHWPTE const    OrgPte = pPT->a[iPTE];
     357                SHWPTE          NewPte;
     358
     359                NewPte.u = (OrgPte.u & (fMask | SHW_PTE_PG_MASK)) | (fFlags & ~SHW_PTE_PG_MASK);
     360                Assert(NewPte.n.u1Present);
     361                if (!NewPte.n.u1Present)
     362                {
     363                    /** @todo Some CSAM code path might end up here and upset
     364                     *  the page pool. */
     365                    AssertFailed();
     366                }
     367                else if (   NewPte.n.u1Write
     368                         && !OrgPte.n.u1Write
     369                         && !(fOpFlags & PGM_MK_PG_IS_MMIO2) )
     370                {
     371                    /** @todo Optimize \#PF handling by caching data.  We can
     372                     *        then use this when PGM_MK_PG_IS_WRITE_FAULT is
     373                     *        set instead of resolving the guest physical
     374                     *        address yet again. */
     375                    RTGCPHYS GCPhys;
     376                    uint64_t fGstPte;
     377                    rc = PGMGstGetPage(pVCpu, GCPtr, &fGstPte, &GCPhys);
     378                    AssertRC(rc);
     379                    if (RT_SUCCESS(rc))
     380                    {
     381                        Assert(fGstPte & X86_PTE_RW);
     382                        PPGMPAGE pPage = pgmPhysGetPage(&pVCpu->CTX_SUFF(pVM)->pgm.s, GCPhys);
     383                        Assert(pPage);
     384                        if (pPage)
     385                        {
     386                            rc = pgmPhysPageMakeWritable(pVM, pPage, GCPhys);
     387                            AssertRCReturn(rc, rc);
     388                            Log(("%s: pgmPhysPageMakeWritable on %RGv / %RGp %R[pgmpage]\n", __PRETTY_FUNCTION__, GCPtr, GCPhys, pPage));
     389                        }
     390                    }
     391                }
     392
     393                ASMAtomicWriteSize(&pPT->a[iPTE], NewPte.u);
    360394# if PGM_SHW_TYPE == PGM_TYPE_EPT
    361395                HWACCMInvalidatePhysPage(pVM, (RTGCPHYS)GCPtr);
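
The behavioural change in this worker is the ordering when a PTE gains write access: unless the caller marks the page as MMIO2, the backing physical page (shared, zero or write-monitored) is made writable first, and only then is the PTE carrying X86_PTE_RW written back atomically. The standalone sketch below is illustrative only, not VirtualBox code; SKETCHPAGE, sketchPageMakeWritable() and sketchShwWritePte() are simplified stand-ins assumed for the purpose of showing the ordering.

    /*
     * Illustrative sketch only -- NOT VirtualBox code.  Models the ordering the
     * new ModifyPage worker enforces when a shadow PTE gains X86_PTE_RW: resolve
     * the backing page (stand-in for pgmPhysPageMakeWritable) before publishing
     * the writable PTE.
     */
    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    #define X86_PTE_RW UINT64_C(0x0000000000000002)

    typedef struct SKETCHPAGE
    {
        bool fWriteMonitored;       /* stand-in for the shared/zero/write-monitored states */
    } SKETCHPAGE;

    /* Stand-in for pgmPhysPageMakeWritable(): resolve COW/zero/monitoring first. */
    static int sketchPageMakeWritable(SKETCHPAGE *pPage)
    {
        pPage->fWriteMonitored = false;
        return 0;                   /* VINF_SUCCESS */
    }

    /* Stand-in for the RW-upgrade path: backing page first, PTE second. */
    static int sketchShwWritePte(uint64_t *puPte, uint64_t uNewPte, SKETCHPAGE *pBacking, bool fIsMmio2)
    {
        bool const fGainsWrite = (uNewPte & X86_PTE_RW) && !(*puPte & X86_PTE_RW);
        if (fGainsWrite && !fIsMmio2)
        {
            int rc = sketchPageMakeWritable(pBacking);   /* must succeed before RW is set */
            if (rc != 0)
                return rc;
        }
        *puPte = uNewPte;                                /* only now is X86_PTE_RW visible */
        return 0;
    }

    int main(void)
    {
        SKETCHPAGE Page = { true };                      /* pretend it is write monitored */
        uint64_t   uPte = UINT64_C(0x12345001);          /* mapped, read-only */
        sketchShwWritePte(&uPte, uPte | X86_PTE_RW, &Page, false /*fIsMmio2*/);
        printf("pte=%#llx fWriteMonitored=%d\n", (unsigned long long)uPte, Page.fWriteMonitored);
        return 0;
    }
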
  • trunk/src/VBox/VMM/VMMGC/VMMGC.def

    r29902 r30326  
    2929    PDMQueueInsert
    3030    PGMHandlerPhysicalPageTempOff
    31     PGMShwModifyPage
     31    PGMShwMakePageWritable
    3232    PGMPhysSimpleWriteGCPhys
    3333    PGMPhysSimpleReadGCPtr
  • trunk/src/VBox/VMM/VMMR0/VMMR0.def

    r29902 r30326  
    2929    PDMQueueInsert
    3030    PGMHandlerPhysicalPageTempOff
    31     PGMShwModifyPage
     31    PGMShwMakePageWritable
    3232    PGMPhysSimpleWriteGCPhys
    3333    PGMPhysSimpleReadGCPtr