VirtualBox

Changeset 92162 in vbox for trunk/src/VBox


Timestamp: Oct 31, 2021 11:34:31 PM
Author: vboxsync
svn:sync-xref-src-repo-rev: 147985
Message:

VMM/PGM,DevVGA: Baked MMIO2 dirty page tracking into PGM, moving it out of DevVGA. Using the handler state to record a page as dirty (PGM_PAGE_HNDL_PHYS_STATE_DISABLED). bugref:10122

Location: trunk/src/VBox
Files: 13 edited
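
Note (illustrative, not part of the changeset): the device-side flow this changeset switches DevVGA over to boils down to three helper calls, all of which appear in the diffs below. A minimal sketch, where pThis->hMmio2, pThis->pbData, mydevMapUnmap, iRegion and cbRegion are invented for illustration:

    /* 1. Create the MMIO2 region with dirty page tracking baked in. */
    rc = PDMDevHlpPCIIORegionCreateMmio2Ex(pDevIns, iRegion, cbRegion,
                                           PCI_ADDRESS_SPACE_MEM_PREFETCH,
                                           PGMPHYS_MMIO2_FLAGS_TRACK_DIRTY_PAGES,
                                           mydevMapUnmap, "MyDev",
                                           (void **)&pThis->pbData, &pThis->hMmio2);

    /* 2. Tracking can be switched on and off at runtime (e.g. on map/unmap). */
    rc = PDMDevHlpMmio2ControlDirtyPageTracking(pDevIns, pThis->hMmio2, true /*fEnabled*/);

    /* 3. Poll: fetch and clear the dirty bits, one bit per page; the buffer
     *    must cover the whole region (here a 16 MiB region, 512 byte bitmap). */
    uint64_t bmDirty[_16M / PAGE_SIZE / 64];
    rc = PDMDevHlpMmio2QueryAndResetDirtyBitmap(pDevIns, pThis->hMmio2, bmDirty, sizeof(bmDirty));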

  • trunk/src/VBox/Devices/Graphics/DevVGA.cpp

    --- r91930
    +++ r92162

@@ -309 +309 @@
 {
     AssertMsg(offVRAM < pThis->vram_size, ("offVRAM = %p, pThis->vram_size = %p\n", offVRAM, pThis->vram_size));
-    ASMBitSet(&pThis->au32DirtyBitmap[0], offVRAM >> PAGE_SHIFT);
+    ASMBitSet(&pThis->bmDirtyBitmap[0], offVRAM >> PAGE_SHIFT);
     pThis->fHasDirtyBits = true;
 }

@@ -324 +324 @@
 {
     AssertMsg(offVRAM < pThis->vram_size, ("offVRAM = %p, pThis->vram_size = %p\n", offVRAM, pThis->vram_size));
-    return ASMBitTest(&pThis->au32DirtyBitmap[0], offVRAM >> PAGE_SHIFT);
+    return ASMBitTest(&pThis->bmDirtyBitmap[0], offVRAM >> PAGE_SHIFT);
 }

 #ifdef IN_RING3
+
 /**
  * Reset dirty flags in a given range.

@@ -340 +341 @@
     Assert(offVRAMEnd <= pThis->vram_size);
     Assert(offVRAMStart < offVRAMEnd);
-    ASMBitClearRange(&pThis->au32DirtyBitmap[0], offVRAMStart >> PAGE_SHIFT, offVRAMEnd >> PAGE_SHIFT);
-}
+    ASMBitClearRange(&pThis->bmDirtyBitmap[0], offVRAMStart >> PAGE_SHIFT, offVRAMEnd >> PAGE_SHIFT);
+}
+
+
+/**
+ * Queries the VRAM dirty bits and resets the monitoring.
+ */
+static void vgaR3UpdateDirtyBitsAndResetMonitoring(PPDMDEVINS pDevIns, PVGASTATE pThis)
+{
+    size_t const cbBitmap = RT_ALIGN_Z(RT_MIN(pThis->vram_size, VGA_VRAM_MAX), PAGE_SIZE * 64) / PAGE_SIZE / 8;
+
+    /*
+     * If we don't have any dirty bits from MMIO accesses, we can just query
+     * straight into the dirty buffer.
+     */
+    if (!pThis->fHasDirtyBits)
+    {
+        int rc = PDMDevHlpMmio2QueryAndResetDirtyBitmap(pDevIns, pThis->hMmio2VRam, pThis->bmDirtyBitmap, cbBitmap);
+        AssertRC(rc);
+    }
+    /*
+     * Otherwise we'll have to query and merge the two.
+     */
+    else
+    {
+        uint64_t bmDirtyPages[VGA_VRAM_MAX / PAGE_SIZE / 64]; /* (256 MB VRAM -> 8KB bitmap) */
+        int rc = PDMDevHlpMmio2QueryAndResetDirtyBitmap(pDevIns, pThis->hMmio2VRam, bmDirtyPages, cbBitmap);
+        if (RT_SUCCESS(rc))
+        {
+            /** @todo could use ORPS or VORPS here, I think. */
+            uint64_t     *pbmDst      = pThis->bmDirtyBitmap;
+            size_t const  cTodo       = cbBitmap / sizeof(uint64_t);
+
+            /* Do 64 bytes at a time first. */
+            size_t const  cTodoFirst  = cTodo & ~(size_t)7;
+            size_t        idx;
+            for (idx = 0; idx < cTodoFirst; idx += 8)
+            {
+                pbmDst[idx + 0] |= bmDirtyPages[idx + 0];
+                pbmDst[idx + 1] |= bmDirtyPages[idx + 1];
+                pbmDst[idx + 2] |= bmDirtyPages[idx + 2];
+                pbmDst[idx + 3] |= bmDirtyPages[idx + 3];
+                pbmDst[idx + 4] |= bmDirtyPages[idx + 4];
+                pbmDst[idx + 5] |= bmDirtyPages[idx + 5];
+                pbmDst[idx + 6] |= bmDirtyPages[idx + 6];
+                pbmDst[idx + 7] |= bmDirtyPages[idx + 7];
+            }
+
+            /* Then do a mop-up of anything remaining. */
+            switch (cTodo - idx)
+            {
+                case 7:     pbmDst[idx + 6] |= bmDirtyPages[idx + 6]; RT_FALL_THRU();
+                case 6:     pbmDst[idx + 5] |= bmDirtyPages[idx + 5]; RT_FALL_THRU();
+                case 5:     pbmDst[idx + 4] |= bmDirtyPages[idx + 4]; RT_FALL_THRU();
+                case 4:     pbmDst[idx + 3] |= bmDirtyPages[idx + 3]; RT_FALL_THRU();
+                case 3:     pbmDst[idx + 2] |= bmDirtyPages[idx + 2]; RT_FALL_THRU();
+                case 2:     pbmDst[idx + 1] |= bmDirtyPages[idx + 1]; RT_FALL_THRU();
+                case 1:     pbmDst[idx]     |= bmDirtyPages[idx];     break;
+                case 0:     break;
+                default:    AssertFailedBreak();
+            }
+
+            pThis->fHasDirtyBits = false;
+        }
+    }
+}
+
 #endif /* IN_RING3 */

     
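Note (illustrative, not part of the changeset): once vgaR3UpdateDirtyBitsAndResetMonitoring() has merged the PGM bitmap into pThis->bmDirtyBitmap, a consumer can walk the set bits with IPRT's bit-scan helpers from iprt/asm.h, visiting each dirty page exactly once:

    /* Sketch of a consumer loop; the redraw step is a placeholder. */
    uint32_t const cPages = (uint32_t)(pThis->vram_size >> PAGE_SHIFT);
    int32_t iPage = ASMBitFirstSet(&pThis->bmDirtyBitmap[0], cPages);
    while (iPage >= 0)
    {
        uint32_t const offVRAM = (uint32_t)iPage << PAGE_SHIFT;
        /* ... redraw whatever is backed by the page at offVRAM ... */
        iPage = ASMBitNextSet(&pThis->bmDirtyBitmap[0], cPages, (uint32_t)iPage);
    }
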
@@ -2480 +2546 @@
     /* round up page_max by one page, as otherwise this can be -PAGE_SIZE,
      * which causes assertion trouble in vgaR3ResetDirty. */
-    page_max = (  pThis->start_addr * 4 + pThis->line_offset * pThis->last_scr_height
-                - 1 + PAGE_SIZE) & ~PAGE_OFFSET_MASK;
+    page_max = (pThis->start_addr * 4 + pThis->line_offset * pThis->last_scr_height - 1 + PAGE_SIZE) & ~PAGE_OFFSET_MASK;
     vgaR3ResetDirty(pThis, page_min, page_max + PAGE_SIZE);
 }

@@ -2516 +2581 @@

 /**
- * Worker for vgaR3PortUpdateDisplay(), vboxR3UpdateDisplayAllInternal() and
+ * Worker for vgaR3PortUpdateDisplay(), vgaR3UpdateDisplayAllInternal() and
  * vgaR3PortTakeScreenshot().
  */

@@ -3614 +3679 @@


-/**
- * Handle LFB access.
- *
- * @returns Strict VBox status code.
- * @param   pVM         VM handle.
- * @param   pDevIns     The device instance.
- * @param   pThis       The shared VGA instance data.
- * @param   GCPhys      The access physical address.
- * @param   GCPtr       The access virtual address (only GC).
- */
-static VBOXSTRICTRC vgaLFBAccess(PVMCC pVM, PPDMDEVINS pDevIns, PVGASTATE pThis, RTGCPHYS GCPhys, RTGCPTR GCPtr)
-{
-    RT_NOREF(pVM);
-
-    VBOXSTRICTRC rc = PDMDevHlpCritSectEnter(pDevIns, &pThis->CritSect, VINF_EM_RAW_EMULATE_INSTR);
-    if (rc == VINF_SUCCESS)
-    {
-        /*
-         * Set page dirty bit.
-         */
-        vgaR3MarkDirty(pThis, GCPhys - pThis->GCPhysVRAM);
-        pThis->fLFBUpdated = true;
-
-        /*
-         * Turn off the write handler for this particular page and make it R/W.
-         * Then return telling the caller to restart the guest instruction.
-         * ASSUME: the guest always maps video memory RW.
-         */
-        rc = PDMDevHlpPGMHandlerPhysicalPageTempOff(pDevIns, pThis->GCPhysVRAM, GCPhys);
-        if (RT_SUCCESS(rc))
-        {
-#ifndef IN_RING3
-            rc = PGMShwMakePageWritable(PDMDevHlpGetVMCPU(pDevIns), GCPtr,
-                                        PGM_MK_PG_IS_MMIO2 | PGM_MK_PG_IS_WRITE_FAULT);
-            PDMDevHlpCritSectLeave(pDevIns, &pThis->CritSect);
-            AssertMsgReturn(    rc == VINF_SUCCESS
-                            /* In the SMP case the page table might be removed while we wait for the PGM lock in the trap handler. */
-                            ||  rc == VERR_PAGE_TABLE_NOT_PRESENT
-                            ||  rc == VERR_PAGE_NOT_PRESENT,
-                            ("PGMShwModifyPage -> GCPtr=%RGv rc=%d\n", GCPtr, VBOXSTRICTRC_VAL(rc)),
-                            rc);
-#else  /* IN_RING3 - We don't have any virtual page address of the access here. */
-            PDMDevHlpCritSectLeave(pDevIns, &pThis->CritSect);
-            Assert(GCPtr == 0);
-            RT_NOREF1(GCPtr);
-#endif
-            return VINF_SUCCESS;
-        }
-
-        PDMDevHlpCritSectLeave(pDevIns, &pThis->CritSect);
-        AssertMsgFailed(("PGMHandlerPhysicalPageTempOff -> rc=%d\n", VBOXSTRICTRC_VAL(rc)));
-    }
-    return rc;
-}
-
-
-#ifndef IN_RING3
-/**
- * @callback_method_impl{FNPGMRCPHYSHANDLER, \#PF Handler for VBE LFB access.}
- */
-PDMBOTHCBDECL(VBOXSTRICTRC) vgaLbfAccessPfHandler(PVMCC pVM, PVMCPUCC pVCpu, RTGCUINT uErrorCode, PCPUMCTXCORE pRegFrame,
-                                                  RTGCPTR pvFault, RTGCPHYS GCPhysFault, void *pvUser)
-{
-    PPDMDEVINS  pDevIns = (PPDMDEVINS)pvUser;
-    PVGASTATE   pThis   = PDMDEVINS_2_DATA(pDevIns, PVGASTATE);
-    //PVGASTATECC pThisCC = PDMDEVINS_2_DATA_CC(pDevIns, PVGASTATECC);
-    Assert(GCPhysFault >= pThis->GCPhysVRAM);
-    AssertMsg(uErrorCode & X86_TRAP_PF_RW, ("uErrorCode=%#x\n", uErrorCode));
-    RT_NOREF3(pVCpu, pRegFrame, uErrorCode);
-
-    return vgaLFBAccess(pVM, pDevIns, pThis, GCPhysFault, pvFault);
-}
-#endif /* !IN_RING3 */
-
-
-/**
- * @callback_method_impl{FNPGMPHYSHANDLER,
- *      VBE LFB write access handler for the dirty tracking.}
- */
-PGM_ALL_CB_DECL(VBOXSTRICTRC) vgaLFBAccessHandler(PVMCC pVM, PVMCPUCC pVCpu, RTGCPHYS GCPhys, void *pvPhys,
-                                                  void *pvBuf, size_t cbBuf, PGMACCESSTYPE enmAccessType,
-                                                  PGMACCESSORIGIN enmOrigin, void *pvUser)
-{
-    PPDMDEVINS  pDevIns = (PPDMDEVINS)pvUser;
-    PVGASTATE   pThis   = PDMDEVINS_2_DATA(pDevIns, PVGASTATE);
-    //PVGASTATECC pThisCC = PDMDEVINS_2_DATA_CC(pDevIns, PVGASTATECC);
-    Assert(GCPhys >= pThis->GCPhysVRAM);
-    RT_NOREF(pVCpu, pvPhys, pvBuf, cbBuf, enmAccessType, enmOrigin);
-
-    VBOXSTRICTRC rc = vgaLFBAccess(pVM, pDevIns, pThis, GCPhys, 0);
-    if (rc == VINF_SUCCESS)
-        rc = VINF_PGM_HANDLER_DO_DEFAULT;
-#ifdef IN_RING3
-    else
-        AssertMsg(rc < VINF_SUCCESS, ("rc=%Rrc\n", VBOXSTRICTRC_VAL(rc)));
-#endif
-    return rc;
-}
-
-
 /* -=-=-=-=-=- All rings: VGA BIOS I/Os -=-=-=-=-=- */


@@ -4785 +4750 @@

     STAM_COUNTER_INC(&pThis->StatUpdateDisp);
-    if (pThis->fHasDirtyBits && pThis->GCPhysVRAM && pThis->GCPhysVRAM != NIL_RTGCPHYS)
-    {
-        PDMDevHlpPGMHandlerPhysicalReset(pDevIns, pThis->GCPhysVRAM);
-        pThis->fHasDirtyBits = false;
-    }
+
+    if (pThis->GCPhysVRAM != 0 && pThis->GCPhysVRAM != NIL_RTGCPHYS)
+        vgaR3UpdateDirtyBitsAndResetMonitoring(pDevIns, pThis);
+
     if (pThis->fRemappedVGA)
     {

@@ -4806 +4770 @@
  * Internal vgaR3PortUpdateDisplayAll worker called under pThis->CritSect.
  */
-/** @todo Why the 'vboxR3' prefix? */
-static int vboxR3UpdateDisplayAllInternal(PPDMDEVINS pDevIns, PVGASTATE pThis, PVGASTATECC pThisCC, bool fFailOnResize)
+static int vgaR3UpdateDisplayAllInternal(PPDMDEVINS pDevIns, PVGASTATE pThis, PVGASTATECC pThisCC, bool fFailOnResize)
 {
 # ifdef VBOX_WITH_VMSVGA

@@ -4814 +4777 @@
 # endif
     {
-        /* The dirty bits array has just been cleared, reset handlers as well. */
-        if (pThis->GCPhysVRAM && pThis->GCPhysVRAM != NIL_RTGCPHYS)
-            PDMDevHlpPGMHandlerPhysicalReset(pDevIns, pThis->GCPhysVRAM);
-    }
+        /* Update the dirty bits. */
+        if (pThis->GCPhysVRAM != 0 && pThis->GCPhysVRAM != NIL_RTGCPHYS)
+            vgaR3UpdateDirtyBitsAndResetMonitoring(pDevIns, pThis);
+    }
+
     if (pThis->fRemappedVGA)
     {

@@ -4849 +4813 @@
     AssertRCReturn(rc, rc);

-    rc = vboxR3UpdateDisplayAllInternal(pDevIns, pThis, pThisCC, fFailOnResize);
+    rc = vgaR3UpdateDisplayAllInternal(pDevIns, pThis, pThisCC, fFailOnResize);

     PDMDevHlpCritSectLeave(pDevIns, &pThis->CritSect);

@@ -4926 +4890 @@
     /*
      * Get screenshot. This function will fail if a resize is required.
-     * So there is no need to do a 'vboxR3UpdateDisplayAllInternal' before taking the screenshot.
+     * So there is no need to do a 'vgaR3UpdateDisplayAllInternal' before taking the screenshot.
      */


@@ -5500 +5464 @@
 int vgaR3RegisterVRAMHandler(PPDMDEVINS pDevIns, PVGASTATE pThis, uint64_t cbFrameBuffer)
 {
-    Assert(pThis->GCPhysVRAM);
-    int rc = PDMDevHlpPGMHandlerPhysicalRegister(pDevIns,
-                                                 pThis->GCPhysVRAM, pThis->GCPhysVRAM + (cbFrameBuffer - 1),
-                                                 pThis->hLfbAccessHandlerType, pDevIns, pDevIns->pDevInsR0RemoveMe,
-                                                 pDevIns->pDevInsForRC, "VGA LFB");
-
+    Assert(pThis->GCPhysVRAM != 0 && pThis->GCPhysVRAM != NIL_RTGCPHYS);
+    int rc = PDMDevHlpMmio2ControlDirtyPageTracking(pDevIns, pThis->hMmio2VRam, true /*fEnabled*/);
+    RT_NOREF(cbFrameBuffer);
     AssertRC(rc);
     return rc;

@@ -5516 +5477 @@
 int vgaR3UnregisterVRAMHandler(PPDMDEVINS pDevIns, PVGASTATE pThis)
 {
-    Assert(pThis->GCPhysVRAM);
-    int rc = PDMDevHlpPGMHandlerPhysicalDeregister(pDevIns, pThis->GCPhysVRAM);
+    Assert(pThis->GCPhysVRAM != 0 && pThis->GCPhysVRAM != NIL_RTGCPHYS);
+    int rc = PDMDevHlpMmio2ControlDirtyPageTracking(pDevIns, pThis->hMmio2VRam, false /*fEnabled*/);
     AssertRC(rc);
     return rc;

@@ -5557 +5518 @@
     {
         /*
-         * Mapping the VRAM.
+         * Make sure the dirty page tracking state is up to date before mapping it.
+         */
+# ifdef VBOX_WITH_VMSVGA
+        rc = PDMDevHlpMmio2ControlDirtyPageTracking(pDevIns, pThis->hMmio2VRam,
+                                                    !pThis->svga.fEnabled || (pThis->svga.fEnabled && pThis->svga.fVRAMTracking));
+# else
+        rc = PDMDevHlpMmio2ControlDirtyPageTracking(pDevIns, pThis->hMmio2VRam, true /*fEnabled*/);
+# endif
+        AssertLogRelRC(rc);
+
+        /*
+         * Map the VRAM.
          */
         rc = PDMDevHlpMmio2Map(pDevIns, pThis->hMmio2VRam, GCPhysAddress);

@@ -5563 +5535 @@
         if (RT_SUCCESS(rc))
         {
-# ifdef VBOX_WITH_VMSVGA
-            if (    !pThis->svga.fEnabled
-                ||  (   pThis->svga.fEnabled
-                     && pThis->svga.fVRAMTracking
-                    )
-               )
-# endif
-            {
-                rc = PDMDevHlpPGMHandlerPhysicalRegister(pDevIns, GCPhysAddress, GCPhysAddress + (pThis->vram_size - 1),
-                                                         pThis->hLfbAccessHandlerType, pDevIns, pDevIns->pDevInsR0RemoveMe,
-                                                         pDevIns->pDevInsForRC, "VGA LFB");
-                AssertLogRelRC(rc);
-            }
-
             pThis->GCPhysVRAM = GCPhysAddress;
             pThis->vbe_regs[VBE_DISPI_INDEX_FB_BASE_HI] = GCPhysAddress >> 16;

@@ -5587 +5545 @@
         /*
          * Unmapping of the VRAM in progress (caller will do that).
-         * Deregister the access handler so PGM doesn't get upset.
          */
         Assert(pThis->GCPhysVRAM);
-# ifdef VBOX_WITH_VMSVGA
-        if (    !pThis->svga.fEnabled
-            ||  (   pThis->svga.fEnabled
-                 && pThis->svga.fVRAMTracking
-                )
-           )
-# endif
-        {
-            rc = PDMDevHlpPGMHandlerPhysicalDeregister(pDevIns, pThis->GCPhysVRAM);
-            AssertRC(rc);
-        }
-# ifdef VBOX_WITH_VMSVGA
-        else
-            rc = VINF_SUCCESS;
-# endif
         pThis->GCPhysVRAM = 0;
+        rc = VINF_SUCCESS;
         /* NB: VBE_DISPI_INDEX_FB_BASE_HI is left unchanged here. */
     }

@@ -6049 +5992 @@
      * Reset the LFB mapping.
      */
-    pThis->fLFBUpdated = false;
-    if (    (   pDevIns->fRCEnabled
-             || pDevIns->fR0Enabled)
-        &&  pThis->GCPhysVRAM
-        &&  pThis->GCPhysVRAM != NIL_RTGCPHYS)
-    {
-        int rc = PDMDevHlpPGMHandlerPhysicalReset(pDevIns, pThis->GCPhysVRAM);
+    if (   (   pDevIns->fRCEnabled
+            || pDevIns->fR0Enabled)
+        && pThis->GCPhysVRAM != 0
+        && pThis->GCPhysVRAM != NIL_RTGCPHYS)
+    {
+        /** @todo r=bird: This used to be a PDMDevHlpPGMHandlerPhysicalReset call.
+         *        Not quite sure if it was/is needed. Besides, where do we reset the
+         *        dirty bitmap (bmDirtyBitmap)? */
+        int rc = PDMDevHlpMmio2ResetDirtyBitmap(pDevIns, pThis->hMmio2VRam);
         AssertRC(rc);
     }

@@ -6606 +6551 @@
      */
     rc = PDMDevHlpPCIIORegionCreateMmio2Ex(pDevIns, pThis->pciRegions.iVRAM, pThis->vram_size,
-                                           PCI_ADDRESS_SPACE_MEM_PREFETCH, 0 /*fFlags*/, vgaR3PciIORegionVRamMapUnmap,
-                                           "VRam", (void **)&pThisCC->pbVRam, &pThis->hMmio2VRam);
+                                           PCI_ADDRESS_SPACE_MEM_PREFETCH, PGMPHYS_MMIO2_FLAGS_TRACK_DIRTY_PAGES,
+                                           vgaR3PciIORegionVRamMapUnmap, "VRam", (void **)&pThisCC->pbVRam, &pThis->hMmio2VRam);
     AssertLogRelRCReturn(rc, PDMDevHlpVMSetError(pDevIns, rc, RT_SRC_POS,
                                                  N_("Failed to allocate %u bytes of VRAM"), pThis->vram_size));
-
-    /*
-     * Register access handler types for tracking dirty VRAM pages.
-     */
-    rc = PDMDevHlpPGMHandlerPhysicalTypeRegister(pDevIns, PGMPHYSHANDLERKIND_WRITE,
-                                                 vgaLFBAccessHandler,
-                                                 "vgaLFBAccessHandler", "vgaLbfAccessPfHandler",
-                                                 "vgaLFBAccessHandler", "vgaLbfAccessPfHandler",
-                                                 "VGA LFB", &pThis->hLfbAccessHandlerType);
-    AssertRCReturn(rc, rc);

     /*
  • trunk/src/VBox/Devices/Graphics/DevVGA.h

    --- r87105
    +++ r92162

@@ -349 +349 @@
     uint32_t                    cMilliesRefreshInterval;
     /** Bitmap tracking dirty pages. */
-    uint32_t                    au32DirtyBitmap[VGA_VRAM_MAX / PAGE_SIZE / 32];
+    uint64_t                    bmDirtyBitmap[VGA_VRAM_MAX / PAGE_SIZE / 64];

     /** Flag indicating that there are dirty bits. This is used to optimize the handler resetting. */
     bool                        fHasDirtyBits;
-    /** LFB was updated flag. */
-    bool                        fLFBUpdated;
     /** Flag indicating that the VGA memory in the 0xa0000-0xbffff region has been remapped to allow direct access. */
     bool                        fRemappedVGA;

@@ -369 +367 @@
     bool                        fVMSVGAPciId;
     bool                        fVMSVGAPciBarLayout;
-    bool                        Padding4[2];
+    bool                        Padding4[3];
 #else
-    bool                        Padding4[4+2];
+    bool                        Padding4[4+3];
 #endif

@@ -382 +380 @@
 #endif
     } pciRegions;
-
-    /** Physical access type for the linear frame buffer dirty page tracking. */
-    PGMPHYSHANDLERTYPE          hLfbAccessHandlerType;

     /** The physical address the VRAM was assigned. */
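
Note (illustrative, not part of the changeset): the new bmDirtyBitmap layout is easy to sanity-check. With VGA_VRAM_MAX at 256 MiB and 4 KiB pages there are 65536 pages; one bit per page packed into uint64_t words gives 1024 words, i.e. the 8 KiB bitmap the merge loop in DevVGA.cpp mentions:

    AssertCompile(VGA_VRAM_MAX / PAGE_SIZE / 64 * sizeof(uint64_t) == _8K); /* 65536 pages -> 8 KiB */
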
  • trunk/src/VBox/VMM/VMMAll/PGMAllBth.h

    --- r92046
    +++ r92162

@@ -312 +312 @@
             if (pCurType->CTX_SUFF(pfnPfHandler))
             {
-                PPGMPOOL    pPool  = pVM->pgm.s.CTX_SUFF(pPool);
-                void       *pvUser = pCur->CTX_SUFF(pvUser);
-
                 STAM_PROFILE_START(&pCur->Stat, h);
-                if (pCur->hType != pPool->hAccessHandlerType)
+
+                if (pCurType->fKeepPgmLock)
                 {
+                    rcStrict = pCurType->CTX_SUFF(pfnPfHandler)(pVM, pVCpu, uErr, pRegFrame, pvFault, GCPhysFault,
+                                                                pCur->CTX_SUFF(pvUser));
+
+#  ifdef VBOX_WITH_STATISTICS
+                    pCur = pgmHandlerPhysicalLookup(pVM, GCPhysFault); /* paranoia in case the handler deregistered itself */
+                    if (pCur)
+                        STAM_PROFILE_STOP(&pCur->Stat, h);
+#  endif
+                }
+                else
+                {
+                    void * const pvUser = pCur->CTX_SUFF(pvUser);
                     PGM_UNLOCK(pVM);
                     *pfLockTaken = false;
+
+                    rcStrict = pCurType->CTX_SUFF(pfnPfHandler)(pVM, pVCpu, uErr, pRegFrame, pvFault, GCPhysFault, pvUser);
+
+#  ifdef VBOX_WITH_STATISTICS
+                    PGM_LOCK_VOID(pVM);
+                    pCur = pgmHandlerPhysicalLookup(pVM, GCPhysFault);
+                    if (pCur)
+                        STAM_PROFILE_STOP(&pCur->Stat, h);
+                    PGM_UNLOCK(pVM);
+#  endif
                 }
-
-                rcStrict = pCurType->CTX_SUFF(pfnPfHandler)(pVM, pVCpu, uErr, pRegFrame, pvFault, GCPhysFault, pvUser);
-
-#  ifdef VBOX_WITH_STATISTICS
-                PGM_LOCK_VOID(pVM);
-                pCur = pgmHandlerPhysicalLookup(pVM, GCPhysFault);
-                if (pCur)
-                    STAM_PROFILE_STOP(&pCur->Stat, h);
-                PGM_UNLOCK(pVM);
-#  endif
             }
             else
  • trunk/src/VBox/VMM/VMMAll/PGMAllHandler.cpp

    --- r92157
    +++ r92162

@@ -49 +49 @@
 *   Internal Functions                                                                                                           *
 *********************************************************************************************************************************/
-static int  pgmHandlerPhysicalSetRamFlagsAndFlushShadowPTs(PVMCC pVM, PPGMPHYSHANDLER pCur, PPGMRAMRANGE pRam);
+static int  pgmHandlerPhysicalSetRamFlagsAndFlushShadowPTs(PVMCC pVM, PPGMPHYSHANDLER pCur, PPGMRAMRANGE pRam,
+                                                           void *pvBitmap, uint32_t offBitmap);
 static void pgmHandlerPhysicalDeregisterNotifyNEM(PVMCC pVM, PPGMPHYSHANDLER pCur);
 static void pgmHandlerPhysicalResetRamFlags(PVMCC pVM, PPGMPHYSHANDLER pCur);

@@ -286 +287 @@
     if (RTAvlroGCPhysInsert(&pVM->pgm.s.CTX_SUFF(pTrees)->PhysHandlers, &pPhysHandler->Core))
     {
-        int rc = pgmHandlerPhysicalSetRamFlagsAndFlushShadowPTs(pVM, pPhysHandler, pRam);
+        int rc = pgmHandlerPhysicalSetRamFlagsAndFlushShadowPTs(pVM, pPhysHandler, pRam, NULL /*pvBitmap*/, 0 /*offBitmap*/);
         if (rc == VINF_PGM_SYNC_CR3)
             rc = VINF_PGM_GCPHYS_ALIASED;

@@ -365 +366 @@
 * @retval  VINF_PGM_SYNC_CR3 when the shadow PTs could be updated because
 *          the guest page aliased or/and mapped by multiple PTs. FFs set.
- * @param   pVM     The cross context VM structure.
- * @param   pCur    The physical handler.
- * @param   pRam    The RAM range.
- */
-static int pgmHandlerPhysicalSetRamFlagsAndFlushShadowPTs(PVMCC pVM, PPGMPHYSHANDLER pCur, PPGMRAMRANGE pRam)
+ * @param   pVM         The cross context VM structure.
+ * @param   pCur        The physical handler.
+ * @param   pRam        The RAM range.
+ * @param   pvBitmap    Dirty bitmap. Optional.
+ * @param   offBitmap   Dirty bitmap offset.
+ */
+static int pgmHandlerPhysicalSetRamFlagsAndFlushShadowPTs(PVMCC pVM, PPGMPHYSHANDLER pCur, PPGMRAMRANGE pRam,
+                                                          void *pvBitmap, uint32_t offBitmap)
 {
     /*

@@ -410 +414 @@
             }
 #endif
+            if (pvBitmap)
+                ASMBitSet(pvBitmap, offBitmap);
         }

@@ -416 +422 @@
             break;
         i++;
+        offBitmap++;
     }

@@ -905 +912 @@
                      * Set ram flags, flush shadow PT entries and finally tell REM about this.
                      */
-                    rc = pgmHandlerPhysicalSetRamFlagsAndFlushShadowPTs(pVM, pCur, pRam);
+                    rc = pgmHandlerPhysicalSetRamFlagsAndFlushShadowPTs(pVM, pCur, pRam, NULL, 0);

                     /** @todo NEM: not sure we need this notification... */

@@ -1215 +1222 @@
                      * Set the flags and flush shadow PT entries.
                      */
-                    rc = pgmHandlerPhysicalSetRamFlagsAndFlushShadowPTs(pVM, pCur, pRam);
+                    rc = pgmHandlerPhysicalSetRamFlagsAndFlushShadowPTs(pVM, pCur, pRam, NULL /*pvBitmap*/, 0 /*offBitmap*/);
                 }

@@ -1241 +1248 @@

     PGM_UNLOCK(pVM);
+    return rc;
+}
+
+
+/**
+ * Special version of PGMHandlerPhysicalReset used by MMIO2 w/ dirty page
+ * tracking.
+ *
+ * @returns VBox status code.
+ * @param   pVM         The cross context VM structure.
+ * @param   GCPhys      The start address of the handler region.
+ * @param   pvBitmap    Dirty bitmap. Caller has cleared this already, only
+ *                      dirty bits will be set. Caller also made sure it's big
+ *                      enough.
+ * @param   offBitmap   Dirty bitmap offset.
+ * @remarks Caller must own the PGM critical section.
+ */
+DECLHIDDEN(int) pgmHandlerPhysicalResetMmio2WithBitmap(PVMCC pVM, RTGCPHYS GCPhys, void *pvBitmap, uint32_t offBitmap)
+{
+    LogFlow(("pgmHandlerPhysicalResetMmio2WithBitmap GCPhys=%RGp\n", GCPhys));
+    PGM_LOCK_ASSERT_OWNER(pVM);
+
+    /*
+     * Find the handler.
+     */
+    int rc;
+    PPGMPHYSHANDLER pCur = (PPGMPHYSHANDLER)RTAvlroGCPhysGet(&pVM->pgm.s.CTX_SUFF(pTrees)->PhysHandlers, GCPhys);
+    if (RT_LIKELY(pCur))
+    {
+        /*
+         * Validate kind.
+         */
+        PPGMPHYSHANDLERTYPEINT pCurType = PGMPHYSHANDLER_GET_TYPE(pVM, pCur);
+        if (pCurType->enmKind == PGMPHYSHANDLERKIND_WRITE)
+        {
+            STAM_COUNTER_INC(&pVM->pgm.s.Stats.CTX_MID_Z(Stat,PhysHandlerReset));
+
+            PPGMRAMRANGE pRam = pgmPhysGetRange(pVM, GCPhys);
+            Assert(pRam);
+            Assert(pRam->GCPhys     <= pCur->Core.Key);
+            Assert(pRam->GCPhysLast >= pCur->Core.KeyLast);
+
+            /*
+             * Set the flags and flush shadow PT entries.
+             */
+            if (pCur->cTmpOffPages > 0)
+            {
+                rc = pgmHandlerPhysicalSetRamFlagsAndFlushShadowPTs(pVM, pCur, pRam, pvBitmap, offBitmap);
+                pCur->cTmpOffPages  = 0;
+            }
+            else
+                rc = VINF_SUCCESS;
+        }
+        else
+        {
+            AssertFailed();
+            rc = VERR_WRONG_TYPE;
+        }
+    }
+    else
+    {
+        AssertMsgFailed(("Didn't find MMIO Range starting at %#x\n", GCPhys));
+        rc = VERR_PGM_HANDLER_NOT_FOUND;
+    }
+
     return rc;
 }
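
Note (illustrative, not part of the changeset): the bitmap convention here is that bit offBitmap + i corresponds to the i'th page of the handler range, so the caller is expected to derive the starting bit from the page offset within the MMIO2 region. A hypothetical helper making that explicit:

    /* Hypothetical: map a physical address inside an MMIO2 region to its
     * bit index in the dirty bitmap. */
    DECLINLINE(uint32_t) pgmMmio2DirtyPageBitNo(RTGCPHYS GCPhysRegion, RTGCPHYS GCPhysPage)
    {
        Assert(GCPhysPage >= GCPhysRegion);
        return (uint32_t)((GCPhysPage - GCPhysRegion) >> PAGE_SHIFT);
    }
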
  • trunk/src/VBox/VMM/VMMAll/PGMAllPhys.cpp

    --- r91855
    +++ r92162

@@ -322 +322 @@
         }
     }
+}
+
+
+/**
+ * Common worker for pgmPhysMmio2WriteHandler and pgmPhysMmio2WritePfHandler.
+ */
+static VBOXSTRICTRC pgmPhysMmio2WriteHandlerCommon(PVMCC pVM, PVMCPUCC pVCpu, uintptr_t hMmio2, RTGCPHYS GCPhys, RTGCPTR GCPtr)
+{
+    /*
+     * Get the MMIO2 range.
+     */
+    AssertReturn(hMmio2 < RT_ELEMENTS(pVM->pgm.s.apMmio2RangesR3), VERR_INTERNAL_ERROR_3);
+    AssertReturn(hMmio2 != 0, VERR_INTERNAL_ERROR_3);
+    PPGMREGMMIO2RANGE pMmio2 = pVM->pgm.s.CTX_SUFF(apMmio2Ranges)[hMmio2 - 1];
+    Assert(pMmio2->idMmio2 == hMmio2);
+    AssertReturn((pMmio2->fFlags & PGMREGMMIO2RANGE_F_TRACK_DIRTY_PAGES) == PGMREGMMIO2RANGE_F_TRACK_DIRTY_PAGES,
+                 VERR_INTERNAL_ERROR_4);
+
+    /*
+     * Get the page and make sure it's an MMIO2 page.
+     */
+    PPGMPAGE pPage = pgmPhysGetPage(pVM, GCPhys);
+    AssertReturn(pPage, VINF_EM_RAW_EMULATE_INSTR);
+    AssertReturn(PGM_PAGE_GET_TYPE(pPage) == PGMPAGETYPE_MMIO2, VINF_EM_RAW_EMULATE_INSTR);
+
+    /*
+     * Set the dirty flag so we can avoid scanning all the pages when it isn't dirty.
+     * (The PGM_PAGE_HNDL_PHYS_STATE_DISABLED handler state indicates that a single
+     * page is dirty, saving the need for additional storage (bitmap).)
+     */
+    pMmio2->fFlags |= PGMREGMMIO2RANGE_F_IS_DIRTY;
+
+    /*
+     * Disable the handler for this page.
+     */
+    int rc = PGMHandlerPhysicalPageTempOff(pVM, pMmio2->RamRange.GCPhys, GCPhys & X86_PTE_PG_MASK);
+    AssertRC(rc);
+#ifndef IN_RING3
+    if (RT_SUCCESS(rc) && GCPtr != ~(RTGCPTR)0)
+    {
+        rc = PGMShwMakePageWritable(pVCpu, GCPtr, PGM_MK_PG_IS_MMIO2 | PGM_MK_PG_IS_WRITE_FAULT);
+        AssertMsgReturn(rc == VINF_SUCCESS, ("PGMShwModifyPage -> GCPtr=%RGv rc=%d\n", GCPtr, rc), rc);
+    }
+#else
+    RT_NOREF(pVCpu, GCPtr);
+#endif
+    return VINF_SUCCESS;
+}
+
+
+#ifndef IN_RING3
+/**
+ * @callback_method_impl{FNPGMRZPHYSPFHANDLER,
+ *      \#PF access handler callback for guest MMIO2 dirty page tracing.}
+ *
+ * @remarks The @a pvUser is the MMIO2 index.
+ */
+DECLEXPORT(VBOXSTRICTRC) pgmPhysMmio2WritePfHandler(PVMCC pVM, PVMCPUCC pVCpu, RTGCUINT uErrorCode, PCPUMCTXCORE pRegFrame,
+                                                    RTGCPTR pvFault, RTGCPHYS GCPhysFault, void *pvUser)
+{
+    RT_NOREF(pVCpu, uErrorCode, pRegFrame);
+    VBOXSTRICTRC rcStrict = PGM_LOCK(pVM); /* We should already have it, but just make sure we do. */
+    if (RT_SUCCESS(rcStrict))
+    {
+        rcStrict = pgmPhysMmio2WriteHandlerCommon(pVM, pVCpu, (uintptr_t)pvUser, GCPhysFault, pvFault);
+        PGM_UNLOCK(pVM);
+    }
+    return rcStrict;
+}
+#endif /* !IN_RING3 */
+
+
+/**
+ * @callback_method_impl{FNPGMPHYSHANDLER,
+ *      Access handler callback for MMIO2 dirty page tracing.}
+ *
+ * @remarks The @a pvUser is the MMIO2 index.
+ */
+PGM_ALL_CB2_DECL(VBOXSTRICTRC)
+pgmPhysMmio2WriteHandler(PVMCC pVM, PVMCPUCC pVCpu, RTGCPHYS GCPhys, void *pvPhys, void *pvBuf, size_t cbBuf,
+                         PGMACCESSTYPE enmAccessType, PGMACCESSORIGIN enmOrigin, void *pvUser)
+{
+    VBOXSTRICTRC rcStrict = PGM_LOCK(pVM); /* We should already have it, but just make sure we do. */
+    if (RT_SUCCESS(rcStrict))
+    {
+        rcStrict = pgmPhysMmio2WriteHandlerCommon(pVM, pVCpu, (uintptr_t)pvUser, GCPhys, ~(RTGCPTR)0);
+        PGM_UNLOCK(pVM);
+        if (rcStrict == VINF_SUCCESS)
+            rcStrict = VINF_PGM_HANDLER_DO_DEFAULT;
+    }
+    RT_NOREF(pvPhys, pvBuf, cbBuf, enmAccessType, enmOrigin);
+    return rcStrict;
 }

     
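Note (illustrative, not part of the changeset): the trick in pgmPhysMmio2WriteHandlerCommon() above is that no separate per-page dirty store is needed while tracking runs. A page is clean while its handler state is PGM_PAGE_HNDL_PHYS_STATE_WRITE (writes still trap) and dirty once the first write has knocked the state down to PGM_PAGE_HNDL_PHYS_STATE_DISABLED via PGMHandlerPhysicalPageTempOff(), which also makes further writes to that page free until the next reset. Conceptually:

    /* How the per-page handler state doubles as the dirty bit. */
    DECLINLINE(bool) pgmMmio2PageIsDirty(PCPGMPAGE pPage)
    {
        return PGM_PAGE_GET_HNDL_PHYS_STATE(pPage) == PGM_PAGE_HNDL_PHYS_STATE_DISABLED;
    }
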
@@ -2577 +2669 @@
         if (RT_SUCCESS(rcStrict))
         {
-            PFNPGMPHYSHANDLER pfnHandler = PGMPHYSHANDLER_GET_TYPE(pVM, pCur)->CTX_SUFF(pfnHandler);
-            void *pvUser = pCur->CTX_SUFF(pvUser);
+            PPGMPHYSHANDLERTYPEINT const pCurType   = PGMPHYSHANDLER_GET_TYPE(pVM, pCur);
+            PFNPGMPHYSHANDLER const      pfnHandler = pCurType->CTX_SUFF(pfnHandler);
+            void * const                 pvUser     = pCur->CTX_SUFF(pvUser);
             STAM_PROFILE_START(&pCur->Stat, h);

-            /* Release the PGM lock as MMIO handlers take the IOM lock. (deadlock prevention) */
+            /* Most handlers will want to release the PGM lock for deadlock prevention
+               (esp. MMIO), though some PGM internal ones like the page pool and MMIO2
+               dirty page trackers will want to keep it for performance reasons. */
             PGM_LOCK_ASSERT_OWNER(pVM);
-            PGM_UNLOCK(pVM);
-            rcStrict = pfnHandler(pVM, pVCpu, GCPhys, pvDst, (void *)pvBuf, cbRange, PGMACCESSTYPE_WRITE, enmOrigin, pvUser);
-            PGM_LOCK_VOID(pVM);
+            if (pCurType->fKeepPgmLock)
+                rcStrict = pfnHandler(pVM, pVCpu, GCPhys, pvDst, (void *)pvBuf, cbRange, PGMACCESSTYPE_WRITE, enmOrigin, pvUser);
+            else
+            {
+                PGM_UNLOCK(pVM);
+                rcStrict = pfnHandler(pVM, pVCpu, GCPhys, pvDst, (void *)pvBuf, cbRange, PGMACCESSTYPE_WRITE, enmOrigin, pvUser);
+                PGM_LOCK_VOID(pVM);
+            }

 #ifdef VBOX_WITH_STATISTICS

@@ -2694 +2794 @@
                 cbRange = offPhysLast + 1;

-            PFNPGMPHYSHANDLER pfnHandler = PGMPHYSHANDLER_GET_TYPE(pVM, pPhys)->CTX_SUFF(pfnHandler);
-            void *pvUser = pPhys->CTX_SUFF(pvUser);
+            PPGMPHYSHANDLERTYPEINT const pCurType   = PGMPHYSHANDLER_GET_TYPE(pVM, pPhys);
+            PFNPGMPHYSHANDLER const      pfnHandler = pCurType->CTX_SUFF(pfnHandler);
+            void * const                 pvUser     = pPhys->CTX_SUFF(pvUser);

             Log5(("pgmPhysWriteHandler: GCPhys=%RGp cbRange=%#x pPage=%R[pgmpage] phys %s\n", GCPhys, cbRange, pPage, R3STRING(pPhys->pszDesc) ));
             STAM_PROFILE_START(&pPhys->Stat, h);

-            /* Release the PGM lock as MMIO handlers take the IOM lock. (deadlock prevention) */
+            /* Most handlers will want to release the PGM lock for deadlock prevention
+               (esp. MMIO), though some PGM internal ones like the page pool and MMIO2
+               dirty page trackers will want to keep it for performance reasons. */
             PGM_LOCK_ASSERT_OWNER(pVM);
-            PGM_UNLOCK(pVM);
-            rcStrict2 = pfnHandler(pVM, pVCpu, GCPhys, pvDst, (void *)pvBuf, cbRange, PGMACCESSTYPE_WRITE, enmOrigin, pvUser);
-            PGM_LOCK_VOID(pVM);
+            if (pCurType->fKeepPgmLock)
+                rcStrict2 = pfnHandler(pVM, pVCpu, GCPhys, pvDst, (void *)pvBuf, cbRange, PGMACCESSTYPE_WRITE, enmOrigin, pvUser);
+            else
+            {
+                PGM_UNLOCK(pVM);
+                rcStrict2 = pfnHandler(pVM, pVCpu, GCPhys, pvDst, (void *)pvBuf, cbRange, PGMACCESSTYPE_WRITE, enmOrigin, pvUser);
+                PGM_LOCK_VOID(pVM);
+            }

 #ifdef VBOX_WITH_STATISTICS
  • trunk/src/VBox/VMM/VMMR3/IEMR3.cpp

    --- r91263
    +++ r92162

@@ -172 +172 @@
     {
         PVMCPU pVCpu0 = pVM->apCpusR3[0];
-        int rc = PGMR3HandlerPhysicalTypeRegister(pVM, PGMPHYSHANDLERKIND_ALL, iemVmxApicAccessPageHandler,
+        int rc = PGMR3HandlerPhysicalTypeRegister(pVM, PGMPHYSHANDLERKIND_ALL, false /*fKeepPgmLock*/,
+                                                  iemVmxApicAccessPageHandler,
                                                   NULL /* pszModR0 */,
                                                   "iemVmxApicAccessPageHandler", NULL /* pszPfHandlerR0 */,
  • trunk/src/VBox/VMM/VMMR3/IOM.cpp

    --- r82968
    +++ r92162

@@ -159 +159 @@
      * Register the MMIO access handler type.
      */
-    rc = PGMR3HandlerPhysicalTypeRegister(pVM, PGMPHYSHANDLERKIND_MMIO,
+    rc = PGMR3HandlerPhysicalTypeRegister(pVM, PGMPHYSHANDLERKIND_MMIO, false /*fKeepPgmLock*/,
                                           iomMmioHandlerNew,
                                           NULL, "iomMmioHandlerNew", "iomMmioPfHandlerNew",
  • trunk/src/VBox/VMM/VMMR3/PDMDevHlp.cpp

    --- r92071
    +++ r92162

@@ -348 +348 @@
     PDMDEV_ASSERT_DEVINS(pDevIns);
     PVM pVM = pDevIns->Internal.s.pVMR3;
-    LogFlow(("pdmR3DevHlp_Mmio2GetMappingAddress: caller='%s'/%d: hRegion=%#RX6r\n", pDevIns->pReg->szName, pDevIns->iInstance, hRegion));
+    LogFlow(("pdmR3DevHlp_Mmio2GetMappingAddress: caller='%s'/%d: hRegion=%#RX64\n", pDevIns->pReg->szName, pDevIns->iInstance, hRegion));
     VM_ASSERT_EMT0_RETURN(pVM, NIL_RTGCPHYS);

@@ -356 +356 @@
     return GCPhys;
 }
+
+
+/** @interface_method_impl{PDMDEVHLPR3,pfnMmio2QueryAndResetDirtyBitmap} */
+static DECLCALLBACK(int) pdmR3DevHlp_Mmio2QueryAndResetDirtyBitmap(PPDMDEVINS pDevIns, PGMMMIO2HANDLE hRegion,
+                                                                   void *pvBitmap, size_t cbBitmap)
+{
+    PDMDEV_ASSERT_DEVINS(pDevIns);
+    PVM pVM = pDevIns->Internal.s.pVMR3;
+    LogFlow(("pdmR3DevHlp_Mmio2QueryAndResetDirtyBitmap: caller='%s'/%d: hRegion=%#RX64 pvBitmap=%p cbBitmap=%#zx\n",
+             pDevIns->pReg->szName, pDevIns->iInstance, hRegion, pvBitmap, cbBitmap));
+
+    int rc = PGMR3PhysMmio2QueryAndResetDirtyBitmap(pVM, pDevIns, hRegion, pvBitmap, cbBitmap);
+
+    LogFlow(("pdmR3DevHlp_Mmio2QueryAndResetDirtyBitmap: caller='%s'/%d: returns %Rrc\n", pDevIns->pReg->szName, pDevIns->iInstance, rc));
+    return rc;
+}
+
+
+/** @interface_method_impl{PDMDEVHLPR3,pfnMmio2ControlDirtyPageTracking} */
+static DECLCALLBACK(int) pdmR3DevHlp_Mmio2ControlDirtyPageTracking(PPDMDEVINS pDevIns, PGMMMIO2HANDLE hRegion, bool fEnabled)
+{
+    PDMDEV_ASSERT_DEVINS(pDevIns);
+    PVM pVM = pDevIns->Internal.s.pVMR3;
+    LogFlow(("pdmR3DevHlp_Mmio2ControlDirtyPageTracking: caller='%s'/%d: hRegion=%#RX64 fEnabled=%RTbool\n",
+             pDevIns->pReg->szName, pDevIns->iInstance, hRegion, fEnabled));
+
+    int rc = PGMR3PhysMmio2ControlDirtyPageTracking(pVM, pDevIns, hRegion, fEnabled);
+
+    LogFlow(("pdmR3DevHlp_Mmio2ControlDirtyPageTracking: caller='%s'/%d: returns %Rrc\n", pDevIns->pReg->szName, pDevIns->iInstance, rc));
+    return rc;
+}
+

 /**

@@ -364 +396 @@
     PDMDEV_ASSERT_DEVINS(pDevIns);
     PVM pVM = pDevIns->Internal.s.pVMR3;
-    LogFlow(("pdmR3DevHlp_Mmio2ChangeRegionNo: caller='%s'/%d: hRegion=%#RX6r iNewRegion=%#x\n", pDevIns->pReg->szName, pDevIns->iInstance, hRegion, iNewRegion));
+    LogFlow(("pdmR3DevHlp_Mmio2ChangeRegionNo: caller='%s'/%d: hRegion=%#RX64 iNewRegion=%#x\n", pDevIns->pReg->szName, pDevIns->iInstance, hRegion, iNewRegion));
     VM_ASSERT_EMT0_RETURN(pVM, VERR_VM_THREAD_NOT_EMT);

@@ -841 +873 @@
              pszDesc, pszDesc, phType));

-    int rc = PGMR3HandlerPhysicalTypeRegister(pVM, enmKind, pfnHandlerR3,
+    int rc = PGMR3HandlerPhysicalTypeRegister(pVM, enmKind, false /*fKeepPgmLock*/, pfnHandlerR3,
                                               pDevIns->pReg->pszR0Mod, pszHandlerR0, pszPfHandlerR0,
                                               pDevIns->pReg->pszRCMod, pszHandlerRC, pszPfHandlerRC,

@@ -4799 +4831 @@
     pdmR3DevHlp_Mmio2Reduce,
     pdmR3DevHlp_Mmio2GetMappingAddress,
+    pdmR3DevHlp_Mmio2QueryAndResetDirtyBitmap,
+    pdmR3DevHlp_Mmio2ControlDirtyPageTracking,
     pdmR3DevHlp_Mmio2ChangeRegionNo,
     pdmR3DevHlp_MmioMapMmio2Page,

@@ -5193 +5227 @@
     pdmR3DevHlp_Mmio2Reduce,
     pdmR3DevHlp_Mmio2GetMappingAddress,
+    pdmR3DevHlp_Mmio2QueryAndResetDirtyBitmap,
+    pdmR3DevHlp_Mmio2ControlDirtyPageTracking,
     pdmR3DevHlp_Mmio2ChangeRegionNo,
     pdmR3DevHlp_MmioMapMmio2Page,

@@ -5901 +5937 @@
     pdmR3DevHlp_Mmio2Reduce,
     pdmR3DevHlp_Mmio2GetMappingAddress,
+    pdmR3DevHlp_Mmio2QueryAndResetDirtyBitmap,
+    pdmR3DevHlp_Mmio2ControlDirtyPageTracking,
     pdmR3DevHlp_Mmio2ChangeRegionNo,
     pdmR3DevHlp_MmioMapMmio2Page,
  • trunk/src/VBox/VMM/VMMR3/PGM.cpp

    --- r92046
    +++ r92162

@@ -950 +950 @@
      */
     if (RT_SUCCESS(rc))
-        rc = PGMR3HandlerPhysicalTypeRegister(pVM, PGMPHYSHANDLERKIND_WRITE,
+        /** @todo why isn't pgmPhysRomWriteHandler registered for ring-0?   */
+        rc = PGMR3HandlerPhysicalTypeRegister(pVM, PGMPHYSHANDLERKIND_WRITE, false /*fKeepPgmLock*/,
                                               pgmPhysRomWriteHandler,
                                               NULL, NULL, "pgmPhysRomWritePfHandler",

@@ -956 +957 @@
                                               "ROM write protection",
                                               &pVM->pgm.s.hRomPhysHandlerType);
+
+    /*
+     * Register the physical access handler doing dirty MMIO2 tracing.
+     */
+    if (RT_SUCCESS(rc))
+        rc = PGMR3HandlerPhysicalTypeRegister(pVM, PGMPHYSHANDLERKIND_WRITE, true /*fKeepPgmLock*/,
+                                              pgmPhysMmio2WriteHandler,
+                                              NULL, "pgmPhysMmio2WriteHandler", "pgmPhysMmio2WritePfHandler",
+                                              NULL, "pgmPhysMmio2WriteHandler", "pgmPhysMmio2WritePfHandler",
+                                              "MMIO2 dirty page tracing",
+                                              &pVM->pgm.s.hMmio2DirtyPhysHandlerType);

     /*
  • trunk/src/VBox/VMM/VMMR3/PGMHandler.cpp

    --- r91848
    +++ r92162

@@ -65 +65 @@
  * @param   pVM             The cross context VM structure.
  * @param   enmKind         The kind of access handler.
+ * @param   fKeepPgmLock    Whether to hold the PGM lock while calling the
+ *                          handler or not.  Mainly for PGM callers.
  * @param   pfnHandlerR3    Pointer to the ring-3 handler callback.
  * @param   pfnHandlerR0    Pointer to the ring-0 handler callback.

@@ -73 +75 @@
  *                          safe).
  */
-VMMR3_INT_DECL(int) PGMR3HandlerPhysicalTypeRegisterEx(PVM pVM, PGMPHYSHANDLERKIND enmKind,
+VMMR3_INT_DECL(int) PGMR3HandlerPhysicalTypeRegisterEx(PVM pVM, PGMPHYSHANDLERKIND enmKind, bool fKeepPgmLock,
                                                        PFNPGMPHYSHANDLER pfnHandlerR3,
                                                        R0PTRTYPE(PFNPGMPHYSHANDLER) pfnHandlerR0,

@@ -97 +99 @@
         pType->uState           = enmKind == PGMPHYSHANDLERKIND_WRITE
                                 ? PGM_PAGE_HNDL_PHYS_STATE_WRITE : PGM_PAGE_HNDL_PHYS_STATE_ALL;
+        pType->fKeepPgmLock     = fKeepPgmLock;
         pType->pfnHandlerR3     = pfnHandlerR3;
         pType->pfnHandlerR0     = pfnHandlerR0;

@@ -122 +125 @@
  * @param   pVM             The cross context VM structure.
  * @param   enmKind         The kind of access handler.
+ * @param   fKeepPgmLock    Whether to hold the PGM lock while calling the
+ *                          handler or not.  Mainly for PGM callers.
  * @param   pfnHandlerR3    Pointer to the ring-3 handler callback.
  * @param   pszModR0        The name of the ring-0 module, NULL is an alias for

@@ -139 +144 @@
  *                          safe).
  */
-VMMR3DECL(int) PGMR3HandlerPhysicalTypeRegister(PVM pVM, PGMPHYSHANDLERKIND enmKind,
+VMMR3DECL(int) PGMR3HandlerPhysicalTypeRegister(PVM pVM, PGMPHYSHANDLERKIND enmKind, bool fKeepPgmLock,
                                                 R3PTRTYPE(PFNPGMPHYSHANDLER) pfnHandlerR3,
                                                 const char *pszModR0, const char *pszHandlerR0, const char *pszPfHandlerR0,

@@ -194 +199 @@
             }
             if (RT_SUCCESS(rc))
-                return PGMR3HandlerPhysicalTypeRegisterEx(pVM, enmKind, pfnHandlerR3,
+                return PGMR3HandlerPhysicalTypeRegisterEx(pVM, enmKind, fKeepPgmLock, pfnHandlerR3,
                                                           pfnHandlerR0, pfnPfHandlerR0, pszDesc, phType);
         }
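
Note (illustrative, not part of the changeset): the rule the new fKeepPgmLock parameter encodes is that a handler type registered with fKeepPgmLock=true is invoked with the PGM critical section still held (see the dispatch changes in PGMAllBth.h and PGMAllPhys.cpp above), so such a handler must not take any lock that can deadlock against PGM, e.g. the IOM lock. A minimal sketch with invented names:

    /* A write handler that is safe to call with the PGM lock held: it does
     * only cheap bookkeeping and takes no other locks. */
    static DECLCALLBACK(VBOXSTRICTRC)
    exampleDirtyWriteHandler(PVMCC pVM, PVMCPUCC pVCpu, RTGCPHYS GCPhys, void *pvPhys, void *pvBuf, size_t cbBuf,
                             PGMACCESSTYPE enmAccessType, PGMACCESSORIGIN enmOrigin, void *pvUser)
    {
        RT_NOREF(pVM, pVCpu, GCPhys, pvPhys, pvBuf, cbBuf, enmAccessType, enmOrigin, pvUser);
        return VINF_PGM_HANDLER_DO_DEFAULT;
    }

    PGMPHYSHANDLERTYPE hType = NIL_PGMPHYSHANDLERTYPE;
    rc = PGMR3HandlerPhysicalTypeRegister(pVM, PGMPHYSHANDLERKIND_WRITE, true /*fKeepPgmLock*/,
                                          exampleDirtyWriteHandler,
                                          NULL, "exampleDirtyWriteHandler", NULL,
                                          NULL, "exampleDirtyWriteHandler", NULL,
                                          "Example dirty tracking", &hType);
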
  • trunk/src/VBox/VMM/VMMR3/PGMPhys.cpp

    --- r92157
    +++ r92162

@@ -2685 +2685 @@


+
+/*********************************************************************************************************************************
+*   MMIO2                                                                                                                        *
+*********************************************************************************************************************************/
+
 /**
  * Locate a MMIO2 range.

@@ -2738 +2743 @@

 /**
+ * Worker for PGMR3PhysMmio2ControlDirtyPageTracking and PGMR3PhysMmio2Map.
+ */
+static int pgmR3PhysMmio2EnableDirtyPageTracing(PVM pVM, PPGMREGMMIO2RANGE pFirstMmio2)
+{
+    int rc = VINF_SUCCESS;
+    for (PPGMREGMMIO2RANGE pCurMmio2 = pFirstMmio2; pCurMmio2; pCurMmio2 = pCurMmio2->pNextR3)
+    {
+        Assert(!(pCurMmio2->fFlags & PGMREGMMIO2RANGE_F_IS_TRACKING));
+        int rc2 = pgmHandlerPhysicalExRegister(pVM, pCurMmio2->pPhysHandlerR3, pCurMmio2->RamRange.GCPhys,
+                                               pCurMmio2->RamRange.GCPhysLast);
+        AssertLogRelMsgRC(rc2, ("%#RGp-%#RGp %s failed -> %Rrc\n", pCurMmio2->RamRange.GCPhys, pCurMmio2->RamRange.GCPhysLast,
+                                pCurMmio2->RamRange.pszDesc, rc2));
+        if (RT_SUCCESS(rc2))
+            pCurMmio2->fFlags |= PGMREGMMIO2RANGE_F_IS_TRACKING;
+        else if (RT_SUCCESS(rc))
+            rc = rc2;
+        if (pCurMmio2->fFlags & PGMREGMMIO2RANGE_F_LAST_CHUNK)
+            return rc;
+    }
+    AssertFailed();
+    return rc;
+}
+
+
+/**
+ * Worker for PGMR3PhysMmio2ControlDirtyPageTracking and PGMR3PhysMmio2Unmap.
+ */
+static int pgmR3PhysMmio2DisableDirtyPageTracing(PVM pVM, PPGMREGMMIO2RANGE pFirstMmio2)
+{
+    for (PPGMREGMMIO2RANGE pCurMmio2 = pFirstMmio2; pCurMmio2; pCurMmio2 = pCurMmio2->pNextR3)
+    {
+        if (pCurMmio2->fFlags & PGMREGMMIO2RANGE_F_IS_TRACKING)
+        {
+            int rc2 = pgmHandlerPhysicalExDeregister(pVM, pCurMmio2->pPhysHandlerR3);
+            AssertLogRelMsgRC(rc2, ("%#RGp-%#RGp %s failed -> %Rrc\n", pCurMmio2->RamRange.GCPhys, pCurMmio2->RamRange.GCPhysLast,
+                                    pCurMmio2->RamRange.pszDesc, rc2));
+            pCurMmio2->fFlags &= ~PGMREGMMIO2RANGE_F_IS_TRACKING;
+        }
+        if (pCurMmio2->fFlags & PGMREGMMIO2RANGE_F_LAST_CHUNK)
+            return VINF_SUCCESS;
+    }
+    AssertFailed();
+    return VINF_SUCCESS;
+
+}
+
+
+/**
  * Calculates the number of chunks
  *

@@ -2757 +2810 @@
      * Note! In addition, we've got a 24 bit sub-page range for MMIO2 ranges, leaving
      *       us with an absolute maximum of 16777215 pages per chunk (close to 64 GB).
+     *
+     * P.S. If we want to include a dirty bitmap, we'd have to drop down to 1040384 pages.
      */
     uint32_t cbChunk = 16U*_1M;
-    uint32_t cPagesPerChunk = 1048048; /* max ~1048059 */
-    AssertCompile(sizeof(PGMREGMMIO2RANGE) + sizeof(PGMPAGE) * 1048048 < 16U*_1M - PAGE_SIZE * 2);
+    uint32_t cPagesPerChunk = 1048000; /* max ~1048059 */
+    Assert(cPagesPerChunk / 64 * 64 == cPagesPerChunk); /* (NEM requirement) */
+    AssertCompile(sizeof(PGMREGMMIO2RANGE) + sizeof(PGMPAGE) * 1048000 < 16U*_1M - PAGE_SIZE * 2);
     AssertRelease(cPagesPerChunk <= PGM_MMIO2_MAX_PAGE_COUNT); /* See above note. */
     AssertRelease(RT_UOFFSETOF_DYN(PGMREGMMIO2RANGE, RamRange.aPages[cPagesPerChunk]) + PAGE_SIZE * 2 <= cbChunk);
     
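Note (illustrative, not part of the changeset): the drop from 1048048 to 1048000 pages per chunk is driven by the assertions above. Assuming sizeof(PGMPAGE) is 16 bytes, 1048000 page entries cost 16,768,000 bytes, which fits the 16 MiB chunk minus two guard pages with about 1 KiB left for the PGMREGMMIO2RANGE header; and unlike 1048048, the new count is a whole multiple of 64 (1048000 = 16375 * 64), which is what the new NEM assertion demands:

    AssertCompile(1048000 % 64 == 0);                 /* whole 64-page bitmap qwords (NEM) */
    AssertCompile(1048000 * 16 /* assumed sizeof(PGMPAGE) */ <= 16U*_1M - PAGE_SIZE * 2);
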
    27952851 *                          UINT8_MAX.
    27962852 * @param   cb              The size of the region.  Must be page aligned.
     2853 * @param   fFlags          PGMPHYS_MMIO2_FLAGS_XXX.
     2854 * @param   idMmio2         The MMIO2 ID for the first chunk.
    27972855 * @param   pszDesc         The description.
    27982856 * @param   ppHeadRet       Where to return the pointer to the first
     
    28012859 * @thread  EMT
    28022860 */
    2803 static int pgmR3PhysMmio2Create(PVM pVM, PPDMDEVINS pDevIns, uint32_t iSubDev, uint32_t iRegion, RTGCPHYS cb,
    2804                                 const char *pszDesc, PPGMREGMMIO2RANGE *ppHeadRet)
     2861static int pgmR3PhysMmio2Create(PVM pVM, PPDMDEVINS pDevIns, uint32_t iSubDev, uint32_t iRegion, RTGCPHYS cb, uint32_t fFlags,
     2862                                uint8_t idMmio2, const char *pszDesc, PPGMREGMMIO2RANGE *ppHeadRet)
    28052863{
    28062864    /*
     
    28192877    int rc = VINF_SUCCESS;
    28202878    uint32_t cPagesLeft = cb >> X86_PAGE_SHIFT;
    2821     for (uint16_t iChunk = 0; iChunk < cChunks && RT_SUCCESS(rc); iChunk++)
     2879    for (uint16_t iChunk = 0; iChunk < cChunks && RT_SUCCESS(rc); iChunk++, idMmio2++)
    28222880    {
    28232881        /*
     
    28752933        //pNew->pvR3                = NULL;
    28762934        //pNew->pNext               = NULL;
    2877         //pNew->fFlags              = 0;
    28782935        if (iChunk == 0)
    28792936            pNew->fFlags |= PGMREGMMIO2RANGE_F_FIRST_CHUNK;
    28802937        if (iChunk + 1 == cChunks)
    28812938            pNew->fFlags |= PGMREGMMIO2RANGE_F_LAST_CHUNK;
     2939        if (fFlags & PGMPHYS_MMIO2_FLAGS_TRACK_DIRTY_PAGES)
     2940            pNew->fFlags |= PGMREGMMIO2RANGE_F_TRACK_DIRTY_PAGES;
    28822941        pNew->iSubDev               = iSubDev;
    28832942        pNew->iRegion               = iRegion;
    28842943        pNew->idSavedState          = UINT8_MAX;
    2885         pNew->idMmio2               = UINT8_MAX;
     2944        pNew->idMmio2               = idMmio2;
    28862945        //pNew->pPhysHandlerR3      = NULL;
    28872946        //pNew->paLSPages           = NULL;
     
    28982957        cPagesLeft -= cPagesTrackedByChunk;
    28992958        ppNext = &pNew->pNextR3;
     2959
     2960        /*
     2961         * Pre-allocate a handler if we're tracking dirty pages.
     2962         */
     2963        if (fFlags & PGMPHYS_MMIO2_FLAGS_TRACK_DIRTY_PAGES)
     2964        {
     2965            rc = pgmHandlerPhysicalExCreate(pVM, pVM->pgm.s.hMmio2DirtyPhysHandlerType,
     2966                                            (RTR3PTR)(uintptr_t)idMmio2, idMmio2, idMmio2, pszDesc, &pNew->pPhysHandlerR3);
     2967            AssertLogRelMsgRCBreak(rc, ("idMmio2=%zu\n", idMmio2));
     2968        }
    29002969    }
    29012970    Assert(cPagesLeft == 0);
     
    29142983        PPGMREGMMIO2RANGE pFree = *ppHeadRet;
    29152984        *ppHeadRet = pFree->pNextR3;
     2985
     2986        if (pFree->pPhysHandlerR3)
     2987        {
     2988            pgmHandlerPhysicalExDestroy(pVM, pFree->pPhysHandlerR3);
     2989            pFree->pPhysHandlerR3 = NULL;
     2990        }
    29162991
    29172992        if (pFree->RamRange.fFlags & PGM_RAM_RANGE_FLAGS_FLOATING)
     
    30363111    AssertReturn(!(cb & PAGE_OFFSET_MASK), VERR_INVALID_PARAMETER);
    30373112    AssertReturn(cb, VERR_INVALID_PARAMETER);
    3038     AssertReturn(!fFlags, VERR_INVALID_PARAMETER);
     3113    AssertReturn(!(fFlags & ~PGMPHYS_MMIO2_FLAGS_VALID_MASK), VERR_INVALID_FLAGS);
    30393114
    30403115    const uint32_t cPages = cb >> PAGE_SHIFT;
     
    30603135     */
    30613136    unsigned cChunks = pgmR3PhysMmio2CalcChunkCount(pVM, cb, NULL, NULL);
     3137
    30623138    PGM_LOCK_VOID(pVM);
    3063     uint8_t  idMmio2 = pVM->pgm.s.cMmio2Regions + 1;
    3064     unsigned cNewMmio2Regions = pVM->pgm.s.cMmio2Regions + cChunks;
     3139    AssertCompile(PGM_MMIO2_MAX_RANGES < 255);
     3140    uint8_t const  idMmio2          = pVM->pgm.s.cMmio2Regions + 1;
     3141    unsigned const cNewMmio2Regions = pVM->pgm.s.cMmio2Regions + cChunks;
    30653142    if (cNewMmio2Regions > PGM_MMIO2_MAX_RANGES)
    30663143    {
     
    30963173                 */
    30973174                PPGMREGMMIO2RANGE pNew;
    3098                 rc = pgmR3PhysMmio2Create(pVM, pDevIns, iSubDev, iRegion, cb, pszDesc, &pNew);
     3175                rc = pgmR3PhysMmio2Create(pVM, pDevIns, iSubDev, iRegion, cb, fFlags, idMmio2, pszDesc, &pNew);
    30993176                if (RT_SUCCESS(rc))
    31003177                {
     
    31113188#endif
    31123189                        pCur->RamRange.pvR3 = pbCurPages;
    3113                         pCur->idMmio2       = idMmio2;
    31143190
    31153191                        uint32_t iDstPage = pCur->RamRange.cb >> X86_PAGE_SHIFT;
     
    31253201                        iSrcPage   += pCur->RamRange.cb >> X86_PAGE_SHIFT;
    31263202                        pbCurPages += pCur->RamRange.cb;
    3127                         idMmio2++;
    31283203                    }
    31293204
     
    32143289
    32153290            uint8_t idMmio2 = pCur->idMmio2;
    3216             if (idMmio2 != UINT8_MAX)
     3291            Assert(idMmio2 <= RT_ELEMENTS(pVM->pgm.s.apMmio2RangesR3));
     3292            if (idMmio2 <= RT_ELEMENTS(pVM->pgm.s.apMmio2RangesR3))
    32173293            {
    32183294                Assert(pVM->pgm.s.apMmio2RangesR3[idMmio2 - 1] == pCur);
     
    32343310            if (RT_FAILURE(rc2) && RT_SUCCESS(rc))
    32353311                rc = rc2;
     3312
     3313            if (pCur->pPhysHandlerR3)
     3314            {
     3315                pgmHandlerPhysicalExDestroy(pVM, pCur->pPhysHandlerR3);
     3316                pCur->pPhysHandlerR3 = NULL;
     3317            }
    32363318
    32373319            /* we're leaking hyper memory here if done at runtime. */
     
    35173599    }
    35183600
    3519 #if 0 /* will be reused */
    3520     /*
    3521      * Register the access handler if plain MMIO.
     3601    /*
      3602     * If the range has dirty page monitoring enabled, enable it now.
    35223603     *
    3523      * We must register access handlers for each range since the access handler
    3524      * code refuses to deal with multiple ranges (and we can).
    3525      */
    3526     if (!(pFirstMmio->fFlags & PGMREGMMIO2RANGE_F_MMIO2))
    3527     {
    3528         AssertFailed();
    3529         int rc = VINF_SUCCESS;
    3530         for (PPGMREGMMIO2RANGE pCurMmio = pFirstMmio; ; pCurMmio = pCurMmio->pNextR3)
    3531         {
    3532             Assert(!(pCurMmio->fFlags & PGMREGMMIO2RANGE_F_MAPPED));
    3533             rc = pgmHandlerPhysicalExRegister(pVM, pCurMmio->pPhysHandlerR3, pCurMmio->RamRange.GCPhys,
    3534                                               pCurMmio->RamRange.GCPhysLast);
    3535             if (RT_FAILURE(rc))
    3536                 break;
    3537             pCurMmio->fFlags |= PGMREGMMIO2RANGE_F_MAPPED; /* Use this to mark that the handler is registered. */
    3538             if (pCurMmio->fFlags & PGMREGMMIO2RANGE_F_LAST_CHUNK)
    3539                 break;
    3540         }
    3541         if (RT_FAILURE(rc))
    3542         {
    3543             /* Almost impossible, but try clean up properly and get out of here. */
    3544             for (PPGMREGMMIO2RANGE pCurMmio = pFirstMmio; ; pCurMmio = pCurMmio->pNextR3)
    3545             {
    3546                 if (pCurMmio->fFlags & PGMREGMMIO2RANGE_F_MAPPED)
    3547                 {
    3548                     pCurMmio->fFlags &= ~PGMREGMMIO2RANGE_F_MAPPED;
    3549                     pgmHandlerPhysicalExDeregister(pVM, pCurMmio->pPhysHandlerR3);
    3550                 }
    3551 
    3552                 if (!fRamExists)
    3553                     pgmR3PhysUnlinkRamRange(pVM, &pCurMmio->RamRange);
    3554                 else
    3555                 {
    3556                     Assert(pCurMmio->fFlags & PGMREGMMIO2RANGE_F_LAST_CHUNK); /* Only one chunk */
    3557 
    3558                     uint32_t cPagesLeft = pCurMmio->RamRange.cb >> PAGE_SHIFT;
    3559                     PPGMPAGE pPageDst = &pRam->aPages[(pCurMmio->RamRange.GCPhys - pRam->GCPhys) >> PAGE_SHIFT];
    3560                     while (cPagesLeft-- > 0)
    3561                     {
    3562                         PGM_PAGE_INIT_ZERO(pPageDst, pVM, PGMPAGETYPE_RAM);
    3563                         pPageDst++;
    3564                     }
    3565                 }
    3566 
    3567                 pCurMmio->RamRange.GCPhys     = NIL_RTGCPHYS;
    3568                 pCurMmio->RamRange.GCPhysLast = NIL_RTGCPHYS;
    3569                 if (pCurMmio->fFlags & PGMREGMMIO2RANGE_F_LAST_CHUNK)
    3570                     break;
    3571             }
    3572 
    3573             /** @todo NEM notification cleanup */
    3574             PGM_UNLOCK(pVM);
    3575             return rc;
    3576         }
    3577     }
    3578 #endif
     3604     * We ignore failures here for now because if we fail, the whole mapping
     3605     * will have to be reversed and we'll end up with nothing at all on the
     3606     * screen and a grumpy guest, whereas if we just go on, we'll only have
     3607     * visual distortions to gripe about.  There will be something in the
     3608     * release log.
     3609     */
     3610    if (   pFirstMmio->pPhysHandlerR3
     3611        && (pFirstMmio->fFlags & PGMREGMMIO2RANGE_F_TRACKING_ENABLED))
     3612        pgmR3PhysMmio2EnableDirtyPageTracing(pVM, pFirstMmio);
    35793613
    35803614    /*
     
    36743708    AssertReturnStmt(fOldFlags & PGMREGMMIO2RANGE_F_MAPPED, PGM_UNLOCK(pVM), VERR_WRONG_ORDER);
    36753709
    3676 #if 0 /* will be reused */
    3677     /*
    3678      * If plain MMIO, we must deregister the handlers first.
    3679      */
    3680     if (!(fOldFlags & PGMREGMMIO2RANGE_F_MMIO2))
    3681     {
    3682         AssertFailed();
    3683 
    3684         PPGMREGMMIO2RANGE pCurMmio = pFirstMmio;
    3685         rc = pgmHandlerPhysicalExDeregister(pVM, pFirstMmio->pPhysHandlerR3);
    3686         AssertRCReturnStmt(rc, PGM_UNLOCK(pVM), rc);
    3687         while (!(pCurMmio->fFlags & PGMREGMMIO2RANGE_F_LAST_CHUNK))
    3688         {
    3689             pCurMmio = pCurMmio->pNextR3;
    3690             rc = pgmHandlerPhysicalExDeregister(pVM, pCurMmio->pPhysHandlerR3);
    3691             AssertRCReturnStmt(rc, PGM_UNLOCK(pVM), VERR_PGM_PHYS_MMIO_EX_IPE);
    3692         }
    3693     }
    3694 #endif
     3710    /*
     3711     * If monitoring dirty pages, we must deregister the handlers first.
     3712     */
     3713    if (   pFirstMmio->pPhysHandlerR3
     3714        && (fOldFlags & PGMREGMMIO2RANGE_F_TRACKING_ENABLED))
     3715        pgmR3PhysMmio2DisableDirtyPageTracing(pVM, pFirstMmio);
    36953716
    36963717    /*
     
    38423863                                ("%s: %#x\n", pFirstMmio->RamRange.pszDesc, pFirstMmio->fFlags),
    38433864                                rc = VERR_NOT_SUPPORTED);
     3865
     3866#ifdef VBOX_WITH_PGM_NEM_MODE
     3867            /*
     3868             * Currently not supported for NEM in simple memory mode.
     3869             */
     3870            /** @todo implement this for NEM. */
     3871            if (RT_SUCCESS(rc))
      3872                AssertLogRelMsgStmt(!VM_IS_NEM_ENABLED(pVM), ("%s: %#x\n", pFirstMmio->RamRange.pszDesc, pFirstMmio->fFlags),
     3873                                    rc = VERR_PGM_NOT_SUPPORTED_FOR_NEM_MODE);
     3874#endif
    38443875            if (RT_SUCCESS(rc))
    38453876            {
     
    39183949    return NIL_RTGCPHYS;
    39193950}
     3951
     3952
     3953/**
     3954 * Worker for PGMR3PhysMmio2QueryAndResetDirtyBitmap.
     3955 *
     3956 * Called holding the PGM lock.
     3957 */
     3958static int pgmR3PhysMmio2QueryAndResetDirtyBitmapLocked(PVM pVM, PPDMDEVINS pDevIns, PGMMMIO2HANDLE hMmio2,
     3959                                                        void *pvBitmap, size_t cbBitmap)
     3960{
     3961    /*
     3962     * Continue validation.
     3963     */
     3964    PPGMREGMMIO2RANGE pFirstRegMmio = pgmR3PhysMmio2Find(pVM, pDevIns, UINT32_MAX, UINT32_MAX, hMmio2);
     3965    AssertReturn(pFirstRegMmio, VERR_INVALID_HANDLE);
     3966    AssertReturn(   (pFirstRegMmio->fFlags & (PGMREGMMIO2RANGE_F_TRACK_DIRTY_PAGES | PGMREGMMIO2RANGE_F_FIRST_CHUNK))
     3967                 ==                          (PGMREGMMIO2RANGE_F_TRACK_DIRTY_PAGES | PGMREGMMIO2RANGE_F_FIRST_CHUNK),
     3968                 VERR_INVALID_FUNCTION);
     3969    AssertReturn(pDevIns == pFirstRegMmio->pDevInsR3, VERR_NOT_OWNER);
     3970
     3971    RTGCPHYS cbTotal     = 0;
     3972    uint16_t fTotalDirty = 0;
     3973    for (PPGMREGMMIO2RANGE pCur = pFirstRegMmio;;)
     3974    {
     3975        cbTotal     += pCur->cbReal; /** @todo good question for NEM... */
     3976        fTotalDirty |= pCur->fFlags;
     3977        if (pCur->fFlags & PGMREGMMIO2RANGE_F_LAST_CHUNK)
     3978            break;
     3979        pCur = pCur->pNextR3;
     3980        AssertPtrReturn(pCur, VERR_INTERNAL_ERROR_5);
     3981        AssertReturn(   (pCur->fFlags & (PGMREGMMIO2RANGE_F_TRACK_DIRTY_PAGES | PGMREGMMIO2RANGE_F_FIRST_CHUNK))
     3982                     ==                  PGMREGMMIO2RANGE_F_TRACK_DIRTY_PAGES,
     3983                     VERR_INTERNAL_ERROR_4);
     3984    }
     3985    size_t const cbTotalBitmap = RT_ALIGN_T(cbTotal, PAGE_SIZE * 64, RTGCPHYS) / PAGE_SIZE / 8;
     3986
     3987    if (cbBitmap)
     3988    {
     3989        AssertPtrReturn(pvBitmap, VERR_INVALID_POINTER);
     3990        AssertReturn(RT_ALIGN_P(pvBitmap, sizeof(uint64_t)) == pvBitmap, VERR_INVALID_POINTER);
     3991        AssertReturn(cbBitmap == cbTotalBitmap, VERR_INVALID_PARAMETER);
     3992    }
     3993
     3994    /*
     3995     * Do the work.
     3996     */
     3997    int rc = VINF_SUCCESS;
     3998    if (pvBitmap)
     3999    {
     4000        if (fTotalDirty & PGMREGMMIO2RANGE_F_IS_DIRTY)
     4001        {
     4002            if (   (pFirstRegMmio->fFlags & (PGMREGMMIO2RANGE_F_MAPPED | PGMREGMMIO2RANGE_F_TRACKING_ENABLED))
     4003                ==                          (PGMREGMMIO2RANGE_F_MAPPED | PGMREGMMIO2RANGE_F_TRACKING_ENABLED))
     4004            {
     4005                /*
     4006                 * Reset each chunk, gathering dirty bits.
     4007                 */
     4008                RT_BZERO(pvBitmap, cbBitmap); /* simpler for now. */
     4009                uint32_t iPageNo = 0;
     4010                for (PPGMREGMMIO2RANGE pCur = pFirstRegMmio; pCur; pCur = pCur->pNextR3)
     4011                {
     4012                    if (pCur->fFlags & PGMREGMMIO2RANGE_F_IS_DIRTY)
     4013                    {
     4014                        int rc2 = pgmHandlerPhysicalResetMmio2WithBitmap(pVM, pCur->RamRange.GCPhys, pvBitmap, iPageNo);
     4015                        if (RT_FAILURE(rc2) && RT_SUCCESS(rc))
     4016                            rc = rc2;
     4017                        pCur->fFlags &= ~PGMREGMMIO2RANGE_F_IS_DIRTY;
     4018                    }
     4019                    if (pCur->fFlags & PGMREGMMIO2RANGE_F_LAST_CHUNK)
     4020                        break;
     4021                    iPageNo += pCur->RamRange.cb >> PAGE_SHIFT;
     4022                }
     4023            }
     4024            else
     4025            {
     4026                /*
     4027                 * If not mapped or tracking is disabled, we return the
     4028                 * PGMREGMMIO2RANGE_F_IS_DIRTY status for all pages.  We cannot
     4029                 * get more accurate data than that after unmapping or disabling.
     4030                 */
     4031                RT_BZERO(pvBitmap, cbBitmap);
     4032                uint32_t iPageNo = 0;
     4033                for (PPGMREGMMIO2RANGE pCur = pFirstRegMmio; pCur; pCur = pCur->pNextR3)
     4034                {
     4035                    if (pCur->fFlags & PGMREGMMIO2RANGE_F_IS_DIRTY)
     4036                    {
     4037                        ASMBitSetRange(pvBitmap, iPageNo, iPageNo + (pCur->RamRange.cb >> PAGE_SHIFT));
     4038                        pCur->fFlags &= ~PGMREGMMIO2RANGE_F_IS_DIRTY;
     4039                    }
     4040                    if (pCur->fFlags & PGMREGMMIO2RANGE_F_LAST_CHUNK)
     4041                        break;
     4042                    iPageNo += pCur->RamRange.cb >> PAGE_SHIFT;
     4043                }
     4044            }
     4045        }
     4046        /*
     4047         * No dirty chunks.
     4048         */
     4049        else
     4050            RT_BZERO(pvBitmap, cbBitmap);
     4051    }
     4052    /*
     4053     * No bitmap. Reset the region if tracking is currently enabled.
     4054     */
     4055    else if (   (pFirstRegMmio->fFlags & (PGMREGMMIO2RANGE_F_MAPPED | PGMREGMMIO2RANGE_F_TRACKING_ENABLED))
     4056             ==                          (PGMREGMMIO2RANGE_F_MAPPED | PGMREGMMIO2RANGE_F_TRACKING_ENABLED))
     4057        rc = PGMHandlerPhysicalReset(pVM, pFirstRegMmio->RamRange.GCPhys);
     4058
     4059    return rc;
     4060}
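
A worked example of the iPageNo bookkeeping in the loops above, with assumed sizes: for a range split into two 16 MB chunks, the first chunk covers bitmap bits 0..4095, and iPageNo advances by _16M / PAGE_SIZE = 4096 before the second chunk is processed, so page i of chunk two maps to bit 4096 + i.

    /* Illustrative only: the bitmap bit for page iPage of chunk 2. */
    uint32_t const cPagesChunk1 = _16M / PAGE_SIZE;     /* 4096 pages */
    uint32_t const iBit         = cPagesChunk1 + iPage; /* bit for chunk 2, page iPage */
    ASMBitSet(pvBitmap, iBit);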
     4061
     4062
     4063/**
     4064 * Queries the dirty page bitmap and resets the monitoring.
     4065 *
     4066 * The PGMPHYS_MMIO2_FLAGS_TRACK_DIRTY_PAGES flag must be specified when
     4067 * creating the range for this to work.
     4068 *
     4069 * @returns VBox status code.
     4070 * @retval  VERR_INVALID_FUNCTION if not created using
     4071 *          PGMPHYS_MMIO2_FLAGS_TRACK_DIRTY_PAGES.
     4072 * @param   pVM         The cross context VM structure.
     4073 * @param   pDevIns     The device owning the MMIO2 handle.
     4074 * @param   hMmio2      The region handle.
     4075 * @param   pvBitmap    The output bitmap.  Must be 8-byte aligned.  Ignored
     4076 *                      when @a cbBitmap is zero.
     4077 * @param   cbBitmap    The size of the bitmap.  Must be the size of the whole
     4078 *                      MMIO2 range, rounded up to the nearest 8 bytes.
     4079 *                      When zero only a reset is done.
     4080 */
     4081VMMR3_INT_DECL(int) PGMR3PhysMmio2QueryAndResetDirtyBitmap(PVM pVM, PPDMDEVINS pDevIns, PGMMMIO2HANDLE hMmio2,
     4082                                                           void *pvBitmap, size_t cbBitmap)
     4083{
     4084    /*
      4085     * Do some basic validation before grabbing the PGM lock and continuing.
     4086     */
     4087    AssertPtrReturn(pDevIns, VERR_INVALID_POINTER);
     4088    AssertReturn(RT_ALIGN_Z(cbBitmap, sizeof(uint64_t)) == cbBitmap, VERR_INVALID_PARAMETER);
     4089    int rc = PGM_LOCK(pVM);
     4090    if (RT_SUCCESS(rc))
     4091    {
     4092        rc = pgmR3PhysMmio2QueryAndResetDirtyBitmapLocked(pVM, pDevIns, hMmio2, pvBitmap, cbBitmap);
     4093        PGM_UNLOCK(pVM);
     4094    }
     4095    return rc;
     4096}
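
A hedged usage sketch for the API above, with an assumed 32 MB region: 32 MB spans 8192 4-KiB pages, so the bitmap needs 8192 / 8 = 1024 bytes, which already satisfies the 8-byte rounding rule. Device code would normally reach this through the corresponding PDM device helper rather than calling PGM directly.

    /* One bit per page; the tracking state is reset by the same call. */
    uint64_t bmDirty[_32M / PAGE_SIZE / 64]; /* 1024 bytes */
    int rc = PGMR3PhysMmio2QueryAndResetDirtyBitmap(pVM, pDevIns, hMmio2,
                                                    bmDirty, sizeof(bmDirty));
    AssertRC(rc);
    /* Passing cbBitmap == 0 performs the reset without copying any bits. */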
     4097
     4098/**
      4099 * Worker for PGMR3PhysMmio2ControlDirtyPageTracking.
      4100 *
      4101 * Called holding the PGM lock.
     4102 */
     4103static int pgmR3PhysMmio2ControlDirtyPageTrackingLocked(PVM pVM, PPDMDEVINS pDevIns, PGMMMIO2HANDLE hMmio2, bool fEnabled)
     4104{
     4105    /*
     4106     * Continue validation.
     4107     */
     4108    PPGMREGMMIO2RANGE pFirstRegMmio = pgmR3PhysMmio2Find(pVM, pDevIns, UINT32_MAX, UINT32_MAX, hMmio2);
     4109    AssertReturn(pFirstRegMmio, VERR_INVALID_HANDLE);
     4110    AssertReturn(   (pFirstRegMmio->fFlags & (PGMREGMMIO2RANGE_F_TRACK_DIRTY_PAGES | PGMREGMMIO2RANGE_F_FIRST_CHUNK))
     4111                 ==                          (PGMREGMMIO2RANGE_F_TRACK_DIRTY_PAGES | PGMREGMMIO2RANGE_F_FIRST_CHUNK)
     4112                 , VERR_INVALID_FUNCTION);
     4113    AssertReturn(pDevIns == pFirstRegMmio->pDevInsR3, VERR_NOT_OWNER);
     4114
     4115    /*
      4116     * Anything need doing?
     4117     */
     4118    if (fEnabled != RT_BOOL(pFirstRegMmio->fFlags & PGMREGMMIO2RANGE_F_TRACKING_ENABLED))
     4119    {
     4120        LogFlowFunc(("fEnabled=%RTbool %s\n", fEnabled, pFirstRegMmio->RamRange.pszDesc));
     4121
     4122        /*
     4123         * Update the PGMREGMMIO2RANGE_F_TRACKING_ENABLED flag.
     4124         */
     4125        for (PPGMREGMMIO2RANGE pCur = pFirstRegMmio;;)
     4126        {
     4127            if (fEnabled)
     4128                pCur->fFlags |= PGMREGMMIO2RANGE_F_TRACKING_ENABLED;
     4129            else
     4130                pCur->fFlags &= ~PGMREGMMIO2RANGE_F_TRACKING_ENABLED;
     4131            if (pCur->fFlags & PGMREGMMIO2RANGE_F_LAST_CHUNK)
     4132                break;
     4133            pCur = pCur->pNextR3;
     4134            AssertPtrReturn(pCur, VERR_INTERNAL_ERROR_5);
     4135            AssertReturn(   (pCur->fFlags & (PGMREGMMIO2RANGE_F_TRACK_DIRTY_PAGES | PGMREGMMIO2RANGE_F_FIRST_CHUNK))
     4136                         ==                  PGMREGMMIO2RANGE_F_TRACK_DIRTY_PAGES
     4137                         , VERR_INTERNAL_ERROR_4);
     4138        }
     4139
     4140        /*
     4141         * Enable/disable handlers if currently mapped.
     4142         *
     4143         * We ignore status codes here as we've already changed the flags and
     4144         * returning a failure status now would be confusing.  Besides, the two
     4145         * functions will continue past failures.  As argued in the mapping code,
     4146         * it's in the release log.
     4147         */
     4148        if (pFirstRegMmio->fFlags & PGMREGMMIO2RANGE_F_MAPPED)
     4149        {
     4150            if (fEnabled)
     4151                pgmR3PhysMmio2EnableDirtyPageTracing(pVM, pFirstRegMmio);
     4152            else
     4153                pgmR3PhysMmio2DisableDirtyPageTracing(pVM, pFirstRegMmio);
     4154        }
     4155    }
     4156    else
     4157        LogFlowFunc(("fEnabled=%RTbool %s - no change\n", fEnabled, pFirstRegMmio->RamRange.pszDesc));
     4158
     4159    return VINF_SUCCESS;
     4160}
     4161
     4162
     4163/**
     4164 * Controls the dirty page tracking for an MMIO2 range.
     4165 *
     4166 * @returns VBox status code.
     4167 * @param   pVM         The cross context VM structure.
     4168 * @param   pDevIns     The device owning the MMIO2 memory.
     4169 * @param   hMmio2      The handle of the region.
     4170 * @param   fEnabled    The new tracking state.
     4171 */
     4172VMMR3_INT_DECL(int) PGMR3PhysMmio2ControlDirtyPageTracking(PVM pVM, PPDMDEVINS pDevIns, PGMMMIO2HANDLE hMmio2, bool fEnabled)
     4173{
     4174    /*
      4175     * Do some basic validation before grabbing the PGM lock and continuing.
     4176     */
     4177    AssertPtrReturn(pDevIns, VERR_INVALID_POINTER);
     4178    int rc = PGM_LOCK(pVM);
     4179    if (RT_SUCCESS(rc))
     4180    {
     4181        rc = pgmR3PhysMmio2ControlDirtyPageTrackingLocked(pVM, pDevIns, hMmio2, fEnabled);
     4182        PGM_UNLOCK(pVM);
     4183    }
     4184    return rc;
     4185}
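
A hedged sketch of the intended call pattern, assuming a device that only needs dirty information while some consumer is attached:

    /* Stop paying the write-monitoring cost while nobody reads the bitmap. */
    int rc = PGMR3PhysMmio2ControlDirtyPageTracking(pVM, pDevIns, hMmio2, false /*fEnabled*/);
    AssertRC(rc);
    /* Re-enable when a consumer attaches.  Per the worker above, a query
       made while tracking was off reports whole chunks as dirty rather
       than individual pages. */
    rc = PGMR3PhysMmio2ControlDirtyPageTracking(pVM, pDevIns, hMmio2, true /*fEnabled*/);
    AssertRC(rc);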
     4186
    39204187
    39214188/**
  • trunk/src/VBox/VMM/VMMR3/PGMPool.cpp

    r91854 r92162  
    280280
    281281    pPool->hAccessHandlerType = NIL_PGMPHYSHANDLERTYPE;
    282     rc = PGMR3HandlerPhysicalTypeRegister(pVM, PGMPHYSHANDLERKIND_WRITE,
     282    rc = PGMR3HandlerPhysicalTypeRegister(pVM, PGMPHYSHANDLERKIND_WRITE, true /*fKeepPgmLock*/,
    283283                                          pgmPoolAccessHandler,
    284284                                          NULL, "pgmPoolAccessHandler", "pgmRZPoolAccessPfHandler",
  • trunk/src/VBox/VMM/include/PGMInternal.h

    r92157 r92162  
    489489    PGMPHYSHANDLERKIND                  enmKind;
    490490    /** The PGM_PAGE_HNDL_PHYS_STATE_XXX value corresponding to enmKind. */
    491     uint32_t                            uState;
     491    uint8_t                             uState;
     492    /** Whether to keep the PGM lock when calling the handler. */
     493    bool                                fKeepPgmLock;
     494    bool                                afPadding[2];
    492495    /** Pointer to R3 callback function. */
    493496    R3PTRTYPE(PFNPGMPHYSHANDLER)        pfnHandlerR3;
     
    14321435 * @{ */
    14331436/** Set if this is the first chunk in the MMIO2 range. */
    1434 #define PGMREGMMIO2RANGE_F_FIRST_CHUNK      UINT16_C(0x0001)
     1437#define PGMREGMMIO2RANGE_F_FIRST_CHUNK          UINT16_C(0x0001)
    14351438/** Set if this is the last chunk in the MMIO2 range. */
    1436 #define PGMREGMMIO2RANGE_F_LAST_CHUNK       UINT16_C(0x0002)
     1439#define PGMREGMMIO2RANGE_F_LAST_CHUNK           UINT16_C(0x0002)
    14371440/** Set if the whole range is mapped. */
    1438 #define PGMREGMMIO2RANGE_F_MAPPED           UINT16_C(0x0004)
     1441#define PGMREGMMIO2RANGE_F_MAPPED               UINT16_C(0x0004)
    14391442/** Set if it's overlapping, clear if not. */
    1440 #define PGMREGMMIO2RANGE_F_OVERLAPPING      UINT16_C(0x0008)
     1443#define PGMREGMMIO2RANGE_F_OVERLAPPING          UINT16_C(0x0008)
     1444/** This mirrors the PGMPHYS_MMIO2_FLAGS_TRACK_DIRTY_PAGES creation flag.*/
     1445#define PGMREGMMIO2RANGE_F_TRACK_DIRTY_PAGES    UINT16_C(0x0010)
     1446/** Set if the access handler is registered.   */
     1447#define PGMREGMMIO2RANGE_F_IS_TRACKING          UINT16_C(0x0020)
     1448/** Set if dirty page tracking is currently enabled. */
     1449#define PGMREGMMIO2RANGE_F_TRACKING_ENABLED     UINT16_C(0x0040)
     1450/** Set if there are dirty pages in the range.   */
     1451#define PGMREGMMIO2RANGE_F_IS_DIRTY             UINT16_C(0x0080)
    14411452/** @} */
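
These flags live in each chunk's fFlags and drive every loop in the PGMPhys.cpp hunks above; a minimal sketch of the traversal idiom, with the per-chunk work elided:

    /* Only the head chunk carries PGMREGMMIO2RANGE_F_FIRST_CHUNK; pNextR3
       links the rest until PGMREGMMIO2RANGE_F_LAST_CHUNK ends the walk. */
    for (PPGMREGMMIO2RANGE pCur = pFirstRegMmio;;)
    {
        /* ... e.g. set or clear PGMREGMMIO2RANGE_F_TRACKING_ENABLED ... */
        if (pCur->fFlags & PGMREGMMIO2RANGE_F_LAST_CHUNK)
            break;
        pCur = pCur->pNextR3;
        AssertPtr(pCur);
    }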
    14421453
     
    30433054    /** Physical access handler type for ROM protection. */
    30443055    PGMPHYSHANDLERTYPE              hRomPhysHandlerType;
    3045     /** Alignment padding.  */
    3046     uint32_t                        u32Padding;
     3056    /** Physical access handler type for MMIO2 dirty page tracing. */
     3057    PGMPHYSHANDLERTYPE              hMmio2DirtyPhysHandlerType;
    30473058
    30483059    /** 4 MB page mask; 32 or 36 bits depending on PSE-36 (identical for all VCPUs) */
     
    37393750bool            pgmHandlerPhysicalIsAll(PVMCC pVM, RTGCPHYS GCPhys);
    37403751void            pgmHandlerPhysicalResetAliasedPage(PVMCC pVM, PPGMPAGE pPage, RTGCPHYS GCPhysPage, PPGMRAMRANGE pRam, bool fDoAccounting);
     3752DECLHIDDEN(int) pgmHandlerPhysicalResetMmio2WithBitmap(PVMCC pVM, RTGCPHYS GCPhys, void *pvBitmap, uint32_t offBitmap);
    37413753DECLCALLBACK(void) pgmR3InfoHandlers(PVM pVM, PCDBGFINFOHLP pHlp, const char *pszArgs);
    37423754int             pgmR3InitSavedState(PVM pVM, uint64_t cbRam);
     
    37603772void            pgmPhysReleaseInternalPageMappingLock(PVMCC pVM, PPGMPAGEMAPLOCK pLock);
    37613773PGM_ALL_CB2_PROTO(FNPGMPHYSHANDLER) pgmPhysRomWriteHandler;
     3774PGM_ALL_CB2_PROTO(FNPGMPHYSHANDLER) pgmPhysMmio2WriteHandler;
    37623775#ifndef IN_RING3
    37633776DECLEXPORT(FNPGMPHYSHANDLER)        pgmPhysHandlerRedirectToHC;
    37643777DECLEXPORT(FNPGMRZPHYSPFHANDLER)    pgmPhysPfHandlerRedirectToHC;
    37653778DECLEXPORT(FNPGMRZPHYSPFHANDLER)    pgmPhysRomWritePfHandler;
     3779DECLEXPORT(FNPGMRZPHYSPFHANDLER)    pgmPhysMmio2WritePfHandler;
    37663780#endif
    37673781int             pgmPhysFreePage(PVM pVM, PGMMFREEPAGESREQ pReq, uint32_t *pcPendingPages, PPGMPAGE pPage, RTGCPHYS GCPhys,