VirtualBox

Changeset 64115 in vbox


Timestamp:
Sep 30, 2016 8:14:27 PM
Author:
vboxsync
svn:sync-xref-src-repo-rev:
111072
Message:

PDM,IOM,PGM: Morphed the MMIO2 API into a mixed MMIO2 and pre-registered MMIO API that is able to deal with really large (<= 64GB) MMIO ranges. Limited testing, so back out at first sign of trouble.
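
For orientation, a minimal sketch of how a device is expected to use the new pre-registered MMIO path, modeled on the DevPlayground.cpp and DevE1000.cpp hunks in this changeset (the devFoo* names and the 8 GiB BAR size are placeholders, not part of the change):

    #include <VBox/vmm/pdmdev.h>

    /* PCI BAR map callback: the region was pre-registered at construction time,
       so mapping it is just a matter of placing it where the guest put the BAR. */
    static DECLCALLBACK(int)
    devFooMap(PPCIDEVICE pPciDev, int iRegion, RTGCPHYS GCPhysAddress, RTGCPHYS cb, PCIADDRESSSPACE enmType)
    {
        RT_NOREF(enmType, cb);
        if (GCPhysAddress == NIL_RTGCPHYS)
            return VINF_SUCCESS; /* the bus unmaps it via PDMDevHlpMMIOExUnmap */
        return PDMDevHlpMMIOExMap(pPciDev->pDevIns, iRegion, GCPhysAddress);
    }

    /* Constructor fragment: declare a large 64-bit memory BAR and pre-register
       ring-3 MMIO handlers for it (devFooMMIOWrite/devFooMMIORead stand in for
       the device's FNIOMMMIOWRITE/FNIOMMMIOREAD callbacks). */
    int rc = PDMDevHlpPCIIORegionRegister(pDevIns, 0 /*iRegion*/, 8*_1G64,
                                          (PCIADDRESSSPACE)(PCI_ADDRESS_SPACE_MEM | PCI_ADDRESS_SPACE_BAR64),
                                          devFooMap);
    AssertLogRelRCReturn(rc, rc);
    rc = PDMDevHlpMMIOExPreRegister(pDevIns, 0 /*iRegion*/, 8*_1G64,
                                    IOMMMIO_FLAGS_READ_PASSTHRU | IOMMMIO_FLAGS_WRITE_PASSTHRU, "Foo-BAR0",
                                    NULL /*pvUser*/,  devFooMMIOWrite, devFooMMIORead, NULL /*pfnFill*/,
                                    NIL_RTR0PTR /*pvUserR0*/, NULL /*pszWriteR0*/, NULL /*pszReadR0*/, NULL /*pszFillR0*/,
                                    NIL_RTRCPTR /*pvUserRC*/, NULL /*pszWriteRC*/, NULL /*pszReadRC*/, NULL /*pszFillRC*/);
    AssertLogRelRCReturn(rc, rc);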

Location:
trunk
Files:
27 edited

  • trunk/include/VBox/err.h

    r61847 r64115  
    520520/** Don't mess around with ballooned pages. */
    521521#define VERR_PGM_PHYS_PAGE_BALLOONED            (-1646)
     522/** Internal processing error \#1 in page access handler code. */
     523#define VERR_PGM_HANDLER_IPE_1                  (-1647)
    522524
    523525
  • trunk/include/VBox/param.h

    r62476 r64115  
    3939/** The maximum number of pages that can be allocated and mapped
    4040 * by various MM, PGM and SUP APIs. */
    41 #define VBOX_MAX_ALLOC_PAGE_COUNT   (256U * _1M / PAGE_SIZE)
     41#if ARCH_BITS == 64
     42# define VBOX_MAX_ALLOC_PAGE_COUNT   (_512M / PAGE_SIZE)
     43#else
     44# define VBOX_MAX_ALLOC_PAGE_COUNT   (_256M / PAGE_SIZE)
     45#endif
    4246
    4347/** @def VBOX_WITH_PAGE_SHARING
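
    For reference, assuming the usual 4 KiB x86 PAGE_SIZE, the new cap works out to:

        64-bit hosts: VBOX_MAX_ALLOC_PAGE_COUNT = _512M / PAGE_SIZE = 536870912 / 4096 = 131072 pages
        32-bit hosts: VBOX_MAX_ALLOC_PAGE_COUNT = _256M / PAGE_SIZE = 268435456 / 4096 =  65536 pages

    i.e. the limit doubles on 64-bit hosts while 32-bit hosts keep the previous 256 MB (65536 pages).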
  • trunk/include/VBox/vmm/iom.h

    r63682 r64115  
    350350                                         RCPTRTYPE(PFNIOMMMIOFILL)  pfnFillCallback);
    351351VMMR3_INT_DECL(int)  IOMR3MmioDeregister(PVM pVM, PPDMDEVINS pDevIns, RTGCPHYS GCPhysStart, RTGCPHYS cbRange);
     352VMMR3_INT_DECL(int)  IOMR3MmioExPreRegister(PVM pVM, PPDMDEVINS pDevIns, uint32_t iRegion, RTGCPHYS cbRange,
     353                                            uint32_t fFlags, const char *pszDesc,
     354                                            RTR3PTR pvUserR3,
     355                                            R3PTRTYPE(PFNIOMMMIOWRITE) pfnWriteCallbackR3,
     356                                            R3PTRTYPE(PFNIOMMMIOREAD)  pfnReadCallbackR3,
     357                                            R3PTRTYPE(PFNIOMMMIOFILL)  pfnFillCallbackR3,
     358                                            RTR0PTR pvUserR0,
     359                                            R0PTRTYPE(PFNIOMMMIOWRITE) pfnWriteCallbackR0,
     360                                            R0PTRTYPE(PFNIOMMMIOREAD)  pfnReadCallbackR0,
     361                                            R0PTRTYPE(PFNIOMMMIOFILL)  pfnFillCallbackR0,
     362                                            RTRCPTR pvUserRC,
     363                                            RCPTRTYPE(PFNIOMMMIOWRITE) pfnWriteCallbackRC,
     364                                            RCPTRTYPE(PFNIOMMMIOREAD)  pfnReadCallbackRC,
     365                                            RCPTRTYPE(PFNIOMMMIOFILL)  pfnFillCallbackRC);
     366VMMR3_INT_DECL(int)  IOMR3MmioExNotifyMapped(PVM pVM, void *pvUser, RTGCPHYS GCPhys);
     367VMMR3_INT_DECL(void) IOMR3MmioExNotifyUnmapped(PVM pVM, void *pvUser, RTGCPHYS GCPhys);
     368VMMR3_INT_DECL(void) IOMR3MmioExNotifyDeregistered(PVM pVM, void *pvUser);
     369
    352370VMMR3_INT_DECL(VBOXSTRICTRC) IOMR3ProcessForceFlag(PVM pVM, PVMCPU pVCpu, VBOXSTRICTRC rcStrict);
    353371
  • trunk/include/VBox/vmm/pdmdev.h

    r63701 r64115  
    808808
    809809    /**
    810      * Checks if the given address is an MMIO2 base address or not.
     810     * Checks if the given address is an MMIO2 or pre-registered MMIO base address.
    811811     *
    812812     * @returns true/false accordingly.
     
    814814     * @param   pOwner          The owner of the memory, optional.
    815815     * @param   GCPhys          The address to check.
    816      */
    817     DECLR3CALLBACKMEMBER(bool,  pfnIsMMIO2Base,(PPDMDEVINS pDevIns, PPDMDEVINS pOwner, RTGCPHYS GCPhys));
     816     * @sa      PGMR3PhysMMIOExIsBase
     817     */
     818    DECLR3CALLBACKMEMBER(bool,  pfnIsMMIOExBase,(PPDMDEVINS pDevIns, PPDMDEVINS pOwner, RTGCPHYS GCPhys));
    818819
    819820    /**
     
    867868
    868869/** Current PDMPCIHLPR3 version number. */
    869 #define PDM_PCIHLPR3_VERSION                    PDM_VERSION_MAKE(0xfffb, 3, 0)
     870#define PDM_PCIHLPR3_VERSION                    PDM_VERSION_MAKE(0xfffb, 3, 1)
    870871
    871872
     
    25252526
    25262527    /**
    2527      * Deregisters and frees a MMIO2 region.
     2528     * Deregisters and frees a MMIO or MMIO2 region.
    25282529     *
    25292530     * Any physical (and virtual) access handlers registered for the region must
    2530      * be deregistered before calling this function.
     2531     * be deregistered before calling this function (MMIO2 only).
    25312532     *
    25322533     * @returns VBox status code.
     
    25352536     * @thread  EMT.
    25362537     */
    2537     DECLR3CALLBACKMEMBER(int, pfnMMIO2Deregister,(PPDMDEVINS pDevIns, uint32_t iRegion));
    2538 
    2539     /**
    2540      * Maps a MMIO2 region into the physical memory space.
    2541      *
    2542      * A MMIO2 range may overlap with base memory if a lot of RAM
    2543      * is configured for the VM, in which case we'll drop the base
    2544      * memory pages. Presently we will make no attempt to preserve
    2545      * anything that happens to be present in the base memory that
    2546      * is replaced, this is of course incorrect but it's too much
    2547      * effort.
     2538    DECLR3CALLBACKMEMBER(int, pfnMMIOExDeregister,(PPDMDEVINS pDevIns, uint32_t iRegion));
     2539
     2540    /**
     2541     * Maps a MMIO or MMIO2 region into the physical memory space.
     2542     *
     2543     * A MMIO2 range or a pre-registered MMIO range may overlap with base memory if
     2544     * a lot of RAM is configured for the VM, in which case we'll drop the base
     2545     * memory pages.  Presently we will make no attempt to preserve anything that
     2546     * happens to be present in the base memory that is replaced, this is of course
     2547     * incorrect but it's too much effort.
    25482548     *
    25492549     * @returns VBox status code.
     
    25532553     * @thread  EMT.
    25542554     */
    2555     DECLR3CALLBACKMEMBER(int, pfnMMIO2Map,(PPDMDEVINS pDevIns, uint32_t iRegion, RTGCPHYS GCPhys));
    2556 
    2557     /**
    2558      * Unmaps a MMIO2 region previously mapped using pfnMMIO2Map.
     2555    DECLR3CALLBACKMEMBER(int, pfnMMIOExMap,(PPDMDEVINS pDevIns, uint32_t iRegion, RTGCPHYS GCPhys));
     2556
     2557    /**
     2558     * Unmaps a MMIO or MMIO2 region previously mapped using pfnMMIOExMap.
    25592559     *
    25602560     * @returns VBox status code.
     
    25642564     * @thread  EMT.
    25652565     */
    2566     DECLR3CALLBACKMEMBER(int, pfnMMIO2Unmap,(PPDMDEVINS pDevIns, uint32_t iRegion, RTGCPHYS GCPhys));
     2566    DECLR3CALLBACKMEMBER(int, pfnMMIOExUnmap,(PPDMDEVINS pDevIns, uint32_t iRegion, RTGCPHYS GCPhys));
    25672567
    25682568    /**
     
    36073607    DECLR3CALLBACKMEMBER(VMRESUMEREASON, pfnVMGetResumeReason,(PPDMDEVINS pDevIns));
    36083608
     3609    /**
     3610     * Pre-register a Memory Mapped I/O (MMIO) region.
     3611     *
     3612     * This API must be used for large PCI MMIO regions, as it handles these much
     3613     * more efficiently and with greater flexibility when it comes to heap usage.
     3614     * It is only available during device construction.
     3615     *
     3616     * To map and unmap the pre-registered region into and out of guest address
     3617     * space, use the PDMDevHlpMMIOExMap and PDMDevHlpMMIOExUnmap helpers.
     3618     *
     3619     * You may call PDMDevHlpMMIOExDeregister from the destructor to free the region
     3620     * for reasons of symmetry, but it will be automatically deregistered by PDM
     3621     * once the destructor returns.
     3622     *
     3623     * @returns VBox status.
     3624     * @param   pDevIns             The device instance to register the MMIO with.
     3625     * @param   iRegion             The region number.
     3626     * @param   cbRegion            The size of the range (in bytes).
     3627     * @param   fFlags              Flags, IOMMMIO_FLAGS_XXX.
     3628     * @param   pszDesc             Pointer to description string. This must not be freed.
     3629     * @param   pvUser              Ring-3 user argument.
     3630     * @param   pfnWrite            Pointer to the function that will handle Write operations.
     3631     * @param   pfnRead             Pointer to the function that will handle Read operations.
     3632     * @param   pfnFill             Pointer to the function that will handle Fill/memset operations. Optional.
     3633     * @param   pvUserR0            Ring-0 user argument. Optional.
     3634     * @param   pszWriteR0          The name of the ring-0 write handler method. Optional.
     3635     * @param   pszReadR0           The name of the ring-0 read handler method. Optional.
     3636     * @param   pszFillR0           The name of the ring-0 fill/memset handler method. Optional.
     3637     * @param   pvUserRC            Raw-mode context user argument. Optional.  If
     3638     *                              the unsigned value is 0x10000 or higher, it will be
     3639     *                              automatically relocated with the hypervisor
     3640     *                              guest mapping.
     3641     * @param   pszWriteRC          The name of the raw-mode context write handler method. Optional.
     3642     * @param   pszReadRC           The name of the raw-mode context read handler method. Optional.
     3643     * @param   pszFillRC           The name of the raw-mode context fill/memset handler method. Optional.
     3644     * @thread  EMT
     3645     *
     3646     * @remarks Caller enters the device critical section prior to invoking the
     3647     *          registered callback methods.
     3648     * @sa      PDMDevHlpMMIOExMap, PDMDevHlpMMIOExUnmap, PDMDevHlpMMIOExDeregister,
     3649     *          PDMDevHlpMMIORegisterEx
     3650     */
     3651    DECLR3CALLBACKMEMBER(int, pfnMMIOExPreRegister,(PPDMDEVINS pDevIns, uint32_t iRegion, RTGCPHYS cbRegion,
     3652                                                    uint32_t fFlags, const char *pszDesc, RTHCPTR pvUser,
     3653                                                    PFNIOMMMIOWRITE pfnWrite, PFNIOMMMIOREAD pfnRead, PFNIOMMMIOFILL pfnFill,
     3654                                                    RTR0PTR pvUserR0, const char *pszWriteR0, const char *pszReadR0, const char *pszFillR0,
     3655                                                    RTRCPTR pvUserRC, const char *pszWriteRC, const char *pszReadRC, const char *pszFillRC));
    36093656
    36103657    /** Space reserved for future members.
     
    36163663    DECLR3CALLBACKMEMBER(void, pfnReserved5,(void));
    36173664    DECLR3CALLBACKMEMBER(void, pfnReserved6,(void));
    3618     DECLR3CALLBACKMEMBER(void, pfnReserved7,(void));
    3619     /*DECLR3CALLBACKMEMBER(void, pfnReserved8,(void));
     3665    /*DECLR3CALLBACKMEMBER(void, pfnReserved7,(void));
     3666    DECLR3CALLBACKMEMBER(void, pfnReserved8,(void));
    36203667    DECLR3CALLBACKMEMBER(void, pfnReserved9,(void));*/
    36213668    /*DECLR3CALLBACKMEMBER(void, pfnReserved10,(void));*/
     
    38193866/** Current PDMDEVHLPR3 version number. */
    38203867/* 5.0 is (18, 0) so the next version for trunk has to be (19, 0)! */
    3821 #define PDM_DEVHLPR3_VERSION                    PDM_VERSION_MAKE(0xffe7, 17, 0)
     3868#define PDM_DEVHLPR3_VERSION                    PDM_VERSION_MAKE(0xffe7, 17, 1)
    38223869
    38233870
     
    46444691
    46454692/**
    4646  * @copydoc PDMDEVHLPR3::pfnMMIO2Deregister
    4647  */
    4648 DECLINLINE(int) PDMDevHlpMMIO2Deregister(PPDMDEVINS pDevIns, uint32_t iRegion)
    4649 {
    4650     return pDevIns->pHlpR3->pfnMMIO2Deregister(pDevIns, iRegion);
    4651 }
    4652 
    4653 /**
    4654  * @copydoc PDMDEVHLPR3::pfnMMIO2Map
    4655  */
    4656 DECLINLINE(int) PDMDevHlpMMIO2Map(PPDMDEVINS pDevIns, uint32_t iRegion, RTGCPHYS GCPhys)
    4657 {
    4658     return pDevIns->pHlpR3->pfnMMIO2Map(pDevIns, iRegion, GCPhys);
    4659 }
    4660 
    4661 /**
    4662  * @copydoc PDMDEVHLPR3::pfnMMIO2Unmap
    4663  */
    4664 DECLINLINE(int) PDMDevHlpMMIO2Unmap(PPDMDEVINS pDevIns, uint32_t iRegion, RTGCPHYS GCPhys)
    4665 {
    4666     return pDevIns->pHlpR3->pfnMMIO2Unmap(pDevIns, iRegion, GCPhys);
     4693 * @copydoc PDMDEVHLPR3::pfnMMIOExPreRegister
     4694 */
     4695DECLINLINE(int) PDMDevHlpMMIOExPreRegister(PPDMDEVINS pDevIns, uint32_t iRegion, RTGCPHYS cbRegion,
     4696                                           uint32_t fFlags, const char *pszDesc, RTHCPTR pvUser,
     4697                                           PFNIOMMMIOWRITE pfnWrite, PFNIOMMMIOREAD pfnRead, PFNIOMMMIOFILL pfnFill,
     4698                                           RTR0PTR pvUserR0, const char *pszWriteR0, const char *pszReadR0, const char *pszFillR0,
     4699                                           RTRCPTR pvUserRC, const char *pszWriteRC, const char *pszReadRC, const char *pszFillRC)
     4700{
     4701    return pDevIns->pHlpR3->pfnMMIOExPreRegister(pDevIns, iRegion, cbRegion, fFlags, pszDesc,
     4702                                                 pvUser, pfnWrite, pfnRead, pfnFill,
     4703                                                 pvUserR0, pszWriteR0, pszReadR0, pszFillR0,
     4704                                                 pvUserRC, pszWriteRC, pszReadRC, pszFillRC);
     4705}
     4706
     4707/**
     4708 * @copydoc PDMDEVHLPR3::pfnMMIOExDeregister
     4709 */
     4710DECLINLINE(int) PDMDevHlpMMIOExDeregister(PPDMDEVINS pDevIns, uint32_t iRegion)
     4711{
     4712    return pDevIns->pHlpR3->pfnMMIOExDeregister(pDevIns, iRegion);
     4713}
     4714
     4715/**
     4716 * @copydoc PDMDEVHLPR3::pfnMMIOExMap
     4717 */
     4718DECLINLINE(int) PDMDevHlpMMIOExMap(PPDMDEVINS pDevIns, uint32_t iRegion, RTGCPHYS GCPhys)
     4719{
     4720    return pDevIns->pHlpR3->pfnMMIOExMap(pDevIns, iRegion, GCPhys);
     4721}
     4722
     4723/**
     4724 * @copydoc PDMDEVHLPR3::pfnMMIOExUnmap
     4725 */
     4726DECLINLINE(int) PDMDevHlpMMIOExUnmap(PPDMDEVINS pDevIns, uint32_t iRegion, RTGCPHYS GCPhys)
     4727{
     4728    return pDevIns->pHlpR3->pfnMMIOExUnmap(pDevIns, iRegion, GCPhys);
    46674729}
    46684730
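
    The MMIO2 allocation call itself (PGMR3PhysMMIO2Register and its device helper) is untouched here; only the
    deregister/map/unmap helpers are generalized to cover both MMIO2 and pre-registered MMIO regions, so existing
    MMIO2 users need nothing more than a rename. A sketch of the mechanical change, as seen in the DevVGA.cpp and
    VMMDev.cpp hunks further down:

        /* before r64115 */
        rc = PDMDevHlpMMIO2Map(pDevIns, iRegion, GCPhysAddress);
        /* after r64115: same semantics for MMIO2 regions; the same helper also
           handles regions set up with PDMDevHlpMMIOExPreRegister */
        rc = PDMDevHlpMMIOExMap(pDevIns, iRegion, GCPhysAddress);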
  • trunk/include/VBox/vmm/pgm.h

    r63226 r64115  
    742742VMMR3DECL(int)      PGMR3PhysMMIODeregister(PVM pVM, RTGCPHYS GCPhys, RTGCPHYS cb);
    743743VMMR3DECL(int)      PGMR3PhysMMIO2Register(PVM pVM, PPDMDEVINS pDevIns, uint32_t iRegion, RTGCPHYS cb, uint32_t fFlags, void **ppv, const char *pszDesc);
    744 VMMR3DECL(int)      PGMR3PhysMMIO2Deregister(PVM pVM, PPDMDEVINS pDevIns, uint32_t iRegion);
    745 VMMR3DECL(int)      PGMR3PhysMMIO2Map(PVM pVM, PPDMDEVINS pDevIns, uint32_t iRegion, RTGCPHYS GCPhys);
    746 VMMR3DECL(int)      PGMR3PhysMMIO2Unmap(PVM pVM, PPDMDEVINS pDevIns, uint32_t iRegion, RTGCPHYS GCPhys);
    747 VMMR3DECL(bool)     PGMR3PhysMMIO2IsBase(PVM pVM, PPDMDEVINS pDevIns, RTGCPHYS GCPhys);
    748 VMMR3DECL(int)      PGMR3PhysMMIO2GetHCPhys(PVM pVM, PPDMDEVINS pDevIns, uint32_t iRegion, RTGCPHYS off, PRTHCPHYS pHCPhys);
    749 VMMR3DECL(int)      PGMR3PhysMMIO2MapKernel(PVM pVM, PPDMDEVINS pDevIns, uint32_t iRegion, RTGCPHYS off, RTGCPHYS cb, const char *pszDesc, PRTR0PTR pR0Ptr);
     744VMMR3DECL(int)      PGMR3PhysMMIOExPreRegister(PVM pVM, PPDMDEVINS pDevIns, uint32_t iRegion, RTGCPHYS cb, PGMPHYSHANDLERTYPE hType,
     745                                               RTR3PTR pvUserR3, RTR0PTR pvUserR0, RTRCPTR pvUserRC, const char *pszDesc);
     746VMMR3DECL(int)      PGMR3PhysMMIOExDeregister(PVM pVM, PPDMDEVINS pDevIns, uint32_t iRegion);
     747VMMR3DECL(int)      PGMR3PhysMMIOExMap(PVM pVM, PPDMDEVINS pDevIns, uint32_t iRegion, RTGCPHYS GCPhys);
     748VMMR3DECL(int)      PGMR3PhysMMIOExUnmap(PVM pVM, PPDMDEVINS pDevIns, uint32_t iRegion, RTGCPHYS GCPhys);
     749VMMR3DECL(bool)     PGMR3PhysMMIOExIsBase(PVM pVM, PPDMDEVINS pDevIns, RTGCPHYS GCPhys);
     750VMMR3_INT_DECL(int) PGMR3PhysMMIO2GetHCPhys(PVM pVM, PPDMDEVINS pDevIns, uint32_t iRegion, RTGCPHYS off, PRTHCPHYS pHCPhys);
     751VMMR3_INT_DECL(int) PGMR3PhysMMIO2MapKernel(PVM pVM, PPDMDEVINS pDevIns, uint32_t iRegion, RTGCPHYS off, RTGCPHYS cb, const char *pszDesc, PRTR0PTR pR0Ptr);
    750752
    751753/** @name PGMR3PhysRegisterRom flags.
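
    Read together, the replacements above give a pre-registered MMIO region roughly this ring-3 lifecycle (a sketch
    of the intended call order, inferred from these declarations and the IOM/PGM changes further down, not an
    exhaustive contract):

        /* Device construction, normally reached via PDMDevHlpMMIOExPreRegister/IOMR3MmioExPreRegister: */
        rc = PGMR3PhysMMIOExPreRegister(pVM, pDevIns, iRegion, cb, hType, pvUserR3, pvUserR0, pvUserRC, pszDesc);
        /* Guest/BIOS places the BAR: */
        rc = PGMR3PhysMMIOExMap(pVM, pDevIns, iRegion, GCPhys);
        /* BAR is moved or disabled: */
        rc = PGMR3PhysMMIOExUnmap(pVM, pDevIns, iRegion, GCPhys);
        /* Device destruction (or explicit cleanup): */
        rc = PGMR3PhysMMIOExDeregister(pVM, pDevIns, iRegion);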
  • trunk/src/VBox/Devices/Bus/DevPCI.cpp

    r63685 r64115  
    319319                        RTGCPHYS GCPhysBase = r->addr;
    320320                        int rc;
    321                         if (pBus->pPciHlpR3->pfnIsMMIO2Base(pBus->pDevInsR3, d->pDevIns, GCPhysBase))
     321                        if (pBus->pPciHlpR3->pfnIsMMIOExBase(pBus->pDevInsR3, d->pDevIns, GCPhysBase))
    322322                        {
    323323                            /* unmap it. */
    324324                            rc = r->map_func(d, i, NIL_RTGCPHYS, r->size, (PCIADDRESSSPACE)(r->type));
    325325                            AssertRC(rc);
    326                             rc = PDMDevHlpMMIO2Unmap(d->pDevIns, i, GCPhysBase);
     326                            rc = PDMDevHlpMMIOExUnmap(d->pDevIns, i, GCPhysBase);
    327327                        }
    328328                        else
  • trunk/src/VBox/Devices/Bus/DevPciIch9.cpp

    r63879 r64115  
    841841        {
    842842            RTGCPHYS GCPhysBase = pRegion->addr;
    843             if (pBus->pPciHlpR3->pfnIsMMIO2Base(pBus->pDevInsR3, pDev->pDevIns, GCPhysBase))
     843            if (pBus->pPciHlpR3->pfnIsMMIOExBase(pBus->pDevInsR3, pDev->pDevIns, GCPhysBase))
    844844            {
    845845                /* unmap it. */
    846846                rc = pRegion->map_func(pDev, iRegion, NIL_RTGCPHYS, pRegion->size, (PCIADDRESSSPACE)(pRegion->type));
    847847                AssertRC(rc);
    848                 rc = PDMDevHlpMMIO2Unmap(pDev->pDevIns, iRegion, GCPhysBase);
     848                rc = PDMDevHlpMMIOExUnmap(pDev->pDevIns, iRegion, GCPhysBase);
    849849            }
    850850            else
  • trunk/src/VBox/Devices/GIMDev/GIMDev.cpp

    r62890 r64115  
    340340    for (uint32_t i = 0; i < cRegions; i++, pCur++)
    341341    {
    342         int rc = PDMDevHlpMMIO2Deregister(pDevIns, pCur->iRegion);
     342        int rc = PDMDevHlpMMIOExDeregister(pDevIns, pCur->iRegion);
    343343        if (RT_FAILURE(rc))
    344344            return rc;
  • trunk/src/VBox/Devices/Graphics/DevVGA-SVGA.cpp

    r63690 r64115  
    38143814             * Mapping the FIFO RAM.
    38153815             */
    3816             rc = PDMDevHlpMMIO2Map(pDevIns, iRegion, GCPhysAddress);
     3816            rc = PDMDevHlpMMIOExMap(pDevIns, iRegion, GCPhysAddress);
    38173817            AssertRC(rc);
    38183818
  • trunk/src/VBox/Devices/Graphics/DevVGA.cpp

    r63690 r64115  
    54325432         * Mapping the VRAM.
    54335433         */
    5434         rc = PDMDevHlpMMIO2Map(pDevIns, iRegion, GCPhysAddress);
     5434        rc = PDMDevHlpMMIOExMap(pDevIns, iRegion, GCPhysAddress);
    54355435        AssertRC(rc);
    54365436        if (RT_SUCCESS(rc))
  • trunk/src/VBox/Devices/Network/DevE1000.cpp

    r63690 r64115  
    4848
    4949
    50 /* Options *******************************************************************/
     50/*********************************************************************************************************************************
     51*   Defined Constants And Macros                                                                                                 *
     52*********************************************************************************************************************************/
     53/** @name E1000 Build Options
     54 * @{ */
    5155/** @def E1K_INIT_RA0
    5256 * E1K_INIT_RA0 forces E1000 to set the first entry in Receive Address filter
     
    117121 */
    118122#define E1K_WITH_RXD_CACHE
     123/** @def E1K_WITH_PREREG_MMIO
     124 * E1K_WITH_PREREG_MMIO enables a new style MMIO registration and is
      125 * currently only done for testing the related PDM, IOM and PGM code. */
     126//#define E1K_WITH_PREREG_MMIO
     127/* @} */
    119128/* End of Options ************************************************************/
    120129
     
    61136122             *    byte enables.
    61146123             */
     6124#ifdef E1K_WITH_PREREG_MMIO
     6125            pThis->addrMMReg = GCPhysAddress;
     6126            if (GCPhysAddress == NIL_RTGCPHYS)
     6127                rc = VINF_SUCCESS;
     6128            else
     6129            {
     6130                Assert(!(GCPhysAddress & 7));
     6131                rc = PDMDevHlpMMIOExMap(pPciDev->pDevIns, iRegion, GCPhysAddress);
     6132            }
     6133#else
    61156134            pThis->addrMMReg = GCPhysAddress; Assert(!(GCPhysAddress & 7));
    61166135            rc = PDMDevHlpMMIORegister(pPciDev->pDevIns, GCPhysAddress, cb, NULL /*pvUser*/,
     
    61236142                rc = PDMDevHlpMMIORegisterRC(pPciDev->pDevIns, GCPhysAddress, cb, NIL_RTRCPTR /*pvUser*/,
    61246143                                             "e1kMMIOWrite", "e1kMMIORead");
     6144#endif
    61256145            break;
    61266146
     
    76417661    if (RT_FAILURE(rc))
    76427662        return rc;
     7663#ifdef E1K_WITH_PREREG_MMIO
     7664    rc = PDMDevHlpMMIOExPreRegister(pDevIns, 0, E1K_MM_SIZE, IOMMMIO_FLAGS_READ_DWORD | IOMMMIO_FLAGS_WRITE_ONLY_DWORD, "E1000",
     7665                                    NULL        /*pvUserR3*/, e1kMMIOWrite, e1kMMIORead, NULL /*pfnFillR3*/,
     7666                                    NIL_RTR0PTR /*pvUserR0*/, pThis->fR0Enabled ? "e1kMMIOWrite" : NULL,
     7667                                    pThis->fR0Enabled ? "e1kMMIORead" : NULL, NULL /*pszFillR0*/,
     7668                                    NIL_RTRCPTR /*pvUserRC*/, pThis->fRCEnabled ? "e1kMMIOWrite" : NULL,
     7669                                    pThis->fRCEnabled ? "e1kMMIORead" : NULL, NULL /*pszFillRC*/);
     7670    AssertLogRelRCReturn(rc, rc);
     7671#endif
    76437672    /* Map our registers to IO space (region 2, see e1kConfigurePCI) */
    76447673    rc = PDMDevHlpPCIIORegionRegister(pDevIns, 2, E1K_IOPORT_SIZE, PCI_ADDRESS_SPACE_IO, e1kMap);
  • trunk/src/VBox/Devices/Network/DevPCNet.cpp

    r63690 r64115  
    44184418    {
    44194419        /* drop this dummy region */
    4420         rc = PDMDevHlpMMIO2Deregister(pDevIns, 2);
     4420        rc = PDMDevHlpMMIOExDeregister(pDevIns, 2);
    44214421        pThis->fSharedRegion = false;
    44224422    }
  • trunk/src/VBox/Devices/Samples/DevPlayground.cpp

    r63910 r64115  
    8080 * @callback_method_impl{FNPCIIOREGIONMAP}
    8181 */
    82 static DECLCALLBACK(int) devPlaygroundMap(PPCIDEVICE pPciDev, int iRegion, RTGCPHYS GCPhysAddress, RTGCPHYS cb, PCIADDRESSSPACE enmType)
    83 {
    84     NOREF(enmType);
    85     int rc;
     82static DECLCALLBACK(int)
     83devPlaygroundMap(PPCIDEVICE pPciDev, int iRegion, RTGCPHYS GCPhysAddress, RTGCPHYS cb, PCIADDRESSSPACE enmType)
     84{
     85    RT_NOREF(enmType, cb);
    8686
    8787    switch (iRegion)
    8888    {
    8989        case 0:
    90             rc = PDMDevHlpMMIORegister(pPciDev->pDevIns, GCPhysAddress, cb, NULL,
    91                                        IOMMMIO_FLAGS_READ_PASSTHRU | IOMMMIO_FLAGS_WRITE_PASSTHRU,
    92                                        devPlaygroundMMIOWrite, devPlaygroundMMIORead, "PG-BAR0");
    93             break;
    9490        case 2:
    95             rc = PDMDevHlpMMIORegister(pPciDev->pDevIns, GCPhysAddress, cb, NULL,
    96                                        IOMMMIO_FLAGS_READ_PASSTHRU | IOMMMIO_FLAGS_WRITE_PASSTHRU,
    97                                        devPlaygroundMMIOWrite, devPlaygroundMMIORead, "PG-BAR2");
    98             break;
     91            Assert(enmType == (PCIADDRESSSPACE)(PCI_ADDRESS_SPACE_MEM | PCI_ADDRESS_SPACE_BAR64));
     92            return PDMDevHlpMMIOExMap(pPciDev->pDevIns, iRegion, GCPhysAddress);
     93
    9994        default:
    10095            /* We should never get here */
    101             AssertMsgFailed(("Invalid PCI region param in map callback"));
    102             rc = VERR_INTERNAL_ERROR;
     96            AssertMsgFailedReturn(("Invalid PCI region param in map callback"), VERR_INTERNAL_ERROR);
    10397    }
    104     return rc;
    105 
    10698}
    10799
     
    153145    if (RT_FAILURE(rc))
    154146        return rc;
     147    /* First region. */
    155148    rc = PDMDevHlpPCIIORegionRegister(pDevIns, 0, 8*_1G64,
    156149                                      (PCIADDRESSSPACE)(PCI_ADDRESS_SPACE_MEM | PCI_ADDRESS_SPACE_BAR64),
    157150                                      devPlaygroundMap);
    158     if (RT_FAILURE(rc))
    159         return rc;
    160     rc = PDMDevHlpPCIIORegionRegister(pDevIns, 2, 8*_1G64,
     151    AssertLogRelRCReturn(rc, rc);
     152    rc = PDMDevHlpMMIOExPreRegister(pDevIns, 0, 8*_1G64, IOMMMIO_FLAGS_READ_PASSTHRU | IOMMMIO_FLAGS_WRITE_PASSTHRU, "PG-BAR0",
     153                                    NULL /*pvUser*/,  devPlaygroundMMIOWrite, devPlaygroundMMIORead, NULL /*pfnFill*/,
     154                                    NIL_RTR0PTR /*pvUserR0*/, NULL /*pszWriteR0*/, NULL /*pszReadR0*/, NULL /*pszFillR0*/,
     155                                    NIL_RTRCPTR /*pvUserRC*/, NULL /*pszWriteRC*/, NULL /*pszReadRC*/, NULL /*pszFillRC*/);
     156    AssertLogRelRCReturn(rc, rc);
     157
     158    /* Second region. */
     159    rc = PDMDevHlpPCIIORegionRegister(pDevIns, 2, 64*_1G64,
    161160                                      (PCIADDRESSSPACE)(PCI_ADDRESS_SPACE_MEM | PCI_ADDRESS_SPACE_BAR64),
    162161                                      devPlaygroundMap);
    163     if (RT_FAILURE(rc))
    164         return rc;
     162    AssertLogRelRCReturn(rc, rc);
     163    rc = PDMDevHlpMMIOExPreRegister(pDevIns, 2, 64*_1G64, IOMMMIO_FLAGS_READ_PASSTHRU | IOMMMIO_FLAGS_WRITE_PASSTHRU, "PG-BAR2",
     164                                    NULL /*pvUser*/,  devPlaygroundMMIOWrite, devPlaygroundMMIORead, NULL /*pfnFill*/,
     165                                    NIL_RTR0PTR /*pvUserR0*/, NULL /*pszWriteR0*/, NULL /*pszReadR0*/, NULL /*pszFillR0*/,
     166                                    NIL_RTRCPTR /*pvUserRC*/, NULL /*pszWriteRC*/, NULL /*pszReadRC*/, NULL /*pszFillRC*/);
     167    AssertLogRelRCReturn(rc, rc);
    165168
    166169    return VINF_SUCCESS;
  • trunk/src/VBox/Devices/VMMDev/VMMDev.cpp

    r63690 r64115  
    28972897            pThis->GCPhysVMMDevRAM = GCPhysAddress;
    28982898            Assert(pThis->GCPhysVMMDevRAM == GCPhysAddress);
    2899             rc = PDMDevHlpMMIO2Map(pPciDev->pDevIns, iRegion, GCPhysAddress);
     2899            rc = PDMDevHlpMMIOExMap(pPciDev->pDevIns, iRegion, GCPhysAddress);
    29002900        }
    29012901        else
     
    29192919            pThis->GCPhysVMMDevHeap = GCPhysAddress;
    29202920            Assert(pThis->GCPhysVMMDevHeap == GCPhysAddress);
    2921             rc = PDMDevHlpMMIO2Map(pPciDev->pDevIns, iRegion, GCPhysAddress);
     2921            rc = PDMDevHlpMMIOExMap(pPciDev->pDevIns, iRegion, GCPhysAddress);
    29222922            if (RT_SUCCESS(rc))
    29232923                rc = PDMDevHlpRegisterVMMDevHeap(pPciDev->pDevIns, GCPhysAddress, pThis->pVMMDevHeapR3, VMMDEV_HEAP_SIZE);
  • trunk/src/VBox/VMM/VMMAll/PGMAllHandler.cpp

    r63465 r64115  
    123123
    124124
     125/**
      126 * Creates a physical access handler.
     127 *
     128 * @returns VBox status code.
     129 * @retval  VINF_SUCCESS when successfully installed.
     130 * @retval  VINF_PGM_GCPHYS_ALIASED when the shadow PTs could be updated because
      131 *          the guest page is aliased and/or mapped by multiple PTs. A CR3 sync has been
     132 *          flagged together with a pool clearing.
     133 * @retval  VERR_PGM_HANDLER_PHYSICAL_CONFLICT if the range conflicts with an existing
     134 *          one. A debug assertion is raised.
     135 *
     136 * @param   pVM             The cross context VM structure.
     137 * @param   hType           The handler type registration handle.
     138 * @param   pvUserR3        User argument to the R3 handler.
     139 * @param   pvUserR0        User argument to the R0 handler.
     140 * @param   pvUserRC        User argument to the RC handler. This can be a value
      141 *                          less than 0x10000 or a (non-null) pointer that is
     142 *                          automatically relocated.
     143 * @param   pszDesc         Description of this handler.  If NULL, the type
     144 *                          description will be used instead.
     145 * @param   ppPhysHandler   Where to return the access handler structure on
     146 *                          success.
     147 */
     148int pgmHandlerPhysicalExCreate(PVM pVM, PGMPHYSHANDLERTYPE hType, RTR3PTR pvUserR3, RTR0PTR pvUserR0, RTRCPTR pvUserRC,
     149                               R3PTRTYPE(const char *) pszDesc, PPGMPHYSHANDLER *ppPhysHandler)
     150{
     151    PPGMPHYSHANDLERTYPEINT pType = PGMPHYSHANDLERTYPEINT_FROM_HANDLE(pVM, hType);
     152    Log(("pgmHandlerPhysicalExCreate: pvUserR3=%RHv pvUserR0=%RHv pvUserGC=%RRv hType=%#x (%d, %s) pszDesc=%RHv:%s\n",
     153         pvUserR3, pvUserR0, pvUserRC, hType, pType->enmKind, R3STRING(pType->pszDesc), pszDesc, R3STRING(pszDesc)));
     154
     155    /*
     156     * Validate input.
     157     */
     158    AssertPtr(ppPhysHandler);
     159    AssertReturn(pType->u32Magic == PGMPHYSHANDLERTYPEINT_MAGIC, VERR_INVALID_HANDLE);
     160    AssertMsgReturn(    (RTRCUINTPTR)pvUserRC < 0x10000
     161                    ||  MMHyperR3ToRC(pVM, MMHyperRCToR3(pVM, pvUserRC)) == pvUserRC,
     162                    ("Not RC pointer! pvUserRC=%RRv\n", pvUserRC),
     163                    VERR_INVALID_PARAMETER);
     164    AssertMsgReturn(    (RTR0UINTPTR)pvUserR0 < 0x10000
     165                    ||  MMHyperR3ToR0(pVM, MMHyperR0ToR3(pVM, pvUserR0)) == pvUserR0,
     166                    ("Not R0 pointer! pvUserR0=%RHv\n", pvUserR0),
     167                    VERR_INVALID_PARAMETER);
     168
     169    /*
     170     * Allocate and initialize the new entry.
     171     */
     172    PPGMPHYSHANDLER pNew;
     173    int rc = MMHyperAlloc(pVM, sizeof(*pNew), 0, MM_TAG_PGM_HANDLERS, (void **)&pNew);
     174    if (RT_SUCCESS(rc))
     175    {
     176        pNew->Core.Key      = NIL_RTGCPHYS;
     177        pNew->Core.KeyLast  = NIL_RTGCPHYS;
     178        pNew->cPages        = 0;
     179        pNew->cAliasedPages = 0;
     180        pNew->cTmpOffPages  = 0;
     181        pNew->pvUserR3      = pvUserR3;
     182        pNew->pvUserR0      = pvUserR0;
     183        pNew->pvUserRC      = pvUserRC;
     184        pNew->hType         = hType;
     185        pNew->pszDesc       = pszDesc != NIL_RTR3PTR ? pszDesc : pType->pszDesc;
     186        pgmHandlerPhysicalTypeRetain(pVM, pType);
     187        *ppPhysHandler = pNew;
     188        return VINF_SUCCESS;
     189    }
     190
     191    return rc;
     192}
     193
     194
     195/**
      196 * Register an access handler for a physical range.
     197 *
     198 * @returns VBox status code.
     199 * @retval  VINF_SUCCESS when successfully installed.
     200 * @retval  VINF_PGM_GCPHYS_ALIASED when the shadow PTs could be updated because
      201 *          the guest page is aliased and/or mapped by multiple PTs. A CR3 sync has been
     202 *          flagged together with a pool clearing.
     203 * @retval  VERR_PGM_HANDLER_PHYSICAL_CONFLICT if the range conflicts with an existing
     204 *          one. A debug assertion is raised.
     205 *
     206 * @param   pVM             The cross context VM structure.
     207 * @param   pPhysHandler    The physical handler.
     208 * @param   GCPhys          Start physical address.
     209 * @param   GCPhysLast      Last physical address. (inclusive)
     210 */
     211int pgmHandlerPhysicalExRegister(PVM pVM, PPGMPHYSHANDLER pPhysHandler, RTGCPHYS GCPhys, RTGCPHYS GCPhysLast)
     212{
     213    /*
     214     * Validate input.
     215     */
     216    AssertPtr(pPhysHandler);
     217#if defined(LOG_ENABLED) || defined(VBOX_STRICT)
     218    PPGMPHYSHANDLERTYPEINT pType = PGMPHYSHANDLERTYPEINT_FROM_HANDLE(pVM, pPhysHandler->hType);
     219    Assert(pType->u32Magic == PGMPHYSHANDLERTYPEINT_MAGIC);
     220    Log(("pgmHandlerPhysicalExRegister: GCPhys=%RGp GCPhysLast=%RGp hType=%#x (%d, %s) pszDesc=%RHv:%s\n",
     221         GCPhys, GCPhysLast, pPhysHandler->hType, pType->enmKind, R3STRING(pType->pszDesc), pPhysHandler->pszDesc, R3STRING(pPhysHandler->pszDesc)));
     222#endif
     223    AssertReturn(pPhysHandler->Core.Key == NIL_RTGCPHYS, VERR_WRONG_ORDER);
     224
     225    AssertMsgReturn(GCPhys < GCPhysLast, ("GCPhys >= GCPhysLast (%#x >= %#x)\n", GCPhys, GCPhysLast), VERR_INVALID_PARAMETER);
     226    switch (pType->enmKind)
     227    {
     228        case PGMPHYSHANDLERKIND_WRITE:
     229            break;
     230        case PGMPHYSHANDLERKIND_MMIO:
     231        case PGMPHYSHANDLERKIND_ALL:
     232            /* Simplification for PGMPhysRead, PGMR0Trap0eHandlerNPMisconfig and others: Full pages. */
     233            AssertMsgReturn(!(GCPhys & PAGE_OFFSET_MASK), ("%RGp\n", GCPhys), VERR_INVALID_PARAMETER);
     234            AssertMsgReturn((GCPhysLast & PAGE_OFFSET_MASK) == PAGE_OFFSET_MASK, ("%RGp\n", GCPhysLast), VERR_INVALID_PARAMETER);
     235            break;
     236        default:
     237            AssertMsgFailed(("Invalid input enmKind=%d!\n", pType->enmKind));
     238            return VERR_INVALID_PARAMETER;
     239    }
     240
     241    /*
     242     * We require the range to be within registered ram.
     243     * There is no apparent need to support ranges which cover more than one ram range.
     244     */
     245    PPGMRAMRANGE pRam = pgmPhysGetRange(pVM, GCPhys);
     246    if (   !pRam
     247        || GCPhysLast < pRam->GCPhys
     248        || GCPhys > pRam->GCPhysLast)
     249    {
     250#ifdef IN_RING3
     251        DBGFR3Info(pVM->pUVM, "phys", NULL, NULL);
     252#endif
     253        AssertMsgFailed(("No RAM range for %RGp-%RGp\n", GCPhys, GCPhysLast));
     254        return VERR_PGM_HANDLER_PHYSICAL_NO_RAM_RANGE;
     255    }
     256
     257    /*
     258     * Try insert into list.
     259     */
     260    pPhysHandler->Core.Key     = GCPhys;
     261    pPhysHandler->Core.KeyLast = GCPhysLast;
     262    pPhysHandler->cPages       = (GCPhysLast - (GCPhys & X86_PTE_PAE_PG_MASK) + PAGE_SIZE) >> PAGE_SHIFT;
     263
     264    pgmLock(pVM);
     265    if (RTAvlroGCPhysInsert(&pVM->pgm.s.CTX_SUFF(pTrees)->PhysHandlers, &pPhysHandler->Core))
     266    {
     267        int rc = pgmHandlerPhysicalSetRamFlagsAndFlushShadowPTs(pVM, pPhysHandler, pRam);
     268        if (rc == VINF_PGM_SYNC_CR3)
     269            rc = VINF_PGM_GCPHYS_ALIASED;
     270        pgmUnlock(pVM);
     271
     272#ifdef VBOX_WITH_REM
     273# ifndef IN_RING3
     274        REMNotifyHandlerPhysicalRegister(pVM, pType->enmKind, GCPhys, GCPhysLast - GCPhys + 1, !!pType->pfnHandlerR3);
     275# else
     276        REMR3NotifyHandlerPhysicalRegister(pVM, pType->enmKind, GCPhys, GCPhysLast - GCPhys + 1, !!pType->pfnHandlerR3);
     277# endif
     278#endif
     279        if (rc != VINF_SUCCESS)
     280            Log(("PGMHandlerPhysicalRegisterEx: returns %Rrc (%RGp-%RGp)\n", rc, GCPhys, GCPhysLast));
     281        return rc;
     282    }
     283    pgmUnlock(pVM);
     284
     285    pPhysHandler->Core.Key     = NIL_RTGCPHYS;
     286    pPhysHandler->Core.KeyLast = NIL_RTGCPHYS;
     287
     288#if defined(IN_RING3) && defined(VBOX_STRICT)
     289    DBGFR3Info(pVM->pUVM, "handlers", "phys nostats", NULL);
     290#endif
     291    AssertMsgFailed(("Conflict! GCPhys=%RGp GCPhysLast=%RGp pszDesc=%s/%s\n",
     292                     GCPhys, GCPhysLast, R3STRING(pPhysHandler->pszDesc), R3STRING(pType->pszDesc)));
     293    return VERR_PGM_HANDLER_PHYSICAL_CONFLICT;
     294}
     295
    125296
    126297/**
     
    154325         GCPhys, GCPhysLast, pvUserR3, pvUserR0, pvUserRC, hType, pType->enmKind, R3STRING(pType->pszDesc), pszDesc, R3STRING(pszDesc)));
    155326
    156     /*
    157      * Validate input.
    158      */
    159     AssertReturn(pType->u32Magic == PGMPHYSHANDLERTYPEINT_MAGIC, VERR_INVALID_HANDLE);
    160     AssertMsgReturn(GCPhys < GCPhysLast, ("GCPhys >= GCPhysLast (%#x >= %#x)\n", GCPhys, GCPhysLast), VERR_INVALID_PARAMETER);
    161     switch (pType->enmKind)
    162     {
    163         case PGMPHYSHANDLERKIND_WRITE:
    164             break;
    165         case PGMPHYSHANDLERKIND_MMIO:
    166         case PGMPHYSHANDLERKIND_ALL:
    167             /* Simplification for PGMPhysRead, PGMR0Trap0eHandlerNPMisconfig and others: Full pages. */
    168             AssertMsgReturn(!(GCPhys & PAGE_OFFSET_MASK), ("%RGp\n", GCPhys), VERR_INVALID_PARAMETER);
    169             AssertMsgReturn((GCPhysLast & PAGE_OFFSET_MASK) == PAGE_OFFSET_MASK, ("%RGp\n", GCPhysLast), VERR_INVALID_PARAMETER);
    170             break;
    171         default:
    172             AssertMsgFailed(("Invalid input enmKind=%d!\n", pType->enmKind));
    173             return VERR_INVALID_PARAMETER;
    174     }
    175     AssertMsgReturn(    (RTRCUINTPTR)pvUserRC < 0x10000
    176                     ||  MMHyperR3ToRC(pVM, MMHyperRCToR3(pVM, pvUserRC)) == pvUserRC,
    177                     ("Not RC pointer! pvUserRC=%RRv\n", pvUserRC),
    178                     VERR_INVALID_PARAMETER);
    179     AssertMsgReturn(    (RTR0UINTPTR)pvUserR0 < 0x10000
    180                     ||  MMHyperR3ToR0(pVM, MMHyperR0ToR3(pVM, pvUserR0)) == pvUserR0,
    181                     ("Not R0 pointer! pvUserR0=%RHv\n", pvUserR0),
    182                     VERR_INVALID_PARAMETER);
    183 
    184     /*
    185      * We require the range to be within registered ram.
    186      * There is no apparent need to support ranges which cover more than one ram range.
    187      */
    188     PPGMRAMRANGE pRam = pgmPhysGetRange(pVM, GCPhys);
    189     if (   !pRam
    190         || GCPhysLast < pRam->GCPhys
    191         || GCPhys > pRam->GCPhysLast)
    192     {
    193 #ifdef IN_RING3
    194         DBGFR3Info(pVM->pUVM, "phys", NULL, NULL);
    195 #endif
    196         AssertMsgFailed(("No RAM range for %RGp-%RGp\n", GCPhys, GCPhysLast));
    197         return VERR_PGM_HANDLER_PHYSICAL_NO_RAM_RANGE;
    198     }
    199 
    200     /*
    201      * Allocate and initialize the new entry.
    202      */
    203327    PPGMPHYSHANDLER pNew;
    204     int rc = MMHyperAlloc(pVM, sizeof(*pNew), 0, MM_TAG_PGM_HANDLERS, (void **)&pNew);
    205     if (RT_FAILURE(rc))
    206         return rc;
    207 
    208     pNew->Core.Key      = GCPhys;
    209     pNew->Core.KeyLast  = GCPhysLast;
    210     pNew->cPages        = (GCPhysLast - (GCPhys & X86_PTE_PAE_PG_MASK) + PAGE_SIZE) >> PAGE_SHIFT;
    211     pNew->cAliasedPages = 0;
    212     pNew->cTmpOffPages  = 0;
    213     pNew->pvUserR3      = pvUserR3;
    214     pNew->pvUserR0      = pvUserR0;
    215     pNew->pvUserRC      = pvUserRC;
    216     pNew->hType         = hType;
    217     pNew->pszDesc       = pszDesc != NIL_RTR3PTR ? pszDesc : pType->pszDesc;
    218     pgmHandlerPhysicalTypeRetain(pVM, pType);
    219 
    220     pgmLock(pVM);
    221 
    222     /*
    223      * Try insert into list.
    224      */
    225     if (RTAvlroGCPhysInsert(&pVM->pgm.s.CTX_SUFF(pTrees)->PhysHandlers, &pNew->Core))
    226     {
    227         rc = pgmHandlerPhysicalSetRamFlagsAndFlushShadowPTs(pVM, pNew, pRam);
    228         if (rc == VINF_PGM_SYNC_CR3)
    229             rc = VINF_PGM_GCPHYS_ALIASED;
    230         pgmUnlock(pVM);
    231 #ifdef VBOX_WITH_REM
    232 # ifndef IN_RING3
    233         REMNotifyHandlerPhysicalRegister(pVM, pType->enmKind, GCPhys, GCPhysLast - GCPhys + 1, !!pType->pfnHandlerR3);
    234 # else
    235         REMR3NotifyHandlerPhysicalRegister(pVM, pType->enmKind, GCPhys, GCPhysLast - GCPhys + 1, !!pType->pfnHandlerR3);
    236 # endif
    237 #endif
    238         if (rc != VINF_SUCCESS)
    239             Log(("PGMHandlerPhysicalRegisterEx: returns %Rrc (%RGp-%RGp)\n", rc, GCPhys, GCPhysLast));
    240         return rc;
    241     }
    242 
    243     pgmUnlock(pVM);
    244 
    245 #if defined(IN_RING3) && defined(VBOX_STRICT)
    246     DBGFR3Info(pVM->pUVM, "handlers", "phys nostats", NULL);
    247 #endif
    248     AssertMsgFailed(("Conflict! GCPhys=%RGp GCPhysLast=%RGp pszDesc=%s/%s\n",
    249                      GCPhys, GCPhysLast, R3STRING(pszDesc), R3STRING(pType->pszDesc)));
    250     pgmHandlerPhysicalTypeRelease(pVM, pType);
    251     MMHyperFree(pVM, pNew);
    252     return VERR_PGM_HANDLER_PHYSICAL_CONFLICT;
     328    int rc = pgmHandlerPhysicalExCreate(pVM, hType, pvUserR3, pvUserR0, pvUserRC, pszDesc, &pNew);
     329    if (RT_SUCCESS(rc))
     330    {
     331        rc = pgmHandlerPhysicalExRegister(pVM, pNew, GCPhys, GCPhysLast);
     332        if (RT_SUCCESS(rc))
     333            return rc;
     334        pgmHandlerPhysicalExDestroy(pVM, pNew);
     335    }
     336    return rc;
    253337}
    254338
     
    313397
    314398/**
    315  * Register a physical page access handler.
     399 * Deregister a physical page access handler.
    316400 *
    317401 * @returns VBox status code.
    318  * @param   pVM         The cross context VM structure.
    319  * @param   GCPhys      Start physical address.
    320  */
    321 VMMDECL(int)  PGMHandlerPhysicalDeregister(PVM pVM, RTGCPHYS GCPhys)
    322 {
    323     /*
    324      * Find the handler.
     402 * @param   pVM             The cross context VM structure.
     403 * @param   pPhysHandler    The handler to deregister (but not free).
     404 */
     405int pgmHandlerPhysicalExDeregister(PVM pVM, PPGMPHYSHANDLER pPhysHandler)
     406{
     407    LogFlow(("pgmHandlerPhysicalExDeregister: Removing Range %RGp-%RGp %s\n",
     408             pPhysHandler->Core.Key, pPhysHandler->Core.KeyLast, R3STRING(pPhysHandler->pszDesc)));
     409    AssertReturn(pPhysHandler->Core.Key != NIL_RTGCPHYS, VERR_PGM_HANDLER_NOT_FOUND);
     410
     411    /*
     412     * Remove the handler from the tree.
    325413     */
    326414    pgmLock(pVM);
    327     PPGMPHYSHANDLER pCur = (PPGMPHYSHANDLER)RTAvlroGCPhysRemove(&pVM->pgm.s.CTX_SUFF(pTrees)->PhysHandlers, GCPhys);
    328     if (pCur)
    329     {
    330         LogFlow(("PGMHandlerPhysicalDeregister: Removing Range %RGp-%RGp %s\n", pCur->Core.Key, pCur->Core.KeyLast, R3STRING(pCur->pszDesc)));
    331 
     415    PPGMPHYSHANDLER pRemoved = (PPGMPHYSHANDLER)RTAvlroGCPhysRemove(&pVM->pgm.s.CTX_SUFF(pTrees)->PhysHandlers,
     416                                                                    pPhysHandler->Core.Key);
     417    if (pRemoved == pPhysHandler)
     418    {
    332419        /*
    333420         * Clear the page bits, notify the REM about this change and clear
    334421         * the cache.
    335422         */
    336         pgmHandlerPhysicalResetRamFlags(pVM, pCur);
    337         pgmHandlerPhysicalDeregisterNotifyREM(pVM, pCur);
     423        pgmHandlerPhysicalResetRamFlags(pVM, pPhysHandler);
     424        pgmHandlerPhysicalDeregisterNotifyREM(pVM, pPhysHandler);
    338425        pVM->pgm.s.pLastPhysHandlerR0 = 0;
    339426        pVM->pgm.s.pLastPhysHandlerR3 = 0;
    340427        pVM->pgm.s.pLastPhysHandlerRC = 0;
    341         PGMHandlerPhysicalTypeRelease(pVM, pCur->hType);
    342         MMHyperFree(pVM, pCur);
     428
     429        pPhysHandler->Core.Key     = NIL_RTGCPHYS;
     430        pPhysHandler->Core.KeyLast = NIL_RTGCPHYS;
     431
    343432        pgmUnlock(pVM);
     433
    344434        return VINF_SUCCESS;
    345435    }
     436
     437    /*
     438     * Both of the failure conditions here are considered internal processing
     439     * errors because they can only be caused by race conditions or corruption.
     440     * If we ever need to handle concurrent deregistration, we have to move
     441     * the NIL_RTGCPHYS check inside the PGM lock.
     442     */
     443    if (pRemoved)
     444        RTAvlroGCPhysInsert(&pVM->pgm.s.CTX_SUFF(pTrees)->PhysHandlers, &pRemoved->Core);
     445
     446    pgmUnlock(pVM);
     447
     448    if (!pRemoved)
     449        AssertMsgFailed(("Didn't find range starting at %RGp in the tree!\n", pPhysHandler->Core.Key));
     450    else
     451        AssertMsgFailed(("Found different handle at %RGp in the tree: got %p insteaded of %p\n",
     452                         pPhysHandler->Core.Key, pRemoved, pPhysHandler));
     453    return VERR_PGM_HANDLER_IPE_1;
     454}
     455
     456
     457/**
     458 * Destroys (frees) a physical handler.
     459 *
     460 * The caller must deregister it before destroying it!
     461 *
     462 * @returns VBox status code.
     463 * @param   pVM         The cross context VM structure.
     464 * @param   pHandler    The handler to free.  NULL if ignored.
     465 */
     466int pgmHandlerPhysicalExDestroy(PVM pVM, PPGMPHYSHANDLER pHandler)
     467{
     468    if (pHandler)
     469    {
     470        AssertPtr(pHandler);
     471        AssertReturn(pHandler->Core.Key == NIL_RTGCPHYS, VERR_WRONG_ORDER);
     472        PGMHandlerPhysicalTypeRelease(pVM, pHandler->hType);
     473        MMHyperFree(pVM, pHandler);
     474    }
     475    return VINF_SUCCESS;
     476}
     477
     478
     479/**
     480 * Deregister a physical page access handler.
     481 *
     482 * @returns VBox status code.
     483 * @param   pVM         The cross context VM structure.
     484 * @param   GCPhys      Start physical address.
     485 */
     486VMMDECL(int)  PGMHandlerPhysicalDeregister(PVM pVM, RTGCPHYS GCPhys)
     487{
     488    /*
     489     * Find the handler.
     490     */
     491    pgmLock(pVM);
     492    PPGMPHYSHANDLER pRemoved = (PPGMPHYSHANDLER)RTAvlroGCPhysRemove(&pVM->pgm.s.CTX_SUFF(pTrees)->PhysHandlers, GCPhys);
     493    if (pRemoved)
     494    {
     495        LogFlow(("PGMHandlerPhysicalDeregister: Removing Range %RGp-%RGp %s\n",
     496                 pRemoved->Core.Key, pRemoved->Core.KeyLast, R3STRING(pRemoved->pszDesc)));
     497
     498        /*
     499         * Clear the page bits, notify the REM about this change and clear
     500         * the cache.
     501         */
     502        pgmHandlerPhysicalResetRamFlags(pVM, pRemoved);
     503        pgmHandlerPhysicalDeregisterNotifyREM(pVM, pRemoved);
     504        pVM->pgm.s.pLastPhysHandlerR0 = 0;
     505        pVM->pgm.s.pLastPhysHandlerR3 = 0;
     506        pVM->pgm.s.pLastPhysHandlerRC = 0;
     507
     508        pgmUnlock(pVM);
     509
     510        pRemoved->Core.Key = NIL_RTGCPHYS;
     511        pgmHandlerPhysicalExDestroy(pVM, pRemoved);
     512        return VINF_SUCCESS;
     513    }
     514
    346515    pgmUnlock(pVM);
    347516
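
    The refactoring above splits handler creation from registration so that a physical access handler can outlive
    individual map/unmap cycles of a pre-registered MMIO range. A rough sketch of the resulting lifecycle, using only
    the internal functions introduced or re-documented in this hunk:

        PPGMPHYSHANDLER pHandler;
        int rc = pgmHandlerPhysicalExCreate(pVM, hType, pvUserR3, pvUserR0, pvUserRC, pszDesc, &pHandler);
        /* on each mapping of the owning range: */
        rc = pgmHandlerPhysicalExRegister(pVM, pHandler, GCPhys, GCPhysLast);
        /* on each unmapping: */
        rc = pgmHandlerPhysicalExDeregister(pVM, pHandler);
        /* once the range is finally torn down (must be deregistered first): */
        pgmHandlerPhysicalExDestroy(pVM, pHandler);

    The public registration path is now simply ExCreate followed by ExRegister, destroying the handler again if
    registration fails.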
  • trunk/src/VBox/VMM/VMMAll/PGMAllPhys.cpp

    r62606 r64115  
    12291229                               pPage->s.idPage, pPage->s.uStateY),
    12301230                              VERR_PGM_PHYS_PAGE_MAP_MMIO2_IPE);
    1231         PPGMMMIO2RANGE pMmio2Range = pVM->pgm.s.CTX_SUFF(apMmio2Ranges)[idMmio2 - 1];
     1231        PPGMREGMMIORANGE pMmio2Range = pVM->pgm.s.CTX_SUFF(apMmio2Ranges)[idMmio2 - 1];
    12321232        AssertLogRelReturn(pMmio2Range, VERR_PGM_PHYS_PAGE_MAP_MMIO2_IPE);
    12331233        AssertLogRelReturn(pMmio2Range->idMmio2 == idMmio2, VERR_PGM_PHYS_PAGE_MAP_MMIO2_IPE);
  • trunk/src/VBox/VMM/VMMR3/GIM.cpp

    r63648 r64115  
    634634     * Map the MMIO2 region over the specified guest-physical address.
    635635     */
    636     int rc = PDMDevHlpMMIO2Map(pDevIns, pRegion->iRegion, GCPhysRegion);
     636    int rc = PDMDevHlpMMIOExMap(pDevIns, pRegion->iRegion, GCPhysRegion);
    637637    if (RT_SUCCESS(rc))
    638638    {
  • trunk/src/VBox/VMM/VMMR3/IOM.cpp

    r63682 r64115  
    16541654
    16551655/**
      1656 * Pre-registers a MMIO region.
     1657 *
      1658 * The rest of the manipulation of this region goes through the PGMPhysMMIOEx*
     1659 * APIs: PGMR3PhysMMIOExMap, PGMR3PhysMMIOExUnmap, PGMR3PhysMMIOExDeregister
     1660 *
     1661 * @returns VBox status code.
     1662 * @param   pVM                 Pointer to the cross context VM structure.
     1663 * @param   pDevIns             The device.
     1664 * @param   iRegion             The region number.
     1665 * @param   cbRegion            The size of the MMIO region.  Must be a multiple
     1666 *                              of X86_PAGE_SIZE
     1667 * @param   fFlags              Flags, see IOMMMIO_FLAGS_XXX.
     1668 * @param   pszDesc             Pointer to description string. This must not be
     1669 *                              freed.
     1670 * @param   pvUserR3            Ring-3 user pointer.
     1671 * @param   pfnWriteCallbackR3  Callback for handling writes, ring-3. Mandatory.
     1672 * @param   pfnReadCallbackR3   Callback for handling reads, ring-3. Mandatory.
     1673 * @param   pfnFillCallbackR3   Callback for handling fills, ring-3. Optional.
     1674 * @param   pvUserR0            Ring-0 user pointer.
     1675 * @param   pfnWriteCallbackR0  Callback for handling writes, ring-0. Optional.
     1676 * @param   pfnReadCallbackR0   Callback for handling reads, ring-0. Optional.
     1677 * @param   pfnFillCallbackR0   Callback for handling fills, ring-0. Optional.
     1678 * @param   pvUserRC            Raw-mode context user pointer.  This will be
     1679 *                              relocated with the hypervisor guest mapping if
     1680 *                              the unsigned integer value is 0x10000 or above.
     1681 * @param   pfnWriteCallbackRC  Callback for handling writes, RC. Optional.
     1682 * @param   pfnReadCallbackRC   Callback for handling reads, RC. Optional.
     1683 * @param   pfnFillCallbackRC   Callback for handling fills, RC. Optional.
     1684 */
     1685VMMR3_INT_DECL(int)  IOMR3MmioExPreRegister(PVM pVM, PPDMDEVINS pDevIns, uint32_t iRegion, RTGCPHYS cbRegion,
     1686                                            uint32_t fFlags, const char *pszDesc,
     1687                                            RTR3PTR pvUserR3,
     1688                                            R3PTRTYPE(PFNIOMMMIOWRITE) pfnWriteCallbackR3,
     1689                                            R3PTRTYPE(PFNIOMMMIOREAD)  pfnReadCallbackR3,
     1690                                            R3PTRTYPE(PFNIOMMMIOFILL)  pfnFillCallbackR3,
     1691                                            RTR0PTR pvUserR0,
     1692                                            R0PTRTYPE(PFNIOMMMIOWRITE) pfnWriteCallbackR0,
     1693                                            R0PTRTYPE(PFNIOMMMIOREAD)  pfnReadCallbackR0,
     1694                                            R0PTRTYPE(PFNIOMMMIOFILL)  pfnFillCallbackR0,
     1695                                            RTRCPTR pvUserRC,
     1696                                            RCPTRTYPE(PFNIOMMMIOWRITE) pfnWriteCallbackRC,
     1697                                            RCPTRTYPE(PFNIOMMMIOREAD)  pfnReadCallbackRC,
     1698                                            RCPTRTYPE(PFNIOMMMIOFILL)  pfnFillCallbackRC)
     1699{
     1700    LogFlow(("IOMR3MmioExPreRegister: pDevIns=%p iRegion=%u cbRegion=%RGp fFlags=%#x pszDesc=%s\n"
     1701             "                        pvUserR3=%RHv pfnWriteCallbackR3=%RHv pfnReadCallbackR3=%RHv pfnFillCallbackR3=%RHv\n"
     1702             "                        pvUserR0=%RHv pfnWriteCallbackR0=%RHv pfnReadCallbackR0=%RHv pfnFillCallbackR0=%RHv\n"
     1703             "                        pvUserRC=%RRv pfnWriteCallbackRC=%RRv pfnReadCallbackRC=%RRv pfnFillCallbackRC=%RRv\n",
     1704             pDevIns, iRegion, cbRegion, fFlags, pszDesc,
     1705             pvUserR3, pfnWriteCallbackR3, pfnReadCallbackR3, pfnFillCallbackR3,
     1706             pvUserR0, pfnWriteCallbackR0, pfnReadCallbackR0, pfnFillCallbackR0,
     1707             pvUserRC, pfnWriteCallbackRC, pfnReadCallbackRC, pfnFillCallbackRC));
     1708
     1709    /*
     1710     * Validate input.
     1711     */
     1712    AssertReturn(cbRegion > 0,  VERR_INVALID_PARAMETER);
     1713    AssertReturn(RT_ALIGN_T(cbRegion, X86_PAGE_SIZE, RTGCPHYS), VERR_INVALID_PARAMETER);
     1714    AssertMsgReturn(   !(fFlags & ~IOMMMIO_FLAGS_VALID_MASK)
     1715                    && (fFlags & IOMMMIO_FLAGS_READ_MODE)  <= IOMMMIO_FLAGS_READ_DWORD_QWORD
     1716                    && (fFlags & IOMMMIO_FLAGS_WRITE_MODE) <= IOMMMIO_FLAGS_WRITE_ONLY_DWORD_QWORD,
     1717                    ("%#x\n", fFlags),
     1718                    VERR_INVALID_PARAMETER);
     1719    AssertPtrReturn(pfnWriteCallbackR3, VERR_INVALID_POINTER);
     1720    AssertPtrReturn(pfnReadCallbackR3, VERR_INVALID_POINTER);
     1721
     1722    /*
     1723     * Allocate new range record and initialize it.
     1724     */
     1725    PIOMMMIORANGE pRange;
     1726    int rc = MMHyperAlloc(pVM, sizeof(*pRange), 0, MM_TAG_IOM, (void **)&pRange);
     1727    if (RT_SUCCESS(rc))
     1728    {
     1729        pRange->Core.Key            = NIL_RTGCPHYS;
     1730        pRange->Core.KeyLast        = NIL_RTGCPHYS;
     1731        pRange->GCPhys              = NIL_RTGCPHYS;
     1732        pRange->cb                  = cbRegion;
     1733        pRange->cRefs               = 1; /* The PGM reference. */
     1734        pRange->fFlags              = fFlags;
     1735
     1736        pRange->pvUserR3            = pvUserR3;
     1737        pRange->pDevInsR3           = pDevIns;
     1738        pRange->pfnReadCallbackR3   = pfnReadCallbackR3;
     1739        pRange->pfnWriteCallbackR3  = pfnWriteCallbackR3;
     1740        pRange->pfnFillCallbackR3   = pfnFillCallbackR3;
     1741        pRange->pszDesc             = pszDesc;
     1742
     1743        if (pfnReadCallbackR0 || pfnWriteCallbackR0 || pfnFillCallbackR0)
     1744        {
     1745            pRange->pvUserR0            = pvUserR0;
     1746            pRange->pDevInsR0           = MMHyperCCToR0(pVM, pDevIns);
     1747            pRange->pfnReadCallbackR0   = pfnReadCallbackR0;
     1748            pRange->pfnWriteCallbackR0  = pfnWriteCallbackR0;
     1749            pRange->pfnFillCallbackR0   = pfnFillCallbackR0;
     1750        }
     1751
     1752        if (pfnReadCallbackRC || pfnWriteCallbackRC || pfnFillCallbackRC)
     1753        {
     1754            pRange->pvUserRC            = pvUserRC;
     1755            pRange->pDevInsRC           = MMHyperCCToRC(pVM, pDevIns);
     1756            pRange->pfnReadCallbackRC   = pfnReadCallbackRC;
     1757            pRange->pfnWriteCallbackRC  = pfnWriteCallbackRC;
     1758            pRange->pfnFillCallbackRC   = pfnFillCallbackRC;
     1759        }
     1760
     1761        /*
     1762         * Try register it with PGM.  PGM will call us back when it's mapped in
     1763         * and out of the guest address space, and once it's destroyed.
     1764         */
     1765        rc = PGMR3PhysMMIOExPreRegister(pVM, pDevIns, iRegion, cbRegion, pVM->iom.s.hMmioHandlerType,
     1766                                        pRange, MMHyperR3ToR0(pVM, pRange), MMHyperR3ToRC(pVM, pRange), pszDesc);
     1767        if (RT_SUCCESS(rc))
     1768            return VINF_SUCCESS;
     1769
     1770        MMHyperFree(pVM, pRange);
     1771    }
     1772    if (pDevIns->iInstance > 0)
     1773        MMR3HeapFree((void *)pszDesc);
     1774    return rc;
     1775
     1776}
     1777
     1778
     1779/**
      1780 * Notification from PGM that the pre-registered MMIO region has been mapped into
      1781 * the guest address space.
     1782 *
     1783 * @returns VBox status code.
     1784 * @param   pVM             Pointer to the cross context VM structure.
     1785 * @param   pvUser          The pvUserR3 argument of PGMR3PhysMMIOExPreRegister.
     1786 * @param   GCPhys          The mapping address.
     1787 * @remarks Called while owning the PGM lock.
     1788 */
     1789VMMR3_INT_DECL(int) IOMR3MmioExNotifyMapped(PVM pVM, void *pvUser, RTGCPHYS GCPhys)
     1790{
     1791    PIOMMMIORANGE pRange = (PIOMMMIORANGE)pvUser;
     1792    AssertReturn(pRange->GCPhys == NIL_RTGCPHYS, VERR_IOM_MMIO_IPE_1);
     1793
     1794    IOM_LOCK_EXCL(pVM);
     1795    Assert(pRange->GCPhys == NIL_RTGCPHYS);
     1796    pRange->GCPhys       = GCPhys;
     1797    pRange->Core.Key     = GCPhys;
     1798    pRange->Core.KeyLast = GCPhys + pRange->cb - 1;
     1799    if (RTAvlroGCPhysInsert(&pVM->iom.s.pTreesR3->MMIOTree, &pRange->Core))
     1800    {
     1801        iomR3FlushCache(pVM);
     1802        IOM_UNLOCK_EXCL(pVM);
     1803        return VINF_SUCCESS;
     1804    }
     1805    IOM_UNLOCK_EXCL(pVM);
     1806
     1807    AssertLogRelMsgFailed(("RTAvlroGCPhysInsert failed on %RGp..%RGp - %s\n", pRange->Core.Key, pRange->Core.KeyLast, pRange->pszDesc));
     1808    pRange->GCPhys       = NIL_RTGCPHYS;
     1809    pRange->Core.Key     = NIL_RTGCPHYS;
     1810    pRange->Core.KeyLast = NIL_RTGCPHYS;
     1811    return VERR_IOM_MMIO_IPE_2;
     1812}
     1813
     1814
     1815/**
      1816 * Notification from PGM that the pre-registered MMIO region has been unmapped
      1817 * from the guest address space.
     1818 *
     1819 * @param   pVM             Pointer to the cross context VM structure.
     1820 * @param   pvUser          The pvUserR3 argument of PGMR3PhysMMIOExPreRegister.
     1821 * @param   GCPhys          The mapping address.
     1822 * @remarks Called while owning the PGM lock.
     1823 */
     1824VMMR3_INT_DECL(void) IOMR3MmioExNotifyUnmapped(PVM pVM, void *pvUser, RTGCPHYS GCPhys)
     1825{
     1826    PIOMMMIORANGE pRange = (PIOMMMIORANGE)pvUser;
     1827    AssertLogRelReturnVoid(pRange->GCPhys == GCPhys);
     1828
     1829    IOM_LOCK_EXCL(pVM);
     1830    Assert(pRange->GCPhys == GCPhys);
     1831    PIOMMMIORANGE pRemoved = (PIOMMMIORANGE)RTAvlroGCPhysRemove(&pVM->iom.s.pTreesR3->MMIOTree, GCPhys);
     1832    if (pRemoved == pRange)
     1833    {
     1834        pRange->GCPhys       = NIL_RTGCPHYS;
     1835        pRange->Core.Key     = NIL_RTGCPHYS;
     1836        pRange->Core.KeyLast = NIL_RTGCPHYS;
     1837        iomR3FlushCache(pVM);
     1838        IOM_UNLOCK_EXCL(pVM);
     1839    }
     1840    else
     1841    {
     1842        if (pRemoved)
     1843            RTAvlroGCPhysInsert(&pVM->iom.s.pTreesR3->MMIOTree, &pRemoved->Core);
     1844        IOM_UNLOCK_EXCL(pVM);
     1845        AssertLogRelMsgFailed(("RTAvlroGCPhysRemove returned %p instead of %p for %RGp (%s)\n", pRemoved, pRange, pRange->pszDesc));
     1846    }
     1847}
     1848
     1849
     1850/**
      1851 * Notification from PGM that the pre-registered MMIO region has been deregistered
      1852 * and will not be used again.
     1853 *
     1854 * @param   pVM             Pointer to the cross context VM structure.
     1855 * @param   pvUser          The pvUserR3 argument of PGMR3PhysMMIOExPreRegister.
     1857 * @remarks Called while owning the PGM lock.
     1858 */
     1859VMMR3_INT_DECL(void) IOMR3MmioExNotifyDeregistered(PVM pVM, void *pvUser)
     1860{
     1861    PIOMMMIORANGE pRange = (PIOMMMIORANGE)pvUser;
     1862    AssertLogRelReturnVoid(pRange->GCPhys == NIL_RTGCPHYS);
     1863    iomMmioReleaseRange(pVM, pRange);
     1864}
     1865
     1866
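The three notification entry points above are only ever invoked by PGM while it owns the PGM lock. As a simplified sketch (not literal code from this changeset), their ordering matches the PGMPhys.cpp changes further down:

    /* Lifecycle of a pre-registered MMIO range (sketch only): */
    IOMR3MmioExPreRegister(...);                       /* device construction: IOM range created, GCPhys = NIL  */
    IOMR3MmioExNotifyMapped(pVM, pvUserR3, GCPhys);    /* PGMR3PhysMMIOExMap: range inserted into the MMIO tree  */
    IOMR3MmioExNotifyUnmapped(pVM, pvUserR3, GCPhys);  /* PGMR3PhysMMIOExUnmap: range removed, GCPhys back to NIL */
    IOMR3MmioExNotifyDeregistered(pVM, pvUserR3);      /* PGMR3PhysMMIOExDeregister: IOM range record released   */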
     1867/**
    16561868 * Handles the unlikely and probably fatal merge cases.
    16571869 *
  • trunk/src/VBox/VMM/VMMR3/PDM.cpp

    r62643 r64115  
    739739        pdmR3ThreadDestroyDevice(pVM, pDevIns);
    740740        PDMR3QueueDestroyDevice(pVM, pDevIns);
    741         PGMR3PhysMMIO2Deregister(pVM, pDevIns, UINT32_MAX);
     741        PGMR3PhysMMIOExDeregister(pVM, pDevIns, UINT32_MAX);
    742742#ifdef VBOX_WITH_PDM_ASYNC_COMPLETION
    743743        pdmR3AsyncCompletionTemplateDestroyDevice(pVM, pDevIns);
  • trunk/src/VBox/VMM/VMMR3/PDMDevHlp.cpp

    r63685 r64115  
    444444
    445445/**
    446  * @copydoc PDMDEVHLPR3::pfnMMIO2Deregister
     446 * @interface_method_impl{PDMDEVHLPR3,pfnMMIOExPreRegister}
    447447 */
    448 static DECLCALLBACK(int) pdmR3DevHlp_MMIO2Deregister(PPDMDEVINS pDevIns, uint32_t iRegion)
     448static DECLCALLBACK(int)
     449pdmR3DevHlp_MMIOExPreRegister(PPDMDEVINS pDevIns, uint32_t iRegion, RTGCPHYS cbRegion, uint32_t fFlags, const char *pszDesc,
     450                              RTHCPTR pvUser, PFNIOMMMIOWRITE pfnWrite, PFNIOMMMIOREAD pfnRead, PFNIOMMMIOFILL pfnFill,
     451                              RTR0PTR pvUserR0, const char *pszWriteR0, const char *pszReadR0, const char *pszFillR0,
     452                              RTRCPTR pvUserRC, const char *pszWriteRC, const char *pszReadRC, const char *pszFillRC)
     453{
     454    PDMDEV_ASSERT_DEVINS(pDevIns);
     455    PVM pVM = pDevIns->Internal.s.pVMR3;
     456    VM_ASSERT_EMT(pVM);
     457    LogFlow(("pdmR3DevHlp_MMIOExPreRegister: caller='%s'/%d: iRegion=%#x cbRegion=%#RGp fFlags=%RX32 pszDesc=%p:{%s}\n"
     458             "                               pvUser=%p pfnWrite=%p pfnRead=%p pfnFill=%p\n"
     459             "                               pvUserR0=%p pszWriteR0=%s pszReadR0=%s pszFillR0=%s\n"
     460             "                               pvUserRC=%p pszWriteRC=%s pszReadRC=%s pszFillRC=%s\n",
     461             pDevIns->pReg->szName, pDevIns->iInstance, iRegion, cbRegion, fFlags, pszDesc, pszDesc,
     462             pvUser, pfnWrite, pfnRead, pfnFill,
     463             pvUserR0, pszWriteR0, pszReadR0, pszFillR0,
     464             pvUserRC, pszWriteRC, pszReadRC, pszFillRC));
     465
     466    /*
     467     * Resolve the functions.
     468     */
     469    AssertLogRelReturn(   (!pszWriteR0 && !pszReadR0 && !pszFillR0)
     470                       || (pDevIns->pReg->szR0Mod[0] && (pDevIns->pReg->fFlags & PDM_DEVREG_FLAGS_R0)),
     471                       VERR_INVALID_PARAMETER);
     472    AssertLogRelReturn(   (!pszWriteRC && !pszReadRC && !pszFillRC)
     473                       || (pDevIns->pReg->szRCMod[0] && (pDevIns->pReg->fFlags & PDM_DEVREG_FLAGS_RC)),
     474                       VERR_INVALID_PARAMETER);
     475
     476    /* Ring-0 */
     477    int rc;
     478    R0PTRTYPE(PFNIOMMMIOWRITE) pfnWriteR0 = 0;
     479    if (pszWriteR0)
     480    {
     481        rc = pdmR3DevGetSymbolR0Lazy(pDevIns, pszWriteR0, &pfnWriteR0);
     482        AssertLogRelMsgRCReturn(rc, ("pszWriteR0=%s rc=%Rrc\n", pszWriteR0, rc), rc);
     483    }
     484
     485    R0PTRTYPE(PFNIOMMMIOREAD) pfnReadR0 = 0;
     486    if (pszReadR0)
     487    {
     488        rc = pdmR3DevGetSymbolR0Lazy(pDevIns, pszReadR0, &pfnReadR0);
     489        AssertLogRelMsgRCReturn(rc, ("pszReadR0=%s rc=%Rrc\n", pszReadR0, rc), rc);
     490    }
     491    R0PTRTYPE(PFNIOMMMIOFILL) pfnFillR0 = 0;
     492    if (pszFillR0)
     493    {
     494        rc = pdmR3DevGetSymbolR0Lazy(pDevIns, pszFillR0, &pfnFillR0);
     495        AssertLogRelMsgRCReturn(rc, ("pszFillR0=%s rc=%Rrc\n", pszFillR0, rc), rc);
     496    }
     497
     498    /* Raw-mode */
     499    rc = VINF_SUCCESS;
     500    RCPTRTYPE(PFNIOMMMIOWRITE) pfnWriteRC = 0;
     501    if (pszWriteRC)
     502    {
     503        rc = pdmR3DevGetSymbolRCLazy(pDevIns, pszWriteRC, &pfnWriteRC);
     504        AssertLogRelMsgRCReturn(rc, ("pszWriteRC=%s rc=%Rrc\n", pszWriteRC, rc), rc);
     505    }
     506
     507    RCPTRTYPE(PFNIOMMMIOREAD) pfnReadRC = 0;
     508    if (pszReadRC)
     509    {
     510        rc = pdmR3DevGetSymbolRCLazy(pDevIns, pszReadRC, &pfnReadRC);
     511        AssertLogRelMsgRCReturn(rc, ("pszReadRC=%s rc=%Rrc\n", pszReadRC, rc), rc);
     512    }
     513    RCPTRTYPE(PFNIOMMMIOFILL) pfnFillRC = 0;
     514    if (pszFillRC)
     515    {
     516        rc = pdmR3DevGetSymbolRCLazy(pDevIns, pszFillRC, &pfnFillRC);
     517        AssertLogRelMsgRCReturn(rc, ("pszFillRC=%s rc=%Rrc\n", pszFillRC, rc), rc);
     518    }
     519
     520    /*
     521     * Call IOM to make the registration.
     522     */
     523    rc = IOMR3MmioExPreRegister(pVM, pDevIns, iRegion, cbRegion, fFlags, pszDesc,
     524                                pvUser,   pfnWrite,   pfnRead,   pfnFill,
     525                                pvUserR0, pfnWriteR0, pfnReadR0, pfnFillR0,
     526                                pvUserRC, pfnWriteRC, pfnReadRC, pfnFillRC);
     527
     528    LogFlow(("pdmR3DevHlp_MMIOExPreRegister: caller='%s'/%d: returns %Rrc\n", pDevIns->pReg->szName, pDevIns->iInstance, rc));
     529    return rc;
     530}
     531
     532
     533/**
     534 * @copydoc PDMDEVHLPR3::pfnMMIOExDeregister
     535 */
     536static DECLCALLBACK(int) pdmR3DevHlp_MMIOExDeregister(PPDMDEVINS pDevIns, uint32_t iRegion)
    449537{
    450538    PDMDEV_ASSERT_DEVINS(pDevIns);
    451539    VM_ASSERT_EMT(pDevIns->Internal.s.pVMR3);
    452     LogFlow(("pdmR3DevHlp_MMIO2Deregister: caller='%s'/%d: iRegion=%#x\n",
     540    LogFlow(("pdmR3DevHlp_MMIOExDeregister: caller='%s'/%d: iRegion=%#x\n",
    453541             pDevIns->pReg->szName, pDevIns->iInstance, iRegion));
    454542
    455543    AssertReturn(iRegion <= UINT8_MAX || iRegion == UINT32_MAX, VERR_INVALID_PARAMETER);
    456544
    457     int rc = PGMR3PhysMMIO2Deregister(pDevIns->Internal.s.pVMR3, pDevIns, iRegion);
    458 
    459     LogFlow(("pdmR3DevHlp_MMIO2Deregister: caller='%s'/%d: returns %Rrc\n", pDevIns->pReg->szName, pDevIns->iInstance, rc));
     545    int rc = PGMR3PhysMMIOExDeregister(pDevIns->Internal.s.pVMR3, pDevIns, iRegion);
     546
     547    LogFlow(("pdmR3DevHlp_MMIOExDeregister: caller='%s'/%d: returns %Rrc\n", pDevIns->pReg->szName, pDevIns->iInstance, rc));
    460548    return rc;
    461549}
     
    463551
    464552/**
    465  * @copydoc PDMDEVHLPR3::pfnMMIO2Map
     553 * @copydoc PDMDEVHLPR3::pfnMMIOExMap
    466554 */
    467 static DECLCALLBACK(int) pdmR3DevHlp_MMIO2Map(PPDMDEVINS pDevIns, uint32_t iRegion, RTGCPHYS GCPhys)
     555static DECLCALLBACK(int) pdmR3DevHlp_MMIOExMap(PPDMDEVINS pDevIns, uint32_t iRegion, RTGCPHYS GCPhys)
    468556{
    469557    PDMDEV_ASSERT_DEVINS(pDevIns);
    470558    VM_ASSERT_EMT(pDevIns->Internal.s.pVMR3);
    471     LogFlow(("pdmR3DevHlp_MMIO2Map: caller='%s'/%d: iRegion=%#x GCPhys=%#RGp\n",
     559    LogFlow(("pdmR3DevHlp_MMIOExMap: caller='%s'/%d: iRegion=%#x GCPhys=%#RGp\n",
    472560             pDevIns->pReg->szName, pDevIns->iInstance, iRegion, GCPhys));
    473561
    474     int rc = PGMR3PhysMMIO2Map(pDevIns->Internal.s.pVMR3, pDevIns, iRegion, GCPhys);
    475 
    476     LogFlow(("pdmR3DevHlp_MMIO2Map: caller='%s'/%d: returns %Rrc\n", pDevIns->pReg->szName, pDevIns->iInstance, rc));
     562    int rc = PGMR3PhysMMIOExMap(pDevIns->Internal.s.pVMR3, pDevIns, iRegion, GCPhys);
     563
     564    LogFlow(("pdmR3DevHlp_MMIOExMap: caller='%s'/%d: returns %Rrc\n", pDevIns->pReg->szName, pDevIns->iInstance, rc));
    477565    return rc;
    478566}
     
    480568
    481569/**
    482  * @copydoc PDMDEVHLPR3::pfnMMIO2Unmap
     570 * @copydoc PDMDEVHLPR3::pfnMMIOExUnmap
    483571 */
    484 static DECLCALLBACK(int) pdmR3DevHlp_MMIO2Unmap(PPDMDEVINS pDevIns, uint32_t iRegion, RTGCPHYS GCPhys)
     572static DECLCALLBACK(int) pdmR3DevHlp_MMIOExUnmap(PPDMDEVINS pDevIns, uint32_t iRegion, RTGCPHYS GCPhys)
    485573{
    486574    PDMDEV_ASSERT_DEVINS(pDevIns);
    487575    VM_ASSERT_EMT(pDevIns->Internal.s.pVMR3);
    488     LogFlow(("pdmR3DevHlp_MMIO2Unmap: caller='%s'/%d: iRegion=%#x GCPhys=%#RGp\n",
     576    LogFlow(("pdmR3DevHlp_MMIOExUnmap: caller='%s'/%d: iRegion=%#x GCPhys=%#RGp\n",
    489577             pDevIns->pReg->szName, pDevIns->iInstance, iRegion, GCPhys));
    490578
    491     int rc = PGMR3PhysMMIO2Unmap(pDevIns->Internal.s.pVMR3, pDevIns, iRegion, GCPhys);
    492 
    493     LogFlow(("pdmR3DevHlp_MMIO2Unmap: caller='%s'/%d: returns %Rrc\n", pDevIns->pReg->szName, pDevIns->iInstance, rc));
     579    int rc = PGMR3PhysMMIOExUnmap(pDevIns->Internal.s.pVMR3, pDevIns, iRegion, GCPhys);
     580
     581    LogFlow(("pdmR3DevHlp_MMIOExUnmap: caller='%s'/%d: returns %Rrc\n", pDevIns->pReg->szName, pDevIns->iInstance, rc));
    494582    return rc;
    495583}
     
    36213709    pdmR3DevHlp_MMIODeregister,
    36223710    pdmR3DevHlp_MMIO2Register,
    3623     pdmR3DevHlp_MMIO2Deregister,
    3624     pdmR3DevHlp_MMIO2Map,
    3625     pdmR3DevHlp_MMIO2Unmap,
     3711    pdmR3DevHlp_MMIOExDeregister,
     3712    pdmR3DevHlp_MMIOExMap,
     3713    pdmR3DevHlp_MMIOExUnmap,
    36263714    pdmR3DevHlp_MMHyperMapMMIO2,
    36273715    pdmR3DevHlp_MMIO2MapKernel,
     
    36993787    pdmR3DevHlp_VMGetSuspendReason,
    37003788    pdmR3DevHlp_VMGetResumeReason,
    3701     0,
     3789    pdmR3DevHlp_MMIOExPreRegister,
    37023790    0,
    37033791    0,
     
    38743962    pdmR3DevHlp_MMIODeregister,
    38753963    pdmR3DevHlp_MMIO2Register,
    3876     pdmR3DevHlp_MMIO2Deregister,
    3877     pdmR3DevHlp_MMIO2Map,
    3878     pdmR3DevHlp_MMIO2Unmap,
     3964    pdmR3DevHlp_MMIOExDeregister,
     3965    pdmR3DevHlp_MMIOExMap,
     3966    pdmR3DevHlp_MMIOExUnmap,
    38793967    pdmR3DevHlp_MMHyperMapMMIO2,
    38803968    pdmR3DevHlp_MMIO2MapKernel,
     
    39524040    pdmR3DevHlp_VMGetSuspendReason,
    39534041    pdmR3DevHlp_VMGetResumeReason,
    3954     0,
     4042    pdmR3DevHlp_MMIOExPreRegister,
    39554043    0,
    39564044    0,
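Device code reaches the new pre-registration through the pfnMMIOExPreRegister table entry added above; ring-0 and raw-mode callbacks are passed as symbol names and resolved lazily by pdmR3DevHlp_MMIOExPreRegister. A sketch of a device constructor using it — pThis, cbRegion, the callbacks and the R0/RC symbol names are assumed for illustration, not part of this changeset:

    /* Pre-register region 0 with ring-3 function pointers and lazily resolved
       R0/RC symbols; pass NULL symbol names to skip a context. */
    int rc = pDevIns->pHlpR3->pfnMMIOExPreRegister(pDevIns, 0 /*iRegion*/, cbRegion,
                                                   IOMMMIO_FLAGS_READ_DWORD_QWORD | IOMMMIO_FLAGS_WRITE_ONLY_DWORD_QWORD,
                                                   "ExampleDev-BAR0",
                                                   pThis,       devMmioWrite,     devMmioRead,     NULL,
                                                   NIL_RTR0PTR, "devMmioWriteR0", "devMmioReadR0", NULL,
                                                   NIL_RTRCPTR, "devMmioWriteRC", "devMmioReadRC", NULL);
    AssertRCReturn(rc, rc);

Mapping the region at a guest-physical address is a separate step done via pfnMMIOExMap/pfnMMIOExUnmap, typically from the PCI region-map callback.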
  • trunk/src/VBox/VMM/VMMR3/PDMDevMiscHlp.cpp

    r62643 r64115  
    579579}
    580580
    581 /** @interface_method_impl{PDMPCIHLPR3,pfnIsMMIO2Base} */
     581/** @interface_method_impl{PDMPCIHLPR3,pfnIsMMIOExBase} */
    582582static DECLCALLBACK(bool) pdmR3PciHlp_IsMMIO2Base(PPDMDEVINS pDevIns, PPDMDEVINS pOwner, RTGCPHYS GCPhys)
    583583{
    584584    PDMDEV_ASSERT_DEVINS(pDevIns);
    585585    VM_ASSERT_EMT(pDevIns->Internal.s.pVMR3);
    586     bool fRc = PGMR3PhysMMIO2IsBase(pDevIns->Internal.s.pVMR3, pOwner, GCPhys);
    587     Log4(("pdmR3PciHlp_IsMMIO2Base: pOwner=%p GCPhys=%RGp -> %RTbool\n", pOwner, GCPhys, fRc));
     586    bool fRc = PGMR3PhysMMIOExIsBase(pDevIns->Internal.s.pVMR3, pOwner, GCPhys);
     587    Log4(("pdmR3PciHlp_IsMMIOExBase: pOwner=%p GCPhys=%RGp -> %RTbool\n", pOwner, GCPhys, fRc));
    588588    return fRc;
    589589}
  • trunk/src/VBox/VMM/VMMR3/PGM.cpp

    r62644 r64115  
    23762376     * be mapped and thus not included in the above exercise.
    23772377     */
    2378     for (PPGMMMIO2RANGE pCur = pVM->pgm.s.pMmio2RangesR3; pCur; pCur = pCur->pNextR3)
     2378    for (PPGMREGMMIORANGE pCur = pVM->pgm.s.pRegMmioRangesR3; pCur; pCur = pCur->pNextR3)
    23792379        if (!(pCur->RamRange.fFlags & PGM_RAM_RANGE_FLAGS_FLOATING))
    23802380            pCur->RamRange.pSelfRC = MMHyperCCToRC(pVM, &pCur->RamRange);
  • trunk/src/VBox/VMM/VMMR3/PGMPhys.cpp

    r63560 r64115  
    14981498
    14991499/**
    1500  * Relocate a floating RAM range.
    1501  *
    1502  * @copydoc FNPGMRELOCATE
     1500 * @callbackmethodimpl{FNPGMRELOCATE, Relocate a floating RAM range.}
     1501 * @sa pgmR3PhysMMIO2ExRangeRelocate
    15031502 */
    15041503static DECLCALLBACK(bool) pgmR3PhysRamRangeRelocate(PVM pVM, RTGCPTR GCPtrOld, RTGCPTR GCPtrNew,
     
    24452444 * @param   iRegion         The region.
    24462445 */
    2447 DECLINLINE(PPGMMMIO2RANGE) pgmR3PhysMMIO2Find(PVM pVM, PPDMDEVINS pDevIns, uint32_t iRegion)
    2448 {
    2449     /*
    2450      * Search the list.
    2451      */
    2452     for (PPGMMMIO2RANGE pCur = pVM->pgm.s.pMmio2RangesR3; pCur; pCur = pCur->pNextR3)
     2446DECLINLINE(PPGMREGMMIORANGE) pgmR3PhysMMIOExFind(PVM pVM, PPDMDEVINS pDevIns, uint32_t iRegion)
     2447{
     2448    /*
     2449     * Search the list.  There shouldn't be many entries.
     2450     */
     2451    for (PPGMREGMMIORANGE pCur = pVM->pgm.s.pRegMmioRangesR3; pCur; pCur = pCur->pNextR3)
    24532452        if (   pCur->pDevInsR3 == pDevIns
    24542453            && pCur->iRegion == iRegion)
     
    24592458
    24602459/**
    2461  * Allocate and register an MMIO2 region.
    2462  *
    2463  * As mentioned elsewhere, MMIO2 is just RAM spelled differently.  It's RAM
    2464  * associated with a device. It is also non-shared memory with a permanent
    2465  * ring-3 mapping and page backing (presently).
    2466  *
    2467  * A MMIO2 range may overlap with base memory if a lot of RAM is configured for
    2468  * the VM, in which case we'll drop the base memory pages.  Presently we will
    2469  * make no attempt to preserve anything that happens to be present in the base
    2470  * memory that is replaced, this is of course incorrect but it's too much
    2471  * effort.
     2460 * @callbackmethodimpl{FNPGMRELOCATE, Relocate a floating MMIO/MMIO2 range.}
     2461 * @sa pgmR3PhysRamRangeRelocate
     2462 */
     2463static DECLCALLBACK(bool) pgmR3PhysMMIOExRangeRelocate(PVM pVM, RTGCPTR GCPtrOld, RTGCPTR GCPtrNew,
     2464                                                       PGMRELOCATECALL enmMode, void *pvUser)
     2465{
     2466    PPGMREGMMIORANGE pMmio = (PPGMREGMMIORANGE)pvUser;
     2467    Assert(pMmio->RamRange.fFlags & PGM_RAM_RANGE_FLAGS_FLOATING);
     2468    Assert(pMmio->RamRange.pSelfRC == GCPtrOld + PAGE_SIZE + RT_UOFFSETOF(PGMREGMMIORANGE, RamRange)); RT_NOREF_PV(GCPtrOld);
     2469
     2470    switch (enmMode)
     2471    {
     2472        case PGMRELOCATECALL_SUGGEST:
     2473            return true;
     2474
     2475        case PGMRELOCATECALL_RELOCATE:
     2476        {
     2477            /*
     2478             * Update myself, then relink all the ranges and flush the RC TLB.
     2479             */
     2480            pgmLock(pVM);
     2481
     2482            pMmio->RamRange.pSelfRC = (RTRCPTR)(GCPtrNew + PAGE_SIZE + RT_UOFFSETOF(PGMREGMMIORANGE, RamRange));
     2483
     2484            pgmR3PhysRelinkRamRanges(pVM);
     2485            for (unsigned i = 0; i < PGM_RAMRANGE_TLB_ENTRIES; i++)
     2486                pVM->pgm.s.apRamRangesTlbRC[i] = NIL_RTRCPTR;
     2487
     2488            pgmUnlock(pVM);
     2489            return true;
     2490        }
     2491
     2492        default:
     2493            AssertFailedReturn(false);
     2494    }
     2495}
     2496
     2497
     2498/**
     2499 * Worker for PGMR3PhysMMIOExPreRegister & PGMR3PhysMMIO2Register that allocates
      2500 * the PGMREGMMIORANGE structure and does basic initialization.
     2501 *
      2502 * Caller must set type-specific members and initialize the PGMPAGE structures.
     2503 *
     2504 * @returns VBox status code.
     2505 * @param   pVM             The cross context VM structure.
     2506 * @param   pDevIns         The device instance owning the region.
     2507 * @param   iRegion         The region number.  If the MMIO2 memory is a PCI
     2508 *                          I/O region this number has to be the number of that
     2509 *                          region. Otherwise it can be any number safe
      2510 *                          region. Otherwise it can be any number except
     2511 * @param   cb              The size of the region.  Must be page aligned.
     2512 * @param   pszDesc         The description.
     2513 * @param   ppNew           Where to return the pointer to the registration.
     2514 *
     2515 * @thread  EMT
     2516 */
     2517static int pgmR3PhysMMIOExCreate(PVM pVM, PPDMDEVINS pDevIns, uint32_t iRegion, RTGCPHYS cb, const char *pszDesc,
     2518                                 PPGMREGMMIORANGE *ppNew)
     2519{
     2520    /*
     2521     * We currently do a single RAM range for the whole thing.  This will
     2522     * probably have to change once someone needs really large MMIO regions,
     2523     * as we will be running into SUPR3PageAllocEx limitations and such.
     2524     */
     2525    const uint32_t   cPages  = cb >> X86_PAGE_SHIFT;
     2526    const size_t     cbRange = RT_OFFSETOF(PGMREGMMIORANGE, RamRange.aPages[cPages]);
     2527    PPGMREGMMIORANGE pNew    = NULL;
     2528    if (cb >= _2G)
     2529    {
     2530        /*
     2531         * Allocate memory for the registration structure.
     2532         */
     2533        size_t const cChunkPages  = RT_ALIGN_Z(cbRange, PAGE_SIZE) >> PAGE_SHIFT;
     2534        size_t const cbChunk      = (1 + cChunkPages + 1) << PAGE_SHIFT;
     2535        AssertLogRelReturn(cbChunk == (uint32_t)cbChunk, VERR_OUT_OF_RANGE);
     2536        PSUPPAGE     paChunkPages = (PSUPPAGE)RTMemTmpAllocZ(sizeof(SUPPAGE) * cChunkPages);
     2537        AssertReturn(paChunkPages, VERR_NO_TMP_MEMORY);
     2538        RTR0PTR      R0PtrChunk   = NIL_RTR0PTR;
     2539        void        *pvChunk      = NULL;
     2540        int rc = SUPR3PageAllocEx(cChunkPages, 0 /*fFlags*/, &pvChunk,
     2541#if defined(VBOX_WITH_MORE_RING0_MEM_MAPPINGS)
     2542                                  &R0PtrChunk,
     2543#elif defined(VBOX_WITH_2X_4GB_ADDR_SPACE)
     2544                                  HMIsEnabled(pVM) ? &R0PtrChunk : NULL,
     2545#else
     2546                                  NULL,
     2547#endif
     2548                                  paChunkPages);
     2549        AssertLogRelMsgRCReturnStmt(rc, ("rc=%Rrc, cChunkPages=%#zx\n", rc, cChunkPages), RTMemTmpFree(paChunkPages), rc);
     2550
     2551#if defined(VBOX_WITH_MORE_RING0_MEM_MAPPINGS)
     2552        Assert(R0PtrChunk != NIL_RTR0PTR);
     2553#elif defined(VBOX_WITH_2X_4GB_ADDR_SPACE)
     2554        if (!HMIsEnabled(pVM))
     2555            R0PtrChunk = NIL_RTR0PTR;
     2556#else
     2557        R0PtrChunk = (uintptr_t)pvChunk;
     2558#endif
     2559        memset(pvChunk, 0, cChunkPages << PAGE_SHIFT);
     2560
     2561        pNew = (PPGMREGMMIORANGE)pvChunk;
     2562        pNew->RamRange.fFlags   = PGM_RAM_RANGE_FLAGS_FLOATING;
     2563        pNew->RamRange.pSelfR0  = R0PtrChunk + RT_OFFSETOF(PGMREGMMIORANGE, RamRange);
     2564
     2565        /*
     2566         * If we might end up in raw-mode, make a HMA mapping of the range,
     2567         * just like we do for memory above 4GB.
     2568         */
     2569        if (HMIsEnabled(pVM))
     2570            pNew->RamRange.pSelfRC  = NIL_RTRCPTR;
     2571        else
     2572        {
     2573            RTGCPTR         GCPtrChunkMap = pVM->pgm.s.GCPtrPrevRamRangeMapping - cbChunk;
     2574            RTGCPTR const   GCPtrChunk    = GCPtrChunkMap + PAGE_SIZE;
     2575            rc = PGMR3MapPT(pVM, GCPtrChunkMap, (uint32_t)cbChunk, 0 /*fFlags*/, pgmR3PhysMMIOExRangeRelocate, pNew, pszDesc);
     2576            if (RT_SUCCESS(rc))
     2577            {
     2578                pVM->pgm.s.GCPtrPrevRamRangeMapping = GCPtrChunkMap;
     2579
     2580                RTGCPTR GCPtrPage  = GCPtrChunk;
     2581                for (uint32_t iPage = 0; iPage < cChunkPages && RT_SUCCESS(rc); iPage++, GCPtrPage += PAGE_SIZE)
     2582                    rc = PGMMap(pVM, GCPtrPage, paChunkPages[iPage].Phys, PAGE_SIZE, 0);
     2583            }
     2584            if (RT_FAILURE(rc))
     2585            {
     2586                SUPR3PageFreeEx(pvChunk, cChunkPages);
     2587                return rc;
     2588            }
     2589            pNew->RamRange.pSelfRC  = GCPtrChunk + RT_OFFSETOF(PGMREGMMIORANGE, RamRange);
     2590        }
     2591    }
     2592    /*
     2593     * Not so big, do a one time hyper allocation.
     2594     */
     2595    else
     2596    {
     2597        int rc = MMR3HyperAllocOnceNoRel(pVM, cbRange, 0, MM_TAG_PGM_PHYS, (void **)&pNew);
     2598        AssertLogRelMsgRC(rc, ("cbRange=%zu\n", cbRange));
     2599
     2600        /*
     2601         * Initialize allocation specific items.
     2602         */
     2603        //pNew->RamRange.fFlags = 0;
     2604        pNew->RamRange.pSelfR0  = MMHyperCCToR0(pVM, &pNew->RamRange);
     2605        pNew->RamRange.pSelfRC  = MMHyperCCToRC(pVM, &pNew->RamRange);
     2606    }
     2607
     2608    /*
     2609     * Initialize the registration structure (caller does specific bits).
     2610     */
     2611    pNew->pDevInsR3             = pDevIns;
     2612    //pNew->pvR3                = NULL;
     2613    //pNew->pNext               = NULL;
     2614    //pNew->fMmio2              = false;
     2615    //pNew->fMapped             = false;
     2616    //pNew->fOverlapping        = false;
     2617    pNew->iRegion               = iRegion;
     2618    pNew->idSavedState          = UINT8_MAX;
     2619    pNew->idMmio2               = UINT8_MAX;
     2620    //pNew->pPhysHandlerR3      = NULL;
     2621    //pNew->paLSPages           = NULL;
     2622    pNew->RamRange.GCPhys       = NIL_RTGCPHYS;
     2623    pNew->RamRange.GCPhysLast   = NIL_RTGCPHYS;
     2624    pNew->RamRange.pszDesc      = pszDesc;
     2625    pNew->RamRange.cb           = cb;
     2626    pNew->RamRange.fFlags      |= PGM_RAM_RANGE_FLAGS_AD_HOC_MMIO_EX;
     2627    //pNew->RamRange.pvR3       = NULL;
     2628    //pNew->RamRange.paLSPages  = NULL;
     2629
     2630    *ppNew = pNew;
     2631    return VINF_SUCCESS;
     2632}
     2633
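pgmR3PhysMMIOExCreate above switches from the one-time hypervisor heap allocation to a dedicated SUPR3PageAllocEx chunk once the region reaches 2 GB, because the per-page tracking array becomes too large for the hyper heap. A rough illustration, assuming a 16-byte PGMPAGE entry (an assumption used here for arithmetic, not stated in the changeset):

    /* Size of the tracking structure for a 64 GB pre-registered MMIO region. */
    RTGCPHYS const cb      = 64ULL * _1G;
    uint32_t const cPages  = cb >> X86_PAGE_SHIFT;                                  /* 16M pages */
    size_t   const cbRange = RT_OFFSETOF(PGMREGMMIORANGE, RamRange.aPages[cPages]);
    /* With a 16-byte PGMPAGE this is roughly 256 MB, which is why such ranges are
       mapped as floating chunks instead of going through MMR3HyperAllocOnceNoRel. */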
     2634
     2635/**
      2636 * Common worker for PGMR3PhysMMIOExPreRegister & PGMR3PhysMMIO2Register that links
     2637 * a complete registration entry into the lists and lookup tables.
     2638 *
     2639 * @param   pVM             The cross context VM structure.
     2640 * @param   pNew            The new MMIO / MMIO2 registration to link.
     2641 */
     2642static void pgmR3PhysMMIOExLink(PVM pVM, PPGMREGMMIORANGE pNew)
     2643{
     2644    /*
     2645     * Link it into the list.
     2646     * Since there is no particular order, just push it.
     2647     */
     2648    pgmLock(pVM);
     2649
     2650    pNew->pNextR3 = pVM->pgm.s.pRegMmioRangesR3;
     2651    pVM->pgm.s.pRegMmioRangesR3 = pNew;
     2652
     2653    uint8_t idMmio2 = pNew->idMmio2;
     2654    if (idMmio2 != UINT8_MAX)
     2655    {
     2656        Assert(pNew->fMmio2);
     2657        Assert(pVM->pgm.s.apMmio2RangesR3[idMmio2 - 1] == NULL);
     2658        Assert(pVM->pgm.s.apMmio2RangesR0[idMmio2 - 1] == NIL_RTR0PTR);
     2659        pVM->pgm.s.apMmio2RangesR3[idMmio2 - 1] = pNew;
     2660        pVM->pgm.s.apMmio2RangesR0[idMmio2 - 1] = MMHyperCCToR0(pVM, pNew);
     2661    }
     2662    else
     2663        Assert(!pNew->fMmio2);
     2664
     2665    pgmPhysInvalidatePageMapTLB(pVM);
     2666    pgmUnlock(pVM);
     2667}
     2668
     2669
     2670/**
     2671 * Allocate and pre-register an MMIO region.
     2672 *
     2673 * This is currently the way to deal with large MMIO regions.  It may in the
     2674 * future be extended to be the way we deal with all MMIO regions, but that
     2675 * means we'll have to do something about the simple list based approach we take
     2676 * to tracking the registrations.
    24722677 *
    24732678 * @returns VBox status code.
     
    24872692 *                          the memory.
    24882693 * @param   pszDesc         The description.
     2694 *
     2695 * @thread  EMT
     2696 *
     2697 * @sa      PGMR3PhysMMIORegister, PGMR3PhysMMIO2Register,
     2698 *          PGMR3PhysMMIOExMap, PGMR3PhysMMIOExUnmap, PGMR3PhysMMIOExDeregister.
     2699 */
     2700VMMR3DECL(int) PGMR3PhysMMIOExPreRegister(PVM pVM, PPDMDEVINS pDevIns, uint32_t iRegion, RTGCPHYS cb, PGMPHYSHANDLERTYPE hType,
     2701                                          RTR3PTR pvUserR3, RTR0PTR pvUserR0, RTRCPTR pvUserRC, const char *pszDesc)
     2702{
     2703    /*
     2704     * Validate input.
     2705     */
     2706    VM_ASSERT_EMT_RETURN(pVM, VERR_VM_THREAD_NOT_EMT);
     2707    AssertPtrReturn(pDevIns, VERR_INVALID_PARAMETER);
     2708    AssertReturn(iRegion <= UINT8_MAX, VERR_INVALID_PARAMETER);
     2709    AssertPtrReturn(pszDesc, VERR_INVALID_POINTER);
     2710    AssertReturn(*pszDesc, VERR_INVALID_PARAMETER);
     2711    AssertReturn(pgmR3PhysMMIOExFind(pVM, pDevIns, iRegion) == NULL, VERR_ALREADY_EXISTS);
     2712    AssertReturn(!(cb & PAGE_OFFSET_MASK), VERR_INVALID_PARAMETER);
     2713    AssertReturn(cb, VERR_INVALID_PARAMETER);
     2714
     2715    const uint32_t cPages = cb >> PAGE_SHIFT;
     2716    AssertLogRelReturn(((RTGCPHYS)cPages << PAGE_SHIFT) == cb, VERR_INVALID_PARAMETER);
     2717    AssertLogRelReturn(cPages <= PGM_MMIO2_MAX_PAGE_COUNT, VERR_OUT_OF_RANGE);
     2718
     2719    /*
     2720     * For the 2nd+ instance, mangle the description string so it's unique.
     2721     */
     2722    if (pDevIns->iInstance > 0) /** @todo Move to PDMDevHlp.cpp and use a real string cache. */
     2723    {
     2724        pszDesc = MMR3HeapAPrintf(pVM, MM_TAG_PGM_PHYS, "%s [%u]", pszDesc, pDevIns->iInstance);
     2725        if (!pszDesc)
     2726            return VERR_NO_MEMORY;
     2727    }
     2728
     2729    /*
     2730     * Register the MMIO callbacks.
     2731     */
     2732    PPGMPHYSHANDLER pPhysHandler;
     2733    int rc = pgmHandlerPhysicalExCreate(pVM, hType, pvUserR3, pvUserR0, pvUserRC, pszDesc, &pPhysHandler);
     2734    if (RT_SUCCESS(rc))
     2735    {
     2736        /*
     2737         * Create the registered MMIO range record for it.
     2738         */
     2739        PPGMREGMMIORANGE pNew;
     2740        rc = pgmR3PhysMMIOExCreate(pVM, pDevIns, iRegion, cb, pszDesc, &pNew);
     2741        if (RT_SUCCESS(rc))
     2742        {
     2743            pNew->fMmio2         = false;
     2744            pNew->pPhysHandlerR3 = pPhysHandler;
     2745
     2746            uint32_t iPage = cPages;
     2747            while (iPage-- > 0)
     2748            {
     2749                PGM_PAGE_INIT_ZERO(&pNew->RamRange.aPages[iPage], pVM, PGMPAGETYPE_MMIO);
     2750            }
     2751
     2752            /*
     2753             * Update the page count stats, link the registration and we're done.
     2754             */
     2755            pVM->pgm.s.cAllPages += cPages;
     2756            pVM->pgm.s.cPureMmioPages += cPages;
     2757
     2758            pgmR3PhysMMIOExLink(pVM, pNew);
     2759            return VINF_SUCCESS;
     2760        }
     2761
     2762        pgmHandlerPhysicalExDestroy(pVM, pPhysHandler);
     2763    }
     2764    return rc;
     2765}
     2766
     2767
     2768/**
     2769 * Allocate and register an MMIO2 region.
     2770 *
     2771 * As mentioned elsewhere, MMIO2 is just RAM spelled differently.  It's RAM
     2772 * associated with a device. It is also non-shared memory with a permanent
     2773 * ring-3 mapping and page backing (presently).
     2774 *
     2775 * A MMIO2 range may overlap with base memory if a lot of RAM is configured for
     2776 * the VM, in which case we'll drop the base memory pages.  Presently we will
     2777 * make no attempt to preserve anything that happens to be present in the base
     2778 * memory that is replaced, this is of course incorrect but it's too much
     2779 * effort.
     2780 *
     2781 * @returns VBox status code.
     2782 * @retval  VINF_SUCCESS on success, *ppv pointing to the R3 mapping of the
     2783 *          memory.
     2784 * @retval  VERR_ALREADY_EXISTS if the region already exists.
     2785 *
     2786 * @param   pVM             The cross context VM structure.
     2787 * @param   pDevIns         The device instance owning the region.
     2788 * @param   iRegion         The region number.  If the MMIO2 memory is a PCI
     2789 *                          I/O region this number has to be the number of that
      2790 *                          region. Otherwise it can be any number except
     2791 *                          UINT8_MAX.
     2792 * @param   cb              The size of the region.  Must be page aligned.
     2793 * @param   fFlags          Reserved for future use, must be zero.
     2794 * @param   ppv             Where to store the pointer to the ring-3 mapping of
     2795 *                          the memory.
     2796 * @param   pszDesc         The description.
     2797 * @thread  EMT
    24892798 */
    24902799VMMR3DECL(int) PGMR3PhysMMIO2Register(PVM pVM, PPDMDEVINS pDevIns, uint32_t iRegion, RTGCPHYS cb, uint32_t fFlags,
     
    25002809    AssertPtrReturn(pszDesc, VERR_INVALID_POINTER);
    25012810    AssertReturn(*pszDesc, VERR_INVALID_PARAMETER);
    2502     AssertReturn(pgmR3PhysMMIO2Find(pVM, pDevIns, iRegion) == NULL, VERR_ALREADY_EXISTS);
     2811    AssertReturn(pgmR3PhysMMIOExFind(pVM, pDevIns, iRegion) == NULL, VERR_ALREADY_EXISTS);
    25032812    AssertReturn(!(cb & PAGE_OFFSET_MASK), VERR_INVALID_PARAMETER);
    25042813    AssertReturn(cb, VERR_INVALID_PARAMETER);
     
    25072816    const uint32_t cPages = cb >> PAGE_SHIFT;
    25082817    AssertLogRelReturn(((RTGCPHYS)cPages << PAGE_SHIFT) == cb, VERR_INVALID_PARAMETER);
    2509     AssertLogRelReturn(cPages <= PGM_MMIO2_MAX_PAGE_COUNT, VERR_NO_MEMORY);
     2818    AssertLogRelReturn(cPages <= PGM_MMIO2_MAX_PAGE_COUNT, VERR_OUT_OF_RANGE);
    25102819
    25112820    /*
     
    25502859
    25512860                /*
    2552                  * Create the MMIO2 range record for it.
     2861                 * Create the registered MMIO range record for it.
    25532862                 */
    2554                 const size_t cbRange = RT_OFFSETOF(PGMMMIO2RANGE, RamRange.aPages[cPages]);
    2555                 PPGMMMIO2RANGE pNew;
    2556                 rc = MMR3HyperAllocOnceNoRel(pVM, cbRange, 0, MM_TAG_PGM_PHYS, (void **)&pNew);
    2557                 AssertLogRelMsgRC(rc, ("cbRamRange=%zu\n", cbRange));
     2863                PPGMREGMMIORANGE pNew;
     2864                rc = pgmR3PhysMMIOExCreate(pVM, pDevIns, iRegion, cb, pszDesc, &pNew);
    25582865                if (RT_SUCCESS(rc))
    25592866                {
    2560                     pNew->pDevInsR3             = pDevIns;
    2561                     pNew->pvR3                  = pvPages;
    2562                     //pNew->pNext               = NULL;
    2563                     //pNew->fMapped             = false;
    2564                     //pNew->fOverlapping        = false;
    2565                     pNew->iRegion               = iRegion;
    2566                     pNew->idSavedState          = UINT8_MAX;
    2567                     pNew->idMmio2               = idMmio2;
    2568                     pNew->RamRange.pSelfR0      = MMHyperCCToR0(pVM, &pNew->RamRange);
    2569                     pNew->RamRange.pSelfRC      = MMHyperCCToRC(pVM, &pNew->RamRange);
    2570                     pNew->RamRange.GCPhys       = NIL_RTGCPHYS;
    2571                     pNew->RamRange.GCPhysLast   = NIL_RTGCPHYS;
    2572                     pNew->RamRange.pszDesc      = pszDesc;
    2573                     pNew->RamRange.cb           = cb;
    2574                     pNew->RamRange.fFlags       = PGM_RAM_RANGE_FLAGS_AD_HOC_MMIO2;
    2575                     pNew->RamRange.pvR3         = pvPages;
    2576                     //pNew->RamRange.paLSPages    = NULL;
     2867                    pNew->pvR3      = pvPages;
     2868                    pNew->idMmio2   = idMmio2;
     2869                    pNew->fMmio2    = true;
    25772870
    25782871                    uint32_t iPage = cPages;
     
    25852878                    }
    25862879
    2587                     /* update page count stats */
    2588                     pVM->pgm.s.cAllPages     += cPages;
     2880                    RTMemTmpFree(paPages);
     2881
     2882                    /*
     2883                     * Update the page count stats, link the registration and we're done.
     2884                     */
     2885                    pVM->pgm.s.cAllPages += cPages;
    25892886                    pVM->pgm.s.cPrivatePages += cPages;
    25902887
    2591                     /*
    2592                      * Link it into the list.
    2593                      * Since there is no particular order, just push it.
    2594                      */
    2595                     /** @todo we can save us the linked list now, just search the lookup table... */
    2596                     pgmLock(pVM);
    2597                     Assert(pVM->pgm.s.apMmio2RangesR3[idMmio2 - 1] == NULL);
    2598                     Assert(pVM->pgm.s.apMmio2RangesR0[idMmio2 - 1] == NIL_RTR0PTR);
    2599                     pNew->pNextR3 = pVM->pgm.s.pMmio2RangesR3;
    2600                     pVM->pgm.s.pMmio2RangesR3 = pNew;
    2601                     pVM->pgm.s.apMmio2RangesR3[idMmio2 - 1] = pNew;
    2602                     pVM->pgm.s.apMmio2RangesR0[idMmio2 - 1] = MMHyperCCToR0(pVM, pNew);
    2603                     pgmUnlock(pVM);
     2888                    pgmR3PhysMMIOExLink(pVM, pNew);
    26042889
    26052890                    *ppv = pvPages;
    2606                     RTMemTmpFree(paPages);
    2607                     pgmPhysInvalidatePageMapTLB(pVM);
    26082891                    return VINF_SUCCESS;
    26092892                }
     
    26222905
    26232906/**
    2624  * Deregisters and frees an MMIO2 region.
      2907 * Deregisters and frees an MMIO2 region or a pre-registered MMIO region.
    26252908 *
    26262909 * Any physical (and virtual) access handlers registered for the region must
     
    26322915 * @param   iRegion         The region. If it's UINT32_MAX it'll be a wildcard match.
    26332916 */
    2634 VMMR3DECL(int) PGMR3PhysMMIO2Deregister(PVM pVM, PPDMDEVINS pDevIns, uint32_t iRegion)
     2917VMMR3DECL(int) PGMR3PhysMMIOExDeregister(PVM pVM, PPDMDEVINS pDevIns, uint32_t iRegion)
    26352918{
    26362919    /*
     
    26442927    int rc = VINF_SUCCESS;
    26452928    unsigned cFound = 0;
    2646     PPGMMMIO2RANGE pPrev = NULL;
    2647     PPGMMMIO2RANGE pCur = pVM->pgm.s.pMmio2RangesR3;
     2929    PPGMREGMMIORANGE pPrev = NULL;
     2930    PPGMREGMMIORANGE pCur = pVM->pgm.s.pRegMmioRangesR3;
    26482931    while (pCur)
    26492932    {
     
    26592942            if (pCur->fMapped)
    26602943            {
    2661                 int rc2 = PGMR3PhysMMIO2Unmap(pVM, pCur->pDevInsR3, pCur->iRegion, pCur->RamRange.GCPhys);
     2944                int rc2 = PGMR3PhysMMIOExUnmap(pVM, pCur->pDevInsR3, pCur->iRegion, pCur->RamRange.GCPhys);
    26622945                AssertRC(rc2);
    26632946                if (RT_FAILURE(rc2) && RT_SUCCESS(rc))
     
    26662949
    26672950            /*
     2951             * Must tell IOM about MMIO.
     2952             */
     2953            if (!pCur->fMmio2)
     2954                IOMR3MmioExNotifyDeregistered(pVM, pCur->pPhysHandlerR3->pvUserR3);
     2955
     2956            /*
    26682957             * Unlink it
    26692958             */
    2670             PPGMMMIO2RANGE pNext = pCur->pNextR3;
     2959            PPGMREGMMIORANGE pNext = pCur->pNextR3;
    26712960            if (pPrev)
    26722961                pPrev->pNextR3 = pNext;
    26732962            else
    2674                 pVM->pgm.s.pMmio2RangesR3 = pNext;
     2963                pVM->pgm.s.pRegMmioRangesR3 = pNext;
    26752964            pCur->pNextR3 = NULL;
    26762965
    26772966            uint8_t idMmio2 = pCur->idMmio2;
    2678             Assert(pVM->pgm.s.apMmio2RangesR3[idMmio2 - 1] == pCur);
    2679             pVM->pgm.s.apMmio2RangesR3[idMmio2 - 1] = NULL;
    2680             pVM->pgm.s.apMmio2RangesR0[idMmio2 - 1] = NIL_RTR0PTR;
     2967            if (idMmio2 != UINT8_MAX)
     2968            {
     2969                Assert(pVM->pgm.s.apMmio2RangesR3[idMmio2 - 1] == pCur);
     2970                pVM->pgm.s.apMmio2RangesR3[idMmio2 - 1] = NULL;
     2971                pVM->pgm.s.apMmio2RangesR0[idMmio2 - 1] = NIL_RTR0PTR;
     2972            }
    26812973
    26822974            /*
    26832975             * Free the memory.
    26842976             */
    2685             int rc2 = SUPR3PageFreeEx(pCur->pvR3, pCur->RamRange.cb >> PAGE_SHIFT);
    2686             AssertRC(rc2);
    2687             if (RT_FAILURE(rc2) && RT_SUCCESS(rc))
    2688                 rc = rc2;
    2689 
    26902977            uint32_t const cPages = pCur->RamRange.cb >> PAGE_SHIFT;
    2691             rc2 = MMR3AdjustFixedReservation(pVM, -(int32_t)cPages, pCur->RamRange.pszDesc);
    2692             AssertRC(rc2);
    2693             if (RT_FAILURE(rc2) && RT_SUCCESS(rc))
    2694                 rc = rc2;
     2978            if (pCur->fMmio2)
     2979            {
     2980                int rc2 = SUPR3PageFreeEx(pCur->pvR3, cPages);
     2981                AssertRC(rc2);
     2982                if (RT_FAILURE(rc2) && RT_SUCCESS(rc))
     2983                    rc = rc2;
     2984
     2985                rc2 = MMR3AdjustFixedReservation(pVM, -(int32_t)cPages, pCur->RamRange.pszDesc);
     2986                AssertRC(rc2);
     2987                if (RT_FAILURE(rc2) && RT_SUCCESS(rc))
     2988                    rc = rc2;
     2989            }
    26952990
    26962991            /* we're leaking hyper memory here if done at runtime. */
     
    27063001                      , ("%s\n", VMR3GetStateName(enmState)));
    27073002#endif
    2708             /*rc = MMHyperFree(pVM, pCur);
    2709             AssertRCReturn(rc, rc); - not safe, see the alloc call. */
     3003
     3004            const bool fMmio2 = pCur->fMmio2;
     3005            if (pCur->RamRange.fFlags & PGM_RAM_RANGE_FLAGS_FLOATING)
     3006            {
     3007                const size_t    cbRange     = RT_OFFSETOF(PGMREGMMIORANGE, RamRange.aPages[cPages]);
     3008                size_t const    cChunkPages = RT_ALIGN_Z(cbRange, PAGE_SIZE) >> PAGE_SHIFT;
     3009                SUPR3PageFreeEx(pCur, cChunkPages);
     3010            }
     3011            /*else
     3012            {
     3013                rc = MMHyperFree(pVM, pCur); - does not work, see the alloc call.
     3014                AssertRCReturn(rc, rc);
     3015            } */
    27103016
    27113017
    27123018            /* update page count stats */
    2713             pVM->pgm.s.cAllPages     -= cPages;
    2714             pVM->pgm.s.cPrivatePages -= cPages;
     3019            pVM->pgm.s.cAllPages -= cPages;
     3020            if (fMmio2)
     3021                pVM->pgm.s.cPrivatePages -= cPages;
     3022            else
     3023                pVM->pgm.s.cPureMmioPages -= cPages;
    27153024
    27163025            /* next */
     
    27303039
    27313040/**
    2732  * Maps a MMIO2 region.
     3041 * Maps a MMIO2 region or a pre-registered MMIO region.
    27333042 *
    27343043 * This is done when a guest / the bios / state loading changes the
     
    27433052 * @param   GCPhys          The guest-physical address to be remapped.
    27443053 */
    2745 VMMR3DECL(int) PGMR3PhysMMIO2Map(PVM pVM, PPDMDEVINS pDevIns, uint32_t iRegion, RTGCPHYS GCPhys)
     3054VMMR3DECL(int) PGMR3PhysMMIOExMap(PVM pVM, PPDMDEVINS pDevIns, uint32_t iRegion, RTGCPHYS GCPhys)
    27463055{
    27473056    /*
     
    27553064    AssertReturn(!(GCPhys & PAGE_OFFSET_MASK), VERR_INVALID_PARAMETER);
    27563065
    2757     PPGMMMIO2RANGE pCur = pgmR3PhysMMIO2Find(pVM, pDevIns, iRegion);
     3066    PPGMREGMMIORANGE pCur = pgmR3PhysMMIOExFind(pVM, pDevIns, iRegion);
    27583067    AssertReturn(pCur, VERR_NOT_FOUND);
    27593068    AssertReturn(!pCur->fMapped, VERR_WRONG_ORDER);
     
    27653074
    27663075    /*
    2767      * Find our location in the ram range list, checking for
    2768      * restriction we don't bother implementing yet (partially overlapping).
    2769      */
      3076     * Find our location in the ram range list, checking for restrictions
     3077     * we don't bother implementing yet (partially overlapping).
     3078     */
     3079    pgmLock(pVM);
     3080
    27703081    bool fRamExists = false;
    27713082    PPGMRAMRANGE pRamPrev = NULL;
     
    27763087            &&  GCPhysLast >= pRam->GCPhys)
    27773088        {
    2778             /* completely within? */
    2779             AssertLogRelMsgReturn(   GCPhys     >= pRam->GCPhys
    2780                                   && GCPhysLast <= pRam->GCPhysLast,
    2781                                   ("%RGp-%RGp (MMIO2/%s) falls partly outside %RGp-%RGp (%s)\n",
    2782                                    GCPhys, GCPhysLast, pCur->RamRange.pszDesc,
    2783                                    pRam->GCPhys, pRam->GCPhysLast, pRam->pszDesc),
    2784                                   VERR_PGM_RAM_CONFLICT);
     3089            /* Completely within? */
     3090            AssertLogRelMsgReturnStmt(   GCPhys     >= pRam->GCPhys
     3091                                      && GCPhysLast <= pRam->GCPhysLast,
     3092                                      ("%RGp-%RGp (MMIO2/%s) falls partly outside %RGp-%RGp (%s)\n",
     3093                                       GCPhys, GCPhysLast, pCur->RamRange.pszDesc,
     3094                                       pRam->GCPhys, pRam->GCPhysLast, pRam->pszDesc),
     3095                                      pgmUnlock(pVM),
     3096                                      VERR_PGM_RAM_CONFLICT);
     3097
     3098            /* Check that all the pages are RAM pages. */
     3099            PPGMPAGE pPage = &pRam->aPages[(GCPhys - pRam->GCPhys) >> PAGE_SHIFT];
     3100            uint32_t cPagesLeft = pCur->RamRange.cb >> PAGE_SHIFT;
     3101            while (cPagesLeft-- > 0)
     3102            {
     3103                AssertLogRelMsgReturnStmt(PGM_PAGE_GET_TYPE(pPage) == PGMPAGETYPE_RAM,
     3104                                          ("%RGp isn't a RAM page (%d) - mapping %RGp-%RGp (MMIO2/%s).\n",
     3105                                           GCPhys, PGM_PAGE_GET_TYPE(pPage), GCPhys, GCPhysLast, pCur->RamRange.pszDesc),
     3106                                          pgmUnlock(pVM),
     3107                                          VERR_PGM_RAM_CONFLICT);
     3108                pPage++;
     3109            }
     3110
    27853111            fRamExists = true;
    27863112            break;
     
    27913117        pRam = pRam->pNextR3;
    27923118    }
    2793     if (fRamExists)
    2794     {
    2795         PPGMPAGE pPage = &pRam->aPages[(GCPhys - pRam->GCPhys) >> PAGE_SHIFT];
    2796         uint32_t cPagesLeft = pCur->RamRange.cb >> PAGE_SHIFT;
    2797         while (cPagesLeft-- > 0)
    2798         {
    2799             AssertLogRelMsgReturn(PGM_PAGE_GET_TYPE(pPage) == PGMPAGETYPE_RAM,
    2800                                   ("%RGp isn't a RAM page (%d) - mapping %RGp-%RGp (MMIO2/%s).\n",
    2801                                    GCPhys, PGM_PAGE_GET_TYPE(pPage), GCPhys, GCPhysLast, pCur->RamRange.pszDesc),
    2802                                   VERR_PGM_RAM_CONFLICT);
    2803             pPage++;
    2804         }
    2805     }
    2806     Log(("PGMR3PhysMMIO2Map: %RGp-%RGp fRamExists=%RTbool %s\n",
    2807          GCPhys, GCPhysLast, fRamExists, pCur->RamRange.pszDesc));
     3119    Log(("PGMR3PhysMMIOExMap: %RGp-%RGp fRamExists=%RTbool %s\n", GCPhys, GCPhysLast, fRamExists, pCur->RamRange.pszDesc));
     3120
    28083121
    28093122    /*
    28103123     * Make the changes.
    28113124     */
    2812     pgmLock(pVM);
    2813 
    28143125    pCur->RamRange.GCPhys = GCPhys;
    28153126    pCur->RamRange.GCPhysLast = GCPhysLast;
    2816     pCur->fMapped = true;
    2817     pCur->fOverlapping = fRamExists;
    2818 
    28193127    if (fRamExists)
    28203128    {
    2821 /** @todo use pgmR3PhysFreePageRange here. */
    2822         uint32_t            cPendingPages = 0;
    2823         PGMMFREEPAGESREQ    pReq;
    2824         int rc = GMMR3FreePagesPrepare(pVM, &pReq, PGMPHYS_FREE_PAGE_BATCH_SIZE, GMMACCOUNT_BASE);
    2825         AssertLogRelRCReturn(rc, rc);
    2826 
    2827         /* replace the pages, freeing all present RAM pages. */
    2828         PPGMPAGE pPageSrc = &pCur->RamRange.aPages[0];
    2829         PPGMPAGE pPageDst = &pRam->aPages[(GCPhys - pRam->GCPhys) >> PAGE_SHIFT];
    2830         uint32_t cPagesLeft = pCur->RamRange.cb >> PAGE_SHIFT;
    2831         while (cPagesLeft-- > 0)
    2832         {
    2833             rc = pgmPhysFreePage(pVM, pReq, &cPendingPages, pPageDst, GCPhys);
    2834             AssertLogRelRCReturn(rc, rc); /* We're done for if this goes wrong. */
    2835 
    2836             RTHCPHYS const HCPhys = PGM_PAGE_GET_HCPHYS(pPageSrc);
    2837             uint32_t const idPage = PGM_PAGE_GET_PAGEID(pPageSrc);
    2838             PGM_PAGE_SET_PAGEID(pVM, pPageDst, idPage);
    2839             PGM_PAGE_SET_HCPHYS(pVM, pPageDst, HCPhys);
    2840             PGM_PAGE_SET_TYPE(pVM, pPageDst, PGMPAGETYPE_MMIO2);
    2841             PGM_PAGE_SET_STATE(pVM, pPageDst, PGM_PAGE_STATE_ALLOCATED);
    2842             PGM_PAGE_SET_PDE_TYPE(pVM, pPageDst, PGM_PAGE_PDE_TYPE_DONTCARE);
    2843             PGM_PAGE_SET_PTE_INDEX(pVM, pPageDst, 0);
    2844             PGM_PAGE_SET_TRACKING(pVM, pPageDst, 0);
    2845 
    2846             pVM->pgm.s.cZeroPages--;
    2847             GCPhys += PAGE_SIZE;
    2848             pPageSrc++;
    2849             pPageDst++;
     3129        /*
     3130         * Make all the pages in the range MMIO/ZERO pages, freeing any
     3131         * RAM pages currently mapped here. This might not be 100% correct
     3132         * for PCI memory, but we're doing the same thing for MMIO2 pages.
     3133         *
      3134         * We replace these MMIO/ZERO pages with real pages in the MMIO2 case.
     3135         */
     3136        int rc = pgmR3PhysFreePageRange(pVM, pRam, GCPhys, GCPhysLast, PGMPAGETYPE_MMIO);
     3137        AssertRCReturnStmt(rc, pgmUnlock(pVM), rc);
     3138        if (pCur->fMmio2)
     3139        {
     3140            /* replace the pages, freeing all present RAM pages. */
     3141            PPGMPAGE pPageSrc = &pCur->RamRange.aPages[0];
     3142            PPGMPAGE pPageDst = &pRam->aPages[(GCPhys - pRam->GCPhys) >> PAGE_SHIFT];
     3143            uint32_t cPagesLeft = pCur->RamRange.cb >> PAGE_SHIFT;
     3144            while (cPagesLeft-- > 0)
     3145            {
     3146                Assert(PGM_PAGE_IS_MMIO(pPageDst));
     3147
     3148                RTHCPHYS const HCPhys = PGM_PAGE_GET_HCPHYS(pPageSrc);
     3149                uint32_t const idPage = PGM_PAGE_GET_PAGEID(pPageSrc);
     3150                PGM_PAGE_SET_PAGEID(pVM, pPageDst, idPage);
     3151                PGM_PAGE_SET_HCPHYS(pVM, pPageDst, HCPhys);
     3152                PGM_PAGE_SET_TYPE(pVM, pPageDst, PGMPAGETYPE_MMIO2);
     3153                PGM_PAGE_SET_STATE(pVM, pPageDst, PGM_PAGE_STATE_ALLOCATED);
     3154                PGM_PAGE_SET_PDE_TYPE(pVM, pPageDst, PGM_PAGE_PDE_TYPE_DONTCARE);
     3155                PGM_PAGE_SET_PTE_INDEX(pVM, pPageDst, 0);
     3156                PGM_PAGE_SET_TRACKING(pVM, pPageDst, 0);
     3157
     3158                pVM->pgm.s.cZeroPages--;
     3159                GCPhys += PAGE_SIZE;
     3160                pPageSrc++;
     3161                pPageDst++;
     3162            }
    28503163        }
    28513164
    28523165        /* Flush physical page map TLB. */
    28533166        pgmPhysInvalidatePageMapTLB(pVM);
    2854 
    2855         if (cPendingPages)
    2856         {
    2857             rc = GMMR3FreePagesPerform(pVM, pReq, cPendingPages);
    2858             AssertLogRelRCReturn(rc, rc);
    2859         }
    2860         GMMR3FreePagesCleanup(pReq);
    28613167
    28623168        /* Force a PGM pool flush as guest ram references have been changed. */
     
    28663172        pVCpu->pgm.s.fSyncFlags |= PGM_SYNC_CLEAR_PGM_POOL;
    28673173        VMCPU_FF_SET(pVCpu, VMCPU_FF_PGM_SYNC_CR3);
    2868 
    2869         pgmUnlock(pVM);
    28703174    }
    28713175    else
    28723176    {
    2873 #ifdef VBOX_WITH_REM
    2874         RTGCPHYS cb = pCur->RamRange.cb;
    2875 #endif
    2876 
     3177        /*
     3178         * No RAM range, insert the one prepared during registration.
     3179         */
    28773180        /* Clear the tracking data of pages we're going to reactivate. */
    28783181        PPGMPAGE pPageSrc = &pCur->RamRange.aPages[0];
     
    28873190        /* link in the ram range */
    28883191        pgmR3PhysLinkRamRange(pVM, &pCur->RamRange, pRamPrev);
    2889         pgmUnlock(pVM);
     3192    }
     3193
     3194    /*
     3195     * Register the access handler if plain MMIO.
     3196     */
     3197    if (!pCur->fMmio2)
     3198    {
     3199        int rc = pgmHandlerPhysicalExRegister(pVM, pCur->pPhysHandlerR3, GCPhys, GCPhysLast);
     3200        if (RT_SUCCESS(rc))
     3201        {
     3202            rc = IOMR3MmioExNotifyMapped(pVM, pCur->pPhysHandlerR3->pvUserR3, GCPhys);
     3203            if (RT_FAILURE(rc))
     3204                pgmHandlerPhysicalExDeregister(pVM, pCur->pPhysHandlerR3);
     3205        }
     3206        if (RT_FAILURE(rc))
     3207        {
      3208            /* Almost impossible, but try to clean up properly and get out of here. */
     3209            if (!fRamExists)
     3210                pgmR3PhysUnlinkRamRange2(pVM, &pCur->RamRange, pRamPrev);
     3211            else
     3212            {
     3213                uint32_t cPagesLeft = pCur->RamRange.cb >> PAGE_SHIFT;
     3214                if (pCur->fMmio2)
     3215                    pVM->pgm.s.cZeroPages += cPagesLeft;
     3216
     3217                PPGMPAGE pPageDst = &pRam->aPages[(pCur->RamRange.GCPhys - pRam->GCPhys) >> PAGE_SHIFT];
     3218                while (cPagesLeft-- > 0)
     3219                {
     3220                    PGM_PAGE_INIT_ZERO(pPageDst, pVM, PGMPAGETYPE_RAM);
     3221                    pPageDst++;
     3222                }
     3223            }
     3224            pCur->RamRange.GCPhys     = NIL_RTGCPHYS;
     3225            pCur->RamRange.GCPhysLast = NIL_RTGCPHYS;
     3226
     3227            pgmUnlock(pVM);
     3228            return rc;
     3229        }
     3230    }
     3231
     3232    pCur->fMapped = true;
     3233    pCur->fOverlapping = fRamExists;
     3234    pgmPhysInvalidatePageMapTLB(pVM);
     3235    pgmUnlock(pVM);
    28903236
    28913237#ifdef VBOX_WITH_REM
    2892         REMR3NotifyPhysRamRegister(pVM, GCPhys, cb, REM_NOTIFY_PHYS_RAM_FLAGS_MMIO2);
     3238    /*
     3239     * Inform REM without holding the PGM lock.
     3240     */
     3241    if (!fRamExists && pCur->fMmio2)
     3242        REMR3NotifyPhysRamRegister(pVM, GCPhys, pCur->RamRange.cb, REM_NOTIFY_PHYS_RAM_FLAGS_MMIO2);
    28933243#endif
    2894     }
    2895 
    2896     pgmPhysInvalidatePageMapTLB(pVM);
    28973244    return VINF_SUCCESS;
    28983245}
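From a device's perspective the renamed map/unmap helpers replace the old MMIO2 ones one-for-one. A sketch (not part of this changeset) of a PCI region-map callback moving a pre-registered region when the guest reprograms the BAR; GCPhysOld and GCPhysNew are assumed locals:

    int rc = VINF_SUCCESS;
    /* Unmap from the old address first, then map at the new one. */
    if (GCPhysOld != NIL_RTGCPHYS)
        rc = pDevIns->pHlpR3->pfnMMIOExUnmap(pDevIns, 0 /*iRegion*/, GCPhysOld);
    if (RT_SUCCESS(rc) && GCPhysNew != NIL_RTGCPHYS)
        rc = pDevIns->pHlpR3->pfnMMIOExMap(pDevIns, 0 /*iRegion*/, GCPhysNew);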
     
    29003247
    29013248/**
    2902  * Unmaps a MMIO2 region.
     3249 * Unmaps a MMIO2 or a pre-registered MMIO region.
    29033250 *
    29043251 * This is done when a guest / the bios / state loading changes the
     
    29063253 * as during registration, of course.
    29073254 */
    2908 VMMR3DECL(int) PGMR3PhysMMIO2Unmap(PVM pVM, PPDMDEVINS pDevIns, uint32_t iRegion, RTGCPHYS GCPhys)
     3255VMMR3DECL(int) PGMR3PhysMMIOExUnmap(PVM pVM, PPDMDEVINS pDevIns, uint32_t iRegion, RTGCPHYS GCPhys)
    29093256{
    29103257    /*
     
    29183265    AssertReturn(!(GCPhys & PAGE_OFFSET_MASK), VERR_INVALID_PARAMETER);
    29193266
    2920     PPGMMMIO2RANGE pCur = pgmR3PhysMMIO2Find(pVM, pDevIns, iRegion);
     3267    PPGMREGMMIORANGE pCur = pgmR3PhysMMIOExFind(pVM, pDevIns, iRegion);
    29213268    AssertReturn(pCur, VERR_NOT_FOUND);
    29223269    AssertReturn(pCur->fMapped, VERR_WRONG_ORDER);
     
    29243271    Assert(pCur->RamRange.GCPhysLast != NIL_RTGCPHYS);
    29253272
    2926     Log(("PGMR3PhysMMIO2Unmap: %RGp-%RGp %s\n",
     3273    Log(("PGMR3PhysMMIOExUnmap: %RGp-%RGp %s\n",
    29273274         pCur->RamRange.GCPhys, pCur->RamRange.GCPhysLast, pCur->RamRange.pszDesc));
    29283275
     3276    int rc = pgmLock(pVM);
     3277    AssertRCReturn(rc, rc);
      3278    AssertReturnStmt(pCur->fMapped, pgmUnlock(pVM), VERR_WRONG_ORDER);
     3279
     3280    /*
     3281     * If plain MMIO, we must deregister the handler first.
     3282     */
     3283    if (!pCur->fMmio2)
     3284    {
     3285        rc = pgmHandlerPhysicalExDeregister(pVM, pCur->pPhysHandlerR3);
     3286        AssertRCReturnStmt(rc, pgmUnlock(pVM), rc);
     3287
     3288        IOMR3MmioExNotifyUnmapped(pVM, pCur->pPhysHandlerR3->pvUserR3, GCPhys);
     3289    }
     3290
    29293291    /*
    29303292     * Unmap it.
    29313293     */
    2932     pgmLock(pVM);
    2933 
    29343294#ifdef VBOX_WITH_REM
    2935     RTGCPHYS    GCPhysRangeREM;
    2936     RTGCPHYS    cbRangeREM;
    2937     bool        fInformREM;
     3295    RTGCPHYS        GCPhysRangeREM;
     3296    bool            fInformREM;
    29383297#endif
    29393298    if (pCur->fOverlapping)
     
    29443303            pRam = pRam->pNextR3;
    29453304
     3305        uint32_t cPagesLeft = pCur->RamRange.cb >> PAGE_SHIFT;
     3306        if (pCur->fMmio2)
     3307            pVM->pgm.s.cZeroPages += cPagesLeft;
     3308
    29463309        PPGMPAGE pPageDst = &pRam->aPages[(pCur->RamRange.GCPhys - pRam->GCPhys) >> PAGE_SHIFT];
    2947         uint32_t cPagesLeft = pCur->RamRange.cb >> PAGE_SHIFT;
    29483310        while (cPagesLeft-- > 0)
    29493311        {
    29503312            PGM_PAGE_INIT_ZERO(pPageDst, pVM, PGMPAGETYPE_RAM);
    2951             pVM->pgm.s.cZeroPages++;
    29523313            pPageDst++;
    29533314        }
     
    29573318#ifdef VBOX_WITH_REM
    29583319        GCPhysRangeREM = NIL_RTGCPHYS;  /* shuts up gcc */
    2959         cbRangeREM     = RTGCPHYS_MAX;  /* ditto */
    29603320        fInformREM     = false;
    29613321#endif
     
    29653325#ifdef VBOX_WITH_REM
    29663326        GCPhysRangeREM = pCur->RamRange.GCPhys;
    2967         cbRangeREM     = pCur->RamRange.cb;
    2968         fInformREM     = true;
     3327        fInformREM     = pCur->fMmio2;
    29693328#endif
    29703329        pgmR3PhysUnlinkRamRange(pVM, &pCur->RamRange);
     
    29863345    pgmPhysInvalidatePageMapTLB(pVM);
    29873346    pgmPhysInvalidRamRangeTlbs(pVM);
     3347
    29883348    pgmUnlock(pVM);
    29893349
    29903350#ifdef VBOX_WITH_REM
     3351    /*
     3352     * Inform REM without holding the PGM lock.
     3353     */
    29913354    if (fInformREM)
    2992         REMR3NotifyPhysRamDeregister(pVM, GCPhysRangeREM, cbRangeREM);
     3355        REMR3NotifyPhysRamDeregister(pVM, GCPhysRangeREM, pCur->RamRange.cb);
    29933356#endif
    29943357
     
    29983361
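For orientation, a minimal ring-3 sketch of driving the unmap path above. The device instance, region index and base address are hypothetical caller-supplied values; the base address must match the one used when the region was mapped.

    /* Hedged sketch only: unmapping an MMIO2 / pre-registered MMIO region.
     * pDevIns, iRegion and GCPhysBase are placeholders. */
    #include <VBox/vmm/pgm.h>
    #include <iprt/assert.h>

    static int sketchUnmapRegion(PVM pVM, PPDMDEVINS pDevIns, uint32_t iRegion, RTGCPHYS GCPhysBase)
    {
        /* GCPhysBase must be the same page-aligned address that was passed at map time. */
        int rc = PGMR3PhysMMIOExUnmap(pVM, pDevIns, iRegion, GCPhysBase);
        AssertRC(rc);
        return rc;
    }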
    29993362/**
    3000  * Checks if the given address is an MMIO2 base address or not.
     3363 * Checks if the given address is an MMIO2 or pre-registered MMIO base address
     3364 * or not.
    30013365 *
    30023366 * @returns true/false accordingly.
     
    30053369 * @param   GCPhys          The address to check.
    30063370 */
    3007 VMMR3DECL(bool) PGMR3PhysMMIO2IsBase(PVM pVM, PPDMDEVINS pDevIns, RTGCPHYS GCPhys)
     3371VMMR3DECL(bool) PGMR3PhysMMIOExIsBase(PVM pVM, PPDMDEVINS pDevIns, RTGCPHYS GCPhys)
    30083372{
    30093373    /*
     
    30203384     */
    30213385    pgmLock(pVM);
    3022     for (PPGMMMIO2RANGE pCur = pVM->pgm.s.pMmio2RangesR3; pCur; pCur = pCur->pNextR3)
     3386    for (PPGMREGMMIORANGE pCur = pVM->pgm.s.pRegMmioRangesR3; pCur; pCur = pCur->pNextR3)
    30233387        if (pCur->RamRange.GCPhys == GCPhys)
    30243388        {
     
    30453409 * @param   pHCPhys         Where to store the result.
    30463410 */
    3047 VMMR3DECL(int) PGMR3PhysMMIO2GetHCPhys(PVM pVM, PPDMDEVINS pDevIns, uint32_t iRegion, RTGCPHYS off, PRTHCPHYS pHCPhys)
     3411VMMR3_INT_DECL(int) PGMR3PhysMMIO2GetHCPhys(PVM pVM, PPDMDEVINS pDevIns, uint32_t iRegion, RTGCPHYS off, PRTHCPHYS pHCPhys)
    30483412{
    30493413    /*
     
    30553419
    30563420    pgmLock(pVM);
    3057     PPGMMMIO2RANGE pCur = pgmR3PhysMMIO2Find(pVM, pDevIns, iRegion);
     3421    PPGMREGMMIORANGE pCur = pgmR3PhysMMIOExFind(pVM, pDevIns, iRegion);
    30583422    AssertReturn(pCur, VERR_NOT_FOUND);
     3423    AssertReturn(pCur->fMmio2, VERR_WRONG_TYPE);
    30593424    AssertReturn(off < pCur->RamRange.cb, VERR_INVALID_PARAMETER);
    30603425
     
    30823447 * @param   pR0Ptr      Where to store the R0 address.
    30833448 */
    3084 VMMR3DECL(int) PGMR3PhysMMIO2MapKernel(PVM pVM, PPDMDEVINS pDevIns, uint32_t iRegion, RTGCPHYS off, RTGCPHYS cb,
    3085                                        const char *pszDesc, PRTR0PTR pR0Ptr)
     3449VMMR3_INT_DECL(int) PGMR3PhysMMIO2MapKernel(PVM pVM, PPDMDEVINS pDevIns, uint32_t iRegion, RTGCPHYS off, RTGCPHYS cb,
     3450                                            const char *pszDesc, PRTR0PTR pR0Ptr)
    30863451{
    30873452    /*
     
    30923457    AssertReturn(iRegion <= UINT8_MAX, VERR_INVALID_PARAMETER);
    30933458
    3094     PPGMMMIO2RANGE pCur = pgmR3PhysMMIO2Find(pVM, pDevIns, iRegion);
     3459    PPGMREGMMIORANGE pCur = pgmR3PhysMMIOExFind(pVM, pDevIns, iRegion);
    30953460    AssertReturn(pCur, VERR_NOT_FOUND);
     3461    AssertReturn(pCur->fMmio2, VERR_WRONG_TYPE);
    30963462    AssertReturn(off < pCur->RamRange.cb, VERR_INVALID_PARAMETER);
    30973463    AssertReturn(cb <= pCur->RamRange.cb, VERR_INVALID_PARAMETER);
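Both helpers above now assert fMmio2 and return VERR_WRONG_TYPE for a pre-registered (handler-only) MMIO region, since only MMIO2 has backing pages to resolve or map. A hedged sketch of the two calls, with an illustrative offset, size and description:

    /* Hedged sketch: MMIO2-only lookups.  Offset, size and description are made up. */
    static int sketchMmio2Lookups(PVM pVM, PPDMDEVINS pDevIns, uint32_t iRegion)
    {
        RTHCPHYS HCPhys = NIL_RTHCPHYS;
        int rc = PGMR3PhysMMIO2GetHCPhys(pVM, pDevIns, iRegion, 0 /*off*/, &HCPhys);
        if (RT_SUCCESS(rc))
        {
            RTR0PTR R0Ptr = NIL_RTR0PTR;
            rc = PGMR3PhysMMIO2MapKernel(pVM, pDevIns, iRegion, 0 /*off*/, PAGE_SIZE,
                                         "sketch mapping", &R0Ptr);
        }
        return rc; /* VERR_WRONG_TYPE if the region is plain pre-registered MMIO. */
    }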
  • trunk/src/VBox/VMM/VMMR3/PGMSavedState.cpp

    r62478 r64115  
    642642     */
    643643    pgmLock(pVM);
    644     for (PPGMMMIO2RANGE pMmio2 = pVM->pgm.s.pMmio2RangesR3; pMmio2; pMmio2 = pMmio2->pNextR3)
    645     {
    646         uint32_t const  cPages = pMmio2->RamRange.cb >> PAGE_SHIFT;
    647         pgmUnlock(pVM);
    648 
    649         PPGMLIVESAVEMMIO2PAGE paLSPages = (PPGMLIVESAVEMMIO2PAGE)MMR3HeapAllocZ(pVM, MM_TAG_PGM, sizeof(PGMLIVESAVEMMIO2PAGE) * cPages);
    650         if (!paLSPages)
    651             return VERR_NO_MEMORY;
    652         for (uint32_t iPage = 0; iPage < cPages; iPage++)
    653         {
    654             /* Initialize it as a dirty zero page. */
    655             paLSPages[iPage].fDirty          = true;
    656             paLSPages[iPage].cUnchangedScans = 0;
    657             paLSPages[iPage].fZero           = true;
    658             paLSPages[iPage].u32CrcH1        = PGM_STATE_CRC32_ZERO_HALF_PAGE;
    659             paLSPages[iPage].u32CrcH2        = PGM_STATE_CRC32_ZERO_HALF_PAGE;
    660         }
    661 
    662         pgmLock(pVM);
    663         pMmio2->paLSPages = paLSPages;
    664         pVM->pgm.s.LiveSave.Mmio2.cDirtyPages += cPages;
     644    for (PPGMREGMMIORANGE pRegMmio = pVM->pgm.s.pRegMmioRangesR3; pRegMmio; pRegMmio = pRegMmio->pNextR3)
     645    {
     646        if (pRegMmio->fMmio2)
     647        {
     648            uint32_t const  cPages = pRegMmio->RamRange.cb >> PAGE_SHIFT;
     649            pgmUnlock(pVM);
     650
     651            PPGMLIVESAVEMMIO2PAGE paLSPages = (PPGMLIVESAVEMMIO2PAGE)MMR3HeapAllocZ(pVM, MM_TAG_PGM, sizeof(PGMLIVESAVEMMIO2PAGE) * cPages);
     652            if (!paLSPages)
     653                return VERR_NO_MEMORY;
     654            for (uint32_t iPage = 0; iPage < cPages; iPage++)
     655            {
     656                /* Initialize it as a dirty zero page. */
     657                paLSPages[iPage].fDirty          = true;
     658                paLSPages[iPage].cUnchangedScans = 0;
     659                paLSPages[iPage].fZero           = true;
     660                paLSPages[iPage].u32CrcH1        = PGM_STATE_CRC32_ZERO_HALF_PAGE;
     661                paLSPages[iPage].u32CrcH2        = PGM_STATE_CRC32_ZERO_HALF_PAGE;
     662            }
     663
     664            pgmLock(pVM);
     665            pRegMmio->paLSPages = paLSPages;
     666            pVM->pgm.s.LiveSave.Mmio2.cDirtyPages += cPages;
     667        }
    665668    }
    666669    pgmUnlock(pVM);
     
    680683    pgmLock(pVM);
    681684    uint8_t id = 1;
    682     for (PPGMMMIO2RANGE pMmio2 = pVM->pgm.s.pMmio2RangesR3; pMmio2; pMmio2 = pMmio2->pNextR3, id++)
    683     {
    684         pMmio2->idSavedState = id;
    685         SSMR3PutU8(pSSM, id);
    686         SSMR3PutStrZ(pSSM, pMmio2->pDevInsR3->pReg->szName);
    687         SSMR3PutU32(pSSM, pMmio2->pDevInsR3->iInstance);
    688         SSMR3PutU8(pSSM, pMmio2->iRegion);
    689         SSMR3PutStrZ(pSSM, pMmio2->RamRange.pszDesc);
    690         int rc = SSMR3PutGCPhys(pSSM, pMmio2->RamRange.cb);
    691         if (RT_FAILURE(rc))
    692             break;
     685    for (PPGMREGMMIORANGE pRegMmio = pVM->pgm.s.pRegMmioRangesR3; pRegMmio; pRegMmio = pRegMmio->pNextR3)
     686    {
     687        if (pRegMmio->fMmio2)
     688        {
     689            pRegMmio->idSavedState = id;
     690            SSMR3PutU8(pSSM, id);
     691            SSMR3PutStrZ(pSSM, pRegMmio->pDevInsR3->pReg->szName);
     692            SSMR3PutU32(pSSM, pRegMmio->pDevInsR3->iInstance);
     693            SSMR3PutU8(pSSM, pRegMmio->iRegion);
     694            SSMR3PutStrZ(pSSM, pRegMmio->RamRange.pszDesc);
     695            int rc = SSMR3PutGCPhys(pSSM, pRegMmio->RamRange.cb);
     696            if (RT_FAILURE(rc))
     697                break;
     698            id++;
     699        }
    693700    }
    694701    pgmUnlock(pVM);
     
    709716    PGM_LOCK_ASSERT_OWNER(pVM);
    710717
    711     for (PPGMMMIO2RANGE pMmio2 = pVM->pgm.s.pMmio2RangesR3; pMmio2; pMmio2 = pMmio2->pNextR3)
    712         pMmio2->idSavedState = UINT8_MAX;
     718    for (PPGMREGMMIORANGE pRegMmio = pVM->pgm.s.pRegMmioRangesR3; pRegMmio; pRegMmio = pRegMmio->pNextR3)
     719        if (pRegMmio->fMmio2)
     720            pRegMmio->idSavedState = UINT8_MAX;
    713721
    714722    for (;;)
     
    723731        if (id == UINT8_MAX)
    724732        {
    725             for (PPGMMMIO2RANGE pMmio2 = pVM->pgm.s.pMmio2RangesR3; pMmio2; pMmio2 = pMmio2->pNextR3)
    726                 AssertLogRelMsg(pMmio2->idSavedState != UINT8_MAX, ("%s\n", pMmio2->RamRange.pszDesc));
     733            for (PPGMREGMMIORANGE pRegMmio = pVM->pgm.s.pRegMmioRangesR3; pRegMmio; pRegMmio = pRegMmio->pNextR3)
     734                AssertLogRelMsg(pRegMmio->idSavedState != UINT8_MAX || !pRegMmio->fMmio2, ("%s\n", pRegMmio->RamRange.pszDesc));
    727735            return VINF_SUCCESS;        /* the end */
    728736        }
     
    749757         * Locate a matching MMIO2 range.
    750758         */
    751         PPGMMMIO2RANGE pMmio2;
    752         for (pMmio2 = pVM->pgm.s.pMmio2RangesR3; pMmio2; pMmio2 = pMmio2->pNextR3)
    753         {
    754             if (    pMmio2->idSavedState == UINT8_MAX
    755                 &&  pMmio2->iRegion == iRegion
    756                 &&  pMmio2->pDevInsR3->iInstance == uInstance
    757                 &&  !strcmp(pMmio2->pDevInsR3->pReg->szName, szDevName))
    758             {
    759                 pMmio2->idSavedState = id;
     759        PPGMREGMMIORANGE pRegMmio;
     760        for (pRegMmio = pVM->pgm.s.pRegMmioRangesR3; pRegMmio; pRegMmio = pRegMmio->pNextR3)
     761        {
     762            if (    pRegMmio->idSavedState == UINT8_MAX
     763                &&  pRegMmio->iRegion == iRegion
     764                &&  pRegMmio->pDevInsR3->iInstance == uInstance
     765                &&  pRegMmio->fMmio2
     766                &&  !strcmp(pRegMmio->pDevInsR3->pReg->szName, szDevName))
     767            {
     768                pRegMmio->idSavedState = id;
    760769                break;
    761770            }
    762771        }
    763         if (!pMmio2)
     772        if (!pRegMmio)
    764773            return SSMR3SetCfgError(pSSM, RT_SRC_POS, N_("Failed to locate a MMIO2 range called '%s' owned by %s/%u, region %d"),
    765774                                    szDesc, szDevName, uInstance, iRegion);
     
    769778         * the same.
    770779         */
    771         if (cb != pMmio2->RamRange.cb)
     780        if (cb != pRegMmio->RamRange.cb)
    772781        {
    773782            LogRel(("PGM: MMIO2 region \"%s\" size mismatch: saved=%RGp config=%RGp\n",
    774                     pMmio2->RamRange.pszDesc, cb, pMmio2->RamRange.cb));
    775             if (cb > pMmio2->RamRange.cb) /* bad idea? */
     783                    pRegMmio->RamRange.pszDesc, cb, pRegMmio->RamRange.cb));
     784            if (cb > pRegMmio->RamRange.cb) /* bad idea? */
    776785                return SSMR3SetCfgError(pSSM, RT_SRC_POS, N_("MMIO2 region \"%s\" size mismatch: saved=%RGp config=%RGp"),
    777                                         pMmio2->RamRange.pszDesc, cb, pMmio2->RamRange.cb);
     786                                        pRegMmio->RamRange.pszDesc, cb, pRegMmio->RamRange.cb);
    778787        }
    779788    } /* forever */
     
    873882
    874883    pgmLock(pVM);                       /* paranoia */
    875     for (PPGMMMIO2RANGE pMmio2 = pVM->pgm.s.pMmio2RangesR3; pMmio2; pMmio2 = pMmio2->pNextR3)
    876     {
    877         PPGMLIVESAVEMMIO2PAGE paLSPages = pMmio2->paLSPages;
    878         uint32_t              cPages    = pMmio2->RamRange.cb >> PAGE_SHIFT;
    879         pgmUnlock(pVM);
    880 
    881         for (uint32_t iPage = 0; iPage < cPages; iPage++)
    882         {
    883             uint8_t const *pbPage = (uint8_t const *)pMmio2->pvR3 + iPage * PAGE_SIZE;
    884             pgmR3ScanMmio2Page(pVM, pbPage, &paLSPages[iPage]);
    885         }
    886 
    887         pgmLock(pVM);
    888     }
     884    for (PPGMREGMMIORANGE pRegMmio = pVM->pgm.s.pRegMmioRangesR3; pRegMmio; pRegMmio = pRegMmio->pNextR3)
     885        if (pRegMmio->fMmio2)
     886        {
     887            PPGMLIVESAVEMMIO2PAGE paLSPages = pRegMmio->paLSPages;
     888            uint32_t              cPages    = pRegMmio->RamRange.cb >> PAGE_SHIFT;
     889            pgmUnlock(pVM);
     890
     891            for (uint32_t iPage = 0; iPage < cPages; iPage++)
     892            {
     893                uint8_t const *pbPage = (uint8_t const *)pRegMmio->pvR3 + iPage * PAGE_SIZE;
     894                pgmR3ScanMmio2Page(pVM, pbPage, &paLSPages[iPage]);
     895            }
     896
     897            pgmLock(pVM);
     898        }
    889899    pgmUnlock(pVM);
    890900
     
    913923         */
    914924        pgmLock(pVM);
    915         for (PPGMMMIO2RANGE pMmio2 = pVM->pgm.s.pMmio2RangesR3;
    916              pMmio2 && RT_SUCCESS(rc);
    917              pMmio2 = pMmio2->pNextR3)
    918         {
    919             PPGMLIVESAVEMMIO2PAGE paLSPages = pMmio2->paLSPages;
    920             uint8_t const        *pbPage    = (uint8_t const *)pMmio2->RamRange.pvR3;
    921             uint32_t              cPages    = pMmio2->RamRange.cb >> PAGE_SHIFT;
    922             uint32_t              iPageLast = cPages;
    923             for (uint32_t iPage = 0; iPage < cPages; iPage++, pbPage += PAGE_SIZE)
    924             {
    925                 uint8_t u8Type;
    926                 if (!fLiveSave)
    927                     u8Type = ASMMemIsZeroPage(pbPage) ? PGM_STATE_REC_MMIO2_ZERO : PGM_STATE_REC_MMIO2_RAW;
    928                 else
     925        for (PPGMREGMMIORANGE pRegMmio = pVM->pgm.s.pRegMmioRangesR3;
     926             pRegMmio && RT_SUCCESS(rc);
     927             pRegMmio = pRegMmio->pNextR3)
     928            if (pRegMmio->fMmio2)
     929            {
     930                PPGMLIVESAVEMMIO2PAGE paLSPages = pRegMmio->paLSPages;
     931                uint8_t const        *pbPage    = (uint8_t const *)pRegMmio->RamRange.pvR3;
     932                uint32_t              cPages    = pRegMmio->RamRange.cb >> PAGE_SHIFT;
     933                uint32_t              iPageLast = cPages;
     934                for (uint32_t iPage = 0; iPage < cPages; iPage++, pbPage += PAGE_SIZE)
    929935                {
    930                     /* Try figure if it's a clean page, compare the SHA-1 to be really sure. */
    931                     if (   !paLSPages[iPage].fDirty
    932                         && !pgmR3ScanMmio2Page(pVM, pbPage, &paLSPages[iPage]))
     936                    uint8_t u8Type;
     937                    if (!fLiveSave)
     938                        u8Type = ASMMemIsZeroPage(pbPage) ? PGM_STATE_REC_MMIO2_ZERO : PGM_STATE_REC_MMIO2_RAW;
     939                    else
    933940                    {
    934                         if (paLSPages[iPage].fZero)
    935                             continue;
    936 
    937                         uint8_t abSha1Hash[RTSHA1_HASH_SIZE];
    938                         RTSha1(pbPage, PAGE_SIZE, abSha1Hash);
    939                         if (!memcmp(abSha1Hash, paLSPages[iPage].abSha1Saved, sizeof(abSha1Hash)))
    940                             continue;
     941                        /* Try figure if it's a clean page, compare the SHA-1 to be really sure. */
     942                        if (   !paLSPages[iPage].fDirty
     943                            && !pgmR3ScanMmio2Page(pVM, pbPage, &paLSPages[iPage]))
     944                        {
     945                            if (paLSPages[iPage].fZero)
     946                                continue;
     947
     948                            uint8_t abSha1Hash[RTSHA1_HASH_SIZE];
     949                            RTSha1(pbPage, PAGE_SIZE, abSha1Hash);
     950                            if (!memcmp(abSha1Hash, paLSPages[iPage].abSha1Saved, sizeof(abSha1Hash)))
     951                                continue;
     952                        }
     953                        u8Type = paLSPages[iPage].fZero ? PGM_STATE_REC_MMIO2_ZERO : PGM_STATE_REC_MMIO2_RAW;
     954                        pVM->pgm.s.LiveSave.cSavedPages++;
    941955                    }
    942                     u8Type = paLSPages[iPage].fZero ? PGM_STATE_REC_MMIO2_ZERO : PGM_STATE_REC_MMIO2_RAW;
    943                     pVM->pgm.s.LiveSave.cSavedPages++;
     956
     957                    if (iPage != 0 && iPage == iPageLast + 1)
     958                        rc = SSMR3PutU8(pSSM, u8Type);
     959                    else
     960                    {
     961                        SSMR3PutU8(pSSM, u8Type | PGM_STATE_REC_FLAG_ADDR);
     962                        SSMR3PutU8(pSSM, pRegMmio->idSavedState);
     963                        rc = SSMR3PutU32(pSSM, iPage);
     964                    }
     965                    if (u8Type == PGM_STATE_REC_MMIO2_RAW)
     966                        rc = SSMR3PutMem(pSSM, pbPage, PAGE_SIZE);
     967                    if (RT_FAILURE(rc))
     968                        break;
     969                    iPageLast = iPage;
    944970                }
    945 
    946                 if (iPage != 0 && iPage == iPageLast + 1)
    947                     rc = SSMR3PutU8(pSSM, u8Type);
    948                 else
    949                 {
    950                     SSMR3PutU8(pSSM, u8Type | PGM_STATE_REC_FLAG_ADDR);
    951                     SSMR3PutU8(pSSM, pMmio2->idSavedState);
    952                     rc = SSMR3PutU32(pSSM, iPage);
    953                 }
    954                 if (u8Type == PGM_STATE_REC_MMIO2_RAW)
    955                     rc = SSMR3PutMem(pSSM, pbPage, PAGE_SIZE);
    956                 if (RT_FAILURE(rc))
    957                     break;
    958                 iPageLast = iPage;
    959             }
    960         }
     971            }
    961972        pgmUnlock(pVM);
    962973    }
     
    970981    {
    971982        pgmLock(pVM);
    972         for (PPGMMMIO2RANGE pMmio2 = pVM->pgm.s.pMmio2RangesR3;
    973              pMmio2 && RT_SUCCESS(rc);
    974              pMmio2 = pMmio2->pNextR3)
    975         {
    976             PPGMLIVESAVEMMIO2PAGE paLSPages = pMmio2->paLSPages;
    977             uint8_t const        *pbPage    = (uint8_t const *)pMmio2->RamRange.pvR3;
    978             uint32_t              cPages    = pMmio2->RamRange.cb >> PAGE_SHIFT;
    979             uint32_t              iPageLast = cPages;
    980             pgmUnlock(pVM);
    981 
    982             for (uint32_t iPage = 0; iPage < cPages; iPage++, pbPage += PAGE_SIZE)
    983             {
     984                 /* Skip clean pages and pages which haven't quiesced. */
    985                 if (!paLSPages[iPage].fDirty)
    986                     continue;
    987                 if (paLSPages[iPage].cUnchangedScans < 3)
    988                     continue;
    989                 if (pgmR3ScanMmio2Page(pVM, pbPage, &paLSPages[iPage]))
    990                     continue;
    991 
    992                 /* Save it. */
    993                 bool const fZero = paLSPages[iPage].fZero;
    994                 uint8_t abPage[PAGE_SIZE];
    995                 if (!fZero)
     983        for (PPGMREGMMIORANGE pRegMmio = pVM->pgm.s.pRegMmioRangesR3;
     984             pRegMmio && RT_SUCCESS(rc);
     985             pRegMmio = pRegMmio->pNextR3)
     986            if (pRegMmio->fMmio2)
     987            {
     988                PPGMLIVESAVEMMIO2PAGE paLSPages = pRegMmio->paLSPages;
     989                uint8_t const        *pbPage    = (uint8_t const *)pRegMmio->RamRange.pvR3;
     990                uint32_t              cPages    = pRegMmio->RamRange.cb >> PAGE_SHIFT;
     991                uint32_t              iPageLast = cPages;
     992                pgmUnlock(pVM);
     993
     994                for (uint32_t iPage = 0; iPage < cPages; iPage++, pbPage += PAGE_SIZE)
    996995                {
    997                     memcpy(abPage, pbPage, PAGE_SIZE);
    998                     RTSha1(abPage, PAGE_SIZE, paLSPages[iPage].abSha1Saved);
      996                    /* Skip clean pages and pages which haven't quiesced. */
     997                    if (!paLSPages[iPage].fDirty)
     998                        continue;
     999                    if (paLSPages[iPage].cUnchangedScans < 3)
     1000                        continue;
     1001                    if (pgmR3ScanMmio2Page(pVM, pbPage, &paLSPages[iPage]))
     1002                        continue;
     1003
     1004                    /* Save it. */
     1005                    bool const fZero = paLSPages[iPage].fZero;
     1006                    uint8_t abPage[PAGE_SIZE];
     1007                    if (!fZero)
     1008                    {
     1009                        memcpy(abPage, pbPage, PAGE_SIZE);
     1010                        RTSha1(abPage, PAGE_SIZE, paLSPages[iPage].abSha1Saved);
     1011                    }
     1012
     1013                    uint8_t u8Type = paLSPages[iPage].fZero ? PGM_STATE_REC_MMIO2_ZERO : PGM_STATE_REC_MMIO2_RAW;
     1014                    if (iPage != 0 && iPage == iPageLast + 1)
     1015                        rc = SSMR3PutU8(pSSM, u8Type);
     1016                    else
     1017                    {
     1018                        SSMR3PutU8(pSSM, u8Type | PGM_STATE_REC_FLAG_ADDR);
     1019                        SSMR3PutU8(pSSM, pRegMmio->idSavedState);
     1020                        rc = SSMR3PutU32(pSSM, iPage);
     1021                    }
     1022                    if (u8Type == PGM_STATE_REC_MMIO2_RAW)
     1023                        rc = SSMR3PutMem(pSSM, abPage, PAGE_SIZE);
     1024                    if (RT_FAILURE(rc))
     1025                        break;
     1026
     1027                    /* Housekeeping. */
     1028                    paLSPages[iPage].fDirty = false;
     1029                    pVM->pgm.s.LiveSave.Mmio2.cDirtyPages--;
     1030                    pVM->pgm.s.LiveSave.Mmio2.cReadyPages++;
     1031                    if (u8Type == PGM_STATE_REC_MMIO2_ZERO)
     1032                        pVM->pgm.s.LiveSave.Mmio2.cZeroPages++;
     1033                    pVM->pgm.s.LiveSave.cSavedPages++;
     1034                    iPageLast = iPage;
    9991035                }
    10001036
    1001                 uint8_t u8Type = paLSPages[iPage].fZero ? PGM_STATE_REC_MMIO2_ZERO : PGM_STATE_REC_MMIO2_RAW;
    1002                 if (iPage != 0 && iPage == iPageLast + 1)
    1003                     rc = SSMR3PutU8(pSSM, u8Type);
    1004                 else
    1005                 {
    1006                     SSMR3PutU8(pSSM, u8Type | PGM_STATE_REC_FLAG_ADDR);
    1007                     SSMR3PutU8(pSSM, pMmio2->idSavedState);
    1008                     rc = SSMR3PutU32(pSSM, iPage);
    1009                 }
    1010                 if (u8Type == PGM_STATE_REC_MMIO2_RAW)
    1011                     rc = SSMR3PutMem(pSSM, abPage, PAGE_SIZE);
    1012                 if (RT_FAILURE(rc))
    1013                     break;
    1014 
    1015                 /* Housekeeping. */
    1016                 paLSPages[iPage].fDirty = false;
    1017                 pVM->pgm.s.LiveSave.Mmio2.cDirtyPages--;
    1018                 pVM->pgm.s.LiveSave.Mmio2.cReadyPages++;
    1019                 if (u8Type == PGM_STATE_REC_MMIO2_ZERO)
    1020                     pVM->pgm.s.LiveSave.Mmio2.cZeroPages++;
    1021                 pVM->pgm.s.LiveSave.cSavedPages++;
    1022                 iPageLast = iPage;
    1023             }
    1024 
    1025             pgmLock(pVM);
    1026         }
     1037                pgmLock(pVM);
     1038            }
    10271039        pgmUnlock(pVM);
    10281040    }
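The two writer passes above emit each MMIO2 page as a compact record; reconstructed from the SSMR3Put* calls in the diff, the layout is summarized in this hedged comment block:

    /* Hedged summary of the per-page record layout emitted by the writers above:
     *
     *   u8   u8Type                   PGM_STATE_REC_MMIO2_ZERO or PGM_STATE_REC_MMIO2_RAW,
     *                                 OR'ed with PGM_STATE_REC_FLAG_ADDR when addressed.
     *   u8   idSavedState             only when the ADDR flag is set, i.e. when the page
     *   u32  iPage                    does not immediately follow the previously saved one.
     *   byte abPage[PAGE_SIZE]        only for _RAW records; _ZERO records carry no data.
     *
     * Consecutive dirty pages of the same range therefore cost one type byte of overhead each. */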
     
    10441056     */
    10451057    pgmLock(pVM);
    1046     for (PPGMMMIO2RANGE pMmio2 = pVM->pgm.s.pMmio2RangesR3; pMmio2; pMmio2 = pMmio2->pNextR3)
    1047     {
    1048         void *pvMmio2ToFree = pMmio2->paLSPages;
    1049         if (pvMmio2ToFree)
    1050         {
    1051             pMmio2->paLSPages = NULL;
    1052             pgmUnlock(pVM);
    1053             MMR3HeapFree(pvMmio2ToFree);
    1054             pgmLock(pVM);
    1055         }
    1056     }
     1058    for (PPGMREGMMIORANGE pRegMmio = pVM->pgm.s.pRegMmioRangesR3; pRegMmio; pRegMmio = pRegMmio->pNextR3)
     1059        if (pRegMmio->fMmio2)
     1060        {
     1061            void *pvMmio2ToFree = pRegMmio->paLSPages;
     1062            if (pvMmio2ToFree)
     1063            {
     1064                pRegMmio->paLSPages = NULL;
     1065                pgmUnlock(pVM);
     1066                MMR3HeapFree(pvMmio2ToFree);
     1067                pgmLock(pVM);
     1068            }
     1069        }
    10571070    pgmUnlock(pVM);
    10581071}
     
    26132626     * Process page records until we hit the terminator.
    26142627     */
    2615     RTGCPHYS        GCPhys   = NIL_RTGCPHYS;
    2616     PPGMRAMRANGE    pRamHint = NULL;
    2617     uint8_t         id       = UINT8_MAX;
    2618     uint32_t        iPage    = UINT32_MAX - 10;
    2619     PPGMROMRANGE    pRom     = NULL;
    2620     PPGMMMIO2RANGE  pMmio2  = NULL;
     2628    RTGCPHYS            GCPhys   = NIL_RTGCPHYS;
     2629    PPGMRAMRANGE        pRamHint = NULL;
     2630    uint8_t             id       = UINT8_MAX;
     2631    uint32_t            iPage    = UINT32_MAX - 10;
     2632    PPGMROMRANGE        pRom     = NULL;
     2633    PPGMREGMMIORANGE    pRegMmio = NULL;
    26212634
    26222635    /*
     
    27922805                        return rc;
    27932806                }
    2794                 if (    !pMmio2
    2795                     ||  pMmio2->idSavedState != id)
     2807                if (   !pRegMmio
     2808                    || pRegMmio->idSavedState != id)
    27962809                {
    2797                     for (pMmio2 = pVM->pgm.s.pMmio2RangesR3; pMmio2; pMmio2 = pMmio2->pNextR3)
    2798                         if (pMmio2->idSavedState == id)
     2810                    for (pRegMmio = pVM->pgm.s.pRegMmioRangesR3; pRegMmio; pRegMmio = pRegMmio->pNextR3)
     2811                        if (   pRegMmio->idSavedState == id
     2812                            && pRegMmio->fMmio2)
    27992813                            break;
    2800                     AssertLogRelMsgReturn(pMmio2, ("id=%#u iPage=%#x\n", id, iPage), VERR_PGM_SAVED_MMIO2_RANGE_NOT_FOUND);
     2814                    AssertLogRelMsgReturn(pRegMmio, ("id=%#u iPage=%#x\n", id, iPage), VERR_PGM_SAVED_MMIO2_RANGE_NOT_FOUND);
    28012815                }
    2802                 AssertLogRelMsgReturn(iPage < (pMmio2->RamRange.cb >> PAGE_SHIFT), ("iPage=%#x cb=%RGp %s\n", iPage, pMmio2->RamRange.cb, pMmio2->RamRange.pszDesc), VERR_PGM_SAVED_MMIO2_PAGE_NOT_FOUND);
    2803                 void *pvDstPage = (uint8_t *)pMmio2->RamRange.pvR3 + ((size_t)iPage << PAGE_SHIFT);
     2816                AssertLogRelMsgReturn(iPage < (pRegMmio->RamRange.cb >> PAGE_SHIFT), ("iPage=%#x cb=%RGp %s\n", iPage, pRegMmio->RamRange.cb, pRegMmio->RamRange.pszDesc), VERR_PGM_SAVED_MMIO2_PAGE_NOT_FOUND);
     2817                void *pvDstPage = (uint8_t *)pRegMmio->RamRange.pvR3 + ((size_t)iPage << PAGE_SHIFT);
    28042818
    28052819                /*
  • trunk/src/VBox/VMM/include/PGMInternal.h

    r63640 r64115  
    15391539/** Ad hoc RAM range for an MMIO mapping. */
    15401540#define PGM_RAM_RANGE_FLAGS_AD_HOC_MMIO     RT_BIT(22)
    1541 /** Ad hoc RAM range for an MMIO2 mapping. */
    1542 #define PGM_RAM_RANGE_FLAGS_AD_HOC_MMIO  RT_BIT(23)
     1541/** Ad hoc RAM range for an MMIO2 or pre-registered MMIO mapping. */
     1542#define PGM_RAM_RANGE_FLAGS_AD_HOC_MMIO_EX  RT_BIT(23)
    15431543/** @} */
    15441544
     
    15481548 */
    15491549#define PGM_RAM_RANGE_IS_AD_HOC(pRam) \
    1550     (!!( (pRam)->fFlags & (PGM_RAM_RANGE_FLAGS_AD_HOC_ROM | PGM_RAM_RANGE_FLAGS_AD_HOC_MMIO | PGM_RAM_RANGE_FLAGS_AD_HOC_MMIO2) ) )
     1550    (!!( (pRam)->fFlags & (PGM_RAM_RANGE_FLAGS_AD_HOC_ROM | PGM_RAM_RANGE_FLAGS_AD_HOC_MMIO | PGM_RAM_RANGE_FLAGS_AD_HOC_MMIO_EX) ) )
    15511551
    15521552/** The number of entries in the RAM range TLBs (there is one for each
     
    16881688
    16891689/**
    1690  * A registered MMIO2 (= Device RAM) range.
    1691  *
     1692  * There are a few reasons why we need to keep track of these
    1693  * registrations.  One of them is the deregistration & cleanup stuff,
    1694  * while another is that the PGMRAMRANGE associated with such a region may
    1695  * have to be removed from the ram range list.
    1696  *
    1697  * Overlapping with a RAM range has to be 100% or none at all.  The pages
    1698  * in the existing RAM range must not be ROM nor MMIO.  A guru meditation
    1699  * will be raised if a partial overlap or an overlap of ROM pages is
    1700  * encountered.  On an overlap we will free all the existing RAM pages and
    1701  * put in the ram range pages instead.
    1702  */
    1703 typedef struct PGMMMIO2RANGE
     1690 * A registered MMIO2 (= Device RAM) or pre-registered MMIO range.
     1691 *
      1692 * There are a few reasons why we need to keep track of these registrations.  One
     1693 * of them is the deregistration & cleanup stuff, while another is that the
     1694 * PGMRAMRANGE associated with such a region may have to be removed from the ram
     1695 * range list.
     1696 *
     1697 * Overlapping with a RAM range has to be 100% or none at all.  The pages in the
     1698 * existing RAM range must not be ROM nor MMIO.  A guru meditation will be
     1699 * raised if a partial overlap or an overlap of ROM pages is encountered.  On an
     1700 * overlap we will free all the existing RAM pages and put in the ram range
     1701 * pages instead.
     1702 */
     1703typedef struct PGMREGMMIORANGE
    17041704{
    17051705    /** The owner of the range. (a device) */
    17061706    PPDMDEVINSR3                        pDevInsR3;
    1707     /** Pointer to the ring-3 mapping of the allocation. */
     1707    /** Pointer to the ring-3 mapping of the allocation, if MMIO2. */
    17081708    RTR3PTR                             pvR3;
    17091709    /** Pointer to the next range - R3. */
    1710     R3PTRTYPE(struct PGMMMIO2RANGE *)   pNextR3;
     1710    R3PTRTYPE(struct PGMREGMMIORANGE *) pNextR3;
     1711    /** Whether this is MMIO2 or plain MMIO. */
     1712    bool                                fMmio2;
    17111713    /** Whether it's mapped or not. */
    17121714    bool                                fMapped;
     
    17221724    uint8_t                             idMmio2;
    17231725    /** Alignment padding for putting the ram range on a PGMPAGE alignment boundary. */
    1724     uint8_t                             abAlignment[HC_ARCH_BITS == 32 ? 11 : 11];
    1725     /** Live save per page tracking data. */
     1726    uint8_t                             abAlignment[HC_ARCH_BITS == 32 ? 6 : 2];
     1727    /** Pointer to the physical handler for MMIO. */
     1728    R3PTRTYPE(PPGMPHYSHANDLER)          pPhysHandlerR3;
     1729    /** Live save per page tracking data for MMIO2. */
    17261730    R3PTRTYPE(PPGMLIVESAVEMMIO2PAGE)    paLSPages;
    17271731    /** The associated RAM range. */
    17281732    PGMRAMRANGE                         RamRange;
    1729 } PGMMMIO2RANGE;
    1730 /** Pointer to a MMIO2 range. */
    1731 typedef PGMMMIO2RANGE *PPGMMMIO2RANGE;
     1733} PGMREGMMIORANGE;
     1734AssertCompileMemberAlignment(PGMREGMMIORANGE, RamRange, 16);
     1735/** Pointer to a MMIO2 or pre-registered MMIO range. */
     1736typedef PGMREGMMIORANGE *PPGMREGMMIORANGE;
    17321737
    17331738/** @name Internal MMIO2 constants.
     
    17361741#define PGM_MMIO2_MAX_RANGES                        8
    17371742/** The maximum number of pages in a MMIO2 range. */
    1738 #define PGM_MMIO2_MAX_PAGE_COUNT                    UINT32_C(0x00ffffff)
     1743#define PGM_MMIO2_MAX_PAGE_COUNT                    UINT32_C(0x01000000)
    17391744/** Makes a MMIO2 page ID out of a MMIO2 range ID and page index number. */
    17401745#define PGM_MMIO2_PAGEID_MAKE(a_idMmio2, a_iPage)   ( ((uint32_t)(a_idMmio2) << 24) | (uint32_t)(a_iPage) )
     
    33083313    /** Pointer to the list of MMIO2 ranges - for R3.
    33093314     * Registration order. */
    3310     R3PTRTYPE(PPGMMMIO2RANGE)       pMmio2RangesR3;
     3315    R3PTRTYPE(PPGMREGMMIORANGE)     pRegMmioRangesR3;
    33113316    /** Pointer to SHW+GST mode data (function pointers).
    33123317     * The index into this table is made up from */
     
    33143319    RTR3PTR                         R3PtrAlignment0;
    33153320    /** MMIO2 lookup array for ring-3.  Indexed by idMmio2 minus 1. */
    3316     R3PTRTYPE(PPGMMMIO2RANGE)       apMmio2RangesR3[PGM_MMIO2_MAX_RANGES];
     3321    R3PTRTYPE(PPGMREGMMIORANGE)     apMmio2RangesR3[PGM_MMIO2_MAX_RANGES];
    33173322
    33183323    /** RAM range TLB for R0. */
     
    33343339    R0PTRTYPE(PPGMROMRANGE)         pRomRangesR0;
    33353340    RTR0PTR                         R0PtrAlignment0;
    3336     /** MMIO2 lookup array for ring-3.  Indexed by idMmio2 minus 1. */
    3337     R0PTRTYPE(PPGMMMIO2RANGE)       apMmio2RangesR0[PGM_MMIO2_MAX_RANGES];
     3341    /** MMIO2 lookup array for ring-0.  Indexed by idMmio2 minus 1. */
     3342    R0PTRTYPE(PPGMREGMMIORANGE)     apMmio2RangesR0[PGM_MMIO2_MAX_RANGES];
    33383343
    33393344    /** RAM range TLB for RC. */
     
    41394144DECLCALLBACK(void) pgmR3MapInfo(PVM pVM, PCDBGFINFOHLP pHlp, const char *pszArgs);
    41404145
     4146int             pgmHandlerPhysicalExCreate(PVM pVM, PGMPHYSHANDLERTYPE hType, RTR3PTR pvUserR3, RTR0PTR pvUserR0,
     4147                                           RTRCPTR pvUserRC, R3PTRTYPE(const char *) pszDesc, PPGMPHYSHANDLER *ppPhysHandler);
     4148int             pgmHandlerPhysicalExRegister(PVM pVM, PPGMPHYSHANDLER pPhysHandler, RTGCPHYS GCPhys, RTGCPHYS GCPhysLast);
     4149int             pgmHandlerPhysicalExDeregister(PVM pVM, PPGMPHYSHANDLER pPhysHandler);
     4150int             pgmHandlerPhysicalExDestroy(PVM pVM, PPGMPHYSHANDLER pHandler);
    41414151void            pgmR3HandlerPhysicalUpdateAll(PVM pVM);
    41424152bool            pgmHandlerPhysicalIsAll(PVM pVM, RTGCPHYS GCPhys);
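The new pgmHandlerPhysicalEx* internals split handler creation from registration, which is what lets a pre-registered MMIO range create its access handler once and attach/detach it on every map and unmap (as seen in PGMR3PhysMMIOExUnmap above). A hedged lifecycle sketch using only the prototypes listed here; the handler type, user data, range and description are placeholders:

    /* Hedged sketch of the handler lifecycle; hType, pvUserR3, GCPhys and GCPhysLast
     * are illustrative parameters supplied by the caller. */
    static int sketchPreRegMmioHandlerLifecycle(PVM pVM, PGMPHYSHANDLERTYPE hType, RTR3PTR pvUserR3,
                                                RTGCPHYS GCPhys, RTGCPHYS GCPhysLast)
    {
        PPGMPHYSHANDLER pHandler = NULL;
        int rc = pgmHandlerPhysicalExCreate(pVM, hType, pvUserR3, NIL_RTR0PTR, NIL_RTRCPTR,
                                            "sketch: pre-registered MMIO", &pHandler);
        if (RT_SUCCESS(rc))
        {
            /* Map time: attach the handler to the guest-physical range. */
            rc = pgmHandlerPhysicalExRegister(pVM, pHandler, GCPhys, GCPhysLast);
            if (RT_SUCCESS(rc))
            {
                /* ... the range is live; guest accesses hit the handler ... */

                /* Unmap time: detach, but keep the handler instance for the next mapping. */
                rc = pgmHandlerPhysicalExDeregister(pVM, pHandler);
            }
            /* Final deregistration of the region: destroy the handler instance. */
            pgmHandlerPhysicalExDestroy(pVM, pHandler);
        }
        return rc;
    }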
  • trunk/src/VBox/VMM/testcase/tstVMStruct.h

    r62016 r64115  
    878878    GEN_CHECK_OFF(PGMROMRANGE, aPages);
    879879    GEN_CHECK_OFF(PGMROMRANGE, aPages[1]);
    880     GEN_CHECK_SIZE(PGMMMIO2RANGE);
    881     GEN_CHECK_OFF(PGMMMIO2RANGE, pDevInsR3);
    882     GEN_CHECK_OFF(PGMMMIO2RANGE, pNextR3);
    883     GEN_CHECK_OFF(PGMMMIO2RANGE, fMapped);
    884     GEN_CHECK_OFF(PGMMMIO2RANGE, fOverlapping);
    885     GEN_CHECK_OFF(PGMMMIO2RANGE, iRegion);
    886     GEN_CHECK_OFF(PGMMMIO2RANGE, RamRange);
     880    GEN_CHECK_SIZE(PGMREGMMIORANGE);
     881    GEN_CHECK_OFF(PGMREGMMIORANGE, pDevInsR3);
     882    GEN_CHECK_OFF(PGMREGMMIORANGE, pNextR3);
     883    GEN_CHECK_OFF(PGMREGMMIORANGE, fMmio2);
     884    GEN_CHECK_OFF(PGMREGMMIORANGE, fMapped);
     885    GEN_CHECK_OFF(PGMREGMMIORANGE, fOverlapping);
     886    GEN_CHECK_OFF(PGMREGMMIORANGE, iRegion);
     887    GEN_CHECK_OFF(PGMREGMMIORANGE, RamRange);
    887888    GEN_CHECK_SIZE(PGMTREES);
    888889    GEN_CHECK_OFF(PGMTREES, PhysHandlers);
  • trunk/src/VBox/VMM/testcase/tstVMStructSize.cpp

    r62276 r64115  
    385385    CHECK_SIZE(PGMPAGE, 16);
    386386    CHECK_MEMBER_ALIGNMENT(PGMRAMRANGE, aPages, 16);
    387     CHECK_MEMBER_ALIGNMENT(PGMMMIO2RANGE, RamRange, 16);
     387    CHECK_MEMBER_ALIGNMENT(PGMREGMMIORANGE, RamRange, 16);
    388388
    389389    /* rem */