Changeset 64115 in vbox
- Timestamp: Sep 30, 2016 8:14:27 PM
- svn:sync-xref-src-repo-rev: 111072
- Location: trunk
- Files: 27 edited
trunk/include/VBox/err.h
r61847 → r64115

      /** Don't mess around with ballooned pages. */
      #define VERR_PGM_PHYS_PAGE_BALLOONED            (-1646)
    + /** Internal processing error \#1 in page access handler code. */
    + #define VERR_PGM_HANDLER_IPE_1                  (-1647)
trunk/include/VBox/param.h
r62476 → r64115

      /** The maximum number of pages that can be allocated and mapped
       * by various MM, PGM and SUP APIs. */
    - #define VBOX_MAX_ALLOC_PAGE_COUNT   (256U * _1M / PAGE_SIZE)
    + #if ARCH_BITS == 64
    + # define VBOX_MAX_ALLOC_PAGE_COUNT  (_512M / PAGE_SIZE)
    + #else
    + # define VBOX_MAX_ALLOC_PAGE_COUNT  (_256M / PAGE_SIZE)
    + #endif

      /** @def VBOX_WITH_PAGE_SHARING
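As a quick sanity check of the new limits (my own arithmetic, not part of the changeset): with the usual 4 KiB x86 page size, the cap rises from 65536 pages (256 MiB) to 131072 pages (512 MiB) on 64-bit hosts. A standalone C11 sketch, with local stand-ins for the iprt/param.h constants:

    #include <assert.h>

    /* Local stand-ins for the iprt/param.h values (assumption: 4 KiB pages). */
    #define MY_PAGE_SIZE 4096U
    #define MY_256M      (256U * 1024U * 1024U)
    #define MY_512M      (512U * 1024U * 1024U)

    /* The two values VBOX_MAX_ALLOC_PAGE_COUNT can now take: */
    static_assert(MY_512M / MY_PAGE_SIZE == 131072, "64-bit hosts: 128Ki pages");
    static_assert(MY_256M / MY_PAGE_SIZE ==  65536, "32-bit hosts:  64Ki pages");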
trunk/include/VBox/vmm/iom.h
r63682 → r64115

                                                   RCPTRTYPE(PFNIOMMMIOFILL) pfnFillCallback);
      VMMR3_INT_DECL(int)  IOMR3MmioDeregister(PVM pVM, PPDMDEVINS pDevIns, RTGCPHYS GCPhysStart, RTGCPHYS cbRange);
    + VMMR3_INT_DECL(int)  IOMR3MmioExPreRegister(PVM pVM, PPDMDEVINS pDevIns, uint32_t iRegion, RTGCPHYS cbRange,
    +                                             uint32_t fFlags, const char *pszDesc,
    +                                             RTR3PTR pvUserR3,
    +                                             R3PTRTYPE(PFNIOMMMIOWRITE) pfnWriteCallbackR3,
    +                                             R3PTRTYPE(PFNIOMMMIOREAD)  pfnReadCallbackR3,
    +                                             R3PTRTYPE(PFNIOMMMIOFILL)  pfnFillCallbackR3,
    +                                             RTR0PTR pvUserR0,
    +                                             R0PTRTYPE(PFNIOMMMIOWRITE) pfnWriteCallbackR0,
    +                                             R0PTRTYPE(PFNIOMMMIOREAD)  pfnReadCallbackR0,
    +                                             R0PTRTYPE(PFNIOMMMIOFILL)  pfnFillCallbackR0,
    +                                             RTRCPTR pvUserRC,
    +                                             RCPTRTYPE(PFNIOMMMIOWRITE) pfnWriteCallbackRC,
    +                                             RCPTRTYPE(PFNIOMMMIOREAD)  pfnReadCallbackRC,
    +                                             RCPTRTYPE(PFNIOMMMIOFILL)  pfnFillCallbackRC);
    + VMMR3_INT_DECL(int)  IOMR3MmioExNotifyMapped(PVM pVM, void *pvUser, RTGCPHYS GCPhys);
    + VMMR3_INT_DECL(void) IOMR3MmioExNotifyUnmapped(PVM pVM, void *pvUser, RTGCPHYS GCPhys);
    + VMMR3_INT_DECL(void) IOMR3MmioExNotifyDeregistered(PVM pVM, void *pvUser);

      VMMR3_INT_DECL(VBOXSTRICTRC) IOMR3ProcessForceFlag(PVM pVM, PVMCPU pVCpu, VBOXSTRICTRC rcStrict);
trunk/include/VBox/vmm/pdmdev.h
r63701 → r64115

      /**
    -  * Checks if the given address is an MMIO2 base address or not.
    +  * Checks if the given address is an MMIO2 or pre-registered MMIO base address.
       *
       * @returns true/false accordingly.
      …
       * @param   pOwner      The owner of the memory, optional.
       * @param   GCPhys      The address to check.
    -  */
    - DECLR3CALLBACKMEMBER(bool, pfnIsMMIO2Base,(PPDMDEVINS pDevIns, PPDMDEVINS pOwner, RTGCPHYS GCPhys));
    +  * @sa      PGMR3PhysMMIOExIsBase
    +  */
    + DECLR3CALLBACKMEMBER(bool, pfnIsMMIOExBase,(PPDMDEVINS pDevIns, PPDMDEVINS pOwner, RTGCPHYS GCPhys));
    …
      /** Current PDMPCIHLPR3 version number. */
    - #define PDM_PCIHLPR3_VERSION              PDM_VERSION_MAKE(0xfffb, 3, 0)
    + #define PDM_PCIHLPR3_VERSION              PDM_VERSION_MAKE(0xfffb, 3, 1)
    …
      /**
    -  * Deregisters and frees a MMIO2 region.
    +  * Deregisters and frees a MMIO or MMIO2 region.
       *
       * Any physical (and virtual) access handlers registered for the region must
    -  * be deregistered before calling this function.
    +  * be deregistered before calling this function (MMIO2 only).
       *
       * @returns VBox status code.
      …
       * @thread  EMT.
       */
    - DECLR3CALLBACKMEMBER(int, pfnMMIO2Deregister,(PPDMDEVINS pDevIns, uint32_t iRegion));
    -
    - /**
    -  * Maps a MMIO2 region into the physical memory space.
    -  *
    -  * A MMIO2 range may overlap with base memory if a lot of RAM
    -  * is configured for the VM, in which case we'll drop the base
    -  * memory pages. Presently we will make no attempt to preserve
    -  * anything that happens to be present in the base memory that
    -  * is replaced, this is of course incorrect but it's too much
    -  * effort.
    + DECLR3CALLBACKMEMBER(int, pfnMMIOExDeregister,(PPDMDEVINS pDevIns, uint32_t iRegion));
    +
    + /**
    +  * Maps a MMIO or MMIO2 region into the physical memory space.
    +  *
    +  * A MMIO2 range or a pre-registered MMIO range may overlap with base memory if
    +  * a lot of RAM is configured for the VM, in which case we'll drop the base
    +  * memory pages. Presently we will make no attempt to preserve anything that
    +  * happens to be present in the base memory that is replaced, this is of course
    +  * incorrect but it's too much effort.
       *
       * @returns VBox status code.
      …
       * @thread  EMT.
       */
    - DECLR3CALLBACKMEMBER(int, pfnMMIO2Map,(PPDMDEVINS pDevIns, uint32_t iRegion, RTGCPHYS GCPhys));
    -
    - /**
    -  * Unmaps a MMIO2 region previously mapped using pfnMMIO2Map.
    + DECLR3CALLBACKMEMBER(int, pfnMMIOExMap,(PPDMDEVINS pDevIns, uint32_t iRegion, RTGCPHYS GCPhys));
    +
    + /**
    +  * Unmaps a MMIO or MMIO2 region previously mapped using pfnMMIOExMap.
       *
       * @returns VBox status code.
      …
       * @thread  EMT.
       */
    - DECLR3CALLBACKMEMBER(int, pfnMMIO2Unmap,(PPDMDEVINS pDevIns, uint32_t iRegion, RTGCPHYS GCPhys));
    + DECLR3CALLBACKMEMBER(int, pfnMMIOExUnmap,(PPDMDEVINS pDevIns, uint32_t iRegion, RTGCPHYS GCPhys));
    …
      DECLR3CALLBACKMEMBER(VMRESUMEREASON, pfnVMGetResumeReason,(PPDMDEVINS pDevIns));

    + /**
    +  * Pre-register a Memory Mapped I/O (MMIO) region.
    +  *
    +  * This API must be used for large PCI MMIO regions, as it handles these much
    +  * more efficiently and with greater flexibility when it comes to heap usage.
    +  * It is only available during device construction.
    +  *
    +  * To map and unmap the pre-registered region into and out of guest address
    +  * space, use the PDMDevHlpMMIOExMap and PDMDevHlpMMIOExUnmap helpers.
    +  *
    +  * You may call PDMDevHlpMMIOExDeregister from the destructor to free the region
    +  * for reasons of symmetry, but it will be automatically deregistered by PDM
    +  * once the destructor returns.
    +  *
    +  * @returns VBox status.
    +  * @param   pDevIns     The device instance to register the MMIO with.
    +  * @param   iRegion     The region number.
    +  * @param   cbRegion    The size of the range (in bytes).
    +  * @param   fFlags      Flags, IOMMMIO_FLAGS_XXX.
    +  * @param   pszDesc     Pointer to description string. This must not be freed.
    +  * @param   pvUser      Ring-3 user argument.
    +  * @param   pfnWrite    Pointer to the function which will handle Write operations.
    +  * @param   pfnRead     Pointer to the function which will handle Read operations.
    +  * @param   pfnFill     Pointer to the function which will handle Fill/memset operations. (optional)
    +  * @param   pvUserR0    Ring-0 user argument. Optional.
    +  * @param   pszWriteR0  The name of the ring-0 write handler method. Optional.
    +  * @param   pszReadR0   The name of the ring-0 read handler method. Optional.
    +  * @param   pszFillR0   The name of the ring-0 fill/memset handler method. Optional.
    +  * @param   pvUserRC    Raw-mode context user argument. Optional. If the
    +  *                      unsigned value is 0x10000 or higher, it will be
    +  *                      automatically relocated with the hypervisor guest
    +  *                      mapping.
    +  * @param   pszWriteRC  The name of the raw-mode context write handler method. Optional.
    +  * @param   pszReadRC   The name of the raw-mode context read handler method. Optional.
    +  * @param   pszFillRC   The name of the raw-mode context fill/memset handler method. Optional.
    +  * @thread  EMT
    +  *
    +  * @remarks Caller enters the device critical section prior to invoking the
    +  *          registered callback methods.
    +  * @sa      PDMDevHlpMMIOExMap, PDMDevHlpMMIOExUnmap, PDMDevHlpMMIOExDeregister,
    +  *          PDMDevHlpMMIORegisterEx
    +  */
    + DECLR3CALLBACKMEMBER(int, pfnMMIOExPreRegister,(PPDMDEVINS pDevIns, uint32_t iRegion, RTGCPHYS cbRegion,
    +                                                 uint32_t fFlags, const char *pszDesc, RTHCPTR pvUser,
    +                                                 PFNIOMMMIOWRITE pfnWrite, PFNIOMMMIOREAD pfnRead, PFNIOMMMIOFILL pfnFill,
    +                                                 RTR0PTR pvUserR0, const char *pszWriteR0, const char *pszReadR0, const char *pszFillR0,
    +                                                 RTRCPTR pvUserRC, const char *pszWriteRC, const char *pszReadRC, const char *pszFillRC));

      /** Space reserved for future members.
      …
      DECLR3CALLBACKMEMBER(void, pfnReserved5,(void));
      DECLR3CALLBACKMEMBER(void, pfnReserved6,(void));
    - DECLR3CALLBACKMEMBER(void, pfnReserved7,(void));
    - /*DECLR3CALLBACKMEMBER(void, pfnReserved8,(void));
    + /*DECLR3CALLBACKMEMBER(void, pfnReserved7,(void));
    + DECLR3CALLBACKMEMBER(void, pfnReserved8,(void));
      DECLR3CALLBACKMEMBER(void, pfnReserved9,(void));*/
      /*DECLR3CALLBACKMEMBER(void, pfnReserved10,(void));*/
    …
      /** Current PDMDEVHLPR3 version number. */
      /* 5.0 is (18, 0) so the next version for trunk has to be (19, 0)! */
    - #define PDM_DEVHLPR3_VERSION              PDM_VERSION_MAKE(0xffe7, 17, 0)
    + #define PDM_DEVHLPR3_VERSION              PDM_VERSION_MAKE(0xffe7, 17, 1)
    …
      /**
    -  * @copydoc PDMDEVHLPR3::pfnMMIO2Deregister
    -  */
    - DECLINLINE(int) PDMDevHlpMMIO2Deregister(PPDMDEVINS pDevIns, uint32_t iRegion)
    - {
    -     return pDevIns->pHlpR3->pfnMMIO2Deregister(pDevIns, iRegion);
    - }
    -
    - /**
    -  * @copydoc PDMDEVHLPR3::pfnMMIO2Map
    -  */
    - DECLINLINE(int) PDMDevHlpMMIO2Map(PPDMDEVINS pDevIns, uint32_t iRegion, RTGCPHYS GCPhys)
    - {
    -     return pDevIns->pHlpR3->pfnMMIO2Map(pDevIns, iRegion, GCPhys);
    - }
    -
    - /**
    -  * @copydoc PDMDEVHLPR3::pfnMMIO2Unmap
    -  */
    - DECLINLINE(int) PDMDevHlpMMIO2Unmap(PPDMDEVINS pDevIns, uint32_t iRegion, RTGCPHYS GCPhys)
    - {
    -     return pDevIns->pHlpR3->pfnMMIO2Unmap(pDevIns, iRegion, GCPhys);
    +  * @copydoc PDMDEVHLPR3::pfnMMIOExPreRegister
    +  */
    + DECLINLINE(int) PDMDevHlpMMIOExPreRegister(PPDMDEVINS pDevIns, uint32_t iRegion, RTGCPHYS cbRegion,
    +                                            uint32_t fFlags, const char *pszDesc, RTHCPTR pvUser,
    +                                            PFNIOMMMIOWRITE pfnWrite, PFNIOMMMIOREAD pfnRead, PFNIOMMMIOFILL pfnFill,
    +                                            RTR0PTR pvUserR0, const char *pszWriteR0, const char *pszReadR0, const char *pszFillR0,
    +                                            RTRCPTR pvUserRC, const char *pszWriteRC, const char *pszReadRC, const char *pszFillRC)
    + {
    +     return pDevIns->pHlpR3->pfnMMIOExPreRegister(pDevIns, iRegion, cbRegion, fFlags, pszDesc,
    +                                                  pvUser, pfnWrite, pfnRead, pfnFill,
    +                                                  pvUserR0, pszWriteR0, pszReadR0, pszFillR0,
    +                                                  pvUserRC, pszWriteRC, pszReadRC, pszFillRC);
    + }
    +
    + /**
    +  * @copydoc PDMDEVHLPR3::pfnMMIOExDeregister
    +  */
    + DECLINLINE(int) PDMDevHlpMMIOExDeregister(PPDMDEVINS pDevIns, uint32_t iRegion)
    + {
    +     return pDevIns->pHlpR3->pfnMMIOExDeregister(pDevIns, iRegion);
    + }
    +
    + /**
    +  * @copydoc PDMDEVHLPR3::pfnMMIOExMap
    +  */
    + DECLINLINE(int) PDMDevHlpMMIOExMap(PPDMDEVINS pDevIns, uint32_t iRegion, RTGCPHYS GCPhys)
    + {
    +     return pDevIns->pHlpR3->pfnMMIOExMap(pDevIns, iRegion, GCPhys);
    + }
    +
    + /**
    +  * @copydoc PDMDEVHLPR3::pfnMMIOExUnmap
    +  */
    + DECLINLINE(int) PDMDevHlpMMIOExUnmap(PPDMDEVINS pDevIns, uint32_t iRegion, RTGCPHYS GCPhys)
    + {
    +     return pDevIns->pHlpR3->pfnMMIOExUnmap(pDevIns, iRegion, GCPhys);
      }
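To see how a device is meant to consume the new helpers, here is a minimal, hypothetical sketch (the device name, callbacks and sizes are invented; only the helper signatures come from the header above). Construction pre-registers the region once, and the FNPCIIOREGIONMAP callback merely maps it when the guest programs the BAR:

    /* In the constructor: tell PCI about the BAR, then pre-register its MMIO
     * handlers once and for all (only legal during construction). */
    rc = PDMDevHlpPCIIORegionRegister(pDevIns, 0, 16*_1G64,
                                      (PCIADDRESSSPACE)(PCI_ADDRESS_SPACE_MEM | PCI_ADDRESS_SPACE_BAR64),
                                      myDevMap);
    AssertLogRelRCReturn(rc, rc);
    rc = PDMDevHlpMMIOExPreRegister(pDevIns, 0 /*iRegion*/, 16*_1G64,
                                    IOMMMIO_FLAGS_READ_PASSTHRU | IOMMMIO_FLAGS_WRITE_PASSTHRU, "MyDev-BAR0",
                                    NULL /*pvUser*/, myDevMmioWrite, myDevMmioRead, NULL /*pfnFill*/,
                                    NIL_RTR0PTR, NULL, NULL, NULL,   /* no ring-0 handlers */
                                    NIL_RTRCPTR, NULL, NULL, NULL);  /* no raw-mode handlers */
    AssertLogRelRCReturn(rc, rc);

    /* The map callback no longer registers handlers on every BAR move. */
    static DECLCALLBACK(int) myDevMap(PPCIDEVICE pPciDev, int iRegion, RTGCPHYS GCPhysAddress,
                                      RTGCPHYS cb, PCIADDRESSSPACE enmType)
    {
        RT_NOREF(cb, enmType);
        if (GCPhysAddress == NIL_RTGCPHYS)
            return VINF_SUCCESS; /* the PCI bus unmaps via pfnIsMMIOExBase + PDMDevHlpMMIOExUnmap */
        return PDMDevHlpMMIOExMap(pPciDev->pDevIns, iRegion, GCPhysAddress);
    }

This is the same shape the DevPlayground and E1000 changes below take.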
trunk/include/VBox/vmm/pgm.h
r63226 → r64115

      VMMR3DECL(int)  PGMR3PhysMMIODeregister(PVM pVM, RTGCPHYS GCPhys, RTGCPHYS cb);
      VMMR3DECL(int)  PGMR3PhysMMIO2Register(PVM pVM, PPDMDEVINS pDevIns, uint32_t iRegion, RTGCPHYS cb, uint32_t fFlags, void **ppv, const char *pszDesc);
    - VMMR3DECL(int)  PGMR3PhysMMIO2Deregister(PVM pVM, PPDMDEVINS pDevIns, uint32_t iRegion);
    - VMMR3DECL(int)  PGMR3PhysMMIO2Map(PVM pVM, PPDMDEVINS pDevIns, uint32_t iRegion, RTGCPHYS GCPhys);
    - VMMR3DECL(int)  PGMR3PhysMMIO2Unmap(PVM pVM, PPDMDEVINS pDevIns, uint32_t iRegion, RTGCPHYS GCPhys);
    - VMMR3DECL(bool) PGMR3PhysMMIO2IsBase(PVM pVM, PPDMDEVINS pDevIns, RTGCPHYS GCPhys);
    - VMMR3DECL(int)  PGMR3PhysMMIO2GetHCPhys(PVM pVM, PPDMDEVINS pDevIns, uint32_t iRegion, RTGCPHYS off, PRTHCPHYS pHCPhys);
    - VMMR3DECL(int)  PGMR3PhysMMIO2MapKernel(PVM pVM, PPDMDEVINS pDevIns, uint32_t iRegion, RTGCPHYS off, RTGCPHYS cb, const char *pszDesc, PRTR0PTR pR0Ptr);
    + VMMR3DECL(int)  PGMR3PhysMMIOExPreRegister(PVM pVM, PPDMDEVINS pDevIns, uint32_t iRegion, RTGCPHYS cb, PGMPHYSHANDLERTYPE hType,
    +                                            RTR3PTR pvUserR3, RTR0PTR pvUserR0, RTRCPTR pvUserRC, const char *pszDesc);
    + VMMR3DECL(int)  PGMR3PhysMMIOExDeregister(PVM pVM, PPDMDEVINS pDevIns, uint32_t iRegion);
    + VMMR3DECL(int)  PGMR3PhysMMIOExMap(PVM pVM, PPDMDEVINS pDevIns, uint32_t iRegion, RTGCPHYS GCPhys);
    + VMMR3DECL(int)  PGMR3PhysMMIOExUnmap(PVM pVM, PPDMDEVINS pDevIns, uint32_t iRegion, RTGCPHYS GCPhys);
    + VMMR3DECL(bool) PGMR3PhysMMIOExIsBase(PVM pVM, PPDMDEVINS pDevIns, RTGCPHYS GCPhys);
    + VMMR3_INT_DECL(int) PGMR3PhysMMIO2GetHCPhys(PVM pVM, PPDMDEVINS pDevIns, uint32_t iRegion, RTGCPHYS off, PRTHCPHYS pHCPhys);
    + VMMR3_INT_DECL(int) PGMR3PhysMMIO2MapKernel(PVM pVM, PPDMDEVINS pDevIns, uint32_t iRegion, RTGCPHYS off, RTGCPHYS cb, const char *pszDesc, PRTR0PTR pR0Ptr);

      /** @name PGMR3PhysRegisterRom flags.
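Taken together, the renamed entry points form a simple region lifecycle. An illustrative call-order sketch (assumptions: pVM/pDevIns exist, hType came from a handler-type registration, error handling elided; not a quote from the changeset):

    rc  = PGMR3PhysMMIOExPreRegister(pVM, pDevIns, 0 /*iRegion*/, cb, hType,
                                     pvUserR3, pvUserR0, pvUserRC, "Example"); /* construction */
    rc  = PGMR3PhysMMIOExMap(pVM, pDevIns, 0, GCPhys);      /* guest programs the BAR   */
    fIs = PGMR3PhysMMIOExIsBase(pVM, pDevIns, GCPhys);      /* PCI bus queries on remap */
    rc  = PGMR3PhysMMIOExUnmap(pVM, pDevIns, 0, GCPhys);    /* BAR moved or disabled    */
    rc  = PGMR3PhysMMIOExDeregister(pVM, pDevIns, 0);       /* device destruction       */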
trunk/src/VBox/Devices/Bus/DevPCI.cpp
r63685 → r64115

      RTGCPHYS GCPhysBase = r->addr;
      int rc;
    - if (pBus->pPciHlpR3->pfnIsMMIO2Base(pBus->pDevInsR3, d->pDevIns, GCPhysBase))
    + if (pBus->pPciHlpR3->pfnIsMMIOExBase(pBus->pDevInsR3, d->pDevIns, GCPhysBase))
      {
          /* unmap it. */
          rc = r->map_func(d, i, NIL_RTGCPHYS, r->size, (PCIADDRESSSPACE)(r->type));
          AssertRC(rc);
    -     rc = PDMDevHlpMMIO2Unmap(d->pDevIns, i, GCPhysBase);
    +     rc = PDMDevHlpMMIOExUnmap(d->pDevIns, i, GCPhysBase);
      }
      else
trunk/src/VBox/Devices/Bus/DevPciIch9.cpp
r63879 → r64115

      {
          RTGCPHYS GCPhysBase = pRegion->addr;
    -     if (pBus->pPciHlpR3->pfnIsMMIO2Base(pBus->pDevInsR3, pDev->pDevIns, GCPhysBase))
    +     if (pBus->pPciHlpR3->pfnIsMMIOExBase(pBus->pDevInsR3, pDev->pDevIns, GCPhysBase))
          {
              /* unmap it. */
              rc = pRegion->map_func(pDev, iRegion, NIL_RTGCPHYS, pRegion->size, (PCIADDRESSSPACE)(pRegion->type));
              AssertRC(rc);
    -         rc = PDMDevHlpMMIO2Unmap(pDev->pDevIns, iRegion, GCPhysBase);
    +         rc = PDMDevHlpMMIOExUnmap(pDev->pDevIns, iRegion, GCPhysBase);
          }
          else
trunk/src/VBox/Devices/GIMDev/GIMDev.cpp
r62890 → r64115

      for (uint32_t i = 0; i < cRegions; i++, pCur++)
      {
    -     int rc = PDMDevHlpMMIO2Deregister(pDevIns, pCur->iRegion);
    +     int rc = PDMDevHlpMMIOExDeregister(pDevIns, pCur->iRegion);
          if (RT_FAILURE(rc))
              return rc;
trunk/src/VBox/Devices/Graphics/DevVGA-SVGA.cpp
r63690 → r64115

      /*
       * Mapping the FIFO RAM.
       */
    - rc = PDMDevHlpMMIO2Map(pDevIns, iRegion, GCPhysAddress);
    + rc = PDMDevHlpMMIOExMap(pDevIns, iRegion, GCPhysAddress);
      AssertRC(rc);
trunk/src/VBox/Devices/Graphics/DevVGA.cpp
r63690 → r64115

      /*
       * Mapping the VRAM.
       */
    - rc = PDMDevHlpMMIO2Map(pDevIns, iRegion, GCPhysAddress);
    + rc = PDMDevHlpMMIOExMap(pDevIns, iRegion, GCPhysAddress);
      AssertRC(rc);
      if (RT_SUCCESS(rc))
trunk/src/VBox/Devices/Network/DevE1000.cpp
r63690 → r64115

    - /* Options *******************************************************************/
    + /*********************************************************************************************************************************
    + *   Defined Constants And Macros                                                                                                 *
    + *********************************************************************************************************************************/
    + /** @name E1000 Build Options
    +  * @{ */
      /** @def E1K_INIT_RA0
       * E1K_INIT_RA0 forces E1000 to set the first entry in Receive Address filter
      …
       */
      #define E1K_WITH_RXD_CACHE
    + /** @def E1K_WITH_PREREG_MMIO
    +  * E1K_WITH_PREREG_MMIO enables a new style MMIO registration and is
    +  * currently only done for testing the related PDM, IOM and PGM code. */
    + //#define E1K_WITH_PREREG_MMIO
    + /* @} */
      /* End of Options ************************************************************/
    …
               * byte enables.
               */
    + #ifdef E1K_WITH_PREREG_MMIO
    +         pThis->addrMMReg = GCPhysAddress;
    +         if (GCPhysAddress == NIL_RTGCPHYS)
    +             rc = VINF_SUCCESS;
    +         else
    +         {
    +             Assert(!(GCPhysAddress & 7));
    +             rc = PDMDevHlpMMIOExMap(pPciDev->pDevIns, iRegion, GCPhysAddress);
    +         }
    + #else
              pThis->addrMMReg = GCPhysAddress; Assert(!(GCPhysAddress & 7));
              rc = PDMDevHlpMMIORegister(pPciDev->pDevIns, GCPhysAddress, cb, NULL /*pvUser*/,
      …
              rc = PDMDevHlpMMIORegisterRC(pPciDev->pDevIns, GCPhysAddress, cb, NIL_RTRCPTR /*pvUser*/,
                                           "e1kMMIOWrite", "e1kMMIORead");
    + #endif
              break;
    …
      if (RT_FAILURE(rc))
          return rc;
    + #ifdef E1K_WITH_PREREG_MMIO
    +     rc = PDMDevHlpMMIOExPreRegister(pDevIns, 0, E1K_MM_SIZE, IOMMMIO_FLAGS_READ_DWORD | IOMMMIO_FLAGS_WRITE_ONLY_DWORD, "E1000",
    +                                     NULL /*pvUserR3*/, e1kMMIOWrite, e1kMMIORead, NULL /*pfnFillR3*/,
    +                                     NIL_RTR0PTR /*pvUserR0*/, pThis->fR0Enabled ? "e1kMMIOWrite" : NULL,
    +                                     pThis->fR0Enabled ? "e1kMMIORead" : NULL, NULL /*pszFillR0*/,
    +                                     NIL_RTRCPTR /*pvUserRC*/, pThis->fRCEnabled ? "e1kMMIOWrite" : NULL,
    +                                     pThis->fRCEnabled ? "e1kMMIORead" : NULL, NULL /*pszFillRC*/);
    +     AssertLogRelRCReturn(rc, rc);
    + #endif
      /* Map our registers to IO space (region 2, see e1kConfigurePCI) */
      rc = PDMDevHlpPCIIORegionRegister(pDevIns, 2, E1K_IOPORT_SIZE, PCI_ADDRESS_SPACE_IO, e1kMap);
trunk/src/VBox/Devices/Network/DevPCNet.cpp
r63690 → r64115

      {
          /* drop this dummy region */
    -     rc = PDMDevHlpMMIO2Deregister(pDevIns, 2);
    +     rc = PDMDevHlpMMIOExDeregister(pDevIns, 2);
          pThis->fSharedRegion = false;
      }
trunk/src/VBox/Devices/Samples/DevPlayground.cpp
r63910 → r64115

      /**
       * @callback_method_impl{FNPCIIOREGIONMAP}
       */
    - static DECLCALLBACK(int) devPlaygroundMap(PPCIDEVICE pPciDev, int iRegion, RTGCPHYS GCPhysAddress, RTGCPHYS cb, PCIADDRESSSPACE enmType)
    - {
    -     NOREF(enmType);
    -     int rc;
    + static DECLCALLBACK(int)
    + devPlaygroundMap(PPCIDEVICE pPciDev, int iRegion, RTGCPHYS GCPhysAddress, RTGCPHYS cb, PCIADDRESSSPACE enmType)
    + {
    +     RT_NOREF(enmType, cb);

          switch (iRegion)
          {
              case 0:
    -             rc = PDMDevHlpMMIORegister(pPciDev->pDevIns, GCPhysAddress, cb, NULL,
    -                                        IOMMMIO_FLAGS_READ_PASSTHRU | IOMMMIO_FLAGS_WRITE_PASSTHRU,
    -                                        devPlaygroundMMIOWrite, devPlaygroundMMIORead, "PG-BAR0");
    -             break;
              case 2:
    -             rc = PDMDevHlpMMIORegister(pPciDev->pDevIns, GCPhysAddress, cb, NULL,
    -                                        IOMMMIO_FLAGS_READ_PASSTHRU | IOMMMIO_FLAGS_WRITE_PASSTHRU,
    -                                        devPlaygroundMMIOWrite, devPlaygroundMMIORead, "PG-BAR2");
    -             break;
    +             Assert(enmType == (PCIADDRESSSPACE)(PCI_ADDRESS_SPACE_MEM | PCI_ADDRESS_SPACE_BAR64));
    +             return PDMDevHlpMMIOExMap(pPciDev->pDevIns, iRegion, GCPhysAddress);
    +
              default:
                  /* We should never get here */
    -             AssertMsgFailed(("Invalid PCI region param in map callback"));
    -             rc = VERR_INTERNAL_ERROR;
    +             AssertMsgFailedReturn(("Invalid PCI region param in map callback"), VERR_INTERNAL_ERROR);
          }
    -     return rc;
    -
      }
    …
      if (RT_FAILURE(rc))
          return rc;
    + /* First region. */
      rc = PDMDevHlpPCIIORegionRegister(pDevIns, 0, 8*_1G64,
                                        (PCIADDRESSSPACE)(PCI_ADDRESS_SPACE_MEM | PCI_ADDRESS_SPACE_BAR64),
                                        devPlaygroundMap);
    - if (RT_FAILURE(rc))
    -     return rc;
    - rc = PDMDevHlpPCIIORegionRegister(pDevIns, 2, 8*_1G64,
    + AssertLogRelRCReturn(rc, rc);
    + rc = PDMDevHlpMMIOExPreRegister(pDevIns, 0, 8*_1G64, IOMMMIO_FLAGS_READ_PASSTHRU | IOMMMIO_FLAGS_WRITE_PASSTHRU, "PG-BAR0",
    +                                 NULL /*pvUser*/, devPlaygroundMMIOWrite, devPlaygroundMMIORead, NULL /*pfnFill*/,
    +                                 NIL_RTR0PTR /*pvUserR0*/, NULL /*pszWriteR0*/, NULL /*pszReadR0*/, NULL /*pszFillR0*/,
    +                                 NIL_RTRCPTR /*pvUserRC*/, NULL /*pszWriteRC*/, NULL /*pszReadRC*/, NULL /*pszFillRC*/);
    + AssertLogRelRCReturn(rc, rc);
    +
    + /* Second region. */
    + rc = PDMDevHlpPCIIORegionRegister(pDevIns, 2, 64*_1G64,
                                        (PCIADDRESSSPACE)(PCI_ADDRESS_SPACE_MEM | PCI_ADDRESS_SPACE_BAR64),
                                        devPlaygroundMap);
    - if (RT_FAILURE(rc))
    -     return rc;
    + AssertLogRelRCReturn(rc, rc);
    + rc = PDMDevHlpMMIOExPreRegister(pDevIns, 2, 64*_1G64, IOMMMIO_FLAGS_READ_PASSTHRU | IOMMMIO_FLAGS_WRITE_PASSTHRU, "PG-BAR2",
    +                                 NULL /*pvUser*/, devPlaygroundMMIOWrite, devPlaygroundMMIORead, NULL /*pfnFill*/,
    +                                 NIL_RTR0PTR /*pvUserR0*/, NULL /*pszWriteR0*/, NULL /*pszReadR0*/, NULL /*pszFillR0*/,
    +                                 NIL_RTRCPTR /*pvUserRC*/, NULL /*pszWriteRC*/, NULL /*pszReadRC*/, NULL /*pszFillRC*/);
    + AssertLogRelRCReturn(rc, rc);

      return VINF_SUCCESS;
trunk/src/VBox/Devices/VMMDev/VMMDev.cpp
r63690 → r64115

          pThis->GCPhysVMMDevRAM = GCPhysAddress;
          Assert(pThis->GCPhysVMMDevRAM == GCPhysAddress);
    -     rc = PDMDevHlpMMIO2Map(pPciDev->pDevIns, iRegion, GCPhysAddress);
    +     rc = PDMDevHlpMMIOExMap(pPciDev->pDevIns, iRegion, GCPhysAddress);
      }
      else
    …
          pThis->GCPhysVMMDevHeap = GCPhysAddress;
          Assert(pThis->GCPhysVMMDevHeap == GCPhysAddress);
    -     rc = PDMDevHlpMMIO2Map(pPciDev->pDevIns, iRegion, GCPhysAddress);
    +     rc = PDMDevHlpMMIOExMap(pPciDev->pDevIns, iRegion, GCPhysAddress);
          if (RT_SUCCESS(rc))
              rc = PDMDevHlpRegisterVMMDevHeap(pPciDev->pDevIns, GCPhysAddress, pThis->pVMMDevHeapR3, VMMDEV_HEAP_SIZE);
trunk/src/VBox/VMM/VMMAll/PGMAllHandler.cpp
r63465 → r64115

    + /**
    +  * Creates a physical access handler.
    +  *
    +  * @returns VBox status code.
    +  * @retval  VINF_SUCCESS when successfully installed.
    +  * @retval  VINF_PGM_GCPHYS_ALIASED when the shadow PTs could be updated because
    +  *          the guest page aliased or/and mapped by multiple PTs. A CR3 sync has been
    +  *          flagged together with a pool clearing.
    +  * @retval  VERR_PGM_HANDLER_PHYSICAL_CONFLICT if the range conflicts with an existing
    +  *          one. A debug assertion is raised.
    +  *
    +  * @param   pVM             The cross context VM structure.
    +  * @param   hType           The handler type registration handle.
    +  * @param   pvUserR3        User argument to the R3 handler.
    +  * @param   pvUserR0        User argument to the R0 handler.
    +  * @param   pvUserRC        User argument to the RC handler. This can be a value
    +  *                          less than 0x10000 or a (non-null) pointer that is
    +  *                          automatically relocated.
    +  * @param   pszDesc         Description of this handler. If NULL, the type
    +  *                          description will be used instead.
    +  * @param   ppPhysHandler   Where to return the access handler structure on
    +  *                          success.
    +  */
    + int pgmHandlerPhysicalExCreate(PVM pVM, PGMPHYSHANDLERTYPE hType, RTR3PTR pvUserR3, RTR0PTR pvUserR0, RTRCPTR pvUserRC,
    +                                R3PTRTYPE(const char *) pszDesc, PPGMPHYSHANDLER *ppPhysHandler)
    + {
    +     PPGMPHYSHANDLERTYPEINT pType = PGMPHYSHANDLERTYPEINT_FROM_HANDLE(pVM, hType);
    +     Log(("pgmHandlerPhysicalExCreate: pvUserR3=%RHv pvUserR0=%RHv pvUserGC=%RRv hType=%#x (%d, %s) pszDesc=%RHv:%s\n",
    +          pvUserR3, pvUserR0, pvUserRC, hType, pType->enmKind, R3STRING(pType->pszDesc), pszDesc, R3STRING(pszDesc)));
    +
    +     /*
    +      * Validate input.
    +      */
    +     AssertPtr(ppPhysHandler);
    +     AssertReturn(pType->u32Magic == PGMPHYSHANDLERTYPEINT_MAGIC, VERR_INVALID_HANDLE);
    +     AssertMsgReturn(    (RTRCUINTPTR)pvUserRC < 0x10000
    +                     ||  MMHyperR3ToRC(pVM, MMHyperRCToR3(pVM, pvUserRC)) == pvUserRC,
    +                     ("Not RC pointer! pvUserRC=%RRv\n", pvUserRC),
    +                     VERR_INVALID_PARAMETER);
    +     AssertMsgReturn(    (RTR0UINTPTR)pvUserR0 < 0x10000
    +                     ||  MMHyperR3ToR0(pVM, MMHyperR0ToR3(pVM, pvUserR0)) == pvUserR0,
    +                     ("Not R0 pointer! pvUserR0=%RHv\n", pvUserR0),
    +                     VERR_INVALID_PARAMETER);
    +
    +     /*
    +      * Allocate and initialize the new entry.
    +      */
    +     PPGMPHYSHANDLER pNew;
    +     int rc = MMHyperAlloc(pVM, sizeof(*pNew), 0, MM_TAG_PGM_HANDLERS, (void **)&pNew);
    +     if (RT_SUCCESS(rc))
    +     {
    +         pNew->Core.Key      = NIL_RTGCPHYS;
    +         pNew->Core.KeyLast  = NIL_RTGCPHYS;
    +         pNew->cPages        = 0;
    +         pNew->cAliasedPages = 0;
    +         pNew->cTmpOffPages  = 0;
    +         pNew->pvUserR3      = pvUserR3;
    +         pNew->pvUserR0      = pvUserR0;
    +         pNew->pvUserRC      = pvUserRC;
    +         pNew->hType         = hType;
    +         pNew->pszDesc       = pszDesc != NIL_RTR3PTR ? pszDesc : pType->pszDesc;
    +         pgmHandlerPhysicalTypeRetain(pVM, pType);
    +         *ppPhysHandler = pNew;
    +         return VINF_SUCCESS;
    +     }
    +
    +     return rc;
    + }
    +
    +
    + /**
    +  * Register an access handler for a physical range.
    +  *
    +  * @returns VBox status code.
    +  * @retval  VINF_SUCCESS when successfully installed.
    +  * @retval  VINF_PGM_GCPHYS_ALIASED when the shadow PTs could be updated because
    +  *          the guest page aliased or/and mapped by multiple PTs. A CR3 sync has been
    +  *          flagged together with a pool clearing.
    +  * @retval  VERR_PGM_HANDLER_PHYSICAL_CONFLICT if the range conflicts with an existing
    +  *          one. A debug assertion is raised.
    +  *
    +  * @param   pVM             The cross context VM structure.
    +  * @param   pPhysHandler    The physical handler.
    +  * @param   GCPhys          Start physical address.
    +  * @param   GCPhysLast      Last physical address. (inclusive)
    +  */
    + int pgmHandlerPhysicalExRegister(PVM pVM, PPGMPHYSHANDLER pPhysHandler, RTGCPHYS GCPhys, RTGCPHYS GCPhysLast)
    + {
    +     /*
    +      * Validate input.
    +      */
    +     AssertPtr(pPhysHandler);
    + #if defined(LOG_ENABLED) || defined(VBOX_STRICT)
    +     PPGMPHYSHANDLERTYPEINT pType = PGMPHYSHANDLERTYPEINT_FROM_HANDLE(pVM, pPhysHandler->hType);
    +     Assert(pType->u32Magic == PGMPHYSHANDLERTYPEINT_MAGIC);
    +     Log(("pgmHandlerPhysicalExRegister: GCPhys=%RGp GCPhysLast=%RGp hType=%#x (%d, %s) pszDesc=%RHv:%s\n",
    +          GCPhys, GCPhysLast, pPhysHandler->hType, pType->enmKind, R3STRING(pType->pszDesc), pPhysHandler->pszDesc, R3STRING(pPhysHandler->pszDesc)));
    + #endif
    +     AssertReturn(pPhysHandler->Core.Key == NIL_RTGCPHYS, VERR_WRONG_ORDER);
    +
    +     AssertMsgReturn(GCPhys < GCPhysLast, ("GCPhys >= GCPhysLast (%#x >= %#x)\n", GCPhys, GCPhysLast), VERR_INVALID_PARAMETER);
    +     switch (pType->enmKind)
    +     {
    +         case PGMPHYSHANDLERKIND_WRITE:
    +             break;
    +         case PGMPHYSHANDLERKIND_MMIO:
    +         case PGMPHYSHANDLERKIND_ALL:
    +             /* Simplification for PGMPhysRead, PGMR0Trap0eHandlerNPMisconfig and others: Full pages. */
    +             AssertMsgReturn(!(GCPhys & PAGE_OFFSET_MASK), ("%RGp\n", GCPhys), VERR_INVALID_PARAMETER);
    +             AssertMsgReturn((GCPhysLast & PAGE_OFFSET_MASK) == PAGE_OFFSET_MASK, ("%RGp\n", GCPhysLast), VERR_INVALID_PARAMETER);
    +             break;
    +         default:
    +             AssertMsgFailed(("Invalid input enmKind=%d!\n", pType->enmKind));
    +             return VERR_INVALID_PARAMETER;
    +     }
    +
    +     /*
    +      * We require the range to be within registered ram.
    +      * There is no apparent need to support ranges which cover more than one ram range.
    +      */
    +     PPGMRAMRANGE pRam = pgmPhysGetRange(pVM, GCPhys);
    +     if (   !pRam
    +         || GCPhysLast < pRam->GCPhys
    +         || GCPhys > pRam->GCPhysLast)
    +     {
    + #ifdef IN_RING3
    +         DBGFR3Info(pVM->pUVM, "phys", NULL, NULL);
    + #endif
    +         AssertMsgFailed(("No RAM range for %RGp-%RGp\n", GCPhys, GCPhysLast));
    +         return VERR_PGM_HANDLER_PHYSICAL_NO_RAM_RANGE;
    +     }
    +
    +     /*
    +      * Try insert into list.
    +      */
    +     pPhysHandler->Core.Key     = GCPhys;
    +     pPhysHandler->Core.KeyLast = GCPhysLast;
    +     pPhysHandler->cPages       = (GCPhysLast - (GCPhys & X86_PTE_PAE_PG_MASK) + PAGE_SIZE) >> PAGE_SHIFT;
    +
    +     pgmLock(pVM);
    +     if (RTAvlroGCPhysInsert(&pVM->pgm.s.CTX_SUFF(pTrees)->PhysHandlers, &pPhysHandler->Core))
    +     {
    +         int rc = pgmHandlerPhysicalSetRamFlagsAndFlushShadowPTs(pVM, pPhysHandler, pRam);
    +         if (rc == VINF_PGM_SYNC_CR3)
    +             rc = VINF_PGM_GCPHYS_ALIASED;
    +         pgmUnlock(pVM);
    +
    + #ifdef VBOX_WITH_REM
    + # ifndef IN_RING3
    +         REMNotifyHandlerPhysicalRegister(pVM, pType->enmKind, GCPhys, GCPhysLast - GCPhys + 1, !!pType->pfnHandlerR3);
    + # else
    +         REMR3NotifyHandlerPhysicalRegister(pVM, pType->enmKind, GCPhys, GCPhysLast - GCPhys + 1, !!pType->pfnHandlerR3);
    + # endif
    + #endif
    +         if (rc != VINF_SUCCESS)
    +             Log(("PGMHandlerPhysicalRegisterEx: returns %Rrc (%RGp-%RGp)\n", rc, GCPhys, GCPhysLast));
    +         return rc;
    +     }
    +     pgmUnlock(pVM);
    +
    +     pPhysHandler->Core.Key     = NIL_RTGCPHYS;
    +     pPhysHandler->Core.KeyLast = NIL_RTGCPHYS;
    +
    + #if defined(IN_RING3) && defined(VBOX_STRICT)
    +     DBGFR3Info(pVM->pUVM, "handlers", "phys nostats", NULL);
    + #endif
    +     AssertMsgFailed(("Conflict! GCPhys=%RGp GCPhysLast=%RGp pszDesc=%s/%s\n",
    +                      GCPhys, GCPhysLast, R3STRING(pPhysHandler->pszDesc), R3STRING(pType->pszDesc)));
    +     return VERR_PGM_HANDLER_PHYSICAL_CONFLICT;
    + }

      /**
    …
           GCPhys, GCPhysLast, pvUserR3, pvUserR0, pvUserRC, hType, pType->enmKind, R3STRING(pType->pszDesc), pszDesc, R3STRING(pszDesc)));

    -     /*
    -      * Validate input.
    -      */
    -     AssertReturn(pType->u32Magic == PGMPHYSHANDLERTYPEINT_MAGIC, VERR_INVALID_HANDLE);
    -     AssertMsgReturn(GCPhys < GCPhysLast, ("GCPhys >= GCPhysLast (%#x >= %#x)\n", GCPhys, GCPhysLast), VERR_INVALID_PARAMETER);
    -     switch (pType->enmKind)
    -     {
    -         case PGMPHYSHANDLERKIND_WRITE:
    -             break;
    -         case PGMPHYSHANDLERKIND_MMIO:
    -         case PGMPHYSHANDLERKIND_ALL:
    -             /* Simplification for PGMPhysRead, PGMR0Trap0eHandlerNPMisconfig and others: Full pages. */
    -             AssertMsgReturn(!(GCPhys & PAGE_OFFSET_MASK), ("%RGp\n", GCPhys), VERR_INVALID_PARAMETER);
    -             AssertMsgReturn((GCPhysLast & PAGE_OFFSET_MASK) == PAGE_OFFSET_MASK, ("%RGp\n", GCPhysLast), VERR_INVALID_PARAMETER);
    -             break;
    -         default:
    -             AssertMsgFailed(("Invalid input enmKind=%d!\n", pType->enmKind));
    -             return VERR_INVALID_PARAMETER;
    -     }
    -     AssertMsgReturn(    (RTRCUINTPTR)pvUserRC < 0x10000
    -                     ||  MMHyperR3ToRC(pVM, MMHyperRCToR3(pVM, pvUserRC)) == pvUserRC,
    -                     ("Not RC pointer! pvUserRC=%RRv\n", pvUserRC),
    -                     VERR_INVALID_PARAMETER);
    -     AssertMsgReturn(    (RTR0UINTPTR)pvUserR0 < 0x10000
    -                     ||  MMHyperR3ToR0(pVM, MMHyperR0ToR3(pVM, pvUserR0)) == pvUserR0,
    -                     ("Not R0 pointer! pvUserR0=%RHv\n", pvUserR0),
    -                     VERR_INVALID_PARAMETER);
    -
    -     /*
    -      * We require the range to be within registered ram.
    -      * There is no apparent need to support ranges which cover more than one ram range.
    -      */
    -     PPGMRAMRANGE pRam = pgmPhysGetRange(pVM, GCPhys);
    -     if (   !pRam
    -         || GCPhysLast < pRam->GCPhys
    -         || GCPhys > pRam->GCPhysLast)
    -     {
    - #ifdef IN_RING3
    -         DBGFR3Info(pVM->pUVM, "phys", NULL, NULL);
    - #endif
    -         AssertMsgFailed(("No RAM range for %RGp-%RGp\n", GCPhys, GCPhysLast));
    -         return VERR_PGM_HANDLER_PHYSICAL_NO_RAM_RANGE;
    -     }
    -
    -     /*
    -      * Allocate and initialize the new entry.
    -      */
          PPGMPHYSHANDLER pNew;
    -     int rc = MMHyperAlloc(pVM, sizeof(*pNew), 0, MM_TAG_PGM_HANDLERS, (void **)&pNew);
    -     if (RT_FAILURE(rc))
    -         return rc;
    -
    -     pNew->Core.Key      = GCPhys;
    -     pNew->Core.KeyLast  = GCPhysLast;
    -     pNew->cPages        = (GCPhysLast - (GCPhys & X86_PTE_PAE_PG_MASK) + PAGE_SIZE) >> PAGE_SHIFT;
    -     pNew->cAliasedPages = 0;
    -     pNew->cTmpOffPages  = 0;
    -     pNew->pvUserR3      = pvUserR3;
    -     pNew->pvUserR0      = pvUserR0;
    -     pNew->pvUserRC      = pvUserRC;
    -     pNew->hType         = hType;
    -     pNew->pszDesc       = pszDesc != NIL_RTR3PTR ? pszDesc : pType->pszDesc;
    -     pgmHandlerPhysicalTypeRetain(pVM, pType);
    -
    -     pgmLock(pVM);
    -
    -     /*
    -      * Try insert into list.
    -      */
    -     if (RTAvlroGCPhysInsert(&pVM->pgm.s.CTX_SUFF(pTrees)->PhysHandlers, &pNew->Core))
    -     {
    -         rc = pgmHandlerPhysicalSetRamFlagsAndFlushShadowPTs(pVM, pNew, pRam);
    -         if (rc == VINF_PGM_SYNC_CR3)
    -             rc = VINF_PGM_GCPHYS_ALIASED;
    -         pgmUnlock(pVM);
    - #ifdef VBOX_WITH_REM
    - # ifndef IN_RING3
    -         REMNotifyHandlerPhysicalRegister(pVM, pType->enmKind, GCPhys, GCPhysLast - GCPhys + 1, !!pType->pfnHandlerR3);
    - # else
    -         REMR3NotifyHandlerPhysicalRegister(pVM, pType->enmKind, GCPhys, GCPhysLast - GCPhys + 1, !!pType->pfnHandlerR3);
    - # endif
    - #endif
    -         if (rc != VINF_SUCCESS)
    -             Log(("PGMHandlerPhysicalRegisterEx: returns %Rrc (%RGp-%RGp)\n", rc, GCPhys, GCPhysLast));
    -         return rc;
    -     }
    -
    -     pgmUnlock(pVM);
    -
    - #if defined(IN_RING3) && defined(VBOX_STRICT)
    -     DBGFR3Info(pVM->pUVM, "handlers", "phys nostats", NULL);
    - #endif
    -     AssertMsgFailed(("Conflict! GCPhys=%RGp GCPhysLast=%RGp pszDesc=%s/%s\n",
    -                      GCPhys, GCPhysLast, R3STRING(pszDesc), R3STRING(pType->pszDesc)));
    -     pgmHandlerPhysicalTypeRelease(pVM, pType);
    -     MMHyperFree(pVM, pNew);
    -     return VERR_PGM_HANDLER_PHYSICAL_CONFLICT;
    +     int rc = pgmHandlerPhysicalExCreate(pVM, hType, pvUserR3, pvUserR0, pvUserRC, pszDesc, &pNew);
    +     if (RT_SUCCESS(rc))
    +     {
    +         rc = pgmHandlerPhysicalExRegister(pVM, pNew, GCPhys, GCPhysLast);
    +         if (RT_SUCCESS(rc))
    +             return rc;
    +         pgmHandlerPhysicalExDestroy(pVM, pNew);
    +     }
    +     return rc;
      }
    …
      /**
    -  * Register a physical page access handler.
    +  * Deregister a physical page access handler.
       *
       * @returns VBox status code.
    -  * @param   pVM         The cross context VM structure.
    -  * @param   GCPhys      Start physical address.
    -  */
    - VMMDECL(int) PGMHandlerPhysicalDeregister(PVM pVM, RTGCPHYS GCPhys)
    - {
    -     /*
    -      * Find the handler.
    +  * @param   pVM             The cross context VM structure.
    +  * @param   pPhysHandler    The handler to deregister (but not free).
    +  */
    + int pgmHandlerPhysicalExDeregister(PVM pVM, PPGMPHYSHANDLER pPhysHandler)
    + {
    +     LogFlow(("pgmHandlerPhysicalExDeregister: Removing Range %RGp-%RGp %s\n",
    +              pPhysHandler->Core.Key, pPhysHandler->Core.KeyLast, R3STRING(pPhysHandler->pszDesc)));
    +     AssertReturn(pPhysHandler->Core.Key != NIL_RTGCPHYS, VERR_PGM_HANDLER_NOT_FOUND);
    +
    +     /*
    +      * Remove the handler from the tree.
           */
          pgmLock(pVM);
    -     PPGMPHYSHANDLER pCur = (PPGMPHYSHANDLER)RTAvlroGCPhysRemove(&pVM->pgm.s.CTX_SUFF(pTrees)->PhysHandlers, GCPhys);
    -     if (pCur)
    -     {
    -         LogFlow(("PGMHandlerPhysicalDeregister: Removing Range %RGp-%RGp %s\n", pCur->Core.Key, pCur->Core.KeyLast, R3STRING(pCur->pszDesc)));
    -
    +     PPGMPHYSHANDLER pRemoved = (PPGMPHYSHANDLER)RTAvlroGCPhysRemove(&pVM->pgm.s.CTX_SUFF(pTrees)->PhysHandlers,
    +                                                                     pPhysHandler->Core.Key);
    +     if (pRemoved == pPhysHandler)
    +     {
              /*
               * Clear the page bits, notify the REM about this change and clear
               * the cache.
               */
    -         pgmHandlerPhysicalResetRamFlags(pVM, pCur);
    -         pgmHandlerPhysicalDeregisterNotifyREM(pVM, pCur);
    +         pgmHandlerPhysicalResetRamFlags(pVM, pPhysHandler);
    +         pgmHandlerPhysicalDeregisterNotifyREM(pVM, pPhysHandler);
              pVM->pgm.s.pLastPhysHandlerR0 = 0;
              pVM->pgm.s.pLastPhysHandlerR3 = 0;
              pVM->pgm.s.pLastPhysHandlerRC = 0;
    -         PGMHandlerPhysicalTypeRelease(pVM, pCur->hType);
    -         MMHyperFree(pVM, pCur);
    +
    +         pPhysHandler->Core.Key     = NIL_RTGCPHYS;
    +         pPhysHandler->Core.KeyLast = NIL_RTGCPHYS;
    +
              pgmUnlock(pVM);
    +
              return VINF_SUCCESS;
          }
    +
    +     /*
    +      * Both of the failure conditions here are considered internal processing
    +      * errors because they can only be caused by race conditions or corruption.
    +      * If we ever need to handle concurrent deregistration, we have to move
    +      * the NIL_RTGCPHYS check inside the PGM lock.
    +      */
    +     if (pRemoved)
    +         RTAvlroGCPhysInsert(&pVM->pgm.s.CTX_SUFF(pTrees)->PhysHandlers, &pRemoved->Core);
    +
    +     pgmUnlock(pVM);
    +
    +     if (!pRemoved)
    +         AssertMsgFailed(("Didn't find range starting at %RGp in the tree!\n", pPhysHandler->Core.Key));
    +     else
    +         AssertMsgFailed(("Found different handle at %RGp in the tree: got %p instead of %p\n",
    +                          pPhysHandler->Core.Key, pRemoved, pPhysHandler));
    +     return VERR_PGM_HANDLER_IPE_1;
    + }
    +
    +
    + /**
    +  * Destroys (frees) a physical handler.
    +  *
    +  * The caller must deregister it before destroying it!
    +  *
    +  * @returns VBox status code.
    +  * @param   pVM         The cross context VM structure.
    +  * @param   pHandler    The handler to free. NULL if ignored.
    +  */
    + int pgmHandlerPhysicalExDestroy(PVM pVM, PPGMPHYSHANDLER pHandler)
    + {
    +     if (pHandler)
    +     {
    +         AssertPtr(pHandler);
    +         AssertReturn(pHandler->Core.Key == NIL_RTGCPHYS, VERR_WRONG_ORDER);
    +         PGMHandlerPhysicalTypeRelease(pVM, pHandler->hType);
    +         MMHyperFree(pVM, pHandler);
    +     }
    +     return VINF_SUCCESS;
    + }
    +
    +
    + /**
    +  * Deregister a physical page access handler.
    +  *
    +  * @returns VBox status code.
    +  * @param   pVM         The cross context VM structure.
    +  * @param   GCPhys      Start physical address.
    +  */
    + VMMDECL(int) PGMHandlerPhysicalDeregister(PVM pVM, RTGCPHYS GCPhys)
    + {
    +     /*
    +      * Find the handler.
    +      */
    +     pgmLock(pVM);
    +     PPGMPHYSHANDLER pRemoved = (PPGMPHYSHANDLER)RTAvlroGCPhysRemove(&pVM->pgm.s.CTX_SUFF(pTrees)->PhysHandlers, GCPhys);
    +     if (pRemoved)
    +     {
    +         LogFlow(("PGMHandlerPhysicalDeregister: Removing Range %RGp-%RGp %s\n",
    +                  pRemoved->Core.Key, pRemoved->Core.KeyLast, R3STRING(pRemoved->pszDesc)));
    +
    +         /*
    +          * Clear the page bits, notify the REM about this change and clear
    +          * the cache.
    +          */
    +         pgmHandlerPhysicalResetRamFlags(pVM, pRemoved);
    +         pgmHandlerPhysicalDeregisterNotifyREM(pVM, pRemoved);
    +         pVM->pgm.s.pLastPhysHandlerR0 = 0;
    +         pVM->pgm.s.pLastPhysHandlerR3 = 0;
    +         pVM->pgm.s.pLastPhysHandlerRC = 0;
    +
    +         pgmUnlock(pVM);
    +
    +         pRemoved->Core.Key = NIL_RTGCPHYS;
    +         pgmHandlerPhysicalExDestroy(pVM, pRemoved);
    +         return VINF_SUCCESS;
    +     }
    +
      pgmUnlock(pVM);
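The point of splitting creation from registration is that the two halves can now be driven independently: a pre-registered MMIO region creates its handler once at construction and then registers/deregisters it on every map/unmap without reallocating. A hedged sketch of the intended pairing, using the internal functions added above (VMM-internal code only, error handling elided):

    PPGMPHYSHANDLER pHandler;
    rc = pgmHandlerPhysicalExCreate(pVM, hType, pvUserR3, pvUserR0, pvUserRC,
                                    "Example region", &pHandler);             /* once, at construction */

    rc = pgmHandlerPhysicalExRegister(pVM, pHandler, GCPhys, GCPhys + cb - 1); /* on map   */
    rc = pgmHandlerPhysicalExDeregister(pVM, pHandler);                        /* on unmap */

    pgmHandlerPhysicalExDestroy(pVM, pHandler);   /* only legal once deregistered */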
trunk/src/VBox/VMM/VMMAll/PGMAllPhys.cpp
r62606 → r64115

                                 pPage->s.idPage, pPage->s.uStateY),
                                 VERR_PGM_PHYS_PAGE_MAP_MMIO2_IPE);
    - PPGMMMIO2RANGE pMmio2Range = pVM->pgm.s.CTX_SUFF(apMmio2Ranges)[idMmio2 - 1];
    + PPGMREGMMIORANGE pMmio2Range = pVM->pgm.s.CTX_SUFF(apMmio2Ranges)[idMmio2 - 1];
      AssertLogRelReturn(pMmio2Range, VERR_PGM_PHYS_PAGE_MAP_MMIO2_IPE);
      AssertLogRelReturn(pMmio2Range->idMmio2 == idMmio2, VERR_PGM_PHYS_PAGE_MAP_MMIO2_IPE);
trunk/src/VBox/VMM/VMMR3/GIM.cpp
r63648 → r64115

      /*
       * Map the MMIO2 region over the specified guest-physical address.
       */
    - int rc = PDMDevHlpMMIO2Map(pDevIns, pRegion->iRegion, GCPhysRegion);
    + int rc = PDMDevHlpMMIOExMap(pDevIns, pRegion->iRegion, GCPhysRegion);
      if (RT_SUCCESS(rc))
      {
trunk/src/VBox/VMM/VMMR3/IOM.cpp
r63682 → r64115

      /**
    +  * Pre-registers a MMIO region.
    +  *
    +  * The rest of the manipulation of this region goes thru the PGMPhysMMIOEx*
    +  * APIs: PGMR3PhysMMIOExMap, PGMR3PhysMMIOExUnmap, PGMR3PhysMMIOExDeregister.
    +  *
    +  * @returns VBox status code.
    +  * @param   pVM                 Pointer to the cross context VM structure.
    +  * @param   pDevIns             The device.
    +  * @param   iRegion             The region number.
    +  * @param   cbRegion            The size of the MMIO region. Must be a multiple
    +  *                              of X86_PAGE_SIZE.
    +  * @param   fFlags              Flags, see IOMMMIO_FLAGS_XXX.
    +  * @param   pszDesc             Pointer to description string. This must not be
    +  *                              freed.
    +  * @param   pvUserR3            Ring-3 user pointer.
    +  * @param   pfnWriteCallbackR3  Callback for handling writes, ring-3. Mandatory.
    +  * @param   pfnReadCallbackR3   Callback for handling reads, ring-3. Mandatory.
    +  * @param   pfnFillCallbackR3   Callback for handling fills, ring-3. Optional.
    +  * @param   pvUserR0            Ring-0 user pointer.
    +  * @param   pfnWriteCallbackR0  Callback for handling writes, ring-0. Optional.
    +  * @param   pfnReadCallbackR0   Callback for handling reads, ring-0. Optional.
    +  * @param   pfnFillCallbackR0   Callback for handling fills, ring-0. Optional.
    +  * @param   pvUserRC            Raw-mode context user pointer. This will be
    +  *                              relocated with the hypervisor guest mapping if
    +  *                              the unsigned integer value is 0x10000 or above.
    +  * @param   pfnWriteCallbackRC  Callback for handling writes, RC. Optional.
    +  * @param   pfnReadCallbackRC   Callback for handling reads, RC. Optional.
    +  * @param   pfnFillCallbackRC   Callback for handling fills, RC. Optional.
    +  */
    + VMMR3_INT_DECL(int) IOMR3MmioExPreRegister(PVM pVM, PPDMDEVINS pDevIns, uint32_t iRegion, RTGCPHYS cbRegion,
    +                                            uint32_t fFlags, const char *pszDesc,
    +                                            RTR3PTR pvUserR3,
    +                                            R3PTRTYPE(PFNIOMMMIOWRITE) pfnWriteCallbackR3,
    +                                            R3PTRTYPE(PFNIOMMMIOREAD)  pfnReadCallbackR3,
    +                                            R3PTRTYPE(PFNIOMMMIOFILL)  pfnFillCallbackR3,
    +                                            RTR0PTR pvUserR0,
    +                                            R0PTRTYPE(PFNIOMMMIOWRITE) pfnWriteCallbackR0,
    +                                            R0PTRTYPE(PFNIOMMMIOREAD)  pfnReadCallbackR0,
    +                                            R0PTRTYPE(PFNIOMMMIOFILL)  pfnFillCallbackR0,
    +                                            RTRCPTR pvUserRC,
    +                                            RCPTRTYPE(PFNIOMMMIOWRITE) pfnWriteCallbackRC,
    +                                            RCPTRTYPE(PFNIOMMMIOREAD)  pfnReadCallbackRC,
    +                                            RCPTRTYPE(PFNIOMMMIOFILL)  pfnFillCallbackRC)
    + {
    +     LogFlow(("IOMR3MmioExPreRegister: pDevIns=%p iRegion=%u cbRegion=%RGp fFlags=%#x pszDesc=%s\n"
    +              "                        pvUserR3=%RHv pfnWriteCallbackR3=%RHv pfnReadCallbackR3=%RHv pfnFillCallbackR3=%RHv\n"
    +              "                        pvUserR0=%RHv pfnWriteCallbackR0=%RHv pfnReadCallbackR0=%RHv pfnFillCallbackR0=%RHv\n"
    +              "                        pvUserRC=%RRv pfnWriteCallbackRC=%RRv pfnReadCallbackRC=%RRv pfnFillCallbackRC=%RRv\n",
    +              pDevIns, iRegion, cbRegion, fFlags, pszDesc,
    +              pvUserR3, pfnWriteCallbackR3, pfnReadCallbackR3, pfnFillCallbackR3,
    +              pvUserR0, pfnWriteCallbackR0, pfnReadCallbackR0, pfnFillCallbackR0,
    +              pvUserRC, pfnWriteCallbackRC, pfnReadCallbackRC, pfnFillCallbackRC));
    +
    +     /*
    +      * Validate input.
    +      */
    +     AssertReturn(cbRegion > 0, VERR_INVALID_PARAMETER);
    +     AssertReturn(RT_ALIGN_T(cbRegion, X86_PAGE_SIZE, RTGCPHYS), VERR_INVALID_PARAMETER);
    +     AssertMsgReturn(   !(fFlags & ~IOMMMIO_FLAGS_VALID_MASK)
    +                     && (fFlags & IOMMMIO_FLAGS_READ_MODE)  <= IOMMMIO_FLAGS_READ_DWORD_QWORD
    +                     && (fFlags & IOMMMIO_FLAGS_WRITE_MODE) <= IOMMMIO_FLAGS_WRITE_ONLY_DWORD_QWORD,
    +                     ("%#x\n", fFlags),
    +                     VERR_INVALID_PARAMETER);
    +     AssertPtrReturn(pfnWriteCallbackR3, VERR_INVALID_POINTER);
    +     AssertPtrReturn(pfnReadCallbackR3, VERR_INVALID_POINTER);
    +
    +     /*
    +      * Allocate new range record and initialize it.
    +      */
    +     PIOMMMIORANGE pRange;
    +     int rc = MMHyperAlloc(pVM, sizeof(*pRange), 0, MM_TAG_IOM, (void **)&pRange);
    +     if (RT_SUCCESS(rc))
    +     {
    +         pRange->Core.Key     = NIL_RTGCPHYS;
    +         pRange->Core.KeyLast = NIL_RTGCPHYS;
    +         pRange->GCPhys       = NIL_RTGCPHYS;
    +         pRange->cb           = cbRegion;
    +         pRange->cRefs        = 1; /* The PGM reference. */
    +         pRange->fFlags       = fFlags;
    +
    +         pRange->pvUserR3           = pvUserR3;
    +         pRange->pDevInsR3          = pDevIns;
    +         pRange->pfnReadCallbackR3  = pfnReadCallbackR3;
    +         pRange->pfnWriteCallbackR3 = pfnWriteCallbackR3;
    +         pRange->pfnFillCallbackR3  = pfnFillCallbackR3;
    +         pRange->pszDesc            = pszDesc;
    +
    +         if (pfnReadCallbackR0 || pfnWriteCallbackR0 || pfnFillCallbackR0)
    +         {
    +             pRange->pvUserR0           = pvUserR0;
    +             pRange->pDevInsR0          = MMHyperCCToR0(pVM, pDevIns);
    +             pRange->pfnReadCallbackR0  = pfnReadCallbackR0;
    +             pRange->pfnWriteCallbackR0 = pfnWriteCallbackR0;
    +             pRange->pfnFillCallbackR0  = pfnFillCallbackR0;
    +         }
    +
    +         if (pfnReadCallbackRC || pfnWriteCallbackRC || pfnFillCallbackRC)
    +         {
    +             pRange->pvUserRC           = pvUserRC;
    +             pRange->pDevInsRC          = MMHyperCCToRC(pVM, pDevIns);
    +             pRange->pfnReadCallbackRC  = pfnReadCallbackRC;
    +             pRange->pfnWriteCallbackRC = pfnWriteCallbackRC;
    +             pRange->pfnFillCallbackRC  = pfnFillCallbackRC;
    +         }
    +
    +         /*
    +          * Try register it with PGM. PGM will call us back when it's mapped in
    +          * and out of the guest address space, and once it's destroyed.
    +          */
    +         rc = PGMR3PhysMMIOExPreRegister(pVM, pDevIns, iRegion, cbRegion, pVM->iom.s.hMmioHandlerType,
    +                                         pRange, MMHyperR3ToR0(pVM, pRange), MMHyperR3ToRC(pVM, pRange), pszDesc);
    +         if (RT_SUCCESS(rc))
    +             return VINF_SUCCESS;
    +
    +         MMHyperFree(pVM, pRange);
    +     }
    +     if (pDevIns->iInstance > 0)
    +         MMR3HeapFree((void *)pszDesc);
    +     return rc;
    + }
    +
    +
    + /**
    +  * Notification from PGM that the pre-registered MMIO region has been mapped into
    +  * user address space.
    +  *
    +  * @returns VBox status code.
    +  * @param   pVM     Pointer to the cross context VM structure.
    +  * @param   pvUser  The pvUserR3 argument of PGMR3PhysMMIOExPreRegister.
    +  * @param   GCPhys  The mapping address.
    +  * @remarks Called while owning the PGM lock.
    +  */
    + VMMR3_INT_DECL(int) IOMR3MmioExNotifyMapped(PVM pVM, void *pvUser, RTGCPHYS GCPhys)
    + {
    +     PIOMMMIORANGE pRange = (PIOMMMIORANGE)pvUser;
    +     AssertReturn(pRange->GCPhys == NIL_RTGCPHYS, VERR_IOM_MMIO_IPE_1);
    +
    +     IOM_LOCK_EXCL(pVM);
    +     Assert(pRange->GCPhys == NIL_RTGCPHYS);
    +     pRange->GCPhys       = GCPhys;
    +     pRange->Core.Key     = GCPhys;
    +     pRange->Core.KeyLast = GCPhys + pRange->cb - 1;
    +     if (RTAvlroGCPhysInsert(&pVM->iom.s.pTreesR3->MMIOTree, &pRange->Core))
    +     {
    +         iomR3FlushCache(pVM);
    +         IOM_UNLOCK_EXCL(pVM);
    +         return VINF_SUCCESS;
    +     }
    +     IOM_UNLOCK_EXCL(pVM);
    +
    +     AssertLogRelMsgFailed(("RTAvlroGCPhysInsert failed on %RGp..%RGp - %s\n", pRange->Core.Key, pRange->Core.KeyLast, pRange->pszDesc));
    +     pRange->GCPhys       = NIL_RTGCPHYS;
    +     pRange->Core.Key     = NIL_RTGCPHYS;
    +     pRange->Core.KeyLast = NIL_RTGCPHYS;
    +     return VERR_IOM_MMIO_IPE_2;
    + }
    +
    +
    + /**
    +  * Notification from PGM that the pre-registered MMIO region has been unmapped
    +  * from user address space.
    +  *
    +  * @param   pVM     Pointer to the cross context VM structure.
    +  * @param   pvUser  The pvUserR3 argument of PGMR3PhysMMIOExPreRegister.
    +  * @param   GCPhys  The mapping address.
    +  * @remarks Called while owning the PGM lock.
    +  */
    + VMMR3_INT_DECL(void) IOMR3MmioExNotifyUnmapped(PVM pVM, void *pvUser, RTGCPHYS GCPhys)
    + {
    +     PIOMMMIORANGE pRange = (PIOMMMIORANGE)pvUser;
    +     AssertLogRelReturnVoid(pRange->GCPhys == GCPhys);
    +
    +     IOM_LOCK_EXCL(pVM);
    +     Assert(pRange->GCPhys == GCPhys);
    +     PIOMMMIORANGE pRemoved = (PIOMMMIORANGE)RTAvlroGCPhysRemove(&pVM->iom.s.pTreesR3->MMIOTree, GCPhys);
    +     if (pRemoved == pRange)
    +     {
    +         pRange->GCPhys       = NIL_RTGCPHYS;
    +         pRange->Core.Key     = NIL_RTGCPHYS;
    +         pRange->Core.KeyLast = NIL_RTGCPHYS;
    +         iomR3FlushCache(pVM);
    +         IOM_UNLOCK_EXCL(pVM);
    +     }
    +     else
    +     {
    +         if (pRemoved)
    +             RTAvlroGCPhysInsert(&pVM->iom.s.pTreesR3->MMIOTree, &pRemoved->Core);
    +         IOM_UNLOCK_EXCL(pVM);
    +         AssertLogRelMsgFailed(("RTAvlroGCPhysRemove returned %p instead of %p for %RGp (%s)\n", pRemoved, pRange, pRange->pszDesc));
    +     }
    + }
    +
    +
    + /**
    +  * Notification from PGM that the pre-registered MMIO region has been
    +  * deregistered.
    +  *
    +  * @param   pVM     Pointer to the cross context VM structure.
    +  * @param   pvUser  The pvUserR3 argument of PGMR3PhysMMIOExPreRegister.
    +  * @remarks Called while owning the PGM lock.
    +  */
    + VMMR3_INT_DECL(void) IOMR3MmioExNotifyDeregistered(PVM pVM, void *pvUser)
    + {
    +     PIOMMMIORANGE pRange = (PIOMMMIORANGE)pvUser;
    +     AssertLogRelReturnVoid(pRange->GCPhys == NIL_RTGCPHYS);
    +     iomMmioReleaseRange(pVM, pRange);
    + }
    +
    +
    + /**
       * Handles the unlikely and probably fatal merge cases.
       *
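For readers tracing the control flow: IOM hands its pRange pointer to PGM as the handler pvUser, and PGM drives the three notification entry points as the region changes state. A rough sequence, my reading of the code above rather than a quote from the changeset:

    /* 1. Device construction:
     *      IOMR3MmioExPreRegister() -> PGMR3PhysMMIOExPreRegister(..., pvUserR3 = pRange, ...)
     * 2. Guest maps the BAR:
     *      PGMR3PhysMMIOExMap() -> IOMR3MmioExNotifyMapped(pVM, pRange, GCPhys)
     *        (inserts pRange into the MMIO AVL tree and flushes the lookup cache)
     * 3. Guest unmaps or moves the BAR:
     *      PGMR3PhysMMIOExUnmap() -> IOMR3MmioExNotifyUnmapped(pVM, pRange, GCPhys)
     * 4. Device destruction:
     *      PGMR3PhysMMIOExDeregister() -> IOMR3MmioExNotifyDeregistered(pVM, pRange)
     *        (drops the PGM reference via iomMmioReleaseRange) */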
trunk/src/VBox/VMM/VMMR3/PDM.cpp
r62643 → r64115

      pdmR3ThreadDestroyDevice(pVM, pDevIns);
      PDMR3QueueDestroyDevice(pVM, pDevIns);
    - PGMR3PhysMMIO2Deregister(pVM, pDevIns, UINT32_MAX);
    + PGMR3PhysMMIOExDeregister(pVM, pDevIns, UINT32_MAX);
      #ifdef VBOX_WITH_PDM_ASYNC_COMPLETION
      pdmR3AsyncCompletionTemplateDestroyDevice(pVM, pDevIns);
trunk/src/VBox/VMM/VMMR3/PDMDevHlp.cpp
r63685 → r64115

      /**
    -  * @copydoc PDMDEVHLPR3::pfnMMIO2Deregister
    -  */
    - static DECLCALLBACK(int) pdmR3DevHlp_MMIO2Deregister(PPDMDEVINS pDevIns, uint32_t iRegion)
    +  * @interface_method_impl{PDMDEVHLPR3,pfnMMIOExPreRegister}
    +  */
    + static DECLCALLBACK(int)
    + pdmR3DevHlp_MMIOExPreRegister(PPDMDEVINS pDevIns, uint32_t iRegion, RTGCPHYS cbRegion, uint32_t fFlags, const char *pszDesc,
    +                               RTHCPTR pvUser, PFNIOMMMIOWRITE pfnWrite, PFNIOMMMIOREAD pfnRead, PFNIOMMMIOFILL pfnFill,
    +                               RTR0PTR pvUserR0, const char *pszWriteR0, const char *pszReadR0, const char *pszFillR0,
    +                               RTRCPTR pvUserRC, const char *pszWriteRC, const char *pszReadRC, const char *pszFillRC)
    + {
    +     PDMDEV_ASSERT_DEVINS(pDevIns);
    +     PVM pVM = pDevIns->Internal.s.pVMR3;
    +     VM_ASSERT_EMT(pVM);
    +     LogFlow(("pdmR3DevHlp_MMIOExPreRegister: caller='%s'/%d: iRegion=%#x cbRegion=%#RGp fFlags=%RX32 pszDesc=%p:{%s}\n"
    +              "                               pvUser=%p pfnWrite=%p pfnRead=%p pfnFill=%p\n"
    +              "                               pvUserR0=%p pszWriteR0=%s pszReadR0=%s pszFillR0=%s\n"
    +              "                               pvUserRC=%p pszWriteRC=%s pszReadRC=%s pszFillRC=%s\n",
    +              pDevIns->pReg->szName, pDevIns->iInstance, iRegion, cbRegion, fFlags, pszDesc, pszDesc,
    +              pvUser, pfnWrite, pfnRead, pfnFill,
    +              pvUserR0, pszWriteR0, pszReadR0, pszFillR0,
    +              pvUserRC, pszWriteRC, pszReadRC, pszFillRC));
    +
    +     /*
    +      * Resolve the functions.
    +      */
    +     AssertLogRelReturn(   (!pszWriteR0 && !pszReadR0 && !pszFillR0)
    +                        || (pDevIns->pReg->szR0Mod[0] && (pDevIns->pReg->fFlags & PDM_DEVREG_FLAGS_R0)),
    +                        VERR_INVALID_PARAMETER);
    +     AssertLogRelReturn(   (!pszWriteRC && !pszReadRC && !pszFillRC)
    +                        || (pDevIns->pReg->szRCMod[0] && (pDevIns->pReg->fFlags & PDM_DEVREG_FLAGS_RC)),
    +                        VERR_INVALID_PARAMETER);
    +
    +     /* Ring-0 */
    +     int rc;
    +     R0PTRTYPE(PFNIOMMMIOWRITE) pfnWriteR0 = 0;
    +     if (pszWriteR0)
    +     {
    +         rc = pdmR3DevGetSymbolR0Lazy(pDevIns, pszWriteR0, &pfnWriteR0);
    +         AssertLogRelMsgRCReturn(rc, ("pszWriteR0=%s rc=%Rrc\n", pszWriteR0, rc), rc);
    +     }
    +
    +     R0PTRTYPE(PFNIOMMMIOREAD) pfnReadR0 = 0;
    +     if (pszReadR0)
    +     {
    +         rc = pdmR3DevGetSymbolR0Lazy(pDevIns, pszReadR0, &pfnReadR0);
    +         AssertLogRelMsgRCReturn(rc, ("pszReadR0=%s rc=%Rrc\n", pszReadR0, rc), rc);
    +     }
    +     R0PTRTYPE(PFNIOMMMIOFILL) pfnFillR0 = 0;
    +     if (pszFillR0)
    +     {
    +         rc = pdmR3DevGetSymbolR0Lazy(pDevIns, pszFillR0, &pfnFillR0);
    +         AssertLogRelMsgRCReturn(rc, ("pszFillR0=%s rc=%Rrc\n", pszFillR0, rc), rc);
    +     }
    +
    +     /* Raw-mode */
    +     rc = VINF_SUCCESS;
    +     RCPTRTYPE(PFNIOMMMIOWRITE) pfnWriteRC = 0;
    +     if (pszWriteRC)
    +     {
    +         rc = pdmR3DevGetSymbolRCLazy(pDevIns, pszWriteRC, &pfnWriteRC);
    +         AssertLogRelMsgRCReturn(rc, ("pszWriteRC=%s rc=%Rrc\n", pszWriteRC, rc), rc);
    +     }
    +
    +     RCPTRTYPE(PFNIOMMMIOREAD) pfnReadRC = 0;
    +     if (pszReadRC)
    +     {
    +         rc = pdmR3DevGetSymbolRCLazy(pDevIns, pszReadRC, &pfnReadRC);
    +         AssertLogRelMsgRCReturn(rc, ("pszReadRC=%s rc=%Rrc\n", pszReadRC, rc), rc);
    +     }
    +     RCPTRTYPE(PFNIOMMMIOFILL) pfnFillRC = 0;
    +     if (pszFillRC)
    +     {
    +         rc = pdmR3DevGetSymbolRCLazy(pDevIns, pszFillRC, &pfnFillRC);
    +         AssertLogRelMsgRCReturn(rc, ("pszFillRC=%s rc=%Rrc\n", pszFillRC, rc), rc);
    +     }
    +
    +     /*
    +      * Call IOM to make the registration.
    +      */
    +     rc = IOMR3MmioExPreRegister(pVM, pDevIns, iRegion, cbRegion, fFlags, pszDesc,
    +                                 pvUser, pfnWrite, pfnRead, pfnFill,
    +                                 pvUserR0, pfnWriteR0, pfnReadR0, pfnFillR0,
    +                                 pvUserRC, pfnWriteRC, pfnReadRC, pfnFillRC);
    +
    +     LogFlow(("pdmR3DevHlp_MMIOExPreRegister: caller='%s'/%d: returns %Rrc\n", pDevIns->pReg->szName, pDevIns->iInstance, rc));
    +     return rc;
    + }
    +
    +
    + /**
    +  * @copydoc PDMDEVHLPR3::pfnMMIOExDeregister
    +  */
    + static DECLCALLBACK(int) pdmR3DevHlp_MMIOExDeregister(PPDMDEVINS pDevIns, uint32_t iRegion)
      {
          PDMDEV_ASSERT_DEVINS(pDevIns);
          VM_ASSERT_EMT(pDevIns->Internal.s.pVMR3);
    -     LogFlow(("pdmR3DevHlp_MMIO2Deregister: caller='%s'/%d: iRegion=%#x\n",
    +     LogFlow(("pdmR3DevHlp_MMIOExDeregister: caller='%s'/%d: iRegion=%#x\n",
                   pDevIns->pReg->szName, pDevIns->iInstance, iRegion));

          AssertReturn(iRegion <= UINT8_MAX || iRegion == UINT32_MAX, VERR_INVALID_PARAMETER);

    -     int rc = PGMR3PhysMMIO2Deregister(pDevIns->Internal.s.pVMR3, pDevIns, iRegion);
    -
    -     LogFlow(("pdmR3DevHlp_MMIO2Deregister: caller='%s'/%d: returns %Rrc\n", pDevIns->pReg->szName, pDevIns->iInstance, rc));
    +     int rc = PGMR3PhysMMIOExDeregister(pDevIns->Internal.s.pVMR3, pDevIns, iRegion);
    +
    +     LogFlow(("pdmR3DevHlp_MMIOExDeregister: caller='%s'/%d: returns %Rrc\n", pDevIns->pReg->szName, pDevIns->iInstance, rc));
          return rc;
      }


      /**
    -  * @copydoc PDMDEVHLPR3::pfnMMIO2Map
    +  * @copydoc PDMDEVHLPR3::pfnMMIOExMap
       */
    - static DECLCALLBACK(int) pdmR3DevHlp_MMIO2Map(PPDMDEVINS pDevIns, uint32_t iRegion, RTGCPHYS GCPhys)
    + static DECLCALLBACK(int) pdmR3DevHlp_MMIOExMap(PPDMDEVINS pDevIns, uint32_t iRegion, RTGCPHYS GCPhys)
      {
          PDMDEV_ASSERT_DEVINS(pDevIns);
          VM_ASSERT_EMT(pDevIns->Internal.s.pVMR3);
    -     LogFlow(("pdmR3DevHlp_MMIO2Map: caller='%s'/%d: iRegion=%#x GCPhys=%#RGp\n",
    +     LogFlow(("pdmR3DevHlp_MMIOExMap: caller='%s'/%d: iRegion=%#x GCPhys=%#RGp\n",
                   pDevIns->pReg->szName, pDevIns->iInstance, iRegion, GCPhys));

    -     int rc = PGMR3PhysMMIO2Map(pDevIns->Internal.s.pVMR3, pDevIns, iRegion, GCPhys);
    -
    -     LogFlow(("pdmR3DevHlp_MMIO2Map: caller='%s'/%d: returns %Rrc\n", pDevIns->pReg->szName, pDevIns->iInstance, rc));
    +     int rc = PGMR3PhysMMIOExMap(pDevIns->Internal.s.pVMR3, pDevIns, iRegion, GCPhys);
    +
    +     LogFlow(("pdmR3DevHlp_MMIOExMap: caller='%s'/%d: returns %Rrc\n", pDevIns->pReg->szName, pDevIns->iInstance, rc));
          return rc;
      }


      /**
    -  * @copydoc PDMDEVHLPR3::pfnMMIO2Unmap
    +  * @copydoc PDMDEVHLPR3::pfnMMIOExUnmap
       */
    - static DECLCALLBACK(int) pdmR3DevHlp_MMIO2Unmap(PPDMDEVINS pDevIns, uint32_t iRegion, RTGCPHYS GCPhys)
    + static DECLCALLBACK(int) pdmR3DevHlp_MMIOExUnmap(PPDMDEVINS pDevIns, uint32_t iRegion, RTGCPHYS GCPhys)
      {
          PDMDEV_ASSERT_DEVINS(pDevIns);
          VM_ASSERT_EMT(pDevIns->Internal.s.pVMR3);
    -     LogFlow(("pdmR3DevHlp_MMIO2Unmap: caller='%s'/%d: iRegion=%#x GCPhys=%#RGp\n",
    +     LogFlow(("pdmR3DevHlp_MMIOExUnmap: caller='%s'/%d: iRegion=%#x GCPhys=%#RGp\n",
                   pDevIns->pReg->szName, pDevIns->iInstance, iRegion, GCPhys));

    -     int rc = PGMR3PhysMMIO2Unmap(pDevIns->Internal.s.pVMR3, pDevIns, iRegion, GCPhys);
    -
    -     LogFlow(("pdmR3DevHlp_MMIO2Unmap: caller='%s'/%d: returns %Rrc\n", pDevIns->pReg->szName, pDevIns->iInstance, rc));
    +     int rc = PGMR3PhysMMIOExUnmap(pDevIns->Internal.s.pVMR3, pDevIns, iRegion, GCPhys);
    +
    +     LogFlow(("pdmR3DevHlp_MMIOExUnmap: caller='%s'/%d: returns %Rrc\n", pDevIns->pReg->szName, pDevIns->iInstance, rc));
          return rc;
      }
    …
      pdmR3DevHlp_MMIODeregister,
      pdmR3DevHlp_MMIO2Register,
    - pdmR3DevHlp_MMIO2Deregister,
    - pdmR3DevHlp_MMIO2Map,
    - pdmR3DevHlp_MMIO2Unmap,
    + pdmR3DevHlp_MMIOExDeregister,
    + pdmR3DevHlp_MMIOExMap,
    + pdmR3DevHlp_MMIOExUnmap,
      pdmR3DevHlp_MMHyperMapMMIO2,
      pdmR3DevHlp_MMIO2MapKernel,
    …
      pdmR3DevHlp_VMGetSuspendReason,
      pdmR3DevHlp_VMGetResumeReason,
    - 0,
    + pdmR3DevHlp_MMIOExPreRegister,
      0,
      0,
    …
      pdmR3DevHlp_MMIODeregister,
      pdmR3DevHlp_MMIO2Register,
    - pdmR3DevHlp_MMIO2Deregister,
    - pdmR3DevHlp_MMIO2Map,
    - pdmR3DevHlp_MMIO2Unmap,
    + pdmR3DevHlp_MMIOExDeregister,
    + pdmR3DevHlp_MMIOExMap,
    + pdmR3DevHlp_MMIOExUnmap,
      pdmR3DevHlp_MMHyperMapMMIO2,
      pdmR3DevHlp_MMIO2MapKernel,
    …
      pdmR3DevHlp_VMGetSuspendReason,
      pdmR3DevHlp_VMGetResumeReason,
    - 0,
    + pdmR3DevHlp_MMIOExPreRegister,
      0,
      0,
trunk/src/VBox/VMM/VMMR3/PDMDevMiscHlp.cpp
r62643 → r64115

      }

    - /** @interface_method_impl{PDMPCIHLPR3,pfnIsMMIO2Base} */
    + /** @interface_method_impl{PDMPCIHLPR3,pfnIsMMIOExBase} */
      static DECLCALLBACK(bool) pdmR3PciHlp_IsMMIO2Base(PPDMDEVINS pDevIns, PPDMDEVINS pOwner, RTGCPHYS GCPhys)
      {
          PDMDEV_ASSERT_DEVINS(pDevIns);
          VM_ASSERT_EMT(pDevIns->Internal.s.pVMR3);
    -     bool fRc = PGMR3PhysMMIO2IsBase(pDevIns->Internal.s.pVMR3, pOwner, GCPhys);
    -     Log4(("pdmR3PciHlp_IsMMIO2Base: pOwner=%p GCPhys=%RGp -> %RTbool\n", pOwner, GCPhys, fRc));
    +     bool fRc = PGMR3PhysMMIOExIsBase(pDevIns->Internal.s.pVMR3, pOwner, GCPhys);
    +     Log4(("pdmR3PciHlp_IsMMIOExBase: pOwner=%p GCPhys=%RGp -> %RTbool\n", pOwner, GCPhys, fRc));
          return fRc;
      }
trunk/src/VBox/VMM/VMMR3/PGM.cpp
r62644 → r64115

       * be mapped and thus not included in the above exercise.
       */
    - for (PPGMMMIO2RANGE pCur = pVM->pgm.s.pMmio2RangesR3; pCur; pCur = pCur->pNextR3)
    + for (PPGMREGMMIORANGE pCur = pVM->pgm.s.pRegMmioRangesR3; pCur; pCur = pCur->pNextR3)
          if (!(pCur->RamRange.fFlags & PGM_RAM_RANGE_FLAGS_FLOATING))
              pCur->RamRange.pSelfRC = MMHyperCCToRC(pVM, &pCur->RamRange);
trunk/src/VBox/VMM/VMMR3/PGMPhys.cpp
r63560 → r64115

      /**
    -  * Relocate a floating RAM range.
    -  *
    -  * @copydoc FNPGMRELOCATE
    +  * @callback_method_impl{FNPGMRELOCATE, Relocate a floating RAM range.}
    +  * @sa pgmR3PhysMMIOExRangeRelocate
       */
      static DECLCALLBACK(bool) pgmR3PhysRamRangeRelocate(PVM pVM, RTGCPTR GCPtrOld, RTGCPTR GCPtrNew,
    …
       * @param   iRegion     The region.
       */
    - DECLINLINE(PPGMMMIO2RANGE) pgmR3PhysMMIO2Find(PVM pVM, PPDMDEVINS pDevIns, uint32_t iRegion)
    - {
    -     /*
    -      * Search the list.
    -      */
    -     for (PPGMMMIO2RANGE pCur = pVM->pgm.s.pMmio2RangesR3; pCur; pCur = pCur->pNextR3)
    + DECLINLINE(PPGMREGMMIORANGE) pgmR3PhysMMIOExFind(PVM pVM, PPDMDEVINS pDevIns, uint32_t iRegion)
    + {
    +     /*
    +      * Search the list. There shouldn't be many entries.
    +      */
    +     for (PPGMREGMMIORANGE pCur = pVM->pgm.s.pRegMmioRangesR3; pCur; pCur = pCur->pNextR3)
          if (   pCur->pDevInsR3 == pDevIns
              && pCur->iRegion == iRegion)
    …
      /**
    -  * Allocate and register an MMIO2 region.
    -  *
    -  * As mentioned elsewhere, MMIO2 is just RAM spelled differently. It's RAM
    -  * associated with a device. It is also non-shared memory with a permanent
    -  * ring-3 mapping and page backing (presently).
    -  *
    -  * A MMIO2 range may overlap with base memory if a lot of RAM is configured for
    -  * the VM, in which case we'll drop the base memory pages. Presently we will
    -  * make no attempt to preserve anything that happens to be present in the base
    -  * memory that is replaced, this is of course incorrect but it's too much
    -  * effort.
    +  * @callback_method_impl{FNPGMRELOCATE, Relocate a floating MMIO/MMIO2 range.}
    +  * @sa pgmR3PhysRamRangeRelocate
    +  */
    + static DECLCALLBACK(bool) pgmR3PhysMMIOExRangeRelocate(PVM pVM, RTGCPTR GCPtrOld, RTGCPTR GCPtrNew,
    +                                                        PGMRELOCATECALL enmMode, void *pvUser)
    + {
    +     PPGMREGMMIORANGE pMmio = (PPGMREGMMIORANGE)pvUser;
    +     Assert(pMmio->RamRange.fFlags & PGM_RAM_RANGE_FLAGS_FLOATING);
    +     Assert(pMmio->RamRange.pSelfRC == GCPtrOld + PAGE_SIZE + RT_UOFFSETOF(PGMREGMMIORANGE, RamRange)); RT_NOREF_PV(GCPtrOld);
    +
    +     switch (enmMode)
    +     {
    +         case PGMRELOCATECALL_SUGGEST:
    +             return true;
    +
    +         case PGMRELOCATECALL_RELOCATE:
    +         {
    +             /*
    +              * Update myself, then relink all the ranges and flush the RC TLB.
    +              */
    +             pgmLock(pVM);
    +
    +             pMmio->RamRange.pSelfRC = (RTRCPTR)(GCPtrNew + PAGE_SIZE + RT_UOFFSETOF(PGMREGMMIORANGE, RamRange));
    +
    +             pgmR3PhysRelinkRamRanges(pVM);
    +             for (unsigned i = 0; i < PGM_RAMRANGE_TLB_ENTRIES; i++)
    +                 pVM->pgm.s.apRamRangesTlbRC[i] = NIL_RTRCPTR;
    +
    +             pgmUnlock(pVM);
    +             return true;
    +         }
    +
    +         default:
    +             AssertFailedReturn(false);
    +     }
    + }
    +
    +
    + /**
    +  * Worker for PGMR3PhysMMIOExPreRegister & PGMR3PhysMMIO2Register that
    +  * allocates the PGMREGMMIORANGE structure and does basic initialization.
    +  *
    +  * The caller must set the type specific members and initialize the PGMPAGE
    +  * structures.
    +  *
    +  * @returns VBox status code.
    +  * @param   pVM         The cross context VM structure.
    +  * @param   pDevIns     The device instance owning the region.
    +  * @param   iRegion     The region number. If the MMIO2 memory is a PCI
    +  *                      I/O region this number has to be the number of that
    +  *                      region. Otherwise it can be any number save
    +  *                      UINT8_MAX.
    +  * @param   cb          The size of the region. Must be page aligned.
    +  * @param   pszDesc     The description.
    +  * @param   ppNew       Where to return the pointer to the registration.
    +  *
    +  * @thread  EMT
    +  */
    + static int pgmR3PhysMMIOExCreate(PVM pVM, PPDMDEVINS pDevIns, uint32_t iRegion, RTGCPHYS cb, const char *pszDesc,
    +                                  PPGMREGMMIORANGE *ppNew)
    + {
    +     /*
    +      * We currently do a single RAM range for the whole thing. This will
    +      * probably have to change once someone needs really large MMIO regions,
    +      * as we will be running into SUPR3PageAllocEx limitations and such.
    +      */
    +     const uint32_t   cPages  = cb >> X86_PAGE_SHIFT;
    +     const size_t     cbRange = RT_OFFSETOF(PGMREGMMIORANGE, RamRange.aPages[cPages]);
    +     PPGMREGMMIORANGE pNew    = NULL;
    +     if (cb >= _2G)
    +     {
    +         /*
    +          * Allocate memory for the registration structure.
    +          */
    +         size_t const cChunkPages  = RT_ALIGN_Z(cbRange, PAGE_SIZE) >> PAGE_SHIFT;
    +         size_t const cbChunk      = (1 + cChunkPages + 1) << PAGE_SHIFT;
    +         AssertLogRelReturn(cbChunk == (uint32_t)cbChunk, VERR_OUT_OF_RANGE);
    +         PSUPPAGE     paChunkPages = (PSUPPAGE)RTMemTmpAllocZ(sizeof(SUPPAGE) * cChunkPages);
    +         AssertReturn(paChunkPages, VERR_NO_TMP_MEMORY);
    +         RTR0PTR      R0PtrChunk   = NIL_RTR0PTR;
    +         void        *pvChunk      = NULL;
    +         int rc = SUPR3PageAllocEx(cChunkPages, 0 /*fFlags*/, &pvChunk,
    + #if defined(VBOX_WITH_MORE_RING0_MEM_MAPPINGS)
    +                                   &R0PtrChunk,
    + #elif defined(VBOX_WITH_2X_4GB_ADDR_SPACE)
    +                                   HMIsEnabled(pVM) ? &R0PtrChunk : NULL,
    + #else
    +                                   NULL,
    + #endif
    +                                   paChunkPages);
    +         AssertLogRelMsgRCReturnStmt(rc, ("rc=%Rrc, cChunkPages=%#zx\n", rc, cChunkPages), RTMemTmpFree(paChunkPages), rc);
    +
    + #if defined(VBOX_WITH_MORE_RING0_MEM_MAPPINGS)
    +         Assert(R0PtrChunk != NIL_RTR0PTR);
    + #elif defined(VBOX_WITH_2X_4GB_ADDR_SPACE)
    +         if (!HMIsEnabled(pVM))
    +             R0PtrChunk = NIL_RTR0PTR;
    + #else
    +         R0PtrChunk = (uintptr_t)pvChunk;
    + #endif
    +         memset(pvChunk, 0, cChunkPages << PAGE_SHIFT);
    +
    +         pNew = (PPGMREGMMIORANGE)pvChunk;
    +         pNew->RamRange.fFlags  = PGM_RAM_RANGE_FLAGS_FLOATING;
    +         pNew->RamRange.pSelfR0 = R0PtrChunk + RT_OFFSETOF(PGMREGMMIORANGE, RamRange);
    +
    +         /*
    +          * If we might end up in raw-mode, make a HMA mapping of the range,
    +          * just like we do for memory above 4GB.
    +          */
    +         if (HMIsEnabled(pVM))
    +             pNew->RamRange.pSelfRC = NIL_RTRCPTR;
    +         else
    +         {
    +             RTGCPTR       GCPtrChunkMap = pVM->pgm.s.GCPtrPrevRamRangeMapping - cbChunk;
    +             RTGCPTR const GCPtrChunk    = GCPtrChunkMap + PAGE_SIZE;
    +             rc = PGMR3MapPT(pVM, GCPtrChunkMap, (uint32_t)cbChunk, 0 /*fFlags*/, pgmR3PhysMMIOExRangeRelocate, pNew, pszDesc);
    +             if (RT_SUCCESS(rc))
    +             {
    +                 pVM->pgm.s.GCPtrPrevRamRangeMapping = GCPtrChunkMap;
    +
    +                 RTGCPTR GCPtrPage = GCPtrChunk;
    +                 for (uint32_t iPage = 0; iPage < cChunkPages && RT_SUCCESS(rc); iPage++, GCPtrPage += PAGE_SIZE)
    +                     rc = PGMMap(pVM, GCPtrPage, paChunkPages[iPage].Phys, PAGE_SIZE, 0);
    +             }
    +             if (RT_FAILURE(rc))
    +             {
    +                 SUPR3PageFreeEx(pvChunk, cChunkPages);
    +                 return rc;
    +             }
    +             pNew->RamRange.pSelfRC = GCPtrChunk + RT_OFFSETOF(PGMREGMMIORANGE, RamRange);
    +         }
    +     }
    +     /*
    +      * Not so big, do a one time hyper allocation.
    +      */
    +     else
    +     {
    +         int rc = MMR3HyperAllocOnceNoRel(pVM, cbRange, 0, MM_TAG_PGM_PHYS, (void **)&pNew);
    +         AssertLogRelMsgRC(rc, ("cbRange=%zu\n", cbRange));
    +
    +         /*
    +          * Initialize allocation specific items.
    +          */
    +         //pNew->RamRange.fFlags = 0;
    +         pNew->RamRange.pSelfR0 = MMHyperCCToR0(pVM, &pNew->RamRange);
    +         pNew->RamRange.pSelfRC = MMHyperCCToRC(pVM, &pNew->RamRange);
    +     }
    +
    +     /*
    +      * Initialize the registration structure (caller does specific bits).
    +      */
    +     pNew->pDevInsR3             = pDevIns;
    +     //pNew->pvR3                = NULL;
    +     //pNew->pNext               = NULL;
    +     //pNew->fMmio2              = false;
    +     //pNew->fMapped             = false;
    +     //pNew->fOverlapping        = false;
    +     pNew->iRegion               = iRegion;
    +     pNew->idSavedState          = UINT8_MAX;
    +     pNew->idMmio2               = UINT8_MAX;
    +     //pNew->pPhysHandlerR3      = NULL;
    +     //pNew->paLSPages           = NULL;
    +     pNew->RamRange.GCPhys       = NIL_RTGCPHYS;
    +     pNew->RamRange.GCPhysLast   = NIL_RTGCPHYS;
    +     pNew->RamRange.pszDesc      = pszDesc;
    +     pNew->RamRange.cb           = cb;
    +     pNew->RamRange.fFlags      |= PGM_RAM_RANGE_FLAGS_AD_HOC_MMIO_EX;
    +     //pNew->RamRange.pvR3       = NULL;
    +     //pNew->RamRange.paLSPages  = NULL;
    +
    +     *ppNew = pNew;
    +     return VINF_SUCCESS;
    + }
    +
    +
    + /**
    +  * Common worker for PGMR3PhysMMIOExPreRegister & PGMR3PhysMMIO2Register that
    +  * links a complete registration entry into the lists and lookup tables.
    +  *
    +  * @param   pVM     The cross context VM structure.
    +  * @param   pNew    The new MMIO / MMIO2 registration to link.
    +  */
    + static void pgmR3PhysMMIOExLink(PVM pVM, PPGMREGMMIORANGE pNew)
    + {
    +     /*
    +      * Link it into the list.
    +      * Since there is no particular order, just push it.
    +      */
    +     pgmLock(pVM);
    +
    +     pNew->pNextR3 = pVM->pgm.s.pRegMmioRangesR3;
    +     pVM->pgm.s.pRegMmioRangesR3 = pNew;
    +
    +     uint8_t idMmio2 = pNew->idMmio2;
    +     if (idMmio2 != UINT8_MAX)
    +     {
    +         Assert(pNew->fMmio2);
    +         Assert(pVM->pgm.s.apMmio2RangesR3[idMmio2 - 1] == NULL);
    +         Assert(pVM->pgm.s.apMmio2RangesR0[idMmio2 - 1] == NIL_RTR0PTR);
    +         pVM->pgm.s.apMmio2RangesR3[idMmio2 - 1] = pNew;
    +         pVM->pgm.s.apMmio2RangesR0[idMmio2 - 1] = MMHyperCCToR0(pVM, pNew);
    +     }
    +     else
    +         Assert(!pNew->fMmio2);
    +
    +     pgmPhysInvalidatePageMapTLB(pVM);
    +     pgmUnlock(pVM);
    + }
    +
    +
    + /**
    +  * Allocate and pre-register an MMIO region.
    +  *
    +  * This is currently the way to deal with large MMIO regions. It may in the
    +  * future be extended to be the way we deal with all MMIO regions, but that
    +  * means we'll have to do something about the simple list based approach we take
    +  * to tracking the registrations.
       *
       * @returns VBox status code.
    …
       *                      the memory.
       * @param   pszDesc     The description.
    +  *
    +  * @thread  EMT
    +  *
    +  * @sa      PGMR3PhysMMIORegister, PGMR3PhysMMIO2Register,
    +  *          PGMR3PhysMMIOExMap, PGMR3PhysMMIOExUnmap, PGMR3PhysMMIOExDeregister.
    +  */
    + VMMR3DECL(int) PGMR3PhysMMIOExPreRegister(PVM pVM, PPDMDEVINS pDevIns, uint32_t iRegion, RTGCPHYS cb, PGMPHYSHANDLERTYPE hType,
    +                                           RTR3PTR pvUserR3, RTR0PTR pvUserR0, RTRCPTR pvUserRC, const char *pszDesc)
    + {
    +     /*
    +      * Validate input.
2705 */ 2706 VM_ASSERT_EMT_RETURN(pVM, VERR_VM_THREAD_NOT_EMT); 2707 AssertPtrReturn(pDevIns, VERR_INVALID_PARAMETER); 2708 AssertReturn(iRegion <= UINT8_MAX, VERR_INVALID_PARAMETER); 2709 AssertPtrReturn(pszDesc, VERR_INVALID_POINTER); 2710 AssertReturn(*pszDesc, VERR_INVALID_PARAMETER); 2711 AssertReturn(pgmR3PhysMMIOExFind(pVM, pDevIns, iRegion) == NULL, VERR_ALREADY_EXISTS); 2712 AssertReturn(!(cb & PAGE_OFFSET_MASK), VERR_INVALID_PARAMETER); 2713 AssertReturn(cb, VERR_INVALID_PARAMETER); 2714 2715 const uint32_t cPages = cb >> PAGE_SHIFT; 2716 AssertLogRelReturn(((RTGCPHYS)cPages << PAGE_SHIFT) == cb, VERR_INVALID_PARAMETER); 2717 AssertLogRelReturn(cPages <= PGM_MMIO2_MAX_PAGE_COUNT, VERR_OUT_OF_RANGE); 2718 2719 /* 2720 * For the 2nd+ instance, mangle the description string so it's unique. 2721 */ 2722 if (pDevIns->iInstance > 0) /** @todo Move to PDMDevHlp.cpp and use a real string cache. */ 2723 { 2724 pszDesc = MMR3HeapAPrintf(pVM, MM_TAG_PGM_PHYS, "%s [%u]", pszDesc, pDevIns->iInstance); 2725 if (!pszDesc) 2726 return VERR_NO_MEMORY; 2727 } 2728 2729 /* 2730 * Register the MMIO callbacks. 2731 */ 2732 PPGMPHYSHANDLER pPhysHandler; 2733 int rc = pgmHandlerPhysicalExCreate(pVM, hType, pvUserR3, pvUserR0, pvUserRC, pszDesc, &pPhysHandler); 2734 if (RT_SUCCESS(rc)) 2735 { 2736 /* 2737 * Create the registered MMIO range record for it. 2738 */ 2739 PPGMREGMMIORANGE pNew; 2740 rc = pgmR3PhysMMIOExCreate(pVM, pDevIns, iRegion, cb, pszDesc, &pNew); 2741 if (RT_SUCCESS(rc)) 2742 { 2743 pNew->fMmio2 = false; 2744 pNew->pPhysHandlerR3 = pPhysHandler; 2745 2746 uint32_t iPage = cPages; 2747 while (iPage-- > 0) 2748 { 2749 PGM_PAGE_INIT_ZERO(&pNew->RamRange.aPages[iPage], pVM, PGMPAGETYPE_MMIO); 2750 } 2751 2752 /* 2753 * Update the page count stats, link the registration and we're done. 2754 */ 2755 pVM->pgm.s.cAllPages += cPages; 2756 pVM->pgm.s.cPureMmioPages += cPages; 2757 2758 pgmR3PhysMMIOExLink(pVM, pNew); 2759 return VINF_SUCCESS; 2760 } 2761 2762 pgmHandlerPhysicalExDestroy(pVM, pPhysHandler); 2763 } 2764 return rc; 2765 } 2766 2767 2768 /** 2769 * Allocate and register an MMIO2 region. 2770 * 2771 * As mentioned elsewhere, MMIO2 is just RAM spelled differently. It's RAM 2772 * associated with a device. It is also non-shared memory with a permanent 2773 * ring-3 mapping and page backing (presently). 2774 * 2775 * A MMIO2 range may overlap with base memory if a lot of RAM is configured for 2776 * the VM, in which case we'll drop the base memory pages. Presently we will 2777 * make no attempt to preserve anything that happens to be present in the base 2778 * memory that is replaced, this is of course incorrect but it's too much 2779 * effort. 2780 * 2781 * @returns VBox status code. 2782 * @retval VINF_SUCCESS on success, *ppv pointing to the R3 mapping of the 2783 * memory. 2784 * @retval VERR_ALREADY_EXISTS if the region already exists. 2785 * 2786 * @param pVM The cross context VM structure. 2787 * @param pDevIns The device instance owning the region. 2788 * @param iRegion The region number. If the MMIO2 memory is a PCI 2789 * I/O region this number has to be the number of that 2790 * region. Otherwise it can be any number safe 2791 * UINT8_MAX. 2792 * @param cb The size of the region. Must be page aligned. 2793 * @param fFlags Reserved for future use, must be zero. 2794 * @param ppv Where to store the pointer to the ring-3 mapping of 2795 * the memory. 2796 * @param pszDesc The description. 
2797 * @thread EMT 2489 2798 */ 2490 2799 VMMR3DECL(int) PGMR3PhysMMIO2Register(PVM pVM, PPDMDEVINS pDevIns, uint32_t iRegion, RTGCPHYS cb, uint32_t fFlags, … … 2500 2809 AssertPtrReturn(pszDesc, VERR_INVALID_POINTER); 2501 2810 AssertReturn(*pszDesc, VERR_INVALID_PARAMETER); 2502 AssertReturn(pgmR3PhysMMIO 2Find(pVM, pDevIns, iRegion) == NULL, VERR_ALREADY_EXISTS);2811 AssertReturn(pgmR3PhysMMIOExFind(pVM, pDevIns, iRegion) == NULL, VERR_ALREADY_EXISTS); 2503 2812 AssertReturn(!(cb & PAGE_OFFSET_MASK), VERR_INVALID_PARAMETER); 2504 2813 AssertReturn(cb, VERR_INVALID_PARAMETER); … … 2507 2816 const uint32_t cPages = cb >> PAGE_SHIFT; 2508 2817 AssertLogRelReturn(((RTGCPHYS)cPages << PAGE_SHIFT) == cb, VERR_INVALID_PARAMETER); 2509 AssertLogRelReturn(cPages <= PGM_MMIO2_MAX_PAGE_COUNT, VERR_ NO_MEMORY);2818 AssertLogRelReturn(cPages <= PGM_MMIO2_MAX_PAGE_COUNT, VERR_OUT_OF_RANGE); 2510 2819 2511 2820 /* … … 2550 2859 2551 2860 /* 2552 * Create the MMIO2range record for it.2861 * Create the registered MMIO range record for it. 2553 2862 */ 2554 const size_t cbRange = RT_OFFSETOF(PGMMMIO2RANGE, RamRange.aPages[cPages]); 2555 PPGMMMIO2RANGE pNew; 2556 rc = MMR3HyperAllocOnceNoRel(pVM, cbRange, 0, MM_TAG_PGM_PHYS, (void **)&pNew); 2557 AssertLogRelMsgRC(rc, ("cbRamRange=%zu\n", cbRange)); 2863 PPGMREGMMIORANGE pNew; 2864 rc = pgmR3PhysMMIOExCreate(pVM, pDevIns, iRegion, cb, pszDesc, &pNew); 2558 2865 if (RT_SUCCESS(rc)) 2559 2866 { 2560 pNew->pDevInsR3 = pDevIns; 2561 pNew->pvR3 = pvPages; 2562 //pNew->pNext = NULL; 2563 //pNew->fMapped = false; 2564 //pNew->fOverlapping = false; 2565 pNew->iRegion = iRegion; 2566 pNew->idSavedState = UINT8_MAX; 2567 pNew->idMmio2 = idMmio2; 2568 pNew->RamRange.pSelfR0 = MMHyperCCToR0(pVM, &pNew->RamRange); 2569 pNew->RamRange.pSelfRC = MMHyperCCToRC(pVM, &pNew->RamRange); 2570 pNew->RamRange.GCPhys = NIL_RTGCPHYS; 2571 pNew->RamRange.GCPhysLast = NIL_RTGCPHYS; 2572 pNew->RamRange.pszDesc = pszDesc; 2573 pNew->RamRange.cb = cb; 2574 pNew->RamRange.fFlags = PGM_RAM_RANGE_FLAGS_AD_HOC_MMIO2; 2575 pNew->RamRange.pvR3 = pvPages; 2576 //pNew->RamRange.paLSPages = NULL; 2867 pNew->pvR3 = pvPages; 2868 pNew->idMmio2 = idMmio2; 2869 pNew->fMmio2 = true; 2577 2870 2578 2871 uint32_t iPage = cPages; … … 2585 2878 } 2586 2879 2587 /* update page count stats */ 2588 pVM->pgm.s.cAllPages += cPages; 2880 RTMemTmpFree(paPages); 2881 2882 /* 2883 * Update the page count stats, link the registration and we're done. 2884 */ 2885 pVM->pgm.s.cAllPages += cPages; 2589 2886 pVM->pgm.s.cPrivatePages += cPages; 2590 2887 2591 /* 2592 * Link it into the list. 2593 * Since there is no particular order, just push it. 2594 */ 2595 /** @todo we can save us the linked list now, just search the lookup table... 
*/ 2596 pgmLock(pVM); 2597 Assert(pVM->pgm.s.apMmio2RangesR3[idMmio2 - 1] == NULL); 2598 Assert(pVM->pgm.s.apMmio2RangesR0[idMmio2 - 1] == NIL_RTR0PTR); 2599 pNew->pNextR3 = pVM->pgm.s.pMmio2RangesR3; 2600 pVM->pgm.s.pMmio2RangesR3 = pNew; 2601 pVM->pgm.s.apMmio2RangesR3[idMmio2 - 1] = pNew; 2602 pVM->pgm.s.apMmio2RangesR0[idMmio2 - 1] = MMHyperCCToR0(pVM, pNew); 2603 pgmUnlock(pVM); 2888 pgmR3PhysMMIOExLink(pVM, pNew); 2604 2889 2605 2890 *ppv = pvPages; 2606 RTMemTmpFree(paPages);2607 pgmPhysInvalidatePageMapTLB(pVM);2608 2891 return VINF_SUCCESS; 2609 2892 } … … 2622 2905 2623 2906 /** 2624 * Deregisters and frees an MMIO2 region .2907 * Deregisters and frees an MMIO2 region or a pre-registered MMIO region 2625 2908 * 2626 2909 * Any physical (and virtual) access handlers registered for the region must … … 2632 2915 * @param iRegion The region. If it's UINT32_MAX it'll be a wildcard match. 2633 2916 */ 2634 VMMR3DECL(int) PGMR3PhysMMIO 2Deregister(PVM pVM, PPDMDEVINS pDevIns, uint32_t iRegion)2917 VMMR3DECL(int) PGMR3PhysMMIOExDeregister(PVM pVM, PPDMDEVINS pDevIns, uint32_t iRegion) 2635 2918 { 2636 2919 /* … … 2644 2927 int rc = VINF_SUCCESS; 2645 2928 unsigned cFound = 0; 2646 PPGM MMIO2RANGE pPrev = NULL;2647 PPGM MMIO2RANGE pCur = pVM->pgm.s.pMmio2RangesR3;2929 PPGMREGMMIORANGE pPrev = NULL; 2930 PPGMREGMMIORANGE pCur = pVM->pgm.s.pRegMmioRangesR3; 2648 2931 while (pCur) 2649 2932 { … … 2659 2942 if (pCur->fMapped) 2660 2943 { 2661 int rc2 = PGMR3PhysMMIO 2Unmap(pVM, pCur->pDevInsR3, pCur->iRegion, pCur->RamRange.GCPhys);2944 int rc2 = PGMR3PhysMMIOExUnmap(pVM, pCur->pDevInsR3, pCur->iRegion, pCur->RamRange.GCPhys); 2662 2945 AssertRC(rc2); 2663 2946 if (RT_FAILURE(rc2) && RT_SUCCESS(rc)) … … 2666 2949 2667 2950 /* 2951 * Must tell IOM about MMIO. 2952 */ 2953 if (!pCur->fMmio2) 2954 IOMR3MmioExNotifyDeregistered(pVM, pCur->pPhysHandlerR3->pvUserR3); 2955 2956 /* 2668 2957 * Unlink it 2669 2958 */ 2670 PPGM MMIO2RANGE pNext = pCur->pNextR3;2959 PPGMREGMMIORANGE pNext = pCur->pNextR3; 2671 2960 if (pPrev) 2672 2961 pPrev->pNextR3 = pNext; 2673 2962 else 2674 pVM->pgm.s.p Mmio2RangesR3 = pNext;2963 pVM->pgm.s.pRegMmioRangesR3 = pNext; 2675 2964 pCur->pNextR3 = NULL; 2676 2965 2677 2966 uint8_t idMmio2 = pCur->idMmio2; 2678 Assert(pVM->pgm.s.apMmio2RangesR3[idMmio2 - 1] == pCur); 2679 pVM->pgm.s.apMmio2RangesR3[idMmio2 - 1] = NULL; 2680 pVM->pgm.s.apMmio2RangesR0[idMmio2 - 1] = NIL_RTR0PTR; 2967 if (idMmio2 != UINT8_MAX) 2968 { 2969 Assert(pVM->pgm.s.apMmio2RangesR3[idMmio2 - 1] == pCur); 2970 pVM->pgm.s.apMmio2RangesR3[idMmio2 - 1] = NULL; 2971 pVM->pgm.s.apMmio2RangesR0[idMmio2 - 1] = NIL_RTR0PTR; 2972 } 2681 2973 2682 2974 /* 2683 2975 * Free the memory. 2684 2976 */ 2685 int rc2 = SUPR3PageFreeEx(pCur->pvR3, pCur->RamRange.cb >> PAGE_SHIFT);2686 AssertRC(rc2);2687 if (RT_FAILURE(rc2) && RT_SUCCESS(rc))2688 rc = rc2;2689 2690 2977 uint32_t const cPages = pCur->RamRange.cb >> PAGE_SHIFT; 2691 rc2 = MMR3AdjustFixedReservation(pVM, -(int32_t)cPages, pCur->RamRange.pszDesc); 2692 AssertRC(rc2); 2693 if (RT_FAILURE(rc2) && RT_SUCCESS(rc)) 2694 rc = rc2; 2978 if (pCur->fMmio2) 2979 { 2980 int rc2 = SUPR3PageFreeEx(pCur->pvR3, cPages); 2981 AssertRC(rc2); 2982 if (RT_FAILURE(rc2) && RT_SUCCESS(rc)) 2983 rc = rc2; 2984 2985 rc2 = MMR3AdjustFixedReservation(pVM, -(int32_t)cPages, pCur->RamRange.pszDesc); 2986 AssertRC(rc2); 2987 if (RT_FAILURE(rc2) && RT_SUCCESS(rc)) 2988 rc = rc2; 2989 } 2695 2990 2696 2991 /* we're leaking hyper memory here if done at runtime. 
*/ … … 2706 3001 , ("%s\n", VMR3GetStateName(enmState))); 2707 3002 #endif 2708 /*rc = MMHyperFree(pVM, pCur); 2709 AssertRCReturn(rc, rc); - not safe, see the alloc call. */ 3003 3004 const bool fMmio2 = pCur->fMmio2; 3005 if (pCur->RamRange.fFlags & PGM_RAM_RANGE_FLAGS_FLOATING) 3006 { 3007 const size_t cbRange = RT_OFFSETOF(PGMREGMMIORANGE, RamRange.aPages[cPages]); 3008 size_t const cChunkPages = RT_ALIGN_Z(cbRange, PAGE_SIZE) >> PAGE_SHIFT; 3009 SUPR3PageFreeEx(pCur, cChunkPages); 3010 } 3011 /*else 3012 { 3013 rc = MMHyperFree(pVM, pCur); - does not work, see the alloc call. 3014 AssertRCReturn(rc, rc); 3015 } */ 2710 3016 2711 3017 2712 3018 /* update page count stats */ 2713 pVM->pgm.s.cAllPages -= cPages; 2714 pVM->pgm.s.cPrivatePages -= cPages; 3019 pVM->pgm.s.cAllPages -= cPages; 3020 if (fMmio2) 3021 pVM->pgm.s.cPrivatePages -= cPages; 3022 else 3023 pVM->pgm.s.cPureMmioPages -= cPages; 2715 3024 2716 3025 /* next */ … … 2730 3039 2731 3040 /** 2732 * Maps a MMIO2 region .3041 * Maps a MMIO2 region or a pre-registered MMIO region. 2733 3042 * 2734 3043 * This is done when a guest / the bios / state loading changes the … … 2743 3052 * @param GCPhys The guest-physical address to be remapped. 2744 3053 */ 2745 VMMR3DECL(int) PGMR3PhysMMIO 2Map(PVM pVM, PPDMDEVINS pDevIns, uint32_t iRegion, RTGCPHYS GCPhys)3054 VMMR3DECL(int) PGMR3PhysMMIOExMap(PVM pVM, PPDMDEVINS pDevIns, uint32_t iRegion, RTGCPHYS GCPhys) 2746 3055 { 2747 3056 /* … … 2755 3064 AssertReturn(!(GCPhys & PAGE_OFFSET_MASK), VERR_INVALID_PARAMETER); 2756 3065 2757 PPGM MMIO2RANGE pCur = pgmR3PhysMMIO2Find(pVM, pDevIns, iRegion);3066 PPGMREGMMIORANGE pCur = pgmR3PhysMMIOExFind(pVM, pDevIns, iRegion); 2758 3067 AssertReturn(pCur, VERR_NOT_FOUND); 2759 3068 AssertReturn(!pCur->fMapped, VERR_WRONG_ORDER); … … 2765 3074 2766 3075 /* 2767 * Find our location in the ram range list, checking for 2768 * restriction we don't bother implementing yet (partially overlapping). 2769 */ 3076 * Find our location in the ram range list, checking for restriction 3077 * we don't bother implementing yet (partially overlapping). 3078 */ 3079 pgmLock(pVM); 3080 2770 3081 bool fRamExists = false; 2771 3082 PPGMRAMRANGE pRamPrev = NULL; … … 2776 3087 && GCPhysLast >= pRam->GCPhys) 2777 3088 { 2778 /* completely within? */ 2779 AssertLogRelMsgReturn( GCPhys >= pRam->GCPhys 2780 && GCPhysLast <= pRam->GCPhysLast, 2781 ("%RGp-%RGp (MMIO2/%s) falls partly outside %RGp-%RGp (%s)\n", 2782 GCPhys, GCPhysLast, pCur->RamRange.pszDesc, 2783 pRam->GCPhys, pRam->GCPhysLast, pRam->pszDesc), 2784 VERR_PGM_RAM_CONFLICT); 3089 /* Completely within? */ 3090 AssertLogRelMsgReturnStmt( GCPhys >= pRam->GCPhys 3091 && GCPhysLast <= pRam->GCPhysLast, 3092 ("%RGp-%RGp (MMIO2/%s) falls partly outside %RGp-%RGp (%s)\n", 3093 GCPhys, GCPhysLast, pCur->RamRange.pszDesc, 3094 pRam->GCPhys, pRam->GCPhysLast, pRam->pszDesc), 3095 pgmUnlock(pVM), 3096 VERR_PGM_RAM_CONFLICT); 3097 3098 /* Check that all the pages are RAM pages. 
*/ 3099 PPGMPAGE pPage = &pRam->aPages[(GCPhys - pRam->GCPhys) >> PAGE_SHIFT]; 3100 uint32_t cPagesLeft = pCur->RamRange.cb >> PAGE_SHIFT; 3101 while (cPagesLeft-- > 0) 3102 { 3103 AssertLogRelMsgReturnStmt(PGM_PAGE_GET_TYPE(pPage) == PGMPAGETYPE_RAM, 3104 ("%RGp isn't a RAM page (%d) - mapping %RGp-%RGp (MMIO2/%s).\n", 3105 GCPhys, PGM_PAGE_GET_TYPE(pPage), GCPhys, GCPhysLast, pCur->RamRange.pszDesc), 3106 pgmUnlock(pVM), 3107 VERR_PGM_RAM_CONFLICT); 3108 pPage++; 3109 } 3110 2785 3111 fRamExists = true; 2786 3112 break; … … 2791 3117 pRam = pRam->pNextR3; 2792 3118 } 2793 if (fRamExists) 2794 { 2795 PPGMPAGE pPage = &pRam->aPages[(GCPhys - pRam->GCPhys) >> PAGE_SHIFT]; 2796 uint32_t cPagesLeft = pCur->RamRange.cb >> PAGE_SHIFT; 2797 while (cPagesLeft-- > 0) 2798 { 2799 AssertLogRelMsgReturn(PGM_PAGE_GET_TYPE(pPage) == PGMPAGETYPE_RAM, 2800 ("%RGp isn't a RAM page (%d) - mapping %RGp-%RGp (MMIO2/%s).\n", 2801 GCPhys, PGM_PAGE_GET_TYPE(pPage), GCPhys, GCPhysLast, pCur->RamRange.pszDesc), 2802 VERR_PGM_RAM_CONFLICT); 2803 pPage++; 2804 } 2805 } 2806 Log(("PGMR3PhysMMIO2Map: %RGp-%RGp fRamExists=%RTbool %s\n", 2807 GCPhys, GCPhysLast, fRamExists, pCur->RamRange.pszDesc)); 3119 Log(("PGMR3PhysMMIOExMap: %RGp-%RGp fRamExists=%RTbool %s\n", GCPhys, GCPhysLast, fRamExists, pCur->RamRange.pszDesc)); 3120 2808 3121 2809 3122 /* 2810 3123 * Make the changes. 2811 3124 */ 2812 pgmLock(pVM);2813 2814 3125 pCur->RamRange.GCPhys = GCPhys; 2815 3126 pCur->RamRange.GCPhysLast = GCPhysLast; 2816 pCur->fMapped = true;2817 pCur->fOverlapping = fRamExists;2818 2819 3127 if (fRamExists) 2820 3128 { 2821 /** @todo use pgmR3PhysFreePageRange here. */ 2822 uint32_t cPendingPages = 0; 2823 PGMMFREEPAGESREQ pReq; 2824 int rc = GMMR3FreePagesPrepare(pVM, &pReq, PGMPHYS_FREE_PAGE_BATCH_SIZE, GMMACCOUNT_BASE); 2825 AssertLogRelRCReturn(rc, rc); 2826 2827 /* replace the pages, freeing all present RAM pages. */ 2828 PPGMPAGE pPageSrc = &pCur->RamRange.aPages[0]; 2829 PPGMPAGE pPageDst = &pRam->aPages[(GCPhys - pRam->GCPhys) >> PAGE_SHIFT]; 2830 uint32_t cPagesLeft = pCur->RamRange.cb >> PAGE_SHIFT; 2831 while (cPagesLeft-- > 0) 2832 { 2833 rc = pgmPhysFreePage(pVM, pReq, &cPendingPages, pPageDst, GCPhys); 2834 AssertLogRelRCReturn(rc, rc); /* We're done for if this goes wrong. */ 2835 2836 RTHCPHYS const HCPhys = PGM_PAGE_GET_HCPHYS(pPageSrc); 2837 uint32_t const idPage = PGM_PAGE_GET_PAGEID(pPageSrc); 2838 PGM_PAGE_SET_PAGEID(pVM, pPageDst, idPage); 2839 PGM_PAGE_SET_HCPHYS(pVM, pPageDst, HCPhys); 2840 PGM_PAGE_SET_TYPE(pVM, pPageDst, PGMPAGETYPE_MMIO2); 2841 PGM_PAGE_SET_STATE(pVM, pPageDst, PGM_PAGE_STATE_ALLOCATED); 2842 PGM_PAGE_SET_PDE_TYPE(pVM, pPageDst, PGM_PAGE_PDE_TYPE_DONTCARE); 2843 PGM_PAGE_SET_PTE_INDEX(pVM, pPageDst, 0); 2844 PGM_PAGE_SET_TRACKING(pVM, pPageDst, 0); 2845 2846 pVM->pgm.s.cZeroPages--; 2847 GCPhys += PAGE_SIZE; 2848 pPageSrc++; 2849 pPageDst++; 3129 /* 3130 * Make all the pages in the range MMIO/ZERO pages, freeing any 3131 * RAM pages currently mapped here. This might not be 100% correct 3132 * for PCI memory, but we're doing the same thing for MMIO2 pages. 3133 * 3134 * We replace this MMIO/ZERO pages with real pages in the MMIO2 case. 3135 */ 3136 int rc = pgmR3PhysFreePageRange(pVM, pRam, GCPhys, GCPhysLast, PGMPAGETYPE_MMIO); 3137 AssertRCReturnStmt(rc, pgmUnlock(pVM), rc); 3138 if (pCur->fMmio2) 3139 { 3140 /* replace the pages, freeing all present RAM pages. 
*/ 3141 PPGMPAGE pPageSrc = &pCur->RamRange.aPages[0]; 3142 PPGMPAGE pPageDst = &pRam->aPages[(GCPhys - pRam->GCPhys) >> PAGE_SHIFT]; 3143 uint32_t cPagesLeft = pCur->RamRange.cb >> PAGE_SHIFT; 3144 while (cPagesLeft-- > 0) 3145 { 3146 Assert(PGM_PAGE_IS_MMIO(pPageDst)); 3147 3148 RTHCPHYS const HCPhys = PGM_PAGE_GET_HCPHYS(pPageSrc); 3149 uint32_t const idPage = PGM_PAGE_GET_PAGEID(pPageSrc); 3150 PGM_PAGE_SET_PAGEID(pVM, pPageDst, idPage); 3151 PGM_PAGE_SET_HCPHYS(pVM, pPageDst, HCPhys); 3152 PGM_PAGE_SET_TYPE(pVM, pPageDst, PGMPAGETYPE_MMIO2); 3153 PGM_PAGE_SET_STATE(pVM, pPageDst, PGM_PAGE_STATE_ALLOCATED); 3154 PGM_PAGE_SET_PDE_TYPE(pVM, pPageDst, PGM_PAGE_PDE_TYPE_DONTCARE); 3155 PGM_PAGE_SET_PTE_INDEX(pVM, pPageDst, 0); 3156 PGM_PAGE_SET_TRACKING(pVM, pPageDst, 0); 3157 3158 pVM->pgm.s.cZeroPages--; 3159 GCPhys += PAGE_SIZE; 3160 pPageSrc++; 3161 pPageDst++; 3162 } 2850 3163 } 2851 3164 2852 3165 /* Flush physical page map TLB. */ 2853 3166 pgmPhysInvalidatePageMapTLB(pVM); 2854 2855 if (cPendingPages)2856 {2857 rc = GMMR3FreePagesPerform(pVM, pReq, cPendingPages);2858 AssertLogRelRCReturn(rc, rc);2859 }2860 GMMR3FreePagesCleanup(pReq);2861 3167 2862 3168 /* Force a PGM pool flush as guest ram references have been changed. */ … … 2866 3172 pVCpu->pgm.s.fSyncFlags |= PGM_SYNC_CLEAR_PGM_POOL; 2867 3173 VMCPU_FF_SET(pVCpu, VMCPU_FF_PGM_SYNC_CR3); 2868 2869 pgmUnlock(pVM);2870 3174 } 2871 3175 else 2872 3176 { 2873 #ifdef VBOX_WITH_REM 2874 RTGCPHYS cb = pCur->RamRange.cb; 2875 #endif 2876 3177 /* 3178 * No RAM range, insert the one prepared during registration. 3179 */ 2877 3180 /* Clear the tracking data of pages we're going to reactivate. */ 2878 3181 PPGMPAGE pPageSrc = &pCur->RamRange.aPages[0]; … … 2887 3190 /* link in the ram range */ 2888 3191 pgmR3PhysLinkRamRange(pVM, &pCur->RamRange, pRamPrev); 2889 pgmUnlock(pVM); 3192 } 3193 3194 /* 3195 * Register the access handler if plain MMIO. 3196 */ 3197 if (!pCur->fMmio2) 3198 { 3199 int rc = pgmHandlerPhysicalExRegister(pVM, pCur->pPhysHandlerR3, GCPhys, GCPhysLast); 3200 if (RT_SUCCESS(rc)) 3201 { 3202 rc = IOMR3MmioExNotifyMapped(pVM, pCur->pPhysHandlerR3->pvUserR3, GCPhys); 3203 if (RT_FAILURE(rc)) 3204 pgmHandlerPhysicalExDeregister(pVM, pCur->pPhysHandlerR3); 3205 } 3206 if (RT_FAILURE(rc)) 3207 { 3208 /* Almost impossible, but try clean up properly and get out of here. */ 3209 if (!fRamExists) 3210 pgmR3PhysUnlinkRamRange2(pVM, &pCur->RamRange, pRamPrev); 3211 else 3212 { 3213 uint32_t cPagesLeft = pCur->RamRange.cb >> PAGE_SHIFT; 3214 if (pCur->fMmio2) 3215 pVM->pgm.s.cZeroPages += cPagesLeft; 3216 3217 PPGMPAGE pPageDst = &pRam->aPages[(pCur->RamRange.GCPhys - pRam->GCPhys) >> PAGE_SHIFT]; 3218 while (cPagesLeft-- > 0) 3219 { 3220 PGM_PAGE_INIT_ZERO(pPageDst, pVM, PGMPAGETYPE_RAM); 3221 pPageDst++; 3222 } 3223 } 3224 pCur->RamRange.GCPhys = NIL_RTGCPHYS; 3225 pCur->RamRange.GCPhysLast = NIL_RTGCPHYS; 3226 3227 pgmUnlock(pVM); 3228 return rc; 3229 } 3230 } 3231 3232 pCur->fMapped = true; 3233 pCur->fOverlapping = fRamExists; 3234 pgmPhysInvalidatePageMapTLB(pVM); 3235 pgmUnlock(pVM); 2890 3236 2891 3237 #ifdef VBOX_WITH_REM 2892 REMR3NotifyPhysRamRegister(pVM, GCPhys, cb, REM_NOTIFY_PHYS_RAM_FLAGS_MMIO2); 3238 /* 3239 * Inform REM without holding the PGM lock. 
3240 */ 3241 if (!fRamExists && pCur->fMmio2) 3242 REMR3NotifyPhysRamRegister(pVM, GCPhys, pCur->RamRange.cb, REM_NOTIFY_PHYS_RAM_FLAGS_MMIO2); 2893 3243 #endif 2894 }2895 2896 pgmPhysInvalidatePageMapTLB(pVM);2897 3244 return VINF_SUCCESS; 2898 3245 } … … 2900 3247 2901 3248 /** 2902 * Unmaps a MMIO2 region.3249 * Unmaps a MMIO2 or a pre-registered MMIO region. 2903 3250 * 2904 3251 * This is done when a guest / the bios / state loading changes the … … 2906 3253 * as during registration, of course. 2907 3254 */ 2908 VMMR3DECL(int) PGMR3PhysMMIO 2Unmap(PVM pVM, PPDMDEVINS pDevIns, uint32_t iRegion, RTGCPHYS GCPhys)3255 VMMR3DECL(int) PGMR3PhysMMIOExUnmap(PVM pVM, PPDMDEVINS pDevIns, uint32_t iRegion, RTGCPHYS GCPhys) 2909 3256 { 2910 3257 /* … … 2918 3265 AssertReturn(!(GCPhys & PAGE_OFFSET_MASK), VERR_INVALID_PARAMETER); 2919 3266 2920 PPGM MMIO2RANGE pCur = pgmR3PhysMMIO2Find(pVM, pDevIns, iRegion);3267 PPGMREGMMIORANGE pCur = pgmR3PhysMMIOExFind(pVM, pDevIns, iRegion); 2921 3268 AssertReturn(pCur, VERR_NOT_FOUND); 2922 3269 AssertReturn(pCur->fMapped, VERR_WRONG_ORDER); … … 2924 3271 Assert(pCur->RamRange.GCPhysLast != NIL_RTGCPHYS); 2925 3272 2926 Log(("PGMR3PhysMMIO 2Unmap: %RGp-%RGp %s\n",3273 Log(("PGMR3PhysMMIOExUnmap: %RGp-%RGp %s\n", 2927 3274 pCur->RamRange.GCPhys, pCur->RamRange.GCPhysLast, pCur->RamRange.pszDesc)); 2928 3275 3276 int rc = pgmLock(pVM); 3277 AssertRCReturn(rc, rc); 3278 AssertReturnStmt(pCur->fMapped, pgmUnlock(pVM),VERR_WRONG_ORDER); 3279 3280 /* 3281 * If plain MMIO, we must deregister the handler first. 3282 */ 3283 if (!pCur->fMmio2) 3284 { 3285 rc = pgmHandlerPhysicalExDeregister(pVM, pCur->pPhysHandlerR3); 3286 AssertRCReturnStmt(rc, pgmUnlock(pVM), rc); 3287 3288 IOMR3MmioExNotifyUnmapped(pVM, pCur->pPhysHandlerR3->pvUserR3, GCPhys); 3289 } 3290 2929 3291 /* 2930 3292 * Unmap it. 2931 3293 */ 2932 pgmLock(pVM);2933 2934 3294 #ifdef VBOX_WITH_REM 2935 RTGCPHYS GCPhysRangeREM; 2936 RTGCPHYS cbRangeREM; 2937 bool fInformREM; 3295 RTGCPHYS GCPhysRangeREM; 3296 bool fInformREM; 2938 3297 #endif 2939 3298 if (pCur->fOverlapping) … … 2944 3303 pRam = pRam->pNextR3; 2945 3304 3305 uint32_t cPagesLeft = pCur->RamRange.cb >> PAGE_SHIFT; 3306 if (pCur->fMmio2) 3307 pVM->pgm.s.cZeroPages += cPagesLeft; 3308 2946 3309 PPGMPAGE pPageDst = &pRam->aPages[(pCur->RamRange.GCPhys - pRam->GCPhys) >> PAGE_SHIFT]; 2947 uint32_t cPagesLeft = pCur->RamRange.cb >> PAGE_SHIFT;2948 3310 while (cPagesLeft-- > 0) 2949 3311 { 2950 3312 PGM_PAGE_INIT_ZERO(pPageDst, pVM, PGMPAGETYPE_RAM); 2951 pVM->pgm.s.cZeroPages++;2952 3313 pPageDst++; 2953 3314 } … … 2957 3318 #ifdef VBOX_WITH_REM 2958 3319 GCPhysRangeREM = NIL_RTGCPHYS; /* shuts up gcc */ 2959 cbRangeREM = RTGCPHYS_MAX; /* ditto */2960 3320 fInformREM = false; 2961 3321 #endif … … 2965 3325 #ifdef VBOX_WITH_REM 2966 3326 GCPhysRangeREM = pCur->RamRange.GCPhys; 2967 cbRangeREM = pCur->RamRange.cb; 2968 fInformREM = true; 3327 fInformREM = pCur->fMmio2; 2969 3328 #endif 2970 3329 pgmR3PhysUnlinkRamRange(pVM, &pCur->RamRange); … … 2986 3345 pgmPhysInvalidatePageMapTLB(pVM); 2987 3346 pgmPhysInvalidRamRangeTlbs(pVM); 3347 2988 3348 pgmUnlock(pVM); 2989 3349 2990 3350 #ifdef VBOX_WITH_REM 3351 /* 3352 * Inform REM without holding the PGM lock. 
3353 */ 2991 3354 if (fInformREM) 2992 REMR3NotifyPhysRamDeregister(pVM, GCPhysRangeREM, cbRangeREM);3355 REMR3NotifyPhysRamDeregister(pVM, GCPhysRangeREM, pCur->RamRange.cb); 2993 3356 #endif 2994 3357 … … 2998 3361 2999 3362 /** 3000 * Checks if the given address is an MMIO2 base address or not. 3363 * Checks if the given address is an MMIO2 or pre-registered MMIO base address 3364 * or not. 3001 3365 * 3002 3366 * @returns true/false accordingly. … … 3005 3369 * @param GCPhys The address to check. 3006 3370 */ 3007 VMMR3DECL(bool) PGMR3PhysMMIO 2IsBase(PVM pVM, PPDMDEVINS pDevIns, RTGCPHYS GCPhys)3371 VMMR3DECL(bool) PGMR3PhysMMIOExIsBase(PVM pVM, PPDMDEVINS pDevIns, RTGCPHYS GCPhys) 3008 3372 { 3009 3373 /* … … 3020 3384 */ 3021 3385 pgmLock(pVM); 3022 for (PPGM MMIO2RANGE pCur = pVM->pgm.s.pMmio2RangesR3; pCur; pCur = pCur->pNextR3)3386 for (PPGMREGMMIORANGE pCur = pVM->pgm.s.pRegMmioRangesR3; pCur; pCur = pCur->pNextR3) 3023 3387 if (pCur->RamRange.GCPhys == GCPhys) 3024 3388 { … … 3045 3409 * @param pHCPhys Where to store the result. 3046 3410 */ 3047 VMMR3 DECL(int) PGMR3PhysMMIO2GetHCPhys(PVM pVM, PPDMDEVINS pDevIns, uint32_t iRegion, RTGCPHYS off, PRTHCPHYS pHCPhys)3411 VMMR3_INT_DECL(int) PGMR3PhysMMIO2GetHCPhys(PVM pVM, PPDMDEVINS pDevIns, uint32_t iRegion, RTGCPHYS off, PRTHCPHYS pHCPhys) 3048 3412 { 3049 3413 /* … … 3055 3419 3056 3420 pgmLock(pVM); 3057 PPGM MMIO2RANGE pCur = pgmR3PhysMMIO2Find(pVM, pDevIns, iRegion);3421 PPGMREGMMIORANGE pCur = pgmR3PhysMMIOExFind(pVM, pDevIns, iRegion); 3058 3422 AssertReturn(pCur, VERR_NOT_FOUND); 3423 AssertReturn(pCur->fMmio2, VERR_WRONG_TYPE); 3059 3424 AssertReturn(off < pCur->RamRange.cb, VERR_INVALID_PARAMETER); 3060 3425 … … 3082 3447 * @param pR0Ptr Where to store the R0 address. 3083 3448 */ 3084 VMMR3 DECL(int) PGMR3PhysMMIO2MapKernel(PVM pVM, PPDMDEVINS pDevIns, uint32_t iRegion, RTGCPHYS off, RTGCPHYS cb,3085 const char *pszDesc, PRTR0PTR pR0Ptr)3449 VMMR3_INT_DECL(int) PGMR3PhysMMIO2MapKernel(PVM pVM, PPDMDEVINS pDevIns, uint32_t iRegion, RTGCPHYS off, RTGCPHYS cb, 3450 const char *pszDesc, PRTR0PTR pR0Ptr) 3086 3451 { 3087 3452 /* … … 3092 3457 AssertReturn(iRegion <= UINT8_MAX, VERR_INVALID_PARAMETER); 3093 3458 3094 PPGM MMIO2RANGE pCur = pgmR3PhysMMIO2Find(pVM, pDevIns, iRegion);3459 PPGMREGMMIORANGE pCur = pgmR3PhysMMIOExFind(pVM, pDevIns, iRegion); 3095 3460 AssertReturn(pCur, VERR_NOT_FOUND); 3461 AssertReturn(pCur->fMmio2, VERR_WRONG_TYPE); 3096 3462 AssertReturn(off < pCur->RamRange.cb, VERR_INVALID_PARAMETER); 3097 3463 AssertReturn(cb <= pCur->RamRange.cb, VERR_INVALID_PARAMETER); -
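The PGMPhys.cpp hunk above carries the heart of the changeset: the MMIO2-only list becomes a combined registration list, PGMR3PhysMMIOExPreRegister allocates the PGMREGMMIORANGE and the physical access handler up front, and PGMR3PhysMMIOExMap / PGMR3PhysMMIOExUnmap merely link the prepared RAM range and arm or disarm that handler. A rough sketch of the intended calling sequence from a device follows; the handler type, user data, BAR address and description string are made-up placeholders, while the PGM entry points and their parameters are taken from the diff itself:

    #include <VBox/vmm/pgm.h>   /* PGMR3PhysMMIOEx* prototypes */

    /* Device construction: allocate the range record and handler once.
       Nothing is mapped into the guest yet. */
    int rc = PGMR3PhysMMIOExPreRegister(pVM, pDevIns, 0 /*iRegion*/, _512M /*cb*/,
                                        g_hMyHandlerType /*placeholder handler type*/,
                                        pThis /*pvUserR3*/, NIL_RTR0PTR /*pvUserR0*/,
                                        NIL_RTRCPTR /*pvUserRC*/, "MyDev BAR0");

    /* Guest programs the PCI BAR: link the prepared range and register the
       handler (any overlapped RAM pages become MMIO/ZERO pages). */
    rc = PGMR3PhysMMIOExMap(pVM, pDevIns, 0 /*iRegion*/, GCPhysBar0);

    /* BAR reprogramming / reset: */
    rc = PGMR3PhysMMIOExUnmap(pVM, pDevIns, 0 /*iRegion*/, GCPhysBar0);

    /* Device destruction (unmaps first if still mapped): */
    rc = PGMR3PhysMMIOExDeregister(pVM, pDevIns, 0 /*iRegion*/);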
trunk/src/VBox/VMM/VMMR3/PGMSavedState.cpp
r62478 r64115 642 642 */ 643 643 pgmLock(pVM); 644 for (PPGMMMIO2RANGE pMmio2 = pVM->pgm.s.pMmio2RangesR3; pMmio2; pMmio2 = pMmio2->pNextR3) 645 { 646 uint32_t const cPages = pMmio2->RamRange.cb >> PAGE_SHIFT; 647 pgmUnlock(pVM); 648 649 PPGMLIVESAVEMMIO2PAGE paLSPages = (PPGMLIVESAVEMMIO2PAGE)MMR3HeapAllocZ(pVM, MM_TAG_PGM, sizeof(PGMLIVESAVEMMIO2PAGE) * cPages); 650 if (!paLSPages) 651 return VERR_NO_MEMORY; 652 for (uint32_t iPage = 0; iPage < cPages; iPage++) 653 { 654 /* Initialize it as a dirty zero page. */ 655 paLSPages[iPage].fDirty = true; 656 paLSPages[iPage].cUnchangedScans = 0; 657 paLSPages[iPage].fZero = true; 658 paLSPages[iPage].u32CrcH1 = PGM_STATE_CRC32_ZERO_HALF_PAGE; 659 paLSPages[iPage].u32CrcH2 = PGM_STATE_CRC32_ZERO_HALF_PAGE; 660 } 661 662 pgmLock(pVM); 663 pMmio2->paLSPages = paLSPages; 664 pVM->pgm.s.LiveSave.Mmio2.cDirtyPages += cPages; 644 for (PPGMREGMMIORANGE pRegMmio = pVM->pgm.s.pRegMmioRangesR3; pRegMmio; pRegMmio = pRegMmio->pNextR3) 645 { 646 if (pRegMmio->fMmio2) 647 { 648 uint32_t const cPages = pRegMmio->RamRange.cb >> PAGE_SHIFT; 649 pgmUnlock(pVM); 650 651 PPGMLIVESAVEMMIO2PAGE paLSPages = (PPGMLIVESAVEMMIO2PAGE)MMR3HeapAllocZ(pVM, MM_TAG_PGM, sizeof(PGMLIVESAVEMMIO2PAGE) * cPages); 652 if (!paLSPages) 653 return VERR_NO_MEMORY; 654 for (uint32_t iPage = 0; iPage < cPages; iPage++) 655 { 656 /* Initialize it as a dirty zero page. */ 657 paLSPages[iPage].fDirty = true; 658 paLSPages[iPage].cUnchangedScans = 0; 659 paLSPages[iPage].fZero = true; 660 paLSPages[iPage].u32CrcH1 = PGM_STATE_CRC32_ZERO_HALF_PAGE; 661 paLSPages[iPage].u32CrcH2 = PGM_STATE_CRC32_ZERO_HALF_PAGE; 662 } 663 664 pgmLock(pVM); 665 pRegMmio->paLSPages = paLSPages; 666 pVM->pgm.s.LiveSave.Mmio2.cDirtyPages += cPages; 667 } 665 668 } 666 669 pgmUnlock(pVM); … … 680 683 pgmLock(pVM); 681 684 uint8_t id = 1; 682 for (PPGMMMIO2RANGE pMmio2 = pVM->pgm.s.pMmio2RangesR3; pMmio2; pMmio2 = pMmio2->pNextR3, id++) 683 { 684 pMmio2->idSavedState = id; 685 SSMR3PutU8(pSSM, id); 686 SSMR3PutStrZ(pSSM, pMmio2->pDevInsR3->pReg->szName); 687 SSMR3PutU32(pSSM, pMmio2->pDevInsR3->iInstance); 688 SSMR3PutU8(pSSM, pMmio2->iRegion); 689 SSMR3PutStrZ(pSSM, pMmio2->RamRange.pszDesc); 690 int rc = SSMR3PutGCPhys(pSSM, pMmio2->RamRange.cb); 691 if (RT_FAILURE(rc)) 692 break; 685 for (PPGMREGMMIORANGE pRegMmio = pVM->pgm.s.pRegMmioRangesR3; pRegMmio; pRegMmio = pRegMmio->pNextR3) 686 { 687 if (pRegMmio->fMmio2) 688 { 689 pRegMmio->idSavedState = id; 690 SSMR3PutU8(pSSM, id); 691 SSMR3PutStrZ(pSSM, pRegMmio->pDevInsR3->pReg->szName); 692 SSMR3PutU32(pSSM, pRegMmio->pDevInsR3->iInstance); 693 SSMR3PutU8(pSSM, pRegMmio->iRegion); 694 SSMR3PutStrZ(pSSM, pRegMmio->RamRange.pszDesc); 695 int rc = SSMR3PutGCPhys(pSSM, pRegMmio->RamRange.cb); 696 if (RT_FAILURE(rc)) 697 break; 698 id++; 699 } 693 700 } 694 701 pgmUnlock(pVM); … … 709 716 PGM_LOCK_ASSERT_OWNER(pVM); 710 717 711 for (PPGMMMIO2RANGE pMmio2 = pVM->pgm.s.pMmio2RangesR3; pMmio2; pMmio2 = pMmio2->pNextR3) 712 pMmio2->idSavedState = UINT8_MAX; 718 for (PPGMREGMMIORANGE pRegMmio = pVM->pgm.s.pRegMmioRangesR3; pRegMmio; pRegMmio = pRegMmio->pNextR3) 719 if (pRegMmio->fMmio2) 720 pRegMmio->idSavedState = UINT8_MAX; 713 721 714 722 for (;;) … … 723 731 if (id == UINT8_MAX) 724 732 { 725 for (PPGM MMIO2RANGE pMmio2 = pVM->pgm.s.pMmio2RangesR3; pMmio2; pMmio2 = pMmio2->pNextR3)726 AssertLogRelMsg(p Mmio2->idSavedState != UINT8_MAX, ("%s\n", pMmio2->RamRange.pszDesc));733 for (PPGMREGMMIORANGE pRegMmio = pVM->pgm.s.pRegMmioRangesR3; pRegMmio; pRegMmio = 
pRegMmio->pNextR3) 734 AssertLogRelMsg(pRegMmio->idSavedState != UINT8_MAX || !pRegMmio->fMmio2, ("%s\n", pRegMmio->RamRange.pszDesc)); 727 735 return VINF_SUCCESS; /* the end */ 728 736 } … … 749 757 * Locate a matching MMIO2 range. 750 758 */ 751 PPGMMMIO2RANGE pMmio2; 752 for (pMmio2 = pVM->pgm.s.pMmio2RangesR3; pMmio2; pMmio2 = pMmio2->pNextR3) 753 { 754 if ( pMmio2->idSavedState == UINT8_MAX 755 && pMmio2->iRegion == iRegion 756 && pMmio2->pDevInsR3->iInstance == uInstance 757 && !strcmp(pMmio2->pDevInsR3->pReg->szName, szDevName)) 758 { 759 pMmio2->idSavedState = id; 759 PPGMREGMMIORANGE pRegMmio; 760 for (pRegMmio = pVM->pgm.s.pRegMmioRangesR3; pRegMmio; pRegMmio = pRegMmio->pNextR3) 761 { 762 if ( pRegMmio->idSavedState == UINT8_MAX 763 && pRegMmio->iRegion == iRegion 764 && pRegMmio->pDevInsR3->iInstance == uInstance 765 && pRegMmio->fMmio2 766 && !strcmp(pRegMmio->pDevInsR3->pReg->szName, szDevName)) 767 { 768 pRegMmio->idSavedState = id; 760 769 break; 761 770 } 762 771 } 763 if (!p Mmio2)772 if (!pRegMmio) 764 773 return SSMR3SetCfgError(pSSM, RT_SRC_POS, N_("Failed to locate a MMIO2 range called '%s' owned by %s/%u, region %d"), 765 774 szDesc, szDevName, uInstance, iRegion); … … 769 778 * the same. 770 779 */ 771 if (cb != p Mmio2->RamRange.cb)780 if (cb != pRegMmio->RamRange.cb) 772 781 { 773 782 LogRel(("PGM: MMIO2 region \"%s\" size mismatch: saved=%RGp config=%RGp\n", 774 p Mmio2->RamRange.pszDesc, cb, pMmio2->RamRange.cb));775 if (cb > p Mmio2->RamRange.cb) /* bad idea? */783 pRegMmio->RamRange.pszDesc, cb, pRegMmio->RamRange.cb)); 784 if (cb > pRegMmio->RamRange.cb) /* bad idea? */ 776 785 return SSMR3SetCfgError(pSSM, RT_SRC_POS, N_("MMIO2 region \"%s\" size mismatch: saved=%RGp config=%RGp"), 777 p Mmio2->RamRange.pszDesc, cb, pMmio2->RamRange.cb);786 pRegMmio->RamRange.pszDesc, cb, pRegMmio->RamRange.cb); 778 787 } 779 788 } /* forever */ … … 873 882 874 883 pgmLock(pVM); /* paranoia */ 875 for (PPGMMMIO2RANGE pMmio2 = pVM->pgm.s.pMmio2RangesR3; pMmio2; pMmio2 = pMmio2->pNextR3) 876 { 877 PPGMLIVESAVEMMIO2PAGE paLSPages = pMmio2->paLSPages; 878 uint32_t cPages = pMmio2->RamRange.cb >> PAGE_SHIFT; 879 pgmUnlock(pVM); 880 881 for (uint32_t iPage = 0; iPage < cPages; iPage++) 882 { 883 uint8_t const *pbPage = (uint8_t const *)pMmio2->pvR3 + iPage * PAGE_SIZE; 884 pgmR3ScanMmio2Page(pVM, pbPage, &paLSPages[iPage]); 885 } 886 887 pgmLock(pVM); 888 } 884 for (PPGMREGMMIORANGE pRegMmio = pVM->pgm.s.pRegMmioRangesR3; pRegMmio; pRegMmio = pRegMmio->pNextR3) 885 if (pRegMmio->fMmio2) 886 { 887 PPGMLIVESAVEMMIO2PAGE paLSPages = pRegMmio->paLSPages; 888 uint32_t cPages = pRegMmio->RamRange.cb >> PAGE_SHIFT; 889 pgmUnlock(pVM); 890 891 for (uint32_t iPage = 0; iPage < cPages; iPage++) 892 { 893 uint8_t const *pbPage = (uint8_t const *)pRegMmio->pvR3 + iPage * PAGE_SIZE; 894 pgmR3ScanMmio2Page(pVM, pbPage, &paLSPages[iPage]); 895 } 896 897 pgmLock(pVM); 898 } 889 899 pgmUnlock(pVM); 890 900 … … 913 923 */ 914 924 pgmLock(pVM); 915 for (PPGMMMIO2RANGE pMmio2 = pVM->pgm.s.pMmio2RangesR3; 916 pMmio2 && RT_SUCCESS(rc); 917 pMmio2 = pMmio2->pNextR3) 918 { 919 PPGMLIVESAVEMMIO2PAGE paLSPages = pMmio2->paLSPages; 920 uint8_t const *pbPage = (uint8_t const *)pMmio2->RamRange.pvR3; 921 uint32_t cPages = pMmio2->RamRange.cb >> PAGE_SHIFT; 922 uint32_t iPageLast = cPages; 923 for (uint32_t iPage = 0; iPage < cPages; iPage++, pbPage += PAGE_SIZE) 924 { 925 uint8_t u8Type; 926 if (!fLiveSave) 927 u8Type = ASMMemIsZeroPage(pbPage) ? 
PGM_STATE_REC_MMIO2_ZERO : PGM_STATE_REC_MMIO2_RAW; 928 else 925 for (PPGMREGMMIORANGE pRegMmio = pVM->pgm.s.pRegMmioRangesR3; 926 pRegMmio && RT_SUCCESS(rc); 927 pRegMmio = pRegMmio->pNextR3) 928 if (pRegMmio->fMmio2) 929 { 930 PPGMLIVESAVEMMIO2PAGE paLSPages = pRegMmio->paLSPages; 931 uint8_t const *pbPage = (uint8_t const *)pRegMmio->RamRange.pvR3; 932 uint32_t cPages = pRegMmio->RamRange.cb >> PAGE_SHIFT; 933 uint32_t iPageLast = cPages; 934 for (uint32_t iPage = 0; iPage < cPages; iPage++, pbPage += PAGE_SIZE) 929 935 { 930 /* Try figure if it's a clean page, compare the SHA-1 to be really sure. */ 931 if ( !paLSPages[iPage].fDirty 932 && !pgmR3ScanMmio2Page(pVM, pbPage, &paLSPages[iPage])) 936 uint8_t u8Type; 937 if (!fLiveSave) 938 u8Type = ASMMemIsZeroPage(pbPage) ? PGM_STATE_REC_MMIO2_ZERO : PGM_STATE_REC_MMIO2_RAW; 939 else 933 940 { 934 if (paLSPages[iPage].fZero) 935 continue; 936 937 uint8_t abSha1Hash[RTSHA1_HASH_SIZE]; 938 RTSha1(pbPage, PAGE_SIZE, abSha1Hash); 939 if (!memcmp(abSha1Hash, paLSPages[iPage].abSha1Saved, sizeof(abSha1Hash))) 940 continue; 941 /* Try figure if it's a clean page, compare the SHA-1 to be really sure. */ 942 if ( !paLSPages[iPage].fDirty 943 && !pgmR3ScanMmio2Page(pVM, pbPage, &paLSPages[iPage])) 944 { 945 if (paLSPages[iPage].fZero) 946 continue; 947 948 uint8_t abSha1Hash[RTSHA1_HASH_SIZE]; 949 RTSha1(pbPage, PAGE_SIZE, abSha1Hash); 950 if (!memcmp(abSha1Hash, paLSPages[iPage].abSha1Saved, sizeof(abSha1Hash))) 951 continue; 952 } 953 u8Type = paLSPages[iPage].fZero ? PGM_STATE_REC_MMIO2_ZERO : PGM_STATE_REC_MMIO2_RAW; 954 pVM->pgm.s.LiveSave.cSavedPages++; 941 955 } 942 u8Type = paLSPages[iPage].fZero ? PGM_STATE_REC_MMIO2_ZERO : PGM_STATE_REC_MMIO2_RAW; 943 pVM->pgm.s.LiveSave.cSavedPages++; 956 957 if (iPage != 0 && iPage == iPageLast + 1) 958 rc = SSMR3PutU8(pSSM, u8Type); 959 else 960 { 961 SSMR3PutU8(pSSM, u8Type | PGM_STATE_REC_FLAG_ADDR); 962 SSMR3PutU8(pSSM, pRegMmio->idSavedState); 963 rc = SSMR3PutU32(pSSM, iPage); 964 } 965 if (u8Type == PGM_STATE_REC_MMIO2_RAW) 966 rc = SSMR3PutMem(pSSM, pbPage, PAGE_SIZE); 967 if (RT_FAILURE(rc)) 968 break; 969 iPageLast = iPage; 944 970 } 945 946 if (iPage != 0 && iPage == iPageLast + 1) 947 rc = SSMR3PutU8(pSSM, u8Type); 948 else 949 { 950 SSMR3PutU8(pSSM, u8Type | PGM_STATE_REC_FLAG_ADDR); 951 SSMR3PutU8(pSSM, pMmio2->idSavedState); 952 rc = SSMR3PutU32(pSSM, iPage); 953 } 954 if (u8Type == PGM_STATE_REC_MMIO2_RAW) 955 rc = SSMR3PutMem(pSSM, pbPage, PAGE_SIZE); 956 if (RT_FAILURE(rc)) 957 break; 958 iPageLast = iPage; 959 } 960 } 971 } 961 972 pgmUnlock(pVM); 962 973 } … … 970 981 { 971 982 pgmLock(pVM); 972 for (PPGMMMIO2RANGE pMmio2 = pVM->pgm.s.pMmio2RangesR3; 973 pMmio2 && RT_SUCCESS(rc); 974 pMmio2 = pMmio2->pNextR3) 975 { 976 PPGMLIVESAVEMMIO2PAGE paLSPages = pMmio2->paLSPages; 977 uint8_t const *pbPage = (uint8_t const *)pMmio2->RamRange.pvR3; 978 uint32_t cPages = pMmio2->RamRange.cb >> PAGE_SHIFT; 979 uint32_t iPageLast = cPages; 980 pgmUnlock(pVM); 981 982 for (uint32_t iPage = 0; iPage < cPages; iPage++, pbPage += PAGE_SIZE) 983 { 984 /* Skip clean pages and pages which hasn't quiesced. */ 985 if (!paLSPages[iPage].fDirty) 986 continue; 987 if (paLSPages[iPage].cUnchangedScans < 3) 988 continue; 989 if (pgmR3ScanMmio2Page(pVM, pbPage, &paLSPages[iPage])) 990 continue; 991 992 /* Save it. 
*/ 993 bool const fZero = paLSPages[iPage].fZero; 994 uint8_t abPage[PAGE_SIZE]; 995 if (!fZero) 983 for (PPGMREGMMIORANGE pRegMmio = pVM->pgm.s.pRegMmioRangesR3; 984 pRegMmio && RT_SUCCESS(rc); 985 pRegMmio = pRegMmio->pNextR3) 986 if (pRegMmio->fMmio2) 987 { 988 PPGMLIVESAVEMMIO2PAGE paLSPages = pRegMmio->paLSPages; 989 uint8_t const *pbPage = (uint8_t const *)pRegMmio->RamRange.pvR3; 990 uint32_t cPages = pRegMmio->RamRange.cb >> PAGE_SHIFT; 991 uint32_t iPageLast = cPages; 992 pgmUnlock(pVM); 993 994 for (uint32_t iPage = 0; iPage < cPages; iPage++, pbPage += PAGE_SIZE) 996 995 { 997 memcpy(abPage, pbPage, PAGE_SIZE); 998 RTSha1(abPage, PAGE_SIZE, paLSPages[iPage].abSha1Saved); 996 /* Skip clean pages and pages which hasn't quiesced. */ 997 if (!paLSPages[iPage].fDirty) 998 continue; 999 if (paLSPages[iPage].cUnchangedScans < 3) 1000 continue; 1001 if (pgmR3ScanMmio2Page(pVM, pbPage, &paLSPages[iPage])) 1002 continue; 1003 1004 /* Save it. */ 1005 bool const fZero = paLSPages[iPage].fZero; 1006 uint8_t abPage[PAGE_SIZE]; 1007 if (!fZero) 1008 { 1009 memcpy(abPage, pbPage, PAGE_SIZE); 1010 RTSha1(abPage, PAGE_SIZE, paLSPages[iPage].abSha1Saved); 1011 } 1012 1013 uint8_t u8Type = paLSPages[iPage].fZero ? PGM_STATE_REC_MMIO2_ZERO : PGM_STATE_REC_MMIO2_RAW; 1014 if (iPage != 0 && iPage == iPageLast + 1) 1015 rc = SSMR3PutU8(pSSM, u8Type); 1016 else 1017 { 1018 SSMR3PutU8(pSSM, u8Type | PGM_STATE_REC_FLAG_ADDR); 1019 SSMR3PutU8(pSSM, pRegMmio->idSavedState); 1020 rc = SSMR3PutU32(pSSM, iPage); 1021 } 1022 if (u8Type == PGM_STATE_REC_MMIO2_RAW) 1023 rc = SSMR3PutMem(pSSM, abPage, PAGE_SIZE); 1024 if (RT_FAILURE(rc)) 1025 break; 1026 1027 /* Housekeeping. */ 1028 paLSPages[iPage].fDirty = false; 1029 pVM->pgm.s.LiveSave.Mmio2.cDirtyPages--; 1030 pVM->pgm.s.LiveSave.Mmio2.cReadyPages++; 1031 if (u8Type == PGM_STATE_REC_MMIO2_ZERO) 1032 pVM->pgm.s.LiveSave.Mmio2.cZeroPages++; 1033 pVM->pgm.s.LiveSave.cSavedPages++; 1034 iPageLast = iPage; 999 1035 } 1000 1036 1001 uint8_t u8Type = paLSPages[iPage].fZero ? PGM_STATE_REC_MMIO2_ZERO : PGM_STATE_REC_MMIO2_RAW; 1002 if (iPage != 0 && iPage == iPageLast + 1) 1003 rc = SSMR3PutU8(pSSM, u8Type); 1004 else 1005 { 1006 SSMR3PutU8(pSSM, u8Type | PGM_STATE_REC_FLAG_ADDR); 1007 SSMR3PutU8(pSSM, pMmio2->idSavedState); 1008 rc = SSMR3PutU32(pSSM, iPage); 1009 } 1010 if (u8Type == PGM_STATE_REC_MMIO2_RAW) 1011 rc = SSMR3PutMem(pSSM, abPage, PAGE_SIZE); 1012 if (RT_FAILURE(rc)) 1013 break; 1014 1015 /* Housekeeping. 
*/ 1016 paLSPages[iPage].fDirty = false; 1017 pVM->pgm.s.LiveSave.Mmio2.cDirtyPages--; 1018 pVM->pgm.s.LiveSave.Mmio2.cReadyPages++; 1019 if (u8Type == PGM_STATE_REC_MMIO2_ZERO) 1020 pVM->pgm.s.LiveSave.Mmio2.cZeroPages++; 1021 pVM->pgm.s.LiveSave.cSavedPages++; 1022 iPageLast = iPage; 1023 } 1024 1025 pgmLock(pVM); 1026 } 1037 pgmLock(pVM); 1038 } 1027 1039 pgmUnlock(pVM); 1028 1040 } … … 1044 1056 */ 1045 1057 pgmLock(pVM); 1046 for (PPGMMMIO2RANGE pMmio2 = pVM->pgm.s.pMmio2RangesR3; pMmio2; pMmio2 = pMmio2->pNextR3) 1047 { 1048 void *pvMmio2ToFree = pMmio2->paLSPages; 1049 if (pvMmio2ToFree) 1050 { 1051 pMmio2->paLSPages = NULL; 1052 pgmUnlock(pVM); 1053 MMR3HeapFree(pvMmio2ToFree); 1054 pgmLock(pVM); 1055 } 1056 } 1058 for (PPGMREGMMIORANGE pRegMmio = pVM->pgm.s.pRegMmioRangesR3; pRegMmio; pRegMmio = pRegMmio->pNextR3) 1059 if (pRegMmio->fMmio2) 1060 { 1061 void *pvMmio2ToFree = pRegMmio->paLSPages; 1062 if (pvMmio2ToFree) 1063 { 1064 pRegMmio->paLSPages = NULL; 1065 pgmUnlock(pVM); 1066 MMR3HeapFree(pvMmio2ToFree); 1067 pgmLock(pVM); 1068 } 1069 } 1057 1070 pgmUnlock(pVM); 1058 1071 } … … 2613 2626 * Process page records until we hit the terminator. 2614 2627 */ 2615 RTGCPHYS GCPhys = NIL_RTGCPHYS;2616 PPGMRAMRANGE pRamHint = NULL;2617 uint8_t id = UINT8_MAX;2618 uint32_t iPage = UINT32_MAX - 10;2619 PPGMROMRANGE pRom = NULL;2620 PPGM MMIO2RANGE pMmio2= NULL;2628 RTGCPHYS GCPhys = NIL_RTGCPHYS; 2629 PPGMRAMRANGE pRamHint = NULL; 2630 uint8_t id = UINT8_MAX; 2631 uint32_t iPage = UINT32_MAX - 10; 2632 PPGMROMRANGE pRom = NULL; 2633 PPGMREGMMIORANGE pRegMmio = NULL; 2621 2634 2622 2635 /* … … 2792 2805 return rc; 2793 2806 } 2794 if ( !pMmio22795 || pMmio2->idSavedState != id)2807 if ( !pRegMmio 2808 || pRegMmio->idSavedState != id) 2796 2809 { 2797 for (pMmio2 = pVM->pgm.s.pMmio2RangesR3; pMmio2; pMmio2 = pMmio2->pNextR3) 2798 if (pMmio2->idSavedState == id) 2810 for (pRegMmio = pVM->pgm.s.pRegMmioRangesR3; pRegMmio; pRegMmio = pRegMmio->pNextR3) 2811 if ( pRegMmio->idSavedState == id 2812 && pRegMmio->fMmio2) 2799 2813 break; 2800 AssertLogRelMsgReturn(p Mmio2, ("id=%#u iPage=%#x\n", id, iPage), VERR_PGM_SAVED_MMIO2_RANGE_NOT_FOUND);2814 AssertLogRelMsgReturn(pRegMmio, ("id=%#u iPage=%#x\n", id, iPage), VERR_PGM_SAVED_MMIO2_RANGE_NOT_FOUND); 2801 2815 } 2802 AssertLogRelMsgReturn(iPage < (p Mmio2->RamRange.cb >> PAGE_SHIFT), ("iPage=%#x cb=%RGp %s\n", iPage, pMmio2->RamRange.cb, pMmio2->RamRange.pszDesc), VERR_PGM_SAVED_MMIO2_PAGE_NOT_FOUND);2803 void *pvDstPage = (uint8_t *)p Mmio2->RamRange.pvR3 + ((size_t)iPage << PAGE_SHIFT);2816 AssertLogRelMsgReturn(iPage < (pRegMmio->RamRange.cb >> PAGE_SHIFT), ("iPage=%#x cb=%RGp %s\n", iPage, pRegMmio->RamRange.cb, pRegMmio->RamRange.pszDesc), VERR_PGM_SAVED_MMIO2_PAGE_NOT_FOUND); 2817 void *pvDstPage = (uint8_t *)pRegMmio->RamRange.pvR3 + ((size_t)iPage << PAGE_SHIFT); 2804 2818 2805 2819 /* -
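One subtlety in the PGMSavedState.cpp hunk deserves a note: the registration list now mixes MMIO2 and plain pre-registered MMIO entries, and only MMIO2 entries own saveable pages, so the saved-state ID counter may advance only for those (hence id++ moving from the for-clause into the fMmio2 branch, and the load side matching on both idSavedState and fMmio2). Condensed to its essence — an illustrative distillation, not code from the tree:

    uint8_t id = 1;
    for (PPGMREGMMIORANGE pRegMmio = pVM->pgm.s.pRegMmioRangesR3; pRegMmio; pRegMmio = pRegMmio->pNextR3)
        if (pRegMmio->fMmio2)
            pRegMmio->idSavedState = id++;  /* dense IDs for MMIO2 ranges only... */
        else
            Assert(pRegMmio->idSavedState == UINT8_MAX); /* ...plain MMIO is never saved */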
trunk/src/VBox/VMM/include/PGMInternal.h
r63640 r64115 1539 1539 /** Ad hoc RAM range for an MMIO mapping. */ 1540 1540 #define PGM_RAM_RANGE_FLAGS_AD_HOC_MMIO RT_BIT(22) 1541 /** Ad hoc RAM range for an MMIO2 mapping. */1542 #define PGM_RAM_RANGE_FLAGS_AD_HOC_MMIO 2RT_BIT(23)1541 /** Ad hoc RAM range for an MMIO2 or pre-registered MMIO mapping. */ 1542 #define PGM_RAM_RANGE_FLAGS_AD_HOC_MMIO_EX RT_BIT(23) 1543 1543 /** @} */ 1544 1544 … … 1548 1548 */ 1549 1549 #define PGM_RAM_RANGE_IS_AD_HOC(pRam) \ 1550 (!!( (pRam)->fFlags & (PGM_RAM_RANGE_FLAGS_AD_HOC_ROM | PGM_RAM_RANGE_FLAGS_AD_HOC_MMIO | PGM_RAM_RANGE_FLAGS_AD_HOC_MMIO 2) ) )1550 (!!( (pRam)->fFlags & (PGM_RAM_RANGE_FLAGS_AD_HOC_ROM | PGM_RAM_RANGE_FLAGS_AD_HOC_MMIO | PGM_RAM_RANGE_FLAGS_AD_HOC_MMIO_EX) ) ) 1551 1551 1552 1552 /** The number of entries in the RAM range TLBs (there is one for each … … 1688 1688 1689 1689 /** 1690 * A registered MMIO2 (= Device RAM) range.1691 * 1692 * There are a few reason why we need to keep track of these 1693 * registrations. One of them is the deregistration & cleanup stuff,1694 * while another is that the PGMRAMRANGE associated with such a region may1695 * have to be removed from the ramrange list.1696 * 1697 * Overlapping with a RAM range has to be 100% or none at all. The pages 1698 * in the existing RAM range must not be ROM nor MMIO. A guru meditation1699 * will be raised if a partial overlap or an overlap of ROM pages is1700 * encountered. On an overlap we will free all the existing RAM pages and1701 * p ut in the ram range pages instead.1702 */ 1703 typedef struct PGM MMIO2RANGE1690 * A registered MMIO2 (= Device RAM) or pre-registered MMIO range. 1691 * 1692 * There are a few reason why we need to keep track of these registrations. One 1693 * of them is the deregistration & cleanup stuff, while another is that the 1694 * PGMRAMRANGE associated with such a region may have to be removed from the ram 1695 * range list. 1696 * 1697 * Overlapping with a RAM range has to be 100% or none at all. The pages in the 1698 * existing RAM range must not be ROM nor MMIO. A guru meditation will be 1699 * raised if a partial overlap or an overlap of ROM pages is encountered. On an 1700 * overlap we will free all the existing RAM pages and put in the ram range 1701 * pages instead. 1702 */ 1703 typedef struct PGMREGMMIORANGE 1704 1704 { 1705 1705 /** The owner of the range. (a device) */ 1706 1706 PPDMDEVINSR3 pDevInsR3; 1707 /** Pointer to the ring-3 mapping of the allocation . */1707 /** Pointer to the ring-3 mapping of the allocation, if MMIO2. */ 1708 1708 RTR3PTR pvR3; 1709 1709 /** Pointer to the next range - R3. */ 1710 R3PTRTYPE(struct PGMMMIO2RANGE *) pNextR3; 1710 R3PTRTYPE(struct PGMREGMMIORANGE *) pNextR3; 1711 /** Whether this is MMIO2 or plain MMIO. */ 1712 bool fMmio2; 1711 1713 /** Whether it's mapped or not. */ 1712 1714 bool fMapped; … … 1722 1724 uint8_t idMmio2; 1723 1725 /** Alignment padding for putting the ram range on a PGMPAGE alignment boundary. */ 1724 uint8_t abAlignment[HC_ARCH_BITS == 32 ? 11 : 11]; 1725 /** Live save per page tracking data. */ 1726 uint8_t abAlignment[HC_ARCH_BITS == 32 ? 6 : 2]; 1727 /** Pointer to the physical handler for MMIO. */ 1728 R3PTRTYPE(PPGMPHYSHANDLER) pPhysHandlerR3; 1729 /** Live save per page tracking data for MMIO2. */ 1726 1730 R3PTRTYPE(PPGMLIVESAVEMMIO2PAGE) paLSPages; 1727 1731 /** The associated RAM range. */ 1728 1732 PGMRAMRANGE RamRange; 1729 } PGMMMIO2RANGE; 1730 /** Pointer to a MMIO2 range. 
*/ 1731 typedef PGMMMIO2RANGE *PPGMMMIO2RANGE; 1733 } PGMREGMMIORANGE; 1734 AssertCompileMemberAlignment(PGMREGMMIORANGE, RamRange, 16); 1735 /** Pointer to a MMIO2 or pre-registered MMIO range. */ 1736 typedef PGMREGMMIORANGE *PPGMREGMMIORANGE; 1732 1737 1733 1738 /** @name Internal MMIO2 constants. … … 1736 1741 #define PGM_MMIO2_MAX_RANGES 8 1737 1742 /** The maximum number of pages in a MMIO2 range. */ 1738 #define PGM_MMIO2_MAX_PAGE_COUNT UINT32_C(0x0 0ffffff)1743 #define PGM_MMIO2_MAX_PAGE_COUNT UINT32_C(0x01000000) 1739 1744 /** Makes a MMIO2 page ID out of a MMIO2 range ID and page index number. */ 1740 1745 #define PGM_MMIO2_PAGEID_MAKE(a_idMmio2, a_iPage) ( ((uint32_t)(a_idMmio2) << 24) | (uint32_t)(a_iPage) ) … … 3308 3313 /** Pointer to the list of MMIO2 ranges - for R3. 3309 3314 * Registration order. */ 3310 R3PTRTYPE(PPGM MMIO2RANGE) pMmio2RangesR3;3315 R3PTRTYPE(PPGMREGMMIORANGE) pRegMmioRangesR3; 3311 3316 /** Pointer to SHW+GST mode data (function pointers). 3312 3317 * The index into this table is made up from */ … … 3314 3319 RTR3PTR R3PtrAlignment0; 3315 3320 /** MMIO2 lookup array for ring-3. Indexed by idMmio2 minus 1. */ 3316 R3PTRTYPE(PPGM MMIO2RANGE)apMmio2RangesR3[PGM_MMIO2_MAX_RANGES];3321 R3PTRTYPE(PPGMREGMMIORANGE) apMmio2RangesR3[PGM_MMIO2_MAX_RANGES]; 3317 3322 3318 3323 /** RAM range TLB for R0. */ … … 3334 3339 R0PTRTYPE(PPGMROMRANGE) pRomRangesR0; 3335 3340 RTR0PTR R0PtrAlignment0; 3336 /** MMIO2 lookup array for ring- 3. Indexed by idMmio2 minus 1. */3337 R0PTRTYPE(PPGM MMIO2RANGE)apMmio2RangesR0[PGM_MMIO2_MAX_RANGES];3341 /** MMIO2 lookup array for ring-0. Indexed by idMmio2 minus 1. */ 3342 R0PTRTYPE(PPGMREGMMIORANGE) apMmio2RangesR0[PGM_MMIO2_MAX_RANGES]; 3338 3343 3339 3344 /** RAM range TLB for RC. */ … … 4139 4144 DECLCALLBACK(void) pgmR3MapInfo(PVM pVM, PCDBGFINFOHLP pHlp, const char *pszArgs); 4140 4145 4146 int pgmHandlerPhysicalExCreate(PVM pVM, PGMPHYSHANDLERTYPE hType, RTR3PTR pvUserR3, RTR0PTR pvUserR0, 4147 RTRCPTR pvUserRC, R3PTRTYPE(const char *) pszDesc, PPGMPHYSHANDLER *ppPhysHandler); 4148 int pgmHandlerPhysicalExRegister(PVM pVM, PPGMPHYSHANDLER pPhysHandler, RTGCPHYS GCPhys, RTGCPHYS GCPhysLast); 4149 int pgmHandlerPhysicalExDeregister(PVM pVM, PPGMPHYSHANDLER pPhysHandler); 4150 int pgmHandlerPhysicalExDestroy(PVM pVM, PPGMPHYSHANDLER pHandler); 4141 4151 void pgmR3HandlerPhysicalUpdateAll(PVM pVM); 4142 4152 bool pgmHandlerPhysicalIsAll(PVM pVM, RTGCPHYS GCPhys); -
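Note how the PGMInternal.h hunk bumps PGM_MMIO2_MAX_PAGE_COUNT from 0x00ffffff to 0x01000000, which lines up exactly with the 8+24-bit split in PGM_MMIO2_PAGEID_MAKE: the range ID occupies the top byte and the page index the low 24 bits, so 2^24 pages (64 GiB at 4 KiB per page) per range is the natural limit. A self-contained check of that packing; the unpack expressions are inferred from the MAKE macro rather than copied from the header:

    #include <stdint.h>
    #include <assert.h>

    #define PGM_MMIO2_PAGEID_MAKE(a_idMmio2, a_iPage) \
        ( ((uint32_t)(a_idMmio2) << 24) | (uint32_t)(a_iPage) )

    int main(void)
    {
        uint32_t const idPage = PGM_MMIO2_PAGEID_MAKE(2, 0x123456);
        assert((idPage >> 24)      == 2);        /* range ID, 1..PGM_MMIO2_MAX_RANGES */
        assert((idPage & 0xffffff) == 0x123456); /* page index < PGM_MMIO2_MAX_PAGE_COUNT */
        return 0;
    }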
trunk/src/VBox/VMM/testcase/tstVMStruct.h
r62016 r64115 878 878 GEN_CHECK_OFF(PGMROMRANGE, aPages); 879 879 GEN_CHECK_OFF(PGMROMRANGE, aPages[1]); 880 GEN_CHECK_SIZE(PGMMMIO2RANGE); 881 GEN_CHECK_OFF(PGMMMIO2RANGE, pDevInsR3); 882 GEN_CHECK_OFF(PGMMMIO2RANGE, pNextR3); 883 GEN_CHECK_OFF(PGMMMIO2RANGE, fMapped); 884 GEN_CHECK_OFF(PGMMMIO2RANGE, fOverlapping); 885 GEN_CHECK_OFF(PGMMMIO2RANGE, iRegion); 886 GEN_CHECK_OFF(PGMMMIO2RANGE, RamRange); 880 GEN_CHECK_SIZE(PGMREGMMIORANGE); 881 GEN_CHECK_OFF(PGMREGMMIORANGE, pDevInsR3); 882 GEN_CHECK_OFF(PGMREGMMIORANGE, pNextR3); 883 GEN_CHECK_OFF(PGMREGMMIORANGE, fMmio2); 884 GEN_CHECK_OFF(PGMREGMMIORANGE, fMapped); 885 GEN_CHECK_OFF(PGMREGMMIORANGE, fOverlapping); 886 GEN_CHECK_OFF(PGMREGMMIORANGE, iRegion); 887 GEN_CHECK_OFF(PGMREGMMIORANGE, RamRange); 887 888 GEN_CHECK_SIZE(PGMTREES); 888 889 GEN_CHECK_OFF(PGMTREES, PhysHandlers); -
trunk/src/VBox/VMM/testcase/tstVMStructSize.cpp
r62276 r64115 385 385 CHECK_SIZE(PGMPAGE, 16); 386 386 CHECK_MEMBER_ALIGNMENT(PGMRAMRANGE, aPages, 16); 387 CHECK_MEMBER_ALIGNMENT(PGM MMIO2RANGE, RamRange, 16);387 CHECK_MEMBER_ALIGNMENT(PGMREGMMIORANGE, RamRange, 16); 388 388 389 389 /* rem */
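The testcase hunks in tstVMStruct.h and tstVMStructSize.cpp track the structure rename and the new fMmio2 member; together with the AssertCompileMemberAlignment(PGMREGMMIORANGE, RamRange, 16) added in PGMInternal.h, they guarantee the embedded RamRange (and thus its PGMPAGE array) stays on a 16-byte boundary. Such compile-time checks boil down to an offsetof assertion; a generic stand-in (an assumption about the shape, not the actual iprt macro) looks like this:

    #include <stddef.h>
    #include <stdint.h>

    /* C11 offers _Static_assert; the negative-array-size typedef below is
       the classic portable equivalent. */
    #define MY_ASSERT_COMPILE(expr) typedef char my_assert_compile_[(expr) ? 1 : -1]

    typedef struct EXAMPLE
    {
        uint64_t u0, u1;     /* 16 bytes of leading members */
        uint64_t aMember[2]; /* expected on a 16-byte boundary */
    } EXAMPLE;

    MY_ASSERT_COMPILE((offsetof(EXAMPLE, aMember) & 15) == 0);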