- Timestamp: Mar 28, 2008 5:15:38 PM
- svn:sync-xref-src-repo-rev: 29139
- Location: trunk
- Files: 22 edited
trunk/include/VBox/mm.h
r7354 → r7635

     MMDECL(int)  MMHyperAlloc(PVM pVM, size_t cb, uint32_t uAlignment, MMTAG enmTag, void **ppv);
     MMDECL(int)  MMHyperFree(PVM pVM, void *pv);
+    MMDECL(void) MMHyperHeapCheck(PVM pVM);
 #ifdef DEBUG
     MMDECL(void) MMHyperHeapDump(PVM pVM);
…
     MMR3DECL(void) MMR3Reset(PVM pVM);
     MMR3DECL(int)  MMR3IncreaseBaseReservation(PVM pVM, uint64_t cAddBasePages);
-    MMR3DECL(int)  MMR3IncreaseFixedReservation(PVM pVM, uint32_t cAddFixedPages);
+    MMR3DECL(int)  MMR3AdjustFixedReservation(PVM pVM, int32_t cDeltaFixedPages, const char *pszDesc);
     MMR3DECL(int)  MMR3UpdateShadowReservation(PVM pVM, uint32_t cShadowPages);
…
     MMR3DECL(int)  MMR3HyperMapHCPhys(PVM pVM, void *pvHC, RTHCPHYS HCPhys, size_t cb, const char *pszDesc, PRTGCPTR pGCPtr);
     MMR3DECL(int)  MMR3HyperMapGCPhys(PVM pVM, RTGCPHYS GCPhys, size_t cb, const char *pszDesc, PRTGCPTR pGCPtr);
+    MMR3DECL(int)  MMR3HyperMapMMIO2(PVM pVM, PPDMDEVINS pDevIns, uint32_t iRegion, RTGCPHYS off, RTGCPHYS cb, const char *pszDesc, PRTGCPTR pGCPtr);
     MMR3DECL(int)  MMR3HyperMapHCRam(PVM pVM, void *pvHC, size_t cb, bool fFree, const char *pszDesc, PRTGCPTR pGCPtr);
     MMR3DECL(int)  MMR3HyperMapPages(PVM pVM, void *pvR3, RTR0PTR pvR0, size_t cPages, PCSUPPAGE paPages, const char *pszDesc, PRTGCPTR pGCPtr);
…
     MMR3DECL(int)  MMR3PhysRegisterEx(PVM pVM, void *pvRam, RTGCPHYS GCPhys, unsigned cb, unsigned fFlags, MMPHYSREG enmType, const char *pszDesc);
 #endif
-    MMR3DECL(int)  MMR3PhysRelocate(PVM pVM, RTGCPHYS GCPhysOld, RTGCPHYS GCPhysNew, unsigned cb);
     MMR3DECL(int)  MMR3PhysRomRegister(PVM pVM, PPDMDEVINS pDevIns, RTGCPHYS GCPhys, RTUINT cbRange, const void *pvBinary, bool fShadow, const char *pszDesc);
     MMR3DECL(int)  MMR3PhysRomProtect(PVM pVM, RTGCPHYS GCPhys, RTUINT cbRange);
trunk/include/VBox/pci.h
r7072 → r7635

  * @param pPciDev         Pointer to PCI device. Use pPciDev->pDevIns to get the device instance.
  * @param iRegion         The region number.
- * @param GCPhysAddress   Physical address of the region. If iType is PCI_ADDRESS_SPACE_IO, this is an
- *                        I/O port, else it's a physical address.
- *                        This address is *NOT* relative to pci_mem_base like earlier!
+ * @param GCPhysAddress   Physical address of the region. If enmType is PCI_ADDRESS_SPACE_IO, this
+ *                        is an I/O port, otherwise it's a physical address.
+ *
+ *                        NIL_RTGCPHYS indicates that a MMIO2 mapping is about to be unmapped and
+ *                        that the device deregister access handlers for it and update its internal
+ *                        state to reflect this.
+ *
  * @param enmType         One of the PCI_ADDRESS_SPACE_* values.
+ *
+ * @remarks The address is *NOT* relative to pci_mem_base.
  */
 typedef DECLCALLBACK(int) FNPCIIOREGIONMAP(PPCIDEVICE pPciDev, /*unsigned*/ int iRegion, RTGCPHYS GCPhysAddress, uint32_t cb, PCIADDRESSSPACE enmType);
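For reference, a minimal sketch of an FNPCIIOREGIONMAP callback following the new NIL_RTGCPHYS convention described above. The device state type (MYDEVSTATE) and its fields are hypothetical placeholders; the real implementations updated by this changeset are vgaR3IORegionMap (DevVGA.cpp) and vmmdevIORAMRegionMap (VBoxDev.cpp).

static DECLCALLBACK(int) mydevR3IORegionMap(PPCIDEVICE pPciDev, /*unsigned*/ int iRegion,
                                            RTGCPHYS GCPhysAddress, uint32_t cb, PCIADDRESSSPACE enmType)
{
    PPDMDEVINS  pDevIns = pPciDev->pDevIns;
    PMYDEVSTATE pThis   = PDMINS2DATA(pDevIns, PMYDEVSTATE);   /* hypothetical device state */
    int         rc;

    if (GCPhysAddress != NIL_RTGCPHYS)
    {
        /* The guest programmed the BAR: map the MMIO2 region at the new address. */
        rc = PDMDevHlpMMIO2Map(pDevIns, iRegion, GCPhysAddress);
        if (RT_SUCCESS(rc))
            pThis->GCPhysRegion = GCPhysAddress;                /* hypothetical field */
    }
    else
    {
        /* NIL_RTGCPHYS: the PCI bus is about to unmap the region. Deregister any access
         * handlers and drop the cached address here; the bus itself calls PDMDevHlpMMIO2Unmap. */
        pThis->GCPhysRegion = NIL_RTGCPHYS;
        rc = VINF_SUCCESS;
    }
    return rc;
}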
trunk/include/VBox/pdmdev.h
r5999 r7635 644 644 DECLR3CALLBACKMEMBER(void, pfnIoApicSetIrq,(PPDMDEVINS pDevIns, int iIrq, int iLevel)); 645 645 646 /** 647 * Checks if the given address is an MMIO2 base address or not. 648 * 649 * @returns true/false accordingly. 650 * @param pDevIns The PCI device instance. 651 * @param pOwner The owner of the memory, optional. 652 * @param GCPhys The address to check. 653 */ 654 DECLR3CALLBACKMEMBER(bool, pfnIsMMIO2Base,(PPDMDEVINS pDevIns, PPDMDEVINS pOwner, RTGCPHYS GCPhys)); 655 656 /** 657 * Gets the address of the GC PCI Bus helpers. 658 * 659 * This should be called at both construction and relocation time 660 * to obtain the correct address of the GC helpers. 661 * 662 * @returns GC pointer to the PCI Bus helpers. 663 * @param pDevIns Device instance of the PCI Bus. 664 * @thread EMT only. 665 */ 666 DECLR3CALLBACKMEMBER(PCPDMPCIHLPGC, pfnGetGCHelpers,(PPDMDEVINS pDevIns)); 667 668 /** 669 * Gets the address of the R0 PCI Bus helpers. 670 * 671 * This should be called at both construction and relocation time 672 * to obtain the correct address of the GC helpers. 673 * 674 * @returns R0 pointer to the PCI Bus helpers. 675 * @param pDevIns Device instance of the PCI Bus. 676 * @thread EMT only. 677 */ 678 DECLR3CALLBACKMEMBER(PCPDMPCIHLPR0, pfnGetR0Helpers,(PPDMDEVINS pDevIns)); 679 646 680 #ifdef VBOX_WITH_PDM_LOCK 647 681 /** … … 662 696 DECLR3CALLBACKMEMBER(void, pfnUnlock,(PPDMDEVINS pDevIns)); 663 697 #endif 664 665 /**666 * Gets the address of the GC PCI Bus helpers.667 *668 * This should be called at both construction and relocation time669 * to obtain the correct address of the GC helpers.670 *671 * @returns GC pointer to the PCI Bus helpers.672 * @param pDevIns Device instance of the PCI Bus.673 * @thread EMT only.674 */675 DECLR3CALLBACKMEMBER(PCPDMPCIHLPGC, pfnGetGCHelpers,(PPDMDEVINS pDevIns));676 677 /**678 * Gets the address of the R0 PCI Bus helpers.679 *680 * This should be called at both construction and relocation time681 * to obtain the correct address of the GC helpers.682 *683 * @returns R0 pointer to the PCI Bus helpers.684 * @param pDevIns Device instance of the PCI Bus.685 * @thread EMT only.686 */687 DECLR3CALLBACKMEMBER(PCPDMPCIHLPR0, pfnGetR0Helpers,(PPDMDEVINS pDevIns));688 698 689 699 /** Just a safety precaution. */ … … 696 706 697 707 /** Current PDMPCIHLPR3 version number. */ 698 #define PDM_PCIHLPR3_VERSION 0xf10 10000708 #define PDM_PCIHLPR3_VERSION 0xf1020000 699 709 700 710 … … 1709 1719 */ 1710 1720 DECLR3CALLBACKMEMBER(int, pfnIOPortDeregister,(PPDMDEVINS pDevIns, RTIOPORT Port, RTUINT cPorts)); 1711 1712 1721 1713 1722 /** … … 2569 2578 DECLR3CALLBACKMEMBER(int, pfnROMProtectShadow,(PPDMDEVINS pDevIns, RTGCPHYS GCPhysStart, RTUINT cbRange)); 2570 2579 2580 /** 2581 * Allocate and register a MMIO2 region. 2582 * 2583 * As mentioned elsewhere, MMIO2 is just RAM spelled differently. It's 2584 * RAM associated with a device. It is also non-shared memory with a 2585 * permanent ring-3 mapping and page backing (presently). 2586 * 2587 * @returns VBox status. 2588 * @param pDevIns The device instance. 2589 * @param iRegion The region number. Use the PCI region number as 2590 * this must be known to the PCI bus device too. If it's not associated 2591 * with the PCI device, then any number up to UINT8_MAX is fine. 2592 * @param cb The size (in bytes) of the region. 2593 * @param ppv Where to store the address of the ring-3 mapping of the memory. 2594 * @param pszDesc Pointer to description string. This must not be freed. 2595 * @thread EMT. 
2596 */ 2597 DECLR3CALLBACKMEMBER(int, pfnMMIO2Register,(PPDMDEVINS pDevIns, uint32_t iRegion, RTGCPHYS cb, void **ppv, const char *pszDesc)); 2598 2599 /** 2600 * Deregisters and frees a MMIO2 region. 2601 * 2602 * Any physical (and virtual) access handlers registered for the region must 2603 * be deregistered before calling this function. 2604 * 2605 * @returns VBox status code. 2606 * @param pDevIns The device instance. 2607 * @param iRegion The region number used during registration. 2608 * @thread EMT. 2609 */ 2610 DECLR3CALLBACKMEMBER(int, pfnMMIO2Deregister,(PPDMDEVINS pDevIns, uint32_t iRegion)); 2611 2612 /** 2613 * Maps a MMIO2 region into the physical memory space. 2614 * 2615 * A MMIO2 range may overlap with base memory if a lot of RAM 2616 * is configured for the VM, in which case we'll drop the base 2617 * memory pages. Presently we will make no attempt to preserve 2618 * anything that happens to be present in the base memory that 2619 * is replaced, this is of course incorrectly but it's too much 2620 * effort. 2621 * 2622 * @returns VBox status code. 2623 * @param pDevIns The device instance. 2624 * @param iRegion The region number used during registration. 2625 * @param GCPhys The physical address to map it at. 2626 * @thread EMT. 2627 */ 2628 DECLR3CALLBACKMEMBER(int, pfnMMIO2Map,(PPDMDEVINS pDevIns, uint32_t iRegion, RTGCPHYS GCPhys)); 2629 2630 /** 2631 * Unmaps a MMIO2 region previously mapped using pfnMMIO2Map. 2632 * 2633 * @returns VBox status code. 2634 * @param pDevIns The device instance. 2635 * @param iRegion The region number used during registration. 2636 * @param GCPhys The physical address it's currently mapped at. 2637 * @thread EMT. 2638 */ 2639 DECLR3CALLBACKMEMBER(int, pfnMMIO2Unmap,(PPDMDEVINS pDevIns, uint32_t iRegion, RTGCPHYS GCPhys)); 2640 2641 /** 2642 * Maps a portion of an MMIO2 region into the hypervisor region. 2643 * 2644 * Callers of this API must never deregister the MMIO2 region before the 2645 * VM is powered off. 2646 * 2647 * @return VBox status code. 2648 * @param pDevIns The device owning the MMIO2 memory. 2649 * @param iRegion The region. 2650 * @param off The offset into the region. Will be rounded down to closest page boundrary. 2651 * @param cb The number of bytes to map. Will be rounded up to the closest page boundrary. 2652 * @param pszDesc Mapping description. 2653 * @param pGCPtr Where to store the GC address. 2654 */ 2655 DECLR3CALLBACKMEMBER(int, pfnMMHyperMapMMIO2,(PPDMDEVINS pDevIns, uint32_t iRegion, RTGCPHYS off, RTGCPHYS cb, 2656 const char *pszDesc, PRTGCPTR pGCPtr)); 2657 2571 2658 /** @} */ 2572 2659 … … 2581 2668 2582 2669 /** Current PDMDEVHLP version number. 
*/ 2583 #define PDM_DEVHLP_VERSION 0xf205000 02670 #define PDM_DEVHLP_VERSION 0xf2050001 2584 2671 2585 2672 … … 3044 3131 return pDevIns->pDevHlp->pfnROMRegister(pDevIns, GCPhysStart, cbRange, pvBinary, fShadow, pszDesc); 3045 3132 } 3046 3047 3133 /** 3048 3134 * @copydoc PDMDEVHLP::pfnROMProtectShadow … … 3051 3137 { 3052 3138 return pDevIns->pDevHlp->pfnROMProtectShadow(pDevIns, GCPhysStart, cbRange); 3139 } 3140 3141 /** 3142 * @copydoc PDMDEVHLP::pfnMMIO2Register 3143 */ 3144 DECLINLINE(int) PDMDevHlpMMIO2Register(PPDMDEVINS pDevIns, uint32_t iRegion, RTGCPHYS cb, void **ppv, const char *pszDesc) 3145 { 3146 return pDevIns->pDevHlp->pfnMMIO2Register(pDevIns, iRegion, cb, ppv, pszDesc); 3147 } 3148 3149 /** 3150 * @copydoc PDMDEVHLP::pfnMMIO2Deregister 3151 */ 3152 DECLINLINE(int) PDMDevHlpMMIO2Deregister(PPDMDEVINS pDevIns, uint32_t iRegion) 3153 { 3154 return pDevIns->pDevHlp->pfnMMIO2Deregister(pDevIns, iRegion); 3155 } 3156 3157 /** 3158 * @copydoc PDMDEVHLP::pfnMMIO2Map 3159 */ 3160 DECLINLINE(int) PDMDevHlpMMIO2Map(PPDMDEVINS pDevIns, uint32_t iRegion, RTGCPHYS GCPhys) 3161 { 3162 return pDevIns->pDevHlp->pfnMMIO2Map(pDevIns, iRegion, GCPhys); 3163 } 3164 3165 /** 3166 * @copydoc PDMDEVHLP::pfnMMIO2Unmap 3167 */ 3168 DECLINLINE(int) PDMDevHlpMMIO2Unmap(PPDMDEVINS pDevIns, uint32_t iRegion, RTGCPHYS GCPhys) 3169 { 3170 return pDevIns->pDevHlp->pfnMMIO2Unmap(pDevIns, iRegion, GCPhys); 3171 } 3172 3173 /** 3174 * @copydoc PDMDEVHLP::pfnMMHyperMapMMIO2 3175 */ 3176 DECLINLINE(int) PDMDevHlpMMHyperMapMMIO2(PPDMDEVINS pDevIns, uint32_t iRegion, RTGCPHYS off, RTGCPHYS cb, 3177 const char *pszDesc, PRTGCPTR pGCPtr) 3178 { 3179 return pDevIns->pDevHlp->pfnMMHyperMapMMIO2(pDevIns, iRegion, off, cb, pszDesc, pGCPtr); 3053 3180 } 3054 3181 -
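For reference, a minimal construction-time sketch of the new MMIO2 device helpers declared above, following the pattern this changeset introduces in the VGA device constructor (DevVGA.cpp). The pThis fields, MYDEV_GC_MAPPING_SIZE and mydevR3IORegionMap names are hypothetical placeholders.

    /* Allocate the device RAM as an MMIO2 region; the out pointer receives the permanent ring-3 mapping. */
    rc = PDMDevHlpMMIO2Register(pDevIns, 0 /* iRegion */, pThis->cbRegion, (void **)&pThis->pvRegionR3, "MyDev RAM");
    AssertMsgRCReturn(rc, ("PDMDevHlpMMIO2Register(%#x,) -> %Rrc\n", pThis->cbRegion, rc), rc);

    /* Optionally map the start of the region into the hypervisor area for guest-context access. */
    rc = PDMDevHlpMMHyperMapMMIO2(pDevIns, 0 /* iRegion */, 0 /* off */, MYDEV_GC_MAPPING_SIZE,
                                  "MyDev RAM (GC)", &pThis->pvRegionGC);
    AssertRCReturn(rc, rc);

    /* Hand the region to the PCI bus; the FNPCIIOREGIONMAP callback (see pci.h above) will
     * invoke PDMDevHlpMMIO2Map once the guest writes the BAR. */
    rc = PDMDevHlpPCIIORegionRegister(pDevIns, 0 /* iRegion */, pThis->cbRegion,
                                      PCI_ADDRESS_SPACE_MEM_PREFETCH, mydevR3IORegionMap);
    if (VBOX_FAILURE(rc))
        return rc;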
trunk/include/VBox/pgm.h
r7629 → r7635

 #endif /* !VBOX_WITH_NEW_PHYS_CODE */
     PGMR3DECL(int) PGMR3PhysRegisterRam(PVM pVM, RTGCPHYS GCPhys, RTGCPHYS cb, const char *pszDesc);
+    PDMR3DECL(int) PGMR3PhysMMIO2Register(PVM pVM, PPDMDEVINS pDevIns, uint32_t iRegion, RTGCPHYS cb, void **ppv, const char *pszDesc);
+    PDMR3DECL(int) PGMR3PhysMMIO2Deregister(PVM pVM, PPDMDEVINS pDevIns, uint32_t iRegion);
+    PDMR3DECL(int) PGMR3PhysMMIO2Map(PVM pVM, PPDMDEVINS pDevIns, uint32_t iRegion, RTGCPHYS GCPhys);
+    PDMR3DECL(int) PGMR3PhysMMIO2Unmap(PVM pVM, PPDMDEVINS pDevIns, uint32_t iRegion, RTGCPHYS GCPhys);
+    PDMR3DECL(bool) PGMR3PhysMMIO2IsBase(PVM pVM, PPDMDEVINS pDevIns, RTGCPHYS GCPhys);
+    PDMR3DECL(int) PGMR3PhysMMIO2GetHCPhys(PVM pVM, PPDMDEVINS pDevIns, uint32_t iRegion, RTGCPHYS off, PRTHCPHYS pHCPhys);

     /** @group PGMR3PhysRegisterRom flags.
…
     PGMR3DECL(int) PGMR3PhysRegisterChunk(PVM pVM, void *pvRam, RTGCPHYS GCPhys, size_t cb, unsigned fFlags, const SUPPAGE *paPages, const char *pszDesc);
 #endif /* !VBOX_WITH_NEW_PHYS_CODE */
-    PGMR3DECL(int) PGMR3PhysRelocate(PVM pVM, RTGCPHYS GCPhysOld, RTGCPHYS GCPhysNew, size_t cb);
     PGMR3DECL(int) PGMR3PhysSetFlags(PVM pVM, RTGCPHYS GCPhys, size_t cb, unsigned fFlags, unsigned fMask);
     PGMDECL(void)  PGMR3PhysSetA20(PVM pVM, bool fEnable);
trunk/src/VBox/Devices/Bus/DevPCI.cpp
r7072 r7635 1 /* $Id: $ */ 1 2 /** @file 2 * 3 * PCI Device. 3 * PCI BUS Device. 4 4 */ 5 5 … … 214 214 static void pci_update_mappings(PCIDevice *d) 215 215 { 216 PPCIBUS pBus = d->Int.s.pBus; 216 217 PCIIORegion *r; 217 218 int cmd, i; … … 279 280 } 280 281 } else { 281 int rc = d->pDevIns->pDevHlp->pfnMMIODeregister(d->pDevIns, 282 r->addr + PCIBUS2PCIGLOBALS(d->Int.s.pBus)->pci_mem_base, 283 r->size); 284 #if 0 /** @todo deal correctly with deregistration of MMIO2 ranges and such like. */ 285 AssertMsg(VBOX_SUCCESS(rc) || !strcmp(d->name, "vga") || !strcmp(d->name, "VMMDev"), ("rc=%Vrc d=%s\n", rc, d->name)); NOREF(rc); 286 #else /* less strict check */ 287 AssertMsg(VBOX_SUCCESS(rc) || rc == VERR_IOM_MMIO_RANGE_NOT_FOUND, ("rc=%Vrc d=%s\n", rc, d->name)); NOREF(rc); 288 #endif 282 RTGCPHYS GCPhysBase = r->addr + PCIBUS2PCIGLOBALS(pBus)->pci_mem_base; 283 int rc; 284 if (pBus->pPciHlpR3->pfnIsMMIO2Base(pBus->pDevInsHC, d->pDevIns, GCPhysBase)) 285 { 286 /* unmap it. */ 287 int rc = r->map_func(d, i, NIL_RTGCPHYS, r->size, (PCIADDRESSSPACE)(r->type)); 288 AssertRC(rc); 289 rc = PDMDevHlpMMIO2Unmap(d->pDevIns, i, GCPhysBase); 290 } 291 else 292 rc = d->pDevIns->pDevHlp->pfnMMIODeregister(d->pDevIns, GCPhysBase, r->size); 293 AssertMsgRC(rc, ("rc=%Rrc d=%s i=%d GCPhysBase=%RGp size=%#x\n", rc, d->name, i, GCPhysBase, r->size)); 289 294 } 290 295 } … … 292 297 if (r->addr != ~0U) { 293 298 int rc = r->map_func(d, i, 294 r->addr + (r->type & PCI_ADDRESS_SPACE_IO ? 0 : PCIBUS2PCIGLOBALS( d->Int.s.pBus)->pci_mem_base),299 r->addr + (r->type & PCI_ADDRESS_SPACE_IO ? 0 : PCIBUS2PCIGLOBALS(pBus)->pci_mem_base), 295 300 r->size, (PCIADDRESSSPACE)(r->type)); 296 301 AssertRC(rc); … … 772 777 { 773 778 case 0x0101: 774 if (vendor_id == 0x8086 && 779 if (vendor_id == 0x8086 && 775 780 (device_id == 0x7010 || device_id == 0x7111)) { 776 781 /* PIIX3 or PIIX4 IDE */ … … 1083 1088 if (pData->devices[i]) 1084 1089 { 1085 LogRel(("New device in slot %#x, %s (vendor=%#06x device=%#06x)\n", i, pData->devices[i]->name, 1090 LogRel(("New device in slot %#x, %s (vendor=%#06x device=%#06x)\n", i, pData->devices[i]->name, 1086 1091 PCIDevGetVendorId(pData->devices[i]), PCIDevGetDeviceId(pData->devices[i]))); 1087 1092 if (SSMR3HandleGetAfter(pSSMHandle) != SSMAFTER_DEBUG_IT) … … 1100 1105 if (!pDev) 1101 1106 { 1102 LogRel(("Device in slot %#x has been removed! vendor=%#06x device=%#06x\n", i, 1107 LogRel(("Device in slot %#x has been removed! vendor=%#06x device=%#06x\n", i, 1103 1108 PCIDevGetVendorId(&DevTmp), PCIDevGetDeviceId(&DevTmp))); 1104 1109 if (SSMR3HandleGetAfter(pSSMHandle) != SSMAFTER_DEBUG_IT) … … 1118 1123 /* commit the loaded device config. 
*/ 1119 1124 memcpy(pDev->config, DevTmp.config, sizeof(pDev->config)); 1120 if (DevTmp.Int.s.iIrq >= PCI_DEVICES_MAX) 1125 if (DevTmp.Int.s.iIrq >= PCI_DEVICES_MAX) 1121 1126 { 1122 1127 LogRel(("Device %s: Too many devices %d (max=%d)\n", pDev->name, DevTmp.Int.s.iIrq, PCI_DEVICES_MAX)); … … 1326 1331 1327 1332 1328 /** 1333 /** 1329 1334 * @copydoc PDMPCIBUSREG::pfnSetConfigCallbacksHC 1330 1335 */ 1331 static DECLCALLBACK(void) pciSetConfigCallbacks(PPDMDEVINS pDevIns, PPCIDEVICE pPciDev, PFNPCICONFIGREAD pfnRead, PPFNPCICONFIGREAD ppfnReadOld, 1336 static DECLCALLBACK(void) pciSetConfigCallbacks(PPDMDEVINS pDevIns, PPCIDEVICE pPciDev, PFNPCICONFIGREAD pfnRead, PPFNPCICONFIGREAD ppfnReadOld, 1332 1337 PFNPCICONFIGWRITE pfnWrite, PPFNPCICONFIGWRITE ppfnWriteOld) 1333 1338 { … … 1495 1500 return PDMDEV_SET_ERROR(pDevIns, rc, 1496 1501 N_("Failed to register ourselves as a PCI Bus")); 1502 if (pBus->pPciHlpR3->u32Version != PDM_PCIHLPR3_VERSION) 1503 return PDMDevHlpVMSetError(pDevIns, VERR_VERSION_MISMATCH, RT_SRC_POS, 1504 N_("PCI helper version mismatch; got %#x expected %#x"), 1505 pBus->pPciHlpR3->u32Version != PDM_PCIHLPR3_VERSION); 1497 1506 1498 1507 pBus->pPciHlpGC = pBus->pPciHlpR3->pfnGetGCHelpers(pDevIns); -
trunk/src/VBox/Devices/Graphics/DevVGA.cpp
r7259 r7635 1999 1999 for(y = 0; y < height; y++) { 2000 2000 addr = addr1; 2001 /* CGA/MDA compatibility. Note that these addresses are all 2001 /* CGA/MDA compatibility. Note that these addresses are all 2002 2002 * shifted left by two compared to VGA specs. 2003 2003 */ … … 4108 4108 4109 4109 /** 4110 * Callback function for mapping an PCI I/O region.4110 * Callback function for unmapping and/or mapping the VRAM MMIO2 region (called by the PCI bus). 4111 4111 * 4112 4112 * @return VBox status code. … … 4121 4121 { 4122 4122 int rc; 4123 PVGASTATE pData = PDMINS2DATA(pPciDev->pDevIns, PVGASTATE); 4123 PPDMDEVINS pDevIns = pPciDev->pDevIns; 4124 PVGASTATE pData = PDMINS2DATA(pDevIns, PVGASTATE); 4124 4125 LogFlow(("vgaR3IORegionMap: iRegion=%d GCPhysAddress=%VGp cb=%#x enmType=%d\n", iRegion, GCPhysAddress, cb, enmType)); 4125 4126 /* 4127 * VRam mapping. 4128 */ 4129 if (iRegion == 0 && enmType == PCI_ADDRESS_SPACE_MEM_PREFETCH) 4126 AssertReturn(iRegion == 0 && enmType == PCI_ADDRESS_SPACE_MEM_PREFETCH, VERR_INTERNAL_ERROR); 4127 4128 if (GCPhysAddress != NIL_RTGCPHYS) 4130 4129 { 4131 4130 /* 4132 * Register and lock the VRAM. 4133 * 4134 * Windows usually re-initializes the PCI devices, so we have to check whether the memory was 4135 * already registered before trying to do that all over again. 4131 * Mapping the VRAM. 4136 4132 */ 4137 PVM pVM = PDMDevHlpGetVM(pPciDev->pDevIns); 4138 if (pData->GCPhysVRAM) 4133 rc = PDMDevHlpMMIO2Map(pDevIns, iRegion, GCPhysAddress); 4134 AssertRC(rc); 4135 if (RT_SUCCESS(rc)) 4139 4136 { 4140 AssertMsg(pData->GCPhysVRAM == GCPhysAddress, 4141 ("The Guest OS relocated our LFB! old GCPhysVRAM=%VGp new GCPhysAddress=%VGp\n", 4142 pData->GCPhysVRAM, GCPhysAddress)); 4143 rc = VINF_SUCCESS; 4144 } 4145 else 4146 { 4147 /* 4148 * Register and lock the VRAM. 4149 */ 4150 rc = MMR3PhysRegister(pVM, pData->vram_ptrHC, GCPhysAddress, pData->vram_size, MM_RAM_FLAGS_MMIO2, "VRam"); 4151 if (VBOX_SUCCESS(rc)) 4152 { 4153 if (!pData->GCPhysVRAM) 4154 rc = PGMR3HandlerPhysicalRegister(pVM, PGMPHYSHANDLERTYPE_PHYSICAL_WRITE, 4155 GCPhysAddress, GCPhysAddress + (pData->vram_size - 1), 4156 vgaR3LFBAccessHandler, pData, 4157 g_DeviceVga.szR0Mod, "vgaR0LFBAccessHandler", pData->pDevInsHC->pvInstanceDataR0, 4158 g_DeviceVga.szGCMod, "vgaGCLFBAccessHandler", pData->pDevInsHC->pvInstanceDataGC, 4159 "VGA LFB"); 4160 if (VBOX_SUCCESS(rc)) 4161 { 4162 /* 4163 * Map the first 256KB of the VRAM into GC for GC VGA support. 4164 */ 4165 RTGCPTR GCPtr; 4166 rc = MMR3HyperMapGCPhys(pVM, GCPhysAddress, VGA_MAPPING_SIZE, "VGA VRam", &GCPtr); 4167 if (VBOX_SUCCESS(rc)) 4168 { 4169 MMR3HyperReserve(pVM, PAGE_SIZE, "fence", NULL); 4170 4171 pData->vram_ptrGC = GCPtr; 4172 pData->GCPhysVRAM = GCPhysAddress; 4173 return VINF_SUCCESS; 4174 } 4175 AssertMsgFailed(("MMR3HyperMapGCPhys failed, rc=%Vrc\n", rc)); 4176 } 4177 else 4178 AssertMsgFailed(("Failed to register write handler for VRAM! rc=%Vrc\n", rc)); 4179 } 4180 else 4181 AssertReleaseMsgFailed(("Failed to register VRAM! 
rc=%Vra\n", rc)); 4182 } 4183 return rc; 4137 rc = PGMR3HandlerPhysicalRegister(PDMDevHlpGetVM(pDevIns), 4138 PGMPHYSHANDLERTYPE_PHYSICAL_WRITE, 4139 GCPhysAddress, GCPhysAddress + (pData->vram_size - 1), 4140 vgaR3LFBAccessHandler, pData, 4141 g_DeviceVga.szR0Mod, "vgaR0LFBAccessHandler", pDevIns->pvInstanceDataR0, 4142 g_DeviceVga.szGCMod, "vgaGCLFBAccessHandler", pDevIns->pvInstanceDataGC, 4143 "VGA LFB"); 4144 AssertRC(rc); 4145 if (RT_SUCCESS(rc)) 4146 pData->GCPhysVRAM = GCPhysAddress; 4147 } 4184 4148 } 4185 4149 else 4186 AssertReleaseMsgFailed(("Huh!?! iRegion=%d enmType=%d\n", iRegion, enmType)); 4187 return VERR_INTERNAL_ERROR; 4150 { 4151 /* 4152 * Unmapping of the VRAM in progress. 4153 * Deregister the access handler so PGM doesn't get upset. 4154 */ 4155 Assert(pData->GCPhysVRAM); 4156 rc = PGMHandlerPhysicalDeregister(PDMDevHlpGetVM(pDevIns), pData->GCPhysVRAM); 4157 AssertRC(rc); 4158 pData->GCPhysVRAM = 0; 4159 } 4160 return rc; 4188 4161 } 4189 4162 … … 4619 4592 4620 4593 /* 4594 * Allocate the VRAM and map the first 256KB of it into GC so we can speed up VGA support. 4595 */ 4596 rc = PDMDevHlpMMIO2Register(pDevIns, 0 /* iRegion */, pData->vram_size, (void **)&pData->vram_ptrHC, "VRam"); 4597 AssertMsgRC(rc, ("PDMDevHlpMMIO2Register(%#x,) -> %Rrc\n", pData->vram_size, rc)); 4598 4599 rc = PDMDevHlpMMHyperMapMMIO2(pDevIns, 0 /* iRegion */, 0 /* off */, VGA_MAPPING_SIZE, "VGA VRam", &pData->vram_ptrGC); 4600 AssertMsgRC(rc, ("MMR3HyperMapGCPhys(%#x,) -> %Rrc\n", pData->vram_size, rc)); 4601 4602 /* 4621 4603 * Register I/O ports, ROM and save state. 4622 4604 */ … … 4762 4744 AssertReleaseMsg(g_cbVgaBiosBinary <= _64K && g_cbVgaBiosBinary >= 32*_1K, ("g_cbVgaBiosBinary=%#x\n", g_cbVgaBiosBinary)); 4763 4745 AssertReleaseMsg(RT_ALIGN_Z(g_cbVgaBiosBinary, PAGE_SIZE) == g_cbVgaBiosBinary, ("g_cbVgaBiosBinary=%#x\n", g_cbVgaBiosBinary)); 4764 rc = PDMDevHlpROMRegister(pDevIns, 0x000c0000, g_cbVgaBiosBinary, &g_abVgaBiosBinary[0], 4746 rc = PDMDevHlpROMRegister(pDevIns, 0x000c0000, g_cbVgaBiosBinary, &g_abVgaBiosBinary[0], 4765 4747 false /* fShadow */, "VGA BIOS"); 4766 4748 if (VBOX_FAILURE(rc)) … … 4781 4763 if (pData->Dev.devfn != 16 && iInstance == 0) 4782 4764 Log(("!!WARNING!!: pData->dev.devfn=%d (ignore if testcase or no started by Main)\n", pData->Dev.devfn)); 4783 rc = PDMDevHlpPCIIORegionRegister(pDevIns, 0, pData->vram_size, PCI_ADDRESS_SPACE_MEM_PREFETCH, vgaR3IORegionMap); 4765 4766 rc = PDMDevHlpPCIIORegionRegister(pDevIns, 0 /* iRegion */, pData->vram_size, PCI_ADDRESS_SPACE_MEM_PREFETCH, vgaR3IORegionMap); 4784 4767 if (VBOX_FAILURE(rc)) 4785 4768 return rc; … … 4798 4781 if (VBOX_FAILURE(rc)) 4799 4782 return rc; 4800 4801 /*4802 * Allocate the VRAM.4803 */4804 rc = SUPPageAlloc(pData->vram_size >> PAGE_SHIFT, (void **)&pData->vram_ptrHC);4805 if (VBOX_FAILURE(rc))4806 {4807 AssertMsgFailed(("SUPPageAlloc(%#x,) -> %d\n", pData->vram_size, rc));4808 return rc;4809 }4810 4783 4811 4784 #ifdef VBE_NEW_DYN_LIST -
trunk/src/VBox/Devices/VMMDev/VBoxDev.cpp
r7436 r7635 1 /* $Id$ */ 1 2 /** @file 2 * 3 * VBox Guest/VMM/host communication: 4 * Virtual communication device 3 * VMMDev - Guest <-> VMM/Host communication device. 5 4 */ 6 5 … … 21 20 /* Enable dev_vmm Log3 statements to get IRQ-related logging. */ 22 21 23 #include <stdio.h>24 #include <string.h>25 26 22 #define LOG_GROUP LOG_GROUP_DEV_VMM 27 23 #include <VBox/log.h> … … 36 32 37 33 #include <iprt/assert.h> 34 #include <iprt/string.h> 38 35 #include <iprt/time.h> 39 36 #ifndef IN_GC … … 1236 1233 /* just pass on the information */ 1237 1234 Log(("VMMDev: returning VRDP status %d level %d\n", pData->fVRDPEnabled, pData->u32VRDPExperienceLevel)); 1238 1235 1239 1236 vrdpChangeRequest->u8VRDPActive = pData->fVRDPEnabled; 1240 1237 vrdpChangeRequest->u32VRDPExperienceLevel = pData->u32VRDPExperienceLevel; 1241 1238 1242 1239 pRequestHeader->rc = VINF_SUCCESS; 1243 1240 } … … 1259 1256 Log(("VMMDev: returning memory balloon size =%d\n", pData->u32MemoryBalloonSize)); 1260 1257 memBalloonChangeRequest->u32BalloonSize = pData->u32MemoryBalloonSize; 1261 memBalloonChangeRequest->u32PhysMemSize = (pData->u64GuestRAMSize / (uint64_t)_1M);1258 memBalloonChangeRequest->u32PhysMemSize = pData->cbGuestRAM / (uint64_t)_1M; 1262 1259 1263 1260 if (memBalloonChangeRequest->eventAck == VMMDEV_EVENT_BALLOON_CHANGE_REQUEST) … … 1381 1378 if (pGuestStats->u32StatCaps & VBOX_GUEST_STAT_MEM_SYSTEM_CACHE) 1382 1379 Log(("CPU%d: System cache size %-4d MB\n", pGuestStats->u32CpuId, pGuestStats->u32MemSystemCache / (_1M/pGuestStats->u32PageSize))); 1383 1380 1384 1381 if (pGuestStats->u32StatCaps & VBOX_GUEST_STAT_PAGE_FILE_SIZE) 1385 1382 Log(("CPU%d: Page file size %-4d MB\n", pGuestStats->u32CpuId, pGuestStats->u32PageFileSize / (_1M/pGuestStats->u32PageSize))); … … 1564 1561 static DECLCALLBACK(int) vmmdevIORAMRegionMap(PPCIDEVICE pPciDev, /*unsigned*/ int iRegion, RTGCPHYS GCPhysAddress, uint32_t cb, PCIADDRESSSPACE enmType) 1565 1562 { 1566 int rc;1563 LogFlow(("vmmdevR3IORAMRegionMap: iRegion=%d GCPhysAddress=%VGp cb=%#x enmType=%d\n", iRegion, GCPhysAddress, cb, enmType)); 1567 1564 VMMDevState *pData = PCIDEV_2_VMMDEVSTATE(pPciDev); 1568 LogFlow(("vmmdevR3IORAMRegionMap: iRegion=%d GCPhysAddress=%VGp cb=%#x enmType=%d\n", iRegion, GCPhysAddress, cb, enmType));1569 1570 1565 int rc; 1566 1567 AssertReturn(iRegion == 1 && enmType == PCI_ADDRESS_SPACE_MEM, VERR_INTERNAL_ERROR); 1571 1568 Assert(pData->pVMMDevRAMHC != NULL); 1572 1569 1573 memset (pData->pVMMDevRAMHC, 0, sizeof (VMMDevMemory)); 1574 pData->pVMMDevRAMHC->u32Size = sizeof (VMMDevMemory); 1575 pData->pVMMDevRAMHC->u32Version = VMMDEV_MEMORY_VERSION; 1576 1577 /* 1578 * VMMDev RAM mapping. 1579 */ 1580 if (iRegion == 1 && enmType == PCI_ADDRESS_SPACE_MEM) 1570 if (GCPhysAddress != NIL_RTGCPHYS) 1581 1571 { 1582 1572 /* 1583 * Register and lock the RAM. 1584 * 1585 * Windows usually re-initializes the PCI devices, so we have to check whether the memory was 1586 * already registered before trying to do that all over again. 1573 * Map the MMIO2 memory. 1587 1574 */ 1588 PVM pVM = PDMDevHlpGetVM(pPciDev->pDevIns); 1589 1590 if (pData->GCPhysVMMDevRAM) 1591 { 1592 /* 1593 * Relocate the already registered VMMDevRAM. 1594 */ 1595 rc = MMR3PhysRelocate(pVM, pData->GCPhysVMMDevRAM, GCPhysAddress, VMMDEV_RAM_SIZE); 1596 if (VBOX_SUCCESS(rc)) 1597 { 1598 pData->GCPhysVMMDevRAM = GCPhysAddress; 1599 return VINF_SUCCESS; 1600 } 1601 AssertReleaseMsgFailed(("Failed to relocate VMMDev RAM from %VGp to %VGp! 
rc=%Vra\n", pData->GCPhysVMMDevRAM, GCPhysAddress, rc)); 1602 } 1603 else 1604 { 1605 /* 1606 * Register and lock the VMMDevRAM. 1607 */ 1608 /** @todo MM_RAM_FLAGS_MMIO2 seems to be appropriate for a RW memory. 1609 * Need to check. May be a RO memory is enough for the device. 1610 */ 1611 rc = MMR3PhysRegister(pVM, pData->pVMMDevRAMHC, GCPhysAddress, VMMDEV_RAM_SIZE, MM_RAM_FLAGS_MMIO2, "VBoxDev"); 1612 if (VBOX_SUCCESS(rc)) 1613 { 1614 pData->GCPhysVMMDevRAM = GCPhysAddress; 1615 return VINF_SUCCESS; 1616 } 1617 AssertReleaseMsgFailed(("Failed to register VMMDev RAM! rc=%Vra\n", rc)); 1618 } 1619 return rc; 1620 } 1621 1622 AssertReleaseMsgFailed(("VMMDev wrong region type: iRegion=%d enmType=%d\n", iRegion, enmType)); 1623 return VERR_INTERNAL_ERROR; 1575 pData->GCPhysVMMDevRAM = GCPhysAddress; 1576 rc = PDMDevHlpMMIO2Map(pPciDev->pDevIns, iRegion, GCPhysAddress); 1577 } 1578 else 1579 { 1580 /* 1581 * It is about to be unmapped, just clean up. 1582 */ 1583 pData->GCPhysVMMDevRAM = NIL_RTGCPHYS; 1584 rc = VINF_SUCCESS; 1585 } 1586 1587 return rc; 1624 1588 } 1625 1589 … … 2096 2060 2097 2061 return VINF_SUCCESS; 2062 } 2063 2064 /** 2065 * (Re-)initializes the MMIO2 data. 2066 * 2067 * @param pData Pointer to the VMMDev instance data. 2068 */ 2069 static void vmmdevInitRam(VMMDevState *pData) 2070 { 2071 memset(pData->pVMMDevRAMHC, 0, sizeof(VMMDevMemory)); 2072 pData->pVMMDevRAMHC->u32Size = sizeof(VMMDevMemory); 2073 pData->pVMMDevRAMHC->u32Version = VMMDEV_MEMORY_VERSION; 2098 2074 } 2099 2075 … … 2160 2136 2161 2137 /* 2138 * Interfaces 2139 */ 2140 /* Base */ 2141 pData->Base.pfnQueryInterface = vmmdevPortQueryInterface; 2142 2143 /* VMMDev port */ 2144 pData->Port.pfnQueryAbsoluteMouse = vmmdevQueryAbsoluteMouse; 2145 pData->Port.pfnSetAbsoluteMouse = vmmdevSetAbsoluteMouse; 2146 pData->Port.pfnQueryMouseCapabilities = vmmdevQueryMouseCapabilities; 2147 pData->Port.pfnSetMouseCapabilities = vmmdevSetMouseCapabilities; 2148 pData->Port.pfnRequestDisplayChange = vmmdevRequestDisplayChange; 2149 pData->Port.pfnSetCredentials = vmmdevSetCredentials; 2150 pData->Port.pfnVBVAChange = vmmdevVBVAChange; 2151 pData->Port.pfnRequestSeamlessChange = vmmdevRequestSeamlessChange; 2152 pData->Port.pfnSetMemoryBalloon = vmmdevSetMemoryBalloon; 2153 pData->Port.pfnSetStatisticsInterval = vmmdevSetStatisticsInterval; 2154 pData->Port.pfnVRDPChange = vmmdevVRDPChange; 2155 2156 /* Shared folder LED */ 2157 pData->SharedFolders.Led.u32Magic = PDMLED_MAGIC; 2158 pData->SharedFolders.ILeds.pfnQueryStatusLed = vmmdevQueryStatusLed; 2159 2160 #ifdef VBOX_HGCM 2161 /* HGCM port */ 2162 pData->HGCMPort.pfnCompleted = hgcmCompleted; 2163 #endif 2164 2165 /** @todo convert this into a config parameter like we do everywhere else.*/ 2166 pData->cbGuestRAM = MMR3PhysGetRamSize(PDMDevHlpGetVM(pDevIns)); 2167 2168 /* 2162 2169 * Register the backdoor logging port 2163 2170 */ … … 2172 2179 AssertRCReturn(rc, rc); 2173 2180 #endif 2181 2182 /* 2183 * Allocate and initialize the MMIO2 memory. 
2184 */ 2185 rc = PDMDevHlpMMIO2Register(pDevIns, 1 /*iRegion*/, VMMDEV_RAM_SIZE, (void **)&pData->pVMMDevRAMHC, "VMMDev"); 2186 if (RT_FAILURE(rc)) 2187 return PDMDevHlpVMSetError(pDevIns, rc, RT_SRC_POS, 2188 N_("Failed to allocate %u bytes of memory for the VMM device"), VMMDEV_RAM_SIZE); 2189 vmmdevInitRam(pData); 2174 2190 2175 2191 /* … … 2189 2205 2190 2206 /* 2191 * Interfaces2192 */2193 /* Base */2194 pData->Base.pfnQueryInterface = vmmdevPortQueryInterface;2195 2196 /* VMMDev port */2197 pData->Port.pfnQueryAbsoluteMouse = vmmdevQueryAbsoluteMouse;2198 pData->Port.pfnSetAbsoluteMouse = vmmdevSetAbsoluteMouse;2199 pData->Port.pfnQueryMouseCapabilities = vmmdevQueryMouseCapabilities;2200 pData->Port.pfnSetMouseCapabilities = vmmdevSetMouseCapabilities;2201 pData->Port.pfnRequestDisplayChange = vmmdevRequestDisplayChange;2202 pData->Port.pfnSetCredentials = vmmdevSetCredentials;2203 pData->Port.pfnVBVAChange = vmmdevVBVAChange;2204 pData->Port.pfnRequestSeamlessChange = vmmdevRequestSeamlessChange;2205 pData->Port.pfnSetMemoryBalloon = vmmdevSetMemoryBalloon;2206 pData->Port.pfnSetStatisticsInterval = vmmdevSetStatisticsInterval;2207 pData->Port.pfnVRDPChange = vmmdevVRDPChange;2208 2209 /* Shared folder LED */2210 pData->SharedFolders.Led.u32Magic = PDMLED_MAGIC;2211 pData->SharedFolders.ILeds.pfnQueryStatusLed = vmmdevQueryStatusLed;2212 2213 #ifdef VBOX_HGCM2214 /* HGCM port */2215 pData->HGCMPort.pfnCompleted = hgcmCompleted;2216 #endif2217 2218 /*2219 2207 * Get the corresponding connector interface 2220 2208 */ … … 2256 2244 } 2257 2245 2258 /* 2246 /* 2259 2247 * Register saved state and init the HGCM CmdList critsect. 2260 2248 */ … … 2271 2259 #endif /* VBOX_HGCM */ 2272 2260 2273 /*2274 * Allocate the VMMDev RAM region.2275 */2276 /** @todo freeing of the RAM. */2277 rc = SUPPageAlloc(VMMDEV_RAM_SIZE >> PAGE_SHIFT, (void **)&pData->pVMMDevRAMHC);2278 AssertMsgRCReturn(rc, ("VMMDev SUPPageAlloc(%#x,) -> %Vrc\n", VMMDEV_RAM_SIZE, rc), rc);2279 2280 /* initialize the VMMDev memory */2281 pData->pVMMDevRAMHC->u32Size = sizeof (VMMDevMemory);2282 pData->pVMMDevRAMHC->u32Version = VMMDEV_MEMORY_VERSION;2283 2284 PVM pVM = PDMDevHlpGetVM(pDevIns);2285 pData->u64GuestRAMSize = MMR3PhysGetRamSize(pVM);2286 2261 return rc; 2287 2262 } … … 2311 2286 pData->u32HostEventFlags = 0; 2312 2287 2288 /* re-initialize the VMMDev memory */ 2313 2289 if (pData->pVMMDevRAMHC) 2314 { 2315 /* re-initialize the VMMDev memory */ 2316 memset (pData->pVMMDevRAMHC, 0, VMMDEV_RAM_SIZE); 2317 pData->pVMMDevRAMHC->u32Size = sizeof (VMMDevMemory); 2318 pData->pVMMDevRAMHC->u32Version = VMMDEV_MEMORY_VERSION; 2319 } 2290 vmmdevInitRam(pData); 2320 2291 2321 2292 /* credentials have to go away */ -
trunk/src/VBox/Devices/VMMDev/VMMDevState.h
r7072 → r7635

+/* $Id$ */
 /** @file
- *
- * VBox Guest/VMM/host communication:
- * HGCM - Host-Guest Communication Manager header
+ * VMMDev - Guest <-> VMM/Host communication device, Internal header.
  */

…
  */

-#ifndef __VMMDevState_h__
-#define __VMMDevState_h__
+#ifndef ___VMMDevState_h___
+#define ___VMMDevState_h___

 #include <VBox/cdefs.h>
…
     /** Current host side event flags */
     uint32_t u32HostEventFlags;
-    /** Mask of events guest is interested in. Note that the HGCM events
+    /** Mask of events guest is interested in. Note that the HGCM events
      * are enabled automatically by the VMMDev device when guest issues
      * HGCM commands.
…

     /* guest ram size */
-    uint64_t u64GuestRAMSize;
+    uint64_t cbGuestRAM;

     /* statistics interval change request */
…
     bool fVRDPEnabled;
     uint32_t u32VRDPExperienceLevel;

 #ifdef TIMESYNC_BACKDOOR
     bool fTimesyncBackdoorLo;
     uint64_t hostTime;
 #endif
-    /** Set if GetHostTime should fail.
+    /** Set if GetHostTime should fail.
      * Loaded from the GetHostTimeDisabled configuration value. */
     bool fGetHostTimeDisabled;
…
                                    uint32_t u32NotMask);

-#endif /* __VMMDevState_h__ */
+#endif /* !___VMMDevState_h___ */
+
trunk/src/VBox/VMM/MM.cpp
r7613 → r7635

 int mmR3UpdateReservation(PVM pVM)
 {
+    VM_ASSERT_EMT(pVM);
     if (pVM->mm.s.fDoneMMR3InitPaging)
         return GMMR3UpdateReservation(pVM,
…
  * This can be called before MMR3InitPaging.
  *
- * @returns VBox status code.
+ * @returns VBox status code. Will set VM error on failure.
  * @param   pVM             The shared VM structure.
  * @param   cAddBasePages   The number of pages to add.
…

 /**
- * Interface for PGM to increase the reservation of fixed pages.
+ * Interface for PGM to adjust the reservation of fixed pages.
  *
  * This can be called before MMR3InitPaging.
  *
- * @returns VBox status code.
- * @param   pVM             The shared VM structure.
- * @param   cAddFixedPages  The number of pages to add.
- */
-MMR3DECL(int) MMR3IncreaseFixedReservation(PVM pVM, uint32_t cAddFixedPages)
+ * @returns VBox status code. Will set VM error on failure.
+ * @param   pVM               The shared VM structure.
+ * @param   cDeltaFixedPages  The number of pages to add (positive) or subtract (negative).
+ * @param   pszDesc           Some description associated with the reservation.
+ */
+MMR3DECL(int) MMR3AdjustFixedReservation(PVM pVM, int32_t cDeltaFixedPages, const char *pszDesc)
 {
     const uint32_t cOld = pVM->mm.s.cFixedPages;
-    pVM->mm.s.cFixedPages += cAddFixedPages;
-    LogFlow(("MMR3AddFixedReservation: +%u (%u -> %u)\n", cAddFixedPages, cOld, pVM->mm.s.cFixedPages));
+    pVM->mm.s.cFixedPages += cDeltaFixedPages;
+    LogFlow(("MMR3AdjustFixedReservation: %d (%u -> %u)\n", cDeltaFixedPages, cOld, pVM->mm.s.cFixedPages));
     int rc = mmR3UpdateReservation(pVM);
     if (RT_FAILURE(rc))
     {
-        VMSetError(pVM, rc, RT_SRC_POS, N_("Failed to reserve physical memory (%#x -> %#x)"), cOld, pVM->mm.s.cFixedPages);
+        VMSetError(pVM, rc, RT_SRC_POS, N_("Failed to reserve physical memory (%#x -> %#x; %s)"),
+                   cOld, pVM->mm.s.cFixedPages, pszDesc);
         pVM->mm.s.cFixedPages = cOld;
     }
…
  * This can be called before MMR3InitPaging.
  *
- * @returns VBox status code.
+ * @returns VBox status code. Will set VM error on failure.
  * @param   pVM             The shared VM structure.
  * @param   cShadowPages    The new page count.
trunk/src/VBox/VMM/MMHyper.cpp
r6816 r7635 183 183 } 184 184 185 case MMLOOKUPHYPERTYPE_MMIO2: 186 { 187 const RTGCPHYS offEnd = pLookup->u.MMIO2.off + pLookup->cb; 188 for (RTGCPHYS offCur = pLookup->u.MMIO2.off; offCur < offEnd; offCur += PAGE_SIZE) 189 { 190 RTHCPHYS HCPhys; 191 rc = PGMR3PhysMMIO2GetHCPhys(pVM, pLookup->u.MMIO2.pDevIns, pLookup->u.MMIO2.iRegion, offCur, &HCPhys); 192 if (RT_FAILURE(rc)) 193 break; 194 rc = PGMMap(pVM, GCPtr + (offCur - pLookup->u.MMIO2.off), HCPhys, PAGE_SIZE, 0); 195 if (RT_FAILURE(rc)) 196 break; 197 } 198 break; 199 } 200 185 201 case MMLOOKUPHYPERTYPE_DYNAMIC: 186 202 /* do nothing here since these are either fences or managed by someone else using PGM. */ … … 383 399 return rc; 384 400 } 401 402 403 /** 404 * Maps a portion of an MMIO2 region into the hypervisor region. 405 * 406 * Callers of this API must never deregister the MMIO2 region before the 407 * VM is powered off. If this becomes a requirement MMR3HyperUnmapMMIO2 408 * API will be needed to perform cleanups. 409 * 410 * @return VBox status code. 411 * 412 * @param pVM Pointer to the shared VM structure. 413 * @param pDevIns The device owning the MMIO2 memory. 414 * @param iRegion The region. 415 * @param off The offset into the region. Will be rounded down to closest page boundrary. 416 * @param cb The number of bytes to map. Will be rounded up to the closest page boundrary. 417 * @param pszDesc Mapping description. 418 * @param pGCPtr Where to store the GC address. 419 */ 420 MMR3DECL(int) MMR3HyperMapMMIO2(PVM pVM, PPDMDEVINS pDevIns, uint32_t iRegion, RTGCPHYS off, RTGCPHYS cb, 421 const char *pszDesc, PRTGCPTR pGCPtr) 422 { 423 LogFlow(("MMR3HyperMapMMIO2: pDevIns=%p iRegion=%#x off=%VGp cb=%VGp pszDesc=%p:{%s} pGCPtr=%p\n", 424 pDevIns, iRegion, off, cb, pszDesc, pszDesc, pGCPtr)); 425 int rc; 426 427 /* 428 * Validate input. 429 */ 430 AssertReturn(pszDesc && *pszDesc, VERR_INVALID_PARAMETER); 431 AssertReturn(off + cb > off, VERR_INVALID_PARAMETER); 432 uint32_t const offPage = off & PAGE_OFFSET_MASK; 433 off &= ~(RTGCPHYS)PAGE_OFFSET_MASK; 434 cb += offPage; 435 cb = RT_ALIGN_Z(cb, PAGE_SIZE); 436 const RTGCPHYS offEnd = off + cb; 437 AssertReturn(offEnd > off, VERR_INVALID_PARAMETER); 438 for (RTGCPHYS offCur = off; offCur < offEnd; offCur += PAGE_SIZE) 439 { 440 RTHCPHYS HCPhys; 441 rc = PGMR3PhysMMIO2GetHCPhys(pVM, pDevIns, iRegion, offCur, &HCPhys); 442 AssertMsgRCReturn(rc, ("rc=%Rrc - iRegion=%d off=%RGp\n", rc, iRegion, off), rc); 443 } 444 445 /* 446 * Add the memory to the hypervisor area. 447 */ 448 RTGCPTR GCPtr; 449 PMMLOOKUPHYPER pLookup; 450 rc = mmR3HyperMap(pVM, cb, pszDesc, &GCPtr, &pLookup); 451 if (VBOX_SUCCESS(rc)) 452 { 453 pLookup->enmType = MMLOOKUPHYPERTYPE_MMIO2; 454 pLookup->u.MMIO2.pDevIns = pDevIns; 455 pLookup->u.MMIO2.iRegion = iRegion; 456 pLookup->u.MMIO2.off = off; 457 458 /* 459 * Update the page table. 
460 */ 461 if (pVM->mm.s.fPGMInitialized) 462 { 463 for (RTGCPHYS offCur = off; offCur < offEnd; offCur += PAGE_SIZE) 464 { 465 RTHCPHYS HCPhys; 466 rc = PGMR3PhysMMIO2GetHCPhys(pVM, pDevIns, iRegion, offCur, &HCPhys); 467 AssertRCReturn(rc, VERR_INTERNAL_ERROR); 468 rc = PGMMap(pVM, GCPtr + (offCur - off), HCPhys, PAGE_SIZE, 0); 469 if (VBOX_FAILURE(rc)) 470 { 471 AssertMsgFailed(("rc=%Vrc offCur=%RGp %s\n", rc, offCur, pszDesc)); 472 break; 473 } 474 } 475 } 476 477 if (VBOX_SUCCESS(rc) && pGCPtr) 478 *pGCPtr = GCPtr | offPage; 479 } 480 return rc; 481 } 482 483 385 484 386 485 … … 855 954 856 955 case MMLOOKUPHYPERTYPE_GCPHYS: 956 case MMLOOKUPHYPERTYPE_MMIO2: 857 957 case MMLOOKUPHYPERTYPE_DYNAMIC: 858 /* can convert these kind of records. */958 /* can (or don't want to) convert these kind of records. */ 859 959 break; 860 960 … … 978 1078 break; 979 1079 1080 case MMLOOKUPHYPERTYPE_MMIO2: 1081 pHlp->pfnPrintf(pHlp, "%VGv-%VGv %*s MMIO2 %VGp%*s %s\n", 1082 pLookup->off + pVM->mm.s.pvHyperAreaGC, 1083 pLookup->off + pVM->mm.s.pvHyperAreaGC + pLookup->cb, 1084 sizeof(RTHCPTR) * 2, "", 1085 pLookup->u.MMIO2.off, RT_ABS(sizeof(RTHCPHYS) - sizeof(RTGCPHYS)) * 2, "", 1086 pLookup->pszDesc); 1087 break; 1088 980 1089 case MMLOOKUPHYPERTYPE_DYNAMIC: 981 1090 pHlp->pfnPrintf(pHlp, "%VGv-%VGv %*s DYNAMIC %*s %s\n", -
trunk/src/VBox/VMM/MMInternal.h
r7632 → r7635

     /** Mapping of contiguous GC physical memory. */
     MMLOOKUPHYPERTYPE_GCPHYS,
+    /** Mapping of MMIO2 memory. */
+    MMLOOKUPHYPERTYPE_MMIO2,
     /** Dynamic mapping area (MMR3HyperReserve).
      * A conversion will require to check what's in the page table for the pages. */
…
             RTGCPHYS GCPhys;
         } GCPhys;
+        /** MMIO2 memory. */
+        struct
+        {
+            /** The device instance owning the MMIO2 region. */
+            PPDMDEVINS pDevIns;
+            /** The region number. */
+            uint32_t iRegion;
+            /** The offset into the MMIO2 region. */
+            RTGCPHYS off;
+        } MMIO2;
     } u;
     /** Description. */
trunk/src/VBox/VMM/MMPhys.cpp
r6856 r7635 181 181 /* Cleanup is done in VM destruction to which failure of this function will lead. */ 182 182 /* Not true in case of MM_PHYS_TYPE_DYNALLOC_CHUNK */ 183 }184 185 return rc;186 }187 188 189 /**190 * Relocate previously registered externally allocated RAM for the virtual machine.191 *192 * Use this only for MMIO ranges or the guest will become very confused.193 * The memory registered with the VM thru this interface must not be freed194 * before the virtual machine has been destroyed. Bad things may happen... :-)195 *196 * @return VBox status code.197 * @param pVM VM handle.198 * @param GCPhysOld The physical address the ram was registered at.199 * @param GCPhysNew The physical address the ram shall be registered at.200 * @param cb Size of the memory. Must be page aligend.201 */202 MMR3DECL(int) MMR3PhysRelocate(PVM pVM, RTGCPHYS GCPhysOld, RTGCPHYS GCPhysNew, unsigned cb)203 {204 Log(("MMR3PhysRelocate: GCPhysOld=%VGp GCPhysNew=%VGp cb=%#x\n", GCPhysOld, GCPhysNew, cb));205 206 /*207 * Validate input.208 */209 AssertMsg(pVM, ("Invalid VM pointer\n"));210 AssertReturn(RT_ALIGN_T(GCPhysOld, PAGE_SIZE, RTGCPHYS) == GCPhysOld, VERR_INVALID_PARAMETER);211 AssertReturn(RT_ALIGN_T(GCPhysNew, PAGE_SIZE, RTGCPHYS) == GCPhysNew, VERR_INVALID_PARAMETER);212 AssertReturn(RT_ALIGN(cb, PAGE_SIZE) == cb, VERR_INVALID_PARAMETER);213 RTGCPHYS GCPhysLast;214 GCPhysLast = GCPhysOld + (cb - 1);215 AssertReturn(GCPhysLast > GCPhysOld, VERR_INVALID_PARAMETER);216 GCPhysLast = GCPhysNew + (cb - 1);217 AssertReturn(GCPhysLast > GCPhysNew, VERR_INVALID_PARAMETER);218 219 /*220 * Find the old memory region.221 */222 PMMLOCKEDMEM pCur = pVM->mm.s.pLockedMem;223 while (pCur)224 {225 if ( pCur->eType == MM_LOCKED_TYPE_PHYS226 && GCPhysOld == pCur->u.phys.GCPhys227 && cb == pCur->cb)228 break;229 230 /* next */231 pCur = pCur->pNext;232 }233 if (!pCur)234 {235 AssertMsgFailed(("Unknown old region! %VGp LB%#x\n", GCPhysOld, cb));236 return VERR_INVALID_PARAMETER;237 }238 239 /*240 * Region is already locked, just need to change GC address.241 */242 /** @todo r=bird: check for conflicts? */243 pCur->u.phys.GCPhys = GCPhysNew;244 245 /*246 * Relocate the registered RAM range with PGM.247 */248 int rc = PGMR3PhysRelocate(pVM, GCPhysOld, GCPhysNew, cb);249 if (VBOX_SUCCESS(rc))250 {251 /* Somewhat hackish way to relocate the region with REM. There252 * is unfortunately no official way to unregister anything with253 * REM, as there is no way to unregister memory with QEMU.254 * This implementation seems to work, but is not very pretty. */255 /// @todo one day provide a proper MMIO relocation operation256 REMR3NotifyPhysReserve(pVM, GCPhysOld, cb);257 REMR3NotifyPhysRamRegister(pVM, GCPhysNew, cb,258 pCur->aPhysPages[0].Phys & (MM_RAM_FLAGS_RESERVED | MM_RAM_FLAGS_ROM | MM_RAM_FLAGS_MMIO | MM_RAM_FLAGS_MMIO2));259 183 } 260 184 -
trunk/src/VBox/VMM/PDM.cpp
r6796 → r7635

 #include <VBox/pdm.h>
 #include <VBox/mm.h>
+#include <VBox/pgm.h>
 #include <VBox/ssm.h>
 #include <VBox/vm.h>
…
     //pdmR3ThreadDestroyDevice(pVM, pDevIns);
     //PDMR3QueueDestroyDevice(pVM, pDevIns);
+    PGMR3PhysMMIO2Deregister(pVM, pDevIns, UINT32_MAX);
 }
trunk/src/VBox/VMM/PDMDevice.cpp
r7628 r7635 69 69 * Internal Functions * 70 70 *******************************************************************************/ 71 __BEGIN_DECLS 71 72 static DECLCALLBACK(int) pdmR3DevReg_Register(PPDMDEVREGCB pCallbacks, PCPDMDEVREG pDevReg); 72 73 static DECLCALLBACK(void *) pdmR3DevReg_MMHeapAlloc(PPDMDEVREGCB pCallbacks, size_t cb); … … 162 163 uint32_t *pEax, uint32_t *pEbx, uint32_t *pEcx, uint32_t *pEdx); 163 164 static DECLCALLBACK(int) pdmR3DevHlp_ROMProtectShadow(PPDMDEVINS pDevIns, RTGCPHYS GCPhysStart, RTUINT cbRange); 165 static DECLCALLBACK(int) pdmR3DevHlp_MMIO2Register(PPDMDEVINS pDevIns, uint32_t iRegion, RTGCPHYS cb, void **ppv, const char *pszDesc); 166 static DECLCALLBACK(int) pdmR3DevHlp_MMIO2Deregister(PPDMDEVINS pDevIns, uint32_t iRegion); 167 static DECLCALLBACK(int) pdmR3DevHlp_MMIO2Map(PPDMDEVINS pDevIns, uint32_t iRegion, RTGCPHYS GCPhys); 168 static DECLCALLBACK(int) pdmR3DevHlp_MMIO2Unmap(PPDMDEVINS pDevIns, uint32_t iRegion, RTGCPHYS GCPhys); 169 static DECLCALLBACK(int) pdmR3DevHlp_MMHyperMapMMIO2(PPDMDEVINS pDevIns, uint32_t iRegion, RTGCPHYS off, RTGCPHYS cb, const char *pszDesc, PRTGCPTR pGCPtr); 164 170 165 171 static DECLCALLBACK(PVM) pdmR3DevHlp_Untrusted_GetVM(PPDMDEVINS pDevIns); … … 196 202 uint32_t *pEax, uint32_t *pEbx, uint32_t *pEcx, uint32_t *pEdx); 197 203 static DECLCALLBACK(int) pdmR3DevHlp_Untrusted_ROMProtectShadow(PPDMDEVINS pDevIns, RTGCPHYS GCPhysStart, RTUINT cbRange); 204 static DECLCALLBACK(int) pdmR3DevHlp_Untrusted_MMIO2Register(PPDMDEVINS pDevIns, uint32_t iRegion, RTGCPHYS cb, void **ppv, const char *pszDesc); 205 static DECLCALLBACK(int) pdmR3DevHlp_Untrusted_MMIO2Deregister(PPDMDEVINS pDevIns, uint32_t iRegion); 206 static DECLCALLBACK(int) pdmR3DevHlp_Untrusted_MMIO2Map(PPDMDEVINS pDevIns, uint32_t iRegion, RTGCPHYS GCPhys); 207 static DECLCALLBACK(int) pdmR3DevHlp_Untrusted_MMIO2Unmap(PPDMDEVINS pDevIns, uint32_t iRegion, RTGCPHYS GCPhys); 208 static DECLCALLBACK(int) pdmR3DevHlp_Untrusted_MMHyperMapMMIO2(PPDMDEVINS pDevIns, uint32_t iRegion, RTGCPHYS off, RTGCPHYS cb, const char *pszDesc, PRTGCPTR pGCPtr); 198 209 /** @} */ 199 210 … … 247 258 static DECLCALLBACK(void) pdmR3PciHlp_IsaSetIrq(PPDMDEVINS pDevIns, int iIrq, int iLevel); 248 259 static DECLCALLBACK(void) pdmR3PciHlp_IoApicSetIrq(PPDMDEVINS pDevIns, int iIrq, int iLevel); 260 static DECLCALLBACK(bool) pdmR3PciHlp_IsMMIO2Base(PPDMDEVINS pDevIns, PPDMDEVINS pOwner, RTGCPHYS GCPhys); 249 261 #ifdef VBOX_WITH_PDM_LOCK 250 262 static DECLCALLBACK(int) pdmR3PciHlp_Lock(PPDMDEVINS pDevIns, int rc); … … 270 282 */ 271 283 #define PDM_PHYS_READWRITE_FROM_ANY_THREAD 284 285 __END_DECLS 272 286 273 287 /******************************************************************************* … … 359 373 pdmR3DevHlp_GetCpuId, 360 374 pdmR3DevHlp_ROMProtectShadow, 375 pdmR3DevHlp_MMIO2Register, 376 pdmR3DevHlp_MMIO2Deregister, 377 pdmR3DevHlp_MMIO2Map, 378 pdmR3DevHlp_MMIO2Unmap, 379 pdmR3DevHlp_MMHyperMapMMIO2, 361 380 PDM_DEVHLP_VERSION /* the end */ 362 381 }; … … 448 467 pdmR3DevHlp_Untrusted_QueryCPUId, 449 468 pdmR3DevHlp_Untrusted_ROMProtectShadow, 469 pdmR3DevHlp_Untrusted_MMIO2Register, 470 pdmR3DevHlp_Untrusted_MMIO2Deregister, 471 pdmR3DevHlp_Untrusted_MMIO2Map, 472 pdmR3DevHlp_Untrusted_MMIO2Unmap, 473 pdmR3DevHlp_Untrusted_MMHyperMapMMIO2, 450 474 PDM_DEVHLP_VERSION /* the end */ 451 475 }; … … 514 538 pdmR3PciHlp_IsaSetIrq, 515 539 pdmR3PciHlp_IoApicSetIrq, 540 pdmR3PciHlp_IsMMIO2Base, 541 pdmR3PciHlp_GetGCHelpers, 542 pdmR3PciHlp_GetR0Helpers, 516 543 #ifdef 
VBOX_WITH_PDM_LOCK 517 544 pdmR3PciHlp_Lock, 518 545 pdmR3PciHlp_Unlock, 519 546 #endif 520 pdmR3PciHlp_GetGCHelpers,521 pdmR3PciHlp_GetR0Helpers,522 547 PDM_PCIHLPR3_VERSION, /* the end */ 523 548 }; … … 1884 1909 LogFlow(("pdmR3DevHlp_ISASetIrq: caller='%s'/%d: returns void\n", pDevIns->pDevReg->szDeviceName, pDevIns->iInstance)); 1885 1910 } 1911 1886 1912 1887 1913 /** @copydoc PDMDEVHLP::pfnISASetIrqNoWait */ … … 3598 3624 3599 3625 3626 /** 3627 * @copydoc PDMDEVHLP::pfnMMIO2Register 3628 */ 3629 static DECLCALLBACK(int) pdmR3DevHlp_MMIO2Register(PPDMDEVINS pDevIns, uint32_t iRegion, RTGCPHYS cb, void **ppv, const char *pszDesc) 3630 { 3631 PDMDEV_ASSERT_DEVINS(pDevIns); 3632 VM_ASSERT_EMT(pDevIns->Internal.s.pVMHC); 3633 LogFlow(("pdmR3DevHlp_MMIO2Register: caller='%s'/%d: iRegion=#x cb=%#RGp ppv=%p pszDescp=%p:{%s}\n", 3634 pDevIns->pDevReg->szDeviceName, pDevIns->iInstance, iRegion, cb, ppv, pszDesc, pszDesc)); 3635 3636 int rc = PGMR3PhysMMIO2Register(pDevIns->Internal.s.pVMHC, pDevIns, iRegion, cb, ppv, pszDesc); 3637 3638 LogFlow(("pdmR3DevHlp_MMIO2Register: caller='%s'/%d: returns %Rrc\n", pDevIns->pDevReg->szDeviceName, pDevIns->iInstance, rc)); 3639 return rc; 3640 } 3641 3642 3643 /** 3644 * @copydoc PDMDEVHLP::pfnMMIO2Deregister 3645 */ 3646 static DECLCALLBACK(int) pdmR3DevHlp_MMIO2Deregister(PPDMDEVINS pDevIns, uint32_t iRegion) 3647 { 3648 PDMDEV_ASSERT_DEVINS(pDevIns); 3649 VM_ASSERT_EMT(pDevIns->Internal.s.pVMHC); 3650 LogFlow(("pdmR3DevHlp_MMIO2Deregister: caller='%s'/%d: iRegion=#x\n", 3651 pDevIns->pDevReg->szDeviceName, pDevIns->iInstance, iRegion)); 3652 3653 AssertReturn(iRegion == UINT32_MAX, VERR_INVALID_PARAMETER); 3654 3655 int rc = PGMR3PhysMMIO2Deregister(pDevIns->Internal.s.pVMHC, pDevIns, iRegion); 3656 3657 LogFlow(("pdmR3DevHlp_MMIO2Deregister: caller='%s'/%d: returns %Rrc\n", pDevIns->pDevReg->szDeviceName, pDevIns->iInstance, rc)); 3658 return rc; 3659 } 3660 3661 3662 /** 3663 * @copydoc PDMDEVHLP::pfnMMIO2Map 3664 */ 3665 static DECLCALLBACK(int) pdmR3DevHlp_MMIO2Map(PPDMDEVINS pDevIns, uint32_t iRegion, RTGCPHYS GCPhys) 3666 { 3667 PDMDEV_ASSERT_DEVINS(pDevIns); 3668 VM_ASSERT_EMT(pDevIns->Internal.s.pVMHC); 3669 LogFlow(("pdmR3DevHlp_MMIO2Map: caller='%s'/%d: iRegion=#x GCPhys=%#RGp\n", 3670 pDevIns->pDevReg->szDeviceName, pDevIns->iInstance, iRegion, GCPhys)); 3671 3672 int rc = PGMR3PhysMMIO2Map(pDevIns->Internal.s.pVMHC, pDevIns, iRegion, GCPhys); 3673 3674 LogFlow(("pdmR3DevHlp_MMIO2Map: caller='%s'/%d: returns %Rrc\n", pDevIns->pDevReg->szDeviceName, pDevIns->iInstance, rc)); 3675 return rc; 3676 } 3677 3678 3679 /** 3680 * @copydoc PDMDEVHLP::pfnMMIO2Unmap 3681 */ 3682 static DECLCALLBACK(int) pdmR3DevHlp_MMIO2Unmap(PPDMDEVINS pDevIns, uint32_t iRegion, RTGCPHYS GCPhys) 3683 { 3684 PDMDEV_ASSERT_DEVINS(pDevIns); 3685 VM_ASSERT_EMT(pDevIns->Internal.s.pVMHC); 3686 LogFlow(("pdmR3DevHlp_MMIO2Unmap: caller='%s'/%d: iRegion=#x GCPhys=%#RGp\n", 3687 pDevIns->pDevReg->szDeviceName, pDevIns->iInstance, iRegion, GCPhys)); 3688 3689 int rc = PGMR3PhysMMIO2Unmap(pDevIns->Internal.s.pVMHC, pDevIns, iRegion, GCPhys); 3690 3691 LogFlow(("pdmR3DevHlp_MMIO2Unmap: caller='%s'/%d: returns %Rrc\n", pDevIns->pDevReg->szDeviceName, pDevIns->iInstance, rc)); 3692 return rc; 3693 } 3694 3695 3696 /** 3697 * @copydoc PDMDEVHLP::pfnMMHyperMapMMIO2 3698 */ 3699 static DECLCALLBACK(int) pdmR3DevHlp_MMHyperMapMMIO2(PPDMDEVINS pDevIns, uint32_t iRegion, RTGCPHYS off, RTGCPHYS cb, 3700 const char *pszDesc, PRTGCPTR pGCPtr) 3701 { 3702 PDMDEV_ASSERT_DEVINS(pDevIns); 
3703 VM_ASSERT_EMT(pDevIns->Internal.s.pVMHC); 3704 LogFlow(("pdmR3DevHlp_MMHyperMapMMIO2: caller='%s'/%d: iRegion=#x off=%RGp cb=%RGp pszDesc=%p:{%s} pGCPtr=%p\n", 3705 pDevIns->pDevReg->szDeviceName, pDevIns->iInstance, iRegion, off, cb, pszDesc, pszDesc, pGCPtr)); 3706 3707 int rc = MMR3HyperMapMMIO2(pDevIns->Internal.s.pVMHC, pDevIns, iRegion, off, cb, pszDesc, pGCPtr); 3708 3709 LogFlow(("pdmR3DevHlp_MMHyperMapMMIO2: caller='%s'/%d: returns %Rrc\n", pDevIns->pDevReg->szDeviceName, pDevIns->iInstance, rc)); 3710 return rc; 3711 } 3712 3713 3714 3715 3600 3716 3601 3717 /** @copydoc PDMDEVHLP::pfnGetVM */ … … 3906 4022 return VERR_ACCESS_DENIED; 3907 4023 } 4024 4025 4026 /** @copydoc PDMDEVHLP::pfnMMIO2Register */ 4027 static DECLCALLBACK(int) pdmR3DevHlp_Untrusted_MMIO2Register(PPDMDEVINS pDevIns, uint32_t iRegion, RTGCPHYS cb, void **ppv, const char *pszDesc) 4028 { 4029 PDMDEV_ASSERT_DEVINS(pDevIns); 4030 AssertReleaseMsgFailed(("Untrusted device called trusted helper! '%s'/%d\n", pDevIns->pDevReg->szDeviceName, pDevIns->iInstance)); 4031 return VERR_ACCESS_DENIED; 4032 } 4033 4034 4035 /** @copydoc PDMDEVHLP::pfnMMIO2Deregister */ 4036 static DECLCALLBACK(int) pdmR3DevHlp_Untrusted_MMIO2Deregister(PPDMDEVINS pDevIns, uint32_t iRegion) 4037 { 4038 PDMDEV_ASSERT_DEVINS(pDevIns); 4039 AssertReleaseMsgFailed(("Untrusted device called trusted helper! '%s'/%d\n", pDevIns->pDevReg->szDeviceName, pDevIns->iInstance)); 4040 return VERR_ACCESS_DENIED; 4041 } 4042 4043 4044 /** @copydoc PDMDEVHLP::pfnMMIO2Map */ 4045 static DECLCALLBACK(int) pdmR3DevHlp_Untrusted_MMIO2Map(PPDMDEVINS pDevIns, uint32_t iRegion, RTGCPHYS GCPhys) 4046 { 4047 PDMDEV_ASSERT_DEVINS(pDevIns); 4048 AssertReleaseMsgFailed(("Untrusted device called trusted helper! '%s'/%d\n", pDevIns->pDevReg->szDeviceName, pDevIns->iInstance)); 4049 return VERR_ACCESS_DENIED; 4050 } 4051 4052 4053 /** @copydoc PDMDEVHLP::pfnMMIO2Unmap */ 4054 static DECLCALLBACK(int) pdmR3DevHlp_Untrusted_MMIO2Unmap(PPDMDEVINS pDevIns, uint32_t iRegion, RTGCPHYS GCPhys) 4055 { 4056 PDMDEV_ASSERT_DEVINS(pDevIns); 4057 AssertReleaseMsgFailed(("Untrusted device called trusted helper! '%s'/%d\n", pDevIns->pDevReg->szDeviceName, pDevIns->iInstance)); 4058 return VERR_ACCESS_DENIED; 4059 } 4060 4061 4062 /** @copydoc PDMDEVHLP::pfnMMHyperMapMMIO2 */ 4063 static DECLCALLBACK(int) pdmR3DevHlp_Untrusted_MMHyperMapMMIO2(PPDMDEVINS pDevIns, uint32_t iRegion, RTGCPHYS off, RTGCPHYS cb, const char *pszDesc, PRTGCPTR pGCPtr) 4064 { 4065 PDMDEV_ASSERT_DEVINS(pDevIns); 4066 AssertReleaseMsgFailed(("Untrusted device called trusted helper! '%s'/%d\n", pDevIns->pDevReg->szDeviceName, pDevIns->iInstance)); 4067 return VERR_ACCESS_DENIED; 4068 } 4069 4070 3908 4071 3909 4072 … … 4153 4316 Log4(("pdmR3PciHlp_IsaSetIrq: iIrq=%d iLevel=%d\n", iIrq, iLevel)); 4154 4317 PDMIoApicSetIrq(pDevIns->Internal.s.pVMHC, iIrq, iLevel); 4318 } 4319 4320 4321 /** @copydoc PDMPCIHLPR3::pfnIsMMIO2Base */ 4322 static DECLCALLBACK(bool) pdmR3PciHlp_IsMMIO2Base(PPDMDEVINS pDevIns, PPDMDEVINS pOwner, RTGCPHYS GCPhys) 4323 { 4324 PDMDEV_ASSERT_DEVINS(pDevIns); 4325 VM_ASSERT_EMT(pDevIns->Internal.s.pVMHC); 4326 bool fRc = PGMR3PhysMMIO2IsBase(pDevIns->Internal.s.pVMHC, pOwner, GCPhys); 4327 Log4(("pdmR3PciHlp_IsMMIO2Base: pOwner=%p GCPhys=%RGp -> %RTbool\n", pOwner, GCPhys, fRc)); 4328 return fRc; 4155 4329 } 4156 4330 -
trunk/src/VBox/VMM/PGMInternal.h
r7629 r7635 573 573 (pPage)->fSomethingElse = 0; \ 574 574 (pPage)->idPageX = (_idPage); \ 575 (pPage)->u3Type = (_uType); \ 575 /*(pPage)->u3Type = (_uType); - later */ \ 576 PGM_PAGE_SET_TYPE(pPage, _uType); \ 576 577 (pPage)->u29B = 0; \ 577 578 } while (0) … … 599 600 #define PGM_PAGE_STATE_ZERO 0 600 601 /** A allocated page. 601 * This is a per-VM page allocated from the page pool. 602 * This is a per-VM page allocated from the page pool (or wherever 603 * we get MMIO2 pages from if the type is MMIO2). 602 604 */ 603 605 #define PGM_PAGE_STATE_ALLOCATED 1 … … 713 715 * @param _enmType The new page type (PGMPAGETYPE). 714 716 */ 717 #ifdef VBOX_WITH_NEW_PHYS_CODE 715 718 #define PGM_PAGE_SET_TYPE(pPage, _enmType) \ 716 719 do { (pPage)->u3Type = (_enmType); } while (0) 720 #else 721 #define PGM_PAGE_SET_TYPE(pPage, _enmType) \ 722 do { \ 723 (pPage)->u3Type = (_enmType); \ 724 if ((_enmType) == PGMPAGETYPE_ROM) \ 725 (pPage)->HCPhys |= MM_RAM_FLAGS_ROM; \ 726 else if ((_enmType) == PGMPAGETYPE_ROM_SHADOW) \ 727 (pPage)->HCPhys |= MM_RAM_FLAGS_ROM | MM_RAM_FLAGS_MMIO2; \ 728 else if ((_enmType) == PGMPAGETYPE_MMIO2) \ 729 (pPage)->HCPhys |= MM_RAM_FLAGS_MMIO2; \ 730 } while (0) 731 #endif 717 732 718 733 … … 985 1000 /** Pointer to the next range - R0. */ 986 1001 R0PTRTYPE(struct PGMROMRANGE *) pNextR0; 987 /** Pointer to the next range - R0. */1002 /** Pointer to the next range - GC. */ 988 1003 GCPTRTYPE(struct PGMROMRANGE *) pNextGC; 989 1004 #if GC_ARCH_BITS == 32 … … 1010 1025 /** Pointer to a ROM range. */ 1011 1026 typedef PGMROMRANGE *PPGMROMRANGE; 1027 1028 1029 /** 1030 * A registered MMIO2 (= Device RAM) range. 1031 * 1032 * There are a few reason why we need to keep track of these 1033 * registrations. One of them is the deregistration & cleanup 1034 * stuff, while another is that the PGMRAMRANGE associated with 1035 * such a region may have to be removed from the ram range list. 1036 * 1037 * Overlapping with a RAM range has to be 100% or none at all. The 1038 * pages in the existing RAM range must not be ROM nor MMIO. A guru 1039 * meditation will be raised if a partial overlap or an overlap of 1040 * ROM pages is encountered. On an overlap we will free all the 1041 * existing RAM pages and put in the ram range pages instead. 1042 */ 1043 typedef struct PGMMMIO2RANGE 1044 { 1045 /** The owner of the range. (a device) */ 1046 PPDMDEVINSR3 pDevInsR3; 1047 /** Pointer to the ring-3 mapping of the allocation. */ 1048 RTR3PTR pvR3; 1049 /** Pointer to the next range - R3. */ 1050 R3PTRTYPE(struct PGMMMIO2RANGE *) pNextR3; 1051 /** Whether it's mapped or not. */ 1052 bool fMapped; 1053 /** Whether it's overlapping or not. */ 1054 bool fOverlapping; 1055 /** The PCI region number. 1056 * @remarks This ASSUMES that nobody will ever really need to have multiple 1057 * PCI devices with matching MMIO region numbers on a single device. */ 1058 uint8_t iRegion; 1059 /**< Alignment padding for putting the ram range on a PGMPAGE alignment boundrary. */ 1060 uint8_t abAlignemnt[HC_ARCH_BITS == 32 ? 1 : 5]; 1061 /** The associated RAM range. */ 1062 PGMRAMRANGE RamRange; 1063 } PGMMMIO2RANGE; 1064 /** Pointer to a MMIO2 range. */ 1065 typedef PGMMMIO2RANGE *PPGMMMIO2RANGE; 1012 1066 1013 1067 … … 2048 2102 RTGCPTR GCPtrPadding2; 2049 2103 2104 /** Pointer to the list of MMIO2 ranges - for R3. 2105 * Registration order. */ 2106 R3PTRTYPE(PPGMMMIO2RANGE) pMmio2RangesR3; 2107 2050 2108 /** PGM offset based trees - HC Ptr. 
*/ 2051 2109 R3R0PTRTYPE(PPGMTREES) pTreesHC; … … 2529 2587 2530 2588 2589 void pgmPhysFreePage(PVM pVM, PPGMPAGE pPage, RTGCPHYS GCPhys); 2531 2590 int pgmPhysPageLoadIntoTlb(PPGM pPGM, RTGCPHYS GCPhys); 2532 2591 int pgmPhysPageMakeWritable(PVM pVM, PPGMPAGE pPage, RTGCPHYS GCPhys); -
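The new PGMMMIO2RANGE records are kept in a singly linked ring-3 list (pMmio2RangesR3) in registration order and are looked up by owning device instance and PCI region number. The standalone C fragment below models that bookkeeping with plain types so the lookup, push-front registration and unlink-on-deregistration steps used later in PGMPhys.cpp can be read in isolation; all type and function names here are invented for the illustration (the model lookup also compares the region number, which the changeset's pgmR3PhysMMIO2Find does not).

#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>

/* Simplified stand-in for PGMMMIO2RANGE: owner + PCI region number + link. */
typedef struct MODELMMIO2RANGE
{
    void                   *pDevIns;    /* owning device instance */
    uint8_t                 iRegion;    /* PCI region number */
    bool                    fMapped;    /* currently mapped into the guest? */
    struct MODELMMIO2RANGE *pNext;      /* registration-order list */
} MODELMMIO2RANGE;

/* Lookup by owner and, unless iRegion is UINT32_MAX, by region number. */
static MODELMMIO2RANGE *modelMmio2Find(MODELMMIO2RANGE *pHead, void *pDevIns, uint32_t iRegion)
{
    for (MODELMMIO2RANGE *pCur = pHead; pCur; pCur = pCur->pNext)
        if (   pCur->pDevIns == pDevIns
            && (iRegion == UINT32_MAX || pCur->iRegion == iRegion))
            return pCur;
    return NULL;
}

/* Registration simply pushes the new record onto the head of the list. */
static void modelMmio2Link(MODELMMIO2RANGE **ppHead, MODELMMIO2RANGE *pNew)
{
    pNew->pNext = *ppHead;
    *ppHead = pNew;
}

/* Deregistration walks the list and unlinks the matching record. */
static bool modelMmio2Unlink(MODELMMIO2RANGE **ppHead, MODELMMIO2RANGE *pRange)
{
    for (MODELMMIO2RANGE **ppCur = ppHead; *ppCur; ppCur = &(*ppCur)->pNext)
        if (*ppCur == pRange)
        {
            *ppCur = pRange->pNext;
            pRange->pNext = NULL;
            return true;
        }
    return false;
}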
trunk/src/VBox/VMM/PGMPhys.cpp
r7613 r7635 110 110 111 111 /** 112 * Links a new RAM range intothe list.112 * Unlink an existing RAM range from the list. 113 113 * 114 114 * @param pVM Pointer to the shared VM structure. … … 116 116 * @param pPrev Pointer to the previous list entry. If NULL, insert as head. 117 117 */ 118 static void pgmR3PhysUnlinkRamRange (PVM pVM, PPGMRAMRANGE pRam, PPGMRAMRANGE pPrev)118 static void pgmR3PhysUnlinkRamRange2(PVM pVM, PPGMRAMRANGE pRam, PPGMRAMRANGE pPrev) 119 119 { 120 120 Assert(pPrev ? pPrev->pNextR3 == pRam : pVM->pgm.s.pRamRangesR3 == pRam); … … 131 131 else 132 132 { 133 Assert(pVM->pgm.s.pRamRangesR3 == pRam); 133 134 pVM->pgm.s.pRamRangesR3 = pNext; 134 135 pVM->pgm.s.pRamRangesR0 = pNext ? MMHyperCCToR0(pVM, pNext) : NIL_RTR0PTR; … … 137 138 138 139 pgmUnlock(pVM); 140 } 141 142 143 /** 144 * Unlink an existing RAM range from the list. 145 * 146 * @param pVM Pointer to the shared VM structure. 147 * @param pRam Pointer to the new list entry. 148 */ 149 static void pgmR3PhysUnlinkRamRange(PVM pVM, PPGMRAMRANGE pRam) 150 { 151 /* find prev. */ 152 PPGMRAMRANGE pPrev = NULL; 153 PPGMRAMRANGE pCur = pVM->pgm.s.pRamRangesR3; 154 while (pCur != pRam) 155 { 156 pPrev = pCur; 157 pCur = pCur->pNextR3; 158 } 159 AssertFatal(pCur); 160 161 pgmR3PhysUnlinkRamRange2(pVM, pRam, pPrev); 139 162 } 140 163 … … 202 225 PPGMRAMRANGE pNew; 203 226 rc = MMR3HyperAllocOnceNoRel(pVM, cbRamRange, 0, MM_TAG_PGM_PHYS, (void **)&pNew); 204 AssertLogRelMsgRCReturn(rc, ("cbRamRange=%z d\n", cbRamRange), rc);227 AssertLogRelMsgRCReturn(rc, ("cbRamRange=%zu\n", cbRamRange), rc); 205 228 206 229 /* … … 275 298 276 299 /** 300 * Locate a MMIO2 range. 301 * 302 * @returns Pointer to the MMIO2 range. 303 * @param pVM Pointer to the shared VM structure. 304 * @param pDevIns The device instance owning the region. 305 * @param iRegion The region. 306 */ 307 DECLINLINE(PPGMMMIO2RANGE) pgmR3PhysMMIO2Find(PVM pVM, PPDMDEVINS pDevIns, uint32_t iRegion) 308 { 309 /* 310 * Search the list. 311 */ 312 for (PPGMMMIO2RANGE pCur = pVM->pgm.s.pMmio2RangesR3; pCur; pCur = pCur->pNextR3) 313 if (pCur->pDevInsR3 == pDevIns) 314 return pCur; 315 return NULL; 316 } 317 318 319 /** 277 320 * Allocate and register a MMIO2 region. 278 321 * … … 287 330 * is replaced, this is of course incorrectly but it's too much 288 331 * effort. 289 */ 290 PDMR3DECL(int) PGMR3PhysMMIO2Register(PVM pVM, PPDMDEVINS pDevIns, RTGCPHYS GCPhys, RTGCPHYS cb, void **ppv, const char *pszDesc) 291 { 292 return -1; 293 } 294 295 296 /** 297 * Reallocates a MMIO2 region. 332 * 333 * @returns VBox status code. 334 * @retval VINF_SUCCESS on success, *ppv pointing to the R3 mapping of the memory. 335 * @retval VERR_ALREADY_EXISTS if the region already exists. 336 * 337 * @param pVM Pointer to the shared VM structure. 338 * @param pDevIns The device instance owning the region. 339 * @param iRegion The region number. If the MMIO2 memory is a PCI I/O region 340 * this number has to be the number of that region. Otherwise 341 * it can be any number safe UINT8_MAX. 342 * @param cb The size of the region. Must be page aligned. 343 * @param ppv Where to store the pointer to the ring-3 mapping of the memory. 344 * @param pszDesc The description. 345 */ 346 PDMR3DECL(int) PGMR3PhysMMIO2Register(PVM pVM, PPDMDEVINS pDevIns, uint32_t iRegion, RTGCPHYS cb, void **ppv, const char *pszDesc) 347 { 348 /* 349 * Validate input. 
350 */ 351 VM_ASSERT_EMT_RETURN(pVM, VERR_VM_THREAD_NOT_EMT); 352 AssertPtrReturn(pDevIns, VERR_INVALID_PARAMETER); 353 AssertReturn(iRegion <= UINT8_MAX, VERR_INVALID_PARAMETER); 354 AssertPtrReturn(ppv, VERR_INVALID_POINTER); 355 AssertPtrReturn(pszDesc, VERR_INVALID_POINTER); 356 AssertReturn(*pszDesc, VERR_INVALID_PARAMETER); 357 AssertReturn(pgmR3PhysMMIO2Find(pVM, pDevIns, iRegion) == NULL, VERR_ALREADY_EXISTS); 358 AssertReturn(!(cb & PAGE_OFFSET_MASK), VERR_INVALID_PARAMETER); 359 AssertReturn(cb, VERR_INVALID_PARAMETER); 360 361 const uint32_t cPages = cb >> PAGE_SHIFT; 362 AssertLogRelReturn((RTGCPHYS)cPages << PAGE_SHIFT == cb, VERR_INVALID_PARAMETER); 363 AssertLogRelReturn(cPages <= INT32_MAX / 2, VERR_NO_MEMORY); 364 365 /* 366 * Try reserve and allocate the backing memory first as this is what is 367 * most likely to fail. 368 */ 369 int rc = MMR3AdjustFixedReservation(pVM, cPages, pszDesc); 370 if (RT_FAILURE(rc)) 371 return rc; 372 373 void *pvPages; 374 PSUPPAGE paPages = (PSUPPAGE)RTMemTmpAlloc(cPages * sizeof(SUPPAGE)); 375 if (RT_SUCCESS(rc)) 376 rc = SUPPageAllocLockedEx(cPages, &pvPages, paPages); 377 if (RT_SUCCESS(rc)) 378 { 379 /* 380 * Create the MMIO2 range record for it. 381 */ 382 const size_t cbRange = RT_OFFSETOF(PGMMMIO2RANGE, RamRange.aPages[cPages]); 383 PPGMMMIO2RANGE pNew; 384 rc = MMR3HyperAllocOnceNoRel(pVM, cbRange, 0, MM_TAG_PGM_PHYS, (void **)&pNew); 385 AssertLogRelMsgRC(rc, ("cbRamRange=%zu\n", cbRange)); 386 if (RT_SUCCESS(rc)) 387 { 388 pNew->pDevInsR3 = pDevIns; 389 pNew->pvR3 = pvPages; 390 //pNew->pNext = NULL; 391 //pNew->fMapped = false; 392 //pNew->fOverlapping = false; 393 pNew->iRegion = iRegion; 394 pNew->RamRange.GCPhys = NIL_RTGCPHYS; 395 pNew->RamRange.GCPhysLast = NIL_RTGCPHYS; 396 pNew->RamRange.pszDesc = pszDesc; 397 pNew->RamRange.cb = cb; 398 //pNew->RamRange.fFlags = 0; 399 400 pNew->RamRange.pvHC = pvPages; ///@todo remove this 401 pNew->RamRange.pavHCChunkHC = NULL; ///@todo remove this 402 pNew->RamRange.pavHCChunkGC = 0; ///@todo remove this 403 404 uint32_t iPage = cPages; 405 while (iPage-- > 0) 406 { 407 PGM_PAGE_INIT(&pNew->RamRange.aPages[iPage], 408 paPages[iPage].Phys & X86_PTE_PAE_PG_MASK, NIL_GMM_PAGEID, 409 PGMPAGETYPE_MMIO2, PGM_PAGE_STATE_ALLOCATED); 410 } 411 412 /* 413 * Link it into the list. 414 * Since there is no particular order, just push it. 415 */ 416 pNew->pNextR3 = pVM->pgm.s.pMmio2RangesR3; 417 pVM->pgm.s.pMmio2RangesR3 = pNew; 418 419 *ppv = pvPages; 420 RTMemTmpFree(paPages); 421 return VINF_SUCCESS; 422 } 423 424 SUPPageFreeLocked(pvPages, cPages); 425 } 426 RTMemTmpFree(paPages); 427 MMR3AdjustFixedReservation(pVM, -cPages, pszDesc); 428 return rc; 429 } 430 431 432 /** 433 * Deregisters and frees a MMIO2 region. 434 * 435 * Any physical (and virtual) access handlers registered for the region must 436 * be deregistered before calling this function. 437 * 438 * @returns VBox status code. 439 * @param pVM Pointer to the shared VM structure. 440 * @param pDevIns The device instance owning the region. 441 * @param iRegion The region. If it's UINT32_MAX it'll be a wildcard match. 442 */ 443 PDMR3DECL(int) PGMR3PhysMMIO2Deregister(PVM pVM, PPDMDEVINS pDevIns, uint32_t iRegion) 444 { 445 /* 446 * Validate input. 
447 */ 448 VM_ASSERT_EMT_RETURN(pVM, VERR_VM_THREAD_NOT_EMT); 449 AssertPtrReturn(pDevIns, VERR_INVALID_PARAMETER); 450 AssertReturn(iRegion <= UINT8_MAX || iRegion == UINT32_MAX, VERR_INVALID_PARAMETER); 451 452 int rc = VINF_SUCCESS; 453 unsigned cFound = 0; 454 PPGMMMIO2RANGE pPrev = NULL; 455 PPGMMMIO2RANGE pCur = pVM->pgm.s.pMmio2RangesR3; 456 while (pCur) 457 { 458 if ( pCur->pDevInsR3 == pDevIns 459 && ( iRegion == UINT32_MAX 460 || pCur->iRegion == iRegion)) 461 { 462 cFound++; 463 464 /* 465 * Unmap it if it's mapped. 466 */ 467 if (pCur->fMapped) 468 { 469 int rc2 = PGMR3PhysMMIO2Unmap(pVM, pCur->pDevInsR3, pCur->iRegion, pCur->RamRange.GCPhys); 470 AssertRC(rc2); 471 if (RT_FAILURE(rc2) && RT_SUCCESS(rc)) 472 rc = rc2; 473 } 474 475 /* 476 * Unlink it 477 */ 478 PPGMMMIO2RANGE pNext = pCur->pNextR3; 479 if (pPrev) 480 pPrev->pNextR3 = pNext; 481 else 482 pVM->pgm.s.pMmio2RangesR3 = pNext; 483 pCur->pNextR3 = NULL; 484 485 /* 486 * Free the memory. 487 */ 488 int rc2 = SUPPageFreeLocked(pCur->pvR3, pCur->RamRange.cb >> PAGE_SHIFT); 489 AssertRC(rc2); 490 if (RT_FAILURE(rc2) && RT_SUCCESS(rc)) 491 rc = rc2; 492 493 rc2 = MMR3AdjustFixedReservation(pVM, -(pCur->RamRange.cb >> PAGE_SHIFT), pCur->RamRange.pszDesc); 494 AssertRC(rc2); 495 if (RT_FAILURE(rc2) && RT_SUCCESS(rc)) 496 rc = rc2; 497 498 /* we're leaking hyper memory here if done at runtime. */ 499 Assert( VMR3GetState(pVM) == VMSTATE_OFF 500 || VMR3GetState(pVM) == VMSTATE_DESTROYING 501 || VMR3GetState(pVM) == VMSTATE_TERMINATED); 502 /*rc = MMHyperFree(pVM, pCur); 503 AssertRCReturn(rc, rc); - not safe, see the alloc call. */ 504 505 /* next */ 506 pCur = pNext; 507 } 508 else 509 { 510 pPrev = pCur; 511 pCur = pCur->pNextR3; 512 } 513 } 514 515 return !cFound && iRegion != UINT32_MAX ? VERR_NOT_FOUND : rc; 516 } 517 518 519 /** 520 * Maps a MMIO2 region. 298 521 * 299 522 * This is done when a guest / the bios / state loading changes the 300 523 * PCI config. The replacing of base memory has the same restrictions 301 524 * as during registration, of course. 302 */ 303 PDMR3DECL(int) PGMR3PhysMMIO2Relocate(PVM pVM, PPDMDEVINS pDevIns, RTGCPHYS GCPhysOld, RTGCPHYS GCPhysNew) 304 { 305 return -1; 306 } 307 308 309 /** 310 * Deregisters and frees a MMIO2 region. 311 * 312 * Any physical (and virtual) access handlers registered for the region must 313 * be deregistered before calling this function. 314 */ 315 PDMR3DECL(int) PGMR3PhysMMIO2Deregister(PVM pVM, RTGCPHYS GCPhys, void *pv) 316 { 317 return -1; 525 * 526 * @returns VBox status code. 527 * 528 * @param pVM Pointer to the shared VM structure. 
529 * @param pDevIns The 530 */ 531 PDMR3DECL(int) PGMR3PhysMMIO2Map(PVM pVM, PPDMDEVINS pDevIns, uint32_t iRegion, RTGCPHYS GCPhys) 532 { 533 /* 534 * Validate input 535 */ 536 VM_ASSERT_EMT_RETURN(pVM, VERR_VM_THREAD_NOT_EMT); 537 AssertPtrReturn(pDevIns, VERR_INVALID_PARAMETER); 538 AssertReturn(iRegion <= UINT8_MAX, VERR_INVALID_PARAMETER); 539 AssertReturn(GCPhys != NIL_RTGCPHYS, VERR_INVALID_PARAMETER); 540 AssertReturn(GCPhys != 0, VERR_INVALID_PARAMETER); 541 AssertReturn(!(GCPhys & PAGE_OFFSET_MASK), VERR_INVALID_PARAMETER); 542 543 PPGMMMIO2RANGE pCur = pgmR3PhysMMIO2Find(pVM, pDevIns, iRegion); 544 AssertReturn(pCur, VERR_NOT_FOUND); 545 AssertReturn(!pCur->fMapped, VERR_WRONG_ORDER); 546 Assert(pCur->RamRange.GCPhys == NIL_RTGCPHYS); 547 Assert(pCur->RamRange.GCPhysLast == NIL_RTGCPHYS); 548 549 const RTGCPHYS GCPhysLast = GCPhys + pCur->RamRange.cb - 1; 550 AssertReturn(GCPhysLast > GCPhys, VERR_INVALID_PARAMETER); 551 552 /* 553 * Find our location in the ram range list, checking for 554 * restriction we don't bother implementing yet (partially overlapping). 555 */ 556 bool fRamExists = false; 557 PPGMRAMRANGE pRamPrev = NULL; 558 PPGMRAMRANGE pRam = pVM->pgm.s.pRamRangesR3; 559 while (pRam && GCPhysLast >= pRam->GCPhys) 560 { 561 if ( GCPhys <= pRam->GCPhysLast 562 && GCPhysLast >= pRam->GCPhys) 563 { 564 /* completely within? */ 565 AssertLogRelMsgReturn( GCPhys >= pRam->GCPhys 566 && GCPhysLast <= pRam->GCPhysLast, 567 ("%RGp-%RGp (MMIO2/%s) falls partly outside %RGp-%RGp (%s)\n", 568 GCPhys, GCPhysLast, pCur->RamRange.pszDesc, 569 pRam->GCPhys, pRam->GCPhysLast, pRam->pszDesc), 570 VERR_PGM_RAM_CONFLICT); 571 fRamExists = true; 572 break; 573 } 574 575 /* next */ 576 pRamPrev = pRam; 577 pRam = pRam->pNextR3; 578 } 579 if (fRamExists) 580 { 581 PPGMPAGE pPage = &pRam->aPages[(GCPhys - pRam->GCPhys) >> PAGE_SHIFT]; 582 uint32_t cPagesLeft = pCur->RamRange.cb >> PAGE_SHIFT; 583 while (cPagesLeft-- > 0) 584 { 585 AssertLogRelMsgReturn(PGM_PAGE_GET_TYPE(pPage) == PGMPAGETYPE_RAM, 586 ("%RGp isn't a RAM page (%d) - mapping %RGp-%RGp (MMIO2/%s).\n", 587 GCPhys, PGM_PAGE_GET_TYPE(pPage), GCPhys, GCPhysLast, pCur->RamRange.pszDesc), 588 VERR_PGM_RAM_CONFLICT); 589 pPage++; 590 } 591 } 592 Log(("PGMR3PhysMMIO2Map: %RGp-%RGp fRamExists=%RTbool %s\n", 593 GCPhys, GCPhysLast, fRamExists, pCur->RamRange.pszDesc)); 594 595 /* 596 * Make the changes. 597 */ 598 pgmLock(pVM); 599 600 pCur->RamRange.GCPhys = GCPhys; 601 pCur->RamRange.GCPhysLast = GCPhysLast; 602 pCur->fMapped = true; 603 pCur->fOverlapping = fRamExists; 604 605 if (fRamExists) 606 { 607 /* replace the pages, freeing all present RAM pages. */ 608 PPGMPAGE pPageSrc = &pCur->RamRange.aPages[0]; 609 PPGMPAGE pPageDst = &pRam->aPages[(GCPhys - pRam->GCPhys) >> PAGE_SHIFT]; 610 uint32_t cPagesLeft = pCur->RamRange.cb >> PAGE_SHIFT; 611 while (cPagesLeft-- > 0) 612 { 613 pgmPhysFreePage(pVM, pPageDst, GCPhys); 614 615 RTHCPHYS const HCPhys = PGM_PAGE_GET_HCPHYS(pPageSrc); 616 PGM_PAGE_SET_HCPHYS(pPageDst, HCPhys); 617 PGM_PAGE_SET_TYPE(pPageDst, PGMPAGETYPE_MMIO2); 618 PGM_PAGE_SET_STATE(pPageDst, PGM_PAGE_STATE_ALLOCATED); 619 620 GCPhys += PAGE_SIZE; 621 pPageSrc++; 622 pPageDst++; 623 } 624 } 625 else 626 { 627 /* link in the ram range */ 628 pgmR3PhysLinkRamRange(pVM, &pCur->RamRange, pRamPrev); 629 REMR3NotifyPhysRamRegister(pVM, GCPhys, pCur->RamRange.cb, 0); 630 } 631 632 pgmUnlock(pVM); 633 634 return VINF_SUCCESS; 635 } 636 637 638 /** 639 * Unmaps a MMIO2 region. 
640 * 641 * This is done when a guest / the bios / state loading changes the 642 * PCI config. The replacing of base memory has the same restrictions 643 * as during registration, of course. 644 */ 645 PDMR3DECL(int) PGMR3PhysMMIO2Unmap(PVM pVM, PPDMDEVINS pDevIns, uint32_t iRegion, RTGCPHYS GCPhys) 646 { 647 /* 648 * Validate input 649 */ 650 VM_ASSERT_EMT_RETURN(pVM, VERR_VM_THREAD_NOT_EMT); 651 AssertPtrReturn(pDevIns, VERR_INVALID_PARAMETER); 652 AssertReturn(iRegion <= UINT8_MAX, VERR_INVALID_PARAMETER); 653 AssertReturn(GCPhys != NIL_RTGCPHYS, VERR_INVALID_PARAMETER); 654 AssertReturn(GCPhys != 0, VERR_INVALID_PARAMETER); 655 AssertReturn(!(GCPhys & PAGE_OFFSET_MASK), VERR_INVALID_PARAMETER); 656 657 PPGMMMIO2RANGE pCur = pgmR3PhysMMIO2Find(pVM, pDevIns, iRegion); 658 AssertReturn(pCur, VERR_NOT_FOUND); 659 AssertReturn(pCur->fMapped, VERR_WRONG_ORDER); 660 AssertReturn(pCur->RamRange.GCPhys == GCPhys, VERR_INVALID_PARAMETER); 661 Assert(pCur->RamRange.GCPhysLast != NIL_RTGCPHYS); 662 663 Log(("PGMR3PhysMMIO2Unmap: %RGp-%RGp %s\n", 664 pCur->RamRange.GCPhys, pCur->RamRange.GCPhysLast, pCur->RamRange.pszDesc)); 665 666 /* 667 * Unmap it. 668 */ 669 pgmLock(pVM); 670 671 if (pCur->fOverlapping) 672 { 673 /* Restore the RAM pages we've replaced. */ 674 PPGMRAMRANGE pRam = pVM->pgm.s.pRamRangesR3; 675 while (pRam->GCPhys > pCur->RamRange.GCPhysLast) 676 pRam = pRam->pNextR3; 677 678 RTHCPHYS const HCPhysZeroPg = pVM->pgm.s.HCPhysZeroPg; 679 Assert(HCPhysZeroPg != 0 && HCPhysZeroPg != NIL_RTHCPHYS); 680 PPGMPAGE pPageDst = &pRam->aPages[(pCur->RamRange.GCPhys - pRam->GCPhys) >> PAGE_SHIFT]; 681 uint32_t cPagesLeft = pCur->RamRange.cb >> PAGE_SHIFT; 682 while (cPagesLeft-- > 0) 683 { 684 PGM_PAGE_SET_HCPHYS(pPageDst, pVM->pgm.s.HCPhysZeroPg); 685 PGM_PAGE_SET_TYPE(pPageDst, PGMPAGETYPE_RAM); 686 PGM_PAGE_SET_STATE(pPageDst, PGM_PAGE_STATE_ZERO); 687 688 pPageDst++; 689 } 690 } 691 else 692 { 693 REMR3NotifyPhysReserve(pVM, pCur->RamRange.GCPhys, pCur->RamRange.cb); 694 pgmR3PhysUnlinkRamRange(pVM, &pCur->RamRange); 695 } 696 697 pCur->RamRange.GCPhys = NIL_RTGCPHYS; 698 pCur->RamRange.GCPhysLast = NIL_RTGCPHYS; 699 pCur->fOverlapping = false; 700 pCur->fMapped = false; 701 702 pgmUnlock(pVM); 703 704 return VINF_SUCCESS; 705 } 706 707 708 /** 709 * Checks if the given address is an MMIO2 base address or not. 710 * 711 * @returns true/false accordingly. 712 * @param pVM Pointer to the shared VM structure. 713 * @param pDevIns The owner of the memory, optional. 714 * @param GCPhys The address to check. 715 */ 716 PDMR3DECL(bool) PGMR3PhysMMIO2IsBase(PVM pVM, PPDMDEVINS pDevIns, RTGCPHYS GCPhys) 717 { 718 /* 719 * Validate input 720 */ 721 VM_ASSERT_EMT_RETURN(pVM, VERR_VM_THREAD_NOT_EMT); 722 AssertPtrReturn(pDevIns, VERR_INVALID_PARAMETER); 723 AssertReturn(GCPhys != NIL_RTGCPHYS, VERR_INVALID_PARAMETER); 724 AssertReturn(GCPhys != 0, VERR_INVALID_PARAMETER); 725 AssertReturn(!(GCPhys & PAGE_OFFSET_MASK), VERR_INVALID_PARAMETER); 726 727 /* 728 * Search the list. 729 */ 730 for (PPGMMMIO2RANGE pCur = pVM->pgm.s.pMmio2RangesR3; pCur; pCur = pCur->pNextR3) 731 if (pCur->RamRange.GCPhys == GCPhys) 732 { 733 Assert(pCur->fMapped); 734 return true; 735 } 736 return false; 737 } 738 739 740 /** 741 * Gets the HC physical address of a page in the MMIO2 region. 742 * 743 * This is API is intended for MMHyper and shouldn't be called 744 * by anyone else... 745 * 746 * @returns VBox status code. 747 * @param pVM Pointer to the shared VM structure. 
748 * @param pDevIns The owner of the memory, optional. 749 * @param iRegion The region. 750 * @param off The page expressed an offset into the MMIO2 region. 751 * @param pHCPhys Where to store the result. 752 */ 753 PDMR3DECL(int) PGMR3PhysMMIO2GetHCPhys(PVM pVM, PPDMDEVINS pDevIns, uint32_t iRegion, RTGCPHYS off, PRTHCPHYS pHCPhys) 754 { 755 /* 756 * Validate input 757 */ 758 VM_ASSERT_EMT_RETURN(pVM, VERR_VM_THREAD_NOT_EMT); 759 AssertPtrReturn(pDevIns, VERR_INVALID_PARAMETER); 760 AssertReturn(iRegion <= UINT8_MAX, VERR_INVALID_PARAMETER); 761 762 PPGMMMIO2RANGE pCur = pgmR3PhysMMIO2Find(pVM, pDevIns, iRegion); 763 AssertReturn(pCur, VERR_NOT_FOUND); 764 AssertReturn(off < pCur->RamRange.cb, VERR_INVALID_PARAMETER); 765 766 PCPGMPAGE pPage = &pCur->RamRange.aPages[off >> PAGE_SHIFT]; 767 *pHCPhys = PGM_PAGE_GET_HCPHYS(pPage); 768 return VINF_SUCCESS; 318 769 } 319 770 … … 431 882 VERR_PGM_RAM_CONFLICT); 432 883 Assert(PGM_PAGE_IS_ZERO(pPage)); 884 pPage++; 433 885 } 434 886 } … … 616 1068 } 617 1069 618 pgmR3PhysUnlinkRamRange (pVM, pRamNew, pRamPrev);1070 pgmR3PhysUnlinkRamRange2(pVM, pRamNew, pRamPrev); 619 1071 if (pRamNew) 620 1072 MMHyperFree(pVM, pRamNew); … … 1278 1730 1279 1731 #endif /* !VBOX_WITH_NEW_PHYS_CODE */ 1280 1281 /**1282 * Interface MMIO handler relocation calls.1283 *1284 * It relocates an existing physical memory range with PGM.1285 *1286 * @returns VBox status.1287 * @param pVM The VM handle.1288 * @param GCPhysOld Previous GC physical address of the RAM range. (page aligned)1289 * @param GCPhysNew New GC physical address of the RAM range. (page aligned)1290 * @param cb Size of the RAM range. (page aligned)1291 */1292 PGMR3DECL(int) PGMR3PhysRelocate(PVM pVM, RTGCPHYS GCPhysOld, RTGCPHYS GCPhysNew, size_t cb)1293 {1294 /*1295 * Validate input.1296 * (Not so important because callers are only MMR3PhysRelocate(),1297 * but anyway...)1298 */1299 Log(("PGMR3PhysRelocate Old %VGp New %VGp (%#x bytes)\n", GCPhysOld, GCPhysNew, cb));1300 1301 Assert(RT_ALIGN_Z(cb, PAGE_SIZE) == cb && cb);1302 Assert(RT_ALIGN_T(GCPhysOld, PAGE_SIZE, RTGCPHYS) == GCPhysOld);1303 Assert(RT_ALIGN_T(GCPhysNew, PAGE_SIZE, RTGCPHYS) == GCPhysNew);1304 RTGCPHYS GCPhysLast;1305 GCPhysLast = GCPhysOld + (cb - 1);1306 if (GCPhysLast < GCPhysOld)1307 {1308 AssertMsgFailed(("The old range wraps! GCPhys=%VGp cb=%#x\n", GCPhysOld, cb));1309 return VERR_INVALID_PARAMETER;1310 }1311 GCPhysLast = GCPhysNew + (cb - 1);1312 if (GCPhysLast < GCPhysNew)1313 {1314 AssertMsgFailed(("The new range wraps! 
GCPhys=%VGp cb=%#x\n", GCPhysNew, cb));1315 return VERR_INVALID_PARAMETER;1316 }1317 1318 /*1319 * Find and remove old range location.1320 */1321 pgmLock(pVM);1322 PPGMRAMRANGE pPrev = NULL;1323 PPGMRAMRANGE pCur = pVM->pgm.s.pRamRangesR3;1324 while (pCur)1325 {1326 if (pCur->GCPhys == GCPhysOld && pCur->cb == cb)1327 break;1328 1329 /* next */1330 pPrev = pCur;1331 pCur = pCur->pNextR3;1332 }1333 if (pPrev)1334 {1335 pPrev->pNextR3 = pCur->pNextR3;1336 pPrev->pNextR0 = pCur->pNextR0;1337 pPrev->pNextGC = pCur->pNextGC;1338 }1339 else1340 {1341 pVM->pgm.s.pRamRangesR3 = pCur->pNextR3;1342 pVM->pgm.s.pRamRangesR0 = pCur->pNextR0;1343 pVM->pgm.s.pRamRangesGC = pCur->pNextGC;1344 }1345 1346 /*1347 * Update the range.1348 */1349 pCur->GCPhys = GCPhysNew;1350 pCur->GCPhysLast= GCPhysLast;1351 PPGMRAMRANGE pNew = pCur;1352 1353 /*1354 * Find range location and check for conflicts.1355 */1356 pPrev = NULL;1357 pCur = pVM->pgm.s.pRamRangesR3;1358 while (pCur)1359 {1360 if (GCPhysNew <= pCur->GCPhysLast && GCPhysLast >= pCur->GCPhys)1361 {1362 AssertMsgFailed(("Conflict! This cannot happen!\n"));1363 pgmUnlock(pVM);1364 return VERR_PGM_RAM_CONFLICT;1365 }1366 if (GCPhysLast < pCur->GCPhys)1367 break;1368 1369 /* next */1370 pPrev = pCur;1371 pCur = pCur->pNextR3;1372 }1373 1374 /*1375 * Reinsert the RAM range.1376 */1377 pNew->pNextR3 = pCur;1378 pNew->pNextR0 = pCur ? MMHyperCCToR0(pVM, pCur) : 0;1379 pNew->pNextGC = pCur ? MMHyperCCToGC(pVM, pCur) : 0;1380 if (pPrev)1381 {1382 pPrev->pNextR3 = pNew;1383 pPrev->pNextR0 = MMHyperCCToR0(pVM, pNew);1384 pPrev->pNextGC = MMHyperCCToGC(pVM, pNew);1385 }1386 else1387 {1388 pVM->pgm.s.pRamRangesR3 = pNew;1389 pVM->pgm.s.pRamRangesR0 = MMHyperCCToR0(pVM, pNew);1390 pVM->pgm.s.pRamRangesGC = MMHyperCCToGC(pVM, pNew);1391 }1392 1393 pgmUnlock(pVM);1394 return VINF_SUCCESS;1395 }1396 1732 1397 1733 -
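PGMR3PhysMMIO2Map above walks the address-sorted RAM range list and accepts only two outcomes: the MMIO2 range is disjoint from all existing RAM and gets linked in as a new range, or it lies completely inside one existing range whose pages are plain RAM and its pages overlay them. Any partial overlap fails with VERR_PGM_RAM_CONFLICT. The sketch below reproduces that classification over a plain array so the boundary conditions are easy to check; the types and names are invented for the example and are not VirtualBox code.

#include <stdint.h>

typedef enum { MAP_AS_NEW_RANGE, MAP_AS_OVERLAY, MAP_CONFLICT } MAPVERDICT;

typedef struct { uint64_t first, last; } RANGE;  /* inclusive [first, last] */

/* Classify a candidate MMIO2 mapping against RAM ranges sorted by address:
 * disjoint -> new range, fully contained -> overlay, otherwise -> conflict. */
static MAPVERDICT classifyMapping(const RANGE *paRam, unsigned cRam, RANGE Mmio2)
{
    for (unsigned i = 0; i < cRam && Mmio2.last >= paRam[i].first; i++)
        if (Mmio2.first <= paRam[i].last)   /* any overlap with this range? */
            return    Mmio2.first >= paRam[i].first && Mmio2.last <= paRam[i].last
                    ? MAP_AS_OVERLAY
                    : MAP_CONFLICT;
    return MAP_AS_NEW_RANGE;
}

For instance, a two-page MMIO2 range placed wholly inside an existing RAM range classifies as an overlay, while one straddling the end of that range is rejected, which matches the AssertLogRelMsgReturn in the function above.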
trunk/src/VBox/VMM/VMMAll/MMAll.cpp

r6625 r7635 69 69 70 70 case MMLOOKUPHYPERTYPE_GCPHYS: /* (for now we'll not allow these kind of conversions) */ 71 case MMLOOKUPHYPERTYPE_MMIO2: 71 72 case MMLOOKUPHYPERTYPE_DYNAMIC: 72 73 break; … … 535 536 536 537 case MMLOOKUPHYPERTYPE_GCPHYS: /* (for now we'll not allow these kind of conversions) */ 538 case MMLOOKUPHYPERTYPE_MMIO2: 537 539 case MMLOOKUPHYPERTYPE_DYNAMIC: 538 540 break; -
trunk/src/VBox/VMM/VMMAll/PGMAllPhys.cpp
r7629 r7635 240 240 } 241 241 pgmUnlock(pVM); 242 } 243 244 245 /** 246 * Frees the specified RAM page. 247 * 248 * This is used by ballooning and remapping MMIO2. 249 * 250 * @param pVM Pointer to the shared VM structure. 251 * @param pPage Pointer to the page structure. 252 * @param GCPhys The guest physical address of the page, if applicable. 253 */ 254 void pgmPhysFreePage(PVM pVM, PPGMPAGE pPage, RTGCPHYS GCPhys) 255 { 256 AssertFatal(PGM_PAGE_GET_TYPE(pPage) == PGMPAGETYPE_RAM); 257 258 /** @todo implement this... */ 259 AssertFatalFailed(); 242 260 } 243 261 -
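The new pgmPhysFreePage is only a placeholder at this point (it asserts), but its intended role is visible in PGMR3PhysMMIO2Map and PGMR3PhysMMIO2Unmap: when an MMIO2 range overlays existing RAM, each RAM page is freed and the page structure is repointed at the MMIO2 backing page; on unmap the pages revert to the shared zero page in the ZERO state. The toy model below shows that replace/restore round trip on a simplified page descriptor; everything here is invented for illustration and does not use the real PGMPAGE layout.

#include <stdint.h>

/* Simplified page descriptor: host physical address, type and state. */
typedef enum { PAGETYPE_RAM, PAGETYPE_MMIO2 } PAGETYPE;
typedef enum { PAGESTATE_ZERO, PAGESTATE_ALLOCATED } PAGESTATE;
typedef struct { uint64_t HCPhys; PAGETYPE enmType; PAGESTATE enmState; } MODELPAGE;

/* Stand-in for pgmPhysFreePage: hand the backing page back to the allocator. */
static void modelFreeRamPage(MODELPAGE *pPage)
{
    pPage->HCPhys = 0;
}

/* Map: overlay RAM pages with the MMIO2 backing pages (cf. PGMR3PhysMMIO2Map). */
static void modelOverlayPages(MODELPAGE *paDst, const uint64_t *paMmio2HCPhys, unsigned cPages)
{
    for (unsigned i = 0; i < cPages; i++)
    {
        modelFreeRamPage(&paDst[i]);
        paDst[i].HCPhys   = paMmio2HCPhys[i];
        paDst[i].enmType  = PAGETYPE_MMIO2;
        paDst[i].enmState = PAGESTATE_ALLOCATED;
    }
}

/* Unmap: restore the pages to zero-page backed RAM (cf. PGMR3PhysMMIO2Unmap). */
static void modelRestorePages(MODELPAGE *paDst, uint64_t HCPhysZeroPg, unsigned cPages)
{
    for (unsigned i = 0; i < cPages; i++)
    {
        paDst[i].HCPhys   = HCPhysZeroPg;
        paDst[i].enmType  = PAGETYPE_RAM;
        paDst[i].enmState = PAGESTATE_ZERO;
    }
}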
trunk/src/VBox/VMM/testcase/tstVMStructGC.cpp
r7050 r7635 563 563 GEN_CHECK_OFF(PGMROMRANGE, aPages); 564 564 GEN_CHECK_OFF(PGMROMRANGE, aPages[1]); 565 GEN_CHECK_SIZE(PGMMMIO2RANGE); 566 GEN_CHECK_OFF(PGMMMIO2RANGE, pDevInsR3); 567 GEN_CHECK_OFF(PGMMMIO2RANGE, pNextR3); 568 GEN_CHECK_OFF(PGMMMIO2RANGE, fMapped); 569 GEN_CHECK_OFF(PGMMMIO2RANGE, fOverlapping); 570 GEN_CHECK_OFF(PGMMMIO2RANGE, iRegion); 571 GEN_CHECK_OFF(PGMMMIO2RANGE, RamRange); 565 572 GEN_CHECK_SIZE(PGMTREES); 566 573 GEN_CHECK_OFF(PGMTREES, PhysHandlers); -
trunk/src/VBox/VMM/testcase/tstVMStructSize.cpp
r7629 r7635 210 210 CHECK_SIZE(PGMPAGE, 16); 211 211 CHECK_MEMBER_ALIGNMENT(PGMRAMRANGE, aPages, 16); 212 CHECK_MEMBER_ALIGNMENT(PGMMMIO2RANGE, RamRange, 16); 212 213 213 214 /* misc */ -
trunk/src/recompiler/VBoxREMWrapper.cpp
r7617 r7635 2041 2041 } 2042 2042 2043 REM DECL(bool) REMR3IsPageAccessHandled(PVM pVM, RTGCPHYS GCPhys)2043 REMR3DECL(bool) REMR3IsPageAccessHandled(PVM pVM, RTGCPHYS GCPhys) 2044 2044 { 2045 2045 #ifdef USE_REM_STUBS -
trunk/src/recompiler/VBoxRecompiler.c
r7616 r7635 2611 2611 2612 2612 /** 2613 * Notification about a successful MMR3PhysRegister() call.2613 * Notification about a successful memory deregistration or reservation. 2614 2614 * 2615 2615 * @param pVM VM Handle. 2616 2616 * @param GCPhys Start physical address. 2617 2617 * @param cb The size of the range. 2618 * @todo Rename to REMR3NotifyPhysRamDeregister (for MMIO2) as we won't 2619 * reserve any memory soon. 2618 2620 */ 2619 2621 REMR3DECL(void) REMR3NotifyPhysReserve(PVM pVM, RTGCPHYS GCPhys, RTUINT cb) … … 2700 2702 pVM->rem.s.fIgnoreAll = true; 2701 2703 2704 /** @todo this isn't right, MMIO can (in theory) be restored as RAM. */ 2702 2705 if (enmType == PGMPHYSHANDLERTYPE_MMIO) 2703 2706 cpu_register_physical_memory(GCPhys, cb, IO_MEM_UNASSIGNED); … … 2711 2714 else 2712 2715 { 2713 /* This is not perfect, but it'll do for PD monitoring... */2714 Assert(cb == PAGE_SIZE);2715 2716 Assert(RT_ALIGN_T(GCPhys, PAGE_SIZE, RTGCPHYS) == GCPhys); 2717 Assert(RT_ALIGN_T(cb, PAGE_SIZE, RTGCPHYS) == cb); 2716 2718 cpu_register_physical_memory(GCPhys, cb, GCPhys); 2717 2719 } … … 2785 2787 * @remark This function will only work correctly in VBOX_STRICT builds! 2786 2788 */ 2787 REM DECL(bool) REMR3IsPageAccessHandled(PVM pVM, RTGCPHYS GCPhys)2789 REMR3DECL(bool) REMR3IsPageAccessHandled(PVM pVM, RTGCPHYS GCPhys) 2788 2790 { 2789 2791 #ifdef VBOX_STRICT