Changeset 92162 in vbox for trunk/src/VBox/VMM/VMMR3
- Timestamp: Oct 31, 2021 11:34:31 PM
- Location: trunk/src/VBox/VMM/VMMR3
- Files: 7 edited
trunk/src/VBox/VMM/VMMR3/IEMR3.cpp
r91263 → r92162

     {
         PVMCPU pVCpu0 = pVM->apCpusR3[0];
-        int rc = PGMR3HandlerPhysicalTypeRegister(pVM, PGMPHYSHANDLERKIND_ALL, iemVmxApicAccessPageHandler,
+        int rc = PGMR3HandlerPhysicalTypeRegister(pVM, PGMPHYSHANDLERKIND_ALL, false /*fKeepPgmLock*/,
+                                                  iemVmxApicAccessPageHandler,
                                                   NULL /* pszModR0 */,
                                                   "iemVmxApicAccessPageHandler", NULL /* pszPfHandlerR0 */,
trunk/src/VBox/VMM/VMMR3/IOM.cpp
r82968 → r92162

      * Register the MMIO access handler type.
      */
-    rc = PGMR3HandlerPhysicalTypeRegister(pVM, PGMPHYSHANDLERKIND_MMIO,
+    rc = PGMR3HandlerPhysicalTypeRegister(pVM, PGMPHYSHANDLERKIND_MMIO, false /*fKeepPgmLock*/,
                                           iomMmioHandlerNew,
                                           NULL, "iomMmioHandlerNew", "iomMmioPfHandlerNew",
trunk/src/VBox/VMM/VMMR3/PDMDevHlp.cpp
r92071 → r92162

     PDMDEV_ASSERT_DEVINS(pDevIns);
     PVM pVM = pDevIns->Internal.s.pVMR3;
-    LogFlow(("pdmR3DevHlp_Mmio2GetMappingAddress: caller='%s'/%d: hRegion=%#RX6 r\n", pDevIns->pReg->szName, pDevIns->iInstance, hRegion));
+    LogFlow(("pdmR3DevHlp_Mmio2GetMappingAddress: caller='%s'/%d: hRegion=%#RX64\n", pDevIns->pReg->szName, pDevIns->iInstance, hRegion));
     VM_ASSERT_EMT0_RETURN(pVM, NIL_RTGCPHYS);
 
…
     return GCPhys;
 }
+
+
+/** @interface_method_impl{PDMDEVHLPR3,pfnMmio2QueryAndResetDirtyBitmap} */
+static DECLCALLBACK(int) pdmR3DevHlp_Mmio2QueryAndResetDirtyBitmap(PPDMDEVINS pDevIns, PGMMMIO2HANDLE hRegion,
+                                                                   void *pvBitmap, size_t cbBitmap)
+{
+    PDMDEV_ASSERT_DEVINS(pDevIns);
+    PVM pVM = pDevIns->Internal.s.pVMR3;
+    LogFlow(("pdmR3DevHlp_Mmio2QueryAndResetDirtyBitmap: caller='%s'/%d: hRegion=%#RX64 pvBitmap=%p cbBitmap=%#zx\n",
+             pDevIns->pReg->szName, pDevIns->iInstance, hRegion, pvBitmap, cbBitmap));
+
+    int rc = PGMR3PhysMmio2QueryAndResetDirtyBitmap(pVM, pDevIns, hRegion, pvBitmap, cbBitmap);
+
+    LogFlow(("pdmR3DevHlp_Mmio2QueryAndResetDirtyBitmap: caller='%s'/%d: returns %Rrc\n", pDevIns->pReg->szName, pDevIns->iInstance, rc));
+    return rc;
+}
+
+
+/** @interface_method_impl{PDMDEVHLPR3,pfnMmio2ControlDirtyPageTracking} */
+static DECLCALLBACK(int) pdmR3DevHlp_Mmio2ControlDirtyPageTracking(PPDMDEVINS pDevIns, PGMMMIO2HANDLE hRegion, bool fEnabled)
+{
+    PDMDEV_ASSERT_DEVINS(pDevIns);
+    PVM pVM = pDevIns->Internal.s.pVMR3;
+    LogFlow(("pdmR3DevHlp_Mmio2ControlDirtyPageTracking: caller='%s'/%d: hRegion=%#RX64 fEnabled=%RTbool\n",
+             pDevIns->pReg->szName, pDevIns->iInstance, hRegion, fEnabled));
+
+    int rc = PGMR3PhysMmio2ControlDirtyPageTracking(pVM, pDevIns, hRegion, fEnabled);
+
+    LogFlow(("pdmR3DevHlp_Mmio2ControlDirtyPageTracking: caller='%s'/%d: returns %Rrc\n", pDevIns->pReg->szName, pDevIns->iInstance, rc));
+    return rc;
+}
 
 
 /**
…
     PDMDEV_ASSERT_DEVINS(pDevIns);
     PVM pVM = pDevIns->Internal.s.pVMR3;
-    LogFlow(("pdmR3DevHlp_Mmio2ChangeRegionNo: caller='%s'/%d: hRegion=%#RX6 riNewRegion=%#x\n", pDevIns->pReg->szName, pDevIns->iInstance, hRegion, iNewRegion));
+    LogFlow(("pdmR3DevHlp_Mmio2ChangeRegionNo: caller='%s'/%d: hRegion=%#RX64 iNewRegion=%#x\n", pDevIns->pReg->szName, pDevIns->iInstance, hRegion, iNewRegion));
     VM_ASSERT_EMT0_RETURN(pVM, VERR_VM_THREAD_NOT_EMT);
…
                               pszDesc, pszDesc, phType));
 
-    int rc = PGMR3HandlerPhysicalTypeRegister(pVM, enmKind, pfnHandlerR3,
+    int rc = PGMR3HandlerPhysicalTypeRegister(pVM, enmKind, false /*fKeepPgmLock*/, pfnHandlerR3,
                                               pDevIns->pReg->pszR0Mod, pszHandlerR0, pszPfHandlerR0,
                                               pDevIns->pReg->pszRCMod, pszHandlerRC, pszPfHandlerRC,
…
     pdmR3DevHlp_Mmio2Reduce,
     pdmR3DevHlp_Mmio2GetMappingAddress,
+    pdmR3DevHlp_Mmio2QueryAndResetDirtyBitmap,
+    pdmR3DevHlp_Mmio2ControlDirtyPageTracking,
     pdmR3DevHlp_Mmio2ChangeRegionNo,
     pdmR3DevHlp_MmioMapMmio2Page,
…
     pdmR3DevHlp_Mmio2Reduce,
     pdmR3DevHlp_Mmio2GetMappingAddress,
+    pdmR3DevHlp_Mmio2QueryAndResetDirtyBitmap,
+    pdmR3DevHlp_Mmio2ControlDirtyPageTracking,
     pdmR3DevHlp_Mmio2ChangeRegionNo,
     pdmR3DevHlp_MmioMapMmio2Page,
…
     pdmR3DevHlp_Mmio2Reduce,
     pdmR3DevHlp_Mmio2GetMappingAddress,
+    pdmR3DevHlp_Mmio2QueryAndResetDirtyBitmap,
+    pdmR3DevHlp_Mmio2ControlDirtyPageTracking,
     pdmR3DevHlp_Mmio2ChangeRegionNo,
     pdmR3DevHlp_MmioMapMmio2Page,
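For orientation, a minimal device-side sketch of how the two new helpers could be consumed; the function is hypothetical and not part of this changeset. The pfnMmio2ControlDirtyPageTracking and pfnMmio2QueryAndResetDirtyBitmap member names come from the @interface_method_impl tags above, and the bitmap sizing rule is the one documented for PGMR3PhysMmio2QueryAndResetDirtyBitmap in PGMPhys.cpp below.

    #include <VBox/vmm/pdmdev.h> /* PPDMDEVINS, PGMMMIO2HANDLE, device helper table */
    #include <iprt/assert.h>

    /* Hypothetical device code: poll the dirty state of an MMIO2 region that was
       created with PGMPHYS_MMIO2_FLAGS_TRACK_DIRTY_PAGES.  pau64Bitmap/cbBitmap
       must follow the documented sizing rule (one bit per page, 8-byte multiple). */
    static int myDevPollDirtyPages(PPDMDEVINS pDevIns, PGMMMIO2HANDLE hRegion,
                                   uint64_t *pau64Bitmap, size_t cbBitmap)
    {
        /* Switch write monitoring on; a no-op if it is already enabled. */
        int rc = pDevIns->pHlpR3->pfnMmio2ControlDirtyPageTracking(pDevIns, hRegion, true /*fEnabled*/);
        AssertRCReturn(rc, rc);

        /* Fetch the dirty bits accumulated since the last call and rearm the
           write handlers in the same operation. */
        return pDevIns->pHlpR3->pfnMmio2QueryAndResetDirtyBitmap(pDevIns, hRegion, pau64Bitmap, cbBitmap);
    }

A framebuffer device would typically call something like this from its refresh timer and redraw only the pages whose bits are set.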
trunk/src/VBox/VMM/VMMR3/PGM.cpp
r92046 → r92162

      */
     if (RT_SUCCESS(rc))
-        rc = PGMR3HandlerPhysicalTypeRegister(pVM, PGMPHYSHANDLERKIND_WRITE,
+        /** @todo why isn't pgmPhysRomWriteHandler registered for ring-0? */
+        rc = PGMR3HandlerPhysicalTypeRegister(pVM, PGMPHYSHANDLERKIND_WRITE, false /*fKeepPgmLock*/,
                                               pgmPhysRomWriteHandler,
                                               NULL, NULL, "pgmPhysRomWritePfHandler",
…
                                               "ROM write protection",
                                               &pVM->pgm.s.hRomPhysHandlerType);
+
+    /*
+     * Register the physical access handler doing dirty MMIO2 tracing.
+     */
+    if (RT_SUCCESS(rc))
+        rc = PGMR3HandlerPhysicalTypeRegister(pVM, PGMPHYSHANDLERKIND_WRITE, true /*fKeepPgmLock*/,
+                                              pgmPhysMmio2WriteHandler,
+                                              NULL, "pgmPhysMmio2WriteHandler", "pgmPhysMmio2WritePfHandler",
+                                              NULL, "pgmPhysMmio2WriteHandler", "pgmPhysMmio2WritePfHandler",
+                                              "MMIO2 dirty page tracing",
+                                              &pVM->pgm.s.hMmio2DirtyPhysHandlerType);
 
     /*
trunk/src/VBox/VMM/VMMR3/PGMHandler.cpp
r91848 → r92162

  * @param   pVM             The cross context VM structure.
  * @param   enmKind         The kind of access handler.
+ * @param   fKeepPgmLock    Whether to hold the PGM lock while calling the
+ *                          handler or not.  Mainly for PGM callers.
  * @param   pfnHandlerR3    Pointer to the ring-3 handler callback.
  * @param   pfnHandlerR0    Pointer to the ring-0 handler callback.
…
  *                          safe).
  */
-VMMR3_INT_DECL(int) PGMR3HandlerPhysicalTypeRegisterEx(PVM pVM, PGMPHYSHANDLERKIND enmKind,
+VMMR3_INT_DECL(int) PGMR3HandlerPhysicalTypeRegisterEx(PVM pVM, PGMPHYSHANDLERKIND enmKind, bool fKeepPgmLock,
                                                        PFNPGMPHYSHANDLER pfnHandlerR3,
                                                        R0PTRTYPE(PFNPGMPHYSHANDLER) pfnHandlerR0,
…
     pType->uState       = enmKind == PGMPHYSHANDLERKIND_WRITE
                         ? PGM_PAGE_HNDL_PHYS_STATE_WRITE : PGM_PAGE_HNDL_PHYS_STATE_ALL;
+    pType->fKeepPgmLock = fKeepPgmLock;
     pType->pfnHandlerR3 = pfnHandlerR3;
     pType->pfnHandlerR0 = pfnHandlerR0;
…
  * @param   pVM             The cross context VM structure.
  * @param   enmKind         The kind of access handler.
+ * @param   fKeepPgmLock    Whether to hold the PGM lock while calling the
+ *                          handler or not.  Mainly for PGM callers.
  * @param   pfnHandlerR3    Pointer to the ring-3 handler callback.
  * @param   pszModR0        The name of the ring-0 module, NULL is an alias for
…
  *                          safe).
  */
-VMMR3DECL(int) PGMR3HandlerPhysicalTypeRegister(PVM pVM, PGMPHYSHANDLERKIND enmKind,
+VMMR3DECL(int) PGMR3HandlerPhysicalTypeRegister(PVM pVM, PGMPHYSHANDLERKIND enmKind, bool fKeepPgmLock,
                                                R3PTRTYPE(PFNPGMPHYSHANDLER) pfnHandlerR3,
                                                const char *pszModR0, const char *pszHandlerR0, const char *pszPfHandlerR0,
…
     }
     if (RT_SUCCESS(rc))
-        return PGMR3HandlerPhysicalTypeRegisterEx(pVM, enmKind, pfnHandlerR3,
+        return PGMR3HandlerPhysicalTypeRegisterEx(pVM, enmKind, fKeepPgmLock, pfnHandlerR3,
                                                   pfnHandlerR0, pfnPfHandlerR0, pszDesc, phType);
 }
trunk/src/VBox/VMM/VMMR3/PGMPhys.cpp
r92157 → r92162

 
 
+
+/*********************************************************************************************************************************
+*   MMIO2                                                                                                                        *
+*********************************************************************************************************************************/
+
 /**
  * Locate a MMIO2 range.
…
 
 /**
+ * Worker for PGMR3PhysMmio2ControlDirtyPageTracking and PGMR3PhysMmio2Map.
+ */
+static int pgmR3PhysMmio2EnableDirtyPageTracing(PVM pVM, PPGMREGMMIO2RANGE pFirstMmio2)
+{
+    int rc = VINF_SUCCESS;
+    for (PPGMREGMMIO2RANGE pCurMmio2 = pFirstMmio2; pCurMmio2; pCurMmio2 = pCurMmio2->pNextR3)
+    {
+        Assert(!(pCurMmio2->fFlags & PGMREGMMIO2RANGE_F_IS_TRACKING));
+        int rc2 = pgmHandlerPhysicalExRegister(pVM, pCurMmio2->pPhysHandlerR3, pCurMmio2->RamRange.GCPhys,
+                                               pCurMmio2->RamRange.GCPhysLast);
+        AssertLogRelMsgRC(rc2, ("%#RGp-%#RGp %s failed -> %Rrc\n", pCurMmio2->RamRange.GCPhys, pCurMmio2->RamRange.GCPhysLast,
+                                pCurMmio2->RamRange.pszDesc, rc2));
+        if (RT_SUCCESS(rc2))
+            pCurMmio2->fFlags |= PGMREGMMIO2RANGE_F_IS_TRACKING;
+        else if (RT_SUCCESS(rc))
+            rc = rc2;
+        if (pCurMmio2->fFlags & PGMREGMMIO2RANGE_F_LAST_CHUNK)
+            return rc;
+    }
+    AssertFailed();
+    return rc;
+}
+
+
+/**
+ * Worker for PGMR3PhysMmio2ControlDirtyPageTracking and PGMR3PhysMmio2Unmap.
+ */
+static int pgmR3PhysMmio2DisableDirtyPageTracing(PVM pVM, PPGMREGMMIO2RANGE pFirstMmio2)
+{
+    for (PPGMREGMMIO2RANGE pCurMmio2 = pFirstMmio2; pCurMmio2; pCurMmio2 = pCurMmio2->pNextR3)
+    {
+        if (pCurMmio2->fFlags & PGMREGMMIO2RANGE_F_IS_TRACKING)
+        {
+            int rc2 = pgmHandlerPhysicalExDeregister(pVM, pCurMmio2->pPhysHandlerR3);
+            AssertLogRelMsgRC(rc2, ("%#RGp-%#RGp %s failed -> %Rrc\n", pCurMmio2->RamRange.GCPhys, pCurMmio2->RamRange.GCPhysLast,
+                                    pCurMmio2->RamRange.pszDesc, rc2));
+            pCurMmio2->fFlags &= ~PGMREGMMIO2RANGE_F_IS_TRACKING;
+        }
+        if (pCurMmio2->fFlags & PGMREGMMIO2RANGE_F_LAST_CHUNK)
+            return VINF_SUCCESS;
+    }
+    AssertFailed();
+    return VINF_SUCCESS;
+}
+
+
+/**
  * Calculates the number of chunks
  *
…
  * Note! In additions, we've got a 24 bit sub-page range for MMIO2 ranges, leaving
  *       us with an absolute maximum of 16777215 pages per chunk (close to 64 GB).
+ *
+ * P.S. If we want to include a dirty bitmap, we'd have to drop down to 1040384 pages.
  */
     uint32_t cbChunk = 16U*_1M;
-    uint32_t cPagesPerChunk = 1048048; /* max ~1048059 */
-    AssertCompile(sizeof(PGMREGMMIO2RANGE) + sizeof(PGMPAGE) * 1048048 < 16U*_1M - PAGE_SIZE * 2);
+    uint32_t cPagesPerChunk = 1048000; /* max ~1048059 */
+    Assert(cPagesPerChunk / 64 * 64 == cPagesPerChunk); /* (NEM requirement) */
+    AssertCompile(sizeof(PGMREGMMIO2RANGE) + sizeof(PGMPAGE) * 1048000 < 16U*_1M - PAGE_SIZE * 2);
     AssertRelease(cPagesPerChunk <= PGM_MMIO2_MAX_PAGE_COUNT); /* See above note. */
     AssertRelease(RT_UOFFSETOF_DYN(PGMREGMMIO2RANGE, RamRange.aPages[cPagesPerChunk]) + PAGE_SIZE * 2 <= cbChunk);
…
  *                      UINT8_MAX.
  * @param   cb          The size of the region.  Must be page aligned.
+ * @param   fFlags      PGMPHYS_MMIO2_FLAGS_XXX.
+ * @param   idMmio2     The MMIO2 ID for the first chunk.
  * @param   pszDesc     The description.
  * @param   ppHeadRet   Where to return the pointer to the first
…
  * @thread  EMT
  */
-static int pgmR3PhysMmio2Create(PVM pVM, PPDMDEVINS pDevIns, uint32_t iSubDev, uint32_t iRegion, RTGCPHYS cb,
-                                const char *pszDesc, PPGMREGMMIO2RANGE *ppHeadRet)
+static int pgmR3PhysMmio2Create(PVM pVM, PPDMDEVINS pDevIns, uint32_t iSubDev, uint32_t iRegion, RTGCPHYS cb, uint32_t fFlags,
+                                uint8_t idMmio2, const char *pszDesc, PPGMREGMMIO2RANGE *ppHeadRet)
 {
     /*
…
     int rc = VINF_SUCCESS;
     uint32_t cPagesLeft = cb >> X86_PAGE_SHIFT;
-    for (uint16_t iChunk = 0; iChunk < cChunks && RT_SUCCESS(rc); iChunk++)
+    for (uint16_t iChunk = 0; iChunk < cChunks && RT_SUCCESS(rc); iChunk++, idMmio2++)
     {
         /*
…
         //pNew->pvR3    = NULL;
         //pNew->pNext   = NULL;
-        //pNew->fFlags  = 0;
         if (iChunk == 0)
             pNew->fFlags |= PGMREGMMIO2RANGE_F_FIRST_CHUNK;
         if (iChunk + 1 == cChunks)
             pNew->fFlags |= PGMREGMMIO2RANGE_F_LAST_CHUNK;
+        if (fFlags & PGMPHYS_MMIO2_FLAGS_TRACK_DIRTY_PAGES)
+            pNew->fFlags |= PGMREGMMIO2RANGE_F_TRACK_DIRTY_PAGES;
         pNew->iSubDev      = iSubDev;
         pNew->iRegion      = iRegion;
         pNew->idSavedState = UINT8_MAX;
-        pNew->idMmio2      = UINT8_MAX;
+        pNew->idMmio2      = idMmio2;
         //pNew->pPhysHandlerR3 = NULL;
         //pNew->paLSPages      = NULL;
…
         cPagesLeft -= cPagesTrackedByChunk;
         ppNext = &pNew->pNextR3;
+
+        /*
+         * Pre-allocate a handler if we're tracking dirty pages.
+         */
+        if (fFlags & PGMPHYS_MMIO2_FLAGS_TRACK_DIRTY_PAGES)
+        {
+            rc = pgmHandlerPhysicalExCreate(pVM, pVM->pgm.s.hMmio2DirtyPhysHandlerType,
+                                            (RTR3PTR)(uintptr_t)idMmio2, idMmio2, idMmio2, pszDesc, &pNew->pPhysHandlerR3);
+            AssertLogRelMsgRCBreak(rc, ("idMmio2=%zu\n", idMmio2));
+        }
     }
     Assert(cPagesLeft == 0);
…
         PPGMREGMMIO2RANGE pFree = *ppHeadRet;
         *ppHeadRet = pFree->pNextR3;
+
+        if (pFree->pPhysHandlerR3)
+        {
+            pgmHandlerPhysicalExDestroy(pVM, pFree->pPhysHandlerR3);
+            pFree->pPhysHandlerR3 = NULL;
+        }
 
         if (pFree->RamRange.fFlags & PGM_RAM_RANGE_FLAGS_FLOATING)
…
     AssertReturn(!(cb & PAGE_OFFSET_MASK), VERR_INVALID_PARAMETER);
     AssertReturn(cb, VERR_INVALID_PARAMETER);
-    AssertReturn(!fFlags, VERR_INVALID_PARAMETER);
+    AssertReturn(!(fFlags & ~PGMPHYS_MMIO2_FLAGS_VALID_MASK), VERR_INVALID_FLAGS);
 
     const uint32_t cPages = cb >> PAGE_SHIFT;
…
      */
     unsigned cChunks = pgmR3PhysMmio2CalcChunkCount(pVM, cb, NULL, NULL);
+
     PGM_LOCK_VOID(pVM);
-    uint8_t idMmio2 = pVM->pgm.s.cMmio2Regions + 1;
-    unsigned cNewMmio2Regions = pVM->pgm.s.cMmio2Regions + cChunks;
+    AssertCompile(PGM_MMIO2_MAX_RANGES < 255);
+    uint8_t const  idMmio2          = pVM->pgm.s.cMmio2Regions + 1;
+    unsigned const cNewMmio2Regions = pVM->pgm.s.cMmio2Regions + cChunks;
     if (cNewMmio2Regions > PGM_MMIO2_MAX_RANGES)
     {
…
      */
     PPGMREGMMIO2RANGE pNew;
-    rc = pgmR3PhysMmio2Create(pVM, pDevIns, iSubDev, iRegion, cb, pszDesc, &pNew);
+    rc = pgmR3PhysMmio2Create(pVM, pDevIns, iSubDev, iRegion, cb, fFlags, idMmio2, pszDesc, &pNew);
     if (RT_SUCCESS(rc))
     {
…
 #endif
             pCur->RamRange.pvR3 = pbCurPages;
-            pCur->idMmio2       = idMmio2;
 
             uint32_t iDstPage = pCur->RamRange.cb >> X86_PAGE_SHIFT;
…
             iSrcPage   += pCur->RamRange.cb >> X86_PAGE_SHIFT;
             pbCurPages += pCur->RamRange.cb;
-            idMmio2++;
         }
…
 
         uint8_t idMmio2 = pCur->idMmio2;
-        if (idMmio2 != UINT8_MAX)
+        Assert(idMmio2 <= RT_ELEMENTS(pVM->pgm.s.apMmio2RangesR3));
+        if (idMmio2 <= RT_ELEMENTS(pVM->pgm.s.apMmio2RangesR3))
         {
             Assert(pVM->pgm.s.apMmio2RangesR3[idMmio2 - 1] == pCur);
…
         if (RT_FAILURE(rc2) && RT_SUCCESS(rc))
             rc = rc2;
+
+        if (pCur->pPhysHandlerR3)
+        {
+            pgmHandlerPhysicalExDestroy(pVM, pCur->pPhysHandlerR3);
+            pCur->pPhysHandlerR3 = NULL;
+        }
 
         /* we're leaking hyper memory here if done at runtime. */
…
     }
 
-#if 0 /* will be reused */
-    /*
-     * Register the access handler if plain MMIO.
-     *
-     * We must register access handlers for each range since the access handler
-     * code refuses to deal with multiple ranges (and we can).
-     */
-    if (!(pFirstMmio->fFlags & PGMREGMMIO2RANGE_F_MMIO2))
-    {
-        AssertFailed();
-        int rc = VINF_SUCCESS;
-        for (PPGMREGMMIO2RANGE pCurMmio = pFirstMmio; ; pCurMmio = pCurMmio->pNextR3)
-        {
-            Assert(!(pCurMmio->fFlags & PGMREGMMIO2RANGE_F_MAPPED));
-            rc = pgmHandlerPhysicalExRegister(pVM, pCurMmio->pPhysHandlerR3, pCurMmio->RamRange.GCPhys,
-                                              pCurMmio->RamRange.GCPhysLast);
-            if (RT_FAILURE(rc))
-                break;
-            pCurMmio->fFlags |= PGMREGMMIO2RANGE_F_MAPPED; /* Use this to mark that the handler is registered. */
-            if (pCurMmio->fFlags & PGMREGMMIO2RANGE_F_LAST_CHUNK)
-                break;
-        }
-        if (RT_FAILURE(rc))
-        {
-            /* Almost impossible, but try clean up properly and get out of here. */
-            for (PPGMREGMMIO2RANGE pCurMmio = pFirstMmio; ; pCurMmio = pCurMmio->pNextR3)
-            {
-                if (pCurMmio->fFlags & PGMREGMMIO2RANGE_F_MAPPED)
-                {
-                    pCurMmio->fFlags &= ~PGMREGMMIO2RANGE_F_MAPPED;
-                    pgmHandlerPhysicalExDeregister(pVM, pCurMmio->pPhysHandlerR3);
-                }
-
-                if (!fRamExists)
-                    pgmR3PhysUnlinkRamRange(pVM, &pCurMmio->RamRange);
-                else
-                {
-                    Assert(pCurMmio->fFlags & PGMREGMMIO2RANGE_F_LAST_CHUNK); /* Only one chunk */
-
-                    uint32_t cPagesLeft = pCurMmio->RamRange.cb >> PAGE_SHIFT;
-                    PPGMPAGE pPageDst = &pRam->aPages[(pCurMmio->RamRange.GCPhys - pRam->GCPhys) >> PAGE_SHIFT];
-                    while (cPagesLeft-- > 0)
-                    {
-                        PGM_PAGE_INIT_ZERO(pPageDst, pVM, PGMPAGETYPE_RAM);
-                        pPageDst++;
-                    }
-                }
-
-                pCurMmio->RamRange.GCPhys = NIL_RTGCPHYS;
-                pCurMmio->RamRange.GCPhysLast = NIL_RTGCPHYS;
-                if (pCurMmio->fFlags & PGMREGMMIO2RANGE_F_LAST_CHUNK)
-                    break;
-            }
-
-            /** @todo NEM notification cleanup */
-            PGM_UNLOCK(pVM);
-            return rc;
-        }
-    }
-#endif
+    /*
+     * If the range have dirty page monitoring enabled, enable that.
+     *
+     * We ignore failures here for now because if we fail, the whole mapping
+     * will have to be reversed and we'll end up with nothing at all on the
+     * screen and a grumpy guest, whereas if we just go on, we'll only have
+     * visual distortions to gripe about.  There will be something in the
+     * release log.
+     */
+    if (   pFirstMmio->pPhysHandlerR3
+        && (pFirstMmio->fFlags & PGMREGMMIO2RANGE_F_TRACKING_ENABLED))
+        pgmR3PhysMmio2EnableDirtyPageTracing(pVM, pFirstMmio);
 
     /*
…
     AssertReturnStmt(fOldFlags & PGMREGMMIO2RANGE_F_MAPPED, PGM_UNLOCK(pVM), VERR_WRONG_ORDER);
 
-#if 0 /* will be reused */
-    /*
-     * If plain MMIO, we must deregister the handlers first.
-     */
-    if (!(fOldFlags & PGMREGMMIO2RANGE_F_MMIO2))
-    {
-        AssertFailed();
-
-        PPGMREGMMIO2RANGE pCurMmio = pFirstMmio;
-        rc = pgmHandlerPhysicalExDeregister(pVM, pFirstMmio->pPhysHandlerR3);
-        AssertRCReturnStmt(rc, PGM_UNLOCK(pVM), rc);
-        while (!(pCurMmio->fFlags & PGMREGMMIO2RANGE_F_LAST_CHUNK))
-        {
-            pCurMmio = pCurMmio->pNextR3;
-            rc = pgmHandlerPhysicalExDeregister(pVM, pCurMmio->pPhysHandlerR3);
-            AssertRCReturnStmt(rc, PGM_UNLOCK(pVM), VERR_PGM_PHYS_MMIO_EX_IPE);
-        }
-    }
-#endif
+    /*
+     * If monitoring dirty pages, we must deregister the handlers first.
+     */
+    if (   pFirstMmio->pPhysHandlerR3
+        && (fOldFlags & PGMREGMMIO2RANGE_F_TRACKING_ENABLED))
+        pgmR3PhysMmio2DisableDirtyPageTracing(pVM, pFirstMmio);
 
     /*
…
                         ("%s: %#x\n", pFirstMmio->RamRange.pszDesc, pFirstMmio->fFlags),
                         rc = VERR_NOT_SUPPORTED);
+
+#ifdef VBOX_WITH_PGM_NEM_MODE
+    /*
+     * Currently not supported for NEM in simple memory mode.
+     */
+    /** @todo implement this for NEM. */
+    if (RT_SUCCESS(rc))
+        AssertLogRelMsgStmt(VM_IS_NEM_ENABLED(pVM), ("%s: %#x\n", pFirstMmio->RamRange.pszDesc),
+                            rc = VERR_PGM_NOT_SUPPORTED_FOR_NEM_MODE);
+#endif
     if (RT_SUCCESS(rc))
     {
…
     return NIL_RTGCPHYS;
 }
+
+
+/**
+ * Worker for PGMR3PhysMmio2QueryAndResetDirtyBitmap.
+ *
+ * Called holding the PGM lock.
+ */
+static int pgmR3PhysMmio2QueryAndResetDirtyBitmapLocked(PVM pVM, PPDMDEVINS pDevIns, PGMMMIO2HANDLE hMmio2,
+                                                        void *pvBitmap, size_t cbBitmap)
+{
+    /*
+     * Continue validation.
+     */
+    PPGMREGMMIO2RANGE pFirstRegMmio = pgmR3PhysMmio2Find(pVM, pDevIns, UINT32_MAX, UINT32_MAX, hMmio2);
+    AssertReturn(pFirstRegMmio, VERR_INVALID_HANDLE);
+    AssertReturn(   (pFirstRegMmio->fFlags & (PGMREGMMIO2RANGE_F_TRACK_DIRTY_PAGES | PGMREGMMIO2RANGE_F_FIRST_CHUNK))
+                 == (PGMREGMMIO2RANGE_F_TRACK_DIRTY_PAGES | PGMREGMMIO2RANGE_F_FIRST_CHUNK),
+                 VERR_INVALID_FUNCTION);
+    AssertReturn(pDevIns == pFirstRegMmio->pDevInsR3, VERR_NOT_OWNER);
+
+    RTGCPHYS cbTotal     = 0;
+    uint16_t fTotalDirty = 0;
+    for (PPGMREGMMIO2RANGE pCur = pFirstRegMmio;;)
+    {
+        cbTotal     += pCur->cbReal; /** @todo good question for NEM... */
+        fTotalDirty |= pCur->fFlags;
+        if (pCur->fFlags & PGMREGMMIO2RANGE_F_LAST_CHUNK)
+            break;
+        pCur = pCur->pNextR3;
+        AssertPtrReturn(pCur, VERR_INTERNAL_ERROR_5);
+        AssertReturn(   (pCur->fFlags & (PGMREGMMIO2RANGE_F_TRACK_DIRTY_PAGES | PGMREGMMIO2RANGE_F_FIRST_CHUNK))
+                     == PGMREGMMIO2RANGE_F_TRACK_DIRTY_PAGES,
+                     VERR_INTERNAL_ERROR_4);
+    }
+    size_t const cbTotalBitmap = RT_ALIGN_T(cbTotal, PAGE_SIZE * 64, RTGCPHYS) / PAGE_SIZE / 8;
+
+    if (cbBitmap)
+    {
+        AssertPtrReturn(pvBitmap, VERR_INVALID_POINTER);
+        AssertReturn(RT_ALIGN_P(pvBitmap, sizeof(uint64_t)) == pvBitmap, VERR_INVALID_POINTER);
+        AssertReturn(cbBitmap == cbTotalBitmap, VERR_INVALID_PARAMETER);
+    }
+
+    /*
+     * Do the work.
+     */
+    int rc = VINF_SUCCESS;
+    if (pvBitmap)
+    {
+        if (fTotalDirty & PGMREGMMIO2RANGE_F_IS_DIRTY)
+        {
+            if (   (pFirstRegMmio->fFlags & (PGMREGMMIO2RANGE_F_MAPPED | PGMREGMMIO2RANGE_F_TRACKING_ENABLED))
+                ==                          (PGMREGMMIO2RANGE_F_MAPPED | PGMREGMMIO2RANGE_F_TRACKING_ENABLED))
+            {
+                /*
+                 * Reset each chunk, gathering dirty bits.
+                 */
+                RT_BZERO(pvBitmap, cbBitmap); /* simpler for now. */
+                uint32_t iPageNo = 0;
+                for (PPGMREGMMIO2RANGE pCur = pFirstRegMmio; pCur; pCur = pCur->pNextR3)
+                {
+                    if (pCur->fFlags & PGMREGMMIO2RANGE_F_IS_DIRTY)
+                    {
+                        int rc2 = pgmHandlerPhysicalResetMmio2WithBitmap(pVM, pCur->RamRange.GCPhys, pvBitmap, iPageNo);
+                        if (RT_FAILURE(rc2) && RT_SUCCESS(rc))
+                            rc = rc2;
+                        pCur->fFlags &= ~PGMREGMMIO2RANGE_F_IS_DIRTY;
+                    }
+                    if (pCur->fFlags & PGMREGMMIO2RANGE_F_LAST_CHUNK)
+                        break;
+                    iPageNo += pCur->RamRange.cb >> PAGE_SHIFT;
+                }
+            }
+            else
+            {
+                /*
+                 * If not mapped or tracking is disabled, we return the
+                 * PGMREGMMIO2RANGE_F_IS_DIRTY status for all pages.  We cannot
+                 * get more accurate data than that after unmapping or disabling.
+                 */
+                RT_BZERO(pvBitmap, cbBitmap);
+                uint32_t iPageNo = 0;
+                for (PPGMREGMMIO2RANGE pCur = pFirstRegMmio; pCur; pCur = pCur->pNextR3)
+                {
+                    if (pCur->fFlags & PGMREGMMIO2RANGE_F_IS_DIRTY)
+                    {
+                        ASMBitSetRange(pvBitmap, iPageNo, iPageNo + (pCur->RamRange.cb >> PAGE_SHIFT));
+                        pCur->fFlags &= ~PGMREGMMIO2RANGE_F_IS_DIRTY;
+                    }
+                    if (pCur->fFlags & PGMREGMMIO2RANGE_F_LAST_CHUNK)
+                        break;
+                    iPageNo += pCur->RamRange.cb >> PAGE_SHIFT;
+                }
+            }
+        }
+        /*
+         * No dirty chunks.
+         */
+        else
+            RT_BZERO(pvBitmap, cbBitmap);
+    }
+    /*
+     * No bitmap.  Reset the region if tracking is currently enabled.
+     */
+    else if (   (pFirstRegMmio->fFlags & (PGMREGMMIO2RANGE_F_MAPPED | PGMREGMMIO2RANGE_F_TRACKING_ENABLED))
+             ==                          (PGMREGMMIO2RANGE_F_MAPPED | PGMREGMMIO2RANGE_F_TRACKING_ENABLED))
+        rc = PGMHandlerPhysicalReset(pVM, pFirstRegMmio->RamRange.GCPhys);
+
+    return rc;
+}
+
+
+/**
+ * Queries the dirty page bitmap and resets the monitoring.
+ *
+ * The PGMPHYS_MMIO2_FLAGS_TRACK_DIRTY_PAGES flag must be specified when
+ * creating the range for this to work.
+ *
+ * @returns VBox status code.
+ * @retval  VERR_INVALID_FUNCTION if not created using
+ *          PGMPHYS_MMIO2_FLAGS_TRACK_DIRTY_PAGES.
+ * @param   pVM         The cross context VM structure.
+ * @param   pDevIns     The device owning the MMIO2 handle.
+ * @param   hMmio2      The region handle.
+ * @param   pvBitmap    The output bitmap.  Must be 8-byte aligned.  Ignored
+ *                      when @a cbBitmap is zero.
+ * @param   cbBitmap    The size of the bitmap.  Must be the size of the whole
+ *                      MMIO2 range, rounded up to the nearest 8 bytes.
+ *                      When zero only a reset is done.
+ */
+VMMR3_INT_DECL(int) PGMR3PhysMmio2QueryAndResetDirtyBitmap(PVM pVM, PPDMDEVINS pDevIns, PGMMMIO2HANDLE hMmio2,
+                                                           void *pvBitmap, size_t cbBitmap)
+{
+    /*
+     * Do some basic validation before grapping the PGM lock and continuing.
+     */
+    AssertPtrReturn(pDevIns, VERR_INVALID_POINTER);
+    AssertReturn(RT_ALIGN_Z(cbBitmap, sizeof(uint64_t)) == cbBitmap, VERR_INVALID_PARAMETER);
+    int rc = PGM_LOCK(pVM);
+    if (RT_SUCCESS(rc))
+    {
+        rc = pgmR3PhysMmio2QueryAndResetDirtyBitmapLocked(pVM, pDevIns, hMmio2, pvBitmap, cbBitmap);
+        PGM_UNLOCK(pVM);
+    }
+    return rc;
+}
+
+/**
+ * Worker for PGMR3PhysMmio2ControlDirtyPageTracking
+ *
+ * Called owning the PGM lock.
+ */
+static int pgmR3PhysMmio2ControlDirtyPageTrackingLocked(PVM pVM, PPDMDEVINS pDevIns, PGMMMIO2HANDLE hMmio2, bool fEnabled)
+{
+    /*
+     * Continue validation.
+     */
+    PPGMREGMMIO2RANGE pFirstRegMmio = pgmR3PhysMmio2Find(pVM, pDevIns, UINT32_MAX, UINT32_MAX, hMmio2);
+    AssertReturn(pFirstRegMmio, VERR_INVALID_HANDLE);
+    AssertReturn(   (pFirstRegMmio->fFlags & (PGMREGMMIO2RANGE_F_TRACK_DIRTY_PAGES | PGMREGMMIO2RANGE_F_FIRST_CHUNK))
+                 == (PGMREGMMIO2RANGE_F_TRACK_DIRTY_PAGES | PGMREGMMIO2RANGE_F_FIRST_CHUNK)
+                 , VERR_INVALID_FUNCTION);
+    AssertReturn(pDevIns == pFirstRegMmio->pDevInsR3, VERR_NOT_OWNER);
+
+    /*
+     * Anyting needing doing?
+     */
+    if (fEnabled != RT_BOOL(pFirstRegMmio->fFlags & PGMREGMMIO2RANGE_F_TRACKING_ENABLED))
+    {
+        LogFlowFunc(("fEnabled=%RTbool %s\n", fEnabled, pFirstRegMmio->RamRange.pszDesc));
+
+        /*
+         * Update the PGMREGMMIO2RANGE_F_TRACKING_ENABLED flag.
+         */
+        for (PPGMREGMMIO2RANGE pCur = pFirstRegMmio;;)
+        {
+            if (fEnabled)
+                pCur->fFlags |= PGMREGMMIO2RANGE_F_TRACKING_ENABLED;
+            else
+                pCur->fFlags &= ~PGMREGMMIO2RANGE_F_TRACKING_ENABLED;
+            if (pCur->fFlags & PGMREGMMIO2RANGE_F_LAST_CHUNK)
+                break;
+            pCur = pCur->pNextR3;
+            AssertPtrReturn(pCur, VERR_INTERNAL_ERROR_5);
+            AssertReturn(   (pCur->fFlags & (PGMREGMMIO2RANGE_F_TRACK_DIRTY_PAGES | PGMREGMMIO2RANGE_F_FIRST_CHUNK))
+                         == PGMREGMMIO2RANGE_F_TRACK_DIRTY_PAGES
+                         , VERR_INTERNAL_ERROR_4);
+        }
+
+        /*
+         * Enable/disable handlers if currently mapped.
+         *
+         * We ignore status codes here as we've already changed the flags and
+         * returning a failure status now would be confusing.  Besides, the two
+         * functions will continue past failures.  As argued in the mapping code,
+         * it's in the release log.
+         */
+        if (pFirstRegMmio->fFlags & PGMREGMMIO2RANGE_F_MAPPED)
+        {
+            if (fEnabled)
+                pgmR3PhysMmio2EnableDirtyPageTracing(pVM, pFirstRegMmio);
+            else
+                pgmR3PhysMmio2DisableDirtyPageTracing(pVM, pFirstRegMmio);
+        }
+    }
+    else
+        LogFlowFunc(("fEnabled=%RTbool %s - no change\n", fEnabled, pFirstRegMmio->RamRange.pszDesc));
+
+    return VINF_SUCCESS;
+}
+
+
+/**
+ * Controls the dirty page tracking for an MMIO2 range.
+ *
+ * @returns VBox status code.
+ * @param   pVM         The cross context VM structure.
+ * @param   pDevIns     The device owning the MMIO2 memory.
+ * @param   hMmio2      The handle of the region.
+ * @param   fEnabled    The new tracking state.
+ */
+VMMR3_INT_DECL(int) PGMR3PhysMmio2ControlDirtyPageTracking(PVM pVM, PPDMDEVINS pDevIns, PGMMMIO2HANDLE hMmio2, bool fEnabled)
+{
+    /*
+     * Do some basic validation before grapping the PGM lock and continuing.
+     */
+    AssertPtrReturn(pDevIns, VERR_INVALID_POINTER);
+    int rc = PGM_LOCK(pVM);
+    if (RT_SUCCESS(rc))
+    {
+        rc = pgmR3PhysMmio2ControlDirtyPageTrackingLocked(pVM, pDevIns, hMmio2, fEnabled);
+        PGM_UNLOCK(pVM);
+    }
+    return rc;
+}
+
 
 /**
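The cbBitmap contract enforced by pgmR3PhysMmio2QueryAndResetDirtyBitmapLocked above (8-byte aligned buffer, one bit per page, size rounded up to whole 64-page words) is easy to get wrong, so here is a sketch of the caller-side arithmetic. The function name and the cbMmio2 input are assumptions for illustration only; and since PGMR3PhysMmio2QueryAndResetDirtyBitmap is VMMR3_INT_DECL, a device would normally reach it through the PDM helper shown under PDMDevHlp.cpp rather than calling it directly.

    #include <VBox/vmm/pgm.h>
    #include <VBox/err.h>
    #include <iprt/mem.h>
    #include <iprt/param.h>

    /* Hypothetical VMM-side helper: allocate a correctly sized bitmap for an
       MMIO2 range of cbMmio2 bytes, then query and reset its dirty state. */
    static int myQueryAndResetDirty(PVM pVM, PPDMDEVINS pDevIns, PGMMMIO2HANDLE hMmio2, RTGCPHYS cbMmio2)
    {
        /* Mirrors the cbTotalBitmap computation in the worker above. */
        size_t const cbBitmap = RT_ALIGN_T(cbMmio2, PAGE_SIZE * 64, RTGCPHYS) / PAGE_SIZE / 8;
        uint64_t *pau64Bitmap = (uint64_t *)RTMemAllocZ(cbBitmap); /* at least 8-byte aligned */
        if (!pau64Bitmap)
            return VERR_NO_MEMORY;

        int rc = PGMR3PhysMmio2QueryAndResetDirtyBitmap(pVM, pDevIns, hMmio2, pau64Bitmap, cbBitmap);
        if (RT_SUCCESS(rc))
            for (size_t iWord = 0; iWord < cbBitmap / sizeof(uint64_t); iWord++)
                if (pau64Bitmap[iWord])
                {
                    /* Pages [iWord * 64 .. iWord * 64 + 63] include dirty ones;
                       process them here. */
                }

        RTMemFree(pau64Bitmap);
        return rc;
    }

Passing cbBitmap = 0 skips the copy entirely and just resets the monitoring, per the doc comment above.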
trunk/src/VBox/VMM/VMMR3/PGMPool.cpp
r91854 → r92162

 
     pPool->hAccessHandlerType = NIL_PGMPHYSHANDLERTYPE;
-    rc = PGMR3HandlerPhysicalTypeRegister(pVM, PGMPHYSHANDLERKIND_WRITE,
+    rc = PGMR3HandlerPhysicalTypeRegister(pVM, PGMPHYSHANDLERKIND_WRITE, true /*fKeepPgmLock*/,
                                           pgmPoolAccessHandler,
                                           NULL, "pgmPoolAccessHandler", "pgmRZPoolAccessPfHandler",