- Timestamp: May 18, 2015, 1:09:16 PM
- Location: trunk
- Files: 15 edited
trunk/include/VBox/vmm/pgm.h
r55903 → r55909

+/** @def PGM_ALL_CB_DECL
+ * Macro for declaring a handler callback for all contexts.  The handler
+ * callback is static in ring-3, and exported in RC and R0.
+ * @sa PGM_ALL_CB2_DECL.
+ */
+#if defined(IN_RC) || defined(IN_RING0)
+# ifdef __cplusplus
+#  define PGM_ALL_CB_DECL(type)     extern "C" DECLEXPORT(type)
+# else
+#  define PGM_ALL_CB_DECL(type)     DECLEXPORT(type)
+# endif
+#else
+# define PGM_ALL_CB_DECL(type)      static type
+#endif
+
+/** @def PGM_ALL_CB2_DECL
+ * Macro for declaring a handler callback for all contexts.  The handler
+ * callback is hidden in ring-3, and exported in RC and R0.
+ * @sa PGM_ALL_CB2_DECL.
+ */
+#if defined(IN_RC) || defined(IN_RING0)
+# ifdef __cplusplus
+#  define PGM_ALL_CB2_DECL(type)    extern "C" DECLEXPORT(type)
+# else
+#  define PGM_ALL_CB2_DECL(type)    DECLEXPORT(type)
+# endif
+#else
+# define PGM_ALL_CB2_DECL(type)     DECLHIDDEN(type)
+#endif
+
+
 /**
  * \#PF Handler callback for physical access handler ranges in RC and R0.
…
  * @param   enmOrigin       The origin of this call.
  * @param   pvUser          User argument.
- *
- * @todo Add pVCpu, possibly replacing pVM.
- */
-typedef DECLCALLBACK(int) FNPGMR3PHYSHANDLER(PVM pVM, PVMCPU pVCpu, RTGCPHYS GCPhys, void *pvPhys, void *pvBuf, size_t cbBuf,
-                                             PGMACCESSTYPE enmAccessType, PGMACCESSORIGIN enmOrigin, void *pvUser);
+ */
+typedef DECLCALLBACK(int) FNPGMPHYSHANDLER(PVM pVM, PVMCPU pVCpu, RTGCPHYS GCPhys, void *pvPhys, void *pvBuf, size_t cbBuf,
+                                           PGMACCESSTYPE enmAccessType, PGMACCESSORIGIN enmOrigin, void *pvUser);
 /** Pointer to PGM access callback. */
-typedef FNPGMR3PHYSHANDLER *PFNPGMR3PHYSHANDLER;
+typedef FNPGMPHYSHANDLER *PFNPGMPHYSHANDLER;
…
 VMMR3_INT_DECL(int) PGMR3HandlerPhysicalTypeRegisterEx(PVM pVM, PGMPHYSHANDLERKIND enmKind,
-                                                       PFNPGMR3PHYSHANDLER pfnHandlerR3,
+                                                       PFNPGMPHYSHANDLER pfnHandlerR3,
                                                        R0PTRTYPE(PFNPGMRZPHYSPFHANDLER) pfnPfHandlerR0,
                                                        RCPTRTYPE(PFNPGMRZPHYSPFHANDLER) pfnPfHandlerRC,
                                                        const char *pszDesc, PPGMPHYSHANDLERTYPE phType);
 VMMR3DECL(int)      PGMR3HandlerPhysicalTypeRegister(PVM pVM, PGMPHYSHANDLERKIND enmKind,
-                                                     R3PTRTYPE(PFNPGMR3PHYSHANDLER) pfnHandlerR3,
+                                                     R3PTRTYPE(PFNPGMPHYSHANDLER) pfnHandlerR3,
                                                      const char *pszModR0, const char *pszPfHandlerR0,
                                                      const char *pszModRC, const char *pszPfHandlerRC, const char *pszDesc,
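With these macros a single access-handler implementation can be shared by ring-3, ring-0 and raw-mode builds. A minimal sketch of the declare/define pattern follows; only the FNPGMPHYSHANDLER signature and the macros come from this changeset, the handler name and body are hypothetical, and VBox/vmm/pgm.h plus the usual VMM types are assumed to be in scope:

    /* Forward declaration through the callback typedef so the compiler checks
       the definition below against FNPGMPHYSHANDLER. */
    PGM_ALL_CB_DECL(FNPGMPHYSHANDLER) myDevAccessHandler;

    /* Definition: expands to 'static int ...' in ring-3 builds and to an
       exported extern "C" symbol in R0/RC builds. */
    PGM_ALL_CB_DECL(int) myDevAccessHandler(PVM pVM, PVMCPU pVCpu, RTGCPHYS GCPhys, void *pvPhys,
                                            void *pvBuf, size_t cbBuf, PGMACCESSTYPE enmAccessType,
                                            PGMACCESSORIGIN enmOrigin, void *pvUser)
    {
        NOREF(pVM); NOREF(pVCpu); NOREF(GCPhys); NOREF(pvPhys); NOREF(pvBuf);
        NOREF(cbBuf); NOREF(enmAccessType); NOREF(enmOrigin); NOREF(pvUser);
        /* Do the device/bookkeeping work here, then let PGM perform the actual access. */
        return VINF_PGM_HANDLER_DO_DEFAULT;
    }

PGM_ALL_CB2_DECL works the same way, except that the ring-3 variant gets hidden (DECLHIDDEN) rather than internal linkage, which is what the VMM-internal handlers further down use.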
trunk/src/VBox/Devices/Graphics/DevVGA-SVGA.cpp
r55904 → r55909

 #ifdef IN_RING3
 # ifdef DEBUG_FIFO_ACCESS
-static FNPGMR3PHYSHANDLER vmsvgaR3FIFOAccessHandler;
+static FNPGMPHYSHANDLER vmsvgaR3FIFOAccessHandler;
 # endif
 # ifdef DEBUG_GMR_ACCESS
-static FNPGMR3PHYSHANDLER vmsvgaR3GMRAccessHandler;
+static FNPGMPHYSHANDLER vmsvgaR3GMRAccessHandler;
 # endif
 #endif
trunk/src/VBox/Devices/Graphics/DevVGA.cpp
r55904 → r55909

 #ifndef IN_RING3
 RT_C_DECLS_BEGIN
 DECLEXPORT(FNPGMRZPHYSPFHANDLER) vgaLbfAccessPfHandler;
 RT_C_DECLS_END
 #endif
-#ifdef IN_RING3
-static FNPGMR3PHYSHANDLER vgaR3LFBAccessHandler;
-#endif
+PGM_ALL_CB_DECL(FNPGMPHYSHANDLER) vgaLFBAccessHandler;
…
     return vgaLFBAccess(pVM, pThis, GCPhysFault, pvFault);
 }
-
-#else /* IN_RING3 */
+#endif /* !IN_RING3 */
+
 
 /**
- * @callback_method_impl{FNPGMR3PHYSHANDLER, HC access handler for the LFB.}
- */
-static DECLCALLBACK(int) vgaR3LFBAccessHandler(PVM pVM, PVMCPU pVCpu, RTGCPHYS GCPhys, void *pvPhys, void *pvBuf, size_t cbBuf,
-                                               PGMACCESSTYPE enmAccessType, PGMACCESSORIGIN enmOrigin, void *pvUser)
+ * @callback_method_impl{FNPGMPHYSHANDLER, HC access handler for the LFB.}
+ */
+PGM_ALL_CB_DECL(int) vgaLFBAccessHandler(PVM pVM, PVMCPU pVCpu, RTGCPHYS GCPhys, void *pvPhys, void *pvBuf, size_t cbBuf,
+                                         PGMACCESSTYPE enmAccessType, PGMACCESSORIGIN enmOrigin, void *pvUser)
 {
     PVGASTATE pThis = (PVGASTATE)pvUser;
…
     return rc;
 }
-#endif /* IN_RING3 */
+
 
 /* -=-=-=-=-=- All rings: VGA BIOS I/Os -=-=-=-=-=- */
…
      */
     rc = PGMR3HandlerPhysicalTypeRegister(pVM, PGMPHYSHANDLERKIND_WRITE,
-                                          vgaR3LFBAccessHandler,
+                                          vgaLFBAccessHandler,
                                           g_DeviceVga.szR0Mod, "vgaLbfAccessPfHandler",
                                           g_DeviceVga.szRCMod, "vgaLbfAccessPfHandler",
trunk/src/VBox/Devices/Network/DevPCNet.cpp
r55904 → r55909

 static void     pcnetPollTimerStart(PPCNETSTATE pThis);
 static int      pcnetXmitPending(PPCNETSTATE pThis, bool fOnWorkerThread);
+#ifdef PCNET_NO_POLLING
+PGM_ALL_CB_DECL(FNPGMPHYSHANDLER) pcnetHandleRingWrite;
+# ifndef IN_RING3
+RT_C_DECLS_BEGIN
+DECLEXPORT(CTX_SUFF(FNPGM,PHYSPFHANDLER)) pcnetHandleRingWritePf;
+RT_C_DECLS_END
+# endif
+#endif
…
 #ifdef PCNET_NO_POLLING
+
 # ifndef IN_RING3
-RT_C_DECLS_BEGIN
-DECLEXPORT(CTX_SUFF(FNPGM,PHYSPFHANDLER)) pcnetHandleRingWritePf;
-RT_C_DECLS_END
-
 /**
  * #PF Virtual Handler callback for Guest write access to the ring descriptor page(pThis)
…
     return VINF_IOM_R3_MMIO_WRITE; /* handle in ring3 */
 }
-
-# else /* IN_RING3 */
-
-static FNPGMR3PHYSHANDLER pcnetR3HandleRingWrite;
-
-/**
- * #PF Handler callback for physical access handler ranges (MMIO among others) in HC.
+#endif /* !IN_RING3 */
+
+
+/**
+ * #PF Handler callback for physical access handler ranges (MMIO among others).
  *
  * The handler can not raise any faults, it's mainly for monitoring write access
…
  * @param   pvUser          User argument.
  */
-static DECLCALLBACK(int) pcnetR3HandleRingWrite(PVM pVM, PVMCPU pVCpu, RTGCPHYS GCPhys, void *pvPhys, void *pvBuf, size_t cbBuf,
-                                                PGMACCESSTYPE enmAccessType, PGMACCESSORIGIN enmOrigin, void *pvUser)
+PGM_ALL_CB_DECL(int) pcnetHandleRingWrite(PVM pVM, PVMCPU pVCpu, RTGCPHYS GCPhys, void *pvPhys, void *pvBuf, size_t cbBuf,
+                                          PGMACCESSTYPE enmAccessType, PGMACCESSORIGIN enmOrigin, void *pvUser)
 {
     PPDMDEVINS  pDevIns = (PPDMDEVINS)pvUser;
     PPCNETSTATE pThis   = PDMINS_2_DATA(pDevIns, PPCNETSTATE);
 
-    Log(("#%d pcnetR3HandleRingWrite: write to %#010x\n", PCNET_INST_NR, GCPhys));
+    Log(("#%d pcnetHandleRingWrite: write to %#010x\n", PCNET_INST_NR, GCPhys));
 #ifdef VBOX_WITH_STATISTICS
     STAM_COUNTER_INC(&CTXSUFF(pThis->StatRingWrite));
…
     return VINF_SUCCESS;
 }
-# endif /* !IN_RING3 */
 #endif /* PCNET_NO_POLLING */
…
     rc = PGMR3HandlerPhysicalTypeRegister(PDMDevHlpGetVM(pDevIns), PGMPHYSHANDLERKIND_WRITE,
-                                          pcnetR3HandleRingWrite,
+                                          pcnetHandleRingWrite,
                                           g_DevicePCNet.szR0Mod, "pcnetHandleRingWritePf",
                                           g_DevicePCNet.szRCMod, "pcnetHandleRingWritePf",
trunk/src/VBox/VMM/VMMAll/IOMAllMMIO.cpp
r55903 → r55909

-#ifdef IN_RING3
 /**
  * \#PF Handler callback for MMIO ranges.
…
  * @param   pvUser      Pointer to the MMIO range entry.
  */
-DECLCALLBACK(int) iomR3MmioHandler(PVM pVM, PVMCPU pVCpu, RTGCPHYS GCPhysFault, void *pvPhys, void *pvBuf, size_t cbBuf,
-                                   PGMACCESSTYPE enmAccessType, PGMACCESSORIGIN enmOrigin, void *pvUser)
+PGM_ALL_CB2_DECL(int) iomMmioHandler(PVM pVM, PVMCPU pVCpu, RTGCPHYS GCPhysFault, void *pvPhys, void *pvBuf, size_t cbBuf,
+                                     PGMACCESSTYPE enmAccessType, PGMACCESSORIGIN enmOrigin, void *pvUser)
 {
     PIOMMMIORANGE pRange = (PIOMMMIORANGE)pvUser;
…
     return rc;
 }
-#endif /* IN_RING3 */
trunk/src/VBox/VMM/VMMAll/PGMAllPhys.cpp
r55903 → r55909

 }
 
-#endif /* IN_RING3 */
+#endif /* !IN_RING3 */
+
+
+/**
+ * Access handler callback for ROM write accesses.
+ *
+ * @returns VINF_SUCCESS if the handler have carried out the operation.
+ * @returns VINF_PGM_HANDLER_DO_DEFAULT if the caller should carry out the access operation.
+ * @param   pVM             Pointer to the VM.
+ * @param   pVCpu           The cross context CPU structure for the calling EMT.
+ * @param   GCPhys          The physical address the guest is writing to.
+ * @param   pvPhys          The HC mapping of that address.
+ * @param   pvBuf           What the guest is reading/writing.
+ * @param   cbBuf           How much it's reading/writing.
+ * @param   enmAccessType   The access type.
+ * @param   enmOrigin       Who is making the access.
+ * @param   pvUser          User argument.
+ */
+PGM_ALL_CB2_DECL(int) pgmPhysRomWriteHandler(PVM pVM, PVMCPU pVCpu, RTGCPHYS GCPhys, void *pvPhys, void *pvBuf, size_t cbBuf,
+                                             PGMACCESSTYPE enmAccessType, PGMACCESSORIGIN enmOrigin, void *pvUser)
+{
+    PPGMROMRANGE    pRom     = (PPGMROMRANGE)pvUser;
+    const uint32_t  iPage    = (GCPhys - pRom->GCPhys) >> PAGE_SHIFT;
+    Assert(iPage < (pRom->cb >> PAGE_SHIFT));
+    PPGMROMPAGE     pRomPage = &pRom->aPages[iPage];
+    Log5(("pgmPhysRomWriteHandler: %d %c %#08RGp %#04zx\n", pRomPage->enmProt, enmAccessType == PGMACCESSTYPE_READ ? 'R' : 'W', GCPhys, cbBuf));
+    NOREF(pVCpu); NOREF(pvPhys); NOREF(enmOrigin);
+
+    if (enmAccessType == PGMACCESSTYPE_READ)
+    {
+        switch (pRomPage->enmProt)
+        {
+            /*
+             * Take the default action.
+             */
+            case PGMROMPROT_READ_ROM_WRITE_IGNORE:
+            case PGMROMPROT_READ_RAM_WRITE_IGNORE:
+            case PGMROMPROT_READ_ROM_WRITE_RAM:
+            case PGMROMPROT_READ_RAM_WRITE_RAM:
+                return VINF_PGM_HANDLER_DO_DEFAULT;
+
+            default:
+                AssertMsgFailedReturn(("enmProt=%d iPage=%d GCPhys=%RGp\n",
+                                       pRom->aPages[iPage].enmProt, iPage, GCPhys),
+                                      VERR_IPE_NOT_REACHED_DEFAULT_CASE);
+        }
+    }
+    else
+    {
+        Assert(enmAccessType == PGMACCESSTYPE_WRITE);
+        switch (pRomPage->enmProt)
+        {
+            /*
+             * Ignore writes.
+             */
+            case PGMROMPROT_READ_ROM_WRITE_IGNORE:
+            case PGMROMPROT_READ_RAM_WRITE_IGNORE:
+                return VINF_SUCCESS;
+
+            /*
+             * Write to the RAM page.
+             */
+            case PGMROMPROT_READ_ROM_WRITE_RAM:
+            case PGMROMPROT_READ_RAM_WRITE_RAM: /* yes this will get here too, it's *way* simpler that way. */
+            {
+                /* This should be impossible now, pvPhys doesn't work cross page anylonger. */
+                Assert(((GCPhys - pRom->GCPhys + cbBuf - 1) >> PAGE_SHIFT) == iPage);
+
+                /*
+                 * Take the lock, do lazy allocation, map the page and copy the data.
+                 *
+                 * Note that we have to bypass the mapping TLB since it works on
+                 * guest physical addresses and entering the shadow page would
+                 * kind of screw things up...
+                 */
+                int rc = pgmLock(pVM);
+                AssertRC(rc);
+
+                PPGMPAGE pShadowPage = &pRomPage->Shadow;
+                if (!PGMROMPROT_IS_ROM(pRomPage->enmProt))
+                {
+                    pShadowPage = pgmPhysGetPage(pVM, GCPhys);
+                    AssertLogRelReturn(pShadowPage, VERR_PGM_PHYS_PAGE_GET_IPE);
+                }
+
+                void *pvDstPage;
+                rc = pgmPhysPageMakeWritableAndMap(pVM, pShadowPage, GCPhys & X86_PTE_PG_MASK, &pvDstPage);
+                if (RT_SUCCESS(rc))
+                {
+                    memcpy((uint8_t *)pvDstPage + (GCPhys & PAGE_OFFSET_MASK), pvBuf, cbBuf);
+                    pRomPage->LiveSave.fWrittenTo = true;
+                }
+
+                pgmUnlock(pVM);
+                return rc;
+            }
+
+            default:
+                AssertMsgFailedReturn(("enmProt=%d iPage=%d GCPhys=%RGp\n",
+                                       pRom->aPages[iPage].enmProt, iPage, GCPhys),
+                                      VERR_IPE_NOT_REACHED_DEFAULT_CASE);
+        }
+    }
+}
+
 
 /**
…
     Assert((pPhys->Core.KeyLast & PAGE_OFFSET_MASK) == PAGE_OFFSET_MASK);
 
-    PFNPGMR3PHYSHANDLER pfnHandler = PGMPHYSHANDLER_GET_TYPE(pVM, pPhys)->CTX_SUFF(pfnHandler); Assert(pfnHandler);
+    PFNPGMPHYSHANDLER pfnHandler = PGMPHYSHANDLER_GET_TYPE(pVM, pPhys)->CTX_SUFF(pfnHandler); Assert(pfnHandler);
     void *pvUser = pPhys->CTX_SUFF(pvUser);
…
     if (RT_SUCCESS(rc))
     {
-        PFNPGMR3PHYSHANDLER pfnHandler = PGMPHYSHANDLER_GET_TYPE(pVM, pCur)->CTX_SUFF(pfnHandler);
+        PFNPGMPHYSHANDLER pfnHandler = PGMPHYSHANDLER_GET_TYPE(pVM, pCur)->CTX_SUFF(pfnHandler);
         void *pvUser = pCur->CTX_SUFF(pvUser);
…
             cbRange = offVirt;
 #ifdef IN_RING3
-            PFNPGMR3PHYSHANDLER pfnHandler = PGMPHYSHANDLER_GET_TYPE(pVM, pPhys)->CTX_SUFF(pfnHandler);
+            PFNPGMPHYSHANDLER pfnHandler = PGMPHYSHANDLER_GET_TYPE(pVM, pPhys)->CTX_SUFF(pfnHandler);
             void *pvUser = pPhys->CTX_SUFF(pvUser);
…
             Log5(("pgmPhysWriteHandler: GCPhys=%RGp cbRange=%#x pPage=%R[pgmpage] phys/virt %s/%s\n", GCPhys, cbRange, pPage, R3STRING(pPhys->pszDesc), R3STRING(pVirt->pszDesc) ));
 
-            PFNPGMR3PHYSHANDLER pfnHandler = PGMPHYSHANDLER_GET_TYPE(pVM, pPhys)->CTX_SUFF(pfnHandler);
+            PFNPGMPHYSHANDLER pfnHandler = PGMPHYSHANDLER_GET_TYPE(pVM, pPhys)->CTX_SUFF(pfnHandler);
             void *pvUser = pPhys->CTX_SUFF(pvUser);
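For orientation, the return-code contract the handlers above rely on can be sketched from the caller's side: VINF_SUCCESS means the handler already performed (or swallowed) the access, while VINF_PGM_HANDLER_DO_DEFAULT asks PGM to carry out the default copy itself. The helper below is hypothetical and heavily simplified; the real dispatch code in PGMAllPhys.cpp (e.g. pgmPhysWriteHandler) also deals with locking, statistics and virtual handlers:

    /* Hypothetical, simplified write dispatch: pfnHandler/pvUser come from the
       registered handler type, pvDst is the ring-local mapping of the guest page. */
    static int exampleDispatchWrite(PVM pVM, PVMCPU pVCpu, PFNPGMPHYSHANDLER pfnHandler, void *pvUser,
                                    RTGCPHYS GCPhys, void *pvDst, void const *pvBuf, size_t cbBuf,
                                    PGMACCESSORIGIN enmOrigin)
    {
        int rc = pfnHandler(pVM, pVCpu, GCPhys, pvDst, (void *)pvBuf, cbBuf,
                            PGMACCESSTYPE_WRITE, enmOrigin, pvUser);
        if (rc == VINF_PGM_HANDLER_DO_DEFAULT)
        {
            memcpy(pvDst, pvBuf, cbBuf);    /* default action: let the write hit the page */
            rc = VINF_SUCCESS;
        }
        return rc;
    }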
trunk/src/VBox/VMM/VMMAll/PGMAllPool.cpp
r55903 → r55909

 static int pgmPoolTrackAddUser(PPGMPOOL pPool, PPGMPOOLPAGE pPage, uint16_t iUser, uint32_t iUserTable);
 static void pgmPoolMonitorModifiedRemove(PPGMPOOL pPool, PPGMPOOLPAGE pPage);
-#ifndef IN_RING3
-DECLEXPORT(FNPGMRZPHYSPFHANDLER) pgmPoolAccessPfHandler;
-#endif
 #if defined(LOG_ENABLED) || defined(VBOX_STRICT)
 static const char *pgmPoolPoolKindToStr(uint8_t enmKind);
…
  * @param   pVM         Pointer to the VM.
  * @param   pvDst       Destination address
- * @param   pvSrc       Source guest virtual address.
+ * @param   pvSrc       Pointer to the mapping of @a GCPhysSrc or NULL depending
+ *                      on the context (e.g. \#PF in R0 & RC).
  * @param   GCPhysSrc   The source guest physical address.
  * @param   cb          Size of data to read
  */
-DECLINLINE(int) pgmPoolPhysSimpleReadGCPhys(PVM pVM, void *pvDst, CTXTYPE(RTGCPTR, RTHCPTR, RTGCPTR) pvSrc,
-                                            RTGCPHYS GCPhysSrc, size_t cb)
+DECLINLINE(int) pgmPoolPhysSimpleReadGCPhys(PVM pVM, void *pvDst, void const *pvSrc, RTGCPHYS GCPhysSrc, size_t cb)
 {
 #if defined(IN_RING3)
…
  * @param   pPage       The head page.
  * @param   GCPhysFault The guest physical fault address.
- * @param   uAddress    In R0 and GC this is the guest context fault address (flat).
- *                      In R3 this is the host context 'fault' address.
+ * @param   pvAddress   Pointer to the mapping of @a GCPhysFault or NULL
+ *                      depending on the context (e.g. \#PF in R0 & RC).
  * @param   cbWrite     Write size; might be zero if the caller knows we're not crossing entry boundaries
  */
-void pgmPoolMonitorChainChanging(PVMCPU pVCpu, PPGMPOOL pPool, PPGMPOOLPAGE pPage, RTGCPHYS GCPhysFault,
-                                 CTXTYPE(RTGCPTR, RTHCPTR, RTGCPTR) pvAddress, unsigned cbWrite)
+static void pgmPoolMonitorChainChanging(PVMCPU pVCpu, PPGMPOOL pPool, PPGMPOOLPAGE pPage, RTGCPHYS GCPhysFault,
+                                        void const *pvAddress, unsigned cbWrite)
 {
     AssertMsg(pPage->iMonitoredPrev == NIL_PGMPOOL_IDX, ("%u (idx=%u)\n", pPage->iMonitoredPrev, pPage->idx));
…
                 {
                     X86PTEPAE GstPte;
-# ifdef IN_RING3
-                    int rc = pgmPoolPhysSimpleReadGCPhys(pVM, &GstPte, (RTHCPTR)((RTHCUINTPTR)pvAddress + sizeof(GstPte)), GCPhysFault + sizeof(GstPte), sizeof(GstPte));
-# else
-                    int rc = pgmPoolPhysSimpleReadGCPhys(pVM, &GstPte, pvAddress + sizeof(GstPte), GCPhysFault + sizeof(GstPte), sizeof(GstPte));
-# endif
+                    int rc = pgmPoolPhysSimpleReadGCPhys(pVM, &GstPte,
+                                                         pvAddress ? (uint8_t const *)pvAddress + sizeof(GstPte) : NULL,
+                                                         GCPhysFault + sizeof(GstPte), sizeof(GstPte));
                     AssertRC(rc);
                     Log4(("pgmPoolMonitorChainChanging pae: deref %016RX64 GCPhys %016RX64\n", PGMSHWPTEPAE_GET_HCPHYS(uShw.pPTPae->a[iShw2]), GstPte.u & X86_PTE_PAE_PG_MASK));
…
             }
         }
-#if 0 /* useful when running PGMAssertCR3(), a bit too troublesome for general use (TLBs). */
+#if 0 /* useful when running PGMAssertCR3(), a bit too troublesome for general use (TLBs). - not working any longer... */
         if (    uShw.pPD->a[iShw].n.u1Present
             &&  !VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_PGM_SYNC_CR3))
…
 #if defined(VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0) || defined(IN_RC)
     uint32_t iPrevSubset = PGMRZDynMapPushAutoSubset(pVCpu);
-    pgmPoolMonitorChainChanging(pVCpu, pPool, pPage, GCPhysFault, (RTGCPTR)pu32, uIncrement);
+    pgmPoolMonitorChainChanging(pVCpu, pPool, pPage, GCPhysFault, NULL, uIncrement);
     PGMRZDynMapPopAutoSubset(pVCpu, iPrevSubset);
 #else
-    pgmPoolMonitorChainChanging(pVCpu, pPool, pPage, GCPhysFault, (RTGCPTR)pu32, uIncrement);
+    pgmPoolMonitorChainChanging(pVCpu, pPool, pPage, GCPhysFault, NULL, uIncrement);
 #endif
 #ifdef IN_RC
…
     uint32_t cbWrite = DISGetParamSize(pDis, &pDis->Param1);
     if (cbWrite <= 8)
-        pgmPoolMonitorChainChanging(pVCpu, pPool, pPage, GCPhysFault, pvFault, cbWrite);
+        pgmPoolMonitorChainChanging(pVCpu, pPool, pPage, GCPhysFault, NULL, cbWrite);
     else
     {
         Assert(cbWrite <= 16);
-        pgmPoolMonitorChainChanging(pVCpu, pPool, pPage, GCPhysFault, pvFault, 8);
-        pgmPoolMonitorChainChanging(pVCpu, pPool, pPage, GCPhysFault + 8, pvFault + 8, cbWrite - 8);
+        pgmPoolMonitorChainChanging(pVCpu, pPool, pPage, GCPhysFault, NULL, 8);
+        pgmPoolMonitorChainChanging(pVCpu, pPool, pPage, GCPhysFault + 8, NULL, cbWrite - 8);
     }
…
 # endif /* !IN_RING3 */
+
+/**
+ * Access handler callback for PT write accesses.
+ *
+ * The handler can not raise any faults, it's mainly for monitoring write access
+ * to certain pages.
+ *
+ * @returns VINF_SUCCESS if the handler has carried out the operation.
+ * @returns VINF_PGM_HANDLER_DO_DEFAULT if the caller should carry out the access operation.
+ * @param   pVM             Pointer to the VM.
+ * @param   pVCpu           The cross context CPU structure for the calling EMT.
+ * @param   GCPhys          The physical address the guest is writing to.
+ * @param   pvPhys          The HC mapping of that address.
+ * @param   pvBuf           What the guest is reading/writing.
+ * @param   cbBuf           How much it's reading/writing.
+ * @param   enmAccessType   The access type.
+ * @param   enmOrigin       Who is making the access.
+ * @param   pvUser          User argument.
+ */
+PGM_ALL_CB2_DECL(int) pgmPoolAccessHandler(PVM pVM, PVMCPU pVCpu, RTGCPHYS GCPhys, void *pvPhys, void *pvBuf, size_t cbBuf,
+                                           PGMACCESSTYPE enmAccessType, PGMACCESSORIGIN enmOrigin, void *pvUser)
+{
+    PPGMPOOL        pPool = pVM->pgm.s.CTX_SUFF(pPool);
+    STAM_PROFILE_START(&pPool->StatMonitorR3, a);
+    PPGMPOOLPAGE    pPage = (PPGMPOOLPAGE)pvUser;
+    LogFlow(("PGM_ALL_CB_DECL: GCPhys=%RGp %p:{.Core=%RHp, .idx=%d, .GCPhys=%RGp, .enmType=%d}\n",
+             GCPhys, pPage, pPage->Core.Key, pPage->idx, pPage->GCPhys, pPage->enmKind));
+
+    NOREF(pvBuf); NOREF(enmAccessType); NOREF(enmOrigin);
+
+    /*
+     * We don't have to be very sophisticated about this since there are relativly few calls here.
+     * However, we must try our best to detect any non-cpu accesses (disk / networking).
+     */
+    pgmLock(pVM);
+    if (PHYS_PAGE_ADDRESS(GCPhys) != PHYS_PAGE_ADDRESS(pPage->GCPhys))
+    {
+        /* Pool page changed while we were waiting for the lock; ignore. */
+        Log(("CPU%d: PGM_ALL_CB_DECL pgm pool page for %RGp changed (to %RGp) while waiting!\n", pVCpu->idCpu, PHYS_PAGE_ADDRESS(GCPhys), PHYS_PAGE_ADDRESS(pPage->GCPhys)));
+        pgmUnlock(pVM);
+        return VINF_PGM_HANDLER_DO_DEFAULT;
+    }
+
+    Assert(pPage->enmKind != PGMPOOLKIND_FREE);
+    /** @todo we can do better than this now. */
+    if (    (   pPage->cModifications < 96   /* it's cheaper here. */
+             || pgmPoolIsPageLocked(pPage) )
+        &&  cbBuf <= 4)
+    {
+        /* Clear the shadow entry. */
+        if (!pPage->cModifications++)
+            pgmPoolMonitorModifiedInsert(pPool, pPage);
+        /** @todo r=bird: making unsafe assumption about not crossing entries here! */
+        pgmPoolMonitorChainChanging(pVCpu, pPool, pPage, GCPhys, pvBuf, 0 /* unknown write size */);
+        STAM_PROFILE_STOP(&pPool->StatMonitorR3, a);
+    }
+    else
+    {
+        pgmPoolMonitorChainFlush(pPool, pPage); /* ASSUME that VERR_PGM_POOL_CLEARED can be ignored here and that FFs will deal with it in due time. */
+        STAM_PROFILE_STOP_EX(&pPool->StatMonitorR3, &pPool->StatMonitorR3FlushPage, a);
+    }
+    pgmUnlock(pVM);
+    return VINF_PGM_HANDLER_DO_DEFAULT;
+}
+
+
 
 # ifdef PGMPOOL_WITH_OPTIMIZED_DIRTY_PT
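The signature changes above replace CTXTYPE(RTGCPTR, RTHCPTR, RTGCPTR) pvAddress/pvSrc with a plain void const * and make the pointer optional: the access-handler path passes the buffer it already has, while the #PF paths pass NULL. A small illustration of a callee honouring that convention, assuming the public PGMPhysSimpleReadGCPhys API for the fallback (the helper name is made up):

    /* Read cb bytes of guest data at GCPhysSrc, preferring a ring-local mapping
       or buffer when the caller supplied one, else going through the physical address. */
    static int exampleReadGuestData(PVM pVM, void *pvDst, void const *pvSrc, RTGCPHYS GCPhysSrc, size_t cb)
    {
        if (pvSrc)
        {
            memcpy(pvDst, pvSrc, cb);       /* fast path: caller provided the bytes */
            return VINF_SUCCESS;
        }
        return PGMPhysSimpleReadGCPhys(pVM, pvDst, GCPhysSrc, cb);  /* slow path: resolve via the GC physical address */
    }

This mirrors what pgmPoolPhysSimpleReadGCPhys does after this change, minus its ring-specific shortcuts.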
trunk/src/VBox/VMM/VMMR3/GIM.cpp
r55903 → r55909

 static DECLCALLBACK(int) gimR3Save(PVM pVM, PSSMHANDLE pSSM);
 static DECLCALLBACK(int) gimR3Load(PVM pVM, PSSMHANDLE pSSM, uint32_t uSSMVersion, uint32_t uPass);
-static FNPGMR3PHYSHANDLER gimR3Mmio2WriteHandler;
+static FNPGMPHYSHANDLER gimR3Mmio2WriteHandler;
trunk/src/VBox/VMM/VMMR3/IOM.cpp
r55896 → r55909

      */
     rc = PGMR3HandlerPhysicalTypeRegister(pVM, PGMPHYSHANDLERKIND_MMIO,
-                                          iomR3MmioHandler,
+                                          iomMmioHandler,
                                           NULL, "iomMmioPfHandler",
                                           NULL, "iomMmioPfHandler",
trunk/src/VBox/VMM/VMMR3/PGM.cpp
r55896 → r55909

     if (RT_SUCCESS(rc))
         rc = PGMR3HandlerPhysicalTypeRegister(pVM, PGMPHYSHANDLERKIND_WRITE,
-                                              pgmR3PhysRomWriteHandler,
+                                              pgmPhysRomWriteHandler,
                                               NULL, "pgmPhysRomWritePfHandler",
                                               NULL, "pgmPhysRomWritePfHandler",
trunk/src/VBox/VMM/VMMR3/PGMHandler.cpp
r55903 → r55909

  */
 VMMR3_INT_DECL(int) PGMR3HandlerPhysicalTypeRegisterEx(PVM pVM, PGMPHYSHANDLERKIND enmKind,
-                                                       PFNPGMR3PHYSHANDLER pfnHandlerR3,
+                                                       PFNPGMPHYSHANDLER pfnHandlerR3,
                                                        R0PTRTYPE(PFNPGMRZPHYSPFHANDLER) pfnPfHandlerR0,
                                                        RCPTRTYPE(PFNPGMRZPHYSPFHANDLER) pfnPfHandlerRC,
…
  */
 VMMR3DECL(int) PGMR3HandlerPhysicalTypeRegister(PVM pVM, PGMPHYSHANDLERKIND enmKind,
-                                                R3PTRTYPE(PFNPGMR3PHYSHANDLER) pfnHandlerR3,
+                                                R3PTRTYPE(PFNPGMPHYSHANDLER) pfnHandlerR3,
                                                 const char *pszModR0, const char *pszPfHandlerR0,
                                                 const char *pszModRC, const char *pszPfHandlerRC, const char *pszDesc,
trunk/src/VBox/VMM/VMMR3/PGMPhys.cpp
r55903 → r55909

 /**
- * \#PF Handler callback for ROM write accesses.
- *
- * @returns VINF_SUCCESS if the handler have carried out the operation.
- * @returns VINF_PGM_HANDLER_DO_DEFAULT if the caller should carry out the access operation.
- * @param   pVM             Pointer to the VM.
- * @param   pVCpu           The cross context CPU structure for the calling EMT.
- * @param   GCPhys          The physical address the guest is writing to.
- * @param   pvPhys          The HC mapping of that address.
- * @param   pvBuf           What the guest is reading/writing.
- * @param   cbBuf           How much it's reading/writing.
- * @param   enmAccessType   The access type.
- * @param   enmOrigin       Who is making the access.
- * @param   pvUser          User argument.
- */
-DECLCALLBACK(int) pgmR3PhysRomWriteHandler(PVM pVM, PVMCPU pVCpu, RTGCPHYS GCPhys, void *pvPhys, void *pvBuf, size_t cbBuf,
-                                           PGMACCESSTYPE enmAccessType, PGMACCESSORIGIN enmOrigin, void *pvUser)
-{
-    PPGMROMRANGE    pRom     = (PPGMROMRANGE)pvUser;
-    const uint32_t  iPage    = (GCPhys - pRom->GCPhys) >> PAGE_SHIFT;
-    Assert(iPage < (pRom->cb >> PAGE_SHIFT));
-    PPGMROMPAGE     pRomPage = &pRom->aPages[iPage];
-    Log5(("pgmR3PhysRomWriteHandler: %d %c %#08RGp %#04zx\n", pRomPage->enmProt, enmAccessType == PGMACCESSTYPE_READ ? 'R' : 'W', GCPhys, cbBuf));
-    NOREF(pVCpu); NOREF(pvPhys); NOREF(enmOrigin);
-
-    if (enmAccessType == PGMACCESSTYPE_READ)
-    {
-        switch (pRomPage->enmProt)
-        {
-            /*
-             * Take the default action.
-             */
-            case PGMROMPROT_READ_ROM_WRITE_IGNORE:
-            case PGMROMPROT_READ_RAM_WRITE_IGNORE:
-            case PGMROMPROT_READ_ROM_WRITE_RAM:
-            case PGMROMPROT_READ_RAM_WRITE_RAM:
-                return VINF_PGM_HANDLER_DO_DEFAULT;
-
-            default:
-                AssertMsgFailedReturn(("enmProt=%d iPage=%d GCPhys=%RGp\n",
-                                       pRom->aPages[iPage].enmProt, iPage, GCPhys),
-                                      VERR_IPE_NOT_REACHED_DEFAULT_CASE);
-        }
-    }
-    else
-    {
-        Assert(enmAccessType == PGMACCESSTYPE_WRITE);
-        switch (pRomPage->enmProt)
-        {
-            /*
-             * Ignore writes.
-             */
-            case PGMROMPROT_READ_ROM_WRITE_IGNORE:
-            case PGMROMPROT_READ_RAM_WRITE_IGNORE:
-                return VINF_SUCCESS;
-
-            /*
-             * Write to the RAM page.
-             */
-            case PGMROMPROT_READ_ROM_WRITE_RAM:
-            case PGMROMPROT_READ_RAM_WRITE_RAM: /* yes this will get here too, it's *way* simpler that way. */
-            {
-                /* This should be impossible now, pvPhys doesn't work cross page anylonger. */
-                Assert(((GCPhys - pRom->GCPhys + cbBuf - 1) >> PAGE_SHIFT) == iPage);
-
-                /*
-                 * Take the lock, do lazy allocation, map the page and copy the data.
-                 *
-                 * Note that we have to bypass the mapping TLB since it works on
-                 * guest physical addresses and entering the shadow page would
-                 * kind of screw things up...
-                 */
-                int rc = pgmLock(pVM);
-                AssertRC(rc);
-
-                PPGMPAGE pShadowPage = &pRomPage->Shadow;
-                if (!PGMROMPROT_IS_ROM(pRomPage->enmProt))
-                {
-                    pShadowPage = pgmPhysGetPage(pVM, GCPhys);
-                    AssertLogRelReturn(pShadowPage, VERR_PGM_PHYS_PAGE_GET_IPE);
-                }
-
-                void *pvDstPage;
-                rc = pgmPhysPageMakeWritableAndMap(pVM, pShadowPage, GCPhys & X86_PTE_PG_MASK, &pvDstPage);
-                if (RT_SUCCESS(rc))
-                {
-                    memcpy((uint8_t *)pvDstPage + (GCPhys & PAGE_OFFSET_MASK), pvBuf, cbBuf);
-                    pRomPage->LiveSave.fWrittenTo = true;
-                }
-
-                pgmUnlock(pVM);
-                return rc;
-            }
-
-            default:
-                AssertMsgFailedReturn(("enmProt=%d iPage=%d GCPhys=%RGp\n",
-                                       pRom->aPages[iPage].enmProt, iPage, GCPhys),
-                                      VERR_IPE_NOT_REACHED_DEFAULT_CASE);
-        }
-    }
-}
-
-
-/**
  * Called by PGMR3MemSetup to reset the shadow, switch to the virgin, and verify
  * that the virgin part is untouched.
trunk/src/VBox/VMM/VMMR3/PGMPool.cpp
r55903 → r55909

 *   Internal Functions                                                         *
 *******************************************************************************/
-static FNPGMR3PHYSHANDLER pgmR3PoolAccessHandler;
 #ifdef VBOX_WITH_DEBUGGER
 static FNDBGCCMD pgmR3PoolCmdCheck;
…
     pPool->hAccessHandlerType = NIL_PGMPHYSHANDLERTYPE;
     rc = PGMR3HandlerPhysicalTypeRegister(pVM, PGMPHYSHANDLERKIND_WRITE,
-                                          pgmR3PoolAccessHandler,
+                                          pgmPoolAccessHandler,
                                           NULL, "pgmPoolAccessPfHandler",
                                           NULL, "pgmPoolAccessPfHandler",
…
 
-
-
-/**
- * Worker used by pgmR3PoolAccessHandler when it's invoked by an async thread.
- *
- * @param   pPool   The pool.
- * @param   pPage   The page.
- */
-static DECLCALLBACK(void) pgmR3PoolFlushReusedPage(PPGMPOOL pPool, PPGMPOOLPAGE pPage)
-{
-    /* for the present this should be safe enough I think... */
-    pgmLock(pPool->pVMR3);
-    if (    pPage->fReusedFlushPending
-        &&  pPage->enmKind != PGMPOOLKIND_FREE)
-        pgmPoolFlushPage(pPool, pPage);
-    pgmUnlock(pPool->pVMR3);
-}
-
-
-/**
- * \#PF Handler callback for PT write accesses.
- *
- * The handler can not raise any faults, it's mainly for monitoring write access
- * to certain pages.
- *
- * @returns VINF_SUCCESS if the handler has carried out the operation.
- * @returns VINF_PGM_HANDLER_DO_DEFAULT if the caller should carry out the access operation.
- * @param   pVM             Pointer to the VM.
- * @param   pVCpu           The cross context CPU structure for the calling EMT.
- * @param   GCPhys          The physical address the guest is writing to.
- * @param   pvPhys          The HC mapping of that address.
- * @param   pvBuf           What the guest is reading/writing.
- * @param   cbBuf           How much it's reading/writing.
- * @param   enmAccessType   The access type.
- * @param   enmOrigin       Who is making the access.
- * @param   pvUser          User argument.
- */
-static DECLCALLBACK(int) pgmR3PoolAccessHandler(PVM pVM, PVMCPU pVCpu, RTGCPHYS GCPhys, void *pvPhys, void *pvBuf, size_t cbBuf,
-                                                PGMACCESSTYPE enmAccessType, PGMACCESSORIGIN enmOrigin, void *pvUser)
-{
-    STAM_PROFILE_START(&pVM->pgm.s.pPoolR3->StatMonitorR3, a);
-    PPGMPOOL pPool = pVM->pgm.s.pPoolR3;
-    PPGMPOOLPAGE pPage = (PPGMPOOLPAGE)pvUser;
-    LogFlow(("pgmR3PoolAccessHandler: GCPhys=%RGp %p:{.Core=%RHp, .idx=%d, .GCPhys=%RGp, .enmType=%d}\n",
-             GCPhys, pPage, pPage->Core.Key, pPage->idx, pPage->GCPhys, pPage->enmKind));
-
-    NOREF(pvBuf); NOREF(enmAccessType); NOREF(enmOrigin);
-
-    /*
-     * We don't have to be very sophisticated about this since there are relativly few calls here.
-     * However, we must try our best to detect any non-cpu accesses (disk / networking).
-     *
-     * Just to make life more interesting, we'll have to deal with the async threads too.
-     * We cannot flush a page if we're in an async thread because of REM notifications.
-     */
-    pgmLock(pVM);
-    if (PHYS_PAGE_ADDRESS(GCPhys) != PHYS_PAGE_ADDRESS(pPage->GCPhys))
-    {
-        /* Pool page changed while we were waiting for the lock; ignore. */
-        Log(("CPU%d: pgmR3PoolAccessHandler pgm pool page for %RGp changed (to %RGp) while waiting!\n", pVCpu->idCpu, PHYS_PAGE_ADDRESS(GCPhys), PHYS_PAGE_ADDRESS(pPage->GCPhys)));
-        pgmUnlock(pVM);
-        return VINF_PGM_HANDLER_DO_DEFAULT;
-    }
-
-    Assert(pPage->enmKind != PGMPOOLKIND_FREE);
-
-    /* @todo this code doesn't make any sense. remove the if (!pVCpu) block */
-    if (!pVCpu) /** @todo This shouldn't happen any longer, all access handlers will be called on an EMT. All ring-3 handlers, except MMIO, already own the PGM lock. @bugref{3170} */
-    {
-        Log(("pgmR3PoolAccessHandler: async thread, requesting EMT to flush the page: %p:{.Core=%RHp, .idx=%d, .GCPhys=%RGp, .enmType=%d}\n",
-             pPage, pPage->Core.Key, pPage->idx, pPage->GCPhys, pPage->enmKind));
-        STAM_COUNTER_INC(&pPool->StatMonitorR3Async);
-        if (!pPage->fReusedFlushPending)
-        {
-            pgmUnlock(pVM);
-            int rc = VMR3ReqCallVoidNoWait(pPool->pVMR3, VMCPUID_ANY, (PFNRT)pgmR3PoolFlushReusedPage, 2, pPool, pPage);
-            AssertRCReturn(rc, rc);
-            pgmLock(pVM);
-            pPage->fReusedFlushPending = true;
-            pPage->cModifications += 0x1000;
-        }
-
-        pgmPoolMonitorChainChanging(pVCpu, pPool, pPage, GCPhys, pvPhys, 0 /* unknown write size */);
-        /** @todo r=bird: making unsafe assumption about not crossing entries here! */
-        while (cbBuf > 4)
-        {
-            cbBuf -= 4;
-            pvPhys = (uint8_t *)pvPhys + 4;
-            GCPhys += 4;
-            pgmPoolMonitorChainChanging(pVCpu, pPool, pPage, GCPhys, pvPhys, 0 /* unknown write size */);
-        }
-        STAM_PROFILE_STOP(&pPool->StatMonitorR3, a);
-    }
-    else if (    (   pPage->cModifications < 96   /* it's cheaper here. */
-                  || pgmPoolIsPageLocked(pPage)
-                 )
-             &&  cbBuf <= 4)
-    {
-        /* Clear the shadow entry. */
-        if (!pPage->cModifications++)
-            pgmPoolMonitorModifiedInsert(pPool, pPage);
-        /** @todo r=bird: making unsafe assumption about not crossing entries here! */
-        pgmPoolMonitorChainChanging(pVCpu, pPool, pPage, GCPhys, pvPhys, 0 /* unknown write size */);
-        STAM_PROFILE_STOP(&pPool->StatMonitorR3, a);
-    }
-    else
-    {
-        pgmPoolMonitorChainFlush(pPool, pPage); /* ASSUME that VERR_PGM_POOL_CLEARED can be ignored here and that FFs will deal with it in due time. */
-        STAM_PROFILE_STOP_EX(&pPool->StatMonitorR3, &pPool->StatMonitorR3FlushPage, a);
-    }
-    pgmUnlock(pVM);
-    return VINF_PGM_HANDLER_DO_DEFAULT;
-}
-
-
 /**
  * Rendezvous callback used by pgmR3PoolClearAll that clears all shadow pages
trunk/src/VBox/VMM/include/IOMInternal.h
r55903 → r55909

 DECLEXPORT(FNPGMRZPHYSPFHANDLER)    iomMmioPfHandler;
 #endif
-#ifdef IN_RING3
-FNPGMR3PHYSHANDLER iomR3MmioHandler;
-#endif
+PGM_ALL_CB2_DECL(FNPGMPHYSHANDLER) iomMmioHandler;
 
 /* IOM locking helpers. */
trunk/src/VBox/VMM/include/PGMInternal.h
r55903 → r55909

     RTRCPTR                             RCPtrPadding;
     /** Pointer to R3 callback function. */
-    R3PTRTYPE(PFNPGMR3PHYSHANDLER)      pfnHandlerR3;
+    R3PTRTYPE(PFNPGMPHYSHANDLER)        pfnHandlerR3;
     /** Pointer to R0 callback function for \#PFs. */
     R0PTRTYPE(PFNPGMRZPHYSPFHANDLER)    pfnPfHandlerR0;
…
 int             pgmPhysGCPhys2CCPtrInternalReadOnly(PVM pVM, PPGMPAGE pPage, RTGCPHYS GCPhys, const void **ppv, PPGMPAGEMAPLOCK pLock);
 void            pgmPhysReleaseInternalPageMappingLock(PVM pVM, PPGMPAGEMAPLOCK pLock);
+PGM_ALL_CB2_DECL(FNPGMPHYSHANDLER) pgmPhysRomWriteHandler;
 #ifndef IN_RING3
 DECLEXPORT(FNPGMRZPHYSPFHANDLER) pgmPhysPfHandlerRedirectToHC;
…
 #ifdef IN_RING3
-FNPGMR3PHYSHANDLER pgmR3PhysRomWriteHandler;
 void            pgmR3PhysRelinkRamRanges(PVM pVM);
 int             pgmR3PhysRamPreAllocate(PVM pVM);
…
 uint16_t        pgmPoolTrackPhysExtAddref(PVM pVM, PPGMPAGE pPhysPage, uint16_t u16, uint16_t iShwPT, uint16_t iPte);
 void            pgmPoolTrackPhysExtDerefGCPhys(PPGMPOOL pPool, PPGMPOOLPAGE pPoolPage, PPGMPAGE pPhysPage, uint16_t iPte);
-void            pgmPoolMonitorChainChanging(PVMCPU pVCpu, PPGMPOOL pPool, PPGMPOOLPAGE pPage, RTGCPHYS GCPhysFault, CTXTYPE(RTGCPTR, RTHCPTR, RTGCPTR) pvAddress, unsigned cbWrite);
 int             pgmPoolMonitorChainFlush(PPGMPOOL pPool, PPGMPOOLPAGE pPage);
 void            pgmPoolMonitorModifiedInsert(PPGMPOOL pPool, PPGMPOOLPAGE pPage);
+PGM_ALL_CB2_DECL(FNPGMPHYSHANDLER) pgmPoolAccessHandler;
+#ifndef IN_RING3
+DECLEXPORT(FNPGMRZPHYSPFHANDLER)   pgmPoolAccessPfHandler;
+#endif
 
 void            pgmPoolAddDirtyPage(PVM pVM, PPGMPOOL pPool, PPGMPOOLPAGE pPage);