Changeset 55966 in vbox for trunk/src/VBox/VMM/VMMAll
- Timestamp: May 20, 2015 12:42:53 PM (10 years ago)
- Location: trunk/src/VBox/VMM/VMMAll
- Files: 5 edited
trunk/src/VBox/VMM/VMMAll/IOMAllMMIO.cpp
(diff from r55909 to r55966)

- iomMMIODoWrite() and iomMMIODoRead() now return VBOXSTRICTRC instead of int.
  The local status variable is renamed from rc to rcStrict and is returned
  unmodified instead of being squashed through VBOXSTRICTRC_TODO() /
  VBOXSTRICTRC_VAL():

    -static int iomMMIODoWrite(PVM pVM, PVMCPU pVCpu, PIOMMMIORANGE pRange, RTGCPHYS GCPhysFault, const void *pvData, unsigned cb)
    +static VBOXSTRICTRC iomMMIODoWrite(PVM pVM, PVMCPU pVCpu, PIOMMMIORANGE pRange, RTGCPHYS GCPhysFault,
    +                                   const void *pvData, unsigned cb)
     ...
    -    return VBOXSTRICTRC_TODO(rc);
    +    return rcStrict;

- The instruction interpreters (MOV, CMP, AND/OR/XOR, TEST, BT, XCHG and the
  string-instruction paths) still track a plain int status, so their calls to
  iomMMIODoRead()/iomMMIODoWrite() are now wrapped in VBOXSTRICTRC_TODO(),
  for example:

    -    int rc = iomMMIODoRead(pVM, pVCpu, pRange, GCPhysFault, &u64Data, cb);
    +    int rc = VBOXSTRICTRC_TODO(iomMMIODoRead(pVM, pVCpu, pRange, GCPhysFault, &u64Data, cb));

- iomMmioPfHandler() is now declared DECLEXPORT(VBOXSTRICTRC) and returns the
  result of iomMMIOHandler() directly rather than converting it with
  VBOXSTRICTRC_VAL().

- iomMmioHandler() is now declared PGM_ALL_CB2_DECL(VBOXSTRICTRC); the read or
  write is performed into a local VBOXSTRICTRC rcStrict, which is returned
  after releasing the MMIO range and the device critical section.
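The recurring pattern above is that the innermost MMIO wrappers now hand a strict status code straight up to IOM/PGM, and only call sites that still work with a plain int convert at the boundary. Below is a minimal, self-contained sketch of that boundary; the typedef, the status values and the VBOXSTRICTRC_TODO macro are simplified stand-ins for the real VBox definitions, and demoDoRead/demoLegacyCaller are hypothetical names.

    #include <cstdint>
    #include <cstdio>

    /* Simplified stand-ins for the real VBox definitions; the real VBOXSTRICTRC
       carries stricter checking in some builds and the real status values differ. */
    typedef int32_t VBOXSTRICTRC;
    static const VBOXSTRICTRC VINF_SUCCESS          = 0;
    static const VBOXSTRICTRC VINF_IOM_R3_MMIO_READ = 2003;       /* placeholder value */
    #define VBOXSTRICTRC_TODO(a_rcStrict) ((int)(a_rcStrict))     /* boundary-conversion stand-in */

    /* New-style wrapper: returns the strict status unmodified. */
    static VBOXSTRICTRC demoDoRead(uint64_t *pu64, bool fNeedRing3)
    {
        if (fNeedRing3)
            return VINF_IOM_R3_MMIO_READ;   /* informational: defer the access to ring-3 */
        *pu64 = 0x42;
        return VINF_SUCCESS;
    }

    /* Old-style caller: still tracks a plain int, so it converts at the call site. */
    static int demoLegacyCaller(bool fNeedRing3)
    {
        uint64_t u64 = 0;
        int rc = VBOXSTRICTRC_TODO(demoDoRead(&u64, fNeedRing3));
        if (rc == VINF_SUCCESS)
            std::printf("read value %#llx\n", (unsigned long long)u64);
        return rc;
    }

    int main()
    {
        return demoLegacyCaller(false) == VINF_SUCCESS ? 0 : 1;
    }
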
trunk/src/VBox/VMM/VMMAll/PGMAllBth.h
(diff from r55903 to r55966)

- In the access-handler dispatch code of the #PF (trap 0e) handler template,
  the local status is now declared as VBOXSTRICTRC rcStrict instead of int rc.
  All assignments from PGM_BTH_NAME(SyncPage)(), from the physical handler's
  pfnPfHandler callback and from the virtual handler's pfnPfHandler callback,
  the RT_FAILURE()/VINF_PGM_SYNCPAGE_MODIFIED_PDE checks, the
  VINF_EM_RAW_EMULATE_INSTR fallbacks and the return statements were renamed
  accordingly.
- The "check by physical address" lookup now uses the new calling convention
  of pgmHandlerVirtualFindByPhysAddr():

    -    rc = pgmHandlerVirtualFindByPhysAddr(pVM, pGstWalk->Core.GCPhys, &pCur, &iPage);
    -    Assert(RT_SUCCESS(rc) || !pCur);
    +    pCur = pgmHandlerVirtualFindByPhysAddr(pVM, pGstWalk->Core.GCPhys, &iPage);

- The fallback for writes to an unhandled part of a monitored page keeps the
  strict status of PGMInterpretInstruction() instead of flattening it:

    -    rc = VBOXSTRICTRC_TODO(PGMInterpretInstruction(pVM, pVCpu, pRegFrame, pvFault));
    -    LogFlow(("PGM: PGMInterpretInstruction -> rc=%d pPage=%R[pgmpage]\n", rc, pPage));
    +    rcStrict = PGMInterpretInstruction(pVM, pVCpu, pRegFrame, pvFault);
    +    LogFlow(("PGM: PGMInterpretInstruction -> rcStrict=%d pPage=%R[pgmpage]\n", VBOXSTRICTRC_VAL(rcStrict), pPage));
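The rename is mechanical, but it only works because the registered #PF callbacks (pfnPfHandler) themselves now return a strict status, so the dispatcher can hand it back unchanged. A self-contained sketch of that dispatch shape, using hypothetical names (DemoHandlerType, demoTrap0eDispatch, demoPfCallback) and placeholder status values rather than the real VBox ones:

    #include <cstdint>

    /* Simplified stand-ins; values and types are placeholders, not the VBox ones. */
    typedef int32_t VBOXSTRICTRC;
    static const VBOXSTRICTRC VINF_SUCCESS              = 0;
    static const VBOXSTRICTRC VINF_EM_RAW_EMULATE_INSTR = 1025;   /* placeholder value */

    /* Hypothetical handler-type descriptor: the #PF callback returns the strict
       status itself, so the dispatcher has nothing to convert. */
    struct DemoHandlerType
    {
        VBOXSTRICTRC (*pfnPfHandler)(void *pvUser);
    };

    static VBOXSTRICTRC demoTrap0eDispatch(const DemoHandlerType *pType, void *pvUser)
    {
        VBOXSTRICTRC rcStrict;
        if (pType && pType->pfnPfHandler)
            rcStrict = pType->pfnPfHandler(pvUser);   /* propagate whatever the handler returned */
        else
            rcStrict = VINF_EM_RAW_EMULATE_INSTR;     /* no callback registered: fall back to emulation */
        return rcStrict;
    }

    static VBOXSTRICTRC demoPfCallback(void * /*pvUser*/)
    {
        return VINF_SUCCESS;                          /* pretend the fault was handled */
    }

    int main()
    {
        DemoHandlerType const Type = { &demoPfCallback };
        return demoTrap0eDispatch(&Type, nullptr) == VINF_SUCCESS ? 0 : 1;
    }
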
trunk/src/VBox/VMM/VMMAll/PGMAllHandler.cpp
(diff from r55889 to r55966)

- The comment on the PGMPHYSHANDLERKIND_MMIO / PGMPHYSHANDLERKIND_ALL alignment
  check now reads "Simplification for PGMPhysRead, PGMR0Trap0eHandlerNPMisconfig
  and others: Full pages."
- pgmHandlerVirtualFindByPhysAddr() changed its calling convention. It used to
  return a VBox status code (VINF_SUCCESS or VERR_PGM_HANDLER_NOT_FOUND) and
  pass the handler back through a ppVirt out parameter; it now returns the
  handler pointer directly, or NULL if no virtual handler covers the address:

    -int pgmHandlerVirtualFindByPhysAddr(PVM pVM, RTGCPHYS GCPhys, PPGMVIRTHANDLER *ppVirt, unsigned *piPage)
    +PPGMVIRTHANDLER pgmHandlerVirtualFindByPhysAddr(PVM pVM, RTGCPHYS GCPhys, unsigned *piPage)

  The function documentation, the match and no-match code paths and the LogFlow
  statement were updated to match; *piPage still receives the index of the
  cached physical page.
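The new shape drops a status code that carried no more information than the pointer itself. A small, self-contained sketch of the old versus new calling convention; demoFindByPhysAddr, DemoVirtHandler and the std::map lookup are hypothetical stand-ins for the real phys-to-virt AVL tree:

    #include <cstdint>
    #include <map>

    struct DemoVirtHandler
    {
        uint64_t GCPtrKey;   /* first guest virtual address covered by the handler */
    };

    /* Hypothetical lookup table standing in for PGM's phys-to-virt AVL tree. */
    static std::map<uint64_t, DemoVirtHandler> g_Handlers;

    /* New convention: return the handler (or nullptr) and report the cached page
       index through the single remaining out parameter. */
    static DemoVirtHandler *demoFindByPhysAddr(uint64_t GCPhys, unsigned *piPage)
    {
        auto it = g_Handlers.find(GCPhys & ~static_cast<uint64_t>(0xfff));  /* page-align the key */
        if (it == g_Handlers.end())
            return nullptr;      /* previously VERR_PGM_HANDLER_NOT_FOUND with *ppVirt = NULL */
        *piPage = 0;             /* index of the matching page within the handler */
        return &it->second;      /* previously VINF_SUCCESS with *ppVirt set */
    }

    int main()
    {
        g_Handlers[0x1000] = DemoVirtHandler{ 0x80001000u };

        unsigned iPage = 0;
        /* The old convention needed two checks (status code and pointer); the new one needs one: */
        DemoVirtHandler *pVirt = demoFindByPhysAddr(0x1234, &iPage);
        return pVirt != nullptr ? 0 : 1;
    }
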
trunk/src/VBox/VMM/VMMAll/PGMAllPhys.cpp
(diff from r55909 to r55966)

- pgmPhysPfHandlerRedirectToHC() and pgmPhysRomWritePfHandler() are now declared
  with a VBOXSTRICTRC return type (VMMDECL / DECLEXPORT), and
  pgmPhysRomWriteHandler() is now declared PGM_ALL_CB2_DECL(VBOXSTRICTRC).
- pgmPhysReadHandler() returns VBOXSTRICTRC. Its documentation now says
  "Strict VBox status code in ring-0 and raw-mode, ignorable in ring-3" and
  notes that VERR_PGM_PHYS_WR_HIT_HANDLER is never returned in R3 or with
  PGMACCESSORIGIN_IEM. The local status is renamed rc -> rcStrict, the
  physical handler's status is asserted to be VINF_SUCCESS or
  VINF_PGM_HANDLER_DO_DEFAULT, the virtual handler lookup uses the new
  pointer-returning pgmHandlerVirtualFindByPhysAddr(), and the virtual
  handler's status (rcStrict2) is merged into rcStrict instead of being
  flattened to VINF_SUCCESS. The VINF_PGM_HANDLER_DO_DEFAULT case performs the
  memcpy and then resolves the status to VINF_SUCCESS before returning.
- PGMPhysRead() was restructured so that pages without ALL/MMIO access handlers
  take the plain-copy path first; pages with such handlers call
  pgmPhysReadHandler() and, if the strict status is anything other than
  VINF_SUCCESS, the function unlocks PGM and returns VBOXSTRICTRC_TODO(rcStrict).
- pgmPhysWriteHandler() likewise returns VBOXSTRICTRC and renames rc -> rcStrict
  throughout (the virtual handler variables are also renamed pCur -> pVirt and
  pCurType -> pVirtType). VINF_PGM_HANDLER_DO_DEFAULT is resolved by copying
  the data and setting VINF_SUCCESS; any other non-VINF_SUCCESS status now makes
  the function release the page mapping lock and return that status instead of
  unconditionally returning VINF_SUCCESS. Where both a physical and a virtual
  handler cover the range, the two strict statuses are merged with the same
  rcStrict/rcStrict2 rule as in the read path.
- PGMPhysWrite() mirrors the read-side restructuring: normal pages are copied
  directly, while pages with active WRITE/ALL handlers go through
  pgmPhysWriteHandler() and bail out with VBOXSTRICTRC_TODO(rcStrict) when the
  status is not VINF_SUCCESS.
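Several spots above now have to combine the status of a physical handler with that of a virtual handler covering the same page. The sketch below is a self-contained rendering of the merge rule as it appears in this changeset; the status constants are placeholders rather than the real VBox values, and demoMergeStatus is a hypothetical helper (the changeset inlines this logic, running it under an AssertLogRelMsgFailed when the second status is unexpected).

    #include <cassert>
    #include <cstdint>

    /* Simplified stand-ins for the real status codes (values are placeholders). */
    typedef int32_t VBOXSTRICTRC;
    static const VBOXSTRICTRC VINF_SUCCESS                = 0;
    static const VBOXSTRICTRC VINF_PGM_HANDLER_DO_DEFAULT = 1000; /* placeholder value */

    /* Merge the second handler's status into the first:
         - VINF_SUCCESS from the second handler keeps the first status, resolving a
           pending DO_DEFAULT to plain success;
         - DO_DEFAULT from the second handler changes nothing;
         - anything else replaces the first status if that was success/DO_DEFAULT,
           or if the new code is numerically smaller (which the changeset treats as
           taking precedence). */
    static VBOXSTRICTRC demoMergeStatus(VBOXSTRICTRC rcStrict, VBOXSTRICTRC rcStrict2)
    {
        if (rcStrict2 == VINF_SUCCESS)
            rcStrict = rcStrict == VINF_PGM_HANDLER_DO_DEFAULT ? VINF_SUCCESS : rcStrict;
        else if (rcStrict2 != VINF_PGM_HANDLER_DO_DEFAULT)
        {
            if (rcStrict == VINF_SUCCESS || rcStrict == VINF_PGM_HANDLER_DO_DEFAULT || rcStrict2 < rcStrict)
                rcStrict = rcStrict2;
        }
        return rcStrict;
    }

    int main()
    {
        assert(demoMergeStatus(VINF_PGM_HANDLER_DO_DEFAULT, VINF_SUCCESS) == VINF_SUCCESS);
        assert(demoMergeStatus(VINF_SUCCESS, 2620 /* some informational code */) == 2620);
        assert(demoMergeStatus(2620, VINF_PGM_HANDLER_DO_DEFAULT) == 2620);
        return 0;
    }
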
trunk/src/VBox/VMM/VMMAll/PGMAllPool.cpp
(diff from r55910 to r55966)

- pgmPoolAccessPfHandler() is now declared DECLEXPORT(VBOXSTRICTRC) instead of
  DECLEXPORT(int).
- pgmPoolAccessHandler() is now declared PGM_ALL_CB2_DECL(VBOXSTRICTRC) instead
  of PGM_ALL_CB2_DECL(int); the parameter lists and the bodies are otherwise
  unchanged in the hunks shown.