Changeset 92162 in vbox for trunk/src/VBox

Timestamp:
    Oct 31, 2021, 11:34:31 PM
svn:sync-xref-src-repo-rev:
    147985
Location:
    trunk/src/VBox
Files:
    13 edited
Legend:
    (unprefixed)  Unmodified
    +             Added
    -             Removed
trunk/src/VBox/Devices/Graphics/DevVGA.cpp (r91930 -> r92162)

@@ -309 +309 @@
 {
     AssertMsg(offVRAM < pThis->vram_size, ("offVRAM = %p, pThis->vram_size = %p\n", offVRAM, pThis->vram_size));
-    ASMBitSet(&pThis->au32DirtyBitmap[0], offVRAM >> PAGE_SHIFT);
+    ASMBitSet(&pThis->bmDirtyBitmap[0], offVRAM >> PAGE_SHIFT);
     pThis->fHasDirtyBits = true;
 }

@@ -324 +324 @@
 {
     AssertMsg(offVRAM < pThis->vram_size, ("offVRAM = %p, pThis->vram_size = %p\n", offVRAM, pThis->vram_size));
-    return ASMBitTest(&pThis->au32DirtyBitmap[0], offVRAM >> PAGE_SHIFT);
+    return ASMBitTest(&pThis->bmDirtyBitmap[0], offVRAM >> PAGE_SHIFT);
 }

 #ifdef IN_RING3

+
 /**
  * Reset dirty flags in a give range.

@@ -340 +341 @@
     Assert(offVRAMEnd <= pThis->vram_size);
     Assert(offVRAMStart < offVRAMEnd);
-    ASMBitClearRange(&pThis->au32DirtyBitmap[0], offVRAMStart >> PAGE_SHIFT, offVRAMEnd >> PAGE_SHIFT);
-}
+    ASMBitClearRange(&pThis->bmDirtyBitmap[0], offVRAMStart >> PAGE_SHIFT, offVRAMEnd >> PAGE_SHIFT);
+}
+
+
+/**
+ * Queries the VRAM dirty bits and resets the monitoring.
+ */
+static void vgaR3UpdateDirtyBitsAndResetMonitoring(PPDMDEVINS pDevIns, PVGASTATE pThis)
+{
+    size_t const cbBitmap = RT_ALIGN_Z(RT_MIN(pThis->vram_size, VGA_VRAM_MAX), PAGE_SIZE * 64) / PAGE_SIZE / 8;
+
+    /*
+     * If we don't have any dirty bits from MMIO accesses, we can just query
+     * straight into the dirty buffer.
+     */
+    if (!pThis->fHasDirtyBits)
+    {
+        int rc = PDMDevHlpMmio2QueryAndResetDirtyBitmap(pDevIns, pThis->hMmio2VRam, pThis->bmDirtyBitmap, cbBitmap);
+        AssertRC(rc);
+    }
+    /*
+     * Otherwise we'll have to query and merge the two.
+     */
+    else
+    {
+        uint64_t bmDirtyPages[VGA_VRAM_MAX / PAGE_SIZE / 64]; /* (256 MB VRAM -> 8KB bitmap) */
+        int rc = PDMDevHlpMmio2QueryAndResetDirtyBitmap(pDevIns, pThis->hMmio2VRam, bmDirtyPages, cbBitmap);
+        if (RT_SUCCESS(rc))
+        {
+            /** @todo could use ORPS or VORPS here, I think. */
+            uint64_t      *pbmDst = pThis->bmDirtyBitmap;
+            size_t const   cTodo  = cbBitmap / sizeof(uint64_t);
+
+            /* Do 64 bytes at a time first. */
+            size_t const cTodoFirst = cTodo & ~(size_t)7;
+            size_t       idx;
+            for (idx = 0; idx < cTodoFirst; idx += 8)
+            {
+                pbmDst[idx + 0] |= bmDirtyPages[idx + 0];
+                pbmDst[idx + 1] |= bmDirtyPages[idx + 1];
+                pbmDst[idx + 2] |= bmDirtyPages[idx + 2];
+                pbmDst[idx + 3] |= bmDirtyPages[idx + 3];
+                pbmDst[idx + 4] |= bmDirtyPages[idx + 4];
+                pbmDst[idx + 5] |= bmDirtyPages[idx + 5];
+                pbmDst[idx + 6] |= bmDirtyPages[idx + 6];
+                pbmDst[idx + 7] |= bmDirtyPages[idx + 7];
+            }
+
+            /* Then do a mopup of anything remaining. */
+            switch (cTodo - idx)
+            {
+                case 7: pbmDst[idx + 6] |= bmDirtyPages[idx + 6]; RT_FALL_THRU();
+                case 6: pbmDst[idx + 5] |= bmDirtyPages[idx + 5]; RT_FALL_THRU();
+                case 5: pbmDst[idx + 4] |= bmDirtyPages[idx + 4]; RT_FALL_THRU();
+                case 4: pbmDst[idx + 3] |= bmDirtyPages[idx + 3]; RT_FALL_THRU();
+                case 3: pbmDst[idx + 2] |= bmDirtyPages[idx + 2]; RT_FALL_THRU();
+                case 2: pbmDst[idx + 1] |= bmDirtyPages[idx + 1]; RT_FALL_THRU();
+                case 1: pbmDst[idx]     |= bmDirtyPages[idx];     break;
+                case 0: break;
+                default: AssertFailedBreak();
+            }
+
+            pThis->fHasDirtyBits = false;
+        }
+    }
+}
+

 #endif /* IN_RING3 */
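The unrolled loop and the mop-up switch above are a hand-optimized form of a plain word-wise OR; functionally they are equivalent to this naive loop (an illustrative restatement, not part of the changeset):

    /* Merge the freshly queried bits into the sticky bits left by vgaR3MarkDirty(). */
    for (size_t i = 0; i < cbBitmap / sizeof(uint64_t); i++)
        pThis->bmDirtyBitmap[i] |= bmDirtyPages[i];

For scale: with VGA_VRAM_MAX at 256 MiB and 4 KiB pages the bitmap is 256 MiB / 4 KiB = 65536 bits = 1024 uint64_t words = 8 KiB (the "(256 MB VRAM -> 8KB bitmap)" comment), so the unrolled loop runs at most 128 iterations before the mop-up.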
@@ -2480 +2546 @@
     /* round up page_max by one page, as otherwise this can be -PAGE_SIZE,
      * which causes assertion trouble in vgaR3ResetDirty. */
-    page_max = ( pThis->start_addr * 4 + pThis->line_offset * pThis->last_scr_height
-               - 1 + PAGE_SIZE) & ~PAGE_OFFSET_MASK;
+    page_max = (pThis->start_addr * 4 + pThis->line_offset * pThis->last_scr_height - 1 + PAGE_SIZE) & ~PAGE_OFFSET_MASK;
     vgaR3ResetDirty(pThis, page_min, page_max + PAGE_SIZE);
 }

@@ -2518 +2583 @@
 /**
- * Worker for vgaR3PortUpdateDisplay(), vboxR3UpdateDisplayAllInternal() and
+ * Worker for vgaR3PortUpdateDisplay(), vgaR3UpdateDisplayAllInternal() and
  * vgaR3PortTakeScreenshot().
  */

@@ -3616 +3681 @@

-/**
- * Handle LFB access.
- *
- * @returns Strict VBox status code.
- * @param   pVM         VM handle.
- * @param   pDevIns     The device instance.
- * @param   pThis       The shared VGA instance data.
- * @param   GCPhys      The access physical address.
- * @param   GCPtr       The access virtual address (only GC).
- */
-static VBOXSTRICTRC vgaLFBAccess(PVMCC pVM, PPDMDEVINS pDevIns, PVGASTATE pThis, RTGCPHYS GCPhys, RTGCPTR GCPtr)
-{
-    RT_NOREF(pVM);
-
-    VBOXSTRICTRC rc = PDMDevHlpCritSectEnter(pDevIns, &pThis->CritSect, VINF_EM_RAW_EMULATE_INSTR);
-    if (rc == VINF_SUCCESS)
-    {
-        /*
-         * Set page dirty bit.
-         */
-        vgaR3MarkDirty(pThis, GCPhys - pThis->GCPhysVRAM);
-        pThis->fLFBUpdated = true;
-
-        /*
-         * Turn of the write handler for this particular page and make it R/W.
-         * Then return telling the caller to restart the guest instruction.
-         * ASSUME: the guest always maps video memory RW.
-         */
-        rc = PDMDevHlpPGMHandlerPhysicalPageTempOff(pDevIns, pThis->GCPhysVRAM, GCPhys);
-        if (RT_SUCCESS(rc))
-        {
-#ifndef IN_RING3
-            rc = PGMShwMakePageWritable(PDMDevHlpGetVMCPU(pDevIns), GCPtr,
-                                        PGM_MK_PG_IS_MMIO2 | PGM_MK_PG_IS_WRITE_FAULT);
-            PDMDevHlpCritSectLeave(pDevIns, &pThis->CritSect);
-            AssertMsgReturn(    rc == VINF_SUCCESS
-                            /* In the SMP case the page table might be removed while we wait for the PGM lock in the trap handler. */
-                            ||  rc == VERR_PAGE_TABLE_NOT_PRESENT
-                            ||  rc == VERR_PAGE_NOT_PRESENT,
-                            ("PGMShwModifyPage -> GCPtr=%RGv rc=%d\n", GCPtr, VBOXSTRICTRC_VAL(rc)),
-                            rc);
-#else  /* IN_RING3 - We don't have any virtual page address of the access here. */
-            PDMDevHlpCritSectLeave(pDevIns, &pThis->CritSect);
-            Assert(GCPtr == 0);
-            RT_NOREF1(GCPtr);
-#endif
-            return VINF_SUCCESS;
-        }
-
-        PDMDevHlpCritSectLeave(pDevIns, &pThis->CritSect);
-        AssertMsgFailed(("PGMHandlerPhysicalPageTempOff -> rc=%d\n", VBOXSTRICTRC_VAL(rc)));
-    }
-    return rc;
-}
-
-
-#ifndef IN_RING3
-/**
- * @callback_method_impl{FNPGMRCPHYSHANDLER, \#PF Handler for VBE LFB access.}
- */
-PDMBOTHCBDECL(VBOXSTRICTRC) vgaLbfAccessPfHandler(PVMCC pVM, PVMCPUCC pVCpu, RTGCUINT uErrorCode, PCPUMCTXCORE pRegFrame,
-                                                  RTGCPTR pvFault, RTGCPHYS GCPhysFault, void *pvUser)
-{
-    PPDMDEVINS  pDevIns = (PPDMDEVINS)pvUser;
-    PVGASTATE   pThis   = PDMDEVINS_2_DATA(pDevIns, PVGASTATE);
-    //PVGASTATECC pThisCC = PDMDEVINS_2_DATA_CC(pDevIns, PVGASTATECC);
-    Assert(GCPhysFault >= pThis->GCPhysVRAM);
-    AssertMsg(uErrorCode & X86_TRAP_PF_RW, ("uErrorCode=%#x\n", uErrorCode));
-    RT_NOREF3(pVCpu, pRegFrame, uErrorCode);
-
-    return vgaLFBAccess(pVM, pDevIns, pThis, GCPhysFault, pvFault);
-}
-#endif /* !IN_RING3 */
-
-
-/**
- * @callback_method_impl{FNPGMPHYSHANDLER,
- *      VBE LFB write access handler for the dirty tracking.}
- */
-PGM_ALL_CB_DECL(VBOXSTRICTRC) vgaLFBAccessHandler(PVMCC pVM, PVMCPUCC pVCpu, RTGCPHYS GCPhys, void *pvPhys,
-                                                  void *pvBuf, size_t cbBuf, PGMACCESSTYPE enmAccessType,
-                                                  PGMACCESSORIGIN enmOrigin, void *pvUser)
-{
-    PPDMDEVINS  pDevIns = (PPDMDEVINS)pvUser;
-    PVGASTATE   pThis   = PDMDEVINS_2_DATA(pDevIns, PVGASTATE);
-    //PVGASTATECC pThisCC = PDMDEVINS_2_DATA_CC(pDevIns, PVGASTATECC);
-    Assert(GCPhys >= pThis->GCPhysVRAM);
-    RT_NOREF(pVCpu, pvPhys, pvBuf, cbBuf, enmAccessType, enmOrigin);
-
-    VBOXSTRICTRC rc = vgaLFBAccess(pVM, pDevIns, pThis, GCPhys, 0);
-    if (rc == VINF_SUCCESS)
-        rc = VINF_PGM_HANDLER_DO_DEFAULT;
-#ifdef IN_RING3
-    else
-        AssertMsg(rc < VINF_SUCCESS, ("rc=%Rrc\n", VBOXSTRICTRC_VAL(rc)));
-#endif
-    return rc;
-}
-
-
 /* -=-=-=-=-=- All rings: VGA BIOS I/Os -=-=-=-=-=- */

@@ -4785 +4750 @@
     STAM_COUNTER_INC(&pThis->StatUpdateDisp);
-    if (pThis->fHasDirtyBits && pThis->GCPhysVRAM && pThis->GCPhysVRAM != NIL_RTGCPHYS)
-    {
-        PDMDevHlpPGMHandlerPhysicalReset(pDevIns, pThis->GCPhysVRAM);
-        pThis->fHasDirtyBits = false;
-    }
+
+    if (pThis->GCPhysVRAM != 0 && pThis->GCPhysVRAM != NIL_RTGCPHYS)
+        vgaR3UpdateDirtyBitsAndResetMonitoring(pDevIns, pThis);
+
     if (pThis->fRemappedVGA)
     {

@@ -4806 +4770 @@
  * Internal vgaR3PortUpdateDisplayAll worker called under pThis->CritSect.
  */
-/** @todo Why the 'vboxR3' prefix? */
-static int vboxR3UpdateDisplayAllInternal(PPDMDEVINS pDevIns, PVGASTATE pThis, PVGASTATECC pThisCC, bool fFailOnResize)
+static int vgaR3UpdateDisplayAllInternal(PPDMDEVINS pDevIns, PVGASTATE pThis, PVGASTATECC pThisCC, bool fFailOnResize)
 {
 # ifdef VBOX_WITH_VMSVGA
 …
 # endif
     {
-        /* The dirty bits array has been just cleared, reset handlers as well. */
-        if (pThis->GCPhysVRAM && pThis->GCPhysVRAM != NIL_RTGCPHYS)
-            PDMDevHlpPGMHandlerPhysicalReset(pDevIns, pThis->GCPhysVRAM);
-    }
+        /* Update the dirty bits. */
+        if (pThis->GCPhysVRAM != 0 && pThis->GCPhysVRAM != NIL_RTGCPHYS)
+            vgaR3UpdateDirtyBitsAndResetMonitoring(pDevIns, pThis);
+    }
+
     if (pThis->fRemappedVGA)
     {
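After this rework, every display-update entry point funnels through vgaR3UpdateDirtyBitsAndResetMonitoring(), and bmDirtyBitmap then holds one bit per dirty VRAM page for the consumer to walk. A minimal sketch of such a consumer (the function name is illustrative; DevVGA's real update loop keeps its existing per-page dirty checks):

    static void vgaR3SketchRedrawDirty(PVGASTATE pThis)
    {
        uint32_t const cPages = pThis->vram_size >> PAGE_SHIFT;
        for (uint32_t iPage = 0; iPage < cPages; iPage++)
            if (ASMBitTest(&pThis->bmDirtyBitmap[0], iPage))
            {
                /* offVRAM = (RTGCPHYS)iPage << PAGE_SHIFT: map the page back
                   to the scanlines it backs and repaint them. */
            }
    }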
@@ -4849 +4813 @@
     AssertRCReturn(rc, rc);

-    rc = vboxR3UpdateDisplayAllInternal(pDevIns, pThis, pThisCC, fFailOnResize);
+    rc = vgaR3UpdateDisplayAllInternal(pDevIns, pThis, pThisCC, fFailOnResize);

     PDMDevHlpCritSectLeave(pDevIns, &pThis->CritSect);

@@ -4926 +4890 @@
     /*
      * Get screenshot. This function will fail if a resize is required.
-     * So there is not need to do a 'vboxR3UpdateDisplayAllInternal' before taking screenshot.
+     * So there is not need to do a 'vgaR3UpdateDisplayAllInternal' before taking screenshot.
      */

@@ -5500 +5464 @@
 int vgaR3RegisterVRAMHandler(PPDMDEVINS pDevIns, PVGASTATE pThis, uint64_t cbFrameBuffer)
 {
-    Assert(pThis->GCPhysVRAM);
-    int rc = PDMDevHlpPGMHandlerPhysicalRegister(pDevIns,
-                                                 pThis->GCPhysVRAM, pThis->GCPhysVRAM + (cbFrameBuffer - 1),
-                                                 pThis->hLfbAccessHandlerType, pDevIns, pDevIns->pDevInsR0RemoveMe,
-                                                 pDevIns->pDevInsForRC, "VGA LFB");
-
+    Assert(pThis->GCPhysVRAM != 0 && pThis->GCPhysVRAM != NIL_RTGCPHYS);
+    int rc = PDMDevHlpMmio2ControlDirtyPageTracking(pDevIns, pThis->hMmio2VRam, true /*fEnabled*/);
+    RT_NOREF(cbFrameBuffer);
     AssertRC(rc);
     return rc;
 }

@@ -5516 +5477 @@
 int vgaR3UnregisterVRAMHandler(PPDMDEVINS pDevIns, PVGASTATE pThis)
 {
-    Assert(pThis->GCPhysVRAM);
-    int rc = PDMDevHlpPGMHandlerPhysicalDeregister(pDevIns, pThis->GCPhysVRAM);
+    Assert(pThis->GCPhysVRAM != 0 && pThis->GCPhysVRAM != NIL_RTGCPHYS);
+    int rc = PDMDevHlpMmio2ControlDirtyPageTracking(pDevIns, pThis->hMmio2VRam, false /*fEnabled*/);
     AssertRC(rc);
     return rc;
 }

@@ -5557 +5518 @@
     {
         /*
-         * Mapping the VRAM.
+         * Make sure the dirty page tracking state is up to date before mapping it.
+         */
+# ifdef VBOX_WITH_VMSVGA
+        rc = PDMDevHlpMmio2ControlDirtyPageTracking(pDevIns, pThis->hMmio2VRam,
+                                                    !pThis->svga.fEnabled || (pThis->svga.fEnabled && pThis->svga.fVRAMTracking));
+# else
+        rc = PDMDevHlpMmio2ControlDirtyPageTracking(pDevIns, pThis->hMmio2VRam, true /*fEnabled*/);
+# endif
+        AssertLogRelRC(rc);
+
+        /*
+         * Map the VRAM.
          */
         rc = PDMDevHlpMmio2Map(pDevIns, pThis->hMmio2VRam, GCPhysAddress);
 …
         if (RT_SUCCESS(rc))
         {
-# ifdef VBOX_WITH_VMSVGA
-            if (   !pThis->svga.fEnabled
-                || (   pThis->svga.fEnabled
-                    && pThis->svga.fVRAMTracking))
-# endif
-            {
-                rc = PDMDevHlpPGMHandlerPhysicalRegister(pDevIns, GCPhysAddress, GCPhysAddress + (pThis->vram_size - 1),
-                                                         pThis->hLfbAccessHandlerType, pDevIns, pDevIns->pDevInsR0RemoveMe,
-                                                         pDevIns->pDevInsForRC, "VGA LFB");
-                AssertLogRelRC(rc);
-            }
-
             pThis->GCPhysVRAM = GCPhysAddress;
             pThis->vbe_regs[VBE_DISPI_INDEX_FB_BASE_HI] = GCPhysAddress >> 16;

@@ -5587 +5545 @@
         /*
          * Unmapping of the VRAM in progress (caller will do that).
-         * Deregister the access handler so PGM doesn't get upset.
          */
         Assert(pThis->GCPhysVRAM);
-# ifdef VBOX_WITH_VMSVGA
-        if (   !pThis->svga.fEnabled
-            || (   pThis->svga.fEnabled
-                && pThis->svga.fVRAMTracking))
-# endif
-        {
-            rc = PDMDevHlpPGMHandlerPhysicalDeregister(pDevIns, pThis->GCPhysVRAM);
-            AssertRC(rc);
-        }
-# ifdef VBOX_WITH_VMSVGA
-        else
-            rc = VINF_SUCCESS;
-# endif
         pThis->GCPhysVRAM = 0;
+        rc = VINF_SUCCESS;
         /* NB: VBE_DISPI_INDEX_FB_BASE_HI is left unchanged here. */
     }

@@ -6049 +5992 @@
     /*
     * Reset the LFB mapping.
     */
-    pThis->fLFBUpdated = false;
-    if (   (   pDevIns->fRCEnabled
-            || pDevIns->fR0Enabled)
-        && pThis->GCPhysVRAM
-        && pThis->GCPhysVRAM != NIL_RTGCPHYS)
-    {
-        int rc = PDMDevHlpPGMHandlerPhysicalReset(pDevIns, pThis->GCPhysVRAM);
+    if (   (   pDevIns->fRCEnabled
+            || pDevIns->fR0Enabled)
+        && pThis->GCPhysVRAM != 0
+        && pThis->GCPhysVRAM != NIL_RTGCPHYS)
+    {
+        /** @todo r=bird: This used to be a PDMDevHlpPGMHandlerPhysicalReset call.
+         *        Not quite sure if it was/is needed.  Besides, where do we reset the
+         *        dirty bitmap (bmDirtyBitmap)? */
+        int rc = PDMDevHlpMmio2ResetDirtyBitmap(pDevIns, pThis->hMmio2VRam);
         AssertRC(rc);
     }

@@ -6606 +6551 @@
      */
     rc = PDMDevHlpPCIIORegionCreateMmio2Ex(pDevIns, pThis->pciRegions.iVRAM, pThis->vram_size,
-                                           PCI_ADDRESS_SPACE_MEM_PREFETCH, 0 /*fFlags*/, vgaR3PciIORegionVRamMapUnmap,
-                                           "VRam", (void **)&pThisCC->pbVRam, &pThis->hMmio2VRam);
+                                           PCI_ADDRESS_SPACE_MEM_PREFETCH, PGMPHYS_MMIO2_FLAGS_TRACK_DIRTY_PAGES,
+                                           vgaR3PciIORegionVRamMapUnmap, "VRam", (void **)&pThisCC->pbVRam, &pThis->hMmio2VRam);
     AssertLogRelRCReturn(rc, PDMDevHlpVMSetError(pDevIns, rc, RT_SRC_POS,
                                                  N_("Failed to allocate %u bytes of VRAM"), pThis->vram_size));
-
-    /*
-     * Register access handler types for tracking dirty VRAM pages.
-     */
-    rc = PDMDevHlpPGMHandlerPhysicalTypeRegister(pDevIns, PGMPHYSHANDLERKIND_WRITE,
-                                                 vgaLFBAccessHandler,
-                                                 "vgaLFBAccessHandler", "vgaLbfAccessPfHandler",
-                                                 "vgaLFBAccessHandler", "vgaLbfAccessPfHandler",
-                                                 "VGA LFB", &pThis->hLfbAccessHandlerType);
-    AssertRCReturn(rc, rc);
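Note the ordering in the map path: tracking is switched before PDMDevHlpMmio2Map() because PGM only registers the write handlers at map time for ranges whose tracking flag is already set (see pgmR3PhysMmio2EnableDirtyPageTracing() in PGMPhys.cpp below). A condensed, hedged sketch of the resulting flow — signature and error handling simplified relative to the real vgaR3PciIORegionVRamMapUnmap callback:

    static int sketchVRamMapUnmap(PPDMDEVINS pDevIns, PVGASTATE pThis, RTGCPHYS GCPhysAddress)
    {
        int rc;
        if (GCPhysAddress != NIL_RTGCPHYS)
        {
            /* Arm tracking first so the handlers get registered during the map. */
            rc = PDMDevHlpMmio2ControlDirtyPageTracking(pDevIns, pThis->hMmio2VRam, true /*fEnabled*/);
            if (RT_SUCCESS(rc))
                rc = PDMDevHlpMmio2Map(pDevIns, pThis->hMmio2VRam, GCPhysAddress);
            if (RT_SUCCESS(rc))
                pThis->GCPhysVRAM = GCPhysAddress;
        }
        else
        {
            /* Unmapping: PGM tears the handlers down together with the mapping,
               so the device only needs to forget the address. */
            pThis->GCPhysVRAM = 0;
            rc = VINF_SUCCESS;
        }
        return rc;
    }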
trunk/src/VBox/Devices/Graphics/DevVGA.h (r87105 -> r92162)

@@ -349 +349 @@
     uint32_t                    cMilliesRefreshInterval;
     /** Bitmap tracking dirty pages. */
-    uint32_t                    au32DirtyBitmap[VGA_VRAM_MAX / PAGE_SIZE / 32];
+    uint64_t                    bmDirtyBitmap[VGA_VRAM_MAX / PAGE_SIZE / 64];

     /** Flag indicating that there are dirty bits. This is used to optimize the handler resetting. */
     bool                        fHasDirtyBits;
-    /** LFB was updated flag. */
-    bool                        fLFBUpdated;
     /** Flag indicating that the VGA memory in the 0xa0000-0xbffff region has been remapped to allow direct access. */
     bool                        fRemappedVGA;

@@ -369 +367 @@
     bool                        fVMSVGAPciId;
     bool                        fVMSVGAPciBarLayout;
-    bool                        Padding4[2];
+    bool                        Padding4[3];
 #else
-    bool                        Padding4[4+2];
+    bool                        Padding4[4+3];
 #endif

@@ -382 +380 @@
 #endif
     } pciRegions;
-
-    /** Physical access type for the linear frame buffer dirty page tracking. */
-    PGMPHYSHANDLERTYPE          hLfbAccessHandlerType;

     /** The physical address the VRAM was assigned. */
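Dropping the fLFBUpdated byte is what lets Padding4 grow from 2 to 3 (and from 4+2 to 4+3 in the non-VMSVGA layout), so the members after the padding keep their offsets. The bitmap itself merely changes word size, not coverage; a hedged compile-time check one could place next to the new field (illustrative, not in the changeset):

    /* One bit per VRAM page: 256 MiB / 4 KiB = 65536 bits = 8 KiB of bitmap. */
    AssertCompile(sizeof(((VGASTATE *)0)->bmDirtyBitmap) * 8 == VGA_VRAM_MAX / PAGE_SIZE);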
trunk/src/VBox/VMM/VMMAll/PGMAllBth.h (r92046 -> r92162)

@@ -312 +312 @@
     if (pCurType->CTX_SUFF(pfnPfHandler))
     {
-        PPGMPOOL pPool = pVM->pgm.s.CTX_SUFF(pPool);
-        void    *pvUser = pCur->CTX_SUFF(pvUser);
-
         STAM_PROFILE_START(&pCur->Stat, h);
-        if (pCur->hType != pPool->hAccessHandlerType)
+
+        if (pCurType->fKeepPgmLock)
         {
+            rcStrict = pCurType->CTX_SUFF(pfnPfHandler)(pVM, pVCpu, uErr, pRegFrame, pvFault, GCPhysFault,
+                                                        pCur->CTX_SUFF(pvUser));
+
+# ifdef VBOX_WITH_STATISTICS
+            pCur = pgmHandlerPhysicalLookup(pVM, GCPhysFault); /* paranoia in case the handler deregistered itself */
+            if (pCur)
+                STAM_PROFILE_STOP(&pCur->Stat, h);
+# endif
+        }
+        else
+        {
+            void * const pvUser = pCur->CTX_SUFF(pvUser);
             PGM_UNLOCK(pVM);
             *pfLockTaken = false;
+
+            rcStrict = pCurType->CTX_SUFF(pfnPfHandler)(pVM, pVCpu, uErr, pRegFrame, pvFault, GCPhysFault, pvUser);
+
+# ifdef VBOX_WITH_STATISTICS
+            PGM_LOCK_VOID(pVM);
+            pCur = pgmHandlerPhysicalLookup(pVM, GCPhysFault);
+            if (pCur)
+                STAM_PROFILE_STOP(&pCur->Stat, h);
+            PGM_UNLOCK(pVM);
+# endif
         }
-
-        rcStrict = pCurType->CTX_SUFF(pfnPfHandler)(pVM, pVCpu, uErr, pRegFrame, pvFault, GCPhysFault, pvUser);
-
-# ifdef VBOX_WITH_STATISTICS
-        PGM_LOCK_VOID(pVM);
-        pCur = pgmHandlerPhysicalLookup(pVM, GCPhysFault);
-        if (pCur)
-            STAM_PROFILE_STOP(&pCur->Stat, h);
-        PGM_UNLOCK(pVM);
-# endif
     }
     else
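The two dispatch branches trade different hazards: dropping the PGM lock means the handler entry can vanish while the callback runs (hence the pgmHandlerPhysicalLookup() re-lookup guarding the statistics update), while keeping it means the handler must never take a lock that others hold while waiting for the PGM lock. A schematic of the inversion the unlock path avoids, in C comment form (illustrative lock ordering, not VirtualBox code):

    /*  EMT A: #PF exit                     EMT B: MMIO write emulation
     *    take PGM lock                       take IOM lock
     *    call handler -> wants IOM lock      wants PGM lock        => deadlock
     *
     *  Handlers registered with fKeepPgmLock=true (page pool, MMIO2 dirty
     *  tracking) promise to take no such locks, so the dispatcher can skip the
     *  unlock/re-lock round trip and the defensive re-lookup entirely. */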
trunk/src/VBox/VMM/VMMAll/PGMAllHandler.cpp (r92157 -> r92162)

@@ -49 +49 @@
 *   Internal Functions                                                        *
 *********************************************************************************************************************************/
-static int  pgmHandlerPhysicalSetRamFlagsAndFlushShadowPTs(PVMCC pVM, PPGMPHYSHANDLER pCur, PPGMRAMRANGE pRam);
+static int  pgmHandlerPhysicalSetRamFlagsAndFlushShadowPTs(PVMCC pVM, PPGMPHYSHANDLER pCur, PPGMRAMRANGE pRam,
+                                                           void *pvBitmap, uint32_t offBitmap);
 static void pgmHandlerPhysicalDeregisterNotifyNEM(PVMCC pVM, PPGMPHYSHANDLER pCur);
 static void pgmHandlerPhysicalResetRamFlags(PVMCC pVM, PPGMPHYSHANDLER pCur);

@@ -286 +287 @@
     if (RTAvlroGCPhysInsert(&pVM->pgm.s.CTX_SUFF(pTrees)->PhysHandlers, &pPhysHandler->Core))
     {
-        int rc = pgmHandlerPhysicalSetRamFlagsAndFlushShadowPTs(pVM, pPhysHandler, pRam);
+        int rc = pgmHandlerPhysicalSetRamFlagsAndFlushShadowPTs(pVM, pPhysHandler, pRam, NULL /*pvBitmap*/, 0 /*offBitmap*/);
         if (rc == VINF_PGM_SYNC_CR3)
             rc = VINF_PGM_GCPHYS_ALIASED;

@@ -365 +366 @@
  * @retval  VINF_PGM_SYNC_CR3 when the shadow PTs could be updated because
  *          the guest page aliased or/and mapped by multiple PTs. FFs set.
- * @param   pVM     The cross context VM structure.
- * @param   pCur    The physical handler.
- * @param   pRam    The RAM range.
- */
-static int pgmHandlerPhysicalSetRamFlagsAndFlushShadowPTs(PVMCC pVM, PPGMPHYSHANDLER pCur, PPGMRAMRANGE pRam)
+ * @param   pVM         The cross context VM structure.
+ * @param   pCur        The physical handler.
+ * @param   pRam        The RAM range.
+ * @param   pvBitmap    Dirty bitmap. Optional.
+ * @param   offBitmap   Dirty bitmap offset.
+ */
+static int pgmHandlerPhysicalSetRamFlagsAndFlushShadowPTs(PVMCC pVM, PPGMPHYSHANDLER pCur, PPGMRAMRANGE pRam,
+                                                          void *pvBitmap, uint32_t offBitmap)
 {
     /*

@@ -410 +414 @@
         }
 #endif
+        if (pvBitmap)
+            ASMBitSet(pvBitmap, offBitmap);
     }

@@ -416 +422 @@
             break;
         i++;
+        offBitmap++;
     }

@@ -905 +912 @@
      * Set ram flags, flush shadow PT entries and finally tell REM about this.
      */
-    rc = pgmHandlerPhysicalSetRamFlagsAndFlushShadowPTs(pVM, pCur, pRam);
+    rc = pgmHandlerPhysicalSetRamFlagsAndFlushShadowPTs(pVM, pCur, pRam, NULL, 0);

     /** @todo NEM: not sure we need this notification... */

@@ -1215 +1222 @@
      * Set the flags and flush shadow PT entries.
      */
-    rc = pgmHandlerPhysicalSetRamFlagsAndFlushShadowPTs(pVM, pCur, pRam);
+    rc = pgmHandlerPhysicalSetRamFlagsAndFlushShadowPTs(pVM, pCur, pRam, NULL /*pvBitmap*/, 0 /*offBitmap*/);
 }

@@ -1241 +1248 @@

     PGM_UNLOCK(pVM);
+    return rc;
+}
+
+
+/**
+ * Special version of PGMHandlerPhysicalReset used by MMIO2 w/ dirty page
+ * tracking.
+ *
+ * @returns VBox status code.
+ * @param   pVM         The cross context VM structure.
+ * @param   GCPhys      The start address of the handler region.
+ * @param   pvBitmap    Dirty bitmap. Caller has cleared this already, only
+ *                      dirty bits will be set. Caller also made sure it's big
+ *                      enough.
+ * @param   offBitmap   Dirty bitmap offset.
+ * @remarks Caller must own the PGM critical section.
+ */
+DECLHIDDEN(int) pgmHandlerPhysicalResetMmio2WithBitmap(PVMCC pVM, RTGCPHYS GCPhys, void *pvBitmap, uint32_t offBitmap)
+{
+    LogFlow(("pgmHandlerPhysicalResetMmio2WithBitmap GCPhys=%RGp\n", GCPhys));
+    PGM_LOCK_ASSERT_OWNER(pVM);
+
+    /*
+     * Find the handler.
+     */
+    int rc;
+    PPGMPHYSHANDLER pCur = (PPGMPHYSHANDLER)RTAvlroGCPhysGet(&pVM->pgm.s.CTX_SUFF(pTrees)->PhysHandlers, GCPhys);
+    if (RT_LIKELY(pCur))
+    {
+        /*
+         * Validate kind.
+         */
+        PPGMPHYSHANDLERTYPEINT pCurType = PGMPHYSHANDLER_GET_TYPE(pVM, pCur);
+        if (pCurType->enmKind == PGMPHYSHANDLERKIND_WRITE)
+        {
+            STAM_COUNTER_INC(&pVM->pgm.s.Stats.CTX_MID_Z(Stat,PhysHandlerReset));
+
+            PPGMRAMRANGE pRam = pgmPhysGetRange(pVM, GCPhys);
+            Assert(pRam);
+            Assert(pRam->GCPhys     <= pCur->Core.Key);
+            Assert(pRam->GCPhysLast >= pCur->Core.KeyLast);
+
+            /*
+             * Set the flags and flush shadow PT entries.
+             */
+            if (pCur->cTmpOffPages > 0)
+            {
+                rc = pgmHandlerPhysicalSetRamFlagsAndFlushShadowPTs(pVM, pCur, pRam, pvBitmap, offBitmap);
+                pCur->cTmpOffPages = 0;
+            }
+            else
+                rc = VINF_SUCCESS;
+        }
+        else
+        {
+            AssertFailed();
+            rc = VERR_WRONG_TYPE;
+        }
+    }
+    else
+    {
+        AssertMsgFailed(("Didn't find MMIO Range starting at %#x\n", GCPhys));
+        rc = VERR_PGM_HANDLER_NOT_FOUND;
+    }
+
     return rc;
 }
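The reset pass walks the handler's pages in order, so offBitmap advances one bit per page and bit N ends up meaning "page N of this range was written": pages whose handler state a write knocked down (counted in cTmpOffPages) are exactly the dirty ones. A heavily simplified model of that pass in generic C (illustrative, not the real function body):

    #include <stdint.h>

    static void resetRangeWithBitmap(uint8_t *pabPageState, uint32_t cPages, uint8_t bArmed,
                                     uint64_t *pbmDirty, uint32_t offBitmap)
    {
        for (uint32_t iPage = 0; iPage < cPages; iPage++, offBitmap++)
            if (pabPageState[iPage] != bArmed)          /* written since the last reset */
            {
                pabPageState[iPage] = bArmed;           /* re-arm write monitoring      */
                if (pbmDirty)                           /* optional, like pvBitmap      */
                    pbmDirty[offBitmap / 64] |= UINT64_C(1) << (offBitmap % 64);
            }
    }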
trunk/src/VBox/VMM/VMMAll/PGMAllPhys.cpp (r91855 -> r92162)

@@ -322 +322 @@
         }
     }
 }
+
+
+/**
+ * Common worker for pgmPhysMmio2WriteHandler and pgmPhysMmio2WritePfHandler.
+ */
+static VBOXSTRICTRC pgmPhysMmio2WriteHandlerCommon(PVMCC pVM, PVMCPUCC pVCpu, uintptr_t hMmio2, RTGCPHYS GCPhys, RTGCPTR GCPtr)
+{
+    /*
+     * Get the MMIO2 range.
+     */
+    AssertReturn(hMmio2 < RT_ELEMENTS(pVM->pgm.s.apMmio2RangesR3), VERR_INTERNAL_ERROR_3);
+    AssertReturn(hMmio2 != 0, VERR_INTERNAL_ERROR_3);
+    PPGMREGMMIO2RANGE pMmio2 = pVM->pgm.s.CTX_SUFF(apMmio2Ranges)[hMmio2 - 1];
+    Assert(pMmio2->idMmio2 == hMmio2);
+    AssertReturn((pMmio2->fFlags & PGMREGMMIO2RANGE_F_TRACK_DIRTY_PAGES) == PGMREGMMIO2RANGE_F_TRACK_DIRTY_PAGES,
+                 VERR_INTERNAL_ERROR_4);
+
+    /*
+     * Get the page and make sure it's an MMIO2 page.
+     */
+    PPGMPAGE pPage = pgmPhysGetPage(pVM, GCPhys);
+    AssertReturn(pPage, VINF_EM_RAW_EMULATE_INSTR);
+    AssertReturn(PGM_PAGE_GET_TYPE(pPage) == PGMPAGETYPE_MMIO2, VINF_EM_RAW_EMULATE_INSTR);
+
+    /*
+     * Set the dirty flag so we can avoid scanning all the pages when it isn't dirty.
+     * (The PGM_PAGE_HNDL_PHYS_STATE_DISABLED handler state indicates that a single
+     * page is dirty, saving the need for additional storage (bitmap).)
+     */
+    pMmio2->fFlags |= PGMREGMMIO2RANGE_F_IS_DIRTY;
+
+    /*
+     * Disable the handler for this page.
+     */
+    int rc = PGMHandlerPhysicalPageTempOff(pVM, pMmio2->RamRange.GCPhys, GCPhys & X86_PTE_PG_MASK);
+    AssertRC(rc);
+#ifndef IN_RING3
+    if (RT_SUCCESS(rc) && GCPtr != ~(RTGCPTR)0)
+    {
+        rc = PGMShwMakePageWritable(pVCpu, GCPtr, PGM_MK_PG_IS_MMIO2 | PGM_MK_PG_IS_WRITE_FAULT);
+        AssertMsgReturn(rc == VINF_SUCCESS, ("PGMShwModifyPage -> GCPtr=%RGv rc=%d\n", GCPtr, rc), rc);
+    }
+#else
+    RT_NOREF(pVCpu, GCPtr);
+#endif
+    return VINF_SUCCESS;
+}
+
+
+#ifndef IN_RING3
+/**
+ * @callback_method_impl{FNPGMRZPHYSPFHANDLER,
+ *      \#PF access handler callback for guest MMIO2 dirty page tracing.}
+ *
+ * @remarks The @a pvUser is the MMIO2 index.
+ */
+DECLEXPORT(VBOXSTRICTRC) pgmPhysMmio2WritePfHandler(PVMCC pVM, PVMCPUCC pVCpu, RTGCUINT uErrorCode, PCPUMCTXCORE pRegFrame,
+                                                    RTGCPTR pvFault, RTGCPHYS GCPhysFault, void *pvUser)
+{
+    RT_NOREF(pVCpu, uErrorCode, pRegFrame);
+    VBOXSTRICTRC rcStrict = PGM_LOCK(pVM); /* We should already have it, but just make sure we do. */
+    if (RT_SUCCESS(rcStrict))
+    {
+        rcStrict = pgmPhysMmio2WriteHandlerCommon(pVM, pVCpu, (uintptr_t)pvUser, GCPhysFault, pvFault);
+        PGM_UNLOCK(pVM);
+    }
+    return rcStrict;
+}
+#endif /* !IN_RING3 */
+
+
+/**
+ * @callback_method_impl{FNPGMPHYSHANDLER,
+ *      Access handler callback for MMIO2 dirty page tracing.}
+ *
+ * @remarks The @a pvUser is the MMIO2 index.
+ */
+PGM_ALL_CB2_DECL(VBOXSTRICTRC)
+pgmPhysMmio2WriteHandler(PVMCC pVM, PVMCPUCC pVCpu, RTGCPHYS GCPhys, void *pvPhys, void *pvBuf, size_t cbBuf,
+                         PGMACCESSTYPE enmAccessType, PGMACCESSORIGIN enmOrigin, void *pvUser)
+{
+    VBOXSTRICTRC rcStrict = PGM_LOCK(pVM); /* We should already have it, but just make sure we do. */
+    if (RT_SUCCESS(rcStrict))
+    {
+        rcStrict = pgmPhysMmio2WriteHandlerCommon(pVM, pVCpu, (uintptr_t)pvUser, GCPhys, ~(RTGCPTR)0);
+        PGM_UNLOCK(pVM);
+        if (rcStrict == VINF_SUCCESS)
+            rcStrict = VINF_PGM_HANDLER_DO_DEFAULT;
+    }
+    RT_NOREF(pvPhys, pvBuf, cbBuf, enmAccessType, enmOrigin);
+    return rcStrict;
+}

@@ -2577 +2669 @@
                 if (RT_SUCCESS(rcStrict))
                 {
-                    PFNPGMPHYSHANDLER pfnHandler = PGMPHYSHANDLER_GET_TYPE(pVM, pCur)->CTX_SUFF(pfnHandler);
-                    void *pvUser = pCur->CTX_SUFF(pvUser);
+                    PPGMPHYSHANDLERTYPEINT const pCurType   = PGMPHYSHANDLER_GET_TYPE(pVM, pCur);
+                    PFNPGMPHYSHANDLER const      pfnHandler = pCurType->CTX_SUFF(pfnHandler);
+                    void * const                 pvUser     = pCur->CTX_SUFF(pvUser);
                     STAM_PROFILE_START(&pCur->Stat, h);

-                    /* Release the PGM lock as MMIO handlers take the IOM lock. (deadlock prevention) */
+                    /* Most handlers will want to release the PGM lock for deadlock prevention
+                       (esp. MMIO), though some PGM internal ones like the page pool and MMIO2
+                       dirty page trackers will want to keep it for performance reasons. */
                     PGM_LOCK_ASSERT_OWNER(pVM);
-                    PGM_UNLOCK(pVM);
-                    rcStrict = pfnHandler(pVM, pVCpu, GCPhys, pvDst, (void *)pvBuf, cbRange, PGMACCESSTYPE_WRITE, enmOrigin, pvUser);
-                    PGM_LOCK_VOID(pVM);
+                    if (pCurType->fKeepPgmLock)
+                        rcStrict = pfnHandler(pVM, pVCpu, GCPhys, pvDst, (void *)pvBuf, cbRange, PGMACCESSTYPE_WRITE, enmOrigin, pvUser);
+                    else
+                    {
+                        PGM_UNLOCK(pVM);
+                        rcStrict = pfnHandler(pVM, pVCpu, GCPhys, pvDst, (void *)pvBuf, cbRange, PGMACCESSTYPE_WRITE, enmOrigin, pvUser);
+                        PGM_LOCK_VOID(pVM);
+                    }

 #ifdef VBOX_WITH_STATISTICS

@@ -2694 +2794 @@
                 cbRange = offPhysLast + 1;

-            PFNPGMPHYSHANDLER pfnHandler = PGMPHYSHANDLER_GET_TYPE(pVM, pPhys)->CTX_SUFF(pfnHandler);
-            void *pvUser = pPhys->CTX_SUFF(pvUser);
+            PPGMPHYSHANDLERTYPEINT const pCurType   = PGMPHYSHANDLER_GET_TYPE(pVM, pPhys);
+            PFNPGMPHYSHANDLER const      pfnHandler = pCurType->CTX_SUFF(pfnHandler);
+            void * const                 pvUser     = pPhys->CTX_SUFF(pvUser);

             Log5(("pgmPhysWriteHandler: GCPhys=%RGp cbRange=%#x pPage=%R[pgmpage] phys %s\n", GCPhys, cbRange, pPage, R3STRING(pPhys->pszDesc) ));
             STAM_PROFILE_START(&pPhys->Stat, h);

-            /* Release the PGM lock as MMIO handlers take the IOM lock. (deadlock prevention) */
+            /* Most handlers will want to release the PGM lock for deadlock prevention
+               (esp. MMIO), though some PGM internal ones like the page pool and MMIO2
+               dirty page trackers will want to keep it for performance reasons. */
             PGM_LOCK_ASSERT_OWNER(pVM);
-            PGM_UNLOCK(pVM);
-            rcStrict2 = pfnHandler(pVM, pVCpu, GCPhys, pvDst, (void *)pvBuf, cbRange, PGMACCESSTYPE_WRITE, enmOrigin, pvUser);
-            PGM_LOCK_VOID(pVM);
+            if (pCurType->fKeepPgmLock)
+                rcStrict2 = pfnHandler(pVM, pVCpu, GCPhys, pvDst, (void *)pvBuf, cbRange, PGMACCESSTYPE_WRITE, enmOrigin, pvUser);
+            else
+            {
+                PGM_UNLOCK(pVM);
+                rcStrict2 = pfnHandler(pVM, pVCpu, GCPhys, pvDst, (void *)pvBuf, cbRange, PGMACCESSTYPE_WRITE, enmOrigin, pvUser);
+                PGM_LOCK_VOID(pVM);
+            }

 #ifdef VBOX_WITH_STATISTICS
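The common worker implements a trap-once-per-page scheme: the first write to a clean page takes the slow handler path, flags the chunk dirty and calls PGMHandlerPhysicalPageTempOff() so every later write to that page runs at full speed until the next query/reset re-arms it. The same idea can be sketched with POSIX primitives (a Linux-flavored illustration, not VirtualBox code; calling mprotect() from a signal handler is not strictly async-signal-safe, but this is the conventional way such trackers are built):

    #include <signal.h>
    #include <stdint.h>
    #include <string.h>
    #include <sys/mman.h>
    #include <unistd.h>

    #define NPAGES 64
    static uint8_t *g_pbBase;
    static size_t   g_cbPage;
    static uint64_t g_bmDirty;              /* one bit per page, like bmDirtyBitmap */

    static void segvHandler(int iSig, siginfo_t *pInfo, void *pvCtx)
    {
        uintptr_t uPage = ((uintptr_t)pInfo->si_addr - (uintptr_t)g_pbBase) / g_cbPage;
        g_bmDirty |= UINT64_C(1) << uPage;  /* record the dirty page...             */
        mprotect(g_pbBase + uPage * g_cbPage, g_cbPage, PROT_READ | PROT_WRITE);
        (void)iSig; (void)pvCtx;            /* ...and stop trapping it              */
    }

    int main(void)
    {
        g_cbPage = (size_t)sysconf(_SC_PAGESIZE);
        g_pbBase = mmap(NULL, NPAGES * g_cbPage, PROT_READ, MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

        struct sigaction Act;
        memset(&Act, 0, sizeof(Act));
        Act.sa_sigaction = segvHandler;
        Act.sa_flags     = SA_SIGINFO;
        sigaction(SIGSEGV, &Act, NULL);

        g_pbBase[5 * g_cbPage]     = 1;     /* first write: faults once, sets bit 5 */
        g_pbBase[5 * g_cbPage + 1] = 2;     /* later writes: full speed             */

        /* "Query and reset": harvest the bits and re-arm the protection. */
        uint64_t bmSnapshot = g_bmDirty;
        g_bmDirty = 0;
        mprotect(g_pbBase, NPAGES * g_cbPage, PROT_READ);
        return bmSnapshot == (UINT64_C(1) << 5) ? 0 : 1;
    }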
trunk/src/VBox/VMM/VMMR3/IEMR3.cpp (r91263 -> r92162)

@@ -172 +172 @@
     {
         PVMCPU pVCpu0 = pVM->apCpusR3[0];
-        int rc = PGMR3HandlerPhysicalTypeRegister(pVM, PGMPHYSHANDLERKIND_ALL, iemVmxApicAccessPageHandler,
+        int rc = PGMR3HandlerPhysicalTypeRegister(pVM, PGMPHYSHANDLERKIND_ALL, false /*fKeepPgmLock*/,
+                                                  iemVmxApicAccessPageHandler,
                                                   NULL /* pszModR0 */,
                                                   "iemVmxApicAccessPageHandler", NULL /* pszPfHandlerR0 */,
trunk/src/VBox/VMM/VMMR3/IOM.cpp (r82968 -> r92162)

@@ -159 +159 @@
      * Register the MMIO access handler type.
      */
-    rc = PGMR3HandlerPhysicalTypeRegister(pVM, PGMPHYSHANDLERKIND_MMIO,
+    rc = PGMR3HandlerPhysicalTypeRegister(pVM, PGMPHYSHANDLERKIND_MMIO, false /*fKeepPgmLock*/,
                                           iomMmioHandlerNew,
                                           NULL, "iomMmioHandlerNew", "iomMmioPfHandlerNew",
trunk/src/VBox/VMM/VMMR3/PDMDevHlp.cpp (r92071 -> r92162)

@@ -348 +348 @@
     PDMDEV_ASSERT_DEVINS(pDevIns);
     PVM pVM = pDevIns->Internal.s.pVMR3;
-    LogFlow(("pdmR3DevHlp_Mmio2GetMappingAddress: caller='%s'/%d: hRegion=%#RX6 r\n", pDevIns->pReg->szName, pDevIns->iInstance, hRegion));
+    LogFlow(("pdmR3DevHlp_Mmio2GetMappingAddress: caller='%s'/%d: hRegion=%#RX64\n", pDevIns->pReg->szName, pDevIns->iInstance, hRegion));
     VM_ASSERT_EMT0_RETURN(pVM, NIL_RTGCPHYS);
 …
     return GCPhys;
 }
+
+
+/** @interface_method_impl{PDMDEVHLPR3,pfnMmio2QueryAndResetDirtyBitmap} */
+static DECLCALLBACK(int) pdmR3DevHlp_Mmio2QueryAndResetDirtyBitmap(PPDMDEVINS pDevIns, PGMMMIO2HANDLE hRegion,
+                                                                   void *pvBitmap, size_t cbBitmap)
+{
+    PDMDEV_ASSERT_DEVINS(pDevIns);
+    PVM pVM = pDevIns->Internal.s.pVMR3;
+    LogFlow(("pdmR3DevHlp_Mmio2QueryAndResetDirtyBitmap: caller='%s'/%d: hRegion=%#RX64 pvBitmap=%p cbBitmap=%#zx\n",
+             pDevIns->pReg->szName, pDevIns->iInstance, hRegion, pvBitmap, cbBitmap));
+
+    int rc = PGMR3PhysMmio2QueryAndResetDirtyBitmap(pVM, pDevIns, hRegion, pvBitmap, cbBitmap);
+
+    LogFlow(("pdmR3DevHlp_Mmio2QueryAndResetDirtyBitmap: caller='%s'/%d: returns %Rrc\n", pDevIns->pReg->szName, pDevIns->iInstance, rc));
+    return rc;
+}
+
+
+/** @interface_method_impl{PDMDEVHLPR3,pfnMmio2ControlDirtyPageTracking} */
+static DECLCALLBACK(int) pdmR3DevHlp_Mmio2ControlDirtyPageTracking(PPDMDEVINS pDevIns, PGMMMIO2HANDLE hRegion, bool fEnabled)
+{
+    PDMDEV_ASSERT_DEVINS(pDevIns);
+    PVM pVM = pDevIns->Internal.s.pVMR3;
+    LogFlow(("pdmR3DevHlp_Mmio2ControlDirtyPageTracking: caller='%s'/%d: hRegion=%#RX64 fEnabled=%RTbool\n",
+             pDevIns->pReg->szName, pDevIns->iInstance, hRegion, fEnabled));
+
+    int rc = PGMR3PhysMmio2ControlDirtyPageTracking(pVM, pDevIns, hRegion, fEnabled);
+
+    LogFlow(("pdmR3DevHlp_Mmio2ControlDirtyPageTracking: caller='%s'/%d: returns %Rrc\n", pDevIns->pReg->szName, pDevIns->iInstance, rc));
+    return rc;
+}
+

 /**
@@ -364 +396 @@
     PDMDEV_ASSERT_DEVINS(pDevIns);
     PVM pVM = pDevIns->Internal.s.pVMR3;
-    LogFlow(("pdmR3DevHlp_Mmio2ChangeRegionNo: caller='%s'/%d: hRegion=%#RX6 riNewRegion=%#x\n", pDevIns->pReg->szName, pDevIns->iInstance, hRegion, iNewRegion));
+    LogFlow(("pdmR3DevHlp_Mmio2ChangeRegionNo: caller='%s'/%d: hRegion=%#RX64 iNewRegion=%#x\n", pDevIns->pReg->szName, pDevIns->iInstance, hRegion, iNewRegion));
     VM_ASSERT_EMT0_RETURN(pVM, VERR_VM_THREAD_NOT_EMT);

@@ -841 +873 @@
                                                      pszDesc, pszDesc, phType));

-    int rc = PGMR3HandlerPhysicalTypeRegister(pVM, enmKind, pfnHandlerR3,
+    int rc = PGMR3HandlerPhysicalTypeRegister(pVM, enmKind, false /*fKeepPgmLock*/, pfnHandlerR3,
                                               pDevIns->pReg->pszR0Mod, pszHandlerR0, pszPfHandlerR0,
                                               pDevIns->pReg->pszRCMod, pszHandlerRC, pszPfHandlerRC,

@@ -4799 +4831 @@
     pdmR3DevHlp_Mmio2Reduce,
     pdmR3DevHlp_Mmio2GetMappingAddress,
+    pdmR3DevHlp_Mmio2QueryAndResetDirtyBitmap,
+    pdmR3DevHlp_Mmio2ControlDirtyPageTracking,
     pdmR3DevHlp_Mmio2ChangeRegionNo,
     pdmR3DevHlp_MmioMapMmio2Page,

@@ -5193 +5227 @@
     pdmR3DevHlp_Mmio2Reduce,
     pdmR3DevHlp_Mmio2GetMappingAddress,
+    pdmR3DevHlp_Mmio2QueryAndResetDirtyBitmap,
+    pdmR3DevHlp_Mmio2ControlDirtyPageTracking,
     pdmR3DevHlp_Mmio2ChangeRegionNo,
     pdmR3DevHlp_MmioMapMmio2Page,

@@ -5901 +5937 @@
     pdmR3DevHlp_Mmio2Reduce,
     pdmR3DevHlp_Mmio2GetMappingAddress,
+    pdmR3DevHlp_Mmio2QueryAndResetDirtyBitmap,
+    pdmR3DevHlp_Mmio2ControlDirtyPageTracking,
     pdmR3DevHlp_Mmio2ChangeRegionNo,
     pdmR3DevHlp_MmioMapMmio2Page,
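Taken together, these two helpers plus the creation flag give devices a three-step workflow: allocate with tracking support, toggle tracking, harvest the bitmap. A hedged end-to-end sketch from a device's point of view (myMapUnmapCallback, MY_REGION_SIZE and the region number are illustrative, not from the changeset):

    #define MY_REGION_SIZE  _4M                 /* must be a multiple of 64 pages */
    static uint64_t s_bmDirty[MY_REGION_SIZE / PAGE_SIZE / 64];

    /* 1. Construction: ask PGM to allocate the region with dirty tracking support. */
    void           *pvR3   = NULL;
    PGMMMIO2HANDLE  hMmio2 = NIL_PGMMMIO2HANDLE;
    int rc = PDMDevHlpPCIIORegionCreateMmio2Ex(pDevIns, 0 /*iPciRegion*/, MY_REGION_SIZE,
                                               PCI_ADDRESS_SPACE_MEM_PREFETCH,
                                               PGMPHYS_MMIO2_FLAGS_TRACK_DIRTY_PAGES,
                                               myMapUnmapCallback, "MyRegion", &pvR3, &hMmio2);

    /* 2. Runtime: switch tracking on when someone starts consuming the content. */
    rc = PDMDevHlpMmio2ControlDirtyPageTracking(pDevIns, hMmio2, true /*fEnabled*/);

    /* 3. Periodically: pull the dirty set and re-arm the monitoring in one call. */
    rc = PDMDevHlpMmio2QueryAndResetDirtyBitmap(pDevIns, hMmio2, s_bmDirty, sizeof(s_bmDirty));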
trunk/src/VBox/VMM/VMMR3/PGM.cpp (r92046 -> r92162)

@@ -950 +950 @@
      */
     if (RT_SUCCESS(rc))
-        rc = PGMR3HandlerPhysicalTypeRegister(pVM, PGMPHYSHANDLERKIND_WRITE,
+        /** @todo why isn't pgmPhysRomWriteHandler registered for ring-0? */
+        rc = PGMR3HandlerPhysicalTypeRegister(pVM, PGMPHYSHANDLERKIND_WRITE, false /*fKeepPgmLock*/,
                                               pgmPhysRomWriteHandler,
                                               NULL, NULL, "pgmPhysRomWritePfHandler",
 …
                                               "ROM write protection",
                                               &pVM->pgm.s.hRomPhysHandlerType);
+
+    /*
+     * Register the physical access handler doing dirty MMIO2 tracing.
+     */
+    if (RT_SUCCESS(rc))
+        rc = PGMR3HandlerPhysicalTypeRegister(pVM, PGMPHYSHANDLERKIND_WRITE, true /*fKeepPgmLock*/,
+                                              pgmPhysMmio2WriteHandler,
+                                              NULL, "pgmPhysMmio2WriteHandler", "pgmPhysMmio2WritePfHandler",
+                                              NULL, "pgmPhysMmio2WriteHandler", "pgmPhysMmio2WritePfHandler",
+                                              "MMIO2 dirty page tracing",
+                                              &pVM->pgm.s.hMmio2DirtyPhysHandlerType);

     /*
trunk/src/VBox/VMM/VMMR3/PGMHandler.cpp (r91848 -> r92162)

@@ -65 +65 @@
 * @param   pVM             The cross context VM structure.
 * @param   enmKind         The kind of access handler.
+ * @param   fKeepPgmLock    Whether to hold the PGM lock while calling the
+ *                          handler or not.  Mainly for PGM callers.
 * @param   pfnHandlerR3    Pointer to the ring-3 handler callback.
 * @param   pfnHandlerR0    Pointer to the ring-0 handler callback.
 …
 *                          safe).
 */
-VMMR3_INT_DECL(int) PGMR3HandlerPhysicalTypeRegisterEx(PVM pVM, PGMPHYSHANDLERKIND enmKind,
+VMMR3_INT_DECL(int) PGMR3HandlerPhysicalTypeRegisterEx(PVM pVM, PGMPHYSHANDLERKIND enmKind, bool fKeepPgmLock,
                                                        PFNPGMPHYSHANDLER pfnHandlerR3,
                                                        R0PTRTYPE(PFNPGMPHYSHANDLER) pfnHandlerR0,

@@ -97 +99 @@
     pType->uState       = enmKind == PGMPHYSHANDLERKIND_WRITE
                         ? PGM_PAGE_HNDL_PHYS_STATE_WRITE : PGM_PAGE_HNDL_PHYS_STATE_ALL;
+    pType->fKeepPgmLock = fKeepPgmLock;
     pType->pfnHandlerR3 = pfnHandlerR3;
     pType->pfnHandlerR0 = pfnHandlerR0;

@@ -122 +125 @@
 * @param   pVM             The cross context VM structure.
 * @param   enmKind         The kind of access handler.
+ * @param   fKeepPgmLock    Whether to hold the PGM lock while calling the
+ *                          handler or not.  Mainly for PGM callers.
 * @param   pfnHandlerR3    Pointer to the ring-3 handler callback.
 * @param   pszModR0        The name of the ring-0 module, NULL is an alias for
 …
 *                          safe).
 */
-VMMR3DECL(int) PGMR3HandlerPhysicalTypeRegister(PVM pVM, PGMPHYSHANDLERKIND enmKind,
+VMMR3DECL(int) PGMR3HandlerPhysicalTypeRegister(PVM pVM, PGMPHYSHANDLERKIND enmKind, bool fKeepPgmLock,
                                                 R3PTRTYPE(PFNPGMPHYSHANDLER) pfnHandlerR3,
                                                 const char *pszModR0, const char *pszHandlerR0, const char *pszPfHandlerR0,

@@ -194 +199 @@
     }
     if (RT_SUCCESS(rc))
-        return PGMR3HandlerPhysicalTypeRegisterEx(pVM, enmKind, pfnHandlerR3,
+        return PGMR3HandlerPhysicalTypeRegisterEx(pVM, enmKind, fKeepPgmLock, pfnHandlerR3,
                                                   pfnHandlerR0, pfnPfHandlerR0, pszDesc, phType);
 }
trunk/src/VBox/VMM/VMMR3/PGMPhys.cpp (r92157 -> r92162)

@@ -2685 +2685 @@

+
+/*********************************************************************************************************************************
+*   MMIO2                                                                                                                        *
+*********************************************************************************************************************************/
+
 /**
  * Locate a MMIO2 range.

@@ -2738 +2743 @@

 /**
+ * Worker for PGMR3PhysMmio2ControlDirtyPageTracking and PGMR3PhysMmio2Map.
+ */
+static int pgmR3PhysMmio2EnableDirtyPageTracing(PVM pVM, PPGMREGMMIO2RANGE pFirstMmio2)
+{
+    int rc = VINF_SUCCESS;
+    for (PPGMREGMMIO2RANGE pCurMmio2 = pFirstMmio2; pCurMmio2; pCurMmio2 = pCurMmio2->pNextR3)
+    {
+        Assert(!(pCurMmio2->fFlags & PGMREGMMIO2RANGE_F_IS_TRACKING));
+        int rc2 = pgmHandlerPhysicalExRegister(pVM, pCurMmio2->pPhysHandlerR3, pCurMmio2->RamRange.GCPhys,
+                                               pCurMmio2->RamRange.GCPhysLast);
+        AssertLogRelMsgRC(rc2, ("%#RGp-%#RGp %s failed -> %Rrc\n", pCurMmio2->RamRange.GCPhys, pCurMmio2->RamRange.GCPhysLast,
+                                pCurMmio2->RamRange.pszDesc, rc2));
+        if (RT_SUCCESS(rc2))
+            pCurMmio2->fFlags |= PGMREGMMIO2RANGE_F_IS_TRACKING;
+        else if (RT_SUCCESS(rc))
+            rc = rc2;
+        if (pCurMmio2->fFlags & PGMREGMMIO2RANGE_F_LAST_CHUNK)
+            return rc;
+    }
+    AssertFailed();
+    return rc;
+}
+
+
+/**
+ * Worker for PGMR3PhysMmio2ControlDirtyPageTracking and PGMR3PhysMmio2Unmap.
+ */
+static int pgmR3PhysMmio2DisableDirtyPageTracing(PVM pVM, PPGMREGMMIO2RANGE pFirstMmio2)
+{
+    for (PPGMREGMMIO2RANGE pCurMmio2 = pFirstMmio2; pCurMmio2; pCurMmio2 = pCurMmio2->pNextR3)
+    {
+        if (pCurMmio2->fFlags & PGMREGMMIO2RANGE_F_IS_TRACKING)
+        {
+            int rc2 = pgmHandlerPhysicalExDeregister(pVM, pCurMmio2->pPhysHandlerR3);
+            AssertLogRelMsgRC(rc2, ("%#RGp-%#RGp %s failed -> %Rrc\n", pCurMmio2->RamRange.GCPhys, pCurMmio2->RamRange.GCPhysLast,
+                                    pCurMmio2->RamRange.pszDesc, rc2));
+            pCurMmio2->fFlags &= ~PGMREGMMIO2RANGE_F_IS_TRACKING;
+        }
+        if (pCurMmio2->fFlags & PGMREGMMIO2RANGE_F_LAST_CHUNK)
+            return VINF_SUCCESS;
+    }
+    AssertFailed();
+    return VINF_SUCCESS;
+}
+
+
+/**
  * Calculates the number of chunks
  *

@@ -2757 +2810 @@
  * Note! In additions, we've got a 24 bit sub-page range for MMIO2 ranges, leaving
  *       us with an absolute maximum of 16777215 pages per chunk (close to 64 GB).
+ *
+ * P.S. If we want to include a dirty bitmap, we'd have to drop down to 1040384 pages.
  */
-    uint32_t cbChunk = 16U*_1M;
-    uint32_t cPagesPerChunk = 1048048; /* max ~1048059 */
-    AssertCompile(sizeof(PGMREGMMIO2RANGE) + sizeof(PGMPAGE) * 1048048 < 16U*_1M - PAGE_SIZE * 2);
+    uint32_t cbChunk = 16U*_1M;
+    uint32_t cPagesPerChunk = 1048000; /* max ~1048059 */
+    Assert(cPagesPerChunk / 64 * 64 == cPagesPerChunk); /* (NEM requirement) */
+    AssertCompile(sizeof(PGMREGMMIO2RANGE) + sizeof(PGMPAGE) * 1048000 < 16U*_1M - PAGE_SIZE * 2);
     AssertRelease(cPagesPerChunk <= PGM_MMIO2_MAX_PAGE_COUNT); /* See above note. */
     AssertRelease(RT_UOFFSETOF_DYN(PGMREGMMIO2RANGE, RamRange.aPages[cPagesPerChunk]) + PAGE_SIZE * 2 <= cbChunk);
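The new chunk constant is easy to sanity-check (taking sizeof(PGMPAGE) as 16 bytes, which the old "max ~1048059" figure is consistent with):

    budget:    16 MiB chunk - 2 guard pages = 16777216 - 8192 = 16769024 bytes
    pages:     1048000 * sizeof(PGMPAGE)    = 16768000 bytes        -> fits
    NEM:       1048000 / 64                 = 16375 exactly         -> the per-chunk dirty
                                                                       bits fill whole
                                                                       uint64_t words
    coverage:  1048000 * 4 KiB             ~= 4.0 GiB of MMIO2 tracked per chunk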
@@ -2795 +2851 @@
  *                      UINT8_MAX.
  * @param   cb          The size of the region.  Must be page aligned.
+ * @param   fFlags      PGMPHYS_MMIO2_FLAGS_XXX.
+ * @param   idMmio2     The MMIO2 ID for the first chunk.
  * @param   pszDesc     The description.
  * @param   ppHeadRet   Where to return the pointer to the first
 …
  * @thread  EMT
  */
-static int pgmR3PhysMmio2Create(PVM pVM, PPDMDEVINS pDevIns, uint32_t iSubDev, uint32_t iRegion, RTGCPHYS cb,
-                                const char *pszDesc, PPGMREGMMIO2RANGE *ppHeadRet)
+static int pgmR3PhysMmio2Create(PVM pVM, PPDMDEVINS pDevIns, uint32_t iSubDev, uint32_t iRegion, RTGCPHYS cb, uint32_t fFlags,
+                                uint8_t idMmio2, const char *pszDesc, PPGMREGMMIO2RANGE *ppHeadRet)
 {
     /*
 …
     int rc = VINF_SUCCESS;
     uint32_t cPagesLeft = cb >> X86_PAGE_SHIFT;
-    for (uint16_t iChunk = 0; iChunk < cChunks && RT_SUCCESS(rc); iChunk++)
+    for (uint16_t iChunk = 0; iChunk < cChunks && RT_SUCCESS(rc); iChunk++, idMmio2++)
     {
         /*
 …
         //pNew->pvR3    = NULL;
         //pNew->pNext   = NULL;
-        //pNew->fFlags  = 0;
         if (iChunk == 0)
             pNew->fFlags |= PGMREGMMIO2RANGE_F_FIRST_CHUNK;
         if (iChunk + 1 == cChunks)
             pNew->fFlags |= PGMREGMMIO2RANGE_F_LAST_CHUNK;
+        if (fFlags & PGMPHYS_MMIO2_FLAGS_TRACK_DIRTY_PAGES)
+            pNew->fFlags |= PGMREGMMIO2RANGE_F_TRACK_DIRTY_PAGES;
         pNew->iSubDev       = iSubDev;
         pNew->iRegion       = iRegion;
         pNew->idSavedState  = UINT8_MAX;
-        pNew->idMmio2       = UINT8_MAX;
+        pNew->idMmio2       = idMmio2;
         //pNew->pPhysHandlerR3 = NULL;
         //pNew->paLSPages      = NULL;
 …
         cPagesLeft -= cPagesTrackedByChunk;
         ppNext = &pNew->pNextR3;
+
+        /*
+         * Pre-allocate a handler if we're tracking dirty pages.
+         */
+        if (fFlags & PGMPHYS_MMIO2_FLAGS_TRACK_DIRTY_PAGES)
+        {
+            rc = pgmHandlerPhysicalExCreate(pVM, pVM->pgm.s.hMmio2DirtyPhysHandlerType,
+                                            (RTR3PTR)(uintptr_t)idMmio2, idMmio2, idMmio2, pszDesc, &pNew->pPhysHandlerR3);
+            AssertLogRelMsgRCBreak(rc, ("idMmio2=%zu\n", idMmio2));
+        }
     }
     Assert(cPagesLeft == 0);
 …
         PPGMREGMMIO2RANGE pFree = *ppHeadRet;
         *ppHeadRet = pFree->pNextR3;
+
+        if (pFree->pPhysHandlerR3)
+        {
+            pgmHandlerPhysicalExDestroy(pVM, pFree->pPhysHandlerR3);
+            pFree->pPhysHandlerR3 = NULL;
+        }

         if (pFree->RamRange.fFlags & PGM_RAM_RANGE_FLAGS_FLOATING)

@@ -3036 +3111 @@
     AssertReturn(!(cb & PAGE_OFFSET_MASK), VERR_INVALID_PARAMETER);
     AssertReturn(cb, VERR_INVALID_PARAMETER);
-    AssertReturn(!fFlags, VERR_INVALID_PARAMETER);
+    AssertReturn(!(fFlags & ~PGMPHYS_MMIO2_FLAGS_VALID_MASK), VERR_INVALID_FLAGS);

     const uint32_t cPages = cb >> PAGE_SHIFT;

@@ -3060 +3135 @@
      */
     unsigned cChunks = pgmR3PhysMmio2CalcChunkCount(pVM, cb, NULL, NULL);
+
     PGM_LOCK_VOID(pVM);
-    uint8_t idMmio2 = pVM->pgm.s.cMmio2Regions + 1;
-    unsigned cNewMmio2Regions = pVM->pgm.s.cMmio2Regions + cChunks;
+    AssertCompile(PGM_MMIO2_MAX_RANGES < 255);
+    uint8_t const  idMmio2          = pVM->pgm.s.cMmio2Regions + 1;
+    unsigned const cNewMmio2Regions = pVM->pgm.s.cMmio2Regions + cChunks;
     if (cNewMmio2Regions > PGM_MMIO2_MAX_RANGES)
     {

@@ -3096 +3173 @@
      */
     PPGMREGMMIO2RANGE pNew;
-    rc = pgmR3PhysMmio2Create(pVM, pDevIns, iSubDev, iRegion, cb, pszDesc, &pNew);
+    rc = pgmR3PhysMmio2Create(pVM, pDevIns, iSubDev, iRegion, cb, fFlags, idMmio2, pszDesc, &pNew);
     if (RT_SUCCESS(rc))
     {
 …
 #endif
             pCur->RamRange.pvR3 = pbCurPages;
-            pCur->idMmio2       = idMmio2;

             uint32_t iDstPage = pCur->RamRange.cb >> X86_PAGE_SHIFT;
 …
             iSrcPage   += pCur->RamRange.cb >> X86_PAGE_SHIFT;
             pbCurPages += pCur->RamRange.cb;
-            idMmio2++;
         }

@@ -3214 +3289 @@

         uint8_t idMmio2 = pCur->idMmio2;
-        if (idMmio2 != UINT8_MAX)
+        Assert(idMmio2 <= RT_ELEMENTS(pVM->pgm.s.apMmio2RangesR3));
+        if (idMmio2 <= RT_ELEMENTS(pVM->pgm.s.apMmio2RangesR3))
         {
             Assert(pVM->pgm.s.apMmio2RangesR3[idMmio2 - 1] == pCur);
 …
         if (RT_FAILURE(rc2) && RT_SUCCESS(rc))
             rc = rc2;
+
+        if (pCur->pPhysHandlerR3)
+        {
+            pgmHandlerPhysicalExDestroy(pVM, pCur->pPhysHandlerR3);
+            pCur->pPhysHandlerR3 = NULL;
+        }

         /* we're leaking hyper memory here if done at runtime. */

@@ -3517 +3599 @@
     }

-#if 0 /* will be reused */
-    /*
-     * Register the access handler if plain MMIO.
-     *
-     * We must register access handlers for each range since the access handler
-     * code refuses to deal with multiple ranges (and we can).
-     */
-    if (!(pFirstMmio->fFlags & PGMREGMMIO2RANGE_F_MMIO2))
-    {
-        AssertFailed();
-        int rc = VINF_SUCCESS;
-        for (PPGMREGMMIO2RANGE pCurMmio = pFirstMmio; ; pCurMmio = pCurMmio->pNextR3)
-        {
-            Assert(!(pCurMmio->fFlags & PGMREGMMIO2RANGE_F_MAPPED));
-            rc = pgmHandlerPhysicalExRegister(pVM, pCurMmio->pPhysHandlerR3, pCurMmio->RamRange.GCPhys,
-                                              pCurMmio->RamRange.GCPhysLast);
-            if (RT_FAILURE(rc))
-                break;
-            pCurMmio->fFlags |= PGMREGMMIO2RANGE_F_MAPPED; /* Use this to mark that the handler is registered. */
-            if (pCurMmio->fFlags & PGMREGMMIO2RANGE_F_LAST_CHUNK)
-                break;
-        }
-        if (RT_FAILURE(rc))
-        {
-            /* Almost impossible, but try clean up properly and get out of here. */
-            for (PPGMREGMMIO2RANGE pCurMmio = pFirstMmio; ; pCurMmio = pCurMmio->pNextR3)
-            {
-                if (pCurMmio->fFlags & PGMREGMMIO2RANGE_F_MAPPED)
-                {
-                    pCurMmio->fFlags &= ~PGMREGMMIO2RANGE_F_MAPPED;
-                    pgmHandlerPhysicalExDeregister(pVM, pCurMmio->pPhysHandlerR3);
-                }
-
-                if (!fRamExists)
-                    pgmR3PhysUnlinkRamRange(pVM, &pCurMmio->RamRange);
-                else
-                {
-                    Assert(pCurMmio->fFlags & PGMREGMMIO2RANGE_F_LAST_CHUNK); /* Only one chunk */
-
-                    uint32_t cPagesLeft = pCurMmio->RamRange.cb >> PAGE_SHIFT;
-                    PPGMPAGE pPageDst = &pRam->aPages[(pCurMmio->RamRange.GCPhys - pRam->GCPhys) >> PAGE_SHIFT];
-                    while (cPagesLeft-- > 0)
-                    {
-                        PGM_PAGE_INIT_ZERO(pPageDst, pVM, PGMPAGETYPE_RAM);
-                        pPageDst++;
-                    }
-                }
-
-                pCurMmio->RamRange.GCPhys = NIL_RTGCPHYS;
-                pCurMmio->RamRange.GCPhysLast = NIL_RTGCPHYS;
-                if (pCurMmio->fFlags & PGMREGMMIO2RANGE_F_LAST_CHUNK)
-                    break;
-            }
-
-            /** @todo NEM notification cleanup */
-            PGM_UNLOCK(pVM);
-            return rc;
-        }
-    }
-#endif
+    /*
+     * If the range have dirty page monitoring enabled, enable that.
+     *
+     * We ignore failures here for now because if we fail, the whole mapping
+     * will have to be reversed and we'll end up with nothing at all on the
+     * screen and a grumpy guest, whereas if we just go on, we'll only have
+     * visual distortions to gripe about.  There will be something in the
+     * release log.
+     */
+    if (   pFirstMmio->pPhysHandlerR3
+        && (pFirstMmio->fFlags & PGMREGMMIO2RANGE_F_TRACKING_ENABLED))
+        pgmR3PhysMmio2EnableDirtyPageTracing(pVM, pFirstMmio);

     /*

@@ -3674 +3708 @@
     AssertReturnStmt(fOldFlags & PGMREGMMIO2RANGE_F_MAPPED, PGM_UNLOCK(pVM), VERR_WRONG_ORDER);

-#if 0 /* will be reused */
-    /*
-     * If plain MMIO, we must deregister the handlers first.
-     */
-    if (!(fOldFlags & PGMREGMMIO2RANGE_F_MMIO2))
-    {
-        AssertFailed();
-
-        PPGMREGMMIO2RANGE pCurMmio = pFirstMmio;
-        rc = pgmHandlerPhysicalExDeregister(pVM, pFirstMmio->pPhysHandlerR3);
-        AssertRCReturnStmt(rc, PGM_UNLOCK(pVM), rc);
-        while (!(pCurMmio->fFlags & PGMREGMMIO2RANGE_F_LAST_CHUNK))
-        {
-            pCurMmio = pCurMmio->pNextR3;
-            rc = pgmHandlerPhysicalExDeregister(pVM, pCurMmio->pPhysHandlerR3);
-            AssertRCReturnStmt(rc, PGM_UNLOCK(pVM), VERR_PGM_PHYS_MMIO_EX_IPE);
-        }
-    }
-#endif
+    /*
+     * If monitoring dirty pages, we must deregister the handlers first.
+     */
+    if (   pFirstMmio->pPhysHandlerR3
+        && (fOldFlags & PGMREGMMIO2RANGE_F_TRACKING_ENABLED))
+        pgmR3PhysMmio2DisableDirtyPageTracing(pVM, pFirstMmio);

     /*

@@ -3842 +3863 @@
                         ("%s: %#x\n", pFirstMmio->RamRange.pszDesc, pFirstMmio->fFlags),
                         rc = VERR_NOT_SUPPORTED);

+#ifdef VBOX_WITH_PGM_NEM_MODE
+    /*
+     * Currently not supported for NEM in simple memory mode.
+     */
+    /** @todo implement this for NEM. */
+    if (RT_SUCCESS(rc))
+        AssertLogRelMsgStmt(VM_IS_NEM_ENABLED(pVM), ("%s: %#x\n", pFirstMmio->RamRange.pszDesc),
+                            rc = VERR_PGM_NOT_SUPPORTED_FOR_NEM_MODE);
+#endif
     if (RT_SUCCESS(rc))
     {

@@ -3918 +3949 @@
     return NIL_RTGCPHYS;
 }
+
+
+/**
+ * Worker for PGMR3PhysMmio2QueryAndResetDirtyBitmap.
+ *
+ * Called holding the PGM lock.
+ */
+static int pgmR3PhysMmio2QueryAndResetDirtyBitmapLocked(PVM pVM, PPDMDEVINS pDevIns, PGMMMIO2HANDLE hMmio2,
+                                                        void *pvBitmap, size_t cbBitmap)
+{
+    /*
+     * Continue validation.
+     */
+    PPGMREGMMIO2RANGE pFirstRegMmio = pgmR3PhysMmio2Find(pVM, pDevIns, UINT32_MAX, UINT32_MAX, hMmio2);
+    AssertReturn(pFirstRegMmio, VERR_INVALID_HANDLE);
+    AssertReturn(   (pFirstRegMmio->fFlags & (PGMREGMMIO2RANGE_F_TRACK_DIRTY_PAGES | PGMREGMMIO2RANGE_F_FIRST_CHUNK))
+                 == (PGMREGMMIO2RANGE_F_TRACK_DIRTY_PAGES | PGMREGMMIO2RANGE_F_FIRST_CHUNK),
+                 VERR_INVALID_FUNCTION);
+    AssertReturn(pDevIns == pFirstRegMmio->pDevInsR3, VERR_NOT_OWNER);
+
+    RTGCPHYS cbTotal     = 0;
+    uint16_t fTotalDirty = 0;
+    for (PPGMREGMMIO2RANGE pCur = pFirstRegMmio;;)
+    {
+        cbTotal     += pCur->cbReal; /** @todo good question for NEM... */
+        fTotalDirty |= pCur->fFlags;
+        if (pCur->fFlags & PGMREGMMIO2RANGE_F_LAST_CHUNK)
+            break;
+        pCur = pCur->pNextR3;
+        AssertPtrReturn(pCur, VERR_INTERNAL_ERROR_5);
+        AssertReturn(   (pCur->fFlags & (PGMREGMMIO2RANGE_F_TRACK_DIRTY_PAGES | PGMREGMMIO2RANGE_F_FIRST_CHUNK))
+                     == PGMREGMMIO2RANGE_F_TRACK_DIRTY_PAGES,
+                     VERR_INTERNAL_ERROR_4);
+    }
+    size_t const cbTotalBitmap = RT_ALIGN_T(cbTotal, PAGE_SIZE * 64, RTGCPHYS) / PAGE_SIZE / 8;
+
+    if (cbBitmap)
+    {
+        AssertPtrReturn(pvBitmap, VERR_INVALID_POINTER);
+        AssertReturn(RT_ALIGN_P(pvBitmap, sizeof(uint64_t)) == pvBitmap, VERR_INVALID_POINTER);
+        AssertReturn(cbBitmap == cbTotalBitmap, VERR_INVALID_PARAMETER);
+    }
+
+    /*
+     * Do the work.
+     */
+    int rc = VINF_SUCCESS;
+    if (pvBitmap)
+    {
+        if (fTotalDirty & PGMREGMMIO2RANGE_F_IS_DIRTY)
+        {
+            if (   (pFirstRegMmio->fFlags & (PGMREGMMIO2RANGE_F_MAPPED | PGMREGMMIO2RANGE_F_TRACKING_ENABLED))
+                ==                          (PGMREGMMIO2RANGE_F_MAPPED | PGMREGMMIO2RANGE_F_TRACKING_ENABLED))
+            {
+                /*
+                 * Reset each chunk, gathering dirty bits.
+                 */
+                RT_BZERO(pvBitmap, cbBitmap); /* simpler for now. */
+                uint32_t iPageNo = 0;
+                for (PPGMREGMMIO2RANGE pCur = pFirstRegMmio; pCur; pCur = pCur->pNextR3)
+                {
+                    if (pCur->fFlags & PGMREGMMIO2RANGE_F_IS_DIRTY)
+                    {
+                        int rc2 = pgmHandlerPhysicalResetMmio2WithBitmap(pVM, pCur->RamRange.GCPhys, pvBitmap, iPageNo);
+                        if (RT_FAILURE(rc2) && RT_SUCCESS(rc))
+                            rc = rc2;
+                        pCur->fFlags &= ~PGMREGMMIO2RANGE_F_IS_DIRTY;
+                    }
+                    if (pCur->fFlags & PGMREGMMIO2RANGE_F_LAST_CHUNK)
+                        break;
+                    iPageNo += pCur->RamRange.cb >> PAGE_SHIFT;
+                }
+            }
+            else
+            {
+                /*
+                 * If not mapped or tracking is disabled, we return the
+                 * PGMREGMMIO2RANGE_F_IS_DIRTY status for all pages.  We cannot
+                 * get more accurate data than that after unmapping or disabling.
+                 */
+                RT_BZERO(pvBitmap, cbBitmap);
+                uint32_t iPageNo = 0;
+                for (PPGMREGMMIO2RANGE pCur = pFirstRegMmio; pCur; pCur = pCur->pNextR3)
+                {
+                    if (pCur->fFlags & PGMREGMMIO2RANGE_F_IS_DIRTY)
+                    {
+                        ASMBitSetRange(pvBitmap, iPageNo, iPageNo + (pCur->RamRange.cb >> PAGE_SHIFT));
+                        pCur->fFlags &= ~PGMREGMMIO2RANGE_F_IS_DIRTY;
+                    }
+                    if (pCur->fFlags & PGMREGMMIO2RANGE_F_LAST_CHUNK)
+                        break;
+                    iPageNo += pCur->RamRange.cb >> PAGE_SHIFT;
+                }
+            }
+        }
+        /*
+         * No dirty chunks.
+         */
+        else
+            RT_BZERO(pvBitmap, cbBitmap);
+    }
+    /*
+     * No bitmap. Reset the region if tracking is currently enabled.
+     */
+    else if (   (pFirstRegMmio->fFlags & (PGMREGMMIO2RANGE_F_MAPPED | PGMREGMMIO2RANGE_F_TRACKING_ENABLED))
+             ==                          (PGMREGMMIO2RANGE_F_MAPPED | PGMREGMMIO2RANGE_F_TRACKING_ENABLED))
+        rc = PGMHandlerPhysicalReset(pVM, pFirstRegMmio->RamRange.GCPhys);
+
+    return rc;
+}
+
+
+/**
+ * Queries the dirty page bitmap and resets the monitoring.
+ *
+ * The PGMPHYS_MMIO2_FLAGS_TRACK_DIRTY_PAGES flag must be specified when
+ * creating the range for this to work.
+ *
+ * @returns VBox status code.
+ * @retval  VERR_INVALID_FUNCTION if not created using
+ *          PGMPHYS_MMIO2_FLAGS_TRACK_DIRTY_PAGES.
+ * @param   pVM         The cross context VM structure.
+ * @param   pDevIns     The device owning the MMIO2 handle.
+ * @param   hMmio2      The region handle.
+ * @param   pvBitmap    The output bitmap.  Must be 8-byte aligned.  Ignored
+ *                      when @a cbBitmap is zero.
+ * @param   cbBitmap    The size of the bitmap.  Must be the size of the whole
+ *                      MMIO2 range, rounded up to the nearest 8 bytes.
+ *                      When zero only a reset is done.
+ */
+VMMR3_INT_DECL(int) PGMR3PhysMmio2QueryAndResetDirtyBitmap(PVM pVM, PPDMDEVINS pDevIns, PGMMMIO2HANDLE hMmio2,
+                                                           void *pvBitmap, size_t cbBitmap)
+{
+    /*
+     * Do some basic validation before grapping the PGM lock and continuing.
+     */
+    AssertPtrReturn(pDevIns, VERR_INVALID_POINTER);
+    AssertReturn(RT_ALIGN_Z(cbBitmap, sizeof(uint64_t)) == cbBitmap, VERR_INVALID_PARAMETER);
+    int rc = PGM_LOCK(pVM);
+    if (RT_SUCCESS(rc))
+    {
+        rc = pgmR3PhysMmio2QueryAndResetDirtyBitmapLocked(pVM, pDevIns, hMmio2, pvBitmap, cbBitmap);
+        PGM_UNLOCK(pVM);
+    }
+    return rc;
+}
+
+/**
+ * Worker for PGMR3PhysMmio2ControlDirtyPageTracking
+ *
+ * Called owning the PGM lock.
+ */
+static int pgmR3PhysMmio2ControlDirtyPageTrackingLocked(PVM pVM, PPDMDEVINS pDevIns, PGMMMIO2HANDLE hMmio2, bool fEnabled)
+{
+    /*
+     * Continue validation.
+     */
+    PPGMREGMMIO2RANGE pFirstRegMmio = pgmR3PhysMmio2Find(pVM, pDevIns, UINT32_MAX, UINT32_MAX, hMmio2);
+    AssertReturn(pFirstRegMmio, VERR_INVALID_HANDLE);
+    AssertReturn(   (pFirstRegMmio->fFlags & (PGMREGMMIO2RANGE_F_TRACK_DIRTY_PAGES | PGMREGMMIO2RANGE_F_FIRST_CHUNK))
+                 == (PGMREGMMIO2RANGE_F_TRACK_DIRTY_PAGES | PGMREGMMIO2RANGE_F_FIRST_CHUNK)
+                 , VERR_INVALID_FUNCTION);
+    AssertReturn(pDevIns == pFirstRegMmio->pDevInsR3, VERR_NOT_OWNER);
+
+    /*
+     * Anyting needing doing?
+     */
+    if (fEnabled != RT_BOOL(pFirstRegMmio->fFlags & PGMREGMMIO2RANGE_F_TRACKING_ENABLED))
+    {
+        LogFlowFunc(("fEnabled=%RTbool %s\n", fEnabled, pFirstRegMmio->RamRange.pszDesc));
+
+        /*
+         * Update the PGMREGMMIO2RANGE_F_TRACKING_ENABLED flag.
+         */
+        for (PPGMREGMMIO2RANGE pCur = pFirstRegMmio;;)
+        {
+            if (fEnabled)
+                pCur->fFlags |= PGMREGMMIO2RANGE_F_TRACKING_ENABLED;
+            else
+                pCur->fFlags &= ~PGMREGMMIO2RANGE_F_TRACKING_ENABLED;
+            if (pCur->fFlags & PGMREGMMIO2RANGE_F_LAST_CHUNK)
+                break;
+            pCur = pCur->pNextR3;
+            AssertPtrReturn(pCur, VERR_INTERNAL_ERROR_5);
+            AssertReturn(   (pCur->fFlags & (PGMREGMMIO2RANGE_F_TRACK_DIRTY_PAGES | PGMREGMMIO2RANGE_F_FIRST_CHUNK))
+                         == PGMREGMMIO2RANGE_F_TRACK_DIRTY_PAGES
+                         , VERR_INTERNAL_ERROR_4);
+        }
+
+        /*
+         * Enable/disable handlers if currently mapped.
+         *
+         * We ignore status codes here as we've already changed the flags and
+         * returning a failure status now would be confusing.  Besides, the two
+         * functions will continue past failures.  As argued in the mapping code,
+         * it's in the release log.
+         */
+        if (pFirstRegMmio->fFlags & PGMREGMMIO2RANGE_F_MAPPED)
+        {
+            if (fEnabled)
+                pgmR3PhysMmio2EnableDirtyPageTracing(pVM, pFirstRegMmio);
+            else
+                pgmR3PhysMmio2DisableDirtyPageTracing(pVM, pFirstRegMmio);
+        }
+    }
+    else
+        LogFlowFunc(("fEnabled=%RTbool %s - no change\n", fEnabled, pFirstRegMmio->RamRange.pszDesc));
+
+    return VINF_SUCCESS;
+}
+
+
+/**
+ * Controls the dirty page tracking for an MMIO2 range.
+ *
+ * @returns VBox status code.
+ * @param   pVM         The cross context VM structure.
+ * @param   pDevIns     The device owning the MMIO2 memory.
+ * @param   hMmio2      The handle of the region.
+ * @param   fEnabled    The new tracking state.
+ */
+VMMR3_INT_DECL(int) PGMR3PhysMmio2ControlDirtyPageTracking(PVM pVM, PPDMDEVINS pDevIns, PGMMMIO2HANDLE hMmio2, bool fEnabled)
+{
+    /*
+     * Do some basic validation before grapping the PGM lock and continuing.
+     */
+    AssertPtrReturn(pDevIns, VERR_INVALID_POINTER);
+    int rc = PGM_LOCK(pVM);
+    if (RT_SUCCESS(rc))
+    {
+        rc = pgmR3PhysMmio2ControlDirtyPageTrackingLocked(pVM, pDevIns, hMmio2, fEnabled);
+        PGM_UNLOCK(pVM);
+    }
+    return rc;
+}
+

 /**
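The query API's size contract mirrors the DevVGA side: cbBitmap must equal RT_ALIGN(cbRegion, PAGE_SIZE * 64) / PAGE_SIZE / 8, i.e. the bitmap is a whole number of uint64_t words. An illustrative helper plus a worked example (not part of the changeset):

    /* 33 MiB region: 8448 pages -> already a multiple of 64 -> 8448 / 8 = 1056 bytes. */
    static size_t sketchMmio2DirtyBitmapSize(RTGCPHYS cbRegion)
    {
        return RT_ALIGN_T(cbRegion, PAGE_SIZE * 64, RTGCPHYS) / PAGE_SIZE / 8;
    }

Also note the conservative fallback in the locked worker: while a range is unmapped or tracking is disabled, per-page information does not exist, so any chunk flagged PGMREGMMIO2RANGE_F_IS_DIRTY is reported as all-dirty. A set bit therefore means "may be dirty"; a clear bit is only trustworthy while the range stayed mapped with tracking enabled.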
trunk/src/VBox/VMM/VMMR3/PGMPool.cpp (r91854 -> r92162)

@@ -280 +280 @@

     pPool->hAccessHandlerType = NIL_PGMPHYSHANDLERTYPE;
-    rc = PGMR3HandlerPhysicalTypeRegister(pVM, PGMPHYSHANDLERKIND_WRITE,
+    rc = PGMR3HandlerPhysicalTypeRegister(pVM, PGMPHYSHANDLERKIND_WRITE, true /*fKeepPgmLock*/,
                                           pgmPoolAccessHandler,
                                           NULL, "pgmPoolAccessHandler", "pgmRZPoolAccessPfHandler",
trunk/src/VBox/VMM/include/PGMInternal.h (r92157 -> r92162)

@@ -489 +489 @@
     PGMPHYSHANDLERKIND                  enmKind;
     /** The PGM_PAGE_HNDL_PHYS_STATE_XXX value corresponding to enmKind. */
-    uint32_t                            uState;
+    uint8_t                             uState;
+    /** Whether to keep the PGM lock when calling the handler. */
+    bool                                fKeepPgmLock;
+    bool                                afPadding[2];
     /** Pointer to R3 callback function. */
     R3PTRTYPE(PFNPGMPHYSHANDLER)        pfnHandlerR3;

@@ -1432 +1435 @@
  * @{ */
 /** Set if this is the first chunk in the MMIO2 range. */
-#define PGMREGMMIO2RANGE_F_FIRST_CHUNK          UINT16_C(0x0001)
+#define PGMREGMMIO2RANGE_F_FIRST_CHUNK          UINT16_C(0x0001)
 /** Set if this is the last chunk in the MMIO2 range. */
-#define PGMREGMMIO2RANGE_F_LAST_CHUNK           UINT16_C(0x0002)
+#define PGMREGMMIO2RANGE_F_LAST_CHUNK           UINT16_C(0x0002)
 /** Set if the whole range is mapped. */
-#define PGMREGMMIO2RANGE_F_MAPPED               UINT16_C(0x0004)
+#define PGMREGMMIO2RANGE_F_MAPPED               UINT16_C(0x0004)
 /** Set if it's overlapping, clear if not. */
-#define PGMREGMMIO2RANGE_F_OVERLAPPING          UINT16_C(0x0008)
+#define PGMREGMMIO2RANGE_F_OVERLAPPING          UINT16_C(0x0008)
+/** This mirrors the PGMPHYS_MMIO2_FLAGS_TRACK_DIRTY_PAGES creation flag. */
+#define PGMREGMMIO2RANGE_F_TRACK_DIRTY_PAGES    UINT16_C(0x0010)
+/** Set if the access handler is registered. */
+#define PGMREGMMIO2RANGE_F_IS_TRACKING          UINT16_C(0x0020)
+/** Set if dirty page tracking is currently enabled. */
+#define PGMREGMMIO2RANGE_F_TRACKING_ENABLED     UINT16_C(0x0040)
+/** Set if there are dirty pages in the range. */
+#define PGMREGMMIO2RANGE_F_IS_DIRTY             UINT16_C(0x0080)
 /** @} */

@@ -3043 +3054 @@
     /** Physical access handler type for ROM protection. */
     PGMPHYSHANDLERTYPE              hRomPhysHandlerType;
-    /** Alignment padding. */
-    uint32_t                        u32Padding;
+    /** Physical access handler type for MMIO2 dirty page tracing. */
+    PGMPHYSHANDLERTYPE              hMmio2DirtyPhysHandlerType;

     /** 4 MB page mask; 32 or 36 bits depending on PSE-36 (identical for all VCPUs) */

@@ -3739 +3750 @@
 bool            pgmHandlerPhysicalIsAll(PVMCC pVM, RTGCPHYS GCPhys);
 void            pgmHandlerPhysicalResetAliasedPage(PVMCC pVM, PPGMPAGE pPage, RTGCPHYS GCPhysPage, PPGMRAMRANGE pRam, bool fDoAccounting);
+DECLHIDDEN(int) pgmHandlerPhysicalResetMmio2WithBitmap(PVMCC pVM, RTGCPHYS GCPhys, void *pvBitmap, uint32_t offBitmap);
 DECLCALLBACK(void) pgmR3InfoHandlers(PVM pVM, PCDBGFINFOHLP pHlp, const char *pszArgs);
 int             pgmR3InitSavedState(PVM pVM, uint64_t cbRam);

@@ -3760 +3772 @@
 void            pgmPhysReleaseInternalPageMappingLock(PVMCC pVM, PPGMPAGEMAPLOCK pLock);
 PGM_ALL_CB2_PROTO(FNPGMPHYSHANDLER) pgmPhysRomWriteHandler;
+PGM_ALL_CB2_PROTO(FNPGMPHYSHANDLER) pgmPhysMmio2WriteHandler;
 #ifndef IN_RING3
 DECLEXPORT(FNPGMPHYSHANDLER)        pgmPhysHandlerRedirectToHC;
 DECLEXPORT(FNPGMRZPHYSPFHANDLER)    pgmPhysPfHandlerRedirectToHC;
 DECLEXPORT(FNPGMRZPHYSPFHANDLER)    pgmPhysRomWritePfHandler;
+DECLEXPORT(FNPGMRZPHYSPFHANDLER)    pgmPhysMmio2WritePfHandler;
 #endif
 int             pgmPhysFreePage(PVM pVM, PGMMFREEPAGESREQ pReq, uint32_t *pcPendingPages, PPGMPAGE pPage, RTGCPHYS GCPhys,
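The four new range flags split the lifecycle into orthogonal bits: TRACK_DIRTY_PAGES is the capability (fixed at creation), TRACKING_ENABLED is the requested policy, IS_TRACKING records whether the write handler is actually registered right now, and IS_DIRTY is the per-chunk "something was written" summary. A hedged helper expressing the steady-state invariant (illustrative, not in the changeset):

    /* A chunk should have its handler registered exactly while it is mapped
       and tracking is requested (and the capability exists at all). */
    static bool sketchMmio2ShouldBeTracking(uint16_t fFlags)
    {
        uint16_t const fNeeded = PGMREGMMIO2RANGE_F_MAPPED | PGMREGMMIO2RANGE_F_TRACKING_ENABLED;
        return (fFlags & (fNeeded | PGMREGMMIO2RANGE_F_TRACK_DIRTY_PAGES))
            ==           (fNeeded | PGMREGMMIO2RANGE_F_TRACK_DIRTY_PAGES);
    }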