- Timestamp: Jun 5, 2024 12:59:51 AM (8 months ago)
- Location: trunk/src/VBox/VMM
- Files: 19 edited
- trunk/src/VBox/VMM/VMMAll/PGMAllBth.h
  r104767 → r104840

       LogFlow(("SyncPageWorkerTrackDeref(%d,%d): Damn HCPhys=%RHp pShwPage->idx=%#x!!!\n",
                PGM_SHW_TYPE, PGM_GST_TYPE, HCPhys, pShwPage->idx));
  -    for (PPGMRAMRANGE pRam = pVM->pgm.s.CTX_SUFF(pRamRangesX);
  -         pRam;
  -         pRam = pRam->CTX_SUFF(pNext))
  -    {
  +    uint32_t const idRamRangeMax = RT_MIN(pVM->pgm.s.idRamRangeMax, RT_ELEMENTS(pVM->pgm.s.apRamRanges) - 1U);
  +    Assert(pVM->pgm.s.apRamRanges[0] == NULL);
  +    for (uint32_t idx = 1; idx <= idRamRangeMax; idx++)
  +    {
  +        PPGMRAMRANGE const pRam = pVM->CTX_EXPR(pgm, pgmr0, pgm).s.apRamRanges[idx];
  +        AssertContinue(pRam);
           unsigned iPage = pRam->cb >> GUEST_PAGE_SHIFT;
           while (iPage-- > 0)
  …
                 GCPtrPage, PdeSrc.u & X86_PDE_P, !!(PdeSrc.u & X86_PDE_RW), !!(PdeSrc.u & X86_PDE_US), (uint64_t)PdeSrc.u, GCPtr,
                 GCPhys, PdeDst.u & PGM_PDFLAGS_TRACK_DIRTY ? " Track-Dirty" : ""));
  -    PPGMRAMRANGE pRam = pgmPhysGetRangeAtOrAbove(pVM, GCPhys);
       unsigned iPTDst = 0;
       while (   iPTDst < RT_ELEMENTS(pPTDst->a)
              && !VM_FF_IS_SET(pVM, VM_FF_PGM_NO_MEMORY))
       {
  +        PPGMRAMRANGE const pRam = pgmPhysGetRangeAtOrAbove(pVM, GCPhys);
           if (pRam && GCPhys >= pRam->GCPhys)
           {
  …
           } while (   iPTDst < RT_ELEMENTS(pPTDst->a)
                    && GCPhys <= pRam->GCPhysLast);
  -
  -        /* Advance ram range list. */
  -        while (pRam && GCPhys > pRam->GCPhysLast)
  -            pRam = pRam->CTX_SUFF(pNext);
       }
       else if (pRam)
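  The change above drops the linked-list walk of RAM ranges (pRamRangesX / pNext) in favour of a bounded loop over the ID-indexed apRamRanges table, with entry 0 reserved. A minimal standalone sketch of that traversal pattern, using simplified stand-in types rather than the real PGM structures:

      /* Sketch only: bounded walk of an ID-indexed range table, index 0 reserved.
         Assumes cEntries >= 1; stand-in types, not the actual VirtualBox code. */
      #include <stddef.h>

      typedef struct RANGE { size_t cPages; } RANGE;

      static size_t countPages(RANGE *apRanges[], size_t idMax, size_t cEntries)
      {
          size_t cTotal = 0;
          size_t const idLast = idMax < cEntries - 1 ? idMax : cEntries - 1; /* clamp like RT_MIN */
          for (size_t id = 1; id <= idLast; id++)     /* entry 0 is reserved and stays NULL */
          {
              RANGE const *pRange = apRanges[id];
              if (!pRange)                            /* tolerate unused slots, like AssertContinue */
                  continue;
              cTotal += pRange->cPages;
          }
          return cTotal;
      }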
- trunk/src/VBox/VMM/VMMAll/PGMAllHandler.cpp
  r103374 → r104840

       /* Only works if the handle is in the handle table! */
       AssertReturn(hMmio2 != 0, NULL);
  -    hMmio2--;
  +    uint32_t const cMmio2Ranges = RT_MIN(pVM->pgm.s.cMmio2Ranges, RT_ELEMENTS(pVM->pgm.s.aMmio2Ranges));
  +    AssertReturn(hMmio2 <= cMmio2Ranges, NULL);
  +    AssertCompile(RT_ELEMENTS(pVM->pgm.s.apMmio2RamRanges) == RT_ELEMENTS(pVM->pgm.s.aMmio2Ranges));
  +#ifdef IN_RING0
  +    AssertCompile(RT_ELEMENTS(pVM->pgmr0.s.apMmio2RamRanges) == RT_ELEMENTS(pVM->pgm.s.aMmio2Ranges));
  +    AssertCompile(RT_ELEMENTS(pVM->pgmr0.s.acMmio2RangePages) == RT_ELEMENTS(pVM->pgm.s.aMmio2Ranges));
  +#endif
  +    uint32_t const idxFirst = hMmio2 - 1U;

       /* Must check the first one for PGMREGMMIO2RANGE_F_FIRST_CHUNK. */
  -    AssertReturn(hMmio2 < RT_ELEMENTS(pVM->pgm.s.apMmio2RangesR3), NULL);
  -    PPGMREGMMIO2RANGE pCur = pVM->pgm.s.CTX_SUFF(apMmio2Ranges)[hMmio2];
  -    AssertReturn(pCur, NULL);
  -    AssertReturn(pCur->fFlags & PGMREGMMIO2RANGE_F_FIRST_CHUNK, NULL);
  +    AssertReturn(pVM->pgm.s.aMmio2Ranges[idxFirst].fFlags & PGMREGMMIO2RANGE_F_FIRST_CHUNK, NULL);
  +#ifdef IN_RING0
  +    AssertReturn(pVM->pgmr0.s.ahMmio2MapObjs[idxFirst] != NIL_RTR0MEMOBJ, NULL); /* Only the first chunk has a backing object. */
  +#endif

       /* Loop thru the sub-ranges till we find the one covering offMmio2. */
  -    for ( ;;)
  +    for (uint32_t idx = idxFirst; idx < cMmio2Ranges; idx++)
       {
   #ifdef IN_RING3
  -        AssertReturn(pCur->pDevInsR3 == pDevIns, NULL);
  +        AssertReturn(pVM->pgm.s.aMmio2Ranges[idx].pDevInsR3 == pDevIns, NULL);
   #else
  -        AssertReturn(pCur->pDevInsR3 == pDevIns->pDevInsForR3, NULL);
  +        AssertReturn(pVM->pgm.s.aMmio2Ranges[idx].pDevInsR3 == pDevIns->pDevInsForR3, NULL);
   #endif

           /* Does it match the offset? */
  -        if (offMmio2Page < pCur->cbReal)
  -            return &pCur->RamRange.aPages[offMmio2Page >> GUEST_PAGE_SHIFT];
  -
  -        /* Advance if we can. */
  -        AssertReturn(!(pCur->fFlags & PGMREGMMIO2RANGE_F_LAST_CHUNK), NULL);
  -        offMmio2Page -= pCur->cbReal;
  -        hMmio2++;
  -        AssertReturn(hMmio2 < RT_ELEMENTS(pVM->pgm.s.apMmio2RangesR3), NULL);
  -        pCur = pVM->pgm.s.CTX_SUFF(apMmio2Ranges)[hMmio2];
  -        AssertReturn(pCur, NULL);
  -    }
  +        PPGMRAMRANGE const pRamRange = pVM->CTX_EXPR(pgm, pgmr0, pgm).s.apMmio2RamRanges[idx];
  +        AssertReturn(pRamRange, NULL);
  +#ifdef IN_RING3
  +        RTGCPHYS const cbRange = RT_MIN(pRamRange->cb, pVM->pgm.s.aMmio2Ranges[idx].cbReal);
  +#else
  +        RTGCPHYS const cbRange = RT_MIN(pRamRange->cb, (RTGCPHYS)pVM->pgmr0.s.acMmio2RangePages[idx] << GUEST_PAGE_SHIFT);
  +#endif
  +        if (offMmio2Page < cbRange)
  +            return &pRamRange->aPages[offMmio2Page >> GUEST_PAGE_SHIFT];
  +
  +        /* Advance. */
  +        AssertReturn(!(pVM->pgm.s.aMmio2Ranges[idx].fFlags & PGMREGMMIO2RANGE_F_LAST_CHUNK), NULL);
  +        offMmio2Page -= cbRange;
  +    }
  +    AssertFailed();
  +    return NULL;
   }

  …
   VMMDECL(unsigned) PGMAssertHandlerAndFlagsInSync(PVMCC pVM)
   {
  -    PPGM pPGM = &pVM->pgm.s;
       PGMAHAFIS State;
       State.GCPhys = 0;
  …
        */
       PPGMPHYSHANDLERTREE const pPhysHandlerTree = pVM->VMCC_CTX(pgm).s.pPhysHandlerTree;
  -    for (PPGMRAMRANGE pRam = pPGM->CTX_SUFF(pRamRangesX); pRam; pRam = pRam->CTX_SUFF(pNext))
  -    {
  +    uint32_t const cLookupEntries = RT_MIN(pVM->pgm.s.RamRangeUnion.cLookupEntries,
  +                                           RT_ELEMENTS(pVM->pgm.s.aRamRangeLookup));
  +    for (uint32_t idxLookup = 0; idxLookup < cLookupEntries; idxLookup++)
  +    {
  +        uint32_t const idRamRange = PGMRAMRANGELOOKUPENTRY_GET_ID(pVM->pgm.s.aRamRangeLookup[idxLookup]);
  +        AssertContinue(idRamRange < RT_ELEMENTS(pVM->pgm.s.apRamRanges));
  +        PPGMRAMRANGE const pRam = pVM->CTX_EXPR(pgm, pgmr0, pgm).s.apRamRanges[idRamRange];
  +        AssertContinue(pRam);
           const uint32_t cPages = pRam->cb >> GUEST_PAGE_SHIFT;
           for (uint32_t iPage = 0; iPage < cPages; iPage++)
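  The PGMAllHandler.cpp change above rewrites the MMIO2 sub-range walk: instead of chasing per-chunk pointers, it indexes the fixed aMmio2Ranges / apMmio2RamRanges arrays and subtracts each chunk's size from the offset until the covering chunk is found. A simplified sketch of that offset-resolution pattern (stand-in types, not the real PGM structures):

      /* Sketch only: resolve an offset into one of several consecutively indexed chunks. */
      #include <stdint.h>

      typedef struct CHUNK { uint64_t cb; int fLastChunk; } CHUNK;

      /* Returns the index of the chunk covering off, or -1 if off is past the registration. */
      static int findChunk(CHUNK const *paChunks, unsigned idxFirst, unsigned cChunks, uint64_t off)
      {
          for (unsigned idx = idxFirst; idx < cChunks; idx++)
          {
              if (off < paChunks[idx].cb)
                  return (int)idx;            /* offset lands inside this chunk */
              if (paChunks[idx].fLastChunk)
                  break;                      /* reached the last chunk of this registration */
              off -= paChunks[idx].cb;        /* advance into the next chunk */
          }
          return -1;
      }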
- trunk/src/VBox/VMM/VMMAll/PGMAllPhys.cpp
r104548 r104840 49 49 #ifdef IN_RING3 50 50 # include <iprt/thread.h> 51 #elif defined(IN_RING0) 52 # include <iprt/mem.h> 53 # include <iprt/memobj.h> 51 54 #endif 52 55 … … 181 184 182 185 183 /** 184 * Looks up a ROM range by its PGMROMRANGE::GCPhys value. 185 */ 186 DECLINLINE(PPGMROMRANGE) pgmPhysRomLookupByBase(PVMCC pVM, RTGCPHYS GCPhys) 187 { 188 for (PPGMROMRANGE pRom = pVM->pgm.s.CTX_SUFF(pRomRanges); pRom; pRom = pRom->CTX_SUFF(pNext)) 189 if (pRom->GCPhys == GCPhys) 190 return pRom; 191 return NULL; 192 } 186 187 /********************************************************************************************************************************* 188 * Access Handlers for ROM and MMIO2 * 189 *********************************************************************************************************************************/ 193 190 194 191 #ifndef IN_RING3 … … 204 201 205 202 { 206 PPGMROMRANGE const pRom = pgmPhysRomLookupByBase(pVM, uUser); 203 AssertReturn(uUser < RT_ELEMENTS(pVM->pgmr0.s.apRomRanges), VINF_EM_RAW_EMULATE_INSTR); 204 PPGMROMRANGE const pRom = pVM->pgmr0.s.apRomRanges[uUser]; 207 205 AssertReturn(pRom, VINF_EM_RAW_EMULATE_INSTR); 206 208 207 uint32_t const iPage = (GCPhysFault - pRom->GCPhys) >> GUEST_PAGE_SHIFT; 209 int rc; 208 AssertReturn(iPage < (pRom->cb >> GUEST_PAGE_SHIFT), VERR_INTERNAL_ERROR_3); 209 #ifdef IN_RING0 210 AssertReturn(iPage < pVM->pgmr0.s.acRomRangePages[uUser], VERR_INTERNAL_ERROR_2); 211 #endif 212 210 213 RT_NOREF(uErrorCode, pvFault); 211 212 214 Assert(uErrorCode & X86_TRAP_PF_RW); /* This shall not be used for read access! */ 213 215 214 Assert(iPage < (pRom->cb >> GUEST_PAGE_SHIFT));216 int rc; 215 217 switch (pRom->aPages[iPage].enmProt) 216 218 { … … 244 246 case PGMROMPROT_READ_RAM_WRITE_RAM: 245 247 pRom->aPages[iPage].LiveSave.fWrittenTo = true; 246 rc = PGMHandlerPhysicalPageTempOff(pVM, pRom->GCPhys, GCPhysFault & X86_PTE_PG_MASK);248 rc = PGMHandlerPhysicalPageTempOff(pVM, pRom->GCPhys, GCPhysFault & ~(RTGCPHYS)GUEST_PAGE_OFFSET_MASK); 247 249 AssertRC(rc); 248 250 break; /** @todo Must edit the shadow PT and restart the instruction, not use the interpreter! */ … … 276 278 PGMACCESSTYPE enmAccessType, PGMACCESSORIGIN enmOrigin, uint64_t uUser) 277 279 { 278 PPGMROMRANGE const pRom = pgmPhysRomLookupByBase(pVM, uUser); 280 AssertReturn(uUser < RT_ELEMENTS(pVM->CTX_EXPR(pgm, pgmr0, pgm).s.apRomRanges), VERR_INTERNAL_ERROR_3); 281 PPGMROMRANGE const pRom = pVM->CTX_EXPR(pgm, pgmr0, pgm).s.apRomRanges[uUser]; 279 282 AssertReturn(pRom, VERR_INTERNAL_ERROR_3); 283 280 284 uint32_t const iPage = (GCPhys - pRom->GCPhys) >> GUEST_PAGE_SHIFT; 281 Assert(iPage < (pRom->cb >> GUEST_PAGE_SHIFT)); 285 AssertReturn(iPage < (pRom->cb >> GUEST_PAGE_SHIFT), VERR_INTERNAL_ERROR_2); 286 #ifdef IN_RING0 287 AssertReturn(iPage < pVM->pgmr0.s.acRomRangePages[uUser], VERR_INTERNAL_ERROR_2); 288 #endif 282 289 PPGMROMPAGE const pRomPage = &pRom->aPages[iPage]; 283 290 … … 352 359 #endif 353 360 { 354 rc = pgmPhysPageMakeWritableAndMap(pVM, pShadowPage, GCPhys & X86_PTE_PG_MASK, &pvDstPage);361 rc = pgmPhysPageMakeWritableAndMap(pVM, pShadowPage, GCPhys & ~(RTGCPHYS)GUEST_PAGE_OFFSET_MASK, &pvDstPage); 355 362 if (RT_SUCCESS(rc)) 356 363 pvDstPage = (uint8_t *)pvDstPage + (GCPhys & GUEST_PAGE_OFFSET_MASK); … … 389 396 * Get the MMIO2 range. 
390 397 */ 391 AssertReturn(hMmio2 < RT_ELEMENTS(pVM->pgm.s.a pMmio2RangesR3), VERR_INTERNAL_ERROR_3);398 AssertReturn(hMmio2 < RT_ELEMENTS(pVM->pgm.s.aMmio2Ranges), VERR_INTERNAL_ERROR_3); 392 399 AssertReturn(hMmio2 != 0, VERR_INTERNAL_ERROR_3); 393 PPGMREGMMIO2RANGE pMmio2 = pVM->pgm.s.CTX_SUFF(apMmio2Ranges)[hMmio2 - 1];400 PPGMREGMMIO2RANGE const pMmio2 = &pVM->pgm.s.aMmio2Ranges[hMmio2 - 1]; 394 401 Assert(pMmio2->idMmio2 == hMmio2); 395 402 AssertReturn((pMmio2->fFlags & PGMREGMMIO2RANGE_F_TRACK_DIRTY_PAGES) == PGMREGMMIO2RANGE_F_TRACK_DIRTY_PAGES, … … 413 420 * Disable the handler for this page. 414 421 */ 415 int rc = PGMHandlerPhysicalPageTempOff(pVM, pMmio2-> RamRange.GCPhys, GCPhys & X86_PTE_PG_MASK);422 int rc = PGMHandlerPhysicalPageTempOff(pVM, pMmio2->GCPhys, GCPhys & ~(RTGCPHYS)GUEST_PAGE_OFFSET_MASK); 416 423 AssertRC(rc); 417 424 #ifndef IN_RING3 … … 474 481 475 482 483 484 /********************************************************************************************************************************* 485 * RAM Ranges * 486 *********************************************************************************************************************************/ 487 488 #ifdef VBOX_STRICT 489 /** 490 * Asserts that the RAM range structures are sane. 491 */ 492 DECLHIDDEN(bool) pgmPhysAssertRamRangesLocked(PVMCC pVM, bool fInUpdate, bool fRamRelaxed) 493 { 494 bool fRet = true; 495 496 /* 497 * Check the generation ID. This is stable since we own the PGM lock. 498 */ 499 AssertStmt((pVM->pgm.s.RamRangeUnion.idGeneration & 1U) == (unsigned)fInUpdate, fRet = false); 500 501 /* 502 * Check the entry count and max ID. 503 */ 504 uint32_t const idRamRangeMax = pVM->pgm.s.idRamRangeMax; 505 /* Since this is set to the highest ID, it cannot be the same as the table size. */ 506 AssertStmt(idRamRangeMax < RT_ELEMENTS(pVM->pgm.s.apRamRanges), fRet = false); 507 508 /* Because ID=0 is reserved, it's one less than the table size and at most the 509 same as the max ID. */ 510 uint32_t const cLookupEntries = pVM->pgm.s.RamRangeUnion.cLookupEntries; 511 AssertStmt(cLookupEntries < RT_ELEMENTS(pVM->pgm.s.aRamRangeLookup), fRet = false); 512 AssertStmt(cLookupEntries <= idRamRangeMax, fRet = false); 513 514 /* 515 * Check the pointer table(s). 516 */ 517 /* The first entry shall be empty. 
*/ 518 AssertStmt(pVM->pgm.s.apRamRanges[0] == NULL, fRet = false); 519 # ifdef IN_RING0 520 AssertStmt(pVM->pgmr0.s.apRamRanges[0] == NULL, fRet = false); 521 AssertStmt(pVM->pgmr0.s.acRamRangePages[0] == 0, fRet = false); 522 # endif 523 524 uint32_t cMappedRanges = 0; 525 for (uint32_t idRamRange = 1; idRamRange <= idRamRangeMax; idRamRange++) 526 { 527 # ifdef IN_RING0 528 PPGMRAMRANGE const pRamRange = pVM->pgmr0.s.apRamRanges[idRamRange]; 529 AssertContinueStmt(pRamRange, fRet = false); 530 AssertStmt(pVM->pgm.s.apRamRanges[idRamRange] != NIL_RTR3PTR, fRet = false); 531 AssertStmt( (pRamRange->cb >> GUEST_PAGE_SHIFT) == pVM->pgmr0.s.acRamRangePages[idRamRange] 532 || ( (pRamRange->cb >> GUEST_PAGE_SHIFT) < pVM->pgmr0.s.acRamRangePages[idRamRange] 533 && !(pRamRange->fFlags & PGM_RAM_RANGE_FLAGS_AD_HOC_MMIO_EX)), 534 fRet = false); 535 # else 536 PPGMRAMRANGE const pRamRange = pVM->pgm.s.apRamRanges[idRamRange]; 537 AssertContinueStmt(pRamRange, fRet = false); 538 # endif 539 AssertStmt(pRamRange->idRange == idRamRange, fRet = false); 540 if (pRamRange->GCPhys != NIL_RTGCPHYS) 541 { 542 cMappedRanges++; 543 AssertStmt((pRamRange->GCPhys & GUEST_PAGE_OFFSET_MASK) == 0, fRet = false); 544 AssertStmt((pRamRange->GCPhysLast & GUEST_PAGE_OFFSET_MASK) == GUEST_PAGE_OFFSET_MASK, fRet = false); 545 AssertStmt(pRamRange->GCPhysLast > pRamRange->GCPhys, fRet = false); 546 AssertStmt(pRamRange->GCPhysLast - pRamRange->GCPhys + 1U == pRamRange->cb, fRet = false); 547 } 548 else 549 { 550 AssertStmt(pRamRange->GCPhysLast == NIL_RTGCPHYS, fRet = false); 551 AssertStmt(PGM_RAM_RANGE_IS_AD_HOC(pRamRange) || fRamRelaxed, fRet = false); 552 } 553 } 554 555 /* 556 * Check that the lookup table is sorted and contains the right information. 557 */ 558 AssertMsgStmt(cMappedRanges == cLookupEntries, 559 ("cMappedRanges=%#x cLookupEntries=%#x\n", cMappedRanges, cLookupEntries), 560 fRet = false); 561 RTGCPHYS GCPhysPrev = ~(RTGCPHYS)0; 562 for (uint32_t idxLookup = 0; idxLookup < cLookupEntries; idxLookup++) 563 { 564 uint32_t const idRamRange = PGMRAMRANGELOOKUPENTRY_GET_ID(pVM->pgm.s.aRamRangeLookup[idxLookup]); 565 AssertContinueStmt(idRamRange > 0 && idRamRange <= idRamRangeMax, fRet = false); 566 PPGMRAMRANGE const pRamRange = pVM->CTX_EXPR(pgm,pgmr0,pgmrc).s.apRamRanges[idRamRange]; 567 AssertContinueStmt(pRamRange, fRet = false); 568 569 AssertStmt(pRamRange->idRange == idRamRange, fRet = false); 570 AssertStmt(pRamRange->GCPhys == PGMRAMRANGELOOKUPENTRY_GET_FIRST(pVM->pgm.s.aRamRangeLookup[idxLookup]), 571 fRet = false); 572 AssertStmt(pRamRange->GCPhysLast == pVM->pgm.s.aRamRangeLookup[idxLookup].GCPhysLast, fRet = false); 573 574 AssertStmt(pRamRange->GCPhys >= GCPhysPrev + 1U, fRet = false); 575 GCPhysPrev = pRamRange->GCPhysLast; 576 } 577 578 return fRet; 579 } 580 #endif /* VBOX_STRICT */ 581 582 476 583 /** 477 584 * Invalidates the RAM range TLBs. … … 503 610 * 504 611 * @copydoc pgmPhysGetRange 505 */ 506 PPGMRAMRANGE pgmPhysGetRangeSlow(PVM pVM, RTGCPHYS GCPhys) 612 * @note Caller owns the PGM lock. 
613 */ 614 PPGMRAMRANGE pgmPhysGetRangeSlow(PVMCC pVM, RTGCPHYS GCPhys) 507 615 { 508 616 STAM_COUNTER_INC(&pVM->pgm.s.Stats.CTX_MID_Z(Stat,RamRangeTlbMisses)); 509 617 510 PPGMRAMRANGE pRam = pVM->pgm.s.CTX_SUFF(pRamRangeTree); 511 while (pRam) 512 { 513 RTGCPHYS off = GCPhys - pRam->GCPhys; 514 if (off < pRam->cb) 515 { 516 pVM->pgm.s.CTX_SUFF(apRamRangesTlb)[PGM_RAMRANGE_TLB_IDX(GCPhys)] = pRam; 517 return pRam; 618 uint32_t idxEnd = RT_MIN(pVM->pgm.s.RamRangeUnion.cLookupEntries, RT_ELEMENTS(pVM->pgm.s.aRamRangeLookup)); 619 uint32_t idxStart = 0; 620 for (;;) 621 { 622 uint32_t idxLookup = idxStart + (idxEnd - idxStart) / 2; 623 RTGCPHYS const GCPhysEntryFirst = PGMRAMRANGELOOKUPENTRY_GET_FIRST(pVM->pgm.s.aRamRangeLookup[idxLookup]); 624 RTGCPHYS const cbEntryMinus1 = pVM->pgm.s.aRamRangeLookup[idxLookup].GCPhysLast - GCPhysEntryFirst; 625 RTGCPHYS const off = GCPhys - GCPhysEntryFirst; 626 if (off <= cbEntryMinus1) 627 { 628 uint32_t const idRamRange = PGMRAMRANGELOOKUPENTRY_GET_ID(pVM->pgm.s.aRamRangeLookup[idxLookup]); 629 AssertReturn(idRamRange < RT_ELEMENTS(pVM->CTX_EXPR(pgm, pgmr0, pgm).s.apRamRanges), NULL); 630 PPGMRAMRANGE const pRamRange = pVM->CTX_EXPR(pgm, pgmr0, pgm).s.apRamRanges[idRamRange]; 631 Assert(pRamRange); 632 pVM->pgm.s.CTX_SUFF(apRamRangesTlb)[PGM_RAMRANGE_TLB_IDX(GCPhys)] = pRamRange; 633 return pRamRange; 518 634 } 519 635 if (RTGCPHYS_IS_NEGATIVE(off)) 520 pRam = pRam->CTX_SUFF(pLeft); 636 { 637 if (idxStart < idxLookup) 638 idxEnd = idxLookup; 639 else 640 break; 641 } 521 642 else 522 pRam = pRam->CTX_SUFF(pRight); 643 { 644 idxLookup += 1; 645 if (idxLookup < idxEnd) 646 idxStart = idxLookup; 647 else 648 break; 649 } 523 650 } 524 651 return NULL; … … 531 658 * @copydoc pgmPhysGetRangeAtOrAbove 532 659 */ 533 PPGMRAMRANGE pgmPhysGetRangeAtOrAboveSlow(PVM pVM, RTGCPHYS GCPhys)660 PPGMRAMRANGE pgmPhysGetRangeAtOrAboveSlow(PVMCC pVM, RTGCPHYS GCPhys) 534 661 { 535 662 STAM_COUNTER_INC(&pVM->pgm.s.Stats.CTX_MID_Z(Stat,RamRangeTlbMisses)); 536 663 537 PPGMRAMRANGE pLastLeft = NULL; 538 PPGMRAMRANGE pRam = pVM->pgm.s.CTX_SUFF(pRamRangeTree); 539 while (pRam) 540 { 541 RTGCPHYS off = GCPhys - pRam->GCPhys; 542 if (off < pRam->cb) 543 { 544 pVM->pgm.s.CTX_SUFF(apRamRangesTlb)[PGM_RAMRANGE_TLB_IDX(GCPhys)] = pRam; 545 return pRam; 664 uint32_t idRamRangeLastLeft = UINT32_MAX; 665 uint32_t idxEnd = RT_MIN(pVM->pgm.s.RamRangeUnion.cLookupEntries, RT_ELEMENTS(pVM->pgm.s.aRamRangeLookup)); 666 uint32_t idxStart = 0; 667 for (;;) 668 { 669 uint32_t idxLookup = idxStart + (idxEnd - idxStart) / 2; 670 RTGCPHYS const GCPhysEntryFirst = PGMRAMRANGELOOKUPENTRY_GET_FIRST(pVM->pgm.s.aRamRangeLookup[idxLookup]); 671 RTGCPHYS const cbEntryMinus1 = pVM->pgm.s.aRamRangeLookup[idxLookup].GCPhysLast - GCPhysEntryFirst; 672 RTGCPHYS const off = GCPhys - GCPhysEntryFirst; 673 if (off <= cbEntryMinus1) 674 { 675 uint32_t const idRamRange = PGMRAMRANGELOOKUPENTRY_GET_ID(pVM->pgm.s.aRamRangeLookup[idxLookup]); 676 AssertReturn(idRamRange < RT_ELEMENTS(pVM->CTX_EXPR(pgm, pgmr0, pgm).s.apRamRanges), NULL); 677 PPGMRAMRANGE const pRamRange = pVM->CTX_EXPR(pgm, pgmr0, pgm).s.apRamRanges[idRamRange]; 678 Assert(pRamRange); 679 pVM->pgm.s.CTX_SUFF(apRamRangesTlb)[PGM_RAMRANGE_TLB_IDX(GCPhys)] = pRamRange; 680 return pRamRange; 546 681 } 547 682 if (RTGCPHYS_IS_NEGATIVE(off)) 548 683 { 549 pLastLeft = pRam; 550 pRam = pRam->CTX_SUFF(pLeft); 684 idRamRangeLastLeft = PGMRAMRANGELOOKUPENTRY_GET_ID(pVM->pgm.s.aRamRangeLookup[idxLookup]); 685 if (idxStart < idxLookup) 686 idxEnd = idxLookup; 687 
else 688 break; 551 689 } 552 690 else 553 pRam = pRam->CTX_SUFF(pRight); 554 } 555 return pLastLeft; 691 { 692 idxLookup += 1; 693 if (idxLookup < idxEnd) 694 idxStart = idxLookup; 695 else 696 break; 697 } 698 } 699 if (idRamRangeLastLeft != UINT32_MAX) 700 { 701 AssertReturn(idRamRangeLastLeft < RT_ELEMENTS(pVM->CTX_EXPR(pgm, pgmr0, pgm).s.apRamRanges), NULL); 702 PPGMRAMRANGE const pRamRange = pVM->CTX_EXPR(pgm, pgmr0, pgm).s.apRamRanges[idRamRangeLastLeft]; 703 Assert(pRamRange); 704 return pRamRange; 705 } 706 return NULL; 556 707 } 557 708 … … 562 713 * @copydoc pgmPhysGetPage 563 714 */ 564 PPGMPAGE pgmPhysGetPageSlow(PVM pVM, RTGCPHYS GCPhys)715 PPGMPAGE pgmPhysGetPageSlow(PVMCC pVM, RTGCPHYS GCPhys) 565 716 { 566 717 STAM_COUNTER_INC(&pVM->pgm.s.Stats.CTX_MID_Z(Stat,RamRangeTlbMisses)); 567 718 568 PPGMRAMRANGE pRam = pVM->pgm.s.CTX_SUFF(pRamRangeTree); 569 while (pRam) 570 { 571 RTGCPHYS off = GCPhys - pRam->GCPhys; 572 if (off < pRam->cb) 573 { 574 pVM->pgm.s.CTX_SUFF(apRamRangesTlb)[PGM_RAMRANGE_TLB_IDX(GCPhys)] = pRam; 575 return &pRam->aPages[off >> GUEST_PAGE_SHIFT]; 576 } 577 719 uint32_t idxEnd = RT_MIN(pVM->pgm.s.RamRangeUnion.cLookupEntries, RT_ELEMENTS(pVM->pgm.s.aRamRangeLookup)); 720 uint32_t idxStart = 0; 721 for (;;) 722 { 723 uint32_t idxLookup = idxStart + (idxEnd - idxStart) / 2; 724 RTGCPHYS const GCPhysEntryFirst = PGMRAMRANGELOOKUPENTRY_GET_FIRST(pVM->pgm.s.aRamRangeLookup[idxLookup]); 725 RTGCPHYS const cbEntryMinus1 = pVM->pgm.s.aRamRangeLookup[idxLookup].GCPhysLast - GCPhysEntryFirst; 726 RTGCPHYS const off = GCPhys - GCPhysEntryFirst; 727 if (off <= cbEntryMinus1) 728 { 729 uint32_t const idRamRange = PGMRAMRANGELOOKUPENTRY_GET_ID(pVM->pgm.s.aRamRangeLookup[idxLookup]); 730 AssertReturn(idRamRange < RT_ELEMENTS(pVM->CTX_EXPR(pgm, pgmr0, pgm).s.apRamRanges), NULL); 731 PPGMRAMRANGE const pRamRange = pVM->CTX_EXPR(pgm, pgmr0, pgm).s.apRamRanges[idRamRange]; 732 AssertReturn(pRamRange, NULL); 733 pVM->pgm.s.CTX_SUFF(apRamRangesTlb)[PGM_RAMRANGE_TLB_IDX(GCPhys)] = pRamRange; 734 735 /* Get the page. 
*/ 736 Assert(off < pRamRange->cb); 737 RTGCPHYS const idxPage = off >> GUEST_PAGE_SHIFT; 738 #ifdef IN_RING0 739 AssertReturn(idxPage < pVM->pgmr0.s.acRamRangePages[idRamRange], NULL); 740 #endif 741 return &pRamRange->aPages[idxPage]; 742 } 578 743 if (RTGCPHYS_IS_NEGATIVE(off)) 579 pRam = pRam->CTX_SUFF(pLeft); 744 { 745 if (idxStart < idxLookup) 746 idxEnd = idxLookup; 747 else 748 break; 749 } 580 750 else 581 pRam = pRam->CTX_SUFF(pRight); 751 { 752 idxLookup += 1; 753 if (idxLookup < idxEnd) 754 idxStart = idxLookup; 755 else 756 break; 757 } 582 758 } 583 759 return NULL; … … 590 766 * @copydoc pgmPhysGetPageEx 591 767 */ 592 int pgmPhysGetPageExSlow(PVM pVM, RTGCPHYS GCPhys, PPPGMPAGE ppPage)768 int pgmPhysGetPageExSlow(PVMCC pVM, RTGCPHYS GCPhys, PPPGMPAGE ppPage) 593 769 { 594 770 STAM_COUNTER_INC(&pVM->pgm.s.Stats.CTX_MID_Z(Stat,RamRangeTlbMisses)); 595 771 596 PPGMRAMRANGE pRam = pVM->pgm.s.CTX_SUFF(pRamRangeTree); 597 while (pRam) 598 { 599 RTGCPHYS off = GCPhys - pRam->GCPhys; 600 if (off < pRam->cb) 601 { 602 pVM->pgm.s.CTX_SUFF(apRamRangesTlb)[PGM_RAMRANGE_TLB_IDX(GCPhys)] = pRam; 603 *ppPage = &pRam->aPages[off >> GUEST_PAGE_SHIFT]; 772 uint32_t idxEnd = RT_MIN(pVM->pgm.s.RamRangeUnion.cLookupEntries, RT_ELEMENTS(pVM->pgm.s.aRamRangeLookup)); 773 uint32_t idxStart = 0; 774 for (;;) 775 { 776 uint32_t idxLookup = idxStart + (idxEnd - idxStart) / 2; 777 RTGCPHYS const GCPhysEntryFirst = PGMRAMRANGELOOKUPENTRY_GET_FIRST(pVM->pgm.s.aRamRangeLookup[idxLookup]); 778 RTGCPHYS const cbEntryMinus1 = pVM->pgm.s.aRamRangeLookup[idxLookup].GCPhysLast - GCPhysEntryFirst; 779 RTGCPHYS const off = GCPhys - GCPhysEntryFirst; 780 if (off <= cbEntryMinus1) 781 { 782 uint32_t const idRamRange = PGMRAMRANGELOOKUPENTRY_GET_ID(pVM->pgm.s.aRamRangeLookup[idxLookup]); 783 AssertReturn(idRamRange < RT_ELEMENTS(pVM->CTX_EXPR(pgm, pgmr0, pgm).s.apRamRanges), VERR_PGM_PHYS_RAM_LOOKUP_IPE); 784 PPGMRAMRANGE const pRamRange = pVM->CTX_EXPR(pgm, pgmr0, pgm).s.apRamRanges[idRamRange]; 785 AssertReturn(pRamRange, VERR_PGM_PHYS_RAM_LOOKUP_IPE); 786 pVM->pgm.s.CTX_SUFF(apRamRangesTlb)[PGM_RAMRANGE_TLB_IDX(GCPhys)] = pRamRange; 787 788 /* Get the page. 
*/ 789 Assert(off < pRamRange->cb); 790 RTGCPHYS const idxPage = off >> GUEST_PAGE_SHIFT; 791 #ifdef IN_RING0 792 AssertReturn(idxPage < pVM->pgmr0.s.acRamRangePages[idRamRange], VERR_PGM_PHYS_RAM_LOOKUP_IPE); 793 #endif 794 *ppPage = &pRamRange->aPages[idxPage]; 604 795 return VINF_SUCCESS; 605 796 } 606 607 797 if (RTGCPHYS_IS_NEGATIVE(off)) 608 pRam = pRam->CTX_SUFF(pLeft); 798 { 799 if (idxStart < idxLookup) 800 idxEnd = idxLookup; 801 else 802 break; 803 } 609 804 else 610 pRam = pRam->CTX_SUFF(pRight); 805 { 806 idxLookup += 1; 807 if (idxLookup < idxEnd) 808 idxStart = idxLookup; 809 else 810 break; 811 } 611 812 } 612 813 … … 621 822 * @copydoc pgmPhysGetPageAndRangeEx 622 823 */ 623 int pgmPhysGetPageAndRangeExSlow(PVM pVM, RTGCPHYS GCPhys, PPPGMPAGE ppPage, PPGMRAMRANGE *ppRam)824 int pgmPhysGetPageAndRangeExSlow(PVMCC pVM, RTGCPHYS GCPhys, PPPGMPAGE ppPage, PPGMRAMRANGE *ppRam) 624 825 { 625 826 STAM_COUNTER_INC(&pVM->pgm.s.Stats.CTX_MID_Z(Stat,RamRangeTlbMisses)); 626 827 627 PPGMRAMRANGE pRam = pVM->pgm.s.CTX_SUFF(pRamRangeTree); 628 while (pRam) 629 { 630 RTGCPHYS off = GCPhys - pRam->GCPhys; 631 if (off < pRam->cb) 632 { 633 pVM->pgm.s.CTX_SUFF(apRamRangesTlb)[PGM_RAMRANGE_TLB_IDX(GCPhys)] = pRam; 634 *ppRam = pRam; 635 *ppPage = &pRam->aPages[off >> GUEST_PAGE_SHIFT]; 828 uint32_t idxEnd = RT_MIN(pVM->pgm.s.RamRangeUnion.cLookupEntries, RT_ELEMENTS(pVM->pgm.s.aRamRangeLookup)); 829 uint32_t idxStart = 0; 830 for (;;) 831 { 832 uint32_t idxLookup = idxStart + (idxEnd - idxStart) / 2; 833 RTGCPHYS const GCPhysEntryFirst = PGMRAMRANGELOOKUPENTRY_GET_FIRST(pVM->pgm.s.aRamRangeLookup[idxLookup]); 834 RTGCPHYS const cbEntryMinus1 = pVM->pgm.s.aRamRangeLookup[idxLookup].GCPhysLast - GCPhysEntryFirst; 835 RTGCPHYS const off = GCPhys - GCPhysEntryFirst; 836 if (off <= cbEntryMinus1) 837 { 838 uint32_t const idRamRange = PGMRAMRANGELOOKUPENTRY_GET_ID(pVM->pgm.s.aRamRangeLookup[idxLookup]); 839 AssertReturn(idRamRange < RT_ELEMENTS(pVM->CTX_EXPR(pgm, pgmr0, pgm).s.apRamRanges), VERR_PGM_PHYS_RAM_LOOKUP_IPE); 840 PPGMRAMRANGE const pRamRange = pVM->CTX_EXPR(pgm, pgmr0, pgm).s.apRamRanges[idRamRange]; 841 AssertReturn(pRamRange, VERR_PGM_PHYS_RAM_LOOKUP_IPE); 842 pVM->pgm.s.CTX_SUFF(apRamRangesTlb)[PGM_RAMRANGE_TLB_IDX(GCPhys)] = pRamRange; 843 844 /* Get the page. */ 845 Assert(off < pRamRange->cb); 846 RTGCPHYS const idxPage = off >> GUEST_PAGE_SHIFT; 847 #ifdef IN_RING0 848 AssertReturn(idxPage < pVM->pgmr0.s.acRamRangePages[idRamRange], VERR_PGM_PHYS_RAM_LOOKUP_IPE); 849 #endif 850 *ppRam = pRamRange; 851 *ppPage = &pRamRange->aPages[idxPage]; 636 852 return VINF_SUCCESS; 637 853 } 638 639 854 if (RTGCPHYS_IS_NEGATIVE(off)) 640 pRam = pRam->CTX_SUFF(pLeft); 855 { 856 if (idxStart < idxLookup) 857 idxEnd = idxLookup; 858 else 859 break; 860 } 641 861 else 642 pRam = pRam->CTX_SUFF(pRight); 862 { 863 idxLookup += 1; 864 if (idxLookup < idxEnd) 865 idxStart = idxLookup; 866 else 867 break; 868 } 643 869 } 644 870 … … 647 873 return VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS; 648 874 } 875 876 877 /** 878 * Common worker for pgmR3PhysAllocateRamRange, PGMR0PhysAllocateRamRangeReq, 879 * and pgmPhysMmio2RegisterWorker2. 880 */ 881 DECLHIDDEN(int) pgmPhysRamRangeAllocCommon(PVMCC pVM, uint32_t cPages, uint32_t fFlags, uint32_t *pidNewRange) 882 { 883 884 /* 885 * Allocate the RAM range structure and map it into ring-3. 
886 */ 887 size_t const cbRamRange = RT_ALIGN_Z(RT_UOFFSETOF_DYN(PGMRAMRANGE, aPages[cPages]), HOST_PAGE_SIZE); 888 #ifdef IN_RING0 889 RTR0MEMOBJ hMemObj = NIL_RTR0MEMOBJ; 890 int rc = RTR0MemObjAllocPage(&hMemObj, cbRamRange, false /*fExecutable*/); 891 #else 892 PPGMRAMRANGE pRamRange; 893 int rc = SUPR3PageAlloc(cbRamRange >> HOST_PAGE_SHIFT, 0 /*fFlags*/, (void **)&pRamRange); 894 #endif 895 if (RT_SUCCESS(rc)) 896 { 897 /* Zero the memory and do basic range init before mapping it into userland. */ 898 #ifdef IN_RING0 899 PPGMRAMRANGE const pRamRange = (PPGMRAMRANGE)RTR0MemObjAddress(hMemObj); 900 if (!RTR0MemObjWasZeroInitialized(hMemObj)) 901 #endif 902 RT_BZERO(pRamRange, cbRamRange); 903 904 pRamRange->GCPhys = NIL_RTGCPHYS; 905 pRamRange->cb = (RTGCPHYS)cPages << GUEST_PAGE_SHIFT; 906 pRamRange->GCPhysLast = NIL_RTGCPHYS; 907 pRamRange->fFlags = fFlags; 908 pRamRange->idRange = UINT32_MAX / 2; 909 910 #ifdef IN_RING0 911 /* Map it into userland. */ 912 RTR0MEMOBJ hMapObj = NIL_RTR0MEMOBJ; 913 rc = RTR0MemObjMapUser(&hMapObj, hMemObj, (RTR3PTR)-1, 0 /*uAlignment*/, 914 RTMEM_PROT_READ | RTMEM_PROT_WRITE, NIL_RTR0PROCESS); 915 if (RT_SUCCESS(rc)) 916 #endif 917 { 918 /* 919 * Grab the lock (unlikely to fail or block as caller typically owns it already). 920 */ 921 rc = PGM_LOCK(pVM); 922 if (RT_SUCCESS(rc)) 923 { 924 /* 925 * Allocate a range ID. 926 */ 927 uint32_t idRamRange = pVM->CTX_EXPR(pgm, pgmr0, pgm).s.idRamRangeMax + 1; 928 if (idRamRange != 0 && idRamRange < RT_ELEMENTS(pVM->CTX_EXPR(pgm, pgmr0, pgm).s.apRamRanges)) 929 { 930 #ifdef IN_RING0 931 if (pVM->pgmr0.s.apRamRanges[idRamRange] == NULL) 932 #endif 933 { 934 if (pVM->pgm.s.apRamRanges[idRamRange] == NIL_RTR3PTR) 935 { 936 /* 937 * Commit it. 938 */ 939 #ifdef IN_RING0 940 pVM->pgmr0.s.apRamRanges[idRamRange] = pRamRange; 941 pVM->pgmr0.s.acRamRangePages[idRamRange] = cPages; 942 pVM->pgmr0.s.ahRamRangeMemObjs[idRamRange] = hMemObj; 943 pVM->pgmr0.s.ahRamRangeMapObjs[idRamRange] = hMapObj; 944 pVM->pgmr0.s.idRamRangeMax = idRamRange; 945 #endif 946 947 pVM->pgm.s.idRamRangeMax = idRamRange; 948 #ifdef IN_RING0 949 pVM->pgm.s.apRamRanges[idRamRange] = RTR0MemObjAddressR3(hMapObj); 950 #else 951 pVM->pgm.s.apRamRanges[idRamRange] = pRamRange; 952 #endif 953 954 pRamRange->idRange = idRamRange; 955 *pidNewRange = idRamRange; 956 957 PGM_UNLOCK(pVM); 958 return VINF_SUCCESS; 959 } 960 } 961 962 /* 963 * Bail out. 964 */ 965 rc = VERR_INTERNAL_ERROR_5; 966 } 967 else 968 rc = VERR_PGM_TOO_MANY_RAM_RANGES; 969 PGM_UNLOCK(pVM); 970 } 971 #ifdef IN_RING0 972 RTR0MemObjFree(hMapObj, false /*fFreeMappings*/); 973 #endif 974 } 975 #ifdef IN_RING0 976 RTR0MemObjFree(hMemObj, true /*fFreeMappings*/); 977 #else 978 SUPR3PageFree(pRamRange, cbRamRange >> HOST_PAGE_SHIFT); 979 #endif 980 } 981 *pidNewRange = UINT32_MAX; 982 return rc; 983 } 984 985 986 #ifdef IN_RING0 987 /** 988 * This is called during VM initialization to allocate a RAM range. 989 * 990 * The range is not entered into the lookup table, that is something the caller 991 * has to do. The PGMPAGE entries are zero'ed, but otherwise uninitialized. 992 * 993 * @returns VBox status code. 994 * @param pGVM Pointer to the global VM structure. 995 * @param pReq Where to get the parameters and return the range ID. 996 * @thread EMT(0) 997 */ 998 VMMR0_INT_DECL(int) PGMR0PhysAllocateRamRangeReq(PGVM pGVM, PPGMPHYSALLOCATERAMRANGEREQ pReq) 999 { 1000 /* 1001 * Validate input (ASSUME pReq is a copy and can't be modified by ring-3 1002 * while we're here). 
1003 */ 1004 AssertPtrReturn(pReq, VERR_INVALID_POINTER); 1005 AssertMsgReturn(pReq->Hdr.cbReq == sizeof(*pReq), ("%#x < %#zx\n", pReq->Hdr.cbReq, sizeof(*pReq)), VERR_INVALID_PARAMETER); 1006 1007 AssertReturn(pReq->cbGuestPage == GUEST_PAGE_SIZE, VERR_INCOMPATIBLE_CONFIG); 1008 1009 AssertReturn(pReq->cGuestPages > 0, VERR_OUT_OF_RANGE); 1010 AssertReturn(pReq->cGuestPages <= PGM_MAX_PAGES_PER_RAM_RANGE, VERR_OUT_OF_RANGE); 1011 1012 AssertMsgReturn(!(pReq->fFlags & ~(uint32_t)PGM_RAM_RANGE_FLAGS_VALID_MASK), ("fFlags=%#RX32\n", pReq->fFlags), 1013 VERR_INVALID_FLAGS); 1014 1015 /** @todo better VM state guard, enmVMState is ring-3 writable. */ 1016 VMSTATE const enmState = pGVM->enmVMState; 1017 AssertMsgReturn(enmState == VMSTATE_CREATING, ("enmState=%d\n", enmState), VERR_VM_INVALID_VM_STATE); 1018 VM_ASSERT_EMT0_RETURN(pGVM, VERR_VM_THREAD_NOT_EMT); 1019 1020 /* 1021 * Call common worker. 1022 */ 1023 return pgmPhysRamRangeAllocCommon(pGVM, pReq->cGuestPages, pReq->fFlags, &pReq->idNewRange); 1024 } 1025 #endif /* IN_RING0 */ 1026 1027 1028 /** 1029 * Frees a RAM range. 1030 * 1031 * This is not a typical occurence. Currently only used for a special MMIO2 1032 * saved state compatibility scenario involving PCNet and state saved before 1033 * VBox v4.3.6. 1034 */ 1035 static int pgmPhysRamRangeFree(PVMCC pVM, PPGMRAMRANGE pRamRange) 1036 { 1037 /* 1038 * Some basic input validation. 1039 */ 1040 AssertPtrReturn(pRamRange, VERR_INVALID_PARAMETER); 1041 uint32_t const idRamRange = ASMAtomicReadU32(&pRamRange->idRange); 1042 ASMCompilerBarrier(); 1043 AssertReturn(idRamRange < RT_ELEMENTS(pVM->CTX_EXPR(pgm, pgmr0, pgm).s.apRamRanges), VERR_INVALID_PARAMETER); 1044 AssertReturn(pRamRange == pVM->CTX_EXPR(pgm, pgmr0, pgm).s.apRamRanges[idRamRange], VERR_INVALID_PARAMETER); 1045 AssertReturn(pRamRange->GCPhys == NIL_RTGCPHYS, VERR_RESOURCE_BUSY); 1046 1047 /* 1048 * Kill the range pointers and associated data. 1049 */ 1050 pVM->pgm.s.apRamRanges[idRamRange] = NIL_RTR3PTR; 1051 #ifdef IN_RING0 1052 pVM->pgmr0.s.apRamRanges[idRamRange] = NULL; 1053 #endif 1054 1055 /* 1056 * Zap the pages and other RAM ranges properties to ensure there aren't any 1057 * stale references to anything hanging around should the freeing go awry. 1058 */ 1059 #ifdef IN_RING0 1060 uint32_t const cPages = pVM->pgmr0.s.acRamRangePages[idRamRange]; 1061 pVM->pgmr0.s.acRamRangePages[idRamRange] = 0; 1062 #else 1063 uint32_t const cPages = pRamRange->cb >> GUEST_PAGE_SHIFT; 1064 #endif 1065 RT_BZERO(pRamRange->aPages, cPages * sizeof(pRamRange->aPages[0])); 1066 1067 pRamRange->fFlags = UINT32_MAX; 1068 pRamRange->cb = NIL_RTGCPHYS; 1069 pRamRange->pbR3 = NIL_RTR3PTR; 1070 pRamRange->pszDesc = NIL_RTR3PTR; 1071 pRamRange->paLSPages = NIL_RTR3PTR; 1072 pRamRange->idRange = UINT32_MAX / 8; 1073 1074 /* 1075 * Free the RAM range itself. 
1076 */ 1077 #ifdef IN_RING0 1078 Assert(pVM->pgmr0.s.ahRamRangeMapObjs[idRamRange] != NIL_RTR0MEMOBJ); 1079 int rc = RTR0MemObjFree(pVM->pgmr0.s.ahRamRangeMapObjs[idRamRange], true /*fFreeMappings*/); 1080 if (RT_SUCCESS(rc)) 1081 { 1082 pVM->pgmr0.s.ahRamRangeMapObjs[idRamRange] = NIL_RTR0MEMOBJ; 1083 rc = RTR0MemObjFree(pVM->pgmr0.s.ahRamRangeMemObjs[idRamRange], true /*fFreeMappings*/); 1084 if (RT_SUCCESS(rc)) 1085 pVM->pgmr0.s.ahRamRangeMemObjs[idRamRange] = NIL_RTR0MEMOBJ; 1086 } 1087 #else 1088 size_t const cbRamRange = RT_ALIGN_Z(RT_UOFFSETOF_DYN(PGMRAMRANGE, aPages[cPages]), HOST_PAGE_SIZE); 1089 int rc = SUPR3PageFree(pRamRange, cbRamRange >> HOST_PAGE_SHIFT); 1090 #endif 1091 1092 /* 1093 * Decrease the max ID if removal was successful and this was the final 1094 * RAM range entry. 1095 */ 1096 if ( RT_SUCCESS(rc) 1097 && idRamRange == pVM->CTX_EXPR(pgm, pgmr0, pgm).s.idRamRangeMax) 1098 { 1099 pVM->pgm.s.idRamRangeMax = idRamRange - 1; 1100 #ifdef IN_RING0 1101 pVM->pgmr0.s.idRamRangeMax = idRamRange - 1; 1102 #endif 1103 } 1104 1105 return rc; 1106 } 1107 1108 1109 1110 /********************************************************************************************************************************* 1111 * MMIO2 * 1112 *********************************************************************************************************************************/ 1113 1114 /** 1115 * Calculates the number of chunks 1116 * 1117 * @returns Number of registration chunk needed. 1118 * @param cb The size of the MMIO/MMIO2 range. 1119 * @param pcPagesPerChunk Where to return the number of guest pages tracked by 1120 * each chunk. Optional. 1121 */ 1122 DECLHIDDEN(uint16_t) pgmPhysMmio2CalcChunkCount(RTGCPHYS cb, uint32_t *pcPagesPerChunk) 1123 { 1124 /* 1125 * This is the same calculation as PGMR3PhysRegisterRam does, except we'll be 1126 * needing a few bytes extra the PGMREGMMIO2RANGE structure. 1127 * 1128 * Note! In additions, we've got a 24 bit sub-page range for MMIO2 ranges, leaving 1129 * us with an absolute maximum of 16777215 pages per chunk (close to 64 GB). 1130 */ 1131 AssertCompile(PGM_MAX_PAGES_PER_RAM_RANGE < _16M); 1132 uint32_t const cPagesPerChunk = PGM_MAX_PAGES_PER_RAM_RANGE; 1133 1134 if (pcPagesPerChunk) 1135 *pcPagesPerChunk = cPagesPerChunk; 1136 1137 /* Calc the number of chunks we need. */ 1138 RTGCPHYS const cGuestPages = cb >> GUEST_PAGE_SHIFT; 1139 uint16_t cChunks = (uint16_t)((cGuestPages + cPagesPerChunk - 1) / cPagesPerChunk); 1140 #ifdef IN_RING3 1141 AssertRelease((RTGCPHYS)cChunks * cPagesPerChunk >= cGuestPages); 1142 #else 1143 AssertReturn((RTGCPHYS)cChunks * cPagesPerChunk >= cGuestPages, 0); 1144 #endif 1145 return cChunks; 1146 } 1147 1148 1149 /** 1150 * Worker for PGMR3PhysMmio2Register and PGMR0PhysMmio2RegisterReq. 1151 * 1152 * (The caller already know which MMIO2 region ID will be assigned and how many 1153 * chunks will be used, so no output parameters required.) 1154 */ 1155 DECLHIDDEN(int) pgmPhysMmio2RegisterWorker(PVMCC pVM, uint32_t const cGuestPages, uint8_t const idMmio2, 1156 const uint8_t cChunks, PPDMDEVINSR3 const pDevIns, uint8_t 1157 const iSubDev, uint8_t const iRegion, uint32_t const fFlags) 1158 { 1159 /* 1160 * Get the number of pages per chunk. 
1161 */ 1162 uint32_t cGuestPagesPerChunk; 1163 AssertReturn(pgmPhysMmio2CalcChunkCount((RTGCPHYS)cGuestPages << GUEST_PAGE_SHIFT, &cGuestPagesPerChunk) == cChunks, 1164 VERR_PGM_PHYS_MMIO_EX_IPE); 1165 Assert(idMmio2 != 0); 1166 1167 /* 1168 * The first thing we need to do is the allocate the memory that will be 1169 * backing the whole range. 1170 */ 1171 RTGCPHYS const cbMmio2Backing = (RTGCPHYS)cGuestPages << GUEST_PAGE_SHIFT; 1172 uint32_t const cHostPages = (cbMmio2Backing + HOST_PAGE_SIZE - 1U) >> HOST_PAGE_SHIFT; 1173 size_t const cbMmio2Aligned = cHostPages << HOST_PAGE_SHIFT; 1174 R3PTRTYPE(uint8_t *) pbMmio2BackingR3 = NIL_RTR3PTR; 1175 #ifdef IN_RING0 1176 RTR0MEMOBJ hMemObj = NIL_RTR0MEMOBJ; 1177 # ifndef VBOX_WITH_LINEAR_HOST_PHYS_MEM 1178 int rc = RTR0MemObjAllocPage(&hMemObj, cbMmio2Aligned, false /*fExecutable*/); 1179 # else 1180 int rc = RTR0MemObjAllocPhysNC(&hMemObj, cbMmio2Aligned, NIL_RTHCPHYS); 1181 # endif 1182 #else /* !IN_RING0 */ 1183 AssertReturn(PGM_IS_IN_NEM_MODE(pVM), VERR_INTERNAL_ERROR_4); 1184 int rc = SUPR3PageAlloc(cHostPages, pVM->pgm.s.fUseLargePages ? SUP_PAGE_ALLOC_F_LARGE_PAGES : 0, (void **)&pbMmio2BackingR3); 1185 #endif /* !IN_RING0 */ 1186 if (RT_SUCCESS(rc)) 1187 { 1188 /* 1189 * Make sure it's is initialized to zeros before it's mapped to userland. 1190 */ 1191 #ifdef IN_RING0 1192 # ifndef VBOX_WITH_LINEAR_HOST_PHYS_MEM 1193 uint8_t *pbMmio2BackingR0 = (uint8_t *)RTR0MemObjAddress(hMemObj); 1194 AssertPtr(pbMmio2BackingR0); 1195 # endif 1196 if (!RTR0MemObjWasZeroInitialized(hMemObj)) 1197 { 1198 void *pv = RTR0MemObjAddress(hMemObj); 1199 AssertReturnStmt(pv, RTR0MemObjFree(hMemObj, true /*fFreeMappings*/), VERR_INTERNAL_ERROR_4); 1200 RT_BZERO(pv, cbMmio2Aligned); 1201 } 1202 #else 1203 RT_BZERO(pbMmio2BackingR3, cbMmio2Aligned); 1204 #endif 1205 1206 #ifdef IN_RING0 1207 /* 1208 * Map it into ring-3. 1209 */ 1210 RTR0MEMOBJ hMapObj = NIL_RTR0MEMOBJ; 1211 rc = RTR0MemObjMapUser(&hMapObj, hMemObj, (RTR3PTR)-1, 0, RTMEM_PROT_READ | RTMEM_PROT_WRITE, NIL_RTR0PROCESS); 1212 if (RT_SUCCESS(rc)) 1213 { 1214 pbMmio2BackingR3 = RTR0MemObjAddressR3(hMapObj); 1215 #endif 1216 1217 /* 1218 * Create the MMIO2 registration records and associated RAM ranges. 1219 * The RAM range allocation may fail here. 1220 */ 1221 RTGCPHYS offMmio2Backing = 0; 1222 uint32_t cGuestPagesLeft = cGuestPages; 1223 for (uint32_t iChunk = 0, idx = idMmio2 - 1; iChunk < cChunks; iChunk++, idx++) 1224 { 1225 uint32_t const cPagesTrackedByChunk = RT_MIN(cGuestPagesLeft, cGuestPagesPerChunk); 1226 1227 /* 1228 * Allocate the RAM range for this chunk. 1229 */ 1230 uint32_t idRamRange = UINT32_MAX; 1231 rc = pgmPhysRamRangeAllocCommon(pVM, cPagesTrackedByChunk, PGM_RAM_RANGE_FLAGS_AD_HOC_MMIO_EX, &idRamRange); 1232 if (RT_FAILURE(rc)) 1233 { 1234 /* We only zap the pointers to the backing storage. 1235 PGMR3Term and friends will clean up the RAM ranges and stuff. 
*/ 1236 while (iChunk-- > 0) 1237 { 1238 idx--; 1239 #ifdef IN_RING0 1240 pVM->pgmr0.s.acMmio2RangePages[idx] = 0; 1241 # ifndef VBOX_WITH_LINEAR_HOST_PHYS_MEM 1242 pVM->pgmr0.s.apbMmio2Backing[idx] = NULL; 1243 # endif 1244 #endif 1245 1246 PPGMREGMMIO2RANGE const pMmio2 = &pVM->pgm.s.aMmio2Ranges[idx]; 1247 pMmio2->pbR3 = NIL_RTR3PTR; 1248 1249 PPGMRAMRANGE const pRamRange = pVM->CTX_EXPR(pgm, pgmr0, pgm).s.apMmio2RamRanges[idx]; 1250 pRamRange->pbR3 = NIL_RTR3PTR; 1251 RT_BZERO(&pRamRange->aPages[0], sizeof(pRamRange->aPages) * cGuestPagesPerChunk); 1252 } 1253 break; 1254 } 1255 1256 pVM->pgm.s.apMmio2RamRanges[idx] = pVM->pgm.s.apRamRanges[idRamRange]; 1257 #ifdef IN_RING0 1258 pVM->pgmr0.s.apMmio2RamRanges[idx] = pVM->pgmr0.s.apRamRanges[idRamRange]; 1259 pVM->pgmr0.s.acMmio2RangePages[idx] = cPagesTrackedByChunk; 1260 #endif 1261 1262 /* Initialize the RAM range. */ 1263 PPGMRAMRANGE const pRamRange = pVM->CTX_EXPR(pgm, pgmr0, pgm).s.apRamRanges[idRamRange]; 1264 pRamRange->pbR3 = pbMmio2BackingR3 + offMmio2Backing; 1265 uint32_t iDstPage = cPagesTrackedByChunk; 1266 #ifdef IN_RING0 1267 AssertRelease(HOST_PAGE_SHIFT == GUEST_PAGE_SHIFT); 1268 while (iDstPage-- > 0) 1269 { 1270 RTHCPHYS HCPhys = RTR0MemObjGetPagePhysAddr(hMemObj, iDstPage + (offMmio2Backing >> HOST_PAGE_SHIFT)); 1271 Assert(HCPhys != NIL_RTHCPHYS); 1272 PGM_PAGE_INIT(&pRamRange->aPages[iDstPage], HCPhys, PGM_MMIO2_PAGEID_MAKE(idMmio2, iDstPage), 1273 PGMPAGETYPE_MMIO2, PGM_PAGE_STATE_ALLOCATED); 1274 } 1275 #else 1276 Assert(PGM_IS_IN_NEM_MODE(pVM)); 1277 while (iDstPage-- > 0) 1278 PGM_PAGE_INIT(&pRamRange->aPages[iDstPage], UINT64_C(0x0000ffffffff0000), 1279 PGM_MMIO2_PAGEID_MAKE(idMmio2, iDstPage), 1280 PGMPAGETYPE_MMIO2, PGM_PAGE_STATE_ALLOCATED); 1281 #endif 1282 1283 /* 1284 * Initialize the MMIO2 registration structure. 1285 */ 1286 PPGMREGMMIO2RANGE const pMmio2 = &pVM->pgm.s.aMmio2Ranges[idx]; 1287 pMmio2->pDevInsR3 = pDevIns; 1288 pMmio2->pbR3 = pbMmio2BackingR3 + offMmio2Backing; 1289 pMmio2->fFlags = 0; 1290 if (iChunk == 0) 1291 pMmio2->fFlags |= PGMREGMMIO2RANGE_F_FIRST_CHUNK; 1292 if (iChunk + 1 == cChunks) 1293 pMmio2->fFlags |= PGMREGMMIO2RANGE_F_LAST_CHUNK; 1294 if (fFlags & PGMPHYS_MMIO2_FLAGS_TRACK_DIRTY_PAGES) 1295 pMmio2->fFlags |= PGMREGMMIO2RANGE_F_TRACK_DIRTY_PAGES; 1296 1297 pMmio2->iSubDev = iSubDev; 1298 pMmio2->iRegion = iRegion; 1299 pMmio2->idSavedState = UINT8_MAX; 1300 pMmio2->idMmio2 = idMmio2 + iChunk; 1301 pMmio2->idRamRange = idRamRange; 1302 Assert(pMmio2->idRamRange == idRamRange); 1303 pMmio2->GCPhys = NIL_RTGCPHYS; 1304 pMmio2->cbReal = (RTGCPHYS)cPagesTrackedByChunk << GUEST_PAGE_SHIFT; 1305 pMmio2->pPhysHandlerR3 = NIL_RTR3PTR; /* Pre-alloc is done by ring-3 caller. */ 1306 pMmio2->paLSPages = NIL_RTR3PTR; 1307 1308 #if defined(IN_RING0) && !defined(VBOX_WITH_LINEAR_HOST_PHYS_MEM) 1309 pVM->pgmr0.s.apbMmio2Backing[idx] = &pbMmio2BackingR0[offMmio2Backing]; 1310 #endif 1311 1312 /* Advance */ 1313 cGuestPagesLeft -= cPagesTrackedByChunk; 1314 offMmio2Backing += (RTGCPHYS)cPagesTrackedByChunk << GUEST_PAGE_SHIFT; 1315 } /* chunk alloc loop */ 1316 Assert(cGuestPagesLeft == 0 || RT_FAILURE_NP(rc)); 1317 if (RT_SUCCESS(rc)) 1318 { 1319 /* 1320 * Account for pages and ring-0 memory objects. 
1321 */ 1322 pVM->pgm.s.cAllPages += cGuestPages; 1323 pVM->pgm.s.cPrivatePages += cGuestPages; 1324 #ifdef IN_RING0 1325 pVM->pgmr0.s.ahMmio2MemObjs[idMmio2 - 1] = hMemObj; 1326 pVM->pgmr0.s.ahMmio2MapObjs[idMmio2 - 1] = hMapObj; 1327 #endif 1328 pVM->pgm.s.cMmio2Ranges = idMmio2 + cChunks - 1U; 1329 1330 /* 1331 * Done!. 1332 */ 1333 return VINF_SUCCESS; 1334 } 1335 1336 /* 1337 * Bail. 1338 */ 1339 #ifdef IN_RING0 1340 RTR0MemObjFree(hMapObj, true /*fFreeMappings*/); 1341 } 1342 RTR0MemObjFree(hMemObj, true /*fFreeMappings*/); 1343 #else 1344 SUPR3PageFree(pbMmio2BackingR3, cHostPages); 1345 #endif 1346 } 1347 else 1348 LogRel(("pgmPhysMmio2RegisterWorker: Failed to allocate %RGp bytes of MMIO2 backing memory: %Rrc\n", cbMmio2Aligned, rc)); 1349 return rc; 1350 } 1351 1352 1353 #ifdef IN_RING0 1354 /** 1355 * This is called during VM initialization to create an MMIO2 range. 1356 * 1357 * This does everything except setting the PGMRAMRANGE::pszDesc to a non-zero 1358 * value and preallocating the access handler for dirty bitmap tracking. 1359 * 1360 * The caller already knows which MMIO2 ID will be assigned to the registration 1361 * and how many chunks it requires, so there are no output fields in the request 1362 * structure. 1363 * 1364 * @returns VBox status code. 1365 * @param pGVM Pointer to the global VM structure. 1366 * @param pReq Where to get the parameters. 1367 * @thread EMT(0) 1368 */ 1369 VMMR0_INT_DECL(int) PGMR0PhysMmio2RegisterReq(PGVM pGVM, PPGMPHYSMMIO2REGISTERREQ pReq) 1370 { 1371 /* 1372 * Validate input (ASSUME pReq is a copy and can't be modified by ring-3 1373 * while we're here). 1374 */ 1375 AssertPtrReturn(pReq, VERR_INVALID_POINTER); 1376 AssertMsgReturn(pReq->Hdr.cbReq == sizeof(*pReq), ("%#x < %#zx\n", pReq->Hdr.cbReq, sizeof(*pReq)), VERR_INVALID_PARAMETER); 1377 1378 /** @todo better VM state guard, enmVMState is ring-3 writable. */ 1379 VMSTATE const enmState = pGVM->enmVMState; 1380 AssertMsgReturn( enmState == VMSTATE_CREATING 1381 || enmState == VMSTATE_LOADING /* pre 4.3.6 state loading needs to ignore a MMIO2 region in PCNet. 
*/ 1382 , ("enmState=%d\n", enmState), VERR_VM_INVALID_VM_STATE); 1383 VM_ASSERT_EMT0_RETURN(pGVM, VERR_VM_THREAD_NOT_EMT); 1384 1385 AssertReturn(pReq->cbGuestPage == GUEST_PAGE_SIZE, VERR_INCOMPATIBLE_CONFIG); 1386 AssertReturn(GUEST_PAGE_SIZE == HOST_PAGE_SIZE, VERR_INCOMPATIBLE_CONFIG); 1387 1388 AssertReturn(pReq->cGuestPages > 0, VERR_OUT_OF_RANGE); 1389 AssertReturn(pReq->cGuestPages <= PGM_MAX_PAGES_PER_MMIO2_REGION, VERR_OUT_OF_RANGE); 1390 AssertReturn(pReq->cGuestPages <= (MM_MMIO_64_MAX >> GUEST_PAGE_SHIFT), VERR_OUT_OF_RANGE); 1391 1392 AssertMsgReturn(!(pReq->fFlags & ~PGMPHYS_MMIO2_FLAGS_VALID_MASK), ("fFlags=%#x\n", pReq->fFlags), VERR_INVALID_FLAGS); 1393 1394 AssertMsgReturn( pReq->cChunks > 0 1395 && pReq->cChunks < PGM_MAX_MMIO2_RANGES 1396 && pReq->cChunks == pgmPhysMmio2CalcChunkCount((RTGCPHYS)pReq->cGuestPages << GUEST_PAGE_SHIFT, NULL), 1397 ("cChunks=%#x cGuestPages=%#x\n", pReq->cChunks, pReq->cGuestPages), 1398 VERR_INVALID_PARAMETER); 1399 1400 AssertMsgReturn( pReq->idMmio2 != 0 1401 && pReq->idMmio2 <= PGM_MAX_MMIO2_RANGES 1402 && (unsigned)pReq->idMmio2 + pReq->cChunks - 1U <= PGM_MAX_MMIO2_RANGES, 1403 ("idMmio2=%#x cChunks=%#x\n", pReq->idMmio2, pReq->cChunks), 1404 VERR_INVALID_PARAMETER); 1405 1406 for (uint32_t iChunk = 0, idx = pReq->idMmio2 - 1; iChunk < pReq->cChunks; iChunk++, idx++) 1407 { 1408 AssertReturn(pGVM->pgmr0.s.ahMmio2MapObjs[idx] == NIL_RTR0MEMOBJ, VERR_INVALID_STATE); 1409 AssertReturn(pGVM->pgmr0.s.ahMmio2MemObjs[idx] == NIL_RTR0MEMOBJ, VERR_INVALID_STATE); 1410 AssertReturn(pGVM->pgmr0.s.apMmio2RamRanges[idx] == NULL, VERR_INVALID_STATE); 1411 } 1412 1413 /* 1414 * Make sure we're owning the PGM lock (caller should be), recheck idMmio2 1415 * and call the worker function we share with ring-3. 1416 */ 1417 int rc = PGM_LOCK(pGVM); 1418 AssertRCReturn(rc, rc); 1419 1420 AssertReturnStmt(pGVM->pgm.s.cMmio2Ranges + 1U == pReq->idMmio2, 1421 PGM_UNLOCK(pGVM), VERR_INVALID_PARAMETER); 1422 AssertReturnStmt(pGVM->pgmr0.s.idRamRangeMax + 1U + pReq->cChunks <= RT_ELEMENTS(pGVM->pgmr0.s.apRamRanges), 1423 PGM_UNLOCK(pGVM), VERR_PGM_TOO_MANY_RAM_RANGES); 1424 1425 rc = pgmPhysMmio2RegisterWorker(pGVM, pReq->cGuestPages, pReq->idMmio2, pReq->cChunks, 1426 pReq->pDevIns, pReq->iSubDev, pReq->iRegion, pReq->fFlags); 1427 1428 PGM_UNLOCK(pGVM); 1429 return rc; 1430 } 1431 #endif /* IN_RING0 */ 1432 1433 1434 1435 /** 1436 * Worker for PGMR3PhysMmio2Deregister & PGMR0PhysMmio2DeregisterReq. 1437 */ 1438 DECLHIDDEN(int) pgmPhysMmio2DeregisterWorker(PVMCC pVM, uint8_t idMmio2, uint8_t cChunks, PPDMDEVINSR3 pDevIns) 1439 { 1440 /* 1441 * The caller shall have made sure all this is true, but we check again 1442 * since we're paranoid. 
1443 */ 1444 AssertReturn(idMmio2 > 0 && idMmio2 <= RT_ELEMENTS(pVM->pgm.s.aMmio2Ranges), VERR_INTERNAL_ERROR_2); 1445 AssertReturn(cChunks >= 1, VERR_INTERNAL_ERROR_2); 1446 uint8_t const idxFirst = idMmio2 - 1U; 1447 AssertReturn(idxFirst + cChunks <= pVM->pgm.s.cMmio2Ranges, VERR_INTERNAL_ERROR_2); 1448 uint32_t cGuestPages = 0; /* (For accounting and calulating backing memory size) */ 1449 for (uint32_t iChunk = 0, idx = idxFirst; iChunk < cChunks; iChunk++, idx++) 1450 { 1451 AssertReturn(pVM->pgm.s.aMmio2Ranges[idx].pDevInsR3 == pDevIns, VERR_NOT_OWNER); 1452 AssertReturn(!(pVM->pgm.s.aMmio2Ranges[idx].fFlags & PGMREGMMIO2RANGE_F_MAPPED), VERR_RESOURCE_BUSY); 1453 AssertReturn(pVM->pgm.s.aMmio2Ranges[idx].GCPhys == NIL_RTGCPHYS, VERR_INVALID_STATE); 1454 if (iChunk == 0) 1455 AssertReturn(pVM->pgm.s.aMmio2Ranges[idx].fFlags & PGMREGMMIO2RANGE_F_FIRST_CHUNK, VERR_INVALID_PARAMETER); 1456 else 1457 AssertReturn(!(pVM->pgm.s.aMmio2Ranges[idx].fFlags & PGMREGMMIO2RANGE_F_FIRST_CHUNK), VERR_INVALID_PARAMETER); 1458 if (iChunk + 1 == cChunks) 1459 AssertReturn(pVM->pgm.s.aMmio2Ranges[idx].fFlags & PGMREGMMIO2RANGE_F_LAST_CHUNK, VERR_INVALID_PARAMETER); 1460 else 1461 AssertReturn(!(pVM->pgm.s.aMmio2Ranges[idx].fFlags & PGMREGMMIO2RANGE_F_LAST_CHUNK), VERR_INVALID_PARAMETER); 1462 AssertReturn(pVM->pgm.s.aMmio2Ranges[idx].pPhysHandlerR3 == NIL_RTR3PTR, VERR_INVALID_STATE); /* caller shall free this */ 1463 1464 #ifdef IN_RING0 1465 cGuestPages += pVM->pgmr0.s.acMmio2RangePages[idx]; 1466 #else 1467 cGuestPages += pVM->pgm.s.aMmio2Ranges[idx].cbReal >> GUEST_PAGE_SHIFT; 1468 #endif 1469 1470 PPGMRAMRANGE const pRamRange = pVM->CTX_EXPR(pgm, pgmr0, pgm).s.apMmio2RamRanges[idx]; 1471 AssertPtrReturn(pRamRange, VERR_INVALID_STATE); 1472 AssertReturn(pRamRange->fFlags & PGM_RAM_RANGE_FLAGS_AD_HOC_MMIO_EX, VERR_INVALID_STATE); 1473 AssertReturn(pRamRange->GCPhys == NIL_RTGCPHYS, VERR_INVALID_STATE); 1474 AssertReturn(pRamRange->GCPhysLast == NIL_RTGCPHYS, VERR_INVALID_STATE); 1475 } 1476 1477 /* 1478 * Remove everything except the backing memory first. We work the ranges 1479 * in reverse so that we can reduce the max RAM range ID when possible. 1480 */ 1481 #ifdef IN_RING3 1482 uint8_t * const pbMmio2Backing = pVM->pgm.s.aMmio2Ranges[idxFirst].pbR3; 1483 RTGCPHYS const cbMmio2Backing = RT_ALIGN_T((RTGCPHYS)cGuestPages << GUEST_PAGE_SHIFT, HOST_PAGE_SIZE, RTGCPHYS); 1484 #endif 1485 1486 int rc = VINF_SUCCESS; 1487 uint32_t iChunk = cChunks; 1488 while (iChunk-- > 0) 1489 { 1490 uint32_t const idx = idxFirst + iChunk; 1491 PPGMRAMRANGE const pRamRange = pVM->CTX_EXPR(pgm, pgmr0, pgm).s.apMmio2RamRanges[idx]; 1492 1493 /* Zap the MMIO2 region data. 
*/ 1494 pVM->pgm.s.apMmio2RamRanges[idx] = NIL_RTR3PTR; 1495 #ifdef IN_RING0 1496 pVM->pgmr0.s.apMmio2RamRanges[idx] = NULL; 1497 pVM->pgmr0.s.acMmio2RangePages[idx] = 0; 1498 #endif 1499 pVM->pgm.s.aMmio2Ranges[idx].pDevInsR3 = NIL_RTR3PTR; 1500 pVM->pgm.s.aMmio2Ranges[idx].pbR3 = NIL_RTR3PTR; 1501 pVM->pgm.s.aMmio2Ranges[idx].fFlags = 0; 1502 pVM->pgm.s.aMmio2Ranges[idx].iSubDev = UINT8_MAX; 1503 pVM->pgm.s.aMmio2Ranges[idx].iRegion = UINT8_MAX; 1504 pVM->pgm.s.aMmio2Ranges[idx].idSavedState = UINT8_MAX; 1505 pVM->pgm.s.aMmio2Ranges[idx].idMmio2 = UINT8_MAX; 1506 pVM->pgm.s.aMmio2Ranges[idx].idRamRange = UINT16_MAX; 1507 pVM->pgm.s.aMmio2Ranges[idx].GCPhys = NIL_RTGCPHYS; 1508 pVM->pgm.s.aMmio2Ranges[idx].cbReal = 0; 1509 pVM->pgm.s.aMmio2Ranges[idx].pPhysHandlerR3 = NIL_RTR3PTR; 1510 pVM->pgm.s.aMmio2Ranges[idx].paLSPages = NIL_RTR3PTR; 1511 1512 /* Free the RAM range. */ 1513 int rc2 = pgmPhysRamRangeFree(pVM, pRamRange); 1514 AssertLogRelMsgStmt(RT_SUCCESS(rc2), ("rc=%Rrc idx=%u chunk=%u/%u\n", rc, idx, iChunk + 1, cChunks), 1515 rc = RT_SUCCESS(rc) ? rc2 : rc); 1516 } 1517 1518 /* 1519 * Final removal frees up the backing memory. 1520 */ 1521 #ifdef IN_RING3 1522 int const rcBacking = SUPR3PageFree(pbMmio2Backing, cbMmio2Backing >> HOST_PAGE_SHIFT); 1523 AssertLogRelMsgStmt(RT_SUCCESS(rcBacking), ("rc=%Rrc %p LB %#zx\n", rcBacking, pbMmio2Backing, cbMmio2Backing), 1524 rc = RT_SUCCESS(rc) ? rcBacking : rc); 1525 #else 1526 int rcBacking = RTR0MemObjFree(pVM->pgmr0.s.ahMmio2MapObjs[idxFirst], true /*fFreeMappings*/); 1527 AssertLogRelMsgStmt(RT_SUCCESS(rcBacking), 1528 ("rc=%Rrc ahMmio2MapObjs[%u]=%p\n", rcBacking, pVM->pgmr0.s.ahMmio2MapObjs[idxFirst], idxFirst), 1529 rc = RT_SUCCESS(rc) ? rcBacking : rc); 1530 if (RT_SUCCESS(rcBacking)) 1531 { 1532 pVM->pgmr0.s.ahMmio2MapObjs[idxFirst] = NIL_RTR0MEMOBJ; 1533 1534 rcBacking = RTR0MemObjFree(pVM->pgmr0.s.ahMmio2MemObjs[idxFirst], true /*fFreeMappings*/); 1535 AssertLogRelMsgStmt(RT_SUCCESS(rcBacking), 1536 ("rc=%Rrc ahMmio2MemObjs[%u]=%p\n", rcBacking, pVM->pgmr0.s.ahMmio2MemObjs[idxFirst], idxFirst), 1537 rc = RT_SUCCESS(rc) ? rcBacking : rc); 1538 if (RT_SUCCESS(rcBacking)) 1539 pVM->pgmr0.s.ahMmio2MemObjs[idxFirst] = NIL_RTR0MEMOBJ; 1540 } 1541 #endif 1542 1543 /* 1544 * Decrease the MMIO2 count if these were the last ones. 1545 */ 1546 if (idxFirst + cChunks == pVM->pgm.s.cMmio2Ranges) 1547 pVM->pgm.s.cMmio2Ranges = idxFirst; 1548 1549 /* 1550 * Update page count stats. 1551 */ 1552 pVM->pgm.s.cAllPages -= cGuestPages; 1553 pVM->pgm.s.cPrivatePages -= cGuestPages; 1554 1555 return rc; 1556 } 1557 1558 1559 #ifdef IN_RING0 1560 /** 1561 * This is called during VM state loading to deregister an obsolete MMIO2 range. 1562 * 1563 * This does everything except TLB flushing and releasing the access handler. 1564 * The ranges must be unmapped and wihtout preallocated access handlers. 1565 * 1566 * @returns VBox status code. 1567 * @param pGVM Pointer to the global VM structure. 1568 * @param pReq Where to get the parameters. 1569 * @thread EMT(0) 1570 */ 1571 VMMR0_INT_DECL(int) PGMR0PhysMmio2DeregisterReq(PGVM pGVM, PPGMPHYSMMIO2DEREGISTERREQ pReq) 1572 { 1573 /* 1574 * Validate input (ASSUME pReq is a copy and can't be modified by ring-3 1575 * while we're here). 1576 */ 1577 AssertPtrReturn(pReq, VERR_INVALID_POINTER); 1578 AssertMsgReturn(pReq->Hdr.cbReq == sizeof(*pReq), ("%#x < %#zx\n", pReq->Hdr.cbReq, sizeof(*pReq)), VERR_INVALID_PARAMETER); 1579 1580 /** @todo better VM state guard, enmVMState is ring-3 writable. 
*/ 1581 /* Only LOADING, as this is special purpose for removing an unwanted PCNet MMIO2 region. */ 1582 VMSTATE const enmState = pGVM->enmVMState; 1583 AssertMsgReturn(enmState == VMSTATE_LOADING, ("enmState=%d\n", enmState), VERR_VM_INVALID_VM_STATE); 1584 VM_ASSERT_EMT0_RETURN(pGVM, VERR_VM_THREAD_NOT_EMT); 1585 1586 AssertMsgReturn( pReq->cChunks > 0 1587 && pReq->cChunks < PGM_MAX_MMIO2_RANGES, 1588 ("idMmio2=%#x cChunks=%#x\n", pReq->idMmio2, pReq->cChunks), 1589 VERR_INVALID_PARAMETER); 1590 1591 AssertMsgReturn( pReq->idMmio2 != 0 1592 && pReq->idMmio2 <= PGM_MAX_MMIO2_RANGES 1593 && (unsigned)pReq->idMmio2 + pReq->cChunks - 1U <= PGM_MAX_MMIO2_RANGES, 1594 ("idMmio2=%#x cChunks=%#x\n", pReq->idMmio2, pReq->cChunks), 1595 VERR_INVALID_PARAMETER); 1596 1597 /* 1598 * Validate that the requested range is for exactly one MMIO2 registration. 1599 * 1600 * This is safe to do w/o the lock because registration and deregistration 1601 * is restricted to EMT0, and we're on EMT0 so can't race ourselves. 1602 */ 1603 1604 /* Check that the first entry is valid and has a memory object for the backing memory. */ 1605 uint32_t idx = pReq->idMmio2 - 1; 1606 AssertReturn(pGVM->pgmr0.s.apMmio2RamRanges[idx] != NULL, VERR_INVALID_STATE); 1607 AssertReturn(pGVM->pgmr0.s.ahMmio2MemObjs[idx] != NIL_RTR0MEMOBJ, VERR_INVALID_STATE); 1608 1609 /* Any additional regions must also have RAM ranges, but shall not have any backing memory. */ 1610 idx++; 1611 for (uint32_t iChunk = 1; iChunk < pReq->cChunks; iChunk++, idx++) 1612 { 1613 AssertReturn(pGVM->pgmr0.s.apMmio2RamRanges[idx] != NULL, VERR_INVALID_STATE); 1614 AssertReturn(pGVM->pgmr0.s.ahMmio2MemObjs[idx] == NIL_RTR0MEMOBJ, VERR_INVALID_STATE); 1615 } 1616 1617 /* Check that the next entry is for a different region. */ 1618 AssertReturn( idx >= RT_ELEMENTS(pGVM->pgmr0.s.apMmio2RamRanges) 1619 || pGVM->pgmr0.s.apMmio2RamRanges[idx] == NULL 1620 || pGVM->pgmr0.s.ahMmio2MemObjs[idx] != NIL_RTR0MEMOBJ, 1621 VERR_INVALID_PARAMETER); 1622 1623 /* 1624 * Make sure we're owning the PGM lock (caller should be) and call the 1625 * common worker code. 1626 */ 1627 int rc = PGM_LOCK(pGVM); 1628 AssertRCReturn(rc, rc); 1629 1630 rc = pgmPhysMmio2DeregisterWorker(pGVM, pReq->idMmio2, pReq->cChunks, pReq->pDevIns); 1631 1632 PGM_UNLOCK(pGVM); 1633 return rc; 1634 } 1635 #endif /* IN_RING0 */ 1636 1637 1638 1639 1640 /********************************************************************************************************************************* 1641 * ROM * 1642 *********************************************************************************************************************************/ 1643 1644 1645 /** 1646 * Common worker for pgmR3PhysRomRegisterLocked and 1647 * PGMR0PhysRomAllocateRangeReq. 1648 */ 1649 DECLHIDDEN(int) pgmPhysRomRangeAllocCommon(PVMCC pVM, uint32_t cPages, uint8_t idRomRange, uint32_t fFlags) 1650 { 1651 /* 1652 * Allocate the ROM range structure and map it into ring-3. 1653 */ 1654 size_t const cbRomRange = RT_ALIGN_Z(RT_UOFFSETOF_DYN(PGMROMRANGE, aPages[cPages]), HOST_PAGE_SIZE); 1655 #ifdef IN_RING0 1656 RTR0MEMOBJ hMemObj = NIL_RTR0MEMOBJ; 1657 int rc = RTR0MemObjAllocPage(&hMemObj, cbRomRange, false /*fExecutable*/); 1658 #else 1659 PPGMROMRANGE pRomRange; 1660 int rc = SUPR3PageAlloc(cbRomRange >> HOST_PAGE_SHIFT, 0 /*fFlags*/, (void **)&pRomRange); 1661 #endif 1662 if (RT_SUCCESS(rc)) 1663 { 1664 /* Zero the memory and do basic range init before mapping it into userland. 
*/ 1665 #ifdef IN_RING0 1666 PPGMROMRANGE const pRomRange = (PPGMROMRANGE)RTR0MemObjAddress(hMemObj); 1667 if (!RTR0MemObjWasZeroInitialized(hMemObj)) 1668 #endif 1669 RT_BZERO(pRomRange, cbRomRange); 1670 1671 pRomRange->GCPhys = NIL_RTGCPHYS; 1672 pRomRange->GCPhysLast = NIL_RTGCPHYS; 1673 pRomRange->cb = (RTGCPHYS)cPages << GUEST_PAGE_SHIFT; 1674 pRomRange->fFlags = fFlags; 1675 pRomRange->idSavedState = UINT8_MAX; 1676 pRomRange->idRamRange = UINT16_MAX; 1677 pRomRange->cbOriginal = 0; 1678 pRomRange->pvOriginal = NIL_RTR3PTR; 1679 pRomRange->pszDesc = NIL_RTR3PTR; 1680 1681 #ifdef IN_RING0 1682 /* Map it into userland. */ 1683 RTR0MEMOBJ hMapObj = NIL_RTR0MEMOBJ; 1684 rc = RTR0MemObjMapUser(&hMapObj, hMemObj, (RTR3PTR)-1, 0 /*uAlignment*/, 1685 RTMEM_PROT_READ | RTMEM_PROT_WRITE, NIL_RTR0PROCESS); 1686 if (RT_SUCCESS(rc)) 1687 #endif 1688 { 1689 /* 1690 * Grab the lock (unlikely to fail or block as caller typically owns it already). 1691 */ 1692 rc = PGM_LOCK(pVM); 1693 if (RT_SUCCESS(rc)) 1694 { 1695 /* 1696 * Check that idRomRange is still free. 1697 */ 1698 if (idRomRange < RT_ELEMENTS(pVM->CTX_EXPR(pgm, pgmr0, pgm).s.apRomRanges)) 1699 { 1700 #ifdef IN_RING0 1701 if (pVM->pgmr0.s.apRomRanges[idRomRange] == NULL) 1702 #endif 1703 { 1704 if ( pVM->pgm.s.apRomRanges[idRomRange] == NIL_RTR3PTR 1705 && pVM->pgm.s.cRomRanges == idRomRange) 1706 { 1707 /* 1708 * Commit it. 1709 */ 1710 #ifdef IN_RING0 1711 pVM->pgmr0.s.apRomRanges[idRomRange] = pRomRange; 1712 pVM->pgmr0.s.acRomRangePages[idRomRange] = cPages; 1713 pVM->pgmr0.s.ahRomRangeMemObjs[idRomRange] = hMemObj; 1714 pVM->pgmr0.s.ahRomRangeMapObjs[idRomRange] = hMapObj; 1715 #endif 1716 1717 pVM->pgm.s.cRomRanges = idRomRange + 1; 1718 #ifdef IN_RING0 1719 pVM->pgm.s.apRomRanges[idRomRange] = RTR0MemObjAddressR3(hMapObj); 1720 #else 1721 pVM->pgm.s.apRomRanges[idRomRange] = pRomRange; 1722 #endif 1723 1724 PGM_UNLOCK(pVM); 1725 return VINF_SUCCESS; 1726 } 1727 } 1728 1729 /* 1730 * Bail out. 1731 */ 1732 rc = VERR_INTERNAL_ERROR_5; 1733 } 1734 else 1735 rc = VERR_PGM_TOO_MANY_ROM_RANGES; 1736 PGM_UNLOCK(pVM); 1737 } 1738 #ifdef IN_RING0 1739 RTR0MemObjFree(hMapObj, false /*fFreeMappings*/); 1740 #endif 1741 } 1742 #ifdef IN_RING0 1743 RTR0MemObjFree(hMemObj, true /*fFreeMappings*/); 1744 #else 1745 SUPR3PageFree(pRomRange, cbRomRange >> HOST_PAGE_SHIFT); 1746 #endif 1747 } 1748 return rc; 1749 } 1750 1751 1752 #ifdef IN_RING0 1753 /** 1754 * This is called during VM initialization to allocate a ROM range. 1755 * 1756 * The page array is zeroed, the rest is initialized as best we can based on the 1757 * information in @a pReq. 1758 * 1759 * @returns VBox status code. 1760 * @param pGVM Pointer to the global VM structure. 1761 * @param pReq Where to get the parameters and return the range ID. 1762 * @thread EMT(0) 1763 */ 1764 VMMR0_INT_DECL(int) PGMR0PhysRomAllocateRangeReq(PGVM pGVM, PPGMPHYSROMALLOCATERANGEREQ pReq) 1765 { 1766 /* 1767 * Validate input (ASSUME pReq is a copy and can't be modified by ring-3 1768 * while we're here). 
1769 */ 1770 AssertPtrReturn(pReq, VERR_INVALID_POINTER); 1771 AssertMsgReturn(pReq->Hdr.cbReq == sizeof(*pReq), ("%#x < %#zx\n", pReq->Hdr.cbReq, sizeof(*pReq)), VERR_INVALID_PARAMETER); 1772 1773 AssertReturn(pReq->cbGuestPage == GUEST_PAGE_SIZE, VERR_INCOMPATIBLE_CONFIG); 1774 1775 AssertReturn(pReq->cGuestPages > 0, VERR_OUT_OF_RANGE); 1776 AssertReturn(pReq->cGuestPages <= PGM_MAX_PAGES_PER_ROM_RANGE, VERR_OUT_OF_RANGE); 1777 1778 AssertMsgReturn(!(pReq->fFlags & ~(uint32_t)PGMPHYS_ROM_FLAGS_VALID_MASK), ("fFlags=%#RX32\n", pReq->fFlags), 1779 VERR_INVALID_FLAGS); 1780 1781 AssertReturn(pReq->idRomRange < RT_ELEMENTS(pGVM->pgmr0.s.apRomRanges), VERR_OUT_OF_RANGE); 1782 AssertReturn(pReq->idRomRange == pGVM->pgm.s.cRomRanges, VERR_OUT_OF_RANGE); 1783 1784 /** @todo better VM state guard, enmVMState is ring-3 writable. */ 1785 VMSTATE const enmState = pGVM->enmVMState; 1786 AssertMsgReturn(enmState == VMSTATE_CREATING, ("enmState=%d\n", enmState), VERR_VM_INVALID_VM_STATE); 1787 VM_ASSERT_EMT0_RETURN(pGVM, VERR_VM_THREAD_NOT_EMT); 1788 1789 /* 1790 * Call common worker. 1791 */ 1792 return pgmPhysRomRangeAllocCommon(pGVM, pReq->cGuestPages, pReq->idRomRange, pReq->fFlags); 1793 } 1794 #endif /* IN_RING0 */ 1795 1796 1797 /********************************************************************************************************************************* 1798 * Other stuff 1799 *********************************************************************************************************************************/ 1800 649 1801 650 1802 … … 1403 2555 1404 2556 /* 1405 * Special cases: MMIO2 , ZEROand specially aliased MMIO pages.2557 * Special cases: MMIO2 and specially aliased MMIO pages. 1406 2558 */ 1407 2559 if ( PGM_PAGE_GET_TYPE(pPage) == PGMPAGETYPE_MMIO2 1408 2560 || PGM_PAGE_GET_TYPE(pPage) == PGMPAGETYPE_MMIO2_ALIAS_MMIO) 1409 2561 { 2562 *ppMap = NULL; 2563 1410 2564 /* Decode the page id to a page in a MMIO2 ram range. */ 1411 uint8_t idMmio2 = PGM_MMIO2_PAGEID_GET_MMIO2_ID(PGM_PAGE_GET_PAGEID(pPage));1412 uint32_t iPage = PGM_MMIO2_PAGEID_GET_IDX(PGM_PAGE_GET_PAGEID(pPage));1413 AssertLogRelMsgReturn((uint8_t)(idMmio2 - 1U) < RT_ELEMENTS(pVM->pgm.s. CTX_SUFF(apMmio2Ranges)),2565 uint8_t const idMmio2 = PGM_MMIO2_PAGEID_GET_MMIO2_ID(PGM_PAGE_GET_PAGEID(pPage)); 2566 uint32_t const iPage = PGM_MMIO2_PAGEID_GET_IDX(PGM_PAGE_GET_PAGEID(pPage)); 2567 AssertLogRelMsgReturn((uint8_t)(idMmio2 - 1U) < RT_ELEMENTS(pVM->pgm.s.aMmio2Ranges), 1414 2568 ("idMmio2=%u size=%u type=%u GCPHys=%#RGp Id=%u State=%u", idMmio2, 1415 RT_ELEMENTS(pVM->pgm.s. 
CTX_SUFF(apMmio2Ranges)), PGM_PAGE_GET_TYPE(pPage), GCPhys,2569 RT_ELEMENTS(pVM->pgm.s.aMmio2Ranges), PGM_PAGE_GET_TYPE(pPage), GCPhys, 1416 2570 pPage->s.idPage, pPage->s.uStateY), 1417 2571 VERR_PGM_PHYS_PAGE_MAP_MMIO2_IPE); 1418 PPGMREGMMIO2RANGE pMmio2Range = pVM->pgm.s.CTX_SUFF(apMmio2Ranges)[idMmio2 - 1];2572 PPGMREGMMIO2RANGE const pMmio2Range = &pVM->pgm.s.aMmio2Ranges[idMmio2 - 1]; 1419 2573 AssertLogRelReturn(pMmio2Range, VERR_PGM_PHYS_PAGE_MAP_MMIO2_IPE); 1420 2574 AssertLogRelReturn(pMmio2Range->idMmio2 == idMmio2, VERR_PGM_PHYS_PAGE_MAP_MMIO2_IPE); 1421 AssertLogRelReturn(iPage < (pMmio2Range->RamRange.cb >> GUEST_PAGE_SHIFT), VERR_PGM_PHYS_PAGE_MAP_MMIO2_IPE); 1422 *ppMap = NULL; 1423 # if defined(IN_RING0) && defined(VBOX_WITH_LINEAR_HOST_PHYS_MEM) 2575 #ifndef IN_RING0 2576 uint32_t const idRamRange = pMmio2Range->idRamRange; 2577 AssertLogRelReturn(idRamRange < RT_ELEMENTS(pVM->pgm.s.apRamRanges), VERR_PGM_PHYS_PAGE_MAP_MMIO2_IPE); 2578 PPGMRAMRANGE const pRamRange = pVM->CTX_EXPR(pgm, pgmr0, pgm).s.apRamRanges[idRamRange]; 2579 AssertLogRelReturn(pRamRange, VERR_PGM_PHYS_PAGE_MAP_MMIO2_IPE); 2580 AssertLogRelReturn(iPage < (pRamRange->cb >> GUEST_PAGE_SHIFT), VERR_PGM_PHYS_PAGE_MAP_MMIO2_IPE); 2581 *ppv = pMmio2Range->pbR3 + ((uintptr_t)iPage << GUEST_PAGE_SHIFT); 2582 return VINF_SUCCESS; 2583 2584 #else /* IN_RING0 */ 2585 AssertLogRelReturn(iPage < pVM->pgmr0.s.acMmio2RangePages[idMmio2 - 1], VERR_PGM_PHYS_PAGE_MAP_MMIO2_IPE); 2586 # ifdef VBOX_WITH_LINEAR_HOST_PHYS_MEM 1424 2587 return SUPR0HCPhysToVirt(PGM_PAGE_GET_HCPHYS(pPage), ppv); 1425 # elif defined(IN_RING0)1426 *ppv = (uint8_t *)pMmio2Range->pvR0 + ((uintptr_t)iPage << GUEST_PAGE_SHIFT);1427 return VINF_SUCCESS;1428 2588 # else 1429 *ppv = (uint8_t *)pMmio2Range->RamRange.pvR3 + ((uintptr_t)iPage << GUEST_PAGE_SHIFT); 2589 AssertPtr(pVM->pgmr0.s.apbMmio2Backing[idMmio2 - 1]); 2590 *ppv = pVM->pgmr0.s.apbMmio2Backing[idMmio2 - 1] + ((uintptr_t)iPage << GUEST_PAGE_SHIFT); 1430 2591 return VINF_SUCCESS; 1431 2592 # endif 1432 } 1433 1434 # ifdef VBOX_WITH_PGM_NEM_MODE 2593 #endif 2594 } 2595 2596 #ifdef VBOX_WITH_PGM_NEM_MODE 1435 2597 if (pVM->pgm.s.fNemMode) 1436 2598 { 1437 # 2599 # ifdef IN_RING3 1438 2600 /* 1439 2601 * Find the corresponding RAM range and use that to locate the mapping address. … … 1445 2607 Assert(pPage == &pRam->aPages[idxPage]); 1446 2608 *ppMap = NULL; 1447 *ppv = (uint8_t *)pRam->p vR3 + (idxPage << GUEST_PAGE_SHIFT);2609 *ppv = (uint8_t *)pRam->pbR3 + (idxPage << GUEST_PAGE_SHIFT); 1448 2610 return VINF_SUCCESS; 1449 # 2611 # else 1450 2612 AssertFailedReturn(VERR_INTERNAL_ERROR_2); 1451 # endif1452 }1453 2613 # endif 2614 } 2615 #endif /* VBOX_WITH_PGM_NEM_MODE */ 1454 2616 1455 2617 const uint32_t idChunk = PGM_PAGE_GET_CHUNKID(pPage); … … 1699 2861 if ( PGM_PAGE_GET_TYPE(pPage) < PGMPAGETYPE_ROM_SHADOW 1700 2862 || PGM_PAGE_GET_TYPE(pPage) > PGMPAGETYPE_ROM) 1701 pTlbe->GCPhys = GCPhys & X86_PTE_PAE_PG_MASK;2863 pTlbe->GCPhys = GCPhys & ~(RTGCPHYS)GUEST_PAGE_OFFSET_MASK; 1702 2864 else 1703 2865 pTlbe->GCPhys = NIL_RTGCPHYS; /* ROM: Problematic because of the two pages. :-/ */ … … 2606 3768 */ 2607 3769 VBOXSTRICTRC rcStrict = VINF_SUCCESS; 2608 PPGMRAMRANGE pRam = pgmPhysGetRangeAtOrAbove(pVM, GCPhys);2609 3770 for (;;) 2610 3771 { 3772 PPGMRAMRANGE const pRam = pgmPhysGetRangeAtOrAbove(pVM, GCPhys); 3773 2611 3774 /* Inside range or not? */ 2612 3775 if (pRam && GCPhys >= pRam->GCPhys) … … 2698 3861 } 2699 3862 2700 /* Advance range if necessary. 
*/2701 while (pRam && GCPhys > pRam->GCPhysLast)2702 pRam = pRam->CTX_SUFF(pNext);2703 3863 } /* Ram range walk */ 2704 3864 … … 3013 4173 */ 3014 4174 VBOXSTRICTRC rcStrict = VINF_SUCCESS; 3015 PPGMRAMRANGE pRam = pgmPhysGetRangeAtOrAbove(pVM, GCPhys);3016 4175 for (;;) 3017 4176 { 4177 PPGMRAMRANGE const pRam = pgmPhysGetRangeAtOrAbove(pVM, GCPhys); 4178 3018 4179 /* Inside range or not? */ 3019 4180 if (pRam && GCPhys >= pRam->GCPhys) … … 3095 4256 } 3096 4257 3097 /* Advance range if necessary. */3098 while (pRam && GCPhys > pRam->GCPhysLast)3099 pRam = pRam->CTX_SUFF(pNext);3100 4258 } /* Ram range walk */ 3101 4259 … … 4139 5297 */ 4140 5298 PGM_LOCK_VOID(pVM); 4141 int rc = VINF_SUCCESS; 4142 for (PPGMRAMRANGE pRam = pVM->pgm.s.CTX_SUFF(pRamRangesX); pRam; pRam = pRam->CTX_SUFF(pNext)) 4143 { 5299 int rc = VINF_SUCCESS; 5300 uint32_t const cLookupEntries = RT_MIN(pVM->pgm.s.RamRangeUnion.cLookupEntries, RT_ELEMENTS(pVM->pgm.s.aRamRangeLookup)); 5301 for (uint32_t idxLookup = 0; idxLookup < cLookupEntries && RT_SUCCESS(rc); idxLookup++) 5302 { 5303 uint32_t const idRamRange = PGMRAMRANGELOOKUPENTRY_GET_ID(pVM->pgm.s.aRamRangeLookup[idxLookup]); 5304 AssertContinue(idRamRange < RT_ELEMENTS(pVM->CTX_EXPR(pgm, pgmr0, pgm).s.apRamRanges)); 5305 PPGMRAMRANGE const pRam = pVM->CTX_EXPR(pgm, pgmr0, pgm).s.apRamRanges[idRamRange]; 5306 AssertContinue(pRam); 5307 Assert(pRam->GCPhys == PGMRAMRANGELOOKUPENTRY_GET_FIRST(pVM->pgm.s.aRamRangeLookup[idxLookup])); 5308 5309 #ifdef IN_RING0 5310 uint32_t const cPages = RT_MIN(pRam->cb >> X86_PAGE_SHIFT, pVM->pgmr0.s.acRamRangePages[idRamRange]); 5311 #else 4144 5312 uint32_t const cPages = pRam->cb >> X86_PAGE_SHIFT; 5313 #endif 4145 5314 for (uint32_t iPage = 0; iPage < cPages; iPage++) 4146 5315 { -
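Note on the hunks above: the physical read/write copy loops now re-resolve the covering RAM range at the top of every iteration (pgmPhysGetRangeAtOrAbove moved inside the for loop) instead of caching a range pointer and walking the removed pNext links. The following standalone C sketch models that pattern; the MODELRANGE type, the helper names and the all-0xFF gap handling are invented for illustration and are not VirtualBox code. A linear lookup keeps the model short; the sketch after the PGMDbg.cpp hunk below shows the corresponding binary search.

    #include <stdint.h>
    #include <string.h>
    #include <stddef.h>

    typedef struct MODELRANGE
    {
        uint64_t uFirst;     /* first byte covered */
        uint64_t uLast;      /* last byte covered, inclusive */
        uint8_t *pbBacking;  /* host backing of the range */
    } MODELRANGE;

    /* Return the range covering uAddr or the next one above it, NULL if none.
       Ranges are sorted by uFirst and do not overlap. */
    static const MODELRANGE *modelFindAtOrAbove(const MODELRANGE *paRanges, size_t cRanges, uint64_t uAddr)
    {
        for (size_t i = 0; i < cRanges; i++)
            if (paRanges[i].uLast >= uAddr)
                return &paRanges[i];
        return NULL;
    }

    static void modelPhysRead(const MODELRANGE *paRanges, size_t cRanges, uint64_t uAddr, void *pvDst, size_t cb)
    {
        uint8_t *pbDst = (uint8_t *)pvDst;
        while (cb > 0)
        {
            /* Re-resolve the covering range on every iteration rather than
               following a cached "next" pointer, so no stale links are used. */
            const MODELRANGE *pRange = modelFindAtOrAbove(paRanges, cRanges, uAddr);
            size_t cbChunk;
            if (pRange && uAddr >= pRange->uFirst)
            {
                uint64_t const cbLeft = pRange->uLast - uAddr + 1;
                cbChunk = cb < cbLeft ? cb : (size_t)cbLeft;
                memcpy(pbDst, &pRange->pbBacking[uAddr - pRange->uFirst], cbChunk);
            }
            else
            {
                /* Unassigned gap: modelled here as reading all bits set. */
                uint64_t const cbGap = pRange ? pRange->uFirst - uAddr : (uint64_t)cb;
                cbChunk = cb < cbGap ? cb : (size_t)cbGap;
                memset(pbDst, 0xff, cbChunk);
            }
            pbDst += cbChunk;
            uAddr += cbChunk;
            cb    -= cbChunk;
        }
    }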
trunk/src/VBox/VMM/VMMAll/PGMAllPool.cpp
r104032 r104840 4510 4510 */ 4511 4511 STAM_COUNTER_INC(&pPool->StatTrackLinearRamSearches); 4512 PPGMRAMRANGE pRam = pPool->CTX_SUFF(pVM)->pgm.s.CTX_SUFF(pRamRangesX); 4513 while (pRam) 4514 { 4512 uint32_t const idRamRangeMax = RT_MIN(pVM->pgm.s.idRamRangeMax, RT_ELEMENTS(pVM->pgm.s.apRamRanges) - 1U); 4513 Assert(pVM->pgm.s.apRamRanges[0] == NULL); 4514 for (uint32_t idx = 1; idx <= idRamRangeMax; idx++) 4515 { 4516 PPGMRAMRANGE const pRam = pVM->CTX_EXPR(pgm, pgmr0, pgm).s.apRamRanges[idx]; 4517 AssertContinue(pRam); 4515 4518 unsigned iPage = pRam->cb >> PAGE_SHIFT; 4516 4519 while (iPage-- > 0) 4517 {4518 4520 if (PGM_PAGE_GET_HCPHYS(&pRam->aPages[iPage]) == HCPhys) 4519 4521 { … … 4527 4529 return; 4528 4530 } 4529 }4530 pRam = pRam->CTX_SUFF(pNext);4531 4531 } 4532 4532 … … 5795 5795 * Clear all the GCPhys links and rebuild the phys ext free list. 5796 5796 */ 5797 for (PPGMRAMRANGE pRam = pVM->pgm.s.CTX_SUFF(pRamRangesX); 5798 pRam; 5799 pRam = pRam->CTX_SUFF(pNext)) 5800 { 5797 uint32_t const idRamRangeMax = RT_MIN(pVM->pgm.s.idRamRangeMax, RT_ELEMENTS(pVM->pgm.s.apRamRanges) - 1U); 5798 Assert(pVM->pgm.s.apRamRanges[0] == NULL); 5799 for (uint32_t idx = 1; idx <= idRamRangeMax; idx++) 5800 { 5801 PPGMRAMRANGE const pRam = pVM->CTX_EXPR(pgm, pgmr0, pgm).s.apRamRanges[idx]; 5802 AssertContinue(pRam); 5801 5803 unsigned iPage = pRam->cb >> PAGE_SHIFT; 5802 5804 while (iPage-- > 0) -
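The PGMAllPool.cpp hunks above replace the linked-list walk of RAM ranges with an iteration over the ID-indexed range array, skipping the unused slot 0 and clamping the upper bound against the array size. A minimal standalone model of that pattern, using made-up MODEL* names rather than the real PGM structures:

    #include <stdint.h>
    #include <stddef.h>
    #include <stdbool.h>

    #define MODEL_MAX_RANGES 128   /* index 0 is intentionally left unused */

    typedef struct MODELRAMRANGE
    {
        size_t    cPages;
        uint64_t *paHCPhys;   /* one host-physical address per guest page */
    } MODELRAMRANGE;

    typedef struct MODELVM
    {
        uint32_t       idRangeMax;                   /* highest ID in use */
        MODELRAMRANGE *apRanges[MODEL_MAX_RANGES];   /* sparse, ID-indexed */
    } MODELVM;

    /* Find (range ID, page index) for a host-physical address; false if not found. */
    static bool modelFindByHCPhys(const MODELVM *pVM, uint64_t HCPhys, uint32_t *pidRange, size_t *piPage)
    {
        /* Clamp the loop bound so a bogus idRangeMax cannot run off the array. */
        uint32_t idMax = pVM->idRangeMax;
        if (idMax > MODEL_MAX_RANGES - 1)
            idMax = MODEL_MAX_RANGES - 1;

        for (uint32_t id = 1; id <= idMax; id++)      /* ID 0 is never used */
        {
            const MODELRAMRANGE *pRange = pVM->apRanges[id];
            if (!pRange)
                continue;                             /* sparse slot */
            for (size_t iPage = pRange->cPages; iPage-- > 0; )
                if (pRange->paHCPhys[iPage] == HCPhys)
                {
                    *pidRange = id;
                    *piPage   = iPage;
                    return true;
                }
        }
        return false;
    }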
trunk/src/VBox/VMM/VMMR0/PGMR0.cpp
r100966 r104840 88 88 AssertCompile(sizeof(pGVM->pgmr0.s) <= sizeof(pGVM->pgmr0.padding)); 89 89 90 /* Set the RAM range memory handles to NIL. */ 91 AssertCompile(RT_ELEMENTS(pGVM->pgmr0.s.acRamRangePages) == RT_ELEMENTS(pGVM->pgmr0.s.apRamRanges)); 92 AssertCompile(RT_ELEMENTS(pGVM->pgmr0.s.ahRamRangeMemObjs) == RT_ELEMENTS(pGVM->pgmr0.s.apRamRanges)); 93 AssertCompile(RT_ELEMENTS(pGVM->pgmr0.s.ahRamRangeMapObjs) == RT_ELEMENTS(pGVM->pgmr0.s.apRamRanges)); 94 for (uint32_t i = 0; i < RT_ELEMENTS(pGVM->pgmr0.s.ahRamRangeMemObjs); i++) 95 { 96 pGVM->pgmr0.s.ahRamRangeMemObjs[i] = NIL_RTR0MEMOBJ; 97 pGVM->pgmr0.s.ahRamRangeMapObjs[i] = NIL_RTR0MEMOBJ; 98 } 99 Assert(pGVM->pgmr0.s.idRamRangeMax == 0); /* the structure is ZERO'ed */ 100 101 /* Set the MMIO2 range memory handles to NIL. */ 102 AssertCompile(RT_ELEMENTS(pGVM->pgmr0.s.ahMmio2MemObjs) == RT_ELEMENTS(pGVM->pgmr0.s.apMmio2RamRanges)); 103 AssertCompile(RT_ELEMENTS(pGVM->pgmr0.s.ahMmio2MapObjs) == RT_ELEMENTS(pGVM->pgmr0.s.apMmio2RamRanges)); 104 for (uint32_t i = 0; i < RT_ELEMENTS(pGVM->pgmr0.s.ahMmio2MemObjs); i++) 105 { 106 pGVM->pgmr0.s.ahMmio2MemObjs[i] = NIL_RTR0MEMOBJ; 107 pGVM->pgmr0.s.ahMmio2MapObjs[i] = NIL_RTR0MEMOBJ; 108 } 109 110 /* Set the ROM range memory handles to NIL. */ 111 AssertCompile(RT_ELEMENTS(pGVM->pgmr0.s.ahRomRangeMemObjs) == RT_ELEMENTS(pGVM->pgmr0.s.apRomRanges)); 112 AssertCompile(RT_ELEMENTS(pGVM->pgmr0.s.ahRomRangeMapObjs) == RT_ELEMENTS(pGVM->pgmr0.s.apRomRanges)); 113 for (uint32_t i = 0; i < RT_ELEMENTS(pGVM->pgmr0.s.ahRomRangeMemObjs); i++) 114 { 115 pGVM->pgmr0.s.ahRomRangeMemObjs[i] = NIL_RTR0MEMOBJ; 116 pGVM->pgmr0.s.ahRomRangeMapObjs[i] = NIL_RTR0MEMOBJ; 117 } 118 119 /* Set the physical handler related memory handles to NIL. */ 90 120 AssertCompile(RT_ELEMENTS(pGVM->pgmr0.s.ahPoolMemObjs) == RT_ELEMENTS(pGVM->pgmr0.s.ahPoolMapObjs)); 91 121 for (uint32_t i = 0; i < RT_ELEMENTS(pGVM->pgmr0.s.ahPoolMemObjs); i++) … … 278 308 AssertRC(rc); 279 309 pGVM->pgmr0.s.hPhysHandlerMemObj = NIL_RTR0MEMOBJ; 310 } 311 312 for (uint32_t i = 0; i < RT_ELEMENTS(pGVM->pgmr0.s.ahRomRangeMemObjs); i++) 313 { 314 if (pGVM->pgmr0.s.ahRomRangeMapObjs[i] != NIL_RTR0MEMOBJ) 315 { 316 int rc = RTR0MemObjFree(pGVM->pgmr0.s.ahRomRangeMapObjs[i], true /*fFreeMappings*/); 317 AssertRC(rc); 318 pGVM->pgmr0.s.ahRomRangeMapObjs[i] = NIL_RTR0MEMOBJ; 319 } 320 321 if (pGVM->pgmr0.s.ahRomRangeMemObjs[i] != NIL_RTR0MEMOBJ) 322 { 323 int rc = RTR0MemObjFree(pGVM->pgmr0.s.ahRomRangeMemObjs[i], true /*fFreeMappings*/); 324 AssertRC(rc); 325 pGVM->pgmr0.s.ahRomRangeMemObjs[i] = NIL_RTR0MEMOBJ; 326 } 327 } 328 329 for (uint32_t i = 0; i < RT_ELEMENTS(pGVM->pgmr0.s.ahMmio2MemObjs); i++) 330 { 331 if (pGVM->pgmr0.s.ahMmio2MapObjs[i] != NIL_RTR0MEMOBJ) 332 { 333 int rc = RTR0MemObjFree(pGVM->pgmr0.s.ahMmio2MapObjs[i], true /*fFreeMappings*/); 334 AssertRC(rc); 335 pGVM->pgmr0.s.ahMmio2MapObjs[i] = NIL_RTR0MEMOBJ; 336 } 337 338 if (pGVM->pgmr0.s.ahMmio2MemObjs[i] != NIL_RTR0MEMOBJ) 339 { 340 int rc = RTR0MemObjFree(pGVM->pgmr0.s.ahMmio2MemObjs[i], true /*fFreeMappings*/); 341 AssertRC(rc); 342 pGVM->pgmr0.s.ahMmio2MemObjs[i] = NIL_RTR0MEMOBJ; 343 } 344 } 345 346 uint32_t const cRangesMax = RT_MIN(pGVM->pgmr0.s.idRamRangeMax, RT_ELEMENTS(pGVM->pgmr0.s.ahRamRangeMemObjs) - 1U) + 1U; 347 for (uint32_t i = 0; i < cRangesMax; i++) 348 { 349 if (pGVM->pgmr0.s.ahRamRangeMapObjs[i] != NIL_RTR0MEMOBJ) 350 { 351 int rc = RTR0MemObjFree(pGVM->pgmr0.s.ahRamRangeMapObjs[i], true /*fFreeMappings*/); 352 AssertRC(rc); 353 
pGVM->pgmr0.s.ahRamRangeMapObjs[i] = NIL_RTR0MEMOBJ; 354 } 355 356 if (pGVM->pgmr0.s.ahRamRangeMemObjs[i] != NIL_RTR0MEMOBJ) 357 { 358 int rc = RTR0MemObjFree(pGVM->pgmr0.s.ahRamRangeMemObjs[i], true /*fFreeMappings*/); 359 AssertRC(rc); 360 pGVM->pgmr0.s.ahRamRangeMemObjs[i] = NIL_RTR0MEMOBJ; 361 } 280 362 } 281 363 … … 710 792 * @param hMmio2 Handle to look up. 711 793 */ 712 DECLINLINE( PPGMREGMMIO2RANGE) pgmR0PhysMmio2Find(PGVM pGVM, PPDMDEVINS pDevIns, PGMMMIO2HANDLE hMmio2)794 DECLINLINE(int32_t) pgmR0PhysMmio2ValidateHandle(PGVM pGVM, PPDMDEVINS pDevIns, PGMMMIO2HANDLE hMmio2) 713 795 { 714 796 /* … … 716 798 * ring-3 pointers and this probably will require some kind of refactoring anyway. 717 799 */ 718 if (hMmio2 <= RT_ELEMENTS(pGVM->pgm.s.apMmio2RangesR0) && hMmio2 != 0) 719 { 720 PPGMREGMMIO2RANGE pCur = pGVM->pgm.s.apMmio2RangesR0[hMmio2 - 1]; 721 if (pCur && pCur->pDevInsR3 == pDevIns->pDevInsForR3) 722 { 723 Assert(pCur->idMmio2 == hMmio2); 724 return pCur; 725 } 726 Assert(!pCur); 727 } 728 return NULL; 800 AssertReturn(hMmio2 <= RT_ELEMENTS(pGVM->pgm.s.aMmio2Ranges) && hMmio2 != 0, VERR_INVALID_HANDLE); 801 uint32_t const idx = hMmio2 - 1U; 802 AssertReturn(pGVM->pgm.s.aMmio2Ranges[idx].pDevInsR3 == pDevIns->pDevInsForR3, VERR_NOT_OWNER); 803 AssertReturn(pGVM->pgm.s.aMmio2Ranges[idx].idMmio2 == hMmio2, VERR_INVALID_HANDLE); 804 AssertReturn(pGVM->pgmr0.s.ahMmio2MapObjs[idx] != NIL_RTR0MEMOBJ, VERR_INVALID_HANDLE); 805 AssertReturn(pGVM->pgmr0.s.acMmio2RangePages[idx] != 0, VERR_INVALID_HANDLE); 806 return idx; 729 807 } 730 808 … … 744 822 size_t offSub, size_t cbSub, void **ppvMapping) 745 823 { 824 *ppvMapping = NULL; 746 825 AssertReturn(!(offSub & HOST_PAGE_OFFSET_MASK), VERR_UNSUPPORTED_ALIGNMENT); 747 826 AssertReturn(!(cbSub & HOST_PAGE_OFFSET_MASK), VERR_UNSUPPORTED_ALIGNMENT); 748 827 749 828 /* 750 * Translate hRegion into a range pointer. 751 */ 752 PPGMREGMMIO2RANGE pFirstRegMmio = pgmR0PhysMmio2Find(pGVM, pDevIns, hMmio2); 753 AssertReturn(pFirstRegMmio, VERR_NOT_FOUND); 829 * Validate and translate hMmio2 into an MMIO2 index. 830 */ 831 uint32_t const idxFirst = pgmR0PhysMmio2ValidateHandle(pGVM, pDevIns, hMmio2); 832 AssertReturn((int32_t)idxFirst >= 0, (int32_t)idxFirst); 833 754 834 #ifndef VBOX_WITH_LINEAR_HOST_PHYS_MEM 755 uint8_t * const pvR0 = (uint8_t *)pFirstRegMmio->pvR0;835 uint8_t * const pbR0 = pGVM->pgmr0.s.apbMmio2Backing[idxFirst]; 756 836 #else 757 RTR 3PTR const pvR3 = pFirstRegMmio->pvR3;837 RTR0MEMOBJ const hMemObj = pGVM->pgmr0.s.ahMmio2MemObjs[idxFirst]; 758 838 #endif 759 RTGCPHYS const cbReal = pFirstRegMmio->cbReal; 760 pFirstRegMmio = NULL; 839 RTGCPHYS const cbReal = (RTGCPHYS)pGVM->pgmr0.s.acMmio2RangePages[idxFirst] << GUEST_PAGE_SHIFT; 761 840 ASMCompilerBarrier(); 762 841 … … 767 846 AssertReturn(cbSub < cbReal && cbSub + offSub <= cbReal, VERR_OUT_OF_RANGE); 768 847 769 /*770 * Do the mapping.771 */772 848 #ifndef VBOX_WITH_LINEAR_HOST_PHYS_MEM 773 AssertPtr(pvR0); 774 *ppvMapping = pvR0 + offSub; 849 /* 850 * Just return the address of the existing ring-0 mapping. 851 */ 852 AssertPtrReturn(pbR0, VERR_INTERNAL_ERROR_4); 853 *ppvMapping = &pbR0[offSub]; 775 854 return VINF_SUCCESS; 776 855 #else 777 return SUPR0PageMapKernel(pGVM->pSession, pvR3, (uint32_t)offSub, (uint32_t)cbSub, 0 /*fFlags*/, ppvMapping); 856 /* 857 * Call IPRT to do the mapping. Cleanup is done indirectly by telling 858 * RTR0MemObjFree to include mappings. It can only be done once, so no 859 * risk of excessive mapping leaks. 
860 */ 861 RTR0MEMOBJ hMapObj; 862 int rc = RTR0MemObjMapKernelEx(&hMapObj, hMemObj, (void *)-1, 0, RTMEM_PROT_READ | RTMEM_PROT_WRITE, offSub, cbSub); 863 if (RT_SUCCESS(rc)) 864 *ppvMapping = RTR0MemObjAddress(hMapObj); 865 return rc; 778 866 #endif 779 867 } … … 1058 1146 1059 1147 #ifdef VBOX_WITH_PCI_PASSTHROUGH 1148 # error fixme 1060 1149 if (pGVM->pgm.s.fPciPassthrough) 1061 1150 { -
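pgmR0PhysMmio2ValidateHandle above folds handle validation and handle-to-index translation into a single helper that returns either a table index or a negative status, which the caller tests with a signed comparison. A self-contained sketch of that convention, with invented MODEL* names and error values standing in for the VBox ones; a caller would do: int32_t idx = modelValidateHandle(paTable, pvDevIns, hMmio2); if (idx < 0) return idx;

    #include <stdint.h>

    #define MODEL_MAX_MMIO2            64
    #define MODEL_ERR_INVALID_HANDLE  (-1)
    #define MODEL_ERR_NOT_OWNER       (-2)

    typedef struct MODELMMIO2
    {
        void    *pvOwner;   /* registering device instance */
        uint32_t idSelf;    /* 1-based handle stored back in the entry */
        uint32_t cPages;    /* zero means the slot has no backing yet */
    } MODELMMIO2;

    /* Translate a 1-based handle into a 0-based table index, or a negative status. */
    static int32_t modelValidateHandle(const MODELMMIO2 *paTable, void *pvOwner, uint32_t hMmio2)
    {
        if (hMmio2 == 0 || hMmio2 > MODEL_MAX_MMIO2)
            return MODEL_ERR_INVALID_HANDLE;
        uint32_t const idx = hMmio2 - 1U;
        if (paTable[idx].pvOwner != pvOwner)
            return MODEL_ERR_NOT_OWNER;                 /* wrong device instance */
        if (paTable[idx].idSelf != hMmio2 || paTable[idx].cPages == 0)
            return MODEL_ERR_INVALID_HANDLE;            /* stale or unbacked slot */
        return (int32_t)idx;
    }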
trunk/src/VBox/VMM/VMMR0/VMMR0.cpp
r98103 r104840 1920 1920 break; 1921 1921 1922 case VMMR0_DO_PGM_PHYS_ALLOCATE_RAM_RANGE: 1923 if (idCpu != 0 || u64Arg) 1924 return VERR_INVALID_PARAMETER; 1925 rc = PGMR0PhysAllocateRamRangeReq(pGVM, (PPGMPHYSALLOCATERAMRANGEREQ)pReqHdr); 1926 break; 1927 1928 case VMMR0_DO_PGM_PHYS_MMIO2_REGISTER: 1929 if (idCpu != 0 || u64Arg) 1930 return VERR_INVALID_PARAMETER; 1931 rc = PGMR0PhysMmio2RegisterReq(pGVM, (PPGMPHYSMMIO2REGISTERREQ)pReqHdr); 1932 break; 1933 1934 case VMMR0_DO_PGM_PHYS_MMIO2_DEREGISTER: 1935 if (idCpu != 0 || u64Arg) 1936 return VERR_INVALID_PARAMETER; 1937 rc = PGMR0PhysMmio2DeregisterReq(pGVM, (PPGMPHYSMMIO2DEREGISTERREQ)pReqHdr); 1938 break; 1939 1940 case VMMR0_DO_PGM_PHYS_ROM_ALLOCATE_RANGE: 1941 if (idCpu != 0 || u64Arg) 1942 return VERR_INVALID_PARAMETER; 1943 rc = PGMR0PhysRomAllocateRangeReq(pGVM, (PPGMPHYSROMALLOCATERANGEREQ)pReqHdr); 1944 break; 1945 1922 1946 /* 1923 1947 * GMM wrappers. -
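The VMMR0.cpp hunk adds four request-dispatch cases that all follow the same shape: reject any vCPU other than EMT(0) and any non-zero u64Arg, then hand the request header to the worker. A compact standalone model of such a dispatcher; the MODEL_DO_* operation, the stub worker and the header layout are invented for illustration only.

    #include <stdint.h>

    #define MODEL_OK                     0
    #define MODEL_ERR_INVALID_PARAMETER (-3)

    typedef struct MODELREQHDR
    {
        uint32_t u32Magic;
        uint32_t cbReq;
    } MODELREQHDR;

    typedef struct MODELALLOCRANGEREQ
    {
        MODELREQHDR Hdr;          /* first member, so the cast below is valid */
        uint32_t    cGuestPages;
        uint32_t    fFlags;
        uint32_t    idNewRange;   /* out */
    } MODELALLOCRANGEREQ;

    enum { MODEL_DO_ALLOCATE_RAM_RANGE = 1 };

    /* Stub worker: a real one would allocate the range and map it into ring-3. */
    static int modelAllocateRamRangeReq(MODELALLOCRANGEREQ *pReq)
    {
        pReq->idNewRange = 1;
        return MODEL_OK;
    }

    /* EMT(0)-only operations reject any other vCPU and any stray u64Arg before
       the request header is even inspected, then check the size for the cast. */
    static int modelDispatch(uint32_t uOperation, uint32_t idCpu, uint64_t u64Arg, MODELREQHDR *pReqHdr)
    {
        switch (uOperation)
        {
            case MODEL_DO_ALLOCATE_RAM_RANGE:
                if (idCpu != 0 || u64Arg != 0)
                    return MODEL_ERR_INVALID_PARAMETER;
                if (!pReqHdr || pReqHdr->cbReq != sizeof(MODELALLOCRANGEREQ))
                    return MODEL_ERR_INVALID_PARAMETER;
                return modelAllocateRamRangeReq((MODELALLOCRANGEREQ *)pReqHdr);

            default:
                return MODEL_ERR_INVALID_PARAMETER;
        }
    }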
trunk/src/VBox/VMM/VMMR3/IOMR3Mmio.cpp
r104767 r104840 275 275 AssertPtrReturn(phRegion, VERR_INVALID_POINTER); 276 276 *phRegion = UINT32_MAX; 277 VM_ASSERT_EMT0_RETURN(pVM, VERR_VM_THREAD_NOT_EMT); 277 PVMCPU const pVCpu = VMMGetCpu(pVM); 278 AssertReturn(pVCpu && pVCpu->idCpu == 0, VERR_VM_THREAD_NOT_EMT); 278 279 VM_ASSERT_STATE_RETURN(pVM, VMSTATE_CREATING, VERR_VM_INVALID_VM_STATE); 279 280 AssertReturn(!pVM->iom.s.fMmioFrozen, VERR_WRONG_ORDER); … … 324 325 AssertReturn(idx == pVM->iom.s.cMmioRegs, VERR_IOM_MMIO_IPE_1); 325 326 } 327 328 /* 329 * Create a matching ad-hoc RAM range for this MMIO region. 330 */ 331 uint16_t idRamRange = 0; 332 int rc = PGMR3PhysMmioRegister(pVM, pVCpu, cbRegion, pszDesc, &idRamRange); 333 AssertRCReturn(rc, rc); 326 334 327 335 /* … … 341 349 pVM->iom.s.paMmioRegs[idx].fMapped = false; 342 350 pVM->iom.s.paMmioRegs[idx].fFlags = fFlags; 351 pVM->iom.s.paMmioRegs[idx].idRamRange = idRamRange; 343 352 pVM->iom.s.paMmioRegs[idx].idxSelf = idx; 344 353 … … 409 418 /* Register with PGM before we shuffle the array: */ 410 419 ASMAtomicWriteU64(&pRegEntry->GCPhysMapping, GCPhys); 411 rc = PGMR3PhysMmio Register(pVM, pVCpu, GCPhys, cbRegion, pVM->iom.s.hNewMmioHandlerType,412 hRegion, pRegEntry->pszDesc);420 rc = PGMR3PhysMmioMap(pVM, pVCpu, GCPhys, cbRegion, pRegEntry->idRamRange, 421 pVM->iom.s.hNewMmioHandlerType, hRegion); 413 422 AssertRCReturnStmt(rc, ASMAtomicWriteU64(&pRegEntry->GCPhysMapping, NIL_RTGCPHYS); IOM_UNLOCK_EXCL(pVM), rc); 414 423 … … 428 437 /* Register with PGM before we shuffle the array: */ 429 438 ASMAtomicWriteU64(&pRegEntry->GCPhysMapping, GCPhys); 430 rc = PGMR3PhysMmio Register(pVM, pVCpu, GCPhys, cbRegion, pVM->iom.s.hNewMmioHandlerType,431 hRegion, pRegEntry->pszDesc);439 rc = PGMR3PhysMmioMap(pVM, pVCpu, GCPhys, cbRegion, pRegEntry->idRamRange, 440 pVM->iom.s.hNewMmioHandlerType, hRegion); 432 441 AssertRCReturnStmt(rc, ASMAtomicWriteU64(&pRegEntry->GCPhysMapping, NIL_RTGCPHYS); IOM_UNLOCK_EXCL(pVM), rc); 433 442 … … 455 464 /* First entry in the lookup table: */ 456 465 ASMAtomicWriteU64(&pRegEntry->GCPhysMapping, GCPhys); 457 rc = PGMR3PhysMmioRegister(pVM, pVCpu, GCPhys, cbRegion, pVM->iom.s.hNewMmioHandlerType, hRegion, pRegEntry->pszDesc); 466 rc = PGMR3PhysMmioMap(pVM, pVCpu, GCPhys, cbRegion, pRegEntry->idRamRange, 467 pVM->iom.s.hNewMmioHandlerType, hRegion); 458 468 AssertRCReturnStmt(rc, ASMAtomicWriteU64(&pRegEntry->GCPhysMapping, NIL_RTGCPHYS); IOM_UNLOCK_EXCL(pVM), rc); 459 469 … … 572 582 pVM->iom.s.cMmioLookupEntries = cEntries - 1; 573 583 574 rc = PGMR3PhysMmio Deregister(pVM, pVCpu, GCPhys, pRegEntry->cbRegion);584 rc = PGMR3PhysMmioUnmap(pVM, pVCpu, GCPhys, pRegEntry->cbRegion, pRegEntry->idRamRange); 575 585 AssertRC(rc); 576 586 … … 622 632 { 623 633 RT_NOREF(pVM, pDevIns, hRegion, cbRegion); 634 AssertFailed(); 624 635 return VERR_NOT_IMPLEMENTED; 625 636 } -
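In the IOMR3Mmio.cpp hunks, region creation now allocates the ad-hoc RAM range up front and stores its ID in the region entry, so the later map and unmap calls only pass that stored ID along. A rough two-phase model of the idea, with hypothetical MODEL* types and a trivial stand-in for the range allocator:

    #include <stdint.h>
    #include <stdbool.h>

    #define MODEL_MAX_REGIONS 32

    typedef struct MODELMMIOREGION
    {
        uint64_t cbRegion;
        uint16_t idRamRange;      /* allocated once, at registration time */
        bool     fMapped;
        uint64_t GCPhysMapping;
    } MODELMMIOREGION;

    typedef struct MODELIOM
    {
        uint32_t        cRegions;
        MODELMMIOREGION aRegions[MODEL_MAX_REGIONS];
    } MODELIOM;

    static uint16_t g_idNextRamRange = 1;   /* stand-in for the real range allocator */

    /* Phase 1, during VM creation: create the region and pre-allocate its range ID. */
    static int modelMmioCreate(MODELIOM *pIom, uint64_t cbRegion, uint32_t *phRegion)
    {
        if (pIom->cRegions >= MODEL_MAX_REGIONS)
            return -1;
        uint32_t const idx = pIom->cRegions++;
        pIom->aRegions[idx].cbRegion      = cbRegion;
        pIom->aRegions[idx].idRamRange    = g_idNextRamRange++;
        pIom->aRegions[idx].fMapped       = false;
        pIom->aRegions[idx].GCPhysMapping = 0;
        *phRegion = idx;
        return 0;
    }

    /* Phase 2, at runtime: mapping only records the address and would hand the
       stored RAM range ID to the memory manager (omitted in this model). */
    static int modelMmioMap(MODELIOM *pIom, uint32_t hRegion, uint64_t GCPhys)
    {
        if (hRegion >= pIom->cRegions || pIom->aRegions[hRegion].fMapped)
            return -1;
        pIom->aRegions[hRegion].GCPhysMapping = GCPhys;
        pIom->aRegions[hRegion].fMapped       = true;
        return 0;
    }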
trunk/src/VBox/VMM/VMMR3/NEMR3NativeTemplate-linux.cpp.h
r104725 r104840 927 927 * notification, unless we're replacing RAM). 928 928 */ 929 /** @todo r=bird: if it's overlapping RAM, we shouldn't need an additional 930 * registration, should we? */ 929 931 struct kvm_userspace_memory_region Region; 930 932 Region.slot = idSlot; -
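The NEM hunk above touches the KVM slot registration path and only adds a review question about overlapping RAM. For context, here is a minimal sketch of registering one memory slot through the public Linux KVM ioctl interface; it builds only on Linux with the KVM headers available, and the helper name and parameter set are made up for this note.

    #include <linux/kvm.h>
    #include <sys/ioctl.h>
    #include <stdint.h>

    /* Register (or replace) one guest-physical slot backed by host memory.
       Passing memory_size = 0 for an existing slot deletes it. */
    static int modelKvmSetSlot(int vmFd, uint32_t idSlot, uint64_t GCPhys, uint64_t cb,
                               void *pvHost, uint32_t fFlags)
    {
        struct kvm_userspace_memory_region Region;
        Region.slot            = idSlot;
        Region.flags           = fFlags;            /* e.g. KVM_MEM_LOG_DIRTY_PAGES */
        Region.guest_phys_addr = GCPhys;
        Region.memory_size     = cb;
        Region.userspace_addr  = (uintptr_t)pvHost; /* host mapping backing the slot */
        return ioctl(vmFd, KVM_SET_USER_MEMORY_REGION, &Region);
    }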
trunk/src/VBox/VMM/VMMR3/PDM.cpp
r99576 r104840 857 857 pdmR3ThreadDestroyDevice(pVM, pDevIns); 858 858 PDMR3QueueDestroyDevice(pVM, pDevIns); 859 #if 0 859 860 PGMR3PhysMmio2Deregister(pVM, pDevIns, NIL_PGMMMIO2HANDLE); 861 #endif 860 862 #ifdef VBOX_WITH_PDM_ASYNC_COMPLETION 861 863 pdmR3AsyncCompletionTemplateDestroyDevice(pVM, pDevIns); -
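The PDM.cpp change compiles out the blanket PGMR3PhysMmio2Deregister call that used to sweep every MMIO2 region of a device at destruction time by passing NIL_PGMMMIO2HANDLE as a wildcard. The sketch below is a guess at what such a wildcard sweep does over an owner-tagged handle table; the MODEL* names and the exact semantics are assumptions, not the VBox implementation.

    #include <stdint.h>
    #include <stdbool.h>

    #define MODEL_MAX_MMIO2   64
    #define MODEL_NIL_HANDLE  UINT32_MAX   /* wildcard: "all regions of this owner" */

    typedef struct MODELMMIO2ENTRY
    {
        void *pvOwner;      /* NULL when the slot is free */
        bool  fMapped;
    } MODELMMIO2ENTRY;

    /* Deregister one region, or every region owned by pvOwner when hMmio2 is the wildcard. */
    static int modelMmio2Deregister(MODELMMIO2ENTRY *paTable, void *pvOwner, uint32_t hMmio2)
    {
        uint32_t const idxFirst = hMmio2 == MODEL_NIL_HANDLE ? 0 : hMmio2 - 1U;
        uint32_t const idxEnd   = hMmio2 == MODEL_NIL_HANDLE ? MODEL_MAX_MMIO2 : hMmio2;
        if (idxEnd > MODEL_MAX_MMIO2 || idxFirst >= idxEnd)
            return -1;                      /* bad handle (including hMmio2 == 0) */

        for (uint32_t idx = idxFirst; idx < idxEnd; idx++)
            if (paTable[idx].pvOwner == pvOwner)
            {
                if (paTable[idx].fMapped)
                    return -2;              /* must be unmapped before it can go away */
                paTable[idx].pvOwner = NULL;
            }
        return 0;
    }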
trunk/src/VBox/VMM/VMMR3/PDMDevHlp.cpp
r104767 r104840 289 289 AssertReturn(!pPciDev || pPciDev->Int.s.pDevInsR3 == pDevIns, VERR_INVALID_PARAMETER); 290 290 291 PVM pVM = pDevIns->Internal.s.pVMR3; 292 VM_ASSERT_EMT0_RETURN(pVM, VERR_VM_THREAD_NOT_EMT); 293 AssertMsgReturn( pVM->enmVMState == VMSTATE_CREATING 294 || pVM->enmVMState == VMSTATE_LOADING, 295 ("state %s, expected CREATING or LOADING\n", VMGetStateName(pVM->enmVMState)), VERR_VM_INVALID_VM_STATE); 291 PVM const pVM = pDevIns->Internal.s.pVMR3; 296 292 297 293 AssertReturn(!(iPciRegion & UINT16_MAX), VERR_INVALID_PARAMETER); /* not implemented. */ … … 314 310 VM_ASSERT_EMT(pDevIns->Internal.s.pVMR3); 315 311 LogFlow(("pdmR3DevHlp_Mmio2Destroy: caller='%s'/%d: hRegion=%#RX64\n", pDevIns->pReg->szName, pDevIns->iInstance, hRegion)); 316 317 PVM pVM = pDevIns->Internal.s.pVMR3;318 VM_ASSERT_EMT_RETURN(pVM, VERR_VM_THREAD_NOT_EMT);319 AssertMsgReturn( pVM->enmVMState == VMSTATE_DESTROYING320 || pVM->enmVMState == VMSTATE_LOADING,321 ("state %s, expected DESTROYING or LOADING\n", VMGetStateName(pVM->enmVMState)), VERR_VM_INVALID_VM_STATE);322 312 323 313 int rc = PGMR3PhysMmio2Deregister(pDevIns->Internal.s.pVMR3, pDevIns, hRegion); -
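The PDMDevHlp.cpp hunks drop the EMT(0) and VM-state assertions from the Mmio2Create/Destroy device helpers, presumably because the PGM workers now enforce that policy themselves. Below is a small illustrative model of keeping such checks in the callee so a thin wrapper cannot get them wrong; every name and error value here is hypothetical and only shows the division of responsibility.

    #include <stddef.h>

    typedef enum MODELVMSTATE
    {
        MODELVMSTATE_CREATING,
        MODELVMSTATE_LOADING,
        MODELVMSTATE_RUNNING,
        MODELVMSTATE_DESTROYING
    } MODELVMSTATE;

    typedef struct MODELVM
    {
        MODELVMSTATE enmState;
        unsigned     idEmtCaller;   /* which EMT is making the call */
    } MODELVM;

    /* The worker owns all policy: caller thread and VM state. */
    static int modelMmio2CreateWorker(MODELVM *pVM, size_t cb, unsigned *phRegion)
    {
        if (pVM->idEmtCaller != 0)                                      /* EMT(0) only */
            return -1;
        if (pVM->enmState != MODELVMSTATE_CREATING && pVM->enmState != MODELVMSTATE_LOADING)
            return -2;
        (void)cb;
        *phRegion = 1;                                                  /* stub allocation */
        return 0;
    }

    /* The device-helper wrapper only sanity-checks its own arguments and forwards,
       so the policy checks live in exactly one place. */
    static int modelDevHlpMmio2Create(MODELVM *pVM, size_t cb, unsigned *phRegion)
    {
        if (!phRegion || cb == 0)
            return -3;
        return modelMmio2CreateWorker(pVM, cb, phRegion);
    }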
trunk/src/VBox/VMM/VMMR3/PGM.cpp
r101001 r104840 1845 1845 1846 1846 /* 1847 * Ram ranges.1848 */1849 if (pVM->pgm.s.pRamRangesXR3)1850 pgmR3PhysRelinkRamRanges(pVM);1851 1852 /*1853 1847 * The page pool. 1854 1848 */ … … 2121 2115 2122 2116 /** 2123 * D ump registered MMIO ranges to the log.2117 * Display the RAM range info. 2124 2118 * 2125 2119 * @param pVM The cross context VM structure. … … 2136 2130 pVM, 2137 2131 sizeof(RTGCPHYS) * 4 + 1, "GC Phys Range ", 2138 sizeof(RTHCPTR) * 2, "pvHC "); 2139 2140 for (PPGMRAMRANGE pCur = pVM->pgm.s.pRamRangesXR3; pCur; pCur = pCur->pNextR3) 2141 { 2132 sizeof(RTHCPTR) * 2, "pbR3 "); 2133 2134 /* 2135 * Traverse the lookup table so we only display mapped MMIO and get it in sorted order. 2136 */ 2137 uint32_t const cRamRangeLookupEntries = RT_MIN(pVM->pgm.s.RamRangeUnion.cLookupEntries, 2138 RT_ELEMENTS(pVM->pgm.s.aRamRangeLookup)); 2139 for (uint32_t idxLookup = 0; idxLookup < cRamRangeLookupEntries; idxLookup++) 2140 { 2141 uint32_t const idRamRange = PGMRAMRANGELOOKUPENTRY_GET_ID(pVM->pgm.s.aRamRangeLookup[idxLookup]); 2142 AssertContinue(idRamRange < RT_ELEMENTS(pVM->pgm.s.apRamRanges)); 2143 PPGMRAMRANGE const pCur = pVM->pgm.s.apRamRanges[idRamRange]; 2144 if (pCur != NULL) { /*likely*/ } 2145 else continue; 2146 2142 2147 pHlp->pfnPrintf(pHlp, 2143 2148 "%RGp-%RGp %RHv %s\n", 2144 2149 pCur->GCPhys, 2145 2150 pCur->GCPhysLast, 2146 pCur->p vR3,2151 pCur->pbR3, 2147 2152 pCur->pszDesc); 2148 2153 if (fVerbose) … … 2182 2187 pszType = enmType == PGMPAGETYPE_ROM_SHADOW ? "ROM-shadowed" : "ROM"; 2183 2188 2184 RTGCPHYS const GCPhysFirstPg = iFirstPage * X86_PAGE_SIZE; 2185 PPGMROMRANGE pRom = pVM->pgm.s.pRomRangesR3; 2186 while (pRom && GCPhysFirstPg > pRom->GCPhysLast) 2187 pRom = pRom->pNextR3; 2188 if (pRom && GCPhysFirstPg - pRom->GCPhys < pRom->cb) 2189 pszMore = pRom->pszDesc; 2189 RTGCPHYS const GCPhysFirstPg = iFirstPage << GUEST_PAGE_SHIFT; 2190 uint32_t const cRomRanges = RT_MIN(pVM->pgm.s.cRomRanges, RT_ELEMENTS(pVM->pgm.s.apRomRanges)); 2191 for (uint32_t idxRom = 0; idxRom < cRomRanges; idxRom++) 2192 { 2193 PPGMROMRANGE const pRomRange = pVM->pgm.s.apRomRanges[idxRom]; 2194 if ( pRomRange 2195 && GCPhysFirstPg < pRomRange->GCPhysLast 2196 && GCPhysFirstPg >= pRomRange->GCPhys) 2197 { 2198 pszMore = pRomRange->pszDesc; 2199 break; 2200 } 2201 } 2190 2202 break; 2191 2203 } … … 2540 2552 2541 2553 PGM_LOCK_VOID(pVM); 2542 for (PPGMRAMRANGE pRam = pVM->pgm.s.pRamRangesXR3; 2543 pRam && pRam->GCPhys < GCPhysEnd && RT_SUCCESS(rc); 2544 pRam = pRam->pNextR3) 2545 { 2554 2555 uint32_t const cRamRangeLookupEntries = RT_MIN(pVM->pgm.s.RamRangeUnion.cLookupEntries, 2556 RT_ELEMENTS(pVM->pgm.s.aRamRangeLookup)); 2557 for (uint32_t idxLookup = 0; idxLookup < cRamRangeLookupEntries && RT_SUCCESS(rc); idxLookup++) 2558 { 2559 if (PGMRAMRANGELOOKUPENTRY_GET_FIRST(pVM->pgm.s.aRamRangeLookup[idxLookup]) >= GCPhysEnd) 2560 break; 2561 uint32_t const idRamRange = PGMRAMRANGELOOKUPENTRY_GET_ID(pVM->pgm.s.aRamRangeLookup[idxLookup]); 2562 AssertContinue(idRamRange < RT_ELEMENTS(pVM->pgm.s.apRamRanges)); 2563 PPGMRAMRANGE const pRam = pVM->pgm.s.apRamRanges[idRamRange]; 2564 AssertContinue(pRam); 2565 Assert(pRam->GCPhys == PGMRAMRANGELOOKUPENTRY_GET_FIRST(pVM->pgm.s.aRamRangeLookup[idxLookup])); 2566 2546 2567 /* fill the gap */ 2547 2568 if (pRam->GCPhys > GCPhys && fIncZeroPgs) -
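The PGM.cpp info handler above decodes lookup entries with PGMRAMRANGELOOKUPENTRY_GET_ID, and the PGMPhys.cpp code further down builds them as idRamRange | GCPhys. Because the first address of a range is guest-page aligned, the range ID can ride in the low offset bits and one 64-bit field yields both values. A standalone sketch of that packing, assuming the ID always fits in the page-offset bits (the fixed-size range tables in these hunks suggest it does) and using invented MODEL_* names:

    #include <stdint.h>

    #define MODEL_PAGE_SHIFT        12
    #define MODEL_PAGE_OFFSET_MASK  ((uint64_t)((1u << MODEL_PAGE_SHIFT) - 1u))

    /* Pack a page-aligned first address and a small range ID into one field. */
    static inline uint64_t modelMakeFirstAndId(uint64_t GCPhysFirst, uint32_t idRange)
    {
        return (GCPhysFirst & ~MODEL_PAGE_OFFSET_MASK) | (idRange & MODEL_PAGE_OFFSET_MASK);
    }

    static inline uint64_t modelGetFirst(uint64_t uFirstAndId)
    {
        return uFirstAndId & ~MODEL_PAGE_OFFSET_MASK;   /* strip the ID bits */
    }

    static inline uint32_t modelGetId(uint64_t uFirstAndId)
    {
        return (uint32_t)(uFirstAndId & MODEL_PAGE_OFFSET_MASK);
    }

With 4 KiB guest pages this leaves 12 bits for the ID, which appears to be enough headroom for the range-count limits asserted in the surrounding code.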
trunk/src/VBox/VMM/VMMR3/PGMDbg.cpp
r103359 r104840 204 204 { 205 205 UVM_ASSERT_VALID_EXT_RETURN(pUVM, VERR_INVALID_VM_HANDLE); 206 VM_ASSERT_VALID_EXT_RETURN(pUVM->pVM, VERR_INVALID_VM_HANDLE); 206 PVM const pVM = pUVM->pVM; 207 VM_ASSERT_VALID_EXT_RETURN(pVM, VERR_INVALID_VM_HANDLE); 207 208 208 209 /* … … 216 217 return VERR_INVALID_POINTER; 217 218 218 for (PPGMRAMRANGE pRam = pUVM->pVM->pgm.s.CTX_SUFF(pRamRangesX); 219 pRam; 220 pRam = pRam->CTX_SUFF(pNext)) 221 { 219 uint32_t const cLookupEntries = RT_MIN(pVM->pgm.s.RamRangeUnion.cLookupEntries, RT_ELEMENTS(pVM->pgm.s.aRamRangeLookup)); 220 for (uint32_t idxLookup = 0; idxLookup < cLookupEntries; idxLookup++) 221 { 222 uint32_t const idRamRange = PGMRAMRANGELOOKUPENTRY_GET_ID(pVM->pgm.s.aRamRangeLookup[idxLookup]); 223 AssertContinue(idRamRange < RT_ELEMENTS(pVM->pgm.s.apRamRanges)); 224 PPGMRAMRANGE const pRam = pVM->pgm.s.apRamRanges[idRamRange]; 225 if (pRam != NULL) { /*likely*/ } 226 else continue; 227 222 228 uint32_t iPage = pRam->cb >> GUEST_PAGE_SHIFT; 223 229 while (iPage-- > 0) … … 830 836 */ 831 837 PGM_LOCK_VOID(pVM); 832 for (PPGMRAMRANGE pRam = pVM->pgm.s.CTX_SUFF(pRamRangesX); 833 pRam; 834 pRam = pRam->CTX_SUFF(pNext)) 835 { 838 uint32_t const cLookupEntries = RT_MIN(pVM->pgm.s.RamRangeUnion.cLookupEntries, RT_ELEMENTS(pVM->pgm.s.aRamRangeLookup)); 839 /** @todo binary search the start address. */ 840 for (uint32_t idxLookup = 0; idxLookup < cLookupEntries; idxLookup++) 841 { 842 uint32_t const idRamRange = PGMRAMRANGELOOKUPENTRY_GET_ID(pVM->pgm.s.aRamRangeLookup[idxLookup]); 843 AssertContinue(idRamRange < RT_ELEMENTS(pVM->pgm.s.apRamRanges)); 844 PPGMRAMRANGE const pRam = pVM->pgm.s.apRamRanges[idRamRange]; 845 if (pRam != NULL) { /*likely*/ } 846 else continue; 847 836 848 /* 837 849 * If the search range starts prior to the current ram range record, … … 3274 3286 LOG_PGM_MEMBER("RTbool", fPageFusionAllowed); 3275 3287 LOG_PGM_MEMBER("RTbool", fPciPassthrough); 3276 LOG_PGM_MEMBER("#x", cMmio2R egions);3288 LOG_PGM_MEMBER("#x", cMmio2Ranges); 3277 3289 LOG_PGM_MEMBER("RTbool", fRestoreRomPagesOnReset); 3278 3290 LOG_PGM_MEMBER("RTbool", fZeroRamPagesOnReset); -
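The PGMDbg.cpp scan loop above carries a "@todo binary search the start address". Because the lookup table is kept sorted by first address and its entries do not overlap, a classic lower-bound search finds the first candidate entry. A standalone sketch with hypothetical MODELLOOKUPENTRY fields in place of the packed VBox entry:

    #include <stdint.h>
    #include <stddef.h>

    typedef struct MODELLOOKUPENTRY
    {
        uint64_t uFirst;   /* first byte covered (the real table packs an ID in here too) */
        uint64_t uLast;    /* last byte covered, inclusive */
    } MODELLOOKUPENTRY;

    /* Return the index of the first entry whose uLast >= uAddr, i.e. the entry
       that either covers uAddr or is the next one above it; cEntries if none. */
    static size_t modelLookupLowerBound(const MODELLOOKUPENTRY *paEntries, size_t cEntries, uint64_t uAddr)
    {
        size_t iLo = 0;
        size_t iHi = cEntries;
        while (iLo < iHi)
        {
            size_t const iMid = iLo + (iHi - iLo) / 2;
            if (paEntries[iMid].uLast < uAddr)
                iLo = iMid + 1;   /* entirely below uAddr, discard it and all before it */
            else
                iHi = iMid;       /* could cover or follow uAddr, keep it in play */
        }
        return iLo;
    }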
trunk/src/VBox/VMM/VMMR3/PGMPhys.cpp
r104767 r104840 138 138 * Copy loop on ram ranges. 139 139 */ 140 PPGMRAMRANGE pRam = pgmPhysGetRangeAtOrAbove(pVM, GCPhys);141 140 for (;;) 142 141 { 142 PPGMRAMRANGE pRam = pgmPhysGetRangeAtOrAbove(pVM, GCPhys); 143 143 144 /* Inside range or not? */ 144 145 if (pRam && GCPhys >= pRam->GCPhys) … … 219 220 GCPhys += cb; 220 221 } 221 222 /* Advance range if necessary. */223 while (pRam && GCPhys > pRam->GCPhysLast)224 pRam = pRam->CTX_SUFF(pNext);225 222 } /* Ram range walk */ 226 223 … … 274 271 * Copy loop on ram ranges, stop when we hit something difficult. 275 272 */ 276 PPGMRAMRANGE pRam = pgmPhysGetRangeAtOrAbove(pVM, GCPhys);277 273 for (;;) 278 274 { 275 PPGMRAMRANGE const pRam = pgmPhysGetRangeAtOrAbove(pVM, GCPhys); 276 279 277 /* Inside range or not? */ 280 278 if (pRam && GCPhys >= pRam->GCPhys) … … 358 356 GCPhys += cb; 359 357 } 360 361 /* Advance range if necessary. */362 while (pRam && GCPhys > pRam->GCPhysLast)363 pRam = pRam->CTX_SUFF(pNext);364 358 } /* Ram range walk */ 365 359 … … 1004 998 *********************************************************************************************************************************/ 1005 999 1006 #define MAKE_LEAF(a_pNode) \ 1007 do { \ 1008 (a_pNode)->pLeftR3 = NIL_RTR3PTR; \ 1009 (a_pNode)->pRightR3 = NIL_RTR3PTR; \ 1010 (a_pNode)->pLeftR0 = NIL_RTR0PTR; \ 1011 (a_pNode)->pRightR0 = NIL_RTR0PTR; \ 1012 } while (0) 1013 1014 #define INSERT_LEFT(a_pParent, a_pNode) \ 1015 do { \ 1016 (a_pParent)->pLeftR3 = (a_pNode); \ 1017 (a_pParent)->pLeftR0 = (a_pNode)->pSelfR0; \ 1018 } while (0) 1019 #define INSERT_RIGHT(a_pParent, a_pNode) \ 1020 do { \ 1021 (a_pParent)->pRightR3 = (a_pNode); \ 1022 (a_pParent)->pRightR0 = (a_pNode)->pSelfR0; \ 1023 } while (0) 1024 1025 1026 /** 1027 * Recursive tree builder. 1028 * 1029 * @param ppRam Pointer to the iterator variable. 1030 * @param iDepth The current depth. Inserts a leaf node if 0. 1031 */ 1032 static PPGMRAMRANGE pgmR3PhysRebuildRamRangeSearchTreesRecursively(PPGMRAMRANGE *ppRam, int iDepth) 1033 { 1034 PPGMRAMRANGE pRam; 1035 if (iDepth <= 0) 1036 { 1037 /* 1038 * Leaf node. 1039 */ 1040 pRam = *ppRam; 1041 if (pRam) 1042 { 1043 *ppRam = pRam->pNextR3; 1044 MAKE_LEAF(pRam); 1045 } 1046 } 1000 /** 1001 * Given the range @a GCPhys thru @a GCPhysLast, find overlapping RAM range or 1002 * the correct insertion point. 1003 * 1004 * @returns Pointer to overlapping RAM range if found, NULL if not. 1005 * @param pVM The cross context VM structure. 1006 * @param GCPhys The address of the first byte in the range. 1007 * @param GCPhysLast The address of the last byte in the range. 1008 * @param pidxInsert Where to return the lookup table index to insert the 1009 * range at when returning NULL. Set to UINT32_MAX when 1010 * returning the pointer to an overlapping range. 1011 * @note Caller must own the PGM lock. 
1012 */ 1013 static PPGMRAMRANGE pgmR3PhysRamRangeFindOverlapping(PVM pVM, RTGCPHYS GCPhys, RTGCPHYS GCPhysLast, uint32_t *pidxInsert) 1014 { 1015 PGM_LOCK_ASSERT_OWNER(pVM); 1016 uint32_t iStart = 0; 1017 uint32_t iEnd = pVM->pgm.s.RamRangeUnion.cLookupEntries; 1018 for (;;) 1019 { 1020 uint32_t idxLookup = iStart + (iEnd - iStart) / 2; 1021 RTGCPHYS const GCPhysEntryFirst = PGMRAMRANGELOOKUPENTRY_GET_FIRST(pVM->pgm.s.aRamRangeLookup[idxLookup]); 1022 if (GCPhysLast < GCPhysEntryFirst) 1023 { 1024 if (idxLookup > iStart) 1025 iEnd = idxLookup; 1026 else 1027 { 1028 *pidxInsert = idxLookup; 1029 return NULL; 1030 } 1031 } 1032 else 1033 { 1034 RTGCPHYS const GCPhysEntryLast = pVM->pgm.s.aRamRangeLookup[idxLookup].GCPhysLast; 1035 if (GCPhys > GCPhysEntryLast) 1036 { 1037 idxLookup += 1; 1038 if (idxLookup < iEnd) 1039 iStart = idxLookup; 1040 else 1041 { 1042 *pidxInsert = idxLookup; 1043 return NULL; 1044 } 1045 } 1046 else 1047 { 1048 /* overlap */ 1049 Assert(GCPhysEntryLast > GCPhys && GCPhysEntryFirst < GCPhysLast); 1050 *pidxInsert = UINT32_MAX; 1051 return pVM->pgm.s.apRamRanges[PGMRAMRANGELOOKUPENTRY_GET_ID(pVM->pgm.s.aRamRangeLookup[idxLookup])]; 1052 } 1053 } 1054 } 1055 } 1056 1057 1058 /** 1059 * Given the range @a GCPhys thru @a GCPhysLast, find the lookup table entry 1060 * that's overlapping it. 1061 * 1062 * @returns The lookup table index of the overlapping entry, UINT32_MAX if not 1063 * found. 1064 * @param pVM The cross context VM structure. 1065 * @param GCPhys The address of the first byte in the range. 1066 * @param GCPhysLast The address of the last byte in the range. 1067 * @note Caller must own the PGM lock. 1068 */ 1069 static uint32_t pgmR3PhysRamRangeFindOverlappingIndex(PVM pVM, RTGCPHYS GCPhys, RTGCPHYS GCPhysLast) 1070 { 1071 PGM_LOCK_ASSERT_OWNER(pVM); 1072 uint32_t iStart = 0; 1073 uint32_t iEnd = pVM->pgm.s.RamRangeUnion.cLookupEntries; 1074 for (;;) 1075 { 1076 uint32_t idxLookup = iStart + (iEnd - iStart) / 2; 1077 RTGCPHYS const GCPhysEntryFirst = PGMRAMRANGELOOKUPENTRY_GET_FIRST(pVM->pgm.s.aRamRangeLookup[idxLookup]); 1078 if (GCPhysLast < GCPhysEntryFirst) 1079 { 1080 if (idxLookup > iStart) 1081 iEnd = idxLookup; 1082 else 1083 return UINT32_MAX; 1084 } 1085 else 1086 { 1087 RTGCPHYS const GCPhysEntryLast = pVM->pgm.s.aRamRangeLookup[idxLookup].GCPhysLast; 1088 if (GCPhys > GCPhysEntryLast) 1089 { 1090 idxLookup += 1; 1091 if (idxLookup < iEnd) 1092 iStart = idxLookup; 1093 else 1094 return UINT32_MAX; 1095 } 1096 else 1097 { 1098 /* overlap */ 1099 Assert(GCPhysEntryLast > GCPhys && GCPhysEntryFirst < GCPhysLast); 1100 return idxLookup; 1101 } 1102 } 1103 } 1104 } 1105 1106 1107 /** 1108 * Insert @a pRam into the lookup table. 1109 * 1110 * @returns VBox status code. 1111 * @param pVM The cross context VM structure. 1112 * @param pRam The RAM range to insert into the lookup table. 1113 * @param pidxLookup Optional lookup table hint. This is updated. 1114 * @note Caller must own PGM lock. 
1115 */ 1116 static int pgmR3PhysRamRangeInsertLookup(PVM pVM, PPGMRAMRANGE pRam, RTGCPHYS GCPhys, uint32_t *pidxLookup) 1117 { 1118 PGM_LOCK_ASSERT_OWNER(pVM); 1119 #ifdef DEBUG_bird 1120 pgmPhysAssertRamRangesLocked(pVM, false /*fInUpdate*/, true /*fRamRelaxed*/); 1121 #endif 1122 AssertMsg(pRam->pszDesc, ("%RGp-%RGp\n", pRam->GCPhys, pRam->GCPhysLast)); 1123 AssertLogRelMsgReturn( pRam->GCPhys == NIL_RTGCPHYS 1124 && pRam->GCPhysLast == NIL_RTGCPHYS, 1125 ("GCPhys=%RGp; range: GCPhys=%RGp LB %RGp GCPhysLast=%RGp %s\n", 1126 GCPhys, pRam->GCPhys, pRam->cb, pRam->GCPhysLast, pRam->pszDesc), 1127 VERR_ALREADY_EXISTS); 1128 uint32_t const idRamRange = pRam->idRange; 1129 AssertReturn(pVM->pgm.s.apRamRanges[idRamRange] == pRam, VERR_INTERNAL_ERROR_2); 1130 1131 AssertReturn(!(GCPhys & GUEST_PAGE_OFFSET_MASK), VERR_INTERNAL_ERROR_3); 1132 RTGCPHYS const GCPhysLast = GCPhys + pRam->cb - 1U; 1133 AssertReturn(GCPhysLast > GCPhys, VERR_INTERNAL_ERROR_4); 1134 LogFlowFunc(("GCPhys=%RGp LB %RGp GCPhysLast=%RGp id=%#x %s\n", GCPhys, pRam->cb, GCPhysLast, idRamRange, pRam->pszDesc)); 1135 1136 /* 1137 * Find the lookup table location if necessary. 1138 */ 1139 uint32_t const cLookupEntries = pVM->pgm.s.RamRangeUnion.cLookupEntries; 1140 AssertLogRelMsgReturn(cLookupEntries + 1 < RT_ELEMENTS(pVM->pgm.s.aRamRangeLookup), /* id=0 is unused, so < is correct. */ 1141 ("%#x\n", cLookupEntries), VERR_INTERNAL_ERROR_3); 1142 1143 uint32_t idxLookup = pidxLookup ? *pidxLookup : UINT32_MAX; 1144 if (cLookupEntries == 0) 1145 idxLookup = 0; /* special case: empty table */ 1047 1146 else 1048 1147 { 1049 1050 /* 1051 * Intermediate node. 1052 */ 1053 PPGMRAMRANGE pLeft = pgmR3PhysRebuildRamRangeSearchTreesRecursively(ppRam, iDepth - 1); 1054 1055 pRam = *ppRam; 1056 if (!pRam) 1057 return pLeft; 1058 *ppRam = pRam->pNextR3; 1059 MAKE_LEAF(pRam); 1060 INSERT_LEFT(pRam, pLeft); 1061 1062 PPGMRAMRANGE pRight = pgmR3PhysRebuildRamRangeSearchTreesRecursively(ppRam, iDepth - 1); 1063 if (pRight) 1064 INSERT_RIGHT(pRam, pRight); 1065 } 1066 return pRam; 1067 } 1068 1069 1070 /** 1071 * Rebuilds the RAM range search trees. 1072 * 1148 if ( idxLookup > cLookupEntries 1149 || ( idxLookup != 0 1150 && pVM->pgm.s.aRamRangeLookup[idxLookup - 1].GCPhysLast >= GCPhys) 1151 || ( idxLookup < cLookupEntries 1152 && PGMRAMRANGELOOKUPENTRY_GET_FIRST(pVM->pgm.s.aRamRangeLookup[idxLookup]) < GCPhysLast)) 1153 { 1154 PPGMRAMRANGE pOverlapping = pgmR3PhysRamRangeFindOverlapping(pVM, GCPhys, GCPhysLast, &idxLookup); 1155 AssertLogRelMsgReturn(!pOverlapping, 1156 ("GCPhys=%RGp; GCPhysLast=%RGp %s - overlaps %RGp...%RGp %s\n", 1157 GCPhys, GCPhysLast, pRam->pszDesc, 1158 pOverlapping->GCPhys, pOverlapping->GCPhysLast, pOverlapping->pszDesc), 1159 VERR_PGM_RAM_CONFLICT); 1160 AssertLogRelMsgReturn(idxLookup <= cLookupEntries, ("%#x vs %#x\n", idxLookup, cLookupEntries), VERR_INTERNAL_ERROR_5); 1161 } 1162 /* else we've got a good hint. */ 1163 } 1164 1165 /* 1166 * Do the actual job. 1167 * 1168 * The moving of existing table entries is done in a way that allows other 1169 * EMTs to perform concurrent lookups with the updating. 
1170 */ 1171 bool const fUseAtomic = pVM->enmVMState != VMSTATE_CREATING 1172 && pVM->cCpus > 1 1173 #ifdef RT_ARCH_AMD64 1174 && g_CpumHostFeatures.s.fCmpXchg16b 1175 #endif 1176 ; 1177 1178 /* Signal that we're modifying the lookup table: */ 1179 uint32_t const idGeneration = (pVM->pgm.s.RamRangeUnion.idGeneration + 1) | 1; /* paranoia^3 */ 1180 ASMAtomicWriteU32(&pVM->pgm.s.RamRangeUnion.idGeneration, idGeneration); 1181 1182 /* Update the RAM range entry. */ 1183 pRam->GCPhys = GCPhys; 1184 pRam->GCPhysLast = GCPhysLast; 1185 1186 /* Do we need to shift any lookup table entries? */ 1187 if (idxLookup != cLookupEntries) 1188 { 1189 /* We do. Make a copy of the final entry first. */ 1190 uint32_t cToMove = cLookupEntries - idxLookup; 1191 PGMRAMRANGELOOKUPENTRY *pCur = &pVM->pgm.s.aRamRangeLookup[cLookupEntries]; 1192 pCur->GCPhysFirstAndId = pCur[-1].GCPhysFirstAndId; 1193 pCur->GCPhysLast = pCur[-1].GCPhysLast; 1194 1195 /* Then increase the table size. This will ensure that anyone starting 1196 a search from here on should have consistent data. */ 1197 ASMAtomicWriteU32(&pVM->pgm.s.RamRangeUnion.cLookupEntries, cLookupEntries + 1); 1198 1199 /* Transfer the rest of the entries. */ 1200 cToMove -= 1; 1201 if (cToMove > 0) 1202 { 1203 if (!fUseAtomic) 1204 do 1205 { 1206 pCur -= 1; 1207 pCur->GCPhysFirstAndId = pCur[-1].GCPhysFirstAndId; 1208 pCur->GCPhysLast = pCur[-1].GCPhysLast; 1209 cToMove -= 1; 1210 } while (cToMove > 0); 1211 else 1212 { 1213 #if RTASM_HAVE_WRITE_U128 >= 2 1214 do 1215 { 1216 pCur -= 1; 1217 ASMAtomicWriteU128U(&pCur->u128Volatile, pCur[-1].u128Normal); 1218 cToMove -= 1; 1219 } while (cToMove > 0); 1220 1221 #else 1222 uint64_t u64PrevLo = pCur[-1].u128Normal.s.Lo; 1223 uint64_t u64PrevHi = pCur[-1].u128Normal.s.Hi; 1224 do 1225 { 1226 pCur -= 1; 1227 uint64_t const u64CurLo = pCur[-1].u128Normal.s.Lo; 1228 uint64_t const u64CurHi = pCur[-1].u128Normal.s.Hi; 1229 uint128_t uOldIgn; 1230 AssertStmt(ASMAtomicCmpXchgU128v2(&pCur->u128Volatile.u, u64CurHi, u64CurLo, u64PrevHi, u64PrevLo, &uOldIgn), 1231 (pCur->u128Volatile.s.Lo = u64CurLo, pCur->u128Volatile.s.Hi = u64CurHi)); 1232 u64PrevLo = u64CurLo; 1233 u64PrevHi = u64CurHi; 1234 cToMove -= 1; 1235 } while (cToMove > 0); 1236 #endif 1237 } 1238 } 1239 } 1240 1241 /* 1242 * Write the new entry. 1243 */ 1244 PGMRAMRANGELOOKUPENTRY *pInsert = &pVM->pgm.s.aRamRangeLookup[idxLookup]; 1245 if (!fUseAtomic) 1246 { 1247 pInsert->GCPhysFirstAndId = idRamRange | GCPhys; 1248 pInsert->GCPhysLast = GCPhysLast; 1249 } 1250 else 1251 { 1252 PGMRAMRANGELOOKUPENTRY NewEntry; 1253 NewEntry.GCPhysFirstAndId = idRamRange | GCPhys; 1254 NewEntry.GCPhysLast = GCPhysLast; 1255 ASMAtomicWriteU128v2(&pInsert->u128Volatile.u, NewEntry.u128Normal.s.Hi, NewEntry.u128Normal.s.Lo); 1256 } 1257 1258 /* 1259 * Update the generation and count in one go, signaling the end of the updating. 1260 */ 1261 PGM::PGMRAMRANGEGENANDLOOKUPCOUNT GenAndCount; 1262 GenAndCount.cLookupEntries = cLookupEntries + 1; 1263 GenAndCount.idGeneration = idGeneration + 1; 1264 ASMAtomicWriteU64(&pVM->pgm.s.RamRangeUnion.u64Combined, GenAndCount.u64Combined); 1265 1266 if (pidxLookup) 1267 *pidxLookup = idxLookup + 1; 1268 1269 #ifdef DEBUG_bird 1270 pgmPhysAssertRamRangesLocked(pVM, false /*fInUpdate*/, false /*fRamRelaxed*/); 1271 #endif 1272 return VINF_SUCCESS; 1273 } 1274 1275 1276 /** 1277 * Removes @a pRam from the lookup table. 1278 * 1279 * @returns VBox status code. 1073 1280 * @param pVM The cross context VM structure. 
1074 */ 1075 static void pgmR3PhysRebuildRamRangeSearchTrees(PVM pVM) 1076 { 1077 1078 /* 1079 * Create the reasonably balanced tree in a sequential fashion. 1080 * For simplicity (laziness) we use standard recursion here. 1081 */ 1082 int iDepth = 0; 1083 PPGMRAMRANGE pRam = pVM->pgm.s.pRamRangesXR3; 1084 PPGMRAMRANGE pRoot = pgmR3PhysRebuildRamRangeSearchTreesRecursively(&pRam, 0); 1085 while (pRam) 1086 { 1087 PPGMRAMRANGE pLeft = pRoot; 1088 1089 pRoot = pRam; 1090 pRam = pRam->pNextR3; 1091 MAKE_LEAF(pRoot); 1092 INSERT_LEFT(pRoot, pLeft); 1093 1094 PPGMRAMRANGE pRight = pgmR3PhysRebuildRamRangeSearchTreesRecursively(&pRam, iDepth); 1095 if (pRight) 1096 INSERT_RIGHT(pRoot, pRight); 1097 /** @todo else: rotate the tree. */ 1098 1099 iDepth++; 1100 } 1101 1102 pVM->pgm.s.pRamRangeTreeR3 = pRoot; 1103 pVM->pgm.s.pRamRangeTreeR0 = pRoot ? pRoot->pSelfR0 : NIL_RTR0PTR; 1104 1105 #ifdef VBOX_STRICT 1106 /* 1107 * Verify that the above code works. 1108 */ 1109 unsigned cRanges = 0; 1110 for (pRam = pVM->pgm.s.pRamRangesXR3; pRam; pRam = pRam->pNextR3) 1111 cRanges++; 1112 Assert(cRanges > 0); 1113 1114 unsigned cMaxDepth = ASMBitLastSetU32(cRanges); 1115 if ((1U << cMaxDepth) < cRanges) 1116 cMaxDepth++; 1117 1118 for (pRam = pVM->pgm.s.pRamRangesXR3; pRam; pRam = pRam->pNextR3) 1119 { 1120 unsigned cDepth = 0; 1121 PPGMRAMRANGE pRam2 = pVM->pgm.s.pRamRangeTreeR3; 1281 * @param pRam The RAM range to insert into the lookup table. 1282 * @param pidxLookup Optional lookup table hint. This is updated. 1283 * @note Caller must own PGM lock. 1284 */ 1285 static int pgmR3PhysRamRangeRemoveLookup(PVM pVM, PPGMRAMRANGE pRam, uint32_t *pidxLookup) 1286 { 1287 PGM_LOCK_ASSERT_OWNER(pVM); 1288 AssertMsg(pRam->pszDesc, ("%RGp-%RGp\n", pRam->GCPhys, pRam->GCPhysLast)); 1289 1290 RTGCPHYS const GCPhys = pRam->GCPhys; 1291 RTGCPHYS const GCPhysLast = pRam->GCPhysLast; 1292 AssertLogRelMsgReturn( GCPhys != NIL_RTGCPHYS 1293 || GCPhysLast != NIL_RTGCPHYS, 1294 ("range: GCPhys=%RGp LB %RGp GCPhysLast=%RGp %s\n", GCPhys, pRam->cb, GCPhysLast, pRam->pszDesc), 1295 VERR_NOT_FOUND); 1296 AssertLogRelMsgReturn( GCPhys != NIL_RTGCPHYS 1297 && GCPhysLast == GCPhys + pRam->cb - 1U 1298 && (GCPhys & GUEST_PAGE_OFFSET_MASK) == 0 1299 && (GCPhysLast & GUEST_PAGE_OFFSET_MASK) == GUEST_PAGE_OFFSET_MASK 1300 && GCPhysLast > GCPhys, 1301 ("range: GCPhys=%RGp LB %RGp GCPhysLast=%RGp %s\n", GCPhys, pRam->cb, GCPhysLast, pRam->pszDesc), 1302 VERR_INTERNAL_ERROR_5); 1303 uint32_t const idRamRange = pRam->idRange; 1304 AssertReturn(pVM->pgm.s.apRamRanges[idRamRange] == pRam, VERR_INTERNAL_ERROR_4); 1305 LogFlowFunc(("GCPhys=%RGp LB %RGp GCPhysLast=%RGp id=%#x %s\n", GCPhys, pRam->cb, GCPhysLast, idRamRange, pRam->pszDesc)); 1306 1307 /* 1308 * Find the lookup table location. 1309 */ 1310 uint32_t const cLookupEntries = pVM->pgm.s.RamRangeUnion.cLookupEntries; 1311 AssertLogRelMsgReturn( cLookupEntries > 0 1312 && cLookupEntries < RT_ELEMENTS(pVM->pgm.s.aRamRangeLookup), /* id=0 is unused, so < is correct. */ 1313 ("%#x\n", cLookupEntries), VERR_INTERNAL_ERROR_3); 1314 1315 uint32_t idxLookup = pidxLookup ? 
*pidxLookup : UINT32_MAX; 1316 if ( idxLookup >= cLookupEntries 1317 || pVM->pgm.s.aRamRangeLookup[idxLookup].GCPhysLast != GCPhysLast 1318 || pVM->pgm.s.aRamRangeLookup[idxLookup].GCPhysFirstAndId != (GCPhys | idRamRange)) 1319 { 1320 uint32_t iStart = 0; 1321 uint32_t iEnd = cLookupEntries; 1122 1322 for (;;) 1123 1323 { 1124 if (pRam == pRam2) 1125 break; 1126 Assert(pRam2); 1127 if (pRam->GCPhys < pRam2->GCPhys) 1128 pRam2 = pRam2->pLeftR3; 1324 idxLookup = iStart + (iEnd - iStart) / 2; 1325 RTGCPHYS const GCPhysEntryFirst = PGMRAMRANGELOOKUPENTRY_GET_FIRST(pVM->pgm.s.aRamRangeLookup[idxLookup]); 1326 if (GCPhysLast < GCPhysEntryFirst) 1327 { 1328 AssertLogRelMsgReturn(idxLookup > iStart, 1329 ("range: GCPhys=%RGp LB %RGp GCPhysLast=%RGp %s\n", 1330 GCPhys, pRam->cb, GCPhysLast, pRam->pszDesc), 1331 VERR_NOT_FOUND); 1332 iEnd = idxLookup; 1333 } 1129 1334 else 1130 pRam2 = pRam2->pRightR3; 1131 } 1132 AssertMsg(cDepth <= cMaxDepth, ("cDepth=%d cMaxDepth=%d\n", cDepth, cMaxDepth)); 1133 } 1134 #endif /* VBOX_STRICT */ 1135 } 1136 1137 #undef MAKE_LEAF 1138 #undef INSERT_LEFT 1139 #undef INSERT_RIGHT 1140 1141 /** 1142 * Relinks the RAM ranges using the pSelfRC and pSelfR0 pointers. 1143 * 1144 * Called when anything was relocated. 1145 * 1146 * @param pVM The cross context VM structure. 1147 */ 1148 void pgmR3PhysRelinkRamRanges(PVM pVM) 1149 { 1150 PPGMRAMRANGE pCur; 1151 1152 #ifdef VBOX_STRICT 1153 for (pCur = pVM->pgm.s.pRamRangesXR3; pCur; pCur = pCur->pNextR3) 1154 { 1155 Assert((pCur->GCPhys & GUEST_PAGE_OFFSET_MASK) == 0); 1156 Assert((pCur->GCPhysLast & GUEST_PAGE_OFFSET_MASK) == GUEST_PAGE_OFFSET_MASK); 1157 Assert((pCur->cb & GUEST_PAGE_OFFSET_MASK) == 0); 1158 Assert(pCur->cb == pCur->GCPhysLast - pCur->GCPhys + 1); 1159 for (PPGMRAMRANGE pCur2 = pVM->pgm.s.pRamRangesXR3; pCur2; pCur2 = pCur2->pNextR3) 1160 Assert( pCur2 == pCur 1161 || strcmp(pCur2->pszDesc, pCur->pszDesc)); /** @todo fix MMIO ranges!! */ 1162 } 1163 #endif 1164 1165 pCur = pVM->pgm.s.pRamRangesXR3; 1166 if (pCur) 1167 { 1168 pVM->pgm.s.pRamRangesXR0 = pCur->pSelfR0; 1169 1170 for (; pCur->pNextR3; pCur = pCur->pNextR3) 1171 pCur->pNextR0 = pCur->pNextR3->pSelfR0; 1172 1173 Assert(pCur->pNextR0 == NIL_RTR0PTR); 1174 } 1175 else 1176 { 1177 Assert(pVM->pgm.s.pRamRangesXR0 == NIL_RTR0PTR); 1178 } 1179 ASMAtomicIncU32(&pVM->pgm.s.idRamRangesGen); 1180 1181 pgmR3PhysRebuildRamRangeSearchTrees(pVM); 1182 } 1183 1184 1185 /** 1186 * Links a new RAM range into the list. 1187 * 1188 * @param pVM The cross context VM structure. 1189 * @param pNew Pointer to the new list entry. 1190 * @param pPrev Pointer to the previous list entry. If NULL, insert as head. 1191 */ 1192 static void pgmR3PhysLinkRamRange(PVM pVM, PPGMRAMRANGE pNew, PPGMRAMRANGE pPrev) 1193 { 1194 AssertMsg(pNew->pszDesc, ("%RGp-%RGp\n", pNew->GCPhys, pNew->GCPhysLast)); 1195 1196 PGM_LOCK_VOID(pVM); 1197 1198 PPGMRAMRANGE pRam = pPrev ? pPrev->pNextR3 : pVM->pgm.s.pRamRangesXR3; 1199 pNew->pNextR3 = pRam; 1200 pNew->pNextR0 = pRam ? pRam->pSelfR0 : NIL_RTR0PTR; 1201 1202 if (pPrev) 1203 { 1204 pPrev->pNextR3 = pNew; 1205 pPrev->pNextR0 = pNew->pSelfR0; 1206 } 1207 else 1208 { 1209 pVM->pgm.s.pRamRangesXR3 = pNew; 1210 pVM->pgm.s.pRamRangesXR0 = pNew->pSelfR0; 1211 } 1212 ASMAtomicIncU32(&pVM->pgm.s.idRamRangesGen); 1213 1214 pgmR3PhysRebuildRamRangeSearchTrees(pVM); 1215 PGM_UNLOCK(pVM); 1216 } 1217 1218 1219 /** 1220 * Unlink an existing RAM range from the list. 1221 * 1222 * @param pVM The cross context VM structure. 
1223 * @param pRam Pointer to the new list entry. 1224 * @param pPrev Pointer to the previous list entry. If NULL, insert as head. 1225 */ 1226 static void pgmR3PhysUnlinkRamRange2(PVM pVM, PPGMRAMRANGE pRam, PPGMRAMRANGE pPrev) 1227 { 1228 Assert(pPrev ? pPrev->pNextR3 == pRam : pVM->pgm.s.pRamRangesXR3 == pRam); 1229 1230 PGM_LOCK_VOID(pVM); 1231 1232 PPGMRAMRANGE pNext = pRam->pNextR3; 1233 if (pPrev) 1234 { 1235 pPrev->pNextR3 = pNext; 1236 pPrev->pNextR0 = pNext ? pNext->pSelfR0 : NIL_RTR0PTR; 1237 } 1238 else 1239 { 1240 Assert(pVM->pgm.s.pRamRangesXR3 == pRam); 1241 pVM->pgm.s.pRamRangesXR3 = pNext; 1242 pVM->pgm.s.pRamRangesXR0 = pNext ? pNext->pSelfR0 : NIL_RTR0PTR; 1243 } 1244 ASMAtomicIncU32(&pVM->pgm.s.idRamRangesGen); 1245 1246 pgmR3PhysRebuildRamRangeSearchTrees(pVM); 1247 PGM_UNLOCK(pVM); 1248 } 1249 1250 1251 /** 1252 * Unlink an existing RAM range from the list. 1253 * 1254 * @param pVM The cross context VM structure. 1255 * @param pRam Pointer to the new list entry. 1256 */ 1257 static void pgmR3PhysUnlinkRamRange(PVM pVM, PPGMRAMRANGE pRam) 1258 { 1259 PGM_LOCK_VOID(pVM); 1260 1261 /* find prev. */ 1262 PPGMRAMRANGE pPrev = NULL; 1263 PPGMRAMRANGE pCur = pVM->pgm.s.pRamRangesXR3; 1264 while (pCur != pRam) 1265 { 1266 pPrev = pCur; 1267 pCur = pCur->pNextR3; 1268 } 1269 AssertFatal(pCur); 1270 1271 pgmR3PhysUnlinkRamRange2(pVM, pRam, pPrev); 1272 PGM_UNLOCK(pVM); 1335 { 1336 RTGCPHYS const GCPhysEntryLast = pVM->pgm.s.aRamRangeLookup[idxLookup].GCPhysLast; 1337 if (GCPhys > GCPhysEntryLast) 1338 { 1339 idxLookup += 1; 1340 AssertLogRelMsgReturn(idxLookup < iEnd, 1341 ("range: GCPhys=%RGp LB %RGp GCPhysLast=%RGp %s\n", 1342 GCPhys, pRam->cb, GCPhysLast, pRam->pszDesc), 1343 VERR_NOT_FOUND); 1344 iStart = idxLookup; 1345 } 1346 else 1347 { 1348 uint32_t const idEntry = PGMRAMRANGELOOKUPENTRY_GET_ID(pVM->pgm.s.aRamRangeLookup[idxLookup]); 1349 AssertLogRelMsgReturn( GCPhysEntryFirst == GCPhys 1350 && GCPhysEntryLast == GCPhysLast 1351 && idEntry == idRamRange, 1352 ("Found: %RGp..%RGp id=%#x; Wanted: GCPhys=%RGp LB %RGp GCPhysLast=%RGp id=%#x %s\n", 1353 GCPhysEntryFirst, GCPhysEntryLast, idEntry, 1354 GCPhys, pRam->cb, GCPhysLast, pRam->idRange, pRam->pszDesc), 1355 VERR_NOT_FOUND); 1356 break; 1357 } 1358 } 1359 } 1360 } 1361 /* else we've got a good hint. */ 1362 1363 /* 1364 * Do the actual job. 1365 * 1366 * The moving of existing table entries is done in a way that allows other 1367 * EMTs to perform concurrent lookups with the updating. 1368 */ 1369 bool const fUseAtomic = pVM->enmVMState != VMSTATE_CREATING 1370 && pVM->cCpus > 1 1371 #ifdef RT_ARCH_AMD64 1372 && g_CpumHostFeatures.s.fCmpXchg16b 1373 #endif 1374 ; 1375 1376 /* Signal that we're modifying the lookup table: */ 1377 uint32_t const idGeneration = (pVM->pgm.s.RamRangeUnion.idGeneration + 1) | 1; /* paranoia^3 */ 1378 ASMAtomicWriteU32(&pVM->pgm.s.RamRangeUnion.idGeneration, idGeneration); 1379 1380 /* Do we need to shift any lookup table entries? (This is a lot simpler 1381 than insertion.) 
*/ 1382 if (idxLookup + 1U < cLookupEntries) 1383 { 1384 uint32_t cToMove = cLookupEntries - idxLookup - 1U; 1385 PGMRAMRANGELOOKUPENTRY *pCur = &pVM->pgm.s.aRamRangeLookup[idxLookup]; 1386 if (!fUseAtomic) 1387 do 1388 { 1389 pCur->GCPhysFirstAndId = pCur[1].GCPhysFirstAndId; 1390 pCur->GCPhysLast = pCur[1].GCPhysLast; 1391 pCur += 1; 1392 cToMove -= 1; 1393 } while (cToMove > 0); 1394 else 1395 { 1396 #if RTASM_HAVE_WRITE_U128 >= 2 1397 do 1398 { 1399 ASMAtomicWriteU128U(&pCur->u128Volatile, pCur[1].u128Normal); 1400 pCur += 1; 1401 cToMove -= 1; 1402 } while (cToMove > 0); 1403 1404 #else 1405 uint64_t u64PrevLo = pCur->u128Normal.s.Lo; 1406 uint64_t u64PrevHi = pCur->u128Normal.s.Hi; 1407 do 1408 { 1409 uint64_t const u64CurLo = pCur[1].u128Normal.s.Lo; 1410 uint64_t const u64CurHi = pCur[1].u128Normal.s.Hi; 1411 uint128_t uOldIgn; 1412 AssertStmt(ASMAtomicCmpXchgU128v2(&pCur->u128Volatile.u, u64CurHi, u64CurLo, u64PrevHi, u64PrevLo, &uOldIgn), 1413 (pCur->u128Volatile.s.Lo = u64CurLo, pCur->u128Volatile.s.Hi = u64CurHi)); 1414 u64PrevLo = u64CurLo; 1415 u64PrevHi = u64CurHi; 1416 pCur += 1; 1417 cToMove -= 1; 1418 } while (cToMove > 0); 1419 #endif 1420 } 1421 } 1422 1423 /* Update the RAM range entry to indicate that it is no longer mapped. */ 1424 pRam->GCPhys = NIL_RTGCPHYS; 1425 pRam->GCPhysLast = NIL_RTGCPHYS; 1426 1427 /* 1428 * Update the generation and count in one go, signaling the end of the updating. 1429 */ 1430 PGM::PGMRAMRANGEGENANDLOOKUPCOUNT GenAndCount; 1431 GenAndCount.cLookupEntries = cLookupEntries - 1; 1432 GenAndCount.idGeneration = idGeneration + 1; 1433 ASMAtomicWriteU64(&pVM->pgm.s.RamRangeUnion.u64Combined, GenAndCount.u64Combined); 1434 1435 if (pidxLookup) 1436 *pidxLookup = idxLookup + 1; 1437 1438 return VINF_SUCCESS; 1273 1439 } 1274 1440 … … 1285 1451 1286 1452 PGM_LOCK_VOID(pVM); 1287 uint32_t cRamRanges = 0; 1288 for (PPGMRAMRANGE pCur = pVM->pgm.s.CTX_SUFF(pRamRangesX); pCur; pCur = pCur->CTX_SUFF(pNext)) 1289 cRamRanges++; 1453 uint32_t const cRamRanges = RT_MIN(pVM->pgm.s.RamRangeUnion.cLookupEntries, RT_ELEMENTS(pVM->pgm.s.aRamRangeLookup)); 1290 1454 PGM_UNLOCK(pVM); 1291 1455 return cRamRanges; … … 1312 1476 1313 1477 PGM_LOCK_VOID(pVM); 1314 uint32_t iCurRange = 0; 1315 for (PPGMRAMRANGE pCur = pVM->pgm.s.CTX_SUFF(pRamRangesX); pCur; pCur = pCur->CTX_SUFF(pNext), iCurRange++) 1316 if (iCurRange == iRange) 1317 { 1318 if (pGCPhysStart) 1319 *pGCPhysStart = pCur->GCPhys; 1320 if (pGCPhysLast) 1321 *pGCPhysLast = pCur->GCPhysLast; 1322 if (ppszDesc) 1323 *ppszDesc = pCur->pszDesc; 1324 if (pfIsMmio) 1325 *pfIsMmio = !!(pCur->fFlags & PGM_RAM_RANGE_FLAGS_AD_HOC_MMIO); 1326 1327 PGM_UNLOCK(pVM); 1328 return VINF_SUCCESS; 1329 } 1478 uint32_t const cLookupEntries = RT_MIN(pVM->pgm.s.RamRangeUnion.cLookupEntries, RT_ELEMENTS(pVM->pgm.s.aRamRangeLookup)); 1479 if (iRange < cLookupEntries) 1480 { 1481 uint32_t const idRamRange = PGMRAMRANGELOOKUPENTRY_GET_ID(pVM->pgm.s.aRamRangeLookup[iRange]); 1482 Assert(idRamRange && idRamRange <= pVM->pgm.s.idRamRangeMax); 1483 PGMRAMRANGE const * const pRamRange = pVM->pgm.s.apRamRanges[idRamRange]; 1484 AssertPtr(pRamRange); 1485 1486 if (pGCPhysStart) 1487 *pGCPhysStart = pRamRange->GCPhys; 1488 if (pGCPhysLast) 1489 *pGCPhysLast = pRamRange->GCPhysLast; 1490 if (ppszDesc) 1491 *ppszDesc = pRamRange->pszDesc; 1492 if (pfIsMmio) 1493 *pfIsMmio = !!(pRamRange->fFlags & PGM_RAM_RANGE_FLAGS_AD_HOC_MMIO); 1494 1495 PGM_UNLOCK(pVM); 1496 return VINF_SUCCESS; 1497 } 1330 1498 PGM_UNLOCK(pVM); 1331 1499 return 
VERR_OUT_OF_RANGE; … … 1516 1684 uint32_t const fNemNotify = (pvMmio2 ? NEM_NOTIFY_PHYS_MMIO_EX_F_MMIO2 : 0) | NEM_NOTIFY_PHYS_MMIO_EX_F_REPLACE; 1517 1685 int rc = NEMR3NotifyPhysMmioExMapEarly(pVM, GCPhys, GCPhysLast - GCPhys + 1, fNemNotify, 1518 pRam->p vR3 ? (uint8_t *)pRam->pvR3 + GCPhys - pRam->GCPhys : NULL,1686 pRam->pbR3 ? pRam->pbR3 + GCPhys - pRam->GCPhys : NULL, 1519 1687 pvMmio2, &u2State, NULL /*puNemRange*/); 1520 1688 AssertLogRelRCReturn(rc, rc); … … 1593 1761 1594 1762 /** 1763 * Wrapper around VMMR0_DO_PGM_PHYS_ALLOCATE_RAM_RANGE. 1764 */ 1765 static int pgmR3PhysAllocateRamRange(PVM pVM, PVMCPU pVCpu, uint32_t cGuestPages, uint32_t fFlags, PPGMRAMRANGE *ppRamRange) 1766 { 1767 int rc; 1768 PGMPHYSALLOCATERAMRANGEREQ AllocRangeReq; 1769 AllocRangeReq.idNewRange = UINT32_MAX / 4; 1770 if (SUPR3IsDriverless()) 1771 rc = pgmPhysRamRangeAllocCommon(pVM, cGuestPages, fFlags, &AllocRangeReq.idNewRange); 1772 else 1773 { 1774 AllocRangeReq.Hdr.u32Magic = SUPVMMR0REQHDR_MAGIC; 1775 AllocRangeReq.Hdr.cbReq = sizeof(AllocRangeReq); 1776 AllocRangeReq.cbGuestPage = GUEST_PAGE_SIZE; 1777 AllocRangeReq.cGuestPages = cGuestPages; 1778 AllocRangeReq.fFlags = fFlags; 1779 rc = VMMR3CallR0Emt(pVM, pVCpu, VMMR0_DO_PGM_PHYS_ALLOCATE_RAM_RANGE, 0 /*u64Arg*/, &AllocRangeReq.Hdr); 1780 } 1781 if (RT_SUCCESS(rc)) 1782 { 1783 Assert(AllocRangeReq.idNewRange != 0); 1784 Assert(AllocRangeReq.idNewRange < RT_ELEMENTS(pVM->pgm.s.apRamRanges)); 1785 AssertPtr(pVM->pgm.s.apRamRanges[AllocRangeReq.idNewRange]); 1786 *ppRamRange = pVM->pgm.s.apRamRanges[AllocRangeReq.idNewRange]; 1787 return VINF_SUCCESS; 1788 } 1789 1790 *ppRamRange = NULL; 1791 return rc; 1792 } 1793 1794 1795 /** 1595 1796 * PGMR3PhysRegisterRam worker that initializes and links a RAM range. 1596 1797 * … … 1603 1804 * @param GCPhys The address of the RAM range. 1604 1805 * @param GCPhysLast The last address of the RAM range. 1605 * @param R0PtrNew Ditto for R0.1606 * @param fFlags PGM_RAM_RANGE_FLAGS_FLOATING or zero.1607 1806 * @param pszDesc The description. 1608 * @param p Prev The previous RAM range (for linking).1807 * @param pidxLookup The lookup table insertion point. 1609 1808 */ 1610 1809 static int pgmR3PhysInitAndLinkRamRange(PVM pVM, PPGMRAMRANGE pNew, RTGCPHYS GCPhys, RTGCPHYS GCPhysLast, 1611 RTR0PTR R0PtrNew, uint32_t fFlags, const char *pszDesc, PPGMRAMRANGE pPrev)1810 const char *pszDesc, uint32_t *pidxLookup) 1612 1811 { 1613 1812 /* 1614 1813 * Initialize the range. 1615 1814 */ 1616 pNew->pSelfR0 = R0PtrNew; 1617 pNew->GCPhys = GCPhys; 1618 pNew->GCPhysLast = GCPhysLast; 1619 pNew->cb = GCPhysLast - GCPhys + 1; 1815 Assert(pNew->cb == GCPhysLast - GCPhys + 1U); 1620 1816 pNew->pszDesc = pszDesc; 1621 pNew->fFlags = fFlags;1622 1817 pNew->uNemRange = UINT32_MAX; 1623 pNew->p vR3 = NULL;1818 pNew->pbR3 = NULL; 1624 1819 pNew->paLSPages = NULL; 1625 1820 … … 1641 1836 { 1642 1837 int rc = SUPR3PageAlloc(RT_ALIGN_Z(pNew->cb, HOST_PAGE_SIZE) >> HOST_PAGE_SHIFT, 1643 pVM->pgm.s.fUseLargePages ? SUP_PAGE_ALLOC_F_LARGE_PAGES : 0, &pNew->pvR3);1838 pVM->pgm.s.fUseLargePages ? SUP_PAGE_ALLOC_F_LARGE_PAGES : 0, (void **)&pNew->pbR3); 1644 1839 if (RT_FAILURE(rc)) 1645 1840 return rc; … … 1657 1852 1658 1853 /* 1659 * Link it. 1660 */ 1661 pgmR3PhysLinkRamRange(pVM, pNew, pPrev); 1854 * Insert it into the lookup table. 
1855 */ 1856 int rc = pgmR3PhysRamRangeInsertLookup(pVM, pNew, GCPhys, pidxLookup); 1857 AssertRCReturn(rc, rc); 1662 1858 1663 1859 #ifdef VBOX_WITH_NATIVE_NEM 1664 1860 /* 1665 1861 * Notify NEM now that it has been linked. 1862 * 1863 * As above, it is assumed that on failure the VM creation will fail, so 1864 * no extra cleanup is needed here. 1666 1865 */ 1667 1866 if (VM_IS_NEM_ENABLED(pVM)) 1668 1867 { 1669 1868 uint8_t u2State = UINT8_MAX; 1670 int rc = NEMR3NotifyPhysRamRegister(pVM, GCPhys, pNew->cb, pNew->pvR3, &u2State, &pNew->uNemRange); 1671 if (RT_SUCCESS(rc)) 1672 { 1673 if (u2State != UINT8_MAX) 1674 pgmPhysSetNemStateForPages(&pNew->aPages[0], cPages, u2State); 1675 } 1676 else 1677 pgmR3PhysUnlinkRamRange2(pVM, pNew, pPrev); 1869 rc = NEMR3NotifyPhysRamRegister(pVM, GCPhys, pNew->cb, pNew->pbR3, &u2State, &pNew->uNemRange); 1870 if (RT_SUCCESS(rc) && u2State != UINT8_MAX) 1871 pgmPhysSetNemStateForPages(&pNew->aPages[0], cPages, u2State); 1678 1872 return rc; 1679 1873 } … … 1684 1878 1685 1879 /** 1686 * PGMR3PhysRegisterRam worker that registers a high chunk. 1687 * 1688 * @returns VBox status code. 1689 * @param pVM The cross context VM structure. 1690 * @param GCPhys The address of the RAM. 1691 * @param cRamPages The number of RAM pages to register. 1692 * @param iChunk The chunk number. 1693 * @param pszDesc The RAM range description. 1694 * @param ppPrev Previous RAM range pointer. In/Out. 1695 */ 1696 static int pgmR3PhysRegisterHighRamChunk(PVM pVM, RTGCPHYS GCPhys, uint32_t cRamPages, uint32_t iChunk, 1697 const char *pszDesc, PPGMRAMRANGE *ppPrev) 1698 { 1699 const char *pszDescChunk = iChunk == 0 1700 ? pszDesc 1701 : MMR3HeapAPrintf(pVM, MM_TAG_PGM_PHYS, "%s (#%u)", pszDesc, iChunk + 1); 1702 AssertReturn(pszDescChunk, VERR_NO_MEMORY); 1703 1704 /* 1705 * Allocate memory for the new chunk. 1706 */ 1707 size_t const cChunkPages = RT_ALIGN_Z(RT_UOFFSETOF_DYN(PGMRAMRANGE, aPages[cRamPages]), HOST_PAGE_SIZE) >> HOST_PAGE_SHIFT; 1708 PSUPPAGE paChunkPages = (PSUPPAGE)RTMemTmpAllocZ(sizeof(SUPPAGE) * cChunkPages); 1709 AssertReturn(paChunkPages, VERR_NO_TMP_MEMORY); 1710 RTR0PTR R0PtrChunk = NIL_RTR0PTR; 1711 void *pvChunk = NULL; 1712 int rc = SUPR3PageAllocEx(cChunkPages, 0 /*fFlags*/, &pvChunk, &R0PtrChunk, paChunkPages); 1713 if (RT_SUCCESS(rc)) 1714 { 1715 Assert(R0PtrChunk != NIL_RTR0PTR || PGM_IS_IN_NEM_MODE(pVM)); 1716 memset(pvChunk, 0, cChunkPages << HOST_PAGE_SHIFT); 1717 1718 PPGMRAMRANGE pNew = (PPGMRAMRANGE)pvChunk; 1880 * Worker for PGMR3PhysRegisterRam called with the PGM lock. 1881 * 1882 * The caller releases the lock. 1883 */ 1884 static int pgmR3PhysRegisterRamWorker(PVM pVM, PVMCPU pVCpu, RTGCPHYS GCPhys, RTGCPHYS cb, const char *pszDesc, 1885 uint32_t const cRamRanges, RTGCPHYS const GCPhysLast) 1886 { 1887 #ifdef VBOX_STRICT 1888 pgmPhysAssertRamRangesLocked(pVM, false /*fInUpdate*/, false /*fRamRelaxed*/); 1889 #endif 1890 1891 /* 1892 * Check that we've got enough free RAM ranges. 1893 */ 1894 AssertLogRelMsgReturn((uint64_t)pVM->pgm.s.idRamRangeMax + cRamRanges + 1 <= RT_ELEMENTS(pVM->pgm.s.aRamRangeLookup), 1895 ("idRamRangeMax=%#RX32 vs GCPhys=%RGp cb=%RGp / %#RX32 ranges (%s)\n", 1896 pVM->pgm.s.idRamRangeMax, GCPhys, cb, cRamRanges, pszDesc), 1897 VERR_PGM_TOO_MANY_RAM_RANGES); 1898 1899 /* 1900 * Check for conflicts via the lookup table. We search it backwards, 1901 * assuming that memory is added in ascending order by address. 
1902 */ 1903 uint32_t idxLookup = pVM->pgm.s.RamRangeUnion.cLookupEntries; 1904 while (idxLookup) 1905 { 1906 if (GCPhys > pVM->pgm.s.aRamRangeLookup[idxLookup - 1].GCPhysLast) 1907 break; 1908 idxLookup--; 1909 RTGCPHYS const GCPhysCur = PGMRAMRANGELOOKUPENTRY_GET_FIRST(pVM->pgm.s.aRamRangeLookup[idxLookup]); 1910 AssertLogRelMsgReturn( GCPhysLast < GCPhysCur 1911 || GCPhys > pVM->pgm.s.aRamRangeLookup[idxLookup].GCPhysLast, 1912 ("%RGp-%RGp (%s) conflicts with existing %RGp-%RGp (%s)\n", 1913 GCPhys, GCPhysLast, pszDesc, GCPhysCur, pVM->pgm.s.aRamRangeLookup[idxLookup].GCPhysLast, 1914 pVM->pgm.s.apRamRanges[PGMRAMRANGELOOKUPENTRY_GET_ID(pVM->pgm.s.aRamRangeLookup[idxLookup])]->pszDesc), 1915 VERR_PGM_RAM_CONFLICT); 1916 } 1917 1918 /* 1919 * Register it with GMM (the API bitches). 1920 */ 1921 const RTGCPHYS cPages = cb >> GUEST_PAGE_SHIFT; 1922 int rc = MMR3IncreaseBaseReservation(pVM, cPages); 1923 if (RT_FAILURE(rc)) 1924 return rc; 1925 1926 /* 1927 * Create the required chunks. 1928 */ 1929 RTGCPHYS cPagesLeft = cPages; 1930 RTGCPHYS GCPhysChunk = GCPhys; 1931 uint32_t idxChunk = 0; 1932 while (cPagesLeft > 0) 1933 { 1934 uint32_t cPagesInChunk = cPagesLeft; 1935 if (cPagesInChunk > PGM_MAX_PAGES_PER_RAM_RANGE) 1936 cPagesInChunk = PGM_MAX_PAGES_PER_RAM_RANGE; 1937 1938 const char *pszDescChunk = idxChunk == 0 1939 ? pszDesc 1940 : MMR3HeapAPrintf(pVM, MM_TAG_PGM_PHYS, "%s (#%u)", pszDesc, idxChunk + 1); 1941 AssertReturn(pszDescChunk, VERR_NO_MEMORY); 1942 1943 /* 1944 * Allocate a RAM range. 1945 */ 1946 PPGMRAMRANGE pNew = NULL; 1947 rc = pgmR3PhysAllocateRamRange(pVM, pVCpu, cPagesInChunk, 0 /*fFlags*/, &pNew); 1948 AssertLogRelMsgReturn(RT_SUCCESS(rc), 1949 ("pgmR3PhysAllocateRamRange failed: GCPhysChunk=%RGp cPagesInChunk=%#RX32 (%s): %Rrc\n", 1950 GCPhysChunk, cPagesInChunk, pszDescChunk, rc), 1951 rc); 1719 1952 1720 1953 /* 1721 1954 * Ok, init and link the range. 1722 1955 */ 1723 rc = pgmR3PhysInitAndLinkRamRange(pVM, pNew, GCPhys, GCPhys + ((RTGCPHYS)cRamPages << GUEST_PAGE_SHIFT) - 1, 1724 R0PtrChunk, PGM_RAM_RANGE_FLAGS_FLOATING, pszDescChunk, *ppPrev); 1725 if (RT_SUCCESS(rc)) 1726 *ppPrev = pNew; 1727 1728 if (RT_FAILURE(rc)) 1729 SUPR3PageFreeEx(pvChunk, cChunkPages); 1730 } 1731 1732 RTMemTmpFree(paChunkPages); 1956 rc = pgmR3PhysInitAndLinkRamRange(pVM, pNew, GCPhysChunk, 1957 GCPhysChunk + ((RTGCPHYS)cPagesInChunk << GUEST_PAGE_SHIFT) - 1U, 1958 pszDescChunk, &idxLookup); 1959 AssertLogRelMsgReturn(RT_SUCCESS(rc), 1960 ("pgmR3PhysInitAndLinkRamRange failed: GCPhysChunk=%RGp cPagesInChunk=%#RX32 (%s): %Rrc\n", 1961 GCPhysChunk, cPagesInChunk, pszDescChunk, rc), 1962 rc); 1963 1964 /* advance */ 1965 GCPhysChunk += (RTGCPHYS)cPagesInChunk << GUEST_PAGE_SHIFT; 1966 cPagesLeft -= cPagesInChunk; 1967 idxChunk++; 1968 } 1969 1733 1970 return rc; 1734 1971 } … … 1738 1975 * Sets up a range RAM. 1739 1976 * 1740 * This will check for conflicting registrations, make a resource 1741 * reservation for the memory (with GMM), and setup the per-page1742 * tracking structures(PGMPAGE).1977 * This will check for conflicting registrations, make a resource reservation 1978 * for the memory (with GMM), and setup the per-page tracking structures 1979 * (PGMPAGE). 1743 1980 * 1744 1981 * @returns VBox status code. … … 1750 1987 VMMR3DECL(int) PGMR3PhysRegisterRam(PVM pVM, RTGCPHYS GCPhys, RTGCPHYS cb, const char *pszDesc) 1751 1988 { 1752 /*1989 /* 1753 1990 * Validate input. 
1754 1991 */ 1755 1992 Log(("PGMR3PhysRegisterRam: GCPhys=%RGp cb=%RGp pszDesc=%s\n", GCPhys, cb, pszDesc)); 1756 1993 AssertReturn(RT_ALIGN_T(GCPhys, GUEST_PAGE_SIZE, RTGCPHYS) == GCPhys, VERR_INVALID_PARAMETER); 1757 AssertReturn(RT_ALIGN_T(cb, GUEST_PAGE_SIZE, RTGCPHYS) == cb,VERR_INVALID_PARAMETER);1994 AssertReturn(RT_ALIGN_T(cb, GUEST_PAGE_SIZE, RTGCPHYS) == cb, VERR_INVALID_PARAMETER); 1758 1995 AssertReturn(cb > 0, VERR_INVALID_PARAMETER); 1759 RTGCPHYS GCPhysLast = GCPhys + (cb - 1);1996 RTGCPHYS const GCPhysLast = GCPhys + (cb - 1); 1760 1997 AssertMsgReturn(GCPhysLast > GCPhys, ("The range wraps! GCPhys=%RGp cb=%RGp\n", GCPhys, cb), VERR_INVALID_PARAMETER); 1761 1998 AssertPtrReturn(pszDesc, VERR_INVALID_POINTER); 1762 VM_ASSERT_EMT_RETURN(pVM, VERR_VM_THREAD_NOT_EMT); 1999 PVMCPU const pVCpu = VMMGetCpu(pVM); 2000 AssertReturn(pVCpu, VERR_VM_THREAD_NOT_EMT); 2001 AssertReturn(pVCpu->idCpu == 0, VERR_VM_THREAD_NOT_EMT); 2002 2003 /* 2004 * Calculate the number of RAM ranges required. 2005 * See also pgmPhysMmio2CalcChunkCount. 2006 */ 2007 uint32_t const cPagesPerChunk = PGM_MAX_PAGES_PER_RAM_RANGE; 2008 uint32_t const cRamRanges = (uint32_t)(((cb >> GUEST_PAGE_SHIFT) + cPagesPerChunk - 1) / cPagesPerChunk); 2009 AssertLogRelMsgReturn(cRamRanges * (RTGCPHYS)cPagesPerChunk * GUEST_PAGE_SIZE >= cb, 2010 ("cb=%RGp cRamRanges=%#RX32 cPagesPerChunk=%#RX32\n", cb, cRamRanges, cPagesPerChunk), 2011 VERR_OUT_OF_RANGE); 1763 2012 1764 2013 PGM_LOCK_VOID(pVM); 1765 2014 1766 /* 1767 * Find range location and check for conflicts. 1768 */ 1769 PPGMRAMRANGE pPrev = NULL; 1770 PPGMRAMRANGE pRam = pVM->pgm.s.pRamRangesXR3; 1771 while (pRam && GCPhysLast >= pRam->GCPhys) 1772 { 1773 AssertLogRelMsgReturnStmt( GCPhysLast < pRam->GCPhys 1774 || GCPhys > pRam->GCPhysLast, 1775 ("%RGp-%RGp (%s) conflicts with existing %RGp-%RGp (%s)\n", 1776 GCPhys, GCPhysLast, pszDesc, pRam->GCPhys, pRam->GCPhysLast, pRam->pszDesc), 1777 PGM_UNLOCK(pVM), VERR_PGM_RAM_CONFLICT); 1778 1779 /* next */ 1780 pPrev = pRam; 1781 pRam = pRam->pNextR3; 1782 } 1783 1784 /* 1785 * Register it with GMM (the API bitches). 1786 */ 1787 const RTGCPHYS cPages = cb >> GUEST_PAGE_SHIFT; 1788 int rc = MMR3IncreaseBaseReservation(pVM, cPages); 1789 if (RT_FAILURE(rc)) 1790 { 1791 PGM_UNLOCK(pVM); 1792 return rc; 1793 } 1794 1795 if ( GCPhys >= _4G 1796 && cPages > 256) 1797 { 1798 /* 1799 * The PGMRAMRANGE structures for the high memory can get very big. 1800 * There used to be some limitations on SUPR3PageAllocEx allocation 1801 * sizes, so traditionally we limited this to 16MB chunks. These days 1802 * we do ~64 MB chunks each covering 16GB of guest RAM, making sure 1803 * each range is a multiple of 1GB to enable eager hosts to use 1GB 1804 * pages in NEM mode. 1805 * 1806 * See also pgmR3PhysMmio2CalcChunkCount. 1807 */ 1808 uint32_t const cPagesPerChunk = _4M; 1809 Assert(RT_ALIGN_32(cPagesPerChunk, X86_PD_PAE_SHIFT - X86_PAGE_SHIFT)); /* NEM large page requirement: 1GB pages. 
*/ 1810 1811 RTGCPHYS cPagesLeft = cPages; 1812 RTGCPHYS GCPhysChunk = GCPhys; 1813 uint32_t iChunk = 0; 1814 while (cPagesLeft > 0) 1815 { 1816 uint32_t cPagesInChunk = cPagesLeft; 1817 if (cPagesInChunk > cPagesPerChunk) 1818 cPagesInChunk = cPagesPerChunk; 1819 1820 rc = pgmR3PhysRegisterHighRamChunk(pVM, GCPhysChunk, cPagesInChunk, iChunk, pszDesc, &pPrev); 1821 AssertRCReturn(rc, rc); 1822 1823 /* advance */ 1824 GCPhysChunk += (RTGCPHYS)cPagesInChunk << GUEST_PAGE_SHIFT; 1825 cPagesLeft -= cPagesInChunk; 1826 iChunk++; 1827 } 1828 } 1829 else 1830 { 1831 /* 1832 * Allocate, initialize and link the new RAM range. 1833 */ 1834 const size_t cbRamRange = RT_UOFFSETOF_DYN(PGMRAMRANGE, aPages[cPages]); 1835 PPGMRAMRANGE pNew = NULL; 1836 RTR0PTR pNewR0 = NIL_RTR0PTR; 1837 rc = SUPR3PageAllocEx(RT_ALIGN_Z(cbRamRange, HOST_PAGE_SIZE) >> HOST_PAGE_SHIFT, 0 /*fFlags*/, 1838 (void **)&pNew, &pNewR0, NULL /*paPages*/); 1839 AssertLogRelMsgRCReturn(rc, ("rc=%Rrc cbRamRange=%zu\n", rc, cbRamRange), rc); 1840 1841 rc = pgmR3PhysInitAndLinkRamRange(pVM, pNew, GCPhys, GCPhysLast, pNewR0, 0 /*fFlags*/, pszDesc, pPrev); 1842 AssertLogRelMsgRCReturn(rc, ("rc=%Rrc cbRamRange=%zu\n", rc, cbRamRange), rc); 1843 } 1844 pgmPhysInvalidatePageMapTLB(pVM); 2015 int rc = pgmR3PhysRegisterRamWorker(pVM, pVCpu, GCPhys, cb, pszDesc, cRamRanges, GCPhysLast); 2016 #ifdef VBOX_STRICT 2017 pgmPhysAssertRamRangesLocked(pVM, false /*fInUpdate*/, false /*fRamRelaxed*/); 2018 #endif 1845 2019 1846 2020 PGM_UNLOCK(pVM); … … 1874 2048 uint64_t NanoTS = RTTimeNanoTS(); 1875 2049 PGM_LOCK_VOID(pVM); 1876 for (PPGMRAMRANGE pRam = pVM->pgm.s.pRamRangesXR3; pRam; pRam = pRam->pNextR3) 1877 { 2050 uint32_t const cLookupEntries = RT_MIN(pVM->pgm.s.RamRangeUnion.cLookupEntries, RT_ELEMENTS(pVM->pgm.s.aRamRangeLookup)); 2051 for (uint32_t idxLookup = 0; idxLookup < cLookupEntries; idxLookup++) 2052 { 2053 uint32_t const idRamRange = PGMRAMRANGELOOKUPENTRY_GET_ID(pVM->pgm.s.aRamRangeLookup[idxLookup]); 2054 AssertContinue(idRamRange < RT_ELEMENTS(pVM->pgm.s.apRamRanges)); 2055 PPGMRAMRANGE const pRam = pVM->pgm.s.apRamRanges[idRamRange]; 2056 AssertContinue(pRam); 2057 1878 2058 PPGMPAGE pPage = &pRam->aPages[0]; 1879 2059 RTGCPHYS GCPhys = pRam->GCPhys; … … 1936 2116 * Walk the ram ranges. 1937 2117 */ 1938 for (PPGMRAMRANGE pRam = pVM->pgm.s.pRamRangesXR3; pRam; pRam = pRam->pNextR3) 1939 { 2118 uint32_t const cLookupEntries = RT_MIN(pVM->pgm.s.RamRangeUnion.cLookupEntries, RT_ELEMENTS(pVM->pgm.s.aRamRangeLookup)); 2119 for (uint32_t idxLookup = 0; idxLookup < cLookupEntries; idxLookup++) 2120 { 2121 uint32_t const idRamRange = PGMRAMRANGELOOKUPENTRY_GET_ID(pVM->pgm.s.aRamRangeLookup[idxLookup]); 2122 AssertContinue(idRamRange < RT_ELEMENTS(pVM->pgm.s.apRamRanges)); 2123 PPGMRAMRANGE const pRam = pVM->pgm.s.apRamRanges[idRamRange]; 2124 AssertContinue(pRam); 2125 1940 2126 uint32_t iPage = pRam->cb >> GUEST_PAGE_SHIFT; 1941 2127 AssertMsg(((RTGCPHYS)iPage << GUEST_PAGE_SHIFT) == pRam->cb, … … 2035 2221 * Walk the ram ranges. 
2036 2222 */ 2037 for (PPGMRAMRANGE pRam = pVM->pgm.s.pRamRangesXR3; pRam; pRam = pRam->pNextR3) 2038 { 2223 uint32_t const idRamRangeMax = RT_MIN(pVM->pgm.s.idRamRangeMax, RT_ELEMENTS(pVM->pgm.s.apRamRanges) - 1U); 2224 for (uint32_t idRamRange = 0; idRamRange <= idRamRangeMax; idRamRange++) 2225 { 2226 PPGMRAMRANGE const pRam = pVM->pgm.s.apRamRanges[idRamRange]; 2227 Assert(pRam || idRamRange == 0); 2228 if (!pRam) continue; 2229 Assert(pRam->idRange == idRamRange); 2230 2039 2231 uint32_t iPage = pRam->cb >> GUEST_PAGE_SHIFT; 2040 2232 AssertMsg(((RTGCPHYS)iPage << GUEST_PAGE_SHIFT) == pRam->cb, ("%RGp %RGp\n", (RTGCPHYS)iPage << GUEST_PAGE_SHIFT, pRam->cb)); … … 2146 2338 } /* for each page */ 2147 2339 } 2148 2149 2340 } 2150 2341 … … 2214 2405 * Walk the ram ranges. 2215 2406 */ 2216 for (PPGMRAMRANGE pRam = pVM->pgm.s.pRamRangesXR3; pRam; pRam = pRam->pNextR3) 2217 { 2407 uint32_t const idRamRangeMax = RT_MIN(pVM->pgm.s.idRamRangeMax, RT_ELEMENTS(pVM->pgm.s.apRamRanges) - 1U); 2408 for (uint32_t idRamRange = 0; idRamRange <= idRamRangeMax; idRamRange++) 2409 { 2410 PPGMRAMRANGE const pRam = pVM->pgm.s.apRamRanges[idRamRange]; 2411 Assert(pRam || idRamRange == 0); 2412 if (!pRam) continue; 2413 Assert(pRam->idRange == idRamRange); 2414 2218 2415 uint32_t iPage = pRam->cb >> GUEST_PAGE_SHIFT; 2219 2416 AssertMsg(((RTGCPHYS)iPage << GUEST_PAGE_SHIFT) == pRam->cb, ("%RGp %RGp\n", (RTGCPHYS)iPage << GUEST_PAGE_SHIFT, pRam->cb)); … … 2267 2464 2268 2465 /** 2269 * This is the interface IOM is using to register an MMIO region. 2270 * 2271 * It will check for conflicts and ensure that a RAM range structure 2272 * is present before calling the PGMR3HandlerPhysicalRegister API to 2273 * register the callbacks. 2466 * This is the interface IOM is using to register an MMIO region (unmapped). 2467 * 2274 2468 * 2275 2469 * @returns VBox status code. … … 2277 2471 * @param pVM The cross context VM structure. 2278 2472 * @param pVCpu The cross context virtual CPU structure of the calling EMT. 2279 * @param GCPhys The start of the MMIO region.2280 2473 * @param cb The size of the MMIO region. 2281 * @param hType The physical access handler type registration.2282 * @param uUser The user argument.2283 2474 * @param pszDesc The description of the MMIO region. 2284 * @thread EMT(pVCpu) 2285 */ 2286 VMMR3_INT_DECL(int) PGMR3PhysMmioRegister(PVM pVM, PVMCPU pVCpu, RTGCPHYS GCPhys, RTGCPHYS cb, PGMPHYSHANDLERTYPE hType, 2287 uint64_t uUser, const char *pszDesc) 2288 { 2289 /* 2290 * Assert on some assumption. 2291 */ 2292 VMCPU_ASSERT_EMT(pVCpu); 2475 * @param pidRamRange Where to return the RAM range ID for the MMIO region 2476 * on success. 2477 * @thread EMT(0) 2478 */ 2479 VMMR3_INT_DECL(int) PGMR3PhysMmioRegister(PVM pVM, PVMCPU pVCpu, RTGCPHYS cb, const char *pszDesc, uint16_t *pidRamRange) 2480 { 2481 /* 2482 * Assert assumptions. 
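/* Aside: the rewritten walkers iterate in one of two ways - by lookup-table order
   (mapped ranges only, sorted by guest address) or by range ID over the pointer array,
   skipping unused slots. A compact model of both styles with invented, cut-down
   structures (aidLookup stands in for the packed GCPhysFirstAndId lookup entries). */
#include <stdint.h>

#define MAX_RANGES 8

typedef struct RANGE { uint64_t uFirst; uint64_t cb; } RANGE;

typedef struct VMMODEL
{
    uint32_t  cLookupEntries;               /* number of currently mapped ranges */
    uint32_t  aidLookup[MAX_RANGES];        /* IDs of mapped ranges, sorted by guest address */
    uint32_t  idRangeMax;                   /* highest allocated range ID */
    RANGE    *apRanges[MAX_RANGES + 1];     /* indexed by range ID; slot 0 stays unused */
} VMMODEL;

/* Style 1: mapped ranges only, in guest-address order, via the lookup table. */
uint64_t sumMappedBytes(VMMODEL const *pVM)
{
    uint64_t cb = 0;
    for (uint32_t idxLookup = 0; idxLookup < pVM->cLookupEntries; idxLookup++)
    {
        RANGE const *pRange = pVM->apRanges[pVM->aidLookup[idxLookup]];
        if (pRange)
            cb += pRange->cb;
    }
    return cb;
}

/* Style 2: every allocated range, mapped or not, by walking the ID-indexed array. */
uint64_t sumAllBytes(VMMODEL const *pVM)
{
    uint64_t cb = 0;
    for (uint32_t idRange = 1; idRange <= pVM->idRangeMax && idRange <= MAX_RANGES; idRange++)
        if (pVM->apRanges[idRange])
            cb += pVM->apRanges[idRange]->cb;
    return cb;
}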
2483 */ 2484 AssertPtrReturn(pidRamRange, VERR_INVALID_POINTER); 2485 *pidRamRange = UINT16_MAX; 2486 AssertReturn(pVCpu == VMMGetCpu(pVM) && pVCpu->idCpu == 0, VERR_VM_THREAD_NOT_EMT); 2487 VM_ASSERT_STATE_RETURN(pVM, VMSTATE_CREATING, VERR_VM_INVALID_VM_STATE); 2488 /// @todo AssertReturn(!pVM->pgm.s.fRamRangesFrozen, VERR_WRONG_ORDER); 2489 AssertReturn(cb <= ((RTGCPHYS)PGM_MAX_PAGES_PER_RAM_RANGE << GUEST_PAGE_SHIFT), VERR_OUT_OF_RANGE); 2293 2490 AssertReturn(!(cb & GUEST_PAGE_OFFSET_MASK), VERR_INVALID_PARAMETER); 2294 AssertReturn(!(GCPhys & GUEST_PAGE_OFFSET_MASK), VERR_INVALID_PARAMETER);2295 2491 AssertPtrReturn(pszDesc, VERR_INVALID_POINTER); 2296 AssertReturn(*pszDesc, VERR_INVALID_PARAMETER); 2297 #ifdef VBOX_STRICT 2298 PCPGMPHYSHANDLERTYPEINT pType = pgmHandlerPhysicalTypeHandleToPtr(pVM, hType); 2299 Assert(pType); 2300 Assert(pType->enmKind == PGMPHYSHANDLERKIND_MMIO); 2301 #endif 2302 2492 AssertReturn(*pszDesc != '\0', VERR_INVALID_POINTER); 2493 2494 /* 2495 * Take the PGM lock and allocate an ad-hoc MMIO RAM range. 2496 */ 2303 2497 int rc = PGM_LOCK(pVM); 2304 2498 AssertRCReturn(rc, rc); 2305 2499 2306 /* 2307 * Make sure there's a RAM range structure for the region. 2308 */ 2309 RTGCPHYS GCPhysLast = GCPhys + (cb - 1); 2310 bool fRamExists = false; 2311 PPGMRAMRANGE pRamPrev = NULL; 2312 PPGMRAMRANGE pRam = pVM->pgm.s.pRamRangesXR3; 2313 while (pRam && GCPhysLast >= pRam->GCPhys) 2314 { 2315 if ( GCPhysLast >= pRam->GCPhys 2316 && GCPhys <= pRam->GCPhysLast) 2317 { 2318 /* Simplification: all within the same range. */ 2319 AssertLogRelMsgReturnStmt( GCPhys >= pRam->GCPhys 2320 && GCPhysLast <= pRam->GCPhysLast, 2321 ("%RGp-%RGp (MMIO/%s) falls partly outside %RGp-%RGp (%s)\n", 2322 GCPhys, GCPhysLast, pszDesc, 2323 pRam->GCPhys, pRam->GCPhysLast, pRam->pszDesc), 2324 PGM_UNLOCK(pVM), 2325 VERR_PGM_RAM_CONFLICT); 2326 2327 /* Check that it's all RAM or MMIO pages. */ 2328 PCPGMPAGE pPage = &pRam->aPages[(GCPhys - pRam->GCPhys) >> GUEST_PAGE_SHIFT]; 2329 uint32_t cLeft = cb >> GUEST_PAGE_SHIFT; 2330 while (cLeft-- > 0) 2331 { 2332 AssertLogRelMsgReturnStmt( PGM_PAGE_GET_TYPE(pPage) == PGMPAGETYPE_RAM 2333 || PGM_PAGE_GET_TYPE(pPage) == PGMPAGETYPE_MMIO, 2334 ("%RGp-%RGp (MMIO/%s): %RGp is not a RAM or MMIO page - type=%d desc=%s\n", 2335 GCPhys, GCPhysLast, pszDesc, pRam->GCPhys, PGM_PAGE_GET_TYPE(pPage), pRam->pszDesc), 2336 PGM_UNLOCK(pVM), 2337 VERR_PGM_RAM_CONFLICT); 2338 pPage++; 2339 } 2340 2341 /* Looks good. */ 2342 fRamExists = true; 2343 break; 2344 } 2345 2346 /* next */ 2347 pRamPrev = pRam; 2348 pRam = pRam->pNextR3; 2349 } 2350 PPGMRAMRANGE pNew; 2351 if (fRamExists) 2352 { 2353 pNew = NULL; 2500 uint32_t const cPages = cb >> GUEST_PAGE_SHIFT; 2501 PPGMRAMRANGE pNew = NULL; 2502 rc = pgmR3PhysAllocateRamRange(pVM, pVCpu, cPages, PGM_RAM_RANGE_FLAGS_AD_HOC_MMIO, &pNew); 2503 AssertLogRelMsg(RT_SUCCESS(rc), ("pgmR3PhysAllocateRamRange failed: cPages=%#RX32 (%s): %Rrc\n", cPages, pszDesc, rc)); 2504 if (RT_SUCCESS(rc)) 2505 { 2506 /* Initialize the range. */ 2507 pNew->pszDesc = pszDesc; 2508 pNew->uNemRange = UINT32_MAX; 2509 pNew->pbR3 = NULL; 2510 pNew->paLSPages = NULL; 2511 Assert(pNew->fFlags == PGM_RAM_RANGE_FLAGS_AD_HOC_MMIO && pNew->cb == cb); 2512 2513 uint32_t iPage = cPages; 2514 while (iPage-- > 0) 2515 PGM_PAGE_INIT_ZERO(&pNew->aPages[iPage], pVM, PGMPAGETYPE_MMIO); 2516 Assert(PGM_PAGE_GET_TYPE(&pNew->aPages[0]) == PGMPAGETYPE_MMIO); 2517 2518 /* update the page count stats. 
*/ 2519 pVM->pgm.s.cPureMmioPages += cPages; 2520 pVM->pgm.s.cAllPages += cPages; 2521 2522 /* 2523 * Set the return value, release lock and return to IOM. 2524 */ 2525 *pidRamRange = pNew->idRange; 2526 } 2527 2528 PGM_UNLOCK(pVM); 2529 return rc; 2530 } 2531 2532 2533 /** 2534 * Worker for PGMR3PhysMmioMap that's called owning the lock. 2535 */ 2536 static int pgmR3PhysMmioMapLocked(PVM pVM, PVMCPU pVCpu, RTGCPHYS const GCPhys, RTGCPHYS const cb, RTGCPHYS const GCPhysLast, 2537 PPGMRAMRANGE const pMmioRamRange, PGMPHYSHANDLERTYPE const hType, uint64_t const uUser) 2538 { 2539 /* Check that the range isn't mapped already. */ 2540 AssertLogRelMsgReturn(pMmioRamRange->GCPhys == NIL_RTGCPHYS, 2541 ("desired %RGp mapping for '%s' - already mapped at %RGp!\n", 2542 GCPhys, pMmioRamRange->pszDesc, pMmioRamRange->GCPhys), 2543 VERR_ALREADY_EXISTS); 2544 2545 /* 2546 * Now, check if this falls into a regular RAM range or if we should use 2547 * the ad-hoc one (idRamRange). 2548 */ 2549 int rc; 2550 uint32_t idxInsert = UINT32_MAX; 2551 PPGMRAMRANGE const pOverlappingRange = pgmR3PhysRamRangeFindOverlapping(pVM, GCPhys, GCPhysLast, &idxInsert); 2552 if (pOverlappingRange) 2553 { 2554 /* Simplification: all within the same range. */ 2555 AssertLogRelMsgReturn( GCPhys >= pOverlappingRange->GCPhys 2556 && GCPhysLast <= pOverlappingRange->GCPhysLast, 2557 ("%RGp-%RGp (MMIO/%s) falls partly outside %RGp-%RGp (%s)\n", 2558 GCPhys, GCPhysLast, pMmioRamRange->pszDesc, 2559 pOverlappingRange->GCPhys, pOverlappingRange->GCPhysLast, pOverlappingRange->pszDesc), 2560 VERR_PGM_RAM_CONFLICT); 2561 2562 /* Check that is isn't an ad hoc range, but a real RAM range. */ 2563 AssertLogRelMsgReturn(!PGM_RAM_RANGE_IS_AD_HOC(pOverlappingRange), 2564 ("%RGp-%RGp (MMIO/%s) mapping attempt in non-RAM range: %RGp-%RGp (%s)\n", 2565 GCPhys, GCPhysLast, pMmioRamRange->pszDesc, 2566 pOverlappingRange->GCPhys, pOverlappingRange->GCPhysLast, pOverlappingRange->pszDesc), 2567 VERR_PGM_RAM_CONFLICT); 2568 2569 /* Check that it's all RAM or MMIO pages. */ 2570 PCPGMPAGE pPage = &pOverlappingRange->aPages[(GCPhys - pOverlappingRange->GCPhys) >> GUEST_PAGE_SHIFT]; 2571 uint32_t cLeft = cb >> GUEST_PAGE_SHIFT; 2572 while (cLeft-- > 0) 2573 { 2574 AssertLogRelMsgReturn( PGM_PAGE_GET_TYPE(pPage) == PGMPAGETYPE_RAM 2575 || PGM_PAGE_GET_TYPE(pPage) == PGMPAGETYPE_MMIO, /** @todo MMIO type isn't right */ 2576 ("%RGp-%RGp (MMIO/%s): %RGp is not a RAM or MMIO page - type=%d desc=%s\n", 2577 GCPhys, GCPhysLast, pMmioRamRange->pszDesc, pOverlappingRange->GCPhys, 2578 PGM_PAGE_GET_TYPE(pPage), pOverlappingRange->pszDesc), 2579 VERR_PGM_RAM_CONFLICT); 2580 pPage++; 2581 } 2354 2582 2355 2583 /* … … 2358 2586 * for PCI memory, but we're doing the same thing for MMIO2 pages. 2359 2587 */ 2360 rc = pgmR3PhysFreePageRange(pVM, p Ram, GCPhys, GCPhysLast, NULL);2361 AssertRCReturn Stmt(rc, PGM_UNLOCK(pVM), rc);2588 rc = pgmR3PhysFreePageRange(pVM, pOverlappingRange, GCPhys, GCPhysLast, NULL); 2589 AssertRCReturn(rc, rc); 2362 2590 2363 2591 /* Force a PGM pool flush as guest ram references have been changed. */ … … 2371 2599 { 2372 2600 /* 2373 * No RAM range, insert an ad hoc one.2601 * No RAM range, use the ad hoc one (idRamRange). 2374 2602 * 2375 2603 * Note that we don't have to tell REM about this range because 2376 2604 * PGMHandlerPhysicalRegisterEx will do that for us. 2377 2605 */ 2378 Log(("PGMR3PhysMmioRegister: Adding ad hoc MMIO range for %RGp-%RGp %s\n", GCPhys, GCPhysLast, pszDesc)); 2379 2380 /* Alloc. 
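/* Aside: a toy model of the two-phase MMIO lifecycle introduced above - the range is
   created once at VM construction and handed back as an ID, while mapping and
   unmapping at a guest address happen later and possibly repeatedly (for instance when
   the guest reprograms a PCI BAR). Everything below is invented for illustration; it
   is not the real PGMR3PhysMmioRegister/Map/Unmap interface. */
#include <assert.h>
#include <stdint.h>

#define NIL_ADDR UINT64_MAX

typedef struct MMIORANGE { uint64_t cb; uint64_t GCPhysMapped; } MMIORANGE;

static MMIORANGE g_aRanges[9];      /* slot 0 unused, IDs are 1-based */
static uint16_t  g_cRanges = 0;

static uint16_t mmioRegister(uint64_t cb)               /* construction time: allocate the range */
{
    uint16_t const idRange = ++g_cRanges;
    assert(idRange < sizeof(g_aRanges) / sizeof(g_aRanges[0]));
    g_aRanges[idRange].cb           = cb;
    g_aRanges[idRange].GCPhysMapped = NIL_ADDR;
    return idRange;
}

static void mmioMap(uint16_t idRange, uint64_t GCPhys)  /* runtime: insert at a guest address */
{
    assert(g_aRanges[idRange].GCPhysMapped == NIL_ADDR);
    g_aRanges[idRange].GCPhysMapped = GCPhys;
}

static void mmioUnmap(uint16_t idRange)                 /* runtime: remove again */
{
    assert(g_aRanges[idRange].GCPhysMapped != NIL_ADDR);
    g_aRanges[idRange].GCPhysMapped = NIL_ADDR;
}

int main(void)
{
    uint16_t const idRange = mmioRegister(0x4000);      /* once, while constructing the device */
    mmioMap(idRange, 0xf0000000u);                      /* BAR programmed */
    mmioUnmap(idRange);                                 /* BAR disabled or moved */
    mmioMap(idRange, 0xf8000000u);                      /* same range, new address */
    return 0;
}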
*/ 2381 const uint32_t cPages = cb >> GUEST_PAGE_SHIFT; 2382 const size_t cbRamRange = RT_UOFFSETOF_DYN(PGMRAMRANGE, aPages[cPages]); 2383 const size_t cRangePages = RT_ALIGN_Z(cbRamRange, HOST_PAGE_SIZE) >> HOST_PAGE_SHIFT; 2384 RTR0PTR pNewR0 = NIL_RTR0PTR; 2385 rc = SUPR3PageAllocEx(cRangePages, 0 /*fFlags*/, (void **)&pNew, &pNewR0, NULL /*paPages*/); 2386 AssertLogRelMsgRCReturnStmt(rc, ("cbRamRange=%zu\n", cbRamRange), PGM_UNLOCK(pVM), rc); 2606 AssertLogRelReturn(idxInsert <= pVM->pgm.s.RamRangeUnion.cLookupEntries, VERR_INTERNAL_ERROR_4); 2607 Log(("PGMR3PhysMmioMap: Inserting ad hoc MMIO range #%x for %RGp-%RGp %s\n", 2608 pMmioRamRange->idRange, GCPhys, GCPhysLast, pMmioRamRange->pszDesc)); 2609 2610 Assert(PGM_PAGE_GET_TYPE(&pMmioRamRange->aPages[0]) == PGMPAGETYPE_MMIO); 2611 2612 /* We ASSUME that all the pages in the ad-hoc range are in the proper 2613 state and all that and that we don't need to re-initialize them here. */ 2387 2614 2388 2615 #ifdef VBOX_WITH_NATIVE_NEM 2389 2616 /* Notify NEM. */ 2390 uint8_t u2State = 0; /* (must have valid state as there can't be anything to preserve) */2391 2617 if (VM_IS_NEM_ENABLED(pVM)) 2392 2618 { 2393 rc = NEMR3NotifyPhysMmioExMapEarly(pVM, GCPhys, cPages << GUEST_PAGE_SHIFT, 0 /*fFlags*/, NULL, NULL,2394 &u2State, &pNew->uNemRange);2395 AssertLogRelRCReturn Stmt(rc, SUPR3PageFreeEx(pNew, cRangePages), rc);2396 } 2397 #endif 2398 2399 /* Initialize the range. */2400 pNew->pSelfR0 = pNewR0;2401 pNew->GCPhys = GCPhys; 2402 pNew->GCPhysLast = GCPhysLast;2403 pNew->cb = cb;2404 pNew->pszDesc = pszDesc;2405 pNew->fFlags = PGM_RAM_RANGE_FLAGS_AD_HOC_MMIO;2406 pNew->pvR3 = NULL;2407 pNew->paLSPages = NULL;2408 2409 uint32_t iPage = cPages;2410 while (iPage-- > 0)2411 {2412 PGM_PAGE_INIT_ZERO(&pNew->aPages[iPage], pVM, PGMPAGETYPE_MMIO);2619 uint8_t u2State = 0; /* (must have valid state as there can't be anything to preserve) */ 2620 rc = NEMR3NotifyPhysMmioExMapEarly(pVM, GCPhys, cb, 0 /*fFlags*/, NULL, NULL, &u2State, &pMmioRamRange->uNemRange); 2621 AssertLogRelRCReturn(rc, rc); 2622 2623 uint32_t iPage = cb >> GUEST_PAGE_SHIFT; 2624 while (iPage-- > 0) 2625 PGM_PAGE_SET_NEM_STATE(&pMmioRamRange->aPages[iPage], u2State); 2626 } 2627 #endif 2628 /* Insert it into the lookup table (may in theory fail). */ 2629 rc = pgmR3PhysRamRangeInsertLookup(pVM, pMmioRamRange, GCPhys, &idxInsert); 2630 } 2631 if (RT_SUCCESS(rc)) 2632 { 2633 /* 2634 * Register the access handler. 2635 */ 2636 rc = PGMHandlerPhysicalRegister(pVM, GCPhys, GCPhysLast, hType, uUser, pMmioRamRange->pszDesc); 2637 if (RT_SUCCESS(rc)) 2638 { 2413 2639 #ifdef VBOX_WITH_NATIVE_NEM 2414 PGM_PAGE_SET_NEM_STATE(&pNew->aPages[iPage], u2State); 2415 #endif 2416 } 2417 Assert(PGM_PAGE_GET_TYPE(&pNew->aPages[0]) == PGMPAGETYPE_MMIO); 2418 2419 /* update the page count stats. */ 2420 pVM->pgm.s.cPureMmioPages += cPages; 2421 pVM->pgm.s.cAllPages += cPages; 2422 2423 /* link it */ 2424 pgmR3PhysLinkRamRange(pVM, pNew, pRamPrev); 2425 } 2426 2427 /* 2428 * Register the access handler. 2429 */ 2430 rc = PGMHandlerPhysicalRegister(pVM, GCPhys, GCPhysLast, hType, uUser, pszDesc); 2640 /* Late NEM notification (currently not used by anyone). 
*/ 2641 if (VM_IS_NEM_ENABLED(pVM)) 2642 { 2643 if (pOverlappingRange) 2644 rc = NEMR3NotifyPhysMmioExMapLate(pVM, GCPhys, cb, NEM_NOTIFY_PHYS_MMIO_EX_F_REPLACE, 2645 pOverlappingRange->pbR3 + (uintptr_t)(GCPhys - pOverlappingRange->GCPhys), 2646 NULL /*pvMmio2*/, NULL /*puNemRange*/); 2647 else 2648 rc = NEMR3NotifyPhysMmioExMapLate(pVM, GCPhys, cb, 0 /*fFlags*/, NULL /*pvRam*/, NULL /*pvMmio2*/, 2649 &pMmioRamRange->uNemRange); 2650 AssertLogRelRC(rc); 2651 } 2652 if (RT_SUCCESS(rc)) 2653 #endif 2654 { 2655 pgmPhysInvalidatePageMapTLB(pVM); 2656 return VINF_SUCCESS; 2657 } 2658 2659 /* 2660 * Failed, so revert it all as best as we can (the memory content in 2661 * the overlapping case is gone). 2662 */ 2663 PGMHandlerPhysicalDeregister(pVM, GCPhys); 2664 } 2665 } 2666 2667 if (!pOverlappingRange) 2668 { 2669 #ifdef VBOX_WITH_NATIVE_NEM 2670 /* Notify NEM about the sudden removal of the RAM range we just told it about. */ 2671 NEMR3NotifyPhysMmioExUnmap(pVM, GCPhys, cb, 0 /*fFlags*/, NULL /*pvRam*/, NULL /*pvMmio2*/, 2672 NULL /*pu2State*/, &pMmioRamRange->uNemRange); 2673 #endif 2674 2675 /* Remove the ad hoc range from the lookup table. */ 2676 idxInsert -= 1; 2677 pgmR3PhysRamRangeRemoveLookup(pVM, pMmioRamRange, &idxInsert); 2678 } 2679 2680 pgmPhysInvalidatePageMapTLB(pVM); 2681 return rc; 2682 } 2683 2684 2685 /** 2686 * This is the interface IOM is using to map an MMIO region. 2687 * 2688 * It will check for conflicts and ensure that a RAM range structure 2689 * is present before calling the PGMR3HandlerPhysicalRegister API to 2690 * register the callbacks. 2691 * 2692 * @returns VBox status code. 2693 * 2694 * @param pVM The cross context VM structure. 2695 * @param pVCpu The cross context virtual CPU structure of the calling EMT. 2696 * @param GCPhys The start of the MMIO region. 2697 * @param cb The size of the MMIO region. 2698 * @param idRamRange The RAM range ID for the MMIO region as returned by 2699 * PGMR3PhysMmioRegister(). 2700 * @param hType The physical access handler type registration. 2701 * @param uUser The user argument. 2702 * @thread EMT(pVCpu) 2703 */ 2704 VMMR3_INT_DECL(int) PGMR3PhysMmioMap(PVM pVM, PVMCPU pVCpu, RTGCPHYS GCPhys, RTGCPHYS cb, uint16_t idRamRange, 2705 PGMPHYSHANDLERTYPE hType, uint64_t uUser) 2706 { 2707 /* 2708 * Assert on some assumption. 2709 */ 2710 VMCPU_ASSERT_EMT(pVCpu); 2711 AssertReturn(!(cb & GUEST_PAGE_OFFSET_MASK), VERR_INVALID_PARAMETER); 2712 AssertReturn(!(GCPhys & GUEST_PAGE_OFFSET_MASK), VERR_INVALID_PARAMETER); 2713 RTGCPHYS const GCPhysLast = GCPhys + cb - 1U; 2714 AssertReturn(GCPhysLast > GCPhys, VERR_INVALID_PARAMETER); 2715 #ifdef VBOX_STRICT 2716 PCPGMPHYSHANDLERTYPEINT pType = pgmHandlerPhysicalTypeHandleToPtr(pVM, hType); 2717 Assert(pType); 2718 Assert(pType->enmKind == PGMPHYSHANDLERKIND_MMIO); 2719 #endif 2720 AssertReturn(idRamRange <= pVM->pgm.s.idRamRangeMax && idRamRange > 0, VERR_INVALID_HANDLE); 2721 PPGMRAMRANGE const pMmioRamRange = pVM->pgm.s.apRamRanges[idRamRange]; 2722 AssertReturn(pMmioRamRange, VERR_INVALID_HANDLE); 2723 AssertReturn(pMmioRamRange->fFlags & PGM_RAM_RANGE_FLAGS_AD_HOC_MMIO, VERR_INVALID_HANDLE); 2724 AssertReturn(pMmioRamRange->cb == cb, VERR_OUT_OF_RANGE); 2725 2726 /* 2727 * Take the PGM lock and do the work. 
2728 */ 2729 int rc = PGM_LOCK(pVM); 2730 AssertRCReturn(rc, rc); 2731 2732 rc = pgmR3PhysMmioMapLocked(pVM, pVCpu, GCPhys, cb, GCPhysLast, pMmioRamRange, hType, uUser); 2733 #ifdef VBOX_STRICT 2734 pgmPhysAssertRamRangesLocked(pVM, false /*fInUpdate*/, false /*fRamRelaxed*/); 2735 #endif 2736 2737 PGM_UNLOCK(pVM); 2738 return rc; 2739 } 2740 2741 2742 /** 2743 * Worker for PGMR3PhysMmioUnmap that's called with the PGM lock held. 2744 */ 2745 static int pgmR3PhysMmioUnmapLocked(PVM pVM, PVMCPU pVCpu, RTGCPHYS const GCPhys, RTGCPHYS const cb, 2746 RTGCPHYS const GCPhysLast, PPGMRAMRANGE const pMmioRamRange) 2747 { 2748 /* 2749 * Lookup the RAM range containing the region to make sure it is actually mapped. 2750 */ 2751 uint32_t idxLookup = pgmR3PhysRamRangeFindOverlappingIndex(pVM, GCPhys, GCPhysLast); 2752 AssertLogRelMsgReturn(idxLookup < pVM->pgm.s.RamRangeUnion.cLookupEntries, 2753 ("MMIO range not found at %RGp LB %RGp! (%s)\n", GCPhys, cb, pMmioRamRange->pszDesc), 2754 VERR_NOT_FOUND); 2755 2756 uint32_t const idLookupRange = PGMRAMRANGELOOKUPENTRY_GET_ID(pVM->pgm.s.aRamRangeLookup[idxLookup]); 2757 AssertLogRelReturn(idLookupRange != 0 && idLookupRange <= pVM->pgm.s.idRamRangeMax, VERR_INTERNAL_ERROR_5); 2758 PPGMRAMRANGE const pLookupRange = pVM->pgm.s.apRamRanges[idLookupRange]; 2759 AssertLogRelReturn(pLookupRange, VERR_INTERNAL_ERROR_4); 2760 2761 AssertLogRelMsgReturn(pLookupRange == pMmioRamRange || !PGM_RAM_RANGE_IS_AD_HOC(pLookupRange), 2762 ("MMIO unmap mixup at %RGp LB %RGp (%s) vs %RGp LB %RGp (%s)\n", 2763 GCPhys, cb, pMmioRamRange->pszDesc, pLookupRange->GCPhys, pLookupRange->cb, pLookupRange->pszDesc), 2764 VERR_NOT_FOUND); 2765 2766 /* 2767 * Deregister the handler. This should reset any aliases, so an ad hoc 2768 * range will only contain MMIO type pages afterwards. 2769 */ 2770 int rc = PGMHandlerPhysicalDeregister(pVM, GCPhys); 2431 2771 if (RT_SUCCESS(rc)) 2432 2772 { 2773 if (pLookupRange != pMmioRamRange) 2774 { 2775 /* 2776 * Turn the pages back into RAM pages. 2777 */ 2778 Log(("pgmR3PhysMmioUnmapLocked: Reverting MMIO range %RGp-%RGp (%s) in %RGp-%RGp (%s) to RAM.\n", 2779 GCPhys, GCPhysLast, pMmioRamRange->pszDesc, 2780 pLookupRange->GCPhys, pLookupRange->GCPhysLast, pLookupRange->pszDesc)); 2781 2782 RTGCPHYS const offRange = GCPhys - pLookupRange->GCPhys; 2783 uint32_t iPage = offRange >> GUEST_PAGE_SHIFT; 2784 uint32_t cLeft = cb >> GUEST_PAGE_SHIFT; 2785 while (cLeft--) 2786 { 2787 PPGMPAGE pPage = &pLookupRange->aPages[iPage]; 2788 AssertMsg( (PGM_PAGE_IS_MMIO(pPage) && PGM_PAGE_IS_ZERO(pPage)) 2789 //|| PGM_PAGE_GET_TYPE(pPage) == PGMPAGETYPE_MMIO2_ALIAS_MMIO 2790 //|| PGM_PAGE_GET_TYPE(pPage) == PGMPAGETYPE_SPECIAL_ALIAS_MMIO 2791 , ("%RGp %R[pgmpage]\n", pLookupRange->GCPhys + ((RTGCPHYS)iPage << GUEST_PAGE_SHIFT), pPage)); 2792 /** @todo this isn't entirely correct, is it now... aliases must be converted 2793 * to zero pages as they won't be. however, shouldn't 2794 * PGMHandlerPhysicalDeregister deal with this already? */ 2795 if (PGM_PAGE_IS_MMIO_OR_ALIAS(pPage)) 2796 PGM_PAGE_SET_TYPE(pVM, pPage, PGMPAGETYPE_RAM); 2797 iPage++; 2798 } 2799 2433 2800 #ifdef VBOX_WITH_NATIVE_NEM 2434 /* Late NEM notification. */ 2435 if (VM_IS_NEM_ENABLED(pVM)) 2436 { 2437 uint32_t const fNemNotify = (fRamExists ? NEM_NOTIFY_PHYS_MMIO_EX_F_REPLACE : 0); 2438 rc = NEMR3NotifyPhysMmioExMapLate(pVM, GCPhys, GCPhysLast - GCPhys + 1, fNemNotify, 2439 fRamExists ? (uint8_t *)pRam->pvR3 + (uintptr_t)(GCPhys - pRam->GCPhys) : NULL, 2440 NULL, !fRamExists ? 
&pRam->uNemRange : NULL); 2441 AssertLogRelRCReturn(rc, rc); 2442 } 2443 #endif 2444 } 2445 /** @todo the phys handler failure handling isn't complete, esp. wrt NEM. */ 2446 else if (!fRamExists) 2447 { 2448 pVM->pgm.s.cPureMmioPages -= cb >> GUEST_PAGE_SHIFT; 2449 pVM->pgm.s.cAllPages -= cb >> GUEST_PAGE_SHIFT; 2450 2451 /* remove the ad hoc range. */ 2452 pgmR3PhysUnlinkRamRange2(pVM, pNew, pRamPrev); 2453 pNew->cb = pNew->GCPhys = pNew->GCPhysLast = NIL_RTGCPHYS; 2454 SUPR3PageFreeEx(pRam, RT_ALIGN_Z(RT_UOFFSETOF_DYN(PGMRAMRANGE, aPages[cb >> GUEST_PAGE_SHIFT]), 2455 HOST_PAGE_SIZE) >> HOST_PAGE_SHIFT); 2456 } 2801 /* Notify REM (failure will probably leave things in a non-working state). */ 2802 if (VM_IS_NEM_ENABLED(pVM)) 2803 { 2804 uint8_t u2State = UINT8_MAX; 2805 rc = NEMR3NotifyPhysMmioExUnmap(pVM, GCPhys, GCPhysLast - GCPhys + 1, NEM_NOTIFY_PHYS_MMIO_EX_F_REPLACE, 2806 pLookupRange->pbR3 ? pLookupRange->pbR3 + GCPhys - pLookupRange->GCPhys : NULL, 2807 NULL, &u2State, &pLookupRange->uNemRange); 2808 AssertLogRelRC(rc); 2809 /** @todo status code propagation here... This is likely fatal, right? */ 2810 if (u2State != UINT8_MAX) 2811 pgmPhysSetNemStateForPages(&pLookupRange->aPages[(GCPhys - pLookupRange->GCPhys) >> GUEST_PAGE_SHIFT], 2812 cb >> GUEST_PAGE_SHIFT, u2State); 2813 } 2814 #endif 2815 } 2816 else 2817 { 2818 /* 2819 * Unlink the ad hoc range. 2820 */ 2821 #ifdef VBOX_STRICT 2822 uint32_t iPage = cb >> GUEST_PAGE_SHIFT; 2823 while (iPage-- > 0) 2824 { 2825 PPGMPAGE const pPage = &pMmioRamRange->aPages[iPage]; 2826 Assert(PGM_PAGE_IS_MMIO(pPage)); 2827 } 2828 #endif 2829 2830 Log(("pgmR3PhysMmioUnmapLocked: Unmapping ad hoc MMIO range for %RGp-%RGp %s\n", 2831 GCPhys, GCPhysLast, pMmioRamRange->pszDesc)); 2832 2833 #ifdef VBOX_WITH_NATIVE_NEM 2834 if (VM_IS_NEM_ENABLED(pVM)) /* Notify REM before we unlink the range. */ 2835 { 2836 rc = NEMR3NotifyPhysMmioExUnmap(pVM, GCPhys, GCPhysLast - GCPhys + 1, 0 /*fFlags*/, 2837 NULL, NULL, NULL, &pMmioRamRange->uNemRange); 2838 AssertLogRelRCReturn(rc, rc); /* we're up the creek if this hits. */ 2839 } 2840 #endif 2841 2842 pgmR3PhysRamRangeRemoveLookup(pVM, pMmioRamRange, &idxLookup); 2843 } 2844 } 2845 2846 /* Force a PGM pool flush as guest ram references have been changed. */ 2847 /** @todo Not entirely SMP safe; assuming for now the guest takes care of 2848 * this internally (not touch mapped mmio while changing the mapping). */ 2849 pVCpu->pgm.s.fSyncFlags |= PGM_SYNC_CLEAR_PGM_POOL; 2850 VMCPU_FF_SET(pVCpu, VMCPU_FF_PGM_SYNC_CR3); 2851 2457 2852 pgmPhysInvalidatePageMapTLB(pVM); 2458 2459 PGM_UNLOCK(pVM); 2853 pgmPhysInvalidRamRangeTlbs(pVM); 2854 2460 2855 return rc; 2461 2856 } … … 2473 2868 * @param GCPhys The start of the MMIO region. 2474 2869 * @param cb The size of the MMIO region. 2870 * @param idRamRange The RAM range ID for the MMIO region as returned by 2871 * PGMR3PhysMmioRegister(). 2475 2872 * @thread EMT(pVCpu) 2476 2873 */ 2477 VMMR3_INT_DECL(int) PGMR3PhysMmioDeregister(PVM pVM, PVMCPU pVCpu, RTGCPHYS GCPhys, RTGCPHYS cb) 2478 { 2874 VMMR3_INT_DECL(int) PGMR3PhysMmioUnmap(PVM pVM, PVMCPU pVCpu, RTGCPHYS GCPhys, RTGCPHYS cb, uint16_t idRamRange) 2875 { 2876 /* 2877 * Input validation. 
2878 */ 2479 2879 VMCPU_ASSERT_EMT(pVCpu); 2480 2880 AssertReturn(!(cb & GUEST_PAGE_OFFSET_MASK), VERR_INVALID_PARAMETER); 2881 AssertReturn(!(GCPhys & GUEST_PAGE_OFFSET_MASK), VERR_INVALID_PARAMETER); 2882 RTGCPHYS const GCPhysLast = GCPhys + cb - 1U; 2883 AssertReturn(GCPhysLast > GCPhys, VERR_INVALID_PARAMETER); 2884 AssertReturn(idRamRange <= pVM->pgm.s.idRamRangeMax && idRamRange > 0, VERR_INVALID_HANDLE); 2885 PPGMRAMRANGE const pMmioRamRange = pVM->pgm.s.apRamRanges[idRamRange]; 2886 AssertReturn(pMmioRamRange, VERR_INVALID_HANDLE); 2887 AssertReturn(pMmioRamRange->fFlags & PGM_RAM_RANGE_FLAGS_AD_HOC_MMIO, VERR_INVALID_HANDLE); 2888 AssertReturn(pMmioRamRange->cb == cb, VERR_OUT_OF_RANGE); 2889 2890 /* 2891 * Take the PGM lock and do what's asked. 2892 */ 2481 2893 int rc = PGM_LOCK(pVM); 2482 2894 AssertRCReturn(rc, rc); 2483 2895 2484 /* 2485 * First deregister the handler, then check if we should remove the ram range. 2486 */ 2487 rc = PGMHandlerPhysicalDeregister(pVM, GCPhys); 2488 if (RT_SUCCESS(rc)) 2489 { 2490 RTGCPHYS GCPhysLast = GCPhys + (cb - 1); 2491 PPGMRAMRANGE pRamPrev = NULL; 2492 PPGMRAMRANGE pRam = pVM->pgm.s.pRamRangesXR3; 2493 while (pRam && GCPhysLast >= pRam->GCPhys) 2494 { 2495 /** @todo We're being a bit too careful here. rewrite. */ 2496 if ( GCPhysLast == pRam->GCPhysLast 2497 && GCPhys == pRam->GCPhys) 2498 { 2499 Assert(pRam->cb == cb); 2500 2501 /* 2502 * See if all the pages are dead MMIO pages. 2503 */ 2504 uint32_t const cGuestPages = cb >> GUEST_PAGE_SHIFT; 2505 bool fAllMMIO = true; 2506 uint32_t iPage = 0; 2507 uint32_t cLeft = cGuestPages; 2508 while (cLeft-- > 0) 2509 { 2510 PPGMPAGE pPage = &pRam->aPages[iPage]; 2511 if ( !PGM_PAGE_IS_MMIO_OR_ALIAS(pPage) 2512 /*|| not-out-of-action later */) 2513 { 2514 fAllMMIO = false; 2515 AssertMsgFailed(("%RGp %R[pgmpage]\n", pRam->GCPhys + ((RTGCPHYS)iPage << GUEST_PAGE_SHIFT), pPage)); 2516 break; 2517 } 2518 Assert( PGM_PAGE_IS_ZERO(pPage) 2519 || PGM_PAGE_GET_TYPE(pPage) == PGMPAGETYPE_MMIO2_ALIAS_MMIO 2520 || PGM_PAGE_GET_TYPE(pPage) == PGMPAGETYPE_SPECIAL_ALIAS_MMIO); 2521 iPage++; 2522 } 2523 if (fAllMMIO) 2524 { 2525 /* 2526 * Ad-hoc range, unlink and free it. 2527 */ 2528 Log(("PGMR3PhysMmioDeregister: Freeing ad hoc MMIO range for %RGp-%RGp %s\n", 2529 GCPhys, GCPhysLast, pRam->pszDesc)); 2530 /** @todo check the ad-hoc flags? */ 2531 2532 #ifdef VBOX_WITH_NATIVE_NEM 2533 if (VM_IS_NEM_ENABLED(pVM)) /* Notify REM before we unlink the range. */ 2534 { 2535 rc = NEMR3NotifyPhysMmioExUnmap(pVM, GCPhys, GCPhysLast - GCPhys + 1, 0 /*fFlags*/, 2536 NULL, NULL, NULL, &pRam->uNemRange); 2537 AssertLogRelRCReturn(rc, rc); 2538 } 2539 #endif 2540 2541 pVM->pgm.s.cAllPages -= cGuestPages; 2542 pVM->pgm.s.cPureMmioPages -= cGuestPages; 2543 2544 pgmR3PhysUnlinkRamRange2(pVM, pRam, pRamPrev); 2545 const uint32_t cPages = pRam->cb >> GUEST_PAGE_SHIFT; 2546 const size_t cbRamRange = RT_UOFFSETOF_DYN(PGMRAMRANGE, aPages[cPages]); 2547 pRam->cb = pRam->GCPhys = pRam->GCPhysLast = NIL_RTGCPHYS; 2548 SUPR3PageFreeEx(pRam, RT_ALIGN_Z(cbRamRange, HOST_PAGE_SIZE) >> HOST_PAGE_SHIFT); 2549 break; 2550 } 2551 } 2552 2553 /* 2554 * Range match? It will all be within one range (see PGMAllHandler.cpp). 2555 */ 2556 if ( GCPhysLast >= pRam->GCPhys 2557 && GCPhys <= pRam->GCPhysLast) 2558 { 2559 Assert(GCPhys >= pRam->GCPhys); 2560 Assert(GCPhysLast <= pRam->GCPhysLast); 2561 2562 /* 2563 * Turn the pages back into RAM pages. 
2564 */ 2565 uint32_t iPage = (GCPhys - pRam->GCPhys) >> GUEST_PAGE_SHIFT; 2566 uint32_t cLeft = cb >> GUEST_PAGE_SHIFT; 2567 while (cLeft--) 2568 { 2569 PPGMPAGE pPage = &pRam->aPages[iPage]; 2570 AssertMsg( (PGM_PAGE_IS_MMIO(pPage) && PGM_PAGE_IS_ZERO(pPage)) 2571 || PGM_PAGE_GET_TYPE(pPage) == PGMPAGETYPE_MMIO2_ALIAS_MMIO 2572 || PGM_PAGE_GET_TYPE(pPage) == PGMPAGETYPE_SPECIAL_ALIAS_MMIO, 2573 ("%RGp %R[pgmpage]\n", pRam->GCPhys + ((RTGCPHYS)iPage << GUEST_PAGE_SHIFT), pPage)); 2574 if (PGM_PAGE_IS_MMIO_OR_ALIAS(pPage)) 2575 PGM_PAGE_SET_TYPE(pVM, pPage, PGMPAGETYPE_RAM); 2576 iPage++; 2577 } 2578 2579 #ifdef VBOX_WITH_NATIVE_NEM 2580 /* Notify REM (failure will probably leave things in a non-working state). */ 2581 if (VM_IS_NEM_ENABLED(pVM)) 2582 { 2583 uint8_t u2State = UINT8_MAX; 2584 rc = NEMR3NotifyPhysMmioExUnmap(pVM, GCPhys, GCPhysLast - GCPhys + 1, NEM_NOTIFY_PHYS_MMIO_EX_F_REPLACE, 2585 pRam->pvR3 ? (uint8_t *)pRam->pvR3 + GCPhys - pRam->GCPhys : NULL, 2586 NULL, &u2State, &pRam->uNemRange); 2587 AssertLogRelRCReturn(rc, rc); 2588 if (u2State != UINT8_MAX) 2589 pgmPhysSetNemStateForPages(&pRam->aPages[(GCPhys - pRam->GCPhys) >> GUEST_PAGE_SHIFT], 2590 cb >> GUEST_PAGE_SHIFT, u2State); 2591 } 2592 #endif 2593 break; 2594 } 2595 2596 /* next */ 2597 pRamPrev = pRam; 2598 pRam = pRam->pNextR3; 2599 } 2600 } 2601 2602 /* Force a PGM pool flush as guest ram references have been changed. */ 2603 /** @todo Not entirely SMP safe; assuming for now the guest takes care of 2604 * this internally (not touch mapped mmio while changing the mapping). */ 2605 pVCpu->pgm.s.fSyncFlags |= PGM_SYNC_CLEAR_PGM_POOL; 2606 VMCPU_FF_SET(pVCpu, VMCPU_FF_PGM_SYNC_CR3); 2607 2608 pgmPhysInvalidatePageMapTLB(pVM); 2609 pgmPhysInvalidRamRangeTlbs(pVM); 2896 rc = pgmR3PhysMmioUnmapLocked(pVM, pVCpu, GCPhys, cb, GCPhysLast, pMmioRamRange); 2897 #ifdef VBOX_STRICT 2898 pgmPhysAssertRamRangesLocked(pVM, false /*fInUpdate*/, false /*fRamRelaxed*/); 2899 #endif 2900 2610 2901 PGM_UNLOCK(pVM); 2611 2902 return rc; … … 2619 2910 2620 2911 /** 2621 * Locate a MMIO2 range. 2622 * 2623 * @returns Pointer to the MMIO2 range. 2912 * Validates the claim to an MMIO2 range and returns the pointer to it. 2913 * 2914 * @returns The MMIO2 entry index on success, negative error status on failure. 2915 * @param pVM The cross context VM structure. 2916 * @param pDevIns The device instance owning the region. 2917 * @param hMmio2 Handle to look up. 2918 * @param pcChunks Where to return the number of chunks associated with 2919 * this handle. 2920 */ 2921 static int32_t pgmR3PhysMmio2ResolveHandle(PVM pVM, PPDMDEVINS pDevIns, PGMMMIO2HANDLE hMmio2, uint32_t *pcChunks) 2922 { 2923 *pcChunks = 0; 2924 uint32_t const idxFirst = hMmio2 - 1U; 2925 uint32_t const cMmio2Ranges = RT_MIN(pVM->pgm.s.cMmio2Ranges, RT_ELEMENTS(pVM->pgm.s.aMmio2Ranges)); 2926 AssertReturn(idxFirst < cMmio2Ranges, VERR_INVALID_HANDLE); 2927 2928 PPGMREGMMIO2RANGE const pFirst = &pVM->pgm.s.aMmio2Ranges[idxFirst]; 2929 AssertReturn(pFirst->idMmio2 == hMmio2, VERR_INVALID_HANDLE); 2930 AssertReturn((pFirst->fFlags & PGMREGMMIO2RANGE_F_FIRST_CHUNK), VERR_INVALID_HANDLE); 2931 AssertReturn(pFirst->pDevInsR3 == pDevIns && RT_VALID_PTR(pDevIns), VERR_NOT_OWNER); 2932 2933 /* Figure out how many chunks this handle spans. 
*/ 2934 if (pFirst->fFlags & PGMREGMMIO2RANGE_F_LAST_CHUNK) 2935 *pcChunks = 1; 2936 else 2937 { 2938 uint32_t cChunks = 1; 2939 for (uint32_t idx = idxFirst + 1;; idx++) 2940 { 2941 cChunks++; 2942 AssertReturn(idx < cMmio2Ranges, VERR_INTERNAL_ERROR_2); 2943 PPGMREGMMIO2RANGE const pCur = &pVM->pgm.s.aMmio2Ranges[idx]; 2944 AssertLogRelMsgReturn( pCur->pDevInsR3 == pDevIns 2945 && pCur->idMmio2 == idx + 1 2946 && pCur->iSubDev == pFirst->iSubDev 2947 && pCur->iRegion == pFirst->iRegion 2948 && !(pCur->fFlags & PGMREGMMIO2RANGE_F_FIRST_CHUNK), 2949 ("cur: %p/%#x/%#x/%#x/%#x/%s; first: %p/%#x/%#x/%#x/%#x/%s\n", 2950 pCur->pDevInsR3, pCur->idMmio2, pCur->iSubDev, pCur->iRegion, pCur->fFlags, 2951 pVM->pgm.s.apMmio2RamRanges[idx]->pszDesc, 2952 pDevIns, idx + 1, pFirst->iSubDev, pFirst->iRegion, pFirst->fFlags, 2953 pVM->pgm.s.apMmio2RamRanges[idxFirst]->pszDesc), 2954 VERR_INTERNAL_ERROR_3); 2955 if (pCur->fFlags & PGMREGMMIO2RANGE_F_LAST_CHUNK) 2956 break; 2957 } 2958 *pcChunks = cChunks; 2959 } 2960 2961 return (int32_t)idxFirst; 2962 } 2963 2964 2965 /** 2966 * Check if a device has already registered a MMIO2 region. 2967 * 2968 * @returns NULL if not registered, otherwise pointer to the MMIO2. 2624 2969 * @param pVM The cross context VM structure. 2625 2970 * @param pDevIns The device instance owning the region. 2626 2971 * @param iSubDev The sub-device number. 2627 2972 * @param iRegion The region. 2628 * @param hMmio2 Handle to look up. If NIL, use the @a iSubDev and 2629 * @a iRegion. 2630 */ 2631 DECLINLINE(PPGMREGMMIO2RANGE) pgmR3PhysMmio2Find(PVM pVM, PPDMDEVINS pDevIns, uint32_t iSubDev, 2632 uint32_t iRegion, PGMMMIO2HANDLE hMmio2) 2633 { 2634 if (hMmio2 != NIL_PGMMMIO2HANDLE) 2635 { 2636 if (hMmio2 <= RT_ELEMENTS(pVM->pgm.s.apMmio2RangesR3) && hMmio2 != 0) 2637 { 2638 PPGMREGMMIO2RANGE pCur = pVM->pgm.s.apMmio2RangesR3[hMmio2 - 1]; 2639 if (pCur && pCur->pDevInsR3 == pDevIns) 2640 { 2641 Assert(pCur->idMmio2 == hMmio2); 2642 AssertReturn(pCur->fFlags & PGMREGMMIO2RANGE_F_FIRST_CHUNK, NULL); 2643 return pCur; 2644 } 2645 Assert(!pCur); 2646 } 2647 for (PPGMREGMMIO2RANGE pCur = pVM->pgm.s.pRegMmioRangesR3; pCur; pCur = pCur->pNextR3) 2648 if (pCur->idMmio2 == hMmio2) 2649 { 2650 AssertBreak(pCur->pDevInsR3 == pDevIns); 2651 AssertReturn(pCur->fFlags & PGMREGMMIO2RANGE_F_FIRST_CHUNK, NULL); 2652 return pCur; 2653 } 2654 } 2655 else 2656 { 2657 /* 2658 * Search the list. There shouldn't be many entries. 2659 */ 2660 /** @todo Optimize this lookup! There may now be many entries and it'll 2661 * become really slow when doing MMR3HyperMapMMIO2 and similar. */ 2662 for (PPGMREGMMIO2RANGE pCur = pVM->pgm.s.pRegMmioRangesR3; pCur; pCur = pCur->pNextR3) 2663 if ( pCur->pDevInsR3 == pDevIns 2664 && pCur->iRegion == iRegion 2665 && pCur->iSubDev == iSubDev) 2666 return pCur; 2667 } 2973 */ 2974 DECLINLINE(PPGMREGMMIO2RANGE) pgmR3PhysMmio2Find(PVM pVM, PPDMDEVINS pDevIns, uint32_t iSubDev, uint32_t iRegion) 2975 { 2976 /* 2977 * Search the array. There shouldn't be many entries. 
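/* Aside: the resolver above turns a 1-based MMIO2 handle into the array index of its
   first chunk and counts how many chunks follow until the last-chunk flag. A
   self-contained model of that walk; the flag values and the CHUNK type are made up,
   and a plain array stands in for aMmio2Ranges. */
#include <stdint.h>

#define CHUNK_F_FIRST  0x01u
#define CHUNK_F_LAST   0x02u

typedef struct CHUNK { uint32_t fFlags; } CHUNK;

/* Returns the zero-based index of the first chunk for handle hRegion and stores the
   number of chunks it spans, or -1 if the handle or the chain is bad. */
int32_t resolveHandle(CHUNK const *paChunks, uint32_t cTotal, uint32_t hRegion, uint32_t *pcChunks)
{
    uint32_t const idxFirst = hRegion - 1u;             /* handles are 1-based; 0 is reserved */
    if (hRegion == 0 || idxFirst >= cTotal || !(paChunks[idxFirst].fFlags & CHUNK_F_FIRST))
        return -1;

    uint32_t cChunks = 1;
    for (uint32_t idx = idxFirst; !(paChunks[idx].fFlags & CHUNK_F_LAST); idx++, cChunks++)
        if (idx + 1 >= cTotal)
            return -1;                                  /* chain runs off the end of the array */

    *pcChunks = cChunks;
    return (int32_t)idxFirst;
}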
2978 */ 2979 uint32_t idx = RT_MIN(pVM->pgm.s.cMmio2Ranges, RT_ELEMENTS(pVM->pgm.s.aMmio2Ranges)); 2980 while (idx-- > 0) 2981 if (RT_LIKELY( pVM->pgm.s.aMmio2Ranges[idx].pDevInsR3 != pDevIns 2982 || pVM->pgm.s.aMmio2Ranges[idx].iRegion != iRegion 2983 || pVM->pgm.s.aMmio2Ranges[idx].iSubDev != iSubDev)) 2984 { /* likely */ } 2985 else 2986 return &pVM->pgm.s.aMmio2Ranges[idx]; 2668 2987 return NULL; 2669 2988 } 2670 2989 2671 2672 2990 /** 2673 2991 * Worker for PGMR3PhysMmio2ControlDirtyPageTracking and PGMR3PhysMmio2Map. 2674 2992 */ 2675 static int pgmR3PhysMmio2EnableDirtyPageTracing(PVM pVM, PPGMREGMMIO2RANGE pFirstMmio2)2993 static int pgmR3PhysMmio2EnableDirtyPageTracing(PVM pVM, uint32_t idx, uint32_t cChunks) 2676 2994 { 2677 2995 int rc = VINF_SUCCESS; 2678 for (PPGMREGMMIO2RANGE pCurMmio2 = pFirstMmio2; pCurMmio2; pCurMmio2 = pCurMmio2->pNextR3)2679 { 2680 Assert(!(pCurMmio2->fFlags & PGMREGMMIO2RANGE_F_IS_TRACKING));2681 int rc2 = pgmHandlerPhysicalExRegister(pVM, pCurMmio2->pPhysHandlerR3, pCurMmio2->RamRange.GCPhys,2682 pCurMmio2->RamRange.GCPhysLast); 2683 Assert LogRelMsgRC(rc2, ("%#RGp-%#RGp %s failed -> %Rrc\n", pCurMmio2->RamRange.GCPhys, pCurMmio2->RamRange.GCPhysLast,2684 pCurMmio2->RamRange.pszDesc, rc2));2996 while (cChunks-- > 0) 2997 { 2998 PPGMREGMMIO2RANGE const pMmio2 = &pVM->pgm.s.aMmio2Ranges[idx]; 2999 PPGMRAMRANGE const pRamRange = pVM->pgm.s.apMmio2RamRanges[idx]; 3000 3001 Assert(!(pMmio2->fFlags & PGMREGMMIO2RANGE_F_IS_TRACKING)); 3002 int rc2 = pgmHandlerPhysicalExRegister(pVM, pMmio2->pPhysHandlerR3, pRamRange->GCPhys, pRamRange->GCPhysLast); 2685 3003 if (RT_SUCCESS(rc2)) 2686 pCurMmio2->fFlags |= PGMREGMMIO2RANGE_F_IS_TRACKING; 2687 else if (RT_SUCCESS(rc)) 2688 rc = rc2; 2689 if (pCurMmio2->fFlags & PGMREGMMIO2RANGE_F_LAST_CHUNK) 2690 return rc; 2691 } 2692 AssertFailed(); 3004 pMmio2->fFlags |= PGMREGMMIO2RANGE_F_IS_TRACKING; 3005 else 3006 AssertLogRelMsgFailedStmt(("%#RGp-%#RGp %s failed -> %Rrc\n", 3007 pRamRange->GCPhys, pRamRange->GCPhysLast, pRamRange->pszDesc, rc2), 3008 rc = RT_SUCCESS(rc) ? rc2 : rc); 3009 3010 idx++; 3011 } 2693 3012 return rc; 2694 3013 } … … 2698 3017 * Worker for PGMR3PhysMmio2ControlDirtyPageTracking and PGMR3PhysMmio2Unmap. 2699 3018 */ 2700 static int pgmR3PhysMmio2DisableDirtyPageTracing(PVM pVM, PPGMREGMMIO2RANGE pFirstMmio2) 2701 { 2702 for (PPGMREGMMIO2RANGE pCurMmio2 = pFirstMmio2; pCurMmio2; pCurMmio2 = pCurMmio2->pNextR3) 2703 { 2704 if (pCurMmio2->fFlags & PGMREGMMIO2RANGE_F_IS_TRACKING) 2705 { 2706 int rc2 = pgmHandlerPhysicalExDeregister(pVM, pCurMmio2->pPhysHandlerR3); 2707 AssertLogRelMsgRC(rc2, ("%#RGp-%#RGp %s failed -> %Rrc\n", pCurMmio2->RamRange.GCPhys, pCurMmio2->RamRange.GCPhysLast, 2708 pCurMmio2->RamRange.pszDesc, rc2)); 2709 pCurMmio2->fFlags &= ~PGMREGMMIO2RANGE_F_IS_TRACKING; 2710 } 2711 if (pCurMmio2->fFlags & PGMREGMMIO2RANGE_F_LAST_CHUNK) 2712 return VINF_SUCCESS; 2713 } 2714 AssertFailed(); 2715 return VINF_SUCCESS; 2716 2717 } 2718 2719 2720 /** 2721 * Calculates the number of chunks 2722 * 2723 * @returns Number of registration chunk needed. 2724 * @param pVM The cross context VM structure. 2725 * @param cb The size of the MMIO/MMIO2 range. 2726 * @param pcPagesPerChunk Where to return the number of pages tracked by each 2727 * chunk. Optional. 2728 * @param pcbChunk Where to return the guest mapping size for a chunk. 
2729 */ 2730 static uint16_t pgmR3PhysMmio2CalcChunkCount(PVM pVM, RTGCPHYS cb, uint32_t *pcPagesPerChunk, uint32_t *pcbChunk) 2731 { 2732 RT_NOREF_PV(pVM); /* without raw mode */ 2733 2734 /* 2735 * This is the same calculation as PGMR3PhysRegisterRam does, except we'll be 2736 * needing a few bytes extra the PGMREGMMIO2RANGE structure. 2737 * 2738 * Note! In additions, we've got a 24 bit sub-page range for MMIO2 ranges, leaving 2739 * us with an absolute maximum of 16777215 pages per chunk (close to 64 GB). 2740 */ 2741 uint32_t const cPagesPerChunk = _4M; 2742 Assert(RT_ALIGN_32(cPagesPerChunk, X86_PD_PAE_SHIFT - X86_PAGE_SHIFT)); /* NEM large page requirement: 1GB pages. */ 2743 uint32_t const cbChunk = RT_UOFFSETOF_DYN(PGMREGMMIO2RANGE, RamRange.aPages[cPagesPerChunk]); 2744 AssertRelease(cPagesPerChunk < _16M); 2745 2746 if (pcbChunk) 2747 *pcbChunk = cbChunk; 2748 if (pcPagesPerChunk) 2749 *pcPagesPerChunk = cPagesPerChunk; 2750 2751 /* Calc the number of chunks we need. */ 2752 RTGCPHYS const cGuestPages = cb >> GUEST_PAGE_SHIFT; 2753 uint16_t cChunks = (uint16_t)((cGuestPages + cPagesPerChunk - 1) / cPagesPerChunk); 2754 AssertRelease((RTGCPHYS)cChunks * cPagesPerChunk >= cGuestPages); 2755 return cChunks; 2756 } 2757 2758 2759 /** 2760 * Worker for PGMR3PhysMMIO2Register that allocates and the PGMREGMMIO2RANGE 2761 * structures and does basic initialization. 2762 * 2763 * Caller must set type specfic members and initialize the PGMPAGE structures. 2764 * 2765 * This was previously also used by PGMR3PhysMmio2PreRegister, a function for 2766 * pre-registering MMIO that was later (6.1) replaced by a new handle based IOM 2767 * interface. The reference to caller and type above is purely historical. 2768 * 2769 * @returns VBox status code. 2770 * @param pVM The cross context VM structure. 2771 * @param pDevIns The device instance owning the region. 2772 * @param iSubDev The sub-device number (internal PCI config number). 2773 * @param iRegion The region number. If the MMIO2 memory is a PCI 2774 * I/O region this number has to be the number of that 2775 * region. Otherwise it can be any number safe 2776 * UINT8_MAX. 2777 * @param cb The size of the region. Must be page aligned. 2778 * @param fFlags PGMPHYS_MMIO2_FLAGS_XXX. 2779 * @param idMmio2 The MMIO2 ID for the first chunk. 2780 * @param pszDesc The description. 2781 * @param ppHeadRet Where to return the pointer to the first 2782 * registration chunk. 2783 * 2784 * @thread EMT 2785 */ 2786 static int pgmR3PhysMmio2Create(PVM pVM, PPDMDEVINS pDevIns, uint32_t iSubDev, uint32_t iRegion, RTGCPHYS cb, uint32_t fFlags, 2787 uint8_t idMmio2, const char *pszDesc, PPGMREGMMIO2RANGE *ppHeadRet) 2788 { 2789 /* 2790 * Figure out how many chunks we need and of which size. 2791 */ 2792 uint32_t cPagesPerChunk; 2793 uint16_t cChunks = pgmR3PhysMmio2CalcChunkCount(pVM, cb, &cPagesPerChunk, NULL); 2794 AssertReturn(cChunks, VERR_PGM_PHYS_MMIO_EX_IPE); 2795 2796 /* 2797 * Allocate the chunks. 2798 */ 2799 PPGMREGMMIO2RANGE *ppNext = ppHeadRet; 2800 *ppNext = NULL; 2801 3019 static int pgmR3PhysMmio2DisableDirtyPageTracing(PVM pVM, uint32_t idx, uint32_t cChunks) 3020 { 2802 3021 int rc = VINF_SUCCESS; 2803 uint32_t cPagesLeft = cb >> GUEST_PAGE_SHIFT; 2804 for (uint16_t iChunk = 0; iChunk < cChunks && RT_SUCCESS(rc); iChunk++, idMmio2++) 2805 { 2806 /* 2807 * We currently do a single RAM range for the whole thing. 
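/* Aside: the 16777215-page ceiling mentioned in the comment above comes from MMIO2
   page IDs carrying a 24-bit page index next to the range ID. A tiny illustration of
   that kind of packing; the exact bit layout below is assumed for the sketch and is
   not copied from the real PGM_MMIO2_PAGEID_MAKE macro. */
#include <assert.h>
#include <stdint.h>

/* Assumed layout: bits 31:24 hold the MMIO2 range ID, bits 23:0 the page index. */
static inline uint32_t mmio2PageIdMake(uint8_t idMmio2, uint32_t iPage)
{
    assert(iPage <= 0xffffffu);                 /* at most 16777215 pages per chunk */
    return ((uint32_t)idMmio2 << 24) | iPage;
}

static inline uint8_t  mmio2PageIdGetRange(uint32_t uPageId) { return (uint8_t)(uPageId >> 24); }
static inline uint32_t mmio2PageIdGetPage(uint32_t uPageId)  { return uPageId & 0xffffffu; }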
This will 2808 * probably have to change once someone needs really large MMIO regions, 2809 * as we will be running into SUPR3PageAllocEx limitations and such. 2810 */ 2811 const uint32_t cPagesTrackedByChunk = RT_MIN(cPagesLeft, cPagesPerChunk); 2812 const size_t cbRange = RT_UOFFSETOF_DYN(PGMREGMMIO2RANGE, RamRange.aPages[cPagesTrackedByChunk]); 2813 PPGMREGMMIO2RANGE pNew = NULL; 2814 2815 /* 2816 * Allocate memory for the registration structure. 2817 */ 2818 size_t const cChunkPages = RT_ALIGN_Z(cbRange, HOST_PAGE_SIZE) >> HOST_PAGE_SHIFT; 2819 size_t const cbChunk = (1 + cChunkPages + 1) << HOST_PAGE_SHIFT; 2820 AssertLogRelBreakStmt(cbChunk == (uint32_t)cbChunk, rc = VERR_OUT_OF_RANGE); 2821 RTR0PTR R0PtrChunk = NIL_RTR0PTR; 2822 void *pvChunk = NULL; 2823 rc = SUPR3PageAllocEx(cChunkPages, 0 /*fFlags*/, &pvChunk, &R0PtrChunk, NULL /*paPages*/); 2824 AssertLogRelMsgRCBreak(rc, ("rc=%Rrc, cChunkPages=%#zx\n", rc, cChunkPages)); 2825 2826 Assert(R0PtrChunk != NIL_RTR0PTR || PGM_IS_IN_NEM_MODE(pVM)); 2827 RT_BZERO(pvChunk, cChunkPages << HOST_PAGE_SHIFT); 2828 2829 pNew = (PPGMREGMMIO2RANGE)pvChunk; 2830 pNew->RamRange.fFlags = PGM_RAM_RANGE_FLAGS_FLOATING; 2831 pNew->RamRange.pSelfR0 = R0PtrChunk + RT_UOFFSETOF(PGMREGMMIO2RANGE, RamRange); 2832 2833 /* 2834 * Initialize the registration structure (caller does specific bits). 2835 */ 2836 pNew->pDevInsR3 = pDevIns; 2837 //pNew->pvR3 = NULL; 2838 //pNew->pNext = NULL; 2839 if (iChunk == 0) 2840 pNew->fFlags |= PGMREGMMIO2RANGE_F_FIRST_CHUNK; 2841 if (iChunk + 1 == cChunks) 2842 pNew->fFlags |= PGMREGMMIO2RANGE_F_LAST_CHUNK; 2843 if (fFlags & PGMPHYS_MMIO2_FLAGS_TRACK_DIRTY_PAGES) 2844 pNew->fFlags |= PGMREGMMIO2RANGE_F_TRACK_DIRTY_PAGES; 2845 pNew->iSubDev = iSubDev; 2846 pNew->iRegion = iRegion; 2847 pNew->idSavedState = UINT8_MAX; 2848 pNew->idMmio2 = idMmio2; 2849 //pNew->pPhysHandlerR3 = NULL; 2850 //pNew->paLSPages = NULL; 2851 pNew->RamRange.GCPhys = NIL_RTGCPHYS; 2852 pNew->RamRange.GCPhysLast = NIL_RTGCPHYS; 2853 pNew->RamRange.pszDesc = pszDesc; 2854 pNew->RamRange.cb = pNew->cbReal = (RTGCPHYS)cPagesTrackedByChunk << X86_PAGE_SHIFT; 2855 pNew->RamRange.fFlags |= PGM_RAM_RANGE_FLAGS_AD_HOC_MMIO_EX; 2856 pNew->RamRange.uNemRange = UINT32_MAX; 2857 //pNew->RamRange.pvR3 = NULL; 2858 //pNew->RamRange.paLSPages = NULL; 2859 2860 *ppNext = pNew; 2861 ASMCompilerBarrier(); 2862 cPagesLeft -= cPagesTrackedByChunk; 2863 ppNext = &pNew->pNextR3; 2864 2865 /* 2866 * Pre-allocate a handler if we're tracking dirty pages, unless NEM takes care of this. 2867 */ 2868 if ( (fFlags & PGMPHYS_MMIO2_FLAGS_TRACK_DIRTY_PAGES) 2869 #ifdef VBOX_WITH_PGM_NEM_MODE 2870 && (!VM_IS_NEM_ENABLED(pVM) || !NEMR3IsMmio2DirtyPageTrackingSupported(pVM)) 2871 #endif 2872 ) 2873 2874 { 2875 rc = pgmHandlerPhysicalExCreate(pVM, pVM->pgm.s.hMmio2DirtyPhysHandlerType, idMmio2, pszDesc, &pNew->pPhysHandlerR3); 2876 AssertLogRelMsgRCBreak(rc, ("idMmio2=%zu\n", idMmio2)); 2877 } 2878 } 2879 Assert(cPagesLeft == 0); 2880 2881 if (RT_SUCCESS(rc)) 2882 { 2883 Assert((*ppHeadRet)->fFlags & PGMREGMMIO2RANGE_F_FIRST_CHUNK); 2884 return VINF_SUCCESS; 2885 } 2886 2887 /* 2888 * Free floating ranges. 
2889 */ 2890 while (*ppHeadRet) 2891 { 2892 PPGMREGMMIO2RANGE pFree = *ppHeadRet; 2893 *ppHeadRet = pFree->pNextR3; 2894 2895 if (pFree->pPhysHandlerR3) 2896 { 2897 pgmHandlerPhysicalExDestroy(pVM, pFree->pPhysHandlerR3); 2898 pFree->pPhysHandlerR3 = NULL; 2899 } 2900 2901 if (pFree->RamRange.fFlags & PGM_RAM_RANGE_FLAGS_FLOATING) 2902 { 2903 const size_t cbRange = RT_UOFFSETOF_DYN(PGMREGMMIO2RANGE, 2904 RamRange.aPages[pFree->RamRange.cb >> X86_PAGE_SHIFT]); 2905 size_t const cChunkPages = RT_ALIGN_Z(cbRange, HOST_PAGE_SIZE) >> HOST_PAGE_SHIFT; 2906 SUPR3PageFreeEx(pFree, cChunkPages); 2907 } 2908 } 2909 3022 while (cChunks-- > 0) 3023 { 3024 PPGMREGMMIO2RANGE const pMmio2 = &pVM->pgm.s.aMmio2Ranges[idx]; 3025 PPGMRAMRANGE const pRamRange = pVM->pgm.s.apMmio2RamRanges[idx]; 3026 if (pMmio2->fFlags & PGMREGMMIO2RANGE_F_IS_TRACKING) 3027 { 3028 int rc2 = pgmHandlerPhysicalExDeregister(pVM, pMmio2->pPhysHandlerR3); 3029 AssertLogRelMsgStmt(RT_SUCCESS(rc2), 3030 ("%#RGp-%#RGp %s failed -> %Rrc\n", 3031 pRamRange->GCPhys, pRamRange->GCPhysLast, pRamRange->pszDesc, rc2), 3032 rc = RT_SUCCESS(rc) ? rc2 : rc); 3033 pMmio2->fFlags &= ~PGMREGMMIO2RANGE_F_IS_TRACKING; 3034 } 3035 idx++; 3036 } 2910 3037 return rc; 2911 3038 } 2912 3039 3040 #if 0 // temp 2913 3041 2914 3042 /** … … 2964 3092 PGM_UNLOCK(pVM); 2965 3093 } 3094 #endif 2966 3095 2967 3096 … … 2997 3126 * the memory. 2998 3127 * @param phRegion Where to return the MMIO2 region handle. Optional. 2999 * @thread EMT 3128 * @thread EMT(0) 3129 * 3130 * @note Only callable at VM creation time and during VM state loading. 3131 * The latter is for PCNet saved state compatibility with pre 4.3.6 3132 * state. 3000 3133 */ 3001 3134 VMMR3_INT_DECL(int) PGMR3PhysMmio2Register(PVM pVM, PPDMDEVINS pDevIns, uint32_t iSubDev, uint32_t iRegion, RTGCPHYS cb, … … 3012 3145 *phRegion = NIL_PGMMMIO2HANDLE; 3013 3146 } 3014 VM_ASSERT_EMT_RETURN(pVM, VERR_VM_THREAD_NOT_EMT); 3147 PVMCPU const pVCpu = VMMGetCpu(pVM); 3148 AssertReturn(pVCpu && pVCpu->idCpu == 0, VERR_VM_THREAD_NOT_EMT); 3149 VMSTATE const enmVMState = VMR3GetState(pVM); 3150 AssertMsgReturn(enmVMState == VMSTATE_CREATING || enmVMState == VMSTATE_LOADING, 3151 ("state %s, expected CREATING or LOADING\n", VMGetStateName(enmVMState)), 3152 VERR_VM_INVALID_VM_STATE); 3153 3015 3154 AssertPtrReturn(pDevIns, VERR_INVALID_PARAMETER); 3016 3155 AssertReturn(iSubDev <= UINT8_MAX, VERR_INVALID_PARAMETER); 3017 3156 AssertReturn(iRegion <= UINT8_MAX, VERR_INVALID_PARAMETER); 3157 3018 3158 AssertPtrReturn(pszDesc, VERR_INVALID_POINTER); 3019 3159 AssertReturn(*pszDesc, VERR_INVALID_PARAMETER); 3020 AssertReturn(pgmR3PhysMmio2Find(pVM, pDevIns, iSubDev, iRegion, NIL_PGMMMIO2HANDLE) == NULL, VERR_ALREADY_EXISTS); 3160 3021 3161 AssertReturn(!(cb & GUEST_PAGE_OFFSET_MASK), VERR_INVALID_PARAMETER); 3022 3162 AssertReturn(cb, VERR_INVALID_PARAMETER); … … 3025 3165 const uint32_t cGuestPages = cb >> GUEST_PAGE_SHIFT; 3026 3166 AssertLogRelReturn(((RTGCPHYS)cGuestPages << GUEST_PAGE_SHIFT) == cb, VERR_INVALID_PARAMETER); 3027 AssertLogRelReturn(cGuestPages <= (MM_MMIO_64_MAX >> X86_PAGE_SHIFT), VERR_OUT_OF_RANGE); 3028 AssertLogRelReturn(cGuestPages <= PGM_MMIO2_MAX_PAGE_COUNT, VERR_OUT_OF_RANGE); 3167 AssertLogRelReturn(cGuestPages <= PGM_MAX_PAGES_PER_MMIO2_REGION, VERR_OUT_OF_RANGE); 3168 AssertLogRelReturn(cGuestPages <= (MM_MMIO_64_MAX >> GUEST_PAGE_SHIFT), VERR_OUT_OF_RANGE); 3169 3170 AssertReturn(pgmR3PhysMmio2Find(pVM, pDevIns, iSubDev, iRegion) == NULL, VERR_ALREADY_EXISTS); 3029 3171 3030 3172 /* … … 
3039 3181 3040 3182 /* 3041 * Allocate an MMIO2 range ID (not freed on failure). 3183 * Check that we've got sufficient MMIO2 ID space for this request (the 3184 * allocation will be done later once we've got the backing memory secured, 3185 * but given the EMT0 restriction, that's not going to be a problem). 3042 3186 * 3043 3187 * The zero ID is not used as it could be confused with NIL_GMM_PAGEID, so 3044 * the IDs goes from 1 thru PGM_MMIO2_MAX_RANGES. 3045 */ 3046 unsigned cChunks = pgmR3PhysMmio2CalcChunkCount(pVM, cb, NULL, NULL); 3047 3048 PGM_LOCK_VOID(pVM); 3049 AssertCompile(PGM_MMIO2_MAX_RANGES < 255); 3050 uint8_t const idMmio2 = pVM->pgm.s.cMmio2Regions + 1; 3051 unsigned const cNewMmio2Regions = pVM->pgm.s.cMmio2Regions + cChunks; 3052 if (cNewMmio2Regions > PGM_MMIO2_MAX_RANGES) 3053 { 3054 PGM_UNLOCK(pVM); 3055 AssertLogRelFailedReturn(VERR_PGM_TOO_MANY_MMIO2_RANGES); 3056 } 3057 pVM->pgm.s.cMmio2Regions = cNewMmio2Regions; 3058 PGM_UNLOCK(pVM); 3188 * the IDs goes from 1 thru PGM_MAX_MMIO2_RANGES. 3189 */ 3190 unsigned const cChunks = pgmPhysMmio2CalcChunkCount(cb, NULL); 3191 3192 int rc = PGM_LOCK(pVM); 3193 AssertRCReturn(rc, rc); 3194 3195 AssertCompile(PGM_MAX_MMIO2_RANGES < 255); 3196 uint8_t const idMmio2 = pVM->pgm.s.cMmio2Ranges + 1; 3197 AssertLogRelReturnStmt(idMmio2 + cChunks <= PGM_MAX_MMIO2_RANGES, PGM_UNLOCK(pVM), VERR_PGM_TOO_MANY_MMIO2_RANGES); 3059 3198 3060 3199 /* … … 3062 3201 * most likely to fail. 3063 3202 */ 3064 intrc = MMR3AdjustFixedReservation(pVM, cGuestPages, pszDesc);3203 rc = MMR3AdjustFixedReservation(pVM, cGuestPages, pszDesc); 3065 3204 if (RT_SUCCESS(rc)) 3066 3205 { 3067 const uint32_t cHostPages = RT_ALIGN_T(cb, HOST_PAGE_SIZE, RTGCPHYS) >> HOST_PAGE_SHIFT; 3068 PSUPPAGE paPages = (PSUPPAGE)RTMemTmpAlloc(cHostPages * sizeof(SUPPAGE)); 3206 /* 3207 * If we're in driverless we'll be doing the work here, otherwise we 3208 * must call ring-0 to do the job as we'll need physical addresses 3209 * and maybe a ring-0 mapping address for it all. 3210 */ 3211 if (SUPR3IsDriverless()) 3212 rc = pgmPhysMmio2RegisterWorker(pVM, cGuestPages, idMmio2, cChunks, pDevIns, iSubDev, iRegion, fFlags); 3213 else 3214 { 3215 PGMPHYSMMIO2REGISTERREQ Mmio2RegReq; 3216 Mmio2RegReq.Hdr.u32Magic = SUPVMMR0REQHDR_MAGIC; 3217 Mmio2RegReq.Hdr.cbReq = sizeof(Mmio2RegReq); 3218 Mmio2RegReq.cbGuestPage = GUEST_PAGE_SIZE; 3219 Mmio2RegReq.cGuestPages = cGuestPages; 3220 Mmio2RegReq.idMmio2 = idMmio2; 3221 Mmio2RegReq.cChunks = cChunks; 3222 Mmio2RegReq.iSubDev = (uint8_t)iSubDev; 3223 Mmio2RegReq.iRegion = (uint8_t)iRegion; 3224 Mmio2RegReq.fFlags = fFlags; 3225 Mmio2RegReq.pDevIns = pDevIns; 3226 rc = VMMR3CallR0Emt(pVM, pVCpu, VMMR0_DO_PGM_PHYS_MMIO2_REGISTER, 0 /*u64Arg*/, &Mmio2RegReq.Hdr); 3227 } 3069 3228 if (RT_SUCCESS(rc)) 3070 3229 { 3071 void *pvPages = NULL; 3072 #ifndef VBOX_WITH_LINEAR_HOST_PHYS_MEM 3073 RTR0PTR pvPagesR0 = NIL_RTR0PTR; 3074 #endif 3230 Assert(idMmio2 + cChunks - 1 == pVM->pgm.s.cMmio2Ranges); 3231 3232 /* 3233 * There are two things left to do: 3234 * 1. Add the description to the associated RAM ranges. 3235 * 2. Pre-allocate access handlers for dirty bit tracking if necessary. 3236 */ 3237 bool const fNeedHandler = (fFlags & PGMPHYS_MMIO2_FLAGS_TRACK_DIRTY_PAGES) 3075 3238 #ifdef VBOX_WITH_PGM_NEM_MODE 3076 if (PGM_IS_IN_NEM_MODE(pVM)) 3077 rc = SUPR3PageAlloc(cHostPages, pVM->pgm.s.fUseLargePages ? 
SUP_PAGE_ALLOC_F_LARGE_PAGES : 0, &pvPages); 3078 else 3079 #endif 3080 { 3081 #ifndef VBOX_WITH_LINEAR_HOST_PHYS_MEM 3082 rc = SUPR3PageAllocEx(cHostPages, 0 /*fFlags*/, &pvPages, &pvPagesR0, paPages); 3083 #else 3084 rc = SUPR3PageAllocEx(cHostPages, 0 /*fFlags*/, &pvPages, NULL /*pR0Ptr*/, paPages); 3085 #endif 3239 && (!VM_IS_NEM_ENABLED(pVM) || !NEMR3IsMmio2DirtyPageTrackingSupported(pVM)) 3240 #endif 3241 ; 3242 for (uint32_t idxChunk = 0; idxChunk < cChunks; idxChunk++) 3243 { 3244 PPGMREGMMIO2RANGE const pMmio2 = &pVM->pgm.s.aMmio2Ranges[idxChunk + idMmio2 - 1]; 3245 Assert(pMmio2->idRamRange < RT_ELEMENTS(pVM->pgm.s.apRamRanges)); 3246 PPGMRAMRANGE const pRamRange = pVM->pgm.s.apRamRanges[pMmio2->idRamRange]; 3247 Assert(pRamRange->pbR3 == pMmio2->pbR3); 3248 Assert(pRamRange->cb == pMmio2->cbReal); 3249 3250 pRamRange->pszDesc = pszDesc; /** @todo mangle this if we got more than one chunk */ 3251 if (fNeedHandler) 3252 { 3253 rc = pgmHandlerPhysicalExCreate(pVM, pVM->pgm.s.hMmio2DirtyPhysHandlerType, pMmio2->idMmio2, 3254 pszDesc, &pMmio2->pPhysHandlerR3); 3255 AssertLogRelMsgReturnStmt(RT_SUCCESS(rc), 3256 ("idMmio2=%#x idxChunk=%#x rc=%Rc\n", idMmio2, idxChunk, rc), 3257 PGM_UNLOCK(pVM), 3258 rc); /* PGMR3Term will take care of it all. */ 3259 } 3086 3260 } 3087 if (RT_SUCCESS(rc)) 3088 { 3089 memset(pvPages, 0, cGuestPages * GUEST_PAGE_SIZE); 3090 3091 /* 3092 * Create the registered MMIO range record for it. 3093 */ 3094 PPGMREGMMIO2RANGE pNew; 3095 rc = pgmR3PhysMmio2Create(pVM, pDevIns, iSubDev, iRegion, cb, fFlags, idMmio2, pszDesc, &pNew); 3096 if (RT_SUCCESS(rc)) 3097 { 3098 if (phRegion) 3099 *phRegion = idMmio2; /* The ID of the first chunk. */ 3100 3101 uint32_t iSrcPage = 0; 3102 uint8_t *pbCurPages = (uint8_t *)pvPages; 3103 for (PPGMREGMMIO2RANGE pCur = pNew; pCur; pCur = pCur->pNextR3) 3104 { 3105 pCur->pvR3 = pbCurPages; 3106 #ifndef VBOX_WITH_LINEAR_HOST_PHYS_MEM 3107 pCur->pvR0 = pvPagesR0 + (iSrcPage << GUEST_PAGE_SHIFT); 3108 #endif 3109 pCur->RamRange.pvR3 = pbCurPages; 3110 3111 uint32_t iDstPage = pCur->RamRange.cb >> GUEST_PAGE_SHIFT; 3112 #ifdef VBOX_WITH_PGM_NEM_MODE 3113 if (PGM_IS_IN_NEM_MODE(pVM)) 3114 while (iDstPage-- > 0) 3115 PGM_PAGE_INIT(&pNew->RamRange.aPages[iDstPage], UINT64_C(0x0000ffffffff0000), 3116 PGM_MMIO2_PAGEID_MAKE(idMmio2, iDstPage), 3117 PGMPAGETYPE_MMIO2, PGM_PAGE_STATE_ALLOCATED); 3118 else 3119 #endif 3120 { 3121 AssertRelease(HOST_PAGE_SHIFT == GUEST_PAGE_SHIFT); 3122 while (iDstPage-- > 0) 3123 PGM_PAGE_INIT(&pNew->RamRange.aPages[iDstPage], paPages[iDstPage + iSrcPage].Phys, 3124 PGM_MMIO2_PAGEID_MAKE(idMmio2, iDstPage), 3125 PGMPAGETYPE_MMIO2, PGM_PAGE_STATE_ALLOCATED); 3126 } 3127 3128 /* advance. */ 3129 iSrcPage += pCur->RamRange.cb >> GUEST_PAGE_SHIFT; 3130 pbCurPages += pCur->RamRange.cb; 3131 } 3132 3133 RTMemTmpFree(paPages); 3134 3135 /* 3136 * Update the page count stats, link the registration and we're done. 3137 */ 3138 pVM->pgm.s.cAllPages += cGuestPages; 3139 pVM->pgm.s.cPrivatePages += cGuestPages; 3140 3141 pgmR3PhysMmio2Link(pVM, pNew); 3142 3143 *ppv = pvPages; 3144 return VINF_SUCCESS; 3145 } 3146 3147 SUPR3PageFreeEx(pvPages, cHostPages); 3148 } 3149 } 3150 RTMemTmpFree(paPages); 3261 3262 /* 3263 * Done! 
3264 */ 3265 if (phRegion) 3266 *phRegion = idMmio2; 3267 *ppv = pVM->pgm.s.aMmio2Ranges[idMmio2 - 1].pbR3; 3268 3269 PGM_UNLOCK(pVM); 3270 return VINF_SUCCESS; 3271 } 3272 3151 3273 MMR3AdjustFixedReservation(pVM, -(int32_t)cGuestPages, pszDesc); 3152 3274 } … … 3155 3277 return rc; 3156 3278 } 3157 3158 3279 3159 3280 /** … … 3168 3289 * @param hMmio2 The MMIO2 handle to deregister, or NIL if all 3169 3290 * regions for the given device is to be deregistered. 3291 * @thread EMT(0) 3292 * 3293 * @note Only callable during VM state loading. This is to jettison an unused 3294 * MMIO2 section present in PCNet saved state prior to VBox v4.3.6. 3170 3295 */ 3171 3296 VMMR3_INT_DECL(int) PGMR3PhysMmio2Deregister(PVM pVM, PPDMDEVINS pDevIns, PGMMMIO2HANDLE hMmio2) … … 3174 3299 * Validate input. 3175 3300 */ 3176 VM_ASSERT_EMT_RETURN(pVM, VERR_VM_THREAD_NOT_EMT); 3301 PVMCPU const pVCpu = VMMGetCpu(pVM); 3302 AssertReturn(pVCpu && pVCpu->idCpu == 0, VERR_VM_THREAD_NOT_EMT); 3303 VMSTATE const enmVMState = VMR3GetState(pVM); 3304 AssertMsgReturn(enmVMState == VMSTATE_LOADING, 3305 ("state %s, expected LOADING\n", VMGetStateName(enmVMState)), 3306 VERR_VM_INVALID_VM_STATE); 3307 3177 3308 AssertPtrReturn(pDevIns, VERR_INVALID_PARAMETER); 3178 3309 3179 3310 /* 3180 * The loop here scanning all registrations will make sure that multi-chunk ranges 3181 * get properly deregistered, though it's original purpose was the wildcard iRegion. 3311 * Take the PGM lock and scan for registrations matching the requirements. 3312 * We do this backwards to more easily reduce the cMmio2Ranges count when 3313 * stuff is removed. 3182 3314 */ 3183 3315 PGM_LOCK_VOID(pVM); 3184 int rc = VINF_SUCCESS; 3185 unsigned cFound = 0; 3186 PPGMREGMMIO2RANGE pPrev = NULL; 3187 PPGMREGMMIO2RANGE pCur = pVM->pgm.s.pRegMmioRangesR3; 3188 while (pCur) 3189 { 3190 uint32_t const fFlags = pCur->fFlags; 3191 if ( pCur->pDevInsR3 == pDevIns 3192 && ( hMmio2 == NIL_PGMMMIO2HANDLE 3193 || pCur->idMmio2 == hMmio2)) 3316 3317 int rc = VINF_SUCCESS; 3318 unsigned cFound = 0; 3319 uint32_t const cMmio2Ranges = RT_MIN(pVM->pgm.s.cMmio2Ranges, RT_ELEMENTS(pVM->pgm.s.aMmio2Ranges)); 3320 uint32_t idx = cMmio2Ranges; 3321 while (idx-- > 0) 3322 { 3323 PPGMREGMMIO2RANGE pCur = &pVM->pgm.s.aMmio2Ranges[idx]; 3324 if ( pCur->pDevInsR3 == pDevIns 3325 && ( hMmio2 == NIL_PGMMMIO2HANDLE 3326 || pCur->idMmio2 == hMmio2)) 3194 3327 { 3195 3328 cFound++; 3329 3330 /* 3331 * Wind back the first chunk for this registration. 
3332 */ 3333 AssertLogRelMsgReturnStmt(pCur->fFlags & PGMREGMMIO2RANGE_F_LAST_CHUNK, ("idx=%u fFlags=%#x\n", idx, pCur->fFlags), 3334 PGM_UNLOCK(pVM), VERR_INTERNAL_ERROR_3); 3335 uint32_t cGuestPages = pCur->cbReal >> GUEST_PAGE_SHIFT; 3336 uint32_t cChunks = 1; 3337 while ( idx > 0 3338 && !(pCur->fFlags & PGMREGMMIO2RANGE_F_FIRST_CHUNK)) 3339 { 3340 AssertLogRelMsgReturnStmt( pCur[-1].pDevInsR3 == pDevIns 3341 && pCur[-1].iRegion == pCur->iRegion 3342 && pCur[-1].iSubDev == pCur->iSubDev, 3343 ("[%u]: %p/%#x/%#x/fl=%#x; [%u]: %p/%#x/%#x/fl=%#x; cChunks=%#x\n", 3344 idx - 1, pCur[-1].pDevInsR3, pCur[-1].iRegion, pCur[-1].iSubDev, pCur[-1].fFlags, 3345 idx, pCur->pDevInsR3, pCur->iRegion, pCur->iSubDev, pCur->fFlags, cChunks), 3346 PGM_UNLOCK(pVM), VERR_INTERNAL_ERROR_3); 3347 cChunks++; 3348 pCur--; 3349 idx--; 3350 cGuestPages += pCur->cbReal >> GUEST_PAGE_SHIFT; 3351 } 3352 AssertLogRelMsgReturnStmt(pCur->fFlags & PGMREGMMIO2RANGE_F_FIRST_CHUNK, 3353 ("idx=%u fFlags=%#x cChunks=%#x\n", idx, pCur->fFlags, cChunks), 3354 PGM_UNLOCK(pVM), VERR_INTERNAL_ERROR_3); 3196 3355 3197 3356 /* 3198 3357 * Unmap it if it's mapped. 3199 3358 */ 3200 if ( fFlags & PGMREGMMIO2RANGE_F_MAPPED)3201 { 3202 int rc2 = PGMR3PhysMmio2Unmap(pVM, pCur->pDevInsR3, pCur->idMmio2, pCur->RamRange.GCPhys);3359 if (pCur->fFlags & PGMREGMMIO2RANGE_F_MAPPED) 3360 { 3361 int rc2 = PGMR3PhysMmio2Unmap(pVM, pCur->pDevInsR3, idx + 1, pCur->GCPhys); 3203 3362 AssertRC(rc2); 3204 3363 if (RT_FAILURE(rc2) && RT_SUCCESS(rc)) … … 3207 3366 3208 3367 /* 3209 * Unlink it3368 * Destroy access handlers. 3210 3369 */ 3211 PPGMREGMMIO2RANGE pNext = pCur->pNextR3; 3212 if (pPrev) 3213 pPrev->pNextR3 = pNext; 3370 for (uint32_t iChunk = 0; iChunk < cChunks; iChunk++) 3371 if (pCur[iChunk].pPhysHandlerR3) 3372 { 3373 pgmHandlerPhysicalExDestroy(pVM, pCur[iChunk].pPhysHandlerR3); 3374 pCur[iChunk].pPhysHandlerR3 = NULL; 3375 } 3376 3377 /* 3378 * Call kernel mode / worker to do the actual deregistration. 3379 */ 3380 const char * const pszDesc = pVM->pgm.s.apMmio2RamRanges[idx] ? pVM->pgm.s.apMmio2RamRanges[idx]->pszDesc : NULL; 3381 int rc2; 3382 if (SUPR3IsDriverless()) 3383 { 3384 Assert(PGM_IS_IN_NEM_MODE(pVM)); 3385 rc2 = pgmPhysMmio2DeregisterWorker(pVM, idx, cChunks, pDevIns); 3386 AssertLogRelMsgStmt(RT_SUCCESS(rc2), 3387 ("pgmPhysMmio2DeregisterWorker: rc=%Rrc idx=%#x cChunks=%#x %s\n", 3388 rc2, idx, cChunks, pszDesc), 3389 rc = RT_SUCCESS(rc) ? rc2 : rc); 3390 } 3214 3391 else 3215 pVM->pgm.s.pRegMmioRangesR3 = pNext; 3216 pCur->pNextR3 = NULL; 3217 3218 uint8_t idMmio2 = pCur->idMmio2; 3219 Assert(idMmio2 <= RT_ELEMENTS(pVM->pgm.s.apMmio2RangesR3)); 3220 if (idMmio2 <= RT_ELEMENTS(pVM->pgm.s.apMmio2RangesR3)) 3221 { 3222 Assert(pVM->pgm.s.apMmio2RangesR3[idMmio2 - 1] == pCur); 3223 pVM->pgm.s.apMmio2RangesR3[idMmio2 - 1] = NULL; 3224 pVM->pgm.s.apMmio2RangesR0[idMmio2 - 1] = NIL_RTR0PTR; 3392 { 3393 PGMPHYSMMIO2DEREGISTERREQ Mmio2DeregReq; 3394 Mmio2DeregReq.Hdr.u32Magic = SUPVMMR0REQHDR_MAGIC; 3395 Mmio2DeregReq.Hdr.cbReq = sizeof(Mmio2DeregReq); 3396 Mmio2DeregReq.idMmio2 = idx + 1; 3397 Mmio2DeregReq.cChunks = cChunks; 3398 Mmio2DeregReq.pDevIns = pDevIns; 3399 rc2 = VMMR3CallR0Emt(pVM, pVCpu, VMMR0_DO_PGM_PHYS_MMIO2_DEREGISTER, 0 /*u64Arg*/, &Mmio2DeregReq.Hdr); 3400 AssertLogRelMsgStmt(RT_SUCCESS(rc2), 3401 ("VMMR0_DO_PGM_PHYS_MMIO2_DEREGISTER: rc=%Rrc idx=%#x cChunks=%#x %s\n", 3402 rc2, idx, cChunks, pszDesc), 3403 rc = RT_SUCCESS(rc) ? rc2 : rc); 3225 3404 } 3226 3227 /* 3228 * Free the memory. 
3229 */ 3230 uint32_t const cGuestPages = pCur->cbReal >> GUEST_PAGE_SHIFT; 3231 uint32_t const cHostPages = RT_ALIGN_T(pCur->cbReal, HOST_PAGE_SIZE, RTGCPHYS) >> HOST_PAGE_SHIFT; 3232 #ifdef VBOX_WITH_PGM_NEM_MODE 3233 if (!pVM->pgm.s.fNemMode) 3234 #endif 3235 { 3236 int rc2 = SUPR3PageFreeEx(pCur->pvR3, cHostPages); 3237 AssertRC(rc2); 3238 if (RT_FAILURE(rc2) && RT_SUCCESS(rc)) 3239 rc = rc2; 3240 3241 rc2 = MMR3AdjustFixedReservation(pVM, -(int32_t)cGuestPages, pCur->RamRange.pszDesc); 3242 AssertRC(rc2); 3243 if (RT_FAILURE(rc2) && RT_SUCCESS(rc)) 3405 if (RT_FAILURE(rc2)) 3406 { 3407 LogRel(("PGMR3PhysMmio2Deregister: Deregistration failed: %Rrc; cChunks=%u %s\n", rc, cChunks, pszDesc)); 3408 if (RT_SUCCESS(rc)) 3244 3409 rc = rc2; 3245 3410 } 3246 #ifdef VBOX_WITH_PGM_NEM_MODE 3247 else 3248 { 3249 int rc2 = SUPR3PageFreeEx(pCur->pvR3, cHostPages); 3250 AssertRC(rc2); 3251 if (RT_FAILURE(rc2) && RT_SUCCESS(rc)) 3252 rc = rc2; 3411 3412 /* 3413 * Adjust the memory reservation. 3414 */ 3415 if (!PGM_IS_IN_NEM_MODE(pVM) && RT_SUCCESS(rc2)) 3416 { 3417 rc2 = MMR3AdjustFixedReservation(pVM, -(int32_t)cGuestPages, pszDesc); 3418 AssertLogRelMsgStmt(RT_SUCCESS(rc2), ("rc=%Rrc cGuestPages=%#x\n", rc, cGuestPages), 3419 rc = RT_SUCCESS(rc) ? rc2 : rc); 3253 3420 } 3254 #endif 3255 3256 if (pCur->pPhysHandlerR3) 3257 { 3258 pgmHandlerPhysicalExDestroy(pVM, pCur->pPhysHandlerR3); 3259 pCur->pPhysHandlerR3 = NULL; 3260 } 3261 3262 /* we're leaking hyper memory here if done at runtime. */ 3263 #ifdef VBOX_STRICT 3264 VMSTATE const enmState = VMR3GetState(pVM); 3265 AssertMsg( enmState == VMSTATE_POWERING_OFF 3266 || enmState == VMSTATE_POWERING_OFF_LS 3267 || enmState == VMSTATE_OFF 3268 || enmState == VMSTATE_OFF_LS 3269 || enmState == VMSTATE_DESTROYING 3270 || enmState == VMSTATE_TERMINATED 3271 || enmState == VMSTATE_CREATING 3272 , ("%s\n", VMR3GetStateName(enmState))); 3273 #endif 3274 3275 if (pCur->RamRange.fFlags & PGM_RAM_RANGE_FLAGS_FLOATING) 3276 { 3277 const size_t cbRange = RT_UOFFSETOF_DYN(PGMREGMMIO2RANGE, RamRange.aPages[cGuestPages]); 3278 size_t const cChunkPages = RT_ALIGN_Z(cbRange, HOST_PAGE_SIZE) >> HOST_PAGE_SHIFT; 3279 SUPR3PageFreeEx(pCur, cChunkPages); 3280 } 3281 /*else 3282 { 3283 rc = MMHyperFree(pVM, pCur); - does not work, see the alloc call. 3284 AssertRCReturn(rc, rc); 3285 } */ 3286 3287 3288 /* update page count stats */ 3289 pVM->pgm.s.cAllPages -= cGuestPages; 3290 pVM->pgm.s.cPrivatePages -= cGuestPages; 3291 3292 /* next */ 3293 pCur = pNext; 3421 3422 /* Are we done? */ 3294 3423 if (hMmio2 != NIL_PGMMMIO2HANDLE) 3295 { 3296 if (fFlags & PGMREGMMIO2RANGE_F_LAST_CHUNK) 3297 break; 3298 hMmio2++; 3299 Assert(pCur->idMmio2 == hMmio2); 3300 Assert(pCur->pDevInsR3 == pDevIns); 3301 Assert(!(pCur->fFlags & PGMREGMMIO2RANGE_F_FIRST_CHUNK)); 3302 } 3303 } 3304 else 3305 { 3306 pPrev = pCur; 3307 pCur = pCur->pNextR3; 3424 break; 3308 3425 } 3309 3426 } 3310 3427 pgmPhysInvalidatePageMapTLB(pVM); 3311 3428 PGM_UNLOCK(pVM); 3312 return !cFound && hMmio2 != NIL_PGMMMIO2HANDLE ? VERR_NOT_FOUND : rc; 3313 } 3314 3315 3316 /** 3317 * Maps a MMIO2 region. 3318 * 3319 * This is typically done when a guest / the bios / state loading changes the 3320 * PCI config. The replacing of base memory has the same restrictions as during 3321 * registration, of course. 3322 * 3323 * @returns VBox status code. 3324 * 3325 * @param pVM The cross context VM structure. 3326 * @param pDevIns The device instance owning the region. 3327 * @param hMmio2 The handle of the region to map. 
3328 * @param GCPhys The guest-physical address to be remapped. 3329 */ 3330 VMMR3_INT_DECL(int) PGMR3PhysMmio2Map(PVM pVM, PPDMDEVINS pDevIns, PGMMMIO2HANDLE hMmio2, RTGCPHYS GCPhys) 3331 { 3332 /* 3333 * Validate input. 3429 return !cFound && hMmio2 != NIL_PGMMMIO2HANDLE ? VERR_NOT_FOUND : rc; 3430 } 3431 3432 3433 /** 3434 * Worker form PGMR3PhysMmio2Map. 3435 */ 3436 static int pgmR3PhysMmio2MapLocked(PVM pVM, uint32_t const idxFirst, uint32_t const cChunks, 3437 RTGCPHYS const GCPhys, RTGCPHYS const GCPhysLast) 3438 { 3439 /* 3440 * Validate the mapped status now that we've got the lock. 3441 */ 3442 for (uint32_t iChunk = 0, idx = idxFirst; iChunk < cChunks; iChunk++, idx++) 3443 { 3444 AssertReturn( pVM->pgm.s.aMmio2Ranges[idx].GCPhys == NIL_RTGCPHYS 3445 && !(pVM->pgm.s.aMmio2Ranges[idx].fFlags & PGMREGMMIO2RANGE_F_MAPPED), 3446 VERR_WRONG_ORDER); 3447 PPGMRAMRANGE const pRamRange = pVM->pgm.s.apMmio2RamRanges[idx]; 3448 AssertReturn(pRamRange->GCPhys == NIL_RTGCPHYS, VERR_INTERNAL_ERROR_3); 3449 AssertReturn(pRamRange->GCPhysLast == NIL_RTGCPHYS, VERR_INTERNAL_ERROR_3); 3450 Assert(pRamRange->pbR3 == pVM->pgm.s.aMmio2Ranges[idx].pbR3); 3451 Assert(pRamRange->idRange == pVM->pgm.s.aMmio2Ranges[idx].idRamRange); 3452 } 3453 3454 const char * const pszDesc = pVM->pgm.s.apMmio2RamRanges[idxFirst]->pszDesc; 3455 #ifdef VBOX_WITH_NATIVE_NEM 3456 uint32_t const fNemFlags = NEM_NOTIFY_PHYS_MMIO_EX_F_MMIO2 3457 | (pVM->pgm.s.aMmio2Ranges[idxFirst].fFlags & PGMREGMMIO2RANGE_F_TRACK_DIRTY_PAGES 3458 ? NEM_NOTIFY_PHYS_MMIO_EX_F_TRACK_DIRTY_PAGES : 0); 3459 #endif 3460 3461 /* 3462 * Now, check if this falls into a regular RAM range or if we should use 3463 * the ad-hoc one. 3334 3464 * 3335 * Note! It's safe to walk the MMIO/MMIO2 list since registrations only 3336 * happens during VM construction. 3337 */ 3338 VM_ASSERT_EMT_RETURN(pVM, VERR_VM_THREAD_NOT_EMT); 3339 AssertPtrReturn(pDevIns, VERR_INVALID_PARAMETER); 3340 AssertReturn(GCPhys != NIL_RTGCPHYS, VERR_INVALID_PARAMETER); 3341 AssertReturn(GCPhys != 0, VERR_INVALID_PARAMETER); 3342 AssertReturn(!(GCPhys & GUEST_PAGE_OFFSET_MASK), VERR_INVALID_PARAMETER); 3343 AssertReturn(hMmio2 != NIL_PGMMMIO2HANDLE, VERR_INVALID_HANDLE); 3344 3345 PPGMREGMMIO2RANGE pFirstMmio = pgmR3PhysMmio2Find(pVM, pDevIns, UINT32_MAX, UINT32_MAX, hMmio2); 3346 AssertReturn(pFirstMmio, VERR_NOT_FOUND); 3347 Assert(pFirstMmio->fFlags & PGMREGMMIO2RANGE_F_FIRST_CHUNK); 3348 3349 PPGMREGMMIO2RANGE pLastMmio = pFirstMmio; 3350 RTGCPHYS cbRange = 0; 3351 for (;;) 3352 { 3353 AssertReturn(!(pLastMmio->fFlags & PGMREGMMIO2RANGE_F_MAPPED), VERR_WRONG_ORDER); 3354 Assert(pLastMmio->RamRange.GCPhys == NIL_RTGCPHYS); 3355 Assert(pLastMmio->RamRange.GCPhysLast == NIL_RTGCPHYS); 3356 Assert(pLastMmio->pDevInsR3 == pFirstMmio->pDevInsR3); 3357 Assert(pLastMmio->iSubDev == pFirstMmio->iSubDev); 3358 Assert(pLastMmio->iRegion == pFirstMmio->iRegion); 3359 cbRange += pLastMmio->RamRange.cb; 3360 if (pLastMmio->fFlags & PGMREGMMIO2RANGE_F_LAST_CHUNK) 3361 break; 3362 pLastMmio = pLastMmio->pNextR3; 3363 } 3364 3365 RTGCPHYS GCPhysLast = GCPhys + cbRange - 1; 3366 AssertLogRelReturn(GCPhysLast > GCPhys, VERR_INVALID_PARAMETER); 3367 3368 /* 3369 * Find our location in the ram range list, checking for restriction 3370 * we don't bother implementing yet (partially overlapping, multiple 3371 * ram ranges). 
3372 */ 3373 PGM_LOCK_VOID(pVM); 3374 3375 AssertReturnStmt(!(pFirstMmio->fFlags & PGMREGMMIO2RANGE_F_MAPPED), PGM_UNLOCK(pVM), VERR_WRONG_ORDER); 3376 3377 bool fRamExists = false; 3378 PPGMRAMRANGE pRamPrev = NULL; 3379 PPGMRAMRANGE pRam = pVM->pgm.s.pRamRangesXR3; 3380 while (pRam && GCPhysLast >= pRam->GCPhys) 3381 { 3382 if ( GCPhys <= pRam->GCPhysLast 3383 && GCPhysLast >= pRam->GCPhys) 3384 { 3385 /* Completely within? */ 3386 AssertLogRelMsgReturnStmt( GCPhys >= pRam->GCPhys 3387 && GCPhysLast <= pRam->GCPhysLast, 3388 ("%RGp-%RGp (MMIOEx/%s) falls partly outside %RGp-%RGp (%s)\n", 3389 GCPhys, GCPhysLast, pFirstMmio->RamRange.pszDesc, 3390 pRam->GCPhys, pRam->GCPhysLast, pRam->pszDesc), 3391 PGM_UNLOCK(pVM), 3392 VERR_PGM_RAM_CONFLICT); 3393 3394 /* Check that all the pages are RAM pages. */ 3395 PPGMPAGE pPage = &pRam->aPages[(GCPhys - pRam->GCPhys) >> GUEST_PAGE_SHIFT]; 3396 uint32_t cPagesLeft = cbRange >> GUEST_PAGE_SHIFT; 3397 while (cPagesLeft-- > 0) 3398 { 3399 AssertLogRelMsgReturnStmt(PGM_PAGE_GET_TYPE(pPage) == PGMPAGETYPE_RAM, 3400 ("%RGp isn't a RAM page (%d) - mapping %RGp-%RGp (MMIO2/%s).\n", 3401 GCPhys, PGM_PAGE_GET_TYPE(pPage), GCPhys, GCPhysLast, pFirstMmio->RamRange.pszDesc), 3402 PGM_UNLOCK(pVM), 3403 VERR_PGM_RAM_CONFLICT); 3404 pPage++; 3405 } 3406 3407 /* There can only be one MMIO/MMIO2 chunk matching here! */ 3408 AssertLogRelMsgReturnStmt(pFirstMmio->fFlags & PGMREGMMIO2RANGE_F_LAST_CHUNK, 3409 ("%RGp-%RGp (MMIOEx/%s, flags %#X) consists of multiple chunks whereas the RAM somehow doesn't!\n", 3410 GCPhys, GCPhysLast, pFirstMmio->RamRange.pszDesc, pFirstMmio->fFlags), 3411 PGM_UNLOCK(pVM), 3412 VERR_PGM_PHYS_MMIO_EX_IPE); 3413 3414 fRamExists = true; 3415 break; 3416 } 3417 3418 /* next */ 3419 pRamPrev = pRam; 3420 pRam = pRam->pNextR3; 3421 } 3422 Log(("PGMR3PhysMmio2Map: %RGp-%RGp fRamExists=%RTbool %s\n", GCPhys, GCPhysLast, fRamExists, pFirstMmio->RamRange.pszDesc)); 3423 3424 3425 /* 3426 * Make the changes. 3427 */ 3428 RTGCPHYS GCPhysCur = GCPhys; 3429 for (PPGMREGMMIO2RANGE pCurMmio = pFirstMmio; ; pCurMmio = pCurMmio->pNextR3) 3430 { 3431 pCurMmio->RamRange.GCPhys = GCPhysCur; 3432 pCurMmio->RamRange.GCPhysLast = GCPhysCur + pCurMmio->RamRange.cb - 1; 3433 if (pCurMmio->fFlags & PGMREGMMIO2RANGE_F_LAST_CHUNK) 3434 { 3435 Assert(pCurMmio->RamRange.GCPhysLast == GCPhysLast); 3436 break; 3437 } 3438 GCPhysCur += pCurMmio->RamRange.cb; 3439 } 3440 3441 if (fRamExists) 3442 { 3465 * Note! For reasons of simplictly, we're considering the whole MMIO2 area 3466 * here rather than individual chunks. 3467 */ 3468 int rc = VINF_SUCCESS; 3469 uint32_t idxInsert = UINT32_MAX; 3470 PPGMRAMRANGE const pOverlappingRange = pgmR3PhysRamRangeFindOverlapping(pVM, GCPhys, GCPhysLast, &idxInsert); 3471 if (pOverlappingRange) 3472 { 3473 /* Simplification: all within the same range. */ 3474 AssertLogRelMsgReturn( GCPhys >= pOverlappingRange->GCPhys 3475 && GCPhysLast <= pOverlappingRange->GCPhysLast, 3476 ("%RGp-%RGp (MMIO2/%s) falls partly outside %RGp-%RGp (%s)\n", 3477 GCPhys, GCPhysLast, pszDesc, 3478 pOverlappingRange->GCPhys, pOverlappingRange->GCPhysLast, pOverlappingRange->pszDesc), 3479 VERR_PGM_RAM_CONFLICT); 3480 3481 /* Check that is isn't an ad hoc range, but a real RAM range. 
*/ 3482 AssertLogRelMsgReturn(!PGM_RAM_RANGE_IS_AD_HOC(pOverlappingRange), 3483 ("%RGp-%RGp (MMIO2/%s) mapping attempt in non-RAM range: %RGp-%RGp (%s)\n", 3484 GCPhys, GCPhysLast, pszDesc, 3485 pOverlappingRange->GCPhys, pOverlappingRange->GCPhysLast, pOverlappingRange->pszDesc), 3486 VERR_PGM_RAM_CONFLICT); 3487 3488 /* There can only be one MMIO2 chunk matching here! */ 3489 AssertLogRelMsgReturn(cChunks == 1, 3490 ("%RGp-%RGp (MMIO2/%s) consists of %u chunks whereas the RAM (%s) somehow doesn't!\n", 3491 GCPhys, GCPhysLast, pszDesc, cChunks, pOverlappingRange->pszDesc), 3492 VERR_PGM_PHYS_MMIO_EX_IPE); 3493 3494 /* Check that it's all RAM pages. */ 3495 PCPGMPAGE pPage = &pOverlappingRange->aPages[(GCPhys - pOverlappingRange->GCPhys) >> GUEST_PAGE_SHIFT]; 3496 uint32_t const cMmio2Pages = pVM->pgm.s.apMmio2RamRanges[idxFirst]->cb >> GUEST_PAGE_SHIFT; 3497 uint32_t cPagesLeft = cMmio2Pages; 3498 while (cPagesLeft-- > 0) 3499 { 3500 AssertLogRelMsgReturn(PGM_PAGE_GET_TYPE(pPage) == PGMPAGETYPE_RAM, 3501 ("%RGp-%RGp (MMIO2/%s): %RGp is not a RAM page - type=%d desc=%s\n", GCPhys, GCPhysLast, 3502 pszDesc, pOverlappingRange->GCPhys, PGM_PAGE_GET_TYPE(pPage), pOverlappingRange->pszDesc), 3503 VERR_PGM_RAM_CONFLICT); 3504 pPage++; 3505 } 3506 3507 #ifdef VBOX_WITH_PGM_NEM_MODE 3508 /* We cannot mix MMIO2 into a RAM range in simplified memory mode because pOverlappingRange->pbR3 can't point 3509 both at the RAM and MMIO2, so we won't ever write & read from the actual MMIO2 memory if we try. */ 3510 AssertLogRelMsgReturn(!VM_IS_NEM_ENABLED(pVM), 3511 ("Putting %s at %RGp-%RGp is not possible in NEM mode because existing %RGp-%RGp (%s) mapping\n", 3512 pszDesc, GCPhys, GCPhysLast, 3513 pOverlappingRange->GCPhys, pOverlappingRange->GCPhysLast, pOverlappingRange->pszDesc), 3514 VERR_PGM_NOT_SUPPORTED_FOR_NEM_MODE); 3515 #endif 3516 3443 3517 /* 3444 3518 * Make all the pages in the range MMIO/ZERO pages, freeing any 3445 * RAM pages currently mapped here. This might not be 100% correct 3446 * for PCI memory, but we're doing the same thing for MMIO2 pages. 3447 * 3448 * We replace these MMIO/ZERO pages with real pages in the MMIO2 case. 3519 * RAM pages currently mapped here. This might not be 100% correct, 3520 * but so what, we do the same from MMIO... 3449 3521 */ 3450 Assert(pFirstMmio->fFlags & PGMREGMMIO2RANGE_F_LAST_CHUNK); /* Only one chunk */3451 Assert (pFirstMmio->pvR3 == pFirstMmio->RamRange.pvR3);3452 Assert(pFirstMmio->RamRange.pvR3 != NULL); 3453 3454 #ifdef VBOX_WITH_PGM_NEM_MODE 3455 /* We cannot mix MMIO2 into a RAM range in simplified memory mode because pRam->pvR3 can't point 3456 both at the RAM and MMIO2, so we won't ever write & read from the actual MMIO2 memory if we try. 
*/3457 AssertLogRelMsgReturn(!pVM->pgm.s.fNemMode, ("%s at %RGp-%RGp\n", pFirstMmio->RamRange.pszDesc, GCPhys, GCPhysLast),3458 VERR_PGM_NOT_SUPPORTED_FOR_NEM_MODE);3459 #endif 3460 3461 int rc = pgmR3PhysFreePageRange(pVM, pRam, GCPhys, GCPhysLast, pFirstMmio->RamRange.pvR3); 3462 AssertRCReturnStmt(rc, PGM_UNLOCK(pVM), rc);3463 3464 /* Replace the pages, freeing all present RAM pages.*/3465 PPGMPAGE pPageSrc = &p FirstMmio->RamRange.aPages[0];3466 PPGMPAGE pPageDst = &p Ram->aPages[(GCPhys - pRam->GCPhys) >> GUEST_PAGE_SHIFT];3467 uint32_t cPagesLeft = pFirstMmio->RamRange.cb >> GUEST_PAGE_SHIFT;3522 rc = pgmR3PhysFreePageRange(pVM, pOverlappingRange, GCPhys, GCPhysLast, NULL); 3523 AssertRCReturn(rc, rc); 3524 3525 Log(("PGMR3PhysMmio2Map: %RGp-%RGp %s - inside %RGp-%RGp %s\n", GCPhys, GCPhysLast, pszDesc, 3526 pOverlappingRange->GCPhys, pOverlappingRange->GCPhysLast, pOverlappingRange->pszDesc)); 3527 3528 /* 3529 * We're all in for mapping it now. Update the MMIO2 range to reflect it. 3530 */ 3531 pVM->pgm.s.aMmio2Ranges[idxFirst].GCPhys = GCPhys; 3532 pVM->pgm.s.aMmio2Ranges[idxFirst].fFlags |= PGMREGMMIO2RANGE_F_OVERLAPPING | PGMREGMMIO2RANGE_F_MAPPED; 3533 3534 /* 3535 * Replace the pages in the range. 3536 */ 3537 PPGMPAGE pPageSrc = &pVM->pgm.s.apMmio2RamRanges[idxFirst]->aPages[0]; 3538 PPGMPAGE pPageDst = &pOverlappingRange->aPages[(GCPhys - pOverlappingRange->GCPhys) >> GUEST_PAGE_SHIFT]; 3539 cPagesLeft = cMmio2Pages; 3468 3540 while (cPagesLeft-- > 0) 3469 3541 { … … 3479 3551 PGM_PAGE_SET_PTE_INDEX(pVM, pPageDst, 0); 3480 3552 PGM_PAGE_SET_TRACKING(pVM, pPageDst, 0); 3481 /* NEM state is set by pgmR3PhysFreePageRange. */3553 /* NEM state is not relevant, see VERR_PGM_NOT_SUPPORTED_FOR_NEM_MODE above. */ 3482 3554 3483 3555 pVM->pgm.s.cZeroPages--; 3484 GCPhys += GUEST_PAGE_SIZE;3485 3556 pPageSrc++; 3486 3557 pPageDst++; 3487 3558 } 3488 3559 3489 /* Flush physical page map TLB. */3490 pgmPhysInvalidatePageMapTLB(pVM);3491 3492 3560 /* Force a PGM pool flush as guest ram references have been changed. */ 3493 /** @todo not entirely SMP safe; assuming for now the guest takes care of 3494 * this internally (not touch mapped mmio while changing the mapping). */ 3561 /** @todo not entirely SMP safe; assuming for now the guest takes 3562 * care of this internally (not touch mapped mmio while changing the 3563 * mapping). */ 3495 3564 PVMCPU pVCpu = VMMGetCpu(pVM); 3496 3565 pVCpu->pgm.s.fSyncFlags |= PGM_SYNC_CLEAR_PGM_POOL; … … 3502 3571 * No RAM range, insert the ones prepared during registration. 3503 3572 */ 3504 for (PPGMREGMMIO2RANGE pCurMmio = pFirstMmio; ; pCurMmio = pCurMmio->pNextR3) 3505 { 3573 Log(("PGMR3PhysMmio2Map: %RGp-%RGp %s - no RAM overlap\n", GCPhys, GCPhysLast, pszDesc)); 3574 RTGCPHYS GCPhysCur = GCPhys; 3575 uint32_t iChunk = 0; 3576 uint32_t idx = idxFirst; 3577 for (; iChunk < cChunks; iChunk++, idx++) 3578 { 3579 PPGMREGMMIO2RANGE const pMmio2 = &pVM->pgm.s.aMmio2Ranges[idx]; 3580 PPGMRAMRANGE const pRamRange = pVM->pgm.s.apMmio2RamRanges[idx]; 3581 Assert(pRamRange->idRange == pMmio2->idRamRange); 3582 Assert(pMmio2->GCPhys == NIL_RTGCPHYS); 3583 3506 3584 #ifdef VBOX_WITH_NATIVE_NEM 3507 3585 /* Tell NEM and get the new NEM state for the pages. */ … … 3509 3587 if (VM_IS_NEM_ENABLED(pVM)) 3510 3588 { 3511 int rc = NEMR3NotifyPhysMmioExMapEarly(pVM, pCurMmio->RamRange.GCPhys, 3512 pCurMmio->RamRange.GCPhysLast - pCurMmio->RamRange.GCPhys + 1, 3513 NEM_NOTIFY_PHYS_MMIO_EX_F_MMIO2 3514 | (pCurMmio->fFlags & PGMREGMMIO2RANGE_F_TRACK_DIRTY_PAGES 3515 ? 
NEM_NOTIFY_PHYS_MMIO_EX_F_TRACK_DIRTY_PAGES : 0), 3516 NULL /*pvRam*/, pCurMmio->RamRange.pvR3, 3517 &u2NemState, &pCurMmio->RamRange.uNemRange); 3518 AssertLogRelRCReturnStmt(rc, PGM_UNLOCK(pVM), rc); 3589 rc = NEMR3NotifyPhysMmioExMapEarly(pVM, GCPhysCur, pRamRange->cb, fNemFlags, NULL /*pvRam*/, pRamRange->pbR3, 3590 &u2NemState, &pRamRange->uNemRange); 3591 AssertLogRelMsgBreak(RT_SUCCESS(rc), 3592 ("%RGp LB %RGp fFlags=%#x (%s)\n", 3593 GCPhysCur, pRamRange->cb, pMmio2->fFlags, pRamRange->pszDesc)); 3594 pMmio2->fFlags |= PGMREGMMIO2RANGE_F_MAPPED; /* Set this early to indicate that NEM has been notified. */ 3519 3595 } 3520 3596 #endif 3521 3597 3522 3598 /* Clear the tracking data of pages we're going to reactivate. */ 3523 PPGMPAGE pPageSrc = &p CurMmio->RamRange.aPages[0];3524 uint32_t cPagesLeft = p CurMmio->RamRange.cb >> GUEST_PAGE_SHIFT;3599 PPGMPAGE pPageSrc = &pRamRange->aPages[0]; 3600 uint32_t cPagesLeft = pRamRange->cb >> GUEST_PAGE_SHIFT; 3525 3601 while (cPagesLeft-- > 0) 3526 3602 { … … 3533 3609 } 3534 3610 3535 /* link in the ram range */ 3536 pgmR3PhysLinkRamRange(pVM, &pCurMmio->RamRange, pRamPrev); 3537 3538 if (pCurMmio->fFlags & PGMREGMMIO2RANGE_F_LAST_CHUNK) 3539 { 3540 Assert(pCurMmio->RamRange.GCPhysLast == GCPhysLast); 3541 break; 3542 } 3543 pRamPrev = &pCurMmio->RamRange; 3611 /* Insert the RAM range into the lookup table. */ 3612 rc = pgmR3PhysRamRangeInsertLookup(pVM, pRamRange, GCPhysCur, &idxInsert); 3613 AssertRCBreak(rc); 3614 3615 /* Mark the range as fully mapped. */ 3616 pMmio2->fFlags &= ~PGMREGMMIO2RANGE_F_OVERLAPPING; 3617 pMmio2->fFlags |= PGMREGMMIO2RANGE_F_MAPPED; 3618 pMmio2->GCPhys = GCPhysCur; 3619 3620 /* Advance. */ 3621 GCPhysCur += pRamRange->cb; 3622 } 3623 if (RT_FAILURE(rc)) 3624 { 3625 /* 3626 * Bail out anything we've done so far. 3627 */ 3628 idxInsert -= 1; 3629 do 3630 { 3631 PPGMREGMMIO2RANGE const pMmio2 = &pVM->pgm.s.aMmio2Ranges[idx]; 3632 PPGMRAMRANGE const pRamRange = pVM->pgm.s.apMmio2RamRanges[idx]; 3633 3634 #ifdef VBOX_WITH_NATIVE_NEM 3635 if ( VM_IS_NEM_ENABLED(pVM) 3636 && (pVM->pgm.s.aMmio2Ranges[idx].fFlags & PGMREGMMIO2RANGE_F_MAPPED)) 3637 { 3638 uint8_t u2NemState = UINT8_MAX; 3639 NEMR3NotifyPhysMmioExUnmap(pVM, GCPhysCur, pRamRange->cb, fNemFlags, NULL, pRamRange->pbR3, 3640 &u2NemState, &pRamRange->uNemRange); 3641 if (u2NemState != UINT8_MAX) 3642 pgmPhysSetNemStateForPages(pRamRange->aPages, pRamRange->cb >> GUEST_PAGE_SHIFT, u2NemState); 3643 } 3644 #endif 3645 if (pMmio2->GCPhys != NIL_RTGCPHYS) 3646 pgmR3PhysRamRangeRemoveLookup(pVM, pRamRange, &idxInsert); 3647 3648 pMmio2->GCPhys = NIL_RTGCPHYS; 3649 pMmio2->fFlags &= ~PGMREGMMIO2RANGE_F_MAPPED; 3650 3651 idx--; 3652 } while (iChunk-- > 0); 3653 return rc; 3544 3654 } 3545 3655 } … … 3554 3664 * release log. 3555 3665 */ 3556 if ( pFirstMmio->pPhysHandlerR3 3557 && (pFirstMmio->fFlags & PGMREGMMIO2RANGE_F_TRACKING_ENABLED)) 3558 pgmR3PhysMmio2EnableDirtyPageTracing(pVM, pFirstMmio); 3559 3560 /* 3561 * We're good, set the flags and invalid the mapping TLB. 3562 */ 3563 for (PPGMREGMMIO2RANGE pCurMmio = pFirstMmio; ; pCurMmio = pCurMmio->pNextR3) 3564 { 3565 pCurMmio->fFlags |= PGMREGMMIO2RANGE_F_MAPPED; 3566 if (fRamExists) 3567 pCurMmio->fFlags |= PGMREGMMIO2RANGE_F_OVERLAPPING; 3666 if ( pVM->pgm.s.aMmio2Ranges[idxFirst].pPhysHandlerR3 3667 && (pVM->pgm.s.aMmio2Ranges[idxFirst].fFlags & PGMREGMMIO2RANGE_F_TRACKING_ENABLED)) 3668 pgmR3PhysMmio2EnableDirtyPageTracing(pVM, idxFirst, cChunks); 3669 3670 /* Flush physical page map TLB. 
*/ 3671 pgmPhysInvalidatePageMapTLB(pVM); 3672 3673 #ifdef VBOX_WITH_NATIVE_NEM 3674 /* 3675 * Late NEM notification (currently unused). 3676 */ 3677 if (VM_IS_NEM_ENABLED(pVM)) 3678 { 3679 if (pOverlappingRange) 3680 { 3681 uint8_t * const pbRam = pOverlappingRange->pbR3 ? &pOverlappingRange->pbR3[GCPhys - pOverlappingRange->GCPhys] : NULL; 3682 rc = NEMR3NotifyPhysMmioExMapLate(pVM, GCPhys, GCPhysLast - GCPhys + 1U, 3683 fNemFlags | NEM_NOTIFY_PHYS_MMIO_EX_F_REPLACE, pbRam, 3684 pVM->pgm.s.aMmio2Ranges[idxFirst].pbR3, NULL /*puNemRange*/); 3685 } 3568 3686 else 3569 pCurMmio->fFlags &= ~PGMREGMMIO2RANGE_F_OVERLAPPING; 3570 if (pCurMmio->fFlags & PGMREGMMIO2RANGE_F_LAST_CHUNK) 3571 break; 3572 } 3573 pgmPhysInvalidatePageMapTLB(pVM); 3574 3575 #ifdef VBOX_WITH_NATIVE_NEM 3576 /* 3577 * Late NEM notification. 3578 */ 3579 if (VM_IS_NEM_ENABLED(pVM)) 3580 { 3581 int rc; 3582 uint32_t fNemFlags = NEM_NOTIFY_PHYS_MMIO_EX_F_MMIO2; 3583 if (pFirstMmio->fFlags & PGMREGMMIO2RANGE_F_TRACK_DIRTY_PAGES) 3584 fNemFlags |= NEM_NOTIFY_PHYS_MMIO_EX_F_TRACK_DIRTY_PAGES; 3585 if (fRamExists) 3586 rc = NEMR3NotifyPhysMmioExMapLate(pVM, GCPhys, GCPhysLast - GCPhys + 1, fNemFlags | NEM_NOTIFY_PHYS_MMIO_EX_F_REPLACE, 3587 pRam->pvR3 ? (uint8_t *)pRam->pvR3 + GCPhys - pRam->GCPhys : NULL, pFirstMmio->pvR3, 3588 NULL /*puNemRange*/); 3589 else 3590 { 3591 rc = VINF_SUCCESS; 3592 for (PPGMREGMMIO2RANGE pCurMmio = pFirstMmio; ; pCurMmio = pCurMmio->pNextR3) 3593 { 3594 rc = NEMR3NotifyPhysMmioExMapLate(pVM, pCurMmio->RamRange.GCPhys, pCurMmio->RamRange.cb, fNemFlags, 3595 NULL, pCurMmio->RamRange.pvR3, &pCurMmio->RamRange.uNemRange); 3596 if ((pCurMmio->fFlags & PGMREGMMIO2RANGE_F_LAST_CHUNK) || RT_FAILURE(rc)) 3597 break; 3687 { 3688 for (uint32_t iChunk = 0, idx = idxFirst; iChunk < cChunks; iChunk++, idx++) 3689 { 3690 PPGMREGMMIO2RANGE const pMmio2 = &pVM->pgm.s.aMmio2Ranges[idx]; 3691 PPGMRAMRANGE const pRamRange = pVM->pgm.s.apMmio2RamRanges[idx]; 3692 Assert(pMmio2->GCPhys == pRamRange->GCPhys); 3693 3694 rc = NEMR3NotifyPhysMmioExMapLate(pVM, pRamRange->GCPhys, pRamRange->cb, fNemFlags, NULL /*pvRam*/, 3695 pRamRange->pbR3, &pRamRange->uNemRange); 3696 AssertRCBreak(rc); 3598 3697 } 3599 3698 } 3600 AssertLogRelRCReturnStmt(rc, PGMR3PhysMmio2Unmap(pVM, pDevIns, hMmio2, GCPhys); PGM_UNLOCK(pVM), rc);3601 }3602 #endif 3603 3604 PGM_UNLOCK(pVM); 3699 AssertLogRelRCReturnStmt(rc, 3700 PGMR3PhysMmio2Unmap(pVM, pVM->pgm.s.aMmio2Ranges[idxFirst].pDevInsR3, idxFirst + 1, GCPhys), 3701 rc); 3702 } 3703 #endif 3605 3704 3606 3705 return VINF_SUCCESS; … … 3609 3708 3610 3709 /** 3611 * Unmaps anMMIO2 region.3710 * Maps a MMIO2 region. 3612 3711 * 3613 3712 * This is typically done when a guest / the bios / state loading changes the 3614 * PCI config. The replacing of base memory has the same restrictions as during3713 * PCI config. The replacing of base memory has the same restrictions as during 3615 3714 * registration, of course. 3616 */ 3617 VMMR3_INT_DECL(int) PGMR3PhysMmio2Unmap(PVM pVM, PPDMDEVINS pDevIns, PGMMMIO2HANDLE hMmio2, RTGCPHYS GCPhys) 3618 { 3619 /* 3620 * Validate input 3715 * 3716 * @returns VBox status code. 3717 * 3718 * @param pVM The cross context VM structure. 3719 * @param pDevIns The device instance owning the region. 3720 * @param hMmio2 The handle of the region to map. 3721 * @param GCPhys The guest-physical address to be remapped. 3722 */ 3723 VMMR3_INT_DECL(int) PGMR3PhysMmio2Map(PVM pVM, PPDMDEVINS pDevIns, PGMMMIO2HANDLE hMmio2, RTGCPHYS GCPhys) 3724 { 3725 /* 3726 * Validate input. 
3621 3727 */ 3622 3728 VM_ASSERT_EMT_RETURN(pVM, VERR_VM_THREAD_NOT_EMT); 3623 3729 AssertPtrReturn(pDevIns, VERR_INVALID_PARAMETER); 3730 AssertReturn(GCPhys != NIL_RTGCPHYS, VERR_INVALID_PARAMETER); 3731 AssertReturn(GCPhys != 0, VERR_INVALID_PARAMETER); 3732 AssertReturn(!(GCPhys & GUEST_PAGE_OFFSET_MASK), VERR_INVALID_PARAMETER); 3624 3733 AssertReturn(hMmio2 != NIL_PGMMMIO2HANDLE, VERR_INVALID_HANDLE); 3625 if (GCPhys != NIL_RTGCPHYS) 3626 { 3627 AssertReturn(GCPhys != 0, VERR_INVALID_PARAMETER); 3628 AssertReturn(!(GCPhys & GUEST_PAGE_OFFSET_MASK), VERR_INVALID_PARAMETER); 3629 } 3630 3631 PPGMREGMMIO2RANGE pFirstMmio = pgmR3PhysMmio2Find(pVM, pDevIns, UINT32_MAX, UINT32_MAX, hMmio2); 3632 AssertReturn(pFirstMmio, VERR_NOT_FOUND); 3633 Assert(pFirstMmio->fFlags & PGMREGMMIO2RANGE_F_FIRST_CHUNK); 3634 3734 3735 uint32_t cChunks = 0; 3736 uint32_t const idxFirst = pgmR3PhysMmio2ResolveHandle(pVM, pDevIns, hMmio2, &cChunks); 3737 AssertReturn((int32_t)idxFirst >= 0, (int32_t)idxFirst); 3738 3739 /* Gather the full range size so we can validate the mapping address properly. */ 3740 RTGCPHYS cbRange = 0; 3741 for (uint32_t iChunk = 0, idx = idxFirst; iChunk < cChunks; iChunk++, idx++) 3742 cbRange += pVM->pgm.s.apMmio2RamRanges[idx]->cb; 3743 3744 RTGCPHYS const GCPhysLast = GCPhys + cbRange - 1; 3745 AssertLogRelReturn(GCPhysLast > GCPhys, VERR_INVALID_PARAMETER); 3746 3747 /* 3748 * Take the PGM lock and call worker. 3749 */ 3635 3750 int rc = PGM_LOCK(pVM); 3636 3751 AssertRCReturn(rc, rc); 3637 3752 3638 PPGMREGMMIO2RANGE pLastMmio = pFirstMmio; 3639 RTGCPHYS cbRange = 0; 3640 for (;;) 3641 { 3642 AssertReturnStmt(pLastMmio->fFlags & PGMREGMMIO2RANGE_F_MAPPED, PGM_UNLOCK(pVM), VERR_WRONG_ORDER); 3643 AssertReturnStmt(pLastMmio->RamRange.GCPhys == GCPhys + cbRange || GCPhys == NIL_RTGCPHYS, PGM_UNLOCK(pVM), VERR_INVALID_PARAMETER); 3644 Assert(pLastMmio->pDevInsR3 == pFirstMmio->pDevInsR3); 3645 Assert(pLastMmio->iSubDev == pFirstMmio->iSubDev); 3646 Assert(pLastMmio->iRegion == pFirstMmio->iRegion); 3647 cbRange += pLastMmio->RamRange.cb; 3648 if (pLastMmio->fFlags & PGMREGMMIO2RANGE_F_LAST_CHUNK) 3649 break; 3650 pLastMmio = pLastMmio->pNextR3; 3651 } 3652 3653 Log(("PGMR3PhysMmio2Unmap: %RGp-%RGp %s\n", 3654 pFirstMmio->RamRange.GCPhys, pLastMmio->RamRange.GCPhysLast, pFirstMmio->RamRange.pszDesc)); 3655 3656 uint16_t const fOldFlags = pFirstMmio->fFlags; 3657 AssertReturnStmt(fOldFlags & PGMREGMMIO2RANGE_F_MAPPED, PGM_UNLOCK(pVM), VERR_WRONG_ORDER); 3753 rc = pgmR3PhysMmio2MapLocked(pVM, idxFirst, cChunks, GCPhys, GCPhysLast); 3754 #ifdef VBOX_STRICT 3755 pgmPhysAssertRamRangesLocked(pVM, false /*fInUpdate*/, false /*fRamRelaxed*/); 3756 #endif 3757 3758 PGM_UNLOCK(pVM); 3759 return rc; 3760 } 3761 3762 3763 /** 3764 * Worker for PGMR3PhysMmio2Unmap. 3765 */ 3766 static int pgmR3PhysMmio2UnmapLocked(PVM pVM, uint32_t const idxFirst, uint32_t const cChunks, RTGCPHYS const GCPhysIn) 3767 { 3768 /* 3769 * Validate input. 
3770 */ 3771 RTGCPHYS cbRange = 0; 3772 for (uint32_t iChunk = 0, idx = idxFirst; iChunk < cChunks; iChunk++, idx++) 3773 { 3774 PPGMREGMMIO2RANGE const pMmio2 = &pVM->pgm.s.aMmio2Ranges[idx]; 3775 PPGMRAMRANGE const pRamRange = pVM->pgm.s.apMmio2RamRanges[idx]; 3776 AssertReturn(pMmio2->idRamRange == pRamRange->idRange, VERR_INTERNAL_ERROR_3); 3777 AssertReturn(pMmio2->fFlags & PGMREGMMIO2RANGE_F_MAPPED, VERR_WRONG_ORDER); 3778 AssertReturn(pMmio2->GCPhys != NIL_RTGCPHYS, VERR_WRONG_ORDER); 3779 cbRange += pRamRange->cb; 3780 } 3781 3782 PPGMREGMMIO2RANGE const pFirstMmio2 = &pVM->pgm.s.aMmio2Ranges[idxFirst]; 3783 PPGMRAMRANGE const pFirstRamRange = pVM->pgm.s.apMmio2RamRanges[idxFirst]; 3784 const char * const pszDesc = pFirstRamRange->pszDesc; 3785 AssertLogRelMsgReturn(GCPhysIn == pFirstMmio2->GCPhys || GCPhysIn == NIL_RTGCPHYS, 3786 ("GCPhys=%RGp, actual address is %RGp\n", GCPhysIn, pFirstMmio2->GCPhys), 3787 VERR_MISMATCH); 3788 RTGCPHYS const GCPhys = pFirstMmio2->GCPhys; /* (it's always NIL_RTGCPHYS) */ 3789 Log(("PGMR3PhysMmio2Unmap: %RGp-%RGp %s\n", GCPhys, GCPhys + cbRange - 1U, pszDesc)); 3790 3791 uint16_t const fOldFlags = pFirstMmio2->fFlags; 3792 Assert(fOldFlags & PGMREGMMIO2RANGE_F_MAPPED); 3793 3794 /* Find the first entry in the lookup table and verify the overlapping flag. */ 3795 uint32_t idxLookup = pgmR3PhysRamRangeFindOverlappingIndex(pVM, GCPhys, GCPhys + pFirstRamRange->cb - 1U); 3796 AssertLogRelMsgReturn(idxLookup < pVM->pgm.s.RamRangeUnion.cLookupEntries, 3797 ("MMIO2 range not found at %RGp LB %RGp in the lookup table! (%s)\n", 3798 GCPhys, pFirstRamRange->cb, pszDesc), 3799 VERR_INTERNAL_ERROR_2); 3800 3801 uint32_t const idLookupRange = PGMRAMRANGELOOKUPENTRY_GET_ID(pVM->pgm.s.aRamRangeLookup[idxLookup]); 3802 AssertLogRelReturn(idLookupRange != 0 && idLookupRange <= pVM->pgm.s.idRamRangeMax, VERR_INTERNAL_ERROR_5); 3803 PPGMRAMRANGE const pLookupRange = pVM->pgm.s.apRamRanges[idLookupRange]; 3804 AssertLogRelReturn(pLookupRange, VERR_INTERNAL_ERROR_3); 3805 3806 AssertLogRelMsgReturn(fOldFlags & PGMREGMMIO2RANGE_F_OVERLAPPING 3807 ? pLookupRange != pFirstRamRange : pLookupRange == pFirstRamRange, 3808 ("MMIO2 unmap mixup at %RGp LB %RGp fl=%#x (%s) vs %RGp LB %RGp (%s)\n", 3809 GCPhys, cbRange, fOldFlags, pszDesc, pLookupRange->GCPhys, pLookupRange->cb, pLookupRange->pszDesc), 3810 VERR_INTERNAL_ERROR_4); 3658 3811 3659 3812 /* 3660 3813 * If monitoring dirty pages, we must deregister the handlers first. 3661 3814 */ 3662 if ( pFirstMmio ->pPhysHandlerR33815 if ( pFirstMmio2->pPhysHandlerR3 3663 3816 && (fOldFlags & PGMREGMMIO2RANGE_F_TRACKING_ENABLED)) 3664 pgmR3PhysMmio2DisableDirtyPageTracing(pVM, pFirstMmio);3817 pgmR3PhysMmio2DisableDirtyPageTracing(pVM, idxFirst, cChunks); 3665 3818 3666 3819 /* … … 3680 3833 * Note! This is where we might differ a little from a real system, because 3681 3834 * it's likely to just show the RAM pages as they were before the 3682 * MMIO /MMIO2 region was mapped here.3835 * MMIO2 region was mapped here. 3683 3836 */ 3684 3837 /* Only one chunk allowed when overlapping! */ 3685 Assert(fOldFlags & PGMREGMMIO2RANGE_F_LAST_CHUNK); 3838 Assert(cChunks == 1); 3839 /* No NEM stuff should ever get here, see assertion in the mapping function. */ 3840 AssertReturn(!VM_IS_NEM_ENABLED(pVM), VERR_INTERNAL_ERROR_4); 3686 3841 3687 3842 /* Restore the RAM pages we've replaced. 
*/ 3688 PPGMRAMRANGE pRam = pVM->pgm.s.pRamRangesXR3; 3689 while (pRam->GCPhys > pFirstMmio->RamRange.GCPhysLast) 3690 pRam = pRam->pNextR3; 3691 3692 PPGMPAGE pPageDst = &pRam->aPages[(pFirstMmio->RamRange.GCPhys - pRam->GCPhys) >> GUEST_PAGE_SHIFT]; 3693 uint32_t cPagesLeft = pFirstMmio->RamRange.cb >> GUEST_PAGE_SHIFT; 3694 pVM->pgm.s.cZeroPages += cPagesLeft; /** @todo not correct for NEM mode */ 3695 3696 #ifdef VBOX_WITH_NATIVE_NEM 3697 if (VM_IS_NEM_ENABLED(pVM)) /* Notify NEM. Note! we cannot be here in simple memory mode, see mapping function. */ 3698 { 3699 uint8_t u2State = UINT8_MAX; 3700 rc = NEMR3NotifyPhysMmioExUnmap(pVM, pFirstMmio->RamRange.GCPhys, pFirstMmio->RamRange.cb, 3701 fNemFlags | NEM_NOTIFY_PHYS_MMIO_EX_F_REPLACE, 3702 pRam->pvR3 3703 ? (uint8_t *)pRam->pvR3 + pFirstMmio->RamRange.GCPhys - pRam->GCPhys : NULL, 3704 pFirstMmio->pvR3, &u2State, &pRam->uNemRange); 3705 AssertRCStmt(rc, rcRet = rc); 3706 if (u2State != UINT8_MAX) 3707 pgmPhysSetNemStateForPages(pPageDst, cPagesLeft, u2State); 3708 } 3709 #endif 3710 3843 PPGMPAGE pPageDst = &pLookupRange->aPages[(pFirstRamRange->GCPhys - pLookupRange->GCPhys) >> GUEST_PAGE_SHIFT]; 3844 uint32_t cPagesLeft = pFirstRamRange->cb >> GUEST_PAGE_SHIFT; 3845 pVM->pgm.s.cZeroPages += cPagesLeft; 3711 3846 while (cPagesLeft-- > 0) 3712 3847 { … … 3715 3850 } 3716 3851 3717 /* Flush physical page map TLB. */3718 pgmPhysInvalidatePageMapTLB(pVM);3719 3720 3852 /* Update range state. */ 3721 pFirstMmio->RamRange.GCPhys = NIL_RTGCPHYS; 3722 pFirstMmio->RamRange.GCPhysLast = NIL_RTGCPHYS; 3723 pFirstMmio->fFlags &= ~(PGMREGMMIO2RANGE_F_OVERLAPPING | PGMREGMMIO2RANGE_F_MAPPED); 3853 pFirstMmio2->fFlags &= ~(PGMREGMMIO2RANGE_F_OVERLAPPING | PGMREGMMIO2RANGE_F_MAPPED); 3854 pFirstMmio2->GCPhys = NIL_RTGCPHYS; 3855 Assert(pFirstRamRange->GCPhys == NIL_RTGCPHYS); 3856 Assert(pFirstRamRange->GCPhysLast == NIL_RTGCPHYS); 3724 3857 } 3725 3858 else … … 3728 3861 * Unlink the chunks related to the MMIO/MMIO2 region. 3729 3862 */ 3730 for (PPGMREGMMIO2RANGE pCurMmio = pFirstMmio; ; pCurMmio = pCurMmio->pNextR3) 3731 { 3863 for (uint32_t iChunk = 0, idx = idxFirst; iChunk < cChunks; iChunk++, idx++) 3864 { 3865 PPGMREGMMIO2RANGE const pMmio2 = &pVM->pgm.s.aMmio2Ranges[idx]; 3866 PPGMRAMRANGE const pRamRange = pVM->pgm.s.apMmio2RamRanges[idx]; 3867 Assert(pMmio2->idRamRange == pRamRange->idRange); 3868 Assert(pMmio2->GCPhys == pRamRange->GCPhys); 3869 3732 3870 #ifdef VBOX_WITH_NATIVE_NEM 3733 3871 if (VM_IS_NEM_ENABLED(pVM)) /* Notify NEM. 
*/ 3734 3872 { 3735 3873 uint8_t u2State = UINT8_MAX; 3736 rc = NEMR3NotifyPhysMmioExUnmap(pVM, pCurMmio->RamRange.GCPhys, pCurMmio->RamRange.cb, fNemFlags, 3737 NULL, pCurMmio->pvR3, &u2State, &pCurMmio->RamRange.uNemRange); 3738 AssertRCStmt(rc, rcRet = rc); 3874 int rc = NEMR3NotifyPhysMmioExUnmap(pVM, pRamRange->GCPhys, pRamRange->cb, fNemFlags, 3875 NULL, pMmio2->pbR3, &u2State, &pRamRange->uNemRange); 3876 AssertLogRelMsgStmt(RT_SUCCESS(rc), 3877 ("NEMR3NotifyPhysMmioExUnmap failed: %Rrc - GCPhys=RGp LB %RGp fNemFlags=%#x pbR3=%p %s\n", 3878 rc, pRamRange->GCPhys, pRamRange->cb, fNemFlags, pMmio2->pbR3, pRamRange->pszDesc), 3879 rcRet = rc); 3739 3880 if (u2State != UINT8_MAX) 3740 pgmPhysSetNemStateForPages(p CurMmio->RamRange.aPages, pCurMmio->RamRange.cb >> GUEST_PAGE_SHIFT, u2State);3881 pgmPhysSetNemStateForPages(pRamRange->aPages, pRamRange->cb >> GUEST_PAGE_SHIFT, u2State); 3741 3882 } 3742 3883 #endif 3743 pgmR3PhysUnlinkRamRange(pVM, &pCurMmio->RamRange); 3744 pCurMmio->RamRange.GCPhys = NIL_RTGCPHYS; 3745 pCurMmio->RamRange.GCPhysLast = NIL_RTGCPHYS; 3746 pCurMmio->fFlags &= ~(PGMREGMMIO2RANGE_F_OVERLAPPING | PGMREGMMIO2RANGE_F_MAPPED); 3747 if (pCurMmio->fFlags & PGMREGMMIO2RANGE_F_LAST_CHUNK) 3748 break; 3884 3885 int rc = pgmR3PhysRamRangeRemoveLookup(pVM, pRamRange, &idxLookup); 3886 AssertLogRelMsgStmt(RT_SUCCESS(rc), 3887 ("pgmR3PhysRamRangeRemoveLookup failed: %Rrc - GCPhys=%RGp LB %RGp %s\n", 3888 rc, pRamRange->GCPhys, pRamRange->cb, pRamRange->pszDesc), 3889 rcRet = rc); 3890 3891 pMmio2->GCPhys = NIL_RTGCPHYS; 3892 pMmio2->fFlags &= ~(PGMREGMMIO2RANGE_F_OVERLAPPING | PGMREGMMIO2RANGE_F_MAPPED); 3893 Assert(pRamRange->GCPhys == NIL_RTGCPHYS); 3894 Assert(pRamRange->GCPhysLast == NIL_RTGCPHYS); 3749 3895 } 3750 3896 } … … 3761 3907 pgmPhysInvalidRamRangeTlbs(pVM); 3762 3908 3909 return rcRet; 3910 } 3911 3912 3913 /** 3914 * Unmaps an MMIO2 region. 3915 * 3916 * This is typically done when a guest / the bios / state loading changes the 3917 * PCI config. The replacing of base memory has the same restrictions as during 3918 * registration, of course. 3919 */ 3920 VMMR3_INT_DECL(int) PGMR3PhysMmio2Unmap(PVM pVM, PPDMDEVINS pDevIns, PGMMMIO2HANDLE hMmio2, RTGCPHYS GCPhys) 3921 { 3922 /* 3923 * Validate input 3924 */ 3925 VM_ASSERT_EMT_RETURN(pVM, VERR_VM_THREAD_NOT_EMT); 3926 AssertPtrReturn(pDevIns, VERR_INVALID_PARAMETER); 3927 AssertReturn(hMmio2 != NIL_PGMMMIO2HANDLE, VERR_INVALID_HANDLE); 3928 if (GCPhys != NIL_RTGCPHYS) 3929 { 3930 AssertReturn(GCPhys != 0, VERR_INVALID_PARAMETER); 3931 AssertReturn(!(GCPhys & GUEST_PAGE_OFFSET_MASK), VERR_INVALID_PARAMETER); 3932 } 3933 3934 uint32_t cChunks = 0; 3935 uint32_t const idxFirst = pgmR3PhysMmio2ResolveHandle(pVM, pDevIns, hMmio2, &cChunks); 3936 AssertReturn((int32_t)idxFirst >= 0, (int32_t)idxFirst); 3937 3938 3939 /* 3940 * Take the PGM lock and call worker. 3941 */ 3942 int rc = PGM_LOCK(pVM); 3943 AssertRCReturn(rc, rc); 3944 3945 rc = pgmR3PhysMmio2UnmapLocked(pVM, idxFirst, cChunks, GCPhys); 3946 #ifdef VBOX_STRICT 3947 pgmPhysAssertRamRangesLocked(pVM, false /*fInUpdate*/, false /*fRamRelaxed*/); 3948 #endif 3949 3763 3950 PGM_UNLOCK(pVM); 3764 return rc Ret;3951 return rc; 3765 3952 } 3766 3953 … … 3770 3957 * 3771 3958 * This is mainly for dealing with old saved states after changing the default 3772 * size of a mapping region. See P GMDevHlpMMIOExReduce and3959 * size of a mapping region. See PDMDevHlpMmio2Reduce and 3773 3960 * PDMPCIDEV::pfnRegionLoadChangeHookR3. 
3774 3961 * … … 3787 3974 * Validate input 3788 3975 */ 3789 VM_ASSERT_EMT_RETURN(pVM, VERR_VM_THREAD_NOT_EMT);3790 3976 AssertPtrReturn(pDevIns, VERR_INVALID_PARAMETER); 3791 AssertReturn(hMmio2 != NIL_PGMMMIO2HANDLE, VERR_INVALID_HANDLE); 3792 AssertReturn(cbRegion >= X86_PAGE_SIZE, VERR_INVALID_PARAMETER); 3793 AssertReturn(!(cbRegion & X86_PAGE_OFFSET_MASK), VERR_UNSUPPORTED_ALIGNMENT); 3794 VMSTATE enmVmState = VMR3GetState(pVM); 3977 AssertReturn(hMmio2 != NIL_PGMMMIO2HANDLE && hMmio2 != 0 && hMmio2 <= RT_ELEMENTS(pVM->pgm.s.aMmio2Ranges), 3978 VERR_INVALID_HANDLE); 3979 AssertReturn(cbRegion >= GUEST_PAGE_SIZE, VERR_INVALID_PARAMETER); 3980 AssertReturn(!(cbRegion & GUEST_PAGE_OFFSET_MASK), VERR_UNSUPPORTED_ALIGNMENT); 3981 3982 PVMCPU const pVCpu = VMMGetCpu(pVM); 3983 AssertReturn(pVCpu && pVCpu->idCpu == 0, VERR_VM_THREAD_NOT_EMT); 3984 3985 VMSTATE const enmVmState = VMR3GetState(pVM); 3795 3986 AssertLogRelMsgReturn( enmVmState == VMSTATE_CREATING 3796 3987 || enmVmState == VMSTATE_LOADING, … … 3798 3989 VERR_VM_INVALID_VM_STATE); 3799 3990 3991 /* 3992 * Grab the PGM lock and validate the request properly. 3993 */ 3800 3994 int rc = PGM_LOCK(pVM); 3801 3995 AssertRCReturn(rc, rc); 3802 3996 3803 PPGMREGMMIO2RANGE pFirstMmio = pgmR3PhysMmio2Find(pVM, pDevIns, UINT32_MAX, UINT32_MAX, hMmio2); 3804 if (pFirstMmio) 3805 { 3806 Assert(pFirstMmio->fFlags & PGMREGMMIO2RANGE_F_FIRST_CHUNK); 3807 if (!(pFirstMmio->fFlags & PGMREGMMIO2RANGE_F_MAPPED)) 3997 uint32_t cChunks = 0; 3998 uint32_t const idxFirst = pgmR3PhysMmio2ResolveHandle(pVM, pDevIns, hMmio2, &cChunks); 3999 if ((int32_t)idxFirst >= 0) 4000 { 4001 PPGMREGMMIO2RANGE const pFirstMmio2 = &pVM->pgm.s.aMmio2Ranges[idxFirst]; 4002 PPGMRAMRANGE const pFirstRamRange = pVM->pgm.s.apMmio2RamRanges[idxFirst]; 4003 if ( !(pFirstMmio2->fFlags & PGMREGMMIO2RANGE_F_MAPPED) 4004 && pFirstMmio2->GCPhys == NIL_RTGCPHYS) 3808 4005 { 3809 4006 /* … … 3811 4008 * Implement when there is a real world need and thus a testcase. 3812 4009 */ 3813 AssertLogRelMsgStmt(pFirstMmio->fFlags & PGMREGMMIO2RANGE_F_LAST_CHUNK, 3814 ("%s: %#x\n", pFirstMmio->RamRange.pszDesc, pFirstMmio->fFlags), 3815 rc = VERR_NOT_SUPPORTED); 3816 if (RT_SUCCESS(rc)) 4010 if (cChunks == 1) 3817 4011 { 3818 4012 /* 3819 * Make the change.4013 * The request has to be within the initial size. 3820 4014 */ 3821 Log(("PGMR3PhysMmio2Reduce: %s changes from %RGp bytes (%RGp) to %RGp bytes.\n", 3822 pFirstMmio->RamRange.pszDesc, pFirstMmio->RamRange.cb, pFirstMmio->cbReal, cbRegion)); 3823 3824 AssertLogRelMsgStmt(cbRegion <= pFirstMmio->cbReal, 3825 ("%s: cbRegion=%#RGp cbReal=%#RGp\n", pFirstMmio->RamRange.pszDesc, cbRegion, pFirstMmio->cbReal), 3826 rc = VERR_OUT_OF_RANGE); 3827 if (RT_SUCCESS(rc)) 4015 if (cbRegion <= pFirstMmio2->cbReal) 3828 4016 { 3829 pFirstMmio->RamRange.cb = cbRegion; 4017 /* 4018 * All we have to do is modify the size stored in the RAM range, 4019 * as it is the one used when mapping it and such. 4020 * The two page counts stored in PGMR0PERVM remain unchanged. 
4021 */ 4022 Log(("PGMR3PhysMmio2Reduce: %s changes from %#RGp bytes (%#RGp) to %#RGp bytes.\n", 4023 pFirstRamRange->pszDesc, pFirstRamRange->cb, pFirstMmio2->cbReal, cbRegion)); 4024 pFirstRamRange->cb = cbRegion; 4025 rc = VINF_SUCCESS; 4026 } 4027 else 4028 { 4029 AssertLogRelMsgFailed(("MMIO2/%s: cbRegion=%#RGp > cbReal=%#RGp\n", 4030 pFirstRamRange->pszDesc, cbRegion, pFirstMmio2->cbReal)); 4031 rc = VERR_OUT_OF_RANGE; 3830 4032 } 3831 4033 } 4034 else 4035 { 4036 AssertLogRelMsgFailed(("MMIO2/%s: more than one chunk: %d (flags=%#x)\n", 4037 pFirstRamRange->pszDesc, cChunks, pFirstMmio2->fFlags)); 4038 rc = VERR_NOT_SUPPORTED; 4039 } 3832 4040 } 3833 4041 else 4042 { 4043 AssertLogRelMsgFailed(("MMIO2/%s: cannot change size of mapped range: %RGp..%RGp\n", pFirstRamRange->pszDesc, 4044 pFirstMmio2->GCPhys, pFirstMmio2->GCPhys + pFirstRamRange->cb - 1U)); 3834 4045 rc = VERR_WRONG_ORDER; 4046 } 3835 4047 } 3836 4048 else 3837 rc = VERR_NOT_FOUND;4049 rc = (int32_t)idxFirst; 3838 4050 3839 4051 PGM_UNLOCK(pVM); … … 3859 4071 3860 4072 /* 3861 * Just do this the simple way. No need for locking as this is only taken at 3862 */ 3863 PGM_LOCK_VOID(pVM); 3864 PPGMREGMMIO2RANGE pFirstMmio = pgmR3PhysMmio2Find(pVM, pDevIns, UINT32_MAX, UINT32_MAX, hMmio2); 4073 * Just do this the simple way. 4074 */ 4075 int rc = PGM_LOCK_VOID(pVM); 4076 AssertRCReturn(rc, rc); 4077 uint32_t cChunks; 4078 uint32_t const idxFirst = pgmR3PhysMmio2ResolveHandle(pVM, pDevIns, hMmio2, &cChunks); 3865 4079 PGM_UNLOCK(pVM); 3866 AssertReturn(pFirstMmio, VERR_INVALID_HANDLE); 3867 AssertReturn(pFirstMmio->fFlags & PGMREGMMIO2RANGE_F_FIRST_CHUNK, VERR_INVALID_HANDLE); 4080 AssertReturn((int32_t)idxFirst >= 0, (int32_t)idxFirst); 3868 4081 return VINF_SUCCESS; 3869 4082 } … … 3881 4094 VMMR3_INT_DECL(RTGCPHYS) PGMR3PhysMmio2GetMappingAddress(PVM pVM, PPDMDEVINS pDevIns, PGMMMIO2HANDLE hMmio2) 3882 4095 { 3883 AssertPtrReturn(pDevIns, NIL_RTGCPHYS); 3884 3885 PPGMREGMMIO2RANGE pFirstRegMmio = pgmR3PhysMmio2Find(pVM, pDevIns, UINT32_MAX, UINT32_MAX, hMmio2); 3886 AssertReturn(pFirstRegMmio, NIL_RTGCPHYS); 3887 3888 if (pFirstRegMmio->fFlags & PGMREGMMIO2RANGE_F_MAPPED) 3889 return pFirstRegMmio->RamRange.GCPhys; 4096 RTGCPHYS GCPhysRet = NIL_RTGCPHYS; 4097 4098 int rc = PGM_LOCK_VOID(pVM); 4099 AssertRCReturn(rc, NIL_RTGCPHYS); 4100 4101 uint32_t cChunks; 4102 uint32_t const idxFirst = pgmR3PhysMmio2ResolveHandle(pVM, pDevIns, hMmio2, &cChunks); 4103 if ((int32_t)idxFirst >= 0) 4104 GCPhysRet = pVM->pgm.s.aMmio2Ranges[idxFirst].GCPhys; 4105 4106 PGM_UNLOCK(pVM); 3890 4107 return NIL_RTGCPHYS; 3891 4108 } … … 3903 4120 * Continue validation. 3904 4121 */ 3905 PPGMREGMMIO2RANGE pFirstRegMmio = pgmR3PhysMmio2Find(pVM, pDevIns, UINT32_MAX, UINT32_MAX, hMmio2); 3906 AssertReturn(pFirstRegMmio, VERR_INVALID_HANDLE); 3907 AssertReturn( (pFirstRegMmio->fFlags & (PGMREGMMIO2RANGE_F_TRACK_DIRTY_PAGES | PGMREGMMIO2RANGE_F_FIRST_CHUNK)) 3908 == (PGMREGMMIO2RANGE_F_TRACK_DIRTY_PAGES | PGMREGMMIO2RANGE_F_FIRST_CHUNK), 3909 VERR_INVALID_FUNCTION); 3910 AssertReturn(pDevIns == pFirstRegMmio->pDevInsR3, VERR_NOT_OWNER); 3911 3912 RTGCPHYS cbTotal = 0; 3913 uint16_t fTotalDirty = 0; 3914 for (PPGMREGMMIO2RANGE pCur = pFirstRegMmio;;) 3915 { 3916 cbTotal += pCur->RamRange.cb; /* Not using cbReal here, because NEM is not in on the creating, only the mapping. 
*/ 3917 fTotalDirty |= pCur->fFlags; 3918 if (pCur->fFlags & PGMREGMMIO2RANGE_F_LAST_CHUNK) 3919 break; 3920 pCur = pCur->pNextR3; 3921 AssertPtrReturn(pCur, VERR_INTERNAL_ERROR_5); 3922 AssertReturn( (pCur->fFlags & (PGMREGMMIO2RANGE_F_TRACK_DIRTY_PAGES | PGMREGMMIO2RANGE_F_FIRST_CHUNK)) 3923 == PGMREGMMIO2RANGE_F_TRACK_DIRTY_PAGES, 3924 VERR_INTERNAL_ERROR_4); 3925 } 3926 size_t const cbTotalBitmap = RT_ALIGN_T(cbTotal, GUEST_PAGE_SIZE * 64, RTGCPHYS) / GUEST_PAGE_SIZE / 8; 3927 3928 if (cbBitmap) 3929 { 4122 uint32_t cChunks; 4123 uint32_t const idxFirst = pgmR3PhysMmio2ResolveHandle(pVM, pDevIns, hMmio2, &cChunks); 4124 AssertReturn((int32_t)idxFirst >= 0, (int32_t)idxFirst); 4125 PPGMREGMMIO2RANGE const pFirstMmio2 = &pVM->pgm.s.aMmio2Ranges[idxFirst]; 4126 AssertReturn(pFirstMmio2->fFlags & PGMREGMMIO2RANGE_F_TRACK_DIRTY_PAGES, VERR_INVALID_FUNCTION); 4127 4128 int rc = VINF_SUCCESS; 4129 if (cbBitmap || pvBitmap) 4130 { 4131 /* 4132 * Check the bitmap size and collect all the dirty flags. 4133 */ 4134 RTGCPHYS cbTotal = 0; 4135 uint16_t fTotalDirty = 0; 4136 for (uint32_t iChunk = 0, idx = idxFirst; iChunk < cChunks; iChunk++, idx++) 4137 { 4138 /* Not using cbReal here, because NEM is not in on the creating, only the mapping. */ 4139 cbTotal += pVM->pgm.s.apMmio2RamRanges[idx]->cb; 4140 fTotalDirty |= pVM->pgm.s.aMmio2Ranges[idx].fFlags; 4141 } 4142 size_t const cbTotalBitmap = RT_ALIGN_T(cbTotal, GUEST_PAGE_SIZE * 64, RTGCPHYS) / GUEST_PAGE_SIZE / 8; 4143 3930 4144 AssertPtrReturn(pvBitmap, VERR_INVALID_POINTER); 3931 4145 AssertReturn(RT_ALIGN_P(pvBitmap, sizeof(uint64_t)) == pvBitmap, VERR_INVALID_POINTER); 3932 4146 AssertReturn(cbBitmap == cbTotalBitmap, VERR_INVALID_PARAMETER); 3933 } 3934 3935 /* 3936 * Do the work. 3937 */ 3938 int rc = VINF_SUCCESS; 3939 if (pvBitmap) 3940 { 4147 3941 4148 #ifdef VBOX_WITH_PGM_NEM_MODE 3942 if (pFirstRegMmio->pPhysHandlerR3 == NULL) 4149 /* 4150 * If there is no physical handler we must be in NEM mode and NEM 4151 * taking care of the dirty bit collecting. 
4152 */ 4153 if (pFirstMmio2->pPhysHandlerR3 == NULL) 3943 4154 { 3944 4155 /** @todo This does not integrate at all with --execute-all-in-iem, leaving the … … 3948 4159 AssertReturn(VM_IS_NEM_ENABLED(pVM), VERR_INTERNAL_ERROR_4); 3949 4160 uint8_t *pbBitmap = (uint8_t *)pvBitmap; 3950 for (PPGMREGMMIO2RANGE pCur = pFirstRegMmio; pCur; pCur = pCur->pNextR3) 3951 { 3952 size_t const cbBitmapChunk = pCur->RamRange.cb / GUEST_PAGE_SIZE / 8; 3953 Assert((RTGCPHYS)cbBitmapChunk * GUEST_PAGE_SIZE * 8 == pCur->RamRange.cb); 3954 int rc2 = NEMR3PhysMmio2QueryAndResetDirtyBitmap(pVM, pCur->RamRange.GCPhys, pCur->RamRange.cb, 3955 pCur->RamRange.uNemRange, pbBitmap, cbBitmapChunk); 4161 for (uint32_t iChunk = 0, idx = idxFirst; iChunk < cChunks; iChunk++, idx++) 4162 { 4163 PPGMRAMRANGE const pRamRange = pVM->pgm.s.apMmio2RamRanges[idx]; 4164 size_t const cbBitmapChunk = (pRamRange->cb / GUEST_PAGE_SIZE + 7) / 8; 4165 Assert((RTGCPHYS)cbBitmapChunk * GUEST_PAGE_SIZE * 8 == pRamRange->cb); 4166 Assert(pRamRange->GCPhys == pVM->pgm.s.aMmio2Ranges[idx].GCPhys); /* (No MMIO2 inside RAM in NEM mode!)*/ 4167 int rc2 = NEMR3PhysMmio2QueryAndResetDirtyBitmap(pVM, pRamRange->GCPhys, pRamRange->cb, 4168 pRamRange->uNemRange, pbBitmap, cbBitmapChunk); 3956 4169 if (RT_FAILURE(rc2) && RT_SUCCESS(rc)) 3957 4170 rc = rc2; 3958 if (pCur->fFlags & PGMREGMMIO2RANGE_F_LAST_CHUNK) 3959 break; 3960 pbBitmap += pCur->RamRange.cb / GUEST_PAGE_SIZE / 8; 4171 pbBitmap += pRamRange->cb / GUEST_PAGE_SIZE / 8; 3961 4172 } 3962 4173 } 3963 4174 else 3964 4175 #endif 3965 if (fTotalDirty & PGMREGMMIO2RANGE_F_IS_DIRTY)3966 { 3967 if ( (pFirst RegMmio->fFlags & (PGMREGMMIO2RANGE_F_MAPPED | PGMREGMMIO2RANGE_F_TRACKING_ENABLED))3968 == 4176 if (fTotalDirty & PGMREGMMIO2RANGE_F_IS_DIRTY) 4177 { 4178 if ( (pFirstMmio2->fFlags & (PGMREGMMIO2RANGE_F_MAPPED | PGMREGMMIO2RANGE_F_TRACKING_ENABLED)) 4179 == (PGMREGMMIO2RANGE_F_MAPPED | PGMREGMMIO2RANGE_F_TRACKING_ENABLED)) 3969 4180 { 3970 4181 /* … … 3972 4183 */ 3973 4184 RT_BZERO(pvBitmap, cbBitmap); /* simpler for now. 
*/ 3974 uint32_t iPageNo = 0; 3975 for (PPGMREGMMIO2RANGE pCur = pFirstRegMmio; pCur; pCur = pCur->pNextR3) 4185 for (uint32_t iChunk = 0, idx = idxFirst, iPageNo = 0; iChunk < cChunks; iChunk++, idx++) 3976 4186 { 3977 if (pCur->fFlags & PGMREGMMIO2RANGE_F_IS_DIRTY) 4187 PPGMREGMMIO2RANGE const pMmio2 = &pVM->pgm.s.aMmio2Ranges[idx]; 4188 if (pMmio2->fFlags & PGMREGMMIO2RANGE_F_IS_DIRTY) 3978 4189 { 3979 int rc2 = pgmHandlerPhysicalResetMmio2WithBitmap(pVM, p Cur->RamRange.GCPhys, pvBitmap, iPageNo);4190 int rc2 = pgmHandlerPhysicalResetMmio2WithBitmap(pVM, pMmio2->GCPhys, pvBitmap, iPageNo); 3980 4191 if (RT_FAILURE(rc2) && RT_SUCCESS(rc)) 3981 4192 rc = rc2; 3982 p Cur->fFlags &= ~PGMREGMMIO2RANGE_F_IS_DIRTY;4193 pMmio2->fFlags &= ~PGMREGMMIO2RANGE_F_IS_DIRTY; 3983 4194 } 3984 if (pCur->fFlags & PGMREGMMIO2RANGE_F_LAST_CHUNK) 3985 break; 3986 iPageNo += pCur->RamRange.cb >> GUEST_PAGE_SHIFT; 4195 iPageNo += pVM->pgm.s.apMmio2RamRanges[idx]->cb >> GUEST_PAGE_SHIFT; 3987 4196 } 3988 4197 } … … 3995 4204 */ 3996 4205 RT_BZERO(pvBitmap, cbBitmap); 3997 uint32_t iPageNo = 0; 3998 for (PPGMREGMMIO2RANGE pCur = pFirstRegMmio; pCur; pCur = pCur->pNextR3) 4206 for (uint32_t iChunk = 0, idx = idxFirst, iPageNo = 0; iChunk < cChunks; iChunk++, idx++) 3999 4207 { 4000 if (pCur->fFlags & PGMREGMMIO2RANGE_F_IS_DIRTY) 4208 PPGMRAMRANGE const pRamRange = pVM->pgm.s.apMmio2RamRanges[idx]; 4209 PPGMREGMMIO2RANGE const pMmio2 = &pVM->pgm.s.aMmio2Ranges[idx]; 4210 if (pMmio2->fFlags & PGMREGMMIO2RANGE_F_IS_DIRTY) 4001 4211 { 4002 ASMBitSetRange(pvBitmap, iPageNo, iPageNo + (p Cur->RamRange.cb >> GUEST_PAGE_SHIFT));4003 p Cur->fFlags &= ~PGMREGMMIO2RANGE_F_IS_DIRTY;4212 ASMBitSetRange(pvBitmap, iPageNo, iPageNo + (pRamRange->cb >> GUEST_PAGE_SHIFT)); 4213 pMmio2->fFlags &= ~PGMREGMMIO2RANGE_F_IS_DIRTY; 4004 4214 } 4005 if (pCur->fFlags & PGMREGMMIO2RANGE_F_LAST_CHUNK) 4006 break; 4007 iPageNo += pCur->RamRange.cb >> GUEST_PAGE_SHIFT; 4215 iPageNo += pRamRange->cb >> GUEST_PAGE_SHIFT; 4008 4216 } 4009 4217 } … … 4018 4226 * No bitmap. Reset the region if tracking is currently enabled. 
4019 4227 */ 4020 else if ( (pFirst RegMmio->fFlags & (PGMREGMMIO2RANGE_F_MAPPED | PGMREGMMIO2RANGE_F_TRACKING_ENABLED))4021 == 4228 else if ( (pFirstMmio2->fFlags & (PGMREGMMIO2RANGE_F_MAPPED | PGMREGMMIO2RANGE_F_TRACKING_ENABLED)) 4229 == (PGMREGMMIO2RANGE_F_MAPPED | PGMREGMMIO2RANGE_F_TRACKING_ENABLED)) 4022 4230 { 4023 4231 #ifdef VBOX_WITH_PGM_NEM_MODE 4024 if (pFirst RegMmio->pPhysHandlerR3 == NULL)4232 if (pFirstMmio2->pPhysHandlerR3 == NULL) 4025 4233 { 4026 4234 AssertReturn(VM_IS_NEM_ENABLED(pVM), VERR_INTERNAL_ERROR_4); 4027 for (PPGMREGMMIO2RANGE pCur = pFirstRegMmio; pCur; pCur = pCur->pNextR3) 4028 { 4029 int rc2 = NEMR3PhysMmio2QueryAndResetDirtyBitmap(pVM, pCur->RamRange.GCPhys, pCur->RamRange.cb, 4030 pCur->RamRange.uNemRange, NULL, 0); 4235 for (uint32_t iChunk = 0, idx = idxFirst; iChunk < cChunks; iChunk++, idx++) 4236 { 4237 PPGMRAMRANGE const pRamRange = pVM->pgm.s.apMmio2RamRanges[idx]; 4238 Assert(pRamRange->GCPhys == pVM->pgm.s.aMmio2Ranges[idx].GCPhys); /* (No MMIO2 inside RAM in NEM mode!)*/ 4239 int rc2 = NEMR3PhysMmio2QueryAndResetDirtyBitmap(pVM, pRamRange->GCPhys, pRamRange->cb, 4240 pRamRange->uNemRange, NULL, 0); 4031 4241 if (RT_FAILURE(rc2) && RT_SUCCESS(rc)) 4032 4242 rc = rc2; 4033 if (pCur->fFlags & PGMREGMMIO2RANGE_F_LAST_CHUNK)4034 break;4035 4243 } 4036 4244 } … … 4038 4246 #endif 4039 4247 { 4040 for ( PPGMREGMMIO2RANGE pCur = pFirstRegMmio; pCur; pCur = pCur->pNextR3)4041 { 4042 p Cur->fFlags &= ~PGMREGMMIO2RANGE_F_IS_DIRTY;4043 int rc2 = PGMHandlerPhysicalReset(pVM, p Cur->RamRange.GCPhys);4248 for (uint32_t iChunk = 0, idx = idxFirst; iChunk < cChunks; iChunk++, idx++) 4249 { 4250 pVM->pgm.s.aMmio2Ranges[idx].fFlags &= ~PGMREGMMIO2RANGE_F_IS_DIRTY; 4251 int rc2 = PGMHandlerPhysicalReset(pVM, pVM->pgm.s.aMmio2Ranges[idx].GCPhys); 4044 4252 if (RT_FAILURE(rc2) && RT_SUCCESS(rc)) 4045 4253 rc = rc2; 4046 if (pCur->fFlags & PGMREGMMIO2RANGE_F_LAST_CHUNK)4047 break;4048 4254 } 4049 4255 } … … 4102 4308 * Continue validation. 4103 4309 */ 4104 PPGMREGMMIO2RANGE pFirstRegMmio = pgmR3PhysMmio2Find(pVM, pDevIns, UINT32_MAX, UINT32_MAX, hMmio2); 4105 AssertReturn(pFirstRegMmio, VERR_INVALID_HANDLE); 4106 AssertReturn( (pFirstRegMmio->fFlags & (PGMREGMMIO2RANGE_F_TRACK_DIRTY_PAGES | PGMREGMMIO2RANGE_F_FIRST_CHUNK)) 4107 == (PGMREGMMIO2RANGE_F_TRACK_DIRTY_PAGES | PGMREGMMIO2RANGE_F_FIRST_CHUNK) 4108 , VERR_INVALID_FUNCTION); 4109 AssertReturn(pDevIns == pFirstRegMmio->pDevInsR3, VERR_NOT_OWNER); 4310 uint32_t cChunks; 4311 uint32_t const idxFirst = pgmR3PhysMmio2ResolveHandle(pVM, pDevIns, hMmio2, &cChunks); 4312 AssertReturn((int32_t)idxFirst >= 0, (int32_t)idxFirst); 4313 PPGMREGMMIO2RANGE const pFirstMmio2 = &pVM->pgm.s.aMmio2Ranges[idxFirst]; 4314 AssertReturn(pFirstMmio2->fFlags & PGMREGMMIO2RANGE_F_TRACK_DIRTY_PAGES, VERR_INVALID_FUNCTION); 4110 4315 4111 4316 #ifdef VBOX_WITH_PGM_NEM_MODE … … 4114 4319 * leave the tracking on all the time there. 4115 4320 */ 4116 if (pFirst RegMmio->pPhysHandlerR3 == NULL)4321 if (pFirstMmio2->pPhysHandlerR3 == NULL) 4117 4322 { 4118 4323 AssertReturn(VM_IS_NEM_ENABLED(pVM), VERR_INTERNAL_ERROR_4); … … 4122 4327 4123 4328 /* 4124 * Anyt ing needing doing?4125 */ 4126 if (fEnabled != RT_BOOL(pFirst RegMmio->fFlags & PGMREGMMIO2RANGE_F_TRACKING_ENABLED))4127 { 4128 LogFlowFunc(("fEnabled=%RTbool %s\n", fEnabled, p FirstRegMmio->RamRange.pszDesc));4329 * Anything needing doing? 
4330 */ 4331 if (fEnabled != RT_BOOL(pFirstMmio2->fFlags & PGMREGMMIO2RANGE_F_TRACKING_ENABLED)) 4332 { 4333 LogFlowFunc(("fEnabled=%RTbool %s\n", fEnabled, pVM->pgm.s.apMmio2RamRanges[idxFirst]->pszDesc)); 4129 4334 4130 4335 /* 4131 4336 * Update the PGMREGMMIO2RANGE_F_TRACKING_ENABLED flag. 4132 4337 */ 4133 for (PPGMREGMMIO2RANGE pCur = pFirstRegMmio;;) 4134 { 4338 for (uint32_t iChunk = 0, idx = idxFirst; iChunk < cChunks; iChunk++, idx++) 4135 4339 if (fEnabled) 4136 p Cur->fFlags |= PGMREGMMIO2RANGE_F_TRACKING_ENABLED;4340 pVM->pgm.s.aMmio2Ranges[idx].fFlags |= PGMREGMMIO2RANGE_F_TRACKING_ENABLED; 4137 4341 else 4138 pCur->fFlags &= ~PGMREGMMIO2RANGE_F_TRACKING_ENABLED; 4139 if (pCur->fFlags & PGMREGMMIO2RANGE_F_LAST_CHUNK) 4140 break; 4141 pCur = pCur->pNextR3; 4142 AssertPtrReturn(pCur, VERR_INTERNAL_ERROR_5); 4143 AssertReturn( (pCur->fFlags & (PGMREGMMIO2RANGE_F_TRACK_DIRTY_PAGES | PGMREGMMIO2RANGE_F_FIRST_CHUNK)) 4144 == PGMREGMMIO2RANGE_F_TRACK_DIRTY_PAGES 4145 , VERR_INTERNAL_ERROR_4); 4146 } 4342 pVM->pgm.s.aMmio2Ranges[idx].fFlags &= ~PGMREGMMIO2RANGE_F_TRACKING_ENABLED; 4147 4343 4148 4344 /* … … 4154 4350 * it's in the release log. 4155 4351 */ 4156 if (pFirst RegMmio->fFlags & PGMREGMMIO2RANGE_F_MAPPED)4352 if (pFirstMmio2->fFlags & PGMREGMMIO2RANGE_F_MAPPED) 4157 4353 { 4158 4354 if (fEnabled) 4159 pgmR3PhysMmio2EnableDirtyPageTracing(pVM, pFirstRegMmio);4355 pgmR3PhysMmio2EnableDirtyPageTracing(pVM, idxFirst, cChunks); 4160 4356 else 4161 pgmR3PhysMmio2DisableDirtyPageTracing(pVM, pFirstRegMmio);4357 pgmR3PhysMmio2DisableDirtyPageTracing(pVM, idxFirst, cChunks); 4162 4358 } 4163 4359 } 4164 4360 else 4165 LogFlowFunc(("fEnabled=%RTbool %s - no change\n", fEnabled, p FirstRegMmio->RamRange.pszDesc));4361 LogFlowFunc(("fEnabled=%RTbool %s - no change\n", fEnabled, pVM->pgm.s.apMmio2RamRanges[idxFirst]->pszDesc)); 4166 4362 4167 4363 return VINF_SUCCESS; … … 4216 4412 VM_ASSERT_EMT0_RETURN(pVM, VERR_VM_THREAD_NOT_EMT); 4217 4413 VM_ASSERT_STATE_RETURN(pVM, VMSTATE_LOADING, VERR_VM_INVALID_VM_STATE); 4218 AssertPtrReturn(pDevIns, VERR_INVALID_PARAMETER);4219 AssertReturn(hMmio2 != NIL_PGMMMIO2HANDLE, VERR_INVALID_HANDLE);4220 4414 AssertReturn(iNewRegion <= UINT8_MAX, VERR_INVALID_PARAMETER); 4221 4222 AssertReturn(pVM->enmVMState == VMSTATE_LOADING, VERR_INVALID_STATE);4223 4415 4224 4416 int rc = PGM_LOCK(pVM); 4225 4417 AssertRCReturn(rc, rc); 4226 4418 4227 PPGMREGMMIO2RANGE pFirstRegMmio = pgmR3PhysMmio2Find(pVM, pDevIns, UINT32_MAX, UINT32_MAX, hMmio2); 4228 AssertReturnStmt(pFirstRegMmio, PGM_UNLOCK(pVM), VERR_NOT_FOUND); 4229 AssertReturnStmt(pgmR3PhysMmio2Find(pVM, pDevIns, pFirstRegMmio->iSubDev, iNewRegion, NIL_PGMMMIO2HANDLE) == NULL, 4230 PGM_UNLOCK(pVM), VERR_RESOURCE_IN_USE); 4231 4232 /* 4233 * Make the change. 4234 */ 4235 pFirstRegMmio->iRegion = (uint8_t)iNewRegion; 4419 /* Validate and resolve the handle. */ 4420 uint32_t cChunks; 4421 uint32_t const idxFirst = pgmR3PhysMmio2ResolveHandle(pVM, pDevIns, hMmio2, &cChunks); 4422 if ((int32_t)idxFirst >= 0) 4423 { 4424 /* Check that the new range number is unused. */ 4425 PPGMREGMMIO2RANGE const pConflict = pgmR3PhysMmio2Find(pVM, pDevIns, pVM->pgm.s.aMmio2Ranges[idxFirst].iSubDev, 4426 iNewRegion); 4427 if (!pConflict) 4428 { 4429 /* 4430 * Make the change. 
4431 */ 4432 for (uint32_t iChunk = 0, idx = idxFirst; iChunk < cChunks; iChunk++, idx++) 4433 pVM->pgm.s.aMmio2Ranges[idx].iRegion = (uint8_t)iNewRegion; 4434 rc = VINF_SUCCESS; 4435 } 4436 else 4437 { 4438 AssertLogRelMsgFailed(("MMIO2/%s: iNewRegion=%d conflicts with %s\n", pVM->pgm.s.apMmio2RamRanges[idxFirst]->pszDesc, 4439 iNewRegion, pVM->pgm.s.apMmio2RamRanges[pConflict->idRamRange]->pszDesc)); 4440 rc = VERR_RESOURCE_IN_USE; 4441 } 4442 } 4443 else 4444 rc = (int32_t)idxFirst; 4236 4445 4237 4446 PGM_UNLOCK(pVM); 4238 return VINF_SUCCESS;4447 return rc; 4239 4448 } 4240 4449 … … 4275 4484 AssertReturn(RT_ALIGN_T(GCPhys, GUEST_PAGE_SIZE, RTGCPHYS) == GCPhys, VERR_INVALID_PARAMETER); 4276 4485 AssertReturn(RT_ALIGN_T(cb, GUEST_PAGE_SIZE, RTGCPHYS) == cb, VERR_INVALID_PARAMETER); 4277 RTGCPHYS GCPhysLast = GCPhys + (cb - 1);4486 RTGCPHYS const GCPhysLast = GCPhys + (cb - 1); 4278 4487 AssertReturn(GCPhysLast > GCPhys, VERR_INVALID_PARAMETER); 4279 4488 AssertPtrReturn(pvBinary, VERR_INVALID_PARAMETER); 4280 4489 AssertPtrReturn(pszDesc, VERR_INVALID_POINTER); 4281 4490 AssertReturn(!(fFlags & ~PGMPHYS_ROM_FLAGS_VALID_MASK), VERR_INVALID_PARAMETER); 4491 4492 PVMCPU const pVCpu = VMMGetCpu(pVM); 4493 AssertReturn(pVCpu && pVCpu->idCpu == 0, VERR_VM_THREAD_NOT_EMT); 4282 4494 VM_ASSERT_STATE_RETURN(pVM, VMSTATE_CREATING, VERR_VM_INVALID_VM_STATE); 4283 4495 4284 4496 const uint32_t cGuestPages = cb >> GUEST_PAGE_SHIFT; 4497 AssertReturn(cGuestPages <= PGM_MAX_PAGES_PER_ROM_RANGE, VERR_OUT_OF_RANGE); 4498 4285 4499 #ifdef VBOX_WITH_PGM_NEM_MODE 4286 4500 const uint32_t cHostPages = RT_ALIGN_T(cb, HOST_PAGE_SIZE, RTGCPHYS) >> HOST_PAGE_SHIFT; … … 4288 4502 4289 4503 /* 4290 * Find the ROM location in the ROM list first. 4291 */ 4292 PPGMROMRANGE pRomPrev = NULL; 4293 PPGMROMRANGE pRom = pVM->pgm.s.pRomRangesR3; 4294 while (pRom && GCPhysLast >= pRom->GCPhys) 4295 { 4296 if ( GCPhys <= pRom->GCPhysLast 4297 && GCPhysLast >= pRom->GCPhys) 4298 AssertLogRelMsgFailedReturn(("%RGp-%RGp (%s) conflicts with existing %RGp-%RGp (%s)\n", 4299 GCPhys, GCPhysLast, pszDesc, 4300 pRom->GCPhys, pRom->GCPhysLast, pRom->pszDesc), 4301 VERR_PGM_RAM_CONFLICT); 4302 /* next */ 4303 pRomPrev = pRom; 4304 pRom = pRom->pNextR3; 4504 * Make sure we've got a free ROM range. 4505 */ 4506 uint8_t const idRomRange = pVM->pgm.s.cRomRanges; 4507 AssertLogRelReturn(idRomRange < RT_ELEMENTS(pVM->pgm.s.apRomRanges), VERR_PGM_TOO_MANY_ROM_RANGES); 4508 4509 /* 4510 * Look thru the existing ROM range and make sure there aren't any 4511 * overlapping registration. 4512 */ 4513 uint32_t const cRomRanges = RT_MIN(pVM->pgm.s.cRomRanges, RT_ELEMENTS(pVM->pgm.s.apRomRanges)); 4514 for (uint32_t idx = 0; idx < cRomRanges; idx++) 4515 { 4516 PPGMROMRANGE const pRom = pVM->pgm.s.apRomRanges[idx]; 4517 AssertLogRelMsgReturn( GCPhys > pRom->GCPhysLast 4518 || GCPhysLast < pRom->GCPhys, 4519 ("%RGp-%RGp (%s) conflicts with existing %RGp-%RGp (%s)\n", 4520 GCPhys, GCPhysLast, pszDesc, 4521 pRom->GCPhys, pRom->GCPhysLast, pRom->pszDesc), 4522 VERR_PGM_RAM_CONFLICT); 4305 4523 } 4306 4524 … … 4313 4531 * than one RAM range (lazy). 4314 4532 */ 4315 bool fRamExists = false; 4316 PPGMRAMRANGE pRamPrev = NULL; 4317 PPGMRAMRANGE pRam = pVM->pgm.s.pRamRangesXR3; 4318 while (pRam && GCPhysLast >= pRam->GCPhys) 4319 { 4320 if ( GCPhys <= pRam->GCPhysLast 4321 && GCPhysLast >= pRam->GCPhys) 4322 { 4323 /* completely within? 
*/ 4324 AssertLogRelMsgReturn( GCPhys >= pRam->GCPhys 4325 && GCPhysLast <= pRam->GCPhysLast, 4326 ("%RGp-%RGp (%s) falls partly outside %RGp-%RGp (%s)\n", 4327 GCPhys, GCPhysLast, pszDesc, 4328 pRam->GCPhys, pRam->GCPhysLast, pRam->pszDesc), 4329 VERR_PGM_RAM_CONFLICT); 4330 fRamExists = true; 4331 break; 4332 } 4333 4334 /* next */ 4335 pRamPrev = pRam; 4336 pRam = pRam->pNextR3; 4337 } 4338 if (fRamExists) 4339 { 4340 PPGMPAGE pPage = &pRam->aPages[(GCPhys - pRam->GCPhys) >> GUEST_PAGE_SHIFT]; 4533 uint32_t idxInsert = UINT32_MAX; 4534 PPGMRAMRANGE const pOverlappingRange = pgmR3PhysRamRangeFindOverlapping(pVM, GCPhys, GCPhysLast, &idxInsert); 4535 if (pOverlappingRange) 4536 { 4537 /* completely within? */ 4538 AssertLogRelMsgReturn( GCPhys >= pOverlappingRange->GCPhys 4539 && GCPhysLast <= pOverlappingRange->GCPhysLast, 4540 ("%RGp-%RGp (%s) falls partly outside %RGp-%RGp (%s)\n", 4541 GCPhys, GCPhysLast, pszDesc, 4542 pOverlappingRange->GCPhys, pOverlappingRange->GCPhysLast, pOverlappingRange->pszDesc), 4543 VERR_PGM_RAM_CONFLICT); 4544 4545 /* Check that is isn't an ad hoc range, but a real RAM range. */ 4546 AssertLogRelMsgReturn(!PGM_RAM_RANGE_IS_AD_HOC(pOverlappingRange), 4547 ("%RGp-%RGp (ROM/%s) mapping attempt in non-RAM range: %RGp-%RGp (%s)\n", 4548 GCPhys, GCPhysLast, pszDesc, 4549 pOverlappingRange->GCPhys, pOverlappingRange->GCPhysLast, pOverlappingRange->pszDesc), 4550 VERR_PGM_RAM_CONFLICT); 4551 4552 /* All the pages must be RAM pages. */ 4553 PPGMPAGE pPage = &pOverlappingRange->aPages[(GCPhys - pOverlappingRange->GCPhys) >> GUEST_PAGE_SHIFT]; 4341 4554 uint32_t cPagesLeft = cGuestPages; 4342 4555 while (cPagesLeft-- > 0) … … 4344 4557 AssertLogRelMsgReturn(PGM_PAGE_GET_TYPE(pPage) == PGMPAGETYPE_RAM, 4345 4558 ("%RGp (%R[pgmpage]) isn't a RAM page - registering %RGp-%RGp (%s).\n", 4346 pRam->GCPhys + ((RTGCPHYS)(uintptr_t)(pPage - &pRam->aPages[0]) << GUEST_PAGE_SHIFT), 4347 pPage, GCPhys, GCPhysLast, pszDesc), VERR_PGM_RAM_CONFLICT); 4348 Assert(PGM_PAGE_IS_ZERO(pPage) || PGM_IS_IN_NEM_MODE(pVM)); 4559 GCPhys + ((RTGCPHYS)cPagesLeft << GUEST_PAGE_SHIFT), pPage, GCPhys, GCPhysLast, pszDesc), 4560 VERR_PGM_RAM_CONFLICT); 4561 AssertLogRelMsgReturn(PGM_PAGE_IS_ZERO(pPage) || PGM_IS_IN_NEM_MODE(pVM), 4562 ("%RGp (%R[pgmpage]) is not a ZERO page - registering %RGp-%RGp (%s).\n", 4563 GCPhys + ((RTGCPHYS)cPagesLeft << GUEST_PAGE_SHIFT), pPage, GCPhys, GCPhysLast, pszDesc), 4564 VERR_PGM_UNEXPECTED_PAGE_STATE); 4349 4565 pPage++; 4350 4566 } … … 4354 4570 * Update the base memory reservation if necessary. 4355 4571 */ 4356 uint32_t cExtraBaseCost = fRamExists ? 0 : cGuestPages; 4357 if (fFlags & PGMPHYS_ROM_FLAGS_SHADOWED) 4358 cExtraBaseCost += cGuestPages; 4572 uint32_t const cExtraBaseCost = (pOverlappingRange ? 0 : cGuestPages) 4573 + (fFlags & PGMPHYS_ROM_FLAGS_SHADOWED ? cGuestPages : 0); 4359 4574 if (cExtraBaseCost) 4360 4575 { 4361 4576 int rc = MMR3IncreaseBaseReservation(pVM, cExtraBaseCost); 4362 if (RT_FAILURE(rc)) 4363 return rc; 4577 AssertRCReturn(rc, rc); 4364 4578 } 4365 4579 … … 4368 4582 * Early NEM notification before we've made any changes or anything. 4369 4583 */ 4370 uint32_t const fNemNotify = ( fRamExists? NEM_NOTIFY_PHYS_ROM_F_REPLACE : 0)4584 uint32_t const fNemNotify = (pOverlappingRange ? NEM_NOTIFY_PHYS_ROM_F_REPLACE : 0) 4371 4585 | (fFlags & PGMPHYS_ROM_FLAGS_SHADOWED ? 
NEM_NOTIFY_PHYS_ROM_F_SHADOW : 0); 4372 4586 uint8_t u2NemState = UINT8_MAX; … … 4375 4589 { 4376 4590 int rc = NEMR3NotifyPhysRomRegisterEarly(pVM, GCPhys, cGuestPages << GUEST_PAGE_SHIFT, 4377 fRamExists ? PGM_RAMRANGE_CALC_PAGE_R3PTR(pRam, GCPhys) : NULL, 4378 fNemNotify, &u2NemState, fRamExists ? &pRam->uNemRange : &uNemRange); 4591 pOverlappingRange 4592 ? PGM_RAMRANGE_CALC_PAGE_R3PTR(pOverlappingRange, GCPhys) : NULL, 4593 fNemNotify, &u2NemState, 4594 pOverlappingRange ? &pOverlappingRange->uNemRange : &uNemRange); 4379 4595 AssertLogRelRCReturn(rc, rc); 4380 4596 } … … 4382 4598 4383 4599 /* 4384 * Allocate memory for the virgin copy of the RAM. In simplified memory mode, 4385 * we allocate memory for any ad-hoc RAM range and for shadow pages. 4386 */ 4600 * Allocate memory for the virgin copy of the RAM. In simplified memory 4601 * mode, we allocate memory for any ad-hoc RAM range and for shadow pages. 4602 */ 4603 int rc; 4387 4604 PGMMALLOCATEPAGESREQ pReq = NULL; 4388 4605 #ifdef VBOX_WITH_PGM_NEM_MODE 4389 4606 void *pvRam = NULL; 4390 4607 void *pvAlt = NULL; 4391 if ( pVM->pgm.s.fNemMode)4392 { 4393 if (! fRamExists)4394 { 4395 intrc = SUPR3PageAlloc(cHostPages, 0, &pvRam);4608 if (PGM_IS_IN_NEM_MODE(pVM)) 4609 { 4610 if (!pOverlappingRange) 4611 { 4612 rc = SUPR3PageAlloc(cHostPages, 0, &pvRam); 4396 4613 if (RT_FAILURE(rc)) 4397 4614 return rc; … … 4399 4616 if (fFlags & PGMPHYS_ROM_FLAGS_SHADOWED) 4400 4617 { 4401 intrc = SUPR3PageAlloc(cHostPages, 0, &pvAlt);4618 rc = SUPR3PageAlloc(cHostPages, 0, &pvAlt); 4402 4619 if (RT_FAILURE(rc)) 4403 4620 { … … 4411 4628 #endif 4412 4629 { 4413 intrc = GMMR3AllocatePagesPrepare(pVM, &pReq, cGuestPages, GMMACCOUNT_BASE);4630 rc = GMMR3AllocatePagesPrepare(pVM, &pReq, cGuestPages, GMMACCOUNT_BASE); 4414 4631 AssertRCReturn(rc, rc); 4415 4632 … … 4431 4648 4432 4649 /* 4433 * Allocate the new ROM range and RAM range (if necessary). 4434 */ 4435 PPGMROMRANGE pRomNew = NULL; 4436 RTR0PTR pRomNewR0 = NIL_RTR0PTR; 4437 size_t const cbRomRange = RT_ALIGN_Z(RT_UOFFSETOF_DYN(PGMROMRANGE, aPages[cGuestPages]), 128); 4438 size_t const cbRamRange = fRamExists ? 0 : RT_UOFFSETOF_DYN(PGMROMRANGE, aPages[cGuestPages]); 4439 size_t const cRangePages = RT_ALIGN_Z(cbRomRange + cbRamRange, HOST_PAGE_SIZE) >> HOST_PAGE_SHIFT; 4440 int rc = SUPR3PageAllocEx(cRangePages, 0 /*fFlags*/, (void **)&pRomNew, &pRomNewR0, NULL /*paPages*/); 4650 * Allocate a RAM range if required. 4651 * Note! We don't clean up the RAM range here on failure, VM destruction does that. 4652 */ 4653 rc = VINF_SUCCESS; 4654 PPGMRAMRANGE pRamRange = NULL; 4655 if (!pOverlappingRange) 4656 rc = pgmR3PhysAllocateRamRange(pVM, pVCpu, cGuestPages, PGM_RAM_RANGE_FLAGS_AD_HOC_ROM, &pRamRange); 4441 4657 if (RT_SUCCESS(rc)) 4442 4658 { 4443 4444 4659 /* 4445 * Initialize and insert the RAM range (if required). 4660 * Allocate a ROM range. 4661 * Note! We don't clean up the ROM range here on failure, VM destruction does that. 4446 4662 */ 4447 PPGMRAMRANGE pRamNew; 4448 uint32_t const idxFirstRamPage = fRamExists ? (GCPhys - pRam->GCPhys) >> GUEST_PAGE_SHIFT : 0; 4449 PPGMROMPAGE pRomPage = &pRomNew->aPages[0]; 4450 if (!fRamExists) 4451 { 4452 /* New RAM range. */ 4453 pRamNew = (PPGMRAMRANGE)((uintptr_t)pRomNew + cbRomRange); 4454 pRamNew->pSelfR0 = !pRomNewR0 ? 
NIL_RTR0PTR : pRomNewR0 + cbRomRange; 4455 pRamNew->GCPhys = GCPhys; 4456 pRamNew->GCPhysLast = GCPhysLast; 4457 pRamNew->cb = cb; 4458 pRamNew->pszDesc = pszDesc; 4459 pRamNew->fFlags = PGM_RAM_RANGE_FLAGS_AD_HOC_ROM; 4460 pRamNew->pvR3 = NULL; 4461 pRamNew->paLSPages = NULL; 4663 if (SUPR3IsDriverless()) 4664 rc = pgmPhysRomRangeAllocCommon(pVM, cGuestPages, idRomRange, fFlags); 4665 else 4666 { 4667 PGMPHYSROMALLOCATERANGEREQ RomRangeReq; 4668 RomRangeReq.Hdr.u32Magic = SUPVMMR0REQHDR_MAGIC; 4669 RomRangeReq.Hdr.cbReq = sizeof(RomRangeReq); 4670 RomRangeReq.cbGuestPage = GUEST_PAGE_SIZE; 4671 RomRangeReq.cGuestPages = cGuestPages; 4672 RomRangeReq.idRomRange = idRomRange; 4673 RomRangeReq.fFlags = fFlags; 4674 rc = VMMR3CallR0Emt(pVM, pVCpu, VMMR0_DO_PGM_PHYS_ROM_ALLOCATE_RANGE, 0 /*u64Arg*/, &RomRangeReq.Hdr); 4675 } 4676 } 4677 if (RT_SUCCESS(rc)) 4678 { 4679 /* 4680 * Initialize and map the RAM range (if required). 4681 */ 4682 PPGMROMRANGE const pRomRange = pVM->pgm.s.apRomRanges[idRomRange]; 4683 AssertPtr(pRomRange); 4684 uint32_t const idxFirstRamPage = pOverlappingRange ? (GCPhys - pOverlappingRange->GCPhys) >> GUEST_PAGE_SHIFT : 0; 4685 PPGMROMPAGE pRomPage = &pRomRange->aPages[0]; 4686 if (!pOverlappingRange) 4687 { 4688 /* Initialize the new RAM range and insert it into the lookup table. */ 4689 pRamRange->pszDesc = pszDesc; 4462 4690 #ifdef VBOX_WITH_NATIVE_NEM 4463 pRam New->uNemRange= uNemRange;4464 #endif 4465 4466 PPGMPAGE pRamPage = &pRam New->aPages[idxFirstRamPage];4691 pRamRange->uNemRange = uNemRange; 4692 #endif 4693 4694 PPGMPAGE pRamPage = &pRamRange->aPages[idxFirstRamPage]; 4467 4695 #ifdef VBOX_WITH_PGM_NEM_MODE 4468 if ( pVM->pgm.s.fNemMode)4696 if (PGM_IS_IN_NEM_MODE(pVM)) 4469 4697 { 4470 4698 AssertPtr(pvRam); Assert(pReq == NULL); 4471 pRam New->pvR3 =pvRam;4699 pRamRange->pbR3 = (uint8_t *)pvRam; 4472 4700 for (uint32_t iPage = 0; iPage < cGuestPages; iPage++, pRamPage++, pRomPage++) 4473 4701 { … … 4479 4707 else 4480 4708 #endif 4709 { 4710 Assert(!pRamRange->pbR3); Assert(!pvRam); 4481 4711 for (uint32_t iPage = 0; iPage < cGuestPages; iPage++, pRamPage++, pRomPage++) 4482 4712 { … … 4489 4719 pRomPage->Virgin = *pRamPage; 4490 4720 } 4721 } 4491 4722 4492 4723 pVM->pgm.s.cAllPages += cGuestPages; 4493 4724 pVM->pgm.s.cPrivatePages += cGuestPages; 4494 pgmR3PhysLinkRamRange(pVM, pRamNew, pRamPrev); 4725 4726 rc = pgmR3PhysRamRangeInsertLookup(pVM, pRamRange, GCPhys, &idxInsert); 4495 4727 } 4496 4728 else 4497 4729 { 4498 /* Existing RAM range. */4499 PPGMPAGE pRamPage = &p Ram->aPages[idxFirstRamPage];4730 /* Insert the ROM into an existing RAM range. */ 4731 PPGMPAGE pRamPage = &pOverlappingRange->aPages[idxFirstRamPage]; 4500 4732 #ifdef VBOX_WITH_PGM_NEM_MODE 4501 if ( pVM->pgm.s.fNemMode)4733 if (PGM_IS_IN_NEM_MODE(pVM)) 4502 4734 { 4503 4735 Assert(pvRam == NULL); Assert(pReq == NULL); … … 4534 4766 pVM->pgm.s.cPrivatePages += cGuestPages; 4535 4767 } 4536 pRamNew = pRam; 4537 } 4538 4768 pRamRange = pOverlappingRange; 4769 } 4770 4771 if (RT_SUCCESS(rc)) 4772 { 4539 4773 #ifdef VBOX_WITH_NATIVE_NEM 4540 /* Set the NEM state of the pages if needed. */ 4541 if (u2NemState != UINT8_MAX) 4542 pgmPhysSetNemStateForPages(&pRamNew->aPages[idxFirstRamPage], cGuestPages, u2NemState); 4543 #endif 4544 4545 /* Flush physical page map TLB. */ 4546 pgmPhysInvalidatePageMapTLB(pVM); 4547 4548 /* 4549 * Register the ROM access handler. 
4550 */ 4551 rc = PGMHandlerPhysicalRegister(pVM, GCPhys, GCPhysLast, pVM->pgm.s.hRomPhysHandlerType, GCPhys, pszDesc); 4552 if (RT_SUCCESS(rc)) 4553 { 4774 /* Set the NEM state of the pages if needed. */ 4775 if (u2NemState != UINT8_MAX) 4776 pgmPhysSetNemStateForPages(&pRamRange->aPages[idxFirstRamPage], cGuestPages, u2NemState); 4777 #endif 4778 4779 /* Flush physical page map TLB. */ 4780 pgmPhysInvalidatePageMapTLB(pVM); 4781 4554 4782 /* 4555 * Copy the image over to the virgin pages. 4556 * This must be done after linking in the RAM range. 4783 * Register the ROM access handler. 4557 4784 */ 4558 size_t cbBinaryLeft = cbBinary; 4559 PPGMPAGE pRamPage = &pRamNew->aPages[idxFirstRamPage]; 4560 for (uint32_t iPage = 0; iPage < cGuestPages; iPage++, pRamPage++) 4561 { 4562 void *pvDstPage; 4563 rc = pgmPhysPageMap(pVM, pRamPage, GCPhys + (iPage << GUEST_PAGE_SHIFT), &pvDstPage); 4564 if (RT_FAILURE(rc)) 4785 rc = PGMHandlerPhysicalRegister(pVM, GCPhys, GCPhysLast, pVM->pgm.s.hRomPhysHandlerType, idRomRange, pszDesc); 4786 if (RT_SUCCESS(rc)) 4787 { 4788 /* 4789 * Copy the image over to the virgin pages. 4790 * This must be done after linking in the RAM range. 4791 */ 4792 size_t cbBinaryLeft = cbBinary; 4793 PPGMPAGE pRamPage = &pRamRange->aPages[idxFirstRamPage]; 4794 for (uint32_t iPage = 0; iPage < cGuestPages; iPage++, pRamPage++) 4565 4795 { 4566 VMSetError(pVM, rc, RT_SRC_POS, "Failed to map virgin ROM page at %RGp", GCPhys); 4567 break; 4568 } 4569 if (cbBinaryLeft >= GUEST_PAGE_SIZE) 4570 { 4571 memcpy(pvDstPage, (uint8_t const *)pvBinary + ((size_t)iPage << GUEST_PAGE_SHIFT), GUEST_PAGE_SIZE); 4572 cbBinaryLeft -= GUEST_PAGE_SIZE; 4573 } 4574 else 4575 { 4576 RT_BZERO(pvDstPage, GUEST_PAGE_SIZE); /* (shouldn't be necessary, but can't hurt either) */ 4577 if (cbBinaryLeft > 0) 4796 void *pvDstPage; 4797 rc = pgmPhysPageMap(pVM, pRamPage, GCPhys + (iPage << GUEST_PAGE_SHIFT), &pvDstPage); 4798 if (RT_FAILURE(rc)) 4578 4799 { 4579 memcpy(pvDstPage, (uint8_t const *)pvBinary + ((size_t)iPage << GUEST_PAGE_SHIFT), cbBinaryLeft);4580 cbBinaryLeft = 0;4800 VMSetError(pVM, rc, RT_SRC_POS, "Failed to map virgin ROM page at %RGp", GCPhys); 4801 break; 4581 4802 } 4582 } 4583 } 4584 if (RT_SUCCESS(rc)) 4585 { 4586 /* 4587 * Initialize the ROM range. 4588 * Note that the Virgin member of the pages has already been initialized above. 4589 */ 4590 pRomNew->pSelfR0 = pRomNewR0; 4591 pRomNew->GCPhys = GCPhys; 4592 pRomNew->GCPhysLast = GCPhysLast; 4593 pRomNew->cb = cb; 4594 pRomNew->fFlags = fFlags; 4595 pRomNew->idSavedState = UINT8_MAX; 4596 pRomNew->cbOriginal = cbBinary; 4597 pRomNew->pszDesc = pszDesc; 4598 #ifdef VBOX_WITH_PGM_NEM_MODE 4599 pRomNew->pbR3Alternate = (uint8_t *)pvAlt; 4600 #endif 4601 pRomNew->pvOriginal = fFlags & PGMPHYS_ROM_FLAGS_PERMANENT_BINARY 4602 ? pvBinary : RTMemDup(pvBinary, cbBinary); 4603 if (pRomNew->pvOriginal) 4604 { 4605 for (unsigned iPage = 0; iPage < cGuestPages; iPage++) 4803 if (cbBinaryLeft >= GUEST_PAGE_SIZE) 4606 4804 { 4607 PPGMROMPAGE pPage = &pRomNew->aPages[iPage]; 4608 pPage->enmProt = PGMROMPROT_READ_ROM_WRITE_IGNORE; 4609 #ifdef VBOX_WITH_PGM_NEM_MODE 4610 if (pVM->pgm.s.fNemMode) 4611 PGM_PAGE_INIT(&pPage->Shadow, UINT64_C(0x0000fffffffff000), NIL_GMM_PAGEID, 4612 PGMPAGETYPE_ROM_SHADOW, PGM_PAGE_STATE_ALLOCATED); 4613 else 4614 #endif 4615 PGM_PAGE_INIT_ZERO(&pPage->Shadow, pVM, PGMPAGETYPE_ROM_SHADOW); 4616 } 4617 4618 /* update the page count stats for the shadow pages. 
*/ 4619 if (fFlags & PGMPHYS_ROM_FLAGS_SHADOWED) 4620 { 4621 #ifdef VBOX_WITH_PGM_NEM_MODE 4622 if (pVM->pgm.s.fNemMode) 4623 pVM->pgm.s.cPrivatePages += cGuestPages; 4624 else 4625 #endif 4626 pVM->pgm.s.cZeroPages += cGuestPages; 4627 pVM->pgm.s.cAllPages += cGuestPages; 4628 } 4629 4630 /* 4631 * Insert the ROM range, tell REM and return successfully. 4632 */ 4633 pRomNew->pNextR3 = pRom; 4634 pRomNew->pNextR0 = pRom ? pRom->pSelfR0 : NIL_RTR0PTR; 4635 4636 if (pRomPrev) 4637 { 4638 pRomPrev->pNextR3 = pRomNew; 4639 pRomPrev->pNextR0 = pRomNew->pSelfR0; 4805 memcpy(pvDstPage, (uint8_t const *)pvBinary + ((size_t)iPage << GUEST_PAGE_SHIFT), GUEST_PAGE_SIZE); 4806 cbBinaryLeft -= GUEST_PAGE_SIZE; 4640 4807 } 4641 4808 else 4642 4809 { 4643 pVM->pgm.s.pRomRangesR3 = pRomNew; 4644 pVM->pgm.s.pRomRangesR0 = pRomNew->pSelfR0; 4810 RT_BZERO(pvDstPage, GUEST_PAGE_SIZE); /* (shouldn't be necessary, but can't hurt either) */ 4811 if (cbBinaryLeft > 0) 4812 { 4813 memcpy(pvDstPage, (uint8_t const *)pvBinary + ((size_t)iPage << GUEST_PAGE_SHIFT), cbBinaryLeft); 4814 cbBinaryLeft = 0; 4815 } 4645 4816 } 4646 4647 pgmPhysInvalidatePageMapTLB(pVM); 4817 } 4818 if (RT_SUCCESS(rc)) 4819 { 4820 /* 4821 * Initialize the ROM range. 4822 * Note that the Virgin member of the pages has already been initialized above. 4823 */ 4824 Assert(pRomRange->cb == cb); 4825 Assert(pRomRange->fFlags == fFlags); 4826 Assert(pRomRange->idSavedState == UINT8_MAX); 4827 pRomRange->GCPhys = GCPhys; 4828 pRomRange->GCPhysLast = GCPhysLast; 4829 pRomRange->cbOriginal = cbBinary; 4830 pRomRange->pszDesc = pszDesc; 4648 4831 #ifdef VBOX_WITH_PGM_NEM_MODE 4649 if (!pVM->pgm.s.fNemMode) 4650 #endif 4651 GMMR3AllocatePagesCleanup(pReq); 4832 pRomRange->pbR3Alternate = (uint8_t *)pvAlt; 4833 #endif 4834 pRomRange->pvOriginal = fFlags & PGMPHYS_ROM_FLAGS_PERMANENT_BINARY 4835 ? pvBinary : RTMemDup(pvBinary, cbBinary); 4836 if (pRomRange->pvOriginal) 4837 { 4838 for (unsigned iPage = 0; iPage < cGuestPages; iPage++) 4839 { 4840 PPGMROMPAGE const pPage = &pRomRange->aPages[iPage]; 4841 pPage->enmProt = PGMROMPROT_READ_ROM_WRITE_IGNORE; 4842 #ifdef VBOX_WITH_PGM_NEM_MODE 4843 if (PGM_IS_IN_NEM_MODE(pVM)) 4844 PGM_PAGE_INIT(&pPage->Shadow, UINT64_C(0x0000fffffffff000), NIL_GMM_PAGEID, 4845 PGMPAGETYPE_ROM_SHADOW, PGM_PAGE_STATE_ALLOCATED); 4846 else 4847 #endif 4848 PGM_PAGE_INIT_ZERO(&pPage->Shadow, pVM, PGMPAGETYPE_ROM_SHADOW); 4849 } 4850 4851 /* update the page count stats for the shadow pages. */ 4852 if (fFlags & PGMPHYS_ROM_FLAGS_SHADOWED) 4853 { 4854 if (PGM_IS_IN_NEM_MODE(pVM)) 4855 pVM->pgm.s.cPrivatePages += cGuestPages; 4856 else 4857 pVM->pgm.s.cZeroPages += cGuestPages; 4858 pVM->pgm.s.cAllPages += cGuestPages; 4859 } 4652 4860 4653 4861 #ifdef VBOX_WITH_NATIVE_NEM 4654 /* 4655 * Notify NEM again. 4656 */ 4657 if (VM_IS_NEM_ENABLED(pVM)) 4658 { 4659 u2NemState = UINT8_MAX; 4660 rc = NEMR3NotifyPhysRomRegisterLate(pVM, GCPhys, cb, PGM_RAMRANGE_CALC_PAGE_R3PTR(pRamNew, GCPhys), 4661 fNemNotify, &u2NemState, 4662 fRamExists ? &pRam->uNemRange : &pRamNew->uNemRange); 4663 if (u2NemState != UINT8_MAX) 4664 pgmPhysSetNemStateForPages(&pRamNew->aPages[idxFirstRamPage], cGuestPages, u2NemState); 4862 /* 4863 * Notify NEM again. 
4864 */ 4865 if (VM_IS_NEM_ENABLED(pVM)) 4866 { 4867 u2NemState = UINT8_MAX; 4868 rc = NEMR3NotifyPhysRomRegisterLate(pVM, GCPhys, cb, PGM_RAMRANGE_CALC_PAGE_R3PTR(pRamRange, GCPhys), 4869 fNemNotify, &u2NemState, &pRamRange->uNemRange); 4870 if (u2NemState != UINT8_MAX) 4871 pgmPhysSetNemStateForPages(&pRamRange->aPages[idxFirstRamPage], cGuestPages, u2NemState); 4872 } 4873 else 4874 #endif 4875 GMMR3AllocatePagesCleanup(pReq); 4665 4876 if (RT_SUCCESS(rc)) 4877 { 4878 /* 4879 * Done! 4880 */ 4881 #ifdef VBOX_STRICT 4882 pgmPhysAssertRamRangesLocked(pVM, false /*fInUpdate*/, false /*fRamRelaxed*/); 4883 #endif 4666 4884 return rc; 4885 } 4886 4887 /* 4888 * bail out 4889 */ 4890 #ifdef VBOX_WITH_NATIVE_NEM 4891 if (fFlags & PGMPHYS_ROM_FLAGS_SHADOWED) 4892 { 4893 Assert(VM_IS_NEM_ENABLED(pVM)); 4894 pVM->pgm.s.cPrivatePages -= cGuestPages; 4895 pVM->pgm.s.cAllPages -= cGuestPages; 4896 } 4897 #endif 4667 4898 } 4668 4899 else 4669 #endif 4670 return rc; 4671 4672 /* 4673 * bail out 4674 */ 4675 #ifdef VBOX_WITH_NATIVE_NEM 4676 /* unlink */ 4677 if (pRomPrev) 4678 { 4679 pRomPrev->pNextR3 = pRom; 4680 pRomPrev->pNextR0 = pRom ? pRom->pSelfR0 : NIL_RTR0PTR; 4681 } 4682 else 4683 { 4684 pVM->pgm.s.pRomRangesR3 = pRom; 4685 pVM->pgm.s.pRomRangesR0 = pRom ? pRom->pSelfR0 : NIL_RTR0PTR; 4686 } 4687 4688 if (fFlags & PGMPHYS_ROM_FLAGS_SHADOWED) 4689 { 4690 # ifdef VBOX_WITH_PGM_NEM_MODE 4691 if (pVM->pgm.s.fNemMode) 4692 pVM->pgm.s.cPrivatePages -= cGuestPages; 4693 else 4694 # endif 4695 pVM->pgm.s.cZeroPages -= cGuestPages; 4696 pVM->pgm.s.cAllPages -= cGuestPages; 4697 } 4698 #endif 4900 rc = VERR_NO_MEMORY; 4699 4901 } 4700 else 4701 rc = VERR_NO_MEMORY; 4902 4903 int rc2 = PGMHandlerPhysicalDeregister(pVM, GCPhys); 4904 AssertRC(rc2); 4702 4905 } 4703 4906 4704 i nt rc2 = PGMHandlerPhysicalDeregister(pVM, GCPhys);4705 AssertRC(rc2);4706 }4707 4708 if (!fRamExists)4709 pgmR3PhysUnlinkRamRange2(pVM, pRamNew, pRamPrev); 4710 else4711 { 4712 PPGMPAGE pRamPage = &p Ram->aPages[idxFirstRamPage];4907 idxInsert -= 1; 4908 if (!pOverlappingRange) 4909 pgmR3PhysRamRangeRemoveLookup(pVM, pRamRange, &idxInsert); 4910 } 4911 /* else: lookup insertion failed. */ 4912 4913 if (pOverlappingRange) 4914 { 4915 PPGMPAGE pRamPage = &pOverlappingRange->aPages[idxFirstRamPage]; 4713 4916 #ifdef VBOX_WITH_PGM_NEM_MODE 4714 if ( pVM->pgm.s.fNemMode)4917 if (PGM_IS_IN_NEM_MODE(pVM)) 4715 4918 { 4716 4919 Assert(pvRam == NULL); Assert(pReq == NULL); … … 4733 4936 } 4734 4937 } 4735 4736 SUPR3PageFreeEx(pRomNew, cRangePages); 4737 } 4738 4739 /** @todo Purge the mapping cache or something... */ 4938 } 4939 pgmPhysInvalidatePageMapTLB(pVM); 4940 pgmPhysInvalidRamRangeTlbs(pVM); 4941 4740 4942 #ifdef VBOX_WITH_PGM_NEM_MODE 4741 if ( pVM->pgm.s.fNemMode)4943 if (PGM_IS_IN_NEM_MODE(pVM)) 4742 4944 { 4743 4945 Assert(!pReq); … … 4753 4955 GMMR3AllocatePagesCleanup(pReq); 4754 4956 } 4957 4958 /* We don't bother to actually free either the ROM nor the RAM ranges 4959 themselves, as already mentioned above, we'll leave that to the VM 4960 termination cleanup code. */ 4755 4961 return rc; 4756 4962 } … … 4764 4970 * where we can select where the reads go and where the writes go. On real 4765 4971 * hardware the chipset provides means to configure this. We provide 4766 * PGMR3Phys ProtectROM() for this purpose.4972 * PGMR3PhysRomProtect() for this purpose. 
4767 4973 * 4768 4974 * A read-only copy of the ROM image will always be kept around while we … … 4793 4999 pDevIns, GCPhys, GCPhys + cb, cb, pvBinary, cbBinary, fFlags, pszDesc)); 4794 5000 PGM_LOCK_VOID(pVM); 5001 4795 5002 int rc = pgmR3PhysRomRegisterLocked(pVM, pDevIns, GCPhys, cb, pvBinary, cbBinary, fFlags, pszDesc); 5003 4796 5004 PGM_UNLOCK(pVM); 4797 5005 return rc; … … 4812 5020 { 4813 5021 PGM_LOCK_ASSERT_OWNER(pVM); 4814 for (PPGMROMRANGE pRom = pVM->pgm.s.pRomRangesR3; pRom; pRom = pRom->pNextR3) 4815 { 4816 const uint32_t cGuestPages = pRom->cb >> GUEST_PAGE_SHIFT; 5022 uint32_t const cRomRanges = RT_MIN(pVM->pgm.s.cRomRanges, RT_ELEMENTS(pVM->pgm.s.apRomRanges)); 5023 for (uint32_t idx = 0; idx < cRomRanges; idx++) 5024 { 5025 PPGMROMRANGE const pRom = pVM->pgm.s.apRomRanges[idx]; 5026 uint32_t const cGuestPages = pRom->cb >> GUEST_PAGE_SHIFT; 4817 5027 4818 5028 if (pRom->fFlags & PGMPHYS_ROM_FLAGS_SHADOWED) … … 4830 5040 */ 4831 5041 #ifdef VBOX_WITH_PGM_NEM_MODE 4832 if ( pVM->pgm.s.fNemMode)5042 if (PGM_IS_IN_NEM_MODE(pVM)) 4833 5043 { 4834 5044 /* Clear all the shadow pages (currently using alternate backing). */ … … 4871 5081 continue; 4872 5082 Assert(!PGM_PAGE_IS_BALLOONED(&pRom->aPages[iPage].Shadow)); 4873 void *pvDstPage;4874 const RTGCPHYSGCPhys = pRom->GCPhys + (iPage << GUEST_PAGE_SHIFT);5083 void *pvDstPage; 5084 RTGCPHYS const GCPhys = pRom->GCPhys + (iPage << GUEST_PAGE_SHIFT); 4875 5085 rc = pgmPhysPageMakeWritableAndMap(pVM, &pRom->aPages[iPage].Shadow, GCPhys, &pvDstPage); 4876 5086 if (RT_FAILURE(rc)) … … 4895 5105 for (uint32_t iPage = 0; iPage < cGuestPages && cbSrcLeft > 0; iPage++, pbSrcPage += GUEST_PAGE_SIZE) 4896 5106 { 4897 const RTGCPHYSGCPhys = pRom->GCPhys + (iPage << GUEST_PAGE_SHIFT);5107 RTGCPHYS const GCPhys = pRom->GCPhys + (iPage << GUEST_PAGE_SHIFT); 4898 5108 PPGMPAGE const pPage = pgmPhysGetPage(pVM, GCPhys); 4899 5109 void const *pvDstPage = NULL; … … 4942 5152 * Free the heap copy of the original bits. 4943 5153 */ 4944 for (PPGMROMRANGE pRom = pVM->pgm.s.pRomRangesR3; pRom; pRom = pRom->pNextR3) 4945 { 5154 uint32_t const cRomRanges = RT_MIN(pVM->pgm.s.cRomRanges, RT_ELEMENTS(pVM->pgm.s.apRomRanges)); 5155 for (uint32_t idx = 0; idx < cRomRanges; idx++) 5156 { 5157 PPGMROMRANGE const pRom = pVM->pgm.s.apRomRanges[idx]; 4946 5158 if ( pRom->pvOriginal 4947 5159 && !(pRom->fFlags & PGMPHYS_ROM_FLAGS_PERMANENT_BINARY)) … … 4990 5202 int rc = VINF_SUCCESS; 4991 5203 bool fFlushTLB = false; 4992 for (PPGMROMRANGE pRom = pVM->pgm.s.pRomRangesR3; pRom; pRom = pRom->pNextR3) 4993 { 5204 uint32_t const cRomRanges = RT_MIN(pVM->pgm.s.cRomRanges, RT_ELEMENTS(pVM->pgm.s.apRomRanges)); 5205 for (uint32_t idx = 0; idx < cRomRanges; idx++) 5206 { 5207 PPGMROMRANGE const pRom = pVM->pgm.s.apRomRanges[idx]; 4994 5208 if ( GCPhys <= pRom->GCPhysLast 4995 5209 && GCPhysLast >= pRom->GCPhys … … 5036 5250 # ifdef VBOX_WITH_PGM_NEM_MODE 5037 5251 /* In simplified mode we have to switch the page data around too. */ 5038 if ( pVM->pgm.s.fNemMode)5252 if (PGM_IS_IN_NEM_MODE(pVM)) 5039 5253 { 5040 5254 uint8_t abPage[GUEST_PAGE_SIZE]; … … 5291 5505 5292 5506 5507 5293 5508 /********************************************************************************************************************************* 5294 5509 * Write Monitoring * … … 5317 5532 #endif 5318 5533 5319 /** @todo pointless to write protect the physical page pointed to by RSP. 
*/ 5320 5321 for (PPGMRAMRANGE pRam = pVM->pgm.s.CTX_SUFF(pRamRangesX); 5322 pRam; 5323 pRam = pRam->CTX_SUFF(pNext)) 5324 { 5534 uint32_t const cLookupEntries = RT_MIN(pVM->pgm.s.RamRangeUnion.cLookupEntries, RT_ELEMENTS(pVM->pgm.s.aRamRangeLookup)); 5535 for (uint32_t idxLookup = 0; idxLookup < cLookupEntries; idxLookup++) 5536 { 5537 uint32_t const idRamRange = PGMRAMRANGELOOKUPENTRY_GET_ID(pVM->pgm.s.aRamRangeLookup[idxLookup]); 5538 AssertContinue(idRamRange < RT_ELEMENTS(pVM->pgm.s.apRamRanges)); 5539 PPGMRAMRANGE const pRam = pVM->pgm.s.apRamRanges[idRamRange]; 5540 AssertContinue(pRam); 5541 5325 5542 uint32_t cPages = pRam->cb >> GUEST_PAGE_SHIFT; 5326 5543 for (uint32_t iPage = 0; iPage < cPages; iPage++) 5327 5544 { 5328 PPGMPAGE pPage= &pRam->aPages[iPage];5329 PGMPAGETYPE enmPageType = (PGMPAGETYPE)PGM_PAGE_GET_TYPE(pPage);5545 PPGMPAGE const pPage = &pRam->aPages[iPage]; 5546 PGMPAGETYPE const enmPageType = (PGMPAGETYPE)PGM_PAGE_GET_TYPE(pPage); 5330 5547 5331 5548 if ( RT_LIKELY(enmPageType == PGMPAGETYPE_RAM) … … 5927 6144 if (idPage != NIL_GMM_PAGEID) 5928 6145 { 5929 for (PPGMRAMRANGE pRam = pVM->pgm.s.pRamRangesXR3; 5930 pRam; 5931 pRam = pRam->pNextR3) 6146 uint32_t const idRamRangeMax = RT_MIN(pVM->pgm.s.idRamRangeMax, RT_ELEMENTS(pVM->pgm.s.apRamRanges) - 1U); 6147 for (uint32_t idRamRange = 0; idRamRange <= idRamRangeMax; idRamRange++) 5932 6148 { 6149 PPGMRAMRANGE const pRam = pVM->pgm.s.apRamRanges[idRamRange]; 6150 Assert(pRam || idRamRange == 0); 6151 if (!pRam) continue; 6152 Assert(pRam->idRange == idRamRange); 6153 5933 6154 uint32_t const cPages = pRam->cb >> GUEST_PAGE_SHIFT; 5934 6155 for (uint32_t iPage = 0; iPage < cPages; iPage++) -
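A note on the dirty-bitmap bookkeeping in the PGMR3PhysMmio2QueryAndResetDirtyBitmap hunks above: the caller's bitmap is sized as one bit per guest page, padded up to whole 64-bit words (RT_ALIGN_T(cbTotal, GUEST_PAGE_SIZE * 64, RTGCPHYS) / GUEST_PAGE_SIZE / 8), and whole chunks are marked dirty with ASMBitSetRange. Below is a minimal standalone sketch of that size rule, assuming 4 KiB guest pages; MY_PAGE_SIZE, myCalcBitmapSize and myBitSetRange are made-up stand-ins for illustration only, not the real IPRT RT_ALIGN_T/ASMBitSetRange helpers, and the bit ordering shown is plain byte-wise rather than whatever the real ASMBitSetRange uses.

/* Sketch of the dirty-bitmap size rule: one bit per guest page, with the
 * bitmap padded out to whole 64-bit words (64 pages per uint64_t).
 * Illustrative helpers only, not the real IPRT ones. */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define MY_PAGE_SIZE 4096u /* assumed GUEST_PAGE_SIZE */

/* Bytes needed for a dirty bitmap covering cbRegion bytes of MMIO2. */
static size_t myCalcBitmapSize(uint64_t cbRegion)
{
    uint64_t const cbChunk   = (uint64_t)MY_PAGE_SIZE * 64; /* 64 pages <=> one uint64_t of bits */
    uint64_t const cbAligned = (cbRegion + cbChunk - 1) / cbChunk * cbChunk;
    return (size_t)(cbAligned / MY_PAGE_SIZE / 8);
}

/* Mark pages [iFirst, iFirst + cPages) dirty in the bitmap. */
static void myBitSetRange(uint8_t *pbBitmap, uint32_t iFirst, uint32_t cPages)
{
    for (uint32_t i = iFirst; i < iFirst + cPages; i++)
        pbBitmap[i / 8] |= (uint8_t)(1u << (i % 8));
}

int main(void)
{
    uint64_t const cbRegion = 100 * 1024;                 /* 100 KiB => 25 pages */
    size_t   const cbBitmap = myCalcBitmapSize(cbRegion); /* rounds up to 64 pages => 8 bytes */
    uint8_t        abBitmap[8];
    memset(abBitmap, 0, sizeof(abBitmap));
    myBitSetRange(abBitmap, 3, 5);                        /* pages 3..7 dirty */
    printf("region %llu bytes -> %zu byte bitmap, first byte %#x\n",
           (unsigned long long)cbRegion, cbBitmap, abBitmap[0]);
    return 0;
}

Even a 25-page region comes out at a full 8-byte word here, which is consistent with the new code asserting cbBitmap == cbTotalBitmap against the rounded size rather than against a raw page count.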
trunk/src/VBox/VMM/VMMR3/PGMPool.cpp
r103015 r104840 715 715 * Clear all the GCPhys links and rebuild the phys ext free list. 716 716 */ 717 for (PPGMRAMRANGE pRam = pPool->CTX_SUFF(pVM)->pgm.s.CTX_SUFF(pRamRangesX);718 pRam;719 pRam = pRam->CTX_SUFF(pNext))717 uint32_t const idRamRangeMax = RT_MIN(pVM->pgm.s.idRamRangeMax, RT_ELEMENTS(pVM->pgm.s.apRamRanges) - 1U); 718 Assert(pVM->pgm.s.apRamRanges[0] == NULL); 719 for (uint32_t idx = 1; idx <= idRamRangeMax; idx++) 720 720 { 721 PPGMRAMRANGE const pRam = pVM->pgm.s.apRamRanges[idx]; 721 722 iPage = pRam->cb >> GUEST_PAGE_SHIFT; 722 723 while (iPage-- > 0) -
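The PGMPool.cpp hunk above follows the same pattern as the rest of r104840: the linked-list walk over pRamRangesX/CTX_SUFF(pNext) is replaced by an index walk over the apRamRanges array, with the upper bound clamped through RT_MIN against RT_ELEMENTS so a bogus idRamRangeMax can never index past the array, and with unused slots (including the reserved slot 0) simply skipped. The following is a minimal sketch of that iteration shape, using simplified stand-in types (MYVM, MYRANGE) rather than the real PGM structures, so only the clamping and slot-skipping logic carries over.

/* Minimal sketch of the list-to-array iteration change, using simplified
 * stand-in types rather than the real PGM structures. */
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

#define MY_MAX_RANGES  8
#define MY_MIN(a, b)   ((a) < (b) ? (a) : (b))
#define MY_ELEMENTS(a) (sizeof(a) / sizeof((a)[0]))

typedef struct MYRANGE
{
    uint64_t    GCPhys;   /* first guest-physical address covered */
    uint64_t    cb;       /* size of the range in bytes */
    const char *pszDesc;  /* description, for logging */
} MYRANGE;

typedef struct MYVM
{
    /* Slot 0 is reserved/NULL so that range ID 0 can mean "invalid". */
    MYRANGE *apRanges[MY_MAX_RANGES];
    uint32_t idRangeMax;  /* highest range ID currently in use */
} MYVM;

/* New style: walk the ID-indexed array, clamping the upper bound so a
 * corrupted counter can never run past the array, and skip empty slots. */
static void myEnumRanges(MYVM *pVM)
{
    uint32_t const idMax = MY_MIN(pVM->idRangeMax, MY_ELEMENTS(pVM->apRanges) - 1U);
    for (uint32_t id = 1; id <= idMax; id++)
    {
        MYRANGE const *pRange = pVM->apRanges[id];
        if (!pRange)  /* unused slots are simply skipped */
            continue;
        printf("range #%u: %#llx LB %#llx (%s)\n", id,
               (unsigned long long)pRange->GCPhys,
               (unsigned long long)pRange->cb, pRange->pszDesc);
    }
}

int main(void)
{
    static MYRANGE s_Low  = { 0x00000000, 0xA0000,   "Base RAM"  };
    static MYRANGE s_High = { 0x100000,   0x3F00000, "Above 1MB" };
    MYVM Vm = { { NULL, &s_Low, &s_High }, 2 };
    myEnumRanges(&Vm);
    return 0;
}

The apparent design point, judging from the CTX_EXPR(pgm, pgmr0, pgm) accessors introduced throughout this changeset, is that a range keeps its ID once allocated, so ring-3 and ring-0 can both refer to it by index in their respective arrays instead of chasing context-specific next pointers.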
trunk/src/VBox/VMM/VMMR3/PGMSavedState.cpp
r104557 r104840 224 224 static PPGMROMPAGE pgmR3GetRomPage(PVM pVM, RTGCPHYS GCPhys) /** @todo change this to take a hint. */ 225 225 { 226 for (PPGMROMRANGE pRomRange = pVM->pgm.s.CTX_SUFF(pRomRanges);227 pRomRange;228 pRomRange = pRomRange->CTX_SUFF(pNext))229 {230 RTGCPHYS off= GCPhys - pRomRange->GCPhys;226 uint32_t const cRomRanges = RT_MIN(pVM->pgm.s.cRomRanges, RT_ELEMENTS(pVM->pgm.s.apRomRanges)); 227 for (uint32_t idx = 0; idx < cRomRanges; idx++) 228 { 229 PPGMROMRANGE const pRomRange = pVM->pgm.s.apRomRanges[idx]; 230 RTGCPHYS const off = GCPhys - pRomRange->GCPhys; 231 231 if (GCPhys - pRomRange->GCPhys < pRomRange->cb) 232 232 return &pRomRange->aPages[off >> GUEST_PAGE_SHIFT]; … … 248 248 */ 249 249 PGM_LOCK_VOID(pVM); 250 for (PPGMROMRANGE pRom = pVM->pgm.s.pRomRangesR3; pRom; pRom = pRom->pNextR3) 251 { 252 PPGMRAMRANGE pRamHint = NULL;; 253 uint32_t const cPages = pRom->cb >> GUEST_PAGE_SHIFT; 250 uint32_t const cRomRanges = RT_MIN(pVM->pgm.s.cRomRanges, RT_ELEMENTS(pVM->pgm.s.apRomRanges)); 251 for (uint32_t idx = 0; idx < cRomRanges; idx++) 252 { 253 PPGMROMRANGE const pRom = pVM->pgm.s.apRomRanges[idx]; 254 uint32_t const cPages = pRom->cb >> GUEST_PAGE_SHIFT; 255 PPGMRAMRANGE pRamHint = NULL; 254 256 255 257 for (uint32_t iPage = 0; iPage < cPages; iPage++) … … 297 299 { 298 300 PGM_LOCK_VOID(pVM); 299 uint8_t id = 1; 300 for (PPGMROMRANGE pRom = pVM->pgm.s.pRomRangesR3; pRom; pRom = pRom->pNextR3, id++) 301 { 302 pRom->idSavedState = id; 303 SSMR3PutU8(pSSM, id); 301 uint32_t const cRomRanges = RT_MIN(pVM->pgm.s.cRomRanges, RT_ELEMENTS(pVM->pgm.s.apRomRanges)); 302 for (uint32_t idx = 0; idx < cRomRanges; idx++) 303 { 304 PPGMROMRANGE const pRom = pVM->pgm.s.apRomRanges[idx]; 305 uint8_t const idSavedState = (uint8_t)(idx + 1); 306 pRom->idSavedState = idSavedState; 307 SSMR3PutU8(pSSM, idSavedState); 304 308 SSMR3PutStrZ(pSSM, ""); /* device name */ 305 309 SSMR3PutU32(pSSM, 0); /* device instance */ … … 328 332 PGM_LOCK_ASSERT_OWNER(pVM); 329 333 330 for (PPGMROMRANGE pRom = pVM->pgm.s.pRomRangesR3; pRom; pRom = pRom->pNextR3) 331 pRom->idSavedState = UINT8_MAX; 334 uint32_t const cRomRanges = RT_MIN(pVM->pgm.s.cRomRanges, RT_ELEMENTS(pVM->pgm.s.apRomRanges)); 335 for (uint32_t idx = 0; idx < cRomRanges; idx++) 336 pVM->pgm.s.apRomRanges[idx]->idSavedState = UINT8_MAX; 332 337 333 338 for (;;) … … 342 347 if (id == UINT8_MAX) 343 348 { 344 for (PPGMROMRANGE pRom = pVM->pgm.s.pRomRangesR3; pRom; pRom = pRom->pNextR3) 349 /* 350 * End of ROM ranges. Check that all are accounted for. 351 */ 352 for (uint32_t idx = 0; idx < cRomRanges; idx++) 353 { 354 PPGMROMRANGE const pRom = pVM->pgm.s.apRomRanges[idx]; 345 355 if (pRom->idSavedState != UINT8_MAX) 346 356 { /* likely */ } … … 352 362 ("The '%s' ROM was not found in the saved state. 
Probably due to some misconfiguration\n", 353 363 pRom->pszDesc)); 364 } 354 365 return VINF_SUCCESS; /* the end */ 355 366 } … … 384 395 && iRegion == 0 385 396 && szDevName[0] == '\0', 386 ("GCPhys=%RGp %s\n", GCPhys, szDesc),397 ("GCPhys=%RGp LB %RGp %s\n", GCPhys, cb, szDesc), 387 398 VERR_SSM_DATA_UNIT_FORMAT_CHANGED); 388 PPGMROMRANGE pRom; 389 for (pRom = pVM->pgm.s.pRomRangesR3; pRom; pRom = pRom->pNextR3) 390 { 391 if ( pRom->idSavedState == UINT8_MAX 392 && !strcmp(pRom->pszDesc, szDesc)) 399 uint32_t idx; 400 for (idx = 0; idx < cRomRanges; idx++) 401 { 402 PPGMROMRANGE const pRom = pVM->pgm.s.apRomRanges[idx]; 403 if ( pRom->idSavedState == UINT8_MAX 404 && !strcmp(pRom->pszDesc, szDesc)) 393 405 { 394 406 pRom->idSavedState = id; … … 396 408 } 397 409 } 398 if (!pRom) 399 return SSMR3SetCfgError(pSSM, RT_SRC_POS, N_("ROM at %RGp by the name '%s' was not found"), GCPhys, szDesc); 410 if (idx >= cRomRanges) 411 return SSMR3SetCfgError(pSSM, RT_SRC_POS, N_("ROM at %RGp LB %RGp by the name '%s' was not found"), 412 GCPhys, cb, szDesc); 400 413 } /* forever */ 401 414 } … … 413 426 */ 414 427 PGM_LOCK_VOID(pVM); 415 for (PPGMROMRANGE pRom = pVM->pgm.s.pRomRangesR3; pRom; pRom = pRom->pNextR3) 416 { 428 uint32_t const cRomRanges = RT_MIN(pVM->pgm.s.cRomRanges, RT_ELEMENTS(pVM->pgm.s.apRomRanges)); 429 for (uint32_t idx = 0; idx < cRomRanges; idx++) 430 { 431 PPGMROMRANGE const pRom = pVM->pgm.s.apRomRanges[idx]; 417 432 if (pRom->fFlags & PGMPHYS_ROM_FLAGS_SHADOWED) 418 433 { … … 455 470 { 456 471 PGM_LOCK_VOID(pVM); 457 for (PPGMROMRANGE pRom = pVM->pgm.s.pRomRangesR3; pRom; pRom = pRom->pNextR3) 458 { 459 uint32_t const cPages = pRom->cb >> GUEST_PAGE_SHIFT; 472 uint32_t const cRomRanges = RT_MIN(pVM->pgm.s.cRomRanges, RT_ELEMENTS(pVM->pgm.s.apRomRanges)); 473 for (uint32_t idx = 0; idx < cRomRanges; idx++) 474 { 475 PPGMROMRANGE const pRom = pVM->pgm.s.apRomRanges[idx]; 476 uint32_t const cPages = pRom->cb >> GUEST_PAGE_SHIFT; 460 477 for (uint32_t iPage = 0; iPage < cPages; iPage++) 461 478 { … … 478 495 void const *pvPage; 479 496 #ifdef VBOX_WITH_PGM_NEM_MODE 480 if (!PGMROMPROT_IS_ROM(enmProt) && pVM->pgm.s.fNemMode)497 if (!PGMROMPROT_IS_ROM(enmProt) && PGM_IS_IN_NEM_MODE(pVM)) 481 498 pvPage = &pRom->pbR3Alternate[iPage << GUEST_PAGE_SHIFT]; 482 499 else … … 541 558 */ 542 559 PGM_LOCK_VOID(pVM); 543 for (PPGMROMRANGE pRom = pVM->pgm.s.pRomRangesR3; pRom; pRom = pRom->pNextR3) 544 { 560 uint32_t const cRomRanges = RT_MIN(pVM->pgm.s.cRomRanges, RT_ELEMENTS(pVM->pgm.s.apRomRanges)); 561 for (uint32_t idx = 0; idx < cRomRanges; idx++) 562 { 563 PPGMROMRANGE const pRom = pVM->pgm.s.apRomRanges[idx]; 545 564 if (pRom->fFlags & PGMPHYS_ROM_FLAGS_SHADOWED) 546 565 { … … 569 588 void const *pvPage; 570 589 #ifdef VBOX_WITH_PGM_NEM_MODE 571 if (PGMROMPROT_IS_ROM(enmProt) && pVM->pgm.s.fNemMode)590 if (PGMROMPROT_IS_ROM(enmProt) && PGM_IS_IN_NEM_MODE(pVM)) 572 591 pvPage = &pRom->pbR3Alternate[iPage << GUEST_PAGE_SHIFT]; 573 592 else … … 662 681 */ 663 682 PGM_LOCK_VOID(pVM); 664 for (PPGMREGMMIO2RANGE pRegMmio = pVM->pgm.s.pRegMmioRangesR3; pRegMmio; pRegMmio = pRegMmio->pNextR3) 665 { 666 uint32_t const cPages = pRegMmio->RamRange.cb >> GUEST_PAGE_SHIFT; 683 uint32_t const cMmio2Ranges = RT_MIN(pVM->pgm.s.cMmio2Ranges, RT_ELEMENTS(pVM->pgm.s.aMmio2Ranges)); 684 for (uint32_t idx = 0; idx < cMmio2Ranges; idx++) 685 { 686 PPGMREGMMIO2RANGE const pRegMmio2 = &pVM->pgm.s.aMmio2Ranges[idx]; 687 PPGMRAMRANGE const pRamRange = pVM->pgm.s.apMmio2RamRanges[idx]; 688 uint32_t const cPages = 
pRamRange->cb >> GUEST_PAGE_SHIFT; 667 689 PGM_UNLOCK(pVM); 668 690 … … 682 704 683 705 PGM_LOCK_VOID(pVM); 684 pRegMmio ->paLSPages = paLSPages;706 pRegMmio2->paLSPages = paLSPages; 685 707 pVM->pgm.s.LiveSave.Mmio2.cDirtyPages += cPages; 686 708 } … … 700 722 { 701 723 PGM_LOCK_VOID(pVM); 702 uint8_t id = 1; 703 for (PPGMREGMMIO2RANGE pRegMmio = pVM->pgm.s.pRegMmioRangesR3; pRegMmio; pRegMmio = pRegMmio->pNextR3) 704 { 705 pRegMmio->idSavedState = id; 706 SSMR3PutU8(pSSM, id); 707 SSMR3PutStrZ(pSSM, pRegMmio->pDevInsR3->pReg->szName); 708 SSMR3PutU32(pSSM, pRegMmio->pDevInsR3->iInstance); 709 SSMR3PutU8(pSSM, pRegMmio->iRegion); 710 SSMR3PutStrZ(pSSM, pRegMmio->RamRange.pszDesc); 711 int rc = SSMR3PutGCPhys(pSSM, pRegMmio->RamRange.cb); 724 uint32_t const cMmio2Ranges = RT_MIN(pVM->pgm.s.cMmio2Ranges, RT_ELEMENTS(pVM->pgm.s.aMmio2Ranges)); 725 for (uint32_t idx = 0; idx < cMmio2Ranges; idx++) 726 { 727 PPGMREGMMIO2RANGE const pRegMmio2 = &pVM->pgm.s.aMmio2Ranges[idx]; 728 PPGMRAMRANGE const pRamRange = pVM->pgm.s.apMmio2RamRanges[idx]; 729 uint8_t const idSavedState = (uint8_t)(idx + 1); 730 pRegMmio2->idSavedState = idSavedState; 731 SSMR3PutU8(pSSM, idSavedState); 732 SSMR3PutStrZ(pSSM, pRegMmio2->pDevInsR3->pReg->szName); 733 SSMR3PutU32(pSSM, pRegMmio2->pDevInsR3->iInstance); 734 SSMR3PutU8(pSSM, pRegMmio2->iRegion); 735 SSMR3PutStrZ(pSSM, pRamRange->pszDesc); 736 int rc = SSMR3PutGCPhys(pSSM, pRamRange->cb); 712 737 if (RT_FAILURE(rc)) 713 738 break; 714 id++;715 739 } 716 740 PGM_UNLOCK(pVM); … … 731 755 PGM_LOCK_ASSERT_OWNER(pVM); 732 756 733 for (PPGMREGMMIO2RANGE pRegMmio = pVM->pgm.s.pRegMmioRangesR3; pRegMmio; pRegMmio = pRegMmio->pNextR3) 734 pRegMmio->idSavedState = UINT8_MAX; 757 uint32_t const cMmio2Ranges = RT_MIN(pVM->pgm.s.cMmio2Ranges, RT_ELEMENTS(pVM->pgm.s.aMmio2Ranges)); 758 for (uint32_t idx = 0; idx < cMmio2Ranges; idx++) 759 pVM->pgm.s.aMmio2Ranges[idx].idSavedState = UINT8_MAX; 735 760 736 761 for (;;) … … 745 770 if (id == UINT8_MAX) 746 771 { 747 for (PPGMREGMMIO2RANGE pRegMmio = pVM->pgm.s.pRegMmioRangesR3; pRegMmio; pRegMmio = pRegMmio->pNextR3) 748 AssertLogRelMsg(pRegMmio->idSavedState != UINT8_MAX, ("%s\n", pRegMmio->RamRange.pszDesc)); 772 /* 773 * End of MMIO2 ranges. Check that all are accounted for. 774 */ 775 for (uint32_t idx = 0; idx < cMmio2Ranges; idx++) 776 AssertLogRelMsg(pVM->pgm.s.aMmio2Ranges[idx].idSavedState != UINT8_MAX, 777 ("%s\n", pVM->pgm.s.apMmio2RamRanges[idx]->pszDesc)); 749 778 return VINF_SUCCESS; /* the end */ 750 779 } … … 772 801 * Locate a matching MMIO2 range. 
773 802 */ 774 PPGMREGMMIO2RANGE pRegMmio; 775 for (pRegMmio = pVM->pgm.s.pRegMmioRangesR3; pRegMmio; pRegMmio = pRegMmio->pNextR3) 776 { 777 if ( pRegMmio->idSavedState == UINT8_MAX 778 && pRegMmio->iRegion == iRegion 779 && pRegMmio->pDevInsR3->iInstance == uInstance 780 && !strcmp(pRegMmio->pDevInsR3->pReg->szName, szDevName)) 803 uint32_t idx; 804 for (idx = 0; idx < cMmio2Ranges; idx++) 805 { 806 PPGMREGMMIO2RANGE const pRegMmio2 = &pVM->pgm.s.aMmio2Ranges[idx]; 807 if ( pRegMmio2->idSavedState == UINT8_MAX 808 && pRegMmio2->iRegion == iRegion 809 && pRegMmio2->pDevInsR3->iInstance == uInstance 810 && !strcmp(pRegMmio2->pDevInsR3->pReg->szName, szDevName)) 781 811 { 782 pRegMmio ->idSavedState = id;812 pRegMmio2->idSavedState = id; 783 813 break; 784 814 } 785 815 } 786 if ( !pRegMmio)816 if (idx >= cMmio2Ranges) 787 817 return SSMR3SetCfgError(pSSM, RT_SRC_POS, N_("Failed to locate a MMIO2 range called '%s' owned by %s/%u, region %d"), 788 818 szDesc, szDevName, uInstance, iRegion); … … 792 822 * the same. 793 823 */ 794 if (cb != pRegMmio->RamRange.cb)795 {796 LogRel(("PGM: MMIO2 region \"%s\" size mismatch: saved=%RGp config=%RGp\n",797 pRegMmio->RamRange.pszDesc, cb, pRegMmio->RamRange.cb));798 if (cb > pR egMmio->RamRange.cb) /* bad idea? */824 PPGMRAMRANGE const pRamRange = pVM->pgm.s.apMmio2RamRanges[idx]; 825 if (cb != pRamRange->cb) 826 { 827 LogRel(("PGM: MMIO2 region \"%s\" size mismatch: saved=%RGp config=%RGp\n", pRamRange->pszDesc, cb, pRamRange->cb)); 828 if (cb > pRamRange->cb) /* bad idea? */ 799 829 return SSMR3SetCfgError(pSSM, RT_SRC_POS, N_("MMIO2 region \"%s\" size mismatch: saved=%RGp config=%RGp"), 800 pR egMmio->RamRange.pszDesc, cb, pRegMmio->RamRange.cb);830 pRamRange->pszDesc, cb, pRamRange->cb); 801 831 } 802 832 } /* forever */ … … 896 926 897 927 PGM_LOCK_VOID(pVM); /* paranoia */ 898 for (PPGMREGMMIO2RANGE pRegMmio = pVM->pgm.s.pRegMmioRangesR3; pRegMmio; pRegMmio = pRegMmio->pNextR3) 899 { 900 PPGMLIVESAVEMMIO2PAGE paLSPages = pRegMmio->paLSPages; 901 uint32_t cPages = pRegMmio->RamRange.cb >> GUEST_PAGE_SHIFT; 928 uint32_t const cMmio2Ranges = RT_MIN(pVM->pgm.s.cMmio2Ranges, RT_ELEMENTS(pVM->pgm.s.aMmio2Ranges)); 929 for (uint32_t idx = 0; idx < cMmio2Ranges; idx++) 930 { 931 PPGMREGMMIO2RANGE const pRegMmio2 = &pVM->pgm.s.aMmio2Ranges[idx]; 932 PPGMLIVESAVEMMIO2PAGE paLSPages = pRegMmio2->paLSPages; 933 uint32_t cPages = pVM->pgm.s.apMmio2RamRanges[idx]->cb >> GUEST_PAGE_SHIFT; 902 934 PGM_UNLOCK(pVM); 903 935 904 936 for (uint32_t iPage = 0; iPage < cPages; iPage++) 905 937 { 906 uint8_t const *pbPage = (uint8_t const *)pRegMmio->pvR3 + iPage * GUEST_PAGE_SIZE;938 uint8_t const *pbPage = &pRegMmio2->pbR3[iPage * GUEST_PAGE_SIZE]; 907 939 pgmR3ScanMmio2Page(pVM, pbPage, &paLSPages[iPage]); 908 940 } … … 936 968 */ 937 969 PGM_LOCK_VOID(pVM); 938 for (PPGMREGMMIO2RANGE pRegMmio = pVM->pgm.s.pRegMmioRangesR3; 939 pRegMmio && RT_SUCCESS(rc); 940 pRegMmio = pRegMmio->pNextR3) 941 { 942 PPGMLIVESAVEMMIO2PAGE paLSPages = pRegMmio->paLSPages; 943 uint8_t const *pbPage = (uint8_t const *)pRegMmio->RamRange.pvR3; 944 uint32_t cPages = pRegMmio->RamRange.cb >> GUEST_PAGE_SHIFT; 945 uint32_t iPageLast = cPages; 970 uint32_t const cMmio2Ranges = RT_MIN(pVM->pgm.s.cMmio2Ranges, RT_ELEMENTS(pVM->pgm.s.aMmio2Ranges)); 971 for (uint32_t idx = 0; idx < cMmio2Ranges && RT_SUCCESS(rc); idx++) 972 { 973 PPGMREGMMIO2RANGE const pRegMmio2 = &pVM->pgm.s.aMmio2Ranges[idx]; 974 PPGMRAMRANGE const pRamRange = pVM->pgm.s.apMmio2RamRanges[idx]; 975 PPGMLIVESAVEMMIO2PAGE const 
paLSPages = pRegMmio2->paLSPages; 976 uint32_t const cPages = pRamRange->cb >> GUEST_PAGE_SHIFT; 977 uint8_t const *pbPage = pRamRange->pbR3; 978 uint32_t iPageLast = cPages; 946 979 for (uint32_t iPage = 0; iPage < cPages; iPage++, pbPage += GUEST_PAGE_SIZE) 947 980 { … … 972 1005 { 973 1006 SSMR3PutU8(pSSM, u8Type | PGM_STATE_REC_FLAG_ADDR); 974 SSMR3PutU8(pSSM, pRegMmio ->idSavedState);1007 SSMR3PutU8(pSSM, pRegMmio2->idSavedState); 975 1008 rc = SSMR3PutU32(pSSM, iPage); 976 1009 } … … 993 1026 { 994 1027 PGM_LOCK_VOID(pVM); 995 for (PPGMREGMMIO2RANGE pRegMmio = pVM->pgm.s.pRegMmioRangesR3; 996 pRegMmio && RT_SUCCESS(rc); 997 pRegMmio = pRegMmio->pNextR3) 998 { 999 PPGMLIVESAVEMMIO2PAGE paLSPages = pRegMmio->paLSPages; 1000 uint8_t const *pbPage = (uint8_t const *)pRegMmio->RamRange.pvR3; 1001 uint32_t cPages = pRegMmio->RamRange.cb >> GUEST_PAGE_SHIFT; 1002 uint32_t iPageLast = cPages; 1028 uint32_t const cMmio2Ranges = RT_MIN(pVM->pgm.s.cMmio2Ranges, RT_ELEMENTS(pVM->pgm.s.aMmio2Ranges)); 1029 for (uint32_t idx = 0; idx < cMmio2Ranges && RT_SUCCESS(rc); idx++) 1030 { 1031 PPGMREGMMIO2RANGE const pRegMmio2 = &pVM->pgm.s.aMmio2Ranges[idx]; 1032 PPGMRAMRANGE const pRamRange = pVM->pgm.s.apMmio2RamRanges[idx]; 1033 PPGMLIVESAVEMMIO2PAGE const paLSPages = pRegMmio2->paLSPages; 1034 uint32_t const cPages = pRamRange->cb >> GUEST_PAGE_SHIFT; 1035 uint8_t const *pbPage = pRamRange->pbR3; 1036 uint32_t iPageLast = cPages; 1003 1037 PGM_UNLOCK(pVM); 1004 1038 … … 1028 1062 { 1029 1063 SSMR3PutU8(pSSM, u8Type | PGM_STATE_REC_FLAG_ADDR); 1030 SSMR3PutU8(pSSM, pRegMmio ->idSavedState);1064 SSMR3PutU8(pSSM, pRegMmio2->idSavedState); 1031 1065 rc = SSMR3PutU32(pSSM, iPage); 1032 1066 } … … 1067 1101 */ 1068 1102 PGM_LOCK_VOID(pVM); 1069 for (PPGMREGMMIO2RANGE pRegMmio = pVM->pgm.s.pRegMmioRangesR3; pRegMmio; pRegMmio = pRegMmio->pNextR3) 1070 { 1071 void *pvMmio2ToFree = pRegMmio->paLSPages; 1103 uint32_t const cMmio2Ranges = RT_MIN(pVM->pgm.s.cMmio2Ranges, RT_ELEMENTS(pVM->pgm.s.aMmio2Ranges)); 1104 for (uint32_t idx = 0; idx < cMmio2Ranges; idx++) 1105 { 1106 PPGMREGMMIO2RANGE const pRegMmio2 = &pVM->pgm.s.aMmio2Ranges[idx]; 1107 void *pvMmio2ToFree = pRegMmio2->paLSPages; 1072 1108 if (pvMmio2ToFree) 1073 1109 { 1074 pRegMmio ->paLSPages = NULL;1110 pRegMmio2->paLSPages = NULL; 1075 1111 PGM_UNLOCK(pVM); 1076 1112 MMR3HeapFree(pvMmio2ToFree); … … 1101 1137 * for cleaning up. 1102 1138 */ 1103 PPGMRAMRANGE pCur;1104 1139 PGM_LOCK_VOID(pVM); 1140 uint32_t idRamRange; 1105 1141 do 1106 1142 { 1107 for (pCur = pVM->pgm.s.pRamRangesXR3; pCur; pCur = pCur->pNextR3) 1108 { 1143 uint32_t const idRamRangeMax = RT_MIN(pVM->pgm.s.idRamRangeMax, RT_ELEMENTS(pVM->pgm.s.apRamRanges) - 1U); 1144 for (idRamRange = 0; idRamRange <= idRamRangeMax; idRamRange++) 1145 { 1146 PPGMRAMRANGE const pCur = pVM->pgm.s.apRamRanges[idRamRange]; 1147 Assert(pCur || idRamRange == 0); 1148 if (!pCur) continue; 1149 Assert(pCur->idRange == idRamRange); 1150 1109 1151 if ( !pCur->paLSPages 1110 1152 && !PGM_RAM_RANGE_IS_AD_HOC(pCur)) 1111 1153 { 1112 uint32_t const idRamRangesGen = pVM->pgm.s. idRamRangesGen;1154 uint32_t const idRamRangesGen = pVM->pgm.s.RamRangeUnion.idGeneration; 1113 1155 uint32_t const cPages = pCur->cb >> GUEST_PAGE_SHIFT; 1114 1156 PGM_UNLOCK(pVM); … … 1117 1159 return VERR_NO_MEMORY; 1118 1160 PGM_LOCK_VOID(pVM); 1119 if (pVM->pgm.s. 
idRamRangesGen != idRamRangesGen)1161 if (pVM->pgm.s.RamRangeUnion.idGeneration != idRamRangesGen) 1120 1162 { 1121 1163 PGM_UNLOCK(pVM); … … 1215 1257 } 1216 1258 } 1217 } while ( pCur);1259 } while (idRamRange <= RT_MIN(pVM->pgm.s.idRamRangeMax, RT_ELEMENTS(pVM->pgm.s.apRamRanges) - 1U)); 1218 1260 PGM_UNLOCK(pVM); 1219 1261 … … 1366 1408 */ 1367 1409 RTGCPHYS GCPhysCur = 0; 1368 PPGMRAMRANGE pCur; 1410 uint32_t idxLookup; 1411 uint32_t cLookupEntries; 1369 1412 PGM_LOCK_VOID(pVM); 1370 1413 do 1371 1414 { 1372 uint32_t const idRamRangesGen = pVM->pgm.s.idRamRangesGen; 1373 for (pCur = pVM->pgm.s.pRamRangesXR3; pCur; pCur = pCur->pNextR3) 1374 { 1415 PGM::PGMRAMRANGEGENANDLOOKUPCOUNT const RamRangeUnion = pVM->pgm.s.RamRangeUnion; 1416 Assert(pVM->pgm.s.RamRangeUnion.cLookupEntries < RT_ELEMENTS(pVM->pgm.s.aRamRangeLookup)); 1417 cLookupEntries = pVM->pgm.s.RamRangeUnion.cLookupEntries; 1418 for (idxLookup = 0; idxLookup < cLookupEntries; idxLookup++) 1419 { 1420 uint32_t const idRamRange = PGMRAMRANGELOOKUPENTRY_GET_ID(pVM->pgm.s.aRamRangeLookup[idxLookup]); 1421 AssertContinue(idRamRange < RT_ELEMENTS(pVM->pgm.s.apRamRanges)); 1422 PPGMRAMRANGE const pCur = pVM->pgm.s.apRamRanges[idRamRange]; 1423 AssertContinue(pCur); 1424 Assert(pCur->GCPhys == PGMRAMRANGELOOKUPENTRY_GET_FIRST(pVM->pgm.s.aRamRangeLookup[idxLookup])); 1425 1375 1426 if ( pCur->GCPhysLast > GCPhysCur 1376 1427 && !PGM_RAM_RANGE_IS_AD_HOC(pCur)) … … 1388 1439 #endif 1389 1440 && PDMR3CritSectYield(pVM, &pVM->pgm.s.CritSectX) 1390 && pVM->pgm.s. idRamRangesGen != idRamRangesGen)1441 && pVM->pgm.s.RamRangeUnion.u64Combined != RamRangeUnion.u64Combined) 1391 1442 { 1392 1443 GCPhysCur = pCur->GCPhys + ((RTGCPHYS)iPage << GUEST_PAGE_SHIFT); … … 1556 1607 } 1557 1608 } /* for each range */ 1558 } while (pCur); 1609 1610 /* We must use the starting lookup count here to determine whether we've 1611 been thru all or not, since using the current count could lead us to 1612 skip the final range if one was umapped while we yielded the lock. */ 1613 } while (idxLookup < cLookupEntries); 1559 1614 PGM_UNLOCK(pVM); 1560 1615 } … … 1579 1634 RTGCPHYS GCPhysLast = NIL_RTGCPHYS; 1580 1635 RTGCPHYS GCPhysCur = 0; 1581 PPGMRAMRANGE pCur; 1636 uint32_t idxLookup; 1637 uint32_t cRamRangeLookupEntries; 1582 1638 1583 1639 PGM_LOCK_VOID(pVM); 1584 1640 do 1585 1641 { 1586 uint32_t const idRamRangesGen = pVM->pgm.s.idRamRangesGen; 1587 for (pCur = pVM->pgm.s.pRamRangesXR3; pCur; pCur = pCur->pNextR3) 1588 { 1642 uint32_t const idRamRangesGen = pVM->pgm.s.RamRangeUnion.idGeneration; 1643 cRamRangeLookupEntries = RT_MIN(pVM->pgm.s.RamRangeUnion.cLookupEntries, RT_ELEMENTS(pVM->pgm.s.aRamRangeLookup)); 1644 for (idxLookup = 0; idxLookup < cRamRangeLookupEntries; idxLookup++) 1645 { 1646 uint32_t const idRamRange = PGMRAMRANGELOOKUPENTRY_GET_ID(pVM->pgm.s.aRamRangeLookup[idxLookup]); 1647 AssertContinue(idRamRange < RT_ELEMENTS(pVM->pgm.s.apRamRanges)); 1648 PPGMRAMRANGE const pCur = pVM->pgm.s.apRamRanges[idRamRange]; 1649 AssertContinue(pCur); 1650 Assert(pCur->GCPhys == PGMRAMRANGELOOKUPENTRY_GET_FIRST(pVM->pgm.s.aRamRangeLookup[idxLookup])); 1651 1589 1652 if ( pCur->GCPhysLast > GCPhysCur 1590 1653 && !PGM_RAM_RANGE_IS_AD_HOC(pCur)) … … 1600 1663 && (iPage & 0x7ff) == 0x100 1601 1664 && PDMR3CritSectYield(pVM, &pVM->pgm.s.CritSectX) 1602 && pVM->pgm.s. 
idRamRangesGen != idRamRangesGen)1665 && pVM->pgm.s.RamRangeUnion.idGeneration != idRamRangesGen) 1603 1666 { 1604 1667 GCPhysCur = pCur->GCPhys + ((RTGCPHYS)iPage << GUEST_PAGE_SHIFT); … … 1737 1800 pVM->pgm.s.LiveSave.cSavedPages++; 1738 1801 } 1739 if (idRamRangesGen != pVM->pgm.s. idRamRangesGen)1802 if (idRamRangesGen != pVM->pgm.s.RamRangeUnion.idGeneration) 1740 1803 { 1741 1804 GCPhysCur = GCPhys | GUEST_PAGE_OFFSET_MASK; … … 1750 1813 } 1751 1814 } /* for each range */ 1752 } while (pCur); 1815 1816 /* We must use the starting lookup count here to determine whether we've 1817 been thru all or not, since using the current count could lead us to 1818 skip the final range if one was umapped while we yielded the lock. */ 1819 } while (idxLookup < cRamRangeLookupEntries); 1753 1820 1754 1821 PGM_UNLOCK(pVM); … … 1773 1840 * write monitored pages. 1774 1841 */ 1775 void *pvToFree = NULL; 1776 PPGMRAMRANGE pCur; 1842 void *pvToFree = NULL; 1777 1843 uint32_t cMonitoredPages = 0; 1844 uint32_t idRamRangeMax; 1845 uint32_t idRamRange; 1778 1846 PGM_LOCK_VOID(pVM); 1779 1847 do 1780 1848 { 1781 for (pCur = pVM->pgm.s.pRamRangesXR3; pCur; pCur = pCur->pNextR3) 1782 { 1849 idRamRangeMax = RT_MIN(pVM->pgm.s.idRamRangeMax, RT_ELEMENTS(pVM->pgm.s.apRamRanges) - 1U); 1850 for (idRamRange = 0; idRamRange <= idRamRangeMax; idRamRange++) 1851 { 1852 PPGMRAMRANGE const pCur = pVM->pgm.s.apRamRanges[idRamRange]; 1853 Assert(pCur || idRamRange == 0); 1854 if (!pCur) continue; 1855 Assert(pCur->idRange == idRamRange); 1856 1783 1857 if (pCur->paLSPages) 1784 1858 { 1785 1859 if (pvToFree) 1786 1860 { 1787 uint32_t idRamRangesGen = pVM->pgm.s.idRamRangesGen;1861 uint32_t const idRamRangesGen = pVM->pgm.s.RamRangeUnion.idGeneration; 1788 1862 PGM_UNLOCK(pVM); 1789 1863 MMR3HeapFree(pvToFree); 1790 1864 pvToFree = NULL; 1791 1865 PGM_LOCK_VOID(pVM); 1792 if (idRamRangesGen != pVM->pgm.s. idRamRangesGen)1866 if (idRamRangesGen != pVM->pgm.s.RamRangeUnion.idGeneration) 1793 1867 break; /* start over again. */ 1794 1868 } … … 1810 1884 } 1811 1885 } 1812 } while ( pCur);1886 } while (idRamRange <= idRamRangeMax); 1813 1887 1814 1888 Assert(pVM->pgm.s.cMonitoredPages >= cMonitoredPages); … … 2346 2420 } 2347 2421 2422 2348 2423 /** 2349 2424 * Ram range flags and bits for older versions of the saved state. … … 2357 2432 static int pgmR3LoadMemoryOld(PVM pVM, PSSMHANDLE pSSM, uint32_t uVersion) 2358 2433 { 2359 PPGM pPGM = &pVM->pgm.s;2360 2361 2434 /* 2362 2435 * Ram range flags and bits. 2363 2436 */ 2364 uint32_t i = 0; 2365 for (PPGMRAMRANGE pRam = pPGM->pRamRangesXR3; ; pRam = pRam->pNextR3, i++) 2366 { 2437 uint32_t iSeqNo = 0; 2438 uint32_t const cRamRangeLookupEntries = RT_MIN(pVM->pgm.s.RamRangeUnion.cLookupEntries, 2439 RT_ELEMENTS(pVM->pgm.s.aRamRangeLookup)); 2440 for (uint32_t idxLookup = 0; idxLookup < cRamRangeLookupEntries; idxLookup++) 2441 { 2442 uint32_t const idRamRange = PGMRAMRANGELOOKUPENTRY_GET_ID(pVM->pgm.s.aRamRangeLookup[idxLookup]); 2443 AssertContinue(idRamRange < RT_ELEMENTS(pVM->pgm.s.apRamRanges)); 2444 PPGMRAMRANGE const pRam = pVM->pgm.s.apRamRanges[idRamRange]; 2445 AssertContinue(pRam); 2446 2367 2447 /* Check the sequence number / separator. 
*/ 2368 2448 uint32_t u32Sep; … … 2372 2452 if (u32Sep == ~0U) 2373 2453 break; 2374 if (u32Sep != i )2454 if (u32Sep != iSeqNo) 2375 2455 { 2376 2456 AssertMsgFailed(("u32Sep=%#x (last)\n", u32Sep)); … … 2432 2512 LogRel(("Ram range: %RGp-%RGp %RGp bytes %s %s\n" 2433 2513 "State : %RGp-%RGp %RGp bytes %s %s\n", 2434 pRam->GCPhys, pRam->GCPhysLast, pRam->cb, pRam->p vR3 ? "bits" : "nobits", pRam->pszDesc,2514 pRam->GCPhys, pRam->GCPhysLast, pRam->cb, pRam->pbR3 ? "bits" : "nobits", pRam->pszDesc, 2435 2515 GCPhys, GCPhysLast, cb, fHaveBits ? "bits" : "nobits", szDesc)); 2436 2516 /* … … 2443 2523 N_("RAM range mismatch; saved={%RGp-%RGp %RGp bytes %s %s} config={%RGp-%RGp %RGp bytes %s %s}"), 2444 2524 GCPhys, GCPhysLast, cb, fHaveBits ? "bits" : "nobits", szDesc, 2445 pRam->GCPhys, pRam->GCPhysLast, pRam->cb, pRam->p vR3 ? "bits" : "nobits", pRam->pszDesc);2525 pRam->GCPhys, pRam->GCPhysLast, pRam->cb, pRam->pbR3 ? "bits" : "nobits", pRam->pszDesc); 2446 2526 2447 2527 AssertMsgFailed(("debug skipping not implemented, sorry\n")); 2528 iSeqNo++; 2448 2529 continue; 2449 2530 } … … 2525 2606 } 2526 2607 } 2527 else if (pRam->p vR3)2608 else if (pRam->pbR3) 2528 2609 { 2529 2610 /* … … 2533 2614 ("fFlags=%#x GCPhys=%#x %s\n", fFlags, pRam->GCPhys, pRam->pszDesc), 2534 2615 VERR_SSM_DATA_UNIT_FORMAT_CHANGED); 2535 AssertLogRelMsgReturn(pRam->p vR3,2616 AssertLogRelMsgReturn(pRam->pbR3, 2536 2617 ("GCPhys=%#x %s\n", pRam->GCPhys, pRam->pszDesc), 2537 2618 VERR_SSM_DATA_UNIT_FORMAT_CHANGED); 2538 2619 2539 rc = SSMR3GetMem(pSSM, pRam->p vR3, pRam->cb);2620 rc = SSMR3GetMem(pSSM, pRam->pbR3, pRam->cb); 2540 2621 AssertLogRelMsgRCReturn(rc, ("GCPhys=%#x %s\n", pRam->GCPhys, pRam->pszDesc), rc); 2541 2622 } … … 2584 2665 } 2585 2666 } 2667 2668 iSeqNo++; 2586 2669 } 2587 2670 … … 2610 2693 * Process page records until we hit the terminator. 2611 2694 */ 2612 RTGCPHYS GCPhys = NIL_RTGCPHYS; 2613 PPGMRAMRANGE pRamHint = NULL; 2614 uint8_t id = UINT8_MAX; 2615 uint32_t iPage = UINT32_MAX - 10; 2616 PPGMROMRANGE pRom = NULL; 2617 PPGMREGMMIO2RANGE pRegMmio = NULL; 2695 RTGCPHYS GCPhys = NIL_RTGCPHYS; 2696 PPGMRAMRANGE pRamHint = NULL; 2697 uint8_t id = UINT8_MAX; 2698 uint32_t iPage = UINT32_MAX - 10; 2699 PPGMROMRANGE pRom = NULL; 2700 PPGMREGMMIO2RANGE pRegMmio2 = NULL; 2701 PPGMRAMRANGE pMmio2RamRange = NULL; 2618 2702 2619 2703 /* … … 2675 2759 PPGMPAGE pPage; 2676 2760 rc = pgmPhysGetPageWithHintEx(pVM, GCPhys, &pPage, &pRamHint); 2677 AssertLogRelMsgRCReturn(rc, ("rc=%Rrc %RGp\n", rc, GCPhys), rc); 2761 if (RT_SUCCESS(rc)) 2762 { /* likely */ } 2763 else if ( rc == VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS 2764 && GCPhys < _1M 2765 && GCPhys >= 640U*_1K 2766 && (u8 & ~PGM_STATE_REC_FLAG_ADDR) == PGM_STATE_REC_RAM_ZERO) 2767 { 2768 rc = VINF_SUCCESS; /* We've kicked out unused pages between 640K and 1MB, but older states may include them. 
*/ 2769 id = UINT8_MAX; 2770 break; 2771 } 2772 else 2773 AssertLogRelMsgFailedReturn(("rc=%Rrc %RGp u8=%#x\n", rc, GCPhys, u8), rc); 2678 2774 2679 2775 /* … … 2705 2801 if ( PGM_PAGE_GET_TYPE(pPage) == PGMPAGETYPE_ROM 2706 2802 || PGM_PAGE_GET_TYPE(pPage) == PGMPAGETYPE_ROM_SHADOW 2707 #ifdef VBOX_WITH_PGM_NEM_MODE 2708 || pVM->pgm.s.fNemMode 2709 #endif 2803 || PGM_IS_IN_NEM_MODE(pVM) 2710 2804 || pVM->pgm.s.fRamPreAlloc) 2711 2805 { … … 2792 2886 return rc; 2793 2887 } 2794 if ( !pRegMmio 2795 || pRegMmio ->idSavedState != id)2888 if ( !pRegMmio2 2889 || pRegMmio2->idSavedState != id) 2796 2890 { 2797 for (pRegMmio = pVM->pgm.s.pRegMmioRangesR3; pRegMmio; pRegMmio = pRegMmio->pNextR3) 2798 if (pRegMmio->idSavedState == id) 2891 pMmio2RamRange = NULL; 2892 uint32_t const cMmio2Ranges = RT_MIN(pVM->pgm.s.cMmio2Ranges, RT_ELEMENTS(pVM->pgm.s.aMmio2Ranges)); 2893 for (uint32_t idx = 0; idx < cMmio2Ranges; idx++) 2894 if (pVM->pgm.s.aMmio2Ranges[idx].idSavedState == id) 2895 { 2896 pRegMmio2 = &pVM->pgm.s.aMmio2Ranges[idx]; 2897 pMmio2RamRange = pVM->pgm.s.apMmio2RamRanges[idx]; 2799 2898 break; 2800 AssertLogRelMsgReturn(pRegMmio, ("id=%#u iPage=%#x\n", id, iPage), VERR_PGM_SAVED_MMIO2_RANGE_NOT_FOUND); 2899 } 2900 AssertLogRelMsgReturn(pRegMmio2 && pMmio2RamRange, ("id=%#u iPage=%#x\n", id, iPage), 2901 VERR_PGM_SAVED_MMIO2_RANGE_NOT_FOUND); 2801 2902 } 2802 AssertLogRelMsgReturn(iPage < (p RegMmio->RamRange.cb >> GUEST_PAGE_SHIFT),2803 ("iPage=%#x cb=%RGp %s\n", iPage, p RegMmio->RamRange.cb, pRegMmio->RamRange.pszDesc),2903 AssertLogRelMsgReturn(iPage < (pMmio2RamRange->cb >> GUEST_PAGE_SHIFT), 2904 ("iPage=%#x cb=%RGp %s\n", iPage, pMmio2RamRange->cb, pMmio2RamRange->pszDesc), 2804 2905 VERR_PGM_SAVED_MMIO2_PAGE_NOT_FOUND); 2805 void * pvDstPage = (uint8_t *)pRegMmio->RamRange.pvR3 + ((size_t)iPage << GUEST_PAGE_SHIFT);2906 void * const pvDstPage = &pMmio2RamRange->pbR3[(size_t)iPage << GUEST_PAGE_SHIFT]; 2806 2907 2807 2908 /* … … 2840 2941 return rc; 2841 2942 } 2842 if ( 2843 || 2943 if ( !pRom 2944 || pRom->idSavedState != id) 2844 2945 { 2845 for (pRom = pVM->pgm.s.pRomRangesR3; pRom; pRom = pRom->pNextR3) 2946 uint32_t const cRomRanges = RT_MIN(pVM->pgm.s.cRomRanges, RT_ELEMENTS(pVM->pgm.s.apRomRanges)); 2947 uint32_t idx; 2948 for (idx = 0; idx < cRomRanges; idx++) 2949 { 2950 pRom = pVM->pgm.s.apRomRanges[idx]; 2846 2951 if (pRom->idSavedState == id) 2847 2952 break; 2848 AssertLogRelMsgReturn(pRom, ("id=%#u iPage=%#x\n", id, iPage), VERR_PGM_SAVED_ROM_RANGE_NOT_FOUND); 2953 } 2954 AssertLogRelMsgReturn(idx < cRomRanges, ("id=%#u iPage=%#x\n", id, iPage), VERR_PGM_SAVED_ROM_RANGE_NOT_FOUND); 2849 2955 } 2850 2956 AssertLogRelMsgReturn(iPage < (pRom->cb >> GUEST_PAGE_SHIFT), … … 2928 3034 case PGM_STATE_REC_ROM_SHW_RAW: 2929 3035 #ifdef VBOX_WITH_PGM_NEM_MODE 2930 if (fAltPage && pVM->pgm.s.fNemMode)3036 if (fAltPage && PGM_IS_IN_NEM_MODE(pVM)) 2931 3037 pvDstPage = &pRom->pbR3Alternate[iPage << GUEST_PAGE_SHIFT]; 2932 3038 else … … 2999 3105 for (VMCPUID i = 0; i < pVM->cCpus; i++) 3000 3106 { 3001 if (uVersion <= PGM_SAVED_STATE_VERSION_PRE_PAE) 3107 if (uVersion > PGM_SAVED_STATE_VERSION_PRE_PAE) 3108 rc = SSMR3GetStruct(pSSM, &pVM->apCpusR3[i]->pgm.s, &s_aPGMCpuFields[0]); 3109 else 3002 3110 rc = SSMR3GetStruct(pSSM, &pVM->apCpusR3[i]->pgm.s, &s_aPGMCpuFieldsPrePae[0]); 3003 else3004 rc = SSMR3GetStruct(pSSM, &pVM->apCpusR3[i]->pgm.s, &s_aPGMCpuFields[0]);3005 3111 AssertLogRelRCReturn(rc, rc); 3006 3112 } -
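For orientation between the file diffs: the recurring change in the saved-state code above is that RAM ranges are no longer walked as a linked list of PGMRAMRANGE structures but indexed by range ID into the fixed apRamRanges pointer table, with RamRangeUnion.idGeneration rechecked whenever the PGM lock was yielded. A minimal sketch of that loop shape, using a hypothetical helper name and eliding the per-range work:

    /* Hypothetical sketch only - mirrors the loop shape of the live-save code
       above, not an actual function from this changeset. */
    static void sketchWalkRamRanges(PVM pVM)
    {
        PGM_LOCK_VOID(pVM);
        uint32_t idRamRange;
        do
        {
            uint32_t const idRamRangeMax = RT_MIN(pVM->pgm.s.idRamRangeMax,
                                                  RT_ELEMENTS(pVM->pgm.s.apRamRanges) - 1U);
            for (idRamRange = 0; idRamRange <= idRamRangeMax; idRamRange++)
            {
                PPGMRAMRANGE const pCur = pVM->pgm.s.apRamRanges[idRamRange];
                if (!pCur)
                    continue; /* entry 0 is unused and the table may be sparse */

                uint32_t const idGeneration = pVM->pgm.s.RamRangeUnion.idGeneration;
                PGM_UNLOCK(pVM);
                /* ... per-range work that may allocate or block ... */
                PGM_LOCK_VOID(pVM);
                if (pVM->pgm.s.RamRangeUnion.idGeneration != idGeneration)
                    break; /* a range was mapped/unmapped while unlocked: restart */
            }
        } while (idRamRange <= RT_MIN(pVM->pgm.s.idRamRangeMax,
                                      RT_ELEMENTS(pVM->pgm.s.apRamRanges) - 1U));
        PGM_UNLOCK(pVM);
    }

The generation check itself is not new; what changes is that ranges are addressed through a stable ID into a bounded table instead of through pNextR3 pointers.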
trunk/src/VBox/VMM/VMMR3/PGMSharedPage.cpp
r98103 r104840 356 356 PGM_LOCK_VOID(pVM); 357 357 358 for (PPGMRAMRANGE pRam = pVM->pgm.s.pRamRangesXR3; pRam; pRam = pRam->pNextR3) 359 { 358 uint32_t const idRamRangeMax = RT_MIN(pVM->pgm.s.idRamRangeMax, RT_ELEMENTS(pVM->pgm.s.apRamRanges) - 1U); 359 for (uint32_t idRamRange = 0; idRamRange <= idRamRangeMax; idRamRange++) 360 { 361 PPGMRAMRANGE const pRam = pVM->pgm.s.apRamRanges[idRamRange]; 362 Assert(pRam || idRamRange == 0); 363 if (!pRam) continue; 364 Assert(pRam->idRange == idRamRange); 365 360 366 PPGMPAGE pPage = &pRam->aPages[0]; 361 367 RTGCPHYS GCPhys = pRam->GCPhys; … … 413 419 } 414 420 } 421 415 422 PGM_UNLOCK(pVM); 416 423 -
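The shared-page scan above keeps the same inner per-range page walk as before; only the way the range is looked up changes. A sketch of that inner walk (hypothetical helper, PGM lock assumed to be held by the caller):

    /* Hypothetical helper; the real code interleaves this with shared-module
       checks and lock yielding. */
    static void sketchWalkRangePages(PPGMRAMRANGE pRam)
    {
        PPGMPAGE pPage  = &pRam->aPages[0];
        RTGCPHYS GCPhys = pRam->GCPhys;
        uint32_t cLeft  = (uint32_t)(pRam->cb >> GUEST_PAGE_SHIFT);
        while (cLeft-- > 0)
        {
            /* ... inspect *pPage, e.g. via PGM_PAGE_GET_TYPE(pPage) ... */
            pPage++;
            GCPhys += GUEST_PAGE_SIZE;
        }
    }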
trunk/src/VBox/VMM/include/IOMInternal.h
r103152 r104840
275 275     bool volatile fMapped;
276 276     /** Set if there is an ring-0 entry too. */
277         bool fRing0;
    277     bool fRing0 : 1;
278 278     /** Set if there is an raw-mode entry too. */
279         bool fRawMode;
280         uint8_t bPadding;
    279     bool fRawMode : 1;
    280     bool fPadding : 6;
    281     /** Pre-registered ad-hoc RAM range ID. */
    282     uint16_t idRamRange;
281 283     /** Same as the handle index. */
282 284     uint16_t idxSelf;
-
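The IOMInternal.h change above turns the two booleans into single-bit fields so the former padding byte can be repurposed and a 16-bit pre-registered ad-hoc RAM range ID added without growing the entry. A standalone illustration of the packing (hypothetical structure names, not the actual IOM types; exact sizes depend on the ABI):

    #include <stdint.h>
    #include <stdio.h>

    /* Tail of a hypothetical entry before and after such a change. */
    struct OldTail { uint8_t fRing0; uint8_t fRawMode; uint8_t bPadding; uint16_t idxSelf; };
    struct NewTail { uint8_t fRing0 : 1; uint8_t fRawMode : 1; uint8_t fPadding : 6;
                     uint16_t idRamRange; uint16_t idxSelf; };

    int main(void)
    {
        /* On common ABIs both layouts print 6: the new ID fits into the space
           previously taken by the separate flag bytes and the padding. */
        printf("old=%zu new=%zu\n", sizeof(struct OldTail), sizeof(struct NewTail));
        return 0;
    }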
trunk/src/VBox/VMM/include/PGMInternal.h
r100998 r104840 114 114 # define PGM_SYNC_NR_PAGES 8 115 115 #endif 116 117 /** Maximum number of RAM ranges. 118 * @note This can be increased to 4096 (at least when targeting x86). */ 119 #define PGM_MAX_RAM_RANGES 3072 120 121 /** Maximum pages per RAM range. 122 * 123 * The PGMRAMRANGE structures for the high memory can get very big. There 124 * used to be some limitations on SUPR3PageAllocEx allocation sizes, so 125 * traditionally we limited this to 16MB chunks. These days we do ~64 MB 126 * chunks each covering 16GB of guest RAM, making sure each range is a 127 * multiple of 1GB to enable eager hosts to use 1GB pages for NEM mode. 128 * 129 * See also pgmPhysMmio2CalcChunkCount. 130 */ 131 #define PGM_MAX_PAGES_PER_RAM_RANGE _4M 132 #if defined(X86_PD_PAE_SHIFT) && defined(AssertCompile) 133 AssertCompile(RT_ALIGN_32(PGM_MAX_PAGES_PER_RAM_RANGE, X86_PD_PAE_SHIFT - X86_PAGE_SHIFT)); /* NEM large page requirement: 1GB pages. */ 134 #endif 135 136 /** The maximum number of MMIO2 ranges. */ 137 #define PGM_MAX_MMIO2_RANGES 32 138 /** The maximum number of pages in a MMIO2 PCI region. 139 * 140 * The memory for a MMIO2 PCI region is a single chunk of host virtual memory, 141 * but may be handled internally by PGM as a set of multiple MMIO2/RAM ranges, 142 * since PGM_MAX_PAGES_PER_RAM_RANGE is currently lower than this value (4 GiB 143 * vs 16 GiB). 144 */ 145 #define PGM_MAX_PAGES_PER_MMIO2_REGION _16M 146 147 /** Maximum number of ROM ranges. */ 148 #define PGM_MAX_ROM_RANGES 16 149 /** The maximum pages per ROM range. 150 * Currently 512K pages, or 2GB with 4K pages. */ 151 #define PGM_MAX_PAGES_PER_ROM_RANGE _512K 152 AssertCompile(PGM_MAX_PAGES_PER_ROM_RANGE <= PGM_MAX_PAGES_PER_RAM_RANGE); 116 153 117 154 /** … … 1289 1326 1290 1327 /** 1291 * RAM range for GC Phys to HC Phys conversion. 1292 * 1293 * Can be used for HC Virt to GC Phys and HC Virt to HC Phys 1294 * conversions too, but we'll let MM handle that for now. 1295 * 1296 * This structure is used by linked lists in both GC and HC. 1328 * RAM range lookup table entry. 1329 */ 1330 typedef union PGMRAMRANGELOOKUPENTRY 1331 { 1332 RT_GCC_EXTENSION struct 1333 { 1334 /** Page aligned start address of the range, with page offset holding the ID. */ 1335 RTGCPHYS GCPhysFirstAndId; 1336 /** The last address in the range (inclusive). Page aligned (-1). */ 1337 RTGCPHYS GCPhysLast; 1338 }; 1339 /** Alternative 128-bit view for atomic updating. */ 1340 RTUINT128U volatile u128Volatile; 1341 /** Alternative 128-bit view for atomic updating. */ 1342 RTUINT128U u128Normal; 1343 } PGMRAMRANGELOOKUPENTRY; 1344 /** Pointer to a lookup table entry. */ 1345 typedef PGMRAMRANGELOOKUPENTRY *PPGMRAMRANGELOOKUPENTRY; 1346 1347 /** Extracts the ID from PGMRAMRANGELOOKUPENTRY::GCPhysFirstAndId. */ 1348 #define PGMRAMRANGELOOKUPENTRY_GET_ID(a_LookupEntry) ((uint32_t)((a_LookupEntry).GCPhysFirstAndId & GUEST_PAGE_OFFSET_MASK)) 1349 /** Extracts the GCPhysFirst from PGMRAMRANGELOOKUPENTRY::GCPhysFirstAndId. */ 1350 #define PGMRAMRANGELOOKUPENTRY_GET_FIRST(a_LookupEntry) (((a_LookupEntry).GCPhysFirstAndId) & ~(RTGCPHYS)GUEST_PAGE_OFFSET_MASK) 1351 1352 1353 /** 1354 * RAM range for GC Phys to HC Phys & R3 Ptr conversion. 1355 * 1356 * This structure is addressed via context specific pointer tables. Lookup is 1357 * organized via the lookup table (PGMRAMRANGELOOKUPENTRY). 1297 1358 */ 1298 1359 typedef struct PGMRAMRANGE … … 1300 1361 /** Start of the range. Page aligned. */ 1301 1362 RTGCPHYS GCPhys; 1302 /** Size of the range. 
(Page aligned of course). */ 1363 /** Size of the range. (Page aligned of course). 1364 * Ring-0 duplicates this in a PGMR0PERVM::acRamRangePages (shifted by 1365 * guest page size). */ 1303 1366 RTGCPHYS cb; 1304 /** Pointer to the next RAM range - for R3. */1305 R3PTRTYPE(struct PGMRAMRANGE *) pNextR3;1306 /** Pointer to the next RAM range - for R0. */1307 R0PTRTYPE(struct PGMRAMRANGE *) pNextR0;1308 1367 /** PGM_RAM_RANGE_FLAGS_* flags. */ 1309 1368 uint32_t fFlags; … … 1313 1372 RTGCPHYS GCPhysLast; 1314 1373 /** Start of the HC mapping of the range. This is only used for MMIO2 and in NEM mode. */ 1315 R3PTRTYPE(void *) pvR3; 1374 R3PTRTYPE(uint8_t *) pbR3; 1375 /** The RAM range identifier (index into the pointer table). */ 1376 uint32_t idRange; 1377 #if HC_ARCH_BITS != 32 1378 /** Padding to make aPage aligned on sizeof(PGMPAGE). */ 1379 uint32_t au32Alignment2[HC_ARCH_BITS == 32 ? 0 : 1]; 1380 #endif 1316 1381 /** Live save per page tracking data. */ 1317 1382 R3PTRTYPE(PPGMLIVESAVERAMPAGE) paLSPages; 1318 1383 /** The range description. */ 1319 1384 R3PTRTYPE(const char *) pszDesc; 1320 /** Pointer to self - R0 pointer. */ 1321 R0PTRTYPE(struct PGMRAMRANGE *) pSelfR0; 1322 1323 /** Pointer to the left search three node - ring-3 context. */ 1324 R3PTRTYPE(struct PGMRAMRANGE *) pLeftR3; 1325 /** Pointer to the right search three node - ring-3 context. */ 1326 R3PTRTYPE(struct PGMRAMRANGE *) pRightR3; 1327 /** Pointer to the left search three node - ring-0 context. */ 1328 R0PTRTYPE(struct PGMRAMRANGE *) pLeftR0; 1329 /** Pointer to the right search three node - ring-0 context. */ 1330 R0PTRTYPE(struct PGMRAMRANGE *) pRightR0; 1331 1332 /** Padding to make aPage aligned on sizeof(PGMPAGE). */ 1333 #if HC_ARCH_BITS == 32 1334 uint32_t au32Alignment2[HC_ARCH_BITS == 32 ? 2 : 0]; 1335 #endif 1385 1336 1386 /** Array of physical guest page tracking structures. 1337 1387 * @note Number of entries is PGMRAMRANGE::cb / GUEST_PAGE_SIZE. */ 1338 1388 PGMPAGE aPages[1]; 1339 1389 } PGMRAMRANGE; 1390 AssertCompileMemberAlignment(PGMRAMRANGE, aPages, 16); 1340 1391 /** Pointer to RAM range for GC Phys to HC Phys conversion. */ 1341 1392 typedef PGMRAMRANGE *PPGMRAMRANGE; … … 1343 1394 /** @name PGMRAMRANGE::fFlags 1344 1395 * @{ */ 1345 /** The RAM range is floating around as an independent guest mapping. */1346 #define PGM_RAM_RANGE_FLAGS_FLOATING RT_BIT(20)1347 1396 /** Ad hoc RAM range for an ROM mapping. */ 1348 1397 #define PGM_RAM_RANGE_FLAGS_AD_HOC_ROM RT_BIT(21) … … 1351 1400 /** Ad hoc RAM range for an MMIO2 or pre-registered MMIO mapping. */ 1352 1401 #define PGM_RAM_RANGE_FLAGS_AD_HOC_MMIO_EX RT_BIT(23) 1402 /** Valid RAM range flags. */ 1403 #define PGM_RAM_RANGE_FLAGS_VALID_MASK (PGM_RAM_RANGE_FLAGS_AD_HOC_ROM | PGM_RAM_RANGE_FLAGS_AD_HOC_MMIO | PGM_RAM_RANGE_FLAGS_AD_HOC_MMIO_EX) 1353 1404 /** @} */ 1354 1405 … … 1377 1428 */ 1378 1429 #define PGM_RAMRANGE_CALC_PAGE_R3PTR(a_pRam, a_GCPhysPage) \ 1379 ( (a_pRam)->p vR3 ? (R3PTRTYPE(uint8_t *))(a_pRam)->pvR3 + (a_GCPhysPage) - (a_pRam)->GCPhys : NULL )1430 ( (a_pRam)->pbR3 ? (a_pRam)->pbR3 + (a_GCPhysPage) - (a_pRam)->GCPhys : NULL ) 1380 1431 1381 1432 … … 1427 1478 typedef struct PGMROMRANGE 1428 1479 { 1429 /** Pointer to the next range - R3. */1430 R3PTRTYPE(struct PGMROMRANGE *) pNextR3;1431 /** Pointer to the next range - R0. */1432 R0PTRTYPE(struct PGMROMRANGE *) pNextR0;1433 /** Pointer to the this range - R0. */1434 R0PTRTYPE(struct PGMROMRANGE *) pSelfR0;1435 1480 /** Address of the range. 
*/ 1436 1481 RTGCPHYS GCPhys; … … 1443 1488 /** The saved state range ID. */ 1444 1489 uint8_t idSavedState; 1445 /** Alignment padding. */ 1446 uint8_t au8Alignment[2]; 1490 /** The ID of the associated RAM range. */ 1491 #ifdef IN_RING0 1492 volatile 1493 #endif 1494 uint16_t idRamRange; 1447 1495 /** The size bits pvOriginal points to. */ 1448 1496 uint32_t cbOriginal; … … 1460 1508 R3PTRTYPE(uint8_t *) pbR3Alternate; 1461 1509 RTR3PTR pvAlignment2; 1510 #else 1511 RTR3PTR apvUnused[2]; 1462 1512 #endif 1463 1513 /** The per page tracking structures. */ … … 1524 1574 typedef struct PGMREGMMIO2RANGE 1525 1575 { 1526 /** The owner of the range . (a device)*/1576 /** The owner of the range (a device). */ 1527 1577 PPDMDEVINSR3 pDevInsR3; 1528 1578 /** Pointer to the ring-3 mapping of the allocation. */ 1529 RTR3PTR pvR3; 1530 #ifndef VBOX_WITH_LINEAR_HOST_PHYS_MEM 1531 /** Pointer to the ring-0 mapping of the allocation. */ 1532 RTR0PTR pvR0; 1533 #endif 1534 /** Pointer to the next range - R3. */ 1535 R3PTRTYPE(struct PGMREGMMIO2RANGE *) pNextR3; 1579 R3PTRTYPE(uint8_t *) pbR3; 1536 1580 /** Flags (PGMREGMMIO2RANGE_F_XXX). */ 1537 1581 uint16_t fFlags; … … 1544 1588 /** MMIO2 range identifier, for page IDs (PGMPAGE::s.idPage). */ 1545 1589 uint8_t idMmio2; 1546 /** Alignment padding for putting the ram range on a PGMPAGE alignment boundary. */ 1547 #ifndef VBOX_WITH_LINEAR_HOST_PHYS_MEM 1548 uint8_t abAlignment[HC_ARCH_BITS == 32 ? 6 + 4 : 2]; 1549 #else 1550 uint8_t abAlignment[HC_ARCH_BITS == 32 ? 6 + 8 : 2 + 8]; 1551 #endif 1590 /** The ID of the associated RAM range. */ 1591 #ifdef IN_RING0 1592 volatile 1593 #endif 1594 uint16_t idRamRange; 1595 /** The mapping address if mapped, NIL_RTGCPHYS if not. */ 1596 RTGCPHYS GCPhys; 1552 1597 /** The real size. 1553 1598 * This may be larger than indicated by RamRange.cb if the range has been … … 1560 1605 /** Live save per page tracking data for MMIO2. */ 1561 1606 R3PTRTYPE(PPGMLIVESAVEMMIO2PAGE) paLSPages; 1562 /** The associated RAM range. */ 1563 PGMRAMRANGE RamRange; 1607 RTR3PTR R3PtrPadding; 1564 1608 } PGMREGMMIO2RANGE; 1565 AssertCompileMemberAlignment(PGMREGMMIO2RANGE, RamRange, 16);1566 1609 /** Pointer to a MMIO2 or pre-registered MMIO range. */ 1567 1610 typedef PGMREGMMIO2RANGE *PPGMREGMMIO2RANGE; … … 1588 1631 1589 1632 1590 /** @name Internal MMIO2 constants.1633 /** @name Internal MMIO2 macros. 1591 1634 * @{ */ 1592 /** The maximum number of MMIO2 ranges. */1593 #define PGM_MMIO2_MAX_RANGES 321594 /** The maximum number of pages in a MMIO2 range. */1595 #define PGM_MMIO2_MAX_PAGE_COUNT UINT32_C(0x01000000)1596 1635 /** Makes a MMIO2 page ID out of a MMIO2 range ID and page index number. */ 1597 1636 #define PGM_MMIO2_PAGEID_MAKE(a_idMmio2, a_iPage) ( ((uint32_t)(a_idMmio2) << 24) | (uint32_t)(a_iPage) ) … … 2948 2987 uint8_t abMmioPg[RT_MAX(HOST_PAGE_SIZE, GUEST_PAGE_SIZE)]; 2949 2988 2989 /** @name RAM, MMIO2 and ROM ranges 2990 * @{ */ 2991 /** The RAM range lookup table. */ 2992 PGMRAMRANGELOOKUPENTRY aRamRangeLookup[PGM_MAX_RAM_RANGES]; 2993 /** The ring-3 RAM range pointer table. */ 2994 R3PTRTYPE(PPGMRAMRANGE) apRamRanges[PGM_MAX_RAM_RANGES]; 2995 /** MMIO2 ranges. Indexed by idMmio2 minus 1. */ 2996 PGMREGMMIO2RANGE aMmio2Ranges[PGM_MAX_MMIO2_RANGES]; 2997 /** The ring-3 RAM range pointer table running parallel to aMmio2Ranges. */ 2998 R3PTRTYPE(PPGMRAMRANGE) apMmio2RamRanges[PGM_MAX_MMIO2_RANGES]; 2999 /** The ring-3 ROM range pointer table. 
*/ 3000 R3PTRTYPE(PPGMROMRANGE) apRomRanges[PGM_MAX_ROM_RANGES]; 3001 /** Union of generation ID and lookup count. */ 3002 union PGMRAMRANGEGENANDLOOKUPCOUNT 3003 { 3004 /* Combined view of both the generation ID and the count for atomic updating/reading. */ 3005 uint64_t volatile u64Combined; 3006 RT_GCC_EXTENSION struct 3007 { 3008 /** Generation ID for the RAM ranges. 3009 * This member is incremented twice everytime a RAM range is mapped or 3010 * unmapped, so odd numbers means aRamRangeLookup is being modified and even 3011 * means the update has completed. */ 3012 uint32_t volatile idGeneration; 3013 /** The number of active entries in aRamRangeLookup. */ 3014 uint32_t volatile cLookupEntries; 3015 }; 3016 } RamRangeUnion; 3017 /** The max RAM range ID (mirroring PGMR0PERVM::idRamRangeMax). */ 3018 uint32_t idRamRangeMax; 3019 /** The number of MMIO2 ranges (serves as the next MMIO2 ID). */ 3020 uint8_t cMmio2Ranges; 3021 /** The number of ROM ranges. */ 3022 uint8_t cRomRanges; 3023 uint8_t abAlignment1[2]; 3024 /** @} */ 3025 2950 3026 /** @name The zero page (abPagePg). 2951 3027 * @{ */ … … 3005 3081 * Whether PCI passthrough is enabled. */ 3006 3082 bool fPciPassthrough; 3007 /** The number of MMIO2 regions (serves as the next MMIO2 ID). */3008 uint8_t cMmio2Regions;3009 3083 /** Restore original ROM page content when resetting after loading state. 3010 3084 * The flag is set by pgmR3LoadRomRanges and cleared at reset. This … … 3018 3092 /** Alignment padding. */ 3019 3093 #ifndef VBOX_WITH_PGM_NEM_MODE 3020 bool afAlignment3[1]; 3094 bool afAlignment2[2]; 3095 #else 3096 bool afAlignment2[1]; 3021 3097 #endif 3022 3098 /** The host paging mode. (This is what SUPLib reports.) */ 3023 3099 SUPPAGINGMODE enmHostMode; 3024 bool afAlignment3b[2];3025 3026 /** Generation ID for the RAM ranges. This member is incremented everytime3027 * a RAM range is linked or unlinked. */3028 uint32_t volatile idRamRangesGen;3029 3100 3030 3101 /** Physical access handler type for ROM protection. */ … … 3042 3113 /** RAM range TLB for R3. */ 3043 3114 R3PTRTYPE(PPGMRAMRANGE) apRamRangesTlbR3[PGM_RAMRANGE_TLB_ENTRIES]; 3044 /** Pointer to the list of RAM ranges (Phys GC -> Phys HC conversion) - for R3.3045 * This is sorted by physical address and contains no overlapping ranges. */3046 R3PTRTYPE(PPGMRAMRANGE) pRamRangesXR3;3047 /** Root of the RAM range search tree for ring-3. */3048 R3PTRTYPE(PPGMRAMRANGE) pRamRangeTreeR3;3049 3115 /** Shadow Page Pool - R3 Ptr. */ 3050 3116 R3PTRTYPE(PPGMPOOL) pPoolR3; … … 3052 3118 * This is sorted by physical address and contains no overlapping ranges. */ 3053 3119 R3PTRTYPE(PPGMROMRANGE) pRomRangesR3; 3054 /** Pointer to the list of MMIO2 ranges - for R3.3055 * Registration order. */3056 R3PTRTYPE(PPGMREGMMIO2RANGE) pRegMmioRangesR3;3057 /** MMIO2 lookup array for ring-3. Indexed by idMmio2 minus 1. */3058 R3PTRTYPE(PPGMREGMMIO2RANGE) apMmio2RangesR3[PGM_MMIO2_MAX_RANGES];3059 3120 3060 3121 /** RAM range TLB for R0. */ 3061 3122 R0PTRTYPE(PPGMRAMRANGE) apRamRangesTlbR0[PGM_RAMRANGE_TLB_ENTRIES]; 3062 /** R0 pointer corresponding to PGM::pRamRangesXR3. */3063 R0PTRTYPE(PPGMRAMRANGE) pRamRangesXR0;3064 /** Root of the RAM range search tree for ring-0. */3065 R0PTRTYPE(PPGMRAMRANGE) pRamRangeTreeR0;3066 3123 /** Shadow Page Pool - R0 Ptr. */ 3067 3124 R0PTRTYPE(PPGMPOOL) pPoolR0; 3068 3125 /** R0 pointer corresponding to PGM::pRomRangesR3. */ 3069 3126 R0PTRTYPE(PPGMROMRANGE) pRomRangesR0; 3070 /** MMIO2 lookup array for ring-0. Indexed by idMmio2 minus 1. 
*/3071 R0PTRTYPE(PPGMREGMMIO2RANGE) apMmio2RangesR0[PGM_MMIO2_MAX_RANGES];3072 3127 3073 3128 /** Hack: Number of deprecated page mapping locks taken by the current lock … … 3158 3213 /** Number of repeated long allocation times. */ 3159 3214 uint32_t cLargePageLongAllocRepeats; 3160 uint32_t uPadding 5;3215 uint32_t uPadding4; 3161 3216 3162 3217 /** … … 3254 3309 #ifndef IN_TSTVMSTRUCTGC /* HACK */ 3255 3310 AssertCompileMemberAlignment(PGM, CritSectX, 8); 3311 AssertCompileMemberAlignment(PGM, CritSectX, 16); 3312 AssertCompileMemberAlignment(PGM, CritSectX, 32); 3313 AssertCompileMemberAlignment(PGM, CritSectX, 64); 3256 3314 AssertCompileMemberAlignment(PGM, ChunkR3Map, 16); 3257 AssertCompileMemberAlignment(PGM, PhysTlbR3, 32); /** @todo 32 byte alignment! */ 3315 AssertCompileMemberAlignment(PGM, PhysTlbR3, 8); 3316 AssertCompileMemberAlignment(PGM, PhysTlbR3, 16); 3317 AssertCompileMemberAlignment(PGM, PhysTlbR3, 32); 3258 3318 AssertCompileMemberAlignment(PGM, PhysTlbR0, 32); 3259 3319 AssertCompileMemberAlignment(PGM, HCPhysZeroPg, 8); … … 3688 3748 typedef struct PGMR0PERVM 3689 3749 { 3750 /** @name RAM ranges 3751 * @{ */ 3752 /** The ring-0 RAM range pointer table. */ 3753 R0PTRTYPE(PPGMRAMRANGE) apRamRanges[PGM_MAX_RAM_RANGES]; 3754 /** Trusted RAM range page counts running parallel to apRamRanges. 3755 * This keeps the original page count when a range is reduced, 3756 * only the PGMRAMRANGE::cb member is changed then. */ 3757 uint32_t acRamRangePages[PGM_MAX_RAM_RANGES]; 3758 /** The memory objects for the RAM ranges (parallel to apRamRanges). */ 3759 RTR0MEMOBJ ahRamRangeMemObjs[PGM_MAX_RAM_RANGES]; 3760 /** The ring-3 mapping objects for the RAM ranges (parallel to apRamRanges). */ 3761 RTR0MEMOBJ ahRamRangeMapObjs[PGM_MAX_RAM_RANGES]; 3762 /** The max RAM range ID (safe). */ 3763 uint32_t idRamRangeMax; 3764 uint8_t abAlignment1[64 - sizeof(uint32_t)]; 3765 /** @} */ 3766 3767 /** @name MMIO2 ranges 3768 * @{ */ 3769 /** The ring-0 RAM range pointer table running parallel to aMmio2Ranges. */ 3770 R0PTRTYPE(PPGMRAMRANGE) apMmio2RamRanges[PGM_MAX_MMIO2_RANGES]; 3771 /** The memory objects for the MMIO2 backing memory (parallel to 3772 * apMmio2RamRanges). */ 3773 RTR0MEMOBJ ahMmio2MemObjs[PGM_MAX_MMIO2_RANGES]; 3774 /** The ring-3 mapping objects for the MMIO2 backing memory (parallel 3775 * to apMmio2RamRanges & ahMmio2MemObjs). */ 3776 RTR0MEMOBJ ahMmio2MapObjs[PGM_MAX_MMIO2_RANGES]; 3777 /** Trusted MMIO2 range sizes (count of guest pages). 3778 * This keeps the original page count when a range is reduced, 3779 * only the PGMRAMRANGE::cb member is changed then. */ 3780 uint32_t acMmio2RangePages[PGM_MAX_MMIO2_RANGES]; 3781 #ifndef VBOX_WITH_LINEAR_HOST_PHYS_MEM 3782 /** Pointer to the ring-0 mapping of the MMIO2 backings (parallel to 3783 * apMmio2RamRanges). */ 3784 R0PTRTYPE(uint8_t *) apbMmio2Backing[PGM_MAX_MMIO2_RANGES]; 3785 #endif 3786 /** @} */ 3787 3788 /** @name ROM ranges 3789 * @{ */ 3790 /** The ring-0 ROM range pointer table. */ 3791 R0PTRTYPE(PPGMROMRANGE) apRomRanges[PGM_MAX_ROM_RANGES]; 3792 /** The memory objects for each ROM range (parallel to apRomRanges). */ 3793 RTR0MEMOBJ ahRomRangeMemObjs[PGM_MAX_ROM_RANGES]; 3794 /** The ring-3 mapping objects for each ROM range (parallel to apRomRanges 3795 * & ahRamRangeMemObjs). */ 3796 RTR0MEMOBJ ahRomRangeMapObjs[PGM_MAX_ROM_RANGES]; 3797 /** Trusted ROM range sizes (count of guest pages). */ 3798 uint32_t acRomRangePages[PGM_MAX_ROM_RANGES]; 3799 /** @} */ 3800 3690 3801 /** @name PGM Pool related stuff. 
3691 3802 * @{ */ … … 3788 3899 DECLCALLBACK(FNPGMRZPHYSPFHANDLER) pgmPhysMmio2WritePfHandler; 3789 3900 #endif 3901 DECLHIDDEN(uint16_t) pgmPhysMmio2CalcChunkCount(RTGCPHYS cb, uint32_t *pcPagesPerChunk); 3902 DECLHIDDEN(int) pgmPhysMmio2RegisterWorker(PVMCC pVM, uint32_t const cGuestPages, uint8_t const idMmio2, 3903 const uint8_t cChunks, PPDMDEVINSR3 const pDevIns, uint8_t 3904 const iSubDev, uint8_t const iRegion, uint32_t const fFlags); 3905 DECLHIDDEN(int) pgmPhysMmio2DeregisterWorker(PVMCC pVM, uint8_t idMmio2, uint8_t cChunks, PPDMDEVINSR3 pDevIns); 3790 3906 int pgmPhysFreePage(PVM pVM, PGMMFREEPAGESREQ pReq, uint32_t *pcPendingPages, PPGMPAGE pPage, RTGCPHYS GCPhys, 3791 3907 PGMPAGETYPE enmNewType); 3908 #ifdef VBOX_STRICT 3909 DECLHIDDEN(bool) pgmPhysAssertRamRangesLocked(PVMCC pVM, bool fInUpdate, bool fRamRelaxed); 3910 #endif 3792 3911 void pgmPhysInvalidRamRangeTlbs(PVMCC pVM); 3793 3912 void pgmPhysInvalidatePageMapTLB(PVMCC pVM); 3794 3913 void pgmPhysInvalidatePageMapTLBEntry(PVMCC pVM, RTGCPHYS GCPhys); 3795 PPGMRAMRANGE pgmPhysGetRangeSlow(PVM pVM, RTGCPHYS GCPhys); 3796 PPGMRAMRANGE pgmPhysGetRangeAtOrAboveSlow(PVM pVM, RTGCPHYS GCPhys); 3797 PPGMPAGE pgmPhysGetPageSlow(PVM pVM, RTGCPHYS GCPhys); 3798 int pgmPhysGetPageExSlow(PVM pVM, RTGCPHYS GCPhys, PPPGMPAGE ppPage); 3799 int pgmPhysGetPageAndRangeExSlow(PVM pVM, RTGCPHYS GCPhys, PPPGMPAGE ppPage, PPGMRAMRANGE *ppRam); 3914 PPGMRAMRANGE pgmPhysGetRangeSlow(PVMCC pVM, RTGCPHYS GCPhys); 3915 PPGMRAMRANGE pgmPhysGetRangeAtOrAboveSlow(PVMCC pVM, RTGCPHYS GCPhys); 3916 PPGMPAGE pgmPhysGetPageSlow(PVMCC pVM, RTGCPHYS GCPhys); 3917 int pgmPhysGetPageExSlow(PVMCC pVM, RTGCPHYS GCPhys, PPPGMPAGE ppPage); 3918 int pgmPhysGetPageAndRangeExSlow(PVMCC pVM, RTGCPHYS GCPhys, PPPGMPAGE ppPage, PPGMRAMRANGE *ppRam); 3919 DECLHIDDEN(int) pgmPhysRamRangeAllocCommon(PVMCC pVM, uint32_t cPages, uint32_t fFlags, uint32_t *pidNewRange); 3920 DECLHIDDEN(int) pgmPhysRomRangeAllocCommon(PVMCC pVM, uint32_t cPages, uint8_t idRomRange, uint32_t fFlags); 3800 3921 #ifdef VBOX_WITH_NATIVE_NEM 3801 3922 void pgmPhysSetNemStateForPages(PPGMPAGE paPages, RTGCPHYS cPages, uint8_t u2State); … … 3803 3924 3804 3925 #ifdef IN_RING3 3805 void pgmR3PhysRelinkRamRanges(PVM pVM);3806 3926 int pgmR3PhysRamPreAllocate(PVM pVM); 3807 3927 int pgmR3PhysRamReset(PVM pVM); -
trunk/src/VBox/VMM/testcase/tstVMStructSize.cpp
r99671 r104840
325 325     CHECK_SIZE(PGMPAGE, 16);
326 326     CHECK_MEMBER_ALIGNMENT(PGMRAMRANGE, aPages, 16);
327         CHECK_MEMBER_ALIGNMENT(PGMREGMMIO2RANGE, RamRange, 16);
    327     CHECK_SIZE_ALIGNMENT(PGMREGMMIO2RANGE, 16);
    328     CHECK_MEMBER_ALIGNMENT(PGMROMRANGE, aPages, 16);
328 329
329 330     /* TM */