- Timestamp: Jul 26, 2023 12:57:59 PM (19 months ago)
- svn:sync-xref-src-repo-rev: 158555
- Location: trunk/src/VBox/VMM
- Files: 2 edited
Legend (for the diffs below):
- Unmodified context lines carry no prefix; "..." marks elided unchanged code
- Added lines are prefixed with "+"
- Removed lines are prefixed with "-"
trunk/src/VBox/VMM/VMMR3/NEMR3Native-darwin-armv8.cpp
(comparing r100166 to r100705)

  }

- #if 0 /* unused */
- DECLINLINE(int) nemR3DarwinProtectPage(PVM pVM, RTGCPHYS GCPhys, size_t cb, uint32_t fPageProt)
+
+ /**
+  * Changes the protection flags for the given guest physical address range.
+  *
+  * @returns VBox status code.
+  * @param   GCPhys      The guest physical address to start mapping.
+  * @param   cb          The size of the range, page aligned.
+  * @param   fPageProt   The page protection flags to use for this range, combination of NEM_PAGE_PROT_XXX.
+  * @param   pu2State    Where to store the state for the new page, optional.
+  */
+ DECLINLINE(int) nemR3DarwinProtect(RTGCPHYS GCPhys, size_t cb, uint32_t fPageProt, uint8_t *pu2State)
  {
      hv_memory_flags_t fHvMemProt = 0;
  ...
          fHvMemProt |= HV_MEMORY_EXEC;

-     hv_return_t hrc;
-     if (pVM->nem.s.fCreatedAsid)
-         hrc = hv_vm_protect_space(pVM->nem.s.uVmAsid, GCPhys, cb, fHvMemProt);
-     else
-         hrc = hv_vm_protect(GCPhys, cb, fHvMemProt);
-
+     hv_return_t hrc = hv_vm_protect(GCPhys, cb, fHvMemProt);
+     if (hrc == HV_SUCCESS)
+     {
+         if (pu2State)
+             *pu2State = nemR3DarwinPageStateFromProt(fPageProt);
+         return VINF_SUCCESS;
+     }
+
+     LogRel(("nemR3DarwinProtect(%RGp,%zu,%#x): failed! hrc=%#x\n",
+             GCPhys, cb, fPageProt, hrc));
      return nemR3DarwinHvSts2Rc(hrc);
  }
- #endif
+

  #ifdef LOG_ENABLED
  ...

      RT_NOREF(fL2Fault, GCPtrDataAbrt);
+
+     if (fWrite)
+     {
+         /*
+          * Check whether this is one of the dirty tracked regions, mark it as dirty
+          * and enable write support for this region again.
+          *
+          * This is required for proper VRAM tracking or the display might not get updated
+          * and it is impossible to use the PGM generic facility as it operates on guest page sizes
+          * but setting protection flags with Hypervisor.framework works only host page sized regions, so
+          * we have to cook our own. Additionally the VRAM region is marked as prefetchable (write-back)
+          * which doesn't produce a valid instruction syndrome requiring restarting the instruction after enabling
+          * write access again (due to a missing interpreter right now).
+          */
+         for (uint32_t idSlot = 0; idSlot < RT_ELEMENTS(pVM->nem.s.aMmio2DirtyTracking); idSlot++)
+         {
+             PNEMHVMMIO2REGION pMmio2Region = &pVM->nem.s.aMmio2DirtyTracking[idSlot];
+
+             if (   GCPhysDataAbrt >= pMmio2Region->GCPhysStart
+                 && GCPhysDataAbrt <= pMmio2Region->GCPhysLast)
+             {
+                 pMmio2Region->fDirty = true;
+
+                 uint8_t u2State;
+                 int rc = nemR3DarwinProtect(pMmio2Region->GCPhysStart, pMmio2Region->GCPhysLast - pMmio2Region->GCPhysStart + 1,
+                                             NEM_PAGE_PROT_READ | NEM_PAGE_PROT_EXECUTE | NEM_PAGE_PROT_WRITE, &u2State);
+
+                 /* Restart the instruction if there is no instruction syndrome available. */
+                 if (RT_FAILURE(rc) || !fIsv)
+                     return rc;
+             }
+         }
+     }
+
      AssertReturn(fIsv, VERR_NOT_SUPPORTED); /** @todo Implement using IEM when this should occur. */

  ...
  {
      RT_NOREF(pVM);
-     return false;
+     return true;
  }
  ...
                                          void *pvRam, void *pvMmio2, uint8_t *pu2State, uint32_t *puNemRange)
  {
-     RT_NOREF(pVM, puNemRange, pvRam, fFlags);
+     RT_NOREF(pvRam);

      Log5(("NEMR3NotifyPhysMmioExMapEarly: %RGp LB %RGp fFlags=%#x pvRam=%p pvMmio2=%p pu2State=%p (%d)\n",
  ...
      {
          Assert(fFlags & NEM_NOTIFY_PHYS_MMIO_EX_F_MMIO2);
-         int rc = nemR3DarwinMap(pVM, GCPhys, pvMmio2, cb, NEM_PAGE_PROT_READ | NEM_PAGE_PROT_WRITE | NEM_PAGE_PROT_EXECUTE, pu2State);
+
+         /* We need to set up our own dirty tracking due to Hypervisor.framework only working on host page sized aligned regions. */
+         uint32_t fProt = NEM_PAGE_PROT_READ | NEM_PAGE_PROT_EXECUTE;
+         if (fFlags & NEM_NOTIFY_PHYS_MMIO_EX_F_TRACK_DIRTY_PAGES)
+         {
+             /* Find a slot for dirty tracking. */
+             PNEMHVMMIO2REGION pMmio2Region = NULL;
+             uint32_t idSlot;
+             for (idSlot = 0; idSlot < RT_ELEMENTS(pVM->nem.s.aMmio2DirtyTracking); idSlot++)
+             {
+                 if (   pVM->nem.s.aMmio2DirtyTracking[idSlot].GCPhysStart == 0
+                     && pVM->nem.s.aMmio2DirtyTracking[idSlot].GCPhysLast == 0)
+                 {
+                     pMmio2Region = &pVM->nem.s.aMmio2DirtyTracking[idSlot];
+                     break;
+                 }
+             }
+
+             if (!pMmio2Region)
+             {
+                 LogRel(("NEMR3NotifyPhysMmioExMapEarly: Out of dirty tracking structures -> VERR_NEM_MAP_PAGES_FAILED\n"));
+                 return VERR_NEM_MAP_PAGES_FAILED;
+             }
+
+             pMmio2Region->GCPhysStart = GCPhys;
+             pMmio2Region->GCPhysLast  = GCPhys + cb - 1;
+             pMmio2Region->fDirty      = false;
+             *puNemRange = idSlot;
+         }
+         else
+             fProt |= NEM_PAGE_PROT_WRITE;
+
+         int rc = nemR3DarwinMap(pVM, GCPhys, pvMmio2, cb, fProt, pu2State);
          if (RT_FAILURE(rc))
          {
  ...
              rc = VERR_NEM_UNMAP_PAGES_FAILED;
          }
+
+         if (fFlags & NEM_NOTIFY_PHYS_MMIO_EX_F_TRACK_DIRTY_PAGES)
+         {
+             /* Reset tracking structure. */
+             uint32_t idSlot = *puNemRange;
+             *puNemRange = UINT32_MAX;
+
+             Assert(idSlot < RT_ELEMENTS(pVM->nem.s.aMmio2DirtyTracking));
+             pVM->nem.s.aMmio2DirtyTracking[idSlot].GCPhysStart = 0;
+             pVM->nem.s.aMmio2DirtyTracking[idSlot].GCPhysLast  = 0;
+             pVM->nem.s.aMmio2DirtyTracking[idSlot].fDirty      = false;
+         }
      }
  ...
                                                 void *pvBitmap, size_t cbBitmap)
  {
-     RT_NOREF(pVM, GCPhys, cb, uNemRange, pvBitmap, cbBitmap);
-     AssertReleaseFailed();
-     return VERR_NOT_IMPLEMENTED;
+     LogFlowFunc(("NEMR3PhysMmio2QueryAndResetDirtyBitmap: %RGp LB %RGp UnemRange=%u\n", GCPhys, cb, uNemRange));
+     Assert(uNemRange < RT_ELEMENTS(pVM->nem.s.aMmio2DirtyTracking));
+
+     /* Keep it simple for now and mark everything as dirty if it is. */
+     int rc = VINF_SUCCESS;
+     if (pVM->nem.s.aMmio2DirtyTracking[uNemRange].fDirty)
+     {
+         ASMBitSetRange(pvBitmap, 0, cbBitmap * 8);
+
+         pVM->nem.s.aMmio2DirtyTracking[uNemRange].fDirty = false;
+         /* Restore as RX only. */
+         uint8_t u2State;
+         rc = nemR3DarwinProtect(GCPhys, cb, NEM_PAGE_PROT_READ | NEM_PAGE_PROT_EXECUTE, &u2State);
+     }
+     else
+         ASMBitClearRange(pvBitmap, 0, cbBitmap * 8);
+
+     return rc;
  }
  ...
      RT_NOREF(pVM, GCPhys, cb, pvPages, fFlags, puNemRange);

-     Log5(("nemR3NativeNotifyPhysRomRegisterEarly: %RGp LB %RGp pvPages=%p fFlags=%#x\n", GCPhys, cb, pvPages, fFlags));
+     Log5(("NEMR3NotifyPhysRomRegisterEarly: %RGp LB %RGp pvPages=%p fFlags=%#x\n", GCPhys, cb, pvPages, fFlags));
      *pu2State = UINT8_MAX;
      *puNemRange = 0;
  ...
                                         uint32_t fFlags, uint8_t *pu2State, uint32_t *puNemRange)
  {
-     Log5(("nemR3NativeNotifyPhysRomRegisterLate: %RGp LB %RGp pvPages=%p fFlags=%#x pu2State=%p (%d) puNemRange=%p (%#x)\n",
+     Log5(("NEMR3NotifyPhysRomRegisterLate: %RGp LB %RGp pvPages=%p fFlags=%#x pu2State=%p (%d) puNemRange=%p (%#x)\n",
            GCPhys, cb, pvPages, fFlags, pu2State, *pu2State, puNemRange, *puNemRange));
      *pu2State = UINT8_MAX;
  ...
       */
      AssertPtrReturn(pvPages, VERR_INVALID_POINTER);
-     int rc = nemR3DarwinMap(pVM, GCPhys, pvPages, cb, NEM_PAGE_PROT_READ | NEM_PAGE_PROT_EXECUTE, pu2State);
+
+     int rc = nemR3DarwinUnmap(pVM, GCPhys, cb, pu2State);
+     AssertRC(rc);
+
+     rc = nemR3DarwinMap(pVM, GCPhys, pvPages, cb, NEM_PAGE_PROT_READ | NEM_PAGE_PROT_EXECUTE, pu2State);
      if (RT_FAILURE(rc))
      {
  ...
                                               RTR3PTR pvMemR3, uint8_t *pu2State)
  {
-     RT_NOREF(pVM);
-
      Log5(("NEMHCNotifyHandlerPhysicalDeregister: %RGp LB %RGp enmKind=%d pvMemR3=%p pu2State=%p (%d)\n",
            GCPhys, cb, enmKind, pvMemR3, pu2State, *pu2State));
  ...
      if (pvMemR3)
      {
-         int rc = nemR3DarwinMap(pVM, GCPhys, pvMemR3, cb, NEM_PAGE_PROT_READ | NEM_PAGE_PROT_WRITE | NEM_PAGE_PROT_EXECUTE, pu2State);
+         /* Unregister what was there before. */
+         int rc = nemR3DarwinUnmap(pVM, GCPhys, cb, pu2State);
+         AssertRC(rc);
+
+         rc = nemR3DarwinMap(pVM, GCPhys, pvMemR3, cb, NEM_PAGE_PROT_READ | NEM_PAGE_PROT_WRITE | NEM_PAGE_PROT_EXECUTE, pu2State);
          AssertLogRelMsgRC(rc, ("NEMHCNotifyHandlerPhysicalDeregister: nemR3DarwinMap(,%p,%RGp,%RGp,) -> %Rrc\n",
                                 pvMemR3, GCPhys, cb, rc));
  ...
      Log5(("nemHCNativeNotifyPhysPageAllocated: %RGp HCPhys=%RHp fPageProt=%#x enmType=%d *pu2State=%d\n",
            GCPhys, HCPhys, fPageProt, enmType, *pu2State));
-     RT_NOREF(HCPhys, fPageProt, enmType);
-
-     return nemR3DarwinUnmap(pVM, GCPhys, GUEST_PAGE_SIZE, pu2State);
+     RT_NOREF(pVM, GCPhys, HCPhys, fPageProt, enmType, pu2State);
+
+     AssertFailed();
+     return VINF_SUCCESS;
  }
  ...
      Log5(("NEMHCNotifyPhysPageProtChanged: %RGp HCPhys=%RHp fPageProt=%#x enmType=%d *pu2State=%d\n",
            GCPhys, HCPhys, fPageProt, enmType, *pu2State));
-     RT_NOREF(HCPhys, pvR3, fPageProt, enmType)
-
-     nemR3DarwinUnmap(pVM, GCPhys, GUEST_PAGE_SIZE, pu2State);
+     RT_NOREF(pVM, GCPhys, HCPhys, pvR3, fPageProt, enmType, pu2State);
  }
  ...
      Log5(("NEMHCNotifyPhysPageChanged: %RGp HCPhys=%RHp->%RHp fPageProt=%#x enmType=%d *pu2State=%d\n",
            GCPhys, HCPhysPrev, HCPhysNew, fPageProt, enmType, *pu2State));
-     RT_NOREF(HCPhysPrev, HCPhysNew, pvNewR3, fPageProt, enmType);
-
-     nemR3DarwinUnmap(pVM, GCPhys, GUEST_PAGE_SIZE, pu2State);
+     RT_NOREF(pVM, GCPhys, HCPhysPrev, HCPhysNew, pvNewR3, fPageProt, enmType, pu2State);
+
+     AssertFailed();
  }
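The nemR3DarwinProtect() hunk above elides the code that translates the NEM_PAGE_PROT_XXX flags into Hypervisor.framework memory flags. For orientation only, here is a minimal standalone sketch of such a translation; the EX_PAGE_PROT_* constants and the exampleProtectRange() helper are hypothetical stand-ins, not the VirtualBox definitions, and the elided VBox code may differ in detail.

    #include <Hypervisor/Hypervisor.h>   /* macOS Hypervisor.framework */
    #include <stddef.h>
    #include <stdint.h>

    /* Hypothetical stand-ins for the NEM_PAGE_PROT_XXX flags; the real values
     * are defined by VirtualBox and may differ. */
    #define EX_PAGE_PROT_READ     UINT32_C(0x1)
    #define EX_PAGE_PROT_WRITE    UINT32_C(0x2)
    #define EX_PAGE_PROT_EXECUTE  UINT32_C(0x4)

    /* Translate generic protection flags to HV_MEMORY_XXX and apply them to a
     * guest-physical range.  hv_vm_protect() works on host-page-aligned ranges,
     * which is why the dirty tracking above is region based rather than using
     * the generic per-guest-page PGM facility. */
    static int exampleProtectRange(uint64_t GCPhys, size_t cb, uint32_t fPageProt)
    {
        hv_memory_flags_t fHvMemProt = 0;
        if (fPageProt & EX_PAGE_PROT_READ)
            fHvMemProt |= HV_MEMORY_READ;
        if (fPageProt & EX_PAGE_PROT_WRITE)
            fHvMemProt |= HV_MEMORY_WRITE;
        if (fPageProt & EX_PAGE_PROT_EXECUTE)
            fHvMemProt |= HV_MEMORY_EXEC;

        hv_return_t hrc = hv_vm_protect(GCPhys, cb, fHvMemProt);
        return hrc == HV_SUCCESS ? 0 : -1;
    }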
trunk/src/VBox/VMM/include/NEMInternal.h
(comparing r100102 to r100705)

  # define NEM_DARWIN_CPUMCTX_EXTRN_MASK_FOR_IEM_XCPT (IEM_CPUMCTX_EXTRN_XCPT_MASK | NEM_DARWIN_CPUMCTX_EXTRN_MASK_FOR_IEM)

+
+ # if defined(VBOX_VMM_TARGET_ARMV8)
+ /**
+  * MMIO2 tracking region.
+  */
+ typedef struct
+ {
+     /* Start of the region. */
+     RTGCPHYS            GCPhysStart;
+     /** End of the region. */
+     RTGCPHYS            GCPhysLast;
+     /** Whether the region was accessed since last time. */
+     bool                fDirty;
+ } NEMHVMMIO2REGION;
+ /** Pointer to a MMIO2 tracking region. */
+ typedef NEMHVMMIO2REGION *PNEMHVMMIO2REGION;
+ # endif
+
  #endif
  ...
      /** The vTimer offset programmed. */
      uint64_t                    u64VTimerOff;
+     /** Dirty tracking slots. */
+     NEMHVMMIO2REGION            aMmio2DirtyTracking[8];
      /** @} */
  # else
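Taken together, the two files implement region-granular dirty tracking for MMIO2 on the macOS/ARM NEM backend: NEMR3NotifyPhysMmioExMapEarly claims one of the eight NEMHVMMIO2REGION slots and maps the range read/execute only, the data-abort exit handler marks the slot dirty and restores write access on the first guest write, and NEMR3PhysMmio2QueryAndResetDirtyBitmap reports the state and re-arms the slot. The standalone C sketch below models that round trip under simplified assumptions; the Example* names and the pfnProtect callback are hypothetical and only stand in for the VirtualBox helpers such as nemR3DarwinProtect().

    #include <stdbool.h>
    #include <stddef.h>
    #include <stdint.h>

    /* Simplified model of one tracking slot, mirroring NEMHVMMIO2REGION. */
    typedef struct
    {
        uint64_t GCPhysStart;   /* first guest-physical byte of the region */
        uint64_t GCPhysLast;    /* last guest-physical byte of the region */
        bool     fDirty;        /* written to since the last query? */
    } ExampleMmio2Region;

    /* Eight slots, matching aMmio2DirtyTracking[8] in NEMInternal.h. */
    static ExampleMmio2Region g_aSlots[8];

    /* Hypothetical protection callback standing in for nemR3DarwinProtect(). */
    typedef int (*ExampleProtectFn)(uint64_t GCPhys, size_t cb, bool fWritable);

    /* Write fault inside a tracked region: mark it dirty and re-enable writes
     * so the guest can continue without an instruction interpreter. */
    static int exampleOnWriteFault(uint64_t GCPhysFault, ExampleProtectFn pfnProtect)
    {
        for (size_t i = 0; i < sizeof(g_aSlots) / sizeof(g_aSlots[0]); i++)
            if (   GCPhysFault >= g_aSlots[i].GCPhysStart
                && GCPhysFault <= g_aSlots[i].GCPhysLast)
            {
                g_aSlots[i].fDirty = true;
                return pfnProtect(g_aSlots[i].GCPhysStart,
                                  g_aSlots[i].GCPhysLast - g_aSlots[i].GCPhysStart + 1,
                                  true /* writable again */);
            }
        return -1; /* not a tracked region */
    }

    /* Query-and-reset: report whether the region was written since the last
     * call and re-arm it by dropping write access again. */
    static bool exampleQueryAndReset(size_t idSlot, ExampleProtectFn pfnProtect)
    {
        bool fWasDirty = g_aSlots[idSlot].fDirty;
        if (fWasDirty)
        {
            g_aSlots[idSlot].fDirty = false;
            pfnProtect(g_aSlots[idSlot].GCPhysStart,
                       g_aSlots[idSlot].GCPhysLast - g_aSlots[idSlot].GCPhysStart + 1,
                       false /* back to read/execute only */);
        }
        return fWasDirty;
    }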