VirtualBox

Changeset 100705 in vbox for trunk


Timestamp: Jul 26, 2023 12:57:59 PM (19 months ago)
Author: vboxsync
svn:sync-xref-src-repo-rev: 158555
Message:

VMM/NEMR3Native-darwin-armv8.cpp: Implement our own simplistic dirty page tracking for MMIO2 regions, as Hypervisor.framework doesn't work with guest page sizes but only with host page sized regions and therefore conflicts with our generic implementation in PGM. With this it is possible to get screen updates for guests which access the standard framebuffer and don't use the SVGA 3 interface to notify the device about dirty regions. bugref:10390
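
For orientation, the sketch below shows the general shape of write-protection based dirty tracking on Hypervisor.framework. Only hv_vm_protect() and the HV_MEMORY_* flags are actual macOS API; the TrackedRegion structure and the helper names are illustrative stand-ins for the NEMHVMMIO2REGION bookkeeping this changeset introduces.

    #include <Hypervisor/Hypervisor.h> /* hv_vm_protect(), HV_MEMORY_* (macOS on arm64) */
    #include <cstdint>
    #include <cstddef>

    /* Illustrative stand-in for NEMHVMMIO2REGION: one tracked MMIO2 range,
     * kept host page aligned because hv_vm_protect() operates on host pages only. */
    struct TrackedRegion
    {
        uint64_t ipaStart; /* guest physical (IPA) start of the region */
        size_t   cb;       /* size in bytes, a multiple of the host page size */
        bool     fDirty;   /* set once the guest has written to the region */
    };

    /* Arm the tracker: revoke write access so the next guest write traps. */
    static hv_return_t trackedRegionArm(TrackedRegion &Region)
    {
        Region.fDirty = false;
        return hv_vm_protect(Region.ipaStart, Region.cb, HV_MEMORY_READ | HV_MEMORY_EXEC);
    }

    /* Data abort path: remember the write and restore write access so the guest can continue. */
    static hv_return_t trackedRegionOnWriteFault(TrackedRegion &Region)
    {
        Region.fDirty = true;
        return hv_vm_protect(Region.ipaStart, Region.cb,
                             HV_MEMORY_READ | HV_MEMORY_WRITE | HV_MEMORY_EXEC);
    }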

Location: trunk/src/VBox/VMM
Files: 2 edited

Legend:

Unmodified lines carry no prefix
Added lines are prefixed with +
Removed lines are prefixed with -
  • trunk/src/VBox/VMM/VMMR3/NEMR3Native-darwin-armv8.cpp

r100166 r100705
 }
 
-#if 0 /* unused */
-DECLINLINE(int) nemR3DarwinProtectPage(PVM pVM, RTGCPHYS GCPhys, size_t cb, uint32_t fPageProt)
+
+/**
+ * Changes the protection flags for the given guest physical address range.
+ *
+ * @returns VBox status code.
+ * @param   GCPhys              The guest physical address to start mapping.
+ * @param   cb                  The size of the range, page aligned.
+ * @param   fPageProt           The page protection flags to use for this range, combination of NEM_PAGE_PROT_XXX
+ * @param   pu2State            Where to store the state for the new page, optional.
+ */
+DECLINLINE(int) nemR3DarwinProtect(RTGCPHYS GCPhys, size_t cb, uint32_t fPageProt, uint8_t *pu2State)
 {
     hv_memory_flags_t fHvMemProt = 0;
…
         fHvMemProt |= HV_MEMORY_EXEC;
 
-    hv_return_t hrc;
-    if (pVM->nem.s.fCreatedAsid)
-        hrc = hv_vm_protect_space(pVM->nem.s.uVmAsid, GCPhys, cb, fHvMemProt);
-    else
-        hrc = hv_vm_protect(GCPhys, cb, fHvMemProt);
-
+    hv_return_t hrc = hv_vm_protect(GCPhys, cb, fHvMemProt);
+    if (hrc == HV_SUCCESS)
+    {
+        if (pu2State)
+            *pu2State = nemR3DarwinPageStateFromProt(fPageProt);
+        return VINF_SUCCESS;
+    }
+
+    LogRel(("nemR3DarwinProtect(%RGp,%zu,%#x): failed! hrc=%#x\n",
+            GCPhys, cb, fPageProt, hrc));
     return nemR3DarwinHvSts2Rc(hrc);
 }
-#endif
+
 
 #ifdef LOG_ENABLED
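
Hypervisor.framework accepts only host page sized and aligned ranges (16 KiB pages on Apple silicon, versus the typically 4 KiB guest pages PGM tracks), which is why the helper above documents cb as page aligned. Below is a standalone sketch of the widening arithmetic such a caller would need; the hard-coded page size and the function name are assumptions for the example, and real code should query the host page size at runtime.

    #include <cstdint>

    /* Assumption for the example: 16 KiB host pages as on Apple silicon. */
    static const uint64_t g_cbHostPage = 16384;

    /* Widen a byte range [GCPhysFirst, GCPhysFirst + cb) to whole host pages,
     * as required before handing it to hv_vm_protect()/hv_vm_map(). */
    static void widenToHostPages(uint64_t GCPhysFirst, uint64_t cb,
                                 uint64_t *pGCPhysAligned, uint64_t *pcbAligned)
    {
        uint64_t const GCPhysStart = GCPhysFirst & ~(g_cbHostPage - 1);                            /* round start down */
        uint64_t const GCPhysEnd   = (GCPhysFirst + cb + g_cbHostPage - 1) & ~(g_cbHostPage - 1);  /* round end up */
        *pGCPhysAligned = GCPhysStart;
        *pcbAligned     = GCPhysEnd - GCPhysStart;
    }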
     
 
     RT_NOREF(fL2Fault, GCPtrDataAbrt);
+
+    if (fWrite)
+    {
+        /*
+         * Check whether this is one of the dirty tracked regions, mark it as dirty
+         * and enable write support for this region again.
+         *
+         * This is required for proper VRAM tracking, or the display might not get updated.
+         * It is impossible to use the generic PGM facility as it operates on guest page sizes,
+         * while setting protection flags with Hypervisor.framework works only on host page sized
+         * regions, so we have to cook our own. Additionally the VRAM region is marked as
+         * prefetchable (write-back), which doesn't produce a valid instruction syndrome, requiring
+         * the instruction to be restarted after enabling write access again (due to a missing
+         * interpreter right now).
+         */
+        for (uint32_t idSlot = 0; idSlot < RT_ELEMENTS(pVM->nem.s.aMmio2DirtyTracking); idSlot++)
+        {
+            PNEMHVMMIO2REGION pMmio2Region = &pVM->nem.s.aMmio2DirtyTracking[idSlot];
+
+            if (   GCPhysDataAbrt >= pMmio2Region->GCPhysStart
+                && GCPhysDataAbrt <= pMmio2Region->GCPhysLast)
+            {
+                pMmio2Region->fDirty = true;
+
+                uint8_t u2State;
+                int rc = nemR3DarwinProtect(pMmio2Region->GCPhysStart, pMmio2Region->GCPhysLast - pMmio2Region->GCPhysStart + 1,
+                                            NEM_PAGE_PROT_READ | NEM_PAGE_PROT_EXECUTE | NEM_PAGE_PROT_WRITE, &u2State);
+
+                /* Restart the instruction if there is no instruction syndrome available. */
+                if (RT_FAILURE(rc) || !fIsv)
+                    return rc;
+            }
+        }
+    }
+
     AssertReturn(fIsv, VERR_NOT_SUPPORTED); /** @todo Implement using IEM when this should occur. */
 
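
Note that the tracking is per region rather than per page, which is the "simplistic" part of the commit message: a single write anywhere in the range marks the whole region dirty and re-enables writes for all of it. A standalone sketch of the containment scan follows, with Region and findRegionForFault invented for the example as stand-ins for NEMHVMMIO2REGION and the loop above.

    #include <cstdint>
    #include <cstddef>

    /* Invented stand-ins for NEMHVMMIO2REGION and the aMmio2DirtyTracking table. */
    struct Region { uint64_t GCPhysStart; uint64_t GCPhysLast; bool fDirty; };
    static const size_t g_cRegions = 8; /* mirrors the fixed eight entry slot table */

    /* Return the tracked region containing the faulting guest physical address, or NULL. */
    static Region *findRegionForFault(Region (&aRegions)[g_cRegions], uint64_t GCPhysFault)
    {
        for (size_t i = 0; i < g_cRegions; i++)
            if (   GCPhysFault >= aRegions[i].GCPhysStart
                && GCPhysFault <= aRegions[i].GCPhysLast) /* GCPhysLast is inclusive */
                return &aRegions[i];
        return nullptr; /* not a dirty tracked MMIO2 access */
    }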
     
 {
     RT_NOREF(pVM);
-    return false;
+    return true;
 }
 
     
                                                   void *pvRam, void *pvMmio2, uint8_t *pu2State, uint32_t *puNemRange)
 {
-    RT_NOREF(pVM, puNemRange, pvRam, fFlags);
+    RT_NOREF(pvRam);
 
     Log5(("NEMR3NotifyPhysMmioExMapEarly: %RGp LB %RGp fFlags=%#x pvRam=%p pvMmio2=%p pu2State=%p (%d)\n",
     
     {
         Assert(fFlags & NEM_NOTIFY_PHYS_MMIO_EX_F_MMIO2);
-        int rc = nemR3DarwinMap(pVM, GCPhys, pvMmio2, cb, NEM_PAGE_PROT_READ | NEM_PAGE_PROT_WRITE | NEM_PAGE_PROT_EXECUTE, pu2State);
+
+        /* We need to set up our own dirty tracking due to Hypervisor.framework only working on host page sized aligned regions. */
+        uint32_t fProt = NEM_PAGE_PROT_READ | NEM_PAGE_PROT_EXECUTE;
+        if (fFlags & NEM_NOTIFY_PHYS_MMIO_EX_F_TRACK_DIRTY_PAGES)
+        {
+            /* Find a slot for dirty tracking. */
+            PNEMHVMMIO2REGION pMmio2Region = NULL;
+            uint32_t idSlot;
+            for (idSlot = 0; idSlot < RT_ELEMENTS(pVM->nem.s.aMmio2DirtyTracking); idSlot++)
+            {
+                if (   pVM->nem.s.aMmio2DirtyTracking[idSlot].GCPhysStart == 0
+                    && pVM->nem.s.aMmio2DirtyTracking[idSlot].GCPhysLast == 0)
+                {
+                    pMmio2Region = &pVM->nem.s.aMmio2DirtyTracking[idSlot];
+                    break;
+                }
+            }
+
+            if (!pMmio2Region)
+            {
+                LogRel(("NEMR3NotifyPhysMmioExMapEarly: Out of dirty tracking structures -> VERR_NEM_MAP_PAGES_FAILED\n"));
+                return VERR_NEM_MAP_PAGES_FAILED;
+            }
+
+            pMmio2Region->GCPhysStart = GCPhys;
+            pMmio2Region->GCPhysLast  = GCPhys + cb - 1;
+            pMmio2Region->fDirty      = false;
+            *puNemRange = idSlot;
+        }
+        else
+            fProt |= NEM_PAGE_PROT_WRITE;
+
+        int rc = nemR3DarwinMap(pVM, GCPhys, pvMmio2, cb, fProt, pu2State);
         if (RT_FAILURE(rc))
         {
     
             rc = VERR_NEM_UNMAP_PAGES_FAILED;
         }
+
+        if (fFlags & NEM_NOTIFY_PHYS_MMIO_EX_F_TRACK_DIRTY_PAGES)
+        {
+            /* Reset tracking structure. */
+            uint32_t idSlot = *puNemRange;
+            *puNemRange = UINT32_MAX;
+
+            Assert(idSlot < RT_ELEMENTS(pVM->nem.s.aMmio2DirtyTracking));
+            pVM->nem.s.aMmio2DirtyTracking[idSlot].GCPhysStart = 0;
+            pVM->nem.s.aMmio2DirtyTracking[idSlot].GCPhysLast  = 0;
+            pVM->nem.s.aMmio2DirtyTracking[idSlot].fDirty      = false;
+        }
     }
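
The slot index chosen at map time is stored in *puNemRange and handed back by PGM on later calls (dirty bitmap queries, unmap), so it effectively acts as an opaque range handle. A standalone sketch of that contract follows; SlotTable and its method names are invented for the example, and the zeroed start/last pair marking a free slot mirrors the code above.

    #include <cstdint>
    #include <cassert>

    /* Invented miniature of the slot table: the slot index doubles as the
     * uNemRange handle PGM passes back on later calls. */
    struct SlotTable
    {
        struct { uint64_t GCPhysStart, GCPhysLast; bool fDirty; } aSlots[8];

        /* Map time: claim a free slot and return its index as the range id (-1 if full). */
        int alloc(uint64_t GCPhys, uint64_t cb)
        {
            for (int i = 0; i < 8; i++)
                if (aSlots[i].GCPhysStart == 0 && aSlots[i].GCPhysLast == 0)
                {
                    aSlots[i] = { GCPhys, GCPhys + cb - 1, false };
                    return i;
                }
            return -1; /* table exhausted, the mapping request has to fail */
        }

        /* Unmap time: release the slot referenced by the range id handed back by PGM. */
        void release(int idSlot)
        {
            assert(idSlot >= 0 && idSlot < 8);
            aSlots[idSlot] = { 0, 0, false };
        }
    };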
    16201711
     
                                                            void *pvBitmap, size_t cbBitmap)
 {
-    RT_NOREF(pVM, GCPhys, cb, uNemRange, pvBitmap, cbBitmap);
-    AssertReleaseFailed();
-    return VERR_NOT_IMPLEMENTED;
+    LogFlowFunc(("NEMR3PhysMmio2QueryAndResetDirtyBitmap: %RGp LB %RGp UnemRange=%u\n", GCPhys, cb, uNemRange));
+    Assert(uNemRange < RT_ELEMENTS(pVM->nem.s.aMmio2DirtyTracking));
+
+    /* Keep it simple for now and mark everything as dirty if it is. */
+    int rc = VINF_SUCCESS;
+    if (pVM->nem.s.aMmio2DirtyTracking[uNemRange].fDirty)
+    {
+        ASMBitSetRange(pvBitmap, 0, cbBitmap * 8);
+
+        pVM->nem.s.aMmio2DirtyTracking[uNemRange].fDirty = false;
+        /* Restore as RX only. */
+        uint8_t u2State;
+        rc = nemR3DarwinProtect(GCPhys, cb, NEM_PAGE_PROT_READ | NEM_PAGE_PROT_EXECUTE, &u2State);
+    }
+    else
+        ASMBitClearRange(pvBitmap, 0, cbBitmap * 8);
+
+    return rc;
 }
 
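
On the consuming side the caller supplies a bitmap with one bit per guest page and expects the call to both report and re-arm the tracking; with the all-or-nothing scheme above, every bit comes back either set or cleared. Below is a standalone sketch of a consumer that repaints the pages reported dirty; all names are invented for the example.

    #include <cstdint>
    #include <cstddef>

    static const size_t g_cbGuestPage = 4096; /* assumed guest page size for the example */

    /* Invented callback: repaint one guest page worth of VRAM at the given offset. */
    static void refreshFramebufferPage(size_t offRegion) { (void)offRegion; }

    /* Walk a query-and-reset style dirty bitmap (one bit per guest page, LSB first)
     * and refresh every page whose bit is set. */
    static void syncDirtyFramebuffer(const uint8_t *pbBitmap, size_t cPages)
    {
        for (size_t iPage = 0; iPage < cPages; iPage++)
            if (pbBitmap[iPage / 8] & (uint8_t)(1u << (iPage % 8)))
                refreshFramebufferPage(iPage * g_cbGuestPage);
    }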
     
     RT_NOREF(pVM, GCPhys, cb, pvPages, fFlags, puNemRange);
 
-    Log5(("nemR3NativeNotifyPhysRomRegisterEarly: %RGp LB %RGp pvPages=%p fFlags=%#x\n", GCPhys, cb, pvPages, fFlags));
+    Log5(("NEMR3NotifyPhysRomRegisterEarly: %RGp LB %RGp pvPages=%p fFlags=%#x\n", GCPhys, cb, pvPages, fFlags));
     *pu2State   = UINT8_MAX;
     *puNemRange = 0;
     
                                                     uint32_t fFlags, uint8_t *pu2State, uint32_t *puNemRange)
 {
-    Log5(("nemR3NativeNotifyPhysRomRegisterLate: %RGp LB %RGp pvPages=%p fFlags=%#x pu2State=%p (%d) puNemRange=%p (%#x)\n",
+    Log5(("NEMR3NotifyPhysRomRegisterLate: %RGp LB %RGp pvPages=%p fFlags=%#x pu2State=%p (%d) puNemRange=%p (%#x)\n",
           GCPhys, cb, pvPages, fFlags, pu2State, *pu2State, puNemRange, *puNemRange));
     *pu2State = UINT8_MAX;
…
      */
     AssertPtrReturn(pvPages, VERR_INVALID_POINTER);
-    int rc = nemR3DarwinMap(pVM, GCPhys, pvPages, cb, NEM_PAGE_PROT_READ | NEM_PAGE_PROT_EXECUTE, pu2State);
+
+    int rc = nemR3DarwinUnmap(pVM, GCPhys, cb, pu2State);
+    AssertRC(rc);
+
+    rc = nemR3DarwinMap(pVM, GCPhys, pvPages, cb, NEM_PAGE_PROT_READ | NEM_PAGE_PROT_EXECUTE, pu2State);
     if (RT_FAILURE(rc))
     {
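
Both the late ROM registration above and the handler deregistration further down now tear down any existing mapping before installing the new one. The sketch below shows that unmap-then-map sequence against the Hypervisor.framework API; the premise that hv_vm_map() will not replace an already mapped range in place is an assumption inferred from the added unmap calls.

    #include <Hypervisor/Hypervisor.h> /* hv_vm_map(), hv_vm_unmap(), HV_MEMORY_* */
    #include <cstddef>

    /* Replace whatever is currently mapped at guest physical [ipa, ipa + cb) with pvHost. */
    static hv_return_t remapRegion(void *pvHost, hv_ipa_t ipa, size_t cb, hv_memory_flags_t fFlags)
    {
        hv_return_t hrc = hv_vm_unmap(ipa, cb); /* tear down the old mapping first */
        if (hrc != HV_SUCCESS)
            return hrc;                         /* a real implementation may tolerate "nothing mapped" here */
        return hv_vm_map(pvHost, ipa, cb, fFlags);
    }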
     
                                                         RTR3PTR pvMemR3, uint8_t *pu2State)
 {
-    RT_NOREF(pVM);
-
     Log5(("NEMHCNotifyHandlerPhysicalDeregister: %RGp LB %RGp enmKind=%d pvMemR3=%p pu2State=%p (%d)\n",
           GCPhys, cb, enmKind, pvMemR3, pu2State, *pu2State));
…
     if (pvMemR3)
     {
-        int rc = nemR3DarwinMap(pVM, GCPhys, pvMemR3, cb, NEM_PAGE_PROT_READ | NEM_PAGE_PROT_WRITE | NEM_PAGE_PROT_EXECUTE, pu2State);
+        /* Unregister what was there before. */
+        int rc = nemR3DarwinUnmap(pVM, GCPhys, cb, pu2State);
+        AssertRC(rc);
+
+        rc = nemR3DarwinMap(pVM, GCPhys, pvMemR3, cb, NEM_PAGE_PROT_READ | NEM_PAGE_PROT_WRITE | NEM_PAGE_PROT_EXECUTE, pu2State);
         AssertLogRelMsgRC(rc, ("NEMHCNotifyHandlerPhysicalDeregister: nemR3DarwinMap(,%p,%RGp,%RGp,) -> %Rrc\n",
                           pvMemR3, GCPhys, cb, rc));
     
     Log5(("nemHCNativeNotifyPhysPageAllocated: %RGp HCPhys=%RHp fPageProt=%#x enmType=%d *pu2State=%d\n",
           GCPhys, HCPhys, fPageProt, enmType, *pu2State));
-    RT_NOREF(HCPhys, fPageProt, enmType);
-
-    return nemR3DarwinUnmap(pVM, GCPhys, GUEST_PAGE_SIZE, pu2State);
+    RT_NOREF(pVM, GCPhys, HCPhys, fPageProt, enmType, pu2State);
+
+    AssertFailed();
+    return VINF_SUCCESS;
 }
 
     
     Log5(("NEMHCNotifyPhysPageProtChanged: %RGp HCPhys=%RHp fPageProt=%#x enmType=%d *pu2State=%d\n",
           GCPhys, HCPhys, fPageProt, enmType, *pu2State));
-    RT_NOREF(HCPhys, pvR3, fPageProt, enmType)
-
-    nemR3DarwinUnmap(pVM, GCPhys, GUEST_PAGE_SIZE, pu2State);
+    RT_NOREF(pVM, GCPhys, HCPhys, pvR3, fPageProt, enmType, pu2State);
 }
 
     
     Log5(("NEMHCNotifyPhysPageChanged: %RGp HCPhys=%RHp->%RHp fPageProt=%#x enmType=%d *pu2State=%d\n",
           GCPhys, HCPhysPrev, HCPhysNew, fPageProt, enmType, *pu2State));
-    RT_NOREF(HCPhysPrev, HCPhysNew, pvNewR3, fPageProt, enmType);
-
-    nemR3DarwinUnmap(pVM, GCPhys, GUEST_PAGE_SIZE, pu2State);
+    RT_NOREF(pVM, GCPhys, HCPhysPrev, HCPhysNew, pvNewR3, fPageProt, enmType, pu2State);
+
+    AssertFailed();
 }
 
  • trunk/src/VBox/VMM/include/NEMInternal.h

r100102 r100705
 # define NEM_DARWIN_CPUMCTX_EXTRN_MASK_FOR_IEM_XCPT (IEM_CPUMCTX_EXTRN_XCPT_MASK | NEM_DARWIN_CPUMCTX_EXTRN_MASK_FOR_IEM)
 
+
+# if defined(VBOX_VMM_TARGET_ARMV8)
+/**
+ * MMIO2 tracking region.
+ */
+typedef struct
+{
+    /* Start of the region. */
+    RTGCPHYS                    GCPhysStart;
+    /** End of the region. */
+    RTGCPHYS                    GCPhysLast;
+    /** Whether the region was accessed since last time. */
+    bool                        fDirty;
+} NEMHVMMIO2REGION;
+/** Pointer to a MMIO2 tracking region. */
+typedef NEMHVMMIO2REGION *PNEMHVMMIO2REGION;
+# endif
+
 #endif
 
     
     /** The vTimer offset programmed. */
     uint64_t                    u64VTimerOff;
+    /** Dirty tracking slots. */
+    NEMHVMMIO2REGION            aMmio2DirtyTracking[8];
     /** @} */
 # else