VirtualBox

Changeset 4219 in vbox for trunk/src/VBox/Runtime


Timestamp: Aug 18, 2007 11:42:23 PM
Author: vboxsync
Message: RTR0MemObj for NT (untested).

Location: trunk/src/VBox/Runtime
Files: 3 edited
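
For context, the RTR0MemObj API this changeset implements for NT is the IPRT ring-0 memory object interface. A minimal usage sketch (not part of the changeset; the function name is made up, and the calls are as declared in include/iprt/memobj.h):

    /* Allocate a page through the memory object API this backend implements,
     * query the kernel mapping and physical address, then free it. */
    #include <iprt/memobj.h>
    #include <iprt/param.h>
    #include <iprt/err.h>

    static int sketchUseMemObj(void)
    {
        RTR0MEMOBJ hMemObj;
        int rc = RTR0MemObjAllocPage(&hMemObj, PAGE_SIZE, false /* fExecutable */);
        if (RT_SUCCESS(rc))
        {
            void    *pv   = RTR0MemObjAddress(hMemObj);            /* ring-0 mapping */
            RTHCPHYS Phys = RTR0MemObjGetPagePhysAddr(hMemObj, 0 /* iPage */);
            /* ... use pv and Phys ... */
            NOREF(pv); NOREF(Phys);
            rc = RTR0MemObjFree(hMemObj, false /* fFreeMappings */);
        }
        return rc;
    }

On NT the backend below implements these operations with MDLs (memory descriptor lists).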

  • trunk/src/VBox/Runtime/Makefile.kmk

(r4178 → r4219)

@@ -768,6 +768,8 @@
         generic/RTAssertDoBreakpoint-generic.cpp \
         nt/RTErrConvertFromNtStatus.cpp \
+        r0drv/memobj-r0drv.cpp \
         r0drv/nt/alloc-r0drv-nt.cpp \
         r0drv/nt/initterm-r0drv-nt.cpp \
+        r0drv/nt/memobj-r0drv-nt.cpp \
         r0drv/nt/process-r0drv-nt.cpp \
         r0drv/nt/RTLogWriteDebugger-r0drv-nt.cpp \
@@ -776,8 +778,4 @@
         r0drv/nt/thread-r0drv-nt.cpp \
         string/strncmp.cpp
-
-#RuntimeR0Drv_SOURCES.win += \
-#       r0drv/memobj-r0drv.cpp \
-#       r0drv/nt/memobj-r0drv-nt.cpp
 
 RuntimeR0Drv_SOURCES.win.amd64 = \
  • trunk/src/VBox/Runtime/r0drv/nt/memobj-r0drv-nt.cpp

(r4154 → r4219)

@@ -62,5 +62,5 @@
 #endif
     /** The number of PMDLs (memory descriptor lists) in the array. */
-    unsigned            cMdls;
+    uint32_t            cMdls;
     /** Array of MDL pointers. (variable size) */
     PMDL                apMdls[1];
@@ -78,5 +78,5 @@
     {
         case RTR0MEMOBJTYPE_LOW:
-#ifdef IPRT_TARGET_NT4
+#ifndef IPRT_TARGET_NT4
             if (pMemNt->fAllocatedPagesForMdl)
             {
@@ -91,5 +91,7 @@
             }
 #endif
-            /* fall thru */
+            AssertFailed();
+            break;
+
         case RTR0MEMOBJTYPE_PAGE:
             Assert(pMemNt->Core.pv);
@@ -116,5 +118,5 @@
         case RTR0MEMOBJTYPE_PHYS:
         case RTR0MEMOBJTYPE_PHYS_NC:
-#ifdef IPRT_TARGET_NT4
+#ifndef IPRT_TARGET_NT4
             if (pMemNt->fAllocatedPagesForMdl)
             {
@@ -122,6 +124,8 @@
                 pMemNt->apMdls[0] = NULL;
                 pMemNt->cMdls = 0;
+                break;
             }
 #endif
+            AssertFailed();
             break;
 
@@ -128,9 +132,9 @@
         case RTR0MEMOBJTYPE_LOCK:
-            for (unsigned i = 0; i < pMemNt->cMdls; i++)
-            {
-                MmUnlockPages(pMemNt->apMdl[i]);
-                IoFreeMdl(pMemNt->apMdl[i]);
-                pMemNt->apMdl[i] = NULL;
+            for (uint32_t i = 0; i < pMemNt->cMdls; i++)
+            {
+                MmUnlockPages(pMemNt->apMdls[i]);
+                IoFreeMdl(pMemNt->apMdls[i]);
+                pMemNt->apMdls[i] = NULL;
             }
             break;
 
@@ -137,10 +141,9 @@
         case RTR0MEMOBJTYPE_RES_VIRT:
-            if (pMemNt->Core.u.ResVirt.R0Process == NIL_RTR0PROCESS)
-            {
-                MmMapIoSpace
+/*            if (pMemNt->Core.u.ResVirt.R0Process == NIL_RTR0PROCESS)
+            {
             }
             else
             {
-            }
+            }*/
             AssertMsgFailed(("RTR0MEMOBJTYPE_RES_VIRT\n"));
             return VERR_INTERNAL_ERROR;
@@ -149,9 +152,21 @@
         case RTR0MEMOBJTYPE_MAPPING:
         {
-            Assert(pMemNt->Core.pv && pMemNt->cMdls == 1 && pMemNt->apMdls[0]);
+            Assert(pMemNt->Core.pv);
             PRTR0MEMOBJNT pMemNtParent = (PRTR0MEMOBJNT)pMemNt->Core.uRel.Child.pParent;
             Assert(pMemNtParent);
-            Assert(pMemNtParent->cMdls == 1 && pMemNtParent->apMdls[0]);
-            MmUnmapLockedPages(pMemNt->Core.pv, pMemNtParent->apMdls);
+            if (pMemNtParent->cMdls)
+            {
+                Assert(pMemNt->cMdls == 1 && pMemNt->apMdls[0]);
+                Assert(pMemNtParent->cMdls == 1 && pMemNtParent->apMdls[0]);
+                Assert(     pMemNt->Core.u.Mapping.R0Process == NIL_RTR0PROCESS
+                       ||   pMemNt->Core.u.Mapping.R0Process == RTR0ProcHandleSelf());
+                MmUnmapLockedPages(pMemNt->Core.pv, pMemNtParent->apMdls[0]);
+            }
+            else
+            {
+                Assert(     pMemNtParent->Core.enmType == RTR0MEMOBJTYPE_PHYS
+                       &&   !pMemNtParent->Core.u.Phys.fAllocated);
+                MmUnmapIoSpace(pMemNt->Core.pv, pMemNt->Core.cb);
+            }
             pMemNt->Core.pv = NULL;
             break;
     
@@ -169,4 +184,6 @@
 int rtR0MemObjNativeAllocPage(PPRTR0MEMOBJINTERNAL ppMem, size_t cb, bool fExecutable)
 {
+    AssertMsgReturn(cb <= _1G, ("%#x\n", cb), VERR_OUT_OF_RANGE); /* for safe size_t -> ULONG */
+
     /*
      * Try allocate the memory and create an MDL for them so
@@ -178,9 +195,11 @@
     if (pv)
     {
-        PMDL pMdl = IoAllocateMdl(pv, cb, FALSE, FALSE, NULL);
+        PMDL pMdl = IoAllocateMdl(pv, (ULONG)cb, FALSE, FALSE, NULL);
         if (pMdl)
         {
             MmBuildMdlForNonPagedPool(pMdl);
-            /** @todo if (fExecutable) */
+#ifdef RT_ARCH_AMD64
+            MmProtectMdlSystemAddress(pMdl, PAGE_EXECUTE_READWRITE);
+#endif
 
             /*
     
@@ -207,4 +226,6 @@
 int rtR0MemObjNativeAllocLow(PPRTR0MEMOBJINTERNAL ppMem, size_t cb, bool fExecutable)
 {
+    AssertMsgReturn(cb <= _1G, ("%#x\n", cb), VERR_OUT_OF_RANGE); /* for safe size_t -> ULONG */
+
     /*
      * Try see if we get lucky first...
@@ -234,7 +255,7 @@
      */
     PHYSICAL_ADDRESS Zero;
+    Zero.QuadPart = 0;
     PHYSICAL_ADDRESS HighAddr;
-    Zero.QuadPart = 0;
-    High.QuadPart = _4G - 1;
+    HighAddr.QuadPart = _4G - 1;
     PMDL pMdl = MmAllocatePagesForMdl(Zero, HighAddr, Zero, cb);
     if (pMdl)
     
@@ -289,4 +310,6 @@
 static int rtR0MemObjNativeAllocContEx(PPRTR0MEMOBJINTERNAL ppMem, size_t cb, bool fExecutable, RTHCPHYS PhysHighest)
 {
+    AssertMsgReturn(cb <= _1G, ("%#x\n", cb), VERR_OUT_OF_RANGE); /* for safe size_t -> ULONG */
+
     /*
      * Allocate the memory and create an MDL for it.
@@ -298,9 +321,11 @@
         return VERR_NO_MEMORY;
 
-    PMDL pMdl = IoAllocateMdl(pv, cb, FALSE, FALSE, NULL);
+    PMDL pMdl = IoAllocateMdl(pv, (ULONG)cb, FALSE, FALSE, NULL);
     if (pMdl)
     {
         MmBuildMdlForNonPagedPool(pMdl);
-        /** @todo fExecutable */
+#ifdef RT_ARCH_AMD64
+        MmProtectMdlSystemAddress(pMdl, PAGE_EXECUTE_READWRITE);
+#endif
 
         PRTR0MEMOBJNT pMemNt = (PRTR0MEMOBJNT)rtR0MemObjNew(sizeof(*pMemNt), RTR0MEMOBJTYPE_CONT, pv, cb);
     
@@ -333,13 +358,19 @@
      * Try and see if we're lucky and get a contiguous chunk from MmAllocatePagesForMdl.
      *
-     * If the allocation is big, the chances are *probably* not very good. The current
-     * max limit is kind of random.
+     * This is preferable to using MmAllocateContiguousMemory because there are
+     * a few situations where the memory shouldn't be mapped, like for instance
+     * VT-x control memory. Since these are rather small allocations (one or
+     * two pages) MmAllocatePagesForMdl will probably be able to satisfy the
+     * request.
+     *
+     * If the allocation is big, the chances are *probably* not very good. The
+     * current limit is kind of random...
      */
     if (cb < _128K)
     {
         PHYSICAL_ADDRESS Zero;
+        Zero.QuadPart = 0;
         PHYSICAL_ADDRESS HighAddr;
-        Zero.QuadPart = 0;
-        High.QuadPart = _4G - 1;
+        HighAddr.QuadPart = PhysHighest == NIL_RTHCPHYS ? MAXLONGLONG : PhysHighest;
         PMDL pMdl = MmAllocatePagesForMdl(Zero, HighAddr, Zero, cb);
         if (pMdl)
@@ -349,5 +380,5 @@
                 PPFN_NUMBER paPfns = MmGetMdlPfnArray(pMdl);
                 PFN_NUMBER Pfn = paPfns[0] + 1;
-                const size_t cPages = cb >> PAGE_SIZE;
+                const size_t cPages = cb >> PAGE_SHIFT;
                 size_t iPage;
                 for (iPage = 1; iPage < cPages; iPage++, Pfn++)
@@ -372,5 +403,5 @@
         }
     }
-#endif
+#endif /* !IPRT_TARGET_NT4 */
 
     return rtR0MemObjNativeAllocContEx(ppMem, cb, false, PhysHighest);
     
@@ -382,7 +413,7 @@
 #ifndef IPRT_TARGET_NT4
     PHYSICAL_ADDRESS Zero;
+    Zero.QuadPart = 0;
     PHYSICAL_ADDRESS HighAddr;
-    Zero.QuadPart = 0;
-    High.QuadPart = _4G - 1;
+    HighAddr.QuadPart = PhysHighest == NIL_RTHCPHYS ? MAXLONGLONG : PhysHighest;
     PMDL pMdl = MmAllocatePagesForMdl(Zero, HighAddr, Zero, cb);
     if (pMdl)
@@ -390,5 +421,5 @@
         if (MmGetMdlByteCount(pMdl) >= cb)
         {
-            PRTR0MEMOBJNT pMemNt = (PRTR0MEMOBJNT)rtR0MemObjNew(sizeof(*pMemNt), RTR0MEMOBJTYPE_CONT, pv, cb);
+            PRTR0MEMOBJNT pMemNt = (PRTR0MEMOBJNT)rtR0MemObjNew(sizeof(*pMemNt), RTR0MEMOBJTYPE_PHYS_NC, NULL, cb);
             if (pMemNt)
             {
@@ -424,7 +455,6 @@
     if (pMemNt)
     {
-        pMemNt->Core.u.Phys.PhysBase = PhysAddr;
+        pMemNt->Core.u.Phys.PhysBase = Phys;
         pMemNt->Core.u.Phys.fAllocated = false;
-        pMemNt->pMemDesc = pMemDesc;
         *ppMem = &pMemNt->Core;
         return VINF_SUCCESS;
@@ -449,9 +479,11 @@
      * Calc the number of MDLs we need and allocate the memory object structure.
      */
-    unsigned cMdls = pMem->cb / MAX_LOCK_MEM_SIZE;
-    if ((pMem->cb % MAX_LOCK_MEM_SIZE) > 0)
+    size_t cMdls = cb / MAX_LOCK_MEM_SIZE;
+    if (cb % MAX_LOCK_MEM_SIZE)
         cMdls++;
+    if (cMdls >= UINT32_MAX)
+        return VERR_OUT_OF_RANGE;
     PRTR0MEMOBJNT pMemNt = (PRTR0MEMOBJNT)rtR0MemObjNew(RT_OFFSETOF(RTR0MEMOBJNT, apMdls[cMdls]),
-                                                        RTR0MEMOBJTYPE_LOCK, (void *)R3Ptr, cb);
+                                                        RTR0MEMOBJTYPE_LOCK, pv, cb);
     if (!pMemNt)
         return VERR_NO_MEMORY;
@@ -462,6 +494,6 @@
     int         rc = VINF_SUCCESS;
     size_t      cbTotal = 0;
-    uint8_t    *pb = pv;
-    unsigned    iMdl;
+    uint8_t    *pb = (uint8_t *)pv;
+    uint32_t    iMdl;
     for (iMdl = 0; iMdl < cMdls; iMdl++)
     {
@@ -473,5 +505,5 @@
             cbCur = MAX_LOCK_MEM_SIZE;
         AssertMsg(cbCur, ("cbCur: 0!\n"));
-        PMDL pMdl = IoAllocateMdl(pb, cbCur, FALSE, FALSE, NULL);
+        PMDL pMdl = IoAllocateMdl(pb, (ULONG)cbCur, FALSE, FALSE, NULL);
         if (!pMdl)
         {
@@ -513,10 +545,10 @@
     while (iMdl-- > 0)
     {
-        MmUnlockPages(pMemNt->apMdl[iMdl]);
-        IoFreeMdl(pMemNt->apMdl[iMdl]);
-        pMemNt->apMdl[iMdl] = NULL;
-    }
-    rtR0MemObjDelete(pMemNt);
-    return SUPDRV_ERR_LOCK_FAILED;
+        MmUnlockPages(pMemNt->apMdls[iMdl]);
+        IoFreeMdl(pMemNt->apMdls[iMdl]);
+        pMemNt->apMdls[iMdl] = NULL;
+    }
+    rtR0MemObjDelete(&pMemNt->Core);
+    return VERR_LOCK_FAILED;
 }
 
     
@@ -525,5 +557,5 @@
 {
     AssertMsgReturn(R0Process == RTR0ProcHandleSelf(), ("%p != %p\n", R0Process, RTR0ProcHandleSelf()), VERR_NOT_SUPPORTED);
-    /* ( Can use MmProbeAndLockProcessPages if we need to mess with other processes later.) */
+    /* (Can use MmProbeAndLockProcessPages if we need to mess with other processes later.) */
     return rtR0MemObjNtLock(ppMem, (void *)R3Ptr, cb, R0Process);
 }
@@ -532,5 +564,5 @@
 int rtR0MemObjNativeLockKernel(PPRTR0MEMOBJINTERNAL ppMem, void *pv, size_t cb)
 {
-    return rtR0MemObjNtLock(ppMem, (void *)R3Ptr, cb, NIL_RTR0PROCESS);
+    return rtR0MemObjNtLock(ppMem, pv, cb, NIL_RTR0PROCESS);
 }
 
     
@@ -547,31 +579,61 @@
 }
 
-
-int rtR0MemObjNativeMapKernel(PPRTR0MEMOBJINTERNAL ppMem, RTR0MEMOBJ pMemToMap, void *pvFixed, size_t uAlignment, unsigned fProt)
-{
-    /*
-     * Must have a memory descriptor.
-     */
-    int rc = VERR_INVALID_PARAMETER;
-    PRTR0MEMOBJNT pMemToMapDarwin = (PRTR0MEMOBJNT)pMemToMap;
-    if (pMemToMapDarwin->pMemDesc)
-    {
-        IOMemoryMap *pMemMap = pMemToMapDarwin->pMemDesc->map(kernel_task, kIOMapAnywhere,
-                                                              kIOMapAnywhere | kIOMapDefaultCache);
-        if (pMemMap)
-        {
-            IOVirtualAddress VirtAddr = pMemMap->getVirtualAddress();
-            void *pv = (void *)(uintptr_t)VirtAddr;
-            if ((uintptr_t)pv == VirtAddr)
-            {
-                /*
-                 * Create the IPRT memory object.
-                 */
-                PRTR0MEMOBJNT pMemNt = (PRTR0MEMOBJNT)rtR0MemObjNew(sizeof(*pMemNt), RTR0MEMOBJTYPE_MAPPING,
-                                                                                pv, pMemToMapDarwin->Core.cb);
+/**
+ * Internal worker for rtR0MemObjNativeMapKernel and rtR0MemObjNativeMapUser.
+ *
+ * @returns IPRT status code.
+ * @param   ppMem       Where to store the memory object for the mapping.
+ * @param   pMemToMap   The memory object to map.
+ * @param   pvFixed     Where to map it. (void *)-1 if anywhere is fine.
+ * @param   uAlignment  The alignment requirement for the mapping.
+ * @param   fProt       The desired page protection for the mapping.
+ * @param   R0Process   If NIL_RTR0PROCESS map into system (kernel) memory.
+ *                      If not nil, it's the current process.
+ */
+static int rtR0MemObjNtMap(PPRTR0MEMOBJINTERNAL ppMem, RTR0MEMOBJ pMemToMap, void *pvFixed, size_t uAlignment,
+                           unsigned fProt, RTR0PROCESS R0Process)
+{
+    int rc = VERR_MAP_FAILED;
+
+    /*
+     * There are two basic cases here, either we've got an MDL and can
+     * map it using MmMapLockedPages, or we've got a contiguous physical
+     * range (MMIO most likely) and can use MmMapIoSpace.
+     */
+    PRTR0MEMOBJNT pMemNtToMap = (PRTR0MEMOBJNT)pMemToMap;
+    if (pMemNtToMap->cMdls)
+    {
+        /* don't attempt map locked regions with more than one mdl. */
+        if (pMemNtToMap->cMdls != 1)
+            return VERR_NOT_SUPPORTED;
+
+        /* we can't map anything to the first page, sorry. */
+        if (pvFixed == 0)
+            return VERR_NOT_SUPPORTED;
+
+        /* only one system mapping for now - no time to figure out MDL restrictions right now. */
+        if (    pMemNtToMap->Core.uRel.Parent.cMappings
+            &&  R0Process == NIL_RTR0PROCESS)
+            return VERR_NOT_SUPPORTED;
+
+        __try
+        {
+            /** @todo uAlignment */
+            /** @todo How to set the protection on the pages? */
+            void *pv = MmMapLockedPagesSpecifyCache(pMemNtToMap->apMdls[0],
+                                                    R0Process == NIL_RTR0PROCESS ? KernelMode : UserMode,
+                                                    MmCached,
+                                                    pvFixed != (void *)-1 ? pvFixed : NULL,
+                                                    FALSE /* no bug check on failure */,
+                                                    NormalPagePriority);
+            if (pv)
+            {
+                NOREF(fProt);
+
+                PRTR0MEMOBJNT pMemNt = (PRTR0MEMOBJNT)rtR0MemObjNew(sizeof(*pMemNt), RTR0MEMOBJTYPE_MAPPING, pv,
+                                                                    pMemNtToMap->Core.cb);
                 if (pMemNt)
                 {
-                    pMemNt->Core.u.Mapping.R0Process = NIL_RTR0PROCESS;
-                    pMemNt->pMemMap = pMemMap;
+                    pMemNt->Core.u.Mapping.R0Process = R0Process;
                     *ppMem = &pMemNt->Core;
                     return VINF_SUCCESS;
@@ -579,12 +641,44 @@
 
                 rc = VERR_NO_MEMORY;
-            }
-            else
-                rc = VERR_ADDRESS_TOO_BIG;
-            pMemMap->release();
-        }
-        else
+                MmUnmapLockedPages(pv, pMemNtToMap->apMdls[0]);
+            }
+        }
+        __except(EXCEPTION_EXECUTE_HANDLER)
+        {
+            /* nothing */
             rc = VERR_MAP_FAILED;
-    }
+        }
+    }
+    else
+    {
+        AssertReturn(   pMemNtToMap->Core.enmType == RTR0MEMOBJTYPE_PHYS
+                     && !pMemNtToMap->Core.u.Phys.fAllocated, VERR_INTERNAL_ERROR);
+
+        /* cannot map phys mem to user space (yet). */
+        if (R0Process != NIL_RTR0PROCESS)
+            return VERR_NOT_SUPPORTED;
+
+        /** @todo uAlignment */
+        /** @todo How to set the protection on the pages? */
+        PHYSICAL_ADDRESS Phys;
+        Phys.QuadPart = pMemNtToMap->Core.u.Phys.PhysBase;
+        void *pv = MmMapIoSpace(Phys, pMemNtToMap->Core.cb, MmCached); /** @todo add cache type to fProt. */
+        if (pv)
+        {
+            PRTR0MEMOBJNT pMemNt = (PRTR0MEMOBJNT)rtR0MemObjNew(sizeof(*pMemNt), RTR0MEMOBJTYPE_MAPPING, pv,
+                                                                pMemNtToMap->Core.cb);
+            if (pMemNt)
+            {
+                pMemNt->Core.u.Mapping.R0Process = R0Process;
+                *ppMem = &pMemNt->Core;
+                return VINF_SUCCESS;
+            }
+
+            rc = VERR_NO_MEMORY;
+            MmUnmapIoSpace(pv, pMemNtToMap->Core.cb);
+        }
+    }
+
+    NOREF(uAlignment); NOREF(fProt);
     return rc;
 }
@@ -591,118 +685,53 @@
 
 
+int rtR0MemObjNativeMapKernel(PPRTR0MEMOBJINTERNAL ppMem, RTR0MEMOBJ pMemToMap, void *pvFixed, size_t uAlignment, unsigned fProt)
+{
+    return rtR0MemObjNtMap(ppMem, pMemToMap, pvFixed, uAlignment, fProt, NIL_RTR0PROCESS);
+}
+
+
 int rtR0MemObjNativeMapUser(PPRTR0MEMOBJINTERNAL ppMem, RTR0MEMOBJ pMemToMap, RTR3PTR R3PtrFixed, size_t uAlignment, unsigned fProt, RTR0PROCESS R0Process)
 {
-    /*
-     * Must have a memory descriptor.
-     */
-    int rc = VERR_INVALID_PARAMETER;
-    PRTR0MEMOBJNT pMemToMapDarwin = (PRTR0MEMOBJNT)pMemToMap;
-    if (pMemToMapDarwin->pMemDesc)
-    {
-        IOMemoryMap *pMemMap = pMemToMapDarwin->pMemDesc->map((task_t)R0Process, kIOMapAnywhere,
-                                                              kIOMapAnywhere | kIOMapDefaultCache);
-        if (pMemMap)
-        {
-            IOVirtualAddress VirtAddr = pMemMap->getVirtualAddress();
-            void *pv = (void *)(uintptr_t)VirtAddr;
-            if ((uintptr_t)pv == VirtAddr)
-            {
-                /*
-                 * Create the IPRT memory object.
-                 */
-                PRTR0MEMOBJNT pMemNt = (PRTR0MEMOBJNT)rtR0MemObjNew(sizeof(*pMemNt), RTR0MEMOBJTYPE_MAPPING,
-                                                                                pv, pMemToMapDarwin->Core.cb);
-                if (pMemNt)
-                {
-                    pMemNt->Core.u.Mapping.R0Process = R0Process;
-                    pMemNt->pMemMap = pMemMap;
-                    *ppMem = &pMemNt->Core;
-                    return VINF_SUCCESS;
-                }
-
-                rc = VERR_NO_MEMORY;
-            }
-            else
-                rc = VERR_ADDRESS_TOO_BIG;
-            pMemMap->release();
-        }
-        else
-            rc = VERR_MAP_FAILED;
-    }
-    return rc;
-}
-
-
-RTHCPHYS rtR0MemObjNativeGetPagePhysAddr(PRTR0MEMOBJINTERNAL pMem, unsigned iPage)
-{
-    RTHCPHYS            PhysAddr;
-    PRTR0MEMOBJNT   pMemNt = (PRTR0MEMOBJNT)pMem;
-
-#ifdef USE_VM_MAP_WIRE
-    /*
-     * Locked memory doesn't have a memory descriptor and
-     * needs to be handled differently.
-     */
-    if (pMemNt->Core.enmType == RTR0MEMOBJTYPE_LOCK)
-    {
-        ppnum_t PgNo;
-        if (pMemNt->Core.u.Lock.R0Process == NIL_RTR0PROCESS)
-            PgNo = pmap_find_phys(kernel_pmap, (uintptr_t)pMemNt->Core.pv + iPage * PAGE_SIZE);
-        else
-        {
-            /*
-             * From what I can tell, Apple seems to have locked up the all the
-             * available interfaces that could help us obtain the pmap_t of a task
-             * or vm_map_t.
-             *
-             * So, we'll have to figure out where in the vm_map_t  structure it is
-             * and read it our selves. ASSUMING that kernel_pmap is pointed to by
-             * kernel_map->pmap, we scan kernel_map to locate the structure offset.
-             * Not nice, but it will hopefully do the job in a reliable manner...
-             *
-             * (get_task_pmap, get_map_pmap or vm_map_pmap is what we really need btw.)
-             */
-            static int s_offPmap = -1;
-            if (RT_UNLIKELY(s_offPmap == -1))
-            {
-                pmap_t const *p = (pmap_t *)kernel_map;
-                pmap_t const * const pEnd = p + 64;
-                for (; p < pEnd; p++)
-                    if (*p == kernel_pmap)
-                    {
-                        s_offPmap = (uintptr_t)p - (uintptr_t)kernel_map;
-                        break;
-                    }
-                AssertReturn(s_offPmap >= 0, NIL_RTHCPHYS);
-            }
-            pmap_t Pmap = *(pmap_t *)((uintptr_t)get_task_map((task_t)pMemNt->Core.u.Lock.R0Process) + s_offPmap);
-            PgNo = pmap_find_phys(Pmap, (uintptr_t)pMemNt->Core.pv + iPage * PAGE_SIZE);
-        }
-
-        AssertReturn(PgNo, NIL_RTHCPHYS);
-        PhysAddr = (RTHCPHYS)PgNo << PAGE_SHIFT;
-        Assert((PhysAddr >> PAGE_SHIFT) == PgNo);
-    }
-    else
-#endif /* USE_VM_MAP_WIRE */
-    {
-        /*
-         * Get the memory descriptor.
-         */
-        IOMemoryDescriptor *pMemDesc = pMemNt->pMemDesc;
-        if (!pMemDesc)
-            pMemDesc = pMemNt->pMemMap->getMemoryDescriptor();
-        AssertReturn(pMemDesc, NIL_RTHCPHYS);
-
-        /*
-         * If we've got a memory descriptor, use getPhysicalSegment64().
-         */
-        addr64_t Addr = pMemDesc->getPhysicalSegment64(iPage * PAGE_SIZE, NULL);
-        AssertMsgReturn(Addr, ("iPage=%u\n", iPage), NIL_RTHCPHYS);
-        PhysAddr = Addr;
-        AssertMsgReturn(PhysAddr == Addr, ("PhysAddr=%VHp Addr=%RX64\n", PhysAddr, (uint64_t)Addr), NIL_RTHCPHYS);
-    }
-
-    return PhysAddr;
-}
-
+    AssertReturn(R0Process == RTR0ProcHandleSelf(), VERR_NOT_SUPPORTED);
+    return rtR0MemObjNtMap(ppMem, pMemToMap, (void *)R3PtrFixed, uAlignment, fProt, R0Process);
+}
+
+
+RTHCPHYS rtR0MemObjNativeGetPagePhysAddr(PRTR0MEMOBJINTERNAL pMem, size_t iPage)
+{
+    PRTR0MEMOBJNT pMemNt = (PRTR0MEMOBJNT)pMem;
+
+    if (pMemNt->cMdls)
+    {
+        if (pMemNt->cMdls == 1)
+        {
+            PPFN_NUMBER paPfns = MmGetMdlPfnArray(pMemNt->apMdls[0]);
+            return (RTHCPHYS)paPfns[iPage] << PAGE_SHIFT;
+        }
+
+        size_t iMdl = iPage / (MAX_LOCK_MEM_SIZE >> PAGE_SHIFT);
+        size_t iMdlPfn = iPage % (MAX_LOCK_MEM_SIZE >> PAGE_SHIFT);
+        PPFN_NUMBER paPfns = MmGetMdlPfnArray(pMemNt->apMdls[iMdl]);
+        return (RTHCPHYS)paPfns[iMdlPfn] << PAGE_SHIFT;
+    }
+
+    switch (pMemNt->Core.enmType)
+    {
+        case RTR0MEMOBJTYPE_MAPPING:
+            return rtR0MemObjNativeGetPagePhysAddr(pMemNt->Core.uRel.Child.pParent, iPage);
+
+        case RTR0MEMOBJTYPE_PHYS:
+            return pMemNt->Core.u.Phys.PhysBase + (iPage << PAGE_SHIFT);
+
+        case RTR0MEMOBJTYPE_PAGE:
+        case RTR0MEMOBJTYPE_PHYS_NC:
+        case RTR0MEMOBJTYPE_LOW:
+        case RTR0MEMOBJTYPE_CONT:
+        case RTR0MEMOBJTYPE_LOCK:
+        default:
+            AssertMsgFailed(("%d\n", pMemNt->Core.enmType));
+        case RTR0MEMOBJTYPE_RES_VIRT:
+            return NIL_RTHCPHYS;
+    }
+}
+
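
Aside (not part of the changeset): the lock path above splits large regions into MDLs of at most MAX_LOCK_MEM_SIZE bytes, so rtR0MemObjNativeGetPagePhysAddr recovers a page's MDL and PFN slot with a simple divide/modulo. A standalone sketch of that arithmetic; the chunk size used here is an assumed placeholder, the real constant is defined at the top of memobj-r0drv-nt.cpp:

    /* Illustration of the multi-MDL page lookup used above.  The
     * SKETCH_* values are assumptions for this sketch only. */
    #include <stddef.h>

    #define SKETCH_PAGE_SHIFT        12                  /* 4KB pages */
    #define SKETCH_MAX_LOCK_MEM_SIZE (32 * 1024 * 1024)  /* assumed chunk size */

    static void sketchPageToMdl(size_t iPage, size_t *piMdl, size_t *piMdlPfn)
    {
        const size_t cPagesPerMdl = SKETCH_MAX_LOCK_MEM_SIZE >> SKETCH_PAGE_SHIFT;
        *piMdl    = iPage / cPagesPerMdl;   /* index into apMdls[] */
        *piMdlPfn = iPage % cPagesPerMdl;   /* index into that MDL's PFN array */
    }

The physical address then comes out as (RTHCPHYS)MmGetMdlPfnArray(apMdls[iMdl])[iMdlPfn] << PAGE_SHIFT, matching the hunk above.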
  • trunk/src/VBox/Runtime/r0drv/nt/the-nt-kernel.h

(r4071 → r4219)

@@ -45,4 +45,5 @@
 #endif
 
+#include <iprt/param.h>
 #ifndef PAGE_OFFSET_MASK
 # define PAGE_OFFSET_MASK (PAGE_SIZE - 1)
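
Aside (not part of the changeset): the new <iprt/param.h> include supplies PAGE_SIZE and PAGE_SHIFT, and the PAGE_OFFSET_MASK fallback is the usual in-page offset mask. A self-contained sketch of what such a mask is used for; the hard-coded 4KB page size is an assumption for illustration:

    #include <stdint.h>

    #define SKETCH_PAGE_SIZE        4096u                      /* assumed 4KB pages */
    #define SKETCH_PAGE_OFFSET_MASK (SKETCH_PAGE_SIZE - 1)

    /* Split an address into the offset within its page... */
    static uintptr_t sketchPageOffset(const void *pv)
    {
        return (uintptr_t)pv & SKETCH_PAGE_OFFSET_MASK;
    }

    /* ...and the page-aligned base address. */
    static uintptr_t sketchPageBase(const void *pv)
    {
        return (uintptr_t)pv & ~(uintptr_t)SKETCH_PAGE_OFFSET_MASK;
    }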