VirtualBox

Changeset 4154 in vbox for trunk/src/VBox


Timestamp:
Aug 15, 2007 2:48:53 AM
Author:
vboxsync
svn:sync-xref-src-repo-rev:
23641
Message:

work in progress. (bed time)

File:
1 edited

  • trunk/src/VBox/Runtime/r0drv/nt/memobj-r0drv-nt.cpp

    r4136 r4154  
    5757    /** The core structure. */
    5858    RTR0MEMOBJINTERNAL  Core;
     59#ifndef IPRT_TARGET_NT4
     60    /** Set if the memory was allocated with MmAllocatePagesForMdl(). */
     61    bool                fAllocatedPagesForMdl;
     62#endif
    5963    /** The number of PMDLs (memory descriptor lists) in the array. */
    6064    unsigned            cMdls;
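The new fAllocatedPagesForMdl field records that the object's pages came from MmAllocatePagesForMdl(), an API that only exists on Windows 2000 and later, which is why both the field and every code path testing it sit behind IPRT_TARGET_NT4 conditionals. A minimal sketch of the allocate/map/free round trip the flag implies (an illustration of the DDK calls, not the committed code):

    PHYSICAL_ADDRESS Zero;
    PHYSICAL_ADDRESS HighAddr;
    Zero.QuadPart     = 0;
    HighAddr.QuadPart = _4G - 1;                    /* pages below 4GB */
    PMDL pMdl = MmAllocatePagesForMdl(Zero, HighAddr, Zero /* SkipBytes */, cb);
    if (pMdl)
    {
        void *pv = MmMapLockedPagesSpecifyCache(pMdl, KernelMode, MmCached,
                                                NULL /* any address */,
                                                FALSE /* no bug check */, NormalPagePriority);
        if (pv)
        {
            /* ... use the memory ... */
            MmUnmapLockedPages(pv, pMdl);           /* undo the mapping first */
        }
        MmFreePagesFromMdl(pMdl);                   /* frees the pages only... */
        ExFreePool(pMdl);                           /* ...the MDL itself is freed separately */
    }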
     
    6973
    7074    /*
    71      * Release any memory that we've allocated or locked.
     75     * Deal with it on a per-type basis (just as a variation).
    7276     */
    7377    switch (pMemNt->Core.enmType)
    7478    {
    7579        case RTR0MEMOBJTYPE_LOW:
     80#ifndef IPRT_TARGET_NT4
     81            if (pMemNt->fAllocatedPagesForMdl)
     82            {
     83                Assert(pMemNt->Core.pv && pMemNt->cMdls == 1 && pMemNt->apMdls[0]);
     84                MmUnmapLockedPages(pMemNt->Core.pv, pMemNt->apMdls[0]);
     85                pMemNt->Core.pv = NULL;
     86
     87                MmFreePagesFromMdl(pMemNt->apMdls[0]);
     88                pMemNt->apMdls[0] = NULL;
     89                pMemNt->cMdls = 0;
     90                break;
     91            }
     92#endif
     93            /* fall thru */
    7694        case RTR0MEMOBJTYPE_PAGE:
     95            Assert(pMemNt->Core.pv);
     96            ExFreePool(pMemNt->Core.pv);
     97            pMemNt->Core.pv = NULL;
     98
     99            Assert(pMemNt->cMdls == 1 && pMemNt->apMdls[0]);
     100            IoFreeMdl(pMemNt->apMdls[0]);
     101            pMemNt->apMdls[0] = NULL;
     102            pMemNt->cMdls = 0;
    77103            break;
    78104
    79105        case RTR0MEMOBJTYPE_CONT:
     106            Assert(pMemNt->Core.pv);
     107            MmFreeContiguousMemory(pMemNt->Core.pv);
     108            pMemNt->Core.pv = NULL;
     109
     110            Assert(pMemNt->cMdls == 1 && pMemNt->apMdls[0]);
     111            IoFreeMdl(pMemNt->apMdls[0]);
     112            pMemNt->apMdls[0] = NULL;
     113            pMemNt->cMdls = 0;
     114            break;
     115
     116        case RTR0MEMOBJTYPE_PHYS:
     117        case RTR0MEMOBJTYPE_PHYS_NC:
     118#ifndef IPRT_TARGET_NT4
     119            if (pMemNt->fAllocatedPagesForMdl)
     120            {
     121                MmFreePagesFromMdl(pMemNt->apMdls[0]);
     122                pMemNt->apMdls[0] = NULL;
     123                pMemNt->cMdls = 0;
     124            }
     125#endif
    80126            break;
    81127
    82128        case RTR0MEMOBJTYPE_LOCK:
    83129            for (unsigned i = 0; i < pMemNt->cMdls; i++)
     130            {
    84131                MmUnlockPages(pMemNt->apMdls[i]);
     132                IoFreeMdl(pMemNt->apMdls[i]);
     133                pMemNt->apMdls[i] = NULL;
     134            }
    85135            break;
    86136
    87         case RTR0MEMOBJTYPE_PHYS:
    88             Assert(!pMemNt->Core.u.Phys.fAllocated);
    89             break;
    90 
    91137        case RTR0MEMOBJTYPE_RES_VIRT:
     138            /** @todo Free the reserved virtual memory here; the kernel
     139             *  (R0Process == NIL_RTR0PROCESS) and user process cases
     140             *  will need different treatment. */
    92145            AssertMsgFailed(("RTR0MEMOBJTYPE_RES_VIRT\n"));
    93146            return VERR_INTERNAL_ERROR;
     
    95148
    96149        case RTR0MEMOBJTYPE_MAPPING:
    97             /* nothing to do here. */
     150        {
     151            Assert(pMemNt->Core.pv && pMemNt->cMdls == 1 && pMemNt->apMdls[0]);
     152            PRTR0MEMOBJNT pMemNtParent = (PRTR0MEMOBJNT)pMemNt->Core.uRel.Child.pParent;
     153            Assert(pMemNtParent);
     154            Assert(pMemNtParent->cMdls == 1 && pMemNtParent->apMdls[0]);
     155            MmUnmapLockedPages(pMemNt->Core.pv, pMemNtParent->apMdls[0]);
     156            pMemNt->Core.pv = NULL;
    98157            break;
     158        }
    99159
    100160        default:
     
    103163    }
    104164
    105     /*
    106      * Free any MDLs.
    107      */
    108     for (unsigned i = 0; i < pMemNt->cMdls; i++)
    109     {
    110         MmUnlockPages(pMemNt->apMdl[i]);
    111         IoFreeMdl(pMemNt->u.locked.papMdl[i]);
    112     }
    113165    return VINF_SUCCESS;
    114166}
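For context, the destructor above is reached through the generic IPRT memory object API rather than called directly. A hedged ring-0 usage sketch (types and functions from iprt/memobj.h):

    #include <iprt/memobj.h>

    RTR0MEMOBJ hMemObj;
    int rc = RTR0MemObjAllocPage(&hMemObj, _64K, false /* fExecutable */);
    if (RT_SUCCESS(rc))
    {
        void *pv = RTR0MemObjAddress(hMemObj);   /* the ring-0 mapping of the pages */
        /* ... use pv ... */
        RTR0MemObjFree(hMemObj, false /* fFreeMappings */);  /* lands in the switch above */
    }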
     
    118170{
    119171    /*
    120      * Try allocate the memory and create it's IOMemoryDescriptor first.
     172     * Try to allocate the memory and create an MDL for it so
     173     * we can query the physical addresses and do mappings later
     174     * without running into out-of-memory conditions and similar problems.
    121175     */
    122176    int rc = VERR_NO_PAGE_MEMORY;
    123     AssertCompile(sizeof(IOPhysicalAddress) == 4);
    124     void *pv = IOMallocAligned(cb, PAGE_SIZE);
     177    void *pv = ExAllocatePoolWithTag(NonPagedPool, cb, IPRT_NT_POOL_TAG);
    125178    if (pv)
    126179    {
    127         IOMemoryDescriptor *pMemDesc = IOMemoryDescriptor::withAddress((vm_address_t)pv, cb, kIODirectionInOut, kernel_task);
    128         if (pMemDesc)
    129         {
     180        PMDL pMdl = IoAllocateMdl(pv, cb, FALSE, FALSE, NULL);
     181        if (pMdl)
     182        {
     183            MmBuildMdlForNonPagedPool(pMdl);
     184            /** @todo if (fExecutable) */
     185
    130186            /*
    131187             * Create the IPRT memory object.
     
    134190            if (pMemNt)
    135191            {
    136                 pMemNt->pMemDesc = pMemDesc;
     192                pMemNt->cMdls = 1;
     193                pMemNt->apMdls[0] = pMdl;
    137194                *ppMem = &pMemNt->Core;
    138195                return VINF_SUCCESS;
     
    140197
    141198            rc = VERR_NO_MEMORY;
    142             pMemDesc->release();
    143         }
    144         else
    145             rc = VERR_MEMOBJ_INIT_FAILED;
    146         IOFreeAligned(pv, cb);
     199            IoFreeMdl(pMdl);
     200        }
     201        ExFreePool(pv);
    147202    }
    148203    return rc;
     
    152207int rtR0MemObjNativeAllocLow(PPRTR0MEMOBJINTERNAL ppMem, size_t cb, bool fExecutable)
    153208{
    154 #if 1
    155     /*
    156      * Allocating 128KB for the low page pool can bit a bit exhausting on the kernel,
    157      * it frequnetly causes the entire box to lock up on startup.
     209    /*
     210     * See if we get lucky first...
     211     * (We could probably just assume we're lucky on NT4.)
     212     */
     213    int rc = rtR0MemObjNativeAllocPage(ppMem, cb, fExecutable);
     214    if (RT_SUCCESS(rc))
     215    {
     216        size_t iPage = cb >> PAGE_SHIFT;
     217        while (iPage-- > 0)
     218            if (rtR0MemObjNativeGetPagePhysAddr(*ppMem, iPage) >= _4G)
     219            {
     220                rc = VERR_NO_MEMORY;
     221                break;
     222            }
     223        if (RT_SUCCESS(rc))
     224            return rc;
     225
     226        /* The following ASSUMES that rtR0MemObjNativeAllocPage returns a completed object. */
     227        RTR0MemObjFree(*ppMem, false);
     228        *ppMem = NULL;
     229    }
     230
     231#ifndef IPRT_TARGET_NT4
     232    /*
     233     * Use MmAllocatePagesForMdl to specify the range of physical addresses we wish to use.
     234     */
     235    PHYSICAL_ADDRESS Zero;
     236    PHYSICAL_ADDRESS HighAddr;
     237    Zero.QuadPart = 0;
     238    HighAddr.QuadPart = _4G - 1;
     239    PMDL pMdl = MmAllocatePagesForMdl(Zero, HighAddr, Zero, cb);
     240    if (pMdl)
     241    {
     242        if (MmGetMdlByteCount(pMdl) >= cb)
     243        {
     244            __try
     245            {
     246                void *pv = MmMapLockedPagesSpecifyCache(pMdl, KernelMode, MmCached, NULL /* no base address */,
     247                                                        FALSE /* no bug check on failure */, NormalPagePriority);
     248                if (pv)
     249                {
     250                    PRTR0MEMOBJNT pMemNt = (PRTR0MEMOBJNT)rtR0MemObjNew(sizeof(*pMemNt), RTR0MEMOBJTYPE_LOW, pv, cb);
     251                    if (pMemNt)
     252                    {
     253                        pMemNt->fAllocatedPagesForMdl = true;
     254                        pMemNt->cMdls = 1;
     255                        pMemNt->apMdls[0] = pMdl;
     256                        *ppMem = &pMemNt->Core;
     257                        return VINF_SUCCESS;
     258                    }
     259                    MmUnmapLockedPages(pv, pMdl);
     260                }
     261            }
     262            __except(EXCEPTION_EXECUTE_HANDLER)
     263            {
     264                /* nothing */
     265            }
     266        }
     267        MmFreePagesFromMdl(pMdl);
     268    }
     269#endif /* !IPRT_TARGET_NT4 */
     270
     271    /*
     272     * Fall back on contiguous memory...
     273     */
     274    return rtR0MemObjNativeAllocCont(ppMem, cb, fExecutable);
     275}
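The "lucky" path above depends on rtR0MemObjNativeGetPagePhysAddr() to verify that every page landed below 4GB. For an object backed by a single MDL that lookup presumably boils down to indexing the MDL's PFN array; a hedged sketch of the idea (helper name hypothetical):

    /* Hypothetical helper: the physical address of page iPage of an
     * object described by one MDL. */
    static RTHCPHYS rtR0MemObjNtPagePhys(PRTR0MEMOBJNT pMemNt, size_t iPage)
    {
        PPFN_NUMBER paPfns = MmGetMdlPfnArray(pMemNt->apMdls[0]);
        return (RTHCPHYS)paPfns[iPage] << PAGE_SHIFT;
    }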
     276
     277
     278/**
     279 * Internal worker for rtR0MemObjNativeAllocCont(), rtR0MemObjNativeAllocPhys()
     280 * and rtR0MemObjNativeAllocPhysNC() that takes a max physical address in addition
     281 * to what rtR0MemObjNativeAllocCont() does.
     282 *
     283 * @returns IPRT status code.
     284 * @param   ppMem           Where to store the pointer to the ring-0 memory object.
     285 * @param   cb              The size.
     286 * @param   fExecutable     Whether the mapping should be executable or not.
     287 * @param   PhysHighest     The highest physical address for the pages in allocation.
     288 */
     289static int rtR0MemObjNativeAllocContEx(PPRTR0MEMOBJINTERNAL ppMem, size_t cb, bool fExecutable, RTHCPHYS PhysHighest)
     290{
     291    /*
     292     * Allocate the memory and create an MDL for it.
     293     */
     294    PHYSICAL_ADDRESS PhysAddrHighest;
     295    PhysAddrHighest.QuadPart = PhysHighest;
     296    void *pv = MmAllocateContiguousMemory(cb, PhysAddrHighest);
     297    if (!pv)
     298        return VERR_NO_MEMORY;
     299
     300    PMDL pMdl = IoAllocateMdl(pv, cb, FALSE, FALSE, NULL);
     301    if (pMdl)
     302    {
     303        MmBuildMdlForNonPagedPool(pMdl);
     304        /** @todo fExecutable */
     305
     306        PRTR0MEMOBJNT pMemNt = (PRTR0MEMOBJNT)rtR0MemObjNew(sizeof(*pMemNt), RTR0MEMOBJTYPE_CONT, pv, cb);
     307        if (pMemNt)
     308        {
     309            pMemNt->Core.u.Cont.Phys = (RTHCPHYS)*MmGetMdlPfnArray(pMdl) << PAGE_SHIFT;
     310            pMemNt->cMdls = 1;
     311            pMemNt->apMdls[0] = pMdl;
     312            *ppMem = &pMemNt->Core;
     313            return VINF_SUCCESS;
     314        }
     315
     316        IoFreeMdl(pMdl);
     317    }
     318    MmFreeContiguousMemory(pv);
     319    return VERR_NO_MEMORY;
     320}
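Because MmAllocateContiguousMemory() guarantees physically contiguous pages, the base address of the whole block falls out of the first entry in the MDL's PFN array, which is what the u.Cont.Phys assignment above computes. A worked example assuming 4KB pages (PAGE_SHIFT = 12):

    /* first PFN in the MDL: */
    PFN_NUMBER Pfn  = 0x12345;
    RTHCPHYS   Phys = (RTHCPHYS)Pfn << PAGE_SHIFT;   /* = 0x12345000 */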
     321
     322
     323int rtR0MemObjNativeAllocCont(PPRTR0MEMOBJINTERNAL ppMem, size_t cb, bool fExecutable)
     324{
     325    return rtR0MemObjNativeAllocContEx(ppMem, cb, fExecutable, _4G-1);
     326}
     327
     328
     329int rtR0MemObjNativeAllocPhys(PPRTR0MEMOBJINTERNAL ppMem, size_t cb, RTHCPHYS PhysHighest)
     330{
     331#ifndef IPRT_TARGET_NT4
     332    /*
     333     * Try and see if we're lucky and get a contiguous chunk from MmAllocatePagesForMdl.
    158334     *
    159      * So, try allocate the memory using IOMallocAligned first and if we get any high
    160      * physical memory we'll release it and fall back on IOMAllocContiguous.
    161      */
    162     int rc = VERR_NO_PAGE_MEMORY;
    163     AssertCompile(sizeof(IOPhysicalAddress) == 4);
    164     void *pv = IOMallocAligned(cb, PAGE_SIZE);
    165     if (pv)
    166     {
    167         IOMemoryDescriptor *pMemDesc = IOMemoryDescriptor::withAddress((vm_address_t)pv, cb, kIODirectionInOut, kernel_task);
    168         if (pMemDesc)
    169         {
    170             /*
    171              * Check if it's all below 4GB.
    172              */
    173             for (IOByteCount off = 0; off < cb; off += PAGE_SIZE)
    174             {
    175                 addr64_t Addr = pMemDesc->getPhysicalSegment64(off, NULL);
    176                 if (Addr > (uint32_t)(_4G - PAGE_SIZE))
     335     * If the allocation is big, the chances are *probably* not very good. The current
     336     * max limit is kind of random.
     337     */
     338    if (cb < _128K)
     339    {
     340        PHYSICAL_ADDRESS Zero;
     341        PHYSICAL_ADDRESS HighAddr;
     342        Zero.QuadPart = 0;
     343        HighAddr.QuadPart = _4G - 1;
     344        PMDL pMdl = MmAllocatePagesForMdl(Zero, HighAddr, Zero, cb);
     345        if (pMdl)
     346        {
     347            if (MmGetMdlByteCount(pMdl) >= cb)
     348            {
     349                PPFN_NUMBER paPfns = MmGetMdlPfnArray(pMdl);
     350                PFN_NUMBER Pfn = paPfns[0] + 1;
     351                const size_t cPages = cb >> PAGE_SHIFT;
     352                size_t iPage;
     353                for (iPage = 1; iPage < cPages; iPage++, Pfn++)
     354                    if (paPfns[iPage] != Pfn)
     355                        break;
     356                if (iPage >= cPages)
    177357                {
    178                     /* Ok, we failed, fall back on contiguous allocation. */
    179                     pMemDesc->release();
    180                     IOFreeAligned(pv, cb);
    181                     return rtR0MemObjNativeAllocCont(ppMem, cb, fExecutable);
     358                    PRTR0MEMOBJNT pMemNt = (PRTR0MEMOBJNT)rtR0MemObjNew(sizeof(*pMemNt), RTR0MEMOBJTYPE_PHYS, NULL, cb);
     359                    if (pMemNt)
     360                    {
     361                        pMemNt->Core.u.Phys.fAllocated = true;
     362                        pMemNt->Core.u.Phys.PhysBase = (RTHCPHYS)paPfns[0] << PAGE_SHIFT;
     363                        pMemNt->fAllocatedPagesForMdl = true;
     364                        pMemNt->cMdls = 1;
     365                        pMemNt->apMdls[0] = pMdl;
     366                        *ppMem = &pMemNt->Core;
     367                        return VINF_SUCCESS;
     368                    }
    182369                }
    183370            }
    184 
    185             /*
    186              * Create the IPRT memory object.
    187              */
    188             PRTR0MEMOBJNT pMemNt = (PRTR0MEMOBJNT)rtR0MemObjNew(sizeof(*pMemNt), RTR0MEMOBJTYPE_LOW, pv, cb);
     371            MmFreePagesFromMdl(pMdl);
     372        }
     373    }
     374#endif
     375
     376    return rtR0MemObjNativeAllocContEx(ppMem, cb, false, PhysHighest);
     377}
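MmAllocatePagesForMdl() makes no contiguity promise, so the loop above only accepts the allocation when every page frame number follows its predecessor. The same check as a self-contained helper (hypothetical name, illustration only):

    /* Hypothetical helper: true if the first cPages frames in pMdl are
     * physically contiguous. */
    static bool rtR0MemObjNtIsMdlContiguous(PMDL pMdl, size_t cPages)
    {
        PPFN_NUMBER paPfns = MmGetMdlPfnArray(pMdl);
        for (size_t iPage = 1; iPage < cPages; iPage++)
            if (paPfns[iPage] != paPfns[0] + iPage)
                return false;
        return true;
    }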
     378
     379
     380int rtR0MemObjNativeAllocPhysNC(PPRTR0MEMOBJINTERNAL ppMem, size_t cb, RTHCPHYS PhysHighest)
     381{
     382#ifndef IPRT_TARGET_NT4
     383    PHYSICAL_ADDRESS Zero;
     384    PHYSICAL_ADDRESS HighAddr;
     385    Zero.QuadPart = 0;
     386    HighAddr.QuadPart = _4G - 1;
     387    PMDL pMdl = MmAllocatePagesForMdl(Zero, HighAddr, Zero, cb);
     388    if (pMdl)
     389    {
     390        if (MmGetMdlByteCount(pMdl) >= cb)
     391        {
     392            PRTR0MEMOBJNT pMemNt = (PRTR0MEMOBJNT)rtR0MemObjNew(sizeof(*pMemNt), RTR0MEMOBJTYPE_PHYS_NC, NULL, cb);
    189393            if (pMemNt)
    190394            {
    191                 pMemNt->pMemDesc = pMemDesc;
     395                pMemNt->fAllocatedPagesForMdl = true;
     396                pMemNt->cMdls = 1;
     397                pMemNt->apMdls[0] = pMdl;
    192398                *ppMem = &pMemNt->Core;
    193399                return VINF_SUCCESS;
    194400            }
    195 
    196             rc = VERR_NO_MEMORY;
    197             pMemDesc->release();
    198         }
    199         else
    200             rc = VERR_MEMOBJ_INIT_FAILED;
    201         IOFreeAligned(pv, cb);
    202     }
    203     return rc;
    204 
    205 #else
    206 
    207     /*
    208      * IOMallocContiguous is the most suitable API.
    209      */
    210     return rtR0MemObjNativeAllocCont(ppMem, cb, fExecutable);
    211 #endif
    212 }
    213 
    214 
    215 int rtR0MemObjNativeAllocCont(PPRTR0MEMOBJINTERNAL ppMem, size_t cb, bool fExecutable)
    216 {
    217     /*
    218      * Try allocate the memory and create it's IOMemoryDescriptor first.
    219      */
    220     int rc = VERR_NO_CONT_MEMORY;
    221     AssertCompile(sizeof(IOPhysicalAddress) == 4);
    222     void *pv = IOMallocContiguous(cb, PAGE_SIZE, NULL);
    223     if (pv)
    224     {
    225         IOMemoryDescriptor *pMemDesc = IOMemoryDescriptor::withAddress((vm_address_t)pv, cb, kIODirectionInOut, kernel_task);
    226         if (pMemDesc)
    227         {
    228             /* a bit of useful paranoia. */
    229             addr64_t PhysAddr = pMemDesc->getPhysicalSegment64(0, NULL);
    230             Assert(PhysAddr == pMemDesc->getPhysicalAddress());
    231             if (    PhysAddr > 0
    232                 &&  PhysAddr <= _4G
    233                 &&  PhysAddr + cb <= _4G)
    234             {
    235                 /*
    236                  * Create the IPRT memory object.
    237                  */
    238                 PRTR0MEMOBJNT pMemNt = (PRTR0MEMOBJNT)rtR0MemObjNew(sizeof(*pMemNt), RTR0MEMOBJTYPE_CONT, pv, cb);
    239                 if (pMemNt)
    240                 {
    241                     pMemNt->Core.u.Cont.Phys = PhysAddr;
    242                     pMemNt->pMemDesc = pMemDesc;
    243                     *ppMem = &pMemNt->Core;
    244                     return VINF_SUCCESS;
    245                 }
    246 
    247                 rc = VERR_NO_MEMORY;
    248             }
    249             else
    250             {
    251                 AssertMsgFailed(("PhysAddr=%llx\n", (unsigned long long)PhysAddr));
    252                 rc = VERR_INTERNAL_ERROR;
    253             }
    254             pMemDesc->release();
    255         }
    256         else
    257             rc = VERR_MEMOBJ_INIT_FAILED;
    258         IOFreeContiguous(pv, cb);
    259     }
    260     return rc;
    261 }
    262 
    263 
    264 int rtR0MemObjNativeAllocPhys(PPRTR0MEMOBJINTERNAL ppMem, size_t cb, RTHCPHYS PhysHighest)
    265 {
    266 #if 0 /* turned out IOMallocPhysical isn't exported yet. sigh. */
    267     /*
    268      * Try allocate the memory and create it's IOMemoryDescriptor first.
    269      * Note that IOMallocPhysical is not working correctly (it's ignoring the mask).
    270      */
    271 
    272     /* first calc the mask (in the hope that it'll be used) */
    273     IOPhysicalAddress PhysMask = ~(IOPhysicalAddress)PAGE_OFFSET_MASK;
    274     if (PhysHighest != NIL_RTHCPHYS)
    275     {
    276         PhysMask = ~(IOPhysicalAddress)0;
    277         while (PhysMask > PhysHighest)
    278             PhysMask >>= 1;
    279         AssertReturn(PhysMask + 1 < cb, VERR_INVALID_PARAMETER);
    280         PhysMask &= ~(IOPhysicalAddress)PAGE_OFFSET_MASK;
    281     }
    282 
    283     /* try allocate physical memory. */
    284     int rc = VERR_NO_PHYS_MEMORY;
    285     mach_vm_address_t PhysAddr64 = IOMallocPhysical(cb, PhysMask);
    286     if (PhysAddr64)
    287     {
    288         IOPhysicalAddress PhysAddr = PhysAddr64;
    289         if (    PhysAddr == PhysAddr64
    290             &&  PhysAddr < PhysHighest
    291             &&  PhysAddr + cb <= PhysHighest)
    292         {
    293             /* create a descriptor. */
    294             IOMemoryDescriptor *pMemDesc = IOMemoryDescriptor::withPhysicalAddress(PhysAddr, cb, kIODirectionInOut);
    295             if (pMemDesc)
    296             {
    297                 Assert(PhysAddr == pMemDesc->getPhysicalAddress());
    298 
    299                 /*
    300                  * Create the IPRT memory object.
    301                  */
    302                 PRTR0MEMOBJNT pMemNt = (PRTR0MEMOBJNT)rtR0MemObjNew(sizeof(*pMemNt), RTR0MEMOBJTYPE_PHYS, NULL, cb);
    303                 if (pMemNt)
    304                 {
    305                     pMemNt->Core.u.Phys.PhysBase = PhysAddr;
    306                     pMemNt->Core.u.Phys.fAllocated = true;
    307                     pMemNt->pMemDesc = pMemDesc;
    308                     *ppMem = &pMemNt->Core;
    309                     return VINF_SUCCESS;
    310                 }
    311 
    312                 rc = VERR_NO_MEMORY;
    313                 pMemDesc->release();
    314             }
    315             else
    316                 rc = VERR_MEMOBJ_INIT_FAILED;
    317         }
    318         else
    319         {
    320             AssertMsgFailed(("PhysAddr=%#llx PhysAddr64=%#llx PhysHigest=%#llx\n", (unsigned long long)PhysAddr,
    321                              (unsigned long long)PhysAddr64, (unsigned long long)PhysHighest));
    322             rc = VERR_INTERNAL_ERROR;
    323         }
    324 
    325         IOFreePhysical(PhysAddr64, cb);
    326     }
    327 
    328     /*
    329      * Just in case IOMallocContiguous doesn't work right, we can try fall back
    330      * on a contiguous allcation.
    331      */
    332     if (rc == VERR_INTERNAL_ERROR || rc == VERR_NO_PHYS_MEMORY)
    333     {
    334         int rc2 = rtR0MemObjNativeAllocCont(ppMem, cb, false);
    335         if (RT_SUCCESS(rc2))
    336             rc = rc2;
    337     }
    338 
    339     return rc;
    340 
    341 #else
    342 
    343     return rtR0MemObjNativeAllocCont(ppMem, cb, false);
    344 #endif
    345 }
    346 
    347 
    348 int rtR0MemObjNativeAllocPhysNC(PPRTR0MEMOBJINTERNAL ppMem, size_t cb, RTHCPHYS PhysHighest)
    349 {
    350     /** @todo rtR0MemObjNativeAllocPhys / darwin. */
    351     return rtR0MemObjNativeAllocPhys(ppMem, cb, PhysHighest);
     401        }
     402        MmFreePagesFromMdl(pMdl);
     403    }
     404    return VERR_NO_MEMORY;
     405#else   /* IPRT_TARGET_NT4 */
     406    return VERR_NOT_SUPPORTED;
     407#endif  /* IPRT_TARGET_NT4 */
    352408}
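A PHYS_NC object carries no ring-0 mapping: its pages exist only as the MDL's PFN array, and a consumer is expected to query them page by page, e.g. when building a scatter/gather list. A hedged usage sketch with the public per-page accessor:

    size_t cPages = cb >> PAGE_SHIFT;
    for (size_t iPage = 0; iPage < cPages; iPage++)
    {
        RTHCPHYS Phys = RTR0MemObjGetPagePhysAddr(hMemObj, iPage);
        /* ... hand Phys to the device ... */
    }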
    353409
     
    358414     * Validate the address range.
    359415     */
    360     int rc = VERR_ADDRESS_TOO_BIG;
    361     IOPhysicalAddress PhysAddr = Phys;
    362     if (PhysAddr == Phys)
    363     {
    364         IOMemoryDescriptor *pMemDesc = IOMemoryDescriptor::withPhysicalAddress(PhysAddr, cb, kIODirectionInOut);
    365         if (pMemDesc)
    366         {
    367             Assert(PhysAddr == pMemDesc->getPhysicalAddress());
    368 
    369             /*
    370              * Create the IPRT memory object.
    371              */
    372             PRTR0MEMOBJNT pMemNt = (PRTR0MEMOBJNT)rtR0MemObjNew(sizeof(*pMemNt), RTR0MEMOBJTYPE_PHYS, NULL, cb);
    373             if (pMemNt)
    374             {
    375                 pMemNt->Core.u.Phys.PhysBase = PhysAddr;
    376                 pMemNt->Core.u.Phys.fAllocated = false;
    377                 pMemNt->pMemDesc = pMemDesc;
    378                 *ppMem = &pMemNt->Core;
    379                 return VINF_SUCCESS;
    380             }
    381 
    382             rc = VERR_NO_MEMORY;
    383             pMemDesc->release();
    384         }
    385     }
    386     else
    387         AssertMsgFailed(("%#llx\n", (unsigned long long)Phys));
    388     return rc;
     416    PFN_NUMBER Pfn = Phys >> PAGE_SHIFT;
     417    if (((RTHCPHYS)Pfn << PAGE_SHIFT) != Phys)
     418        return VERR_ADDRESS_TOO_BIG;
     419
     420    /*
     421     * Create the IPRT memory object.
     422     */
     423    PRTR0MEMOBJNT pMemNt = (PRTR0MEMOBJNT)rtR0MemObjNew(sizeof(*pMemNt), RTR0MEMOBJTYPE_PHYS, NULL, cb);
     424    if (pMemNt)
     425    {
     426        pMemNt->Core.u.Phys.PhysBase = Phys;
     427        pMemNt->Core.u.Phys.fAllocated = false;
     429        *ppMem = &pMemNt->Core;
     430        return VINF_SUCCESS;
     431    }
     432    return VERR_NO_MEMORY;
    389433}
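The shift-and-compare at the top of the function rejects two kinds of input at once: addresses that are not page aligned (the low bits are lost by the right shift) and addresses too large for a PFN_NUMBER (a 32-bit ULONG on x86, so the assignment truncates). Two illustrative values, assuming 4KB pages:

    RTHCPHYS PhysOk  = UINT64_C(0x0000000080001000); /* aligned and representable: accepted */
    RTHCPHYS PhysBad = UINT64_C(0x0000000080001234); /* low bits lost in the round trip:
                                                        rejected with VERR_ADDRESS_TOO_BIG  */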
    390434
     
    400444 * @param R0Process The process \a pv and \a cb refer to; NIL_RTR0PROCESS for kernel memory.
    401445 */
    402 static int rtR0MemObjNativeLock(PPRTR0MEMOBJINTERNAL ppMem, void *pv, size_t cb, task_t Task)
    403 {
    404 #ifdef USE_VM_MAP_WIRE
    405     vm_map_t Map = get_task_map(Task);
    406     Assert(Map);
    407 
    408     /*
    409      * First try lock the memory.
    410      */
    411     int rc = VERR_LOCK_FAILED;
    412     kern_return_t kr = vm_map_wire(get_task_map(Task),
    413                                    (vm_map_offset_t)pv,
    414                                    (vm_map_offset_t)pv + cb,
    415                                    VM_PROT_DEFAULT,
    416                                    0 /* not user */);
    417     if (kr == KERN_SUCCESS)
     446static int rtR0MemObjNtLock(PPRTR0MEMOBJINTERNAL ppMem, void *pv, size_t cb, RTR0PROCESS R0Process)
     447{
     448    /*
     449     * Calc the number of MDLs we need and allocate the memory object structure.
     450     */
     451    unsigned cMdls = (unsigned)(cb / MAX_LOCK_MEM_SIZE);
     452    if ((cb % MAX_LOCK_MEM_SIZE) > 0)
     453        cMdls++;
     454    PRTR0MEMOBJNT pMemNt = (PRTR0MEMOBJNT)rtR0MemObjNew(RT_OFFSETOF(RTR0MEMOBJNT, apMdls[cMdls]),
     455                                                        RTR0MEMOBJTYPE_LOCK, pv, cb);
     456    if (!pMemNt)
     457        return VERR_NO_MEMORY;
     458
     459    /*
     460     * Loop locking down the sub parts of the memory.
     461     */
     462    int         rc = VINF_SUCCESS;
     463    size_t      cbTotal = 0;
     464    uint8_t    *pb = (uint8_t *)pv;
     465    unsigned    iMdl;
     466    for (iMdl = 0; iMdl < cMdls; iMdl++)
    418467    {
    419468        /*
    420          * Create the IPRT memory object.
     469         * Calc the Mdl size and allocate it.
    421470         */
    422         PRTR0MEMOBJNT pMemNt = (PRTR0MEMOBJNT)rtR0MemObjNew(sizeof(*pMemNt), RTR0MEMOBJTYPE_LOCK, pv, cb);
    423         if (pMemNt)
    424         {
    425             pMemNt->Core.u.Lock.R0Process = (RTR0PROCESS)Task;
    426             *ppMem = &pMemNt->Core;
    427             return VINF_SUCCESS;
    428         }
    429 
    430         kr = vm_map_unwire(get_task_map(Task), (vm_map_offset_t)pv, (vm_map_offset_t)pv + cb, 0 /* not user */);
    431         Assert(kr == KERN_SUCCESS);
    432         rc = VERR_NO_MEMORY;
    433     }
    434 
    435 #else
    436 
    437     /*
    438      * Create a descriptor and try lock it (prepare).
    439      */
    440     int rc = VERR_MEMOBJ_INIT_FAILED;
    441     IOMemoryDescriptor *pMemDesc = IOMemoryDescriptor::withAddress((vm_address_t)pv, cb, kIODirectionInOut, Task);
    442     if (pMemDesc)
    443     {
    444         IOReturn IORet = pMemDesc->prepare(kIODirectionInOut);
    445         if (IORet == kIOReturnSuccess)
    446         {
    447             /*
    448              * Create the IPRT memory object.
    449              */
    450             PRTR0MEMOBJNT pMemNt = (PRTR0MEMOBJNT)rtR0MemObjNew(sizeof(*pMemNt), RTR0MEMOBJTYPE_LOCK, pv, cb);
    451             if (pMemNt)
    452             {
    453                 pMemNt->Core.u.Lock.R0Process = (RTR0PROCESS)Task;
    454                 pMemNt->pMemDesc = pMemDesc;
    455                 *ppMem = &pMemNt->Core;
    456                 return VINF_SUCCESS;
    457             }
    458 
    459             pMemDesc->complete();
     471        size_t cbCur = cb - cbTotal;
     472        if (cbCur > MAX_LOCK_MEM_SIZE)
     473            cbCur = MAX_LOCK_MEM_SIZE;
     474        AssertMsg(cbCur, ("cbCur: 0!\n"));
     475        PMDL pMdl = IoAllocateMdl(pb, cbCur, FALSE, FALSE, NULL);
     476        if (!pMdl)
     477        {
    460478            rc = VERR_NO_MEMORY;
    461         }
    462         else
     479            break;
     480        }
     481
     482        /*
     483         * Lock the pages.
     484         */
     485        __try
     486        {
     487            MmProbeAndLockPages(pMdl, R0Process == NIL_RTR0PROCESS ? KernelMode : UserMode, IoModifyAccess);
     488            pMemNt->apMdls[iMdl] = pMdl;
     489            pMemNt->cMdls++;
     490        }
     491        __except(EXCEPTION_EXECUTE_HANDLER)
     492        {
     493            IoFreeMdl(pMdl);
    463494            rc = VERR_LOCK_FAILED;
    464         pMemDesc->release();
    465     }
    466 #endif
    467     return rc;
    468 }
    469 
    470 
    471 int rtR0MemObjNativeLockUser(PPRTR0MEMOBJINTERNAL ppMem, void *pv, size_t cb, RTR0PROCESS R0Process)
    472 {
    473     return rtR0MemObjNativeLock(ppMem, pv, cb, (task_t)R0Process);
     495            break;
     496        }
     497
     498        /* next */
     499        cbTotal += cbCur;
     500        pb      += cbCur;
     501    }
     502    if (RT_SUCCESS(rc))
     503    {
     504        Assert(pMemNt->cMdls == cMdls);
     505        pMemNt->Core.u.Lock.R0Process = R0Process;
     506        *ppMem = &pMemNt->Core;
     507        return rc;
     508    }
     509
     510    /*
     511     * We failed, perform cleanups.
     512     */
     513    while (iMdl-- > 0)
     514    {
     515        MmUnlockPages(pMemNt->apMdls[iMdl]);
     516        IoFreeMdl(pMemNt->apMdls[iMdl]);
     517        pMemNt->apMdls[iMdl] = NULL;
     518    }
     519    rtR0MemObjDelete(pMemNt);
     520    return rc;
     521}
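MAX_LOCK_MEM_SIZE (defined earlier in this file) caps how much memory one MDL may describe, so large lock requests are split across several MDLs. A worked example, assuming a 32MB cap for illustration:

    /* cb = 100MB, MAX_LOCK_MEM_SIZE = 32MB (assumed):
     *   cMdls = 100 / 32 = 3, plus 1 for the 4MB remainder = 4
     *   chunk sizes locked in turn: 32MB, 32MB, 32MB, 4MB */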
     522
     523
     524int rtR0MemObjNativeLockUser(PPRTR0MEMOBJINTERNAL ppMem, RTR3PTR R3Ptr, size_t cb, RTR0PROCESS R0Process)
     525{
     526    AssertMsgReturn(R0Process == RTR0ProcHandleSelf(), ("%p != %p\n", R0Process, RTR0ProcHandleSelf()), VERR_NOT_SUPPORTED);
     527    /* (We can use MmProbeAndLockProcessPages if we need to mess with other processes later.) */
     528    return rtR0MemObjNtLock(ppMem, (void *)R3Ptr, cb, R0Process);
    474529}
    475530
     
    477532int rtR0MemObjNativeLockKernel(PPRTR0MEMOBJINTERNAL ppMem, void *pv, size_t cb)
    478533{
    479     return rtR0MemObjNativeLock(ppMem, pv, cb, kernel_task);
     534    return rtR0MemObjNtLock(ppMem, pv, cb, NIL_RTR0PROCESS);
    480535}
    481536
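Both wrappers funnel into rtR0MemObjNtLock(). A hedged sketch of locking a user buffer through the public API (parameter list inferred from the native function above):

    RTR0MEMOBJ hLock;
    int rc = RTR0MemObjLockUser(&hLock, R3Ptr, cb, RTR0ProcHandleSelf());
    if (RT_SUCCESS(rc))
    {
        /* The pages stay resident until the object is freed. */
        RTR0MemObjFree(hLock, false /* fFreeMappings */);
    }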