VirtualBox

Changeset 16329 in vbox


Timestamp: Jan 28, 2009 8:20:33 PM
Author: vboxsync
Message:

memobj-r0drv-darwin.cpp: A few APIs were retired or deprecated in 10.6/AMD64; simplified the allocators to all use IOBufferMemoryDescriptor::inTaskWithPhysicalMask() in various ways.
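
For readers unfamiliar with the IOKit API named above: the consolidated path follows the usual IOBufferMemoryDescriptor lifecycle (allocate against a physical mask, wire the pages with prepare(), fetch the kernel mapping with getBytesNoCopy()). A minimal sketch of that pattern, not the IPRT code itself; the helper name and mask constant are illustrative and error handling is trimmed:

    #include <IOKit/IOBufferMemoryDescriptor.h>

    // Sketch only: allocate cb bytes of wired kernel memory, page aligned
    // and below 4GB. The mask value is illustrative, not from the change.
    static void *sketchAlloc(size_t cb, IOBufferMemoryDescriptor **ppDesc)
    {
        IOBufferMemoryDescriptor *pDesc =
            IOBufferMemoryDescriptor::inTaskWithPhysicalMask(kernel_task,
                                                             kIOMemorySharingTypeMask | kIODirectionInOut,
                                                             cb,
                                                             0x00000000FFFFF000ULL /* page aligned, < 4GB */);
        if (!pDesc)
            return NULL;
        if (pDesc->prepare(kIODirectionInOut) != kIOReturnSuccess) /* wire the pages */
        {
            pDesc->release();
            return NULL;
        }
        *ppDesc = pDesc; /* caller must complete() and release() when done */
        return pDesc->getBytesNoCopy(0, cb); /* kernel virtual address */
    }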

File: 1 edited

Legend: '-' marks lines removed in r16329, '+' marks added lines; other lines are unchanged context.
  • trunk/src/VBox/Runtime/r0drv/darwin/memobj-r0drv-darwin.cpp

Diff from r15835 to r16329:

@@ -92,9 +92,5 @@
         case RTR0MEMOBJTYPE_LOW:
         case RTR0MEMOBJTYPE_PAGE:
-            IOFreeAligned(pMemDarwin->Core.pv, pMemDarwin->Core.cb);
-            break;
-
         case RTR0MEMOBJTYPE_CONT:
-            IOFreeContiguous(pMemDarwin->Core.pv, pMemDarwin->Core.cb);
             break;

     
@@ -123,10 +119,8 @@
             AssertMsgFailed(("RTR0MEMOBJTYPE_PHYS_NC\n"));
             return VERR_INTERNAL_ERROR;
-            break;

         case RTR0MEMOBJTYPE_RES_VIRT:
             AssertMsgFailed(("RTR0MEMOBJTYPE_RES_VIRT\n"));
             return VERR_INTERNAL_ERROR;
-            break;

         case RTR0MEMOBJTYPE_MAPPING:
     
@@ -143,139 +137,100 @@


-int rtR0MemObjNativeAllocPage(PPRTR0MEMOBJINTERNAL ppMem, size_t cb, bool fExecutable)
-{
-    /*
-     * Try allocate the memory and create it's IOMemoryDescriptor first.
-     */
-    int rc = VERR_NO_PAGE_MEMORY;
-    AssertCompile(sizeof(IOPhysicalAddress) == 4);
-    void *pv = IOMallocAligned(cb, PAGE_SIZE);
-    if (pv)
-    {
-        IOMemoryDescriptor *pMemDesc = IOMemoryDescriptor::withAddress((vm_address_t)pv, cb, kIODirectionInOut, kernel_task);
-        if (pMemDesc)
-        {
-            /*
-             * Create the IPRT memory object.
-             */
-            PRTR0MEMOBJDARWIN pMemDarwin = (PRTR0MEMOBJDARWIN)rtR0MemObjNew(sizeof(*pMemDarwin), RTR0MEMOBJTYPE_PAGE, pv, cb);
-            if (pMemDarwin)
+
+/**
+ * Kernel memory alloc worker that uses inTaskWithPhysicalMask.
+ *
+ * @returns IPRT status code.
+ * @retval  VERR_ADDRESS_TOO_BIG try another way.
+ *
+ * @param   ppMem           Where to return the memory object.
+ * @param   cb              The page aligned memory size.
+ * @param   fExecutable     Whether the mapping needs to be executable.
+ * @param   fContiguous     Whether the backing memory needs to be contiguous.
+ * @param   PhysMask        The mask for the backing memory (i.e. range). Use 0 if
+ *                          you don't care that much or is speculating.
+ * @param   MaxPhysAddr     The max address to verify the result against. Use
+ *                          UINT64_MAX if it doesn't matter.
+ * @param   enmType         The object type.
+ */
+static int rtR0MemObjNativeAllocWorker(PPRTR0MEMOBJINTERNAL ppMem, size_t cb,
+                                       bool fExecutable, bool fContiguous,
+                                       mach_vm_address_t PhysMask, uint64_t MaxPhysAddr,
+                                       RTR0MEMOBJTYPE enmType)
+{
+    /*
+     * Try inTaskWithPhysicalMask first, but since we don't quite trust that it
+     * actually respects the physical memory mask (10.5.x is certainly busted),
+     * we'll use rtR0MemObjNativeAllocCont as a fallback for dealing with that.
+     *
+     * The kIOMemorySharingTypeMask flag just forces the result to be page aligned.
+     */
+    int rc;
+    IOBufferMemoryDescriptor *pMemDesc =
+        IOBufferMemoryDescriptor::inTaskWithPhysicalMask(kernel_task,
+                                                           kIOMemorySharingTypeMask
+                                                         | kIODirectionInOut
+                                                         | (fContiguous ? kIOMemoryPhysicallyContiguous : 0),
+                                                         cb,
+                                                         PhysMask);
+    if (pMemDesc)
+    {
+        IOReturn IORet = pMemDesc->prepare(kIODirectionInOut);
+        if (IORet == kIOReturnSuccess)
+        {
+            void *pv = pMemDesc->getBytesNoCopy(0, cb);
+            if (pv)
             {
-                pMemDarwin->pMemDesc = pMemDesc;
-                *ppMem = &pMemDarwin->Core;
-                return VINF_SUCCESS;
-            }
-
-            rc = VERR_NO_MEMORY;
-            pMemDesc->release();
-        }
-        else
-            rc = VERR_MEMOBJ_INIT_FAILED;
-        IOFreeAligned(pv, cb);
-    }
-    return rc;
-}
-
-
-int rtR0MemObjNativeAllocLow(PPRTR0MEMOBJINTERNAL ppMem, size_t cb, bool fExecutable)
-{
-#if 1
-    /*
-     * Allocating 128KB continguous memory for the low page pool can bit a bit
-     * exhausting on the kernel, it frequently causes the entire box to lock
-     * up on startup.
-     *
-     * So, try allocate the memory using IOMallocAligned first and if we get any high
-     * physical memory we'll release it and fall back on IOMAllocContiguous.
-     */
-    int rc = VERR_NO_PAGE_MEMORY;
-    AssertCompile(sizeof(IOPhysicalAddress) == 4);
-    void *pv = IOMallocAligned(cb, PAGE_SIZE);
-    if (pv)
-    {
-        IOMemoryDescriptor *pMemDesc = IOMemoryDescriptor::withAddress((vm_address_t)pv, cb, kIODirectionInOut, kernel_task);
-        if (pMemDesc)
-        {
-            /*
-             * Check if it's all below 4GB.
-             */
-            for (IOByteCount off = 0; off < cb; off += PAGE_SIZE)
-            {
-                addr64_t Addr = pMemDesc->getPhysicalSegment64(off, NULL);
-                if (Addr > (uint32_t)(_4G - PAGE_SIZE))
+                /*
+                 * Check if it's all below 4GB.
+                 */
+                addr64_t AddrPrev = 0;
+                MaxPhysAddr &= ~(uint64_t)PAGE_OFFSET_MASK;
+                for (IOByteCount off = 0; off < cb; off += PAGE_SIZE)
                 {
-                    /* Ok, we failed, fall back on contiguous allocation. */
-                    pMemDesc->release();
-                    IOFreeAligned(pv, cb);
-                    return rtR0MemObjNativeAllocCont(ppMem, cb, fExecutable);
+#ifdef __LP64__ /* Grumble! */
+                    addr64_t Addr = pMemDesc->getPhysicalSegment(off, NULL);
+#else
+                    addr64_t Addr = pMemDesc->getPhysicalSegment64(off, NULL);
+#endif
+                    if (    Addr > MaxPhysAddr
+                        ||  !Addr
+                        || (Addr & PAGE_OFFSET_MASK)
+                        ||  (   fContiguous
+                             && !off
+                             && Addr == AddrPrev + PAGE_SIZE))
+                    {
+                        /* Buggy API, try allocate the memory another way. */
+                        pMemDesc->release();
+                        if (PhysMask)
+                            LogAlways(("rtR0MemObjNativeAllocLow: off=%x Addr=%llx AddrPrev=%llx MaxPhysAddr=%llx PhysMas=%llx - buggy API!\n",
+                                       off, Addr, AddrPrev, MaxPhysAddr, PhysMask));
+                        return VERR_ADDRESS_TOO_BIG;
+                    }
+                    AddrPrev = Addr;
                 }
-            }
-
-            /*
-             * Create the IPRT memory object.
-             */
-            PRTR0MEMOBJDARWIN pMemDarwin = (PRTR0MEMOBJDARWIN)rtR0MemObjNew(sizeof(*pMemDarwin), RTR0MEMOBJTYPE_LOW, pv, cb);
-            if (pMemDarwin)
-            {
-                pMemDarwin->pMemDesc = pMemDesc;
-                *ppMem = &pMemDarwin->Core;
-                return VINF_SUCCESS;
-            }
-
-            rc = VERR_NO_MEMORY;
-            pMemDesc->release();
-        }
-        else
-            rc = VERR_MEMOBJ_INIT_FAILED;
-        IOFreeAligned(pv, cb);
-    }
-    return rc;
-
-#else
-
-    /*
-     * IOMallocContiguous is the most suitable API.
-     */
-    return rtR0MemObjNativeAllocCont(ppMem, cb, fExecutable);
-#endif
-}
-
-
-int rtR0MemObjNativeAllocCont(PPRTR0MEMOBJINTERNAL ppMem, size_t cb, bool fExecutable)
-{
-    /*
-     * Try allocate the memory and create it's IOMemoryDescriptor first.
-     */
-    int rc = VERR_NO_CONT_MEMORY;
-    AssertCompile(sizeof(IOPhysicalAddress) == 4);
-
-    /// @todo
-    // Use IOBufferMemoryDescriptor::inTaskWithPhysicalMask(kernel_task, kIOMemoryKernelUserShared | kIODirectionInOut,
-    //                                                      cb, (_4G - 1) ^ PAGE_OFFSET_MASK);
-#if 1 /* seems to work fine for cb == PAGE_SIZE, the other variant doesn't. */
-    IOPhysicalAddress PhysAddrIgnored = 0;
-    void *pv = IOMallocContiguous(cb, PAGE_SIZE, &PhysAddrIgnored);
-#else
-    void *pv = IOMallocContiguous(cb, PAGE_SIZE, NULL);
-#endif
-    if (pv)
-    {
-        IOMemoryDescriptor *pMemDesc = IOMemoryDescriptor::withAddress((vm_address_t)pv, cb, kIODirectionInOut, kernel_task);
-        if (pMemDesc)
-        {
-            /* a bit of useful paranoia. */
-            addr64_t PhysAddr = pMemDesc->getPhysicalSegment64(0, NULL);
-            Assert(PhysAddr == pMemDesc->getPhysicalAddress());
-            if (    PhysAddr > 0
-                &&  PhysAddr <= _4G
-                &&  PhysAddr + cb <= _4G)
-            {
+
                 /*
                  * Create the IPRT memory object.
                  */
-                PRTR0MEMOBJDARWIN pMemDarwin = (PRTR0MEMOBJDARWIN)rtR0MemObjNew(sizeof(*pMemDarwin), RTR0MEMOBJTYPE_CONT, pv, cb);
+                PRTR0MEMOBJDARWIN pMemDarwin = (PRTR0MEMOBJDARWIN)rtR0MemObjNew(sizeof(*pMemDarwin), enmType, pv, cb);
                 if (pMemDarwin)
                 {
-                    pMemDarwin->Core.u.Cont.Phys = PhysAddr;
+                    if (fContiguous)
+                    {
+#ifdef __LP64__ /* Grumble! */
+                        addr64_t PhysBase64 = pMemDesc->getPhysicalSegment(0, NULL);
+#else
+                        addr64_t PhysBase64 = pMemDesc->getPhysicalSegment64(0, NULL);
+#endif
+                        RTHCPHYS PhysBase = PhysBase64; Assert(PhysBase == PhysBase64);
+                        if (enmType == RTR0MEMOBJTYPE_CONT)
+                            pMemDarwin->Core.u.Cont.Phys = PhysBase;
+                        else if (enmType == RTR0MEMOBJTYPE_PHYS)
+                            pMemDarwin->Core.u.Phys.PhysBase = PhysBase;
+                        else
+                            AssertMsgFailed(("enmType=%d\n", enmType));
+                    }
+
                     pMemDarwin->pMemDesc = pMemDesc;
                     *ppMem = &pMemDarwin->Core;
     
@@ -286,21 +241,56 @@
             }
             else
-            {
-                printf("rtR0MemObjNativeAllocCont: PhysAddr=%llx cb=%#x\n", (unsigned long long)PhysAddr, cb);
-                AssertMsgFailed(("PhysAddr=%llx\n", (unsigned long long)PhysAddr));
-                rc = VERR_INTERNAL_ERROR;
-            }
-            pMemDesc->release();
+                rc = VERR_MEMOBJ_INIT_FAILED;
         }
         else
-            rc = VERR_MEMOBJ_INIT_FAILED;
-        IOFreeContiguous(pv, cb);
-    }
-
-    /*
-     * Workaround for odd IOMallocContiguous behavior, just in case.
-     */
-    if (rc == VERR_INTERNAL_ERROR && cb <= PAGE_SIZE)
-        rc = rtR0MemObjNativeAllocCont(ppMem, cb + PAGE_SIZE, fExecutable);
+            rc = RTErrConvertFromDarwinIO(IORet);
+        pMemDesc->release();
+    }
+    else
+        rc = VERR_MEMOBJ_INIT_FAILED;
+    Assert(rc != VERR_ADDRESS_TOO_BIG);
+    return rc;
+}
+
+
+int rtR0MemObjNativeAllocPage(PPRTR0MEMOBJINTERNAL ppMem, size_t cb, bool fExecutable)
+{
+    return rtR0MemObjNativeAllocWorker(ppMem, cb, fExecutable, false /* fContiguous */,
+                                       0 /* PhysMask */, UINT64_MAX, RTR0MEMOBJTYPE_PAGE);
+}
+
+
+int rtR0MemObjNativeAllocLow(PPRTR0MEMOBJINTERNAL ppMem, size_t cb, bool fExecutable)
+{
+    /*
+     * Try IOMallocPhysical/IOMallocAligned first.
+     * Then try optimistically without a physical address mask, which will always
+     * end up using IOMallocAligned.
+     *
+     * (See bug comment in the worker and IOBufferMemoryDescriptor::initWithPhysicalMask.)
+     */
+    int rc = rtR0MemObjNativeAllocWorker(ppMem, cb, fExecutable, false /* fContiguous */,
+                                         ~(uint32_t)PAGE_OFFSET_MASK, _4G - PAGE_SIZE, RTR0MEMOBJTYPE_LOW);
+    if (rc == VERR_ADDRESS_TOO_BIG)
+        rc = rtR0MemObjNativeAllocWorker(ppMem, cb, fExecutable, false /* fContiguous */,
+                                         0 /* PhysMask */, _4G - PAGE_SIZE, RTR0MEMOBJTYPE_LOW);
+    return rc;
+}
+
+
+int rtR0MemObjNativeAllocCont(PPRTR0MEMOBJINTERNAL ppMem, size_t cb, bool fExecutable)
+{
+    int rc = rtR0MemObjNativeAllocWorker(ppMem, cb, fExecutable, true /* fContiguous */,
+                                         ~(uint32_t)PAGE_OFFSET_MASK, _4G - PAGE_SIZE,
+                                         RTR0MEMOBJTYPE_CONT);
+
+    /*
+     * Workaround for bogus IOKernelAllocateContiguous behavior, just in case.
+     * cb <= PAGE_SIZE allocations take a different path, using a different allocator.
+     */
+    if (RT_FAILURE(rc) && cb <= PAGE_SIZE)
+        rc = rtR0MemObjNativeAllocWorker(ppMem, cb + PAGE_SIZE, fExecutable, true /* fContiguous */,
+                                         ~(uint32_t)PAGE_OFFSET_MASK, _4G - PAGE_SIZE,
+                                         RTR0MEMOBJTYPE_CONT);
     return rc;
 }
     
@@ -309,83 +299,24 @@
 int rtR0MemObjNativeAllocPhys(PPRTR0MEMOBJINTERNAL ppMem, size_t cb, RTHCPHYS PhysHighest)
 {
-#if 0 /* turned out IOMallocPhysical isn't exported yet. sigh. */
-    /*
-     * Try allocate the memory and create it's IOMemoryDescriptor first.
-     * Note that IOMallocPhysical is not working correctly (it's ignoring the mask).
-     */
-
-    /* first calc the mask (in the hope that it'll be used) */
-    IOPhysicalAddress PhysMask = ~(IOPhysicalAddress)PAGE_OFFSET_MASK;
-    if (PhysHighest != NIL_RTHCPHYS)
-    {
-        PhysMask = ~(IOPhysicalAddress)0;
-        while (PhysMask > PhysHighest)
+    /*
+     * Translate the PhysHighest address into a mask.
+     */
+    int rc;
+    if (PhysHighest == NIL_RTHCPHYS)
+        rc = rtR0MemObjNativeAllocWorker(ppMem, cb, true /* fExecutable */, true /* fContiguous */,
+                                         0 /* PhysMask*/, UINT64_MAX, RTR0MEMOBJTYPE_PHYS);
+    else
+    {
+        mach_vm_address_t PhysMask = 0;
+        PhysMask = ~(mach_vm_address_t)0;
+        while (PhysMask > (PhysHighest | PAGE_OFFSET_MASK))
             PhysMask >>= 1;
-        AssertReturn(PhysMask + 1 < cb, VERR_INVALID_PARAMETER);
-        PhysMask &= ~(IOPhysicalAddress)PAGE_OFFSET_MASK;
-    }
-
-    /* try allocate physical memory. */
-    int rc = VERR_NO_PHYS_MEMORY;
-    mach_vm_address_t PhysAddr64 = IOMallocPhysical(cb, PhysMask);
-    if (PhysAddr64)
-    {
-        IOPhysicalAddress PhysAddr = PhysAddr64;
-        if (    PhysAddr == PhysAddr64
-            &&  PhysAddr < PhysHighest
-            &&  PhysAddr + cb <= PhysHighest)
-        {
-            /* create a descriptor. */
-            IOMemoryDescriptor *pMemDesc = IOMemoryDescriptor::withPhysicalAddress(PhysAddr, cb, kIODirectionInOut);
-            if (pMemDesc)
-            {
-                Assert(PhysAddr == pMemDesc->getPhysicalAddress());
-
-                /*
-                 * Create the IPRT memory object.
-                 */
-                PRTR0MEMOBJDARWIN pMemDarwin = (PRTR0MEMOBJDARWIN)rtR0MemObjNew(sizeof(*pMemDarwin), RTR0MEMOBJTYPE_PHYS, NULL, cb);
-                if (pMemDarwin)
-                {
-                    pMemDarwin->Core.u.Phys.PhysBase = PhysAddr;
-                    pMemDarwin->Core.u.Phys.fAllocated = true;
-                    pMemDarwin->pMemDesc = pMemDesc;
-                    *ppMem = &pMemDarwin->Core;
-                    return VINF_SUCCESS;
-                }
-
-                rc = VERR_NO_MEMORY;
-                pMemDesc->release();
-            }
-            else
-                rc = VERR_MEMOBJ_INIT_FAILED;
-        }
-        else
-        {
-            AssertMsgFailed(("PhysAddr=%#llx PhysAddr64=%#llx PhysHigest=%#llx\n", (unsigned long long)PhysAddr,
-                             (unsigned long long)PhysAddr64, (unsigned long long)PhysHighest));
-            rc = VERR_INTERNAL_ERROR;
-        }
-
-        IOFreePhysical(PhysAddr64, cb);
-    }
-
-    /*
-     * Just in case IOMallocContiguous doesn't work right, we can try fall back
-     * on a contiguous allcation.
-     */
-    if (rc == VERR_INTERNAL_ERROR || rc == VERR_NO_PHYS_MEMORY)
-    {
-        int rc2 = rtR0MemObjNativeAllocCont(ppMem, cb, false);
-        if (RT_SUCCESS(rc2))
-            rc = rc2;
-    }
-
-    return rc;
-
-#else
-
-    return rtR0MemObjNativeAllocCont(ppMem, cb, false);
-#endif
+        AssertReturn(PhysMask + 1 <= cb, VERR_INVALID_PARAMETER);
+        PhysMask &= ~(mach_vm_address_t)PAGE_OFFSET_MASK;
+
+        rc = rtR0MemObjNativeAllocWorker(ppMem, cb, true /* fExecutable */, true /* fContiguous */,
+                                         PhysMask, PhysHighest, RTR0MEMOBJTYPE_PHYS);
+    }
+    return rc;
 }

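The loop in the new rtR0MemObjNativeAllocPhys() turns an inclusive upper bound (PhysHighest) into the all-ones bit mask that inTaskWithPhysicalMask() expects, then clears the page-offset bits. A standalone worked example of the same arithmetic (user-land C++ for illustration, assuming 4KB pages; not part of the change):

    #include <cstdint>
    #include <cstdio>

    int main()
    {
        const uint64_t PAGE_OFFSET_MASK = 0xfff;            // assuming 4KB pages
        uint64_t PhysHighest = UINT64_C(0x0fffffff);        // e.g. lowest 256MB
        uint64_t PhysMask = ~UINT64_C(0);
        while (PhysMask > (PhysHighest | PAGE_OFFSET_MASK))
            PhysMask >>= 1;                                 // shrink to an all-ones mask
        PhysMask &= ~PAGE_OFFSET_MASK;                      // force page alignment
        std::printf("PhysMask=%#llx\n", (unsigned long long)PhysMask); // prints 0xffff000
        return 0;
    }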
     
@@ -409,5 +340,5 @@
      */
     int rc = VERR_ADDRESS_TOO_BIG;
-    IOAddressRange aRanges[1] = { Phys, cb };
+    IOAddressRange aRanges[1] = { { Phys, cb } };
     if (    aRanges[0].address == Phys
         &&  aRanges[0].length == cb)
     
@@ -493,5 +424,5 @@
      */
     int rc = VERR_MEMOBJ_INIT_FAILED;
-    IOMemoryDescriptor *pMemDesc = IOMemoryDescriptor::withAddress((vm_address_t)pv, cb, kIODirectionInOut, Task);
+    IOMemoryDescriptor *pMemDesc = IOMemoryDescriptor::withAddressRange((vm_address_t)pv, cb, kIODirectionInOut, Task);
     if (pMemDesc)
     {
     
@@ -550,4 +481,6 @@
                               unsigned fProt, size_t offSub, size_t cbSub)
 {
+    AssertReturn(pvFixed == (void *)-1, VERR_NOT_SUPPORTED);
+
     /*
      * Must have a memory descriptor.
     
@@ -557,7 +490,15 @@
     if (pMemToMapDarwin->pMemDesc)
     {
-        IOMemoryMap *pMemMap = pMemToMapDarwin->pMemDesc->map(kernel_task, kIOMapAnywhere,
+#if MAC_OS_X_VERSION_MIN_REQUIRED >= 1050
+        IOMemoryMap *pMemMap = pMemToMapDarwin->pMemDesc->createMappingInTask(kernel_task,
+                                                                              0,
+                                                                              kIOMapAnywhere | kIOMapDefaultCache,
+                                                                              offSub,
+                                                                              cbSub);
+#else
+        IOMemoryMap *pMemMap = pMemToMapDarwin->pMemDesc->map(kernel_task, 0,
                                                               kIOMapAnywhere | kIOMapDefaultCache,
                                                               offSub, cbSub);
+#endif
         if (pMemMap)
         {
     
@@ -594,4 +535,6 @@
 int rtR0MemObjNativeMapUser(PPRTR0MEMOBJINTERNAL ppMem, RTR0MEMOBJ pMemToMap, RTR3PTR R3PtrFixed, size_t uAlignment, unsigned fProt, RTR0PROCESS R0Process)
 {
+    AssertReturn(R3PtrFixed == (RTR3PTR)-1, VERR_NOT_SUPPORTED);
+
     /*
      * Must have a memory descriptor.
     
@@ -601,6 +544,14 @@
     if (pMemToMapDarwin->pMemDesc)
     {
-        IOMemoryMap *pMemMap = pMemToMapDarwin->pMemDesc->map((task_t)R0Process, kIOMapAnywhere,
+#if MAC_OS_X_VERSION_MIN_REQUIRED >= 1050
+        IOMemoryMap *pMemMap = pMemToMapDarwin->pMemDesc->createMappingInTask((task_t)R0Process,
+                                                                              0,
+                                                                              kIOMapAnywhere | kIOMapDefaultCache,
+                                                                              0 /* offset */,
+                                                                              0 /* length */);
+#else
+        IOMemoryMap *pMemMap = pMemToMapDarwin->pMemDesc->map((task_t)R0Process, 0,
                                                               kIOMapAnywhere | kIOMapDefaultCache);
+#endif
         if (pMemMap)
         {
     
@@ -699,5 +650,9 @@
          * If we've got a memory descriptor, use getPhysicalSegment64().
          */
+#ifdef __LP64__ /* Grumble! */
+        addr64_t Addr = pMemDesc->getPhysicalSegment(iPage * PAGE_SIZE, NULL);
+#else
         addr64_t Addr = pMemDesc->getPhysicalSegment64(iPage * PAGE_SIZE, NULL);
+#endif
         AssertMsgReturn(Addr, ("iPage=%u\n", iPage), NIL_RTHCPHYS);
         PhysAddr = Addr;
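
The __LP64__ blocks throughout the change exist because getPhysicalSegment64() is among the APIs retired on 10.6/AMD64; on 64-bit kernels getPhysicalSegment() already returns an addr64_t. A small sketch of the pattern the change repeats (hypothetical wrapper, not in the changeset):

    // Hypothetical helper illustrating the #ifdef pattern used above.
    static addr64_t sketchPhysSegment(IOMemoryDescriptor *pDesc, IOByteCount off)
    {
    #ifdef __LP64__
        return pDesc->getPhysicalSegment(off, NULL);   // addr64_t on 64-bit kernels
    #else
        return pDesc->getPhysicalSegment64(off, NULL); // retired on 10.6/AMD64
    #endif
    }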