VirtualBox

source: vbox/trunk/src/VBox/Runtime/r0drv/darwin/memobj-r0drv-darwin.cpp@ 394

Last change on this file since 394 was 394, checked in by vboxsync, 18 years ago

Use the vm_map_wire mode as it appears to be kinder to heap consumption.

  • Property svn:keywords set to Id
File size: 19.9 KB
Line 
1/* $Id: memobj-r0drv-darwin.cpp 394 2007-01-28 00:02:50Z vboxsync $ */
2/** @file
3 * InnoTek Portable Runtime - Ring-0 Memory Objects, Darwin.
4 */
5
6/*
7 * Copyright (C) 2006 InnoTek Systemberatung GmbH
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License as published by the Free Software Foundation,
13 * in version 2 as it comes in the "COPYING" file of the VirtualBox OSE
14 * distribution. VirtualBox OSE is distributed in the hope that it will
15 * be useful, but WITHOUT ANY WARRANTY of any kind.
16 *
17 * If you received this file as part of a commercial VirtualBox
18 * distribution, then only the terms of your commercial VirtualBox
19 * license agreement apply instead of the previous paragraph.
20 */
21
22
23/*******************************************************************************
24* Header Files *
25*******************************************************************************/
26#include "the-darwin-kernel.h"
27
28#include <iprt/memobj.h>
29#include <iprt/alloc.h>
30#include <iprt/assert.h>
31#include <iprt/log.h>
32#include <iprt/param.h>
33#include <iprt/string.h>
34#include <iprt/process.h>
35#include "internal/memobj.h"
36
37#define USE_VM_MAP_WIRE
38
39
40/*******************************************************************************
41* Structures and Typedefs *
42*******************************************************************************/
43/**
44 * The Darwin version of the memory object structure.
45 */
/**
 * The Darwin version of the memory object structure.
 *
 * An object holds at most one of pMemDesc/pMemMap: rtR0MemObjNativeFree()
 * asserts that pMemMap is NULL whenever pMemDesc is set.
 */
typedef struct RTR0MEMOBJDARWIN
{
    /** The core structure. */
    RTR0MEMOBJINTERNAL Core;
    /** Pointer to the memory descriptor created for allocated and locked memory.
     * NULL for mapping objects and for memory locked via vm_map_wire (USE_VM_MAP_WIRE). */
    IOMemoryDescriptor *pMemDesc;
    /** Pointer to the memory mapping object for mapped memory.
     * Only set by rtR0MemObjNativeMapKernel/MapUser. */
    IOMemoryMap *pMemMap;
} RTR0MEMOBJDARWIN, *PRTR0MEMOBJDARWIN;
55
56
57int rtR0MemObjNativeFree(RTR0MEMOBJ pMem)
58{
59 PRTR0MEMOBJDARWIN pMemDarwin = (PRTR0MEMOBJDARWIN)pMem;
60
61 /*
62 * Release the IOMemoryDescriptor/IOMemoryMap associated with the object.
63 */
64 if (pMemDarwin->pMemDesc)
65 {
66 if (pMemDarwin->Core.enmType == RTR0MEMOBJTYPE_LOCK)
67 pMemDarwin->pMemDesc->complete(); /* paranoia */
68 pMemDarwin->pMemDesc->release();
69 pMemDarwin->pMemDesc = NULL;
70 Assert(!pMemDarwin->pMemMap);
71 }
72 else if (pMemDarwin->pMemMap)
73 {
74 pMemDarwin->pMemMap->release();
75 pMemDarwin->pMemMap = NULL;
76 }
77
78 /*
79 * Release any memory that we've allocated or locked.
80 */
81 switch (pMemDarwin->Core.enmType)
82 {
83 case RTR0MEMOBJTYPE_PAGE:
84 IOFreeAligned(pMemDarwin->Core.pv, pMemDarwin->Core.cb);
85 break;
86
87 /*case RTR0MEMOBJTYPE_LOW: => RTR0MEMOBJTYPE_CONT
88 break;*/
89
90 case RTR0MEMOBJTYPE_CONT:
91 IOFreeContiguous(pMemDarwin->Core.pv, pMemDarwin->Core.cb);
92 break;
93
94 case RTR0MEMOBJTYPE_LOCK:
95 {
96#ifdef USE_VM_MAP_WIRE
97 vm_map_t Map = pMemDarwin->Core.u.Lock.R0Process != NIL_RTR0PROCESS
98 ? get_task_map((task_t)pMemDarwin->Core.u.Lock.R0Process)
99 : kernel_map;
100 kern_return_t kr = vm_map_unwire(Map,
101 (vm_map_offset_t)pMemDarwin->Core.pv,
102 (vm_map_offset_t)pMemDarwin->Core.pv + pMemDarwin->Core.cb,
103 0 /* not user */);
104 AssertRC(kr == KERN_SUCCESS); /** @todo don't ignore... */
105#endif
106 break;
107 }
108
109 case RTR0MEMOBJTYPE_PHYS:
110 /*if (pMemDarwin->Core.u.Phys.fAllocated)
111 IOFreePhysical(pMemDarwin->Core.u.Phys.PhysBase, pMemDarwin->Core.cb);*/
112 Assert(!pMemDarwin->Core.u.Phys.fAllocated);
113 break;
114
115 case RTR0MEMOBJTYPE_RES_VIRT:
116 AssertMsgFailed(("RTR0MEMOBJTYPE_RES_VIRT\n"));
117 return VERR_INTERNAL_ERROR;
118 break;
119
120 case RTR0MEMOBJTYPE_MAPPING:
121 /* nothing to do here. */
122 break;
123
124 default:
125 AssertMsgFailed(("enmType=%d\n", pMemDarwin->Core.enmType));
126 return VERR_INTERNAL_ERROR;
127 }
128
129 return VINF_SUCCESS;
130}
131
132
133int rtR0MemObjNativeAllocPage(PPRTR0MEMOBJINTERNAL ppMem, size_t cb, bool fExecutable)
134{
135 /*
136 * Try allocate the memory and create it's IOMemoryDescriptor first.
137 */
138 int rc = VERR_NO_PAGE_MEMORY;
139 AssertCompile(sizeof(IOPhysicalAddress) == 4);
140 void *pv = IOMallocAligned(cb, PAGE_SIZE);
141 if (pv)
142 {
143 IOMemoryDescriptor *pMemDesc = IOMemoryDescriptor::withAddress((vm_address_t)pv, cb, kIODirectionInOut, kernel_task);
144 if (pMemDesc)
145 {
146 /*
147 * Create the IPRT memory object.
148 */
149 PRTR0MEMOBJDARWIN pMemDarwin = (PRTR0MEMOBJDARWIN)rtR0MemObjNew(sizeof(*pMemDarwin), RTR0MEMOBJTYPE_PAGE, pv, cb);
150 if (pMemDarwin)
151 {
152 pMemDarwin->pMemDesc = pMemDesc;
153 *ppMem = &pMemDarwin->Core;
154 return VINF_SUCCESS;
155 }
156
157 rc = VERR_NO_MEMORY;
158 pMemDesc->release();
159 }
160 else
161 rc = VERR_MEMOBJ_INIT_FAILED;
162 IOFreeAligned(pv, cb);
163 }
164 return rc;
165}
166
167
168int rtR0MemObjNativeAllocLow(PPRTR0MEMOBJINTERNAL ppMem, size_t cb, bool fExecutable)
169{
170 /*
171 * IOMallocContiguous is the most suitable API.
172 */
173 return rtR0MemObjNativeAllocCont(ppMem, cb, fExecutable);
174}
175
176
177int rtR0MemObjNativeAllocCont(PPRTR0MEMOBJINTERNAL ppMem, size_t cb, bool fExecutable)
178{
179 /*
180 * Try allocate the memory and create it's IOMemoryDescriptor first.
181 */
182 int rc = VERR_NO_CONT_MEMORY;
183 AssertCompile(sizeof(IOPhysicalAddress) == 4);
184 void *pv = IOMallocContiguous(cb, PAGE_SIZE, NULL);
185 if (pv)
186 {
187 IOMemoryDescriptor *pMemDesc = IOMemoryDescriptor::withAddress((vm_address_t)pv, cb, kIODirectionInOut, kernel_task);
188 if (pMemDesc)
189 {
190 /* a bit of useful paranoia. */
191 addr64_t PhysAddr = pMemDesc->getPhysicalSegment64(0, NULL);
192 Assert(PhysAddr == pMemDesc->getPhysicalAddress());
193 if ( PhysAddr > 0
194 && PhysAddr <= _4G
195 && PhysAddr + cb <= _4G)
196 {
197 /*
198 * Create the IPRT memory object.
199 */
200 PRTR0MEMOBJDARWIN pMemDarwin = (PRTR0MEMOBJDARWIN)rtR0MemObjNew(sizeof(*pMemDarwin), RTR0MEMOBJTYPE_CONT, pv, cb);
201 if (pMemDarwin)
202 {
203 pMemDarwin->Core.u.Cont.Phys = PhysAddr;
204 pMemDarwin->pMemDesc = pMemDesc;
205 *ppMem = &pMemDarwin->Core;
206 return VINF_SUCCESS;
207 }
208
209 rc = VERR_NO_MEMORY;
210 }
211 else
212 {
213 AssertMsgFailed(("PhysAddr=%llx\n", (unsigned long long)PhysAddr));
214 rc = VERR_INTERNAL_ERROR;
215 }
216 pMemDesc->release();
217 }
218 else
219 rc = VERR_MEMOBJ_INIT_FAILED;
220 IOFreeContiguous(pv, cb);
221 }
222 return rc;
223}
224
225
/**
 * Allocates physical memory, preferably below PhysHighest.
 *
 * The IOMallocPhysical-based implementation is disabled (#if 0) because the
 * symbol turned out not to be exported; the live code path simply defers to
 * the contiguous allocator, which guarantees memory below 4GB.
 *
 * NOTE(review): the active path ignores PhysHighest entirely and only
 * delivers sub-4GB memory - callers with PhysHighest < 4GB may get memory
 * above their limit. TODO confirm whether any caller relies on this.
 */
int rtR0MemObjNativeAllocPhys(PPRTR0MEMOBJINTERNAL ppMem, size_t cb, RTHCPHYS PhysHighest)
{
#if 0 /* turned out IOMallocPhysical isn't exported yet. sigh. */
    /*
     * Try allocate the memory and create its IOMemoryDescriptor first.
     * Note that IOMallocPhysical is not working correctly (it's ignoring the mask).
     */

    /* first calc the mask (in the hope that it'll be used) */
    IOPhysicalAddress PhysMask = ~(IOPhysicalAddress)PAGE_OFFSET_MASK;
    if (PhysHighest != NIL_RTHCPHYS)
    {
        PhysMask = ~(IOPhysicalAddress)0;
        while (PhysMask > PhysHighest)
            PhysMask >>= 1;
        /* NOTE(review): this condition looks inverted - it rejects the call
           when the mask-covered range is LARGER than cb, which seems backwards.
           Dead code (#if 0), so left as-is; verify before re-enabling. */
        AssertReturn(PhysMask + 1 < cb, VERR_INVALID_PARAMETER);
        PhysMask &= ~(IOPhysicalAddress)PAGE_OFFSET_MASK;
    }

    /* try allocate physical memory. */
    int rc = VERR_NO_PHYS_MEMORY;
    mach_vm_address_t PhysAddr64 = IOMallocPhysical(cb, PhysMask);
    if (PhysAddr64)
    {
        /* The address must be expressible as a 32-bit IOPhysicalAddress
           and the whole block must sit below PhysHighest. */
        IOPhysicalAddress PhysAddr = PhysAddr64;
        if (    PhysAddr == PhysAddr64
            &&  PhysAddr < PhysHighest
            &&  PhysAddr + cb <= PhysHighest)
        {
            /* create a descriptor. */
            IOMemoryDescriptor *pMemDesc = IOMemoryDescriptor::withPhysicalAddress(PhysAddr, cb, kIODirectionInOut);
            if (pMemDesc)
            {
                Assert(PhysAddr == pMemDesc->getPhysicalAddress());

                /*
                 * Create the IPRT memory object.
                 */
                PRTR0MEMOBJDARWIN pMemDarwin = (PRTR0MEMOBJDARWIN)rtR0MemObjNew(sizeof(*pMemDarwin), RTR0MEMOBJTYPE_PHYS, NULL, cb);
                if (pMemDarwin)
                {
                    pMemDarwin->Core.u.Phys.PhysBase = PhysAddr;
                    pMemDarwin->Core.u.Phys.fAllocated = true;
                    pMemDarwin->pMemDesc = pMemDesc;
                    *ppMem = &pMemDarwin->Core;
                    return VINF_SUCCESS;
                }

                rc = VERR_NO_MEMORY;
                pMemDesc->release();
            }
            else
                rc = VERR_MEMOBJ_INIT_FAILED;
        }
        else
        {
            AssertMsgFailed(("PhysAddr=%#llx PhysAddr64=%#llx PhysHigest=%#llx\n", (unsigned long long)PhysAddr,
                             (unsigned long long)PhysAddr64, (unsigned long long)PhysHighest));
            rc = VERR_INTERNAL_ERROR;
        }

        IOFreePhysical(PhysAddr64, cb);
    }

    /*
     * Just in case IOMallocContiguous doesn't work right, we can try fall back
     * on a contiguous allocation.
     */
    if (rc == VERR_INTERNAL_ERROR || rc == VERR_NO_PHYS_MEMORY)
    {
        int rc2 = rtR0MemObjNativeAllocCont(ppMem, cb, false);
        if (RT_SUCCESS(rc2))
            rc = rc2;
    }

    return rc;

#else

    /* The only working path: contiguous sub-4GB allocation. */
    return rtR0MemObjNativeAllocCont(ppMem, cb, false);
#endif
}
308
309
310int rtR0MemObjNativeEnterPhys(PPRTR0MEMOBJINTERNAL ppMem, RTHCPHYS Phys, size_t cb)
311{
312 /*
313 * Validate the address range and create a descriptor for it.
314 */
315 int rc = VERR_ADDRESS_TOO_BIG;
316 IOPhysicalAddress PhysAddr = Phys;
317 if (PhysAddr == Phys)
318 {
319 IOMemoryDescriptor *pMemDesc = IOMemoryDescriptor::withPhysicalAddress(PhysAddr, cb, kIODirectionInOut);
320 if (pMemDesc)
321 {
322 Assert(PhysAddr == pMemDesc->getPhysicalAddress());
323
324 /*
325 * Create the IPRT memory object.
326 */
327 PRTR0MEMOBJDARWIN pMemDarwin = (PRTR0MEMOBJDARWIN)rtR0MemObjNew(sizeof(*pMemDarwin), RTR0MEMOBJTYPE_PHYS, NULL, cb);
328 if (pMemDarwin)
329 {
330 pMemDarwin->Core.u.Phys.PhysBase = PhysAddr;
331 pMemDarwin->Core.u.Phys.fAllocated = false;
332 pMemDarwin->pMemDesc = pMemDesc;
333 *ppMem = &pMemDarwin->Core;
334 return VINF_SUCCESS;
335 }
336
337 rc = VERR_NO_MEMORY;
338 pMemDesc->release();
339 }
340 }
341 else
342 AssertMsgFailed(("%#llx\n", (unsigned long long)Phys));
343 return rc;
344}
345
346
347/**
348 * Internal worker for locking down pages.
349 *
350 * @return IPRT status code.
351 *
352 * @param ppMem Where to store the memory object pointer.
353 * @param pv First page.
354 * @param cb Number of bytes.
355 * @param Task The task \a pv and \a cb refers to.
356 */
357static int rtR0MemObjNativeLock(PPRTR0MEMOBJINTERNAL ppMem, void *pv, size_t cb, task_t Task)
358{
359#ifdef USE_VM_MAP_WIRE
360 vm_map_t Map = get_task_map(Task);
361 Assert(Map);
362
363 /*
364 * First try lock the memory.
365 */
366 int rc = VERR_LOCK_FAILED;
367 kern_return_t kr = vm_map_wire(get_task_map(Task),
368 (vm_map_offset_t)pv,
369 (vm_map_offset_t)pv + cb,
370 VM_PROT_DEFAULT,
371 0 /* not user */);
372 if (kr == KERN_SUCCESS)
373 {
374 /*
375 * Create the IPRT memory object.
376 */
377 PRTR0MEMOBJDARWIN pMemDarwin = (PRTR0MEMOBJDARWIN)rtR0MemObjNew(sizeof(*pMemDarwin), RTR0MEMOBJTYPE_LOCK, pv, cb);
378 if (pMemDarwin)
379 {
380 pMemDarwin->Core.u.Lock.R0Process = (RTR0PROCESS)Task;
381 *ppMem = &pMemDarwin->Core;
382 return VINF_SUCCESS;
383 }
384
385 kr = vm_map_unwire(get_task_map(Task), (vm_map_offset_t)pv, (vm_map_offset_t)pv + cb, 0 /* not user */);
386 Assert(kr == KERN_SUCCESS);
387 rc = VERR_NO_MEMORY;
388 }
389
390#else
391
392 /*
393 * Create a descriptor and try lock it (prepare).
394 */
395 int rc = VERR_MEMOBJ_INIT_FAILED;
396 IOMemoryDescriptor *pMemDesc = IOMemoryDescriptor::withAddress((vm_address_t)pv, cb, kIODirectionInOut, Task);
397 if (pMemDesc)
398 {
399 IOReturn IORet = pMemDesc->prepare(kIODirectionInOut);
400 if (IORet == kIOReturnSuccess)
401 {
402 /*
403 * Create the IPRT memory object.
404 */
405 PRTR0MEMOBJDARWIN pMemDarwin = (PRTR0MEMOBJDARWIN)rtR0MemObjNew(sizeof(*pMemDarwin), RTR0MEMOBJTYPE_LOCK, pv, cb);
406 if (pMemDarwin)
407 {
408 pMemDarwin->Core.u.Lock.R0Process = (RTR0PROCESS)Task;
409 pMemDarwin->pMemDesc = pMemDesc;
410 *ppMem = &pMemDarwin->Core;
411 return VINF_SUCCESS;
412 }
413
414 pMemDesc->complete();
415 rc = VERR_NO_MEMORY;
416 }
417 else
418 rc = VERR_LOCK_FAILED;
419 pMemDesc->release();
420 }
421#endif
422 return rc;
423}
424
425
426int rtR0MemObjNativeLockUser(PPRTR0MEMOBJINTERNAL ppMem, void *pv, size_t cb, RTR0PROCESS R0Process)
427{
428 return rtR0MemObjNativeLock(ppMem, pv, cb, (task_t)R0Process);
429}
430
431
432int rtR0MemObjNativeLockKernel(PPRTR0MEMOBJINTERNAL ppMem, void *pv, size_t cb)
433{
434 return rtR0MemObjNativeLock(ppMem, pv, cb, kernel_task);
435}
436
437
/**
 * Reserves kernel virtual address space - not implemented on Darwin.
 * rtR0MemObjNativeFree() asserts if an RTR0MEMOBJTYPE_RES_VIRT object
 * ever reaches it, consistent with this stub.
 *
 * @returns VERR_NOT_IMPLEMENTED.
 */
int rtR0MemObjNativeReserveKernel(PPRTR0MEMOBJINTERNAL ppMem, void *pvFixed, size_t cb, size_t uAlignment)
{
    return VERR_NOT_IMPLEMENTED;
}
442
443
/**
 * Reserves user virtual address space - not implemented on Darwin.
 *
 * @returns VERR_NOT_IMPLEMENTED.
 */
int rtR0MemObjNativeReserveUser(PPRTR0MEMOBJINTERNAL ppMem, void *pvFixed, size_t cb, size_t uAlignment, RTR0PROCESS R0Process)
{
    return VERR_NOT_IMPLEMENTED;
}
448
449
450int rtR0MemObjNativeMapKernel(PPRTR0MEMOBJINTERNAL ppMem, RTR0MEMOBJ pMemToMap, void *pvFixed, size_t uAlignment, unsigned fProt)
451{
452 /*
453 * Must have a memory descriptor.
454 */
455 int rc = VERR_INVALID_PARAMETER;
456 PRTR0MEMOBJDARWIN pMemToMapDarwin = (PRTR0MEMOBJDARWIN)pMemToMap;
457 if (pMemToMapDarwin->pMemDesc)
458 {
459 IOMemoryMap *pMemMap = pMemToMapDarwin->pMemDesc->map(kernel_task, kIOMapAnywhere,
460 kIOMapAnywhere | kIOMapDefaultCache);
461 if (pMemMap)
462 {
463 IOVirtualAddress VirtAddr = pMemMap->getVirtualAddress();
464 void *pv = (void *)(uintptr_t)VirtAddr;
465 if ((uintptr_t)pv == VirtAddr)
466 {
467 /*
468 * Create the IPRT memory object.
469 */
470 PRTR0MEMOBJDARWIN pMemDarwin = (PRTR0MEMOBJDARWIN)rtR0MemObjNew(sizeof(*pMemDarwin), RTR0MEMOBJTYPE_MAPPING,
471 pv, pMemToMapDarwin->Core.cb);
472 if (pMemDarwin)
473 {
474 pMemDarwin->Core.u.Mapping.R0Process = NIL_RTR0PROCESS;
475 pMemDarwin->pMemMap = pMemMap;
476 *ppMem = &pMemDarwin->Core;
477 return VINF_SUCCESS;
478 }
479
480 rc = VERR_NO_MEMORY;
481 }
482 else
483 rc = VERR_ADDRESS_TOO_BIG;
484 pMemMap->release();
485 }
486 else
487 rc = VERR_MAP_FAILED;
488 }
489 return rc;
490}
491
492
493int rtR0MemObjNativeMapUser(PPRTR0MEMOBJINTERNAL ppMem, RTR0MEMOBJ pMemToMap, void *pvFixed, size_t uAlignment, unsigned fProt, RTR0PROCESS R0Process)
494{
495 /*
496 * Must have a memory descriptor.
497 */
498 int rc = VERR_INVALID_PARAMETER;
499 PRTR0MEMOBJDARWIN pMemToMapDarwin = (PRTR0MEMOBJDARWIN)pMemToMap;
500 if (pMemToMapDarwin->pMemDesc)
501 {
502 IOMemoryMap *pMemMap = pMemToMapDarwin->pMemDesc->map((task_t)R0Process, kIOMapAnywhere,
503 kIOMapAnywhere | kIOMapDefaultCache);
504 if (pMemMap)
505 {
506 IOVirtualAddress VirtAddr = pMemMap->getVirtualAddress();
507 void *pv = (void *)(uintptr_t)VirtAddr;
508 if ((uintptr_t)pv == VirtAddr)
509 {
510 /*
511 * Create the IPRT memory object.
512 */
513 PRTR0MEMOBJDARWIN pMemDarwin = (PRTR0MEMOBJDARWIN)rtR0MemObjNew(sizeof(*pMemDarwin), RTR0MEMOBJTYPE_MAPPING,
514 pv, pMemToMapDarwin->Core.cb);
515 if (pMemDarwin)
516 {
517 pMemDarwin->Core.u.Mapping.R0Process = R0Process;
518 pMemDarwin->pMemMap = pMemMap;
519 *ppMem = &pMemDarwin->Core;
520 return VINF_SUCCESS;
521 }
522
523 rc = VERR_NO_MEMORY;
524 }
525 else
526 rc = VERR_ADDRESS_TOO_BIG;
527 pMemMap->release();
528 }
529 else
530 rc = VERR_MAP_FAILED;
531 }
532 return rc;
533}
534
535
/**
 * Gets the host physical address of page @a iPage within the memory object.
 *
 * For vm_map_wire-locked memory there is no IOMemoryDescriptor, so the
 * lookup goes through pmap_find_phys(); all other object types are resolved
 * via their descriptor's getPhysicalSegment64().
 *
 * @returns The physical address, or NIL_RTHCPHYS on failure.
 * @param   pMem    The memory object.
 * @param   iPage   Zero-based page index within the object.
 */
RTHCPHYS rtR0MemObjNativeGetPagePhysAddr(PRTR0MEMOBJINTERNAL pMem, unsigned iPage)
{
    RTHCPHYS PhysAddr;
    PRTR0MEMOBJDARWIN pMemDarwin = (PRTR0MEMOBJDARWIN)pMem;

#ifdef USE_VM_MAP_WIRE
    /*
     * Locked memory doesn't have a memory descriptor and
     * needs to be handled differently.
     */
    if (pMemDarwin->Core.enmType == RTR0MEMOBJTYPE_LOCK)
    {
        ppnum_t PgNo;
        if (pMemDarwin->Core.u.Lock.R0Process == NIL_RTR0PROCESS)
            /* Kernel memory: kernel_pmap is directly available. */
            PgNo = pmap_find_phys(kernel_pmap, (uintptr_t)pMemDarwin->Core.pv + iPage * PAGE_SIZE);
        else
        {
            /*
             * From what I can tell, Apple seems to have locked up the all the
             * available interfaces that could help us obtain the pmap_t of a task
             * or vm_map_t.
             *
             * So, we'll have to figure out where in the vm_map_t structure it is
             * and read it our selves. ASSUMING that kernel_pmap is pointed to by
             * kernel_map->pmap, we scan kernel_map to locate the structure offset.
             * Not nice, but it will hopefully do the job in a reliable manner...
             *
             * (get_task_pmap, get_map_pmap or vm_map_pmap is what we really need btw.)
             */
            /* The offset is probed once and cached; -1 means "not yet found".
               NOTE(review): this relies on the private vm_map layout holding
               the pmap pointer within the first 64 pointer-sized slots -
               fragile across kernel versions by design, as the comment above
               admits. */
            static int s_offPmap = -1;
            if (RT_UNLIKELY(s_offPmap == -1))
            {
                pmap_t const *p = (pmap_t *)kernel_map;
                pmap_t const * const pEnd = p + 64;
                for (; p < pEnd; p++)
                    if (*p == kernel_pmap)
                    {
                        s_offPmap = (uintptr_t)p - (uintptr_t)kernel_map;
                        break;
                    }
                AssertReturn(s_offPmap >= 0, NIL_RTHCPHYS);
            }
            /* Read the task's pmap at the discovered offset and resolve the page. */
            pmap_t Pmap = *(pmap_t *)((uintptr_t)get_task_map((task_t)pMemDarwin->Core.u.Lock.R0Process) + s_offPmap);
            PgNo = pmap_find_phys(Pmap, (uintptr_t)pMemDarwin->Core.pv + iPage * PAGE_SIZE);
        }

        /* Convert the page number to an address, checking for truncation. */
        AssertReturn(PgNo, NIL_RTHCPHYS);
        PhysAddr = (RTHCPHYS)PgNo << PAGE_SHIFT;
        Assert((PhysAddr >> PAGE_SHIFT) == PgNo);
    }
    else
#endif /* USE_VM_MAP_WIRE */
    {
        /*
         * Get the memory descriptor.
         */
        IOMemoryDescriptor *pMemDesc = pMemDarwin->pMemDesc;
        if (!pMemDesc)
            pMemDesc = pMemDarwin->pMemMap->getMemoryDescriptor();
        AssertReturn(pMemDesc, NIL_RTHCPHYS);

        /*
         * If we've got a memory descriptor, use getPhysicalSegment64().
         */
        addr64_t Addr = pMemDesc->getPhysicalSegment64(iPage * PAGE_SIZE, NULL);
        AssertMsgReturn(Addr, ("iPage=%u\n", iPage), NIL_RTHCPHYS);
        PhysAddr = Addr;
        AssertMsgReturn(PhysAddr == Addr, ("PhysAddr=%VHp Addr=%RX64\n", PhysAddr, (uint64_t)Addr), NIL_RTHCPHYS);
    }

    return PhysAddr;
}
608
Note: See TracBrowser for help on using the repository browser.

© 2024 Oracle Support Privacy / Do Not Sell My Info Terms of Use Trademark Policy Automated Access Etiquette