VirtualBox

source: vbox/trunk/src/VBox/Runtime/r0drv/darwin/memobj-r0drv-darwin.cpp@ 377

Last change on this file since 377 was 377, checked in by vboxsync, 18 years ago

vm_map_wire/vm_map_unwire (alternative that didn't make any difference for the deadlock).

  • Property svn:keywords set to Id
File size: 19.9 KB
Line 
1/* $Id: memobj-r0drv-darwin.cpp 377 2007-01-27 03:48:18Z vboxsync $ */
2/** @file
3 * InnoTek Portable Runtime - Ring-0 Memory Objects, Darwin.
4 */
5
6/*
7 * Copyright (C) 2006 InnoTek Systemberatung GmbH
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License as published by the Free Software Foundation,
13 * in version 2 as it comes in the "COPYING" file of the VirtualBox OSE
14 * distribution. VirtualBox OSE is distributed in the hope that it will
15 * be useful, but WITHOUT ANY WARRANTY of any kind.
16 *
17 * If you received this file as part of a commercial VirtualBox
18 * distribution, then only the terms of your commercial VirtualBox
19 * license agreement apply instead of the previous paragraph.
20 */
21
22
23/*******************************************************************************
24* Header Files *
25*******************************************************************************/
26#include "the-darwin-kernel.h"
27
28#include <iprt/memobj.h>
29#include <iprt/alloc.h>
30#include <iprt/assert.h>
31#include <iprt/log.h>
32#include <iprt/param.h>
33#include <iprt/string.h>
34#include "internal/memobj.h"
35
36/*#define USE_VM_MAP_WIRE*/
37
38
39/*******************************************************************************
40* Structures and Typedefs *
41*******************************************************************************/
/**
 * The Darwin version of the memory object structure.
 *
 * An object owns at most one of pMemDesc/pMemMap (see rtR0MemObjNativeFree,
 * which asserts that pMemMap is NULL whenever pMemDesc is set).
 */
typedef struct RTR0MEMOBJDARWIN
{
    /** The core structure. */
    RTR0MEMOBJINTERNAL Core;
    /** Pointer to the memory descriptor created for allocated and locked memory.
     * NULL for mapping objects, which carry pMemMap instead. */
    IOMemoryDescriptor *pMemDesc;
    /** Pointer to the memory mapping object for mapped memory.
     * Only set for RTR0MEMOBJTYPE_MAPPING objects created by the map workers. */
    IOMemoryMap *pMemMap;
} RTR0MEMOBJDARWIN, *PRTR0MEMOBJDARWIN;
54
55
56int rtR0MemObjNativeFree(RTR0MEMOBJ pMem)
57{
58 PRTR0MEMOBJDARWIN pMemDarwin = (PRTR0MEMOBJDARWIN)pMem;
59
60 /*
61 * Release the IOMemoryDescriptor/IOMemoryMap associated with the object.
62 */
63 if (pMemDarwin->pMemDesc)
64 {
65 if (pMemDarwin->Core.enmType == RTR0MEMOBJTYPE_LOCK)
66 pMemDarwin->pMemDesc->complete(); /* paranoia */
67 pMemDarwin->pMemDesc->release();
68 pMemDarwin->pMemDesc = NULL;
69 Assert(!pMemDarwin->pMemMap);
70 }
71 else if (pMemDarwin->pMemMap)
72 {
73 pMemDarwin->pMemMap->release();
74 pMemDarwin->pMemMap = NULL;
75 }
76
77 /*
78 * Release any memory that we've allocated or locked.
79 */
80 switch (pMemDarwin->Core.enmType)
81 {
82 case RTR0MEMOBJTYPE_PAGE:
83 IOFreeAligned(pMemDarwin->Core.pv, pMemDarwin->Core.cb);
84 break;
85
86 /*case RTR0MEMOBJTYPE_LOW: => RTR0MEMOBJTYPE_CONT
87 break;*/
88
89 case RTR0MEMOBJTYPE_CONT:
90 IOFreeContiguous(pMemDarwin->Core.pv, pMemDarwin->Core.cb);
91 break;
92
93 case RTR0MEMOBJTYPE_LOCK:
94 {
95#ifdef USE_VM_MAP_WIRE
96 vm_map_t Map = pMemDarwin->Core.u.Lock.Process != NIL_RTPROCESS
97 ? get_task_map((task_t)pMemDarwin->Core.u.Lock.Process)
98 : kernel_map;
99 kern_return_t kr = vm_map_unwire(Map,
100 (vm_map_offset_t)pMemDarwin->Core.pv,
101 (vm_map_offset_t)pMemDarwin->Core.pv + pMemDarwin->Core.cb,
102 0 /* not user */);
103 AssertRC(kr == KERN_SUCCESS); /** @todo don't ignore... */
104#endif
105 break;
106 }
107
108 case RTR0MEMOBJTYPE_PHYS:
109 /*if (pMemDarwin->Core.u.Phys.fAllocated)
110 IOFreePhysical(pMemDarwin->Core.u.Phys.PhysBase, pMemDarwin->Core.cb);*/
111 Assert(!pMemDarwin->Core.u.Phys.fAllocated);
112 break;
113
114 case RTR0MEMOBJTYPE_RES_VIRT:
115 AssertMsgFailed(("RTR0MEMOBJTYPE_RES_VIRT\n"));
116 return VERR_INTERNAL_ERROR;
117 break;
118
119 case RTR0MEMOBJTYPE_MAPPING:
120 /* nothing to do here. */
121 break;
122
123 default:
124 AssertMsgFailed(("enmType=%d\n", pMemDarwin->Core.enmType));
125 return VERR_INTERNAL_ERROR;
126 }
127
128 return VINF_SUCCESS;
129}
130
131
132int rtR0MemObjNativeAllocPage(PPRTR0MEMOBJINTERNAL ppMem, size_t cb, bool fExecutable)
133{
134 /*
135 * Try allocate the memory and create it's IOMemoryDescriptor first.
136 */
137 int rc = VERR_NO_PAGE_MEMORY;
138 AssertCompile(sizeof(IOPhysicalAddress) == 4);
139 void *pv = IOMallocAligned(cb, PAGE_SIZE);
140 if (pv)
141 {
142 IOMemoryDescriptor *pMemDesc = IOMemoryDescriptor::withAddress((vm_address_t)pv, cb, kIODirectionInOut, kernel_task);
143 if (pMemDesc)
144 {
145 /*
146 * Create the IPRT memory object.
147 */
148 PRTR0MEMOBJDARWIN pMemDarwin = (PRTR0MEMOBJDARWIN)rtR0MemObjNew(sizeof(*pMemDarwin), RTR0MEMOBJTYPE_PAGE, pv, cb);
149 if (pMemDarwin)
150 {
151 pMemDarwin->pMemDesc = pMemDesc;
152 *ppMem = &pMemDarwin->Core;
153 return VINF_SUCCESS;
154 }
155
156 rc = VERR_NO_MEMORY;
157 pMemDesc->release();
158 }
159 else
160 rc = VERR_MEMOBJ_INIT_FAILED;
161 IOFreeAligned(pv, cb);
162 }
163 return rc;
164}
165
166
167int rtR0MemObjNativeAllocLow(PPRTR0MEMOBJINTERNAL ppMem, size_t cb, bool fExecutable)
168{
169 /*
170 * IOMallocContiguous is the most suitable API.
171 */
172 return rtR0MemObjNativeAllocCont(ppMem, cb, fExecutable);
173}
174
175
/**
 * Allocates page aligned, physically contiguous memory and verifies that it
 * ended up below 4GB before wrapping it in an IPRT memory object.
 *
 * @returns IPRT status code.
 * @param   ppMem           Where to store the memory object pointer on success.
 * @param   cb              Number of bytes to allocate.
 * @param   fExecutable     Currently ignored on darwin.
 */
int rtR0MemObjNativeAllocCont(PPRTR0MEMOBJINTERNAL ppMem, size_t cb, bool fExecutable)
{
    /*
     * Try allocate the memory and create its IOMemoryDescriptor first.
     */
    int rc = VERR_NO_CONT_MEMORY;
    AssertCompile(sizeof(IOPhysicalAddress) == 4);
    void *pv = IOMallocContiguous(cb, PAGE_SIZE, NULL);
    if (pv)
    {
        IOMemoryDescriptor *pMemDesc = IOMemoryDescriptor::withAddress((vm_address_t)pv, cb, kIODirectionInOut, kernel_task);
        if (pMemDesc)
        {
            /* a bit of useful paranoia: make sure the first segment really is
               physical, matches what the descriptor reports, and that the whole
               block (PhysAddr + cb) fits below the 4GB boundary. */
            addr64_t PhysAddr = pMemDesc->getPhysicalSegment64(0, NULL);
            Assert(PhysAddr == pMemDesc->getPhysicalAddress());
            if (    PhysAddr > 0
                &&  PhysAddr <= _4G
                &&  PhysAddr + cb <= _4G)
            {
                /*
                 * Create the IPRT memory object.
                 */
                PRTR0MEMOBJDARWIN pMemDarwin = (PRTR0MEMOBJDARWIN)rtR0MemObjNew(sizeof(*pMemDarwin), RTR0MEMOBJTYPE_CONT, pv, cb);
                if (pMemDarwin)
                {
                    pMemDarwin->Core.u.Cont.Phys = PhysAddr;
                    pMemDarwin->pMemDesc = pMemDesc;
                    *ppMem = &pMemDarwin->Core;
                    return VINF_SUCCESS;
                }

                rc = VERR_NO_MEMORY;
            }
            else
            {
                AssertMsgFailed(("PhysAddr=%llx\n", (unsigned long long)PhysAddr));
                rc = VERR_INTERNAL_ERROR;
            }
            pMemDesc->release();
        }
        else
            rc = VERR_MEMOBJ_INIT_FAILED;
        IOFreeContiguous(pv, cb);
    }
    return rc;
}
223
224
/**
 * Allocates physical memory below the given address limit.
 *
 * The intended IOMallocPhysical based implementation is disabled (the symbol
 * turned out not to be exported), so this currently just falls back on the
 * contiguous allocator, which guarantees memory below 4GB.
 *
 * @returns IPRT status code.
 * @param   ppMem           Where to store the memory object pointer on success.
 * @param   cb              Number of bytes to allocate.
 * @param   PhysHighest     The highest permitted physical address (inclusive),
 *                          or NIL_RTHCPHYS for no restriction.
 */
int rtR0MemObjNativeAllocPhys(PPRTR0MEMOBJINTERNAL ppMem, size_t cb, RTHCPHYS PhysHighest)
{
#if 0 /* turned out IOMallocPhysical isn't exported yet. sigh. */
    /*
     * Try allocate the memory and create its IOMemoryDescriptor first.
     * Note that IOMallocPhysical is not working correctly (it's ignoring the mask).
     */

    /* first calc the mask (in the hope that it'll be used) */
    IOPhysicalAddress PhysMask = ~(IOPhysicalAddress)PAGE_OFFSET_MASK;
    if (PhysHighest != NIL_RTHCPHYS)
    {
        PhysMask = ~(IOPhysicalAddress)0;
        while (PhysMask > PhysHighest)
            PhysMask >>= 1;
        /* NOTE(review): this condition looks inverted - it rejects masks that can
           cover the request; presumably it should be PhysMask + 1 >= cb. Dead
           code, but verify before re-enabling. */
        AssertReturn(PhysMask + 1 < cb, VERR_INVALID_PARAMETER);
        PhysMask &= ~(IOPhysicalAddress)PAGE_OFFSET_MASK;
    }

    /* try allocate physical memory. */
    int rc = VERR_NO_PHYS_MEMORY;
    mach_vm_address_t PhysAddr64 = IOMallocPhysical(cb, PhysMask);
    if (PhysAddr64)
    {
        /* Check that the result fits in a 32-bit IOPhysicalAddress and honours
           the caller's limit (IOMallocPhysical ignores the mask, see above). */
        IOPhysicalAddress PhysAddr = PhysAddr64;
        if (    PhysAddr == PhysAddr64
            &&  PhysAddr < PhysHighest
            &&  PhysAddr + cb <= PhysHighest)
        {
            /* create a descriptor. */
            IOMemoryDescriptor *pMemDesc = IOMemoryDescriptor::withPhysicalAddress(PhysAddr, cb, kIODirectionInOut);
            if (pMemDesc)
            {
                Assert(PhysAddr == pMemDesc->getPhysicalAddress());

                /*
                 * Create the IPRT memory object.
                 */
                PRTR0MEMOBJDARWIN pMemDarwin = (PRTR0MEMOBJDARWIN)rtR0MemObjNew(sizeof(*pMemDarwin), RTR0MEMOBJTYPE_PHYS, NULL, cb);
                if (pMemDarwin)
                {
                    pMemDarwin->Core.u.Phys.PhysBase = PhysAddr;
                    pMemDarwin->Core.u.Phys.fAllocated = true;
                    pMemDarwin->pMemDesc = pMemDesc;
                    *ppMem = &pMemDarwin->Core;
                    return VINF_SUCCESS;
                }

                rc = VERR_NO_MEMORY;
                pMemDesc->release();
            }
            else
                rc = VERR_MEMOBJ_INIT_FAILED;
        }
        else
        {
            AssertMsgFailed(("PhysAddr=%#llx PhysAddr64=%#llx PhysHigest=%#llx\n", (unsigned long long)PhysAddr,
                             (unsigned long long)PhysAddr64, (unsigned long long)PhysHighest));
            rc = VERR_INTERNAL_ERROR;
        }

        IOFreePhysical(PhysAddr64, cb);
    }

    /*
     * Just in case IOMallocContiguous doesn't work right, we can try fall back
     * on a contiguous allocation.
     */
    if (rc == VERR_INTERNAL_ERROR || rc == VERR_NO_PHYS_MEMORY)
    {
        int rc2 = rtR0MemObjNativeAllocCont(ppMem, cb, false);
        if (RT_SUCCESS(rc2))
            rc = rc2;
    }

    return rc;

#else

    /* Fallback: contiguous memory is always below 4GB on this darwin version,
       which satisfies the common PhysHighest = 4GB-1 case. PhysHighest itself
       is otherwise ignored here. */
    return rtR0MemObjNativeAllocCont(ppMem, cb, false);
#endif
}
307
308
309int rtR0MemObjNativeEnterPhys(PPRTR0MEMOBJINTERNAL ppMem, RTHCPHYS Phys, size_t cb)
310{
311 /*
312 * Validate the address range and create a descriptor for it.
313 */
314 int rc = VERR_ADDRESS_TOO_BIG;
315 IOPhysicalAddress PhysAddr = Phys;
316 if (PhysAddr == Phys)
317 {
318 IOMemoryDescriptor *pMemDesc = IOMemoryDescriptor::withPhysicalAddress(PhysAddr, cb, kIODirectionInOut);
319 if (pMemDesc)
320 {
321 Assert(PhysAddr == pMemDesc->getPhysicalAddress());
322
323 /*
324 * Create the IPRT memory object.
325 */
326 PRTR0MEMOBJDARWIN pMemDarwin = (PRTR0MEMOBJDARWIN)rtR0MemObjNew(sizeof(*pMemDarwin), RTR0MEMOBJTYPE_PHYS, NULL, cb);
327 if (pMemDarwin)
328 {
329 pMemDarwin->Core.u.Phys.PhysBase = PhysAddr;
330 pMemDarwin->Core.u.Phys.fAllocated = false;
331 pMemDarwin->pMemDesc = pMemDesc;
332 *ppMem = &pMemDarwin->Core;
333 return VINF_SUCCESS;
334 }
335
336 rc = VERR_NO_MEMORY;
337 pMemDesc->release();
338 }
339 }
340 else
341 AssertMsgFailed(("%#llx\n", (unsigned long long)Phys));
342 return rc;
343}
344
345
/**
 * Internal worker for locking down pages.
 *
 * Two implementations exist: a vm_map_wire based one (USE_VM_MAP_WIRE, see the
 * comment at the top of the file) and the default IOMemoryDescriptor::prepare
 * based one.
 *
 * @return IPRT status code.
 * @retval  VINF_SUCCESS on success.
 * @retval  VERR_LOCK_FAILED if the pages could not be wired/prepared.
 * @retval  VERR_NO_MEMORY / VERR_MEMOBJ_INIT_FAILED on object/descriptor
 *          creation failure.
 *
 * @param ppMem Where to store the memory object pointer.
 * @param pv First page.
 * @param cb Number of bytes.
 * @param Task The task \a pv and \a cb refers to.
 */
static int rtR0MemObjNativeLock(PPRTR0MEMOBJINTERNAL ppMem, void *pv, size_t cb, task_t Task)
{
#ifdef USE_VM_MAP_WIRE
    vm_map_t Map = get_task_map(Task);
    Assert(Map);

    /*
     * First try lock the memory.
     */
    int rc = VERR_LOCK_FAILED;
    kern_return_t kr = vm_map_wire(get_task_map(Task), /* NOTE(review): Map above could be reused here. */
                                   (vm_map_offset_t)pv,
                                   (vm_map_offset_t)pv + cb,
                                   VM_PROT_DEFAULT,
                                   0 /* not user */);
    if (kr == KERN_SUCCESS)
    {
        /*
         * Create the IPRT memory object.
         */
        PRTR0MEMOBJDARWIN pMemDarwin = (PRTR0MEMOBJDARWIN)rtR0MemObjNew(sizeof(*pMemDarwin), RTR0MEMOBJTYPE_LOCK, pv, cb);
        if (pMemDarwin)
        {
            pMemDarwin->Core.u.Lock.Process = (RTPROCESS)Task;
            *ppMem = &pMemDarwin->Core;
            return VINF_SUCCESS;
        }
        /* Undo the wiring on object creation failure. */
        kr = vm_map_unwire(get_task_map(Task), (vm_map_offset_t)pv, (vm_map_offset_t)pv + cb, 0 /* not user */);
        Assert(kr == KERN_SUCCESS);
        rc = VERR_NO_MEMORY;
    }

#else

    /*
     * Create a descriptor and try lock it (prepare).
     * The matching complete() is done by rtR0MemObjNativeFree.
     */
    int rc = VERR_MEMOBJ_INIT_FAILED;
    IOMemoryDescriptor *pMemDesc = IOMemoryDescriptor::withAddress((vm_address_t)pv, cb, kIODirectionInOut, Task);
    if (pMemDesc)
    {
        IOReturn IORet = pMemDesc->prepare(kIODirectionInOut);
        if (IORet == kIOReturnSuccess)
        {
            /*
             * Create the IPRT memory object.
             */
            PRTR0MEMOBJDARWIN pMemDarwin = (PRTR0MEMOBJDARWIN)rtR0MemObjNew(sizeof(*pMemDarwin), RTR0MEMOBJTYPE_LOCK, pv, cb);
            if (pMemDarwin)
            {
                pMemDarwin->Core.u.Lock.Process = (RTPROCESS)Task;
                pMemDarwin->pMemDesc = pMemDesc;
                *ppMem = &pMemDarwin->Core;
                return VINF_SUCCESS;
            }

            pMemDesc->complete();
            rc = VERR_NO_MEMORY;
        }
        else
            rc = VERR_LOCK_FAILED;
        pMemDesc->release();
    }
#endif
    return rc;
}
422
423
int rtR0MemObjNativeLockUser(PPRTR0MEMOBJINTERNAL ppMem, void *pv, size_t cb)
{
    /* Lock pages in the calling process' address space. */
    return rtR0MemObjNativeLock(ppMem, pv, cb, current_task());
}
428
429
int rtR0MemObjNativeLockKernel(PPRTR0MEMOBJINTERNAL ppMem, void *pv, size_t cb)
{
    /* Lock pages in the kernel address space. */
    return rtR0MemObjNativeLock(ppMem, pv, cb, kernel_task);
}
434
435
int rtR0MemObjNativeReserveKernel(PPRTR0MEMOBJINTERNAL ppMem, void *pvFixed, size_t cb, size_t uAlignment)
{
    /* Reserving kernel virtual address space is not implemented on darwin;
       rtR0MemObjNativeFree asserts on RTR0MEMOBJTYPE_RES_VIRT accordingly. */
    return VERR_NOT_IMPLEMENTED;
}
440
441
int rtR0MemObjNativeReserveUser(PPRTR0MEMOBJINTERNAL ppMem, void *pvFixed, size_t cb, size_t uAlignment)
{
    /* Reserving user virtual address space is not implemented on darwin;
       rtR0MemObjNativeFree asserts on RTR0MEMOBJTYPE_RES_VIRT accordingly. */
    return VERR_NOT_IMPLEMENTED;
}
446
447
448int rtR0MemObjNativeMapKernel(PPRTR0MEMOBJINTERNAL ppMem, RTR0MEMOBJ pMemToMap, void *pvFixed, size_t uAlignment, unsigned fProt)
449{
450 /*
451 * Must have a memory descriptor.
452 */
453 int rc = VERR_INVALID_PARAMETER;
454 PRTR0MEMOBJDARWIN pMemToMapDarwin = (PRTR0MEMOBJDARWIN)pMemToMap;
455 if (pMemToMapDarwin->pMemDesc)
456 {
457 IOMemoryMap *pMemMap = pMemToMapDarwin->pMemDesc->map(kernel_task, kIOMapAnywhere,
458 kIOMapAnywhere | kIOMapDefaultCache);
459 if (pMemMap)
460 {
461 IOVirtualAddress VirtAddr = pMemMap->getVirtualAddress();
462 void *pv = (void *)(uintptr_t)VirtAddr;
463 if ((uintptr_t)pv == VirtAddr)
464 {
465 /*
466 * Create the IPRT memory object.
467 */
468 PRTR0MEMOBJDARWIN pMemDarwin = (PRTR0MEMOBJDARWIN)rtR0MemObjNew(sizeof(*pMemDarwin), RTR0MEMOBJTYPE_MAPPING,
469 pv, pMemToMapDarwin->Core.cb);
470 if (pMemDarwin)
471 {
472 pMemDarwin->Core.u.Mapping.Process = NIL_RTPROCESS;
473 pMemDarwin->pMemMap = pMemMap;
474 *ppMem = &pMemDarwin->Core;
475 return VINF_SUCCESS;
476 }
477
478 rc = VERR_NO_MEMORY;
479 }
480 else
481 rc = VERR_ADDRESS_TOO_BIG;
482 pMemMap->release();
483 }
484 else
485 rc = VERR_MAP_FAILED;
486 }
487 return rc;
488}
489
490
491int rtR0MemObjNativeMapUser(PPRTR0MEMOBJINTERNAL ppMem, RTR0MEMOBJ pMemToMap, void *pvFixed, size_t uAlignment, unsigned fProt)
492{
493 /*
494 * Must have a memory descriptor.
495 */
496 int rc = VERR_INVALID_PARAMETER;
497 PRTR0MEMOBJDARWIN pMemToMapDarwin = (PRTR0MEMOBJDARWIN)pMemToMap;
498 if (pMemToMapDarwin->pMemDesc)
499 {
500 Assert(current_task() != kernel_task);
501 IOMemoryMap *pMemMap = pMemToMapDarwin->pMemDesc->map(current_task(), kIOMapAnywhere,
502 kIOMapAnywhere | kIOMapDefaultCache);
503 if (pMemMap)
504 {
505 IOVirtualAddress VirtAddr = pMemMap->getVirtualAddress();
506 void *pv = (void *)(uintptr_t)VirtAddr;
507 if ((uintptr_t)pv == VirtAddr)
508 {
509 /*
510 * Create the IPRT memory object.
511 */
512 PRTR0MEMOBJDARWIN pMemDarwin = (PRTR0MEMOBJDARWIN)rtR0MemObjNew(sizeof(*pMemDarwin), RTR0MEMOBJTYPE_MAPPING,
513 pv, pMemToMapDarwin->Core.cb);
514 if (pMemDarwin)
515 {
516 pMemDarwin->Core.u.Mapping.Process = /*RTProcSelf()*/(RTPROCESS)current_task();
517 pMemDarwin->pMemMap = pMemMap;
518 *ppMem = &pMemDarwin->Core;
519 return VINF_SUCCESS;
520 }
521
522 rc = VERR_NO_MEMORY;
523 }
524 else
525 rc = VERR_ADDRESS_TOO_BIG;
526 pMemMap->release();
527 }
528 else
529 rc = VERR_MAP_FAILED;
530 }
531 return rc;
532}
533
534
/**
 * Gets the host physical address of the given page within a memory object.
 *
 * @returns The physical address, or NIL_RTHCPHYS on failure.
 * @param   pMem    The memory object.
 * @param   iPage   Zero-based page index within the object.
 */
RTHCPHYS rtR0MemObjNativeGetPagePhysAddr(PRTR0MEMOBJINTERNAL pMem, unsigned iPage)
{
    RTHCPHYS PhysAddr;
    PRTR0MEMOBJDARWIN pMemDarwin = (PRTR0MEMOBJDARWIN)pMem;

#ifdef USE_VM_MAP_WIRE
    /*
     * Locked memory doesn't have a memory descriptor and
     * needs to be handled differently.
     */
    if (pMemDarwin->Core.enmType == RTR0MEMOBJTYPE_LOCK)
    {
        ppnum_t PgNo;
        if (pMemDarwin->Core.u.Lock.Process == NIL_RTPROCESS)
            PgNo = pmap_find_phys(kernel_pmap, (uintptr_t)pMemDarwin->Core.pv + iPage * PAGE_SIZE);
        else
        {
            /*
             * From what I can tell, Apple seems to have locked up the all the
             * available interfaces that could help us obtain the pmap_t of a task
             * or vm_map_t.
             *
             * So, we'll have to figure out where in the vm_map_t structure it is
             * and read it our selves. ASSUMING that kernel_pmap is pointed to by
             * kernel_map->pmap, we scan kernel_map to locate the structure offset.
             * Not nice, but it will hopefully do the job in a reliable manner...
             *
             * (get_task_pmap, get_map_pmap or vm_map_pmap is what we really need btw.)
             */
            /* Lazily determined byte offset of the pmap pointer inside vm_map_t;
               -1 until the scan below has found it. */
            static int s_offPmap = -1;
            if (RT_UNLIKELY(s_offPmap == -1))
            {
                /* Scan the first 64 pointer-sized slots of kernel_map for the
                   known kernel_pmap value to discover the field offset. */
                pmap_t const *p = (pmap_t *)kernel_map;
                pmap_t const * const pEnd = p + 64;
                for (; p < pEnd; p++)
                    if (*p == kernel_pmap)
                    {
                        s_offPmap = (uintptr_t)p - (uintptr_t)kernel_map;
                        break;
                    }
                AssertReturn(s_offPmap >= 0, NIL_RTHCPHYS);
            }
            /* Apply the discovered offset to the target task's map to get its pmap. */
            pmap_t Pmap = *(pmap_t *)((uintptr_t)get_task_map((task_t)pMemDarwin->Core.u.Lock.Process) + s_offPmap);
            PgNo = pmap_find_phys(Pmap, (uintptr_t)pMemDarwin->Core.pv + iPage * PAGE_SIZE);
        }

        AssertReturn(PgNo, NIL_RTHCPHYS);
        PhysAddr = (RTHCPHYS)PgNo << PAGE_SHIFT;
        Assert((PhysAddr >> PAGE_SHIFT) == PgNo);
    }
    else
#endif /* USE_VM_MAP_WIRE */
    {
        /*
         * Get the memory descriptor. Mapping objects only carry an IOMemoryMap,
         * so fall back on the descriptor behind the map in that case.
         */
        IOMemoryDescriptor *pMemDesc = pMemDarwin->pMemDesc;
        if (!pMemDesc)
            pMemDesc = pMemDarwin->pMemMap->getMemoryDescriptor();
        AssertReturn(pMemDesc, NIL_RTHCPHYS);

        /*
         * If we've got a memory descriptor, use getPhysicalSegment64().
         */
        addr64_t Addr = pMemDesc->getPhysicalSegment64(iPage * PAGE_SIZE, NULL);
        AssertMsgReturn(Addr, ("iPage=%u\n", iPage), NIL_RTHCPHYS);
        PhysAddr = Addr;
        AssertMsgReturn(PhysAddr == Addr, ("PhysAddr=%VHp Addr=%RX64\n", PhysAddr, (uint64_t)Addr), NIL_RTHCPHYS);
    }

    return PhysAddr;
}
607
Note: See TracBrowser for help on using the repository browser.

© 2024 Oracle Support Privacy / Do Not Sell My Info Terms of Use Trademark Policy Automated Access Etiquette