VirtualBox

source: vbox/trunk/src/VBox/Runtime/r0drv/darwin/memobj-r0drv-darwin.cpp@ 4781

Last change on this file since 4781 was 4781, checked in by vboxsync, 17 years ago

eol

  • Property svn:eol-style set to native
  • Property svn:keywords set to Id
File size: 21.8 KB
Line 
1/* $Id: memobj-r0drv-darwin.cpp 4781 2007-09-13 19:07:42Z vboxsync $ */
2/** @file
3 * innotek Portable Runtime - Ring-0 Memory Objects, Darwin.
4 */
5
6/*
7 * Copyright (C) 2006-2007 innotek GmbH
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License as published by the Free Software Foundation,
13 * in version 2 as it comes in the "COPYING" file of the VirtualBox OSE
14 * distribution. VirtualBox OSE is distributed in the hope that it will
15 * be useful, but WITHOUT ANY WARRANTY of any kind.
16 */
17
18
19/*******************************************************************************
20* Header Files *
21*******************************************************************************/
22#include "the-darwin-kernel.h"
23
24#include <iprt/memobj.h>
25#include <iprt/alloc.h>
26#include <iprt/assert.h>
27#include <iprt/log.h>
28#include <iprt/param.h>
29#include <iprt/string.h>
30#include <iprt/process.h>
31#include "internal/memobj.h"
32
33#define USE_VM_MAP_WIRE
34
35
36/*******************************************************************************
37* Structures and Typedefs *
38*******************************************************************************/
39/**
40 * The Darwin version of the memory object structure.
41 */
42typedef struct RTR0MEMOBJDARWIN
43{
44 /** The core structure. */
45 RTR0MEMOBJINTERNAL Core;
46 /** Pointer to the memory descriptor created for allocated and locked memory. */
47 IOMemoryDescriptor *pMemDesc;
48 /** Pointer to the memory mapping object for mapped memory. */
49 IOMemoryMap *pMemMap;
50} RTR0MEMOBJDARWIN, *PRTR0MEMOBJDARWIN;
51
52
53int rtR0MemObjNativeFree(RTR0MEMOBJ pMem)
54{
55 PRTR0MEMOBJDARWIN pMemDarwin = (PRTR0MEMOBJDARWIN)pMem;
56
57 /*
58 * Release the IOMemoryDescriptor/IOMemoryMap associated with the object.
59 */
60 if (pMemDarwin->pMemDesc)
61 {
62 if (pMemDarwin->Core.enmType == RTR0MEMOBJTYPE_LOCK)
63 pMemDarwin->pMemDesc->complete(); /* paranoia */
64 pMemDarwin->pMemDesc->release();
65 pMemDarwin->pMemDesc = NULL;
66 Assert(!pMemDarwin->pMemMap);
67 }
68 else if (pMemDarwin->pMemMap)
69 {
70 pMemDarwin->pMemMap->release();
71 pMemDarwin->pMemMap = NULL;
72 }
73
74 /*
75 * Release any memory that we've allocated or locked.
76 */
77 switch (pMemDarwin->Core.enmType)
78 {
79 case RTR0MEMOBJTYPE_LOW:
80 case RTR0MEMOBJTYPE_PAGE:
81 IOFreeAligned(pMemDarwin->Core.pv, pMemDarwin->Core.cb);
82 break;
83
84 case RTR0MEMOBJTYPE_CONT:
85 IOFreeContiguous(pMemDarwin->Core.pv, pMemDarwin->Core.cb);
86 break;
87
88 case RTR0MEMOBJTYPE_LOCK:
89 {
90#ifdef USE_VM_MAP_WIRE
91 vm_map_t Map = pMemDarwin->Core.u.Lock.R0Process != NIL_RTR0PROCESS
92 ? get_task_map((task_t)pMemDarwin->Core.u.Lock.R0Process)
93 : kernel_map;
94 kern_return_t kr = vm_map_unwire(Map,
95 (vm_map_offset_t)pMemDarwin->Core.pv,
96 (vm_map_offset_t)pMemDarwin->Core.pv + pMemDarwin->Core.cb,
97 0 /* not user */);
98 AssertRC(kr == KERN_SUCCESS); /** @todo don't ignore... */
99#endif
100 break;
101 }
102
103 case RTR0MEMOBJTYPE_PHYS:
104 /*if (pMemDarwin->Core.u.Phys.fAllocated)
105 IOFreePhysical(pMemDarwin->Core.u.Phys.PhysBase, pMemDarwin->Core.cb);*/
106 Assert(!pMemDarwin->Core.u.Phys.fAllocated);
107 break;
108
109 case RTR0MEMOBJTYPE_PHYS_NC:
110 AssertMsgFailed(("RTR0MEMOBJTYPE_PHYS_NC\n"));
111 return VERR_INTERNAL_ERROR;
112 break;
113
114 case RTR0MEMOBJTYPE_RES_VIRT:
115 AssertMsgFailed(("RTR0MEMOBJTYPE_RES_VIRT\n"));
116 return VERR_INTERNAL_ERROR;
117 break;
118
119 case RTR0MEMOBJTYPE_MAPPING:
120 /* nothing to do here. */
121 break;
122
123 default:
124 AssertMsgFailed(("enmType=%d\n", pMemDarwin->Core.enmType));
125 return VERR_INTERNAL_ERROR;
126 }
127
128 return VINF_SUCCESS;
129}
130
131
132int rtR0MemObjNativeAllocPage(PPRTR0MEMOBJINTERNAL ppMem, size_t cb, bool fExecutable)
133{
134 /*
135 * Try allocate the memory and create it's IOMemoryDescriptor first.
136 */
137 int rc = VERR_NO_PAGE_MEMORY;
138 AssertCompile(sizeof(IOPhysicalAddress) == 4);
139 void *pv = IOMallocAligned(cb, PAGE_SIZE);
140 if (pv)
141 {
142 IOMemoryDescriptor *pMemDesc = IOMemoryDescriptor::withAddress((vm_address_t)pv, cb, kIODirectionInOut, kernel_task);
143 if (pMemDesc)
144 {
145 /*
146 * Create the IPRT memory object.
147 */
148 PRTR0MEMOBJDARWIN pMemDarwin = (PRTR0MEMOBJDARWIN)rtR0MemObjNew(sizeof(*pMemDarwin), RTR0MEMOBJTYPE_PAGE, pv, cb);
149 if (pMemDarwin)
150 {
151 pMemDarwin->pMemDesc = pMemDesc;
152 *ppMem = &pMemDarwin->Core;
153 return VINF_SUCCESS;
154 }
155
156 rc = VERR_NO_MEMORY;
157 pMemDesc->release();
158 }
159 else
160 rc = VERR_MEMOBJ_INIT_FAILED;
161 IOFreeAligned(pv, cb);
162 }
163 return rc;
164}
165
166
167int rtR0MemObjNativeAllocLow(PPRTR0MEMOBJINTERNAL ppMem, size_t cb, bool fExecutable)
168{
169#if 1
170 /*
171 * Allocating 128KB for the low page pool can bit a bit exhausting on the kernel,
172 * it frequnetly causes the entire box to lock up on startup.
173 *
174 * So, try allocate the memory using IOMallocAligned first and if we get any high
175 * physical memory we'll release it and fall back on IOMAllocContiguous.
176 */
177 int rc = VERR_NO_PAGE_MEMORY;
178 AssertCompile(sizeof(IOPhysicalAddress) == 4);
179 void *pv = IOMallocAligned(cb, PAGE_SIZE);
180 if (pv)
181 {
182 IOMemoryDescriptor *pMemDesc = IOMemoryDescriptor::withAddress((vm_address_t)pv, cb, kIODirectionInOut, kernel_task);
183 if (pMemDesc)
184 {
185 /*
186 * Check if it's all below 4GB.
187 */
188 for (IOByteCount off = 0; off < cb; off += PAGE_SIZE)
189 {
190 addr64_t Addr = pMemDesc->getPhysicalSegment64(off, NULL);
191 if (Addr > (uint32_t)(_4G - PAGE_SIZE))
192 {
193 /* Ok, we failed, fall back on contiguous allocation. */
194 pMemDesc->release();
195 IOFreeAligned(pv, cb);
196 return rtR0MemObjNativeAllocCont(ppMem, cb, fExecutable);
197 }
198 }
199
200 /*
201 * Create the IPRT memory object.
202 */
203 PRTR0MEMOBJDARWIN pMemDarwin = (PRTR0MEMOBJDARWIN)rtR0MemObjNew(sizeof(*pMemDarwin), RTR0MEMOBJTYPE_LOW, pv, cb);
204 if (pMemDarwin)
205 {
206 pMemDarwin->pMemDesc = pMemDesc;
207 *ppMem = &pMemDarwin->Core;
208 return VINF_SUCCESS;
209 }
210
211 rc = VERR_NO_MEMORY;
212 pMemDesc->release();
213 }
214 else
215 rc = VERR_MEMOBJ_INIT_FAILED;
216 IOFreeAligned(pv, cb);
217 }
218 return rc;
219
220#else
221
222 /*
223 * IOMallocContiguous is the most suitable API.
224 */
225 return rtR0MemObjNativeAllocCont(ppMem, cb, fExecutable);
226#endif
227}
228
229
230int rtR0MemObjNativeAllocCont(PPRTR0MEMOBJINTERNAL ppMem, size_t cb, bool fExecutable)
231{
232 /*
233 * Try allocate the memory and create it's IOMemoryDescriptor first.
234 */
235 int rc = VERR_NO_CONT_MEMORY;
236 AssertCompile(sizeof(IOPhysicalAddress) == 4);
237 void *pv = IOMallocContiguous(cb, PAGE_SIZE, NULL);
238 if (pv)
239 {
240 IOMemoryDescriptor *pMemDesc = IOMemoryDescriptor::withAddress((vm_address_t)pv, cb, kIODirectionInOut, kernel_task);
241 if (pMemDesc)
242 {
243 /* a bit of useful paranoia. */
244 addr64_t PhysAddr = pMemDesc->getPhysicalSegment64(0, NULL);
245 Assert(PhysAddr == pMemDesc->getPhysicalAddress());
246 if ( PhysAddr > 0
247 && PhysAddr <= _4G
248 && PhysAddr + cb <= _4G)
249 {
250 /*
251 * Create the IPRT memory object.
252 */
253 PRTR0MEMOBJDARWIN pMemDarwin = (PRTR0MEMOBJDARWIN)rtR0MemObjNew(sizeof(*pMemDarwin), RTR0MEMOBJTYPE_CONT, pv, cb);
254 if (pMemDarwin)
255 {
256 pMemDarwin->Core.u.Cont.Phys = PhysAddr;
257 pMemDarwin->pMemDesc = pMemDesc;
258 *ppMem = &pMemDarwin->Core;
259 return VINF_SUCCESS;
260 }
261
262 rc = VERR_NO_MEMORY;
263 }
264 else
265 {
266 AssertMsgFailed(("PhysAddr=%llx\n", (unsigned long long)PhysAddr));
267 rc = VERR_INTERNAL_ERROR;
268 }
269 pMemDesc->release();
270 }
271 else
272 rc = VERR_MEMOBJ_INIT_FAILED;
273 IOFreeContiguous(pv, cb);
274 }
275 return rc;
276}
277
278
/**
 * Allocates physical memory below PhysHighest.
 *
 * Currently simply delegates to rtR0MemObjNativeAllocCont (which allocates
 * below 4GB), because IOMallocPhysical turned out not to be exported by the
 * kernel; the intended implementation is preserved in the #if 0 block below.
 *
 * @returns IPRT status code.
 * @param   ppMem       Where to store the resulting memory object.
 * @param   cb          Number of bytes to allocate.
 * @param   PhysHighest Highest acceptable physical address (inclusive), or
 *                      NIL_RTHCPHYS for no restriction.  NOTE(review): the
 *                      active code path ignores this and always allocates
 *                      below 4GB - confirm that callers can live with that.
 */
int rtR0MemObjNativeAllocPhys(PPRTR0MEMOBJINTERNAL ppMem, size_t cb, RTHCPHYS PhysHighest)
{
#if 0 /* turned out IOMallocPhysical isn't exported yet. sigh. */
    /*
     * Try allocate the memory and create it's IOMemoryDescriptor first.
     * Note that IOMallocPhysical is not working correctly (it's ignoring the mask).
     */

    /* first calc the mask (in the hope that it'll be used) */
    IOPhysicalAddress PhysMask = ~(IOPhysicalAddress)PAGE_OFFSET_MASK;
    if (PhysHighest != NIL_RTHCPHYS)
    {
        PhysMask = ~(IOPhysicalAddress)0;
        while (PhysMask > PhysHighest)
            PhysMask >>= 1;
        /* NOTE(review): this condition looks inverted - one would expect the
           alignment boundary (PhysMask + 1) to be required to be >= cb.
           Verify before ever enabling this code path. */
        AssertReturn(PhysMask + 1 < cb, VERR_INVALID_PARAMETER);
        PhysMask &= ~(IOPhysicalAddress)PAGE_OFFSET_MASK;
    }

    /* try allocate physical memory. */
    int rc = VERR_NO_PHYS_MEMORY;
    mach_vm_address_t PhysAddr64 = IOMallocPhysical(cb, PhysMask);
    if (PhysAddr64)
    {
        /* The 64-bit address must be representable as a 32-bit IOPhysicalAddress
           and the whole range must lie below PhysHighest. */
        IOPhysicalAddress PhysAddr = PhysAddr64;
        if (    PhysAddr == PhysAddr64
            &&  PhysAddr < PhysHighest
            &&  PhysAddr + cb <= PhysHighest)
        {
            /* create a descriptor. */
            IOMemoryDescriptor *pMemDesc = IOMemoryDescriptor::withPhysicalAddress(PhysAddr, cb, kIODirectionInOut);
            if (pMemDesc)
            {
                Assert(PhysAddr == pMemDesc->getPhysicalAddress());

                /*
                 * Create the IPRT memory object.
                 */
                PRTR0MEMOBJDARWIN pMemDarwin = (PRTR0MEMOBJDARWIN)rtR0MemObjNew(sizeof(*pMemDarwin), RTR0MEMOBJTYPE_PHYS, NULL, cb);
                if (pMemDarwin)
                {
                    pMemDarwin->Core.u.Phys.PhysBase = PhysAddr;
                    pMemDarwin->Core.u.Phys.fAllocated = true;
                    pMemDarwin->pMemDesc = pMemDesc;
                    *ppMem = &pMemDarwin->Core;
                    return VINF_SUCCESS;
                }

                rc = VERR_NO_MEMORY;
                pMemDesc->release();
            }
            else
                rc = VERR_MEMOBJ_INIT_FAILED;
        }
        else
        {
            AssertMsgFailed(("PhysAddr=%#llx PhysAddr64=%#llx PhysHigest=%#llx\n", (unsigned long long)PhysAddr,
                             (unsigned long long)PhysAddr64, (unsigned long long)PhysHighest));
            rc = VERR_INTERNAL_ERROR;
        }

        IOFreePhysical(PhysAddr64, cb);
    }

    /*
     * Just in case IOMallocContiguous doesn't work right, we can try fall back
     * on a contiguous allocation.
     */
    if (rc == VERR_INTERNAL_ERROR || rc == VERR_NO_PHYS_MEMORY)
    {
        int rc2 = rtR0MemObjNativeAllocCont(ppMem, cb, false);
        if (RT_SUCCESS(rc2))
            rc = rc2;
    }

    return rc;

#else

    /* Active path: always allocate contiguous memory below 4GB. */
    return rtR0MemObjNativeAllocCont(ppMem, cb, false);
#endif
}
361
362
/**
 * Allocates non-contiguous physical memory below PhysHighest.
 *
 * No dedicated implementation yet; delegates to rtR0MemObjNativeAllocPhys,
 * so the resulting object is a contiguous (CONT) object rather than a
 * PHYS_NC one.
 *
 * @returns IPRT status code.
 * @param   ppMem       Where to store the resulting memory object.
 * @param   cb          Number of bytes to allocate.
 * @param   PhysHighest Highest acceptable physical address (see AllocPhys).
 */
int rtR0MemObjNativeAllocPhysNC(PPRTR0MEMOBJINTERNAL ppMem, size_t cb, RTHCPHYS PhysHighest)
{
    /** @todo rtR0MemObjNativeAllocPhys / darwin. */
    return rtR0MemObjNativeAllocPhys(ppMem, cb, PhysHighest);
}
368
369
370int rtR0MemObjNativeEnterPhys(PPRTR0MEMOBJINTERNAL ppMem, RTHCPHYS Phys, size_t cb)
371{
372 /*
373 * Validate the address range and create a descriptor for it.
374 */
375 int rc = VERR_ADDRESS_TOO_BIG;
376 IOPhysicalAddress PhysAddr = Phys;
377 if (PhysAddr == Phys)
378 {
379 IOMemoryDescriptor *pMemDesc = IOMemoryDescriptor::withPhysicalAddress(PhysAddr, cb, kIODirectionInOut);
380 if (pMemDesc)
381 {
382 Assert(PhysAddr == pMemDesc->getPhysicalAddress());
383
384 /*
385 * Create the IPRT memory object.
386 */
387 PRTR0MEMOBJDARWIN pMemDarwin = (PRTR0MEMOBJDARWIN)rtR0MemObjNew(sizeof(*pMemDarwin), RTR0MEMOBJTYPE_PHYS, NULL, cb);
388 if (pMemDarwin)
389 {
390 pMemDarwin->Core.u.Phys.PhysBase = PhysAddr;
391 pMemDarwin->Core.u.Phys.fAllocated = false;
392 pMemDarwin->pMemDesc = pMemDesc;
393 *ppMem = &pMemDarwin->Core;
394 return VINF_SUCCESS;
395 }
396
397 rc = VERR_NO_MEMORY;
398 pMemDesc->release();
399 }
400 }
401 else
402 AssertMsgFailed(("%#llx\n", (unsigned long long)Phys));
403 return rc;
404}
405
406
407/**
408 * Internal worker for locking down pages.
409 *
410 * @return IPRT status code.
411 *
412 * @param ppMem Where to store the memory object pointer.
413 * @param pv First page.
414 * @param cb Number of bytes.
415 * @param Task The task \a pv and \a cb refers to.
416 */
417static int rtR0MemObjNativeLock(PPRTR0MEMOBJINTERNAL ppMem, void *pv, size_t cb, task_t Task)
418{
419#ifdef USE_VM_MAP_WIRE
420 vm_map_t Map = get_task_map(Task);
421 Assert(Map);
422
423 /*
424 * First try lock the memory.
425 */
426 int rc = VERR_LOCK_FAILED;
427 kern_return_t kr = vm_map_wire(get_task_map(Task),
428 (vm_map_offset_t)pv,
429 (vm_map_offset_t)pv + cb,
430 VM_PROT_DEFAULT,
431 0 /* not user */);
432 if (kr == KERN_SUCCESS)
433 {
434 /*
435 * Create the IPRT memory object.
436 */
437 PRTR0MEMOBJDARWIN pMemDarwin = (PRTR0MEMOBJDARWIN)rtR0MemObjNew(sizeof(*pMemDarwin), RTR0MEMOBJTYPE_LOCK, pv, cb);
438 if (pMemDarwin)
439 {
440 pMemDarwin->Core.u.Lock.R0Process = (RTR0PROCESS)Task;
441 *ppMem = &pMemDarwin->Core;
442 return VINF_SUCCESS;
443 }
444
445 kr = vm_map_unwire(get_task_map(Task), (vm_map_offset_t)pv, (vm_map_offset_t)pv + cb, 0 /* not user */);
446 Assert(kr == KERN_SUCCESS);
447 rc = VERR_NO_MEMORY;
448 }
449
450#else
451
452 /*
453 * Create a descriptor and try lock it (prepare).
454 */
455 int rc = VERR_MEMOBJ_INIT_FAILED;
456 IOMemoryDescriptor *pMemDesc = IOMemoryDescriptor::withAddress((vm_address_t)pv, cb, kIODirectionInOut, Task);
457 if (pMemDesc)
458 {
459 IOReturn IORet = pMemDesc->prepare(kIODirectionInOut);
460 if (IORet == kIOReturnSuccess)
461 {
462 /*
463 * Create the IPRT memory object.
464 */
465 PRTR0MEMOBJDARWIN pMemDarwin = (PRTR0MEMOBJDARWIN)rtR0MemObjNew(sizeof(*pMemDarwin), RTR0MEMOBJTYPE_LOCK, pv, cb);
466 if (pMemDarwin)
467 {
468 pMemDarwin->Core.u.Lock.R0Process = (RTR0PROCESS)Task;
469 pMemDarwin->pMemDesc = pMemDesc;
470 *ppMem = &pMemDarwin->Core;
471 return VINF_SUCCESS;
472 }
473
474 pMemDesc->complete();
475 rc = VERR_NO_MEMORY;
476 }
477 else
478 rc = VERR_LOCK_FAILED;
479 pMemDesc->release();
480 }
481#endif
482 return rc;
483}
484
485
/**
 * Locks down pages in a user process' address space.
 *
 * @returns IPRT status code (see rtR0MemObjNativeLock).
 * @param   ppMem       Where to store the memory object pointer.
 * @param   R3Ptr       First page (ring-3 address).
 * @param   cb          Number of bytes.
 * @param   R0Process   The process the pages belong to (a Darwin task_t in disguise).
 */
int rtR0MemObjNativeLockUser(PPRTR0MEMOBJINTERNAL ppMem, RTR3PTR R3Ptr, size_t cb, RTR0PROCESS R0Process)
{
    return rtR0MemObjNativeLock(ppMem, (void *)R3Ptr, cb, (task_t)R0Process);
}
490
491
/**
 * Locks down pages in the kernel address space.
 *
 * @returns IPRT status code (see rtR0MemObjNativeLock).
 * @param   ppMem   Where to store the memory object pointer.
 * @param   pv      First page.
 * @param   cb      Number of bytes.
 */
int rtR0MemObjNativeLockKernel(PPRTR0MEMOBJINTERNAL ppMem, void *pv, size_t cb)
{
    return rtR0MemObjNativeLock(ppMem, pv, cb, kernel_task);
}
496
497
/**
 * Reserves kernel virtual address space - not implemented on Darwin.
 *
 * @returns VERR_NOT_IMPLEMENTED.
 */
int rtR0MemObjNativeReserveKernel(PPRTR0MEMOBJINTERNAL ppMem, void *pvFixed, size_t cb, size_t uAlignment)
{
    return VERR_NOT_IMPLEMENTED;
}
502
503
/**
 * Reserves user virtual address space - not implemented on Darwin.
 *
 * @returns VERR_NOT_IMPLEMENTED.
 */
int rtR0MemObjNativeReserveUser(PPRTR0MEMOBJINTERNAL ppMem, RTR3PTR R3PtrFixed, size_t cb, size_t uAlignment, RTR0PROCESS R0Process)
{
    return VERR_NOT_IMPLEMENTED;
}
508
509
510int rtR0MemObjNativeMapKernel(PPRTR0MEMOBJINTERNAL ppMem, RTR0MEMOBJ pMemToMap, void *pvFixed, size_t uAlignment, unsigned fProt)
511{
512 /*
513 * Must have a memory descriptor.
514 */
515 int rc = VERR_INVALID_PARAMETER;
516 PRTR0MEMOBJDARWIN pMemToMapDarwin = (PRTR0MEMOBJDARWIN)pMemToMap;
517 if (pMemToMapDarwin->pMemDesc)
518 {
519 IOMemoryMap *pMemMap = pMemToMapDarwin->pMemDesc->map(kernel_task, kIOMapAnywhere,
520 kIOMapAnywhere | kIOMapDefaultCache);
521 if (pMemMap)
522 {
523 IOVirtualAddress VirtAddr = pMemMap->getVirtualAddress();
524 void *pv = (void *)(uintptr_t)VirtAddr;
525 if ((uintptr_t)pv == VirtAddr)
526 {
527 /*
528 * Create the IPRT memory object.
529 */
530 PRTR0MEMOBJDARWIN pMemDarwin = (PRTR0MEMOBJDARWIN)rtR0MemObjNew(sizeof(*pMemDarwin), RTR0MEMOBJTYPE_MAPPING,
531 pv, pMemToMapDarwin->Core.cb);
532 if (pMemDarwin)
533 {
534 pMemDarwin->Core.u.Mapping.R0Process = NIL_RTR0PROCESS;
535 pMemDarwin->pMemMap = pMemMap;
536 *ppMem = &pMemDarwin->Core;
537 return VINF_SUCCESS;
538 }
539
540 rc = VERR_NO_MEMORY;
541 }
542 else
543 rc = VERR_ADDRESS_TOO_BIG;
544 pMemMap->release();
545 }
546 else
547 rc = VERR_MAP_FAILED;
548 }
549 return rc;
550}
551
552
553int rtR0MemObjNativeMapUser(PPRTR0MEMOBJINTERNAL ppMem, RTR0MEMOBJ pMemToMap, RTR3PTR R3PtrFixed, size_t uAlignment, unsigned fProt, RTR0PROCESS R0Process)
554{
555 /*
556 * Must have a memory descriptor.
557 */
558 int rc = VERR_INVALID_PARAMETER;
559 PRTR0MEMOBJDARWIN pMemToMapDarwin = (PRTR0MEMOBJDARWIN)pMemToMap;
560 if (pMemToMapDarwin->pMemDesc)
561 {
562 IOMemoryMap *pMemMap = pMemToMapDarwin->pMemDesc->map((task_t)R0Process, kIOMapAnywhere,
563 kIOMapAnywhere | kIOMapDefaultCache);
564 if (pMemMap)
565 {
566 IOVirtualAddress VirtAddr = pMemMap->getVirtualAddress();
567 void *pv = (void *)(uintptr_t)VirtAddr;
568 if ((uintptr_t)pv == VirtAddr)
569 {
570 /*
571 * Create the IPRT memory object.
572 */
573 PRTR0MEMOBJDARWIN pMemDarwin = (PRTR0MEMOBJDARWIN)rtR0MemObjNew(sizeof(*pMemDarwin), RTR0MEMOBJTYPE_MAPPING,
574 pv, pMemToMapDarwin->Core.cb);
575 if (pMemDarwin)
576 {
577 pMemDarwin->Core.u.Mapping.R0Process = R0Process;
578 pMemDarwin->pMemMap = pMemMap;
579 *ppMem = &pMemDarwin->Core;
580 return VINF_SUCCESS;
581 }
582
583 rc = VERR_NO_MEMORY;
584 }
585 else
586 rc = VERR_ADDRESS_TOO_BIG;
587 pMemMap->release();
588 }
589 else
590 rc = VERR_MAP_FAILED;
591 }
592 return rc;
593}
594
595
/**
 * Gets the host physical address of a page within the memory object.
 *
 * For locked (wired) memory in USE_VM_MAP_WIRE builds there is no memory
 * descriptor, so the pmap is queried directly; for everything else the
 * IOMemoryDescriptor is used.
 *
 * @returns The physical address, or NIL_RTHCPHYS on failure.
 * @param   pMem    The memory object.
 * @param   iPage   Zero-based page index within the object.
 */
RTHCPHYS rtR0MemObjNativeGetPagePhysAddr(PRTR0MEMOBJINTERNAL pMem, size_t iPage)
{
    RTHCPHYS PhysAddr;
    PRTR0MEMOBJDARWIN pMemDarwin = (PRTR0MEMOBJDARWIN)pMem;

#ifdef USE_VM_MAP_WIRE
    /*
     * Locked memory doesn't have a memory descriptor and
     * needs to be handled differently.
     */
    if (pMemDarwin->Core.enmType == RTR0MEMOBJTYPE_LOCK)
    {
        ppnum_t PgNo;
        if (pMemDarwin->Core.u.Lock.R0Process == NIL_RTR0PROCESS)
            /* Kernel memory: query the kernel pmap directly. */
            PgNo = pmap_find_phys(kernel_pmap, (uintptr_t)pMemDarwin->Core.pv + iPage * PAGE_SIZE);
        else
        {
            /*
             * From what I can tell, Apple seems to have locked up the all the
             * available interfaces that could help us obtain the pmap_t of a task
             * or vm_map_t.
             *
             * So, we'll have to figure out where in the vm_map_t structure it is
             * and read it ourselves. ASSUMING that kernel_pmap is pointed to by
             * kernel_map->pmap, we scan kernel_map to locate the structure offset.
             * Not nice, but it will hopefully do the job in a reliable manner...
             *
             * (get_task_pmap, get_map_pmap or vm_map_pmap is what we really need btw.)
             *
             * NOTE(review): this depends on the private vm_map layout being stable
             * across kernel versions and on the pmap pointer living within the
             * first 64 pointer-sized slots - verify on each new OS release.
             */
            static int s_offPmap = -1;   /* cached byte offset of the pmap field, -1 = not found yet */
            if (RT_UNLIKELY(s_offPmap == -1))
            {
                /* Scan the first 64 pointer slots of kernel_map for kernel_pmap. */
                pmap_t const *p = (pmap_t *)kernel_map;
                pmap_t const * const pEnd = p + 64;
                for (; p < pEnd; p++)
                    if (*p == kernel_pmap)
                    {
                        s_offPmap = (uintptr_t)p - (uintptr_t)kernel_map;
                        break;
                    }
                AssertReturn(s_offPmap >= 0, NIL_RTHCPHYS);
            }
            /* Apply the discovered offset to the target task's map. */
            pmap_t Pmap = *(pmap_t *)((uintptr_t)get_task_map((task_t)pMemDarwin->Core.u.Lock.R0Process) + s_offPmap);
            PgNo = pmap_find_phys(Pmap, (uintptr_t)pMemDarwin->Core.pv + iPage * PAGE_SIZE);
        }

        /* pmap_find_phys returns 0 when no translation exists. */
        AssertReturn(PgNo, NIL_RTHCPHYS);
        PhysAddr = (RTHCPHYS)PgNo << PAGE_SHIFT;
        Assert((PhysAddr >> PAGE_SHIFT) == PgNo);  /* no truncation in the shift */
    }
    else
#endif /* USE_VM_MAP_WIRE */
    {
        /*
         * Get the memory descriptor.
         */
        IOMemoryDescriptor *pMemDesc = pMemDarwin->pMemDesc;
        if (!pMemDesc)
            pMemDesc = pMemDarwin->pMemMap->getMemoryDescriptor();
        AssertReturn(pMemDesc, NIL_RTHCPHYS);

        /*
         * If we've got a memory descriptor, use getPhysicalSegment64().
         */
        addr64_t Addr = pMemDesc->getPhysicalSegment64(iPage * PAGE_SIZE, NULL);
        AssertMsgReturn(Addr, ("iPage=%u\n", iPage), NIL_RTHCPHYS);
        PhysAddr = Addr;
        AssertMsgReturn(PhysAddr == Addr, ("PhysAddr=%VHp Addr=%RX64\n", PhysAddr, (uint64_t)Addr), NIL_RTHCPHYS);
    }

    return PhysAddr;
}
668
Note: See TracBrowser for help on using the repository browser.

© 2024 Oracle Support Privacy / Do Not Sell My Info Terms of Use Trademark Policy Automated Access Etiquette