VirtualBox

source: vbox/trunk/src/VBox/Runtime/r0drv/darwin/memobj-r0drv-darwin.cpp@ 19956

Last change on this file since 19956 was 19956, checked in by vboxsync, 16 years ago

memobj-r0drv-darwin.cpp: Added an ugly hack in rtR0MemObjNativeMapKernel that touches all the pages in the mapping to make sure there are page table entries for them. Need to take a second look at this later...
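
The hack amounts to touching every page of the new kernel mapping once so the page table entries exist before the mapping is handed back to callers that may fault with preemption disabled. A minimal sketch of the idea (the actual loop in rtR0MemObjNativeMapKernel below walks cbSub bytes and uses ASMAtomicCmpXchgU32 so the access cannot be optimised away):

    uint32_t volatile *pu32 = (uint32_t volatile *)pv;
    for (size_t off = 0; off < cbSub; off += PAGE_SIZE)
        ASMAtomicCmpXchgU32((uint32_t volatile *)((uintptr_t)pv + off), 0xdeadbeef, 0xdeadbeef);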

/* $Id: memobj-r0drv-darwin.cpp 19956 2009-05-24 02:34:58Z vboxsync $ */
/** @file
 * IPRT - Ring-0 Memory Objects, Darwin.
 */

/*
 * Copyright (C) 2006-2007 Sun Microsystems, Inc.
 *
 * This file is part of VirtualBox Open Source Edition (OSE), as
 * available from http://www.virtualbox.org. This file is free software;
 * you can redistribute it and/or modify it under the terms of the GNU
 * General Public License (GPL) as published by the Free Software
 * Foundation, in version 2 as it comes in the "COPYING" file of the
 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
 *
 * The contents of this file may alternatively be used under the terms
 * of the Common Development and Distribution License Version 1.0
 * (CDDL) only, as it comes in the "COPYING.CDDL" file of the
 * VirtualBox OSE distribution, in which case the provisions of the
 * CDDL are applicable instead of those of the GPL.
 *
 * You may elect to license modified versions of this file under the
 * terms and conditions of either the GPL or the CDDL or both.
 *
 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa
 * Clara, CA 95054 USA or visit http://www.sun.com if you need
 * additional information or have any questions.
 */


/*******************************************************************************
*   Header Files                                                               *
*******************************************************************************/
#include "the-darwin-kernel.h"

#include <iprt/memobj.h>

#include <iprt/alloc.h>
#include <iprt/asm.h>
#include <iprt/assert.h>
#include <iprt/log.h>
#include <iprt/param.h>
#include <iprt/process.h>
#include <iprt/string.h>

#include "internal/memobj.h"

/*#define USE_VM_MAP_WIRE - may re-enable later when non-mapped allocations are added. */
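/* When USE_VM_MAP_WIRE is defined, rtR0MemObjNativeLock below wires pages with
   vm_map_wire()/vm_map_unwire() instead of going through an IOMemoryDescriptor,
   and rtR0MemObjNativeGetPagePhysAddr resolves locked pages via pmap_find_phys(). */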


/*******************************************************************************
*   Structures and Typedefs                                                    *
*******************************************************************************/
/**
 * The Darwin version of the memory object structure.
 */
typedef struct RTR0MEMOBJDARWIN
{
    /** The core structure. */
    RTR0MEMOBJINTERNAL  Core;
    /** Pointer to the memory descriptor created for allocated and locked memory. */
    IOMemoryDescriptor *pMemDesc;
    /** Pointer to the memory mapping object for mapped memory. */
    IOMemoryMap        *pMemMap;
} RTR0MEMOBJDARWIN, *PRTR0MEMOBJDARWIN;
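/* Note: an object carries at most one of these: allocations and locked memory
   get a pMemDesc, mappings get a pMemMap (see rtR0MemObjNativeFree below). */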


int rtR0MemObjNativeFree(RTR0MEMOBJ pMem)
{
    PRTR0MEMOBJDARWIN pMemDarwin = (PRTR0MEMOBJDARWIN)pMem;

    /*
     * Release the IOMemoryDescriptor/IOMemoryMap associated with the object.
     */
    if (pMemDarwin->pMemDesc)
    {
        if (pMemDarwin->Core.enmType == RTR0MEMOBJTYPE_LOCK)
            pMemDarwin->pMemDesc->complete(); /* paranoia */
        pMemDarwin->pMemDesc->release();
        pMemDarwin->pMemDesc = NULL;
        Assert(!pMemDarwin->pMemMap);
    }
    else if (pMemDarwin->pMemMap)
    {
        pMemDarwin->pMemMap->release();
        pMemDarwin->pMemMap = NULL;
    }

    /*
     * Release any memory that we've allocated or locked.
     */
    switch (pMemDarwin->Core.enmType)
    {
        case RTR0MEMOBJTYPE_LOW:
        case RTR0MEMOBJTYPE_PAGE:
        case RTR0MEMOBJTYPE_CONT:
            break;

        case RTR0MEMOBJTYPE_LOCK:
        {
#ifdef USE_VM_MAP_WIRE
            vm_map_t Map = pMemDarwin->Core.u.Lock.R0Process != NIL_RTR0PROCESS
                         ? get_task_map((task_t)pMemDarwin->Core.u.Lock.R0Process)
                         : kernel_map;
            kern_return_t kr = vm_map_unwire(Map,
                                             (vm_map_offset_t)pMemDarwin->Core.pv,
                                             (vm_map_offset_t)pMemDarwin->Core.pv + pMemDarwin->Core.cb,
                                             0 /* not user */);
            Assert(kr == KERN_SUCCESS); /** @todo don't ignore... */
#endif
            break;
        }

        case RTR0MEMOBJTYPE_PHYS:
            /*if (pMemDarwin->Core.u.Phys.fAllocated)
                IOFreePhysical(pMemDarwin->Core.u.Phys.PhysBase, pMemDarwin->Core.cb);*/
            Assert(!pMemDarwin->Core.u.Phys.fAllocated);
            break;

        case RTR0MEMOBJTYPE_PHYS_NC:
            AssertMsgFailed(("RTR0MEMOBJTYPE_PHYS_NC\n"));
            return VERR_INTERNAL_ERROR;

        case RTR0MEMOBJTYPE_RES_VIRT:
            AssertMsgFailed(("RTR0MEMOBJTYPE_RES_VIRT\n"));
            return VERR_INTERNAL_ERROR;

        case RTR0MEMOBJTYPE_MAPPING:
            /* nothing to do here. */
            break;

        default:
            AssertMsgFailed(("enmType=%d\n", pMemDarwin->Core.enmType));
            return VERR_INTERNAL_ERROR;
    }

    return VINF_SUCCESS;
}


/**
 * Kernel memory alloc worker that uses inTaskWithPhysicalMask.
 *
 * @returns IPRT status code.
 * @retval  VERR_ADDRESS_TOO_BIG try another way.
 *
 * @param   ppMem           Where to return the memory object.
 * @param   cb              The page aligned memory size.
 * @param   fExecutable     Whether the mapping needs to be executable.
 * @param   fContiguous     Whether the backing memory needs to be contiguous.
 * @param   PhysMask        The mask for the backing memory (i.e. range). Use 0 if
 *                          you don't care that much or are speculating.
 * @param   MaxPhysAddr     The max address to verify the result against. Use
 *                          UINT64_MAX if it doesn't matter.
 * @param   enmType         The object type.
 */
static int rtR0MemObjNativeAllocWorker(PPRTR0MEMOBJINTERNAL ppMem, size_t cb,
                                       bool fExecutable, bool fContiguous,
                                       mach_vm_address_t PhysMask, uint64_t MaxPhysAddr,
                                       RTR0MEMOBJTYPE enmType)
{
    /*
     * Try inTaskWithPhysicalMask first, but since we don't quite trust that it
     * actually respects the physical memory mask (10.5.x is certainly busted),
     * we'll use rtR0MemObjNativeAllocCont as a fallback for dealing with that.
     *
     * The kIOMemorySharingTypeMask flag just forces the result to be page aligned.
     */
    int rc;
    IOBufferMemoryDescriptor *pMemDesc =
        IOBufferMemoryDescriptor::inTaskWithPhysicalMask(kernel_task,
                                                         kIOMemorySharingTypeMask
                                                         | kIODirectionInOut
                                                         | (fContiguous ? kIOMemoryPhysicallyContiguous : 0),
                                                         cb,
                                                         PhysMask);
    if (pMemDesc)
    {
        IOReturn IORet = pMemDesc->prepare(kIODirectionInOut);
        if (IORet == kIOReturnSuccess)
        {
            void *pv = pMemDesc->getBytesNoCopy(0, cb);
            if (pv)
            {
                /*
                 * Check that it's all below MaxPhysAddr, page aligned and
                 * contiguous when that was requested.
                 */
                addr64_t AddrPrev = 0;
                MaxPhysAddr &= ~(uint64_t)PAGE_OFFSET_MASK;
                for (IOByteCount off = 0; off < cb; off += PAGE_SIZE)
                {
#ifdef __LP64__ /* Grumble! */
                    addr64_t Addr = pMemDesc->getPhysicalSegment(off, NULL);
#else
                    addr64_t Addr = pMemDesc->getPhysicalSegment64(off, NULL);
#endif
                    if (    Addr > MaxPhysAddr
                        ||  !Addr
                        ||  (Addr & PAGE_OFFSET_MASK)
                        ||  (   fContiguous
                             && off
                             && Addr != AddrPrev + PAGE_SIZE))
                    {
                        /* Buggy API, try to allocate the memory another way. */
                        pMemDesc->release();
                        if (PhysMask)
                            LogAlways(("rtR0MemObjNativeAllocWorker: off=%x Addr=%llx AddrPrev=%llx MaxPhysAddr=%llx PhysMask=%llx - buggy API!\n",
                                       off, Addr, AddrPrev, MaxPhysAddr, PhysMask));
                        return VERR_ADDRESS_TOO_BIG;
                    }
                    AddrPrev = Addr;
                }

                /*
                 * Create the IPRT memory object.
                 */
                PRTR0MEMOBJDARWIN pMemDarwin = (PRTR0MEMOBJDARWIN)rtR0MemObjNew(sizeof(*pMemDarwin), enmType, pv, cb);
                if (pMemDarwin)
                {
                    if (fContiguous)
                    {
#ifdef __LP64__ /* Grumble! */
                        addr64_t PhysBase64 = pMemDesc->getPhysicalSegment(0, NULL);
#else
                        addr64_t PhysBase64 = pMemDesc->getPhysicalSegment64(0, NULL);
#endif
                        RTHCPHYS PhysBase = PhysBase64; Assert(PhysBase == PhysBase64);
                        if (enmType == RTR0MEMOBJTYPE_CONT)
                            pMemDarwin->Core.u.Cont.Phys = PhysBase;
                        else if (enmType == RTR0MEMOBJTYPE_PHYS)
                            pMemDarwin->Core.u.Phys.PhysBase = PhysBase;
                        else
                            AssertMsgFailed(("enmType=%d\n", enmType));
                    }

                    pMemDarwin->pMemDesc = pMemDesc;
                    *ppMem = &pMemDarwin->Core;
                    return VINF_SUCCESS;
                }

                rc = VERR_NO_MEMORY;
            }
            else
                rc = VERR_MEMOBJ_INIT_FAILED;
        }
        else
            rc = RTErrConvertFromDarwinIO(IORet);
        pMemDesc->release();
    }
    else
        rc = VERR_MEMOBJ_INIT_FAILED;
    Assert(rc != VERR_ADDRESS_TOO_BIG);
    return rc;
}


int rtR0MemObjNativeAllocPage(PPRTR0MEMOBJINTERNAL ppMem, size_t cb, bool fExecutable)
{
    return rtR0MemObjNativeAllocWorker(ppMem, cb, fExecutable, false /* fContiguous */,
                                       0 /* PhysMask */, UINT64_MAX, RTR0MEMOBJTYPE_PAGE);
}


int rtR0MemObjNativeAllocLow(PPRTR0MEMOBJINTERNAL ppMem, size_t cb, bool fExecutable)
{
    /*
     * Try IOMallocPhysical/IOMallocAligned first.
     * Then try optimistically without a physical address mask, which will always
     * end up using IOMallocAligned.
     *
     * (See bug comment in the worker and IOBufferMemoryDescriptor::initWithPhysicalMask.)
     */
    int rc = rtR0MemObjNativeAllocWorker(ppMem, cb, fExecutable, false /* fContiguous */,
                                         ~(uint32_t)PAGE_OFFSET_MASK, _4G - PAGE_SIZE, RTR0MEMOBJTYPE_LOW);
    if (rc == VERR_ADDRESS_TOO_BIG)
        rc = rtR0MemObjNativeAllocWorker(ppMem, cb, fExecutable, false /* fContiguous */,
                                         0 /* PhysMask */, _4G - PAGE_SIZE, RTR0MEMOBJTYPE_LOW);
    return rc;
}


int rtR0MemObjNativeAllocCont(PPRTR0MEMOBJINTERNAL ppMem, size_t cb, bool fExecutable)
{
    int rc = rtR0MemObjNativeAllocWorker(ppMem, cb, fExecutable, true /* fContiguous */,
                                         ~(uint32_t)PAGE_OFFSET_MASK, _4G - PAGE_SIZE,
                                         RTR0MEMOBJTYPE_CONT);

    /*
     * Workaround for bogus IOKernelAllocateContiguous behavior, just in case.
     * cb <= PAGE_SIZE allocations take a different path, using a different allocator.
     */
    if (RT_FAILURE(rc) && cb <= PAGE_SIZE)
        rc = rtR0MemObjNativeAllocWorker(ppMem, cb + PAGE_SIZE, fExecutable, true /* fContiguous */,
                                         ~(uint32_t)PAGE_OFFSET_MASK, _4G - PAGE_SIZE,
                                         RTR0MEMOBJTYPE_CONT);
    return rc;
}


int rtR0MemObjNativeAllocPhys(PPRTR0MEMOBJINTERNAL ppMem, size_t cb, RTHCPHYS PhysHighest)
{
    /*
     * Translate the PhysHighest address into a mask.
     */
    int rc;
    if (PhysHighest == NIL_RTHCPHYS)
        rc = rtR0MemObjNativeAllocWorker(ppMem, cb, true /* fExecutable */, true /* fContiguous */,
                                         0 /* PhysMask */, UINT64_MAX, RTR0MEMOBJTYPE_PHYS);
    else
    {
        mach_vm_address_t PhysMask = 0;
        PhysMask = ~(mach_vm_address_t)0;
        while (PhysMask > (PhysHighest | PAGE_OFFSET_MASK))
            PhysMask >>= 1;
        AssertReturn(PhysMask + 1 >= cb, VERR_INVALID_PARAMETER);
        PhysMask &= ~(mach_vm_address_t)PAGE_OFFSET_MASK;
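        /* Example: PhysHighest = _4G - 1 leaves PhysMask = 0xffffffff after the
           loop and 0xfffff000 once the page offset bits are cleared. */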

        rc = rtR0MemObjNativeAllocWorker(ppMem, cb, true /* fExecutable */, true /* fContiguous */,
                                         PhysMask, PhysHighest, RTR0MEMOBJTYPE_PHYS);
    }
    return rc;
}


int rtR0MemObjNativeAllocPhysNC(PPRTR0MEMOBJINTERNAL ppMem, size_t cb, RTHCPHYS PhysHighest)
{
    /** @todo rtR0MemObjNativeAllocPhysNC / darwin.
     * This might be a bit problematic and may very well require having to create our own
     * object which we populate with pages but without mapping it into any address space.
     * Estimate is 2-3 days.
     */
    return VERR_NOT_SUPPORTED;
}


int rtR0MemObjNativeEnterPhys(PPRTR0MEMOBJINTERNAL ppMem, RTHCPHYS Phys, size_t cb)
{
    /*
     * Create a descriptor for it (the validation is always true on Intel Macs, but
     * since it does no harm we keep it in).
     */
    int rc = VERR_ADDRESS_TOO_BIG;
    IOAddressRange aRanges[1] = { { Phys, cb } };
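    /* The compare below can only fail if IOAddressRange's fields are narrower
       than Phys/cb and the values were truncated. */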
    if (    aRanges[0].address == Phys
        &&  aRanges[0].length == cb)
    {
        IOMemoryDescriptor *pMemDesc = IOMemoryDescriptor::withAddressRanges(&aRanges[0], RT_ELEMENTS(aRanges),
                                                                             kIODirectionInOut, NULL /*task*/);
        if (pMemDesc)
        {
            Assert(Phys == pMemDesc->getPhysicalAddress());

            /*
             * Create the IPRT memory object.
             */
            PRTR0MEMOBJDARWIN pMemDarwin = (PRTR0MEMOBJDARWIN)rtR0MemObjNew(sizeof(*pMemDarwin), RTR0MEMOBJTYPE_PHYS, NULL, cb);
            if (pMemDarwin)
            {
                pMemDarwin->Core.u.Phys.PhysBase = Phys;
                pMemDarwin->Core.u.Phys.fAllocated = false;
                pMemDarwin->pMemDesc = pMemDesc;
                *ppMem = &pMemDarwin->Core;
                return VINF_SUCCESS;
            }

            rc = VERR_NO_MEMORY;
            pMemDesc->release();
        }
        else
            rc = VERR_MEMOBJ_INIT_FAILED;
    }
    else
        AssertMsgFailed(("%#llx %llx\n", (unsigned long long)Phys, (unsigned long long)cb));
    return rc;
}


/**
 * Internal worker for locking down pages.
 *
 * @return  IPRT status code.
 *
 * @param   ppMem           Where to store the memory object pointer.
 * @param   pv              First page.
 * @param   cb              Number of bytes.
 * @param   Task            The task \a pv and \a cb refer to.
 */
static int rtR0MemObjNativeLock(PPRTR0MEMOBJINTERNAL ppMem, void *pv, size_t cb, task_t Task)
{
#ifdef USE_VM_MAP_WIRE
    vm_map_t Map = get_task_map(Task);
    Assert(Map);

    /*
     * First try to lock the memory.
     */
    int rc = VERR_LOCK_FAILED;
    kern_return_t kr = vm_map_wire(get_task_map(Task),
                                   (vm_map_offset_t)pv,
                                   (vm_map_offset_t)pv + cb,
                                   VM_PROT_DEFAULT,
                                   0 /* not user */);
    if (kr == KERN_SUCCESS)
    {
        /*
         * Create the IPRT memory object.
         */
        PRTR0MEMOBJDARWIN pMemDarwin = (PRTR0MEMOBJDARWIN)rtR0MemObjNew(sizeof(*pMemDarwin), RTR0MEMOBJTYPE_LOCK, pv, cb);
        if (pMemDarwin)
        {
            pMemDarwin->Core.u.Lock.R0Process = (RTR0PROCESS)Task;
            *ppMem = &pMemDarwin->Core;
            return VINF_SUCCESS;
        }

        kr = vm_map_unwire(get_task_map(Task), (vm_map_offset_t)pv, (vm_map_offset_t)pv + cb, 0 /* not user */);
        Assert(kr == KERN_SUCCESS);
        rc = VERR_NO_MEMORY;
    }

#else

    /*
     * Create a descriptor and try to lock it (prepare).
     */
    int rc = VERR_MEMOBJ_INIT_FAILED;
    IOMemoryDescriptor *pMemDesc = IOMemoryDescriptor::withAddressRange((vm_address_t)pv, cb, kIODirectionInOut, Task);
    if (pMemDesc)
    {
        IOReturn IORet = pMemDesc->prepare(kIODirectionInOut);
        if (IORet == kIOReturnSuccess)
        {
            /*
             * Create the IPRT memory object.
             */
            PRTR0MEMOBJDARWIN pMemDarwin = (PRTR0MEMOBJDARWIN)rtR0MemObjNew(sizeof(*pMemDarwin), RTR0MEMOBJTYPE_LOCK, pv, cb);
            if (pMemDarwin)
            {
                pMemDarwin->Core.u.Lock.R0Process = (RTR0PROCESS)Task;
                pMemDarwin->pMemDesc = pMemDesc;
                *ppMem = &pMemDarwin->Core;
                return VINF_SUCCESS;
            }

            pMemDesc->complete();
            rc = VERR_NO_MEMORY;
        }
        else
            rc = VERR_LOCK_FAILED;
        pMemDesc->release();
    }
#endif
    return rc;
}


int rtR0MemObjNativeLockUser(PPRTR0MEMOBJINTERNAL ppMem, RTR3PTR R3Ptr, size_t cb, RTR0PROCESS R0Process)
{
    return rtR0MemObjNativeLock(ppMem, (void *)R3Ptr, cb, (task_t)R0Process);
}


int rtR0MemObjNativeLockKernel(PPRTR0MEMOBJINTERNAL ppMem, void *pv, size_t cb)
{
    return rtR0MemObjNativeLock(ppMem, pv, cb, kernel_task);
}


int rtR0MemObjNativeReserveKernel(PPRTR0MEMOBJINTERNAL ppMem, void *pvFixed, size_t cb, size_t uAlignment)
{
    return VERR_NOT_IMPLEMENTED;
}


int rtR0MemObjNativeReserveUser(PPRTR0MEMOBJINTERNAL ppMem, RTR3PTR R3PtrFixed, size_t cb, size_t uAlignment, RTR0PROCESS R0Process)
{
    return VERR_NOT_IMPLEMENTED;
}


int rtR0MemObjNativeMapKernel(PPRTR0MEMOBJINTERNAL ppMem, RTR0MEMOBJ pMemToMap, void *pvFixed, size_t uAlignment,
                              unsigned fProt, size_t offSub, size_t cbSub)
{
    AssertReturn(pvFixed == (void *)-1, VERR_NOT_SUPPORTED);

    /*
     * Must have a memory descriptor.
     */
    int rc = VERR_INVALID_PARAMETER;
    PRTR0MEMOBJDARWIN pMemToMapDarwin = (PRTR0MEMOBJDARWIN)pMemToMap;
    if (pMemToMapDarwin->pMemDesc)
    {
#if MAC_OS_X_VERSION_MIN_REQUIRED >= 1050
        IOMemoryMap *pMemMap = pMemToMapDarwin->pMemDesc->createMappingInTask(kernel_task,
                                                                              0,
                                                                              kIOMapAnywhere | kIOMapDefaultCache,
                                                                              offSub,
                                                                              cbSub);
#else
        IOMemoryMap *pMemMap = pMemToMapDarwin->pMemDesc->map(kernel_task, 0,
                                                              kIOMapAnywhere | kIOMapDefaultCache,
                                                              offSub, cbSub);
#endif
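        /* Note: createMappingInTask() is the 10.5+ replacement for the older map()
           overload that takes a task, hence the version check above. */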
        if (pMemMap)
        {
            IOVirtualAddress VirtAddr = pMemMap->getVirtualAddress();
            void *pv = (void *)(uintptr_t)VirtAddr;
            if ((uintptr_t)pv == VirtAddr)
            {
                /*
                 * HACK ALERT!
                 *
                 * Touch the pages to force the kernel to create the page
                 * table entries. This is necessary since the kernel gets
                 * upset if we take a page fault when preemption is disabled
                 * and/or we own a simple lock. It has no problems with us
                 * disabling interrupts when taking the traps, weird stuff.
                 */
                uint32_t volatile *pu32 = (uint32_t volatile *)pv;
                size_t cbLeft = cbSub;
                for (;;)
                {
                    ASMAtomicCmpXchgU32(pu32, 0xdeadbeef, 0xdeadbeef);
                    if (cbLeft <= PAGE_SIZE)
                        break;
                    cbLeft -= PAGE_SIZE;
                    pu32 += PAGE_SIZE / sizeof(uint32_t);
                }

                /*
                 * Create the IPRT memory object.
                 */
                PRTR0MEMOBJDARWIN pMemDarwin = (PRTR0MEMOBJDARWIN)rtR0MemObjNew(sizeof(*pMemDarwin), RTR0MEMOBJTYPE_MAPPING,
                                                                                pv, pMemToMapDarwin->Core.cb);
                if (pMemDarwin)
                {
                    pMemDarwin->Core.u.Mapping.R0Process = NIL_RTR0PROCESS;
                    pMemDarwin->pMemMap = pMemMap;
                    *ppMem = &pMemDarwin->Core;
                    return VINF_SUCCESS;
                }

                rc = VERR_NO_MEMORY;
            }
            else
                rc = VERR_ADDRESS_TOO_BIG;
            pMemMap->release();
        }
        else
            rc = VERR_MAP_FAILED;
    }
    return rc;
}


int rtR0MemObjNativeMapUser(PPRTR0MEMOBJINTERNAL ppMem, RTR0MEMOBJ pMemToMap, RTR3PTR R3PtrFixed, size_t uAlignment, unsigned fProt, RTR0PROCESS R0Process)
{
    AssertReturn(R3PtrFixed == (RTR3PTR)-1, VERR_NOT_SUPPORTED);

    /*
     * Must have a memory descriptor.
     */
    int rc = VERR_INVALID_PARAMETER;
    PRTR0MEMOBJDARWIN pMemToMapDarwin = (PRTR0MEMOBJDARWIN)pMemToMap;
    if (pMemToMapDarwin->pMemDesc)
    {
#if MAC_OS_X_VERSION_MIN_REQUIRED >= 1050
        IOMemoryMap *pMemMap = pMemToMapDarwin->pMemDesc->createMappingInTask((task_t)R0Process,
                                                                              0,
                                                                              kIOMapAnywhere | kIOMapDefaultCache,
                                                                              0 /* offset */,
                                                                              0 /* length */);
#else
        IOMemoryMap *pMemMap = pMemToMapDarwin->pMemDesc->map((task_t)R0Process, 0,
                                                              kIOMapAnywhere | kIOMapDefaultCache);
#endif
        if (pMemMap)
        {
            IOVirtualAddress VirtAddr = pMemMap->getVirtualAddress();
            void *pv = (void *)(uintptr_t)VirtAddr;
            if ((uintptr_t)pv == VirtAddr)
            {
                /*
                 * Create the IPRT memory object.
                 */
                PRTR0MEMOBJDARWIN pMemDarwin = (PRTR0MEMOBJDARWIN)rtR0MemObjNew(sizeof(*pMemDarwin), RTR0MEMOBJTYPE_MAPPING,
                                                                                pv, pMemToMapDarwin->Core.cb);
                if (pMemDarwin)
                {
                    pMemDarwin->Core.u.Mapping.R0Process = R0Process;
                    pMemDarwin->pMemMap = pMemMap;
                    *ppMem = &pMemDarwin->Core;
                    return VINF_SUCCESS;
                }

                rc = VERR_NO_MEMORY;
            }
            else
                rc = VERR_ADDRESS_TOO_BIG;
            pMemMap->release();
        }
        else
            rc = VERR_MAP_FAILED;
    }
    return rc;
}


RTHCPHYS rtR0MemObjNativeGetPagePhysAddr(PRTR0MEMOBJINTERNAL pMem, size_t iPage)
{
    RTHCPHYS PhysAddr;
    PRTR0MEMOBJDARWIN pMemDarwin = (PRTR0MEMOBJDARWIN)pMem;

#ifdef USE_VM_MAP_WIRE
    /*
     * Locked memory doesn't have a memory descriptor and
     * needs to be handled differently.
     */
    if (pMemDarwin->Core.enmType == RTR0MEMOBJTYPE_LOCK)
    {
        ppnum_t PgNo;
        if (pMemDarwin->Core.u.Lock.R0Process == NIL_RTR0PROCESS)
            PgNo = pmap_find_phys(kernel_pmap, (uintptr_t)pMemDarwin->Core.pv + iPage * PAGE_SIZE);
        else
        {
            /*
             * From what I can tell, Apple seems to have locked up all the
             * available interfaces that could help us obtain the pmap_t of a task
             * or vm_map_t.
             *
             * So, we'll have to figure out where in the vm_map_t structure it is
             * and read it ourselves. ASSUMING that kernel_pmap is pointed to by
             * kernel_map->pmap, we scan kernel_map to locate the structure offset.
             * Not nice, but it will hopefully do the job in a reliable manner...
             *
             * (get_task_pmap, get_map_pmap or vm_map_pmap is what we really need btw.)
             */
            static int s_offPmap = -1;
            if (RT_UNLIKELY(s_offPmap == -1))
            {
                pmap_t const *p = (pmap_t *)kernel_map;
                pmap_t const * const pEnd = p + 64;
                for (; p < pEnd; p++)
                    if (*p == kernel_pmap)
                    {
                        s_offPmap = (uintptr_t)p - (uintptr_t)kernel_map;
                        break;
                    }
                AssertReturn(s_offPmap >= 0, NIL_RTHCPHYS);
            }
            pmap_t Pmap = *(pmap_t *)((uintptr_t)get_task_map((task_t)pMemDarwin->Core.u.Lock.R0Process) + s_offPmap);
            PgNo = pmap_find_phys(Pmap, (uintptr_t)pMemDarwin->Core.pv + iPage * PAGE_SIZE);
        }

        AssertReturn(PgNo, NIL_RTHCPHYS);
        PhysAddr = (RTHCPHYS)PgNo << PAGE_SHIFT;
        Assert((PhysAddr >> PAGE_SHIFT) == PgNo);
    }
    else
#endif /* USE_VM_MAP_WIRE */
    {
        /*
         * Get the memory descriptor.
         */
        IOMemoryDescriptor *pMemDesc = pMemDarwin->pMemDesc;
        if (!pMemDesc)
            pMemDesc = pMemDarwin->pMemMap->getMemoryDescriptor();
        AssertReturn(pMemDesc, NIL_RTHCPHYS);

        /*
         * If we've got a memory descriptor, use getPhysicalSegment64().
         */
#ifdef __LP64__ /* Grumble! */
        addr64_t Addr = pMemDesc->getPhysicalSegment(iPage * PAGE_SIZE, NULL);
#else
        addr64_t Addr = pMemDesc->getPhysicalSegment64(iPage * PAGE_SIZE, NULL);
#endif
        AssertMsgReturn(Addr, ("iPage=%u\n", iPage), NIL_RTHCPHYS);
        PhysAddr = Addr;
        AssertMsgReturn(PhysAddr == Addr, ("PhysAddr=%RHp Addr=%RX64\n", PhysAddr, (uint64_t)Addr), NIL_RTHCPHYS);
    }

    return PhysAddr;
}