VirtualBox

source: vbox/trunk/src/VBox/Runtime/r0drv/darwin/memobj-r0drv-darwin.cpp@28800

Last change on this file since 28800 was 28800, checked in by vboxsync, 15 years ago

Automated rebranding to Oracle copyright/license strings via filemuncher

  • Property svn:eol-style set to native
  • Property svn:keywords set to Id
File size: 38.2 KB
1/* $Id: memobj-r0drv-darwin.cpp 28800 2010-04-27 08:22:32Z vboxsync $ */
2/** @file
3 * IPRT - Ring-0 Memory Objects, Darwin.
4 */
5
6/*
7 * Copyright (C) 2006-2007 Oracle Corporation
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 *
17 * The contents of this file may alternatively be used under the terms
18 * of the Common Development and Distribution License Version 1.0
19 * (CDDL) only, as it comes in the "COPYING.CDDL" file of the
20 * VirtualBox OSE distribution, in which case the provisions of the
21 * CDDL are applicable instead of those of the GPL.
22 *
23 * You may elect to license modified versions of this file under the
24 * terms and conditions of either the GPL or the CDDL or both.
25 */
26
27
28/*******************************************************************************
29* Header Files *
30*******************************************************************************/
31#include "the-darwin-kernel.h"
32#include "internal/iprt.h"
33#include <iprt/memobj.h>
34
35#include <iprt/alloc.h>
36#include <iprt/asm.h>
37#include <iprt/assert.h>
38#include <iprt/log.h>
39#include <iprt/param.h>
40#include <iprt/process.h>
41#include <iprt/string.h>
42#include <iprt/thread.h>
43#include "internal/memobj.h"
44
45/*#define USE_VM_MAP_WIRE - may re-enable later when non-mapped allocations are added. */
46
47
48/*******************************************************************************
49* Structures and Typedefs *
50*******************************************************************************/
51/**
52 * The Darwin version of the memory object structure.
53 */
54typedef struct RTR0MEMOBJDARWIN
55{
56 /** The core structure. */
57 RTR0MEMOBJINTERNAL Core;
58 /** Pointer to the memory descriptor created for allocated and locked memory. */
59 IOMemoryDescriptor *pMemDesc;
60 /** Pointer to the memory mapping object for mapped memory. */
61 IOMemoryMap *pMemMap;
62} RTR0MEMOBJDARWIN, *PRTR0MEMOBJDARWIN;
63
64
65/**
66 * HACK ALERT!
67 *
68 * Touch the pages to force the kernel to create the page
69 * table entries. This is necessary since the kernel gets
70 * upset if we take a page fault when preemption is disabled
71 * and/or we own a simple lock. It has no problems with us
72 * disabling interrupts when taking the traps, weird stuff.
73 *
74 * @param pv Pointer to the first page.
75 * @param cb The number of bytes.
76 */
77static void rtR0MemObjDarwinTouchPages(void *pv, size_t cb)
78{
79 uint32_t volatile *pu32 = (uint32_t volatile *)pv;
80 for (;;)
81 {
82 ASMAtomicCmpXchgU32(pu32, 0xdeadbeef, 0xdeadbeef);
83 if (cb <= PAGE_SIZE)
84 break;
85 cb -= PAGE_SIZE;
86 pu32 += PAGE_SIZE / sizeof(uint32_t);
87 }
88}
89
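#if 0 /* Illustrative sketch only, not part of the original file (hypothetical helper): the
       * call pattern used by the RT_STRICT check in rtR0MemObjNativeAllocWorker below.
       * Touching the pages with preemption disabled means an unmapped page is caught right
       * here (the kernel complains about faults taken in that state) rather than later. */
static void rtR0MemObjDarwinTouchPagesExample(void *pv, size_t cb)
{
    RTTHREADPREEMPTSTATE State = RTTHREADPREEMPTSTATE_INITIALIZER;
    RTThreadPreemptDisable(&State);
    rtR0MemObjDarwinTouchPages(pv, cb);
    RTThreadPreemptRestore(&State);
}
#endif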
90
91/**
92 * Gets the virtual memory map the specified object is mapped into.
93 *
94 * @returns VM map handle on success, NULL if no map.
95 * @param pMem The memory object.
96 */
97DECLINLINE(vm_map_t) rtR0MemObjDarwinGetMap(PRTR0MEMOBJINTERNAL pMem)
98{
99 switch (pMem->enmType)
100 {
101 case RTR0MEMOBJTYPE_PAGE:
102 case RTR0MEMOBJTYPE_LOW:
103 case RTR0MEMOBJTYPE_CONT:
104 return kernel_map;
105
106 case RTR0MEMOBJTYPE_PHYS:
107 case RTR0MEMOBJTYPE_PHYS_NC:
108 return NULL; /* pretend these have no mapping atm. */
109
110 case RTR0MEMOBJTYPE_LOCK:
111 return pMem->u.Lock.R0Process == NIL_RTR0PROCESS
112 ? kernel_map
113 : get_task_map((task_t)pMem->u.Lock.R0Process);
114
115 case RTR0MEMOBJTYPE_RES_VIRT:
116 return pMem->u.ResVirt.R0Process == NIL_RTR0PROCESS
117 ? kernel_map
118 : get_task_map((task_t)pMem->u.ResVirt.R0Process);
119
120 case RTR0MEMOBJTYPE_MAPPING:
121 return pMem->u.Mapping.R0Process == NIL_RTR0PROCESS
122 ? kernel_map
123 : get_task_map((task_t)pMem->u.Mapping.R0Process);
124
125 default:
126 return NULL;
127 }
128}
129
130#if 0 /* not necessary after all */
131/* My vm_map mockup. */
132struct my_vm_map
133{
134 struct { char pad[8]; } lock;
135 struct my_vm_map_header
136 {
137 struct vm_map_links
138 {
139 void *prev;
140 void *next;
141 vm_map_offset_t start;
142 vm_map_offset_t end;
143 } links;
144 int nentries;
145 boolean_t entries_pageable;
146 } hdr;
147 pmap_t pmap;
148 vm_map_size_t size;
149};
150
151
152/**
153 * Gets the minimum map address, this is similar to get_map_min.
154 *
155 * @returns The start address of the map.
156 * @param pMap The map.
157 */
158static vm_map_offset_t rtR0MemObjDarwinGetMapMin(vm_map_t pMap)
159{
160 /* Lazy discovery of the correct offset. The Apple guys are a wonderfully secretive bunch. */
161 static int32_t volatile s_offAdjust = INT32_MAX;
162 int32_t off = s_offAdjust;
163 if (off == INT32_MAX)
164 {
165 for (off = 0; ; off += sizeof(pmap_t))
166 {
167 if (*(pmap_t *)((uint8_t *)kernel_map + off) == kernel_pmap)
168 break;
169 AssertReturn(off <= RT_MAX(RT_OFFSETOF(struct my_vm_map, pmap) * 4, 1024), 0x1000);
170 }
171 ASMAtomicWriteS32(&s_offAdjust, off - RT_OFFSETOF(struct my_vm_map, pmap));
172 }
173
174 /* calculate it. */
175 struct my_vm_map *pMyMap = (struct my_vm_map *)((uint8_t *)pMap + off);
176 return pMyMap->hdr.links.start;
177}
178#endif /* unused */
179
180#ifdef RT_STRICT
181
182/**
183 * Read from a physical page.
184 *
185 * @param HCPhys The address to start reading at.
186 * @param cb How many bytes to read.
187 * @param pvDst Where to put the bytes. This is zeroed on failure.
188 */
189static void rtR0MemObjDarwinReadPhys(RTHCPHYS HCPhys, size_t cb, void *pvDst)
190{
191 memset(pvDst, '\0', cb);
192
193 IOAddressRange aRanges[1] = { { (mach_vm_address_t)HCPhys, RT_ALIGN(cb, PAGE_SIZE) } };
194 IOMemoryDescriptor *pMemDesc = IOMemoryDescriptor::withAddressRanges(&aRanges[0], RT_ELEMENTS(aRanges),
195 kIODirectionIn, NULL /*task*/);
196 if (pMemDesc)
197 {
198#if MAC_OS_X_VERSION_MIN_REQUIRED >= 1050
199 IOMemoryMap *pMemMap = pMemDesc->createMappingInTask(kernel_task, 0, kIOMapAnywhere | kIOMapDefaultCache);
200#else
201 IOMemoryMap *pMemMap = pMemDesc->map(kernel_task, 0, kIOMapAnywhere | kIOMapDefaultCache);
202#endif
203 if (pMemMap)
204 {
205 void const *pvSrc = (void const *)(uintptr_t)pMemMap->getVirtualAddress();
206 memcpy(pvDst, pvSrc, cb);
207 pMemMap->release();
208 }
209 else
210 printf("rtR0MemObjDarwinReadPhys: createMappingInTask failed; HCPhys=%llx\n", HCPhys);
211
212 pMemDesc->release();
213 }
214 else
215 printf("rtR0MemObjDarwinReadPhys: withAddressRanges failed; HCPhys=%llx\n", HCPhys);
216}
217
218
219/**
220 * Gets the PTE for a page.
221 *
222 * @returns the PTE.
223 * @param pvPage The virtual address to get the PTE for.
224 */
225uint64_t rtR0MemObjDarwinGetPTE(void *pvPage)
226{
227 RTUINT64U u64;
228 RTCCUINTREG cr3 = ASMGetCR3();
229 RTCCUINTREG cr4 = ASMGetCR4();
230 bool fPAE = false;
231 bool fLMA = false;
232 if (cr4 & RT_BIT(5) /*X86_CR4_PAE*/)
233 {
234 fPAE = true;
235 uint32_t fAmdFeatures = ASMCpuId_EDX(0x80000001);
236 if (fAmdFeatures & RT_BIT(29) /*X86_CPUID_AMD_FEATURE_EDX_LONG_MODE*/)
237 {
238 uint64_t efer = ASMRdMsr(0xc0000080 /*MSR_K6_EFER*/);
239 if (efer & RT_BIT(10) /*MSR_K6_EFER_LMA*/)
240 fLMA = true;
241 }
242 }
243
244 if (fLMA)
245 {
246 /* PML4 */
247 rtR0MemObjDarwinReadPhys((cr3 & ~(RTCCUINTREG)PAGE_OFFSET_MASK) | (((uint64_t)(uintptr_t)pvPage >> 39) & 0x1ff) * 8, 8, &u64);
248 if (!(u64.u & RT_BIT(0) /* present */))
249 {
250 printf("rtR0MemObjDarwinGetPTE: %p -> PML4E !p\n", pvPage);
251 return 0;
252 }
253
254 /* PDPTR */
255 rtR0MemObjDarwinReadPhys((u64.u & ~(uint64_t)PAGE_OFFSET_MASK) | (((uintptr_t)pvPage >> 30) & 0x1ff) * 8, 8, &u64);
256 if (!(u64.u & RT_BIT(0) /* present */))
257 {
258 printf("rtR0MemObjDarwinGetPTE: %p -> PDPTE !p\n", pvPage);
259 return 0;
260 }
261 if (u64.u & RT_BIT(7) /* big */)
262 return (u64.u & ~(uint64_t)(_1G -1)) | ((uintptr_t)pvPage & (_1G -1));
263
264 /* PD */
265 rtR0MemObjDarwinReadPhys((u64.u & ~(uint64_t)PAGE_OFFSET_MASK) | (((uintptr_t)pvPage >> 21) & 0x1ff) * 8, 8, &u64);
266 if (!(u64.u & RT_BIT(0) /* present */))
267 {
268 printf("rtR0MemObjDarwinGetPTE: %p -> PDE !p\n", pvPage);
269 return 0;
270 }
271 if (u64.u & RT_BIT(7) /* big */)
272 return (u64.u & ~(uint64_t)(_2M -1)) | ((uintptr_t)pvPage & (_2M -1));
273
274 /* PT */
275 rtR0MemObjDarwinReadPhys((u64.u & ~(uint64_t)PAGE_OFFSET_MASK) | (((uintptr_t)pvPage >> 12) & 0x1ff) * 8, 8, &u64);
276 if (!(u64.u & RT_BIT(0) /* present */))
277 {
278 printf("rtR0MemObjDarwinGetPTE: %p -> PTE !p\n", pvPage);
279 return 0;
280 }
281 return u64.u;
282 }
283
284 if (fPAE)
285 {
286 /* PDPTR */
287 rtR0MemObjDarwinReadPhys((cr3 & 0xffffffe0 /*X86_CR3_PAE_PAGE_MASK*/) | (((uintptr_t)pvPage >> 30) & 0x3) * 8, 8, &u64);
288 if (!(u64.u & RT_BIT(0) /* present */))
289 return 0;
290
291 /* PD */
292 rtR0MemObjDarwinReadPhys((u64.u & ~(uint64_t)PAGE_OFFSET_MASK) | (((uintptr_t)pvPage >> 21) & 0x1ff) * 8, 8, &u64);
293 if (!(u64.u & RT_BIT(0) /* present */))
294 return 0;
295 if (u64.u & RT_BIT(7) /* big */)
296 return (u64.u & ~(uint64_t)(_2M -1)) | ((uintptr_t)pvPage & (_2M -1));
297
298 /* PT */
299 rtR0MemObjDarwinReadPhys((u64.u & ~(uint64_t)PAGE_OFFSET_MASK) | (((uintptr_t)pvPage >> 12) & 0x1ff) * 8, 8, &u64);
300 if (!(u64.u & RT_BIT(0) /* present */))
301 return 0;
302 return u64.u;
303 }
304
305 /* PD */
306 rtR0MemObjDarwinReadPhys((cr3 & ~(RTCCUINTREG)PAGE_OFFSET_MASK) | (((uintptr_t)pvPage >> 22) & 0x3ff) * 4, 4, &u64);
307 if (!(u64.au32[0] & RT_BIT(0) /* present */))
308 return 0;
309 if (u64.au32[0] & RT_BIT(7) /* big */)
310 return (u64.u & ~(uint64_t)(_2M -1)) | ((uintptr_t)pvPage & (_2M -1));
311
312 /* PT */
313 rtR0MemObjDarwinReadPhys((u64.au32[0] & ~(uint32_t)PAGE_OFFSET_MASK) | (((uintptr_t)pvPage >> 12) & 0x3ff) * 4, 4, &u64);
314 if (!(u64.au32[0] & RT_BIT(0) /* present */))
315 return 0;
316 return u64.au32[0];
317
318 return 0;
319}
320
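#if 0 /* Illustrative sketch only, not part of the original file (hypothetical helper): how the
       * long mode walk above slices a virtual address into table indexes.  Bits 39-47 select
       * the PML4 entry, bits 30-38 the PDPT entry, bits 21-29 the PD entry and bits 12-20 the
       * PT entry; each index is multiplied by 8 above because long mode paging entries are
       * 8 bytes wide. */
static void rtR0MemObjDarwinDumpPteIndexes(void *pvPage)
{
    uint64_t uAddr  = (uintptr_t)pvPage;
    unsigned iPml4e = (unsigned)((uAddr >> 39) & 0x1ff);
    unsigned iPdpte = (unsigned)((uAddr >> 30) & 0x1ff);
    unsigned iPde   = (unsigned)((uAddr >> 21) & 0x1ff);
    unsigned iPte   = (unsigned)((uAddr >> 12) & 0x1ff);
    printf("rtR0MemObjDarwinDumpPteIndexes: %p -> pml4e=%u pdpte=%u pde=%u pte=%u\n",
           pvPage, iPml4e, iPdpte, iPde, iPte);
}
#endif
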
321#endif /* RT_STRICT */
322
323int rtR0MemObjNativeFree(RTR0MEMOBJ pMem)
324{
325 PRTR0MEMOBJDARWIN pMemDarwin = (PRTR0MEMOBJDARWIN)pMem;
326
327 /*
328 * Release the IOMemoryDescriptor and/or IOMemoryMap associated with the object.
329 */
330 if (pMemDarwin->pMemDesc)
331 {
332 if (pMemDarwin->Core.enmType == RTR0MEMOBJTYPE_LOCK)
333 pMemDarwin->pMemDesc->complete(); /* paranoia */
334 pMemDarwin->pMemDesc->release();
335 pMemDarwin->pMemDesc = NULL;
336 }
337
338 if (pMemDarwin->pMemMap)
339 {
340 pMemDarwin->pMemMap->release();
341 pMemDarwin->pMemMap = NULL;
342 }
343
344 /*
345 * Release any memory that we've allocated or locked.
346 */
347 switch (pMemDarwin->Core.enmType)
348 {
349 case RTR0MEMOBJTYPE_LOW:
350 case RTR0MEMOBJTYPE_PAGE:
351 case RTR0MEMOBJTYPE_CONT:
352 break;
353
354 case RTR0MEMOBJTYPE_LOCK:
355 {
356#ifdef USE_VM_MAP_WIRE
357 vm_map_t Map = pMemDarwin->Core.u.Lock.R0Process != NIL_RTR0PROCESS
358 ? get_task_map((task_t)pMemDarwin->Core.u.Lock.R0Process)
359 : kernel_map;
360 kern_return_t kr = vm_map_unwire(Map,
361 (vm_map_offset_t)pMemDarwin->Core.pv,
362 (vm_map_offset_t)pMemDarwin->Core.pv + pMemDarwin->Core.cb,
363 0 /* not user */);
364 Assert(kr == KERN_SUCCESS); /** @todo don't ignore... */
365#endif
366 break;
367 }
368
369 case RTR0MEMOBJTYPE_PHYS:
370 /*if (pMemDarwin->Core.u.Phys.fAllocated)
371 IOFreePhysical(pMemDarwin->Core.u.Phys.PhysBase, pMemDarwin->Core.cb);*/
372 Assert(!pMemDarwin->Core.u.Phys.fAllocated);
373 break;
374
375 case RTR0MEMOBJTYPE_PHYS_NC:
376 AssertMsgFailed(("RTR0MEMOBJTYPE_PHYS_NC\n"));
377 return VERR_INTERNAL_ERROR;
378
379 case RTR0MEMOBJTYPE_RES_VIRT:
380 AssertMsgFailed(("RTR0MEMOBJTYPE_RES_VIRT\n"));
381 return VERR_INTERNAL_ERROR;
382
383 case RTR0MEMOBJTYPE_MAPPING:
384 /* nothing to do here. */
385 break;
386
387 default:
388 AssertMsgFailed(("enmType=%d\n", pMemDarwin->Core.enmType));
389 return VERR_INTERNAL_ERROR;
390 }
391
392 return VINF_SUCCESS;
393}
394
395
396
397/**
398 * Kernel memory alloc worker that uses inTaskWithPhysicalMask.
399 *
400 * @returns IPRT status code.
401 * @retval VERR_ADDRESS_TOO_BIG try another way.
402 *
403 * @param ppMem Where to return the memory object.
404 * @param cb The page aligned memory size.
405 * @param fExecutable Whether the mapping needs to be executable.
406 * @param fContiguous Whether the backing memory needs to be contiguous.
407 * @param PhysMask The mask for the backing memory (i.e. range). Use 0 if
408 * you don't care that much or are speculating.
409 * @param MaxPhysAddr The max address to verify the result against. Use
410 * UINT64_MAX if it doesn't matter.
411 * @param enmType The object type.
412 */
413static int rtR0MemObjNativeAllocWorker(PPRTR0MEMOBJINTERNAL ppMem, size_t cb,
414 bool fExecutable, bool fContiguous,
415 mach_vm_address_t PhysMask, uint64_t MaxPhysAddr,
416 RTR0MEMOBJTYPE enmType)
417{
418 /*
419 * Try inTaskWithPhysicalMask first, but since we don't quite trust that it
420 * actually respects the physical memory mask (10.5.x is certainly busted),
421 * we'll use rtR0MemObjNativeAllocCont as a fallback for dealing with that.
422 *
423 * The kIOMemoryKernelUserShared flag just forces the result to be page aligned.
424 */
425 int rc;
426 IOBufferMemoryDescriptor *pMemDesc =
427 IOBufferMemoryDescriptor::inTaskWithPhysicalMask(kernel_task,
428 kIOMemoryKernelUserShared
429 | kIODirectionInOut
430 | (fContiguous ? kIOMemoryPhysicallyContiguous : 0),
431 cb,
432 PhysMask);
433 if (pMemDesc)
434 {
435 IOReturn IORet = pMemDesc->prepare(kIODirectionInOut);
436 if (IORet == kIOReturnSuccess)
437 {
438 void *pv = pMemDesc->getBytesNoCopy(0, cb);
439 if (pv)
440 {
441 /*
442 * Check that it's all below MaxPhysAddr (and contiguous when requested).
443 */
444 addr64_t AddrPrev = 0;
445 MaxPhysAddr &= ~(uint64_t)PAGE_OFFSET_MASK;
446 for (IOByteCount off = 0; off < cb; off += PAGE_SIZE)
447 {
448#ifdef __LP64__ /* Grumble! */
449 addr64_t Addr = pMemDesc->getPhysicalSegment(off, NULL);
450#else
451 addr64_t Addr = pMemDesc->getPhysicalSegment64(off, NULL);
452#endif
453 if ( Addr > MaxPhysAddr
454 || !Addr
455 || (Addr & PAGE_OFFSET_MASK)
456 || ( fContiguous
457 && off
458 && Addr != AddrPrev + PAGE_SIZE))
459 {
460 /* Buggy API, try to allocate the memory another way. */
461 pMemDesc->release();
462 if (PhysMask)
463 LogAlways(("rtR0MemObjNativeAllocWorker: off=%x Addr=%llx AddrPrev=%llx MaxPhysAddr=%llx PhysMask=%llx - buggy API!\n",
464 off, Addr, AddrPrev, MaxPhysAddr, PhysMask));
465 return VERR_ADDRESS_TOO_BIG;
466 }
467 AddrPrev = Addr;
468 }
469
470#ifdef RT_STRICT
471 /* check that the memory is actually mapped. */
472 //addr64_t Addr = pMemDesc->getPhysicalSegment64(0, NULL);
473 //printf("rtR0MemObjNativeAllocWorker: pv=%p %8llx %8llx\n", pv, rtR0MemObjDarwinGetPTE(pv), Addr);
474 RTTHREADPREEMPTSTATE State = RTTHREADPREEMPTSTATE_INITIALIZER;
475 RTThreadPreemptDisable(&State);
476 rtR0MemObjDarwinTouchPages(pv, cb);
477 RTThreadPreemptRestore(&State);
478#endif
479
480 /*
481 * Create the IPRT memory object.
482 */
483 PRTR0MEMOBJDARWIN pMemDarwin = (PRTR0MEMOBJDARWIN)rtR0MemObjNew(sizeof(*pMemDarwin), enmType, pv, cb);
484 if (pMemDarwin)
485 {
486 if (fContiguous)
487 {
488#ifdef __LP64__ /* Grumble! */
489 addr64_t PhysBase64 = pMemDesc->getPhysicalSegment(0, NULL);
490#else
491 addr64_t PhysBase64 = pMemDesc->getPhysicalSegment64(0, NULL);
492#endif
493 RTHCPHYS PhysBase = PhysBase64; Assert(PhysBase == PhysBase64);
494 if (enmType == RTR0MEMOBJTYPE_CONT)
495 pMemDarwin->Core.u.Cont.Phys = PhysBase;
496 else if (enmType == RTR0MEMOBJTYPE_PHYS)
497 pMemDarwin->Core.u.Phys.PhysBase = PhysBase;
498 else
499 AssertMsgFailed(("enmType=%d\n", enmType));
500 }
501
502 pMemDarwin->pMemDesc = pMemDesc;
503 *ppMem = &pMemDarwin->Core;
504 return VINF_SUCCESS;
505 }
506
507 rc = VERR_NO_MEMORY;
508 }
509 else
510 rc = VERR_MEMOBJ_INIT_FAILED;
511 }
512 else
513 rc = RTErrConvertFromDarwinIO(IORet);
514 pMemDesc->release();
515 }
516 else
517 rc = VERR_MEMOBJ_INIT_FAILED;
518 Assert(rc != VERR_ADDRESS_TOO_BIG);
519 return rc;
520}
521
522
523int rtR0MemObjNativeAllocPage(PPRTR0MEMOBJINTERNAL ppMem, size_t cb, bool fExecutable)
524{
525 return rtR0MemObjNativeAllocWorker(ppMem, cb, fExecutable, false /* fContiguous */,
526 0 /* PhysMask */, UINT64_MAX, RTR0MEMOBJTYPE_PAGE);
527}
528
529
530int rtR0MemObjNativeAllocLow(PPRTR0MEMOBJINTERNAL ppMem, size_t cb, bool fExecutable)
531{
532 /*
533 * Try IOMallocPhysical/IOMallocAligned first.
534 * Then try optimistically without a physical address mask, which will always
535 * end up using IOMallocAligned.
536 *
537 * (See bug comment in the worker and IOBufferMemoryDescriptor::initWithPhysicalMask.)
538 */
539 int rc = rtR0MemObjNativeAllocWorker(ppMem, cb, fExecutable, false /* fContiguous */,
540 ~(uint32_t)PAGE_OFFSET_MASK, _4G - PAGE_SIZE, RTR0MEMOBJTYPE_LOW);
541 if (rc == VERR_ADDRESS_TOO_BIG)
542 rc = rtR0MemObjNativeAllocWorker(ppMem, cb, fExecutable, false /* fContiguous */,
543 0 /* PhysMask */, _4G - PAGE_SIZE, RTR0MEMOBJTYPE_LOW);
544 return rc;
545}
546
547
548int rtR0MemObjNativeAllocCont(PPRTR0MEMOBJINTERNAL ppMem, size_t cb, bool fExecutable)
549{
550 int rc = rtR0MemObjNativeAllocWorker(ppMem, cb, fExecutable, true /* fContiguous */,
551 ~(uint32_t)PAGE_OFFSET_MASK, _4G - PAGE_SIZE,
552 RTR0MEMOBJTYPE_CONT);
553
554 /*
555 * Workaround for bogus IOKernelAllocateContiguous behavior, just in case.
556 * cb <= PAGE_SIZE allocations take a different path, using a different allocator.
557 */
558 if (RT_FAILURE(rc) && cb <= PAGE_SIZE)
559 rc = rtR0MemObjNativeAllocWorker(ppMem, cb + PAGE_SIZE, fExecutable, true /* fContiguous */,
560 ~(uint32_t)PAGE_OFFSET_MASK, _4G - PAGE_SIZE,
561 RTR0MEMOBJTYPE_CONT);
562 return rc;
563}
564
565
566int rtR0MemObjNativeAllocPhys(PPRTR0MEMOBJINTERNAL ppMem, size_t cb, RTHCPHYS PhysHighest, size_t uAlignment)
567{
568 /** @todo alignment */
569 if (uAlignment != PAGE_SIZE)
570 return VERR_NOT_SUPPORTED;
571
572 /*
573 * Translate the PhysHighest address into a mask.
574 */
575 int rc;
576 if (PhysHighest == NIL_RTHCPHYS)
577 rc = rtR0MemObjNativeAllocWorker(ppMem, cb, true /* fExecutable */, true /* fContiguous */,
578 0 /* PhysMask*/, UINT64_MAX, RTR0MEMOBJTYPE_PHYS);
579 else
580 {
581 mach_vm_address_t PhysMask = 0;
582 PhysMask = ~(mach_vm_address_t)0;
583 while (PhysMask > (PhysHighest | PAGE_OFFSET_MASK))
584 PhysMask >>= 1;
585 AssertReturn(cb <= PhysMask + 1, VERR_INVALID_PARAMETER);
586 PhysMask &= ~(mach_vm_address_t)PAGE_OFFSET_MASK;
587
588 rc = rtR0MemObjNativeAllocWorker(ppMem, cb, true /* fExecutable */, true /* fContiguous */,
589 PhysMask, PhysHighest, RTR0MEMOBJTYPE_PHYS);
590 }
591 return rc;
592}
593
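/* Illustrative note, not part of the original file: for, say, PhysHighest = 0x0fffffff (a
 * 256MB limit) the translation above runs like this:
 *      PhysMask starts as 0xffffffffffffffff,
 *      is shifted right until it is no longer above (0x0fffffff | 0xfff)  -> 0x0fffffff,
 *      and finally has the page offset bits cleared                       -> 0x0ffff000.
 * The resulting mask is what inTaskWithPhysicalMask gets; it is supposed to confine the
 * allocation to physical addresses satisfying (Addr & ~PhysMask) == 0, though as noted in
 * rtR0MemObjNativeAllocWorker that cannot always be trusted. */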
594
595int rtR0MemObjNativeAllocPhysNC(PPRTR0MEMOBJINTERNAL ppMem, size_t cb, RTHCPHYS PhysHighest)
596{
597 /** @todo rtR0MemObjNativeAllocPhys / darwin.
598 * This might be a bit problematic and may well require creating our own object,
599 * one we populate with pages but don't map into any address space.
600 * Estimate is 2-3 days.
601 */
602 return VERR_NOT_SUPPORTED;
603}
604
605
606int rtR0MemObjNativeEnterPhys(PPRTR0MEMOBJINTERNAL ppMem, RTHCPHYS Phys, size_t cb, unsigned CachePolicy)
607{
608 AssertReturn(CachePolicy == RTMEM_CACHE_POLICY_DONT_CARE, VERR_NOT_IMPLEMENTED);
609
610 /*
611 * Create a descriptor for it (the validation is always true on Intel Macs, but
612 * since it doesn't hurt us, keep it in).
613 */
614 int rc = VERR_ADDRESS_TOO_BIG;
615 IOAddressRange aRanges[1] = { { Phys, cb } };
616 if ( aRanges[0].address == Phys
617 && aRanges[0].length == cb)
618 {
619 IOMemoryDescriptor *pMemDesc = IOMemoryDescriptor::withAddressRanges(&aRanges[0], RT_ELEMENTS(aRanges),
620 kIODirectionInOut, NULL /*task*/);
621 if (pMemDesc)
622 {
623#ifdef __LP64__ /* Grumble! */
624 Assert(Phys == pMemDesc->getPhysicalSegment(0, 0));
625#else
626 Assert(Phys == pMemDesc->getPhysicalSegment64(0, 0));
627#endif
628
629 /*
630 * Create the IPRT memory object.
631 */
632 PRTR0MEMOBJDARWIN pMemDarwin = (PRTR0MEMOBJDARWIN)rtR0MemObjNew(sizeof(*pMemDarwin), RTR0MEMOBJTYPE_PHYS, NULL, cb);
633 if (pMemDarwin)
634 {
635 pMemDarwin->Core.u.Phys.PhysBase = Phys;
636 pMemDarwin->Core.u.Phys.fAllocated = false;
637 pMemDarwin->pMemDesc = pMemDesc;
638 *ppMem = &pMemDarwin->Core;
639 return VINF_SUCCESS;
640 }
641
642 rc = VERR_NO_MEMORY;
643 pMemDesc->release();
644 }
645 else
646 rc = VERR_MEMOBJ_INIT_FAILED;
647 }
648 else
649 AssertMsgFailed(("%#llx %llx\n", (unsigned long long)Phys, (unsigned long long)cb));
650 return rc;
651}
652
653
654/**
655 * Internal worker for locking down pages.
656 *
657 * @return IPRT status code.
658 *
659 * @param ppMem Where to store the memory object pointer.
660 * @param pv First page.
661 * @param cb Number of bytes.
662 * @param fAccess The desired access, a combination of RTMEM_PROT_READ
663 * and RTMEM_PROT_WRITE.
664 * @param Task The task \a pv and \a cb refer to.
665 */
666static int rtR0MemObjNativeLock(PPRTR0MEMOBJINTERNAL ppMem, void *pv, size_t cb, uint32_t fAccess, task_t Task)
667{
668 NOREF(fAccess);
669#ifdef USE_VM_MAP_WIRE
670 vm_map_t Map = get_task_map(Task);
671 Assert(Map);
672
673 /*
674 * First, try to lock the memory.
675 */
676 int rc = VERR_LOCK_FAILED;
677 kern_return_t kr = vm_map_wire(get_task_map(Task),
678 (vm_map_offset_t)pv,
679 (vm_map_offset_t)pv + cb,
680 VM_PROT_DEFAULT,
681 0 /* not user */);
682 if (kr == KERN_SUCCESS)
683 {
684 /*
685 * Create the IPRT memory object.
686 */
687 PRTR0MEMOBJDARWIN pMemDarwin = (PRTR0MEMOBJDARWIN)rtR0MemObjNew(sizeof(*pMemDarwin), RTR0MEMOBJTYPE_LOCK, pv, cb);
688 if (pMemDarwin)
689 {
690 pMemDarwin->Core.u.Lock.R0Process = (RTR0PROCESS)Task;
691 *ppMem = &pMemDarwin->Core;
692 return VINF_SUCCESS;
693 }
694
695 kr = vm_map_unwire(get_task_map(Task), (vm_map_offset_t)pv, (vm_map_offset_t)pv + cb, 0 /* not user */);
696 Assert(kr == KERN_SUCCESS);
697 rc = VERR_NO_MEMORY;
698 }
699
700#else
701
702 /*
703 * Create a descriptor and try lock it (prepare).
704 */
705 int rc = VERR_MEMOBJ_INIT_FAILED;
706 IOMemoryDescriptor *pMemDesc = IOMemoryDescriptor::withAddressRange((vm_address_t)pv, cb, kIODirectionInOut, Task);
707 if (pMemDesc)
708 {
709 IOReturn IORet = pMemDesc->prepare(kIODirectionInOut);
710 if (IORet == kIOReturnSuccess)
711 {
712 /*
713 * Create the IPRT memory object.
714 */
715 PRTR0MEMOBJDARWIN pMemDarwin = (PRTR0MEMOBJDARWIN)rtR0MemObjNew(sizeof(*pMemDarwin), RTR0MEMOBJTYPE_LOCK, pv, cb);
716 if (pMemDarwin)
717 {
718 pMemDarwin->Core.u.Lock.R0Process = (RTR0PROCESS)Task;
719 pMemDarwin->pMemDesc = pMemDesc;
720 *ppMem = &pMemDarwin->Core;
721 return VINF_SUCCESS;
722 }
723
724 pMemDesc->complete();
725 rc = VERR_NO_MEMORY;
726 }
727 else
728 rc = VERR_LOCK_FAILED;
729 pMemDesc->release();
730 }
731#endif
732 return rc;
733}
734
735
736int rtR0MemObjNativeLockUser(PPRTR0MEMOBJINTERNAL ppMem, RTR3PTR R3Ptr, size_t cb, uint32_t fAccess, RTR0PROCESS R0Process)
737{
738 return rtR0MemObjNativeLock(ppMem, (void *)R3Ptr, cb, fAccess, (task_t)R0Process);
739}
740
741
742int rtR0MemObjNativeLockKernel(PPRTR0MEMOBJINTERNAL ppMem, void *pv, size_t cb, uint32_t fAccess)
743{
744 return rtR0MemObjNativeLock(ppMem, pv, cb, fAccess, kernel_task);
745}
746
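#if 0 /* Illustrative sketch only, not part of the original file (hypothetical helper): how a
       * caller is expected to drive the lock worker above through the generic iprt/memobj.h
       * front-end - the RTR0MemObj* signatures used here are assumptions, not taken from this
       * file.  Lock a kernel buffer, query the first page's physical address, then free. */
static int rtR0MemObjDarwinLockUsageExample(void *pvBuf, size_t cb)
{
    RTR0MEMOBJ hMemObj;
    int rc = RTR0MemObjLockKernel(&hMemObj, pvBuf, cb, RTMEM_PROT_READ | RTMEM_PROT_WRITE);
    if (RT_SUCCESS(rc))
    {
        RTHCPHYS HCPhysFirst = RTR0MemObjGetPagePhysAddr(hMemObj, 0 /* iPage */);
        printf("rtR0MemObjDarwinLockUsageExample: first page at %llx\n", (unsigned long long)HCPhysFirst);
        rc = RTR0MemObjFree(hMemObj, false /* fFreeMappings */);
    }
    return rc;
}
#endif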
747
748int rtR0MemObjNativeReserveKernel(PPRTR0MEMOBJINTERNAL ppMem, void *pvFixed, size_t cb, size_t uAlignment)
749{
750 return VERR_NOT_IMPLEMENTED;
751}
752
753
754int rtR0MemObjNativeReserveUser(PPRTR0MEMOBJINTERNAL ppMem, RTR3PTR R3PtrFixed, size_t cb, size_t uAlignment, RTR0PROCESS R0Process)
755{
756 return VERR_NOT_IMPLEMENTED;
757}
758
759
760int rtR0MemObjNativeMapKernel(PPRTR0MEMOBJINTERNAL ppMem, RTR0MEMOBJ pMemToMap, void *pvFixed, size_t uAlignment,
761 unsigned fProt, size_t offSub, size_t cbSub)
762{
763 AssertReturn(pvFixed == (void *)-1, VERR_NOT_SUPPORTED);
764
765 /*
766 * Check that the specified alignment is supported.
767 */
768 if (uAlignment > PAGE_SIZE)
769 return VERR_NOT_SUPPORTED;
770
771 /*
772 * Must have a memory descriptor that we can map.
773 */
774 int rc = VERR_INVALID_PARAMETER;
775 PRTR0MEMOBJDARWIN pMemToMapDarwin = (PRTR0MEMOBJDARWIN)pMemToMap;
776 if (pMemToMapDarwin->pMemDesc)
777 {
778#if MAC_OS_X_VERSION_MIN_REQUIRED >= 1050
779 IOMemoryMap *pMemMap = pMemToMapDarwin->pMemDesc->createMappingInTask(kernel_task,
780 0,
781 kIOMapAnywhere | kIOMapDefaultCache,
782 offSub,
783 cbSub);
784#else
785 IOMemoryMap *pMemMap = pMemToMapDarwin->pMemDesc->map(kernel_task,
786 0,
787 kIOMapAnywhere | kIOMapDefaultCache,
788 offSub,
789 cbSub);
790#endif
791 if (pMemMap)
792 {
793 IOVirtualAddress VirtAddr = pMemMap->getVirtualAddress();
794 void *pv = (void *)(uintptr_t)VirtAddr;
795 if ((uintptr_t)pv == VirtAddr)
796 {
797 //addr64_t Addr = pMemToMapDarwin->pMemDesc->getPhysicalSegment64(offSub, NULL);
798 //printf("pv=%p: %8llx %8llx\n", pv, rtR0MemObjDarwinGetPTE(pv), Addr);
799
800// /*
801// * Explicitly lock it so that we're sure it is present and that
802// * its PTEs cannot be recycled.
803// * Note! withAddressRange() doesn't work as it adds kIOMemoryTypeVirtual64
804// * to the options which causes prepare() to not wire the pages.
805// * This is probably a bug.
806// */
807// IOAddressRange Range = { (mach_vm_address_t)pv, cbSub };
808// IOMemoryDescriptor *pMemDesc = IOMemoryDescriptor::withOptions(&Range,
809// 1 /* count */,
810// 0 /* offset */,
811// kernel_task,
812// kIODirectionInOut | kIOMemoryTypeVirtual,
813// kIOMapperSystem);
814// if (pMemDesc)
815// {
816// IOReturn IORet = pMemDesc->prepare(kIODirectionInOut);
817// if (IORet == kIOReturnSuccess)
818// {
819 /* HACK ALERT! */
820 rtR0MemObjDarwinTouchPages(pv, cbSub);
821 /** @todo First, the memory should've been mapped by now, and second, it
822 * should have the wired attribute in the PTE (bit 9). Neither
823 * seems to be the case. The disabled locking code doesn't make any
824 * difference, which is extremely odd, and breaks
825 * rtR0MemObjNativeGetPagePhysAddr (getPhysicalSegment64 -> 64 for the
826 * lock descriptor). */
827 //addr64_t Addr = pMemDesc->getPhysicalSegment64(0, NULL);
828 //printf("pv=%p: %8llx %8llx (%d)\n", pv, rtR0MemObjDarwinGetPTE(pv), Addr, 2);
829
830 /*
831 * Create the IPRT memory object.
832 */
833 PRTR0MEMOBJDARWIN pMemDarwin = (PRTR0MEMOBJDARWIN)rtR0MemObjNew(sizeof(*pMemDarwin), RTR0MEMOBJTYPE_MAPPING,
834 pv, cbSub);
835 if (pMemDarwin)
836 {
837 pMemDarwin->Core.u.Mapping.R0Process = NIL_RTR0PROCESS;
838 pMemDarwin->pMemMap = pMemMap;
839// pMemDarwin->pMemDesc = pMemDesc;
840 *ppMem = &pMemDarwin->Core;
841 return VINF_SUCCESS;
842 }
843
844// pMemDesc->complete();
845// rc = VERR_NO_MEMORY;
846// }
847// else
848// rc = RTErrConvertFromDarwinIO(IORet);
849// pMemDesc->release();
850// }
851// else
852// rc = VERR_MEMOBJ_INIT_FAILED;
853 }
854 else
855 rc = VERR_ADDRESS_TOO_BIG;
856 pMemMap->release();
857 }
858 else
859 rc = VERR_MAP_FAILED;
860 }
861 return rc;
862}
863
864
865int rtR0MemObjNativeMapUser(PPRTR0MEMOBJINTERNAL ppMem, RTR0MEMOBJ pMemToMap, RTR3PTR R3PtrFixed, size_t uAlignment, unsigned fProt, RTR0PROCESS R0Process)
866{
867 /*
868 * Check for unsupported things.
869 */
870 AssertReturn(R3PtrFixed == (RTR3PTR)-1, VERR_NOT_SUPPORTED);
871 if (uAlignment > PAGE_SIZE)
872 return VERR_NOT_SUPPORTED;
873
874 /*
875 * Must have a memory descriptor.
876 */
877 int rc = VERR_INVALID_PARAMETER;
878 PRTR0MEMOBJDARWIN pMemToMapDarwin = (PRTR0MEMOBJDARWIN)pMemToMap;
879 if (pMemToMapDarwin->pMemDesc)
880 {
881#if MAC_OS_X_VERSION_MIN_REQUIRED >= 1050
882 IOMemoryMap *pMemMap = pMemToMapDarwin->pMemDesc->createMappingInTask((task_t)R0Process,
883 0,
884 kIOMapAnywhere | kIOMapDefaultCache,
885 0 /* offset */,
886 0 /* length */);
887#else
888 IOMemoryMap *pMemMap = pMemToMapDarwin->pMemDesc->map((task_t)R0Process,
889 0,
890 kIOMapAnywhere | kIOMapDefaultCache);
891#endif
892 if (pMemMap)
893 {
894 IOVirtualAddress VirtAddr = pMemMap->getVirtualAddress();
895 void *pv = (void *)(uintptr_t)VirtAddr;
896 if ((uintptr_t)pv == VirtAddr)
897 {
898 /*
899 * Create the IPRT memory object.
900 */
901 PRTR0MEMOBJDARWIN pMemDarwin = (PRTR0MEMOBJDARWIN)rtR0MemObjNew(sizeof(*pMemDarwin), RTR0MEMOBJTYPE_MAPPING,
902 pv, pMemToMapDarwin->Core.cb);
903 if (pMemDarwin)
904 {
905 pMemDarwin->Core.u.Mapping.R0Process = R0Process;
906 pMemDarwin->pMemMap = pMemMap;
907 *ppMem = &pMemDarwin->Core;
908 return VINF_SUCCESS;
909 }
910
911 rc = VERR_NO_MEMORY;
912 }
913 else
914 rc = VERR_ADDRESS_TOO_BIG;
915 pMemMap->release();
916 }
917 else
918 rc = VERR_MAP_FAILED;
919 }
920 return rc;
921}
922
923
924int rtR0MemObjNativeProtect(PRTR0MEMOBJINTERNAL pMem, size_t offSub, size_t cbSub, uint32_t fProt)
925{
926 /* Get the map for the object. */
927 vm_map_t pVmMap = rtR0MemObjDarwinGetMap(pMem);
928 if (!pVmMap)
929 return VERR_NOT_SUPPORTED;
930
931 /* Convert the protection. */
932 vm_prot_t fMachProt;
933 switch (fProt)
934 {
935 case RTMEM_PROT_NONE:
936 fMachProt = VM_PROT_NONE;
937 break;
938 case RTMEM_PROT_READ:
939 fMachProt = VM_PROT_READ;
940 break;
941 case RTMEM_PROT_READ | RTMEM_PROT_WRITE:
942 fMachProt = VM_PROT_READ | VM_PROT_WRITE;
943 break;
944 case RTMEM_PROT_READ | RTMEM_PROT_WRITE | RTMEM_PROT_EXEC:
945 fMachProt = VM_PROT_READ | VM_PROT_WRITE | VM_PROT_EXECUTE;
946 break;
947 case RTMEM_PROT_WRITE | RTMEM_PROT_EXEC:
948 fMachProt = VM_PROT_WRITE | VM_PROT_EXECUTE;
949 break;
950 case RTMEM_PROT_EXEC:
951 fMachProt = VM_PROT_EXECUTE;
952 break;
953 default:
954 AssertFailedReturn(VERR_INVALID_PARAMETER);
955 }
956
957 /* do the job. */
958 vm_offset_t Start = (uintptr_t)pMem->pv + offSub;
959 kern_return_t krc = vm_protect(pVmMap,
960 Start,
961 cbSub,
962 false,
963 fMachProt);
964 if (krc != KERN_SUCCESS)
965 return RTErrConvertFromDarwinKern(krc);
966 return VINF_SUCCESS;
967}
968
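#if 0 /* Illustrative sketch only, not part of the original file (hypothetical helper): using
       * the function above to strip write and execute rights from the first page of an
       * existing memory object. */
static int rtR0MemObjDarwinProtectExample(PRTR0MEMOBJINTERNAL pMem)
{
    return rtR0MemObjNativeProtect(pMem, 0 /* offSub */, PAGE_SIZE, RTMEM_PROT_READ);
}
#endif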
969
970RTHCPHYS rtR0MemObjNativeGetPagePhysAddr(PRTR0MEMOBJINTERNAL pMem, size_t iPage)
971{
972 RTHCPHYS PhysAddr;
973 PRTR0MEMOBJDARWIN pMemDarwin = (PRTR0MEMOBJDARWIN)pMem;
974
975#ifdef USE_VM_MAP_WIRE
976 /*
977 * Locked memory doesn't have a memory descriptor and
978 * needs to be handled differently.
979 */
980 if (pMemDarwin->Core.enmType == RTR0MEMOBJTYPE_LOCK)
981 {
982 ppnum_t PgNo;
983 if (pMemDarwin->Core.u.Lock.R0Process == NIL_RTR0PROCESS)
984 PgNo = pmap_find_phys(kernel_pmap, (uintptr_t)pMemDarwin->Core.pv + iPage * PAGE_SIZE);
985 else
986 {
987 /*
988 * From what I can tell, Apple seems to have locked down all the
989 * available interfaces that could help us obtain the pmap_t of a task
990 * or vm_map_t.
991 *
992 * So, we'll have to figure out where in the vm_map_t structure it is
993 * and read it ourselves. ASSUMING that kernel_pmap is pointed to by
994 * kernel_map->pmap, we scan kernel_map to locate the structure offset.
995 * Not nice, but it will hopefully do the job in a reliable manner...
996 *
997 * (get_task_pmap, get_map_pmap or vm_map_pmap is what we really need btw.)
998 */
999 static int s_offPmap = -1;
1000 if (RT_UNLIKELY(s_offPmap == -1))
1001 {
1002 pmap_t const *p = (pmap_t *)kernel_map;
1003 pmap_t const * const pEnd = p + 64;
1004 for (; p < pEnd; p++)
1005 if (*p == kernel_pmap)
1006 {
1007 s_offPmap = (uintptr_t)p - (uintptr_t)kernel_map;
1008 break;
1009 }
1010 AssertReturn(s_offPmap >= 0, NIL_RTHCPHYS);
1011 }
1012 pmap_t Pmap = *(pmap_t *)((uintptr_t)get_task_map((task_t)pMemDarwin->Core.u.Lock.R0Process) + s_offPmap);
1013 PgNo = pmap_find_phys(Pmap, (uintptr_t)pMemDarwin->Core.pv + iPage * PAGE_SIZE);
1014 }
1015
1016 AssertReturn(PgNo, NIL_RTHCPHYS);
1017 PhysAddr = (RTHCPHYS)PgNo << PAGE_SHIFT;
1018 Assert((PhysAddr >> PAGE_SHIFT) == PgNo);
1019 }
1020 else
1021#endif /* USE_VM_MAP_WIRE */
1022 {
1023 /*
1024 * Get the memory descriptor.
1025 */
1026 IOMemoryDescriptor *pMemDesc = pMemDarwin->pMemDesc;
1027 if (!pMemDesc)
1028 pMemDesc = pMemDarwin->pMemMap->getMemoryDescriptor();
1029 AssertReturn(pMemDesc, NIL_RTHCPHYS);
1030
1031 /*
1032 * If we've got a memory descriptor, use getPhysicalSegment64() (or getPhysicalSegment() on 64-bit kernels).
1033 */
1034#ifdef __LP64__ /* Grumble! */
1035 addr64_t Addr = pMemDesc->getPhysicalSegment(iPage * PAGE_SIZE, NULL);
1036#else
1037 addr64_t Addr = pMemDesc->getPhysicalSegment64(iPage * PAGE_SIZE, NULL);
1038#endif
1039 AssertMsgReturn(Addr, ("iPage=%u\n", iPage), NIL_RTHCPHYS);
1040 PhysAddr = Addr;
1041 AssertMsgReturn(PhysAddr == Addr, ("PhysAddr=%RHp Addr=%RX64\n", PhysAddr, (uint64_t)Addr), NIL_RTHCPHYS);
1042 }
1043
1044 return PhysAddr;
1045}
1046