VirtualBox

source: vbox/trunk/src/VBox/Runtime/r0drv/darwin/memobj-r0drv-darwin.cpp @ 41077

Last change on this file since 41077 was 41077, checked in by vboxsync, 13 years ago

Disabled the broken vm_protect code again.

  • Property svn:eol-style set to native
  • Property svn:keywords set to Id
File size: 40.1 KB
1/* $Id: memobj-r0drv-darwin.cpp 41077 2012-04-27 08:44:07Z vboxsync $ */
2/** @file
3 * IPRT - Ring-0 Memory Objects, Darwin.
4 */
5
6/*
7 * Copyright (C) 2006-2007 Oracle Corporation
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 *
17 * The contents of this file may alternatively be used under the terms
18 * of the Common Development and Distribution License Version 1.0
19 * (CDDL) only, as it comes in the "COPYING.CDDL" file of the
20 * VirtualBox OSE distribution, in which case the provisions of the
21 * CDDL are applicable instead of those of the GPL.
22 *
23 * You may elect to license modified versions of this file under the
24 * terms and conditions of either the GPL or the CDDL or both.
25 */
26
27
28/*******************************************************************************
29* Header Files *
30*******************************************************************************/
31#include "the-darwin-kernel.h"
32#include "internal/iprt.h"
33#include <iprt/memobj.h>
34
35#include <iprt/asm.h>
36#if defined(RT_ARCH_AMD64) || defined(RT_ARCH_X86)
37# include <iprt/asm-amd64-x86.h>
38#endif
39#include <iprt/assert.h>
40#include <iprt/log.h>
41#include <iprt/mem.h>
42#include <iprt/param.h>
43#include <iprt/process.h>
44#include <iprt/string.h>
45#include <iprt/thread.h>
46#include "internal/memobj.h"
47
48/*#define USE_VM_MAP_WIRE - may re-enable later when non-mapped allocations are added. */
49
50
51/*******************************************************************************
52* Structures and Typedefs *
53*******************************************************************************/
54/**
55 * The Darwin version of the memory object structure.
56 */
57typedef struct RTR0MEMOBJDARWIN
58{
59 /** The core structure. */
60 RTR0MEMOBJINTERNAL Core;
61 /** Pointer to the memory descriptor created for allocated and locked memory. */
62 IOMemoryDescriptor *pMemDesc;
63 /** Pointer to the memory mapping object for mapped memory. */
64 IOMemoryMap *pMemMap;
65} RTR0MEMOBJDARWIN, *PRTR0MEMOBJDARWIN;
66
67
68/**
69 * HACK ALERT!
70 *
71 * Touch the pages to force the kernel to create the page
72 * table entries. This is necessary since the kernel gets
73 * upset if we take a page fault when preemption is disabled
74 * and/or we own a simple lock. It has no problems with us
75 * disabling interrupts when taking the traps, weird stuff.
76 *
77 * @param pv Pointer to the first page.
78 * @param cb The number of bytes.
79 */
80static void rtR0MemObjDarwinTouchPages(void *pv, size_t cb)
81{
82 uint32_t volatile *pu32 = (uint32_t volatile *)pv;
83 for (;;)
84 {
85 ASMAtomicCmpXchgU32(pu32, 0xdeadbeef, 0xdeadbeef);
86 if (cb <= PAGE_SIZE)
87 break;
88 cb -= PAGE_SIZE;
89 pu32 += PAGE_SIZE / sizeof(uint32_t);
90 }
91}
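/*
 * Illustrative sketch (not part of the original sources): the touch helper is
 * meant to run with preemption disabled so the freshly created PTEs cannot be
 * taken away before the caller is done, mirroring the pattern used later in
 * rtR0MemObjNativeAllocWorker.
 */
#if 0 /* example only */
static void rtR0MemObjDarwinExampleTouch(void *pv, size_t cb)
{
    RTTHREADPREEMPTSTATE State = RTTHREADPREEMPTSTATE_INITIALIZER;
    RTThreadPreemptDisable(&State);     /* no page faults tolerated beyond this point */
    rtR0MemObjDarwinTouchPages(pv, cb); /* forces the kernel to create the PTEs */
    RTThreadPreemptRestore(&State);
}
#endif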
92
93
94/**
95 * Gets the virtual memory map the specified object is mapped into.
96 *
97 * @returns VM map handle on success, NULL if no map.
98 * @param pMem The memory object.
99 */
100DECLINLINE(vm_map_t) rtR0MemObjDarwinGetMap(PRTR0MEMOBJINTERNAL pMem)
101{
102 switch (pMem->enmType)
103 {
104 case RTR0MEMOBJTYPE_PAGE:
105 case RTR0MEMOBJTYPE_LOW:
106 case RTR0MEMOBJTYPE_CONT:
107 return kernel_map;
108
109 case RTR0MEMOBJTYPE_PHYS:
110 case RTR0MEMOBJTYPE_PHYS_NC:
111 return NULL; /* pretend these have no mapping atm. */
112
113 case RTR0MEMOBJTYPE_LOCK:
114 return pMem->u.Lock.R0Process == NIL_RTR0PROCESS
115 ? kernel_map
116 : get_task_map((task_t)pMem->u.Lock.R0Process);
117
118 case RTR0MEMOBJTYPE_RES_VIRT:
119 return pMem->u.ResVirt.R0Process == NIL_RTR0PROCESS
120 ? kernel_map
121 : get_task_map((task_t)pMem->u.ResVirt.R0Process);
122
123 case RTR0MEMOBJTYPE_MAPPING:
124 return pMem->u.Mapping.R0Process == NIL_RTR0PROCESS
125 ? kernel_map
126 : get_task_map((task_t)pMem->u.Mapping.R0Process);
127
128 default:
129 return NULL;
130 }
131}
132
133#if 0 /* not necessary after all */
134/* My vm_map mockup. */
135struct my_vm_map
136{
137 struct { char pad[8]; } lock;
138 struct my_vm_map_header
139 {
140 struct vm_map_links
141 {
142 void *prev;
143 void *next;
144 vm_map_offset_t start;
145 vm_map_offset_t end;
146 } links;
147 int nentries;
148 boolean_t entries_pageable;
149 } hdr;
150 pmap_t pmap;
151 vm_map_size_t size;
152};
153
154
155/**
156 * Gets the minimum map address, this is similar to get_map_min.
157 *
158 * @returns The start address of the map.
159 * @param pMap The map.
160 */
161static vm_map_offset_t rtR0MemObjDarwinGetMapMin(vm_map_t pMap)
162{
163 /* lazy discovery of the correct offset. The Apple guys are a wonderfully secretive bunch. */
164 static int32_t volatile s_offAdjust = INT32_MAX;
165 int32_t off = s_offAdjust;
166 if (off == INT32_MAX)
167 {
168 for (off = 0; ; off += sizeof(pmap_t))
169 {
170 if (*(pmap_t *)((uint8_t *)kernel_map + off) == kernel_pmap)
171 break;
172 AssertReturn(off <= RT_MAX(RT_OFFSETOF(struct my_vm_map, pmap) * 4, 1024), 0x1000);
173 }
174 ASMAtomicWriteS32(&s_offAdjust, off - RT_OFFSETOF(struct my_vm_map, pmap));
175 }
176
177 /* calculate it. */
178 struct my_vm_map *pMyMap = (struct my_vm_map *)((uint8_t *)pMap + off);
179 return pMyMap->hdr.links.start;
180}
181#endif /* unused */
182
183#ifdef RT_STRICT
184
185/**
186 * Read from a physical page.
187 *
188 * @param HCPhys The address to start reading at.
189 * @param cb How many bytes to read.
190 * @param pvDst Where to put the bytes. This is zero'd on failure.
191 */
192static void rtR0MemObjDarwinReadPhys(RTHCPHYS HCPhys, size_t cb, void *pvDst)
193{
194 memset(pvDst, '\0', cb);
195
196 IOAddressRange aRanges[1] = { { (mach_vm_address_t)HCPhys, RT_ALIGN(cb, PAGE_SIZE) } };
197 IOMemoryDescriptor *pMemDesc = IOMemoryDescriptor::withAddressRanges(&aRanges[0], RT_ELEMENTS(aRanges),
198 kIODirectionIn, NULL /*task*/);
199 if (pMemDesc)
200 {
201#if MAC_OS_X_VERSION_MIN_REQUIRED >= 1050
202 IOMemoryMap *pMemMap = pMemDesc->createMappingInTask(kernel_task, 0, kIOMapAnywhere | kIOMapDefaultCache);
203#else
204 IOMemoryMap *pMemMap = pMemDesc->map(kernel_task, 0, kIOMapAnywhere | kIOMapDefaultCache);
205#endif
206 if (pMemMap)
207 {
208 void const *pvSrc = (void const *)(uintptr_t)pMemMap->getVirtualAddress();
209 memcpy(pvDst, pvSrc, cb);
210 pMemMap->release();
211 }
212 else
213 printf("rtR0MemObjDarwinReadPhys: createMappingInTask failed; HCPhys=%llx\n", HCPhys);
214
215 pMemDesc->release();
216 }
217 else
218 printf("rtR0MemObjDarwinReadPhys: withAddressRanges failed; HCPhys=%llx\n", HCPhys);
219}
220
221
222/**
223 * Gets the PTE for a page.
224 *
225 * @returns the PTE.
226 * @param pvPage The virtual address to get the PTE for.
227 */
228static uint64_t rtR0MemObjDarwinGetPTE(void *pvPage)
229{
230 RTUINT64U u64;
231 RTCCUINTREG cr3 = ASMGetCR3();
232 RTCCUINTREG cr4 = ASMGetCR4();
233 bool fPAE = false;
234 bool fLMA = false;
235 if (cr4 & RT_BIT(5) /*X86_CR4_PAE*/)
236 {
237 fPAE = true;
238 uint32_t fAmdFeatures = ASMCpuId_EDX(0x80000001);
239 if (fAmdFeatures & RT_BIT(29) /*X86_CPUID_AMD_FEATURE_EDX_LONG_MODE*/)
240 {
241 uint64_t efer = ASMRdMsr(0xc0000080 /*MSR_K6_EFER*/);
242 if (efer & RT_BIT(10) /*MSR_K6_EFER_LMA*/)
243 fLMA = true;
244 }
245 }
246
247 if (fLMA)
248 {
249 /* PML4 */
250 rtR0MemObjDarwinReadPhys((cr3 & ~(RTCCUINTREG)PAGE_OFFSET_MASK) | (((uint64_t)(uintptr_t)pvPage >> 39) & 0x1ff) * 8, 8, &u64);
251 if (!(u64.u & RT_BIT(0) /* present */))
252 {
253 printf("rtR0MemObjDarwinGetPTE: %p -> PML4E !p\n", pvPage);
254 return 0;
255 }
256
257 /* PDPTR */
258 rtR0MemObjDarwinReadPhys((u64.u & ~(uint64_t)PAGE_OFFSET_MASK) | (((uintptr_t)pvPage >> 30) & 0x1ff) * 8, 8, &u64);
259 if (!(u64.u & RT_BIT(0) /* present */))
260 {
261 printf("rtR0MemObjDarwinGetPTE: %p -> PDPTE !p\n", pvPage);
262 return 0;
263 }
264 if (u64.u & RT_BIT(7) /* big */)
265 return (u64.u & ~(uint64_t)(_1G -1)) | ((uintptr_t)pvPage & (_1G -1));
266
267 /* PD */
268 rtR0MemObjDarwinReadPhys((u64.u & ~(uint64_t)PAGE_OFFSET_MASK) | (((uintptr_t)pvPage >> 21) & 0x1ff) * 8, 8, &u64);
269 if (!(u64.u & RT_BIT(0) /* present */))
270 {
271 printf("rtR0MemObjDarwinGetPTE: %p -> PDE !p\n", pvPage);
272 return 0;
273 }
274 if (u64.u & RT_BIT(7) /* big */)
275 return (u64.u & ~(uint64_t)(_2M -1)) | ((uintptr_t)pvPage & (_2M -1));
276
277 /* PT */
278 rtR0MemObjDarwinReadPhys((u64.u & ~(uint64_t)PAGE_OFFSET_MASK) | (((uintptr_t)pvPage >> 12) & 0x1ff) * 8, 8, &u64);
279 if (!(u64.u & RT_BIT(0) /* present */))
280 {
281 printf("rtR0MemObjDarwinGetPTE: %p -> PTE !p\n", pvPage);
282 return 0;
283 }
284 return u64.u;
285 }
286
287 if (fPAE)
288 {
289 /* PDPTR */
290 rtR0MemObjDarwinReadPhys((cr3 & 0xffffffe0 /*X86_CR3_PAE_PAGE_MASK*/) | (((uintptr_t)pvPage >> 30) & 0x3) * 8, 8, &u64);
291 if (!(u64.u & RT_BIT(0) /* present */))
292 return 0;
293
294 /* PD */
295 rtR0MemObjDarwinReadPhys((u64.u & ~(uint64_t)PAGE_OFFSET_MASK) | (((uintptr_t)pvPage >> 21) & 0x1ff) * 8, 8, &u64);
296 if (!(u64.u & RT_BIT(0) /* present */))
297 return 0;
298 if (u64.u & RT_BIT(7) /* big */)
299 return (u64.u & ~(uint64_t)(_2M -1)) | ((uintptr_t)pvPage & (_2M -1));
300
301 /* PT */
302 rtR0MemObjDarwinReadPhys((u64.u & ~(uint64_t)PAGE_OFFSET_MASK) | (((uintptr_t)pvPage >> 12) & 0x1ff) * 8, 8, &u64);
303 if (!(u64.u & RT_BIT(0) /* present */))
304 return 0;
305 return u64.u;
306 }
307
308 /* PD */
309 rtR0MemObjDarwinReadPhys(((uint32_t)cr3 & ~(uint32_t)PAGE_OFFSET_MASK) | (((uintptr_t)pvPage >> 22) & 0x3ff) * 4, 4, &u64);
310 if (!(u64.au32[0] & RT_BIT(0) /* present */))
311 return 0;
312 if (u64.au32[0] & RT_BIT(7) /* big */)
313 return (u64.u & ~(uint64_t)(_2M -1)) | ((uintptr_t)pvPage & (_2M -1));
314
315 /* PT */
316 rtR0MemObjDarwinReadPhys((u64.au32[0] & ~(uint32_t)PAGE_OFFSET_MASK) | (((uintptr_t)pvPage >> 12) & 0x3ff) * 4, 4, &u64);
317 if (!(u64.au32[0] & RT_BIT(0) /* present */))
318 return 0;
319 return u64.au32[0];
320
321 return 0;
322}
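/*
 * Illustrative sketch (not part of the original sources): how a strict-build
 * caller might interpret the raw PTE value returned by the walker above. The
 * bit numbers are the architectural x86 ones.
 */
#if 0 /* example only */
static void rtR0MemObjDarwinExampleDumpPTE(void *pvPage)
{
    uint64_t const uPte = rtR0MemObjDarwinGetPTE(pvPage);
    printf("PTE for %p: %llx present=%d writable=%d global=%d nx=%d\n",
           pvPage, (unsigned long long)uPte,
           !!(uPte & RT_BIT_64(0)),    /* P */
           !!(uPte & RT_BIT_64(1)),    /* R/W */
           !!(uPte & RT_BIT_64(8)),    /* G */
           !!(uPte & RT_BIT_64(63)));  /* XD - PAE/long mode only */
}
#endif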
323
324#endif /* RT_STRICT */
325
326DECLHIDDEN(int) rtR0MemObjNativeFree(RTR0MEMOBJ pMem)
327{
328 PRTR0MEMOBJDARWIN pMemDarwin = (PRTR0MEMOBJDARWIN)pMem;
329
330 /*
331 * Release the IOMemoryDescriptor or/and IOMemoryMap associated with the object.
332 */
333 if (pMemDarwin->pMemDesc)
334 {
335 if (pMemDarwin->Core.enmType == RTR0MEMOBJTYPE_LOCK)
336 pMemDarwin->pMemDesc->complete(); /* paranoia */
337 pMemDarwin->pMemDesc->release();
338 pMemDarwin->pMemDesc = NULL;
339 }
340
341 if (pMemDarwin->pMemMap)
342 {
343 pMemDarwin->pMemMap->release();
344 pMemDarwin->pMemMap = NULL;
345 }
346
347 /*
348 * Release any memory that we've allocated or locked.
349 */
350 switch (pMemDarwin->Core.enmType)
351 {
352 case RTR0MEMOBJTYPE_LOW:
353 case RTR0MEMOBJTYPE_PAGE:
354 case RTR0MEMOBJTYPE_CONT:
355 break;
356
357 case RTR0MEMOBJTYPE_LOCK:
358 {
359#ifdef USE_VM_MAP_WIRE
360 vm_map_t Map = pMemDarwin->Core.u.Lock.R0Process != NIL_RTR0PROCESS
361 ? get_task_map((task_t)pMemDarwin->Core.u.Lock.R0Process)
362 : kernel_map;
363 kern_return_t kr = vm_map_unwire(Map,
364 (vm_map_offset_t)pMemDarwin->Core.pv,
365 (vm_map_offset_t)pMemDarwin->Core.pv + pMemDarwin->Core.cb,
366 0 /* not user */);
367 Assert(kr == KERN_SUCCESS); /** @todo don't ignore... */
368#endif
369 break;
370 }
371
372 case RTR0MEMOBJTYPE_PHYS:
373 /*if (pMemDarwin->Core.u.Phys.fAllocated)
374 IOFreePhysical(pMemDarwin->Core.u.Phys.PhysBase, pMemDarwin->Core.cb);*/
375 Assert(!pMemDarwin->Core.u.Phys.fAllocated);
376 break;
377
378 case RTR0MEMOBJTYPE_PHYS_NC:
379 AssertMsgFailed(("RTR0MEMOBJTYPE_PHYS_NC\n"));
380 return VERR_INTERNAL_ERROR;
381
382 case RTR0MEMOBJTYPE_RES_VIRT:
383 AssertMsgFailed(("RTR0MEMOBJTYPE_RES_VIRT\n"));
384 return VERR_INTERNAL_ERROR;
385
386 case RTR0MEMOBJTYPE_MAPPING:
387 /* nothing to do here. */
388 break;
389
390 default:
391 AssertMsgFailed(("enmType=%d\n", pMemDarwin->Core.enmType));
392 return VERR_INTERNAL_ERROR;
393 }
394
395 return VINF_SUCCESS;
396}
397
398
399
400/**
401 * Kernel memory alloc worker that uses inTaskWithPhysicalMask.
402 *
403 * @returns IPRT status code.
404 * @retval VERR_ADDRESS_TOO_BIG try another way.
405 *
406 * @param ppMem Where to return the memory object.
407 * @param cb The page aligned memory size.
408 * @param fExecutable Whether the mapping needs to be executable.
409 * @param fContiguous Whether the backing memory needs to be contiguous.
410 * @param PhysMask The mask for the backing memory (i.e. range). Use 0 if
411 * you don't care that much or are speculating.
412 * @param MaxPhysAddr The max address to verify the result against. Use
413 * UINT64_MAX if it doesn't matter.
414 * @param enmType The object type.
415 */
416static int rtR0MemObjNativeAllocWorker(PPRTR0MEMOBJINTERNAL ppMem, size_t cb,
417 bool fExecutable, bool fContiguous,
418 mach_vm_address_t PhysMask, uint64_t MaxPhysAddr,
419 RTR0MEMOBJTYPE enmType)
420{
421 /*
422 * Try inTaskWithPhysicalMask first, but since we don't quite trust that it
423 * actually respects the physical memory mask (10.5.x is certainly busted),
424 * we'll use rtR0MemObjNativeAllocCont as a fallback for dealing with that.
425 *
426 * The kIOMemoryKernelUserShared flag just forces the result to be page aligned.
427 */
428#if 1 /** @todo Figure out why this is broken. Is it only on snow leopard? Seen when allocating memory for the VM structure; the last page was corrupted or inaccessible. */
429 size_t const cbFudged = cb + PAGE_SIZE;
430#else
431 size_t const cbFudged = cb;
432#endif
433 int rc;
434 IOBufferMemoryDescriptor *pMemDesc =
435 IOBufferMemoryDescriptor::inTaskWithPhysicalMask(kernel_task,
436 kIOMemoryKernelUserShared
437 | kIODirectionInOut
438 | (fContiguous ? kIOMemoryPhysicallyContiguous : 0),
439 cbFudged,
440 PhysMask);
441 if (pMemDesc)
442 {
443 IOReturn IORet = pMemDesc->prepare(kIODirectionInOut);
444 if (IORet == kIOReturnSuccess)
445 {
446 void *pv = pMemDesc->getBytesNoCopy(0, cbFudged);
447 if (pv)
448 {
449 /*
450 * Check that it's all below MaxPhysAddr, page aligned, and contiguous when requested.
451 */
452 addr64_t AddrPrev = 0;
453 MaxPhysAddr &= ~(uint64_t)PAGE_OFFSET_MASK;
454 for (IOByteCount off = 0; off < cb; off += PAGE_SIZE)
455 {
456#ifdef __LP64__ /* Grumble! */
457 addr64_t Addr = pMemDesc->getPhysicalSegment(off, NULL);
458#else
459 addr64_t Addr = pMemDesc->getPhysicalSegment64(off, NULL);
460#endif
461 if ( Addr > MaxPhysAddr
462 || !Addr
463 || (Addr & PAGE_OFFSET_MASK)
464 || ( fContiguous
465 && off
466 && Addr != AddrPrev + PAGE_SIZE))
467 {
468 /* Buggy API, try allocate the memory another way. */
469 pMemDesc->release();
470 if (PhysMask)
471 LogAlways(("rtR0MemObjNativeAllocWorker: off=%x Addr=%llx AddrPrev=%llx MaxPhysAddr=%llx PhysMask=%llx - buggy API!\n",
472 off, Addr, AddrPrev, MaxPhysAddr, PhysMask));
473 return VERR_ADDRESS_TOO_BIG;
474 }
475 AddrPrev = Addr;
476 }
477
478#ifdef RT_STRICT
479 /* check that the memory is actually mapped. */
480 //addr64_t Addr = pMemDesc->getPhysicalSegment64(0, NULL);
481 //printf("rtR0MemObjNativeAllocWorker: pv=%p %8llx %8llx\n", pv, rtR0MemObjDarwinGetPTE(pv), Addr);
482 RTTHREADPREEMPTSTATE State = RTTHREADPREEMPTSTATE_INITIALIZER;
483 RTThreadPreemptDisable(&State);
484 rtR0MemObjDarwinTouchPages(pv, cb);
485 RTThreadPreemptRestore(&State);
486#endif
487
488 /*
489 * Create the IPRT memory object.
490 */
491 PRTR0MEMOBJDARWIN pMemDarwin = (PRTR0MEMOBJDARWIN)rtR0MemObjNew(sizeof(*pMemDarwin), enmType, pv, cb);
492 if (pMemDarwin)
493 {
494 if (fContiguous)
495 {
496#ifdef __LP64__ /* Grumble! */
497 addr64_t PhysBase64 = pMemDesc->getPhysicalSegment(0, NULL);
498#else
499 addr64_t PhysBase64 = pMemDesc->getPhysicalSegment64(0, NULL);
500#endif
501 RTHCPHYS PhysBase = PhysBase64; Assert(PhysBase == PhysBase64);
502 if (enmType == RTR0MEMOBJTYPE_CONT)
503 pMemDarwin->Core.u.Cont.Phys = PhysBase;
504 else if (enmType == RTR0MEMOBJTYPE_PHYS)
505 pMemDarwin->Core.u.Phys.PhysBase = PhysBase;
506 else
507 AssertMsgFailed(("enmType=%d\n", enmType));
508 }
509
510#if 0 /* Experimental code. */
511 if (fExecutable)
512 {
513 rc = rtR0MemObjNativeProtect(&pMemDarwin->Core, 0, cb, RTMEM_PROT_READ | RTMEM_PROT_WRITE | RTMEM_PROT_EXEC);
514# ifdef RT_STRICT
515 /* check that the memory is actually mapped. */
516 //addr64_t Addr = pMemDesc->getPhysicalSegment64(0, NULL);
517 //printf("rtR0MemObjNativeAllocWorker: pv=%p %8llx %8llx\n", pv, rtR0MemObjDarwinGetPTE(pv), Addr);
518 RTTHREADPREEMPTSTATE State = RTTHREADPREEMPTSTATE_INITIALIZER;
519 RTThreadPreemptDisable(&State);
520 rtR0MemObjDarwinTouchPages(pv, cb);
521 RTThreadPreemptRestore(&State);
522# endif
523 }
524 else
525#endif
526 rc = VINF_SUCCESS;
527 if (RT_SUCCESS(rc))
528 {
529 pMemDarwin->pMemDesc = pMemDesc;
530 *ppMem = &pMemDarwin->Core;
531 return VINF_SUCCESS;
532 }
533
534 rtR0MemObjDelete(&pMemDarwin->Core);
535 }
536
537 if (enmType == RTR0MEMOBJTYPE_PHYS_NC)
538 rc = VERR_NO_PHYS_MEMORY;
539 else if (enmType == RTR0MEMOBJTYPE_LOW)
540 rc = VERR_NO_LOW_MEMORY;
541 else if (enmType == RTR0MEMOBJTYPE_CONT)
542 rc = VERR_NO_CONT_MEMORY;
543 else
544 rc = VERR_NO_MEMORY;
545 }
546 else
547 rc = VERR_MEMOBJ_INIT_FAILED;
548 }
549 else
550 rc = RTErrConvertFromDarwinIO(IORet);
551 pMemDesc->release();
552 }
553 else
554 rc = VERR_MEMOBJ_INIT_FAILED;
555 Assert(rc != VERR_ADDRESS_TOO_BIG);
556 return rc;
557}
558
559
560DECLHIDDEN(int) rtR0MemObjNativeAllocPage(PPRTR0MEMOBJINTERNAL ppMem, size_t cb, bool fExecutable)
561{
562 return rtR0MemObjNativeAllocWorker(ppMem, cb, fExecutable, false /* fContiguous */,
563 0 /* PhysMask */, UINT64_MAX, RTR0MEMOBJTYPE_PAGE);
564}
565
566
567DECLHIDDEN(int) rtR0MemObjNativeAllocLow(PPRTR0MEMOBJINTERNAL ppMem, size_t cb, bool fExecutable)
568{
569 /*
570 * Try IOMallocPhysical/IOMallocAligned first.
571 * Then try optimistically without a physical address mask, which will always
572 * end up using IOMallocAligned.
573 *
574 * (See bug comment in the worker and IOBufferMemoryDescriptor::initWithPhysicalMask.)
575 */
576 int rc = rtR0MemObjNativeAllocWorker(ppMem, cb, fExecutable, false /* fContiguous */,
577 ~(uint32_t)PAGE_OFFSET_MASK, _4G - PAGE_SIZE, RTR0MEMOBJTYPE_LOW);
578 if (rc == VERR_ADDRESS_TOO_BIG)
579 rc = rtR0MemObjNativeAllocWorker(ppMem, cb, fExecutable, false /* fContiguous */,
580 0 /* PhysMask */, _4G - PAGE_SIZE, RTR0MEMOBJTYPE_LOW);
581 return rc;
582}
583
584
585DECLHIDDEN(int) rtR0MemObjNativeAllocCont(PPRTR0MEMOBJINTERNAL ppMem, size_t cb, bool fExecutable)
586{
587 int rc = rtR0MemObjNativeAllocWorker(ppMem, cb, fExecutable, true /* fContiguous */,
588 ~(uint32_t)PAGE_OFFSET_MASK, _4G - PAGE_SIZE,
589 RTR0MEMOBJTYPE_CONT);
590
591 /*
592 * Workaround for bogus IOKernelAllocateContiguous behavior, just in case.
593 * cb <= PAGE_SIZE allocations take a different path, using a different allocator.
594 */
595 if (RT_FAILURE(rc) && cb <= PAGE_SIZE)
596 rc = rtR0MemObjNativeAllocWorker(ppMem, cb + PAGE_SIZE, fExecutable, true /* fContiguous */,
597 ~(uint32_t)PAGE_OFFSET_MASK, _4G - PAGE_SIZE,
598 RTR0MEMOBJTYPE_CONT);
599 return rc;
600}
601
602
603DECLHIDDEN(int) rtR0MemObjNativeAllocPhys(PPRTR0MEMOBJINTERNAL ppMem, size_t cb, RTHCPHYS PhysHighest, size_t uAlignment)
604{
605 /** @todo alignment */
606 if (uAlignment != PAGE_SIZE)
607 return VERR_NOT_SUPPORTED;
608
609 /*
610 * Translate the PhysHighest address into a mask.
611 */
612 int rc;
613 if (PhysHighest == NIL_RTHCPHYS)
614 rc = rtR0MemObjNativeAllocWorker(ppMem, cb, true /* fExecutable */, true /* fContiguous */,
615 0 /* PhysMask*/, UINT64_MAX, RTR0MEMOBJTYPE_PHYS);
616 else
617 {
618 mach_vm_address_t PhysMask = 0;
619 PhysMask = ~(mach_vm_address_t)0;
620 while (PhysMask > (PhysHighest | PAGE_OFFSET_MASK))
621 PhysMask >>= 1;
622 AssertReturn(PhysMask + 1 >= cb, VERR_INVALID_PARAMETER);
623 PhysMask &= ~(mach_vm_address_t)PAGE_OFFSET_MASK;
624
625 rc = rtR0MemObjNativeAllocWorker(ppMem, cb, true /* fExecutable */, true /* fContiguous */,
626 PhysMask, PhysHighest, RTR0MEMOBJTYPE_PHYS);
627 }
628 return rc;
629}
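/*
 * Illustrative sketch (not part of the original sources): the PhysHighest to
 * PhysMask translation above, traced for PhysHighest = 0xdfffffff (3.5 GB).
 * The all-ones mask is shifted right until it no longer exceeds
 * PhysHighest | PAGE_OFFSET_MASK, then the page offset bits are cleared.
 */
#if 0 /* example only */
static mach_vm_address_t rtR0MemObjDarwinExamplePhysMask(RTHCPHYS PhysHighest)
{
    mach_vm_address_t PhysMask = ~(mach_vm_address_t)0;     /* 0xffffffffffffffff */
    while (PhysMask > (PhysHighest | PAGE_OFFSET_MASK))     /* stops at 0x7fffffff for 0xdfffffff */
        PhysMask >>= 1;
    return PhysMask & ~(mach_vm_address_t)PAGE_OFFSET_MASK; /* 0x7ffff000 */
}
#endif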
630
631
632DECLHIDDEN(int) rtR0MemObjNativeAllocPhysNC(PPRTR0MEMOBJINTERNAL ppMem, size_t cb, RTHCPHYS PhysHighest)
633{
634 /** @todo rtR0MemObjNativeAllocPhys / darwin.
635 * This might be a bit problematic and may very well require having to create our own
636 * object which we populate with pages but without mapping it into any address space.
637 * Estimate is 2-3 days.
638 */
639 return VERR_NOT_SUPPORTED;
640}
641
642
643DECLHIDDEN(int) rtR0MemObjNativeEnterPhys(PPRTR0MEMOBJINTERNAL ppMem, RTHCPHYS Phys, size_t cb, uint32_t uCachePolicy)
644{
645 AssertReturn(uCachePolicy == RTMEM_CACHE_POLICY_DONT_CARE, VERR_NOT_SUPPORTED);
646
647 /*
648 * Create a descriptor for it (the validation is always true on Intel Macs, but
649 * since it doesn't harm anything we keep it in).
650 */
651 int rc = VERR_ADDRESS_TOO_BIG;
652 IOAddressRange aRanges[1] = { { Phys, cb } };
653 if ( aRanges[0].address == Phys
654 && aRanges[0].length == cb)
655 {
656 IOMemoryDescriptor *pMemDesc = IOMemoryDescriptor::withAddressRanges(&aRanges[0], RT_ELEMENTS(aRanges),
657 kIODirectionInOut, NULL /*task*/);
658 if (pMemDesc)
659 {
660#ifdef __LP64__ /* Grumble! */
661 Assert(Phys == pMemDesc->getPhysicalSegment(0, 0));
662#else
663 Assert(Phys == pMemDesc->getPhysicalSegment64(0, 0));
664#endif
665
666 /*
667 * Create the IPRT memory object.
668 */
669 PRTR0MEMOBJDARWIN pMemDarwin = (PRTR0MEMOBJDARWIN)rtR0MemObjNew(sizeof(*pMemDarwin), RTR0MEMOBJTYPE_PHYS, NULL, cb);
670 if (pMemDarwin)
671 {
672 pMemDarwin->Core.u.Phys.PhysBase = Phys;
673 pMemDarwin->Core.u.Phys.fAllocated = false;
674 pMemDarwin->Core.u.Phys.uCachePolicy = uCachePolicy;
675 pMemDarwin->pMemDesc = pMemDesc;
676 *ppMem = &pMemDarwin->Core;
677 return VINF_SUCCESS;
678 }
679
680 rc = VERR_NO_MEMORY;
681 pMemDesc->release();
682 }
683 else
684 rc = VERR_MEMOBJ_INIT_FAILED;
685 }
686 else
687 AssertMsgFailed(("%#llx %llx\n", (unsigned long long)Phys, (unsigned long long)cb));
688 return rc;
689}
690
691
692/**
693 * Internal worker for locking down pages.
694 *
695 * @return IPRT status code.
696 *
697 * @param ppMem Where to store the memory object pointer.
698 * @param pv First page.
699 * @param cb Number of bytes.
700 * @param fAccess The desired access, a combination of RTMEM_PROT_READ
701 * and RTMEM_PROT_WRITE.
702 * @param Task The task \a pv and \a cb refer to.
703 */
704static int rtR0MemObjNativeLock(PPRTR0MEMOBJINTERNAL ppMem, void *pv, size_t cb, uint32_t fAccess, task_t Task)
705{
706 NOREF(fAccess);
707#ifdef USE_VM_MAP_WIRE
708 vm_map_t Map = get_task_map(Task);
709 Assert(Map);
710
711 /*
712 * First, try to lock the memory.
713 */
714 int rc = VERR_LOCK_FAILED;
715 kern_return_t kr = vm_map_wire(get_task_map(Task),
716 (vm_map_offset_t)pv,
717 (vm_map_offset_t)pv + cb,
718 VM_PROT_DEFAULT,
719 0 /* not user */);
720 if (kr == KERN_SUCCESS)
721 {
722 /*
723 * Create the IPRT memory object.
724 */
725 PRTR0MEMOBJDARWIN pMemDarwin = (PRTR0MEMOBJDARWIN)rtR0MemObjNew(sizeof(*pMemDarwin), RTR0MEMOBJTYPE_LOCK, pv, cb);
726 if (pMemDarwin)
727 {
728 pMemDarwin->Core.u.Lock.R0Process = (RTR0PROCESS)Task;
729 *ppMem = &pMemDarwin->Core;
730 return VINF_SUCCESS;
731 }
732
733 kr = vm_map_unwire(get_task_map(Task), (vm_map_offset_t)pv, (vm_map_offset_t)pv + cb, 0 /* not user */);
734 Assert(kr == KERN_SUCCESS);
735 rc = VERR_NO_MEMORY;
736 }
737
738#else
739
740 /*
741 * Create a descriptor and try lock it (prepare).
742 */
743 int rc = VERR_MEMOBJ_INIT_FAILED;
744 IOMemoryDescriptor *pMemDesc = IOMemoryDescriptor::withAddressRange((vm_address_t)pv, cb, kIODirectionInOut, Task);
745 if (pMemDesc)
746 {
747 IOReturn IORet = pMemDesc->prepare(kIODirectionInOut);
748 if (IORet == kIOReturnSuccess)
749 {
750 /*
751 * Create the IPRT memory object.
752 */
753 PRTR0MEMOBJDARWIN pMemDarwin = (PRTR0MEMOBJDARWIN)rtR0MemObjNew(sizeof(*pMemDarwin), RTR0MEMOBJTYPE_LOCK, pv, cb);
754 if (pMemDarwin)
755 {
756 pMemDarwin->Core.u.Lock.R0Process = (RTR0PROCESS)Task;
757 pMemDarwin->pMemDesc = pMemDesc;
758 *ppMem = &pMemDarwin->Core;
759 return VINF_SUCCESS;
760 }
761
762 pMemDesc->complete();
763 rc = VERR_NO_MEMORY;
764 }
765 else
766 rc = VERR_LOCK_FAILED;
767 pMemDesc->release();
768 }
769#endif
770 return rc;
771}
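/*
 * Illustrative sketch (not part of the original sources): the descriptor
 * lifecycle the IOMemoryDescriptor path above relies on. prepare() wires the
 * pages, complete() unwires them, release() drops the descriptor; the real
 * code keeps the descriptor prepared until rtR0MemObjNativeFree runs.
 */
#if 0 /* example only */
static int rtR0MemObjDarwinExampleWire(void *pv, size_t cb, task_t Task)
{
    IOMemoryDescriptor *pMemDesc = IOMemoryDescriptor::withAddressRange((vm_address_t)pv, cb, kIODirectionInOut, Task);
    if (!pMemDesc)
        return VERR_MEMOBJ_INIT_FAILED;
    if (pMemDesc->prepare(kIODirectionInOut) != kIOReturnSuccess) /* wire the pages down */
    {
        pMemDesc->release();
        return VERR_LOCK_FAILED;
    }
    /* ... the memory stays resident here ... */
    pMemDesc->complete(); /* unwire */
    pMemDesc->release();  /* free the descriptor */
    return VINF_SUCCESS;
}
#endif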
772
773
774DECLHIDDEN(int) rtR0MemObjNativeLockUser(PPRTR0MEMOBJINTERNAL ppMem, RTR3PTR R3Ptr, size_t cb, uint32_t fAccess, RTR0PROCESS R0Process)
775{
776 return rtR0MemObjNativeLock(ppMem, (void *)R3Ptr, cb, fAccess, (task_t)R0Process);
777}
778
779
780DECLHIDDEN(int) rtR0MemObjNativeLockKernel(PPRTR0MEMOBJINTERNAL ppMem, void *pv, size_t cb, uint32_t fAccess)
781{
782 return rtR0MemObjNativeLock(ppMem, pv, cb, fAccess, kernel_task);
783}
784
785
786DECLHIDDEN(int) rtR0MemObjNativeReserveKernel(PPRTR0MEMOBJINTERNAL ppMem, void *pvFixed, size_t cb, size_t uAlignment)
787{
788 return VERR_NOT_SUPPORTED;
789}
790
791
792DECLHIDDEN(int) rtR0MemObjNativeReserveUser(PPRTR0MEMOBJINTERNAL ppMem, RTR3PTR R3PtrFixed, size_t cb, size_t uAlignment, RTR0PROCESS R0Process)
793{
794 return VERR_NOT_SUPPORTED;
795}
796
797
798DECLHIDDEN(int) rtR0MemObjNativeMapKernel(PPRTR0MEMOBJINTERNAL ppMem, RTR0MEMOBJ pMemToMap, void *pvFixed, size_t uAlignment,
799 unsigned fProt, size_t offSub, size_t cbSub)
800{
801 AssertReturn(pvFixed == (void *)-1, VERR_NOT_SUPPORTED);
802
803 /*
804 * Check that the specified alignment is supported.
805 */
806 if (uAlignment > PAGE_SIZE)
807 return VERR_NOT_SUPPORTED;
808
809 /*
810 * Must have a memory descriptor that we can map.
811 */
812 int rc = VERR_INVALID_PARAMETER;
813 PRTR0MEMOBJDARWIN pMemToMapDarwin = (PRTR0MEMOBJDARWIN)pMemToMap;
814 if (pMemToMapDarwin->pMemDesc)
815 {
816#if MAC_OS_X_VERSION_MIN_REQUIRED >= 1050
817 IOMemoryMap *pMemMap = pMemToMapDarwin->pMemDesc->createMappingInTask(kernel_task,
818 0,
819 kIOMapAnywhere | kIOMapDefaultCache,
820 offSub,
821 cbSub);
822#else
823 IOMemoryMap *pMemMap = pMemToMapDarwin->pMemDesc->map(kernel_task,
824 0,
825 kIOMapAnywhere | kIOMapDefaultCache,
826 offSub,
827 cbSub);
828#endif
829 if (pMemMap)
830 {
831 IOVirtualAddress VirtAddr = pMemMap->getVirtualAddress();
832 void *pv = (void *)(uintptr_t)VirtAddr;
833 if ((uintptr_t)pv == VirtAddr)
834 {
835 //addr64_t Addr = pMemToMapDarwin->pMemDesc->getPhysicalSegment64(offSub, NULL);
836 //printf("pv=%p: %8llx %8llx\n", pv, rtR0MemObjDarwinGetPTE(pv), Addr);
837
838// /*
839// * Explicitly lock it so that we're sure it is present and that
840// * its PTEs cannot be recycled.
841// * Note! withAddressRange() doesn't work as it adds kIOMemoryTypeVirtual64
842// * to the options which causes prepare() to not wire the pages.
843// * This is probably a bug.
844// */
845// IOAddressRange Range = { (mach_vm_address_t)pv, cbSub };
846// IOMemoryDescriptor *pMemDesc = IOMemoryDescriptor::withOptions(&Range,
847// 1 /* count */,
848// 0 /* offset */,
849// kernel_task,
850// kIODirectionInOut | kIOMemoryTypeVirtual,
851// kIOMapperSystem);
852// if (pMemDesc)
853// {
854// IOReturn IORet = pMemDesc->prepare(kIODirectionInOut);
855// if (IORet == kIOReturnSuccess)
856// {
857 /* HACK ALERT! */
858 rtR0MemObjDarwinTouchPages(pv, cbSub);
859 /** @todo First, the memory should've been mapped by now, and second, it
860 * should have the wired attribute in the PTE (bit 9). Neither seems
861 * to be the case. The disabled locking code doesn't make any
862 * difference, which is extremely odd, and breaks
863 * rtR0MemObjNativeGetPagePhysAddr (getPhysicalSegment64 -> 64 for the
864 * lock descriptor). */
865 //addr64_t Addr = pMemDesc->getPhysicalSegment64(0, NULL);
866 //printf("pv=%p: %8llx %8llx (%d)\n", pv, rtR0MemObjDarwinGetPTE(pv), Addr, 2);
867
868 /*
869 * Create the IPRT memory object.
870 */
871 PRTR0MEMOBJDARWIN pMemDarwin = (PRTR0MEMOBJDARWIN)rtR0MemObjNew(sizeof(*pMemDarwin), RTR0MEMOBJTYPE_MAPPING,
872 pv, cbSub);
873 if (pMemDarwin)
874 {
875 pMemDarwin->Core.u.Mapping.R0Process = NIL_RTR0PROCESS;
876 pMemDarwin->pMemMap = pMemMap;
877// pMemDarwin->pMemDesc = pMemDesc;
878 *ppMem = &pMemDarwin->Core;
879 return VINF_SUCCESS;
880 }
881
882// pMemDesc->complete();
883// rc = VERR_NO_MEMORY;
884// }
885// else
886// rc = RTErrConvertFromDarwinIO(IORet);
887// pMemDesc->release();
888// }
889// else
890// rc = VERR_MEMOBJ_INIT_FAILED;
891 }
892 else
893 rc = VERR_ADDRESS_TOO_BIG;
894 pMemMap->release();
895 }
896 else
897 rc = VERR_MAP_FAILED;
898 }
899 return rc;
900}
901
902
903DECLHIDDEN(int) rtR0MemObjNativeMapUser(PPRTR0MEMOBJINTERNAL ppMem, RTR0MEMOBJ pMemToMap, RTR3PTR R3PtrFixed, size_t uAlignment, unsigned fProt, RTR0PROCESS R0Process)
904{
905 /*
906 * Check for unsupported things.
907 */
908 AssertReturn(R3PtrFixed == (RTR3PTR)-1, VERR_NOT_SUPPORTED);
909 if (uAlignment > PAGE_SIZE)
910 return VERR_NOT_SUPPORTED;
911
912 /*
913 * Must have a memory descriptor.
914 */
915 int rc = VERR_INVALID_PARAMETER;
916 PRTR0MEMOBJDARWIN pMemToMapDarwin = (PRTR0MEMOBJDARWIN)pMemToMap;
917 if (pMemToMapDarwin->pMemDesc)
918 {
919#if MAC_OS_X_VERSION_MIN_REQUIRED >= 1050
920 IOMemoryMap *pMemMap = pMemToMapDarwin->pMemDesc->createMappingInTask((task_t)R0Process,
921 0,
922 kIOMapAnywhere | kIOMapDefaultCache,
923 0 /* offset */,
924 0 /* length */);
925#else
926 IOMemoryMap *pMemMap = pMemToMapDarwin->pMemDesc->map((task_t)R0Process,
927 0,
928 kIOMapAnywhere | kIOMapDefaultCache);
929#endif
930 if (pMemMap)
931 {
932 IOVirtualAddress VirtAddr = pMemMap->getVirtualAddress();
933 void *pv = (void *)(uintptr_t)VirtAddr;
934 if ((uintptr_t)pv == VirtAddr)
935 {
936 /*
937 * Create the IPRT memory object.
938 */
939 PRTR0MEMOBJDARWIN pMemDarwin = (PRTR0MEMOBJDARWIN)rtR0MemObjNew(sizeof(*pMemDarwin), RTR0MEMOBJTYPE_MAPPING,
940 pv, pMemToMapDarwin->Core.cb);
941 if (pMemDarwin)
942 {
943 pMemDarwin->Core.u.Mapping.R0Process = R0Process;
944 pMemDarwin->pMemMap = pMemMap;
945 *ppMem = &pMemDarwin->Core;
946 return VINF_SUCCESS;
947 }
948
949 rc = VERR_NO_MEMORY;
950 }
951 else
952 rc = VERR_ADDRESS_TOO_BIG;
953 pMemMap->release();
954 }
955 else
956 rc = VERR_MAP_FAILED;
957 }
958 return rc;
959}
960
961
962DECLHIDDEN(int) rtR0MemObjNativeProtect(PRTR0MEMOBJINTERNAL pMem, size_t offSub, size_t cbSub, uint32_t fProt)
963{
964 /* Get the map for the object. */
965 vm_map_t pVmMap = rtR0MemObjDarwinGetMap(pMem);
966 if (!pVmMap)
967 return VERR_NOT_SUPPORTED;
968
969 /* Convert the protection. */
970 vm_prot_t fMachProt;
971 switch (fProt)
972 {
973 case RTMEM_PROT_NONE:
974 fMachProt = VM_PROT_NONE;
975 break;
976 case RTMEM_PROT_READ:
977 fMachProt = VM_PROT_READ;
978 break;
979 case RTMEM_PROT_READ | RTMEM_PROT_WRITE:
980 fMachProt = VM_PROT_READ | VM_PROT_WRITE;
981 break;
982 case RTMEM_PROT_READ | RTMEM_PROT_WRITE | RTMEM_PROT_EXEC:
983 fMachProt = VM_PROT_READ | VM_PROT_WRITE | VM_PROT_EXECUTE;
984 break;
985 case RTMEM_PROT_WRITE | RTMEM_PROT_EXEC:
986 fMachProt = VM_PROT_WRITE | VM_PROT_EXECUTE;
987 break;
988 case RTMEM_PROT_EXEC:
989 fMachProt = VM_PROT_EXECUTE;
990 break;
991 default:
992 AssertFailedReturn(VERR_INVALID_PARAMETER);
993 }
994
995 /* do the job. */
996 vm_offset_t Start = (uintptr_t)pMem->pv + offSub;
997 kern_return_t krc = vm_protect(pVmMap,
998 Start,
999 cbSub,
1000 false,
1001 fMachProt);
1002 if (krc != KERN_SUCCESS)
1003 return RTErrConvertFromDarwinKern(krc);
1004 return VINF_SUCCESS;
1005}
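/*
 * Illustrative sketch (not part of the original sources): what the protect
 * worker above boils down to when making one page of a kernel mapping
 * read-only; set_maximum is false so only the current protection changes.
 */
#if 0 /* example only */
static int rtR0MemObjDarwinExampleMakeReadOnly(void *pv)
{
    kern_return_t krc = vm_protect(kernel_map, (vm_offset_t)(uintptr_t)pv, PAGE_SIZE,
                                   false /* set_maximum */, VM_PROT_READ);
    return krc == KERN_SUCCESS ? VINF_SUCCESS : RTErrConvertFromDarwinKern(krc);
}
#endif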
1006
1007
1008DECLHIDDEN(RTHCPHYS) rtR0MemObjNativeGetPagePhysAddr(PRTR0MEMOBJINTERNAL pMem, size_t iPage)
1009{
1010 RTHCPHYS PhysAddr;
1011 PRTR0MEMOBJDARWIN pMemDarwin = (PRTR0MEMOBJDARWIN)pMem;
1012
1013#ifdef USE_VM_MAP_WIRE
1014 /*
1015 * Locked memory doesn't have a memory descriptor and
1016 * needs to be handled differently.
1017 */
1018 if (pMemDarwin->Core.enmType == RTR0MEMOBJTYPE_LOCK)
1019 {
1020 ppnum_t PgNo;
1021 if (pMemDarwin->Core.u.Lock.R0Process == NIL_RTR0PROCESS)
1022 PgNo = pmap_find_phys(kernel_pmap, (uintptr_t)pMemDarwin->Core.pv + iPage * PAGE_SIZE);
1023 else
1024 {
1025 /*
1026 * From what I can tell, Apple seems to have locked up all the
1027 * available interfaces that could help us obtain the pmap_t of a task
1028 * or vm_map_t.
1029 *
1030 * So, we'll have to figure out where in the vm_map_t structure it is
1031 * and read it ourselves. ASSUMING that kernel_pmap is pointed to by
1032 * kernel_map->pmap, we scan kernel_map to locate the structure offset.
1033 * Not nice, but it will hopefully do the job in a reliable manner...
1034 *
1035 * (get_task_pmap, get_map_pmap or vm_map_pmap is what we really need btw.)
1036 */
1037 static int s_offPmap = -1;
1038 if (RT_UNLIKELY(s_offPmap == -1))
1039 {
1040 pmap_t const *p = (pmap_t *)kernel_map;
1041 pmap_t const * const pEnd = p + 64;
1042 for (; p < pEnd; p++)
1043 if (*p == kernel_pmap)
1044 {
1045 s_offPmap = (uintptr_t)p - (uintptr_t)kernel_map;
1046 break;
1047 }
1048 AssertReturn(s_offPmap >= 0, NIL_RTHCPHYS);
1049 }
1050 pmap_t Pmap = *(pmap_t *)((uintptr_t)get_task_map((task_t)pMemDarwin->Core.u.Lock.R0Process) + s_offPmap);
1051 PgNo = pmap_find_phys(Pmap, (uintptr_t)pMemDarwin->Core.pv + iPage * PAGE_SIZE);
1052 }
1053
1054 AssertReturn(PgNo, NIL_RTHCPHYS);
1055 PhysAddr = (RTHCPHYS)PgNo << PAGE_SHIFT;
1056 Assert((PhysAddr >> PAGE_SHIFT) == PgNo);
1057 }
1058 else
1059#endif /* USE_VM_MAP_WIRE */
1060 {
1061 /*
1062 * Get the memory descriptor.
1063 */
1064 IOMemoryDescriptor *pMemDesc = pMemDarwin->pMemDesc;
1065 if (!pMemDesc)
1066 pMemDesc = pMemDarwin->pMemMap->getMemoryDescriptor();
1067 AssertReturn(pMemDesc, NIL_RTHCPHYS);
1068
1069 /*
1070 * If we've got a memory descriptor, use getPhysicalSegment() / getPhysicalSegment64().
1071 */
1072#ifdef __LP64__ /* Grumble! */
1073 addr64_t Addr = pMemDesc->getPhysicalSegment(iPage * PAGE_SIZE, NULL);
1074#else
1075 addr64_t Addr = pMemDesc->getPhysicalSegment64(iPage * PAGE_SIZE, NULL);
1076#endif
1077 AssertMsgReturn(Addr, ("iPage=%u\n", iPage), NIL_RTHCPHYS);
1078 PhysAddr = Addr;
1079 AssertMsgReturn(PhysAddr == Addr, ("PhysAddr=%RHp Addr=%RX64\n", PhysAddr, (uint64_t)Addr), NIL_RTHCPHYS);
1080 }
1081
1082 return PhysAddr;
1083}
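/*
 * Illustrative sketch (not part of the original sources): enumerating the
 * physical backing of an object one page at a time via the worker above,
 * the way generic ring-0 code typically builds a page list.
 */
#if 0 /* example only */
static void rtR0MemObjDarwinExampleDumpPages(PRTR0MEMOBJINTERNAL pMem)
{
    size_t const cPages = pMem->cb >> PAGE_SHIFT;
    for (size_t iPage = 0; iPage < cPages; iPage++)
        printf("page %u: %llx\n", (unsigned)iPage,
               (unsigned long long)rtR0MemObjNativeGetPagePhysAddr(pMem, iPage));
}
#endif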
1084