VirtualBox

source: vbox/trunk/src/VBox/Runtime/r0drv/darwin/memobj-r0drv-darwin.cpp@82868

Last change on this file: revision 82868, checked in by vboxsync, 5 years ago

IPRT/memobj-r0drv-darwin.cpp: Added support for alignments other than PAGE_SIZE in rtR0MemObjNativeAllocPhys. This allows for large page allocations. bugref:5324
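
As a rough illustration of what the new alignment support enables, here is a minimal ring-0 sketch requesting a 2 MB aligned, physically contiguous block (a large-page candidate). It assumes the RTR0MemObjAllocPhysEx wrapper declared in include/iprt/memobj.h; the helper name is invented for the example and is not part of this file:

    #include <iprt/memobj.h>
    #include <iprt/assert.h>
    #include <iprt/err.h>

    /* Illustrative sketch only; not part of memobj-r0drv-darwin.cpp. */
    static int exampleAllocLargePage(PRTR0MEMOBJ phMemObj)
    {
        /* 2 MB size, anywhere in physical memory, 2 MB alignment. */
        int rc = RTR0MemObjAllocPhysEx(phMemObj, _2M, NIL_RTHCPHYS, _2M /* uAlignment */);
        if (RT_SUCCESS(rc))
            Assert(!(RTR0MemObjGetPagePhysAddr(*phMemObj, 0 /* iPage */) & (_2M - 1)));
        return rc;
    }

On Darwin such a request ends up in rtR0MemObjNativeAllocWorker below, with a physical mask derived from the requested alignment.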

1/* $Id: memobj-r0drv-darwin.cpp 82868 2020-01-27 10:25:43Z vboxsync $ */
2/** @file
3 * IPRT - Ring-0 Memory Objects, Darwin.
4 */
5
6/*
7 * Copyright (C) 2006-2019 Oracle Corporation
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 *
17 * The contents of this file may alternatively be used under the terms
18 * of the Common Development and Distribution License Version 1.0
19 * (CDDL) only, as it comes in the "COPYING.CDDL" file of the
20 * VirtualBox OSE distribution, in which case the provisions of the
21 * CDDL are applicable instead of those of the GPL.
22 *
23 * You may elect to license modified versions of this file under the
24 * terms and conditions of either the GPL or the CDDL or both.
25 */
26
27
28/*********************************************************************************************************************************
29* Header Files *
30*********************************************************************************************************************************/
31#define RTMEM_NO_WRAP_TO_EF_APIS /* circular dependency otherwise. */
32#include "the-darwin-kernel.h"
33#include "internal/iprt.h"
34#include <iprt/memobj.h>
35
36#include <iprt/asm.h>
37#if defined(RT_ARCH_AMD64) || defined(RT_ARCH_X86)
38# include <iprt/x86.h>
39# include <iprt/asm-amd64-x86.h>
40#endif
41#include <iprt/assert.h>
42#include <iprt/log.h>
43#include <iprt/mem.h>
44#include <iprt/param.h>
45#include <iprt/process.h>
46#include <iprt/string.h>
47#include <iprt/thread.h>
48#include "internal/memobj.h"
49
50
51/*********************************************************************************************************************************
52* Defined Constants And Macros *
53*********************************************************************************************************************************/
54#define MY_PRINTF(...) do { printf(__VA_ARGS__); kprintf(__VA_ARGS__); } while (0)
55
56/*#define USE_VM_MAP_WIRE - may re-enable later when non-mapped allocations are added. */
57
58
59/*********************************************************************************************************************************
60* Structures and Typedefs *
61*********************************************************************************************************************************/
62/**
63 * The Darwin version of the memory object structure.
64 */
65typedef struct RTR0MEMOBJDARWIN
66{
67 /** The core structure. */
68 RTR0MEMOBJINTERNAL Core;
69 /** Pointer to the memory descriptor created for allocated and locked memory. */
70 IOMemoryDescriptor *pMemDesc;
71 /** Pointer to the memory mapping object for mapped memory. */
72 IOMemoryMap *pMemMap;
73} RTR0MEMOBJDARWIN, *PRTR0MEMOBJDARWIN;
74
75
76/**
77 * Touch the pages to force the kernel to create or write-enable the page table
78 * entries.
79 *
80 * This is necessary since the kernel gets upset if we take a page fault when
81 * preemption is disabled and/or we own a simple lock (same thing). It has no
82 * problems with us disabling interrupts when taking the traps, weird stuff.
83 *
84 * (This is basically a way of invoking vm_fault on a range of pages.)
85 *
86 * @param pv Pointer to the first page.
87 * @param cb The number of bytes.
88 */
89static void rtR0MemObjDarwinTouchPages(void *pv, size_t cb)
90{
91 uint32_t volatile *pu32 = (uint32_t volatile *)pv;
92 for (;;)
93 {
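        /* The locked compare-exchange needs write access to the page even when the compare
           fails, so it forces a present, write-enabled PTE without altering the contents. */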
94 ASMAtomicCmpXchgU32(pu32, 0xdeadbeef, 0xdeadbeef);
95 if (cb <= PAGE_SIZE)
96 break;
97 cb -= PAGE_SIZE;
98 pu32 += PAGE_SIZE / sizeof(uint32_t);
99 }
100}
101
102
103/**
104 * Read (sniff) every page in the range to make sure there are some page table
105 * entries backing it.
106 *
107 * This is just to be sure vm_protect didn't remove stuff without re-adding it
108 * if someone should try to write-protect something.
109 *
110 * @param pv Pointer to the first page.
111 * @param cb The number of bytes.
112 */
113static void rtR0MemObjDarwinSniffPages(void const *pv, size_t cb)
114{
115 uint32_t volatile *pu32 = (uint32_t volatile *)pv;
116 uint32_t volatile u32Counter = 0;
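    /* The volatile qualifiers keep the compiler from optimizing the dummy reads away. */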
117 for (;;)
118 {
119 u32Counter += *pu32;
120
121 if (cb <= PAGE_SIZE)
122 break;
123 cb -= PAGE_SIZE;
124 pu32 += PAGE_SIZE / sizeof(uint32_t);
125 }
126}
127
128
129/**
130 * Gets the virtual memory map the specified object is mapped into.
131 *
132 * @returns VM map handle on success, NULL if no map.
133 * @param pMem The memory object.
134 */
135DECLINLINE(vm_map_t) rtR0MemObjDarwinGetMap(PRTR0MEMOBJINTERNAL pMem)
136{
137 switch (pMem->enmType)
138 {
139 case RTR0MEMOBJTYPE_PAGE:
140 case RTR0MEMOBJTYPE_LOW:
141 case RTR0MEMOBJTYPE_CONT:
142 return kernel_map;
143
144 case RTR0MEMOBJTYPE_PHYS:
145 case RTR0MEMOBJTYPE_PHYS_NC:
146 if (pMem->pv)
147 return kernel_map;
148 return NULL;
149
150 case RTR0MEMOBJTYPE_LOCK:
151 return pMem->u.Lock.R0Process == NIL_RTR0PROCESS
152 ? kernel_map
153 : get_task_map((task_t)pMem->u.Lock.R0Process);
154
155 case RTR0MEMOBJTYPE_RES_VIRT:
156 return pMem->u.ResVirt.R0Process == NIL_RTR0PROCESS
157 ? kernel_map
158 : get_task_map((task_t)pMem->u.ResVirt.R0Process);
159
160 case RTR0MEMOBJTYPE_MAPPING:
161 return pMem->u.Mapping.R0Process == NIL_RTR0PROCESS
162 ? kernel_map
163 : get_task_map((task_t)pMem->u.Mapping.R0Process);
164
165 default:
166 return NULL;
167 }
168}
169
170#if 0 /* not necessary after all */
171/* My vm_map mockup. */
172struct my_vm_map
173{
174 struct { char pad[8]; } lock;
175 struct my_vm_map_header
176 {
177 struct vm_map_links
178 {
179 void *prev;
180 void *next;
181 vm_map_offset_t start;
182 vm_map_offset_t end;
183 } links;
184 int nentries;
185 boolean_t entries_pageable;
186 } hdr;
187 pmap_t pmap;
188 vm_map_size_t size;
189};
190
191
192/**
193 * Gets the minimum map address, this is similar to get_map_min.
194 *
195 * @returns The start address of the map.
196 * @param pMap The map.
197 */
198static vm_map_offset_t rtR0MemObjDarwinGetMapMin(vm_map_t pMap)
199{
200 /* Lazy discovery of the correct offset. The Apple guys are a wonderfully secretive bunch. */
201 static int32_t volatile s_offAdjust = INT32_MAX;
202 int32_t off = s_offAdjust;
203 if (off == INT32_MAX)
204 {
205 for (off = 0; ; off += sizeof(pmap_t))
206 {
207 if (*(pmap_t *)((uint8_t *)kernel_map + off) == kernel_pmap)
208 break;
209 AssertReturn(off <= RT_MAX(RT_OFFSETOF(struct my_vm_map, pmap) * 4, 1024), 0x1000);
210 }
211 ASMAtomicWriteS32(&s_offAdjust, off - RT_OFFSETOF(struct my_vm_map, pmap));
212 }
213
214 /* calculate it. */
215 struct my_vm_map *pMyMap = (struct my_vm_map *)((uint8_t *)pMap + off);
216 return pMyMap->hdr.links.start;
217}
218#endif /* unused */
219
220#ifdef RT_STRICT
221# if 0 /* unused */
222
223/**
224 * Read from a physical page.
225 *
226 * @param HCPhys The address to start reading at.
227 * @param cb How many bytes to read.
228 * @param pvDst Where to put the bytes. This is zero'd on failure.
229 */
230static void rtR0MemObjDarwinReadPhys(RTHCPHYS HCPhys, size_t cb, void *pvDst)
231{
232 memset(pvDst, '\0', cb);
233
234 IOAddressRange aRanges[1] = { { (mach_vm_address_t)HCPhys, RT_ALIGN_Z(cb, PAGE_SIZE) } };
235 IOMemoryDescriptor *pMemDesc = IOMemoryDescriptor::withAddressRanges(&aRanges[0], RT_ELEMENTS(aRanges),
236 kIODirectionIn, NULL /*task*/);
237 if (pMemDesc)
238 {
239#if MAC_OS_X_VERSION_MIN_REQUIRED >= 1050
240 IOMemoryMap *pMemMap = pMemDesc->createMappingInTask(kernel_task, 0, kIOMapAnywhere | kIOMapDefaultCache);
241#else
242 IOMemoryMap *pMemMap = pMemDesc->map(kernel_task, 0, kIOMapAnywhere | kIOMapDefaultCache);
243#endif
244 if (pMemMap)
245 {
246 void const *pvSrc = (void const *)(uintptr_t)pMemMap->getVirtualAddress();
247 memcpy(pvDst, pvSrc, cb);
248 pMemMap->release();
249 }
250 else
251 MY_PRINTF("rtR0MemObjDarwinReadPhys: createMappingInTask failed; HCPhys=%llx\n", HCPhys);
252
253 pMemDesc->release();
254 }
255 else
256 MY_PRINTF("rtR0MemObjDarwinReadPhys: withAddressRanges failed; HCPhys=%llx\n", HCPhys);
257}
258
259
260/**
261 * Gets the PTE for a page.
262 *
263 * @returns the PTE.
264 * @param pvPage The virtual address to get the PTE for.
265 */
266static uint64_t rtR0MemObjDarwinGetPTE(void *pvPage)
267{
268 RTUINT64U u64;
269 RTCCUINTREG cr3 = ASMGetCR3();
270 RTCCUINTREG cr4 = ASMGetCR4();
271 bool fPAE = false;
272 bool fLMA = false;
273 if (cr4 & X86_CR4_PAE)
274 {
275 fPAE = true;
276 uint32_t fExtFeatures = ASMCpuId_EDX(0x80000001);
277 if (fExtFeatures & X86_CPUID_EXT_FEATURE_EDX_LONG_MODE)
278 {
279 uint64_t efer = ASMRdMsr(MSR_K6_EFER);
280 if (efer & MSR_K6_EFER_LMA)
281 fLMA = true;
282 }
283 }
284
285 if (fLMA)
286 {
287 /* PML4 */
288 rtR0MemObjDarwinReadPhys((cr3 & ~(RTCCUINTREG)PAGE_OFFSET_MASK) | (((uint64_t)(uintptr_t)pvPage >> X86_PML4_SHIFT) & X86_PML4_MASK) * 8, 8, &u64);
289 if (!(u64.u & X86_PML4E_P))
290 {
291 MY_PRINTF("rtR0MemObjDarwinGetPTE: %p -> PML4E !p\n", pvPage);
292 return 0;
293 }
294
295 /* PDPTR */
296 rtR0MemObjDarwinReadPhys((u64.u & ~(uint64_t)PAGE_OFFSET_MASK) | (((uintptr_t)pvPage >> X86_PDPT_SHIFT) & X86_PDPT_MASK_AMD64) * 8, 8, &u64);
297 if (!(u64.u & X86_PDPE_P))
298 {
299 MY_PRINTF("rtR0MemObjDarwinGetPTE: %p -> PDPTE !p\n", pvPage);
300 return 0;
301 }
302 if (u64.u & X86_PDPE_LM_PS)
303 return (u64.u & ~(uint64_t)(_1G -1)) | ((uintptr_t)pvPage & (_1G -1));
304
305 /* PD */
306 rtR0MemObjDarwinReadPhys((u64.u & ~(uint64_t)PAGE_OFFSET_MASK) | (((uintptr_t)pvPage >> X86_PD_PAE_SHIFT) & X86_PD_PAE_MASK) * 8, 8, &u64);
307 if (!(u64.u & X86_PDE_P))
308 {
309 MY_PRINTF("rtR0MemObjDarwinGetPTE: %p -> PDE !p\n", pvPage);
310 return 0;
311 }
312 if (u64.u & X86_PDE_PS)
313 return (u64.u & ~(uint64_t)(_2M -1)) | ((uintptr_t)pvPage & (_2M -1));
314
315 /* PT */
316 rtR0MemObjDarwinReadPhys((u64.u & ~(uint64_t)PAGE_OFFSET_MASK) | (((uintptr_t)pvPage >> X86_PT_PAE_SHIFT) & X86_PT_PAE_MASK) * 8, 8, &u64);
317 if (!(u64.u & X86_PTE_P))
318 {
319 MY_PRINTF("rtR0MemObjDarwinGetPTE: %p -> PTE !p\n", pvPage);
320 return 0;
321 }
322 return u64.u;
323 }
324
325 if (fPAE)
326 {
327 /* PDPTR */
328 rtR0MemObjDarwinReadPhys((u64.u & X86_CR3_PAE_PAGE_MASK) | (((uintptr_t)pvPage >> X86_PDPT_SHIFT) & X86_PDPT_MASK_PAE) * 8, 8, &u64);
329 if (!(u64.u & X86_PDE_P))
330 return 0;
331
332 /* PD */
333 rtR0MemObjDarwinReadPhys((u64.u & ~(uint64_t)PAGE_OFFSET_MASK) | (((uintptr_t)pvPage >> X86_PD_PAE_SHIFT) & X86_PD_PAE_MASK) * 8, 8, &u64);
334 if (!(u64.u & X86_PDE_P))
335 return 0;
336 if (u64.u & X86_PDE_PS)
337 return (u64.u & ~(uint64_t)(_2M -1)) | ((uintptr_t)pvPage & (_2M -1));
338
339 /* PT */
340 rtR0MemObjDarwinReadPhys((u64.u & ~(uint64_t)PAGE_OFFSET_MASK) | (((uintptr_t)pvPage >> X86_PT_PAE_SHIFT) & X86_PT_PAE_MASK) * 8, 8, &u64);
341 if (!(u64.u & X86_PTE_P))
342 return 0;
343 return u64.u;
344 }
345
346 /* PD */
347 rtR0MemObjDarwinReadPhys((u64.au32[0] & ~(uint32_t)PAGE_OFFSET_MASK) | (((uintptr_t)pvPage >> X86_PD_SHIFT) & X86_PD_MASK) * 4, 4, &u64);
348 if (!(u64.au32[0] & X86_PDE_P))
349 return 0;
350 if (u64.au32[0] & X86_PDE_PS)
351 return (u64.u & ~(uint64_t)(_2M -1)) | ((uintptr_t)pvPage & (_2M -1));
352
353 /* PT */
354 rtR0MemObjDarwinReadPhys((u64.au32[0] & ~(uint32_t)PAGE_OFFSET_MASK) | (((uintptr_t)pvPage >> X86_PT_SHIFT) & X86_PT_MASK) * 4, 4, &u64);
355 if (!(u64.au32[0] & X86_PTE_P))
356 return 0;
357 return u64.au32[0];
358
359 return 0;
360}
361
362# endif /* unused */
363#endif /* RT_STRICT */
364
365DECLHIDDEN(int) rtR0MemObjNativeFree(RTR0MEMOBJ pMem)
366{
367 PRTR0MEMOBJDARWIN pMemDarwin = (PRTR0MEMOBJDARWIN)pMem;
368 IPRT_DARWIN_SAVE_EFL_AC();
369
370 /*
371 * Release the IOMemoryDescriptor or/and IOMemoryMap associated with the object.
372 */
373 if (pMemDarwin->pMemDesc)
374 {
375 pMemDarwin->pMemDesc->complete();
376 pMemDarwin->pMemDesc->release();
377 pMemDarwin->pMemDesc = NULL;
378 }
379
380 if (pMemDarwin->pMemMap)
381 {
382 pMemDarwin->pMemMap->release();
383 pMemDarwin->pMemMap = NULL;
384 }
385
386 /*
387 * Release any memory that we've allocated or locked.
388 */
389 switch (pMemDarwin->Core.enmType)
390 {
391 case RTR0MEMOBJTYPE_LOW:
392 case RTR0MEMOBJTYPE_PAGE:
393 case RTR0MEMOBJTYPE_CONT:
394 break;
395
396 case RTR0MEMOBJTYPE_LOCK:
397 {
398#ifdef USE_VM_MAP_WIRE
399 vm_map_t Map = pMemDarwin->Core.u.Lock.R0Process != NIL_RTR0PROCESS
400 ? get_task_map((task_t)pMemDarwin->Core.u.Lock.R0Process)
401 : kernel_map;
402 kern_return_t kr = vm_map_unwire(Map,
403 (vm_map_offset_t)pMemDarwin->Core.pv,
404 (vm_map_offset_t)pMemDarwin->Core.pv + pMemDarwin->Core.cb,
405 0 /* not user */);
406 AssertRC(kr == KERN_SUCCESS); /** @todo don't ignore... */
407#endif
408 break;
409 }
410
411 case RTR0MEMOBJTYPE_PHYS:
412 /*if (pMemDarwin->Core.u.Phys.fAllocated)
413 IOFreePhysical(pMemDarwin->Core.u.Phys.PhysBase, pMemDarwin->Core.cb);*/
414 Assert(!pMemDarwin->Core.u.Phys.fAllocated);
415 break;
416
417 case RTR0MEMOBJTYPE_PHYS_NC:
418 AssertMsgFailed(("RTR0MEMOBJTYPE_PHYS_NC\n"));
419 IPRT_DARWIN_RESTORE_EFL_AC();
420 return VERR_INTERNAL_ERROR;
421
422 case RTR0MEMOBJTYPE_RES_VIRT:
423 AssertMsgFailed(("RTR0MEMOBJTYPE_RES_VIRT\n"));
424 IPRT_DARWIN_RESTORE_EFL_AC();
425 return VERR_INTERNAL_ERROR;
426
427 case RTR0MEMOBJTYPE_MAPPING:
428 /* nothing to do here. */
429 break;
430
431 default:
432 AssertMsgFailed(("enmType=%d\n", pMemDarwin->Core.enmType));
433 IPRT_DARWIN_RESTORE_EFL_AC();
434 return VERR_INTERNAL_ERROR;
435 }
436
437 IPRT_DARWIN_RESTORE_EFL_AC();
438 return VINF_SUCCESS;
439}
440
441
442
443/**
444 * Kernel memory alloc worker that uses inTaskWithPhysicalMask.
445 *
446 * @returns IPRT status code.
447 * @retval VERR_ADDRESS_TOO_BIG try another way.
448 *
449 * @param ppMem Where to return the memory object.
450 * @param cb The page aligned memory size.
451 * @param fExecutable Whether the mapping needs to be executable.
452 * @param fContiguous Whether the backing memory needs to be contiguous.
453 * @param PhysMask The mask for the backing memory (i.e. range). Use 0 if
455 * you don't care that much or are speculating.
455 * @param MaxPhysAddr The max address to verify the result against. Use
456 * UINT64_MAX if it doesn't matter.
457 * @param enmType The object type.
458 * @param uAlignment The allocation alignment (in bytes).
459 */
460static int rtR0MemObjNativeAllocWorker(PPRTR0MEMOBJINTERNAL ppMem, size_t cb,
461 bool fExecutable, bool fContiguous,
462 mach_vm_address_t PhysMask, uint64_t MaxPhysAddr,
463 RTR0MEMOBJTYPE enmType, size_t uAlignment)
464{
465 RT_NOREF_PV(uAlignment);
466 int rc;
467
468 /*
469 * Try inTaskWithPhysicalMask first, but since we don't quite trust that it
470 * actually respects the physical memory mask (10.5.x is certainly busted),
471 * we'll use rtR0MemObjNativeAllocCont as a fallback for dealing with that.
472 *
473 * The kIOMemoryKernelUserShared flag just forces the result to be page aligned.
474 *
475 * The kIOMemoryMapperNone flag is required since 10.8.2 (IOMMU changes?).
476 */
477 size_t cbFudged = cb;
478 if (1) /** @todo Figure out why this is broken. Is it only on snow leopard? Seen allocating memory for the VM structure, last page corrupted or inaccessible. */
479 cbFudged += PAGE_SIZE;
480
481 uint64_t uAlignmentActual = uAlignment;
482
483 IOOptionBits fOptions = kIOMemoryKernelUserShared | kIODirectionInOut;
484 if (fContiguous)
485 fOptions |= kIOMemoryPhysicallyContiguous;
486 if (version_major >= 12 /* 12 = 10.8.x = Mountain Kitten */)
487 fOptions |= kIOMemoryMapperNone;
488
489 /* The public initWithPhysicalMask virtual method appeared in 10.7.0, in
490 versions 10.5.0 up to 10.7.0 it was private, and 10.4.8-10.5.0 it was
491 x86 only and didn't have the alignment parameter (slot was different too). */
492 IOBufferMemoryDescriptor *pMemDesc;
493#if __MAC_OS_X_VERSION_MAX_ALLOWED >= 1070
494 if (version_major >= 11 /* 11 = 10.7.x = Lion, could probably allow 10.5.0+ here if we really wanted to. */)
495 {
496 if (fContiguous || MaxPhysAddr < UINT64_MAX)
497 {
498 fOptions |= kIOMemoryPhysicallyContiguous;
499 // cannot find any evidence of this: uAlignmentActual = 1; /* PhysMask isn't respected if higher. */
500 }
501
502 pMemDesc = new IOBufferMemoryDescriptor;
503 if (pMemDesc)
504 {
505 if (pMemDesc->initWithPhysicalMask(kernel_task, fOptions, cbFudged, uAlignmentActual, PhysMask))
506 { /* likely */ }
507 else
508 {
509 pMemDesc->release();
510 pMemDesc = NULL;
511 }
512 }
513 }
514 else
515#endif
516 pMemDesc = IOBufferMemoryDescriptor::inTaskWithPhysicalMask(kernel_task, fOptions, cbFudged, PhysMask);
517 if (pMemDesc)
518 {
519 IOReturn IORet = pMemDesc->prepare(kIODirectionInOut);
520 if (IORet == kIOReturnSuccess)
521 {
522 void *pv = pMemDesc->getBytesNoCopy(0, cbFudged);
523 if (pv)
524 {
525 /*
526 * Check that all pages are below MaxPhysAddr (typically 4GB) and page aligned.
527 */
528 addr64_t AddrPrev = 0;
529 MaxPhysAddr &= ~(uint64_t)PAGE_OFFSET_MASK;
530 for (IOByteCount off = 0; off < cb; off += PAGE_SIZE)
531 {
532#ifdef __LP64__
533 addr64_t Addr = pMemDesc->getPhysicalSegment(off, NULL, kIOMemoryMapperNone);
534#else
535 addr64_t Addr = pMemDesc->getPhysicalSegment64(off, NULL);
536#endif
537 if ( Addr > MaxPhysAddr
538 || !Addr
539 || (Addr & PAGE_OFFSET_MASK)
540 || ( fContiguous
541 && !off
542 && Addr == AddrPrev + PAGE_SIZE))
543 {
544 /* Buggy API, try allocate the memory another way. */
545 pMemDesc->complete();
546 pMemDesc->release();
547 if (PhysMask)
548 {
549 kprintf("rtR0MemObjNativeAllocWorker: off=%zx Addr=%llx AddrPrev=%llx MaxPhysAddr=%llx PhysMask=%llx fContiguous=%d fOptions=%#x - buggy API!\n",
550 (size_t)off, Addr, AddrPrev, MaxPhysAddr, PhysMask, fContiguous, fOptions);
551 LogRel(("rtR0MemObjNativeAllocWorker: off=%zx Addr=%llx AddrPrev=%llx MaxPhysAddr=%llx PhysMask=%llx fContiguous=%RTbool fOptions=%#x - buggy API!\n",
552 (size_t)off, Addr, AddrPrev, MaxPhysAddr, PhysMask, fContiguous, fOptions));
553 }
554 return VERR_ADDRESS_TOO_BIG;
555 }
556 AddrPrev = Addr;
557 }
558
559 /*
560 * Check that it's aligned correctly.
561 */
562 if ((uintptr_t)pv & (uAlignment - 1))
563 {
564 pMemDesc->complete();
565 pMemDesc->release();
566 if (PhysMask)
567 {
568 kprintf("rtR0MemObjNativeAllocWorker: pv=%p uAlignment=%#zx (MaxPhysAddr=%llx PhysMask=%llx fContiguous=%d fOptions=%#x) - buggy API!!\n",
569 pv, uAlignment, MaxPhysAddr, PhysMask, fContiguous, fOptions);
570 LogRel(("rtR0MemObjNativeAllocWorker: pv=%p uAlignment=%#zx (MaxPhysAddr=%llx PhysMask=%llx fContiguous=%RTbool fOptions=%#x) - buggy API!\n",
571 pv, uAlignment, MaxPhysAddr, PhysMask, fContiguous, fOptions));
572 }
573 return VERR_NOT_SUPPORTED;
574 }
575
576#ifdef RT_STRICT
577 /* check that the memory is actually mapped. */
578 //addr64_t Addr = pMemDesc->getPhysicalSegment64(0, NULL);
579 //printf("rtR0MemObjNativeAllocWorker: pv=%p %8llx %8llx\n", pv, rtR0MemObjDarwinGetPTE(pv), Addr);
580 RTTHREADPREEMPTSTATE State = RTTHREADPREEMPTSTATE_INITIALIZER;
581 RTThreadPreemptDisable(&State);
582 rtR0MemObjDarwinTouchPages(pv, cb);
583 RTThreadPreemptRestore(&State);
584#endif
585
586 /*
587 * Create the IPRT memory object.
588 */
589 PRTR0MEMOBJDARWIN pMemDarwin = (PRTR0MEMOBJDARWIN)rtR0MemObjNew(sizeof(*pMemDarwin), enmType, pv, cb);
590 if (pMemDarwin)
591 {
592 if (fContiguous)
593 {
594#ifdef __LP64__
595 addr64_t PhysBase64 = pMemDesc->getPhysicalSegment(0, NULL, kIOMemoryMapperNone);
596#else
597 addr64_t PhysBase64 = pMemDesc->getPhysicalSegment64(0, NULL);
598#endif
599 RTHCPHYS PhysBase = PhysBase64; Assert(PhysBase == PhysBase64);
600 if (enmType == RTR0MEMOBJTYPE_CONT)
601 pMemDarwin->Core.u.Cont.Phys = PhysBase;
602 else if (enmType == RTR0MEMOBJTYPE_PHYS)
603 pMemDarwin->Core.u.Phys.PhysBase = PhysBase;
604 else
605 AssertMsgFailed(("enmType=%d\n", enmType));
606 }
607
608#if 1 /* Experimental code. */
609 if (fExecutable)
610 {
611 rc = rtR0MemObjNativeProtect(&pMemDarwin->Core, 0, cb, RTMEM_PROT_READ | RTMEM_PROT_WRITE | RTMEM_PROT_EXEC);
612# ifdef RT_STRICT
613 /* check that the memory is actually mapped. */
614 RTTHREADPREEMPTSTATE State2 = RTTHREADPREEMPTSTATE_INITIALIZER;
615 RTThreadPreemptDisable(&State2);
616 rtR0MemObjDarwinTouchPages(pv, cb);
617 RTThreadPreemptRestore(&State2);
618# endif
619
620 /* Bug 6226: Ignore KERN_PROTECTION_FAILURE on Leopard and older. */
621 if ( rc == VERR_PERMISSION_DENIED
622 && version_major <= 10 /* 10 = 10.6.x = Snow Leopard. */)
623 rc = VINF_SUCCESS;
624 }
625 else
626#endif
627 rc = VINF_SUCCESS;
628 if (RT_SUCCESS(rc))
629 {
630 pMemDarwin->pMemDesc = pMemDesc;
631 *ppMem = &pMemDarwin->Core;
632 return VINF_SUCCESS;
633 }
634
635 rtR0MemObjDelete(&pMemDarwin->Core);
636 }
637
638 if (enmType == RTR0MEMOBJTYPE_PHYS_NC)
639 rc = VERR_NO_PHYS_MEMORY;
640 else if (enmType == RTR0MEMOBJTYPE_LOW)
641 rc = VERR_NO_LOW_MEMORY;
642 else if (enmType == RTR0MEMOBJTYPE_CONT)
643 rc = VERR_NO_CONT_MEMORY;
644 else
645 rc = VERR_NO_MEMORY;
646 }
647 else
648 rc = VERR_MEMOBJ_INIT_FAILED;
649
650 pMemDesc->complete();
651 }
652 else
653 rc = RTErrConvertFromDarwinIO(IORet);
654 pMemDesc->release();
655 }
656 else
657 rc = VERR_MEMOBJ_INIT_FAILED;
658 Assert(rc != VERR_ADDRESS_TOO_BIG);
659 return rc;
660}
661
662
663DECLHIDDEN(int) rtR0MemObjNativeAllocPage(PPRTR0MEMOBJINTERNAL ppMem, size_t cb, bool fExecutable)
664{
665 IPRT_DARWIN_SAVE_EFL_AC();
666
667 int rc = rtR0MemObjNativeAllocWorker(ppMem, cb, fExecutable, false /* fContiguous */,
668 0 /* PhysMask */, UINT64_MAX, RTR0MEMOBJTYPE_PAGE, PAGE_SIZE);
669
670 IPRT_DARWIN_RESTORE_EFL_AC();
671 return rc;
672}
673
674
675DECLHIDDEN(int) rtR0MemObjNativeAllocLow(PPRTR0MEMOBJINTERNAL ppMem, size_t cb, bool fExecutable)
676{
677 IPRT_DARWIN_SAVE_EFL_AC();
678
679 /*
680 * Try IOMallocPhysical/IOMallocAligned first.
681 * Then try optimistically without a physical address mask, which will always
682 * end up using IOMallocAligned.
683 *
684 * (See bug comment in the worker and IOBufferMemoryDescriptor::initWithPhysicalMask.)
685 */
686 int rc = rtR0MemObjNativeAllocWorker(ppMem, cb, fExecutable, false /* fContiguous */,
687 ~(uint32_t)PAGE_OFFSET_MASK, _4G - PAGE_SIZE, RTR0MEMOBJTYPE_LOW, PAGE_SIZE);
688 if (rc == VERR_ADDRESS_TOO_BIG)
689 rc = rtR0MemObjNativeAllocWorker(ppMem, cb, fExecutable, false /* fContiguous */,
690 0 /* PhysMask */, _4G - PAGE_SIZE, RTR0MEMOBJTYPE_LOW, PAGE_SIZE);
691
692 IPRT_DARWIN_RESTORE_EFL_AC();
693 return rc;
694}
695
696
697DECLHIDDEN(int) rtR0MemObjNativeAllocCont(PPRTR0MEMOBJINTERNAL ppMem, size_t cb, bool fExecutable)
698{
699 IPRT_DARWIN_SAVE_EFL_AC();
700
701 int rc = rtR0MemObjNativeAllocWorker(ppMem, cb, fExecutable, true /* fContiguous */,
702 ~(uint32_t)PAGE_OFFSET_MASK, _4G - PAGE_SIZE,
703 RTR0MEMOBJTYPE_CONT, PAGE_SIZE);
704
705 /*
706 * Workaround for bogus IOKernelAllocateContiguous behavior, just in case.
707 * cb <= PAGE_SIZE allocations take a different path, using a different allocator.
708 */
709 if (RT_FAILURE(rc) && cb <= PAGE_SIZE)
710 rc = rtR0MemObjNativeAllocWorker(ppMem, cb + PAGE_SIZE, fExecutable, true /* fContiguous */,
711 ~(uint32_t)PAGE_OFFSET_MASK, _4G - PAGE_SIZE,
712 RTR0MEMOBJTYPE_CONT, PAGE_SIZE);
713 IPRT_DARWIN_RESTORE_EFL_AC();
714 return rc;
715}
716
717
718DECLHIDDEN(int) rtR0MemObjNativeAllocPhys(PPRTR0MEMOBJINTERNAL ppMem, size_t cb, RTHCPHYS PhysHighest, size_t uAlignment)
719{
720 if (uAlignment != PAGE_SIZE)
721 {
722 /* See rtR0MemObjNativeAllocWorker: */
723 if (version_major < 9 /* 9 = 10.5.x = Leopard */)
724 return VERR_NOT_SUPPORTED;
725 }
726
727 IPRT_DARWIN_SAVE_EFL_AC();
728
729 /*
730 * Translate the PhysHighest address into a mask.
731 */
732 int rc;
733 if (PhysHighest == NIL_RTHCPHYS)
734 rc = rtR0MemObjNativeAllocWorker(ppMem, cb, false /* fExecutable */, true /* fContiguous */,
735 uAlignment <= PAGE_SIZE ? 0 : ~(mach_vm_address_t)(uAlignment - 1) /* PhysMask*/,
736 UINT64_MAX, RTR0MEMOBJTYPE_PHYS, uAlignment);
737 else
738 {
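        /* Turn PhysHighest into the largest all-ones mask that does not exceed it (keeping
           the page offset bits), then clear the low bits so the mask passed to the worker
           also enforces the requested alignment. */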
739 mach_vm_address_t PhysMask = 0;
740 PhysMask = ~(mach_vm_address_t)0;
741 while (PhysMask > (PhysHighest | PAGE_OFFSET_MASK))
742 PhysMask >>= 1;
743 AssertReturn(PhysMask + 1 <= cb, VERR_INVALID_PARAMETER);
744 PhysMask &= ~(mach_vm_address_t)(uAlignment - 1);
745
746 rc = rtR0MemObjNativeAllocWorker(ppMem, cb, false /* fExecutable */, true /* fContiguous */,
747 PhysMask, PhysHighest, RTR0MEMOBJTYPE_PHYS, uAlignment);
748 }
749
750 IPRT_DARWIN_RESTORE_EFL_AC();
751 return rc;
752}
753
754
755DECLHIDDEN(int) rtR0MemObjNativeAllocPhysNC(PPRTR0MEMOBJINTERNAL ppMem, size_t cb, RTHCPHYS PhysHighest)
756{
757 /** @todo rtR0MemObjNativeAllocPhys / darwin.
758 * This might be a bit problematic and may very well require having to create our own
759 * object which we populate with pages but without mapping it into any address space.
760 * Estimate is 2-3 days.
761 */
762 RT_NOREF(ppMem, cb, PhysHighest);
763 return VERR_NOT_SUPPORTED;
764}
765
766
767DECLHIDDEN(int) rtR0MemObjNativeEnterPhys(PPRTR0MEMOBJINTERNAL ppMem, RTHCPHYS Phys, size_t cb, uint32_t uCachePolicy)
768{
769 AssertReturn(uCachePolicy == RTMEM_CACHE_POLICY_DONT_CARE, VERR_NOT_SUPPORTED);
770 IPRT_DARWIN_SAVE_EFL_AC();
771
772 /*
773 * Create a descriptor for it (the validation is always true on Intel Macs, but
774 * as it doesn't harm us we keep it in).
775 */
776 int rc = VERR_ADDRESS_TOO_BIG;
777 IOAddressRange aRanges[1] = { { Phys, cb } };
778 if ( aRanges[0].address == Phys
779 && aRanges[0].length == cb)
780 {
781 IOMemoryDescriptor *pMemDesc = IOMemoryDescriptor::withAddressRanges(&aRanges[0], RT_ELEMENTS(aRanges),
782 kIODirectionInOut, NULL /*task*/);
783 if (pMemDesc)
784 {
785#ifdef __LP64__
786 Assert(Phys == pMemDesc->getPhysicalSegment(0, NULL, kIOMemoryMapperNone));
787#else
788 Assert(Phys == pMemDesc->getPhysicalSegment64(0, NULL));
789#endif
790
791 /*
792 * Create the IPRT memory object.
793 */
794 PRTR0MEMOBJDARWIN pMemDarwin = (PRTR0MEMOBJDARWIN)rtR0MemObjNew(sizeof(*pMemDarwin), RTR0MEMOBJTYPE_PHYS, NULL, cb);
795 if (pMemDarwin)
796 {
797 pMemDarwin->Core.u.Phys.PhysBase = Phys;
798 pMemDarwin->Core.u.Phys.fAllocated = false;
799 pMemDarwin->Core.u.Phys.uCachePolicy = uCachePolicy;
800 pMemDarwin->pMemDesc = pMemDesc;
801 *ppMem = &pMemDarwin->Core;
802 IPRT_DARWIN_RESTORE_EFL_AC();
803 return VINF_SUCCESS;
804 }
805
806 rc = VERR_NO_MEMORY;
807 pMemDesc->release();
808 }
809 else
810 rc = VERR_MEMOBJ_INIT_FAILED;
811 }
812 else
813 AssertMsgFailed(("%#llx %llx\n", (unsigned long long)Phys, (unsigned long long)cb));
814 IPRT_DARWIN_RESTORE_EFL_AC();
815 return rc;
816}
817
818
819/**
820 * Internal worker for locking down pages.
821 *
822 * @return IPRT status code.
823 *
824 * @param ppMem Where to store the memory object pointer.
825 * @param pv First page.
826 * @param cb Number of bytes.
827 * @param fAccess The desired access, a combination of RTMEM_PROT_READ
828 * and RTMEM_PROT_WRITE.
829 * @param Task The task \a pv and \a cb refers to.
830 */
831static int rtR0MemObjNativeLock(PPRTR0MEMOBJINTERNAL ppMem, void *pv, size_t cb, uint32_t fAccess, task_t Task)
832{
833 IPRT_DARWIN_SAVE_EFL_AC();
834 NOREF(fAccess);
835#ifdef USE_VM_MAP_WIRE
836 vm_map_t Map = get_task_map(Task);
837 Assert(Map);
838
839 /*
840 * First try to lock the memory.
841 */
842 int rc = VERR_LOCK_FAILED;
843 kern_return_t kr = vm_map_wire(get_task_map(Task),
844 (vm_map_offset_t)pv,
845 (vm_map_offset_t)pv + cb,
846 VM_PROT_DEFAULT,
847 0 /* not user */);
848 if (kr == KERN_SUCCESS)
849 {
850 /*
851 * Create the IPRT memory object.
852 */
853 PRTR0MEMOBJDARWIN pMemDarwin = (PRTR0MEMOBJDARWIN)rtR0MemObjNew(sizeof(*pMemDarwin), RTR0MEMOBJTYPE_LOCK, pv, cb);
854 if (pMemDarwin)
855 {
856 pMemDarwin->Core.u.Lock.R0Process = (RTR0PROCESS)Task;
857 *ppMem = &pMemDarwin->Core;
858
859 IPRT_DARWIN_RESTORE_EFL_AC();
860 return VINF_SUCCESS;
861 }
862
863 kr = vm_map_unwire(get_task_map(Task), (vm_map_offset_t)pv, (vm_map_offset_t)pv + cb, 0 /* not user */);
864 Assert(kr == KERN_SUCCESS);
865 rc = VERR_NO_MEMORY;
866 }
867
868#else
869
870 /*
871 * Create a descriptor and try to lock it (prepare).
872 */
873 int rc = VERR_MEMOBJ_INIT_FAILED;
874 IOMemoryDescriptor *pMemDesc = IOMemoryDescriptor::withAddressRange((vm_address_t)pv, cb, kIODirectionInOut, Task);
875 if (pMemDesc)
876 {
877 IOReturn IORet = pMemDesc->prepare(kIODirectionInOut);
878 if (IORet == kIOReturnSuccess)
879 {
880 /*
881 * Create the IPRT memory object.
882 */
883 PRTR0MEMOBJDARWIN pMemDarwin = (PRTR0MEMOBJDARWIN)rtR0MemObjNew(sizeof(*pMemDarwin), RTR0MEMOBJTYPE_LOCK, pv, cb);
884 if (pMemDarwin)
885 {
886 pMemDarwin->Core.u.Lock.R0Process = (RTR0PROCESS)Task;
887 pMemDarwin->pMemDesc = pMemDesc;
888 *ppMem = &pMemDarwin->Core;
889
890 IPRT_DARWIN_RESTORE_EFL_AC();
891 return VINF_SUCCESS;
892 }
893
894 pMemDesc->complete();
895 rc = VERR_NO_MEMORY;
896 }
897 else
898 rc = VERR_LOCK_FAILED;
899 pMemDesc->release();
900 }
901#endif
902 IPRT_DARWIN_RESTORE_EFL_AC();
903 return rc;
904}
905
906
907DECLHIDDEN(int) rtR0MemObjNativeLockUser(PPRTR0MEMOBJINTERNAL ppMem, RTR3PTR R3Ptr, size_t cb, uint32_t fAccess, RTR0PROCESS R0Process)
908{
909 return rtR0MemObjNativeLock(ppMem, (void *)R3Ptr, cb, fAccess, (task_t)R0Process);
910}
911
912
913DECLHIDDEN(int) rtR0MemObjNativeLockKernel(PPRTR0MEMOBJINTERNAL ppMem, void *pv, size_t cb, uint32_t fAccess)
914{
915 return rtR0MemObjNativeLock(ppMem, pv, cb, fAccess, kernel_task);
916}
917
918
919DECLHIDDEN(int) rtR0MemObjNativeReserveKernel(PPRTR0MEMOBJINTERNAL ppMem, void *pvFixed, size_t cb, size_t uAlignment)
920{
921 RT_NOREF(ppMem, pvFixed, cb, uAlignment);
922 return VERR_NOT_SUPPORTED;
923}
924
925
926DECLHIDDEN(int) rtR0MemObjNativeReserveUser(PPRTR0MEMOBJINTERNAL ppMem, RTR3PTR R3PtrFixed, size_t cb, size_t uAlignment, RTR0PROCESS R0Process)
927{
928 RT_NOREF(ppMem, R3PtrFixed, cb, uAlignment, R0Process);
929 return VERR_NOT_SUPPORTED;
930}
931
932
933DECLHIDDEN(int) rtR0MemObjNativeMapKernel(PPRTR0MEMOBJINTERNAL ppMem, RTR0MEMOBJ pMemToMap, void *pvFixed, size_t uAlignment,
934 unsigned fProt, size_t offSub, size_t cbSub)
935{
936 RT_NOREF(fProt);
937 AssertReturn(pvFixed == (void *)-1, VERR_NOT_SUPPORTED);
938
939 /*
940 * Check that the specified alignment is supported.
941 */
942 if (uAlignment > PAGE_SIZE)
943 return VERR_NOT_SUPPORTED;
944 Assert(!offSub || cbSub);
945
946 IPRT_DARWIN_SAVE_EFL_AC();
947
948 /*
949 * Must have a memory descriptor that we can map.
950 */
951 int rc = VERR_INVALID_PARAMETER;
952 PRTR0MEMOBJDARWIN pMemToMapDarwin = (PRTR0MEMOBJDARWIN)pMemToMap;
953 if (pMemToMapDarwin->pMemDesc)
954 {
955 /* The kIOMapPrefault option was added in 10.10.0; it causes the PTEs to be populated and
956 INTEL_PTE_WIRED to be set, just like we desire (see further down). */
957#if MAC_OS_X_VERSION_MIN_REQUIRED >= 101000
958 IOMemoryMap *pMemMap = pMemToMapDarwin->pMemDesc->createMappingInTask(kernel_task,
959 0,
960 kIOMapAnywhere | kIOMapDefaultCache | kIOMapPrefault,
961 offSub,
962 cbSub);
963#elif MAC_OS_X_VERSION_MIN_REQUIRED >= 1050
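            /* When building against a pre-10.10 SDK the kIOMapPrefault constant may not be
               defined, so its numeric value (0x10000000) is used below and only passed when
               the running kernel is xnu/Darwin 14 (= OS X 10.10) or newer. */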
964 static uint32_t volatile s_fOptions = UINT32_MAX;
965 uint32_t fOptions = s_fOptions;
966 if (RT_UNLIKELY(fOptions == UINT32_MAX))
967 s_fOptions = fOptions = version_major >= 14 ? 0x10000000 /*kIOMapPrefault*/ : 0; /* Since 10.10.0. */
968 IOMemoryMap *pMemMap = pMemToMapDarwin->pMemDesc->createMappingInTask(kernel_task,
969 0,
970 kIOMapAnywhere | kIOMapDefaultCache | fOptions,
971 offSub,
972 cbSub);
973#else
974 IOMemoryMap *pMemMap = pMemToMapDarwin->pMemDesc->map(kernel_task,
975 0,
976 kIOMapAnywhere | kIOMapDefaultCache,
977 offSub,
978 cbSub);
979#endif
980 if (pMemMap)
981 {
982 IOVirtualAddress VirtAddr = pMemMap->getVirtualAddress();
983 void *pv = (void *)(uintptr_t)VirtAddr;
984 if ((uintptr_t)pv == VirtAddr && pv != NULL)
985 {
986//#ifdef __LP64__
987// addr64_t Addr = pMemToMapDarwin->pMemDesc->getPhysicalSegment(offSub, NULL, kIOMemoryMapperNone);
988//#else
989// addr64_t Addr = pMemToMapDarwin->pMemDesc->getPhysicalSegment64(offSub, NULL);
990//#endif
991// MY_PRINTF("pv=%p: %8llx %8llx\n", pv, rtR0MemObjDarwinGetPTE(pv), Addr);
992
993// /*
994// * Explicitly lock it so that we're sure it is present and that
995// * its PTEs cannot be recycled.
996// * Note! withAddressRange() doesn't work as it adds kIOMemoryTypeVirtual64
997// * to the options which causes prepare() to not wire the pages.
998// * This is probably a bug.
999// */
1000// IOAddressRange Range = { (mach_vm_address_t)pv, cbSub };
1001// IOMemoryDescriptor *pMemDesc = IOMemoryDescriptor::withOptions(&Range,
1002// 1 /* count */,
1003// 0 /* offset */,
1004// kernel_task,
1005// kIODirectionInOut | kIOMemoryTypeVirtual,
1006// kIOMapperSystem);
1007// if (pMemDesc)
1008// {
1009// IOReturn IORet = pMemDesc->prepare(kIODirectionInOut);
1010// if (IORet == kIOReturnSuccess)
1011// {
1012 /* HACK ALERT! On kernels older than 10.10 (xnu version 14), we need to fault in
1013 the pages here so they can safely be accessed from inside simple
1014 locks and when preemption is disabled (no page-ins allowed).
1015 Note! This touching does not cause INTEL_PTE_WIRED (bit 10) to be set as we go
1016 thru general #PF and vm_fault doesn't figure it should be wired or something. */
1017 rtR0MemObjDarwinTouchPages(pv, cbSub ? cbSub : pMemToMap->cb);
1018 /** @todo First, the memory should've been mapped by now, and second, it
1019 * should have the wired attribute in the PTE (bit 10). Neither seems to
1020 * be the case. The disabled locking code doesn't make any difference,
1021 * which is extremely odd, and breaks rtR0MemObjNativeGetPagePhysAddr
1022 * (getPhysicalSegment64 -> 64 for the lock descriptor. */
1023//#ifdef __LP64__
1024// addr64_t Addr2 = pMemToMapDarwin->pMemDesc->getPhysicalSegment(offSub, NULL, kIOMemoryMapperNone);
1025//#else
1026// addr64_t Addr2 = pMemToMapDarwin->pMemDesc->getPhysicalSegment64(offSub, NULL);
1027//#endif
1028// MY_PRINTF("pv=%p: %8llx %8llx (%d)\n", pv, rtR0MemObjDarwinGetPTE(pv), Addr2, 2);
1029
1030 /*
1031 * Create the IPRT memory object.
1032 */
1033 PRTR0MEMOBJDARWIN pMemDarwin = (PRTR0MEMOBJDARWIN)rtR0MemObjNew(sizeof(*pMemDarwin), RTR0MEMOBJTYPE_MAPPING,
1034 pv, cbSub ? cbSub : pMemToMap->cb);
1035 if (pMemDarwin)
1036 {
1037 pMemDarwin->Core.u.Mapping.R0Process = NIL_RTR0PROCESS;
1038 pMemDarwin->pMemMap = pMemMap;
1039// pMemDarwin->pMemDesc = pMemDesc;
1040 *ppMem = &pMemDarwin->Core;
1041
1042 IPRT_DARWIN_RESTORE_EFL_AC();
1043 return VINF_SUCCESS;
1044 }
1045
1046// pMemDesc->complete();
1047// rc = VERR_NO_MEMORY;
1048// }
1049// else
1050// rc = RTErrConvertFromDarwinIO(IORet);
1051// pMemDesc->release();
1052// }
1053// else
1054// rc = VERR_MEMOBJ_INIT_FAILED;
1055 }
1056 else if (pv)
1057 rc = VERR_ADDRESS_TOO_BIG;
1058 else
1059 rc = VERR_MAP_FAILED;
1060 pMemMap->release();
1061 }
1062 else
1063 rc = VERR_MAP_FAILED;
1064 }
1065
1066 IPRT_DARWIN_RESTORE_EFL_AC();
1067 return rc;
1068}
1069
1070
1071DECLHIDDEN(int) rtR0MemObjNativeMapUser(PPRTR0MEMOBJINTERNAL ppMem, RTR0MEMOBJ pMemToMap, RTR3PTR R3PtrFixed, size_t uAlignment,
1072 unsigned fProt, RTR0PROCESS R0Process, size_t offSub, size_t cbSub)
1073{
1074 RT_NOREF(fProt);
1075
1076 /*
1077 * Check for unsupported things.
1078 */
1079 AssertReturn(R3PtrFixed == (RTR3PTR)-1, VERR_NOT_SUPPORTED);
1080 if (uAlignment > PAGE_SIZE)
1081 return VERR_NOT_SUPPORTED;
1082 Assert(!offSub || cbSub);
1083
1084 IPRT_DARWIN_SAVE_EFL_AC();
1085
1086 /*
1087 * Must have a memory descriptor.
1088 */
1089 int rc = VERR_INVALID_PARAMETER;
1090 PRTR0MEMOBJDARWIN pMemToMapDarwin = (PRTR0MEMOBJDARWIN)pMemToMap;
1091 if (pMemToMapDarwin->pMemDesc)
1092 {
1093#if MAC_OS_X_VERSION_MIN_REQUIRED >= 101000 /* The kIOMapPrefault option was added in 10.10.0. */
1094 IOMemoryMap *pMemMap = pMemToMapDarwin->pMemDesc->createMappingInTask((task_t)R0Process,
1095 0,
1096 kIOMapAnywhere | kIOMapDefaultCache | kIOMapPrefault,
1097 offSub,
1098 cbSub);
1099#elif MAC_OS_X_VERSION_MIN_REQUIRED >= 1050
1100 static uint32_t volatile s_fOptions = UINT32_MAX;
1101 uint32_t fOptions = s_fOptions;
1102 if (RT_UNLIKELY(fOptions == UINT32_MAX))
1103 s_fOptions = fOptions = version_major >= 14 ? 0x10000000 /*kIOMapPrefault*/ : 0; /* Since 10.10.0. */
1104 IOMemoryMap *pMemMap = pMemToMapDarwin->pMemDesc->createMappingInTask((task_t)R0Process,
1105 0,
1106 kIOMapAnywhere | kIOMapDefaultCache | fOptions,
1107 offSub,
1108 cbSub);
1109#else
1110 IOMemoryMap *pMemMap = pMemToMapDarwin->pMemDesc->map((task_t)R0Process,
1111 0,
1112 kIOMapAnywhere | kIOMapDefaultCache,
1113 offSub,
1114 cbSub);
1115#endif
1116 if (pMemMap)
1117 {
1118 IOVirtualAddress VirtAddr = pMemMap->getVirtualAddress();
1119 void *pv = (void *)(uintptr_t)VirtAddr;
1120 if ((uintptr_t)pv == VirtAddr && pv != NULL)
1121 {
1122 /*
1123 * Create the IPRT memory object.
1124 */
1125 PRTR0MEMOBJDARWIN pMemDarwin = (PRTR0MEMOBJDARWIN)rtR0MemObjNew(sizeof(*pMemDarwin), RTR0MEMOBJTYPE_MAPPING,
1126 pv, cbSub ? cbSub : pMemToMap->cb);
1127 if (pMemDarwin)
1128 {
1129 pMemDarwin->Core.u.Mapping.R0Process = R0Process;
1130 pMemDarwin->pMemMap = pMemMap;
1131 *ppMem = &pMemDarwin->Core;
1132
1133 IPRT_DARWIN_RESTORE_EFL_AC();
1134 return VINF_SUCCESS;
1135 }
1136
1137 rc = VERR_NO_MEMORY;
1138 }
1139 else if (pv)
1140 rc = VERR_ADDRESS_TOO_BIG;
1141 else
1142 rc = VERR_MAP_FAILED;
1143 pMemMap->release();
1144 }
1145 else
1146 rc = VERR_MAP_FAILED;
1147 }
1148
1149 IPRT_DARWIN_RESTORE_EFL_AC();
1150 return rc;
1151}
1152
1153
1154DECLHIDDEN(int) rtR0MemObjNativeProtect(PRTR0MEMOBJINTERNAL pMem, size_t offSub, size_t cbSub, uint32_t fProt)
1155{
1156 IPRT_DARWIN_SAVE_EFL_AC();
1157
1158 /* Get the map for the object. */
1159 vm_map_t pVmMap = rtR0MemObjDarwinGetMap(pMem);
1160 if (!pVmMap)
1161 {
1162 IPRT_DARWIN_RESTORE_EFL_AC();
1163 return VERR_NOT_SUPPORTED;
1164 }
1165
1166 /*
1167 * Convert the protection.
1168 */
1169 vm_prot_t fMachProt;
1170 switch (fProt)
1171 {
1172 case RTMEM_PROT_NONE:
1173 fMachProt = VM_PROT_NONE;
1174 break;
1175 case RTMEM_PROT_READ:
1176 fMachProt = VM_PROT_READ;
1177 break;
1178 case RTMEM_PROT_READ | RTMEM_PROT_WRITE:
1179 fMachProt = VM_PROT_READ | VM_PROT_WRITE;
1180 break;
1181 case RTMEM_PROT_READ | RTMEM_PROT_WRITE | RTMEM_PROT_EXEC:
1182 fMachProt = VM_PROT_READ | VM_PROT_WRITE | VM_PROT_EXECUTE;
1183 break;
1184 case RTMEM_PROT_WRITE:
1185 fMachProt = VM_PROT_WRITE | VM_PROT_READ; /* never write-only */
1186 break;
1187 case RTMEM_PROT_WRITE | RTMEM_PROT_EXEC:
1188 fMachProt = VM_PROT_WRITE | VM_PROT_EXECUTE | VM_PROT_READ; /* never write-only or execute-only */
1189 break;
1190 case RTMEM_PROT_EXEC:
1191 fMachProt = VM_PROT_EXECUTE | VM_PROT_READ; /* never execute-only */
1192 break;
1193 default:
1194 AssertFailedReturn(VERR_INVALID_PARAMETER);
1195 }
1196
1197 /*
1198 * Do the job.
1199 */
1200 vm_offset_t Start = (uintptr_t)pMem->pv + offSub;
1201 kern_return_t krc = vm_protect(pVmMap,
1202 Start,
1203 cbSub,
1204 false,
1205 fMachProt);
1206 if (krc != KERN_SUCCESS)
1207 {
1208 static int s_cComplaints = 0;
1209 if (s_cComplaints < 10)
1210 {
1211 s_cComplaints++;
1212 printf("rtR0MemObjNativeProtect: vm_protect(%p,%p,%p,false,%#x) -> %d\n",
1213 pVmMap, (void *)Start, (void *)cbSub, fMachProt, krc);
1214
1215 kern_return_t krc2;
1216 vm_offset_t pvReal = Start;
1217 vm_size_t cbReal = 0;
1218 mach_msg_type_number_t cInfo = VM_REGION_BASIC_INFO_COUNT;
1219 struct vm_region_basic_info Info;
1220 RT_ZERO(Info);
1221 krc2 = vm_region(pVmMap, &pvReal, &cbReal, VM_REGION_BASIC_INFO, (vm_region_info_t)&Info, &cInfo, NULL);
1222 printf("rtR0MemObjNativeProtect: basic info - krc2=%d pv=%p cb=%p prot=%#x max=%#x inh=%#x shr=%d rvd=%d off=%#x behavior=%#x wired=%#x\n",
1223 krc2, (void *)pvReal, (void *)cbReal, Info.protection, Info.max_protection, Info.inheritance,
1224 Info.shared, Info.reserved, Info.offset, Info.behavior, Info.user_wired_count);
1225 }
1226 IPRT_DARWIN_RESTORE_EFL_AC();
1227 return RTErrConvertFromDarwinKern(krc);
1228 }
1229
1230 /*
1231 * Touch the pages if they should be writable afterwards and accessible
1232 * from code which should never fault. vm_protect() may leave pages
1233 * temporarily write protected, possibly due to pmap no-upgrade rules?
1234 *
1235 * This is the same trick (or HACK ALERT if you like) as applied in
1236 * rtR0MemObjNativeMapKernel.
1237 */
1238 if ( pMem->enmType != RTR0MEMOBJTYPE_MAPPING
1239 || pMem->u.Mapping.R0Process == NIL_RTR0PROCESS)
1240 {
1241 if (fProt & RTMEM_PROT_WRITE)
1242 rtR0MemObjDarwinTouchPages((void *)Start, cbSub);
1243 /*
1244 * Sniff (read) read-only pages too, just to be sure.
1245 */
1246 else if (fProt & (RTMEM_PROT_READ | RTMEM_PROT_EXEC))
1247 rtR0MemObjDarwinSniffPages((void const *)Start, cbSub);
1248 }
1249
1250 IPRT_DARWIN_RESTORE_EFL_AC();
1251 return VINF_SUCCESS;
1252}
1253
1254
1255DECLHIDDEN(RTHCPHYS) rtR0MemObjNativeGetPagePhysAddr(PRTR0MEMOBJINTERNAL pMem, size_t iPage)
1256{
1257 RTHCPHYS PhysAddr;
1258 PRTR0MEMOBJDARWIN pMemDarwin = (PRTR0MEMOBJDARWIN)pMem;
1259 IPRT_DARWIN_SAVE_EFL_AC();
1260
1261#ifdef USE_VM_MAP_WIRE
1262 /*
1263 * Locked memory doesn't have a memory descriptor and
1264 * needs to be handled differently.
1265 */
1266 if (pMemDarwin->Core.enmType == RTR0MEMOBJTYPE_LOCK)
1267 {
1268 ppnum_t PgNo;
1269 if (pMemDarwin->Core.u.Lock.R0Process == NIL_RTR0PROCESS)
1270 PgNo = pmap_find_phys(kernel_pmap, (uintptr_t)pMemDarwin->Core.pv + iPage * PAGE_SIZE);
1271 else
1272 {
1273 /*
1274 * From what I can tell, Apple seems to have locked up all the
1275 * available interfaces that could help us obtain the pmap_t of a task
1276 * or vm_map_t.
1277 *
1278 * So, we'll have to figure out where in the vm_map_t structure it is
1279 * and read it ourselves. ASSUMING that kernel_pmap is pointed to by
1280 * kernel_map->pmap, we scan kernel_map to locate the structure offset.
1281 * Not nice, but it will hopefully do the job in a reliable manner...
1282 *
1283 * (get_task_pmap, get_map_pmap or vm_map_pmap is what we really need btw.)
1284 */
1285 static int s_offPmap = -1;
1286 if (RT_UNLIKELY(s_offPmap == -1))
1287 {
1288 pmap_t const *p = (pmap_t *)kernel_map;
1289 pmap_t const * const pEnd = p + 64;
1290 for (; p < pEnd; p++)
1291 if (*p == kernel_pmap)
1292 {
1293 s_offPmap = (uintptr_t)p - (uintptr_t)kernel_map;
1294 break;
1295 }
1296 AssertReturn(s_offPmap >= 0, NIL_RTHCPHYS);
1297 }
1298 pmap_t Pmap = *(pmap_t *)((uintptr_t)get_task_map((task_t)pMemDarwin->Core.u.Lock.R0Process) + s_offPmap);
1299 PgNo = pmap_find_phys(Pmap, (uintptr_t)pMemDarwin->Core.pv + iPage * PAGE_SIZE);
1300 }
1301
1302 IPRT_DARWIN_RESTORE_EFL_AC();
1303 AssertReturn(PgNo, NIL_RTHCPHYS);
1304 PhysAddr = (RTHCPHYS)PgNo << PAGE_SHIFT;
1305 Assert((PhysAddr >> PAGE_SHIFT) == PgNo);
1306 }
1307 else
1308#endif /* USE_VM_MAP_WIRE */
1309 {
1310 /*
1311 * Get the memory descriptor.
1312 */
1313 IOMemoryDescriptor *pMemDesc = pMemDarwin->pMemDesc;
1314 if (!pMemDesc)
1315 pMemDesc = pMemDarwin->pMemMap->getMemoryDescriptor();
1316 AssertReturn(pMemDesc, NIL_RTHCPHYS);
1317
1318 /*
1319 * If we've got a memory descriptor, use getPhysicalSegment64().
1320 */
1321#ifdef __LP64__
1322 addr64_t Addr = pMemDesc->getPhysicalSegment(iPage * PAGE_SIZE, NULL, kIOMemoryMapperNone);
1323#else
1324 addr64_t Addr = pMemDesc->getPhysicalSegment64(iPage * PAGE_SIZE, NULL);
1325#endif
1326 IPRT_DARWIN_RESTORE_EFL_AC();
1327 AssertMsgReturn(Addr, ("iPage=%u\n", iPage), NIL_RTHCPHYS);
1328 PhysAddr = Addr;
1329 AssertMsgReturn(PhysAddr == Addr, ("PhysAddr=%RHp Addr=%RX64\n", PhysAddr, (uint64_t)Addr), NIL_RTHCPHYS);
1330 }
1331
1332 return PhysAddr;
1333}
1334