VirtualBox

source: vbox/trunk/src/VBox/Runtime/r0drv/darwin/memobj-r0drv-darwin.cpp@82890

Last change on this file since 82890 was 82886, checked in by vboxsync, 5 years ago

IPRT/memobj-r0drv-darwin.cpp: Turns out kIOMapPrefault wasn't allowed on kernel mappings till 10.13.x. bugref:9627

  • Property svn:eol-style set to native
  • Property svn:keywords set to Id Revision
File size: 50.8 KB
1/* $Id: memobj-r0drv-darwin.cpp 82886 2020-01-27 22:26:05Z vboxsync $ */
2/** @file
3 * IPRT - Ring-0 Memory Objects, Darwin.
4 */
5
6/*
7 * Copyright (C) 2006-2019 Oracle Corporation
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 *
17 * The contents of this file may alternatively be used under the terms
18 * of the Common Development and Distribution License Version 1.0
19 * (CDDL) only, as it comes in the "COPYING.CDDL" file of the
20 * VirtualBox OSE distribution, in which case the provisions of the
21 * CDDL are applicable instead of those of the GPL.
22 *
23 * You may elect to license modified versions of this file under the
24 * terms and conditions of either the GPL or the CDDL or both.
25 */
26
27
28/*********************************************************************************************************************************
29* Header Files *
30*********************************************************************************************************************************/
31#define RTMEM_NO_WRAP_TO_EF_APIS /* circular dependency otherwise. */
32#include "the-darwin-kernel.h"
33#include "internal/iprt.h"
34#include <iprt/memobj.h>
35
36#include <iprt/asm.h>
37#if defined(RT_ARCH_AMD64) || defined(RT_ARCH_X86)
38# include <iprt/x86.h>
39# include <iprt/asm-amd64-x86.h>
40#endif
41#include <iprt/assert.h>
42#include <iprt/log.h>
43#include <iprt/mem.h>
44#include <iprt/param.h>
45#include <iprt/process.h>
46#include <iprt/string.h>
47#include <iprt/thread.h>
48#include "internal/memobj.h"
49
50
51/*********************************************************************************************************************************
52* Defined Constants And Macros *
53*********************************************************************************************************************************/
54#define MY_PRINTF(...) do { printf(__VA_ARGS__); kprintf(__VA_ARGS__); } while (0)
55
56/*#define USE_VM_MAP_WIRE - may re-enable later when non-mapped allocations are added. */
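/* For reference: version_major in the checks below is the Darwin kernel major version,
   which maps to macOS releases as 9 = 10.5 (Leopard), 10 = 10.6 (Snow Leopard),
   11 = 10.7 (Lion), 12 = 10.8 (Mountain Lion), 14 = 10.10 (Yosemite) and
   17 = 10.13 (High Sierra). */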
57
58
59/*********************************************************************************************************************************
60* Structures and Typedefs *
61*********************************************************************************************************************************/
62/**
63 * The Darwin version of the memory object structure.
64 */
65typedef struct RTR0MEMOBJDARWIN
66{
67 /** The core structure. */
68 RTR0MEMOBJINTERNAL Core;
69 /** Pointer to the memory descriptor created for allocated and locked memory. */
70 IOMemoryDescriptor *pMemDesc;
71 /** Pointer to the memory mapping object for mapped memory. */
72 IOMemoryMap *pMemMap;
73} RTR0MEMOBJDARWIN, *PRTR0MEMOBJDARWIN;
74
75
76/**
77 * Touch the pages to force the kernel to create or write-enable the page table
78 * entries.
79 *
80 * This is necessary because the kernel gets upset if we take a page fault while
81 * preemption is disabled and/or while we own a simple lock (same thing). Oddly,
82 * it has no problem with us disabling interrupts when taking the traps.
83 *
84 * (This is basically a way of invoking vm_fault on a range of pages.)
85 *
86 * @param pv Pointer to the first page.
87 * @param cb The number of bytes.
88 */
89static void rtR0MemObjDarwinTouchPages(void *pv, size_t cb)
90{
91 uint32_t volatile *pu32 = (uint32_t volatile *)pv;
92 for (;;)
93 {
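        /* A locked cmpxchg with identical compare and exchange values leaves the data
           unchanged, but it is still a write access, so it forces the kernel to fault
           in a present, writable PTE for the page. */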
94 ASMAtomicCmpXchgU32(pu32, 0xdeadbeef, 0xdeadbeef);
95 if (cb <= PAGE_SIZE)
96 break;
97 cb -= PAGE_SIZE;
98 pu32 += PAGE_SIZE / sizeof(uint32_t);
99 }
100}
101
102
103/**
104 * Read (sniff) every page in the range to make sure there are page table
105 * entries backing it.
106 *
107 * This is just to make sure vm_protect() didn't remove entries without re-adding
108 * them when someone tries to write-protect something.
109 *
110 * @param pv Pointer to the first page.
111 * @param cb The number of bytes.
112 */
113static void rtR0MemObjDarwinSniffPages(void const *pv, size_t cb)
114{
115 uint32_t volatile *pu32 = (uint32_t volatile *)pv;
116 uint32_t volatile u32Counter = 0;
117 for (;;)
118 {
119 u32Counter += *pu32;
120
121 if (cb <= PAGE_SIZE)
122 break;
123 cb -= PAGE_SIZE;
124 pu32 += PAGE_SIZE / sizeof(uint32_t);
125 }
126}
127
128
129/**
130 * Gets the virtual memory map the specified object is mapped into.
131 *
132 * @returns VM map handle on success, NULL if no map.
133 * @param pMem The memory object.
134 */
135DECLINLINE(vm_map_t) rtR0MemObjDarwinGetMap(PRTR0MEMOBJINTERNAL pMem)
136{
137 switch (pMem->enmType)
138 {
139 case RTR0MEMOBJTYPE_PAGE:
140 case RTR0MEMOBJTYPE_LOW:
141 case RTR0MEMOBJTYPE_CONT:
142 return kernel_map;
143
144 case RTR0MEMOBJTYPE_PHYS:
145 case RTR0MEMOBJTYPE_PHYS_NC:
146 if (pMem->pv)
147 return kernel_map;
148 return NULL;
149
150 case RTR0MEMOBJTYPE_LOCK:
151 return pMem->u.Lock.R0Process == NIL_RTR0PROCESS
152 ? kernel_map
153 : get_task_map((task_t)pMem->u.Lock.R0Process);
154
155 case RTR0MEMOBJTYPE_RES_VIRT:
156 return pMem->u.ResVirt.R0Process == NIL_RTR0PROCESS
157 ? kernel_map
158 : get_task_map((task_t)pMem->u.ResVirt.R0Process);
159
160 case RTR0MEMOBJTYPE_MAPPING:
161 return pMem->u.Mapping.R0Process == NIL_RTR0PROCESS
162 ? kernel_map
163 : get_task_map((task_t)pMem->u.Mapping.R0Process);
164
165 default:
166 return NULL;
167 }
168}
169
170#if 0 /* not necessary after all*/
171/* My vm_map mockup. */
172struct my_vm_map
173{
174 struct { char pad[8]; } lock;
175 struct my_vm_map_header
176 {
177 struct vm_map_links
178 {
179 void *prev;
180 void *next;
181 vm_map_offset_t start;
182 vm_map_offset_t end;
183 } links;
184 int nentries;
185 boolean_t entries_pageable;
186 } hdr;
187 pmap_t pmap;
188 vm_map_size_t size;
189};
190
191
192/**
193 * Gets the minimum map address, this is similar to get_map_min.
194 *
195 * @returns The start address of the map.
196 * @param pMap The map.
197 */
198static vm_map_offset_t rtR0MemObjDarwinGetMapMin(vm_map_t pMap)
199{
200 /* Lazy discovery of the correct offset. The Apple guys are a wonderfully secretive bunch. */
201 static int32_t volatile s_offAdjust = INT32_MAX;
202 int32_t off = s_offAdjust;
203 if (off == INT32_MAX)
204 {
205 for (off = 0; ; off += sizeof(pmap_t))
206 {
207 if (*(pmap_t *)((uint8_t *)kernel_map + off) == kernel_pmap)
208 break;
209 AssertReturn(off <= RT_MAX(RT_OFFSETOF(struct my_vm_map, pmap) * 4, 1024), 0x1000);
210 }
211 ASMAtomicWriteS32(&s_offAdjust, off - RT_OFFSETOF(struct my_vm_map, pmap));
212 }
213
214 /* calculate it. */
215 struct my_vm_map *pMyMap = (struct my_vm_map *)((uint8_t *)pMap + off);
216 return pMyMap->hdr.links.start;
217}
218#endif /* unused */
219
220#ifdef RT_STRICT
221# if 0 /* unused */
222
223/**
224 * Read from a physical page.
225 *
226 * @param HCPhys The address to start reading at.
227 * @param cb How many bytes to read.
228 * @param pvDst Where to put the bytes. This is zero'd on failure.
229 */
230static void rtR0MemObjDarwinReadPhys(RTHCPHYS HCPhys, size_t cb, void *pvDst)
231{
232 memset(pvDst, '\0', cb);
233
234 IOAddressRange aRanges[1] = { { (mach_vm_address_t)HCPhys, RT_ALIGN_Z(cb, PAGE_SIZE) } };
235 IOMemoryDescriptor *pMemDesc = IOMemoryDescriptor::withAddressRanges(&aRanges[0], RT_ELEMENTS(aRanges),
236 kIODirectionIn, NULL /*task*/);
237 if (pMemDesc)
238 {
239#if MAC_OS_X_VERSION_MIN_REQUIRED >= 1050
240 IOMemoryMap *pMemMap = pMemDesc->createMappingInTask(kernel_task, 0, kIOMapAnywhere | kIOMapDefaultCache);
241#else
242 IOMemoryMap *pMemMap = pMemDesc->map(kernel_task, 0, kIOMapAnywhere | kIOMapDefaultCache);
243#endif
244 if (pMemMap)
245 {
246 void const *pvSrc = (void const *)(uintptr_t)pMemMap->getVirtualAddress();
247 memcpy(pvDst, pvSrc, cb);
248 pMemMap->release();
249 }
250 else
251 MY_PRINTF("rtR0MemObjDarwinReadPhys: createMappingInTask failed; HCPhys=%llx\n", HCPhys);
252
253 pMemDesc->release();
254 }
255 else
256 MY_PRINTF("rtR0MemObjDarwinReadPhys: withAddressRanges failed; HCPhys=%llx\n", HCPhys);
257}
258
259
260/**
261 * Gets the PTE for a page.
262 *
263 * @returns the PTE.
264 * @param pvPage The virtual address to get the PTE for.
265 */
266static uint64_t rtR0MemObjDarwinGetPTE(void *pvPage)
267{
268 RTUINT64U u64;
269 RTCCUINTREG cr3 = ASMGetCR3();
270 RTCCUINTREG cr4 = ASMGetCR4();
271 bool fPAE = false;
272 bool fLMA = false;
273 if (cr4 & X86_CR4_PAE)
274 {
275 fPAE = true;
276 uint32_t fExtFeatures = ASMCpuId_EDX(0x80000001);
277 if (fExtFeatures & X86_CPUID_EXT_FEATURE_EDX_LONG_MODE)
278 {
279 uint64_t efer = ASMRdMsr(MSR_K6_EFER);
280 if (efer & MSR_K6_EFER_LMA)
281 fLMA = true;
282 }
283 }
284
285 if (fLMA)
286 {
287 /* PML4 */
288 rtR0MemObjDarwinReadPhys((cr3 & ~(RTCCUINTREG)PAGE_OFFSET_MASK) | (((uint64_t)(uintptr_t)pvPage >> X86_PML4_SHIFT) & X86_PML4_MASK) * 8, 8, &u64);
289 if (!(u64.u & X86_PML4E_P))
290 {
291 MY_PRINTF("rtR0MemObjDarwinGetPTE: %p -> PML4E !p\n", pvPage);
292 return 0;
293 }
294
295 /* PDPTR */
296 rtR0MemObjDarwinReadPhys((u64.u & ~(uint64_t)PAGE_OFFSET_MASK) | (((uintptr_t)pvPage >> X86_PDPT_SHIFT) & X86_PDPT_MASK_AMD64) * 8, 8, &u64);
297 if (!(u64.u & X86_PDPE_P))
298 {
299 MY_PRINTF("rtR0MemObjDarwinGetPTE: %p -> PDPTE !p\n", pvPage);
300 return 0;
301 }
302 if (u64.u & X86_PDPE_LM_PS)
303 return (u64.u & ~(uint64_t)(_1G -1)) | ((uintptr_t)pvPage & (_1G -1));
304
305 /* PD */
306 rtR0MemObjDarwinReadPhys((u64.u & ~(uint64_t)PAGE_OFFSET_MASK) | (((uintptr_t)pvPage >> X86_PD_PAE_SHIFT) & X86_PD_PAE_MASK) * 8, 8, &u64);
307 if (!(u64.u & X86_PDE_P))
308 {
309 MY_PRINTF("rtR0MemObjDarwinGetPTE: %p -> PDE !p\n", pvPage);
310 return 0;
311 }
312 if (u64.u & X86_PDE_PS)
313 return (u64.u & ~(uint64_t)(_2M -1)) | ((uintptr_t)pvPage & (_2M -1));
314
315 /* PT */
316 rtR0MemObjDarwinReadPhys((u64.u & ~(uint64_t)PAGE_OFFSET_MASK) | (((uintptr_t)pvPage >> X86_PT_PAE_SHIFT) & X86_PT_PAE_MASK) * 8, 8, &u64);
317 if (!(u64.u & X86_PTE_P))
318 {
319 MY_PRINTF("rtR0MemObjDarwinGetPTE: %p -> PTE !p\n", pvPage);
320 return 0;
321 }
322 return u64.u;
323 }
324
325 if (fPAE)
326 {
327 /* PDPTR */
328 rtR0MemObjDarwinReadPhys((u64.u & X86_CR3_PAE_PAGE_MASK) | (((uintptr_t)pvPage >> X86_PDPT_SHIFT) & X86_PDPT_MASK_PAE) * 8, 8, &u64);
329 if (!(u64.u & X86_PDE_P))
330 return 0;
331
332 /* PD */
333 rtR0MemObjDarwinReadPhys((u64.u & ~(uint64_t)PAGE_OFFSET_MASK) | (((uintptr_t)pvPage >> X86_PD_PAE_SHIFT) & X86_PD_PAE_MASK) * 8, 8, &u64);
334 if (!(u64.u & X86_PDE_P))
335 return 0;
336 if (u64.u & X86_PDE_PS)
337 return (u64.u & ~(uint64_t)(_2M -1)) | ((uintptr_t)pvPage & (_2M -1));
338
339 /* PT */
340 rtR0MemObjDarwinReadPhys((u64.u & ~(uint64_t)PAGE_OFFSET_MASK) | (((uintptr_t)pvPage >> X86_PT_PAE_SHIFT) & X86_PT_PAE_MASK) * 8, 8, &u64);
341 if (!(u64.u & X86_PTE_P))
342 return 0;
343 return u64.u;
344 }
345
346 /* PD */
347 rtR0MemObjDarwinReadPhys((u64.au32[0] & ~(uint32_t)PAGE_OFFSET_MASK) | (((uintptr_t)pvPage >> X86_PD_SHIFT) & X86_PD_MASK) * 4, 4, &u64);
348 if (!(u64.au32[0] & X86_PDE_P))
349 return 0;
350 if (u64.au32[0] & X86_PDE_PS)
351 return (u64.u & ~(uint64_t)(_2M -1)) | ((uintptr_t)pvPage & (_2M -1));
352
353 /* PT */
354 rtR0MemObjDarwinReadPhys((u64.au32[0] & ~(uint32_t)PAGE_OFFSET_MASK) | (((uintptr_t)pvPage >> X86_PT_SHIFT) & X86_PT_MASK) * 4, 4, &u64);
355 if (!(u64.au32[0] & X86_PTE_P))
356 return 0;
357 return u64.au32[0];
358
359 return 0;
360}
361
362# endif /* unused */
363#endif /* RT_STRICT */
364
365DECLHIDDEN(int) rtR0MemObjNativeFree(RTR0MEMOBJ pMem)
366{
367 PRTR0MEMOBJDARWIN pMemDarwin = (PRTR0MEMOBJDARWIN)pMem;
368 IPRT_DARWIN_SAVE_EFL_AC();
369
370 /*
371 * Release the IOMemoryDescriptor or/and IOMemoryMap associated with the object.
372 */
373 if (pMemDarwin->pMemDesc)
374 {
375 pMemDarwin->pMemDesc->complete();
376 pMemDarwin->pMemDesc->release();
377 pMemDarwin->pMemDesc = NULL;
378 }
379
380 if (pMemDarwin->pMemMap)
381 {
382 pMemDarwin->pMemMap->release();
383 pMemDarwin->pMemMap = NULL;
384 }
385
386 /*
387 * Release any memory that we've allocated or locked.
388 */
389 switch (pMemDarwin->Core.enmType)
390 {
391 case RTR0MEMOBJTYPE_LOW:
392 case RTR0MEMOBJTYPE_PAGE:
393 case RTR0MEMOBJTYPE_CONT:
394 break;
395
396 case RTR0MEMOBJTYPE_LOCK:
397 {
398#ifdef USE_VM_MAP_WIRE
399 vm_map_t Map = pMemDarwin->Core.u.Lock.R0Process != NIL_RTR0PROCESS
400 ? get_task_map((task_t)pMemDarwin->Core.u.Lock.R0Process)
401 : kernel_map;
402 kern_return_t kr = vm_map_unwire(Map,
403 (vm_map_offset_t)pMemDarwin->Core.pv,
404 (vm_map_offset_t)pMemDarwin->Core.pv + pMemDarwin->Core.cb,
405 0 /* not user */);
406 AssertRC(kr == KERN_SUCCESS); /** @todo don't ignore... */
407#endif
408 break;
409 }
410
411 case RTR0MEMOBJTYPE_PHYS:
412 /*if (pMemDarwin->Core.u.Phys.fAllocated)
413 IOFreePhysical(pMemDarwin->Core.u.Phys.PhysBase, pMemDarwin->Core.cb);*/
414 Assert(!pMemDarwin->Core.u.Phys.fAllocated);
415 break;
416
417 case RTR0MEMOBJTYPE_PHYS_NC:
418 AssertMsgFailed(("RTR0MEMOBJTYPE_PHYS_NC\n"));
419 IPRT_DARWIN_RESTORE_EFL_AC();
420 return VERR_INTERNAL_ERROR;
421
422 case RTR0MEMOBJTYPE_RES_VIRT:
423 AssertMsgFailed(("RTR0MEMOBJTYPE_RES_VIRT\n"));
424 IPRT_DARWIN_RESTORE_EFL_AC();
425 return VERR_INTERNAL_ERROR;
426
427 case RTR0MEMOBJTYPE_MAPPING:
428 /* nothing to do here. */
429 break;
430
431 default:
432 AssertMsgFailed(("enmType=%d\n", pMemDarwin->Core.enmType));
433 IPRT_DARWIN_RESTORE_EFL_AC();
434 return VERR_INTERNAL_ERROR;
435 }
436
437 IPRT_DARWIN_RESTORE_EFL_AC();
438 return VINF_SUCCESS;
439}
440
441
442
443/**
444 * Kernel memory alloc worker that uses inTaskWithPhysicalMask.
445 *
446 * @returns IPRT status code.
447 * @retval VERR_ADDRESS_TOO_BIG try another way.
448 *
449 * @param ppMem Where to return the memory object.
450 * @param cb The page aligned memory size.
451 * @param fExecutable Whether the mapping needs to be executable.
452 * @param fContiguous Whether the backing memory needs to be contiguous.
453 * @param PhysMask The mask for the backing memory (i.e. range). Use 0 if
454 * you don't care that much or is speculating.
455 * @param MaxPhysAddr The max address to verify the result against. Use
456 * UINT64_MAX if it doesn't matter.
457 * @param enmType The object type.
458 * @param uAlignment The allocation alignment (in bytes).
459 */
460static int rtR0MemObjNativeAllocWorker(PPRTR0MEMOBJINTERNAL ppMem, size_t cb,
461 bool fExecutable, bool fContiguous,
462 mach_vm_address_t PhysMask, uint64_t MaxPhysAddr,
463 RTR0MEMOBJTYPE enmType, size_t uAlignment)
464{
465 int rc;
466
467 /*
468 * Try inTaskWithPhysicalMask first, but since we don't quite trust that it
469 * actually respects the physical memory mask (10.5.x is certainly busted),
470 * we'll use rtR0MemObjNativeAllocCont as a fallback for dealing with that.
471 *
472 * The kIOMemoryKernelUserShared flag just forces the result to be page aligned.
473 *
474 * The kIOMemoryMapperNone flag is required since 10.8.2 (IOMMU changes?).
475 */
476
477 /* This is an old fudge from the snow leopard days: "Is it only on snow leopard?
478 Seen allocating memory for the VM structure, last page corrupted or
479 inaccessible." Made it only apply to snow leopard and older for now. */
480 size_t cbFudged = cb;
481 if (version_major >= 11 /* 11 = 10.7.x = Lion. */)
482 { /* likely */ }
483 else
484 cbFudged += PAGE_SIZE;
485
486 IOOptionBits fOptions = kIOMemoryKernelUserShared | kIODirectionInOut;
487 if (fContiguous)
488 fOptions |= kIOMemoryPhysicallyContiguous;
489 if (version_major >= 12 /* 12 = 10.8.x = Mountain Kitten */)
490 fOptions |= kIOMemoryMapperNone;
491
492 /* The public initWithPhysicalMask virtual method appeared in 10.7.0; in
493 versions 10.5.0 up to 10.7.0 it was private, and in 10.4.8-10.5.0 it was
494 x86-only and didn't have the alignment parameter (the slot was different too). */
495 uint64_t uAlignmentActual = uAlignment;
496 IOBufferMemoryDescriptor *pMemDesc;
497#if __MAC_OS_X_VERSION_MAX_ALLOWED >= 1070
498 if (version_major >= 11 /* 11 = 10.7.x = Lion, could probably allow 10.5.0+ here if we really wanted to. */)
499 {
500 if (fContiguous || MaxPhysAddr < UINT64_MAX)
501 {
502 fOptions |= kIOMemoryPhysicallyContiguous;
503 // cannot find any evidence of this: uAlignmentActual = 1; /* PhysMask isn't respected if higher. */
504 }
505
506 pMemDesc = new IOBufferMemoryDescriptor;
507 if (pMemDesc)
508 {
509 if (pMemDesc->initWithPhysicalMask(kernel_task, fOptions, cbFudged, uAlignmentActual, PhysMask))
510 { /* likely */ }
511 else
512 {
513 pMemDesc->release();
514 pMemDesc = NULL;
515 }
516 }
517 }
518 else
519#endif
520 pMemDesc = IOBufferMemoryDescriptor::inTaskWithPhysicalMask(kernel_task, fOptions, cbFudged, PhysMask);
521 if (pMemDesc)
522 {
523 IOReturn IORet = pMemDesc->prepare(kIODirectionInOut);
524 if (IORet == kIOReturnSuccess)
525 {
526 void *pv = pMemDesc->getBytesNoCopy(0, cbFudged);
527 if (pv)
528 {
529 /*
530 * Check if it's all below 4GB.
531 */
532 addr64_t AddrPrev = 0;
533 MaxPhysAddr &= ~(uint64_t)PAGE_OFFSET_MASK;
534 for (IOByteCount off = 0; off < cb; off += PAGE_SIZE)
535 {
536#ifdef __LP64__
537 addr64_t Addr = pMemDesc->getPhysicalSegment(off, NULL, kIOMemoryMapperNone);
538#else
539 addr64_t Addr = pMemDesc->getPhysicalSegment64(off, NULL);
540#endif
541 if ( Addr > MaxPhysAddr
542 || !Addr
543 || (Addr & PAGE_OFFSET_MASK)
544 || ( fContiguous
545 && off
546 && Addr != AddrPrev + PAGE_SIZE))
547 {
548 /* Buggy API, try allocate the memory another way. */
549 pMemDesc->complete();
550 pMemDesc->release();
551 if (PhysMask)
552 {
553 kprintf("rtR0MemObjNativeAllocWorker: off=%zx Addr=%llx AddrPrev=%llx MaxPhysAddr=%llx PhysMask=%llx fContiguous=%d fOptions=%#x - buggy API!\n",
554 (size_t)off, Addr, AddrPrev, MaxPhysAddr, PhysMask, fContiguous, fOptions);
555 LogRel(("rtR0MemObjNativeAllocWorker: off=%zx Addr=%llx AddrPrev=%llx MaxPhysAddr=%llx PhysMask=%llx fContiguous=%RTbool fOptions=%#x - buggy API!\n",
556 (size_t)off, Addr, AddrPrev, MaxPhysAddr, PhysMask, fContiguous, fOptions));
557 }
558 return VERR_ADDRESS_TOO_BIG;
559 }
560 AddrPrev = Addr;
561 }
562
563 /*
564 * Check that it's aligned correctly.
565 */
566 if ((uintptr_t)pv & (uAlignment - 1))
567 {
568 pMemDesc->complete();
569 pMemDesc->release();
570 if (PhysMask)
571 {
572 kprintf("rtR0MemObjNativeAllocWorker: pv=%p uAlignment=%#zx (MaxPhysAddr=%llx PhysMask=%llx fContiguous=%d fOptions=%#x) - buggy API!!\n",
573 pv, uAlignment, MaxPhysAddr, PhysMask, fContiguous, fOptions);
574 LogRel(("rtR0MemObjNativeAllocWorker: pv=%p uAlignment=%#zx (MaxPhysAddr=%llx PhysMask=%llx fContiguous=%RTbool fOptions=%#x) - buggy API!\n",
575 pv, uAlignment, MaxPhysAddr, PhysMask, fContiguous, fOptions));
576 }
577 return VERR_NOT_SUPPORTED;
578 }
579
580#ifdef RT_STRICT
581 /* check that the memory is actually mapped. */
582 //addr64_t Addr = pMemDesc->getPhysicalSegment64(0, NULL);
583 //printf("rtR0MemObjNativeAllocWorker: pv=%p %8llx %8llx\n", pv, rtR0MemObjDarwinGetPTE(pv), Addr);
584 RTTHREADPREEMPTSTATE State = RTTHREADPREEMPTSTATE_INITIALIZER;
585 RTThreadPreemptDisable(&State);
586 rtR0MemObjDarwinTouchPages(pv, cb);
587 RTThreadPreemptRestore(&State);
588#endif
589
590 /*
591 * Create the IPRT memory object.
592 */
593 PRTR0MEMOBJDARWIN pMemDarwin = (PRTR0MEMOBJDARWIN)rtR0MemObjNew(sizeof(*pMemDarwin), enmType, pv, cb);
594 if (pMemDarwin)
595 {
596 if (fContiguous)
597 {
598#ifdef __LP64__
599 addr64_t PhysBase64 = pMemDesc->getPhysicalSegment(0, NULL, kIOMemoryMapperNone);
600#else
601 addr64_t PhysBase64 = pMemDesc->getPhysicalSegment64(0, NULL);
602#endif
603 RTHCPHYS PhysBase = PhysBase64; Assert(PhysBase == PhysBase64);
604 if (enmType == RTR0MEMOBJTYPE_CONT)
605 pMemDarwin->Core.u.Cont.Phys = PhysBase;
606 else if (enmType == RTR0MEMOBJTYPE_PHYS)
607 pMemDarwin->Core.u.Phys.PhysBase = PhysBase;
608 else
609 AssertMsgFailed(("enmType=%d\n", enmType));
610 }
611
612#if 1 /* Experimental code. */
613 if (fExecutable)
614 {
615 rc = rtR0MemObjNativeProtect(&pMemDarwin->Core, 0, cb, RTMEM_PROT_READ | RTMEM_PROT_WRITE | RTMEM_PROT_EXEC);
616# ifdef RT_STRICT
617 /* check that the memory is actually mapped. */
618 RTTHREADPREEMPTSTATE State2 = RTTHREADPREEMPTSTATE_INITIALIZER;
619 RTThreadPreemptDisable(&State2);
620 rtR0MemObjDarwinTouchPages(pv, cb);
621 RTThreadPreemptRestore(&State2);
622# endif
623
624 /* Bug 6226: Ignore KERN_PROTECTION_FAILURE on Leopard and older. */
625 if ( rc == VERR_PERMISSION_DENIED
626 && version_major <= 10 /* 10 = 10.6.x = Snow Leopard. */)
627 rc = VINF_SUCCESS;
628 }
629 else
630#endif
631 rc = VINF_SUCCESS;
632 if (RT_SUCCESS(rc))
633 {
634 pMemDarwin->pMemDesc = pMemDesc;
635 *ppMem = &pMemDarwin->Core;
636 return VINF_SUCCESS;
637 }
638
639 rtR0MemObjDelete(&pMemDarwin->Core);
640 }
641
642 if (enmType == RTR0MEMOBJTYPE_PHYS_NC)
643 rc = VERR_NO_PHYS_MEMORY;
644 else if (enmType == RTR0MEMOBJTYPE_LOW)
645 rc = VERR_NO_LOW_MEMORY;
646 else if (enmType == RTR0MEMOBJTYPE_CONT)
647 rc = VERR_NO_CONT_MEMORY;
648 else
649 rc = VERR_NO_MEMORY;
650 }
651 else
652 rc = VERR_MEMOBJ_INIT_FAILED;
653
654 pMemDesc->complete();
655 }
656 else
657 rc = RTErrConvertFromDarwinIO(IORet);
658 pMemDesc->release();
659 }
660 else
661 rc = VERR_MEMOBJ_INIT_FAILED;
662 Assert(rc != VERR_ADDRESS_TOO_BIG);
663 return rc;
664}
665
666
667DECLHIDDEN(int) rtR0MemObjNativeAllocPage(PPRTR0MEMOBJINTERNAL ppMem, size_t cb, bool fExecutable)
668{
669 IPRT_DARWIN_SAVE_EFL_AC();
670
671 int rc = rtR0MemObjNativeAllocWorker(ppMem, cb, fExecutable, false /* fContiguous */,
672 0 /* PhysMask */, UINT64_MAX, RTR0MEMOBJTYPE_PAGE, PAGE_SIZE);
673
674 IPRT_DARWIN_RESTORE_EFL_AC();
675 return rc;
676}
677
678
679DECLHIDDEN(int) rtR0MemObjNativeAllocLow(PPRTR0MEMOBJINTERNAL ppMem, size_t cb, bool fExecutable)
680{
681 IPRT_DARWIN_SAVE_EFL_AC();
682
683 /*
684 * Try IOMallocPhysical/IOMallocAligned first.
685 * Then try optimistically without a physical address mask, which will always
686 * end up using IOMallocAligned.
687 *
688 * (See bug comment in the worker and IOBufferMemoryDescriptor::initWithPhysicalMask.)
689 */
690 int rc = rtR0MemObjNativeAllocWorker(ppMem, cb, fExecutable, false /* fContiguous */,
691 ~(uint32_t)PAGE_OFFSET_MASK, _4G - PAGE_SIZE, RTR0MEMOBJTYPE_LOW, PAGE_SIZE);
692 if (rc == VERR_ADDRESS_TOO_BIG)
693 rc = rtR0MemObjNativeAllocWorker(ppMem, cb, fExecutable, false /* fContiguous */,
694 0 /* PhysMask */, _4G - PAGE_SIZE, RTR0MEMOBJTYPE_LOW, PAGE_SIZE);
695
696 IPRT_DARWIN_RESTORE_EFL_AC();
697 return rc;
698}
699
700
701DECLHIDDEN(int) rtR0MemObjNativeAllocCont(PPRTR0MEMOBJINTERNAL ppMem, size_t cb, bool fExecutable)
702{
703 IPRT_DARWIN_SAVE_EFL_AC();
704
705 int rc = rtR0MemObjNativeAllocWorker(ppMem, cb, fExecutable, true /* fContiguous */,
706 ~(uint32_t)PAGE_OFFSET_MASK, _4G - PAGE_SIZE,
707 RTR0MEMOBJTYPE_CONT, PAGE_SIZE);
708
709 /*
710 * Workaround for bogus IOKernelAllocateContiguous behavior, just in case.
711 * cb <= PAGE_SIZE allocations take a different path, using a different allocator.
712 */
713 if (RT_FAILURE(rc) && cb <= PAGE_SIZE)
714 rc = rtR0MemObjNativeAllocWorker(ppMem, cb + PAGE_SIZE, fExecutable, true /* fContiguous */,
715 ~(uint32_t)PAGE_OFFSET_MASK, _4G - PAGE_SIZE,
716 RTR0MEMOBJTYPE_CONT, PAGE_SIZE);
717 IPRT_DARWIN_RESTORE_EFL_AC();
718 return rc;
719}
720
721
722DECLHIDDEN(int) rtR0MemObjNativeAllocPhys(PPRTR0MEMOBJINTERNAL ppMem, size_t cb, RTHCPHYS PhysHighest, size_t uAlignment)
723{
724 if (uAlignment != PAGE_SIZE)
725 {
726 /* See rtR0MemObjNativeAllocWorker: */
727 if (version_major < 9 /* 9 = 10.5.x = Leopard */)
728 return VERR_NOT_SUPPORTED;
729 }
730
731 IPRT_DARWIN_SAVE_EFL_AC();
732
733 /*
734 * Translate the PhysHighest address into a mask.
735 */
736 int rc;
737 if (PhysHighest == NIL_RTHCPHYS)
738 rc = rtR0MemObjNativeAllocWorker(ppMem, cb, false /* fExecutable */, true /* fContiguous */,
739 uAlignment <= PAGE_SIZE ? 0 : ~(mach_vm_address_t)(uAlignment - 1) /* PhysMask*/,
740 UINT64_MAX, RTR0MEMOBJTYPE_PHYS, uAlignment);
741 else
742 {
743 mach_vm_address_t PhysMask = 0;
744 PhysMask = ~(mach_vm_address_t)0;
745 while (PhysMask > (PhysHighest | PAGE_OFFSET_MASK))
746 PhysMask >>= 1;
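        /* Example: for PhysHighest = _4G - 1 the loop above leaves PhysMask = 0xffffffff,
           i.e. a mask covering the low 4GB, before the alignment bits are cleared below. */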
747 AssertReturn(PhysMask + 1 <= cb, VERR_INVALID_PARAMETER);
748 PhysMask &= ~(mach_vm_address_t)(uAlignment - 1);
749
750 rc = rtR0MemObjNativeAllocWorker(ppMem, cb, false /* fExecutable */, true /* fContiguous */,
751 PhysMask, PhysHighest, RTR0MEMOBJTYPE_PHYS, uAlignment);
752 }
753
754 IPRT_DARWIN_RESTORE_EFL_AC();
755 return rc;
756}
757
758
759DECLHIDDEN(int) rtR0MemObjNativeAllocPhysNC(PPRTR0MEMOBJINTERNAL ppMem, size_t cb, RTHCPHYS PhysHighest)
760{
761 /** @todo rtR0MemObjNativeAllocPhys / darwin.
762 * This might be a bit problematic and may very well require having to create our own
763 * object which we populate with pages but without mapping it into any address space.
764 * Estimate is 2-3 days.
765 */
766 RT_NOREF(ppMem, cb, PhysHighest);
767 return VERR_NOT_SUPPORTED;
768}
769
770
771DECLHIDDEN(int) rtR0MemObjNativeEnterPhys(PPRTR0MEMOBJINTERNAL ppMem, RTHCPHYS Phys, size_t cb, uint32_t uCachePolicy)
772{
773 AssertReturn(uCachePolicy == RTMEM_CACHE_POLICY_DONT_CARE, VERR_NOT_SUPPORTED);
774 IPRT_DARWIN_SAVE_EFL_AC();
775
776 /*
777 * Create a descriptor for it (the validation is always true on Intel Macs, but
778 * since it doesn't hurt, we keep it in).
779 */
780 int rc = VERR_ADDRESS_TOO_BIG;
781 IOAddressRange aRanges[1] = { { Phys, cb } };
782 if ( aRanges[0].address == Phys
783 && aRanges[0].length == cb)
784 {
785 IOMemoryDescriptor *pMemDesc = IOMemoryDescriptor::withAddressRanges(&aRanges[0], RT_ELEMENTS(aRanges),
786 kIODirectionInOut, NULL /*task*/);
787 if (pMemDesc)
788 {
789#ifdef __LP64__
790 Assert(Phys == pMemDesc->getPhysicalSegment(0, NULL, kIOMemoryMapperNone));
791#else
792 Assert(Phys == pMemDesc->getPhysicalSegment64(0, NULL));
793#endif
794
795 /*
796 * Create the IPRT memory object.
797 */
798 PRTR0MEMOBJDARWIN pMemDarwin = (PRTR0MEMOBJDARWIN)rtR0MemObjNew(sizeof(*pMemDarwin), RTR0MEMOBJTYPE_PHYS, NULL, cb);
799 if (pMemDarwin)
800 {
801 pMemDarwin->Core.u.Phys.PhysBase = Phys;
802 pMemDarwin->Core.u.Phys.fAllocated = false;
803 pMemDarwin->Core.u.Phys.uCachePolicy = uCachePolicy;
804 pMemDarwin->pMemDesc = pMemDesc;
805 *ppMem = &pMemDarwin->Core;
806 IPRT_DARWIN_RESTORE_EFL_AC();
807 return VINF_SUCCESS;
808 }
809
810 rc = VERR_NO_MEMORY;
811 pMemDesc->release();
812 }
813 else
814 rc = VERR_MEMOBJ_INIT_FAILED;
815 }
816 else
817 AssertMsgFailed(("%#llx %llx\n", (unsigned long long)Phys, (unsigned long long)cb));
818 IPRT_DARWIN_RESTORE_EFL_AC();
819 return rc;
820}
821
822
823/**
824 * Internal worker for locking down pages.
825 *
826 * @return IPRT status code.
827 *
828 * @param ppMem Where to store the memory object pointer.
829 * @param pv First page.
830 * @param cb Number of bytes.
831 * @param fAccess The desired access, a combination of RTMEM_PROT_READ
832 * and RTMEM_PROT_WRITE.
833 * @param Task The task \a pv and \a cb refers to.
834 */
835static int rtR0MemObjNativeLock(PPRTR0MEMOBJINTERNAL ppMem, void *pv, size_t cb, uint32_t fAccess, task_t Task)
836{
837 IPRT_DARWIN_SAVE_EFL_AC();
838 NOREF(fAccess);
839#ifdef USE_VM_MAP_WIRE
840 vm_map_t Map = get_task_map(Task);
841 Assert(Map);
842
843 /*
844 * First try lock the memory.
845 */
846 int rc = VERR_LOCK_FAILED;
847 kern_return_t kr = vm_map_wire(get_task_map(Task),
848 (vm_map_offset_t)pv,
849 (vm_map_offset_t)pv + cb,
850 VM_PROT_DEFAULT,
851 0 /* not user */);
852 if (kr == KERN_SUCCESS)
853 {
854 /*
855 * Create the IPRT memory object.
856 */
857 PRTR0MEMOBJDARWIN pMemDarwin = (PRTR0MEMOBJDARWIN)rtR0MemObjNew(sizeof(*pMemDarwin), RTR0MEMOBJTYPE_LOCK, pv, cb);
858 if (pMemDarwin)
859 {
860 pMemDarwin->Core.u.Lock.R0Process = (RTR0PROCESS)Task;
861 *ppMem = &pMemDarwin->Core;
862
863 IPRT_DARWIN_RESTORE_EFL_AC();
864 return VINF_SUCCESS;
865 }
866
867 kr = vm_map_unwire(get_task_map(Task), (vm_map_offset_t)pv, (vm_map_offset_t)pv + cb, 0 /* not user */);
868 Assert(kr == KERN_SUCCESS);
869 rc = VERR_NO_MEMORY;
870 }
871
872#else
873
874 /*
875 * Create a descriptor and try lock it (prepare).
876 */
877 int rc = VERR_MEMOBJ_INIT_FAILED;
878 IOMemoryDescriptor *pMemDesc = IOMemoryDescriptor::withAddressRange((vm_address_t)pv, cb, kIODirectionInOut, Task);
879 if (pMemDesc)
880 {
881 IOReturn IORet = pMemDesc->prepare(kIODirectionInOut);
882 if (IORet == kIOReturnSuccess)
883 {
884 /*
885 * Create the IPRT memory object.
886 */
887 PRTR0MEMOBJDARWIN pMemDarwin = (PRTR0MEMOBJDARWIN)rtR0MemObjNew(sizeof(*pMemDarwin), RTR0MEMOBJTYPE_LOCK, pv, cb);
888 if (pMemDarwin)
889 {
890 pMemDarwin->Core.u.Lock.R0Process = (RTR0PROCESS)Task;
891 pMemDarwin->pMemDesc = pMemDesc;
892 *ppMem = &pMemDarwin->Core;
893
894 IPRT_DARWIN_RESTORE_EFL_AC();
895 return VINF_SUCCESS;
896 }
897
898 pMemDesc->complete();
899 rc = VERR_NO_MEMORY;
900 }
901 else
902 rc = VERR_LOCK_FAILED;
903 pMemDesc->release();
904 }
905#endif
906 IPRT_DARWIN_RESTORE_EFL_AC();
907 return rc;
908}
909
910
911DECLHIDDEN(int) rtR0MemObjNativeLockUser(PPRTR0MEMOBJINTERNAL ppMem, RTR3PTR R3Ptr, size_t cb, uint32_t fAccess, RTR0PROCESS R0Process)
912{
913 return rtR0MemObjNativeLock(ppMem, (void *)R3Ptr, cb, fAccess, (task_t)R0Process);
914}
915
916
917DECLHIDDEN(int) rtR0MemObjNativeLockKernel(PPRTR0MEMOBJINTERNAL ppMem, void *pv, size_t cb, uint32_t fAccess)
918{
919 return rtR0MemObjNativeLock(ppMem, pv, cb, fAccess, kernel_task);
920}
921
922
923DECLHIDDEN(int) rtR0MemObjNativeReserveKernel(PPRTR0MEMOBJINTERNAL ppMem, void *pvFixed, size_t cb, size_t uAlignment)
924{
925 RT_NOREF(ppMem, pvFixed, cb, uAlignment);
926 return VERR_NOT_SUPPORTED;
927}
928
929
930DECLHIDDEN(int) rtR0MemObjNativeReserveUser(PPRTR0MEMOBJINTERNAL ppMem, RTR3PTR R3PtrFixed, size_t cb, size_t uAlignment, RTR0PROCESS R0Process)
931{
932 RT_NOREF(ppMem, R3PtrFixed, cb, uAlignment, R0Process);
933 return VERR_NOT_SUPPORTED;
934}
935
936
937DECLHIDDEN(int) rtR0MemObjNativeMapKernel(PPRTR0MEMOBJINTERNAL ppMem, RTR0MEMOBJ pMemToMap, void *pvFixed, size_t uAlignment,
938 unsigned fProt, size_t offSub, size_t cbSub)
939{
940 RT_NOREF(fProt);
941 AssertReturn(pvFixed == (void *)-1, VERR_NOT_SUPPORTED);
942
943 /*
944 * Check that the specified alignment is supported.
945 */
946 if (uAlignment > PAGE_SIZE)
947 return VERR_NOT_SUPPORTED;
948 Assert(!offSub || cbSub);
949
950 IPRT_DARWIN_SAVE_EFL_AC();
951
952 /*
953 * Must have a memory descriptor that we can map.
954 */
955 int rc = VERR_INVALID_PARAMETER;
956 PRTR0MEMOBJDARWIN pMemToMapDarwin = (PRTR0MEMOBJDARWIN)pMemToMap;
957 if (pMemToMapDarwin->pMemDesc)
958 {
959 /* The kIOMapPrefault option was added in 10.10.0; it causes the PTEs to be populated with
960 INTEL_PTE_WIRED set, just like we desire (see further down). However, till
961 10.13.0 it was not available for use on kernel mappings. Oh, fudge. */
962#if MAC_OS_X_VERSION_MIN_REQUIRED >= 1050
963 static uint32_t volatile s_fOptions = UINT32_MAX;
964 uint32_t fOptions = s_fOptions;
965 if (RT_UNLIKELY(fOptions == UINT32_MAX))
966 s_fOptions = fOptions = version_major >= 17 ? 0x10000000 /*kIOMapPrefault*/ : 0; /* Since 10.13.0 (High Sierra). */
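        /* The raw 0x10000000 value is presumably used here because kIOMapPrefault is not
           defined in the older SDK headers this code still builds against. */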
967
968 IOMemoryMap *pMemMap = pMemToMapDarwin->pMemDesc->createMappingInTask(kernel_task,
969 0,
970 kIOMapAnywhere | kIOMapDefaultCache | fOptions,
971 offSub,
972 cbSub);
973#else
974 IOMemoryMap *pMemMap = pMemToMapDarwin->pMemDesc->map(kernel_task,
975 0,
976 kIOMapAnywhere | kIOMapDefaultCache,
977 offSub,
978 cbSub);
979#endif
980 if (pMemMap)
981 {
982 IOVirtualAddress VirtAddr = pMemMap->getVirtualAddress();
983 void *pv = (void *)(uintptr_t)VirtAddr;
984 if ((uintptr_t)pv == VirtAddr && pv != NULL)
985 {
986//#ifdef __LP64__
987// addr64_t Addr = pMemToMapDarwin->pMemDesc->getPhysicalSegment(offSub, NULL, kIOMemoryMapperNone);
988//#else
989// addr64_t Addr = pMemToMapDarwin->pMemDesc->getPhysicalSegment64(offSub, NULL);
990//#endif
991// MY_PRINTF("pv=%p: %8llx %8llx\n", pv, rtR0MemObjDarwinGetPTE(pv), Addr);
992
993// /*
994// * Explicitly lock it so that we're sure it is present and that
995// * its PTEs cannot be recycled.
996// * Note! withAddressRange() doesn't work as it adds kIOMemoryTypeVirtual64
997// * to the options which causes prepare() to not wire the pages.
998// * This is probably a bug.
999// */
1000// IOAddressRange Range = { (mach_vm_address_t)pv, cbSub };
1001// IOMemoryDescriptor *pMemDesc = IOMemoryDescriptor::withOptions(&Range,
1002// 1 /* count */,
1003// 0 /* offset */,
1004// kernel_task,
1005// kIODirectionInOut | kIOMemoryTypeVirtual,
1006// kIOMapperSystem);
1007// if (pMemDesc)
1008// {
1009// IOReturn IORet = pMemDesc->prepare(kIODirectionInOut);
1010// if (IORet == kIOReturnSuccess)
1011// {
1012 /* HACK ALERT! On kernels older than 10.10 (xnu version 14), we need to fault in
1013 the pages here so they can safely be accessed from inside simple
1014 locks and when preemption is disabled (no page-ins allowed).
1015 Note! This touching does not cause INTEL_PTE_WIRED (bit 10) to be set as we go
1016 thru general #PF and vm_fault doesn't figure it should be wired or something. */
1017 rtR0MemObjDarwinTouchPages(pv, cbSub ? cbSub : pMemToMap->cb);
1018 /** @todo First, the memory should've been mapped by now, and second, it
1019 * should have the wired attribute in the PTE (bit 10). Neither seems to
1020 * be the case. The disabled locking code doesn't make any difference,
1021 * which is extremely odd, and breaks rtR0MemObjNativeGetPagePhysAddr
1022 * (getPhysicalSegment64 -> 64 for the lock descriptor. */
1023//#ifdef __LP64__
1024// addr64_t Addr2 = pMemToMapDarwin->pMemDesc->getPhysicalSegment(offSub, NULL, kIOMemoryMapperNone);
1025//#else
1026// addr64_t Addr2 = pMemToMapDarwin->pMemDesc->getPhysicalSegment64(offSub, NULL);
1027//#endif
1028// MY_PRINTF("pv=%p: %8llx %8llx (%d)\n", pv, rtR0MemObjDarwinGetPTE(pv), Addr2, 2);
1029
1030 /*
1031 * Create the IPRT memory object.
1032 */
1033 PRTR0MEMOBJDARWIN pMemDarwin = (PRTR0MEMOBJDARWIN)rtR0MemObjNew(sizeof(*pMemDarwin), RTR0MEMOBJTYPE_MAPPING,
1034 pv, cbSub ? cbSub : pMemToMap->cb);
1035 if (pMemDarwin)
1036 {
1037 pMemDarwin->Core.u.Mapping.R0Process = NIL_RTR0PROCESS;
1038 pMemDarwin->pMemMap = pMemMap;
1039// pMemDarwin->pMemDesc = pMemDesc;
1040 *ppMem = &pMemDarwin->Core;
1041
1042 IPRT_DARWIN_RESTORE_EFL_AC();
1043 return VINF_SUCCESS;
1044 }
1045
1046// pMemDesc->complete();
1047// rc = VERR_NO_MEMORY;
1048// }
1049// else
1050// rc = RTErrConvertFromDarwinIO(IORet);
1051// pMemDesc->release();
1052// }
1053// else
1054// rc = VERR_MEMOBJ_INIT_FAILED;
1055 }
1056 else if (pv)
1057 rc = VERR_ADDRESS_TOO_BIG;
1058 else
1059 rc = VERR_MAP_FAILED;
1060 pMemMap->release();
1061 }
1062 else
1063 rc = VERR_MAP_FAILED;
1064 }
1065
1066 IPRT_DARWIN_RESTORE_EFL_AC();
1067 return rc;
1068}
1069
1070
1071DECLHIDDEN(int) rtR0MemObjNativeMapUser(PPRTR0MEMOBJINTERNAL ppMem, RTR0MEMOBJ pMemToMap, RTR3PTR R3PtrFixed, size_t uAlignment,
1072 unsigned fProt, RTR0PROCESS R0Process, size_t offSub, size_t cbSub)
1073{
1074 RT_NOREF(fProt);
1075
1076 /*
1077 * Check for unsupported things.
1078 */
1079 AssertReturn(R3PtrFixed == (RTR3PTR)-1, VERR_NOT_SUPPORTED);
1080 if (uAlignment > PAGE_SIZE)
1081 return VERR_NOT_SUPPORTED;
1082 Assert(!offSub || cbSub);
1083
1084 IPRT_DARWIN_SAVE_EFL_AC();
1085
1086 /*
1087 * Must have a memory descriptor.
1088 */
1089 int rc = VERR_INVALID_PARAMETER;
1090 PRTR0MEMOBJDARWIN pMemToMapDarwin = (PRTR0MEMOBJDARWIN)pMemToMap;
1091 if (pMemToMapDarwin->pMemDesc)
1092 {
1093#if MAC_OS_X_VERSION_MIN_REQUIRED >= 101000 /* The kIOMapPrefault option was added in 10.10.0. */
1094 IOMemoryMap *pMemMap = pMemToMapDarwin->pMemDesc->createMappingInTask((task_t)R0Process,
1095 0,
1096 kIOMapAnywhere | kIOMapDefaultCache | kIOMapPrefault,
1097 offSub,
1098 cbSub);
1099#elif MAC_OS_X_VERSION_MIN_REQUIRED >= 1050
1100 static uint32_t volatile s_fOptions = UINT32_MAX;
1101 uint32_t fOptions = s_fOptions;
1102 if (RT_UNLIKELY(fOptions == UINT32_MAX))
1103 s_fOptions = fOptions = version_major >= 14 ? 0x10000000 /*kIOMapPrefault*/ : 0; /* Since 10.10.0. */
1104 IOMemoryMap *pMemMap = pMemToMapDarwin->pMemDesc->createMappingInTask((task_t)R0Process,
1105 0,
1106 kIOMapAnywhere | kIOMapDefaultCache | fOptions,
1107 offSub,
1108 cbSub);
1109#else
1110 IOMemoryMap *pMemMap = pMemToMapDarwin->pMemDesc->map((task_t)R0Process,
1111 0,
1112 kIOMapAnywhere | kIOMapDefaultCache,
1113 offSub,
1114 cbSub);
1115#endif
1116 if (pMemMap)
1117 {
1118 IOVirtualAddress VirtAddr = pMemMap->getVirtualAddress();
1119 void *pv = (void *)(uintptr_t)VirtAddr;
1120 if ((uintptr_t)pv == VirtAddr && pv != NULL)
1121 {
1122 /*
1123 * Create the IPRT memory object.
1124 */
1125 PRTR0MEMOBJDARWIN pMemDarwin = (PRTR0MEMOBJDARWIN)rtR0MemObjNew(sizeof(*pMemDarwin), RTR0MEMOBJTYPE_MAPPING,
1126 pv, cbSub ? cbSub : pMemToMap->cb);
1127 if (pMemDarwin)
1128 {
1129 pMemDarwin->Core.u.Mapping.R0Process = R0Process;
1130 pMemDarwin->pMemMap = pMemMap;
1131 *ppMem = &pMemDarwin->Core;
1132
1133 IPRT_DARWIN_RESTORE_EFL_AC();
1134 return VINF_SUCCESS;
1135 }
1136
1137 rc = VERR_NO_MEMORY;
1138 }
1139 else if (pv)
1140 rc = VERR_ADDRESS_TOO_BIG;
1141 else
1142 rc = VERR_MAP_FAILED;
1143 pMemMap->release();
1144 }
1145 else
1146 rc = VERR_MAP_FAILED;
1147 }
1148
1149 IPRT_DARWIN_RESTORE_EFL_AC();
1150 return rc;
1151}
1152
1153
1154DECLHIDDEN(int) rtR0MemObjNativeProtect(PRTR0MEMOBJINTERNAL pMem, size_t offSub, size_t cbSub, uint32_t fProt)
1155{
1156 IPRT_DARWIN_SAVE_EFL_AC();
1157
1158 /* Get the map for the object. */
1159 vm_map_t pVmMap = rtR0MemObjDarwinGetMap(pMem);
1160 if (!pVmMap)
1161 {
1162 IPRT_DARWIN_RESTORE_EFL_AC();
1163 return VERR_NOT_SUPPORTED;
1164 }
1165
1166 /*
1167 * Convert the protection.
1168 */
1169 vm_prot_t fMachProt;
1170 switch (fProt)
1171 {
1172 case RTMEM_PROT_NONE:
1173 fMachProt = VM_PROT_NONE;
1174 break;
1175 case RTMEM_PROT_READ:
1176 fMachProt = VM_PROT_READ;
1177 break;
1178 case RTMEM_PROT_READ | RTMEM_PROT_WRITE:
1179 fMachProt = VM_PROT_READ | VM_PROT_WRITE;
1180 break;
1181 case RTMEM_PROT_READ | RTMEM_PROT_WRITE | RTMEM_PROT_EXEC:
1182 fMachProt = VM_PROT_READ | VM_PROT_WRITE | VM_PROT_EXECUTE;
1183 break;
1184 case RTMEM_PROT_WRITE:
1185 fMachProt = VM_PROT_WRITE | VM_PROT_READ; /* never write-only */
1186 break;
1187 case RTMEM_PROT_WRITE | RTMEM_PROT_EXEC:
1188 fMachProt = VM_PROT_WRITE | VM_PROT_EXECUTE | VM_PROT_READ; /* never write-only or execute-only */
1189 break;
1190 case RTMEM_PROT_EXEC:
1191 fMachProt = VM_PROT_EXECUTE | VM_PROT_READ; /* never execute-only */
1192 break;
1193 default:
1194 AssertFailedReturn(VERR_INVALID_PARAMETER);
1195 }
1196
1197 /*
1198 * Do the job.
1199 */
1200 vm_offset_t Start = (uintptr_t)pMem->pv + offSub;
1201 kern_return_t krc = vm_protect(pVmMap,
1202 Start,
1203 cbSub,
1204 false,
1205 fMachProt);
1206 if (krc != KERN_SUCCESS)
1207 {
1208 static int s_cComplaints = 0;
1209 if (s_cComplaints < 10)
1210 {
1211 s_cComplaints++;
1212 printf("rtR0MemObjNativeProtect: vm_protect(%p,%p,%p,false,%#x) -> %d\n",
1213 pVmMap, (void *)Start, (void *)cbSub, fMachProt, krc);
1214
1215 kern_return_t krc2;
1216 vm_offset_t pvReal = Start;
1217 vm_size_t cbReal = 0;
1218 mach_msg_type_number_t cInfo = VM_REGION_BASIC_INFO_COUNT;
1219 struct vm_region_basic_info Info;
1220 RT_ZERO(Info);
1221 krc2 = vm_region(pVmMap, &pvReal, &cbReal, VM_REGION_BASIC_INFO, (vm_region_info_t)&Info, &cInfo, NULL);
1222 printf("rtR0MemObjNativeProtect: basic info - krc2=%d pv=%p cb=%p prot=%#x max=%#x inh=%#x shr=%d rvd=%d off=%#x behavior=%#x wired=%#x\n",
1223 krc2, (void *)pvReal, (void *)cbReal, Info.protection, Info.max_protection, Info.inheritance,
1224 Info.shared, Info.reserved, Info.offset, Info.behavior, Info.user_wired_count);
1225 }
1226 IPRT_DARWIN_RESTORE_EFL_AC();
1227 return RTErrConvertFromDarwinKern(krc);
1228 }
1229
1230 /*
1231 * Touch the pages if they should be writable afterwards and accessible
1232 * from code which should never fault. vm_protect() may leave pages
1233 * temporarily write protected, possibly due to pmap no-upgrade rules?
1234 *
1235 * This is the same trick (or HACK ALERT if you like) as applied in
1236 * rtR0MemObjNativeMapKernel.
1237 */
1238 if ( pMem->enmType != RTR0MEMOBJTYPE_MAPPING
1239 || pMem->u.Mapping.R0Process == NIL_RTR0PROCESS)
1240 {
1241 if (fProt & RTMEM_PROT_WRITE)
1242 rtR0MemObjDarwinTouchPages((void *)Start, cbSub);
1243 /*
1244 * Sniff (read) read-only pages too, just to be sure.
1245 */
1246 else if (fProt & (RTMEM_PROT_READ | RTMEM_PROT_EXEC))
1247 rtR0MemObjDarwinSniffPages((void const *)Start, cbSub);
1248 }
1249
1250 IPRT_DARWIN_RESTORE_EFL_AC();
1251 return VINF_SUCCESS;
1252}
1253
1254
1255DECLHIDDEN(RTHCPHYS) rtR0MemObjNativeGetPagePhysAddr(PRTR0MEMOBJINTERNAL pMem, size_t iPage)
1256{
1257 RTHCPHYS PhysAddr;
1258 PRTR0MEMOBJDARWIN pMemDarwin = (PRTR0MEMOBJDARWIN)pMem;
1259 IPRT_DARWIN_SAVE_EFL_AC();
1260
1261#ifdef USE_VM_MAP_WIRE
1262 /*
1263 * Locked memory doesn't have a memory descriptor and
1264 * needs to be handled differently.
1265 */
1266 if (pMemDarwin->Core.enmType == RTR0MEMOBJTYPE_LOCK)
1267 {
1268 ppnum_t PgNo;
1269 if (pMemDarwin->Core.u.Lock.R0Process == NIL_RTR0PROCESS)
1270 PgNo = pmap_find_phys(kernel_pmap, (uintptr_t)pMemDarwin->Core.pv + iPage * PAGE_SIZE);
1271 else
1272 {
1273 /*
1274 * From what I can tell, Apple seems to have locked up all the
1275 * available interfaces that could help us obtain the pmap_t of a task
1276 * or vm_map_t.
1277
1278 * So, we'll have to figure out where in the vm_map_t structure it is
1279 * and read it ourselves. ASSUMING that kernel_pmap is pointed to by
1280 * kernel_map->pmap, we scan kernel_map to locate the structure offset.
1281 * Not nice, but it will hopefully do the job in a reliable manner...
1282 *
1283 * (get_task_pmap, get_map_pmap or vm_map_pmap is what we really need btw.)
1284 */
1285 static int s_offPmap = -1;
1286 if (RT_UNLIKELY(s_offPmap == -1))
1287 {
1288 pmap_t const *p = (pmap_t *)kernel_map;
1289 pmap_t const * const pEnd = p + 64;
1290 for (; p < pEnd; p++)
1291 if (*p == kernel_pmap)
1292 {
1293 s_offPmap = (uintptr_t)p - (uintptr_t)kernel_map;
1294 break;
1295 }
1296 AssertReturn(s_offPmap >= 0, NIL_RTHCPHYS);
1297 }
1298 pmap_t Pmap = *(pmap_t *)((uintptr_t)get_task_map((task_t)pMemDarwin->Core.u.Lock.R0Process) + s_offPmap);
1299 PgNo = pmap_find_phys(Pmap, (uintptr_t)pMemDarwin->Core.pv + iPage * PAGE_SIZE);
1300 }
1301
1302 IPRT_DARWIN_RESTORE_EFL_AC();
1303 AssertReturn(PgNo, NIL_RTHCPHYS);
1304 PhysAddr = (RTHCPHYS)PgNo << PAGE_SHIFT;
1305 Assert((PhysAddr >> PAGE_SHIFT) == PgNo);
1306 }
1307 else
1308#endif /* USE_VM_MAP_WIRE */
1309 {
1310 /*
1311 * Get the memory descriptor.
1312 */
1313 IOMemoryDescriptor *pMemDesc = pMemDarwin->pMemDesc;
1314 if (!pMemDesc)
1315 pMemDesc = pMemDarwin->pMemMap->getMemoryDescriptor();
1316 AssertReturn(pMemDesc, NIL_RTHCPHYS);
1317
1318 /*
1319 * If we've got a memory descriptor, use getPhysicalSegment64().
1320 */
1321#ifdef __LP64__
1322 addr64_t Addr = pMemDesc->getPhysicalSegment(iPage * PAGE_SIZE, NULL, kIOMemoryMapperNone);
1323#else
1324 addr64_t Addr = pMemDesc->getPhysicalSegment64(iPage * PAGE_SIZE, NULL);
1325#endif
1326 IPRT_DARWIN_RESTORE_EFL_AC();
1327 AssertMsgReturn(Addr, ("iPage=%u\n", iPage), NIL_RTHCPHYS);
1328 PhysAddr = Addr;
1329 AssertMsgReturn(PhysAddr == Addr, ("PhysAddr=%RHp Addr=%RX64\n", PhysAddr, (uint64_t)Addr), NIL_RTHCPHYS);
1330 }
1331
1332 return PhysAddr;
1333}
1334
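For context, ring-0 code does not call these rtR0MemObjNative* workers directly; they are invoked by the generic IPRT memory-object layer. A minimal usage sketch against the public iprt/memobj.h API, assuming the standard RTR0MemObjAllocPage, RTR0MemObjAddress, RTR0MemObjGetPagePhysAddr and RTR0MemObjFree signatures, might look like this:

#include <iprt/memobj.h>
#include <iprt/param.h>
#include <iprt/errcore.h>

/* Allocate one page of ring-0 memory, look up its physical address, then free it. */
static int exampleMemObjUsage(void)
{
    RTR0MEMOBJ hMemObj;
    int rc = RTR0MemObjAllocPage(&hMemObj, PAGE_SIZE, false /*fExecutable*/);
    if (RT_SUCCESS(rc))
    {
        void     *pv     = RTR0MemObjAddress(hMemObj);                /* ring-0 mapping */
        RTHCPHYS  HCPhys = RTR0MemObjGetPagePhysAddr(hMemObj, 0 /*iPage*/);
        NOREF(pv); NOREF(HCPhys);                                     /* ... use pv / HCPhys ... */
        rc = RTR0MemObjFree(hMemObj, false /*fFreeMappings*/);
    }
    return rc;
}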