VirtualBox

source: vbox/trunk/src/VBox/Runtime/r0drv/darwin/memobj-r0drv-darwin.cpp@82898

Last change on this file since 82898 was 82898, checked in by vboxsync, 5 years ago

IPRT/memobj-r0drv-darwin.cpp: Don't set the kIOMemoryPhysicallyContiguous flag when MaxPhysAddr != UINT64_MAX, the mask works, though only if alignment is <= 1 (checked sources, must've misread them the other day/night). This should fix the trouble we're having on the testboxes at present. Also added the kIOMemoryHostPhysicallyContiguous flag just in case the xnu code changes. bugref:9627

  • Property svn:eol-style set to native
  • Property svn:keywords set to Id Revision
File size: 51.2 KB
1/* $Id: memobj-r0drv-darwin.cpp 82898 2020-01-29 00:56:30Z vboxsync $ */
2/** @file
3 * IPRT - Ring-0 Memory Objects, Darwin.
4 */
5
6/*
7 * Copyright (C) 2006-2019 Oracle Corporation
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 *
17 * The contents of this file may alternatively be used under the terms
18 * of the Common Development and Distribution License Version 1.0
19 * (CDDL) only, as it comes in the "COPYING.CDDL" file of the
20 * VirtualBox OSE distribution, in which case the provisions of the
21 * CDDL are applicable instead of those of the GPL.
22 *
23 * You may elect to license modified versions of this file under the
24 * terms and conditions of either the GPL or the CDDL or both.
25 */
26
27
28/*********************************************************************************************************************************
29* Header Files *
30*********************************************************************************************************************************/
31#define RTMEM_NO_WRAP_TO_EF_APIS /* circular dependency otherwise. */
32#include "the-darwin-kernel.h"
33#include "internal/iprt.h"
34#include <iprt/memobj.h>
35
36#include <iprt/asm.h>
37#if defined(RT_ARCH_AMD64) || defined(RT_ARCH_X86)
38# include <iprt/x86.h>
39# include <iprt/asm-amd64-x86.h>
40#endif
41#include <iprt/assert.h>
42#include <iprt/log.h>
43#include <iprt/mem.h>
44#include <iprt/param.h>
45#include <iprt/process.h>
46#include <iprt/string.h>
47#include <iprt/thread.h>
48#include "internal/memobj.h"
49
50
51/*********************************************************************************************************************************
52* Defined Constants And Macros *
53*********************************************************************************************************************************/
54#define MY_PRINTF(...) do { printf(__VA_ARGS__); kprintf(__VA_ARGS__); } while (0)
55
56/*#define USE_VM_MAP_WIRE - may re-enable later when non-mapped allocations are added. */
57
58
59/*********************************************************************************************************************************
60* Structures and Typedefs *
61*********************************************************************************************************************************/
62/**
63 * The Darwin version of the memory object structure.
64 */
65typedef struct RTR0MEMOBJDARWIN
66{
67 /** The core structure. */
68 RTR0MEMOBJINTERNAL Core;
69 /** Pointer to the memory descriptor created for allocated and locked memory. */
70 IOMemoryDescriptor *pMemDesc;
71 /** Pointer to the memory mapping object for mapped memory. */
72 IOMemoryMap *pMemMap;
73} RTR0MEMOBJDARWIN, *PRTR0MEMOBJDARWIN;
74
75
76/**
77 * Touch the pages to force the kernel to create or write-enable the page table
78 * entries.
79 *
80 * This is necessary since the kernel gets upset if we take a page fault when
81 * preemption is disabled and/or we own a simple lock (same thing). It has no
82 * problems with us disabling interrupts when taking the traps, weird stuff.
83 *
84 * (This is basically a way of invoking vm_fault on a range of pages.)
85 *
86 * @param pv Pointer to the first page.
87 * @param cb The number of bytes.
88 */
89static void rtR0MemObjDarwinTouchPages(void *pv, size_t cb)
90{
91 uint32_t volatile *pu32 = (uint32_t volatile *)pv;
92 for (;;)
93 {
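        /* Note: a locked compare-exchange with identical compare and exchange values never
           changes the data, but on x86 the locked cmpxchg always issues a write cycle, so
           the fault handler has to install a present, writable PTE for the page. */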
94 ASMAtomicCmpXchgU32(pu32, 0xdeadbeef, 0xdeadbeef);
95 if (cb <= PAGE_SIZE)
96 break;
97 cb -= PAGE_SIZE;
98 pu32 += PAGE_SIZE / sizeof(uint32_t);
99 }
100}
101
102
103/**
104 * Read (sniff) every page in the range to make sure there are some page tables
105 * entries backing it.
106 *
107 * This is just to be sure vm_protect didn't remove stuff without re-adding it
108 * if someone should try to write-protect something.
109 *
110 * @param pv Pointer to the first page.
111 * @param cb The number of bytes.
112 */
113static void rtR0MemObjDarwinSniffPages(void const *pv, size_t cb)
114{
115 uint32_t volatile *pu32 = (uint32_t volatile *)pv;
116 uint32_t volatile u32Counter = 0;
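    /* Note: pu32 is volatile, so the reads below cannot be optimized away; u32Counter
       merely consumes the values. */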
117 for (;;)
118 {
119 u32Counter += *pu32;
120
121 if (cb <= PAGE_SIZE)
122 break;
123 cb -= PAGE_SIZE;
124 pu32 += PAGE_SIZE / sizeof(uint32_t);
125 }
126}
127
128
129/**
130 * Gets the virtual memory map the specified object is mapped into.
131 *
132 * @returns VM map handle on success, NULL if no map.
133 * @param pMem The memory object.
134 */
135DECLINLINE(vm_map_t) rtR0MemObjDarwinGetMap(PRTR0MEMOBJINTERNAL pMem)
136{
137 switch (pMem->enmType)
138 {
139 case RTR0MEMOBJTYPE_PAGE:
140 case RTR0MEMOBJTYPE_LOW:
141 case RTR0MEMOBJTYPE_CONT:
142 return kernel_map;
143
144 case RTR0MEMOBJTYPE_PHYS:
145 case RTR0MEMOBJTYPE_PHYS_NC:
146 if (pMem->pv)
147 return kernel_map;
148 return NULL;
149
150 case RTR0MEMOBJTYPE_LOCK:
151 return pMem->u.Lock.R0Process == NIL_RTR0PROCESS
152 ? kernel_map
153 : get_task_map((task_t)pMem->u.Lock.R0Process);
154
155 case RTR0MEMOBJTYPE_RES_VIRT:
156 return pMem->u.ResVirt.R0Process == NIL_RTR0PROCESS
157 ? kernel_map
158 : get_task_map((task_t)pMem->u.ResVirt.R0Process);
159
160 case RTR0MEMOBJTYPE_MAPPING:
161 return pMem->u.Mapping.R0Process == NIL_RTR0PROCESS
162 ? kernel_map
163 : get_task_map((task_t)pMem->u.Mapping.R0Process);
164
165 default:
166 return NULL;
167 }
168}
169
170#if 0 /* not necessary after all */
171/* My vm_map mockup. */
172struct my_vm_map
173{
174 struct { char pad[8]; } lock;
175 struct my_vm_map_header
176 {
177 struct vm_map_links
178 {
179 void *prev;
180 void *next;
181 vm_map_offset_t start;
182 vm_map_offset_t end;
183 } links;
184 int nentries;
185 boolean_t entries_pageable;
186 } hdr;
187 pmap_t pmap;
188 vm_map_size_t size;
189};
190
191
192/**
193 * Gets the minimum map address, this is similar to get_map_min.
194 *
195 * @returns The start address of the map.
196 * @param pMap The map.
197 */
198static vm_map_offset_t rtR0MemObjDarwinGetMapMin(vm_map_t pMap)
199{
200 /* Lazy discovery of the correct offset. The Apple guys are a wonderfully secretive bunch. */
201 static int32_t volatile s_offAdjust = INT32_MAX;
202 int32_t off = s_offAdjust;
203 if (off == INT32_MAX)
204 {
205 for (off = 0; ; off += sizeof(pmap_t))
206 {
207 if (*(pmap_t *)((uint8_t *)kernel_map + off) == kernel_pmap)
208 break;
209 AssertReturn(off <= RT_MAX(RT_OFFSETOF(struct my_vm_map, pmap) * 4, 1024), 0x1000);
210 }
211 ASMAtomicWriteS32(&s_offAdjust, off - RT_OFFSETOF(struct my_vm_map, pmap));
212 }
213
214 /* calculate it. */
215 struct my_vm_map *pMyMap = (struct my_vm_map *)((uint8_t *)pMap + off);
216 return pMyMap->hdr.links.start;
217}
218#endif /* unused */
219
220#ifdef RT_STRICT
221# if 0 /* unused */
222
223/**
224 * Read from a physical page.
225 *
226 * @param HCPhys The address to start reading at.
227 * @param cb How many bytes to read.
228 * @param pvDst Where to put the bytes. This is zero'd on failure.
229 */
230static void rtR0MemObjDarwinReadPhys(RTHCPHYS HCPhys, size_t cb, void *pvDst)
231{
232 memset(pvDst, '\0', cb);
233
234 IOAddressRange aRanges[1] = { { (mach_vm_address_t)HCPhys, RT_ALIGN_Z(cb, PAGE_SIZE) } };
235 IOMemoryDescriptor *pMemDesc = IOMemoryDescriptor::withAddressRanges(&aRanges[0], RT_ELEMENTS(aRanges),
236 kIODirectionIn, NULL /*task*/);
237 if (pMemDesc)
238 {
239#if MAC_OS_X_VERSION_MIN_REQUIRED >= 1050
240 IOMemoryMap *pMemMap = pMemDesc->createMappingInTask(kernel_task, 0, kIOMapAnywhere | kIOMapDefaultCache);
241#else
242 IOMemoryMap *pMemMap = pMemDesc->map(kernel_task, 0, kIOMapAnywhere | kIOMapDefaultCache);
243#endif
244 if (pMemMap)
245 {
246 void const *pvSrc = (void const *)(uintptr_t)pMemMap->getVirtualAddress();
247 memcpy(pvDst, pvSrc, cb);
248 pMemMap->release();
249 }
250 else
251 MY_PRINTF("rtR0MemObjDarwinReadPhys: createMappingInTask failed; HCPhys=%llx\n", HCPhys);
252
253 pMemDesc->release();
254 }
255 else
256 MY_PRINTF("rtR0MemObjDarwinReadPhys: withAddressRanges failed; HCPhys=%llx\n", HCPhys);
257}
258
259
260/**
261 * Gets the PTE for a page.
262 *
263 * @returns the PTE.
264 * @param pvPage The virtual address to get the PTE for.
265 */
266static uint64_t rtR0MemObjDarwinGetPTE(void *pvPage)
267{
268 RTUINT64U u64;
269 RTCCUINTREG cr3 = ASMGetCR3();
270 RTCCUINTREG cr4 = ASMGetCR4();
271 bool fPAE = false;
272 bool fLMA = false;
273 if (cr4 & X86_CR4_PAE)
274 {
275 fPAE = true;
276 uint32_t fExtFeatures = ASMCpuId_EDX(0x80000001);
277 if (fExtFeatures & X86_CPUID_EXT_FEATURE_EDX_LONG_MODE)
278 {
279 uint64_t efer = ASMRdMsr(MSR_K6_EFER);
280 if (efer & MSR_K6_EFER_LMA)
281 fLMA = true;
282 }
283 }
284
285 if (fLMA)
286 {
287 /* PML4 */
288 rtR0MemObjDarwinReadPhys((cr3 & ~(RTCCUINTREG)PAGE_OFFSET_MASK) | (((uint64_t)(uintptr_t)pvPage >> X86_PML4_SHIFT) & X86_PML4_MASK) * 8, 8, &u64);
289 if (!(u64.u & X86_PML4E_P))
290 {
291 MY_PRINTF("rtR0MemObjDarwinGetPTE: %p -> PML4E !p\n", pvPage);
292 return 0;
293 }
294
295 /* PDPTR */
296 rtR0MemObjDarwinReadPhys((u64.u & ~(uint64_t)PAGE_OFFSET_MASK) | (((uintptr_t)pvPage >> X86_PDPT_SHIFT) & X86_PDPT_MASK_AMD64) * 8, 8, &u64);
297 if (!(u64.u & X86_PDPE_P))
298 {
299 MY_PRINTF("rtR0MemObjDarwinGetPTE: %p -> PDPTE !p\n", pvPage);
300 return 0;
301 }
302 if (u64.u & X86_PDPE_LM_PS)
303 return (u64.u & ~(uint64_t)(_1G -1)) | ((uintptr_t)pvPage & (_1G -1));
304
305 /* PD */
306 rtR0MemObjDarwinReadPhys((u64.u & ~(uint64_t)PAGE_OFFSET_MASK) | (((uintptr_t)pvPage >> X86_PD_PAE_SHIFT) & X86_PD_PAE_MASK) * 8, 8, &u64);
307 if (!(u64.u & X86_PDE_P))
308 {
309 MY_PRINTF("rtR0MemObjDarwinGetPTE: %p -> PDE !p\n", pvPage);
310 return 0;
311 }
312 if (u64.u & X86_PDE_PS)
313 return (u64.u & ~(uint64_t)(_2M -1)) | ((uintptr_t)pvPage & (_2M -1));
314
315 /* PT */
316 rtR0MemObjDarwinReadPhys((u64.u & ~(uint64_t)PAGE_OFFSET_MASK) | (((uintptr_t)pvPage >> X86_PT_PAE_SHIFT) & X86_PT_PAE_MASK) * 8, 8, &u64);
317 if (!(u64.u & X86_PTE_P))
318 {
319 MY_PRINTF("rtR0MemObjDarwinGetPTE: %p -> PTE !p\n", pvPage);
320 return 0;
321 }
322 return u64.u;
323 }
324
325 if (fPAE)
326 {
327 /* PDPTR */
328 rtR0MemObjDarwinReadPhys((u64.u & X86_CR3_PAE_PAGE_MASK) | (((uintptr_t)pvPage >> X86_PDPT_SHIFT) & X86_PDPT_MASK_PAE) * 8, 8, &u64);
329 if (!(u64.u & X86_PDE_P))
330 return 0;
331
332 /* PD */
333 rtR0MemObjDarwinReadPhys((u64.u & ~(uint64_t)PAGE_OFFSET_MASK) | (((uintptr_t)pvPage >> X86_PD_PAE_SHIFT) & X86_PD_PAE_MASK) * 8, 8, &u64);
334 if (!(u64.u & X86_PDE_P))
335 return 0;
336 if (u64.u & X86_PDE_PS)
337 return (u64.u & ~(uint64_t)(_2M -1)) | ((uintptr_t)pvPage & (_2M -1));
338
339 /* PT */
340 rtR0MemObjDarwinReadPhys((u64.u & ~(uint64_t)PAGE_OFFSET_MASK) | (((uintptr_t)pvPage >> X86_PT_PAE_SHIFT) & X86_PT_PAE_MASK) * 8, 8, &u64);
341 if (!(u64.u & X86_PTE_P))
342 return 0;
343 return u64.u;
344 }
345
346 /* PD */
347 rtR0MemObjDarwinReadPhys((u64.au32[0] & ~(uint32_t)PAGE_OFFSET_MASK) | (((uintptr_t)pvPage >> X86_PD_SHIFT) & X86_PD_MASK) * 4, 4, &u64);
348 if (!(u64.au32[0] & X86_PDE_P))
349 return 0;
350 if (u64.au32[0] & X86_PDE_PS)
351 return (u64.u & ~(uint64_t)(_2M -1)) | ((uintptr_t)pvPage & (_2M -1));
352
353 /* PT */
354 rtR0MemObjDarwinReadPhys((u64.au32[0] & ~(uint32_t)PAGE_OFFSET_MASK) | (((uintptr_t)pvPage >> X86_PT_SHIFT) & X86_PT_MASK) * 4, 4, &u64);
355 if (!(u64.au32[0] & X86_PTE_P))
356 return 0;
357 return u64.au32[0];
358
359 return 0;
360}
361
362# endif /* unused */
363#endif /* RT_STRICT */
364
365DECLHIDDEN(int) rtR0MemObjNativeFree(RTR0MEMOBJ pMem)
366{
367 PRTR0MEMOBJDARWIN pMemDarwin = (PRTR0MEMOBJDARWIN)pMem;
368 IPRT_DARWIN_SAVE_EFL_AC();
369
370 /*
371 * Release the IOMemoryDescriptor or/and IOMemoryMap associated with the object.
372 */
373 if (pMemDarwin->pMemDesc)
374 {
375 pMemDarwin->pMemDesc->complete();
376 pMemDarwin->pMemDesc->release();
377 pMemDarwin->pMemDesc = NULL;
378 }
379
380 if (pMemDarwin->pMemMap)
381 {
382 pMemDarwin->pMemMap->release();
383 pMemDarwin->pMemMap = NULL;
384 }
385
386 /*
387 * Release any memory that we've allocated or locked.
388 */
389 switch (pMemDarwin->Core.enmType)
390 {
391 case RTR0MEMOBJTYPE_LOW:
392 case RTR0MEMOBJTYPE_PAGE:
393 case RTR0MEMOBJTYPE_CONT:
394 break;
395
396 case RTR0MEMOBJTYPE_LOCK:
397 {
398#ifdef USE_VM_MAP_WIRE
399 vm_map_t Map = pMemDarwin->Core.u.Lock.R0Process != NIL_RTR0PROCESS
400 ? get_task_map((task_t)pMemDarwin->Core.u.Lock.R0Process)
401 : kernel_map;
402 kern_return_t kr = vm_map_unwire(Map,
403 (vm_map_offset_t)pMemDarwin->Core.pv,
404 (vm_map_offset_t)pMemDarwin->Core.pv + pMemDarwin->Core.cb,
405 0 /* not user */);
406 AssertRC(kr == KERN_SUCCESS); /** @todo don't ignore... */
407#endif
408 break;
409 }
410
411 case RTR0MEMOBJTYPE_PHYS:
412 /*if (pMemDarwin->Core.u.Phys.fAllocated)
413 IOFreePhysical(pMemDarwin->Core.u.Phys.PhysBase, pMemDarwin->Core.cb);*/
414 Assert(!pMemDarwin->Core.u.Phys.fAllocated);
415 break;
416
417 case RTR0MEMOBJTYPE_PHYS_NC:
418 AssertMsgFailed(("RTR0MEMOBJTYPE_PHYS_NC\n"));
419 IPRT_DARWIN_RESTORE_EFL_AC();
420 return VERR_INTERNAL_ERROR;
421
422 case RTR0MEMOBJTYPE_RES_VIRT:
423 AssertMsgFailed(("RTR0MEMOBJTYPE_RES_VIRT\n"));
424 IPRT_DARWIN_RESTORE_EFL_AC();
425 return VERR_INTERNAL_ERROR;
426
427 case RTR0MEMOBJTYPE_MAPPING:
428 /* nothing to do here. */
429 break;
430
431 default:
432 AssertMsgFailed(("enmType=%d\n", pMemDarwin->Core.enmType));
433 IPRT_DARWIN_RESTORE_EFL_AC();
434 return VERR_INTERNAL_ERROR;
435 }
436
437 IPRT_DARWIN_RESTORE_EFL_AC();
438 return VINF_SUCCESS;
439}
440
441
442
443/**
444 * Kernel memory alloc worker that uses inTaskWithPhysicalMask.
445 *
446 * @returns IPRT status code.
447 * @retval VERR_ADDRESS_TOO_BIG try another way.
448 *
449 * @param ppMem Where to return the memory object.
450 * @param cb The page aligned memory size.
451 * @param fExecutable Whether the mapping needs to be executable.
452 * @param fContiguous Whether the backing memory needs to be contiguous.
453 * @param PhysMask The mask for the backing memory (i.e. range). Use 0 if
454 * you don't care that much or is speculating.
455 * @param MaxPhysAddr The max address to verify the result against. Use
456 * UINT64_MAX if it doesn't matter.
457 * @param enmType The object type.
458 * @param uAlignment The allocation alignment (in bytes).
459 */
460static int rtR0MemObjNativeAllocWorker(PPRTR0MEMOBJINTERNAL ppMem, size_t cb,
461 bool fExecutable, bool fContiguous,
462 mach_vm_address_t PhysMask, uint64_t MaxPhysAddr,
463 RTR0MEMOBJTYPE enmType, size_t uAlignment)
464{
465 int rc;
466
467 /*
468 * Try inTaskWithPhysicalMask first, but since we don't quite trust that it
469 * actually respects the physical memory mask (10.5.x is certainly busted),
470 * we'll use rtR0MemObjNativeAllocCont as a fallback for dealing with that.
471 *
472 * The kIOMemoryKernelUserShared flag just forces the result to be page aligned.
473 *
474 * The kIOMemoryMapperNone flag is required since 10.8.2 (IOMMU changes?).
475 */
476
477 /* This is an old fudge from the snow leopard days: "Is it only on snow leopard?
478 Seen allocating memory for the VM structure, last page corrupted or
479 inaccessible." Made it only apply to snow leopard and older for now. */
480 size_t cbFudged = cb;
481 if (version_major >= 11 /* 11 = 10.7.x = Lion. */)
482 { /* likely */ }
483 else
484 cbFudged += PAGE_SIZE;
485
486 IOOptionBits fOptions = kIOMemoryKernelUserShared | kIODirectionInOut;
487 if (fContiguous)
488 {
489 fOptions |= kIOMemoryPhysicallyContiguous;
490 if ( version_major > 12
491 || (version_major == 12 && version_minor >= 2) /* 10.8.2 = Mountain Kitten */ )
492 fOptions |= kIOMemoryHostPhysicallyContiguous; /* (Just to make ourselves clear, in case the xnu code changes.) */
493 }
494 if (version_major >= 12 /* 12 = 10.8.x = Mountain Kitten */)
495 fOptions |= kIOMemoryMapperNone;
496
497 /* The public initWithPhysicalMask virtual method appeared in 10.7.0; in
498 versions 10.5.0 up to 10.7.0 it was private, and in 10.4.8-10.5.0 it was
499 x86 only and didn't have the alignment parameter (slot was different too). */
500 uint64_t uAlignmentActual = uAlignment;
501 IOBufferMemoryDescriptor *pMemDesc;
502#if __MAC_OS_X_VERSION_MAX_ALLOWED >= 1070
503 if (version_major >= 11 /* 11 = 10.7.x = Lion, could probably allow 10.5.0+ here if we really wanted to. */)
504 {
505 /* Starting with 10.6.x the physical mask is ignored if alignment is higher
506 than 1. The assumption seems to be that inTaskWithPhysicalMask() should
507 be used and the alignment inferred from the PhysMask argument. */
508 if (MaxPhysAddr != UINT64_MAX)
509 {
510 Assert(RT_ALIGN_64(PhysMask, uAlignment) == PhysMask);
511 uAlignmentActual = 1;
512 }
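            /* Note: the assertion above means the low bits of PhysMask covering uAlignment
               are clear, so any address the mask permits is already uAlignment-aligned and
               nothing is lost by passing an explicit alignment of 1. */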
513
514 pMemDesc = new IOBufferMemoryDescriptor;
515 if (pMemDesc)
516 {
517 if (pMemDesc->initWithPhysicalMask(kernel_task, fOptions, cbFudged, uAlignmentActual, PhysMask))
518 { /* likely */ }
519 else
520 {
521 pMemDesc->release();
522 pMemDesc = NULL;
523 }
524 }
525 }
526 else
527#endif
528 pMemDesc = IOBufferMemoryDescriptor::inTaskWithPhysicalMask(kernel_task, fOptions, cbFudged, PhysMask);
529 if (pMemDesc)
530 {
531 IOReturn IORet = pMemDesc->prepare(kIODirectionInOut);
532 if (IORet == kIOReturnSuccess)
533 {
534 void *pv = pMemDesc->getBytesNoCopy(0, cbFudged);
535 if (pv)
536 {
537 /*
538 * Check that it's all below MaxPhysAddr.
539 */
540 addr64_t AddrPrev = 0;
541 MaxPhysAddr &= ~(uint64_t)PAGE_OFFSET_MASK;
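                /* (Page-align the limit so it can be compared directly against the
                   page-aligned physical addresses returned for each page below.) */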
542 for (IOByteCount off = 0; off < cb; off += PAGE_SIZE)
543 {
544#ifdef __LP64__
545 addr64_t Addr = pMemDesc->getPhysicalSegment(off, NULL, kIOMemoryMapperNone);
546#else
547 addr64_t Addr = pMemDesc->getPhysicalSegment64(off, NULL);
548#endif
549 if ( Addr > MaxPhysAddr
550 || !Addr
551 || (Addr & PAGE_OFFSET_MASK)
552 || ( fContiguous
553 && off
554 && Addr != AddrPrev + PAGE_SIZE))
555 {
556 /* Buggy API, try to allocate the memory another way. */
557 pMemDesc->complete();
558 pMemDesc->release();
559 if (PhysMask)
560 {
561 kprintf("rtR0MemObjNativeAllocWorker: off=%zx Addr=%llx AddrPrev=%llx MaxPhysAddr=%llx PhysMask=%llx fContiguous=%d fOptions=%#x - buggy API!\n",
562 (size_t)off, Addr, AddrPrev, MaxPhysAddr, PhysMask, fContiguous, fOptions);
563 LogRel(("rtR0MemObjNativeAllocWorker: off=%zx Addr=%llx AddrPrev=%llx MaxPhysAddr=%llx PhysMask=%llx fContiguous=%RTbool fOptions=%#x - buggy API!\n",
564 (size_t)off, Addr, AddrPrev, MaxPhysAddr, PhysMask, fContiguous, fOptions));
565 }
566 return VERR_ADDRESS_TOO_BIG;
567 }
568 AddrPrev = Addr;
569 }
570
571 /*
572 * Check that it's aligned correctly.
573 */
574 if ((uintptr_t)pv & (uAlignment - 1))
575 {
576 pMemDesc->complete();
577 pMemDesc->release();
578 if (PhysMask)
579 {
580 kprintf("rtR0MemObjNativeAllocWorker: pv=%p uAlignment=%#zx (MaxPhysAddr=%llx PhysMask=%llx fContiguous=%d fOptions=%#x) - buggy API!!\n",
581 pv, uAlignment, MaxPhysAddr, PhysMask, fContiguous, fOptions);
582 LogRel(("rtR0MemObjNativeAllocWorker: pv=%p uAlignment=%#zx (MaxPhysAddr=%llx PhysMask=%llx fContiguous=%RTbool fOptions=%#x) - buggy API!\n",
583 pv, uAlignment, MaxPhysAddr, PhysMask, fContiguous, fOptions));
584 }
585 return VERR_NOT_SUPPORTED;
586 }
587
588#ifdef RT_STRICT
589 /* check that the memory is actually mapped. */
590 //addr64_t Addr = pMemDesc->getPhysicalSegment64(0, NULL);
591 //printf("rtR0MemObjNativeAllocWorker: pv=%p %8llx %8llx\n", pv, rtR0MemObjDarwinGetPTE(pv), Addr);
592 RTTHREADPREEMPTSTATE State = RTTHREADPREEMPTSTATE_INITIALIZER;
593 RTThreadPreemptDisable(&State);
594 rtR0MemObjDarwinTouchPages(pv, cb);
595 RTThreadPreemptRestore(&State);
596#endif
597
598 /*
599 * Create the IPRT memory object.
600 */
601 PRTR0MEMOBJDARWIN pMemDarwin = (PRTR0MEMOBJDARWIN)rtR0MemObjNew(sizeof(*pMemDarwin), enmType, pv, cb);
602 if (pMemDarwin)
603 {
604 if (fContiguous)
605 {
606#ifdef __LP64__
607 addr64_t PhysBase64 = pMemDesc->getPhysicalSegment(0, NULL, kIOMemoryMapperNone);
608#else
609 addr64_t PhysBase64 = pMemDesc->getPhysicalSegment64(0, NULL);
610#endif
611 RTHCPHYS PhysBase = PhysBase64; Assert(PhysBase == PhysBase64);
612 if (enmType == RTR0MEMOBJTYPE_CONT)
613 pMemDarwin->Core.u.Cont.Phys = PhysBase;
614 else if (enmType == RTR0MEMOBJTYPE_PHYS)
615 pMemDarwin->Core.u.Phys.PhysBase = PhysBase;
616 else
617 AssertMsgFailed(("enmType=%d\n", enmType));
618 }
619
620#if 1 /* Experimental code. */
621 if (fExecutable)
622 {
623 rc = rtR0MemObjNativeProtect(&pMemDarwin->Core, 0, cb, RTMEM_PROT_READ | RTMEM_PROT_WRITE | RTMEM_PROT_EXEC);
624# ifdef RT_STRICT
625 /* check that the memory is actually mapped. */
626 RTTHREADPREEMPTSTATE State2 = RTTHREADPREEMPTSTATE_INITIALIZER;
627 RTThreadPreemptDisable(&State2);
628 rtR0MemObjDarwinTouchPages(pv, cb);
629 RTThreadPreemptRestore(&State2);
630# endif
631
632 /* Bug 6226: Ignore KERN_PROTECTION_FAILURE on Snow Leopard and older. */
633 if ( rc == VERR_PERMISSION_DENIED
634 && version_major <= 10 /* 10 = 10.6.x = Snow Leopard. */)
635 rc = VINF_SUCCESS;
636 }
637 else
638#endif
639 rc = VINF_SUCCESS;
640 if (RT_SUCCESS(rc))
641 {
642 pMemDarwin->pMemDesc = pMemDesc;
643 *ppMem = &pMemDarwin->Core;
644 return VINF_SUCCESS;
645 }
646
647 rtR0MemObjDelete(&pMemDarwin->Core);
648 }
649
650 if (enmType == RTR0MEMOBJTYPE_PHYS_NC)
651 rc = VERR_NO_PHYS_MEMORY;
652 else if (enmType == RTR0MEMOBJTYPE_LOW)
653 rc = VERR_NO_LOW_MEMORY;
654 else if (enmType == RTR0MEMOBJTYPE_CONT)
655 rc = VERR_NO_CONT_MEMORY;
656 else
657 rc = VERR_NO_MEMORY;
658 }
659 else
660 rc = VERR_MEMOBJ_INIT_FAILED;
661
662 pMemDesc->complete();
663 }
664 else
665 rc = RTErrConvertFromDarwinIO(IORet);
666 pMemDesc->release();
667 }
668 else
669 rc = VERR_MEMOBJ_INIT_FAILED;
670 Assert(rc != VERR_ADDRESS_TOO_BIG);
671 return rc;
672}
673
674
675DECLHIDDEN(int) rtR0MemObjNativeAllocPage(PPRTR0MEMOBJINTERNAL ppMem, size_t cb, bool fExecutable)
676{
677 IPRT_DARWIN_SAVE_EFL_AC();
678
679 int rc = rtR0MemObjNativeAllocWorker(ppMem, cb, fExecutable, false /* fContiguous */,
680 0 /* PhysMask */, UINT64_MAX, RTR0MEMOBJTYPE_PAGE, PAGE_SIZE);
681
682 IPRT_DARWIN_RESTORE_EFL_AC();
683 return rc;
684}
685
686
687DECLHIDDEN(int) rtR0MemObjNativeAllocLow(PPRTR0MEMOBJINTERNAL ppMem, size_t cb, bool fExecutable)
688{
689 IPRT_DARWIN_SAVE_EFL_AC();
690
691 /*
692 * Try IOMallocPhysical/IOMallocAligned first.
693 * Then try optimistically without a physical address mask, which will always
694 * end up using IOMallocAligned.
695 *
696 * (See bug comment in the worker and IOBufferMemoryDescriptor::initWithPhysicalMask.)
697 */
698 int rc = rtR0MemObjNativeAllocWorker(ppMem, cb, fExecutable, false /* fContiguous */,
699 ~(uint32_t)PAGE_OFFSET_MASK, _4G - PAGE_SIZE, RTR0MEMOBJTYPE_LOW, PAGE_SIZE);
700 if (rc == VERR_ADDRESS_TOO_BIG)
701 rc = rtR0MemObjNativeAllocWorker(ppMem, cb, fExecutable, false /* fContiguous */,
702 0 /* PhysMask */, _4G - PAGE_SIZE, RTR0MEMOBJTYPE_LOW, PAGE_SIZE);
703
704 IPRT_DARWIN_RESTORE_EFL_AC();
705 return rc;
706}
707
708
709DECLHIDDEN(int) rtR0MemObjNativeAllocCont(PPRTR0MEMOBJINTERNAL ppMem, size_t cb, bool fExecutable)
710{
711 IPRT_DARWIN_SAVE_EFL_AC();
712
713 int rc = rtR0MemObjNativeAllocWorker(ppMem, cb, fExecutable, true /* fContiguous */,
714 ~(uint32_t)PAGE_OFFSET_MASK, _4G - PAGE_SIZE,
715 RTR0MEMOBJTYPE_CONT, PAGE_SIZE);
716
717 /*
718 * Workaround for bogus IOKernelAllocateContiguous behavior, just in case.
719 * cb <= PAGE_SIZE allocations take a different path, using a different allocator.
720 */
721 if (RT_FAILURE(rc) && cb <= PAGE_SIZE)
722 rc = rtR0MemObjNativeAllocWorker(ppMem, cb + PAGE_SIZE, fExecutable, true /* fContiguous */,
723 ~(uint32_t)PAGE_OFFSET_MASK, _4G - PAGE_SIZE,
724 RTR0MEMOBJTYPE_CONT, PAGE_SIZE);
725 IPRT_DARWIN_RESTORE_EFL_AC();
726 return rc;
727}
728
729
730DECLHIDDEN(int) rtR0MemObjNativeAllocPhys(PPRTR0MEMOBJINTERNAL ppMem, size_t cb, RTHCPHYS PhysHighest, size_t uAlignment)
731{
732 if (uAlignment != PAGE_SIZE)
733 {
734 /* See rtR0MemObjNativeAllocWorker: */
735 if (version_major < 9 /* 9 = 10.5.x = Leopard */)
736 return VERR_NOT_SUPPORTED;
737 }
738
739 IPRT_DARWIN_SAVE_EFL_AC();
740
741 /*
742 * Translate the PhysHighest address into a mask.
743 */
744 int rc;
745 if (PhysHighest == NIL_RTHCPHYS)
746 rc = rtR0MemObjNativeAllocWorker(ppMem, cb, false /* fExecutable */, true /* fContiguous */,
747 uAlignment <= PAGE_SIZE ? 0 : ~(mach_vm_address_t)(uAlignment - 1) /* PhysMask*/,
748 UINT64_MAX, RTR0MEMOBJTYPE_PHYS, uAlignment);
749 else
750 {
751 mach_vm_address_t PhysMask = 0;
752 PhysMask = ~(mach_vm_address_t)0;
753 while (PhysMask > (PhysHighest | PAGE_OFFSET_MASK))
754 PhysMask >>= 1;
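        /* For example, PhysHighest = 0xDFFFFFFF gives PhysHighest | PAGE_OFFSET_MASK = 0xDFFFFFFF,
           so the all-ones mask is shifted down to 0x7FFFFFFF (2 GiB - 1), the largest value of
           the form 2^n - 1 not exceeding the limit. */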
755 AssertReturn(PhysMask + 1 >= cb, VERR_INVALID_PARAMETER);
756 PhysMask &= ~(mach_vm_address_t)(uAlignment - 1);
757
758 rc = rtR0MemObjNativeAllocWorker(ppMem, cb, false /* fExecutable */, true /* fContiguous */,
759 PhysMask, PhysHighest, RTR0MEMOBJTYPE_PHYS, uAlignment);
760 }
761
762 IPRT_DARWIN_RESTORE_EFL_AC();
763 return rc;
764}
765
766
767DECLHIDDEN(int) rtR0MemObjNativeAllocPhysNC(PPRTR0MEMOBJINTERNAL ppMem, size_t cb, RTHCPHYS PhysHighest)
768{
769 /** @todo rtR0MemObjNativeAllocPhys / darwin.
770 * This might be a bit problematic and may very well require having to create our own
771 * object which we populate with pages but without mapping it into any address space.
772 * Estimate is 2-3 days.
773 */
774 RT_NOREF(ppMem, cb, PhysHighest);
775 return VERR_NOT_SUPPORTED;
776}
777
778
779DECLHIDDEN(int) rtR0MemObjNativeEnterPhys(PPRTR0MEMOBJINTERNAL ppMem, RTHCPHYS Phys, size_t cb, uint32_t uCachePolicy)
780{
781 AssertReturn(uCachePolicy == RTMEM_CACHE_POLICY_DONT_CARE, VERR_NOT_SUPPORTED);
782 IPRT_DARWIN_SAVE_EFL_AC();
783
784 /*
785 * Create a descriptor for it (the validation is always true on Intel Macs, but
786 * since it does no harm we keep it in).
787 */
788 int rc = VERR_ADDRESS_TOO_BIG;
789 IOAddressRange aRanges[1] = { { Phys, cb } };
790 if ( aRanges[0].address == Phys
791 && aRanges[0].length == cb)
792 {
793 IOMemoryDescriptor *pMemDesc = IOMemoryDescriptor::withAddressRanges(&aRanges[0], RT_ELEMENTS(aRanges),
794 kIODirectionInOut, NULL /*task*/);
795 if (pMemDesc)
796 {
797#ifdef __LP64__
798 Assert(Phys == pMemDesc->getPhysicalSegment(0, NULL, kIOMemoryMapperNone));
799#else
800 Assert(Phys == pMemDesc->getPhysicalSegment64(0, NULL));
801#endif
802
803 /*
804 * Create the IPRT memory object.
805 */
806 PRTR0MEMOBJDARWIN pMemDarwin = (PRTR0MEMOBJDARWIN)rtR0MemObjNew(sizeof(*pMemDarwin), RTR0MEMOBJTYPE_PHYS, NULL, cb);
807 if (pMemDarwin)
808 {
809 pMemDarwin->Core.u.Phys.PhysBase = Phys;
810 pMemDarwin->Core.u.Phys.fAllocated = false;
811 pMemDarwin->Core.u.Phys.uCachePolicy = uCachePolicy;
812 pMemDarwin->pMemDesc = pMemDesc;
813 *ppMem = &pMemDarwin->Core;
814 IPRT_DARWIN_RESTORE_EFL_AC();
815 return VINF_SUCCESS;
816 }
817
818 rc = VERR_NO_MEMORY;
819 pMemDesc->release();
820 }
821 else
822 rc = VERR_MEMOBJ_INIT_FAILED;
823 }
824 else
825 AssertMsgFailed(("%#llx %llx\n", (unsigned long long)Phys, (unsigned long long)cb));
826 IPRT_DARWIN_RESTORE_EFL_AC();
827 return rc;
828}
829
830
831/**
832 * Internal worker for locking down pages.
833 *
834 * @return IPRT status code.
835 *
836 * @param ppMem Where to store the memory object pointer.
837 * @param pv First page.
838 * @param cb Number of bytes.
839 * @param fAccess The desired access, a combination of RTMEM_PROT_READ
840 * and RTMEM_PROT_WRITE.
841 * @param Task The task \a pv and \a cb refers to.
842 */
843static int rtR0MemObjNativeLock(PPRTR0MEMOBJINTERNAL ppMem, void *pv, size_t cb, uint32_t fAccess, task_t Task)
844{
845 IPRT_DARWIN_SAVE_EFL_AC();
846 NOREF(fAccess);
847#ifdef USE_VM_MAP_WIRE
848 vm_map_t Map = get_task_map(Task);
849 Assert(Map);
850
851 /*
852 * First try lock the memory.
853 */
854 int rc = VERR_LOCK_FAILED;
855 kern_return_t kr = vm_map_wire(get_task_map(Task),
856 (vm_map_offset_t)pv,
857 (vm_map_offset_t)pv + cb,
858 VM_PROT_DEFAULT,
859 0 /* not user */);
860 if (kr == KERN_SUCCESS)
861 {
862 /*
863 * Create the IPRT memory object.
864 */
865 PRTR0MEMOBJDARWIN pMemDarwin = (PRTR0MEMOBJDARWIN)rtR0MemObjNew(sizeof(*pMemDarwin), RTR0MEMOBJTYPE_LOCK, pv, cb);
866 if (pMemDarwin)
867 {
868 pMemDarwin->Core.u.Lock.R0Process = (RTR0PROCESS)Task;
869 *ppMem = &pMemDarwin->Core;
870
871 IPRT_DARWIN_RESTORE_EFL_AC();
872 return VINF_SUCCESS;
873 }
874
875 kr = vm_map_unwire(get_task_map(Task), (vm_map_offset_t)pv, (vm_map_offset_t)pv + cb, 0 /* not user */);
876 Assert(kr == KERN_SUCCESS);
877 rc = VERR_NO_MEMORY;
878 }
879
880#else
881
882 /*
883 * Create a descriptor and try lock it (prepare).
884 */
885 int rc = VERR_MEMOBJ_INIT_FAILED;
886 IOMemoryDescriptor *pMemDesc = IOMemoryDescriptor::withAddressRange((vm_address_t)pv, cb, kIODirectionInOut, Task);
887 if (pMemDesc)
888 {
889 IOReturn IORet = pMemDesc->prepare(kIODirectionInOut);
890 if (IORet == kIOReturnSuccess)
891 {
892 /*
893 * Create the IPRT memory object.
894 */
895 PRTR0MEMOBJDARWIN pMemDarwin = (PRTR0MEMOBJDARWIN)rtR0MemObjNew(sizeof(*pMemDarwin), RTR0MEMOBJTYPE_LOCK, pv, cb);
896 if (pMemDarwin)
897 {
898 pMemDarwin->Core.u.Lock.R0Process = (RTR0PROCESS)Task;
899 pMemDarwin->pMemDesc = pMemDesc;
900 *ppMem = &pMemDarwin->Core;
901
902 IPRT_DARWIN_RESTORE_EFL_AC();
903 return VINF_SUCCESS;
904 }
905
906 pMemDesc->complete();
907 rc = VERR_NO_MEMORY;
908 }
909 else
910 rc = VERR_LOCK_FAILED;
911 pMemDesc->release();
912 }
913#endif
914 IPRT_DARWIN_RESTORE_EFL_AC();
915 return rc;
916}
917
918
919DECLHIDDEN(int) rtR0MemObjNativeLockUser(PPRTR0MEMOBJINTERNAL ppMem, RTR3PTR R3Ptr, size_t cb, uint32_t fAccess, RTR0PROCESS R0Process)
920{
921 return rtR0MemObjNativeLock(ppMem, (void *)R3Ptr, cb, fAccess, (task_t)R0Process);
922}
923
924
925DECLHIDDEN(int) rtR0MemObjNativeLockKernel(PPRTR0MEMOBJINTERNAL ppMem, void *pv, size_t cb, uint32_t fAccess)
926{
927 return rtR0MemObjNativeLock(ppMem, pv, cb, fAccess, kernel_task);
928}
929
930
931DECLHIDDEN(int) rtR0MemObjNativeReserveKernel(PPRTR0MEMOBJINTERNAL ppMem, void *pvFixed, size_t cb, size_t uAlignment)
932{
933 RT_NOREF(ppMem, pvFixed, cb, uAlignment);
934 return VERR_NOT_SUPPORTED;
935}
936
937
938DECLHIDDEN(int) rtR0MemObjNativeReserveUser(PPRTR0MEMOBJINTERNAL ppMem, RTR3PTR R3PtrFixed, size_t cb, size_t uAlignment, RTR0PROCESS R0Process)
939{
940 RT_NOREF(ppMem, R3PtrFixed, cb, uAlignment, R0Process);
941 return VERR_NOT_SUPPORTED;
942}
943
944
945DECLHIDDEN(int) rtR0MemObjNativeMapKernel(PPRTR0MEMOBJINTERNAL ppMem, RTR0MEMOBJ pMemToMap, void *pvFixed, size_t uAlignment,
946 unsigned fProt, size_t offSub, size_t cbSub)
947{
948 RT_NOREF(fProt);
949 AssertReturn(pvFixed == (void *)-1, VERR_NOT_SUPPORTED);
950
951 /*
952 * Check that the specified alignment is supported.
953 */
954 if (uAlignment > PAGE_SIZE)
955 return VERR_NOT_SUPPORTED;
956 Assert(!offSub || cbSub);
957
958 IPRT_DARWIN_SAVE_EFL_AC();
959
960 /*
961 * Must have a memory descriptor that we can map.
962 */
963 int rc = VERR_INVALID_PARAMETER;
964 PRTR0MEMOBJDARWIN pMemToMapDarwin = (PRTR0MEMOBJDARWIN)pMemToMap;
965 if (pMemToMapDarwin->pMemDesc)
966 {
967 /* The kIOMapPrefault option was added in 10.10.0; it causes the PTEs to be populated
968 with INTEL_PTE_WIRED set, just like we desire (see further down). However, until
969 10.13.0 it was not available for use on kernel mappings. Oh, fudge. */
970#if MAC_OS_X_VERSION_MIN_REQUIRED >= 1050
971 static uint32_t volatile s_fOptions = UINT32_MAX;
972 uint32_t fOptions = s_fOptions;
973 if (RT_UNLIKELY(fOptions == UINT32_MAX))
974 s_fOptions = fOptions = version_major >= 17 ? 0x10000000 /*kIOMapPrefault*/ : 0; /* Since 10.13.0 (High Sierra). */
975
976 IOMemoryMap *pMemMap = pMemToMapDarwin->pMemDesc->createMappingInTask(kernel_task,
977 0,
978 kIOMapAnywhere | kIOMapDefaultCache | fOptions,
979 offSub,
980 cbSub);
981#else
982 IOMemoryMap *pMemMap = pMemToMapDarwin->pMemDesc->map(kernel_task,
983 0,
984 kIOMapAnywhere | kIOMapDefaultCache,
985 offSub,
986 cbSub);
987#endif
988 if (pMemMap)
989 {
990 IOVirtualAddress VirtAddr = pMemMap->getVirtualAddress();
991 void *pv = (void *)(uintptr_t)VirtAddr;
992 if ((uintptr_t)pv == VirtAddr && pv != NULL)
993 {
994//#ifdef __LP64__
995// addr64_t Addr = pMemToMapDarwin->pMemDesc->getPhysicalSegment(offSub, NULL, kIOMemoryMapperNone);
996//#else
997// addr64_t Addr = pMemToMapDarwin->pMemDesc->getPhysicalSegment64(offSub, NULL);
998//#endif
999// MY_PRINTF("pv=%p: %8llx %8llx\n", pv, rtR0MemObjDarwinGetPTE(pv), Addr);
1000
1001// /*
1002// * Explicitly lock it so that we're sure it is present and that
1003// * its PTEs cannot be recycled.
1004// * Note! withAddressRange() doesn't work as it adds kIOMemoryTypeVirtual64
1005// * to the options which causes prepare() to not wire the pages.
1006// * This is probably a bug.
1007// */
1008// IOAddressRange Range = { (mach_vm_address_t)pv, cbSub };
1009// IOMemoryDescriptor *pMemDesc = IOMemoryDescriptor::withOptions(&Range,
1010// 1 /* count */,
1011// 0 /* offset */,
1012// kernel_task,
1013// kIODirectionInOut | kIOMemoryTypeVirtual,
1014// kIOMapperSystem);
1015// if (pMemDesc)
1016// {
1017// IOReturn IORet = pMemDesc->prepare(kIODirectionInOut);
1018// if (IORet == kIOReturnSuccess)
1019// {
1020 /* HACK ALERT! On kernels older than 10.10 (xnu version 14), we need to fault in
1021 the pages here so they can safely be accessed from inside simple
1022 locks and when preemption is disabled (no page-ins allowed).
1023 Note! This touching does not cause INTEL_PTE_WIRED (bit 10) to be set as we go
1024 thru general #PF and vm_fault doesn't figure it should be wired or something. */
1025 rtR0MemObjDarwinTouchPages(pv, cbSub ? cbSub : pMemToMap->cb);
1026 /** @todo First, the memory should've been mapped by now, and second, it
1027 * should have the wired attribute in the PTE (bit 10). Neither seems to
1028 * be the case. The disabled locking code doesn't make any difference,
1029 * which is extremely odd, and breaks rtR0MemObjNativeGetPagePhysAddr
1030 * (getPhysicalSegment64 -> 64 for the lock descriptor. */
1031//#ifdef __LP64__
1032// addr64_t Addr2 = pMemToMapDarwin->pMemDesc->getPhysicalSegment(offSub, NULL, kIOMemoryMapperNone);
1033//#else
1034// addr64_t Addr2 = pMemToMapDarwin->pMemDesc->getPhysicalSegment64(offSub, NULL);
1035//#endif
1036// MY_PRINTF("pv=%p: %8llx %8llx (%d)\n", pv, rtR0MemObjDarwinGetPTE(pv), Addr2, 2);
1037
1038 /*
1039 * Create the IPRT memory object.
1040 */
1041 PRTR0MEMOBJDARWIN pMemDarwin = (PRTR0MEMOBJDARWIN)rtR0MemObjNew(sizeof(*pMemDarwin), RTR0MEMOBJTYPE_MAPPING,
1042 pv, cbSub ? cbSub : pMemToMap->cb);
1043 if (pMemDarwin)
1044 {
1045 pMemDarwin->Core.u.Mapping.R0Process = NIL_RTR0PROCESS;
1046 pMemDarwin->pMemMap = pMemMap;
1047// pMemDarwin->pMemDesc = pMemDesc;
1048 *ppMem = &pMemDarwin->Core;
1049
1050 IPRT_DARWIN_RESTORE_EFL_AC();
1051 return VINF_SUCCESS;
1052 }
1053
1054// pMemDesc->complete();
1055// rc = VERR_NO_MEMORY;
1056// }
1057// else
1058// rc = RTErrConvertFromDarwinIO(IORet);
1059// pMemDesc->release();
1060// }
1061// else
1062// rc = VERR_MEMOBJ_INIT_FAILED;
1063 }
1064 else if (pv)
1065 rc = VERR_ADDRESS_TOO_BIG;
1066 else
1067 rc = VERR_MAP_FAILED;
1068 pMemMap->release();
1069 }
1070 else
1071 rc = VERR_MAP_FAILED;
1072 }
1073
1074 IPRT_DARWIN_RESTORE_EFL_AC();
1075 return rc;
1076}
1077
1078
1079DECLHIDDEN(int) rtR0MemObjNativeMapUser(PPRTR0MEMOBJINTERNAL ppMem, RTR0MEMOBJ pMemToMap, RTR3PTR R3PtrFixed, size_t uAlignment,
1080 unsigned fProt, RTR0PROCESS R0Process, size_t offSub, size_t cbSub)
1081{
1082 RT_NOREF(fProt);
1083
1084 /*
1085 * Check for unsupported things.
1086 */
1087 AssertReturn(R3PtrFixed == (RTR3PTR)-1, VERR_NOT_SUPPORTED);
1088 if (uAlignment > PAGE_SIZE)
1089 return VERR_NOT_SUPPORTED;
1090 Assert(!offSub || cbSub);
1091
1092 IPRT_DARWIN_SAVE_EFL_AC();
1093
1094 /*
1095 * Must have a memory descriptor.
1096 */
1097 int rc = VERR_INVALID_PARAMETER;
1098 PRTR0MEMOBJDARWIN pMemToMapDarwin = (PRTR0MEMOBJDARWIN)pMemToMap;
1099 if (pMemToMapDarwin->pMemDesc)
1100 {
1101#if MAC_OS_X_VERSION_MIN_REQUIRED >= 101000 /* The kIOMapPrefault option was added in 10.10.0. */
1102 IOMemoryMap *pMemMap = pMemToMapDarwin->pMemDesc->createMappingInTask((task_t)R0Process,
1103 0,
1104 kIOMapAnywhere | kIOMapDefaultCache | kIOMapPrefault,
1105 offSub,
1106 cbSub);
1107#elif MAC_OS_X_VERSION_MIN_REQUIRED >= 1050
1108 static uint32_t volatile s_fOptions = UINT32_MAX;
1109 uint32_t fOptions = s_fOptions;
1110 if (RT_UNLIKELY(fOptions == UINT32_MAX))
1111 s_fOptions = fOptions = version_major >= 14 ? 0x10000000 /*kIOMapPrefault*/ : 0; /* Since 10.10.0. */
1112 IOMemoryMap *pMemMap = pMemToMapDarwin->pMemDesc->createMappingInTask((task_t)R0Process,
1113 0,
1114 kIOMapAnywhere | kIOMapDefaultCache | fOptions,
1115 offSub,
1116 cbSub);
1117#else
1118 IOMemoryMap *pMemMap = pMemToMapDarwin->pMemDesc->map((task_t)R0Process,
1119 0,
1120 kIOMapAnywhere | kIOMapDefaultCache,
1121 offSub,
1122 cbSub);
1123#endif
1124 if (pMemMap)
1125 {
1126 IOVirtualAddress VirtAddr = pMemMap->getVirtualAddress();
1127 void *pv = (void *)(uintptr_t)VirtAddr;
1128 if ((uintptr_t)pv == VirtAddr && pv != NULL)
1129 {
1130 /*
1131 * Create the IPRT memory object.
1132 */
1133 PRTR0MEMOBJDARWIN pMemDarwin = (PRTR0MEMOBJDARWIN)rtR0MemObjNew(sizeof(*pMemDarwin), RTR0MEMOBJTYPE_MAPPING,
1134 pv, cbSub ? cbSub : pMemToMap->cb);
1135 if (pMemDarwin)
1136 {
1137 pMemDarwin->Core.u.Mapping.R0Process = R0Process;
1138 pMemDarwin->pMemMap = pMemMap;
1139 *ppMem = &pMemDarwin->Core;
1140
1141 IPRT_DARWIN_RESTORE_EFL_AC();
1142 return VINF_SUCCESS;
1143 }
1144
1145 rc = VERR_NO_MEMORY;
1146 }
1147 else if (pv)
1148 rc = VERR_ADDRESS_TOO_BIG;
1149 else
1150 rc = VERR_MAP_FAILED;
1151 pMemMap->release();
1152 }
1153 else
1154 rc = VERR_MAP_FAILED;
1155 }
1156
1157 IPRT_DARWIN_RESTORE_EFL_AC();
1158 return rc;
1159}
1160
1161
1162DECLHIDDEN(int) rtR0MemObjNativeProtect(PRTR0MEMOBJINTERNAL pMem, size_t offSub, size_t cbSub, uint32_t fProt)
1163{
1164 IPRT_DARWIN_SAVE_EFL_AC();
1165
1166 /* Get the map for the object. */
1167 vm_map_t pVmMap = rtR0MemObjDarwinGetMap(pMem);
1168 if (!pVmMap)
1169 {
1170 IPRT_DARWIN_RESTORE_EFL_AC();
1171 return VERR_NOT_SUPPORTED;
1172 }
1173
1174 /*
1175 * Convert the protection.
1176 */
1177 vm_prot_t fMachProt;
1178 switch (fProt)
1179 {
1180 case RTMEM_PROT_NONE:
1181 fMachProt = VM_PROT_NONE;
1182 break;
1183 case RTMEM_PROT_READ:
1184 fMachProt = VM_PROT_READ;
1185 break;
1186 case RTMEM_PROT_READ | RTMEM_PROT_WRITE:
1187 fMachProt = VM_PROT_READ | VM_PROT_WRITE;
1188 break;
1189 case RTMEM_PROT_READ | RTMEM_PROT_WRITE | RTMEM_PROT_EXEC:
1190 fMachProt = VM_PROT_READ | VM_PROT_WRITE | VM_PROT_EXECUTE;
1191 break;
1192 case RTMEM_PROT_WRITE:
1193 fMachProt = VM_PROT_WRITE | VM_PROT_READ; /* never write-only */
1194 break;
1195 case RTMEM_PROT_WRITE | RTMEM_PROT_EXEC:
1196 fMachProt = VM_PROT_WRITE | VM_PROT_EXECUTE | VM_PROT_READ; /* never write-only or execute-only */
1197 break;
1198 case RTMEM_PROT_EXEC:
1199 fMachProt = VM_PROT_EXECUTE | VM_PROT_READ; /* never execute-only */
1200 break;
1201 default:
1202 AssertFailedReturn(VERR_INVALID_PARAMETER);
1203 }
1204
1205 /*
1206 * Do the job.
1207 */
1208 vm_offset_t Start = (uintptr_t)pMem->pv + offSub;
1209 kern_return_t krc = vm_protect(pVmMap,
1210 Start,
1211 cbSub,
1212 false,
1213 fMachProt);
1214 if (krc != KERN_SUCCESS)
1215 {
1216 static int s_cComplaints = 0;
1217 if (s_cComplaints < 10)
1218 {
1219 s_cComplaints++;
1220 printf("rtR0MemObjNativeProtect: vm_protect(%p,%p,%p,false,%#x) -> %d\n",
1221 pVmMap, (void *)Start, (void *)cbSub, fMachProt, krc);
1222
1223 kern_return_t krc2;
1224 vm_offset_t pvReal = Start;
1225 vm_size_t cbReal = 0;
1226 mach_msg_type_number_t cInfo = VM_REGION_BASIC_INFO_COUNT;
1227 struct vm_region_basic_info Info;
1228 RT_ZERO(Info);
1229 krc2 = vm_region(pVmMap, &pvReal, &cbReal, VM_REGION_BASIC_INFO, (vm_region_info_t)&Info, &cInfo, NULL);
1230 printf("rtR0MemObjNativeProtect: basic info - krc2=%d pv=%p cb=%p prot=%#x max=%#x inh=%#x shr=%d rvd=%d off=%#x behavior=%#x wired=%#x\n",
1231 krc2, (void *)pvReal, (void *)cbReal, Info.protection, Info.max_protection, Info.inheritance,
1232 Info.shared, Info.reserved, Info.offset, Info.behavior, Info.user_wired_count);
1233 }
1234 IPRT_DARWIN_RESTORE_EFL_AC();
1235 return RTErrConvertFromDarwinKern(krc);
1236 }
1237
1238 /*
1239 * Touch the pages if they should be writable afterwards and accessible
1240 * from code which should never fault. vm_protect() may leave pages
1241 * temporarily write protected, possibly due to pmap no-upgrade rules?
1242 *
1243 * This is the same trick (or HACK ALERT if you like) as applied in
1244 * rtR0MemObjNativeMapKernel.
1245 */
1246 if ( pMem->enmType != RTR0MEMOBJTYPE_MAPPING
1247 || pMem->u.Mapping.R0Process == NIL_RTR0PROCESS)
1248 {
1249 if (fProt & RTMEM_PROT_WRITE)
1250 rtR0MemObjDarwinTouchPages((void *)Start, cbSub);
1251 /*
1252 * Sniff (read) read-only pages too, just to be sure.
1253 */
1254 else if (fProt & (RTMEM_PROT_READ | RTMEM_PROT_EXEC))
1255 rtR0MemObjDarwinSniffPages((void const *)Start, cbSub);
1256 }
1257
1258 IPRT_DARWIN_RESTORE_EFL_AC();
1259 return VINF_SUCCESS;
1260}
1261
1262
1263DECLHIDDEN(RTHCPHYS) rtR0MemObjNativeGetPagePhysAddr(PRTR0MEMOBJINTERNAL pMem, size_t iPage)
1264{
1265 RTHCPHYS PhysAddr;
1266 PRTR0MEMOBJDARWIN pMemDarwin = (PRTR0MEMOBJDARWIN)pMem;
1267 IPRT_DARWIN_SAVE_EFL_AC();
1268
1269#ifdef USE_VM_MAP_WIRE
1270 /*
1271 * Locked memory doesn't have a memory descriptor and
1272 * needs to be handled differently.
1273 */
1274 if (pMemDarwin->Core.enmType == RTR0MEMOBJTYPE_LOCK)
1275 {
1276 ppnum_t PgNo;
1277 if (pMemDarwin->Core.u.Lock.R0Process == NIL_RTR0PROCESS)
1278 PgNo = pmap_find_phys(kernel_pmap, (uintptr_t)pMemDarwin->Core.pv + iPage * PAGE_SIZE);
1279 else
1280 {
1281 /*
1282 * From what I can tell, Apple seems to have locked up all the
1283 * available interfaces that could help us obtain the pmap_t of a task
1284 * or vm_map_t.
1285 *
1286 * So, we'll have to figure out where in the vm_map_t structure it is
1287 * and read it ourselves. ASSUMING that kernel_pmap is pointed to by
1288 * kernel_map->pmap, we scan kernel_map to locate the structure offset.
1289 * Not nice, but it will hopefully do the job in a reliable manner...
1290 *
1291 * (get_task_pmap, get_map_pmap or vm_map_pmap is what we really need btw.)
1292 */
1293 static int s_offPmap = -1;
1294 if (RT_UNLIKELY(s_offPmap == -1))
1295 {
1296 pmap_t const *p = (pmap_t *)kernel_map;
1297 pmap_t const * const pEnd = p + 64;
1298 for (; p < pEnd; p++)
1299 if (*p == kernel_pmap)
1300 {
1301 s_offPmap = (uintptr_t)p - (uintptr_t)kernel_map;
1302 break;
1303 }
1304 AssertReturn(s_offPmap >= 0, NIL_RTHCPHYS);
1305 }
1306 pmap_t Pmap = *(pmap_t *)((uintptr_t)get_task_map((task_t)pMemDarwin->Core.u.Lock.R0Process) + s_offPmap);
1307 PgNo = pmap_find_phys(Pmap, (uintptr_t)pMemDarwin->Core.pv + iPage * PAGE_SIZE);
1308 }
1309
1310 IPRT_DARWIN_RESTORE_EFL_AC();
1311 AssertReturn(PgNo, NIL_RTHCPHYS);
1312 PhysAddr = (RTHCPHYS)PgNo << PAGE_SHIFT;
1313 Assert((PhysAddr >> PAGE_SHIFT) == PgNo);
1314 }
1315 else
1316#endif /* USE_VM_MAP_WIRE */
1317 {
1318 /*
1319 * Get the memory descriptor.
1320 */
1321 IOMemoryDescriptor *pMemDesc = pMemDarwin->pMemDesc;
1322 if (!pMemDesc)
1323 pMemDesc = pMemDarwin->pMemMap->getMemoryDescriptor();
1324 AssertReturn(pMemDesc, NIL_RTHCPHYS);
1325
1326 /*
1327 * If we've got a memory descriptor, use getPhysicalSegment64().
1328 */
1329#ifdef __LP64__
1330 addr64_t Addr = pMemDesc->getPhysicalSegment(iPage * PAGE_SIZE, NULL, kIOMemoryMapperNone);
1331#else
1332 addr64_t Addr = pMemDesc->getPhysicalSegment64(iPage * PAGE_SIZE, NULL);
1333#endif
1334 IPRT_DARWIN_RESTORE_EFL_AC();
1335 AssertMsgReturn(Addr, ("iPage=%u\n", iPage), NIL_RTHCPHYS);
1336 PhysAddr = Addr;
1337 AssertMsgReturn(PhysAddr == Addr, ("PhysAddr=%RHp Addr=%RX64\n", PhysAddr, (uint64_t)Addr), NIL_RTHCPHYS);
1338 }
1339
1340 return PhysAddr;
1341}
1342
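For orientation only: a minimal sketch of how ring-0 code typically ends up in the natives above, going through the generic RTR0MemObj API declared in iprt/memobj.h. The IPRT calls shown are the real public entry points; the surrounding driver context is assumed and error handling is trimmed.

#include <iprt/memobj.h>
#include <iprt/param.h>
#include <iprt/err.h>

static int exampleAllocAndQuery(void)
{
    RTR0MEMOBJ hMemObj;
    /* Dispatches to rtR0MemObjNativeAllocPage() in this file. */
    int rc = RTR0MemObjAllocPage(&hMemObj, 2 * PAGE_SIZE, false /*fExecutable*/);
    if (RT_SUCCESS(rc))
    {
        void    *pv   = RTR0MemObjAddress(hMemObj);             /* kernel mapping of the pages */
        RTHCPHYS Phys = RTR0MemObjGetPagePhysAddr(hMemObj, 0);  /* rtR0MemObjNativeGetPagePhysAddr() */
        NOREF(pv); NOREF(Phys);
        rc = RTR0MemObjFree(hMemObj, false /*fFreeMappings*/);  /* rtR0MemObjNativeFree() */
    }
    return rc;
}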