VirtualBox

source: vbox/trunk/src/VBox/Runtime/r0drv/darwin/memobj-r0drv-darwin.cpp@ 91482

Last change on this file since 91482 was 91482, checked in by vboxsync, 3 years ago

IPRT/memobj: Passing pszTag around...

  • Property svn:eol-style set to native
  • Property svn:keywords set to Id Revision
File size: 60.1 KB
1/* $Id: memobj-r0drv-darwin.cpp 91482 2021-09-30 00:12:26Z vboxsync $ */
2/** @file
3 * IPRT - Ring-0 Memory Objects, Darwin.
4 */
5
6/*
7 * Copyright (C) 2006-2020 Oracle Corporation
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 *
17 * The contents of this file may alternatively be used under the terms
18 * of the Common Development and Distribution License Version 1.0
19 * (CDDL) only, as it comes in the "COPYING.CDDL" file of the
20 * VirtualBox OSE distribution, in which case the provisions of the
21 * CDDL are applicable instead of those of the GPL.
22 *
23 * You may elect to license modified versions of this file under the
24 * terms and conditions of either the GPL or the CDDL or both.
25 */
26
27
28/*********************************************************************************************************************************
29* Header Files *
30*********************************************************************************************************************************/
31#define RTMEM_NO_WRAP_TO_EF_APIS /* circular dependency otherwise. */
32#include "the-darwin-kernel.h"
33#include "internal/iprt.h"
34#include <iprt/memobj.h>
35
36#include <iprt/asm.h>
37#if defined(RT_ARCH_AMD64) || defined(RT_ARCH_X86)
38# include <iprt/x86.h>
39# include <iprt/asm-amd64-x86.h>
40#endif
41#include <iprt/assert.h>
42#include <iprt/log.h>
43#include <iprt/mem.h>
44#include <iprt/param.h>
45#include <iprt/process.h>
46#include <iprt/semaphore.h>
47#include <iprt/string.h>
48#include <iprt/thread.h>
49#include "internal/memobj.h"
50
51
52/*********************************************************************************************************************************
53* Defined Constants And Macros *
54*********************************************************************************************************************************/
55#define MY_PRINTF(...) do { printf(__VA_ARGS__); kprintf(__VA_ARGS__); } while (0)
56
57/*#define USE_VM_MAP_WIRE - may re-enable later when non-mapped allocations are added. */
58
59
60/*********************************************************************************************************************************
61* Structures and Typedefs *
62*********************************************************************************************************************************/
63/**
64 * The Darwin version of the memory object structure.
65 */
66typedef struct RTR0MEMOBJDARWIN
67{
68 /** The core structure. */
69 RTR0MEMOBJINTERNAL Core;
70 /** Pointer to the memory descriptor created for allocated and locked memory. */
71 IOMemoryDescriptor *pMemDesc;
72 /** Pointer to the memory mapping object for mapped memory. */
73 IOMemoryMap *pMemMap;
74} RTR0MEMOBJDARWIN, *PRTR0MEMOBJDARWIN;
75
76/**
77 * Common thread_call_allocate/thread_call_enter argument package.
78 */
79typedef struct RTR0MEMOBJDARWINTHREADARGS
80{
81 int32_t volatile rc; /**< The IPRT status of the operation; set by the worker, read by the dispatcher. */
82 RTSEMEVENTMULTI hEvent; /**< Event the dispatching thread waits on; signalled once rc has been set. */
83} RTR0MEMOBJDARWINTHREADARGS;
84
85
86/**
87 * Arguments for rtR0MemObjNativeAllockWorkerOnKernelThread.
88 */
89typedef struct RTR0MEMOBJDARWINALLOCARGS
90{
91 RTR0MEMOBJDARWINTHREADARGS Core; /**< Common dispatch/result package; must be first. */
92 PPRTR0MEMOBJINTERNAL ppMem; /**< Where to return the memory object. */
93 size_t cb; /**< The page aligned memory size. */
94 bool fExecutable; /**< Whether the mapping needs to be executable. */
95 bool fContiguous; /**< Whether the backing memory needs to be contiguous. */
96 mach_vm_address_t PhysMask; /**< The mask for the backing memory (i.e. range); 0 if not significant. */
97 uint64_t MaxPhysAddr; /**< The max address to verify the result against; UINT64_MAX if it doesn't matter. */
98 RTR0MEMOBJTYPE enmType; /**< The object type. */
99 size_t uAlignment; /**< The allocation alignment (in bytes). */
100 const char *pszTag; /**< Allocation tag used for statistics and such. */
101} RTR0MEMOBJDARWINALLOCARGS;
102
103/**
104 * Arguments for rtR0MemObjNativeProtectWorkOnKernelThread.
105 */
106typedef struct RTR0MEMOBJDARWINPROTECTARGS
107{
108 RTR0MEMOBJDARWINTHREADARGS Core; /**< Common dispatch/result package; must be first. */
109 PRTR0MEMOBJINTERNAL pMem; /**< The memory object to change the protection of. */
110 size_t offSub; /**< Offset of the sub-range to protect. */
111 size_t cbSub; /**< Size of the sub-range to protect. */
112 uint32_t fProt; /**< The new RTMEM_PROT_XXX protection. */
113} RTR0MEMOBJDARWINPROTECTARGS;
114
115
116/*********************************************************************************************************************************
117* Internal Functions *
118*********************************************************************************************************************************/
119static void rtR0MemObjNativeAllockWorkerOnKernelThread(void *pvUser0, void *pvUser1);
120static int rtR0MemObjNativeProtectWorker(PRTR0MEMOBJINTERNAL pMem, size_t offSub, size_t cbSub, uint32_t fProt);
121static void rtR0MemObjNativeProtectWorkerOnKernelThread(void *pvUser0, void *pvUser1);
122
123
124/**
125 * Touch the pages to force the kernel to create or write-enable the page table
126 * entries.
127 *
128 * This is necessary since the kernel gets upset if we take a page fault when
129 * preemption is disabled and/or we own a simple lock (same thing). It has no
130 * problems with us disabling interrupts when taking the traps, weird stuff.
131 *
132 * (This is basically a way of invoking vm_fault on a range of pages.)
133 *
134 * @param pv Pointer to the first page.
135 * @param cb The number of bytes.
136 */
137static void rtR0MemObjDarwinTouchPages(void *pv, size_t cb)
138{
139 uint32_t volatile *pu32 = (uint32_t volatile *)pv;
140 for (;;)
141 {
142 ASMAtomicCmpXchgU32(pu32, 0xdeadbeef, 0xdeadbeef);
143 if (cb <= PAGE_SIZE)
144 break;
145 cb -= PAGE_SIZE;
146 pu32 += PAGE_SIZE / sizeof(uint32_t);
147 }
148}
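
/* Editorial sketch (not part of the original file): the typical way the helper
   above is used further down, e.g. in rtR0MemObjNativeAllocWorker and
   rtR0MemObjNativeMapKernel, is to pre-fault a buffer right after it has been
   allocated or mapped, so that later accesses from preemption-disabled code
   cannot trigger a page fault. */
#if 0 /* illustration only */
static int rtR0MemObjDarwinExamplePrefault(void *pv, size_t cb)
{
    rtR0MemObjDarwinTouchPages(pv, cb);   /* Create/write-enable the PTEs now... */

    RTTHREADPREEMPTSTATE State = RTTHREADPREEMPTSTATE_INITIALIZER;
    RTThreadPreemptDisable(&State);       /* ...so that this section cannot fault. */
    rtR0MemObjDarwinTouchPages(pv, cb);   /* (Same RT_STRICT-style verification as the worker uses.) */
    RTThreadPreemptRestore(&State);
    return VINF_SUCCESS;
}
#endif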
149
150
151/**
152 * Read (sniff) every page in the range to make sure there are some page table
153 * entries backing it.
154 *
155 * This is just to be sure vm_protect didn't remove stuff without re-adding it
156 * if someone should try to write-protect something.
157 *
158 * @param pv Pointer to the first page.
159 * @param cb The number of bytes.
160 */
161static void rtR0MemObjDarwinSniffPages(void const *pv, size_t cb)
162{
163 uint32_t volatile *pu32 = (uint32_t volatile *)pv;
164 uint32_t volatile u32Counter = 0;
165 for (;;)
166 {
167 u32Counter += *pu32;
168
169 if (cb <= PAGE_SIZE)
170 break;
171 cb -= PAGE_SIZE;
172 pu32 += PAGE_SIZE / sizeof(uint32_t);
173 }
174}
175
176
177/**
178 * Gets the virtual memory map the specified object is mapped into.
179 *
180 * @returns VM map handle on success, NULL if no map.
181 * @param pMem The memory object.
182 */
183DECLINLINE(vm_map_t) rtR0MemObjDarwinGetMap(PRTR0MEMOBJINTERNAL pMem)
184{
185 switch (pMem->enmType)
186 {
187 case RTR0MEMOBJTYPE_PAGE:
188 case RTR0MEMOBJTYPE_LOW:
189 case RTR0MEMOBJTYPE_CONT:
190 return kernel_map;
191
192 case RTR0MEMOBJTYPE_PHYS:
193 case RTR0MEMOBJTYPE_PHYS_NC:
194 if (pMem->pv)
195 return kernel_map;
196 return NULL;
197
198 case RTR0MEMOBJTYPE_LOCK:
199 return pMem->u.Lock.R0Process == NIL_RTR0PROCESS
200 ? kernel_map
201 : get_task_map((task_t)pMem->u.Lock.R0Process);
202
203 case RTR0MEMOBJTYPE_RES_VIRT:
204 return pMem->u.ResVirt.R0Process == NIL_RTR0PROCESS
205 ? kernel_map
206 : get_task_map((task_t)pMem->u.ResVirt.R0Process);
207
208 case RTR0MEMOBJTYPE_MAPPING:
209 return pMem->u.Mapping.R0Process == NIL_RTR0PROCESS
210 ? kernel_map
211 : get_task_map((task_t)pMem->u.Mapping.R0Process);
212
213 default:
214 return NULL;
215 }
216}
217
218#if 0 /* not necessary after all */
219/* My vm_map mockup. */
220struct my_vm_map
221{
222 struct { char pad[8]; } lock;
223 struct my_vm_map_header
224 {
225 struct vm_map_links
226 {
227 void *prev;
228 void *next;
229 vm_map_offset_t start;
230 vm_map_offset_t end;
231 } links;
232 int nentries;
233 boolean_t entries_pageable;
234 } hdr;
235 pmap_t pmap;
236 vm_map_size_t size;
237};
238
239
240/**
241 * Gets the minimum map address, this is similar to get_map_min.
242 *
243 * @returns The start address of the map.
244 * @param pMap The map.
245 */
246static vm_map_offset_t rtR0MemObjDarwinGetMapMin(vm_map_t pMap)
247{
248 /* Lazy discovery of the correct offset. The Apple guys are a wonderfully secretive bunch. */
249 static int32_t volatile s_offAdjust = INT32_MAX;
250 int32_t off = s_offAdjust;
251 if (off == INT32_MAX)
252 {
253 for (off = 0; ; off += sizeof(pmap_t))
254 {
255 if (*(pmap_t *)((uint8_t *)kernel_map + off) == kernel_pmap)
256 break;
257 AssertReturn(off <= RT_MAX(RT_OFFSETOF(struct my_vm_map, pmap) * 4, 1024), 0x1000);
258 }
259 ASMAtomicWriteS32(&s_offAdjust, off - RT_OFFSETOF(struct my_vm_map, pmap));
260 }
261
262 /* calculate it. */
263 struct my_vm_map *pMyMap = (struct my_vm_map *)((uint8_t *)pMap + off);
264 return pMyMap->hdr.links.start;
265}
266#endif /* unused */
267
268#ifdef RT_STRICT
269# if 0 /* unused */
270
271/**
272 * Read from a physical page.
273 *
274 * @param HCPhys The address to start reading at.
275 * @param cb How many bytes to read.
276 * @param pvDst Where to put the bytes. This is zero'd on failure.
277 */
278static void rtR0MemObjDarwinReadPhys(RTHCPHYS HCPhys, size_t cb, void *pvDst)
279{
280 memset(pvDst, '\0', cb);
281
282 IOAddressRange aRanges[1] = { { (mach_vm_address_t)HCPhys, RT_ALIGN_Z(cb, PAGE_SIZE) } };
283 IOMemoryDescriptor *pMemDesc = IOMemoryDescriptor::withAddressRanges(&aRanges[0], RT_ELEMENTS(aRanges),
284 kIODirectionIn, NULL /*task*/);
285 if (pMemDesc)
286 {
287#if MAC_OS_X_VERSION_MIN_REQUIRED >= 1050
288 IOMemoryMap *pMemMap = pMemDesc->createMappingInTask(kernel_task, 0, kIOMapAnywhere | kIOMapDefaultCache);
289#else
290 IOMemoryMap *pMemMap = pMemDesc->map(kernel_task, 0, kIOMapAnywhere | kIOMapDefaultCache);
291#endif
292 if (pMemMap)
293 {
294 void const *pvSrc = (void const *)(uintptr_t)pMemMap->getVirtualAddress();
295 memcpy(pvDst, pvSrc, cb);
296 pMemMap->release();
297 }
298 else
299 MY_PRINTF("rtR0MemObjDarwinReadPhys: createMappingInTask failed; HCPhys=%llx\n", HCPhys);
300
301 pMemDesc->release();
302 }
303 else
304 MY_PRINTF("rtR0MemObjDarwinReadPhys: withAddressRanges failed; HCPhys=%llx\n", HCPhys);
305}
306
307
308/**
309 * Gets the PTE for a page.
310 *
311 * @returns the PTE.
312 * @param pvPage The virtual address to get the PTE for.
313 */
314static uint64_t rtR0MemObjDarwinGetPTE(void *pvPage)
315{
316 RTUINT64U u64;
317 RTCCUINTREG cr3 = ASMGetCR3();
318 RTCCUINTREG cr4 = ASMGetCR4();
319 bool fPAE = false;
320 bool fLMA = false;
321 if (cr4 & X86_CR4_PAE)
322 {
323 fPAE = true;
324 uint32_t fExtFeatures = ASMCpuId_EDX(0x80000001);
325 if (fExtFeatures & X86_CPUID_EXT_FEATURE_EDX_LONG_MODE)
326 {
327 uint64_t efer = ASMRdMsr(MSR_K6_EFER);
328 if (efer & MSR_K6_EFER_LMA)
329 fLMA = true;
330 }
331 }
332
333 if (fLMA)
334 {
335 /* PML4 */
336 rtR0MemObjDarwinReadPhys((cr3 & ~(RTCCUINTREG)PAGE_OFFSET_MASK) | (((uint64_t)(uintptr_t)pvPage >> X86_PML4_SHIFT) & X86_PML4_MASK) * 8, 8, &u64);
337 if (!(u64.u & X86_PML4E_P))
338 {
339 MY_PRINTF("rtR0MemObjDarwinGetPTE: %p -> PML4E !p\n", pvPage);
340 return 0;
341 }
342
343 /* PDPTR */
344 rtR0MemObjDarwinReadPhys((u64.u & ~(uint64_t)PAGE_OFFSET_MASK) | (((uintptr_t)pvPage >> X86_PDPT_SHIFT) & X86_PDPT_MASK_AMD64) * 8, 8, &u64);
345 if (!(u64.u & X86_PDPE_P))
346 {
347 MY_PRINTF("rtR0MemObjDarwinGetPTE: %p -> PDPTE !p\n", pvPage);
348 return 0;
349 }
350 if (u64.u & X86_PDPE_LM_PS)
351 return (u64.u & ~(uint64_t)(_1G -1)) | ((uintptr_t)pvPage & (_1G -1));
352
353 /* PD */
354 rtR0MemObjDarwinReadPhys((u64.u & ~(uint64_t)PAGE_OFFSET_MASK) | (((uintptr_t)pvPage >> X86_PD_PAE_SHIFT) & X86_PD_PAE_MASK) * 8, 8, &u64);
355 if (!(u64.u & X86_PDE_P))
356 {
357 MY_PRINTF("rtR0MemObjDarwinGetPTE: %p -> PDE !p\n", pvPage);
358 return 0;
359 }
360 if (u64.u & X86_PDE_PS)
361 return (u64.u & ~(uint64_t)(_2M -1)) | ((uintptr_t)pvPage & (_2M -1));
362
363 /* PT */
364 rtR0MemObjDarwinReadPhys((u64.u & ~(uint64_t)PAGE_OFFSET_MASK) | (((uintptr_t)pvPage >> X86_PT_PAE_SHIFT) & X86_PT_PAE_MASK) * 8, 8, &u64);
365 if (!(u64.u & X86_PTE_P))
366 {
367 MY_PRINTF("rtR0MemObjDarwinGetPTE: %p -> PTE !p\n", pvPage);
368 return 0;
369 }
370 return u64.u;
371 }
372
373 if (fPAE)
374 {
375 /* PDPTR */
376 rtR0MemObjDarwinReadPhys((u64.u & X86_CR3_PAE_PAGE_MASK) | (((uintptr_t)pvPage >> X86_PDPT_SHIFT) & X86_PDPT_MASK_PAE) * 8, 8, &u64);
377 if (!(u64.u & X86_PDE_P))
378 return 0;
379
380 /* PD */
381 rtR0MemObjDarwinReadPhys((u64.u & ~(uint64_t)PAGE_OFFSET_MASK) | (((uintptr_t)pvPage >> X86_PD_PAE_SHIFT) & X86_PD_PAE_MASK) * 8, 8, &u64);
382 if (!(u64.u & X86_PDE_P))
383 return 0;
384 if (u64.u & X86_PDE_PS)
385 return (u64.u & ~(uint64_t)(_2M -1)) | ((uintptr_t)pvPage & (_2M -1));
386
387 /* PT */
388 rtR0MemObjDarwinReadPhys((u64.u & ~(uint64_t)PAGE_OFFSET_MASK) | (((uintptr_t)pvPage >> X86_PT_PAE_SHIFT) & X86_PT_PAE_MASK) * 8, 8, &u64);
389 if (!(u64.u & X86_PTE_P))
390 return 0;
391 return u64.u;
392 }
393
394 /* PD */
395 rtR0MemObjDarwinReadPhys((u64.au32[0] & ~(uint32_t)PAGE_OFFSET_MASK) | (((uintptr_t)pvPage >> X86_PD_SHIFT) & X86_PD_MASK) * 4, 4, &u64);
396 if (!(u64.au32[0] & X86_PDE_P))
397 return 0;
398 if (u64.au32[0] & X86_PDE_PS)
399 return (u64.u & ~(uint64_t)(_2M -1)) | ((uintptr_t)pvPage & (_2M -1));
400
401 /* PT */
402 rtR0MemObjDarwinReadPhys((u64.au32[0] & ~(uint32_t)PAGE_OFFSET_MASK) | (((uintptr_t)pvPage >> X86_PT_SHIFT) & X86_PT_MASK) * 4, 4, &u64);
403 if (!(u64.au32[0] & X86_PTE_P))
404 return 0;
405 return u64.au32[0];
406
407 return 0;
408}
409
410# endif /* unused */
411#endif /* RT_STRICT */
412
413DECLHIDDEN(int) rtR0MemObjNativeFree(RTR0MEMOBJ pMem)
414{
415 PRTR0MEMOBJDARWIN pMemDarwin = (PRTR0MEMOBJDARWIN)pMem;
416 IPRT_DARWIN_SAVE_EFL_AC();
417
418 /*
419 * Release the IOMemoryDescriptor or/and IOMemoryMap associated with the object.
420 */
421 if (pMemDarwin->pMemDesc)
422 {
423 pMemDarwin->pMemDesc->complete();
424 pMemDarwin->pMemDesc->release();
425 pMemDarwin->pMemDesc = NULL;
426 }
427
428 if (pMemDarwin->pMemMap)
429 {
430 pMemDarwin->pMemMap->release();
431 pMemDarwin->pMemMap = NULL;
432 }
433
434 /*
435 * Release any memory that we've allocated or locked.
436 */
437 switch (pMemDarwin->Core.enmType)
438 {
439 case RTR0MEMOBJTYPE_LOW:
440 case RTR0MEMOBJTYPE_PAGE:
441 case RTR0MEMOBJTYPE_CONT:
442 break;
443
444 case RTR0MEMOBJTYPE_LOCK:
445 {
446#ifdef USE_VM_MAP_WIRE
447 vm_map_t Map = pMemDarwin->Core.u.Lock.R0Process != NIL_RTR0PROCESS
448 ? get_task_map((task_t)pMemDarwin->Core.u.Lock.R0Process)
449 : kernel_map;
450 kern_return_t kr = vm_map_unwire(Map,
451 (vm_map_offset_t)pMemDarwin->Core.pv,
452 (vm_map_offset_t)pMemDarwin->Core.pv + pMemDarwin->Core.cb,
453 0 /* not user */);
454 AssertRC(kr == KERN_SUCCESS); /** @todo don't ignore... */
455#endif
456 break;
457 }
458
459 case RTR0MEMOBJTYPE_PHYS:
460 /*if (pMemDarwin->Core.u.Phys.fAllocated)
461 IOFreePhysical(pMemDarwin->Core.u.Phys.PhysBase, pMemDarwin->Core.cb);*/
462 Assert(!pMemDarwin->Core.u.Phys.fAllocated);
463 break;
464
465 case RTR0MEMOBJTYPE_PHYS_NC:
466 AssertMsgFailed(("RTR0MEMOBJTYPE_PHYS_NC\n"));
467 IPRT_DARWIN_RESTORE_EFL_AC();
468 return VERR_INTERNAL_ERROR;
469
470 case RTR0MEMOBJTYPE_RES_VIRT:
471 AssertMsgFailed(("RTR0MEMOBJTYPE_RES_VIRT\n"));
472 IPRT_DARWIN_RESTORE_EFL_AC();
473 return VERR_INTERNAL_ERROR;
474
475 case RTR0MEMOBJTYPE_MAPPING:
476 /* nothing to do here. */
477 break;
478
479 default:
480 AssertMsgFailed(("enmType=%d\n", pMemDarwin->Core.enmType));
481 IPRT_DARWIN_RESTORE_EFL_AC();
482 return VERR_INTERNAL_ERROR;
483 }
484
485 IPRT_DARWIN_RESTORE_EFL_AC();
486 return VINF_SUCCESS;
487}
488
489
490/**
491 * This is a helper function that executes @a pfnWorker in the context of the
492 * kernel_task.
493 *
494 * @returns IPRT status code - result from pfnWorker or dispatching error.
495 * @param pfnWorker The function to call.
496 * @param pArgs The arguments to pass to the function.
497 */
498static int rtR0MemObjDarwinDoInKernelTaskThread(thread_call_func_t pfnWorker, RTR0MEMOBJDARWINTHREADARGS *pArgs)
499{
500 pArgs->rc = VERR_IPE_UNINITIALIZED_STATUS;
501 pArgs->hEvent = NIL_RTSEMEVENTMULTI;
502 int rc = RTSemEventMultiCreate(&pArgs->hEvent);
503 if (RT_SUCCESS(rc))
504 {
505 thread_call_t hCall = thread_call_allocate(pfnWorker, (void *)pArgs);
506 if (hCall)
507 {
508 boolean_t fRc = thread_call_enter(hCall);
509 AssertLogRel(fRc == FALSE);
510
511 rc = RTSemEventMultiWaitEx(pArgs->hEvent, RTSEMWAIT_FLAGS_INDEFINITE | RTSEMWAIT_FLAGS_UNINTERRUPTIBLE,
512 RT_INDEFINITE_WAIT);
513 AssertLogRelRC(rc);
514
515 rc = pArgs->rc;
516 thread_call_free(hCall);
517 }
518 else
519 rc = VERR_NO_MEMORY;
520 RTSemEventMultiDestroy(pArgs->hEvent);
521 }
522 return rc;
523}
524
525
526/**
527 * Signals result to thread waiting in rtR0MemObjDarwinDoInKernelTaskThread.
528 *
529 * @param pArgs The argument structure.
530 * @param rc The IPRT status code to signal.
531 */
532static void rtR0MemObjDarwinSignalThreadWaitinOnTask(RTR0MEMOBJDARWINTHREADARGS volatile *pArgs, int rc)
533{
534 if (ASMAtomicCmpXchgS32(&pArgs->rc, rc, VERR_IPE_UNINITIALIZED_STATUS))
535 {
536 rc = RTSemEventMultiSignal(pArgs->hEvent);
537 AssertLogRelRC(rc);
538 }
539}
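
/* Editorial sketch (not part of the original file): how the two helpers above
   pair up. A caller packs its parameters behind an RTR0MEMOBJDARWINTHREADARGS
   header, dispatches its worker to the kernel_task and waits; the worker signals
   the result back. This mirrors rtR0MemObjNativeProtect and
   rtR0MemObjNativeProtectWorkerOnKernelThread further down; the names below are
   made up for illustration. */
#if 0 /* illustration only */
typedef struct EXAMPLEDARWINARGS
{
    RTR0MEMOBJDARWINTHREADARGS Core;      /* Must be first: carries the rc and the wait event. */
    void                      *pvPayload; /* Whatever the worker needs. */
} EXAMPLEDARWINARGS;

static void exampleWorkerOnKernelThread(void *pvUser0, void *pvUser1)
{
    EXAMPLEDARWINARGS *pArgs = (EXAMPLEDARWINARGS *)pvUser0;
    NOREF(pvUser1);
    int rc = VINF_SUCCESS;                /* ... do the actual work in kernel_task context ... */
    rtR0MemObjDarwinSignalThreadWaitinOnTask(&pArgs->Core, rc);
}

static int exampleDoOnKernelTask(void *pvPayload)
{
    EXAMPLEDARWINARGS Args;
    Args.pvPayload = pvPayload;
    return rtR0MemObjDarwinDoInKernelTaskThread(exampleWorkerOnKernelThread, &Args.Core);
}
#endif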
540
541
542/**
543 * Kernel memory alloc worker that uses inTaskWithPhysicalMask.
544 *
545 * @returns IPRT status code.
546 * @retval VERR_ADDRESS_TOO_BIG try another way.
547 *
548 * @param ppMem Where to return the memory object.
549 * @param cb The page aligned memory size.
550 * @param fExecutable Whether the mapping needs to be executable.
551 * @param fContiguous Whether the backing memory needs to be contiguous.
552 * @param PhysMask The mask for the backing memory (i.e. range). Use 0 if
553 * you don't care that much or are speculating.
554 * @param MaxPhysAddr The max address to verify the result against. Use
555 * UINT64_MAX if it doesn't matter.
556 * @param enmType The object type.
557 * @param uAlignment The allocation alignment (in bytes).
558 * @param pszTag Allocation tag used for statistics and such.
559 * @param fOnKernelThread Set if we're already on the kernel thread.
560 */
561static int rtR0MemObjNativeAllocWorker(PPRTR0MEMOBJINTERNAL ppMem, size_t cb,
562 bool fExecutable, bool fContiguous,
563 mach_vm_address_t PhysMask, uint64_t MaxPhysAddr,
564 RTR0MEMOBJTYPE enmType, size_t uAlignment, const char *pszTag, bool fOnKernelThread)
565{
566 int rc;
567
568 /*
569 * Because of process code signing properties leaking into kernel space
570 * in XNU's vm_fault.c code, we have to defer allocations of exec memory to
571 * a thread running in the kernel_task to get consistent results here.
572 *
573 * Trouble strikes in vm_fault_enter() when cs_enforcement_enabled is determined
574 * to be true because current process has the CS_ENFORCEMENT flag, the page flag
575 * vmp_cs_validated is clear, and the protection mask includes VM_PROT_EXECUTE
576 * (pmap_cs_enforced does not apply to macOS it seems). This test seems to go
577 * back to 10.5, though I'm not sure whether it's enabled for macOS that early
578 * on. Only VM_PROT_EXECUTE is problematic for kernel memory, (though
579 * VM_PROT_WRITE on code signed pages is also problematic in theory). As long as
580 * kernel_task doesn't have CS_ENFORCEMENT enabled, we'll be fine switching to it.
581 */
582 if (!fExecutable || fOnKernelThread)
583 { /* likely */ }
584 else
585 {
586 RTR0MEMOBJDARWINALLOCARGS Args;
587 Args.ppMem = ppMem;
588 Args.cb = cb;
589 Args.fExecutable = fExecutable;
590 Args.fContiguous = fContiguous;
591 Args.PhysMask = PhysMask;
592 Args.MaxPhysAddr = MaxPhysAddr;
593 Args.enmType = enmType;
594 Args.uAlignment = uAlignment;
595 Args.pszTag = pszTag;
596 return rtR0MemObjDarwinDoInKernelTaskThread(rtR0MemObjNativeAllockWorkerOnKernelThread, &Args.Core);
597 }
598
599 /*
600 * Try inTaskWithPhysicalMask first, but since we don't quite trust that it
601 * actually respects the physical memory mask (10.5.x is certainly busted),
602 * we'll use rtR0MemObjNativeAllocCont as a fallback for dealing with that.
603 *
604 * The kIOMemoryKernelUserShared flag just forces the result to be page aligned.
605 *
606 * The kIOMemoryMapperNone flag is required since 10.8.2 (IOMMU changes?).
607 */
608
609 /* This is an old fudge from the snow leopard days: "Is it only on snow leopard?
610 Seen allocating memory for the VM structure, last page corrupted or
611 inaccessible." Made it only apply to snow leopard and older for now. */
612 size_t cbFudged = cb;
613 if (version_major >= 11 /* 11 = 10.7.x = Lion. */)
614 { /* likely */ }
615 else
616 cbFudged += PAGE_SIZE;
617
618 IOOptionBits fOptions = kIOMemoryKernelUserShared | kIODirectionInOut;
619 if (fContiguous)
620 {
621 fOptions |= kIOMemoryPhysicallyContiguous;
622 if ( version_major > 12
623 || (version_major == 12 && version_minor >= 2) /* 10.8.2 = Mountain Kitten */ )
624 fOptions |= kIOMemoryHostPhysicallyContiguous; /* (Just to make ourselves clear, in case the xnu code changes.) */
625 }
626 if (version_major >= 12 /* 12 = 10.8.x = Mountain Kitten */)
627 fOptions |= kIOMemoryMapperNone;
628
629#if __MAC_OS_X_VERSION_MAX_ALLOWED >= 1070 && 0 /* enable when/if necessary */
630 /* Paranoia: Don't misrepresent our intentions, we won't map kernel executable memory into ring-0. */
631 if (fExecutable && version_major >= 11 /* 10.7.x = Lion, as below */)
632 {
633 fOptions &= ~kIOMemoryKernelUserShared;
634 if (uAlignment < PAGE_SIZE)
635 uAlignment = PAGE_SIZE;
636 }
637#endif
638
639 /* The public initWithPhysicalMask virtual method appeared in 10.7.0; in
640 versions 10.5.0 up to 10.7.0 it was private, and in 10.4.8-10.5.0 it was
641 x86 only and didn't have the alignment parameter (the slot was different too). */
642 uint64_t uAlignmentActual = uAlignment;
643 IOBufferMemoryDescriptor *pMemDesc;
644#if __MAC_OS_X_VERSION_MAX_ALLOWED >= 1070
645 if (version_major >= 11 /* 11 = 10.7.x = Lion, could probably allow 10.5.0+ here if we really wanted to. */)
646 {
647 /* Starting with 10.6.x the physical mask is ignored if alignment is higher
648 than 1. The assumption seems to be that inTaskWithPhysicalMask() should
649 be used and the alignment inferred from the PhysMask argument. */
650 if (MaxPhysAddr != UINT64_MAX)
651 {
652 Assert(RT_ALIGN_64(PhysMask, uAlignment) == PhysMask);
653 uAlignmentActual = 1;
654 }
655
656 pMemDesc = new IOBufferMemoryDescriptor;
657 if (pMemDesc)
658 {
659 if (pMemDesc->initWithPhysicalMask(kernel_task, fOptions, cbFudged, uAlignmentActual, PhysMask))
660 { /* likely */ }
661 else
662 {
663 pMemDesc->release();
664 pMemDesc = NULL;
665 }
666 }
667 }
668 else
669#endif
670 pMemDesc = IOBufferMemoryDescriptor::inTaskWithPhysicalMask(kernel_task, fOptions, cbFudged, PhysMask);
671 if (pMemDesc)
672 {
673 IOReturn IORet = pMemDesc->prepare(kIODirectionInOut);
674 if (IORet == kIOReturnSuccess)
675 {
676 void *pv = pMemDesc->getBytesNoCopy(0, cbFudged);
677 if (pv)
678 {
679 /*
680 * Check that it is all below MaxPhysAddr (and contiguous if requested).
681 */
682 addr64_t AddrPrev = 0;
683 MaxPhysAddr &= ~(uint64_t)PAGE_OFFSET_MASK;
684 for (IOByteCount off = 0; off < cb; off += PAGE_SIZE)
685 {
686#ifdef __LP64__
687 addr64_t Addr = pMemDesc->getPhysicalSegment(off, NULL, kIOMemoryMapperNone);
688#else
689 addr64_t Addr = pMemDesc->getPhysicalSegment64(off, NULL);
690#endif
691 if ( Addr > MaxPhysAddr
692 || !Addr
693 || (Addr & PAGE_OFFSET_MASK)
694 || ( fContiguous
695 && !off
696 && Addr == AddrPrev + PAGE_SIZE))
697 {
698 /* Buggy API, try allocate the memory another way. */
699 pMemDesc->complete();
700 pMemDesc->release();
701 if (PhysMask)
702 {
703 kprintf("rtR0MemObjNativeAllocWorker: off=%zx Addr=%llx AddrPrev=%llx MaxPhysAddr=%llx PhysMask=%llx fContiguous=%d fOptions=%#x - buggy API!\n",
704 (size_t)off, Addr, AddrPrev, MaxPhysAddr, PhysMask, fContiguous, fOptions);
705 LogRel(("rtR0MemObjNativeAllocWorker: off=%zx Addr=%llx AddrPrev=%llx MaxPhysAddr=%llx PhysMask=%llx fContiguous=%RTbool fOptions=%#x - buggy API!\n",
706 (size_t)off, Addr, AddrPrev, MaxPhysAddr, PhysMask, fContiguous, fOptions));
707 }
708 return VERR_ADDRESS_TOO_BIG;
709 }
710 AddrPrev = Addr;
711 }
712
713 /*
714 * Check that it's aligned correctly.
715 */
716 if ((uintptr_t)pv & (uAlignment - 1))
717 {
718 pMemDesc->complete();
719 pMemDesc->release();
720 if (PhysMask)
721 {
722 kprintf("rtR0MemObjNativeAllocWorker: pv=%p uAlignment=%#zx (MaxPhysAddr=%llx PhysMask=%llx fContiguous=%d fOptions=%#x) - buggy API!!\n",
723 pv, uAlignment, MaxPhysAddr, PhysMask, fContiguous, fOptions);
724 LogRel(("rtR0MemObjNativeAllocWorker: pv=%p uAlignment=%#zx (MaxPhysAddr=%llx PhysMask=%llx fContiguous=%RTbool fOptions=%#x) - buggy API!\n",
725 pv, uAlignment, MaxPhysAddr, PhysMask, fContiguous, fOptions));
726 }
727 return VERR_NOT_SUPPORTED;
728 }
729
730#ifdef RT_STRICT
731 /* check that the memory is actually mapped. */
732 //addr64_t Addr = pMemDesc->getPhysicalSegment64(0, NULL);
733 //printf("rtR0MemObjNativeAllocWorker: pv=%p %8llx %8llx\n", pv, rtR0MemObjDarwinGetPTE(pv), Addr);
734 RTTHREADPREEMPTSTATE State = RTTHREADPREEMPTSTATE_INITIALIZER;
735 RTThreadPreemptDisable(&State);
736 rtR0MemObjDarwinTouchPages(pv, cb);
737 RTThreadPreemptRestore(&State);
738#endif
739
740 /*
741 * Create the IPRT memory object.
742 */
743 PRTR0MEMOBJDARWIN pMemDarwin = (PRTR0MEMOBJDARWIN)rtR0MemObjNew(sizeof(*pMemDarwin), enmType, pv, cb, pszTag);
744 if (pMemDarwin)
745 {
746 if (fContiguous)
747 {
748#ifdef __LP64__
749 addr64_t PhysBase64 = pMemDesc->getPhysicalSegment(0, NULL, kIOMemoryMapperNone);
750#else
751 addr64_t PhysBase64 = pMemDesc->getPhysicalSegment64(0, NULL);
752#endif
753 RTHCPHYS PhysBase = PhysBase64; Assert(PhysBase == PhysBase64);
754 if (enmType == RTR0MEMOBJTYPE_CONT)
755 pMemDarwin->Core.u.Cont.Phys = PhysBase;
756 else if (enmType == RTR0MEMOBJTYPE_PHYS)
757 pMemDarwin->Core.u.Phys.PhysBase = PhysBase;
758 else
759 AssertMsgFailed(("enmType=%d\n", enmType));
760 }
761
762 if (fExecutable)
763 {
764 rc = rtR0MemObjNativeProtectWorker(&pMemDarwin->Core, 0, cb,
765 RTMEM_PROT_READ | RTMEM_PROT_WRITE | RTMEM_PROT_EXEC);
766#ifdef RT_STRICT
767 if (RT_SUCCESS(rc))
768 {
769 /* check that the memory is actually mapped. */
770 RTTHREADPREEMPTSTATE State2 = RTTHREADPREEMPTSTATE_INITIALIZER;
771 RTThreadPreemptDisable(&State2);
772 rtR0MemObjDarwinTouchPages(pv, cb);
773 RTThreadPreemptRestore(&State2);
774 }
775#endif
776 /* Bug 6226: Ignore KERN_PROTECTION_FAILURE on Leopard and older. */
777 if ( rc == VERR_PERMISSION_DENIED
778 && version_major <= 10 /* 10 = 10.6.x = Snow Leopard. */)
779 rc = VINF_SUCCESS;
780 }
781 else
782 rc = VINF_SUCCESS;
783 if (RT_SUCCESS(rc))
784 {
785 pMemDarwin->pMemDesc = pMemDesc;
786 *ppMem = &pMemDarwin->Core;
787 return VINF_SUCCESS;
788 }
789
790 rtR0MemObjDelete(&pMemDarwin->Core);
791 }
792
793 if (enmType == RTR0MEMOBJTYPE_PHYS_NC)
794 rc = VERR_NO_PHYS_MEMORY;
795 else if (enmType == RTR0MEMOBJTYPE_LOW)
796 rc = VERR_NO_LOW_MEMORY;
797 else if (enmType == RTR0MEMOBJTYPE_CONT)
798 rc = VERR_NO_CONT_MEMORY;
799 else
800 rc = VERR_NO_MEMORY;
801 }
802 else
803 rc = VERR_MEMOBJ_INIT_FAILED;
804
805 pMemDesc->complete();
806 }
807 else
808 rc = RTErrConvertFromDarwinIO(IORet);
809 pMemDesc->release();
810 }
811 else
812 rc = VERR_MEMOBJ_INIT_FAILED;
813 Assert(rc != VERR_ADDRESS_TOO_BIG);
814 return rc;
815}
816
817
818/**
819 * rtR0MemObjNativeAllocWorker kernel_task wrapper function.
820 */
821static void rtR0MemObjNativeAllockWorkerOnKernelThread(void *pvUser0, void *pvUser1)
822{
823 AssertPtr(pvUser0); Assert(pvUser1 == NULL); NOREF(pvUser1);
824 RTR0MEMOBJDARWINALLOCARGS volatile *pArgs = (RTR0MEMOBJDARWINALLOCARGS volatile *)pvUser0;
825 int rc = rtR0MemObjNativeAllocWorker(pArgs->ppMem, pArgs->cb, pArgs->fExecutable, pArgs->fContiguous, pArgs->PhysMask,
826 pArgs->MaxPhysAddr, pArgs->enmType, pArgs->uAlignment, pArgs->pszTag,
827 true /*fOnKernelThread*/);
828 rtR0MemObjDarwinSignalThreadWaitinOnTask(&pArgs->Core, rc);
829}
830
831
832DECLHIDDEN(int) rtR0MemObjNativeAllocPage(PPRTR0MEMOBJINTERNAL ppMem, size_t cb, bool fExecutable)
833{
834 IPRT_DARWIN_SAVE_EFL_AC();
835
836 int rc = rtR0MemObjNativeAllocWorker(ppMem, cb, fExecutable, false /* fContiguous */, 0 /* PhysMask */, UINT64_MAX,
837 RTR0MEMOBJTYPE_PAGE, PAGE_SIZE, NULL, false /*fOnKernelThread*/);
838
839 IPRT_DARWIN_RESTORE_EFL_AC();
840 return rc;
841}
842
843
844DECLHIDDEN(int) rtR0MemObjNativeAllocLarge(PPRTR0MEMOBJINTERNAL ppMem, size_t cb, size_t cbLargePage, uint32_t fFlags,
845 const char *pszTag)
846{
847 return rtR0MemObjFallbackAllocLarge(ppMem, cb, cbLargePage, fFlags, pszTag);
848}
849
850
851DECLHIDDEN(int) rtR0MemObjNativeAllocLow(PPRTR0MEMOBJINTERNAL ppMem, size_t cb, bool fExecutable)
852{
853 IPRT_DARWIN_SAVE_EFL_AC();
854
855 /*
856 * Try IOMallocPhysical/IOMallocAligned first.
857 * Then try optimistically without a physical address mask, which will always
858 * end up using IOMallocAligned.
859 *
860 * (See bug comment in the worker and IOBufferMemoryDescriptor::initWithPhysicalMask.)
861 */
862 int rc = rtR0MemObjNativeAllocWorker(ppMem, cb, fExecutable, false /* fContiguous */, ~(uint32_t)PAGE_OFFSET_MASK,
863 _4G - PAGE_SIZE, RTR0MEMOBJTYPE_LOW, PAGE_SIZE, NULL, false /*fOnKernelThread*/);
864 if (rc == VERR_ADDRESS_TOO_BIG)
865 rc = rtR0MemObjNativeAllocWorker(ppMem, cb, fExecutable, false /* fContiguous */, 0 /* PhysMask */,
866 _4G - PAGE_SIZE, RTR0MEMOBJTYPE_LOW, PAGE_SIZE, NULL, false /*fOnKernelThread*/);
867
868 IPRT_DARWIN_RESTORE_EFL_AC();
869 return rc;
870}
871
872
873DECLHIDDEN(int) rtR0MemObjNativeAllocCont(PPRTR0MEMOBJINTERNAL ppMem, size_t cb, bool fExecutable)
874{
875 IPRT_DARWIN_SAVE_EFL_AC();
876
877 int rc = rtR0MemObjNativeAllocWorker(ppMem, cb, fExecutable, true /* fContiguous */,
878 ~(uint32_t)PAGE_OFFSET_MASK, _4G - PAGE_SIZE,
879 RTR0MEMOBJTYPE_CONT, PAGE_SIZE, NULL, false /*fOnKernelThread*/);
880
881 /*
882 * Workaround for bogus IOKernelAllocateContiguous behavior, just in case.
883 * cb <= PAGE_SIZE allocations take a different path, using a different allocator.
884 */
885 if (RT_FAILURE(rc) && cb <= PAGE_SIZE)
886 rc = rtR0MemObjNativeAllocWorker(ppMem, cb + PAGE_SIZE, fExecutable, true /* fContiguous */,
887 ~(uint32_t)PAGE_OFFSET_MASK, _4G - PAGE_SIZE,
888 RTR0MEMOBJTYPE_CONT, PAGE_SIZE, NULL, false /*fOnKernelThread*/);
889 IPRT_DARWIN_RESTORE_EFL_AC();
890 return rc;
891}
892
893
894DECLHIDDEN(int) rtR0MemObjNativeAllocPhys(PPRTR0MEMOBJINTERNAL ppMem, size_t cb, RTHCPHYS PhysHighest, size_t uAlignment,
895 const char *pszTag)
896{
897 if (uAlignment != PAGE_SIZE)
898 {
899 /* See rtR0MemObjNativeAllocWorker: */
900 if (version_major < 9 /* 9 = 10.5.x = Leopard */)
901 return VERR_NOT_SUPPORTED;
902 }
903
904 IPRT_DARWIN_SAVE_EFL_AC();
905
906 /*
907 * Translate the PhysHighest address into a mask.
908 */
909 int rc;
910 if (PhysHighest == NIL_RTHCPHYS)
911 rc = rtR0MemObjNativeAllocWorker(ppMem, cb, false /* fExecutable */, true /* fContiguous */,
912 uAlignment <= PAGE_SIZE ? 0 : ~(mach_vm_address_t)(uAlignment - 1) /* PhysMask*/,
913 UINT64_MAX, RTR0MEMOBJTYPE_PHYS, uAlignment, pszTag, false /*fOnKernelThread*/);
914 else
915 {
916 mach_vm_address_t PhysMask = 0;
917 PhysMask = ~(mach_vm_address_t)0;
918 while (PhysMask > (PhysHighest | PAGE_OFFSET_MASK))
919 PhysMask >>= 1;
920 AssertReturn(PhysMask + 1 <= cb, VERR_INVALID_PARAMETER);
921 PhysMask &= ~(mach_vm_address_t)(uAlignment - 1);
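 /* Editorial note (not in the original source): e.g. for PhysHighest = _4G - 1 and
    uAlignment = PAGE_SIZE, the loop leaves PhysMask = 0xffffffff and the alignment
    adjustment above then reduces it to 0xfffff000, i.e. "any 4 KiB aligned page below
    4 GiB", which is the same mask rtR0MemObjNativeAllocLow and rtR0MemObjNativeAllocCont
    pass explicitly. */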
922
923 rc = rtR0MemObjNativeAllocWorker(ppMem, cb, false /* fExecutable */, true /* fContiguous */,
924 PhysMask, PhysHighest,
925 RTR0MEMOBJTYPE_PHYS, uAlignment, pszTag, false /*fOnKernelThread*/);
926 }
927
928 IPRT_DARWIN_RESTORE_EFL_AC();
929 return rc;
930}
931
932
933DECLHIDDEN(int) rtR0MemObjNativeAllocPhysNC(PPRTR0MEMOBJINTERNAL ppMem, size_t cb, RTHCPHYS PhysHighest, const char *pszTag)
934{
935 /** @todo rtR0MemObjNativeAllocPhys / darwin.
936 * This might be a bit problematic and may very well require having to create our own
937 * object which we populate with pages but without mapping it into any address space.
938 * Estimate is 2-3 days.
939 */
940 RT_NOREF(ppMem, cb, PhysHighest, pszTag);
941 return VERR_NOT_SUPPORTED;
942}
943
944
945DECLHIDDEN(int) rtR0MemObjNativeEnterPhys(PPRTR0MEMOBJINTERNAL ppMem, RTHCPHYS Phys, size_t cb, uint32_t uCachePolicy,
946 const char *pszTag)
947{
948 AssertReturn(uCachePolicy == RTMEM_CACHE_POLICY_DONT_CARE, VERR_NOT_SUPPORTED);
949 IPRT_DARWIN_SAVE_EFL_AC();
950
951 /*
952 * Create a descriptor for it (the validation is always true on Intel Macs, but
953 * as it doesn't harm us we keep it in).
954 */
955 int rc = VERR_ADDRESS_TOO_BIG;
956 IOAddressRange aRanges[1] = { { Phys, cb } };
957 if ( aRanges[0].address == Phys
958 && aRanges[0].length == cb)
959 {
960 IOMemoryDescriptor *pMemDesc = IOMemoryDescriptor::withAddressRanges(&aRanges[0], RT_ELEMENTS(aRanges),
961 kIODirectionInOut, NULL /*task*/);
962 if (pMemDesc)
963 {
964#ifdef __LP64__
965 Assert(Phys == pMemDesc->getPhysicalSegment(0, NULL, kIOMemoryMapperNone));
966#else
967 Assert(Phys == pMemDesc->getPhysicalSegment64(0, NULL));
968#endif
969
970 /*
971 * Create the IPRT memory object.
972 */
973 PRTR0MEMOBJDARWIN pMemDarwin = (PRTR0MEMOBJDARWIN)rtR0MemObjNew(sizeof(*pMemDarwin), RTR0MEMOBJTYPE_PHYS,
974 NULL, cb, pszTag);
975 if (pMemDarwin)
976 {
977 pMemDarwin->Core.u.Phys.PhysBase = Phys;
978 pMemDarwin->Core.u.Phys.fAllocated = false;
979 pMemDarwin->Core.u.Phys.uCachePolicy = uCachePolicy;
980 pMemDarwin->pMemDesc = pMemDesc;
981 *ppMem = &pMemDarwin->Core;
982 IPRT_DARWIN_RESTORE_EFL_AC();
983 return VINF_SUCCESS;
984 }
985
986 rc = VERR_NO_MEMORY;
987 pMemDesc->release();
988 }
989 else
990 rc = VERR_MEMOBJ_INIT_FAILED;
991 }
992 else
993 AssertMsgFailed(("%#llx %llx\n", (unsigned long long)Phys, (unsigned long long)cb));
994 IPRT_DARWIN_RESTORE_EFL_AC();
995 return rc;
996}
997
998
999/**
1000 * Internal worker for locking down pages.
1001 *
1002 * @return IPRT status code.
1003 *
1004 * @param ppMem Where to store the memory object pointer.
1005 * @param pv First page.
1006 * @param cb Number of bytes.
1007 * @param fAccess The desired access, a combination of RTMEM_PROT_READ
1008 * and RTMEM_PROT_WRITE.
1009 * @param Task The task \a pv and \a cb refers to.
1010 * @param pszTag Allocation tag used for statistics and such.
1011 */
1012static int rtR0MemObjNativeLock(PPRTR0MEMOBJINTERNAL ppMem, void *pv, size_t cb, uint32_t fAccess, task_t Task,
1013 const char *pszTag)
1014{
1015 IPRT_DARWIN_SAVE_EFL_AC();
1016 NOREF(fAccess);
1017#ifdef USE_VM_MAP_WIRE
1018 vm_map_t Map = get_task_map(Task);
1019 Assert(Map);
1020
1021 /*
1022 * First try to lock the memory.
1023 */
1024 int rc = VERR_LOCK_FAILED;
1025 kern_return_t kr = vm_map_wire(get_task_map(Task),
1026 (vm_map_offset_t)pv,
1027 (vm_map_offset_t)pv + cb,
1028 VM_PROT_DEFAULT,
1029 0 /* not user */);
1030 if (kr == KERN_SUCCESS)
1031 {
1032 /*
1033 * Create the IPRT memory object.
1034 */
1035 PRTR0MEMOBJDARWIN pMemDarwin = (PRTR0MEMOBJDARWIN)rtR0MemObjNew(sizeof(*pMemDarwin), RTR0MEMOBJTYPE_LOCK, pv, cb, pszTag);
1036 if (pMemDarwin)
1037 {
1038 pMemDarwin->Core.u.Lock.R0Process = (RTR0PROCESS)Task;
1039 *ppMem = &pMemDarwin->Core;
1040
1041 IPRT_DARWIN_RESTORE_EFL_AC();
1042 return VINF_SUCCESS;
1043 }
1044
1045 kr = vm_map_unwire(get_task_map(Task), (vm_map_offset_t)pv, (vm_map_offset_t)pv + cb, 0 /* not user */);
1046 Assert(kr == KERN_SUCCESS);
1047 rc = VERR_NO_MEMORY;
1048 }
1049
1050#else
1051
1052 /*
1053 * Create a descriptor and try to lock it (prepare).
1054 */
1055 int rc = VERR_MEMOBJ_INIT_FAILED;
1056 IOMemoryDescriptor *pMemDesc = IOMemoryDescriptor::withAddressRange((vm_address_t)pv, cb, kIODirectionInOut, Task);
1057 if (pMemDesc)
1058 {
1059 IOReturn IORet = pMemDesc->prepare(kIODirectionInOut);
1060 if (IORet == kIOReturnSuccess)
1061 {
1062 /*
1063 * Create the IPRT memory object.
1064 */
1065 PRTR0MEMOBJDARWIN pMemDarwin = (PRTR0MEMOBJDARWIN)rtR0MemObjNew(sizeof(*pMemDarwin), RTR0MEMOBJTYPE_LOCK,
1066 pv, cb, pszTag);
1067 if (pMemDarwin)
1068 {
1069 pMemDarwin->Core.u.Lock.R0Process = (RTR0PROCESS)Task;
1070 pMemDarwin->pMemDesc = pMemDesc;
1071 *ppMem = &pMemDarwin->Core;
1072
1073 IPRT_DARWIN_RESTORE_EFL_AC();
1074 return VINF_SUCCESS;
1075 }
1076
1077 pMemDesc->complete();
1078 rc = VERR_NO_MEMORY;
1079 }
1080 else
1081 rc = VERR_LOCK_FAILED;
1082 pMemDesc->release();
1083 }
1084#endif
1085 IPRT_DARWIN_RESTORE_EFL_AC();
1086 return rc;
1087}
1088
1089
1090DECLHIDDEN(int) rtR0MemObjNativeLockUser(PPRTR0MEMOBJINTERNAL ppMem, RTR3PTR R3Ptr, size_t cb, uint32_t fAccess,
1091 RTR0PROCESS R0Process, const char *pszTag)
1092{
1093 return rtR0MemObjNativeLock(ppMem, (void *)R3Ptr, cb, fAccess, (task_t)R0Process, pszTag);
1094}
1095
1096
1097DECLHIDDEN(int) rtR0MemObjNativeLockKernel(PPRTR0MEMOBJINTERNAL ppMem, void *pv, size_t cb, uint32_t fAccess, const char *pszTag)
1098{
1099 return rtR0MemObjNativeLock(ppMem, pv, cb, fAccess, kernel_task, pszTag);
1100}
1101
1102
1103DECLHIDDEN(int) rtR0MemObjNativeReserveKernel(PPRTR0MEMOBJINTERNAL ppMem, void *pvFixed, size_t cb, size_t uAlignment,
1104 const char *pszTag)
1105{
1106 RT_NOREF(ppMem, pvFixed, cb, uAlignment, pszTag);
1107 return VERR_NOT_SUPPORTED;
1108}
1109
1110
1111DECLHIDDEN(int) rtR0MemObjNativeReserveUser(PPRTR0MEMOBJINTERNAL ppMem, RTR3PTR R3PtrFixed, size_t cb, size_t uAlignment,
1112 RTR0PROCESS R0Process, const char *pszTag)
1113{
1114 RT_NOREF(ppMem, R3PtrFixed, cb, uAlignment, R0Process, pszTag);
1115 return VERR_NOT_SUPPORTED;
1116}
1117
1118
1119DECLHIDDEN(int) rtR0MemObjNativeMapKernel(PPRTR0MEMOBJINTERNAL ppMem, RTR0MEMOBJ pMemToMap, void *pvFixed, size_t uAlignment,
1120 unsigned fProt, size_t offSub, size_t cbSub, const char *pszTag)
1121{
1122 RT_NOREF(fProt);
1123 AssertReturn(pvFixed == (void *)-1, VERR_NOT_SUPPORTED);
1124
1125 /*
1126 * Check that the specified alignment is supported.
1127 */
1128 if (uAlignment > PAGE_SIZE)
1129 return VERR_NOT_SUPPORTED;
1130 Assert(!offSub || cbSub);
1131
1132 IPRT_DARWIN_SAVE_EFL_AC();
1133
1134 /*
1135 * Must have a memory descriptor that we can map.
1136 */
1137 int rc = VERR_INVALID_PARAMETER;
1138 PRTR0MEMOBJDARWIN pMemToMapDarwin = (PRTR0MEMOBJDARWIN)pMemToMap;
1139 if (pMemToMapDarwin->pMemDesc)
1140 {
1141 /* The kIOMapPrefault option was added in 10.10.0; it causes PTEs to be populated
1142 with INTEL_PTE_WIRED set, just like we desire (see further down). However, until
1143 10.13.0 it was not available for use on kernel mappings. Oh, fudge. */
1144#if MAC_OS_X_VERSION_MIN_REQUIRED >= 1050
1145 static uint32_t volatile s_fOptions = UINT32_MAX;
1146 uint32_t fOptions = s_fOptions;
1147 if (RT_UNLIKELY(fOptions == UINT32_MAX))
1148 s_fOptions = fOptions = version_major >= 17 ? 0x10000000 /*kIOMapPrefault*/ : 0; /* Since 10.13.0 (High Sierra). */
1149
1150 IOMemoryMap *pMemMap = pMemToMapDarwin->pMemDesc->createMappingInTask(kernel_task,
1151 0,
1152 kIOMapAnywhere | kIOMapDefaultCache | fOptions,
1153 offSub,
1154 cbSub);
1155#else
1156 IOMemoryMap *pMemMap = pMemToMapDarwin->pMemDesc->map(kernel_task,
1157 0,
1158 kIOMapAnywhere | kIOMapDefaultCache,
1159 offSub,
1160 cbSub);
1161#endif
1162 if (pMemMap)
1163 {
1164 IOVirtualAddress VirtAddr = pMemMap->getVirtualAddress();
1165 void *pv = (void *)(uintptr_t)VirtAddr;
1166 if ((uintptr_t)pv == VirtAddr && pv != NULL)
1167 {
1168//#ifdef __LP64__
1169// addr64_t Addr = pMemToMapDarwin->pMemDesc->getPhysicalSegment(offSub, NULL, kIOMemoryMapperNone);
1170//#else
1171// addr64_t Addr = pMemToMapDarwin->pMemDesc->getPhysicalSegment64(offSub, NULL);
1172//#endif
1173// MY_PRINTF("pv=%p: %8llx %8llx\n", pv, rtR0MemObjDarwinGetPTE(pv), Addr);
1174
1175// /*
1176// * Explicitly lock it so that we're sure it is present and that
1177// * its PTEs cannot be recycled.
1178// * Note! withAddressRange() doesn't work as it adds kIOMemoryTypeVirtual64
1179// * to the options which causes prepare() to not wire the pages.
1180// * This is probably a bug.
1181// */
1182// IOAddressRange Range = { (mach_vm_address_t)pv, cbSub };
1183// IOMemoryDescriptor *pMemDesc = IOMemoryDescriptor::withOptions(&Range,
1184// 1 /* count */,
1185// 0 /* offset */,
1186// kernel_task,
1187// kIODirectionInOut | kIOMemoryTypeVirtual,
1188// kIOMapperSystem);
1189// if (pMemDesc)
1190// {
1191// IOReturn IORet = pMemDesc->prepare(kIODirectionInOut);
1192// if (IORet == kIOReturnSuccess)
1193// {
1194 /* HACK ALERT! On kernels older than 10.10 (xnu version 14), we need to fault in
1195 the pages here so they can safely be accessed from inside simple
1196 locks and when preemption is disabled (no page-ins allowed).
1197 Note! This touching does not cause INTEL_PTE_WIRED (bit 10) to be set as we go
1198 thru general #PF and vm_fault doesn't figure it should be wired or something. */
1199 rtR0MemObjDarwinTouchPages(pv, cbSub ? cbSub : pMemToMap->cb);
1200 /** @todo First, the memory should've been mapped by now, and second, it
1201 * should have the wired attribute in the PTE (bit 10). Neither seems to
1202 * be the case. The disabled locking code doesn't make any difference,
1203 * which is extremely odd, and breaks rtR0MemObjNativeGetPagePhysAddr
1204 * (getPhysicalSegment64 -> 64 for the lock descriptor). */
1205//#ifdef __LP64__
1206// addr64_t Addr2 = pMemToMapDarwin->pMemDesc->getPhysicalSegment(offSub, NULL, kIOMemoryMapperNone);
1207//#else
1208// addr64_t Addr2 = pMemToMapDarwin->pMemDesc->getPhysicalSegment64(offSub, NULL);
1209//#endif
1210// MY_PRINTF("pv=%p: %8llx %8llx (%d)\n", pv, rtR0MemObjDarwinGetPTE(pv), Addr2, 2);
1211
1212 /*
1213 * Create the IPRT memory object.
1214 */
1215 PRTR0MEMOBJDARWIN pMemDarwin = (PRTR0MEMOBJDARWIN)rtR0MemObjNew(sizeof(*pMemDarwin), RTR0MEMOBJTYPE_MAPPING,
1216 pv, cbSub ? cbSub : pMemToMap->cb, pszTag);
1217 if (pMemDarwin)
1218 {
1219 pMemDarwin->Core.u.Mapping.R0Process = NIL_RTR0PROCESS;
1220 pMemDarwin->pMemMap = pMemMap;
1221// pMemDarwin->pMemDesc = pMemDesc;
1222 *ppMem = &pMemDarwin->Core;
1223
1224 IPRT_DARWIN_RESTORE_EFL_AC();
1225 return VINF_SUCCESS;
1226 }
1227
1228// pMemDesc->complete();
1229// rc = VERR_NO_MEMORY;
1230// }
1231// else
1232// rc = RTErrConvertFromDarwinIO(IORet);
1233// pMemDesc->release();
1234// }
1235// else
1236// rc = VERR_MEMOBJ_INIT_FAILED;
1237 }
1238 else if (pv)
1239 rc = VERR_ADDRESS_TOO_BIG;
1240 else
1241 rc = VERR_MAP_FAILED;
1242 pMemMap->release();
1243 }
1244 else
1245 rc = VERR_MAP_FAILED;
1246 }
1247
1248 IPRT_DARWIN_RESTORE_EFL_AC();
1249 return rc;
1250}
1251
1252
1253DECLHIDDEN(int) rtR0MemObjNativeMapUser(PPRTR0MEMOBJINTERNAL ppMem, RTR0MEMOBJ pMemToMap, RTR3PTR R3PtrFixed, size_t uAlignment,
1254 unsigned fProt, RTR0PROCESS R0Process, size_t offSub, size_t cbSub, const char *pszTag)
1255{
1256 RT_NOREF(fProt);
1257
1258 /*
1259 * Check for unsupported things.
1260 */
1261 AssertReturn(R3PtrFixed == (RTR3PTR)-1, VERR_NOT_SUPPORTED);
1262 if (uAlignment > PAGE_SIZE)
1263 return VERR_NOT_SUPPORTED;
1264 Assert(!offSub || cbSub);
1265
1266 IPRT_DARWIN_SAVE_EFL_AC();
1267
1268 /*
1269 * Must have a memory descriptor.
1270 */
1271 int rc = VERR_INVALID_PARAMETER;
1272 PRTR0MEMOBJDARWIN pMemToMapDarwin = (PRTR0MEMOBJDARWIN)pMemToMap;
1273 if (pMemToMapDarwin->pMemDesc)
1274 {
1275#if MAC_OS_X_VERSION_MIN_REQUIRED >= 101000 /* The kIOMapPrefault option was added in 10.10.0. */
1276 IOMemoryMap *pMemMap = pMemToMapDarwin->pMemDesc->createMappingInTask((task_t)R0Process,
1277 0,
1278 kIOMapAnywhere | kIOMapDefaultCache | kIOMapPrefault,
1279 offSub,
1280 cbSub);
1281#elif MAC_OS_X_VERSION_MIN_REQUIRED >= 1050
1282 static uint32_t volatile s_fOptions = UINT32_MAX;
1283 uint32_t fOptions = s_fOptions;
1284 if (RT_UNLIKELY(fOptions == UINT32_MAX))
1285 s_fOptions = fOptions = version_major >= 14 ? 0x10000000 /*kIOMapPrefault*/ : 0; /* Since 10.10.0. */
1286 IOMemoryMap *pMemMap = pMemToMapDarwin->pMemDesc->createMappingInTask((task_t)R0Process,
1287 0,
1288 kIOMapAnywhere | kIOMapDefaultCache | fOptions,
1289 offSub,
1290 cbSub);
1291#else
1292 IOMemoryMap *pMemMap = pMemToMapDarwin->pMemDesc->map((task_t)R0Process,
1293 0,
1294 kIOMapAnywhere | kIOMapDefaultCache,
1295 offSub,
1296 cbSub);
1297#endif
1298 if (pMemMap)
1299 {
1300 IOVirtualAddress VirtAddr = pMemMap->getVirtualAddress();
1301 void *pv = (void *)(uintptr_t)VirtAddr;
1302 if ((uintptr_t)pv == VirtAddr && pv != NULL)
1303 {
1304 /*
1305 * Create the IPRT memory object.
1306 */
1307 PRTR0MEMOBJDARWIN pMemDarwin = (PRTR0MEMOBJDARWIN)rtR0MemObjNew(sizeof(*pMemDarwin), RTR0MEMOBJTYPE_MAPPING,
1308 pv, cbSub ? cbSub : pMemToMap->cb, pszTag);
1309 if (pMemDarwin)
1310 {
1311 pMemDarwin->Core.u.Mapping.R0Process = R0Process;
1312 pMemDarwin->pMemMap = pMemMap;
1313 *ppMem = &pMemDarwin->Core;
1314
1315 IPRT_DARWIN_RESTORE_EFL_AC();
1316 return VINF_SUCCESS;
1317 }
1318
1319 rc = VERR_NO_MEMORY;
1320 }
1321 else if (pv)
1322 rc = VERR_ADDRESS_TOO_BIG;
1323 else
1324 rc = VERR_MAP_FAILED;
1325 pMemMap->release();
1326 }
1327 else
1328 rc = VERR_MAP_FAILED;
1329 }
1330
1331 IPRT_DARWIN_RESTORE_EFL_AC();
1332 return rc;
1333}
1334
1335
1336/**
1337 * Worker for rtR0MemObjNativeProtect that's typically called in a different
1338 * context.
1339 */
1340static int rtR0MemObjNativeProtectWorker(PRTR0MEMOBJINTERNAL pMem, size_t offSub, size_t cbSub, uint32_t fProt)
1341{
1342 IPRT_DARWIN_SAVE_EFL_AC();
1343
1344 /* Get the map for the object. */
1345 vm_map_t pVmMap = rtR0MemObjDarwinGetMap(pMem);
1346 if (!pVmMap)
1347 {
1348 IPRT_DARWIN_RESTORE_EFL_AC();
1349 return VERR_NOT_SUPPORTED;
1350 }
1351
1352 /*
1353 * Convert the protection.
1354 */
1355 vm_prot_t fMachProt;
1356 switch (fProt)
1357 {
1358 case RTMEM_PROT_NONE:
1359 fMachProt = VM_PROT_NONE;
1360 break;
1361 case RTMEM_PROT_READ:
1362 fMachProt = VM_PROT_READ;
1363 break;
1364 case RTMEM_PROT_READ | RTMEM_PROT_WRITE:
1365 fMachProt = VM_PROT_READ | VM_PROT_WRITE;
1366 break;
1367 case RTMEM_PROT_READ | RTMEM_PROT_WRITE | RTMEM_PROT_EXEC:
1368 fMachProt = VM_PROT_READ | VM_PROT_WRITE | VM_PROT_EXECUTE;
1369 break;
1370 case RTMEM_PROT_WRITE:
1371 fMachProt = VM_PROT_WRITE | VM_PROT_READ; /* never write-only */
1372 break;
1373 case RTMEM_PROT_WRITE | RTMEM_PROT_EXEC:
1374 fMachProt = VM_PROT_WRITE | VM_PROT_EXECUTE | VM_PROT_READ; /* never write-only or execute-only */
1375 break;
1376 case RTMEM_PROT_EXEC:
1377 fMachProt = VM_PROT_EXECUTE | VM_PROT_READ; /* never execute-only */
1378 break;
1379 default:
1380 AssertFailedReturn(VERR_INVALID_PARAMETER);
1381 }
1382
1383 /*
1384 * Do the job.
1385 */
1386 vm_offset_t Start = (uintptr_t)pMem->pv + offSub;
1387 kern_return_t krc = vm_protect(pVmMap,
1388 Start,
1389 cbSub,
1390 false,
1391 fMachProt);
1392 if (krc != KERN_SUCCESS)
1393 {
1394 static int s_cComplaints = 0;
1395 if (s_cComplaints < 10)
1396 {
1397 s_cComplaints++;
1398 printf("rtR0MemObjNativeProtect: vm_protect(%p,%p,%p,false,%#x) -> %d\n",
1399 (void *)pVmMap, (void *)Start, (void *)cbSub, fMachProt, krc);
1400
1401 kern_return_t krc2;
1402 vm_offset_t pvReal = Start;
1403 vm_size_t cbReal = 0;
1404 mach_msg_type_number_t cInfo = VM_REGION_BASIC_INFO_COUNT;
1405 struct vm_region_basic_info Info;
1406 RT_ZERO(Info);
1407 krc2 = vm_region(pVmMap, &pvReal, &cbReal, VM_REGION_BASIC_INFO, (vm_region_info_t)&Info, &cInfo, NULL);
1408 printf("rtR0MemObjNativeProtect: basic info - krc2=%d pv=%p cb=%p prot=%#x max=%#x inh=%#x shr=%d rvd=%d off=%#x behavior=%#x wired=%#x\n",
1409 krc2, (void *)pvReal, (void *)cbReal, Info.protection, Info.max_protection, Info.inheritance,
1410 Info.shared, Info.reserved, Info.offset, Info.behavior, Info.user_wired_count);
1411 }
1412 IPRT_DARWIN_RESTORE_EFL_AC();
1413 return RTErrConvertFromDarwinKern(krc);
1414 }
1415
1416 /*
1417 * Touch the pages if they should be writable afterwards and accessible
1418 * from code which should never fault. vm_protect() may leave pages
1419 * temporarily write protected, possibly due to pmap no-upgrade rules?
1420 *
1421 * This is the same trick (or HACK ALERT if you like) as applied in
1422 * rtR0MemObjNativeMapKernel.
1423 */
1424 if ( pMem->enmType != RTR0MEMOBJTYPE_MAPPING
1425 || pMem->u.Mapping.R0Process == NIL_RTR0PROCESS)
1426 {
1427 if (fProt & RTMEM_PROT_WRITE)
1428 rtR0MemObjDarwinTouchPages((void *)Start, cbSub);
1429 /*
1430 * Sniff (read) read-only pages too, just to be sure.
1431 */
1432 else if (fProt & (RTMEM_PROT_READ | RTMEM_PROT_EXEC))
1433 rtR0MemObjDarwinSniffPages((void const *)Start, cbSub);
1434 }
1435
1436 IPRT_DARWIN_RESTORE_EFL_AC();
1437 return VINF_SUCCESS;
1438}
1439
1440
1441/**
1442 * rtR0MemObjNativeProtect kernel_task wrapper function.
1443 */
1444static void rtR0MemObjNativeProtectWorkerOnKernelThread(void *pvUser0, void *pvUser1)
1445{
1446 AssertPtr(pvUser0); Assert(pvUser1 == NULL); NOREF(pvUser1);
1447 RTR0MEMOBJDARWINPROTECTARGS *pArgs = (RTR0MEMOBJDARWINPROTECTARGS *)pvUser0;
1448 int rc = rtR0MemObjNativeProtectWorker(pArgs->pMem, pArgs->offSub, pArgs->cbSub, pArgs->fProt);
1449 rtR0MemObjDarwinSignalThreadWaitinOnTask(&pArgs->Core, rc);
1450}
1451
1452
1453DECLHIDDEN(int) rtR0MemObjNativeProtect(PRTR0MEMOBJINTERNAL pMem, size_t offSub, size_t cbSub, uint32_t fProt)
1454{
1455 /*
1456 * The code won't work right because process code signing properties leak
1457 * into kernel_map memory management. So, if the user process we're running
1458 * in has CS restrictions active, we cannot play around with the EXEC
1459 * protection because some vm_fault.c code thinks we're modifying the process
1460 * map or something.
1461 */
1462 int rc;
1463 if (rtR0MemObjDarwinGetMap(pMem) == kernel_map)
1464 {
1465 RTR0MEMOBJDARWINPROTECTARGS Args;
1466 Args.pMem = pMem;
1467 Args.offSub = offSub;
1468 Args.cbSub = cbSub;
1469 Args.fProt = fProt;
1470 rc = rtR0MemObjDarwinDoInKernelTaskThread(rtR0MemObjNativeProtectWorkerOnKernelThread, &Args.Core);
1471 }
1472 else
1473 rc = rtR0MemObjNativeProtectWorker(pMem, offSub, cbSub, fProt);
1474 return rc;
1475}
1476
1477
1478DECLHIDDEN(RTHCPHYS) rtR0MemObjNativeGetPagePhysAddr(PRTR0MEMOBJINTERNAL pMem, size_t iPage)
1479{
1480 RTHCPHYS PhysAddr;
1481 PRTR0MEMOBJDARWIN pMemDarwin = (PRTR0MEMOBJDARWIN)pMem;
1482 IPRT_DARWIN_SAVE_EFL_AC();
1483
1484#ifdef USE_VM_MAP_WIRE
1485 /*
1486 * Locked memory doesn't have a memory descriptor and
1487 * needs to be handled differently.
1488 */
1489 if (pMemDarwin->Core.enmType == RTR0MEMOBJTYPE_LOCK)
1490 {
1491 ppnum_t PgNo;
1492 if (pMemDarwin->Core.u.Lock.R0Process == NIL_RTR0PROCESS)
1493 PgNo = pmap_find_phys(kernel_pmap, (uintptr_t)pMemDarwin->Core.pv + iPage * PAGE_SIZE);
1494 else
1495 {
1496 /*
1497 * From what I can tell, Apple seems to have locked up all the
1498 * available interfaces that could help us obtain the pmap_t of a task
1499 * or vm_map_t.
1500 *
1501 * So, we'll have to figure out where in the vm_map_t structure it is
1502 * and read it ourselves. ASSUMING that kernel_pmap is pointed to by
1503 * kernel_map->pmap, we scan kernel_map to locate the structure offset.
1504 * Not nice, but it will hopefully do the job in a reliable manner...
1505 *
1506 * (get_task_pmap, get_map_pmap or vm_map_pmap is what we really need btw.)
1507 */
1508 static int s_offPmap = -1;
1509 if (RT_UNLIKELY(s_offPmap == -1))
1510 {
1511 pmap_t const *p = (pmap_t *)kernel_map;
1512 pmap_t const * const pEnd = p + 64;
1513 for (; p < pEnd; p++)
1514 if (*p == kernel_pmap)
1515 {
1516 s_offPmap = (uintptr_t)p - (uintptr_t)kernel_map;
1517 break;
1518 }
1519 AssertReturn(s_offPmap >= 0, NIL_RTHCPHYS);
1520 }
1521 pmap_t Pmap = *(pmap_t *)((uintptr_t)get_task_map((task_t)pMemDarwin->Core.u.Lock.R0Process) + s_offPmap);
1522 PgNo = pmap_find_phys(Pmap, (uintptr_t)pMemDarwin->Core.pv + iPage * PAGE_SIZE);
1523 }
1524
1525 IPRT_DARWIN_RESTORE_EFL_AC();
1526 AssertReturn(PgNo, NIL_RTHCPHYS);
1527 PhysAddr = (RTHCPHYS)PgNo << PAGE_SHIFT;
1528 Assert((PhysAddr >> PAGE_SHIFT) == PgNo);
1529 }
1530 else
1531#endif /* USE_VM_MAP_WIRE */
1532 {
1533 /*
1534 * Get the memory descriptor.
1535 */
1536 IOMemoryDescriptor *pMemDesc = pMemDarwin->pMemDesc;
1537 if (!pMemDesc)
1538 pMemDesc = pMemDarwin->pMemMap->getMemoryDescriptor();
1539 AssertReturn(pMemDesc, NIL_RTHCPHYS);
1540
1541 /*
1542 * If we've got a memory descriptor, use getPhysicalSegment64().
1543 */
1544#ifdef __LP64__
1545 addr64_t Addr = pMemDesc->getPhysicalSegment(iPage * PAGE_SIZE, NULL, kIOMemoryMapperNone);
1546#else
1547 addr64_t Addr = pMemDesc->getPhysicalSegment64(iPage * PAGE_SIZE, NULL);
1548#endif
1549 IPRT_DARWIN_RESTORE_EFL_AC();
1550 AssertMsgReturn(Addr, ("iPage=%u\n", iPage), NIL_RTHCPHYS);
1551 PhysAddr = Addr;
1552 AssertMsgReturn(PhysAddr == Addr, ("PhysAddr=%RHp Addr=%RX64\n", PhysAddr, (uint64_t)Addr), NIL_RTHCPHYS);
1553 }
1554
1555 return PhysAddr;
1556}
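
/* Editorial sketch (not part of the original file): how the native methods in this
   file are reached through the generic IPRT ring-0 API. The wrapper names and
   signatures below are assumptions based on iprt/memobj.h and are only meant to
   illustrate the flow (allocate -> query physical address -> free). */
#if 0 /* illustration only */
static int exampleUseMemObj(void)
{
    RTR0MEMOBJ hMemObj;
    int rc = RTR0MemObjAllocPage(&hMemObj, PAGE_SIZE, false /*fExecutable*/); /* -> rtR0MemObjNativeAllocPage */
    if (RT_SUCCESS(rc))
    {
        void    *pv       = RTR0MemObjAddress(hMemObj);
        RTHCPHYS PhysAddr = RTR0MemObjGetPagePhysAddr(hMemObj, 0 /*iPage*/);  /* -> rtR0MemObjNativeGetPagePhysAddr */
        NOREF(pv); NOREF(PhysAddr);
        rc = RTR0MemObjFree(hMemObj, false /*fFreeMappings*/);                /* -> rtR0MemObjNativeFree */
    }
    return rc;
}
#endif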
1557