VirtualBox

source: vbox/trunk/src/VBox/Runtime/r0drv/darwin/memobj-r0drv-darwin.cpp@ 91478

Last change on this file since 91478 was 91478, checked in by vboxsync, 3 years ago

IPRT/memobj: Added pszTag to rtR0MemObjNew.

  • Property svn:eol-style set to native
  • Property svn:keywords set to Id Revision
File size: 59.1 KB
1/* $Id: memobj-r0drv-darwin.cpp 91478 2021-09-29 23:36:54Z vboxsync $ */
2/** @file
3 * IPRT - Ring-0 Memory Objects, Darwin.
4 */
5
6/*
7 * Copyright (C) 2006-2020 Oracle Corporation
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 *
17 * The contents of this file may alternatively be used under the terms
18 * of the Common Development and Distribution License Version 1.0
19 * (CDDL) only, as it comes in the "COPYING.CDDL" file of the
20 * VirtualBox OSE distribution, in which case the provisions of the
21 * CDDL are applicable instead of those of the GPL.
22 *
23 * You may elect to license modified versions of this file under the
24 * terms and conditions of either the GPL or the CDDL or both.
25 */
26
27
28/*********************************************************************************************************************************
29* Header Files *
30*********************************************************************************************************************************/
31#define RTMEM_NO_WRAP_TO_EF_APIS /* circular dependency otherwise. */
32#include "the-darwin-kernel.h"
33#include "internal/iprt.h"
34#include <iprt/memobj.h>
35
36#include <iprt/asm.h>
37#if defined(RT_ARCH_AMD64) || defined(RT_ARCH_X86)
38# include <iprt/x86.h>
39# include <iprt/asm-amd64-x86.h>
40#endif
41#include <iprt/assert.h>
42#include <iprt/log.h>
43#include <iprt/mem.h>
44#include <iprt/param.h>
45#include <iprt/process.h>
46#include <iprt/semaphore.h>
47#include <iprt/string.h>
48#include <iprt/thread.h>
49#include "internal/memobj.h"
50
51
52/*********************************************************************************************************************************
53* Defined Constants And Macros *
54*********************************************************************************************************************************/
55#define MY_PRINTF(...) do { printf(__VA_ARGS__); kprintf(__VA_ARGS__); } while (0)
56
57/*#define USE_VM_MAP_WIRE - may re-enable later when non-mapped allocations are added. */
58
59
60/*********************************************************************************************************************************
61* Structures and Typedefs *
62*********************************************************************************************************************************/
63/**
64 * The Darwin version of the memory object structure.
65 */
66typedef struct RTR0MEMOBJDARWIN
67{
68 /** The core structure. */
69 RTR0MEMOBJINTERNAL Core;
70 /** Pointer to the memory descriptor created for allocated and locked memory. */
71 IOMemoryDescriptor *pMemDesc;
72 /** Pointer to the memory mapping object for mapped memory. */
73 IOMemoryMap *pMemMap;
74} RTR0MEMOBJDARWIN, *PRTR0MEMOBJDARWIN;
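/* Note: Core must remain the first member.  rtR0MemObjNew() hands out a single
   allocation of sizeof(RTR0MEMOBJDARWIN) and the rest of IPRT only ever sees the
   &pMemDarwin->Core pointer, so the (PRTR0MEMOBJDARWIN)pMem casts used throughout
   this file rely on the Core and object addresses being identical. */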
75
76/**
77 * Common thread_call_allocate/thread_call_enter argument package.
78 */
79typedef struct RTR0MEMOBJDARWINTHREADARGS
80{
81 int32_t volatile rc;
82 RTSEMEVENTMULTI hEvent;
83} RTR0MEMOBJDARWINTHREADARGS;
84
85
86/**
87 * Arguments for rtR0MemObjNativeAllockWorkerOnKernelThread.
88 */
89typedef struct RTR0MEMOBJDARWINALLOCARGS
90{
91 RTR0MEMOBJDARWINTHREADARGS Core;
92 PPRTR0MEMOBJINTERNAL ppMem;
93 size_t cb;
94 bool fExecutable;
95 bool fContiguous;
96 mach_vm_address_t PhysMask;
97 uint64_t MaxPhysAddr;
98 RTR0MEMOBJTYPE enmType;
99 size_t uAlignment;
100} RTR0MEMOBJDARWINALLOCARGS;
101
102/**
103 * Arguments for rtR0MemObjNativeProtectWorkerOnKernelThread.
104 */
105typedef struct RTR0MEMOBJDARWINPROTECTARGS
106{
107 RTR0MEMOBJDARWINTHREADARGS Core;
108 PRTR0MEMOBJINTERNAL pMem;
109 size_t offSub;
110 size_t cbSub;
111 uint32_t fProt;
112} RTR0MEMOBJDARWINPROTECTARGS;
113
114
115/*********************************************************************************************************************************
116* Internal Functions *
117*********************************************************************************************************************************/
118static void rtR0MemObjNativeAllockWorkerOnKernelThread(void *pvUser0, void *pvUser1);
119static int rtR0MemObjNativeProtectWorker(PRTR0MEMOBJINTERNAL pMem, size_t offSub, size_t cbSub, uint32_t fProt);
120static void rtR0MemObjNativeProtectWorkerOnKernelThread(void *pvUser0, void *pvUser1);
121
122
123/**
124 * Touch the pages to force the kernel to create or write-enable the page table
125 * entries.
126 *
127 * This is necessary since the kernel gets upset if we take a page fault when
128 * preemption is disabled and/or we own a simple lock (same thing). It has no
129 * problems with us disabling interrupts when taking the traps, weird stuff.
130 *
131 * (This is basically a way of invoking vm_fault on a range of pages.)
132 *
133 * @param pv Pointer to the first page.
134 * @param cb The number of bytes.
135 */
136static void rtR0MemObjDarwinTouchPages(void *pv, size_t cb)
137{
138 uint32_t volatile *pu32 = (uint32_t volatile *)pv;
139 for (;;)
140 {
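            /* On x86, a locked cmpxchg performs a write-intent access even when the compare
               fails, so this forces the kernel to provide a present, writable PTE for the page
               while only actually storing anything if the word already contains 0xdeadbeef. */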
141 ASMAtomicCmpXchgU32(pu32, 0xdeadbeef, 0xdeadbeef);
142 if (cb <= PAGE_SIZE)
143 break;
144 cb -= PAGE_SIZE;
145 pu32 += PAGE_SIZE / sizeof(uint32_t);
146 }
147}
148
149
150/**
151 * Read (sniff) every page in the range to make sure there are some page table
152 * entries backing it.
153 *
154 * This is just to be sure vm_protect didn't remove stuff without re-adding it
155 * if someone should try to write-protect something.
156 *
157 * @param pv Pointer to the first page.
158 * @param cb The number of bytes.
159 */
160static void rtR0MemObjDarwinSniffPages(void const *pv, size_t cb)
161{
162 uint32_t volatile *pu32 = (uint32_t volatile *)pv;
163 uint32_t volatile u32Counter = 0;
164 for (;;)
165 {
166 u32Counter += *pu32;
167
168 if (cb <= PAGE_SIZE)
169 break;
170 cb -= PAGE_SIZE;
171 pu32 += PAGE_SIZE / sizeof(uint32_t);
172 }
173}
174
175
176/**
177 * Gets the virtual memory map the specified object is mapped into.
178 *
179 * @returns VM map handle on success, NULL if no map.
180 * @param pMem The memory object.
181 */
182DECLINLINE(vm_map_t) rtR0MemObjDarwinGetMap(PRTR0MEMOBJINTERNAL pMem)
183{
184 switch (pMem->enmType)
185 {
186 case RTR0MEMOBJTYPE_PAGE:
187 case RTR0MEMOBJTYPE_LOW:
188 case RTR0MEMOBJTYPE_CONT:
189 return kernel_map;
190
191 case RTR0MEMOBJTYPE_PHYS:
192 case RTR0MEMOBJTYPE_PHYS_NC:
193 if (pMem->pv)
194 return kernel_map;
195 return NULL;
196
197 case RTR0MEMOBJTYPE_LOCK:
198 return pMem->u.Lock.R0Process == NIL_RTR0PROCESS
199 ? kernel_map
200 : get_task_map((task_t)pMem->u.Lock.R0Process);
201
202 case RTR0MEMOBJTYPE_RES_VIRT:
203 return pMem->u.ResVirt.R0Process == NIL_RTR0PROCESS
204 ? kernel_map
205 : get_task_map((task_t)pMem->u.ResVirt.R0Process);
206
207 case RTR0MEMOBJTYPE_MAPPING:
208 return pMem->u.Mapping.R0Process == NIL_RTR0PROCESS
209 ? kernel_map
210 : get_task_map((task_t)pMem->u.Mapping.R0Process);
211
212 default:
213 return NULL;
214 }
215}
216
217#if 0 /* not necessary after all*/
218/* My vm_map mockup. */
219struct my_vm_map
220{
221 struct { char pad[8]; } lock;
222 struct my_vm_map_header
223 {
224 struct vm_map_links
225 {
226 void *prev;
227 void *next;
228 vm_map_offset_t start;
229 vm_map_offset_t end;
230 } links;
231 int nentries;
232 boolean_t entries_pageable;
233 } hdr;
234 pmap_t pmap;
235 vm_map_size_t size;
236};
237
238
239/**
240 * Gets the minimum map address, this is similar to get_map_min.
241 *
242 * @returns The start address of the map.
243 * @param pMap The map.
244 */
245static vm_map_offset_t rtR0MemObjDarwinGetMapMin(vm_map_t pMap)
246{
247 /* lazy discovery of the correct offset. The Apple guys are a wonderfully secretive bunch. */
248 static int32_t volatile s_offAdjust = INT32_MAX;
249 int32_t off = s_offAdjust;
250 if (off == INT32_MAX)
251 {
252 for (off = 0; ; off += sizeof(pmap_t))
253 {
254 if (*(pmap_t *)((uint8_t *)kernel_map + off) == kernel_pmap)
255 break;
256 AssertReturn(off <= RT_MAX(RT_OFFSETOF(struct my_vm_map, pmap) * 4, 1024), 0x1000);
257 }
258 ASMAtomicWriteS32(&s_offAdjust, off - RT_OFFSETOF(struct my_vm_map, pmap));
259 }
260
261 /* calculate it. */
262 struct my_vm_map *pMyMap = (struct my_vm_map *)((uint8_t *)pMap + off);
263 return pMyMap->hdr.links.start;
264}
265#endif /* unused */
266
267#ifdef RT_STRICT
268# if 0 /* unused */
269
270/**
271 * Read from a physical page.
272 *
273 * @param HCPhys The address to start reading at.
274 * @param cb How many bytes to read.
275 * @param pvDst Where to put the bytes. This is zero'd on failure.
276 */
277static void rtR0MemObjDarwinReadPhys(RTHCPHYS HCPhys, size_t cb, void *pvDst)
278{
279 memset(pvDst, '\0', cb);
280
281 IOAddressRange aRanges[1] = { { (mach_vm_address_t)HCPhys, RT_ALIGN_Z(cb, PAGE_SIZE) } };
282 IOMemoryDescriptor *pMemDesc = IOMemoryDescriptor::withAddressRanges(&aRanges[0], RT_ELEMENTS(aRanges),
283 kIODirectionIn, NULL /*task*/);
284 if (pMemDesc)
285 {
286#if MAC_OS_X_VERSION_MIN_REQUIRED >= 1050
287 IOMemoryMap *pMemMap = pMemDesc->createMappingInTask(kernel_task, 0, kIOMapAnywhere | kIOMapDefaultCache);
288#else
289 IOMemoryMap *pMemMap = pMemDesc->map(kernel_task, 0, kIOMapAnywhere | kIOMapDefaultCache);
290#endif
291 if (pMemMap)
292 {
293 void const *pvSrc = (void const *)(uintptr_t)pMemMap->getVirtualAddress();
294 memcpy(pvDst, pvSrc, cb);
295 pMemMap->release();
296 }
297 else
298 MY_PRINTF("rtR0MemObjDarwinReadPhys: createMappingInTask failed; HCPhys=%llx\n", HCPhys);
299
300 pMemDesc->release();
301 }
302 else
303 MY_PRINTF("rtR0MemObjDarwinReadPhys: withAddressRanges failed; HCPhys=%llx\n", HCPhys);
304}
305
306
307/**
308 * Gets the PTE for a page.
309 *
310 * @returns the PTE.
311 * @param pvPage The virtual address to get the PTE for.
312 */
313static uint64_t rtR0MemObjDarwinGetPTE(void *pvPage)
314{
315 RTUINT64U u64;
316 RTCCUINTREG cr3 = ASMGetCR3();
317 RTCCUINTREG cr4 = ASMGetCR4();
318 bool fPAE = false;
319 bool fLMA = false;
320 if (cr4 & X86_CR4_PAE)
321 {
322 fPAE = true;
323 uint32_t fExtFeatures = ASMCpuId_EDX(0x80000001);
324 if (fExtFeatures & X86_CPUID_EXT_FEATURE_EDX_LONG_MODE)
325 {
326 uint64_t efer = ASMRdMsr(MSR_K6_EFER);
327 if (efer & MSR_K6_EFER_LMA)
328 fLMA = true;
329 }
330 }
331
332 if (fLMA)
333 {
334 /* PML4 */
335 rtR0MemObjDarwinReadPhys((cr3 & ~(RTCCUINTREG)PAGE_OFFSET_MASK) | (((uint64_t)(uintptr_t)pvPage >> X86_PML4_SHIFT) & X86_PML4_MASK) * 8, 8, &u64);
336 if (!(u64.u & X86_PML4E_P))
337 {
338 MY_PRINTF("rtR0MemObjDarwinGetPTE: %p -> PML4E !p\n", pvPage);
339 return 0;
340 }
341
342 /* PDPTR */
343 rtR0MemObjDarwinReadPhys((u64.u & ~(uint64_t)PAGE_OFFSET_MASK) | (((uintptr_t)pvPage >> X86_PDPT_SHIFT) & X86_PDPT_MASK_AMD64) * 8, 8, &u64);
344 if (!(u64.u & X86_PDPE_P))
345 {
346 MY_PRINTF("rtR0MemObjDarwinGetPTE: %p -> PDPTE !p\n", pvPage);
347 return 0;
348 }
349 if (u64.u & X86_PDPE_LM_PS)
350 return (u64.u & ~(uint64_t)(_1G -1)) | ((uintptr_t)pvPage & (_1G -1));
351
352 /* PD */
353 rtR0MemObjDarwinReadPhys((u64.u & ~(uint64_t)PAGE_OFFSET_MASK) | (((uintptr_t)pvPage >> X86_PD_PAE_SHIFT) & X86_PD_PAE_MASK) * 8, 8, &u64);
354 if (!(u64.u & X86_PDE_P))
355 {
356 MY_PRINTF("rtR0MemObjDarwinGetPTE: %p -> PDE !p\n", pvPage);
357 return 0;
358 }
359 if (u64.u & X86_PDE_PS)
360 return (u64.u & ~(uint64_t)(_2M -1)) | ((uintptr_t)pvPage & (_2M -1));
361
362 /* PT */
363 rtR0MemObjDarwinReadPhys((u64.u & ~(uint64_t)PAGE_OFFSET_MASK) | (((uintptr_t)pvPage >> X86_PT_PAE_SHIFT) & X86_PT_PAE_MASK) * 8, 8, &u64);
364 if (!(u64.u & X86_PTE_P))
365 {
366 MY_PRINTF("rtR0MemObjDarwinGetPTE: %p -> PTE !p\n", pvPage);
367 return 0;
368 }
369 return u64.u;
370 }
371
372 if (fPAE)
373 {
374 /* PDPTR */
375 rtR0MemObjDarwinReadPhys((u64.u & X86_CR3_PAE_PAGE_MASK) | (((uintptr_t)pvPage >> X86_PDPT_SHIFT) & X86_PDPT_MASK_PAE) * 8, 8, &u64);
376 if (!(u64.u & X86_PDE_P))
377 return 0;
378
379 /* PD */
380 rtR0MemObjDarwinReadPhys((u64.u & ~(uint64_t)PAGE_OFFSET_MASK) | (((uintptr_t)pvPage >> X86_PD_PAE_SHIFT) & X86_PD_PAE_MASK) * 8, 8, &u64);
381 if (!(u64.u & X86_PDE_P))
382 return 0;
383 if (u64.u & X86_PDE_PS)
384 return (u64.u & ~(uint64_t)(_2M -1)) | ((uintptr_t)pvPage & (_2M -1));
385
386 /* PT */
387 rtR0MemObjDarwinReadPhys((u64.u & ~(uint64_t)PAGE_OFFSET_MASK) | (((uintptr_t)pvPage >> X86_PT_PAE_SHIFT) & X86_PT_PAE_MASK) * 8, 8, &u64);
388 if (!(u64.u & X86_PTE_P))
389 return 0;
390 return u64.u;
391 }
392
393 /* PD */
394 rtR0MemObjDarwinReadPhys((u64.au32[0] & ~(uint32_t)PAGE_OFFSET_MASK) | (((uintptr_t)pvPage >> X86_PD_SHIFT) & X86_PD_MASK) * 4, 4, &u64);
395 if (!(u64.au32[0] & X86_PDE_P))
396 return 0;
397 if (u64.au32[0] & X86_PDE_PS)
398 return (u64.u & ~(uint64_t)(_2M -1)) | ((uintptr_t)pvPage & (_2M -1));
399
400 /* PT */
401 rtR0MemObjDarwinReadPhys((u64.au32[0] & ~(uint32_t)PAGE_OFFSET_MASK) | (((uintptr_t)pvPage >> X86_PT_SHIFT) & X86_PT_MASK) * 4, 4, &u64);
402 if (!(u64.au32[0] & X86_PTE_P))
403 return 0;
404 return u64.au32[0];
405
406 return 0;
407}
408
409# endif /* unused */
410#endif /* RT_STRICT */
411
412DECLHIDDEN(int) rtR0MemObjNativeFree(RTR0MEMOBJ pMem)
413{
414 PRTR0MEMOBJDARWIN pMemDarwin = (PRTR0MEMOBJDARWIN)pMem;
415 IPRT_DARWIN_SAVE_EFL_AC();
416
417 /*
418 * Release the IOMemoryDescriptor or/and IOMemoryMap associated with the object.
419 */
420 if (pMemDarwin->pMemDesc)
421 {
422 pMemDarwin->pMemDesc->complete();
423 pMemDarwin->pMemDesc->release();
424 pMemDarwin->pMemDesc = NULL;
425 }
426
427 if (pMemDarwin->pMemMap)
428 {
429 pMemDarwin->pMemMap->release();
430 pMemDarwin->pMemMap = NULL;
431 }
432
433 /*
434 * Release any memory that we've allocated or locked.
435 */
436 switch (pMemDarwin->Core.enmType)
437 {
438 case RTR0MEMOBJTYPE_LOW:
439 case RTR0MEMOBJTYPE_PAGE:
440 case RTR0MEMOBJTYPE_CONT:
441 break;
442
443 case RTR0MEMOBJTYPE_LOCK:
444 {
445#ifdef USE_VM_MAP_WIRE
446 vm_map_t Map = pMemDarwin->Core.u.Lock.R0Process != NIL_RTR0PROCESS
447 ? get_task_map((task_t)pMemDarwin->Core.u.Lock.R0Process)
448 : kernel_map;
449 kern_return_t kr = vm_map_unwire(Map,
450 (vm_map_offset_t)pMemDarwin->Core.pv,
451 (vm_map_offset_t)pMemDarwin->Core.pv + pMemDarwin->Core.cb,
452 0 /* not user */);
453 AssertRC(kr == KERN_SUCCESS); /** @todo don't ignore... */
454#endif
455 break;
456 }
457
458 case RTR0MEMOBJTYPE_PHYS:
459 /*if (pMemDarwin->Core.u.Phys.fAllocated)
460 IOFreePhysical(pMemDarwin->Core.u.Phys.PhysBase, pMemDarwin->Core.cb);*/
461 Assert(!pMemDarwin->Core.u.Phys.fAllocated);
462 break;
463
464 case RTR0MEMOBJTYPE_PHYS_NC:
465 AssertMsgFailed(("RTR0MEMOBJTYPE_PHYS_NC\n"));
466 IPRT_DARWIN_RESTORE_EFL_AC();
467 return VERR_INTERNAL_ERROR;
468
469 case RTR0MEMOBJTYPE_RES_VIRT:
470 AssertMsgFailed(("RTR0MEMOBJTYPE_RES_VIRT\n"));
471 IPRT_DARWIN_RESTORE_EFL_AC();
472 return VERR_INTERNAL_ERROR;
473
474 case RTR0MEMOBJTYPE_MAPPING:
475 /* nothing to do here. */
476 break;
477
478 default:
479 AssertMsgFailed(("enmType=%d\n", pMemDarwin->Core.enmType));
480 IPRT_DARWIN_RESTORE_EFL_AC();
481 return VERR_INTERNAL_ERROR;
482 }
483
484 IPRT_DARWIN_RESTORE_EFL_AC();
485 return VINF_SUCCESS;
486}
487
488
489/**
490 * This is a helper function that executes @a pfnWorker in the context of the
491 * kernel_task.
492 *
493 * @returns IPRT status code - result from pfnWorker or dispatching error.
494 * @param pfnWorker The function to call.
495 * @param pArgs The arguments to pass to the function.
496 */
497static int rtR0MemObjDarwinDoInKernelTaskThread(thread_call_func_t pfnWorker, RTR0MEMOBJDARWINTHREADARGS *pArgs)
498{
499 pArgs->rc = VERR_IPE_UNINITIALIZED_STATUS;
500 pArgs->hEvent = NIL_RTSEMEVENTMULTI;
501 int rc = RTSemEventMultiCreate(&pArgs->hEvent);
502 if (RT_SUCCESS(rc))
503 {
504 thread_call_t hCall = thread_call_allocate(pfnWorker, (void *)pArgs);
505 if (hCall)
506 {
507 boolean_t fRc = thread_call_enter(hCall);
508 AssertLogRel(fRc == FALSE);
509
510 rc = RTSemEventMultiWaitEx(pArgs->hEvent, RTSEMWAIT_FLAGS_INDEFINITE | RTSEMWAIT_FLAGS_UNINTERRUPTIBLE,
511 RT_INDEFINITE_WAIT);
512 AssertLogRelRC(rc);
513
514 rc = pArgs->rc;
515 thread_call_free(hCall);
516 }
517 else
518 rc = VERR_NO_MEMORY;
519 RTSemEventMultiDestroy(pArgs->hEvent);
520 }
521 return rc;
522}
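/* Rough sketch of the dispatch pattern (mirrors the callers further down in this file):
   embed RTR0MEMOBJDARWINTHREADARGS as the first member of an argument struct, fill it in,
   and let the worker report back via rtR0MemObjDarwinSignalThreadWaitinOnTask():

       RTR0MEMOBJDARWINPROTECTARGS Args;
       Args.pMem   = pMem;
       Args.offSub = offSub;
       Args.cbSub  = cbSub;
       Args.fProt  = fProt;
       int rc = rtR0MemObjDarwinDoInKernelTaskThread(rtR0MemObjNativeProtectWorkerOnKernelThread, &Args.Core);
 */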
523
524
525/**
526 * Signals result to thread waiting in rtR0MemObjDarwinDoInKernelTaskThread.
527 *
528 * @param pArgs The argument structure.
529 * @param rc The IPRT status code to signal.
530 */
531static void rtR0MemObjDarwinSignalThreadWaitinOnTask(RTR0MEMOBJDARWINTHREADARGS volatile *pArgs, int rc)
532{
533 if (ASMAtomicCmpXchgS32(&pArgs->rc, rc, VERR_IPE_UNINITIALIZED_STATUS))
534 {
535 rc = RTSemEventMultiSignal(pArgs->hEvent);
536 AssertLogRelRC(rc);
537 }
538}
539
540
541/**
542 * Kernel memory alloc worker that uses inTaskWithPhysicalMask.
543 *
544 * @returns IPRT status code.
545 * @retval VERR_ADDRESS_TOO_BIG try another way.
546 *
547 * @param ppMem Where to return the memory object.
548 * @param cb The page aligned memory size.
549 * @param fExecutable Whether the mapping needs to be executable.
550 * @param fContiguous Whether the backing memory needs to be contiguous.
551 * @param PhysMask The mask for the backing memory (i.e. range). Use 0 if
552 * you don't care that much or are speculating.
553 * @param MaxPhysAddr The max address to verify the result against. Use
554 * UINT64_MAX if it doesn't matter.
555 * @param enmType The object type.
556 * @param uAlignment The allocation alignment (in bytes).
557 * @param fOnKernelThread Set if we're already on the kernel thread.
558 */
559static int rtR0MemObjNativeAllocWorker(PPRTR0MEMOBJINTERNAL ppMem, size_t cb,
560 bool fExecutable, bool fContiguous,
561 mach_vm_address_t PhysMask, uint64_t MaxPhysAddr,
562 RTR0MEMOBJTYPE enmType, size_t uAlignment, bool fOnKernelThread)
563{
564 int rc;
565
566 /*
567 * Because of process code signing properties leaking into kernel space
568 * in XNU's vm_fault.c code, we have to defer allocations of exec memory to
569 * a thread running in the kernel_task to get consistent results here.
570 *
571 * Trouble strikes in vm_fault_enter() when cs_enforcement_enabled is determined
572 * to be true because the current process has the CS_ENFORCEMENT flag, the page flag
573 * vmp_cs_validated is clear, and the protection mask includes VM_PROT_EXECUTE
574 * (pmap_cs_enforced does not apply to macOS it seems). This test seems to go
575 * back to 10.5, though I'm not sure whether it's enabled for macOS that early
576 * on. Only VM_PROT_EXECUTE is problematic for kernel memory (though
577 * VM_PROT_WRITE on code signed pages is also problematic in theory). As long as
578 * kernel_task doesn't have CS_ENFORCEMENT enabled, we'll be fine switching to it.
579 */
580 if (!fExecutable || fOnKernelThread)
581 { /* likely */ }
582 else
583 {
584 RTR0MEMOBJDARWINALLOCARGS Args;
585 Args.ppMem = ppMem;
586 Args.cb = cb;
587 Args.fExecutable = fExecutable;
588 Args.fContiguous = fContiguous;
589 Args.PhysMask = PhysMask;
590 Args.MaxPhysAddr = MaxPhysAddr;
591 Args.enmType = enmType;
592 Args.uAlignment = uAlignment;
593 return rtR0MemObjDarwinDoInKernelTaskThread(rtR0MemObjNativeAllockWorkerOnKernelThread, &Args.Core);
594 }
595
596 /*
597 * Try inTaskWithPhysicalMask first, but since we don't quite trust that it
598 * actually respects the physical memory mask (10.5.x is certainly busted),
599 * we'll use rtR0MemObjNativeAllocCont as a fallback for dealing with that.
600 *
601 * The kIOMemoryKernelUserShared flag just forces the result to be page aligned.
602 *
603 * The kIOMemoryMapperNone flag is required since 10.8.2 (IOMMU changes?).
604 */
605
606 /* This is an old fudge from the snow leopard days: "Is it only on snow leopard?
607 Seen allocating memory for the VM structure, last page corrupted or
608 inaccessible." Made it only apply to snow leopard and older for now. */
609 size_t cbFudged = cb;
610 if (version_major >= 11 /* 11 = 10.7.x = Lion. */)
611 { /* likely */ }
612 else
613 cbFudged += PAGE_SIZE;
614
615 IOOptionBits fOptions = kIOMemoryKernelUserShared | kIODirectionInOut;
616 if (fContiguous)
617 {
618 fOptions |= kIOMemoryPhysicallyContiguous;
619 if ( version_major > 12
620 || (version_major == 12 && version_minor >= 2) /* 10.8.2 = Mountain Kitten */ )
621 fOptions |= kIOMemoryHostPhysicallyContiguous; /* (Just to make ourselves clear, in case the xnu code changes.) */
622 }
623 if (version_major >= 12 /* 12 = 10.8.x = Mountain Kitten */)
624 fOptions |= kIOMemoryMapperNone;
625
626#if __MAC_OS_X_VERSION_MAX_ALLOWED >= 1070 && 0 /* enable when/if necessary */
627 /* Paranoia: Don't misrepresent our intentions, we won't map kernel executable memory into ring-0. */
628 if (fExecutable && version_major >= 11 /* 10.7.x = Lion, as below */)
629 {
630 fOptions &= ~kIOMemoryKernelUserShared;
631 if (uAlignment < PAGE_SIZE)
632 uAlignment = PAGE_SIZE;
633 }
634#endif
635
636 /* The public initWithPhysicalMask virtual method appeared in 10.7.0, in
637 versions 10.5.0 up to 10.7.0 it was private, and 10.4.8-10.5.0 it was
638 x86 only and didn't have the alignment parameter (slot was different too). */
639 uint64_t uAlignmentActual = uAlignment;
640 IOBufferMemoryDescriptor *pMemDesc;
641#if __MAC_OS_X_VERSION_MAX_ALLOWED >= 1070
642 if (version_major >= 11 /* 11 = 10.7.x = Lion, could probably allow 10.5.0+ here if we really wanted to. */)
643 {
644 /* Starting with 10.6.x the physical mask is ignored if alignment is higher
645 than 1. The assumption seems to be that inTaskWithPhysicalMask() should
646 be used and the alignment inferred from the PhysMask argument. */
647 if (MaxPhysAddr != UINT64_MAX)
648 {
649 Assert(RT_ALIGN_64(PhysMask, uAlignment) == PhysMask);
650 uAlignmentActual = 1;
651 }
652
653 pMemDesc = new IOBufferMemoryDescriptor;
654 if (pMemDesc)
655 {
656 if (pMemDesc->initWithPhysicalMask(kernel_task, fOptions, cbFudged, uAlignmentActual, PhysMask))
657 { /* likely */ }
658 else
659 {
660 pMemDesc->release();
661 pMemDesc = NULL;
662 }
663 }
664 }
665 else
666#endif
667 pMemDesc = IOBufferMemoryDescriptor::inTaskWithPhysicalMask(kernel_task, fOptions, cbFudged, PhysMask);
668 if (pMemDesc)
669 {
670 IOReturn IORet = pMemDesc->prepare(kIODirectionInOut);
671 if (IORet == kIOReturnSuccess)
672 {
673 void *pv = pMemDesc->getBytesNoCopy(0, cbFudged);
674 if (pv)
675 {
676 /*
677 * Check if it's all below 4GB.
678 */
679 addr64_t AddrPrev = 0;
680 MaxPhysAddr &= ~(uint64_t)PAGE_OFFSET_MASK;
681 for (IOByteCount off = 0; off < cb; off += PAGE_SIZE)
682 {
683#ifdef __LP64__
684 addr64_t Addr = pMemDesc->getPhysicalSegment(off, NULL, kIOMemoryMapperNone);
685#else
686 addr64_t Addr = pMemDesc->getPhysicalSegment64(off, NULL);
687#endif
688 if ( Addr > MaxPhysAddr
689 || !Addr
690 || (Addr & PAGE_OFFSET_MASK)
691 || ( fContiguous
692 && off
693 && Addr != AddrPrev + PAGE_SIZE))
694 {
695 /* Buggy API, try allocate the memory another way. */
696 pMemDesc->complete();
697 pMemDesc->release();
698 if (PhysMask)
699 {
700 kprintf("rtR0MemObjNativeAllocWorker: off=%zx Addr=%llx AddrPrev=%llx MaxPhysAddr=%llx PhysMas=%llx fContiguous=%d fOptions=%#x - buggy API!\n",
701 (size_t)off, Addr, AddrPrev, MaxPhysAddr, PhysMask, fContiguous, fOptions);
702 LogRel(("rtR0MemObjNativeAllocWorker: off=%zx Addr=%llx AddrPrev=%llx MaxPhysAddr=%llx PhysMas=%llx fContiguous=%RTbool fOptions=%#x - buggy API!\n",
703 (size_t)off, Addr, AddrPrev, MaxPhysAddr, PhysMask, fContiguous, fOptions));
704 }
705 return VERR_ADDRESS_TOO_BIG;
706 }
707 AddrPrev = Addr;
708 }
709
710 /*
711 * Check that it's aligned correctly.
712 */
713 if ((uintptr_t)pv & (uAlignment - 1))
714 {
715 pMemDesc->complete();
716 pMemDesc->release();
717 if (PhysMask)
718 {
719 kprintf("rtR0MemObjNativeAllocWorker: pv=%p uAlignment=%#zx (MaxPhysAddr=%llx PhysMask=%llx fContiguous=%d fOptions=%#x) - buggy API!!\n",
720 pv, uAlignment, MaxPhysAddr, PhysMask, fContiguous, fOptions);
721 LogRel(("rtR0MemObjNativeAllocWorker: pv=%p uAlignment=%#zx (MaxPhysAddr=%llx PhysMask=%llx fContiguous=%RTbool fOptions=%#x) - buggy API!\n",
722 pv, uAlignment, MaxPhysAddr, PhysMask, fContiguous, fOptions));
723 }
724 return VERR_NOT_SUPPORTED;
725 }
726
727#ifdef RT_STRICT
728 /* check that the memory is actually mapped. */
729 //addr64_t Addr = pMemDesc->getPhysicalSegment64(0, NULL);
730 //printf("rtR0MemObjNativeAllocWorker: pv=%p %8llx %8llx\n", pv, rtR0MemObjDarwinGetPTE(pv), Addr);
731 RTTHREADPREEMPTSTATE State = RTTHREADPREEMPTSTATE_INITIALIZER;
732 RTThreadPreemptDisable(&State);
733 rtR0MemObjDarwinTouchPages(pv, cb);
734 RTThreadPreemptRestore(&State);
735#endif
736
737 /*
738 * Create the IPRT memory object.
739 */
740 PRTR0MEMOBJDARWIN pMemDarwin = (PRTR0MEMOBJDARWIN)rtR0MemObjNew(sizeof(*pMemDarwin), enmType, pv, cb, NULL);
741 if (pMemDarwin)
742 {
743 if (fContiguous)
744 {
745#ifdef __LP64__
746 addr64_t PhysBase64 = pMemDesc->getPhysicalSegment(0, NULL, kIOMemoryMapperNone);
747#else
748 addr64_t PhysBase64 = pMemDesc->getPhysicalSegment64(0, NULL);
749#endif
750 RTHCPHYS PhysBase = PhysBase64; Assert(PhysBase == PhysBase64);
751 if (enmType == RTR0MEMOBJTYPE_CONT)
752 pMemDarwin->Core.u.Cont.Phys = PhysBase;
753 else if (enmType == RTR0MEMOBJTYPE_PHYS)
754 pMemDarwin->Core.u.Phys.PhysBase = PhysBase;
755 else
756 AssertMsgFailed(("enmType=%d\n", enmType));
757 }
758
759 if (fExecutable)
760 {
761 rc = rtR0MemObjNativeProtectWorker(&pMemDarwin->Core, 0, cb,
762 RTMEM_PROT_READ | RTMEM_PROT_WRITE | RTMEM_PROT_EXEC);
763#ifdef RT_STRICT
764 if (RT_SUCCESS(rc))
765 {
766 /* check that the memory is actually mapped. */
767 RTTHREADPREEMPTSTATE State2 = RTTHREADPREEMPTSTATE_INITIALIZER;
768 RTThreadPreemptDisable(&State2);
769 rtR0MemObjDarwinTouchPages(pv, cb);
770 RTThreadPreemptRestore(&State2);
771 }
772#endif
773 /* Bug 6226: Ignore KERN_PROTECTION_FAILURE on Leopard and older. */
774 if ( rc == VERR_PERMISSION_DENIED
775 && version_major <= 10 /* 10 = 10.6.x = Snow Leopard. */)
776 rc = VINF_SUCCESS;
777 }
778 else
779 rc = VINF_SUCCESS;
780 if (RT_SUCCESS(rc))
781 {
782 pMemDarwin->pMemDesc = pMemDesc;
783 *ppMem = &pMemDarwin->Core;
784 return VINF_SUCCESS;
785 }
786
787 rtR0MemObjDelete(&pMemDarwin->Core);
788 }
789
790 if (enmType == RTR0MEMOBJTYPE_PHYS_NC)
791 rc = VERR_NO_PHYS_MEMORY;
792 else if (enmType == RTR0MEMOBJTYPE_LOW)
793 rc = VERR_NO_LOW_MEMORY;
794 else if (enmType == RTR0MEMOBJTYPE_CONT)
795 rc = VERR_NO_CONT_MEMORY;
796 else
797 rc = VERR_NO_MEMORY;
798 }
799 else
800 rc = VERR_MEMOBJ_INIT_FAILED;
801
802 pMemDesc->complete();
803 }
804 else
805 rc = RTErrConvertFromDarwinIO(IORet);
806 pMemDesc->release();
807 }
808 else
809 rc = VERR_MEMOBJ_INIT_FAILED;
810 Assert(rc != VERR_ADDRESS_TOO_BIG);
811 return rc;
812}
813
814
815/**
816 * rtR0MemObjNativeAllocWorker kernel_task wrapper function.
817 */
818static void rtR0MemObjNativeAllockWorkerOnKernelThread(void *pvUser0, void *pvUser1)
819{
820 AssertPtr(pvUser0); Assert(pvUser1 == NULL); NOREF(pvUser1);
821 RTR0MEMOBJDARWINALLOCARGS volatile *pArgs = (RTR0MEMOBJDARWINALLOCARGS volatile *)pvUser0;
822 int rc = rtR0MemObjNativeAllocWorker(pArgs->ppMem, pArgs->cb, pArgs->fExecutable, pArgs->fContiguous, pArgs->PhysMask,
823 pArgs->MaxPhysAddr, pArgs->enmType, pArgs->uAlignment, true /*fOnKernelThread*/);
824 rtR0MemObjDarwinSignalThreadWaitinOnTask(&pArgs->Core, rc);
825}
826
827
828DECLHIDDEN(int) rtR0MemObjNativeAllocPage(PPRTR0MEMOBJINTERNAL ppMem, size_t cb, bool fExecutable)
829{
830 IPRT_DARWIN_SAVE_EFL_AC();
831
832 int rc = rtR0MemObjNativeAllocWorker(ppMem, cb, fExecutable, false /* fContiguous */, 0 /* PhysMask */, UINT64_MAX,
833 RTR0MEMOBJTYPE_PAGE, PAGE_SIZE, false /*fOnKernelThread*/);
834
835 IPRT_DARWIN_RESTORE_EFL_AC();
836 return rc;
837}
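/* Caller-side sketch, assuming the public ring-0 API from iprt/memobj.h which this
   native backend implements (illustrative only, not code from this file):

       RTR0MEMOBJ hMemObj;
       int rc = RTR0MemObjAllocPage(&hMemObj, _64K, false /*fExecutable*/);
       if (RT_SUCCESS(rc))
       {
           void    *pv   = RTR0MemObjAddress(hMemObj);            // kernel mapping
           RTHCPHYS Phys = RTR0MemObjGetPagePhysAddr(hMemObj, 0); // physical address of page 0
           memset(pv, 0, _64K);
           NOREF(Phys);
           RTR0MemObjFree(hMemObj, false /*fFreeMappings*/);
       }
 */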
838
839
840DECLHIDDEN(int) rtR0MemObjNativeAllocLarge(PPRTR0MEMOBJINTERNAL ppMem, size_t cb, size_t cbLargePage, uint32_t fFlags,
841 const char *pszTag)
842{
843 return rtR0MemObjFallbackAllocLarge(ppMem, cb, cbLargePage, fFlags, pszTag);
844}
845
846
847DECLHIDDEN(int) rtR0MemObjNativeAllocLow(PPRTR0MEMOBJINTERNAL ppMem, size_t cb, bool fExecutable)
848{
849 IPRT_DARWIN_SAVE_EFL_AC();
850
851 /*
852 * Try IOMallocPhysical/IOMallocAligned first.
853 * Then try optimistically without a physical address mask, which will always
854 * end up using IOMallocAligned.
855 *
856 * (See bug comment in the worker and IOBufferMemoryDescriptor::initWithPhysicalMask.)
857 */
858 int rc = rtR0MemObjNativeAllocWorker(ppMem, cb, fExecutable, false /* fContiguous */, ~(uint32_t)PAGE_OFFSET_MASK,
859 _4G - PAGE_SIZE, RTR0MEMOBJTYPE_LOW, PAGE_SIZE, false /*fOnKernelThread*/);
860 if (rc == VERR_ADDRESS_TOO_BIG)
861 rc = rtR0MemObjNativeAllocWorker(ppMem, cb, fExecutable, false /* fContiguous */, 0 /* PhysMask */,
862 _4G - PAGE_SIZE, RTR0MEMOBJTYPE_LOW, PAGE_SIZE, false /*fOnKernelThread*/);
863
864 IPRT_DARWIN_RESTORE_EFL_AC();
865 return rc;
866}
867
868
869DECLHIDDEN(int) rtR0MemObjNativeAllocCont(PPRTR0MEMOBJINTERNAL ppMem, size_t cb, bool fExecutable)
870{
871 IPRT_DARWIN_SAVE_EFL_AC();
872
873 int rc = rtR0MemObjNativeAllocWorker(ppMem, cb, fExecutable, true /* fContiguous */,
874 ~(uint32_t)PAGE_OFFSET_MASK, _4G - PAGE_SIZE,
875 RTR0MEMOBJTYPE_CONT, PAGE_SIZE, false /*fOnKernelThread*/);
876
877 /*
878 * Workaround for bogus IOKernelAllocateContiguous behavior, just in case.
879 * cb <= PAGE_SIZE allocations take a different path, using a different allocator.
880 */
881 if (RT_FAILURE(rc) && cb <= PAGE_SIZE)
882 rc = rtR0MemObjNativeAllocWorker(ppMem, cb + PAGE_SIZE, fExecutable, true /* fContiguous */,
883 ~(uint32_t)PAGE_OFFSET_MASK, _4G - PAGE_SIZE,
884 RTR0MEMOBJTYPE_CONT, PAGE_SIZE, false /*fOnKernelThread*/);
885 IPRT_DARWIN_RESTORE_EFL_AC();
886 return rc;
887}
888
889
890DECLHIDDEN(int) rtR0MemObjNativeAllocPhys(PPRTR0MEMOBJINTERNAL ppMem, size_t cb, RTHCPHYS PhysHighest, size_t uAlignment)
891{
892 if (uAlignment != PAGE_SIZE)
893 {
894 /* See rtR0MemObjNativeAllocWorker: */
895 if (version_major < 9 /* 9 = 10.5.x = Leopard */)
896 return VERR_NOT_SUPPORTED;
897 }
898
899 IPRT_DARWIN_SAVE_EFL_AC();
900
901 /*
902 * Translate the PhysHighest address into a mask.
903 */
904 int rc;
905 if (PhysHighest == NIL_RTHCPHYS)
906 rc = rtR0MemObjNativeAllocWorker(ppMem, cb, false /* fExecutable */, true /* fContiguous */,
907 uAlignment <= PAGE_SIZE ? 0 : ~(mach_vm_address_t)(uAlignment - 1) /* PhysMask*/,
908 UINT64_MAX, RTR0MEMOBJTYPE_PHYS, uAlignment, false /*fOnKernelThread*/);
909 else
910 {
911 mach_vm_address_t PhysMask = 0;
912 PhysMask = ~(mach_vm_address_t)0;
913 while (PhysMask > (PhysHighest | PAGE_OFFSET_MASK))
914 PhysMask >>= 1;
915 AssertReturn(PhysMask + 1 >= cb, VERR_INVALID_PARAMETER);
916 PhysMask &= ~(mach_vm_address_t)(uAlignment - 1);
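        /* Worked example (illustrative): for PhysHighest = _4G - 1 the loop above leaves
           PhysMask = 0xFFFFFFFF, the assertion checks that cb fits below that limit, and
           with uAlignment = PAGE_SIZE the final mask becomes 0xFFFFF000, i.e. "any
           page-aligned physical address below 4 GiB" in IOBufferMemoryDescriptor terms. */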
917
918 rc = rtR0MemObjNativeAllocWorker(ppMem, cb, false /* fExecutable */, true /* fContiguous */,
919 PhysMask, PhysHighest, RTR0MEMOBJTYPE_PHYS, uAlignment, false /*fOnKernelThread*/);
920 }
921
922 IPRT_DARWIN_RESTORE_EFL_AC();
923 return rc;
924}
925
926
927DECLHIDDEN(int) rtR0MemObjNativeAllocPhysNC(PPRTR0MEMOBJINTERNAL ppMem, size_t cb, RTHCPHYS PhysHighest)
928{
929 /** @todo rtR0MemObjNativeAllocPhys / darwin.
930 * This might be a bit problematic and may very well require having to create our own
931 * object which we populate with pages but without mapping it into any address space.
932 * Estimate is 2-3 days.
933 */
934 RT_NOREF(ppMem, cb, PhysHighest);
935 return VERR_NOT_SUPPORTED;
936}
937
938
939DECLHIDDEN(int) rtR0MemObjNativeEnterPhys(PPRTR0MEMOBJINTERNAL ppMem, RTHCPHYS Phys, size_t cb, uint32_t uCachePolicy)
940{
941 AssertReturn(uCachePolicy == RTMEM_CACHE_POLICY_DONT_CARE, VERR_NOT_SUPPORTED);
942 IPRT_DARWIN_SAVE_EFL_AC();
943
944 /*
945 * Create a descriptor for it (the validation is always true on intel macs, but
946 * as it doesn't harm us we keep it in).
947 */
948 int rc = VERR_ADDRESS_TOO_BIG;
949 IOAddressRange aRanges[1] = { { Phys, cb } };
950 if ( aRanges[0].address == Phys
951 && aRanges[0].length == cb)
952 {
953 IOMemoryDescriptor *pMemDesc = IOMemoryDescriptor::withAddressRanges(&aRanges[0], RT_ELEMENTS(aRanges),
954 kIODirectionInOut, NULL /*task*/);
955 if (pMemDesc)
956 {
957#ifdef __LP64__
958 Assert(Phys == pMemDesc->getPhysicalSegment(0, NULL, kIOMemoryMapperNone));
959#else
960 Assert(Phys == pMemDesc->getPhysicalSegment64(0, NULL));
961#endif
962
963 /*
964 * Create the IPRT memory object.
965 */
966 PRTR0MEMOBJDARWIN pMemDarwin = (PRTR0MEMOBJDARWIN)rtR0MemObjNew(sizeof(*pMemDarwin), RTR0MEMOBJTYPE_PHYS, NULL, cb, NULL);
967 if (pMemDarwin)
968 {
969 pMemDarwin->Core.u.Phys.PhysBase = Phys;
970 pMemDarwin->Core.u.Phys.fAllocated = false;
971 pMemDarwin->Core.u.Phys.uCachePolicy = uCachePolicy;
972 pMemDarwin->pMemDesc = pMemDesc;
973 *ppMem = &pMemDarwin->Core;
974 IPRT_DARWIN_RESTORE_EFL_AC();
975 return VINF_SUCCESS;
976 }
977
978 rc = VERR_NO_MEMORY;
979 pMemDesc->release();
980 }
981 else
982 rc = VERR_MEMOBJ_INIT_FAILED;
983 }
984 else
985 AssertMsgFailed(("%#llx %llx\n", (unsigned long long)Phys, (unsigned long long)cb));
986 IPRT_DARWIN_RESTORE_EFL_AC();
987 return rc;
988}
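/* Caller-side sketch, assuming the public RTR0MemObjEnterPhys() from iprt/memobj.h
   (illustrative; the physical address below is just an example MMIO range):

       RTR0MEMOBJ hMemObj;
       int rc = RTR0MemObjEnterPhys(&hMemObj, UINT64_C(0xfee00000), PAGE_SIZE,
                                    RTMEM_CACHE_POLICY_DONT_CARE);
       if (RT_SUCCESS(rc))
           RTR0MemObjFree(hMemObj, true /*fFreeMappings*/);
 */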
989
990
991/**
992 * Internal worker for locking down pages.
993 *
994 * @return IPRT status code.
995 *
996 * @param ppMem Where to store the memory object pointer.
997 * @param pv First page.
998 * @param cb Number of bytes.
999 * @param fAccess The desired access, a combination of RTMEM_PROT_READ
1000 * and RTMEM_PROT_WRITE.
1001 * @param Task The task \a pv and \a cb refers to.
1002 */
1003static int rtR0MemObjNativeLock(PPRTR0MEMOBJINTERNAL ppMem, void *pv, size_t cb, uint32_t fAccess, task_t Task)
1004{
1005 IPRT_DARWIN_SAVE_EFL_AC();
1006 NOREF(fAccess);
1007#ifdef USE_VM_MAP_WIRE
1008 vm_map_t Map = get_task_map(Task);
1009 Assert(Map);
1010
1011 /*
1012 * First try lock the memory.
1013 */
1014 int rc = VERR_LOCK_FAILED;
1015 kern_return_t kr = vm_map_wire(get_task_map(Task),
1016 (vm_map_offset_t)pv,
1017 (vm_map_offset_t)pv + cb,
1018 VM_PROT_DEFAULT,
1019 0 /* not user */);
1020 if (kr == KERN_SUCCESS)
1021 {
1022 /*
1023 * Create the IPRT memory object.
1024 */
1025 PRTR0MEMOBJDARWIN pMemDarwin = (PRTR0MEMOBJDARWIN)rtR0MemObjNew(sizeof(*pMemDarwin), RTR0MEMOBJTYPE_LOCK, pv, cb, NULL);
1026 if (pMemDarwin)
1027 {
1028 pMemDarwin->Core.u.Lock.R0Process = (RTR0PROCESS)Task;
1029 *ppMem = &pMemDarwin->Core;
1030
1031 IPRT_DARWIN_RESTORE_EFL_AC();
1032 return VINF_SUCCESS;
1033 }
1034
1035 kr = vm_map_unwire(get_task_map(Task), (vm_map_offset_t)pv, (vm_map_offset_t)pv + cb, 0 /* not user */);
1036 Assert(kr == KERN_SUCCESS);
1037 rc = VERR_NO_MEMORY;
1038 }
1039
1040#else
1041
1042 /*
1043 * Create a descriptor and try lock it (prepare).
1044 */
1045 int rc = VERR_MEMOBJ_INIT_FAILED;
1046 IOMemoryDescriptor *pMemDesc = IOMemoryDescriptor::withAddressRange((vm_address_t)pv, cb, kIODirectionInOut, Task);
1047 if (pMemDesc)
1048 {
1049 IOReturn IORet = pMemDesc->prepare(kIODirectionInOut);
1050 if (IORet == kIOReturnSuccess)
1051 {
1052 /*
1053 * Create the IPRT memory object.
1054 */
1055 PRTR0MEMOBJDARWIN pMemDarwin = (PRTR0MEMOBJDARWIN)rtR0MemObjNew(sizeof(*pMemDarwin), RTR0MEMOBJTYPE_LOCK, pv, cb, NULL);
1056 if (pMemDarwin)
1057 {
1058 pMemDarwin->Core.u.Lock.R0Process = (RTR0PROCESS)Task;
1059 pMemDarwin->pMemDesc = pMemDesc;
1060 *ppMem = &pMemDarwin->Core;
1061
1062 IPRT_DARWIN_RESTORE_EFL_AC();
1063 return VINF_SUCCESS;
1064 }
1065
1066 pMemDesc->complete();
1067 rc = VERR_NO_MEMORY;
1068 }
1069 else
1070 rc = VERR_LOCK_FAILED;
1071 pMemDesc->release();
1072 }
1073#endif
1074 IPRT_DARWIN_RESTORE_EFL_AC();
1075 return rc;
1076}
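/* Caller-side sketch, assuming the public RTR0MemObjLockUser() wrapper that funnels into
   this worker (R3Ptr and cbBuf stand in for a user-mode buffer supplied by the caller):

       RTR0MEMOBJ hLock;
       int rc = RTR0MemObjLockUser(&hLock, R3Ptr, cbBuf,
                                   RTMEM_PROT_READ | RTMEM_PROT_WRITE, RTR0ProcHandleSelf());
       if (RT_SUCCESS(rc))
       {
           // pages are now wired; e.g. query them via RTR0MemObjGetPagePhysAddr()
           RTR0MemObjFree(hLock, false /*fFreeMappings*/);
       }
 */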
1077
1078
1079DECLHIDDEN(int) rtR0MemObjNativeLockUser(PPRTR0MEMOBJINTERNAL ppMem, RTR3PTR R3Ptr, size_t cb, uint32_t fAccess, RTR0PROCESS R0Process)
1080{
1081 return rtR0MemObjNativeLock(ppMem, (void *)R3Ptr, cb, fAccess, (task_t)R0Process);
1082}
1083
1084
1085DECLHIDDEN(int) rtR0MemObjNativeLockKernel(PPRTR0MEMOBJINTERNAL ppMem, void *pv, size_t cb, uint32_t fAccess)
1086{
1087 return rtR0MemObjNativeLock(ppMem, pv, cb, fAccess, kernel_task);
1088}
1089
1090
1091DECLHIDDEN(int) rtR0MemObjNativeReserveKernel(PPRTR0MEMOBJINTERNAL ppMem, void *pvFixed, size_t cb, size_t uAlignment)
1092{
1093 RT_NOREF(ppMem, pvFixed, cb, uAlignment);
1094 return VERR_NOT_SUPPORTED;
1095}
1096
1097
1098DECLHIDDEN(int) rtR0MemObjNativeReserveUser(PPRTR0MEMOBJINTERNAL ppMem, RTR3PTR R3PtrFixed, size_t cb, size_t uAlignment, RTR0PROCESS R0Process)
1099{
1100 RT_NOREF(ppMem, R3PtrFixed, cb, uAlignment, R0Process);
1101 return VERR_NOT_SUPPORTED;
1102}
1103
1104
1105DECLHIDDEN(int) rtR0MemObjNativeMapKernel(PPRTR0MEMOBJINTERNAL ppMem, RTR0MEMOBJ pMemToMap, void *pvFixed, size_t uAlignment,
1106 unsigned fProt, size_t offSub, size_t cbSub)
1107{
1108 RT_NOREF(fProt);
1109 AssertReturn(pvFixed == (void *)-1, VERR_NOT_SUPPORTED);
1110
1111 /*
1112 * Check that the specified alignment is supported.
1113 */
1114 if (uAlignment > PAGE_SIZE)
1115 return VERR_NOT_SUPPORTED;
1116 Assert(!offSub || cbSub);
1117
1118 IPRT_DARWIN_SAVE_EFL_AC();
1119
1120 /*
1121 * Must have a memory descriptor that we can map.
1122 */
1123 int rc = VERR_INVALID_PARAMETER;
1124 PRTR0MEMOBJDARWIN pMemToMapDarwin = (PRTR0MEMOBJDARWIN)pMemToMap;
1125 if (pMemToMapDarwin->pMemDesc)
1126 {
1127 /* The kIOMapPrefault option was added in 10.10.0; it causes PTEs to be populated with
1128 INTEL_PTE_WIRED set, just like we desire (see further down). However, till
1129 10.13.0 it was not available for use on kernel mappings. Oh, fudge. */
1130#if MAC_OS_X_VERSION_MIN_REQUIRED >= 1050
1131 static uint32_t volatile s_fOptions = UINT32_MAX;
1132 uint32_t fOptions = s_fOptions;
1133 if (RT_UNLIKELY(fOptions == UINT32_MAX))
1134 s_fOptions = fOptions = version_major >= 17 ? 0x10000000 /*kIOMapPrefault*/ : 0; /* Since 10.13.0 (High Sierra). */
1135
1136 IOMemoryMap *pMemMap = pMemToMapDarwin->pMemDesc->createMappingInTask(kernel_task,
1137 0,
1138 kIOMapAnywhere | kIOMapDefaultCache | fOptions,
1139 offSub,
1140 cbSub);
1141#else
1142 IOMemoryMap *pMemMap = pMemToMapDarwin->pMemDesc->map(kernel_task,
1143 0,
1144 kIOMapAnywhere | kIOMapDefaultCache,
1145 offSub,
1146 cbSub);
1147#endif
1148 if (pMemMap)
1149 {
1150 IOVirtualAddress VirtAddr = pMemMap->getVirtualAddress();
1151 void *pv = (void *)(uintptr_t)VirtAddr;
1152 if ((uintptr_t)pv == VirtAddr && pv != NULL)
1153 {
1154//#ifdef __LP64__
1155// addr64_t Addr = pMemToMapDarwin->pMemDesc->getPhysicalSegment(offSub, NULL, kIOMemoryMapperNone);
1156//#else
1157// addr64_t Addr = pMemToMapDarwin->pMemDesc->getPhysicalSegment64(offSub, NULL);
1158//#endif
1159// MY_PRINTF("pv=%p: %8llx %8llx\n", pv, rtR0MemObjDarwinGetPTE(pv), Addr);
1160
1161// /*
1162// * Explicitly lock it so that we're sure it is present and that
1163// * its PTEs cannot be recycled.
1164// * Note! withAddressRange() doesn't work as it adds kIOMemoryTypeVirtual64
1165// * to the options which causes prepare() to not wire the pages.
1166// * This is probably a bug.
1167// */
1168// IOAddressRange Range = { (mach_vm_address_t)pv, cbSub };
1169// IOMemoryDescriptor *pMemDesc = IOMemoryDescriptor::withOptions(&Range,
1170// 1 /* count */,
1171// 0 /* offset */,
1172// kernel_task,
1173// kIODirectionInOut | kIOMemoryTypeVirtual,
1174// kIOMapperSystem);
1175// if (pMemDesc)
1176// {
1177// IOReturn IORet = pMemDesc->prepare(kIODirectionInOut);
1178// if (IORet == kIOReturnSuccess)
1179// {
1180 /* HACK ALERT! On kernels older than 10.10 (xnu version 14), we need to fault in
1181 the pages here so they can safely be accessed from inside simple
1182 locks and when preemption is disabled (no page-ins allowed).
1183 Note! This touching does not cause INTEL_PTE_WIRED (bit 10) to be set as we go
1184 thru general #PF and vm_fault doesn't figure it should be wired or something. */
1185 rtR0MemObjDarwinTouchPages(pv, cbSub ? cbSub : pMemToMap->cb);
1186 /** @todo First, the memory should've been mapped by now, and second, it
1187 * should have the wired attribute in the PTE (bit 10). Neither seems to
1188 * be the case. The disabled locking code doesn't make any difference,
1189 * which is extremely odd, and breaks rtR0MemObjNativeGetPagePhysAddr
1190 * (getPhysicalSegment64 -> 64 for the lock descriptor). */
1191//#ifdef __LP64__
1192// addr64_t Addr2 = pMemToMapDarwin->pMemDesc->getPhysicalSegment(offSub, NULL, kIOMemoryMapperNone);
1193//#else
1194// addr64_t Addr2 = pMemToMapDarwin->pMemDesc->getPhysicalSegment64(offSub, NULL);
1195//#endif
1196// MY_PRINTF("pv=%p: %8llx %8llx (%d)\n", pv, rtR0MemObjDarwinGetPTE(pv), Addr2, 2);
1197
1198 /*
1199 * Create the IPRT memory object.
1200 */
1201 PRTR0MEMOBJDARWIN pMemDarwin = (PRTR0MEMOBJDARWIN)rtR0MemObjNew(sizeof(*pMemDarwin), RTR0MEMOBJTYPE_MAPPING,
1202 pv, cbSub ? cbSub : pMemToMap->cb, NULL);
1203 if (pMemDarwin)
1204 {
1205 pMemDarwin->Core.u.Mapping.R0Process = NIL_RTR0PROCESS;
1206 pMemDarwin->pMemMap = pMemMap;
1207// pMemDarwin->pMemDesc = pMemDesc;
1208 *ppMem = &pMemDarwin->Core;
1209
1210 IPRT_DARWIN_RESTORE_EFL_AC();
1211 return VINF_SUCCESS;
1212 }
1213
1214// pMemDesc->complete();
1215// rc = VERR_NO_MEMORY;
1216// }
1217// else
1218// rc = RTErrConvertFromDarwinIO(IORet);
1219// pMemDesc->release();
1220// }
1221// else
1222// rc = VERR_MEMOBJ_INIT_FAILED;
1223 }
1224 else if (pv)
1225 rc = VERR_ADDRESS_TOO_BIG;
1226 else
1227 rc = VERR_MAP_FAILED;
1228 pMemMap->release();
1229 }
1230 else
1231 rc = VERR_MAP_FAILED;
1232 }
1233
1234 IPRT_DARWIN_RESTORE_EFL_AC();
1235 return rc;
1236}
1237
1238
1239DECLHIDDEN(int) rtR0MemObjNativeMapUser(PPRTR0MEMOBJINTERNAL ppMem, RTR0MEMOBJ pMemToMap, RTR3PTR R3PtrFixed, size_t uAlignment,
1240 unsigned fProt, RTR0PROCESS R0Process, size_t offSub, size_t cbSub)
1241{
1242 RT_NOREF(fProt);
1243
1244 /*
1245 * Check for unsupported things.
1246 */
1247 AssertReturn(R3PtrFixed == (RTR3PTR)-1, VERR_NOT_SUPPORTED);
1248 if (uAlignment > PAGE_SIZE)
1249 return VERR_NOT_SUPPORTED;
1250 Assert(!offSub || cbSub);
1251
1252 IPRT_DARWIN_SAVE_EFL_AC();
1253
1254 /*
1255 * Must have a memory descriptor.
1256 */
1257 int rc = VERR_INVALID_PARAMETER;
1258 PRTR0MEMOBJDARWIN pMemToMapDarwin = (PRTR0MEMOBJDARWIN)pMemToMap;
1259 if (pMemToMapDarwin->pMemDesc)
1260 {
1261#if MAC_OS_X_VERSION_MIN_REQUIRED >= 101000 /* The kIOMapPrefault option was added in 10.10.0. */
1262 IOMemoryMap *pMemMap = pMemToMapDarwin->pMemDesc->createMappingInTask((task_t)R0Process,
1263 0,
1264 kIOMapAnywhere | kIOMapDefaultCache | kIOMapPrefault,
1265 offSub,
1266 cbSub);
1267#elif MAC_OS_X_VERSION_MIN_REQUIRED >= 1050
1268 static uint32_t volatile s_fOptions = UINT32_MAX;
1269 uint32_t fOptions = s_fOptions;
1270 if (RT_UNLIKELY(fOptions == UINT32_MAX))
1271 s_fOptions = fOptions = version_major >= 14 ? 0x10000000 /*kIOMapPrefault*/ : 0; /* Since 10.10.0. */
1272 IOMemoryMap *pMemMap = pMemToMapDarwin->pMemDesc->createMappingInTask((task_t)R0Process,
1273 0,
1274 kIOMapAnywhere | kIOMapDefaultCache | fOptions,
1275 offSub,
1276 cbSub);
1277#else
1278 IOMemoryMap *pMemMap = pMemToMapDarwin->pMemDesc->map((task_t)R0Process,
1279 0,
1280 kIOMapAnywhere | kIOMapDefaultCache,
1281 offSub,
1282 cbSub);
1283#endif
1284 if (pMemMap)
1285 {
1286 IOVirtualAddress VirtAddr = pMemMap->getVirtualAddress();
1287 void *pv = (void *)(uintptr_t)VirtAddr;
1288 if ((uintptr_t)pv == VirtAddr && pv != NULL)
1289 {
1290 /*
1291 * Create the IPRT memory object.
1292 */
1293 PRTR0MEMOBJDARWIN pMemDarwin = (PRTR0MEMOBJDARWIN)rtR0MemObjNew(sizeof(*pMemDarwin), RTR0MEMOBJTYPE_MAPPING,
1294 pv, cbSub ? cbSub : pMemToMap->cb, NULL);
1295 if (pMemDarwin)
1296 {
1297 pMemDarwin->Core.u.Mapping.R0Process = R0Process;
1298 pMemDarwin->pMemMap = pMemMap;
1299 *ppMem = &pMemDarwin->Core;
1300
1301 IPRT_DARWIN_RESTORE_EFL_AC();
1302 return VINF_SUCCESS;
1303 }
1304
1305 rc = VERR_NO_MEMORY;
1306 }
1307 else if (pv)
1308 rc = VERR_ADDRESS_TOO_BIG;
1309 else
1310 rc = VERR_MAP_FAILED;
1311 pMemMap->release();
1312 }
1313 else
1314 rc = VERR_MAP_FAILED;
1315 }
1316
1317 IPRT_DARWIN_RESTORE_EFL_AC();
1318 return rc;
1319}
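/* Caller-side sketch for sharing a kernel allocation with a user process, assuming the
   public iprt/memobj.h wrappers over this backend (illustrative only):

       RTR0MEMOBJ hMemObj, hMapObj;
       int rc = RTR0MemObjAllocPage(&hMemObj, PAGE_SIZE, false /*fExecutable*/);
       if (RT_SUCCESS(rc))
       {
           rc = RTR0MemObjMapUser(&hMapObj, hMemObj, (RTR3PTR)-1, 0 /*uAlignment*/,
                                  RTMEM_PROT_READ | RTMEM_PROT_WRITE, RTR0ProcHandleSelf());
           if (RT_SUCCESS(rc))
           {
               RTR3PTR R3Ptr = RTR0MemObjAddressR3(hMapObj); // address in the target process
               NOREF(R3Ptr);                                 // hand this to ring-3
               RTR0MemObjFree(hMapObj, false /*fFreeMappings*/);
           }
           RTR0MemObjFree(hMemObj, true /*fFreeMappings*/);
       }
 */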
1320
1321
1322/**
1323 * Worker for rtR0MemObjNativeProtect that's typically called in a different
1324 * context.
1325 */
1326static int rtR0MemObjNativeProtectWorker(PRTR0MEMOBJINTERNAL pMem, size_t offSub, size_t cbSub, uint32_t fProt)
1327{
1328 IPRT_DARWIN_SAVE_EFL_AC();
1329
1330 /* Get the map for the object. */
1331 vm_map_t pVmMap = rtR0MemObjDarwinGetMap(pMem);
1332 if (!pVmMap)
1333 {
1334 IPRT_DARWIN_RESTORE_EFL_AC();
1335 return VERR_NOT_SUPPORTED;
1336 }
1337
1338 /*
1339 * Convert the protection.
1340 */
1341 vm_prot_t fMachProt;
1342 switch (fProt)
1343 {
1344 case RTMEM_PROT_NONE:
1345 fMachProt = VM_PROT_NONE;
1346 break;
1347 case RTMEM_PROT_READ:
1348 fMachProt = VM_PROT_READ;
1349 break;
1350 case RTMEM_PROT_READ | RTMEM_PROT_WRITE:
1351 fMachProt = VM_PROT_READ | VM_PROT_WRITE;
1352 break;
1353 case RTMEM_PROT_READ | RTMEM_PROT_WRITE | RTMEM_PROT_EXEC:
1354 fMachProt = VM_PROT_READ | VM_PROT_WRITE | VM_PROT_EXECUTE;
1355 break;
1356 case RTMEM_PROT_WRITE:
1357 fMachProt = VM_PROT_WRITE | VM_PROT_READ; /* never write-only */
1358 break;
1359 case RTMEM_PROT_WRITE | RTMEM_PROT_EXEC:
1360 fMachProt = VM_PROT_WRITE | VM_PROT_EXECUTE | VM_PROT_READ; /* never write-only or execute-only */
1361 break;
1362 case RTMEM_PROT_EXEC:
1363 fMachProt = VM_PROT_EXECUTE | VM_PROT_READ; /* never execute-only */
1364 break;
1365 default:
1366 AssertFailedReturn(VERR_INVALID_PARAMETER);
1367 }
1368
1369 /*
1370 * Do the job.
1371 */
1372 vm_offset_t Start = (uintptr_t)pMem->pv + offSub;
1373 kern_return_t krc = vm_protect(pVmMap,
1374 Start,
1375 cbSub,
1376 false,
1377 fMachProt);
1378 if (krc != KERN_SUCCESS)
1379 {
1380 static int s_cComplaints = 0;
1381 if (s_cComplaints < 10)
1382 {
1383 s_cComplaints++;
1384 printf("rtR0MemObjNativeProtect: vm_protect(%p,%p,%p,false,%#x) -> %d\n",
1385 (void *)pVmMap, (void *)Start, (void *)cbSub, fMachProt, krc);
1386
1387 kern_return_t krc2;
1388 vm_offset_t pvReal = Start;
1389 vm_size_t cbReal = 0;
1390 mach_msg_type_number_t cInfo = VM_REGION_BASIC_INFO_COUNT;
1391 struct vm_region_basic_info Info;
1392 RT_ZERO(Info);
1393 krc2 = vm_region(pVmMap, &pvReal, &cbReal, VM_REGION_BASIC_INFO, (vm_region_info_t)&Info, &cInfo, NULL);
1394 printf("rtR0MemObjNativeProtect: basic info - krc2=%d pv=%p cb=%p prot=%#x max=%#x inh=%#x shr=%d rvd=%d off=%#x behavior=%#x wired=%#x\n",
1395 krc2, (void *)pvReal, (void *)cbReal, Info.protection, Info.max_protection, Info.inheritance,
1396 Info.shared, Info.reserved, Info.offset, Info.behavior, Info.user_wired_count);
1397 }
1398 IPRT_DARWIN_RESTORE_EFL_AC();
1399 return RTErrConvertFromDarwinKern(krc);
1400 }
1401
1402 /*
1403 * Touch the pages if they should be writable afterwards and accessible
1404 * from code which should never fault. vm_protect() may leave pages
1405 * temporarily write protected, possibly due to pmap no-upgrade rules?
1406 *
1407 * This is the same trick (or HACK ALERT if you like) as applied in
1408 * rtR0MemObjNativeMapKernel.
1409 */
1410 if ( pMem->enmType != RTR0MEMOBJTYPE_MAPPING
1411 || pMem->u.Mapping.R0Process == NIL_RTR0PROCESS)
1412 {
1413 if (fProt & RTMEM_PROT_WRITE)
1414 rtR0MemObjDarwinTouchPages((void *)Start, cbSub);
1415 /*
1416 * Sniff (read) read-only pages too, just to be sure.
1417 */
1418 else if (fProt & (RTMEM_PROT_READ | RTMEM_PROT_EXEC))
1419 rtR0MemObjDarwinSniffPages((void const *)Start, cbSub);
1420 }
1421
1422 IPRT_DARWIN_RESTORE_EFL_AC();
1423 return VINF_SUCCESS;
1424}
1425
1426
1427/**
1428 * rtR0MemObjNativeProtect kernel_task wrapper function.
1429 */
1430static void rtR0MemObjNativeProtectWorkerOnKernelThread(void *pvUser0, void *pvUser1)
1431{
1432 AssertPtr(pvUser0); Assert(pvUser1 == NULL); NOREF(pvUser1);
1433 RTR0MEMOBJDARWINPROTECTARGS *pArgs = (RTR0MEMOBJDARWINPROTECTARGS *)pvUser0;
1434 int rc = rtR0MemObjNativeProtectWorker(pArgs->pMem, pArgs->offSub, pArgs->cbSub, pArgs->fProt);
1435 rtR0MemObjDarwinSignalThreadWaitinOnTask(&pArgs->Core, rc);
1436}
1437
1438
1439DECLHIDDEN(int) rtR0MemObjNativeProtect(PRTR0MEMOBJINTERNAL pMem, size_t offSub, size_t cbSub, uint32_t fProt)
1440{
1441 /*
1442 * The code won't work right because process codesigning properties leak
1443 * into kernel_map memory management. So, if the user process we're running
1444 * in has CS restrictions active, we cannot play around with the EXEC
1445 * protection because some vm_fault.c code thinks we're modifying the process map
1446 * or something.
1447 */
1448 int rc;
1449 if (rtR0MemObjDarwinGetMap(pMem) == kernel_map)
1450 {
1451 RTR0MEMOBJDARWINPROTECTARGS Args;
1452 Args.pMem = pMem;
1453 Args.offSub = offSub;
1454 Args.cbSub = cbSub;
1455 Args.fProt = fProt;
1456 rc = rtR0MemObjDarwinDoInKernelTaskThread(rtR0MemObjNativeProtectWorkerOnKernelThread, &Args.Core);
1457 }
1458 else
1459 rc = rtR0MemObjNativeProtectWorker(pMem, offSub, cbSub, fProt);
1460 return rc;
1461}
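/* Caller-side sketch, assuming the public RTR0MemObjProtect() wrapper (illustrative):
   flip an existing allocation to read+execute once code has been copied into it.

       rc = RTR0MemObjProtect(hMemObj, 0 /*offSub*/, RTR0MemObjSize(hMemObj),
                              RTMEM_PROT_READ | RTMEM_PROT_EXEC);
 */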
1462
1463
1464DECLHIDDEN(RTHCPHYS) rtR0MemObjNativeGetPagePhysAddr(PRTR0MEMOBJINTERNAL pMem, size_t iPage)
1465{
1466 RTHCPHYS PhysAddr;
1467 PRTR0MEMOBJDARWIN pMemDarwin = (PRTR0MEMOBJDARWIN)pMem;
1468 IPRT_DARWIN_SAVE_EFL_AC();
1469
1470#ifdef USE_VM_MAP_WIRE
1471 /*
1472 * Locked memory doesn't have a memory descriptor and
1473 * needs to be handled differently.
1474 */
1475 if (pMemDarwin->Core.enmType == RTR0MEMOBJTYPE_LOCK)
1476 {
1477 ppnum_t PgNo;
1478 if (pMemDarwin->Core.u.Lock.R0Process == NIL_RTR0PROCESS)
1479 PgNo = pmap_find_phys(kernel_pmap, (uintptr_t)pMemDarwin->Core.pv + iPage * PAGE_SIZE);
1480 else
1481 {
1482 /*
1483 * From what I can tell, Apple seems to have locked up all the
1484 * available interfaces that could help us obtain the pmap_t of a task
1485 * or vm_map_t.
1486 *
1487 * So, we'll have to figure out where in the vm_map_t structure it is
1488 * and read it ourselves. ASSUMING that kernel_pmap is pointed to by
1489 * kernel_map->pmap, we scan kernel_map to locate the structure offset.
1490 * Not nice, but it will hopefully do the job in a reliable manner...
1491 *
1492 * (get_task_pmap, get_map_pmap or vm_map_pmap is what we really need btw.)
1493 */
1494 static int s_offPmap = -1;
1495 if (RT_UNLIKELY(s_offPmap == -1))
1496 {
1497 pmap_t const *p = (pmap_t *)kernel_map;
1498 pmap_t const * const pEnd = p + 64;
1499 for (; p < pEnd; p++)
1500 if (*p == kernel_pmap)
1501 {
1502 s_offPmap = (uintptr_t)p - (uintptr_t)kernel_map;
1503 break;
1504 }
1505 AssertReturn(s_offPmap >= 0, NIL_RTHCPHYS);
1506 }
1507 pmap_t Pmap = *(pmap_t *)((uintptr_t)get_task_map((task_t)pMemDarwin->Core.u.Lock.R0Process) + s_offPmap);
1508 PgNo = pmap_find_phys(Pmap, (uintptr_t)pMemDarwin->Core.pv + iPage * PAGE_SIZE);
1509 }
1510
1511 IPRT_DARWIN_RESTORE_EFL_AC();
1512 AssertReturn(PgNo, NIL_RTHCPHYS);
1513 PhysAddr = (RTHCPHYS)PgNo << PAGE_SHIFT;
1514 Assert((PhysAddr >> PAGE_SHIFT) == PgNo);
1515 }
1516 else
1517#endif /* USE_VM_MAP_WIRE */
1518 {
1519 /*
1520 * Get the memory descriptor.
1521 */
1522 IOMemoryDescriptor *pMemDesc = pMemDarwin->pMemDesc;
1523 if (!pMemDesc)
1524 pMemDesc = pMemDarwin->pMemMap->getMemoryDescriptor();
1525 AssertReturn(pMemDesc, NIL_RTHCPHYS);
1526
1527 /*
1528 * If we've got a memory descriptor, use getPhysicalSegment64().
1529 */
1530#ifdef __LP64__
1531 addr64_t Addr = pMemDesc->getPhysicalSegment(iPage * PAGE_SIZE, NULL, kIOMemoryMapperNone);
1532#else
1533 addr64_t Addr = pMemDesc->getPhysicalSegment64(iPage * PAGE_SIZE, NULL);
1534#endif
1535 IPRT_DARWIN_RESTORE_EFL_AC();
1536 AssertMsgReturn(Addr, ("iPage=%u\n", iPage), NIL_RTHCPHYS);
1537 PhysAddr = Addr;
1538 AssertMsgReturn(PhysAddr == Addr, ("PhysAddr=%RHp Addr=%RX64\n", PhysAddr, (uint64_t)Addr), NIL_RTHCPHYS);
1539 }
1540
1541 return PhysAddr;
1542}
1543