VirtualBox

source: vbox/trunk/src/VBox/Runtime/r0drv/darwin/memobj-r0drv-darwin.cpp@ 85569

Last change on this file since 85569 was 85167, checked in by vboxsync, 5 years ago

IPRT/r0drv/darwin: Workarounds for stupid printf %p validation. bugref:9790

  • Property svn:eol-style set to native
  • Property svn:keywords set to Id Revision
File size: 58.8 KB
1/* $Id: memobj-r0drv-darwin.cpp 85167 2020-07-10 10:06:55Z vboxsync $ */
2/** @file
3 * IPRT - Ring-0 Memory Objects, Darwin.
4 */
5
6/*
7 * Copyright (C) 2006-2020 Oracle Corporation
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 *
17 * The contents of this file may alternatively be used under the terms
18 * of the Common Development and Distribution License Version 1.0
19 * (CDDL) only, as it comes in the "COPYING.CDDL" file of the
20 * VirtualBox OSE distribution, in which case the provisions of the
21 * CDDL are applicable instead of those of the GPL.
22 *
23 * You may elect to license modified versions of this file under the
24 * terms and conditions of either the GPL or the CDDL or both.
25 */
26
27
28/*********************************************************************************************************************************
29* Header Files *
30*********************************************************************************************************************************/
31#define RTMEM_NO_WRAP_TO_EF_APIS /* circular dependency otherwise. */
32#include "the-darwin-kernel.h"
33#include "internal/iprt.h"
34#include <iprt/memobj.h>
35
36#include <iprt/asm.h>
37#if defined(RT_ARCH_AMD64) || defined(RT_ARCH_X86)
38# include <iprt/x86.h>
39# include <iprt/asm-amd64-x86.h>
40#endif
41#include <iprt/assert.h>
42#include <iprt/log.h>
43#include <iprt/mem.h>
44#include <iprt/param.h>
45#include <iprt/process.h>
46#include <iprt/semaphore.h>
47#include <iprt/string.h>
48#include <iprt/thread.h>
49#include "internal/memobj.h"
50
51
52/*********************************************************************************************************************************
53* Defined Constants And Macros *
54*********************************************************************************************************************************/
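/* MY_PRINTF below emits each message twice: printf() feeds the regular kernel log,
   while kprintf() goes to the serial/debug console, so diagnostics are visible
   through either channel. */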
55#define MY_PRINTF(...) do { printf(__VA_ARGS__); kprintf(__VA_ARGS__); } while (0)
56
57/*#define USE_VM_MAP_WIRE - may re-enable later when non-mapped allocations are added. */
58
59
60/*********************************************************************************************************************************
61* Structures and Typedefs *
62*********************************************************************************************************************************/
63/**
64 * The Darwin version of the memory object structure.
65 */
66typedef struct RTR0MEMOBJDARWIN
67{
68 /** The core structure. */
69 RTR0MEMOBJINTERNAL Core;
70 /** Pointer to the memory descriptor created for allocated and locked memory. */
71 IOMemoryDescriptor *pMemDesc;
72 /** Pointer to the memory mapping object for mapped memory. */
73 IOMemoryMap *pMemMap;
74} RTR0MEMOBJDARWIN, *PRTR0MEMOBJDARWIN;
75
76/**
77 * Common thread_call_allocate/thread_call_enter argument package.
78 */
79typedef struct RTR0MEMOBJDARWINTHREADARGS
80{
81 int32_t volatile rc;
82 RTSEMEVENTMULTI hEvent;
83} RTR0MEMOBJDARWINTHREADARGS;
84
85
86/**
 87 * Arguments for rtR0MemObjNativeAllockWorkerOnKernelThread.
88 */
89typedef struct RTR0MEMOBJDARWINALLOCARGS
90{
91 RTR0MEMOBJDARWINTHREADARGS Core;
92 PPRTR0MEMOBJINTERNAL ppMem;
93 size_t cb;
94 bool fExecutable;
95 bool fContiguous;
96 mach_vm_address_t PhysMask;
97 uint64_t MaxPhysAddr;
98 RTR0MEMOBJTYPE enmType;
99 size_t uAlignment;
100} RTR0MEMOBJDARWINALLOCARGS;
101
102/**
103 * Arguments for rtR0MemObjNativeProtectWorkOnKernelThread.
104 */
105typedef struct RTR0MEMOBJDARWINPROTECTARGS
106{
107 RTR0MEMOBJDARWINTHREADARGS Core;
108 PRTR0MEMOBJINTERNAL pMem;
109 size_t offSub;
110 size_t cbSub;
111 uint32_t fProt;
112} RTR0MEMOBJDARWINPROTECTARGS;
113
114
115/*********************************************************************************************************************************
116* Internal Functions *
117*********************************************************************************************************************************/
118static void rtR0MemObjNativeAllockWorkerOnKernelThread(void *pvUser0, void *pvUser1);
119static int rtR0MemObjNativeProtectWorker(PRTR0MEMOBJINTERNAL pMem, size_t offSub, size_t cbSub, uint32_t fProt);
120static void rtR0MemObjNativeProtectWorkerOnKernelThread(void *pvUser0, void *pvUser1);
121
122
123/**
124 * Touch the pages to force the kernel to create or write-enable the page table
125 * entries.
126 *
127 * This is necessary since the kernel gets upset if we take a page fault when
128 * preemption is disabled and/or we own a simple lock (same thing). It has no
129 * problems with us disabling interrupts when taking the traps, weird stuff.
130 *
131 * (This is basically a way of invoking vm_fault on a range of pages.)
132 *
133 * @param pv Pointer to the first page.
134 * @param cb The number of bytes.
135 */
136static void rtR0MemObjDarwinTouchPages(void *pv, size_t cb)
137{
138 uint32_t volatile *pu32 = (uint32_t volatile *)pv;
139 for (;;)
140 {
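        /* The compare-exchange never changes the page contents (the old and new values
           are identical), but the locked access counts as a write, which forces the
           kernel to fault in a present, writable PTE for this page. */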
141 ASMAtomicCmpXchgU32(pu32, 0xdeadbeef, 0xdeadbeef);
142 if (cb <= PAGE_SIZE)
143 break;
144 cb -= PAGE_SIZE;
145 pu32 += PAGE_SIZE / sizeof(uint32_t);
146 }
147}
148
149
150/**
 151 * Read (sniff) every page in the range to make sure there are page table
 152 * entries backing it.
 153 *
 154 * This is just to be sure vm_protect didn't remove the page table entries
 155 * without re-adding them when someone write-protects a range.
156 *
157 * @param pv Pointer to the first page.
158 * @param cb The number of bytes.
159 */
160static void rtR0MemObjDarwinSniffPages(void const *pv, size_t cb)
161{
162 uint32_t volatile *pu32 = (uint32_t volatile *)pv;
163 uint32_t volatile u32Counter = 0;
164 for (;;)
165 {
166 u32Counter += *pu32;
167
168 if (cb <= PAGE_SIZE)
169 break;
170 cb -= PAGE_SIZE;
171 pu32 += PAGE_SIZE / sizeof(uint32_t);
172 }
173}
174
175
176/**
177 * Gets the virtual memory map the specified object is mapped into.
178 *
179 * @returns VM map handle on success, NULL if no map.
180 * @param pMem The memory object.
181 */
182DECLINLINE(vm_map_t) rtR0MemObjDarwinGetMap(PRTR0MEMOBJINTERNAL pMem)
183{
184 switch (pMem->enmType)
185 {
186 case RTR0MEMOBJTYPE_PAGE:
187 case RTR0MEMOBJTYPE_LOW:
188 case RTR0MEMOBJTYPE_CONT:
189 return kernel_map;
190
191 case RTR0MEMOBJTYPE_PHYS:
192 case RTR0MEMOBJTYPE_PHYS_NC:
193 if (pMem->pv)
194 return kernel_map;
195 return NULL;
196
197 case RTR0MEMOBJTYPE_LOCK:
198 return pMem->u.Lock.R0Process == NIL_RTR0PROCESS
199 ? kernel_map
200 : get_task_map((task_t)pMem->u.Lock.R0Process);
201
202 case RTR0MEMOBJTYPE_RES_VIRT:
203 return pMem->u.ResVirt.R0Process == NIL_RTR0PROCESS
204 ? kernel_map
205 : get_task_map((task_t)pMem->u.ResVirt.R0Process);
206
207 case RTR0MEMOBJTYPE_MAPPING:
208 return pMem->u.Mapping.R0Process == NIL_RTR0PROCESS
209 ? kernel_map
210 : get_task_map((task_t)pMem->u.Mapping.R0Process);
211
212 default:
213 return NULL;
214 }
215}
216
217#if 0 /* not necessary after all */
218/* My vm_map mockup. */
219struct my_vm_map
220{
221 struct { char pad[8]; } lock;
222 struct my_vm_map_header
223 {
224 struct vm_map_links
225 {
226 void *prev;
227 void *next;
228 vm_map_offset_t start;
229 vm_map_offset_t end;
230 } links;
231 int nentries;
232 boolean_t entries_pageable;
233 } hdr;
234 pmap_t pmap;
235 vm_map_size_t size;
236};
237
238
239/**
240 * Gets the minimum map address, this is similar to get_map_min.
241 *
242 * @returns The start address of the map.
243 * @param pMap The map.
244 */
245static vm_map_offset_t rtR0MemObjDarwinGetMapMin(vm_map_t pMap)
246{
 247 /* lazy discovery of the correct offset. The Apple guys are a wonderfully secretive bunch. */
248 static int32_t volatile s_offAdjust = INT32_MAX;
249 int32_t off = s_offAdjust;
250 if (off == INT32_MAX)
251 {
252 for (off = 0; ; off += sizeof(pmap_t))
253 {
254 if (*(pmap_t *)((uint8_t *)kernel_map + off) == kernel_pmap)
255 break;
256 AssertReturn(off <= RT_MAX(RT_OFFSETOF(struct my_vm_map, pmap) * 4, 1024), 0x1000);
257 }
258 ASMAtomicWriteS32(&s_offAdjust, off - RT_OFFSETOF(struct my_vm_map, pmap));
259 }
260
261 /* calculate it. */
262 struct my_vm_map *pMyMap = (struct my_vm_map *)((uint8_t *)pMap + off);
263 return pMyMap->hdr.links.start;
264}
265#endif /* unused */
266
267#ifdef RT_STRICT
268# if 0 /* unused */
269
270/**
271 * Read from a physical page.
272 *
273 * @param HCPhys The address to start reading at.
274 * @param cb How many bytes to read.
275 * @param pvDst Where to put the bytes. This is zero'd on failure.
276 */
277static void rtR0MemObjDarwinReadPhys(RTHCPHYS HCPhys, size_t cb, void *pvDst)
278{
279 memset(pvDst, '\0', cb);
280
281 IOAddressRange aRanges[1] = { { (mach_vm_address_t)HCPhys, RT_ALIGN_Z(cb, PAGE_SIZE) } };
282 IOMemoryDescriptor *pMemDesc = IOMemoryDescriptor::withAddressRanges(&aRanges[0], RT_ELEMENTS(aRanges),
283 kIODirectionIn, NULL /*task*/);
284 if (pMemDesc)
285 {
286#if MAC_OS_X_VERSION_MIN_REQUIRED >= 1050
287 IOMemoryMap *pMemMap = pMemDesc->createMappingInTask(kernel_task, 0, kIOMapAnywhere | kIOMapDefaultCache);
288#else
289 IOMemoryMap *pMemMap = pMemDesc->map(kernel_task, 0, kIOMapAnywhere | kIOMapDefaultCache);
290#endif
291 if (pMemMap)
292 {
293 void const *pvSrc = (void const *)(uintptr_t)pMemMap->getVirtualAddress();
294 memcpy(pvDst, pvSrc, cb);
295 pMemMap->release();
296 }
297 else
298 MY_PRINTF("rtR0MemObjDarwinReadPhys: createMappingInTask failed; HCPhys=%llx\n", HCPhys);
299
300 pMemDesc->release();
301 }
302 else
303 MY_PRINTF("rtR0MemObjDarwinReadPhys: withAddressRanges failed; HCPhys=%llx\n", HCPhys);
304}
305
306
307/**
308 * Gets the PTE for a page.
309 *
310 * @returns the PTE.
311 * @param pvPage The virtual address to get the PTE for.
312 */
313static uint64_t rtR0MemObjDarwinGetPTE(void *pvPage)
314{
315 RTUINT64U u64;
316 RTCCUINTREG cr3 = ASMGetCR3();
317 RTCCUINTREG cr4 = ASMGetCR4();
318 bool fPAE = false;
319 bool fLMA = false;
320 if (cr4 & X86_CR4_PAE)
321 {
322 fPAE = true;
323 uint32_t fExtFeatures = ASMCpuId_EDX(0x80000001);
324 if (fExtFeatures & X86_CPUID_EXT_FEATURE_EDX_LONG_MODE)
325 {
326 uint64_t efer = ASMRdMsr(MSR_K6_EFER);
327 if (efer & MSR_K6_EFER_LMA)
328 fLMA = true;
329 }
330 }
331
332 if (fLMA)
333 {
334 /* PML4 */
335 rtR0MemObjDarwinReadPhys((cr3 & ~(RTCCUINTREG)PAGE_OFFSET_MASK) | (((uint64_t)(uintptr_t)pvPage >> X86_PML4_SHIFT) & X86_PML4_MASK) * 8, 8, &u64);
336 if (!(u64.u & X86_PML4E_P))
337 {
338 MY_PRINTF("rtR0MemObjDarwinGetPTE: %p -> PML4E !p\n", pvPage);
339 return 0;
340 }
341
342 /* PDPTR */
343 rtR0MemObjDarwinReadPhys((u64.u & ~(uint64_t)PAGE_OFFSET_MASK) | (((uintptr_t)pvPage >> X86_PDPT_SHIFT) & X86_PDPT_MASK_AMD64) * 8, 8, &u64);
344 if (!(u64.u & X86_PDPE_P))
345 {
346 MY_PRINTF("rtR0MemObjDarwinGetPTE: %p -> PDPTE !p\n", pvPage);
347 return 0;
348 }
349 if (u64.u & X86_PDPE_LM_PS)
350 return (u64.u & ~(uint64_t)(_1G -1)) | ((uintptr_t)pvPage & (_1G -1));
351
352 /* PD */
353 rtR0MemObjDarwinReadPhys((u64.u & ~(uint64_t)PAGE_OFFSET_MASK) | (((uintptr_t)pvPage >> X86_PD_PAE_SHIFT) & X86_PD_PAE_MASK) * 8, 8, &u64);
354 if (!(u64.u & X86_PDE_P))
355 {
356 MY_PRINTF("rtR0MemObjDarwinGetPTE: %p -> PDE !p\n", pvPage);
357 return 0;
358 }
359 if (u64.u & X86_PDE_PS)
360 return (u64.u & ~(uint64_t)(_2M -1)) | ((uintptr_t)pvPage & (_2M -1));
361
362 /* PT */
363 rtR0MemObjDarwinReadPhys((u64.u & ~(uint64_t)PAGE_OFFSET_MASK) | (((uintptr_t)pvPage >> X86_PT_PAE_SHIFT) & X86_PT_PAE_MASK) * 8, 8, &u64);
364 if (!(u64.u & X86_PTE_P))
365 {
366 MY_PRINTF("rtR0MemObjDarwinGetPTE: %p -> PTE !p\n", pvPage);
367 return 0;
368 }
369 return u64.u;
370 }
371
372 if (fPAE)
373 {
374 /* PDPTR */
375 rtR0MemObjDarwinReadPhys((u64.u & X86_CR3_PAE_PAGE_MASK) | (((uintptr_t)pvPage >> X86_PDPT_SHIFT) & X86_PDPT_MASK_PAE) * 8, 8, &u64);
376 if (!(u64.u & X86_PDE_P))
377 return 0;
378
379 /* PD */
380 rtR0MemObjDarwinReadPhys((u64.u & ~(uint64_t)PAGE_OFFSET_MASK) | (((uintptr_t)pvPage >> X86_PD_PAE_SHIFT) & X86_PD_PAE_MASK) * 8, 8, &u64);
381 if (!(u64.u & X86_PDE_P))
382 return 0;
383 if (u64.u & X86_PDE_PS)
384 return (u64.u & ~(uint64_t)(_2M -1)) | ((uintptr_t)pvPage & (_2M -1));
385
386 /* PT */
387 rtR0MemObjDarwinReadPhys((u64.u & ~(uint64_t)PAGE_OFFSET_MASK) | (((uintptr_t)pvPage >> X86_PT_PAE_SHIFT) & X86_PT_PAE_MASK) * 8, 8, &u64);
388 if (!(u64.u & X86_PTE_P))
389 return 0;
390 return u64.u;
391 }
392
393 /* PD */
394 rtR0MemObjDarwinReadPhys((u64.au32[0] & ~(uint32_t)PAGE_OFFSET_MASK) | (((uintptr_t)pvPage >> X86_PD_SHIFT) & X86_PD_MASK) * 4, 4, &u64);
395 if (!(u64.au32[0] & X86_PDE_P))
396 return 0;
397 if (u64.au32[0] & X86_PDE_PS)
398 return (u64.u & ~(uint64_t)(_2M -1)) | ((uintptr_t)pvPage & (_2M -1));
399
400 /* PT */
401 rtR0MemObjDarwinReadPhys((u64.au32[0] & ~(uint32_t)PAGE_OFFSET_MASK) | (((uintptr_t)pvPage >> X86_PT_SHIFT) & X86_PT_MASK) * 4, 4, &u64);
402 if (!(u64.au32[0] & X86_PTE_P))
403 return 0;
404 return u64.au32[0];
405
406 return 0;
407}
408
409# endif /* unused */
410#endif /* RT_STRICT */
411
412DECLHIDDEN(int) rtR0MemObjNativeFree(RTR0MEMOBJ pMem)
413{
414 PRTR0MEMOBJDARWIN pMemDarwin = (PRTR0MEMOBJDARWIN)pMem;
415 IPRT_DARWIN_SAVE_EFL_AC();
416
417 /*
418 * Release the IOMemoryDescriptor or/and IOMemoryMap associated with the object.
419 */
420 if (pMemDarwin->pMemDesc)
421 {
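        /* complete() balances the prepare() issued when the descriptor was set up
           (allocation or locking) and must be done before the final release(). */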
422 pMemDarwin->pMemDesc->complete();
423 pMemDarwin->pMemDesc->release();
424 pMemDarwin->pMemDesc = NULL;
425 }
426
427 if (pMemDarwin->pMemMap)
428 {
429 pMemDarwin->pMemMap->release();
430 pMemDarwin->pMemMap = NULL;
431 }
432
433 /*
434 * Release any memory that we've allocated or locked.
435 */
436 switch (pMemDarwin->Core.enmType)
437 {
438 case RTR0MEMOBJTYPE_LOW:
439 case RTR0MEMOBJTYPE_PAGE:
440 case RTR0MEMOBJTYPE_CONT:
441 break;
442
443 case RTR0MEMOBJTYPE_LOCK:
444 {
445#ifdef USE_VM_MAP_WIRE
446 vm_map_t Map = pMemDarwin->Core.u.Lock.R0Process != NIL_RTR0PROCESS
447 ? get_task_map((task_t)pMemDarwin->Core.u.Lock.R0Process)
448 : kernel_map;
449 kern_return_t kr = vm_map_unwire(Map,
450 (vm_map_offset_t)pMemDarwin->Core.pv,
451 (vm_map_offset_t)pMemDarwin->Core.pv + pMemDarwin->Core.cb,
452 0 /* not user */);
453 AssertRC(kr == KERN_SUCCESS); /** @todo don't ignore... */
454#endif
455 break;
456 }
457
458 case RTR0MEMOBJTYPE_PHYS:
459 /*if (pMemDarwin->Core.u.Phys.fAllocated)
460 IOFreePhysical(pMemDarwin->Core.u.Phys.PhysBase, pMemDarwin->Core.cb);*/
461 Assert(!pMemDarwin->Core.u.Phys.fAllocated);
462 break;
463
464 case RTR0MEMOBJTYPE_PHYS_NC:
465 AssertMsgFailed(("RTR0MEMOBJTYPE_PHYS_NC\n"));
466 IPRT_DARWIN_RESTORE_EFL_AC();
467 return VERR_INTERNAL_ERROR;
468
469 case RTR0MEMOBJTYPE_RES_VIRT:
470 AssertMsgFailed(("RTR0MEMOBJTYPE_RES_VIRT\n"));
471 IPRT_DARWIN_RESTORE_EFL_AC();
472 return VERR_INTERNAL_ERROR;
473
474 case RTR0MEMOBJTYPE_MAPPING:
475 /* nothing to do here. */
476 break;
477
478 default:
479 AssertMsgFailed(("enmType=%d\n", pMemDarwin->Core.enmType));
480 IPRT_DARWIN_RESTORE_EFL_AC();
481 return VERR_INTERNAL_ERROR;
482 }
483
484 IPRT_DARWIN_RESTORE_EFL_AC();
485 return VINF_SUCCESS;
486}
487
488
489/**
 490 * Helper function that executes @a pfnWorker in the context of the
 491 * kernel_task.
492 *
493 * @returns IPRT status code - result from pfnWorker or dispatching error.
494 * @param pfnWorker The function to call.
495 * @param pArgs The arguments to pass to the function.
496 */
497static int rtR0MemObjDarwinDoInKernelTaskThread(thread_call_func_t pfnWorker, RTR0MEMOBJDARWINTHREADARGS *pArgs)
498{
499 pArgs->rc = VERR_IPE_UNINITIALIZED_STATUS;
500 pArgs->hEvent = NIL_RTSEMEVENTMULTI;
501 int rc = RTSemEventMultiCreate(&pArgs->hEvent);
502 if (RT_SUCCESS(rc))
503 {
504 thread_call_t hCall = thread_call_allocate(pfnWorker, (void *)pArgs);
505 if (hCall)
506 {
507 boolean_t fRc = thread_call_enter(hCall);
508 AssertLogRel(fRc == FALSE);
509
510 rc = RTSemEventMultiWaitEx(pArgs->hEvent, RTSEMWAIT_FLAGS_INDEFINITE | RTSEMWAIT_FLAGS_UNINTERRUPTIBLE,
511 RT_INDEFINITE_WAIT);
512 AssertLogRelRC(rc);
513
514 rc = pArgs->rc;
515 thread_call_free(hCall);
516 }
517 else
518 rc = VERR_NO_MEMORY;
519 RTSemEventMultiDestroy(pArgs->hEvent);
520 }
521 return rc;
522}
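/* Illustrative sketch of the calling pattern (see rtR0MemObjNativeAllocWorker and
   rtR0MemObjNativeProtect below for the real call sites): the caller embeds
   RTR0MEMOBJDARWINTHREADARGS as the first member of its argument structure, e.g.

       RTR0MEMOBJDARWINALLOCARGS Args;
       Args.ppMem = ppMem;
       Args.cb    = cb;
       // ... fill in the remaining fields ...
       int rc = rtR0MemObjDarwinDoInKernelTaskThread(rtR0MemObjNativeAllockWorkerOnKernelThread, &Args.Core);

   and the worker reports its status back via rtR0MemObjDarwinSignalThreadWaitinOnTask(). */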
523
524
525/**
526 * Signals result to thread waiting in rtR0MemObjDarwinDoInKernelTaskThread.
527 *
528 * @param pArgs The argument structure.
529 * @param rc The IPRT status code to signal.
530 */
531static void rtR0MemObjDarwinSignalThreadWaitinOnTask(RTR0MEMOBJDARWINTHREADARGS volatile *pArgs, int rc)
532{
533 if (ASMAtomicCmpXchgS32(&pArgs->rc, rc, VERR_IPE_UNINITIALIZED_STATUS))
534 {
535 rc = RTSemEventMultiSignal(pArgs->hEvent);
536 AssertLogRelRC(rc);
537 }
538}
539
540
541/**
542 * Kernel memory alloc worker that uses inTaskWithPhysicalMask.
543 *
544 * @returns IPRT status code.
545 * @retval VERR_ADDRESS_TOO_BIG try another way.
546 *
547 * @param ppMem Where to return the memory object.
548 * @param cb The page aligned memory size.
549 * @param fExecutable Whether the mapping needs to be executable.
550 * @param fContiguous Whether the backing memory needs to be contiguous.
551 * @param PhysMask The mask for the backing memory (i.e. range). Use 0 if
 552 * you don't care that much or are speculating.
553 * @param MaxPhysAddr The max address to verify the result against. Use
554 * UINT64_MAX if it doesn't matter.
555 * @param enmType The object type.
556 * @param uAlignment The allocation alignment (in bytes).
557 * @param fOnKernelThread Set if we're already on the kernel thread.
558 */
559static int rtR0MemObjNativeAllocWorker(PPRTR0MEMOBJINTERNAL ppMem, size_t cb,
560 bool fExecutable, bool fContiguous,
561 mach_vm_address_t PhysMask, uint64_t MaxPhysAddr,
562 RTR0MEMOBJTYPE enmType, size_t uAlignment, bool fOnKernelThread)
563{
564 int rc;
565
566 /*
 567 * Because of process code signing properties leaking into kernel space
 568 * in XNU's vm_fault.c code, we have to defer allocations of exec memory to
569 * a thread running in the kernel_task to get consistent results here.
570 *
571 * Trouble strikes in vm_fault_enter() when cs_enforcement_enabled is determined
572 * to be true because current process has the CS_ENFORCEMENT flag, the page flag
573 * vmp_cs_validated is clear, and the protection mask includes VM_PROT_EXECUTE
574 * (pmap_cs_enforced does not apply to macOS it seems). This test seems to go
575 * back to 10.5, though I'm not sure whether it's enabled for macOS that early
 576 * on. Only VM_PROT_EXECUTE is problematic for kernel memory (though
577 * VM_PROT_WRITE on code signed pages is also problematic in theory). As long as
578 * kernel_task doesn't have CS_ENFORCEMENT enabled, we'll be fine switching to it.
579 */
580 if (!fExecutable || fOnKernelThread)
581 { /* likely */ }
582 else
583 {
584 RTR0MEMOBJDARWINALLOCARGS Args;
585 Args.ppMem = ppMem;
586 Args.cb = cb;
587 Args.fExecutable = fExecutable;
588 Args.fContiguous = fContiguous;
589 Args.PhysMask = PhysMask;
590 Args.MaxPhysAddr = MaxPhysAddr;
591 Args.enmType = enmType;
592 Args.uAlignment = uAlignment;
593 return rtR0MemObjDarwinDoInKernelTaskThread(rtR0MemObjNativeAllockWorkerOnKernelThread, &Args.Core);
594 }
595
596 /*
597 * Try inTaskWithPhysicalMask first, but since we don't quite trust that it
598 * actually respects the physical memory mask (10.5.x is certainly busted),
599 * we'll use rtR0MemObjNativeAllocCont as a fallback for dealing with that.
600 *
601 * The kIOMemoryKernelUserShared flag just forces the result to be page aligned.
602 *
603 * The kIOMemoryMapperNone flag is required since 10.8.2 (IOMMU changes?).
604 */
605
 606 /* This is an old fudge from the snow leopard days: "Is it only on snow leopard?
607 Seen allocating memory for the VM structure, last page corrupted or
608 inaccessible." Made it only apply to snow leopard and older for now. */
609 size_t cbFudged = cb;
 610 if (version_major >= 11 /* 11 = 10.7.x = Lion. */)
611 { /* likely */ }
612 else
613 cbFudged += PAGE_SIZE;
614
615 IOOptionBits fOptions = kIOMemoryKernelUserShared | kIODirectionInOut;
616 if (fContiguous)
617 {
618 fOptions |= kIOMemoryPhysicallyContiguous;
619 if ( version_major > 12
620 || (version_major == 12 && version_minor >= 2) /* 10.8.2 = Mountain Kitten */ )
621 fOptions |= kIOMemoryHostPhysicallyContiguous; /* (Just to make ourselves clear, in case the xnu code changes.) */
622 }
623 if (version_major >= 12 /* 12 = 10.8.x = Mountain Kitten */)
624 fOptions |= kIOMemoryMapperNone;
625
626#if __MAC_OS_X_VERSION_MAX_ALLOWED >= 1070 && 0 /* enable when/if necessary */
627 /* Paranoia: Don't misrepresent our intentions, we won't map kernel executable memory into ring-0. */
628 if (fExecutable && version_major >= 11 /* 10.7.x = Lion, as below */)
629 {
630 fOptions &= ~kIOMemoryKernelUserShared;
631 if (uAlignment < PAGE_SIZE)
632 uAlignment = PAGE_SIZE;
633 }
634#endif
635
 636 /* The public initWithPhysicalMask virtual method appeared in 10.7.0; in
 637 versions 10.5.0 up to 10.7.0 it was private, and in 10.4.8-10.5.0 it was
 638 x86 only and didn't have the alignment parameter (the slot was different too). */
639 uint64_t uAlignmentActual = uAlignment;
640 IOBufferMemoryDescriptor *pMemDesc;
641#if __MAC_OS_X_VERSION_MAX_ALLOWED >= 1070
642 if (version_major >= 11 /* 11 = 10.7.x = Lion, could probably allow 10.5.0+ here if we really wanted to. */)
643 {
644 /* Starting with 10.6.x the physical mask is ignored if alignment is higher
645 than 1. The assumption seems to be that inTaskWithPhysicalMask() should
646 be used and the alignment inferred from the PhysMask argument. */
647 if (MaxPhysAddr != UINT64_MAX)
648 {
649 Assert(RT_ALIGN_64(PhysMask, uAlignment) == PhysMask);
650 uAlignmentActual = 1;
651 }
652
653 pMemDesc = new IOBufferMemoryDescriptor;
654 if (pMemDesc)
655 {
656 if (pMemDesc->initWithPhysicalMask(kernel_task, fOptions, cbFudged, uAlignmentActual, PhysMask))
657 { /* likely */ }
658 else
659 {
660 pMemDesc->release();
661 pMemDesc = NULL;
662 }
663 }
664 }
665 else
666#endif
667 pMemDesc = IOBufferMemoryDescriptor::inTaskWithPhysicalMask(kernel_task, fOptions, cbFudged, PhysMask);
668 if (pMemDesc)
669 {
670 IOReturn IORet = pMemDesc->prepare(kIODirectionInOut);
671 if (IORet == kIOReturnSuccess)
672 {
673 void *pv = pMemDesc->getBytesNoCopy(0, cbFudged);
674 if (pv)
675 {
 676 /*
 677 * Check that every page is below MaxPhysAddr and page aligned.
 678 */
679 addr64_t AddrPrev = 0;
680 MaxPhysAddr &= ~(uint64_t)PAGE_OFFSET_MASK;
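                /* MaxPhysAddr is rounded down to a page boundary here so the per-page
                   'Addr > MaxPhysAddr' comparison below operates on page-aligned
                   physical addresses. */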
681 for (IOByteCount off = 0; off < cb; off += PAGE_SIZE)
682 {
683#ifdef __LP64__
684 addr64_t Addr = pMemDesc->getPhysicalSegment(off, NULL, kIOMemoryMapperNone);
685#else
686 addr64_t Addr = pMemDesc->getPhysicalSegment64(off, NULL);
687#endif
688 if ( Addr > MaxPhysAddr
689 || !Addr
690 || (Addr & PAGE_OFFSET_MASK)
691 || ( fContiguous
692 && !off
693 && Addr == AddrPrev + PAGE_SIZE))
694 {
 695 /* Buggy API, try to allocate the memory another way. */
696 pMemDesc->complete();
697 pMemDesc->release();
698 if (PhysMask)
699 {
 700 kprintf("rtR0MemObjNativeAllocWorker: off=%zx Addr=%llx AddrPrev=%llx MaxPhysAddr=%llx PhysMask=%llx fContiguous=%d fOptions=%#x - buggy API!\n",
701 (size_t)off, Addr, AddrPrev, MaxPhysAddr, PhysMask, fContiguous, fOptions);
 702 LogRel(("rtR0MemObjNativeAllocWorker: off=%zx Addr=%llx AddrPrev=%llx MaxPhysAddr=%llx PhysMask=%llx fContiguous=%RTbool fOptions=%#x - buggy API!\n",
703 (size_t)off, Addr, AddrPrev, MaxPhysAddr, PhysMask, fContiguous, fOptions));
704 }
705 return VERR_ADDRESS_TOO_BIG;
706 }
707 AddrPrev = Addr;
708 }
709
710 /*
711 * Check that it's aligned correctly.
712 */
713 if ((uintptr_t)pv & (uAlignment - 1))
714 {
715 pMemDesc->complete();
716 pMemDesc->release();
717 if (PhysMask)
718 {
 719 kprintf("rtR0MemObjNativeAllocWorker: pv=%p uAlignment=%#zx (MaxPhysAddr=%llx PhysMask=%llx fContiguous=%d fOptions=%#x) - buggy API!!\n",
720 pv, uAlignment, MaxPhysAddr, PhysMask, fContiguous, fOptions);
 721 LogRel(("rtR0MemObjNativeAllocWorker: pv=%p uAlignment=%#zx (MaxPhysAddr=%llx PhysMask=%llx fContiguous=%RTbool fOptions=%#x) - buggy API!\n",
722 pv, uAlignment, MaxPhysAddr, PhysMask, fContiguous, fOptions));
723 }
724 return VERR_NOT_SUPPORTED;
725 }
726
727#ifdef RT_STRICT
728 /* check that the memory is actually mapped. */
729 //addr64_t Addr = pMemDesc->getPhysicalSegment64(0, NULL);
730 //printf("rtR0MemObjNativeAllocWorker: pv=%p %8llx %8llx\n", pv, rtR0MemObjDarwinGetPTE(pv), Addr);
731 RTTHREADPREEMPTSTATE State = RTTHREADPREEMPTSTATE_INITIALIZER;
732 RTThreadPreemptDisable(&State);
733 rtR0MemObjDarwinTouchPages(pv, cb);
734 RTThreadPreemptRestore(&State);
735#endif
736
737 /*
738 * Create the IPRT memory object.
739 */
740 PRTR0MEMOBJDARWIN pMemDarwin = (PRTR0MEMOBJDARWIN)rtR0MemObjNew(sizeof(*pMemDarwin), enmType, pv, cb);
741 if (pMemDarwin)
742 {
743 if (fContiguous)
744 {
745#ifdef __LP64__
746 addr64_t PhysBase64 = pMemDesc->getPhysicalSegment(0, NULL, kIOMemoryMapperNone);
747#else
748 addr64_t PhysBase64 = pMemDesc->getPhysicalSegment64(0, NULL);
749#endif
750 RTHCPHYS PhysBase = PhysBase64; Assert(PhysBase == PhysBase64);
751 if (enmType == RTR0MEMOBJTYPE_CONT)
752 pMemDarwin->Core.u.Cont.Phys = PhysBase;
753 else if (enmType == RTR0MEMOBJTYPE_PHYS)
754 pMemDarwin->Core.u.Phys.PhysBase = PhysBase;
755 else
756 AssertMsgFailed(("enmType=%d\n", enmType));
757 }
758
759 if (fExecutable)
760 {
761 rc = rtR0MemObjNativeProtectWorker(&pMemDarwin->Core, 0, cb,
762 RTMEM_PROT_READ | RTMEM_PROT_WRITE | RTMEM_PROT_EXEC);
763#ifdef RT_STRICT
764 if (RT_SUCCESS(rc))
765 {
766 /* check that the memory is actually mapped. */
767 RTTHREADPREEMPTSTATE State2 = RTTHREADPREEMPTSTATE_INITIALIZER;
768 RTThreadPreemptDisable(&State2);
769 rtR0MemObjDarwinTouchPages(pv, cb);
770 RTThreadPreemptRestore(&State2);
771 }
772#endif
773 /* Bug 6226: Ignore KERN_PROTECTION_FAILURE on Leopard and older. */
774 if ( rc == VERR_PERMISSION_DENIED
775 && version_major <= 10 /* 10 = 10.6.x = Snow Leopard. */)
776 rc = VINF_SUCCESS;
777 }
778 else
779 rc = VINF_SUCCESS;
780 if (RT_SUCCESS(rc))
781 {
782 pMemDarwin->pMemDesc = pMemDesc;
783 *ppMem = &pMemDarwin->Core;
784 return VINF_SUCCESS;
785 }
786
787 rtR0MemObjDelete(&pMemDarwin->Core);
788 }
789
790 if (enmType == RTR0MEMOBJTYPE_PHYS_NC)
791 rc = VERR_NO_PHYS_MEMORY;
792 else if (enmType == RTR0MEMOBJTYPE_LOW)
793 rc = VERR_NO_LOW_MEMORY;
794 else if (enmType == RTR0MEMOBJTYPE_CONT)
795 rc = VERR_NO_CONT_MEMORY;
796 else
797 rc = VERR_NO_MEMORY;
798 }
799 else
800 rc = VERR_MEMOBJ_INIT_FAILED;
801
802 pMemDesc->complete();
803 }
804 else
805 rc = RTErrConvertFromDarwinIO(IORet);
806 pMemDesc->release();
807 }
808 else
809 rc = VERR_MEMOBJ_INIT_FAILED;
810 Assert(rc != VERR_ADDRESS_TOO_BIG);
811 return rc;
812}
813
814
815/**
816 * rtR0MemObjNativeAllocWorker kernel_task wrapper function.
817 */
818static void rtR0MemObjNativeAllockWorkerOnKernelThread(void *pvUser0, void *pvUser1)
819{
820 AssertPtr(pvUser0); Assert(pvUser1 == NULL); NOREF(pvUser1);
821 RTR0MEMOBJDARWINALLOCARGS volatile *pArgs = (RTR0MEMOBJDARWINALLOCARGS volatile *)pvUser0;
822 int rc = rtR0MemObjNativeAllocWorker(pArgs->ppMem, pArgs->cb, pArgs->fExecutable, pArgs->fContiguous, pArgs->PhysMask,
823 pArgs->MaxPhysAddr, pArgs->enmType, pArgs->uAlignment, true /*fOnKernelThread*/);
824 rtR0MemObjDarwinSignalThreadWaitinOnTask(&pArgs->Core, rc);
825}
826
827
828DECLHIDDEN(int) rtR0MemObjNativeAllocPage(PPRTR0MEMOBJINTERNAL ppMem, size_t cb, bool fExecutable)
829{
830 IPRT_DARWIN_SAVE_EFL_AC();
831
832 int rc = rtR0MemObjNativeAllocWorker(ppMem, cb, fExecutable, false /* fContiguous */, 0 /* PhysMask */, UINT64_MAX,
833 RTR0MEMOBJTYPE_PAGE, PAGE_SIZE, false /*fOnKernelThread*/);
834
835 IPRT_DARWIN_RESTORE_EFL_AC();
836 return rc;
837}
838
839
840DECLHIDDEN(int) rtR0MemObjNativeAllocLow(PPRTR0MEMOBJINTERNAL ppMem, size_t cb, bool fExecutable)
841{
842 IPRT_DARWIN_SAVE_EFL_AC();
843
844 /*
845 * Try IOMallocPhysical/IOMallocAligned first.
846 * Then try optimistically without a physical address mask, which will always
847 * end up using IOMallocAligned.
848 *
849 * (See bug comment in the worker and IOBufferMemoryDescriptor::initWithPhysicalMask.)
850 */
851 int rc = rtR0MemObjNativeAllocWorker(ppMem, cb, fExecutable, false /* fContiguous */, ~(uint32_t)PAGE_OFFSET_MASK,
852 _4G - PAGE_SIZE, RTR0MEMOBJTYPE_LOW, PAGE_SIZE, false /*fOnKernelThread*/);
853 if (rc == VERR_ADDRESS_TOO_BIG)
854 rc = rtR0MemObjNativeAllocWorker(ppMem, cb, fExecutable, false /* fContiguous */, 0 /* PhysMask */,
855 _4G - PAGE_SIZE, RTR0MEMOBJTYPE_LOW, PAGE_SIZE, false /*fOnKernelThread*/);
856
857 IPRT_DARWIN_RESTORE_EFL_AC();
858 return rc;
859}
860
861
862DECLHIDDEN(int) rtR0MemObjNativeAllocCont(PPRTR0MEMOBJINTERNAL ppMem, size_t cb, bool fExecutable)
863{
864 IPRT_DARWIN_SAVE_EFL_AC();
865
866 int rc = rtR0MemObjNativeAllocWorker(ppMem, cb, fExecutable, true /* fContiguous */,
867 ~(uint32_t)PAGE_OFFSET_MASK, _4G - PAGE_SIZE,
868 RTR0MEMOBJTYPE_CONT, PAGE_SIZE, false /*fOnKernelThread*/);
869
870 /*
871 * Workaround for bogus IOKernelAllocateContiguous behavior, just in case.
872 * cb <= PAGE_SIZE allocations take a different path, using a different allocator.
873 */
874 if (RT_FAILURE(rc) && cb <= PAGE_SIZE)
875 rc = rtR0MemObjNativeAllocWorker(ppMem, cb + PAGE_SIZE, fExecutable, true /* fContiguous */,
876 ~(uint32_t)PAGE_OFFSET_MASK, _4G - PAGE_SIZE,
877 RTR0MEMOBJTYPE_CONT, PAGE_SIZE, false /*fOnKernelThread*/);
878 IPRT_DARWIN_RESTORE_EFL_AC();
879 return rc;
880}
881
882
883DECLHIDDEN(int) rtR0MemObjNativeAllocPhys(PPRTR0MEMOBJINTERNAL ppMem, size_t cb, RTHCPHYS PhysHighest, size_t uAlignment)
884{
885 if (uAlignment != PAGE_SIZE)
886 {
887 /* See rtR0MemObjNativeAllocWorker: */
 888 if (version_major < 9 /* 9 = 10.5.x = Leopard */)
889 return VERR_NOT_SUPPORTED;
890 }
891
892 IPRT_DARWIN_SAVE_EFL_AC();
893
894 /*
895 * Translate the PhysHighest address into a mask.
896 */
897 int rc;
898 if (PhysHighest == NIL_RTHCPHYS)
899 rc = rtR0MemObjNativeAllocWorker(ppMem, cb, false /* fExecutable */, true /* fContiguous */,
900 uAlignment <= PAGE_SIZE ? 0 : ~(mach_vm_address_t)(uAlignment - 1) /* PhysMask*/,
901 UINT64_MAX, RTR0MEMOBJTYPE_PHYS, uAlignment, false /*fOnKernelThread*/);
902 else
903 {
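        /* Derive the physical address mask from PhysHighest: start with all bits set
           and shift right until the mask no longer exceeds PhysHighest (with the page
           offset bits forced on), then clear the bits demanded by the alignment.
           E.g. PhysHighest = 0xFFFFFFFF (4 GiB - 1) with page-size alignment yields
           PhysMask = 0xFFFFF000, i.e. page-aligned addresses below 4 GiB. */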
904 mach_vm_address_t PhysMask = 0;
905 PhysMask = ~(mach_vm_address_t)0;
906 while (PhysMask > (PhysHighest | PAGE_OFFSET_MASK))
907 PhysMask >>= 1;
908 AssertReturn(PhysMask + 1 <= cb, VERR_INVALID_PARAMETER);
909 PhysMask &= ~(mach_vm_address_t)(uAlignment - 1);
910
911 rc = rtR0MemObjNativeAllocWorker(ppMem, cb, false /* fExecutable */, true /* fContiguous */,
912 PhysMask, PhysHighest, RTR0MEMOBJTYPE_PHYS, uAlignment, false /*fOnKernelThread*/);
913 }
914
915 IPRT_DARWIN_RESTORE_EFL_AC();
916 return rc;
917}
918
919
920DECLHIDDEN(int) rtR0MemObjNativeAllocPhysNC(PPRTR0MEMOBJINTERNAL ppMem, size_t cb, RTHCPHYS PhysHighest)
921{
922 /** @todo rtR0MemObjNativeAllocPhys / darwin.
923 * This might be a bit problematic and may very well require having to create our own
924 * object which we populate with pages but without mapping it into any address space.
925 * Estimate is 2-3 days.
926 */
927 RT_NOREF(ppMem, cb, PhysHighest);
928 return VERR_NOT_SUPPORTED;
929}
930
931
932DECLHIDDEN(int) rtR0MemObjNativeEnterPhys(PPRTR0MEMOBJINTERNAL ppMem, RTHCPHYS Phys, size_t cb, uint32_t uCachePolicy)
933{
934 AssertReturn(uCachePolicy == RTMEM_CACHE_POLICY_DONT_CARE, VERR_NOT_SUPPORTED);
935 IPRT_DARWIN_SAVE_EFL_AC();
936
937 /*
 938 * Create a descriptor for it (the validation is always true on Intel Macs, but
 939 * since it doesn't harm us we keep it in).
940 */
941 int rc = VERR_ADDRESS_TOO_BIG;
942 IOAddressRange aRanges[1] = { { Phys, cb } };
943 if ( aRanges[0].address == Phys
944 && aRanges[0].length == cb)
945 {
946 IOMemoryDescriptor *pMemDesc = IOMemoryDescriptor::withAddressRanges(&aRanges[0], RT_ELEMENTS(aRanges),
947 kIODirectionInOut, NULL /*task*/);
948 if (pMemDesc)
949 {
950#ifdef __LP64__
951 Assert(Phys == pMemDesc->getPhysicalSegment(0, NULL, kIOMemoryMapperNone));
952#else
953 Assert(Phys == pMemDesc->getPhysicalSegment64(0, NULL));
954#endif
955
956 /*
957 * Create the IPRT memory object.
958 */
959 PRTR0MEMOBJDARWIN pMemDarwin = (PRTR0MEMOBJDARWIN)rtR0MemObjNew(sizeof(*pMemDarwin), RTR0MEMOBJTYPE_PHYS, NULL, cb);
960 if (pMemDarwin)
961 {
962 pMemDarwin->Core.u.Phys.PhysBase = Phys;
963 pMemDarwin->Core.u.Phys.fAllocated = false;
964 pMemDarwin->Core.u.Phys.uCachePolicy = uCachePolicy;
965 pMemDarwin->pMemDesc = pMemDesc;
966 *ppMem = &pMemDarwin->Core;
967 IPRT_DARWIN_RESTORE_EFL_AC();
968 return VINF_SUCCESS;
969 }
970
971 rc = VERR_NO_MEMORY;
972 pMemDesc->release();
973 }
974 else
975 rc = VERR_MEMOBJ_INIT_FAILED;
976 }
977 else
978 AssertMsgFailed(("%#llx %llx\n", (unsigned long long)Phys, (unsigned long long)cb));
979 IPRT_DARWIN_RESTORE_EFL_AC();
980 return rc;
981}
982
983
984/**
985 * Internal worker for locking down pages.
986 *
987 * @return IPRT status code.
988 *
989 * @param ppMem Where to store the memory object pointer.
990 * @param pv First page.
991 * @param cb Number of bytes.
992 * @param fAccess The desired access, a combination of RTMEM_PROT_READ
993 * and RTMEM_PROT_WRITE.
 994 * @param Task The task \a pv and \a cb refer to.
995 */
996static int rtR0MemObjNativeLock(PPRTR0MEMOBJINTERNAL ppMem, void *pv, size_t cb, uint32_t fAccess, task_t Task)
997{
998 IPRT_DARWIN_SAVE_EFL_AC();
999 NOREF(fAccess);
1000#ifdef USE_VM_MAP_WIRE
1001 vm_map_t Map = get_task_map(Task);
1002 Assert(Map);
1003
1004 /*
 1005 * First try to lock the memory.
1006 */
1007 int rc = VERR_LOCK_FAILED;
1008 kern_return_t kr = vm_map_wire(get_task_map(Task),
1009 (vm_map_offset_t)pv,
1010 (vm_map_offset_t)pv + cb,
1011 VM_PROT_DEFAULT,
1012 0 /* not user */);
1013 if (kr == KERN_SUCCESS)
1014 {
1015 /*
1016 * Create the IPRT memory object.
1017 */
1018 PRTR0MEMOBJDARWIN pMemDarwin = (PRTR0MEMOBJDARWIN)rtR0MemObjNew(sizeof(*pMemDarwin), RTR0MEMOBJTYPE_LOCK, pv, cb);
1019 if (pMemDarwin)
1020 {
1021 pMemDarwin->Core.u.Lock.R0Process = (RTR0PROCESS)Task;
1022 *ppMem = &pMemDarwin->Core;
1023
1024 IPRT_DARWIN_RESTORE_EFL_AC();
1025 return VINF_SUCCESS;
1026 }
1027
1028 kr = vm_map_unwire(get_task_map(Task), (vm_map_offset_t)pv, (vm_map_offset_t)pv + cb, 0 /* not user */);
1029 Assert(kr == KERN_SUCCESS);
1030 rc = VERR_NO_MEMORY;
1031 }
1032
1033#else
1034
1035 /*
 1036 * Create a descriptor and try to lock it (prepare).
1037 */
1038 int rc = VERR_MEMOBJ_INIT_FAILED;
1039 IOMemoryDescriptor *pMemDesc = IOMemoryDescriptor::withAddressRange((vm_address_t)pv, cb, kIODirectionInOut, Task);
1040 if (pMemDesc)
1041 {
1042 IOReturn IORet = pMemDesc->prepare(kIODirectionInOut);
1043 if (IORet == kIOReturnSuccess)
1044 {
1045 /*
1046 * Create the IPRT memory object.
1047 */
1048 PRTR0MEMOBJDARWIN pMemDarwin = (PRTR0MEMOBJDARWIN)rtR0MemObjNew(sizeof(*pMemDarwin), RTR0MEMOBJTYPE_LOCK, pv, cb);
1049 if (pMemDarwin)
1050 {
1051 pMemDarwin->Core.u.Lock.R0Process = (RTR0PROCESS)Task;
1052 pMemDarwin->pMemDesc = pMemDesc;
1053 *ppMem = &pMemDarwin->Core;
1054
1055 IPRT_DARWIN_RESTORE_EFL_AC();
1056 return VINF_SUCCESS;
1057 }
1058
1059 pMemDesc->complete();
1060 rc = VERR_NO_MEMORY;
1061 }
1062 else
1063 rc = VERR_LOCK_FAILED;
1064 pMemDesc->release();
1065 }
1066#endif
1067 IPRT_DARWIN_RESTORE_EFL_AC();
1068 return rc;
1069}
1070
1071
1072DECLHIDDEN(int) rtR0MemObjNativeLockUser(PPRTR0MEMOBJINTERNAL ppMem, RTR3PTR R3Ptr, size_t cb, uint32_t fAccess, RTR0PROCESS R0Process)
1073{
1074 return rtR0MemObjNativeLock(ppMem, (void *)R3Ptr, cb, fAccess, (task_t)R0Process);
1075}
1076
1077
1078DECLHIDDEN(int) rtR0MemObjNativeLockKernel(PPRTR0MEMOBJINTERNAL ppMem, void *pv, size_t cb, uint32_t fAccess)
1079{
1080 return rtR0MemObjNativeLock(ppMem, pv, cb, fAccess, kernel_task);
1081}
1082
1083
1084DECLHIDDEN(int) rtR0MemObjNativeReserveKernel(PPRTR0MEMOBJINTERNAL ppMem, void *pvFixed, size_t cb, size_t uAlignment)
1085{
1086 RT_NOREF(ppMem, pvFixed, cb, uAlignment);
1087 return VERR_NOT_SUPPORTED;
1088}
1089
1090
1091DECLHIDDEN(int) rtR0MemObjNativeReserveUser(PPRTR0MEMOBJINTERNAL ppMem, RTR3PTR R3PtrFixed, size_t cb, size_t uAlignment, RTR0PROCESS R0Process)
1092{
1093 RT_NOREF(ppMem, R3PtrFixed, cb, uAlignment, R0Process);
1094 return VERR_NOT_SUPPORTED;
1095}
1096
1097
1098DECLHIDDEN(int) rtR0MemObjNativeMapKernel(PPRTR0MEMOBJINTERNAL ppMem, RTR0MEMOBJ pMemToMap, void *pvFixed, size_t uAlignment,
1099 unsigned fProt, size_t offSub, size_t cbSub)
1100{
1101 RT_NOREF(fProt);
1102 AssertReturn(pvFixed == (void *)-1, VERR_NOT_SUPPORTED);
1103
1104 /*
1105 * Check that the specified alignment is supported.
1106 */
1107 if (uAlignment > PAGE_SIZE)
1108 return VERR_NOT_SUPPORTED;
1109 Assert(!offSub || cbSub);
1110
1111 IPRT_DARWIN_SAVE_EFL_AC();
1112
1113 /*
1114 * Must have a memory descriptor that we can map.
1115 */
1116 int rc = VERR_INVALID_PARAMETER;
1117 PRTR0MEMOBJDARWIN pMemToMapDarwin = (PRTR0MEMOBJDARWIN)pMemToMap;
1118 if (pMemToMapDarwin->pMemDesc)
1119 {
 1120 /* The kIOMapPrefault option was added in 10.10.0; it causes the PTEs to be populated
 1121 with INTEL_PTE_WIRED set, just like we desire (see further down). However, until
 1122 10.13.0 it was not available for use on kernel mappings. Oh, fudge. */
1123#if MAC_OS_X_VERSION_MIN_REQUIRED >= 1050
1124 static uint32_t volatile s_fOptions = UINT32_MAX;
1125 uint32_t fOptions = s_fOptions;
1126 if (RT_UNLIKELY(fOptions == UINT32_MAX))
1127 s_fOptions = fOptions = version_major >= 17 ? 0x10000000 /*kIOMapPrefault*/ : 0; /* Since 10.13.0 (High Sierra). */
1128
1129 IOMemoryMap *pMemMap = pMemToMapDarwin->pMemDesc->createMappingInTask(kernel_task,
1130 0,
1131 kIOMapAnywhere | kIOMapDefaultCache | fOptions,
1132 offSub,
1133 cbSub);
1134#else
1135 IOMemoryMap *pMemMap = pMemToMapDarwin->pMemDesc->map(kernel_task,
1136 0,
1137 kIOMapAnywhere | kIOMapDefaultCache,
1138 offSub,
1139 cbSub);
1140#endif
1141 if (pMemMap)
1142 {
1143 IOVirtualAddress VirtAddr = pMemMap->getVirtualAddress();
1144 void *pv = (void *)(uintptr_t)VirtAddr;
1145 if ((uintptr_t)pv == VirtAddr && pv != NULL)
1146 {
1147//#ifdef __LP64__
1148// addr64_t Addr = pMemToMapDarwin->pMemDesc->getPhysicalSegment(offSub, NULL, kIOMemoryMapperNone);
1149//#else
1150// addr64_t Addr = pMemToMapDarwin->pMemDesc->getPhysicalSegment64(offSub, NULL);
1151//#endif
1152// MY_PRINTF("pv=%p: %8llx %8llx\n", pv, rtR0MemObjDarwinGetPTE(pv), Addr);
1153
1154// /*
1155// * Explicitly lock it so that we're sure it is present and that
1156// * its PTEs cannot be recycled.
1157// * Note! withAddressRange() doesn't work as it adds kIOMemoryTypeVirtual64
1158// * to the options which causes prepare() to not wire the pages.
1159// * This is probably a bug.
1160// */
1161// IOAddressRange Range = { (mach_vm_address_t)pv, cbSub };
1162// IOMemoryDescriptor *pMemDesc = IOMemoryDescriptor::withOptions(&Range,
1163// 1 /* count */,
1164// 0 /* offset */,
1165// kernel_task,
1166// kIODirectionInOut | kIOMemoryTypeVirtual,
1167// kIOMapperSystem);
1168// if (pMemDesc)
1169// {
1170// IOReturn IORet = pMemDesc->prepare(kIODirectionInOut);
1171// if (IORet == kIOReturnSuccess)
1172// {
1173 /* HACK ALERT! On kernels older than 10.10 (xnu version 14), we need to fault in
1174 the pages here so they can safely be accessed from inside simple
1175 locks and when preemption is disabled (no page-ins allowed).
1176 Note! This touching does not cause INTEL_PTE_WIRED (bit 10) to be set as we go
1177 thru general #PF and vm_fault doesn't figure it should be wired or something. */
1178 rtR0MemObjDarwinTouchPages(pv, cbSub ? cbSub : pMemToMap->cb);
1179 /** @todo First, the memory should've been mapped by now, and second, it
1180 * should have the wired attribute in the PTE (bit 10). Neither seems to
1181 * be the case. The disabled locking code doesn't make any difference,
1182 * which is extremely odd, and breaks rtR0MemObjNativeGetPagePhysAddr
1183 * (getPhysicalSegment64 -> 64 for the lock descriptor. */
1184//#ifdef __LP64__
1185// addr64_t Addr2 = pMemToMapDarwin->pMemDesc->getPhysicalSegment(offSub, NULL, kIOMemoryMapperNone);
1186//#else
1187// addr64_t Addr2 = pMemToMapDarwin->pMemDesc->getPhysicalSegment64(offSub, NULL);
1188//#endif
1189// MY_PRINTF("pv=%p: %8llx %8llx (%d)\n", pv, rtR0MemObjDarwinGetPTE(pv), Addr2, 2);
1190
1191 /*
1192 * Create the IPRT memory object.
1193 */
1194 PRTR0MEMOBJDARWIN pMemDarwin = (PRTR0MEMOBJDARWIN)rtR0MemObjNew(sizeof(*pMemDarwin), RTR0MEMOBJTYPE_MAPPING,
1195 pv, cbSub ? cbSub : pMemToMap->cb);
1196 if (pMemDarwin)
1197 {
1198 pMemDarwin->Core.u.Mapping.R0Process = NIL_RTR0PROCESS;
1199 pMemDarwin->pMemMap = pMemMap;
1200// pMemDarwin->pMemDesc = pMemDesc;
1201 *ppMem = &pMemDarwin->Core;
1202
1203 IPRT_DARWIN_RESTORE_EFL_AC();
1204 return VINF_SUCCESS;
1205 }
1206
1207// pMemDesc->complete();
1208// rc = VERR_NO_MEMORY;
1209// }
1210// else
1211// rc = RTErrConvertFromDarwinIO(IORet);
1212// pMemDesc->release();
1213// }
1214// else
1215// rc = VERR_MEMOBJ_INIT_FAILED;
1216 }
1217 else if (pv)
1218 rc = VERR_ADDRESS_TOO_BIG;
1219 else
1220 rc = VERR_MAP_FAILED;
1221 pMemMap->release();
1222 }
1223 else
1224 rc = VERR_MAP_FAILED;
1225 }
1226
1227 IPRT_DARWIN_RESTORE_EFL_AC();
1228 return rc;
1229}
1230
1231
1232DECLHIDDEN(int) rtR0MemObjNativeMapUser(PPRTR0MEMOBJINTERNAL ppMem, RTR0MEMOBJ pMemToMap, RTR3PTR R3PtrFixed, size_t uAlignment,
1233 unsigned fProt, RTR0PROCESS R0Process, size_t offSub, size_t cbSub)
1234{
1235 RT_NOREF(fProt);
1236
1237 /*
1238 * Check for unsupported things.
1239 */
1240 AssertReturn(R3PtrFixed == (RTR3PTR)-1, VERR_NOT_SUPPORTED);
1241 if (uAlignment > PAGE_SIZE)
1242 return VERR_NOT_SUPPORTED;
1243 Assert(!offSub || cbSub);
1244
1245 IPRT_DARWIN_SAVE_EFL_AC();
1246
1247 /*
1248 * Must have a memory descriptor.
1249 */
1250 int rc = VERR_INVALID_PARAMETER;
1251 PRTR0MEMOBJDARWIN pMemToMapDarwin = (PRTR0MEMOBJDARWIN)pMemToMap;
1252 if (pMemToMapDarwin->pMemDesc)
1253 {
1254#if MAC_OS_X_VERSION_MIN_REQUIRED >= 101000 /* The kIOMapPrefault option was added in 10.10.0. */
1255 IOMemoryMap *pMemMap = pMemToMapDarwin->pMemDesc->createMappingInTask((task_t)R0Process,
1256 0,
1257 kIOMapAnywhere | kIOMapDefaultCache | kIOMapPrefault,
1258 offSub,
1259 cbSub);
1260#elif MAC_OS_X_VERSION_MIN_REQUIRED >= 1050
1261 static uint32_t volatile s_fOptions = UINT32_MAX;
1262 uint32_t fOptions = s_fOptions;
1263 if (RT_UNLIKELY(fOptions == UINT32_MAX))
1264 s_fOptions = fOptions = version_major >= 14 ? 0x10000000 /*kIOMapPrefault*/ : 0; /* Since 10.10.0. */
1265 IOMemoryMap *pMemMap = pMemToMapDarwin->pMemDesc->createMappingInTask((task_t)R0Process,
1266 0,
1267 kIOMapAnywhere | kIOMapDefaultCache | fOptions,
1268 offSub,
1269 cbSub);
1270#else
1271 IOMemoryMap *pMemMap = pMemToMapDarwin->pMemDesc->map((task_t)R0Process,
1272 0,
1273 kIOMapAnywhere | kIOMapDefaultCache,
1274 offSub,
1275 cbSub);
1276#endif
1277 if (pMemMap)
1278 {
1279 IOVirtualAddress VirtAddr = pMemMap->getVirtualAddress();
1280 void *pv = (void *)(uintptr_t)VirtAddr;
1281 if ((uintptr_t)pv == VirtAddr && pv != NULL)
1282 {
1283 /*
1284 * Create the IPRT memory object.
1285 */
1286 PRTR0MEMOBJDARWIN pMemDarwin = (PRTR0MEMOBJDARWIN)rtR0MemObjNew(sizeof(*pMemDarwin), RTR0MEMOBJTYPE_MAPPING,
1287 pv, cbSub ? cbSub : pMemToMap->cb);
1288 if (pMemDarwin)
1289 {
1290 pMemDarwin->Core.u.Mapping.R0Process = R0Process;
1291 pMemDarwin->pMemMap = pMemMap;
1292 *ppMem = &pMemDarwin->Core;
1293
1294 IPRT_DARWIN_RESTORE_EFL_AC();
1295 return VINF_SUCCESS;
1296 }
1297
1298 rc = VERR_NO_MEMORY;
1299 }
1300 else if (pv)
1301 rc = VERR_ADDRESS_TOO_BIG;
1302 else
1303 rc = VERR_MAP_FAILED;
1304 pMemMap->release();
1305 }
1306 else
1307 rc = VERR_MAP_FAILED;
1308 }
1309
1310 IPRT_DARWIN_RESTORE_EFL_AC();
1311 return rc;
1312}
1313
1314
1315/**
1316 * Worker for rtR0MemObjNativeProtect that's typically called in a different
1317 * context.
1318 */
1319static int rtR0MemObjNativeProtectWorker(PRTR0MEMOBJINTERNAL pMem, size_t offSub, size_t cbSub, uint32_t fProt)
1320{
1321 IPRT_DARWIN_SAVE_EFL_AC();
1322
1323 /* Get the map for the object. */
1324 vm_map_t pVmMap = rtR0MemObjDarwinGetMap(pMem);
1325 if (!pVmMap)
1326 {
1327 IPRT_DARWIN_RESTORE_EFL_AC();
1328 return VERR_NOT_SUPPORTED;
1329 }
1330
1331 /*
1332 * Convert the protection.
1333 */
1334 vm_prot_t fMachProt;
1335 switch (fProt)
1336 {
1337 case RTMEM_PROT_NONE:
1338 fMachProt = VM_PROT_NONE;
1339 break;
1340 case RTMEM_PROT_READ:
1341 fMachProt = VM_PROT_READ;
1342 break;
1343 case RTMEM_PROT_READ | RTMEM_PROT_WRITE:
1344 fMachProt = VM_PROT_READ | VM_PROT_WRITE;
1345 break;
1346 case RTMEM_PROT_READ | RTMEM_PROT_WRITE | RTMEM_PROT_EXEC:
1347 fMachProt = VM_PROT_READ | VM_PROT_WRITE | VM_PROT_EXECUTE;
1348 break;
1349 case RTMEM_PROT_WRITE:
1350 fMachProt = VM_PROT_WRITE | VM_PROT_READ; /* never write-only */
1351 break;
1352 case RTMEM_PROT_WRITE | RTMEM_PROT_EXEC:
1353 fMachProt = VM_PROT_WRITE | VM_PROT_EXECUTE | VM_PROT_READ; /* never write-only or execute-only */
1354 break;
1355 case RTMEM_PROT_EXEC:
1356 fMachProt = VM_PROT_EXECUTE | VM_PROT_READ; /* never execute-only */
1357 break;
1358 default:
1359 AssertFailedReturn(VERR_INVALID_PARAMETER);
1360 }
1361
1362 /*
1363 * Do the job.
1364 */
1365 vm_offset_t Start = (uintptr_t)pMem->pv + offSub;
1366 kern_return_t krc = vm_protect(pVmMap,
1367 Start,
1368 cbSub,
1369 false,
1370 fMachProt);
1371 if (krc != KERN_SUCCESS)
1372 {
1373 static int s_cComplaints = 0;
1374 if (s_cComplaints < 10)
1375 {
1376 s_cComplaints++;
1377 printf("rtR0MemObjNativeProtect: vm_protect(%p,%p,%p,false,%#x) -> %d\n",
1378 (void *)pVmMap, (void *)Start, (void *)cbSub, fMachProt, krc);
1379
1380 kern_return_t krc2;
1381 vm_offset_t pvReal = Start;
1382 vm_size_t cbReal = 0;
1383 mach_msg_type_number_t cInfo = VM_REGION_BASIC_INFO_COUNT;
1384 struct vm_region_basic_info Info;
1385 RT_ZERO(Info);
1386 krc2 = vm_region(pVmMap, &pvReal, &cbReal, VM_REGION_BASIC_INFO, (vm_region_info_t)&Info, &cInfo, NULL);
1387 printf("rtR0MemObjNativeProtect: basic info - krc2=%d pv=%p cb=%p prot=%#x max=%#x inh=%#x shr=%d rvd=%d off=%#x behavior=%#x wired=%#x\n",
1388 krc2, (void *)pvReal, (void *)cbReal, Info.protection, Info.max_protection, Info.inheritance,
1389 Info.shared, Info.reserved, Info.offset, Info.behavior, Info.user_wired_count);
1390 }
1391 IPRT_DARWIN_RESTORE_EFL_AC();
1392 return RTErrConvertFromDarwinKern(krc);
1393 }
1394
1395 /*
1396 * Touch the pages if they should be writable afterwards and accessible
1397 * from code which should never fault. vm_protect() may leave pages
1398 * temporarily write protected, possibly due to pmap no-upgrade rules?
1399 *
1400 * This is the same trick (or HACK ALERT if you like) as applied in
1401 * rtR0MemObjNativeMapKernel.
1402 */
1403 if ( pMem->enmType != RTR0MEMOBJTYPE_MAPPING
1404 || pMem->u.Mapping.R0Process == NIL_RTR0PROCESS)
1405 {
1406 if (fProt & RTMEM_PROT_WRITE)
1407 rtR0MemObjDarwinTouchPages((void *)Start, cbSub);
1408 /*
1409 * Sniff (read) read-only pages too, just to be sure.
1410 */
1411 else if (fProt & (RTMEM_PROT_READ | RTMEM_PROT_EXEC))
1412 rtR0MemObjDarwinSniffPages((void const *)Start, cbSub);
1413 }
1414
1415 IPRT_DARWIN_RESTORE_EFL_AC();
1416 return VINF_SUCCESS;
1417}
1418
1419
1420/**
1421 * rtR0MemObjNativeProtect kernel_task wrapper function.
1422 */
1423static void rtR0MemObjNativeProtectWorkerOnKernelThread(void *pvUser0, void *pvUser1)
1424{
1425 AssertPtr(pvUser0); Assert(pvUser1 == NULL); NOREF(pvUser1);
1426 RTR0MEMOBJDARWINPROTECTARGS *pArgs = (RTR0MEMOBJDARWINPROTECTARGS *)pvUser0;
1427 int rc = rtR0MemObjNativeProtectWorker(pArgs->pMem, pArgs->offSub, pArgs->cbSub, pArgs->fProt);
1428 rtR0MemObjDarwinSignalThreadWaitinOnTask(&pArgs->Core, rc);
1429}
1430
1431
1432DECLHIDDEN(int) rtR0MemObjNativeProtect(PRTR0MEMOBJINTERNAL pMem, size_t offSub, size_t cbSub, uint32_t fProt)
1433{
1434 /*
 1435 * The code won't work right because process codesigning properties leak
 1436 * into kernel_map memory management. So, if the user process we're running
 1437 * in has CS restrictions active, we cannot play around with the EXEC
 1438 * protection because some vm_fault.c code thinks we're modifying the process
 1439 * map or something.
1440 */
1441 int rc;
1442 if (rtR0MemObjDarwinGetMap(pMem) == kernel_map)
1443 {
1444 RTR0MEMOBJDARWINPROTECTARGS Args;
1445 Args.pMem = pMem;
1446 Args.offSub = offSub;
1447 Args.cbSub = cbSub;
1448 Args.fProt = fProt;
1449 rc = rtR0MemObjDarwinDoInKernelTaskThread(rtR0MemObjNativeProtectWorkerOnKernelThread, &Args.Core);
1450 }
1451 else
1452 rc = rtR0MemObjNativeProtectWorker(pMem, offSub, cbSub, fProt);
1453 return rc;
1454}
1455
1456
1457DECLHIDDEN(RTHCPHYS) rtR0MemObjNativeGetPagePhysAddr(PRTR0MEMOBJINTERNAL pMem, size_t iPage)
1458{
1459 RTHCPHYS PhysAddr;
1460 PRTR0MEMOBJDARWIN pMemDarwin = (PRTR0MEMOBJDARWIN)pMem;
1461 IPRT_DARWIN_SAVE_EFL_AC();
1462
1463#ifdef USE_VM_MAP_WIRE
1464 /*
1465 * Locked memory doesn't have a memory descriptor and
1466 * needs to be handled differently.
1467 */
1468 if (pMemDarwin->Core.enmType == RTR0MEMOBJTYPE_LOCK)
1469 {
1470 ppnum_t PgNo;
1471 if (pMemDarwin->Core.u.Lock.R0Process == NIL_RTR0PROCESS)
1472 PgNo = pmap_find_phys(kernel_pmap, (uintptr_t)pMemDarwin->Core.pv + iPage * PAGE_SIZE);
1473 else
1474 {
1475 /*
 1476 * From what I can tell, Apple seems to have locked up all the
 1477 * available interfaces that could help us obtain the pmap_t of a task
 1478 * or vm_map_t.
 1479 *
 1480 * So, we'll have to figure out where in the vm_map_t structure it is
 1481 * and read it ourselves. ASSUMING that kernel_pmap is pointed to by
1482 * kernel_map->pmap, we scan kernel_map to locate the structure offset.
1483 * Not nice, but it will hopefully do the job in a reliable manner...
1484 *
1485 * (get_task_pmap, get_map_pmap or vm_map_pmap is what we really need btw.)
1486 */
1487 static int s_offPmap = -1;
1488 if (RT_UNLIKELY(s_offPmap == -1))
1489 {
1490 pmap_t const *p = (pmap_t *)kernel_map;
1491 pmap_t const * const pEnd = p + 64;
1492 for (; p < pEnd; p++)
1493 if (*p == kernel_pmap)
1494 {
1495 s_offPmap = (uintptr_t)p - (uintptr_t)kernel_map;
1496 break;
1497 }
1498 AssertReturn(s_offPmap >= 0, NIL_RTHCPHYS);
1499 }
1500 pmap_t Pmap = *(pmap_t *)((uintptr_t)get_task_map((task_t)pMemDarwin->Core.u.Lock.R0Process) + s_offPmap);
1501 PgNo = pmap_find_phys(Pmap, (uintptr_t)pMemDarwin->Core.pv + iPage * PAGE_SIZE);
1502 }
1503
1504 IPRT_DARWIN_RESTORE_EFL_AC();
1505 AssertReturn(PgNo, NIL_RTHCPHYS);
1506 PhysAddr = (RTHCPHYS)PgNo << PAGE_SHIFT;
1507 Assert((PhysAddr >> PAGE_SHIFT) == PgNo);
1508 }
1509 else
1510#endif /* USE_VM_MAP_WIRE */
1511 {
1512 /*
1513 * Get the memory descriptor.
1514 */
1515 IOMemoryDescriptor *pMemDesc = pMemDarwin->pMemDesc;
1516 if (!pMemDesc)
1517 pMemDesc = pMemDarwin->pMemMap->getMemoryDescriptor();
1518 AssertReturn(pMemDesc, NIL_RTHCPHYS);
1519
1520 /*
1521 * If we've got a memory descriptor, use getPhysicalSegment64().
1522 */
1523#ifdef __LP64__
1524 addr64_t Addr = pMemDesc->getPhysicalSegment(iPage * PAGE_SIZE, NULL, kIOMemoryMapperNone);
1525#else
1526 addr64_t Addr = pMemDesc->getPhysicalSegment64(iPage * PAGE_SIZE, NULL);
1527#endif
1528 IPRT_DARWIN_RESTORE_EFL_AC();
1529 AssertMsgReturn(Addr, ("iPage=%u\n", iPage), NIL_RTHCPHYS);
1530 PhysAddr = Addr;
1531 AssertMsgReturn(PhysAddr == Addr, ("PhysAddr=%RHp Addr=%RX64\n", PhysAddr, (uint64_t)Addr), NIL_RTHCPHYS);
1532 }
1533
1534 return PhysAddr;
1535}
1536