VirtualBox

source: vbox/trunk/src/VBox/Runtime/r0drv/freebsd/memobj-r0drv-freebsd.c@18920

Last change on this file since 18920 was 18920, checked in by vboxsync, 16 years ago

Runtime/R0/FreeBSD: Implement method to map kernel memory into user space

/* $Id: memobj-r0drv-freebsd.c 18920 2009-04-15 20:59:03Z vboxsync $ */
/** @file
 * IPRT - Ring-0 Memory Objects, FreeBSD.
 */

/*
 * Copyright (c) 2007 knut st. osmundsen <[email protected]>
 *
 * Permission is hereby granted, free of charge, to any person
 * obtaining a copy of this software and associated documentation
 * files (the "Software"), to deal in the Software without
 * restriction, including without limitation the rights to use,
 * copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following
 * conditions:
 *
 * The above copyright notice and this permission notice shall be
 * included in all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
 * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
 * HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
 * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */


/*******************************************************************************
*   Header Files                                                               *
*******************************************************************************/
#include "the-freebsd-kernel.h"

#include <iprt/memobj.h>
#include <iprt/mem.h>
#include <iprt/err.h>
#include <iprt/assert.h>
#include <iprt/log.h>
#include <iprt/param.h>
#include <iprt/process.h>
#include "internal/memobj.h"

/*******************************************************************************
*   Structures and Typedefs                                                    *
*******************************************************************************/
/**
 * The FreeBSD version of the memory object structure.
 */
typedef struct RTR0MEMOBJFREEBSD
{
    /** The core structure. */
    RTR0MEMOBJINTERNAL  Core;
    /** The VM object associated with the allocation. */
    vm_object_t         pObject;
    /** The VM object associated with the mapping.
     * In a mapping mem object, this is the shadow object?
     * In an allocation/enter mem object, this is the shared object we constructed (contig, perhaps alloc). */
    vm_object_t         pMappingObject;
} RTR0MEMOBJFREEBSD, *PRTR0MEMOBJFREEBSD;


MALLOC_DEFINE(M_IPRTMOBJ, "iprtmobj", "IPRT - R0MemObj");
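
/* MALLOC_DEFINE(9) registers the M_IPRTMOBJ malloc(9) type that the
 * malloc()/free() and contigmalloc()/contigfree() calls below are accounted
 * against (it shows up as "iprtmobj" in vmstat -m). */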

/*******************************************************************************
*   Internal Functions                                                         *
*******************************************************************************/


int rtR0MemObjNativeFree(RTR0MEMOBJ pMem)
{
    PRTR0MEMOBJFREEBSD pMemFreeBSD = (PRTR0MEMOBJFREEBSD)pMem;
    int rc;

    switch (pMemFreeBSD->Core.enmType)
    {
        case RTR0MEMOBJTYPE_CONT:
            contigfree(pMemFreeBSD->Core.pv, pMemFreeBSD->Core.cb, M_IPRTMOBJ);
            if (pMemFreeBSD->pMappingObject)
            {
                rc = vm_map_remove(kernel_map,
                                   (vm_offset_t)pMemFreeBSD->Core.pv,
                                   (vm_offset_t)pMemFreeBSD->Core.pv + pMemFreeBSD->Core.cb);
                AssertMsg(rc == KERN_SUCCESS, ("%#x", rc));
            }
            break;

        case RTR0MEMOBJTYPE_PAGE:
            if (pMemFreeBSD->pObject)
            {
                rc = vm_map_remove(kernel_map,
                                   (vm_offset_t)pMemFreeBSD->Core.pv,
                                   (vm_offset_t)pMemFreeBSD->Core.pv + pMemFreeBSD->Core.cb);
                AssertMsg(rc == KERN_SUCCESS, ("%#x", rc));
            }
            else
            {
                free(pMemFreeBSD->Core.pv, M_IPRTMOBJ);
                if (pMemFreeBSD->pMappingObject)
                {
                    rc = vm_map_remove(kernel_map,
                                       (vm_offset_t)pMemFreeBSD->Core.pv,
                                       (vm_offset_t)pMemFreeBSD->Core.pv + pMemFreeBSD->Core.cb);
                    AssertMsg(rc == KERN_SUCCESS, ("%#x", rc));
                }
            }
            break;

        case RTR0MEMOBJTYPE_LOCK:
        {
            vm_map_t pMap = kernel_map;
            if (pMemFreeBSD->Core.u.Lock.R0Process != NIL_RTR0PROCESS)
                pMap = &((struct proc *)pMemFreeBSD->Core.u.Lock.R0Process)->p_vmspace->vm_map;
            rc = vm_map_unwire(pMap,
                               (vm_offset_t)pMemFreeBSD->Core.pv,
                               (vm_offset_t)pMemFreeBSD->Core.pv + pMemFreeBSD->Core.cb,
                               VM_MAP_WIRE_SYSTEM | VM_MAP_WIRE_NOHOLES);
            AssertMsg(rc == KERN_SUCCESS, ("%#x", rc));
            break;
        }

        case RTR0MEMOBJTYPE_RES_VIRT:
        {
            vm_map_t pMap = kernel_map;
            if (pMemFreeBSD->Core.u.Lock.R0Process != NIL_RTR0PROCESS)
                pMap = &((struct proc *)pMemFreeBSD->Core.u.Lock.R0Process)->p_vmspace->vm_map;
            rc = vm_map_remove(pMap,
                               (vm_offset_t)pMemFreeBSD->Core.pv,
                               (vm_offset_t)pMemFreeBSD->Core.pv + pMemFreeBSD->Core.cb);
            AssertMsg(rc == KERN_SUCCESS, ("%#x", rc));
            break;
        }

        case RTR0MEMOBJTYPE_MAPPING:
        {
            vm_map_t pMap = kernel_map;

            /* vm_map_remove will unmap the pages we inserted with pmap_enter. */
            AssertMsg(pMemFreeBSD->pMappingObject != NULL, ("MappingObject is NULL\n"));
            if (pMemFreeBSD->Core.u.Mapping.R0Process != NIL_RTR0PROCESS)
                pMap = &((struct proc *)pMemFreeBSD->Core.u.Mapping.R0Process)->p_vmspace->vm_map;

            rc = vm_map_remove(pMap,
                               (vm_offset_t)pMemFreeBSD->Core.pv,
                               (vm_offset_t)pMemFreeBSD->Core.pv + pMemFreeBSD->Core.cb);
            AssertMsg(rc == KERN_SUCCESS, ("%#x", rc));
            break;
        }

        /* unused: */
        case RTR0MEMOBJTYPE_LOW:
        case RTR0MEMOBJTYPE_PHYS:
        case RTR0MEMOBJTYPE_PHYS_NC:
        default:
            AssertMsgFailed(("enmType=%d\n", pMemFreeBSD->Core.enmType));
            return VERR_INTERNAL_ERROR;
    }

    return VINF_SUCCESS;
}


int rtR0MemObjNativeAllocPage(PPRTR0MEMOBJINTERNAL ppMem, size_t cb, bool fExecutable)
{
    int rc;

    /* create the object. */
    PRTR0MEMOBJFREEBSD pMemFreeBSD = (PRTR0MEMOBJFREEBSD)rtR0MemObjNew(sizeof(*pMemFreeBSD), RTR0MEMOBJTYPE_PAGE, NULL, cb);
    if (!pMemFreeBSD)
        return VERR_NO_MEMORY;

    /*
     * We have two options here, both expressed nicely by how kld allocates
     * memory for the module bits:
     * http://fxr.watson.org/fxr/source/kern/link_elf.c?v=RELENG62#L701
     */
#if 0
    pMemFreeBSD->Core.pv = malloc(cb, M_IPRTMOBJ, M_ZERO);
    if (pMemFreeBSD->Core.pv)
    {
        *ppMem = &pMemFreeBSD->Core;
        return VINF_SUCCESS;
    }
    rc = VERR_NO_MEMORY;
    NOREF(fExecutable);

#else
    pMemFreeBSD->pObject = vm_object_allocate(OBJT_DEFAULT, cb >> PAGE_SHIFT);
    if (pMemFreeBSD->pObject)
    {
        vm_offset_t MapAddress = vm_map_min(kernel_map);
        rc = vm_map_find(kernel_map,            /* map */
                         pMemFreeBSD->pObject,  /* object */
                         0,                     /* offset */
                         &MapAddress,           /* addr (IN/OUT) */
                         cb,                    /* length */
                         TRUE,                  /* find_space */
                         fExecutable            /* protection */
                         ? VM_PROT_ALL
                         : VM_PROT_RW,
                         VM_PROT_ALL,           /* max(_prot) */
                         FALSE);                /* cow (copy-on-write) */
        if (rc == KERN_SUCCESS)
        {
            rc = vm_map_wire(kernel_map,        /* map */
                             MapAddress,        /* start */
                             MapAddress + cb,   /* end */
                             VM_MAP_WIRE_SYSTEM | VM_MAP_WIRE_NOHOLES);
            if (rc == KERN_SUCCESS)
            {
                pMemFreeBSD->Core.pv = (void *)MapAddress;
                *ppMem = &pMemFreeBSD->Core;
                return VINF_SUCCESS;
            }

            vm_map_remove(kernel_map,
                          MapAddress,
                          MapAddress + cb);
        }
        rc = VERR_NO_MEMORY; /** @todo fix translation (borrow from darwin) */
    }
    else
        rc = VERR_NO_MEMORY;
#endif

    rtR0MemObjDelete(&pMemFreeBSD->Core);
    return rc;
}


int rtR0MemObjNativeAllocLow(PPRTR0MEMOBJINTERNAL ppMem, size_t cb, bool fExecutable)
{
    /*
     * Try an AllocPage allocation first and see if we get lucky; if not, fall
     * back on contigmalloc. Might wish to try and find our own pages or
     * something later if this turns into a problem spot on AMD64 boxes.
     */
    int rc = rtR0MemObjNativeAllocPage(ppMem, cb, fExecutable);
    if (RT_SUCCESS(rc))
    {
        size_t iPage = cb >> PAGE_SHIFT;
        while (iPage-- > 0)
            if (rtR0MemObjNativeGetPagePhysAddr(*ppMem, iPage) > (_4G - PAGE_SIZE))
            {
                RTR0MemObjFree(*ppMem, false);
                *ppMem = NULL;
                rc = VERR_NO_MEMORY;
                break;
            }
    }
    if (RT_FAILURE(rc))
        rc = rtR0MemObjNativeAllocCont(ppMem, cb, fExecutable);
    return rc;
}


int rtR0MemObjNativeAllocCont(PPRTR0MEMOBJINTERNAL ppMem, size_t cb, bool fExecutable)
{
    /* create the object. */
    PRTR0MEMOBJFREEBSD pMemFreeBSD = (PRTR0MEMOBJFREEBSD)rtR0MemObjNew(sizeof(*pMemFreeBSD), RTR0MEMOBJTYPE_CONT, NULL, cb);
    if (!pMemFreeBSD)
        return VERR_NO_MEMORY;

    /* do the allocation. */
    pMemFreeBSD->Core.pv = contigmalloc(cb,                 /* size */
                                        M_IPRTMOBJ,         /* type */
                                        M_NOWAIT | M_ZERO,  /* flags */
                                        0,                  /* lowest physical address */
                                        _4G-1,              /* highest physical address */
                                        PAGE_SIZE,          /* alignment */
                                        0);                 /* boundary */
    if (pMemFreeBSD->Core.pv)
    {
        pMemFreeBSD->Core.u.Cont.Phys = vtophys(pMemFreeBSD->Core.pv);
        *ppMem = &pMemFreeBSD->Core;
        return VINF_SUCCESS;
    }

    NOREF(fExecutable);
    rtR0MemObjDelete(&pMemFreeBSD->Core);
    return VERR_NO_MEMORY;
}


int rtR0MemObjNativeAllocPhys(PPRTR0MEMOBJINTERNAL ppMem, size_t cb, RTHCPHYS PhysHighest)
{
    /** @todo check if there is a more appropriate API somewhere. */

    /* create the object. */
    PRTR0MEMOBJFREEBSD pMemFreeBSD = (PRTR0MEMOBJFREEBSD)rtR0MemObjNew(sizeof(*pMemFreeBSD), RTR0MEMOBJTYPE_CONT, NULL, cb);
    if (!pMemFreeBSD)
        return VERR_NO_MEMORY;

    /* do the allocation. */
    pMemFreeBSD->Core.pv = contigmalloc(cb,                 /* size */
                                        M_IPRTMOBJ,         /* type */
                                        M_NOWAIT | M_ZERO,  /* flags */
                                        0,                  /* lowest physical address */
                                        PhysHighest,        /* highest physical address */
                                        PAGE_SIZE,          /* alignment */
                                        0);                 /* boundary */
    if (pMemFreeBSD->Core.pv)
    {
        pMemFreeBSD->Core.u.Cont.Phys = vtophys(pMemFreeBSD->Core.pv);
        *ppMem = &pMemFreeBSD->Core;
        return VINF_SUCCESS;
    }

    rtR0MemObjDelete(&pMemFreeBSD->Core);
    return VERR_NO_MEMORY;
}


int rtR0MemObjNativeAllocPhysNC(PPRTR0MEMOBJINTERNAL ppMem, size_t cb, RTHCPHYS PhysHighest)
{
    /** @todo rtR0MemObjNativeAllocPhys / freebsd */
    return VERR_NOT_SUPPORTED;
}


int rtR0MemObjNativeEnterPhys(PPRTR0MEMOBJINTERNAL ppMem, RTHCPHYS Phys, size_t cb)
{
    /* create the object. */
    PRTR0MEMOBJFREEBSD pMemFreeBSD = (PRTR0MEMOBJFREEBSD)rtR0MemObjNew(sizeof(*pMemFreeBSD), RTR0MEMOBJTYPE_PHYS, NULL, cb);
    if (!pMemFreeBSD)
        return VERR_NO_MEMORY;

    /* there is no allocation here, it needs to be mapped somewhere first. */
    pMemFreeBSD->Core.u.Phys.fAllocated = false;
    pMemFreeBSD->Core.u.Phys.PhysBase = Phys;
    *ppMem = &pMemFreeBSD->Core;
    return VINF_SUCCESS;
}


int rtR0MemObjNativeLockUser(PPRTR0MEMOBJINTERNAL ppMem, RTR3PTR R3Ptr, size_t cb, RTR0PROCESS R0Process)
{
    int rc;

    /* create the object. */
    PRTR0MEMOBJFREEBSD pMemFreeBSD = (PRTR0MEMOBJFREEBSD)rtR0MemObjNew(sizeof(*pMemFreeBSD), RTR0MEMOBJTYPE_LOCK, (void *)R3Ptr, cb);
    if (!pMemFreeBSD)
        return VERR_NO_MEMORY;

    /*
     * We could've used vslock here, but we don't wish to be subject to
     * resource usage restrictions, so we'll call vm_map_wire directly.
     */
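#if 0 /* Illustrative alternative, not what this code does: vslock(9) would
       * wire the range for the current process, but it charges the pages
       * against the RLIMIT_MEMLOCK resource limit (undone with vsunlock(9)),
       * which is precisely the restriction we are avoiding here. */
    rc = vslock((void *)R3Ptr, cb);
#endif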
    rc = vm_map_wire(&((struct proc *)R0Process)->p_vmspace->vm_map, /* the map */
                     (vm_offset_t)R3Ptr,                             /* start */
                     (vm_offset_t)R3Ptr + cb,                        /* end */
                     VM_MAP_WIRE_USER | VM_MAP_WIRE_NOHOLES);        /* flags */
    if (rc == KERN_SUCCESS)
    {
        pMemFreeBSD->Core.u.Lock.R0Process = R0Process;
        *ppMem = &pMemFreeBSD->Core;
        return VINF_SUCCESS;
    }
    rtR0MemObjDelete(&pMemFreeBSD->Core);
    return VERR_NO_MEMORY; /** @todo fix mach -> vbox error conversion for freebsd. */
}


int rtR0MemObjNativeLockKernel(PPRTR0MEMOBJINTERNAL ppMem, void *pv, size_t cb)
{
    int rc;

    /* create the object. */
    PRTR0MEMOBJFREEBSD pMemFreeBSD = (PRTR0MEMOBJFREEBSD)rtR0MemObjNew(sizeof(*pMemFreeBSD), RTR0MEMOBJTYPE_LOCK, pv, cb);
    if (!pMemFreeBSD)
        return VERR_NO_MEMORY;

    /* lock the memory */
    rc = vm_map_wire(kernel_map,                                /* the map */
                     (vm_offset_t)pv,                           /* start */
                     (vm_offset_t)pv + cb,                      /* end */
                     VM_MAP_WIRE_SYSTEM | VM_MAP_WIRE_NOHOLES); /* flags - SYSTEM? */
    if (rc == KERN_SUCCESS)
    {
        pMemFreeBSD->Core.u.Lock.R0Process = NIL_RTR0PROCESS;
        *ppMem = &pMemFreeBSD->Core;
        return VINF_SUCCESS;
    }
    rtR0MemObjDelete(&pMemFreeBSD->Core);
    return VERR_NO_MEMORY; /** @todo fix mach -> vbox error conversion for freebsd. */
}


/**
 * Worker for the two virtual address space reservers.
 *
 * We're leaning on the examples provided by mmap and vm_mmap in vm_mmap.c here.
 */
static int rtR0MemObjNativeReserveInMap(PPRTR0MEMOBJINTERNAL ppMem, void *pvFixed, size_t cb, size_t uAlignment, RTR0PROCESS R0Process, vm_map_t pMap)
{
    int rc;

    /*
     * The pvFixed address range must be within the VM space when specified.
     */
    if (pvFixed != (void *)-1
        && (   (vm_offset_t)pvFixed < vm_map_min(pMap)
            || (vm_offset_t)pvFixed + cb > vm_map_max(pMap)))
        return VERR_INVALID_PARAMETER;

    /*
     * Create the object.
     */
    PRTR0MEMOBJFREEBSD pMemFreeBSD = (PRTR0MEMOBJFREEBSD)rtR0MemObjNew(sizeof(*pMemFreeBSD), RTR0MEMOBJTYPE_RES_VIRT, NULL, cb);
    if (!pMemFreeBSD)
        return VERR_NO_MEMORY;

    /*
     * Allocate an empty VM object and map it into the requested map.
     */
    pMemFreeBSD->pObject = vm_object_allocate(OBJT_DEFAULT, cb >> PAGE_SHIFT);
    if (pMemFreeBSD->pObject)
    {
        vm_offset_t MapAddress = pvFixed != (void *)-1
                               ? (vm_offset_t)pvFixed
                               : vm_map_min(pMap);
        if (pvFixed != (void *)-1) /* -1 means any address, so nothing to evict there. */
            vm_map_remove(pMap,
                          MapAddress,
                          MapAddress + cb);

        rc = vm_map_find(pMap,                  /* map */
                         pMemFreeBSD->pObject,  /* object */
                         0,                     /* offset */
                         &MapAddress,           /* addr (IN/OUT) */
                         cb,                    /* length */
                         pvFixed == (void *)-1, /* find_space */
                         VM_PROT_NONE,          /* protection */
                         VM_PROT_ALL,           /* max(_prot) ?? */
                         0);                    /* cow (copy-on-write) */
        if (rc == KERN_SUCCESS)
        {
            if (R0Process != NIL_RTR0PROCESS)
            {
                rc = vm_map_inherit(pMap,
                                    MapAddress,
                                    MapAddress + cb,
                                    VM_INHERIT_SHARE);
                AssertMsg(rc == KERN_SUCCESS, ("%#x\n", rc));
            }
            pMemFreeBSD->Core.pv = (void *)MapAddress;
            pMemFreeBSD->Core.u.ResVirt.R0Process = R0Process;
            *ppMem = &pMemFreeBSD->Core;
            return VINF_SUCCESS;
        }
        vm_object_deallocate(pMemFreeBSD->pObject);
        rc = VERR_NO_MEMORY; /** @todo fix translation (borrow from darwin) */
    }
    else
        rc = VERR_NO_MEMORY;
    rtR0MemObjDelete(&pMemFreeBSD->Core);
    return rc;
}

int rtR0MemObjNativeReserveKernel(PPRTR0MEMOBJINTERNAL ppMem, void *pvFixed, size_t cb, size_t uAlignment)
{
    return rtR0MemObjNativeReserveInMap(ppMem, pvFixed, cb, uAlignment, NIL_RTR0PROCESS, kernel_map);
}


int rtR0MemObjNativeReserveUser(PPRTR0MEMOBJINTERNAL ppMem, RTR3PTR R3PtrFixed, size_t cb, size_t uAlignment, RTR0PROCESS R0Process)
{
    return rtR0MemObjNativeReserveInMap(ppMem, (void *)R3PtrFixed, cb, uAlignment, R0Process,
                                        &((struct proc *)R0Process)->p_vmspace->vm_map);
}


int rtR0MemObjNativeMapKernel(PPRTR0MEMOBJINTERNAL ppMem, RTR0MEMOBJ pMemToMap, void *pvFixed, size_t uAlignment,
                              unsigned fProt, size_t offSub, size_t cbSub)
{
    AssertMsgReturn(!offSub && !cbSub, ("%#x %#x\n", offSub, cbSub), VERR_NOT_SUPPORTED);
    AssertMsgReturn(pvFixed == (void *)-1, ("%p\n", pvFixed), VERR_NOT_SUPPORTED);

/* Phys: see pmap_mapdev in i386/i386/pmap.c (http://fxr.watson.org/fxr/source/i386/i386/pmap.c?v=RELENG62#L2860) */
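#if 0 /* Illustrative sketch, not part of the implementation: roughly how a
       * RTR0MEMOBJTYPE_PHYS object could be given a ring-0 mapping with
       * pmap_mapdev(9), as the note above suggests (i386/amd64 only; uses the
       * machine-dependent default caching attributes; would be undone again
       * with pmap_unmapdev(9)). */
    {
        PRTR0MEMOBJFREEBSD pPhysMemFreeBSD = (PRTR0MEMOBJFREEBSD)pMemToMap;
        if (pPhysMemFreeBSD->Core.enmType == RTR0MEMOBJTYPE_PHYS)
            pPhysMemFreeBSD->Core.pv = pmap_mapdev(pPhysMemFreeBSD->Core.u.Phys.PhysBase,
                                                   pPhysMemFreeBSD->Core.cb);
    }
#endif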

#if 0 /* Unfinished draft carried over from the OS/2 backend (note the OS/2
       * names and APIs below); it does not compile on FreeBSD. */
/** @todo finish the implementation. */

    int rc;
    void *pvR0 = NULL;
    PRTR0MEMOBJFREEBSD pMemToMapOs2 = (PRTR0MEMOBJFREEBSD)pMemToMap;
    switch (pMemToMapOs2->Core.enmType)
    {
        /*
         * These have kernel mappings.
         */
        case RTR0MEMOBJTYPE_PAGE:
        case RTR0MEMOBJTYPE_LOW:
        case RTR0MEMOBJTYPE_CONT:
            pvR0 = pMemToMapOs2->Core.pv;
            break;

        case RTR0MEMOBJTYPE_PHYS_NC:
        case RTR0MEMOBJTYPE_PHYS:
            pvR0 = pMemToMapOs2->Core.pv;
            if (!pvR0)
            {
                /* no ring-0 mapping, so allocate a mapping in the process. */
                AssertMsgReturn(uAlignment == PAGE_SIZE, ("%#zx\n", uAlignment), VERR_NOT_SUPPORTED);
                AssertMsgReturn(fProt & RTMEM_PROT_WRITE, ("%#x\n", fProt), VERR_NOT_SUPPORTED);
                Assert(!pMemToMapOs2->Core.u.Phys.fAllocated);
                ULONG ulPhys = pMemToMapOs2->Core.u.Phys.PhysBase;
                rc = KernVMAlloc(pMemToMapOs2->Core.cb, VMDHA_PHYS, &pvR0, (PPVOID)&ulPhys, NULL);
                if (rc)
                    return RTErrConvertFromOS2(rc);
                pMemToMapOs2->Core.pv = pvR0;
            }
            break;

        case RTR0MEMOBJTYPE_LOCK:
            if (pMemToMapOs2->Core.u.Lock.R0Process != NIL_RTR0PROCESS)
                return VERR_NOT_SUPPORTED; /** @todo implement this... */
            pvR0 = pMemToMapOs2->Core.pv;
            break;

        case RTR0MEMOBJTYPE_RES_VIRT:
        case RTR0MEMOBJTYPE_MAPPING:
        default:
            AssertMsgFailed(("enmType=%d\n", pMemToMapOs2->Core.enmType));
            return VERR_INTERNAL_ERROR;
    }

    /*
     * Create a dummy mapping object for it.
     *
     * All mappings are read/write/execute in OS/2 and there aren't
     * any cache options, so sharing is ok. And the main memory object
     * isn't actually freed until all the mappings have been freed up
     * (reference counting).
     */
    PRTR0MEMOBJFREEBSD pMemFreeBSD = (PRTR0MEMOBJFREEBSD)rtR0MemObjNew(RT_OFFSETOF(RTR0MEMOBJOS2, Lock), RTR0MEMOBJTYPE_MAPPING, pvR0, pMemToMapOs2->Core.cb);
    if (pMemFreeBSD)
    {
        pMemFreeBSD->Core.u.Mapping.R0Process = NIL_RTR0PROCESS;
        *ppMem = &pMemFreeBSD->Core;
        return VINF_SUCCESS;
    }
    return VERR_NO_MEMORY;
#endif
    return VERR_NOT_IMPLEMENTED;
}

/* see http://markmail.org/message/udhq33tefgtyfozs */
int rtR0MemObjNativeMapUser(PPRTR0MEMOBJINTERNAL ppMem, RTR0MEMOBJ pMemToMap, RTR3PTR R3PtrFixed, size_t uAlignment, unsigned fProt, RTR0PROCESS R0Process)
{
    AssertMsgReturn(R0Process == RTR0ProcHandleSelf(), ("%p != %p\n", R0Process, RTR0ProcHandleSelf()), VERR_NOT_SUPPORTED);
    AssertMsgReturn(R3PtrFixed == (RTR3PTR)-1, ("%p\n", R3PtrFixed), VERR_NOT_SUPPORTED);

    int rc;
    vm_object_t pObjectToMap = ((PRTR0MEMOBJFREEBSD)pMemToMap)->pObject;
    struct proc *pProc = (struct proc *)R0Process;
    struct vm_map *pProcMap = &pProc->p_vmspace->vm_map;
    vm_offset_t AddrR3 = 0;
    vm_prot_t ProtectionFlags = 0;

    /* Convert the IPRT protection flags to FreeBSD VM protection flags.
       (RTMEM_PROT_NONE is 0, hence the equality check rather than a mask test.) */
    if (fProt == RTMEM_PROT_NONE)
        ProtectionFlags = VM_PROT_NONE;
    if ((fProt & RTMEM_PROT_READ) == RTMEM_PROT_READ)
        ProtectionFlags |= VM_PROT_READ;
    if ((fProt & RTMEM_PROT_WRITE) == RTMEM_PROT_WRITE)
        ProtectionFlags |= VM_PROT_WRITE;
    if ((fProt & RTMEM_PROT_EXEC) == RTMEM_PROT_EXEC)
        ProtectionFlags |= VM_PROT_EXECUTE;

    /* Use the area above the process' data segment limit as the address hint. */
    PROC_LOCK(pProc);
    AddrR3 = round_page((vm_offset_t)pProc->p_vmspace->vm_daddr + lim_max(pProc, RLIMIT_DATA));
    PROC_UNLOCK(pProc);

    /*
     * Mapping into R3 is easy if the mem object has an associated VM object.
     * If there is no such object, we have to enter the pages into the new
     * mapping one by one, taking their physical addresses from the kernel
     * mapping.
     */
    if (!pObjectToMap)
    {
        vm_object_t pObjectNew = vm_object_allocate(OBJT_PHYS, pMemToMap->cb >> PAGE_SHIFT);
        if (pObjectNew)
        {
            /* Insert the object in the map. */
            rc = vm_map_find(pProcMap,          /* Map to insert the object in */
                             pObjectNew,        /* Object to map */
                             0,                 /* Start offset in the object */
                             &AddrR3,           /* Start address IN/OUT */
                             pMemToMap->cb,     /* Size of the mapping */
                             TRUE,              /* Whether a suitable address should be searched for first */
                             ProtectionFlags,   /* protection flags */
                             VM_PROT_ALL,       /* Maximum protection flags */
                             0);                /* Copy on write */
            if (rc == KERN_SUCCESS)
            {
                int cPages = pMemToMap->cb >> PAGE_SHIFT;
                int iPage;
                void *AddrToMap = pMemToMap->pv;
                pmap_t pPhysicalMap = pProcMap->pmap;
                vm_offset_t AddrR3Dest = AddrR3;

                /* Insert the memory page by page into the mapping. */
                for (iPage = 0; iPage < cPages; iPage++)
                {
                    vm_page_t Page = PHYS_TO_VM_PAGE(vtophys(AddrToMap));

                    pmap_enter(pPhysicalMap, AddrR3Dest, Page, ProtectionFlags, TRUE /* wired */);
                    AddrToMap = (uint8_t *)AddrToMap + PAGE_SIZE;
                    AddrR3Dest += PAGE_SIZE;
                }
            }
            else
                vm_object_deallocate(pObjectNew);
        }
        else
        {
            AssertMsgFailed(("Could not allocate VM object\n"));
            rc = 1; /* any value != KERN_SUCCESS will do here. @todo fix error code */
        }
    }
    else
    {
        /*
         * Reference the object. If this isn't done, the object will be
         * removed from kernel space when the mapping is destroyed.
         */
        vm_object_reference(pObjectToMap);

        rc = vm_map_find(pProcMap,          /* Map to insert the object in */
                         pObjectToMap,      /* Object to map */
                         0,                 /* Start offset in the object */
                         &AddrR3,           /* Start address IN/OUT */
                         pMemToMap->cb,     /* Size of the mapping */
                         TRUE,              /* Whether a suitable address should be searched for first */
                         ProtectionFlags,   /* protection flags */
                         VM_PROT_ALL,       /* Maximum protection flags */
                         0);                /* Copy on write */
    }

    if (rc == KERN_SUCCESS)
    {
        /*
         * Create a mapping object for it.
         */
        PRTR0MEMOBJFREEBSD pMemFreeBSD = (PRTR0MEMOBJFREEBSD)rtR0MemObjNew(sizeof(RTR0MEMOBJFREEBSD),
                                                                           RTR0MEMOBJTYPE_MAPPING,
                                                                           (void *)AddrR3,
                                                                           pMemToMap->cb);
        if (pMemFreeBSD)
        {
            Assert(pMemFreeBSD->Core.pv == (void *)AddrR3);
            pMemFreeBSD->Core.u.Mapping.R0Process = R0Process;
            pMemFreeBSD->pMappingObject = pObjectToMap;
            *ppMem = &pMemFreeBSD->Core;
            return VINF_SUCCESS;
        }

        rc = vm_map_remove(pProcMap, ((vm_offset_t)AddrR3), ((vm_offset_t)AddrR3) + pMemToMap->cb);
        AssertMsg(rc == KERN_SUCCESS, ("Deleting mapping failed\n"));
    }

    if (pObjectToMap)
        vm_object_deallocate(pObjectToMap);

    return VERR_NO_MEMORY;
}


RTHCPHYS rtR0MemObjNativeGetPagePhysAddr(PRTR0MEMOBJINTERNAL pMem, size_t iPage)
{
    PRTR0MEMOBJFREEBSD pMemFreeBSD = (PRTR0MEMOBJFREEBSD)pMem;

    switch (pMemFreeBSD->Core.enmType)
    {
        case RTR0MEMOBJTYPE_LOCK:
        {
            if (    pMemFreeBSD->Core.u.Lock.R0Process != NIL_RTR0PROCESS
                &&  pMemFreeBSD->Core.u.Lock.R0Process != (RTR0PROCESS)curproc)
            {
                /* later */
                return NIL_RTHCPHYS;
            }
            /* fall thru - the range belongs to the current process, so vtophys applies. */
        }
        case RTR0MEMOBJTYPE_PAGE:
        case RTR0MEMOBJTYPE_MAPPING:
        {
            uint8_t *pb = (uint8_t *)pMemFreeBSD->Core.pv + ((size_t)iPage << PAGE_SHIFT);
            return vtophys(pb);
        }

        case RTR0MEMOBJTYPE_CONT:
            return pMemFreeBSD->Core.u.Cont.Phys + (iPage << PAGE_SHIFT);

        case RTR0MEMOBJTYPE_PHYS:
            return pMemFreeBSD->Core.u.Phys.PhysBase + (iPage << PAGE_SHIFT);

        case RTR0MEMOBJTYPE_PHYS_NC:
        case RTR0MEMOBJTYPE_RES_VIRT:
        case RTR0MEMOBJTYPE_LOW:
        default:
            return NIL_RTHCPHYS;
    }
}
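

#if 0 /* Illustrative sketch, not part of the original file: how the generic
       * IPRT ring-0 API (iprt/memobj.h) ends up driving the native workers
       * defined above. */
static void rtR0MemObjFreeBSDUsageExample(void)
{
    RTR0MEMOBJ hMemObj;
    int rc = RTR0MemObjAllocPage(&hMemObj, 4 * PAGE_SIZE, false /* fExecutable */);
    if (RT_SUCCESS(rc))
    {
        /* RTR0MemObjAllocPage landed in rtR0MemObjNativeAllocPage above. */
        void    *pv   = RTR0MemObjAddress(hMemObj);
        RTHCPHYS Phys = RTR0MemObjGetPagePhysAddr(hMemObj, 0 /* iPage */);
        NOREF(pv); NOREF(Phys);
        RTR0MemObjFree(hMemObj, false /* fFreeMappings */); /* -> rtR0MemObjNativeFree */
    }
}
#endif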