VirtualBox

source: vbox/trunk/src/VBox/Runtime/r0drv/freebsd/memobj-r0drv-freebsd.c@ 4136

Last change on this file since 4136 was 4136, checked in by vboxsync, 17 years ago

Added RTR0MemObjAllocPhysNC. Changed the two APIs taking ring-3 addresses to use RTR3PTR.

  • Property svn:eol-style set to native
  • Property svn:keywords set to Id
File size: 24.1 KB
Line 
1/* $Id: memobj-r0drv-freebsd.c 4136 2007-08-14 01:59:36Z vboxsync $ */
2/** @file
3 * innotek Portable Runtime - Ring-0 Memory Objects, FreeBSD.
4 */
5
6/*
7 * Copyright (c) 2007 knut st. osmundsen <[email protected]>
8 *
9 * Permission is hereby granted, free of charge, to any person
10 * obtaining a copy of this software and associated documentation
11 * files (the "Software"), to deal in the Software without
12 * restriction, including without limitation the rights to use,
13 * copy, modify, merge, publish, distribute, sublicense, and/or sell
14 * copies of the Software, and to permit persons to whom the
15 * Software is furnished to do so, subject to the following
16 * conditions:
17 *
18 * The above copyright notice and this permission notice shall be
19 * included in all copies or substantial portions of the Software.
20 *
21 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
22 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
23 * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
24 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
25 * HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
26 * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
27 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
28 * OTHER DEALINGS IN THE SOFTWARE.
29 */
30
31
32/*******************************************************************************
33* Header Files *
34*******************************************************************************/
35#include "the-freebsd-kernel.h"
36
37#include <iprt/memobj.h>
38#include <iprt/mem.h>
39#include <iprt/err.h>
40#include <iprt/assert.h>
41#include <iprt/log.h>
42#include <iprt/param.h>
43#include <iprt/process.h>
44#include "internal/memobj.h"
45
46
47/*******************************************************************************
48* Structures and Typedefs *
49*******************************************************************************/
/**
 * The FreeBSD version of the memory object structure.
 */
typedef struct RTR0MEMOBJFREEBSD
{
    /** The core structure. */
    RTR0MEMOBJINTERNAL Core;
    /** The VM object associated with the allocation.
     * Set by the AllocPage and ReserveInMap paths; NULL for contigmalloc
     * based allocations and lock objects. */
    vm_object_t pObject;
    /** The VM object associated with the mapping.
     * NOTE(review): per the original comment this is presumably the shadow
     * object for mapping objects and the shared object for allocation/enter
     * objects (contig, perhaps alloc) -- nothing in this file assigns it yet,
     * only rtR0MemObjNativeFree tests it. */
    vm_object_t pMappingObject;
} RTR0MEMOBJFREEBSD, *PRTR0MEMOBJFREEBSD;
64
65
66MALLOC_DEFINE(M_IPRTMOBJ, "iprtmobj", "innotek Portable Runtime - R0MemObj");
67
68/*******************************************************************************
69* Internal Functions *
70*******************************************************************************/
71
72
/**
 * Frees the native backing of a ring-0 memory object.
 *
 * @returns VINF_SUCCESS on success; VERR_INTERNAL_ERROR for object types this
 *          backend never creates (LOW, PHYS, PHYS_NC) and for MAPPING, which
 *          is still unimplemented (see todo below).
 * @param   pMem    The memory object; Core.enmType selects the teardown path.
 */
int rtR0MemObjNativeFree(RTR0MEMOBJ pMem)
{
    PRTR0MEMOBJFREEBSD pMemFreeBSD = (PRTR0MEMOBJFREEBSD)pMem;
    int rc;

    switch (pMemFreeBSD->Core.enmType)
    {
        case RTR0MEMOBJTYPE_CONT:
            /* Contiguous memory came from contigmalloc() (AllocCont/AllocPhys). */
            contigfree(pMemFreeBSD->Core.pv, pMemFreeBSD->Core.cb, M_IPRTMOBJ);
            if (pMemFreeBSD->pMappingObject)
            {
                /* NOTE(review): this removes [pv, pv+cb) from kernel_map even
                   though contigfree() above already released that range --
                   confirm the double teardown is intended. */
                rc = vm_map_remove(kernel_map,
                                   (vm_offset_t)pMemFreeBSD->Core.pv,
                                   (vm_offset_t)pMemFreeBSD->Core.pv + pMemFreeBSD->Core.cb);
                AssertMsg(rc == KERN_SUCCESS, ("%#x", rc));
            }
            break;

        case RTR0MEMOBJTYPE_PAGE:
            if (pMemFreeBSD->pObject)
            {
                /* VM-object backed pages (rtR0MemObjNativeAllocPage's live
                   path); removing the map entry drops the wiring and the
                   object reference. */
                rc = vm_map_remove(kernel_map,
                                   (vm_offset_t)pMemFreeBSD->Core.pv,
                                   (vm_offset_t)pMemFreeBSD->Core.pv + pMemFreeBSD->Core.cb);
                AssertMsg(rc == KERN_SUCCESS, ("%#x", rc));
            }
            else
            {
                /* The (currently disabled) malloc() path of AllocPage. */
                free(pMemFreeBSD->Core.pv, M_IPRTMOBJ);
                if (pMemFreeBSD->pMappingObject)
                {
                    rc = vm_map_remove(kernel_map,
                                       (vm_offset_t)pMemFreeBSD->Core.pv,
                                       (vm_offset_t)pMemFreeBSD->Core.pv + pMemFreeBSD->Core.cb);
                    AssertMsg(rc == KERN_SUCCESS, ("%#x", rc));
                }
            }
            break;

        case RTR0MEMOBJTYPE_LOCK:
        {
            /* Undo the vm_map_wire() done when locking, using the map that
               matches the owning process (kernel_map for ring-0 locks). */
            vm_map_t pMap = kernel_map;
            if (pMemFreeBSD->Core.u.Lock.R0Process != NIL_RTR0PROCESS)
                pMap = &((struct proc *)pMemFreeBSD->Core.u.Lock.R0Process)->p_vmspace->vm_map;
            rc = vm_map_unwire(pMap,
                               (vm_offset_t)pMemFreeBSD->Core.pv,
                               (vm_offset_t)pMemFreeBSD->Core.pv + pMemFreeBSD->Core.cb,
                               VM_MAP_WIRE_SYSTEM | VM_MAP_WIRE_NOHOLES);
            AssertMsg(rc == KERN_SUCCESS, ("%#x", rc));
            break;
        }

        case RTR0MEMOBJTYPE_RES_VIRT:
        {
            /* Reserved (unbacked) address space; removing the entry frees it.
               NOTE(review): reads u.Lock.R0Process rather than
               u.ResVirt.R0Process -- union members, presumably same offset,
               but verify. */
            vm_map_t pMap = kernel_map;
            if (pMemFreeBSD->Core.u.Lock.R0Process != NIL_RTR0PROCESS)
                pMap = &((struct proc *)pMemFreeBSD->Core.u.Lock.R0Process)->p_vmspace->vm_map;
            rc = vm_map_remove(pMap,
                               (vm_offset_t)pMemFreeBSD->Core.pv,
                               (vm_offset_t)pMemFreeBSD->Core.pv + pMemFreeBSD->Core.cb);
            AssertMsg(rc == KERN_SUCCESS, ("%#x", rc));
            break;
        }

        case RTR0MEMOBJTYPE_MAPPING:
        {
            /** @todo Figure out mapping... */
        }
        /* fall thru - unimplemented, treated as an internal error below */

        /* unused: */
        case RTR0MEMOBJTYPE_LOW:
        case RTR0MEMOBJTYPE_PHYS:
        case RTR0MEMOBJTYPE_PHYS_NC:
        default:
            AssertMsgFailed(("enmType=%d\n", pMemFreeBSD->Core.enmType));
            return VERR_INTERNAL_ERROR;
    }

    /* No code path assigns pMappingObject without freeing it above. */
    Assert(!pMemFreeBSD->pMappingObject);

    return VINF_SUCCESS;
}
155
156
/**
 * Allocates page aligned, wired kernel memory without physical placement
 * constraints.
 *
 * @returns IPRT status code (VERR_NO_MEMORY on any failure; the mach->IPRT
 *          translation is still a todo).
 * @param   ppMem       Where to store the new memory object on success.
 * @param   cb          Number of bytes, page aligned.
 * @param   fExecutable Whether the mapping must also be executable.
 */
int rtR0MemObjNativeAllocPage(PPRTR0MEMOBJINTERNAL ppMem, size_t cb, bool fExecutable)
{
    int rc;

    /* create the object. */
    PRTR0MEMOBJFREEBSD pMemFreeBSD = (PRTR0MEMOBJFREEBSD)rtR0MemObjNew(sizeof(*pMemFreeBSD), RTR0MEMOBJTYPE_PAGE, NULL, cb);
    if (!pMemFreeBSD)
        return VERR_NO_MEMORY;

    /*
     * We've two options here both expressed nicely by how kld allocates
     * memory for the module bits:
     * http://fxr.watson.org/fxr/source/kern/link_elf.c?v=RELENG62#L701
     */
#if 0
    /* Option 1 (disabled): plain zeroed malloc; freed via free() in
       rtR0MemObjNativeFree when pObject is NULL. */
    pMemFreeBSD->Core.pv = malloc(cb, M_IPRTMOBJ, M_ZERO);
    if (pMemFreeBSD->Core.pv)
    {
        *ppMem = &pMemFreeBSD->Core;
        return VINF_SUCCESS;
    }
    rc = VERR_NO_MEMORY;
    NOREF(fExecutable);

#else
    /* Option 2 (live): back the range with a default VM object, map it
       into kernel_map, then wire the pages down. */
    pMemFreeBSD->pObject = vm_object_allocate(OBJT_DEFAULT, cb >> PAGE_SHIFT);
    if (pMemFreeBSD->pObject)
    {
        vm_offset_t MapAddress = vm_map_min(kernel_map);
        rc = vm_map_find(kernel_map,            /* map */
                         pMemFreeBSD->pObject,  /* object */
                         0,                     /* offset */
                         &MapAddress,           /* addr (IN/OUT) */
                         cb,                    /* length */
                         TRUE,                  /* find_space */
                         fExecutable            /* protection */
                         ? VM_PROT_ALL
                         : VM_PROT_RW,
                         VM_PROT_ALL,           /* max(_prot) */
                         FALSE);                /* cow (copy-on-write) */
        if (rc == KERN_SUCCESS)
        {
            rc = vm_map_wire(kernel_map,        /* map */
                             MapAddress,        /* start */
                             MapAddress + cb,   /* end */
                             VM_MAP_WIRE_SYSTEM | VM_MAP_WIRE_NOHOLES);
            if (rc == KERN_SUCCESS)
            {
                pMemFreeBSD->Core.pv = (void *)MapAddress;
                *ppMem = &pMemFreeBSD->Core;
                return VINF_SUCCESS;
            }

            /* Wiring failed: removing the entry also releases the object
               reference consumed by vm_map_find. */
            vm_map_remove(kernel_map,
                          MapAddress,
                          MapAddress + cb);
        }
        else
            /* vm_map_find did not consume the reference on failure. */
            vm_object_deallocate(pMemFreeBSD->pObject);
        rc = VERR_NO_MEMORY; /** @todo fix translation (borrow from darwin) */
    }
    else
        rc = VERR_NO_MEMORY;
#endif

    rtR0MemObjDelete(&pMemFreeBSD->Core);
    return rc;
}
225
226
227int rtR0MemObjNativeAllocLow(PPRTR0MEMOBJINTERNAL ppMem, size_t cb, bool fExecutable)
228{
229 /*
230 * Try a Alloc first and see if we get luck, if not try contigmalloc.
231 * Might wish to try find our own pages or something later if this
232 * turns into a problemspot on AMD64 boxes.
233 */
234 int rc = rtR0MemObjNativeAllocPage(ppMem, cb, fExecutable);
235 if (RT_SUCCESS(rc))
236 {
237 size_t iPage = cb >> PAGE_SHIFT;
238 while (iPage-- > 0)
239 if (rtR0MemObjNativeGetPagePhysAddr(*ppMem, iPage) > (_4G - PAGE_SIZE))
240 {
241 RTR0MemObjFree(*ppMem, false);
242 *ppMem = NULL;
243 rc = VERR_NO_MEMORY;
244 break;
245 }
246 }
247 if (RT_FAILURE(rc))
248 rc = rtR0MemObjNativeAllocCont(ppMem, cb, fExecutable);
249 return rc;
250}
251
252
/**
 * Allocates page aligned, physically contiguous memory below 4GB.
 *
 * @returns IPRT status code.
 * @param   ppMem       Where to store the new memory object on success.
 * @param   cb          Number of bytes, page aligned.
 * @param   fExecutable Ignored here; contigmalloc'ed kernel memory needs no
 *                      special treatment for execution on this platform.
 */
int rtR0MemObjNativeAllocCont(PPRTR0MEMOBJINTERNAL ppMem, size_t cb, bool fExecutable)
{
    /* create the object. */
    PRTR0MEMOBJFREEBSD pMemFreeBSD = (PRTR0MEMOBJFREEBSD)rtR0MemObjNew(sizeof(*pMemFreeBSD), RTR0MEMOBJTYPE_CONT, NULL, cb);
    if (!pMemFreeBSD)
        return VERR_NO_MEMORY;

    /* do the allocation. */
    pMemFreeBSD->Core.pv = contigmalloc(cb,                   /* size */
                                        M_IPRTMOBJ,           /* type */
                                        M_NOWAIT | M_ZERO,    /* flags */
                                        0,                    /* lowest physical address */
                                        _4G-1,                /* highest physical address */
                                        PAGE_SIZE,            /* alignment. */
                                        0);                   /* boundary */
    if (pMemFreeBSD->Core.pv)
    {
        /* Contiguous, so the physical base of page 0 describes it all. */
        pMemFreeBSD->Core.u.Cont.Phys = vtophys(pMemFreeBSD->Core.pv);
        *ppMem = &pMemFreeBSD->Core;
        return VINF_SUCCESS;
    }

    NOREF(fExecutable);
    rtR0MemObjDelete(&pMemFreeBSD->Core);
    return VERR_NO_MEMORY;
}
279
280
/**
 * Allocates contiguous physical memory below PhysHighest.
 *
 * NOTE(review): this deliberately creates a RTR0MEMOBJTYPE_CONT object (not
 * PHYS) so that rtR0MemObjNativeFree's contigfree path and
 * rtR0MemObjNativeGetPagePhysAddr's u.Cont handling apply -- confirm callers
 * don't inspect enmType expecting PHYS.
 *
 * @returns IPRT status code.
 * @param   ppMem       Where to store the new memory object on success.
 * @param   cb          Number of bytes, page aligned.
 * @param   PhysHighest The highest permitted physical address.
 */
int rtR0MemObjNativeAllocPhys(PPRTR0MEMOBJINTERNAL ppMem, size_t cb, RTHCPHYS PhysHighest)
{
    /** @todo check if there is a more appropriate API somewhere.. */

    /* create the object. */
    PRTR0MEMOBJFREEBSD pMemFreeBSD = (PRTR0MEMOBJFREEBSD)rtR0MemObjNew(sizeof(*pMemFreeBSD), RTR0MEMOBJTYPE_CONT, NULL, cb);
    if (!pMemFreeBSD)
        return VERR_NO_MEMORY;

    /* do the allocation. */
    pMemFreeBSD->Core.pv = contigmalloc(cb,                   /* size */
                                        M_IPRTMOBJ,           /* type */
                                        M_NOWAIT | M_ZERO,    /* flags */
                                        0,                    /* lowest physical address */
                                        PhysHighest,          /* highest physical address */
                                        PAGE_SIZE,            /* alignment. */
                                        0);                   /* boundary */
    if (pMemFreeBSD->Core.pv)
    {
        pMemFreeBSD->Core.u.Cont.Phys = vtophys(pMemFreeBSD->Core.pv);
        *ppMem = &pMemFreeBSD->Core;
        return VINF_SUCCESS;
    }

    rtR0MemObjDelete(&pMemFreeBSD->Core);
    return VERR_NO_MEMORY;
}
308
309
/**
 * Allocates non-contiguous physical memory below PhysHighest.
 *
 * Currently just delegates to the contiguous allocator, which satisfies the
 * contract (contiguous is a legal special case of non-contiguous) but is
 * stricter than necessary and may fail where a scatter allocation would not.
 *
 * @returns IPRT status code.
 * @param   ppMem       Where to store the new memory object on success.
 * @param   cb          Number of bytes, page aligned.
 * @param   PhysHighest The highest permitted physical address.
 */
int rtR0MemObjNativeAllocPhysNC(PPRTR0MEMOBJINTERNAL ppMem, size_t cb, RTHCPHYS PhysHighest)
{
    /** @todo rtR0MemObjNativeAllocPhys / freebsd */
    return rtR0MemObjNativeAllocPhys(ppMem, cb, PhysHighest);
}
315
316
317int rtR0MemObjNativeEnterPhys(PPRTR0MEMOBJINTERNAL ppMem, RTHCPHYS Phys, size_t cb)
318{
319 /* create the object. */
320 PRTR0MEMOBJFREEBSD pMemFreeBSD = (PRTR0MEMOBJFREEBSD)rtR0MemObjNew(sizeof(*pMemFreeBSD), RTR0MEMOBJTYPE_PHYS, NULL, cb);
321 if (!pMemFreeBSD)
322 return VERR_NO_MEMORY;
323
324 /* there is no allocation here, it needs to be mapped somewhere first. */
325 pMemFreeBSD->Core.u.Phys.fAllocated = false;
326 pMemFreeBSD->Core.u.Phys.PhysBase = Phys;
327 *ppMem = &pMemFreeBSD->Core;
328 return VINF_SUCCESS;
329}
330
331
/**
 * Locks down (wires) a range of a user process' memory.
 *
 * @returns IPRT status code (VERR_NO_MEMORY also covers wire failures for
 *          now; see the todo on error conversion).
 * @param   ppMem       Where to store the new memory object on success.
 * @param   pv          The ring-3 address to lock, page aligned.
 * @param   cb          Number of bytes, page aligned.
 * @param   R0Process   The process owning the memory; treated as a
 *                      struct proc pointer, so it must reference a live
 *                      process.
 */
int rtR0MemObjNativeLockUser(PPRTR0MEMOBJINTERNAL ppMem, void *pv, size_t cb, RTR0PROCESS R0Process)
{
    int rc;

    /* create the object. */
    PRTR0MEMOBJFREEBSD pMemFreeBSD = (PRTR0MEMOBJFREEBSD)rtR0MemObjNew(sizeof(*pMemFreeBSD), RTR0MEMOBJTYPE_LOCK, pv, cb);
    if (!pMemFreeBSD)
        return VERR_NO_MEMORY;

    /*
     * We could've used vslock here, but we don't wish to be subject to
     * resource usage restrictions, so we'll call vm_map_wire directly.
     */
    rc = vm_map_wire(&((struct proc *)R0Process)->p_vmspace->vm_map,    /* the map */
                     (vm_offset_t)pv,                                   /* start */
                     (vm_offset_t)pv + cb,                              /* end */
                     VM_MAP_WIRE_SYSTEM | VM_MAP_WIRE_NOHOLES);         /* flags - SYSTEM? */
    if (rc == KERN_SUCCESS)
    {
        /* Remember the owner so rtR0MemObjNativeFree unwires the right map. */
        pMemFreeBSD->Core.u.Lock.R0Process = R0Process;
        *ppMem = &pMemFreeBSD->Core;
        return VINF_SUCCESS;
    }
    rtR0MemObjDelete(&pMemFreeBSD->Core);
    return VERR_NO_MEMORY;/** @todo fix mach -> vbox error conversion for freebsd. */
}
358
359
360int rtR0MemObjNativeLockKernel(PPRTR0MEMOBJINTERNAL ppMem, void *pv, size_t cb)
361{
362 int rc;
363
364 /* create the object. */
365 PRTR0MEMOBJFREEBSD pMemFreeBSD = (PRTR0MEMOBJFREEBSD)rtR0MemObjNew(sizeof(*pMemFreeBSD), RTR0MEMOBJTYPE_LOCK, pv, cb);
366 if (!pMemFreeBSD)
367 return VERR_NO_MEMORY;
368
369 /* lock the memory */
370 rc = vm_map_wire(kernel_map, /* the map */
371 (vm_offset_t)pv, /* start */
372 (vm_offset_t)pv + cb, /* end */
373 VM_MAP_WIRE_SYSTEM | VM_MAP_WIRE_NOHOLES); /* flags - SYSTEM? */
374 if (rc == KERN_SUCCESS)
375 {
376 pMemFreeBSD->Core.u.Lock.R0Process = NIL_RTR0PROCESS;
377 *ppMem = &pMemFreeBSD->Core;
378 return VINF_SUCCESS;
379 }
380 rtR0MemObjDelete(&pMemFreeBSD->Core);
381 return VERR_NO_MEMORY;/** @todo fix mach -> vbox error conversion for freebsd. */
382}
383
384
385/**
386 * Worker for the two virtual address space reservers.
387 *
388 * We're leaning on the examples provided by mmap and vm_mmap in vm_mmap.c here.
389 */
390static int rtR0MemObjNativeReserveInMap(PPRTR0MEMOBJINTERNAL ppMem, void *pvFixed, size_t cb, size_t uAlignment, RTR0PROCESS R0Process, vm_map_t pMap)
391{
392 int rc;
393
394 /*
395 * The pvFixed address range must be within the VM space when specified.
396 */
397 if (pvFixed != (void *)-1
398 && ( (vm_offset_t)pvFixed < vm_map_min(pMap)
399 || (vm_offset_t)pvFixed + cb > vm_map_max(pMap)))
400 return VERR_INVALID_PARAMETER;
401
402 /*
403 * Create the object.
404 */
405 PRTR0MEMOBJFREEBSD pMemFreeBSD = (PRTR0MEMOBJFREEBSD)rtR0MemObjNew(sizeof(*pMemFreeBSD), RTR0MEMOBJTYPE_RES_VIRT, NULL, cb);
406 if (!pMemFreeBSD)
407 return VERR_NO_MEMORY;
408
409 /*
410 * Allocate an empty VM object and map it into the requested map.
411 */
412 pMemFreeBSD->pObject = vm_object_allocate(OBJT_DEFAULT, cb >> PAGE_SHIFT);
413 if (pMemFreeBSD->pObject)
414 {
415 vm_offset_t MapAddress = pvFixed != (void *)-1
416 ? (vm_offset_t)pvFixed
417 : vm_map_min(kernel_map);
418 if (pvFixed)
419 vm_map_remove(pMap,
420 MapAddress,
421 MapAddress + cb);
422
423 rc = vm_map_find(pMap, /* map */
424 pMemFreeBSD->pObject, /* object */
425 0, /* offset */
426 &MapAddress, /* addr (IN/OUT) */
427 cb, /* length */
428 pvFixed == (void *)-1, /* find_space */
429 VM_PROT_NONE, /* protection */
430 VM_PROT_ALL, /* max(_prot) ?? */
431 FALSE); /* cow (copy-on-write) */
432 if (rc == KERN_SUCCESS)
433 {
434 if (R0Process != NIL_RTR0PROCESS)
435 {
436 rc = vm_map_inherit(pMap,
437 MapAddress,
438 MapAddress + cb,
439 VM_INHERIT_SHARE);
440 AssertMsg(rc == KERN_SUCCESS, ("%#x\n", rc));
441 }
442 pMemFreeBSD->Core.pv = (void *)MapAddress;
443 pMemFreeBSD->Core.u.ResVirt.R0Process = R0Process;
444 *ppMem = &pMemFreeBSD->Core;
445 return VINF_SUCCESS;
446 }
447 vm_object_deallocate(pMemFreeBSD->pObject);
448 rc = VERR_NO_MEMORY; /** @todo fix translation (borrow from darwin) */
449 }
450 else
451 rc = VERR_NO_MEMORY;
452 rtR0MemObjDelete(&pMemFreeBSD->Core);
453 return rc;
454
455}
456
/**
 * Reserves kernel virtual address space; thin wrapper over
 * rtR0MemObjNativeReserveInMap targeting kernel_map.
 *
 * @returns IPRT status code.
 * @param   ppMem       Where to store the new memory object on success.
 * @param   pvFixed     Fixed address or (void *)-1 for anywhere.
 * @param   cb          Number of bytes to reserve, page aligned.
 * @param   uAlignment  Requested alignment (see worker for caveats).
 */
int rtR0MemObjNativeReserveKernel(PPRTR0MEMOBJINTERNAL ppMem, void *pvFixed, size_t cb, size_t uAlignment)
{
    return rtR0MemObjNativeReserveInMap(ppMem, pvFixed, cb, uAlignment, NIL_RTR0PROCESS, kernel_map);
}
461
462
/**
 * Reserves address space in a user process; thin wrapper over
 * rtR0MemObjNativeReserveInMap targeting the process' vm_map.
 *
 * @returns IPRT status code.
 * @param   ppMem       Where to store the new memory object on success.
 * @param   R3PtrFixed  Fixed ring-3 address or (RTR3PTR)-1 for anywhere.
 * @param   cb          Number of bytes to reserve, page aligned.
 * @param   uAlignment  Requested alignment (see worker for caveats).
 * @param   R0Process   The target process; treated as a struct proc pointer.
 */
int rtR0MemObjNativeReserveUser(PPRTR0MEMOBJINTERNAL ppMem, RTR3PTR R3PtrFixed, size_t cb, size_t uAlignment, RTR0PROCESS R0Process)
{
    return rtR0MemObjNativeReserveInMap(ppMem, (void *)R3PtrFixed, cb, uAlignment, R0Process,
                                        &((struct proc *)R0Process)->p_vmspace->vm_map);
}
468
469
/**
 * Maps a memory object into kernel space.
 *
 * NOT IMPLEMENTED: the live code path only validates pvFixed and returns
 * VERR_NOT_IMPLEMENTED.  The #if 0 region below is a skeleton carried over
 * from the OS/2 backend (note the KernVMAlloc/RTErrConvertFromOS2 calls and
 * RTR0MEMOBJOS2 references) kept as a starting point; it will not compile
 * on FreeBSD as-is.
 *
 * @returns VERR_NOT_IMPLEMENTED (or VERR_NOT_SUPPORTED if pvFixed is given).
 */
int rtR0MemObjNativeMapKernel(PPRTR0MEMOBJINTERNAL ppMem, RTR0MEMOBJ pMemToMap, void *pvFixed, size_t uAlignment, unsigned fProt)
{
    AssertMsgReturn(pvFixed == (void *)-1, ("%p\n", pvFixed), VERR_NOT_SUPPORTED);

/* Phys: see pmap_mapdev in i386/i386/pmap.c (http://fxr.watson.org/fxr/source/i386/i386/pmap.c?v=RELENG62#L2860) */

#if 0
/** @todo finish the implementation. */

    int rc;
    void *pvR0 = NULL;
    PRTR0MEMOBJFREEBSD pMemToMapOs2 = (PRTR0MEMOBJFREEBSD)pMemToMap;
    switch (pMemToMapOs2->Core.enmType)
    {
        /*
         * These has kernel mappings.
         */
        case RTR0MEMOBJTYPE_PAGE:
        case RTR0MEMOBJTYPE_LOW:
        case RTR0MEMOBJTYPE_CONT:
            pvR0 = pMemToMapOs2->Core.pv;
            break;

        case RTR0MEMOBJTYPE_PHYS_NC:
        case RTR0MEMOBJTYPE_PHYS:
            pvR0 = pMemToMapOs2->Core.pv;
            if (!pvR0)
            {
                /* no ring-0 mapping, so allocate a mapping in the process. */
                AssertMsgReturn(uAlignment == PAGE_SIZE, ("%#zx\n", uAlignment), VERR_NOT_SUPPORTED);
                AssertMsgReturn(fProt & RTMEM_PROT_WRITE, ("%#x\n", fProt), VERR_NOT_SUPPORTED);
                Assert(!pMemToMapOs2->Core.u.Phys.fAllocated);
                ULONG ulPhys = pMemToMapOs2->Core.u.Phys.PhysBase;
                rc = KernVMAlloc(pMemToMapOs2->Core.cb, VMDHA_PHYS, &pvR0, (PPVOID)&ulPhys, NULL);
                if (rc)
                    return RTErrConvertFromOS2(rc);
                pMemToMapOs2->Core.pv = pvR0;
            }
            break;

        case RTR0MEMOBJTYPE_LOCK:
            if (pMemToMapOs2->Core.u.Lock.R0Process != NIL_RTR0PROCESS)
                return VERR_NOT_SUPPORTED; /** @todo implement this... */
            pvR0 = pMemToMapOs2->Core.pv;
            break;

        case RTR0MEMOBJTYPE_RES_VIRT:
        case RTR0MEMOBJTYPE_MAPPING:
        default:
            AssertMsgFailed(("enmType=%d\n", pMemToMapOs2->Core.enmType));
            return VERR_INTERNAL_ERROR;
    }

    /*
     * Create a dummy mapping object for it.
     *
     * All mappings are read/write/execute in OS/2 and there isn't
     * any cache options, so sharing is ok. And the main memory object
     * isn't actually freed until all the mappings have been freed up
     * (reference counting).
     */
    PRTR0MEMOBJFREEBSD pMemFreeBSD = (PRTR0MEMOBJFREEBSD)rtR0MemObjNew(RT_OFFSETOF(RTR0MEMOBJOS2, Lock), RTR0MEMOBJTYPE_MAPPING, pvR0, pMemToMapOs2->Core.cb);
    if (pMemFreeBSD)
    {
        pMemFreeBSD->Core.u.Mapping.R0Process = NIL_RTR0PROCESS;
        *ppMem = &pMemFreeBSD->Core;
        return VINF_SUCCESS;
    }
    return VERR_NO_MEMORY;
#endif
    return VERR_NOT_IMPLEMENTED;
}
542
543
/**
 * Maps a memory object into a user process.
 *
 * NOT IMPLEMENTED: the live code path only validates R0Process/R3PtrFixed and
 * returns VERR_NOT_IMPLEMENTED.  The #if 0 region below is a skeleton carried
 * over from the OS/2 backend (KernVMAlloc, RTR0Os2DHVMGlobalToProcess,
 * KernVMFree, RTR0MEMOBJOS2) kept as a starting point; it will not compile on
 * FreeBSD as-is.
 *
 * @returns VERR_NOT_IMPLEMENTED (or VERR_NOT_SUPPORTED for unsupported args).
 */
int rtR0MemObjNativeMapUser(PPRTR0MEMOBJINTERNAL ppMem, RTR0MEMOBJ pMemToMap, RTR3PTR R3PtrFixed, size_t uAlignment, unsigned fProt, RTR0PROCESS R0Process)
{
    AssertMsgReturn(R0Process == RTR0ProcHandleSelf(), ("%p != %p\n", R0Process, RTR0ProcHandleSelf()), VERR_NOT_SUPPORTED);
    AssertMsgReturn(R3PtrFixed == (RTR3PTR)-1, ("%p\n", R3PtrFixed), VERR_NOT_SUPPORTED);

#if 0
    int rc;
    void *pvR0;
    void *pvR3 = NULL;
    PRTR0MEMOBJFREEBSD pMemToMapOs2 = (PRTR0MEMOBJFREEBSD)pMemToMap;
    switch (pMemToMapOs2->Core.enmType)
    {
        /*
         * These has kernel mappings.
         */
        case RTR0MEMOBJTYPE_PAGE:
        case RTR0MEMOBJTYPE_LOW:
        case RTR0MEMOBJTYPE_CONT:
            pvR0 = pMemToMapOs2->Core.pv;
            break;

        case RTR0MEMOBJTYPE_PHYS:
            pvR0 = pMemToMapOs2->Core.pv;
#if 0/* this is wrong. */
            if (!pvR0)
            {
                /* no ring-0 mapping, so allocate a mapping in the process. */
                AssertMsgReturn(uAlignment == PAGE_SIZE, ("%#zx\n", uAlignment), VERR_NOT_SUPPORTED);
                AssertMsgReturn(fProt & RTMEM_PROT_WRITE, ("%#x\n", fProt), VERR_NOT_SUPPORTED);
                Assert(!pMemToMapOs2->Core.u.Phys.fAllocated);
                ULONG ulPhys = pMemToMapOs2->Core.u.Phys.PhysBase;
                rc = KernVMAlloc(pMemToMapOs2->Core.cb, VMDHA_PHYS | VMDHA_PROCESS, &pvR3, (PPVOID)&ulPhys, NULL);
                if (rc)
                    return RTErrConvertFromOS2(rc);
            }
            break;
#endif
            return VERR_NOT_SUPPORTED;

        case RTR0MEMOBJTYPE_LOCK:
            if (pMemToMapOs2->Core.u.Lock.R0Process != NIL_RTR0PROCESS)
                return VERR_NOT_SUPPORTED; /** @todo implement this... */
            pvR0 = pMemToMapOs2->Core.pv;
            break;

        case RTR0MEMOBJTYPE_PHYS_NC:
        case RTR0MEMOBJTYPE_RES_VIRT:
        case RTR0MEMOBJTYPE_MAPPING:
        default:
            AssertMsgFailed(("enmType=%d\n", pMemToMapOs2->Core.enmType));
            return VERR_INTERNAL_ERROR;
    }

    /*
     * Map the ring-0 memory into the current process.
     */
    if (!pvR3)
    {
        Assert(pvR0);
        ULONG flFlags = 0;
        if (uAlignment == PAGE_SIZE)
            flFlags |= VMDHGP_4MB;
        if (fProt & RTMEM_PROT_WRITE)
            flFlags |= VMDHGP_WRITE;
        rc = RTR0Os2DHVMGlobalToProcess(flFlags, pvR0, pMemToMapOs2->Core.cb, &pvR3);
        if (rc)
            return RTErrConvertFromOS2(rc);
    }
    Assert(pvR3);

    /*
     * Create a mapping object for it.
     */
    PRTR0MEMOBJFREEBSD pMemFreeBSD = (PRTR0MEMOBJFREEBSD)rtR0MemObjNew(RT_OFFSETOF(RTR0MEMOBJOS2, Lock), RTR0MEMOBJTYPE_MAPPING, pvR3, pMemToMapOs2->Core.cb);
    if (pMemFreeBSD)
    {
        Assert(pMemFreeBSD->Core.pv == pvR3);
        pMemFreeBSD->Core.u.Mapping.R0Process = R0Process;
        *ppMem = &pMemFreeBSD->Core;
        return VINF_SUCCESS;
    }
    KernVMFree(pvR3);
    return VERR_NO_MEMORY;
#endif
    return VERR_NOT_IMPLEMENTED;
}
630
631
/**
 * Queries the host physical address of a page within a memory object.
 *
 * @returns The physical address of the page, or NIL_RTHCPHYS when it cannot
 *          be determined (foreign-process locks - not implemented yet - and
 *          object types with no physical backing).
 * @param   pMem    The memory object.
 * @param   iPage   Zero-based page index within the object.
 */
RTHCPHYS rtR0MemObjNativeGetPagePhysAddr(PRTR0MEMOBJINTERNAL pMem, size_t iPage)
{
    PRTR0MEMOBJFREEBSD pMemFreeBSD = (PRTR0MEMOBJFREEBSD)pMem;

    switch (pMemFreeBSD->Core.enmType)
    {
        case RTR0MEMOBJTYPE_LOCK:
        {
            if (    pMemFreeBSD->Core.u.Lock.R0Process != NIL_RTR0PROCESS
                &&  pMemFreeBSD->Core.u.Lock.R0Process != (RTR0PROCESS)curproc)
            {
                /* later */
                return NIL_RTHCPHYS;
            }
            /* fall thru - resolve via vtophys() below.
               NOTE(review): for current-process *user* addresses, confirm
               vtophys() resolves through the right pmap; it may only be
               valid for kernel addresses. */
        }
        case RTR0MEMOBJTYPE_PAGE:
        {
            uint8_t *pb = (uint8_t *)pMemFreeBSD->Core.pv + ((size_t)iPage << PAGE_SHIFT);
            return vtophys(pb);
        }

        case RTR0MEMOBJTYPE_CONT:
            /* Contiguous: simple offset from the recorded base. */
            return pMemFreeBSD->Core.u.Cont.Phys + (iPage << PAGE_SHIFT);

        case RTR0MEMOBJTYPE_PHYS:
            return pMemFreeBSD->Core.u.Phys.PhysBase + (iPage << PAGE_SHIFT);

        /* No physical backing or not implemented by this backend: */
        case RTR0MEMOBJTYPE_PHYS_NC:
        case RTR0MEMOBJTYPE_RES_VIRT:
        case RTR0MEMOBJTYPE_MAPPING:
        case RTR0MEMOBJTYPE_LOW:
        default:
            return NIL_RTHCPHYS;
    }
}
Note: See TracBrowser for help on using the repository browser.

© 2024 Oracle Support Privacy / Do Not Sell My Info Terms of Use Trademark Policy Automated Access Etiquette