VirtualBox

source: vbox/trunk/src/VBox/Runtime/r0drv/freebsd/memobj-r0drv-freebsd.c@ 26886

Last change on this file since 26886 was 26886, checked in by vboxsync, 15 years ago

IPRT/FreeBSD: Fix GetPagePhysAddr for lock objects

  • Property svn:eol-style set to native
  • Property svn:keywords set to Author Date Id Revision
File size: 28.8 KB
Line 
1/* $Id: memobj-r0drv-freebsd.c 26886 2010-02-27 20:05:38Z vboxsync $ */
2/** @file
3 * IPRT - Ring-0 Memory Objects, FreeBSD.
4 */
5
6/*
7 * Copyright (c) 2007 knut st. osmundsen <[email protected]>
8 *
9 * Permission is hereby granted, free of charge, to any person
10 * obtaining a copy of this software and associated documentation
11 * files (the "Software"), to deal in the Software without
12 * restriction, including without limitation the rights to use,
13 * copy, modify, merge, publish, distribute, sublicense, and/or sell
14 * copies of the Software, and to permit persons to whom the
15 * Software is furnished to do so, subject to the following
16 * conditions:
17 *
18 * The above copyright notice and this permission notice shall be
19 * included in all copies or substantial portions of the Software.
20 *
21 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
22 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
23 * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
24 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
25 * HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
26 * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
27 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
28 * OTHER DEALINGS IN THE SOFTWARE.
29 */
30
31
32/*******************************************************************************
33* Header Files *
34*******************************************************************************/
35#include "the-freebsd-kernel.h"
36
37#include <iprt/memobj.h>
38#include <iprt/mem.h>
39#include <iprt/err.h>
40#include <iprt/assert.h>
41#include <iprt/log.h>
42#include <iprt/param.h>
43#include <iprt/process.h>
44#include "internal/memobj.h"
45
46
47/*******************************************************************************
48* Structures and Typedefs *
49*******************************************************************************/
50/**
51 * The FreeBSD version of the memory object structure.
52 */
/**
 * The FreeBSD version of the memory object structure.
 */
typedef struct RTR0MEMOBJFREEBSD
{
    /** The core structure (type, address, size and per-type payload). */
    RTR0MEMOBJINTERNAL Core;
    /** The VM object associated with the allocation. */
    vm_object_t pObject;
    /** The VM object associated with the mapping.
     * NOTE(review): per the original author's comment this is the shadow
     * object for mapping objects, and the shared object constructed for
     * allocation/enter objects (contig, perhaps alloc) -- the original
     * comment itself was uncertain; confirm against the mapping code. */
    vm_object_t pMappingObject;
} RTR0MEMOBJFREEBSD, *PRTR0MEMOBJFREEBSD;
64
65
/** Malloc type used for the IPRT memory object allocations done via
 * contigmalloc/contigfree in this file. */
MALLOC_DEFINE(M_IPRTMOBJ, "iprtmobj", "IPRT - R0MemObj");
67
68/*******************************************************************************
69* Internal Functions *
70*******************************************************************************/
71
72
/**
 * Frees the native backing of a memory object.
 *
 * Each RTR0MEMOBJTYPE_* case undoes exactly what the corresponding
 * worker below did (contigmalloc, vm_map_find, vm_map_wire, ...).
 *
 * @returns VINF_SUCCESS, or VERR_INTERNAL_ERROR for unexpected types.
 * @param   pMem    The memory object to tear down.
 */
int rtR0MemObjNativeFree(RTR0MEMOBJ pMem)
{
    PRTR0MEMOBJFREEBSD pMemFreeBSD = (PRTR0MEMOBJFREEBSD)pMem;
    int rc;

    switch (pMemFreeBSD->Core.enmType)
    {
        case RTR0MEMOBJTYPE_CONT:
            /* Contiguous memory came from contigmalloc (rtR0MemObjNativeAllocCont). */
            contigfree(pMemFreeBSD->Core.pv, pMemFreeBSD->Core.cb, M_IPRTMOBJ);
            if (pMemFreeBSD->pMappingObject)
            {
                /* NOTE(review): nothing in this file assigns pMappingObject
                   for CONT objects; presumably a defensive leftover -- confirm. */
                rc = vm_map_remove(kernel_map,
                                   (vm_offset_t)pMemFreeBSD->Core.pv,
                                   (vm_offset_t)pMemFreeBSD->Core.pv + pMemFreeBSD->Core.cb);
                AssertMsg(rc == KERN_SUCCESS, ("%#x", rc));
            }
            break;

        case RTR0MEMOBJTYPE_PAGE:
            /* With a VM object (rtR0MemObjNativeAllocPage), removing the kernel
               mapping also releases the pages backing the object. */
            if (pMemFreeBSD->pObject)
            {
                rc = vm_map_remove(kernel_map,
                                   (vm_offset_t)pMemFreeBSD->Core.pv,
                                   (vm_offset_t)pMemFreeBSD->Core.pv + pMemFreeBSD->Core.cb);
                AssertMsg(rc == KERN_SUCCESS, ("%#x", rc));
            }
            else
            {
                /* No VM object: the memory must have come from contigmalloc. */
                contigfree(pMemFreeBSD->Core.pv, pMemFreeBSD->Core.cb, M_IPRTMOBJ);
                if (pMemFreeBSD->pMappingObject)
                {
                    rc = vm_map_remove(kernel_map,
                                       (vm_offset_t)pMemFreeBSD->Core.pv,
                                       (vm_offset_t)pMemFreeBSD->Core.pv + pMemFreeBSD->Core.cb);
                    AssertMsg(rc == KERN_SUCCESS, ("%#x", rc));
                }
            }
            break;

        case RTR0MEMOBJTYPE_LOCK:
        {
            /* Unwire what LockUser/LockKernel wired, selecting the user or
               kernel map (and matching wire flags) from the owning process. */
            int fFlags = VM_MAP_WIRE_NOHOLES;
            vm_map_t pMap = kernel_map;

            if (pMemFreeBSD->Core.u.Lock.R0Process != NIL_RTR0PROCESS)
            {
                pMap = &((struct proc *)pMemFreeBSD->Core.u.Lock.R0Process)->p_vmspace->vm_map;
                fFlags |= VM_MAP_WIRE_USER;
            }
            else
                fFlags |= VM_MAP_WIRE_SYSTEM;

            rc = vm_map_unwire(pMap,
                               (vm_offset_t)pMemFreeBSD->Core.pv,
                               (vm_offset_t)pMemFreeBSD->Core.pv + pMemFreeBSD->Core.cb,
                               fFlags);
            AssertMsg(rc == KERN_SUCCESS, ("%#x", rc));
            break;
        }

        case RTR0MEMOBJTYPE_RES_VIRT:
        {
            /* Drop the reservation created by rtR0MemObjNativeReserveInMap.
               NOTE(review): this reads u.Lock.R0Process rather than
               u.ResVirt.R0Process; the fields presumably overlay in the
               union -- confirm against internal/memobj.h. */
            vm_map_t pMap = kernel_map;
            if (pMemFreeBSD->Core.u.Lock.R0Process != NIL_RTR0PROCESS)
                pMap = &((struct proc *)pMemFreeBSD->Core.u.Lock.R0Process)->p_vmspace->vm_map;
            rc = vm_map_remove(pMap,
                               (vm_offset_t)pMemFreeBSD->Core.pv,
                               (vm_offset_t)pMemFreeBSD->Core.pv + pMemFreeBSD->Core.cb);
            AssertMsg(rc == KERN_SUCCESS, ("%#x", rc));
            break;
        }

        case RTR0MEMOBJTYPE_MAPPING:
        {
            vm_map_t pMap = kernel_map;

            /* vm_map_remove will unmap the pages we inserted with pmap_enter */
            AssertMsg(pMemFreeBSD->pMappingObject != NULL, ("MappingObject is NULL\n"));
            if (pMemFreeBSD->Core.u.Mapping.R0Process != NIL_RTR0PROCESS)
                pMap = &((struct proc *)pMemFreeBSD->Core.u.Mapping.R0Process)->p_vmspace->vm_map;

            rc = vm_map_remove(pMap,
                               (vm_offset_t)pMemFreeBSD->Core.pv,
                               (vm_offset_t)pMemFreeBSD->Core.pv + pMemFreeBSD->Core.cb);
            AssertMsg(rc == KERN_SUCCESS, ("%#x", rc));
            break;
        }

        /* unused: */
        case RTR0MEMOBJTYPE_LOW:
        case RTR0MEMOBJTYPE_PHYS:
        case RTR0MEMOBJTYPE_PHYS_NC:
        default:
            AssertMsgFailed(("enmType=%d\n", pMemFreeBSD->Core.enmType));
            return VERR_INTERNAL_ERROR;
    }

    return VINF_SUCCESS;
}
172
173
174int rtR0MemObjNativeAllocPage(PPRTR0MEMOBJINTERNAL ppMem, size_t cb, bool fExecutable)
175{
176 int rc;
177 size_t cPages = cb >> PAGE_SHIFT;
178
179 /* create the object. */
180 PRTR0MEMOBJFREEBSD pMemFreeBSD = (PRTR0MEMOBJFREEBSD)rtR0MemObjNew(sizeof(*pMemFreeBSD), RTR0MEMOBJTYPE_PAGE, NULL, cb);
181 if (!pMemFreeBSD)
182 return VERR_NO_MEMORY;
183
184 pMemFreeBSD->pObject = vm_object_allocate(OBJT_DEFAULT, cPages);
185 if (pMemFreeBSD->pObject)
186 {
187 vm_offset_t MapAddress = vm_map_min(kernel_map);
188 rc = vm_map_find(kernel_map, /* map */
189 pMemFreeBSD->pObject, /* object */
190 0, /* offset */
191 &MapAddress, /* addr (IN/OUT) */
192 cb, /* length */
193 TRUE, /* find_space */
194 fExecutable /* protection */
195 ? VM_PROT_ALL
196 : VM_PROT_RW,
197 VM_PROT_ALL, /* max(_prot) */
198 FALSE); /* cow (copy-on-write) */
199 if (rc == KERN_SUCCESS)
200 {
201 vm_offset_t AddressDst = MapAddress;
202
203 rc = VINF_SUCCESS;
204
205 VM_OBJECT_LOCK(pMemFreeBSD->pObject);
206 for (size_t iPage = 0; iPage < cPages; iPage++)
207 {
208 vm_pindex_t PageIndex = OFF_TO_IDX(AddressDst);
209 vm_page_t pPage;
210
211 pPage = vm_page_alloc(pMemFreeBSD->pObject, PageIndex,
212 VM_ALLOC_NOBUSY | VM_ALLOC_SYSTEM |
213 VM_ALLOC_WIRED);
214
215#if __FreeBSD_version >= 800000 /** @todo Find exact version number */
216 /* Fixes crashes during VM termination on FreeBSD8-CURRENT amd64
217 * with kernel debugging enabled. */
218 vm_page_set_valid(pPage, 0, PAGE_SIZE);
219#endif
220
221 if (pPage)
222 {
223 vm_page_lock_queues();
224 vm_page_wire(pPage);
225 vm_page_unlock_queues();
226 /* Put the page into the page table now. */
227#if __FreeBSD_version >= 701105
228 pmap_enter(kernel_map->pmap, AddressDst, VM_PROT_NONE, pPage,
229 fExecutable
230 ? VM_PROT_ALL
231 : VM_PROT_RW,
232 TRUE);
233#else
234 pmap_enter(kernel_map->pmap, AddressDst, pPage,
235 fExecutable
236 ? VM_PROT_ALL
237 : VM_PROT_RW,
238 TRUE);
239#endif
240 }
241 else
242 {
243 /*
244 * Allocation failed. vm_map_remove will remove any
245 * page already alocated.
246 */
247 rc = VERR_NO_MEMORY;
248 break;
249 }
250 AddressDst += PAGE_SIZE;
251 }
252 VM_OBJECT_UNLOCK(pMemFreeBSD->pObject);
253
254 if (rc == VINF_SUCCESS)
255 {
256 pMemFreeBSD->Core.pv = (void *)MapAddress;
257 *ppMem = &pMemFreeBSD->Core;
258 return VINF_SUCCESS;
259 }
260
261 vm_map_remove(kernel_map,
262 MapAddress,
263 MapAddress + cb);
264 }
265 rc = VERR_NO_MEMORY; /** @todo fix translation (borrow from darwin) */
266 }
267 else
268 rc = VERR_NO_MEMORY;
269
270 rtR0MemObjDelete(&pMemFreeBSD->Core);
271 return rc;
272}
273
274
275int rtR0MemObjNativeAllocLow(PPRTR0MEMOBJINTERNAL ppMem, size_t cb, bool fExecutable)
276{
277 /*
278 * Try a Alloc first and see if we get luck, if not try contigmalloc.
279 * Might wish to try find our own pages or something later if this
280 * turns into a problemspot on AMD64 boxes.
281 */
282 int rc = rtR0MemObjNativeAllocPage(ppMem, cb, fExecutable);
283 if (RT_SUCCESS(rc))
284 {
285 size_t iPage = cb >> PAGE_SHIFT;
286 while (iPage-- > 0)
287 if (rtR0MemObjNativeGetPagePhysAddr(*ppMem, iPage) > (_4G - PAGE_SIZE))
288 {
289 RTR0MemObjFree(*ppMem, false);
290 *ppMem = NULL;
291 rc = VERR_NO_MEMORY;
292 break;
293 }
294 }
295 if (RT_FAILURE(rc))
296 rc = rtR0MemObjNativeAllocCont(ppMem, cb, fExecutable);
297 return rc;
298}
299
300
301int rtR0MemObjNativeAllocCont(PPRTR0MEMOBJINTERNAL ppMem, size_t cb, bool fExecutable)
302{
303 /* create the object. */
304 PRTR0MEMOBJFREEBSD pMemFreeBSD = (PRTR0MEMOBJFREEBSD)rtR0MemObjNew(sizeof(*pMemFreeBSD), RTR0MEMOBJTYPE_CONT, NULL, cb);
305 if (!pMemFreeBSD)
306 return VERR_NO_MEMORY;
307
308 /* do the allocation. */
309 pMemFreeBSD->Core.pv = contigmalloc(cb, /* size */
310 M_IPRTMOBJ, /* type */
311 M_NOWAIT | M_ZERO, /* flags */
312 0, /* lowest physical address*/
313 _4G-1, /* highest physical address */
314 PAGE_SIZE, /* alignment. */
315 0); /* boundrary */
316 if (pMemFreeBSD->Core.pv)
317 {
318 pMemFreeBSD->Core.u.Cont.Phys = vtophys(pMemFreeBSD->Core.pv);
319 *ppMem = &pMemFreeBSD->Core;
320 return VINF_SUCCESS;
321 }
322
323 NOREF(fExecutable);
324 rtR0MemObjDelete(&pMemFreeBSD->Core);
325 return VERR_NO_MEMORY;
326}
327
328
/**
 * Allocates physically contiguous memory with an upper physical address limit.
 *
 * @returns IPRT status code.
 * @param   ppMem       Where to store the new memory object on success.
 * @param   cb          Size in bytes.
 * @param   PhysHighest Highest acceptable physical address.
 * @param   uAlignment  Requested alignment; only PAGE_SIZE is supported.
 */
int rtR0MemObjNativeAllocPhys(PPRTR0MEMOBJINTERNAL ppMem, size_t cb, RTHCPHYS PhysHighest, size_t uAlignment)
{
    /** @todo check if there is a more appropriate API somewhere.. */

    /** @todo alignment */
    if (uAlignment != PAGE_SIZE)
        return VERR_NOT_SUPPORTED;

    /* Create the object.
       NOTE(review): the type is RTR0MEMOBJTYPE_CONT rather than _PHYS,
       matching the contigmalloc-backed storage below and the u.Cont.Phys
       field set on success; confirm the generic code accepts this. */
    PRTR0MEMOBJFREEBSD pMemFreeBSD = (PRTR0MEMOBJFREEBSD)rtR0MemObjNew(sizeof(*pMemFreeBSD), RTR0MEMOBJTYPE_CONT, NULL, cb);
    if (!pMemFreeBSD)
        return VERR_NO_MEMORY;

    /* Do the allocation: contiguous, zeroed pages below PhysHighest. */
    pMemFreeBSD->Core.pv = contigmalloc(cb,                 /* size */
                                        M_IPRTMOBJ,         /* type */
                                        M_NOWAIT | M_ZERO,  /* flags */
                                        0,                  /* lowest physical address */
                                        PhysHighest,        /* highest physical address */
                                        PAGE_SIZE,          /* alignment. */
                                        0);                 /* boundary */
    if (pMemFreeBSD->Core.pv)
    {
        pMemFreeBSD->Core.u.Cont.Phys = vtophys(pMemFreeBSD->Core.pv);
        *ppMem = &pMemFreeBSD->Core;
        return VINF_SUCCESS;
    }

    rtR0MemObjDelete(&pMemFreeBSD->Core);
    return VERR_NO_MEMORY;
}
360
361
362int rtR0MemObjNativeAllocPhysNC(PPRTR0MEMOBJINTERNAL ppMem, size_t cb, RTHCPHYS PhysHighest)
363{
364 /** @todo rtR0MemObjNativeAllocPhys / freebsd */
365 return VERR_NOT_SUPPORTED;
366}
367
368
369int rtR0MemObjNativeEnterPhys(PPRTR0MEMOBJINTERNAL ppMem, RTHCPHYS Phys, size_t cb)
370{
371 /* create the object. */
372 PRTR0MEMOBJFREEBSD pMemFreeBSD = (PRTR0MEMOBJFREEBSD)rtR0MemObjNew(sizeof(*pMemFreeBSD), RTR0MEMOBJTYPE_PHYS, NULL, cb);
373 if (!pMemFreeBSD)
374 return VERR_NO_MEMORY;
375
376 /* there is no allocation here, it needs to be mapped somewhere first. */
377 pMemFreeBSD->Core.u.Phys.fAllocated = false;
378 pMemFreeBSD->Core.u.Phys.PhysBase = Phys;
379 *ppMem = &pMemFreeBSD->Core;
380 return VINF_SUCCESS;
381}
382
383
/**
 * Locks (wires) a range of user-mode memory belonging to the given process.
 *
 * @returns IPRT status code.
 * @param   ppMem       Where to store the new memory object on success.
 * @param   R3Ptr       User-mode address of the range.
 * @param   cb          Size of the range in bytes.
 * @param   fAccess     Ignored here; vm_map_wire wires for all access.
 * @param   R0Process   The process owning the range.
 */
int rtR0MemObjNativeLockUser(PPRTR0MEMOBJINTERNAL ppMem, RTR3PTR R3Ptr, size_t cb, uint32_t fAccess, RTR0PROCESS R0Process)
{
    int rc;
    NOREF(fAccess);

    /* create the object. */
    PRTR0MEMOBJFREEBSD pMemFreeBSD = (PRTR0MEMOBJFREEBSD)rtR0MemObjNew(sizeof(*pMemFreeBSD), RTR0MEMOBJTYPE_LOCK, (void *)R3Ptr, cb);
    if (!pMemFreeBSD)
        return VERR_NO_MEMORY;

    /*
     * We could've used vslock here, but we don't wish to be subject to
     * resource usage restrictions, so we'll call vm_map_wire directly.
     */
    rc = vm_map_wire(&((struct proc *)R0Process)->p_vmspace->vm_map,    /* the map */
                     (vm_offset_t)R3Ptr,                                /* start */
                     (vm_offset_t)R3Ptr + cb,                           /* end */
                     VM_MAP_WIRE_USER | VM_MAP_WIRE_NOHOLES);           /* flags */
    if (rc == KERN_SUCCESS)
    {
        pMemFreeBSD->Core.u.Lock.R0Process = R0Process;
        *ppMem = &pMemFreeBSD->Core;
        return VINF_SUCCESS;
    }
    rtR0MemObjDelete(&pMemFreeBSD->Core);
    return VERR_NO_MEMORY;/** @todo fix mach -> vbox error conversion for freebsd. */
}
411
412
413int rtR0MemObjNativeLockKernel(PPRTR0MEMOBJINTERNAL ppMem, void *pv, size_t cb, uint32_t fAccess)
414{
415 int rc;
416 NOREF(fAccess);
417
418 /* create the object. */
419 PRTR0MEMOBJFREEBSD pMemFreeBSD = (PRTR0MEMOBJFREEBSD)rtR0MemObjNew(sizeof(*pMemFreeBSD), RTR0MEMOBJTYPE_LOCK, pv, cb);
420 if (!pMemFreeBSD)
421 return VERR_NO_MEMORY;
422
423 /* lock the memory */
424 rc = vm_map_wire(kernel_map, /* the map */
425 (vm_offset_t)pv, /* start */
426 (vm_offset_t)pv + cb, /* end */
427 VM_MAP_WIRE_SYSTEM | VM_MAP_WIRE_NOHOLES); /* flags - SYSTEM? */
428 if (rc == KERN_SUCCESS)
429 {
430 pMemFreeBSD->Core.u.Lock.R0Process = NIL_RTR0PROCESS;
431 *ppMem = &pMemFreeBSD->Core;
432 return VINF_SUCCESS;
433 }
434 rtR0MemObjDelete(&pMemFreeBSD->Core);
435 return VERR_NO_MEMORY;/** @todo fix mach -> vbox error conversion for freebsd. */
436}
437
438
439/**
440 * Worker for the two virtual address space reservers.
441 *
442 * We're leaning on the examples provided by mmap and vm_mmap in vm_mmap.c here.
443 */
444static int rtR0MemObjNativeReserveInMap(PPRTR0MEMOBJINTERNAL ppMem, void *pvFixed, size_t cb, size_t uAlignment, RTR0PROCESS R0Process, vm_map_t pMap)
445{
446 int rc;
447
448 /*
449 * The pvFixed address range must be within the VM space when specified.
450 */
451 if (pvFixed != (void *)-1
452 && ( (vm_offset_t)pvFixed < vm_map_min(pMap)
453 || (vm_offset_t)pvFixed + cb > vm_map_max(pMap)))
454 return VERR_INVALID_PARAMETER;
455
456 /*
457 * Check that the specified alignment is supported.
458 */
459 if (uAlignment > PAGE_SIZE)
460 return VERR_NOT_SUPPORTED;
461
462 /*
463 * Create the object.
464 */
465 PRTR0MEMOBJFREEBSD pMemFreeBSD = (PRTR0MEMOBJFREEBSD)rtR0MemObjNew(sizeof(*pMemFreeBSD), RTR0MEMOBJTYPE_RES_VIRT, NULL, cb);
466 if (!pMemFreeBSD)
467 return VERR_NO_MEMORY;
468
469 /*
470 * Allocate an empty VM object and map it into the requested map.
471 */
472 pMemFreeBSD->pObject = vm_object_allocate(OBJT_DEFAULT, cb >> PAGE_SHIFT);
473 if (pMemFreeBSD->pObject)
474 {
475 vm_offset_t MapAddress = pvFixed != (void *)-1
476 ? (vm_offset_t)pvFixed
477 : vm_map_min(pMap);
478 if (pvFixed != (void *)-1)
479 vm_map_remove(pMap,
480 MapAddress,
481 MapAddress + cb);
482
483 rc = vm_map_find(pMap, /* map */
484 pMemFreeBSD->pObject, /* object */
485 0, /* offset */
486 &MapAddress, /* addr (IN/OUT) */
487 cb, /* length */
488 pvFixed == (void *)-1, /* find_space */
489 VM_PROT_NONE, /* protection */
490 VM_PROT_ALL, /* max(_prot) ?? */
491 0); /* cow (copy-on-write) */
492 if (rc == KERN_SUCCESS)
493 {
494 if (R0Process != NIL_RTR0PROCESS)
495 {
496 rc = vm_map_inherit(pMap,
497 MapAddress,
498 MapAddress + cb,
499 VM_INHERIT_SHARE);
500 AssertMsg(rc == KERN_SUCCESS, ("%#x\n", rc));
501 }
502 pMemFreeBSD->Core.pv = (void *)MapAddress;
503 pMemFreeBSD->Core.u.ResVirt.R0Process = R0Process;
504 *ppMem = &pMemFreeBSD->Core;
505 return VINF_SUCCESS;
506 }
507 vm_object_deallocate(pMemFreeBSD->pObject);
508 rc = VERR_NO_MEMORY; /** @todo fix translation (borrow from darwin) */
509 }
510 else
511 rc = VERR_NO_MEMORY;
512 rtR0MemObjDelete(&pMemFreeBSD->Core);
513 return rc;
514
515}
516
/** Reserves kernel virtual address space; see rtR0MemObjNativeReserveInMap. */
int rtR0MemObjNativeReserveKernel(PPRTR0MEMOBJINTERNAL ppMem, void *pvFixed, size_t cb, size_t uAlignment)
{
    return rtR0MemObjNativeReserveInMap(ppMem, pvFixed, cb, uAlignment, NIL_RTR0PROCESS, kernel_map);
}
521
522
/** Reserves virtual address space in a user process; see rtR0MemObjNativeReserveInMap. */
int rtR0MemObjNativeReserveUser(PPRTR0MEMOBJINTERNAL ppMem, RTR3PTR R3PtrFixed, size_t cb, size_t uAlignment, RTR0PROCESS R0Process)
{
    return rtR0MemObjNativeReserveInMap(ppMem, (void *)R3PtrFixed, cb, uAlignment, R0Process,
                                        &((struct proc *)R0Process)->p_vmspace->vm_map);
}
528
529
530int rtR0MemObjNativeMapKernel(PPRTR0MEMOBJINTERNAL ppMem, RTR0MEMOBJ pMemToMap, void *pvFixed, size_t uAlignment,
531 unsigned fProt, size_t offSub, size_t cbSub)
532{
533 AssertMsgReturn(!offSub && !cbSub, ("%#x %#x\n", offSub, cbSub), VERR_NOT_SUPPORTED);
534 AssertMsgReturn(pvFixed == (void *)-1, ("%p\n", pvFixed), VERR_NOT_SUPPORTED);
535
536 /*
537 * Check that the specified alignment is supported.
538 */
539 if (uAlignment > PAGE_SIZE)
540 return VERR_NOT_SUPPORTED;
541
542
543
544/* Phys: see pmap_mapdev in i386/i386/pmap.c (http://fxr.watson.org/fxr/source/i386/i386/pmap.c?v=RELENG62#L2860) */
545
546#if 0
547/** @todo finish the implementation. */
548
549 int rc;
550 void *pvR0 = NULL;
551 PRTR0MEMOBJFREEBSD pMemToMapOs2 = (PRTR0MEMOBJFREEBSD)pMemToMap;
552 switch (pMemToMapOs2->Core.enmType)
553 {
554 /*
555 * These has kernel mappings.
556 */
557 case RTR0MEMOBJTYPE_PAGE:
558 case RTR0MEMOBJTYPE_LOW:
559 case RTR0MEMOBJTYPE_CONT:
560 pvR0 = pMemToMapOs2->Core.pv;
561 break;
562
563 case RTR0MEMOBJTYPE_PHYS_NC:
564 case RTR0MEMOBJTYPE_PHYS:
565 pvR0 = pMemToMapOs2->Core.pv;
566 if (!pvR0)
567 {
568 /* no ring-0 mapping, so allocate a mapping in the process. */
569 AssertMsgReturn(uAlignment == PAGE_SIZE, ("%#zx\n", uAlignment), VERR_NOT_SUPPORTED);
570 AssertMsgReturn(fProt & RTMEM_PROT_WRITE, ("%#x\n", fProt), VERR_NOT_SUPPORTED);
571 Assert(!pMemToMapOs2->Core.u.Phys.fAllocated);
572 ULONG ulPhys = pMemToMapOs2->Core.u.Phys.PhysBase;
573 rc = KernVMAlloc(pMemToMapOs2->Core.cb, VMDHA_PHYS, &pvR0, (PPVOID)&ulPhys, NULL);
574 if (rc)
575 return RTErrConvertFromOS2(rc);
576 pMemToMapOs2->Core.pv = pvR0;
577 }
578 break;
579
580 case RTR0MEMOBJTYPE_LOCK:
581 if (pMemToMapOs2->Core.u.Lock.R0Process != NIL_RTR0PROCESS)
582 return VERR_NOT_SUPPORTED; /** @todo implement this... */
583 pvR0 = pMemToMapOs2->Core.pv;
584 break;
585
586 case RTR0MEMOBJTYPE_RES_VIRT:
587 case RTR0MEMOBJTYPE_MAPPING:
588 default:
589 AssertMsgFailed(("enmType=%d\n", pMemToMapOs2->Core.enmType));
590 return VERR_INTERNAL_ERROR;
591 }
592
593 /*
594 * Create a dummy mapping object for it.
595 *
596 * All mappings are read/write/execute in OS/2 and there isn't
597 * any cache options, so sharing is ok. And the main memory object
598 * isn't actually freed until all the mappings have been freed up
599 * (reference counting).
600 */
601 PRTR0MEMOBJFREEBSD pMemFreeBSD = (PRTR0MEMOBJFREEBSD)rtR0MemObjNew(RT_OFFSETOF(RTR0MEMOBJOS2, Lock), RTR0MEMOBJTYPE_MAPPING, pvR0, pMemToMapOs2->Core.cb);
602 if (pMemFreeBSD)
603 {
604 pMemFreeBSD->Core.u.Mapping.R0Process = NIL_RTR0PROCESS;
605 *ppMem = &pMemFreeBSD->Core;
606 return VINF_SUCCESS;
607 }
608 return VERR_NO_MEMORY;
609#endif
610 return VERR_NOT_IMPLEMENTED;
611}
612
613
614/* see http://markmail.org/message/udhq33tefgtyfozs */
615int rtR0MemObjNativeMapUser(PPRTR0MEMOBJINTERNAL ppMem, RTR0MEMOBJ pMemToMap, RTR3PTR R3PtrFixed, size_t uAlignment, unsigned fProt, RTR0PROCESS R0Process)
616{
617 /*
618 * Check for unsupported stuff.
619 */
620 AssertMsgReturn(R0Process == RTR0ProcHandleSelf(), ("%p != %p\n", R0Process, RTR0ProcHandleSelf()), VERR_NOT_SUPPORTED);
621 AssertMsgReturn(R3PtrFixed == (RTR3PTR)-1, ("%p\n", R3PtrFixed), VERR_NOT_SUPPORTED);
622 if (uAlignment > PAGE_SIZE)
623 return VERR_NOT_SUPPORTED;
624
625 int rc;
626 vm_object_t pObjectToMap = ((PRTR0MEMOBJFREEBSD)pMemToMap)->pObject;
627 struct proc *pProc = (struct proc *)R0Process;
628 struct vm_map *pProcMap = &pProc->p_vmspace->vm_map;
629
630 /* calc protection */
631 vm_prot_t ProtectionFlags = 0;
632 if ((fProt & RTMEM_PROT_NONE) == RTMEM_PROT_NONE)
633 ProtectionFlags = VM_PROT_NONE;
634 if ((fProt & RTMEM_PROT_READ) == RTMEM_PROT_READ)
635 ProtectionFlags |= VM_PROT_READ;
636 if ((fProt & RTMEM_PROT_WRITE) == RTMEM_PROT_WRITE)
637 ProtectionFlags |= VM_PROT_WRITE;
638 if ((fProt & RTMEM_PROT_EXEC) == RTMEM_PROT_EXEC)
639 ProtectionFlags |= VM_PROT_EXECUTE;
640
641 /* calc mapping address */
642 PROC_LOCK(pProc);
643 vm_offset_t AddrR3 = round_page((vm_offset_t)pProc->p_vmspace->vm_daddr + lim_max(pProc, RLIMIT_DATA));
644 PROC_UNLOCK(pProc);
645
646 vm_object_t pObjectNew = vm_object_allocate(OBJT_PHYS, pMemToMap->cb >> PAGE_SHIFT);
647 if (!RT_UNLIKELY(pObjectNew))
648 return VERR_NO_MEMORY;
649
650 /* Insert the object in the map. */
651 rc = vm_map_find(pProcMap, /* Map to insert the object in */
652 pObjectNew , /* Object to map */
653 0, /* Start offset in the object */
654 &AddrR3, /* Start address IN/OUT */
655 pMemToMap->cb, /* Size of the mapping */
656 TRUE, /* Whether a suitable address should be searched for first */
657 ProtectionFlags, /* protection flags */
658 VM_PROT_ALL, /* Maximum protection flags */
659 0); /* Copy on write */
660
661 /* Map the memory page by page into the destination map. */
662 if (rc == KERN_SUCCESS)
663 {
664 size_t cLeft = pMemToMap->cb >> PAGE_SHIFT;
665 vm_offset_t AddrToMap = (vm_offset_t)pMemToMap->pv;
666 pmap_t pPhysicalMap = pProcMap->pmap;
667 vm_offset_t AddrR3Dst = AddrR3;
668
669 /* Insert the memory page by page into the mapping. */
670 while (cLeft-- > 0)
671 {
672 vm_page_t Page = PHYS_TO_VM_PAGE(vtophys(AddrToMap));
673
674#if __FreeBSD_version >= 701105
675 pmap_enter(pPhysicalMap, AddrR3Dst, VM_PROT_NONE, Page, ProtectionFlags, TRUE);
676#else
677 pmap_enter(pPhysicalMap, AddrR3Dst, Page, ProtectionFlags, TRUE);
678#endif
679 AddrToMap += PAGE_SIZE;
680 AddrR3Dst += PAGE_SIZE;
681 }
682 pObjectToMap = pObjectNew;
683 }
684 else
685 vm_object_deallocate(pObjectNew);
686
687 if (rc == KERN_SUCCESS)
688 {
689 /*
690 * Create a mapping object for it.
691 */
692 PRTR0MEMOBJFREEBSD pMemFreeBSD = (PRTR0MEMOBJFREEBSD)rtR0MemObjNew(sizeof(RTR0MEMOBJFREEBSD),
693 RTR0MEMOBJTYPE_MAPPING,
694 (void *)AddrR3,
695 pMemToMap->cb);
696 if (pMemFreeBSD)
697 {
698 Assert((vm_offset_t)pMemFreeBSD->Core.pv == AddrR3);
699 pMemFreeBSD->Core.u.Mapping.R0Process = R0Process;
700 pMemFreeBSD->pMappingObject = pObjectToMap;
701 *ppMem = &pMemFreeBSD->Core;
702 return VINF_SUCCESS;
703 }
704
705 rc = vm_map_remove(pProcMap, ((vm_offset_t)AddrR3), ((vm_offset_t)AddrR3) + pMemToMap->cb);
706 AssertMsg(rc == KERN_SUCCESS, ("Deleting mapping failed\n"));
707 }
708
709 if (pObjectToMap)
710 vm_object_deallocate(pObjectToMap);
711
712 return VERR_NO_MEMORY;
713}
714
715
716int rtR0MemObjNativeProtect(PRTR0MEMOBJINTERNAL pMem, size_t offSub, size_t cbSub, uint32_t fProt)
717{
718 NOREF(pMem);
719 NOREF(offSub);
720 NOREF(cbSub);
721 NOREF(fProt);
722 return VERR_NOT_SUPPORTED;
723}
724
725
726RTHCPHYS rtR0MemObjNativeGetPagePhysAddr(PRTR0MEMOBJINTERNAL pMem, size_t iPage)
727{
728 PRTR0MEMOBJFREEBSD pMemFreeBSD = (PRTR0MEMOBJFREEBSD)pMem;
729
730 switch (pMemFreeBSD->Core.enmType)
731 {
732 case RTR0MEMOBJTYPE_LOCK:
733 {
734 if ( pMemFreeBSD->Core.u.Lock.R0Process != NIL_RTR0PROCESS
735 && pMemFreeBSD->Core.u.Lock.R0Process != (RTR0PROCESS)curproc)
736 {
737 /* later */
738 return NIL_RTHCPHYS;
739 }
740
741 vm_offset_t pb = (vm_offset_t)pMemFreeBSD->Core.pv + (iPage << PAGE_SHIFT);
742
743 struct proc *pProc = (struct proc *)pMemFreeBSD->Core.u.Lock.R0Process;
744 struct vm_map *pProcMap = &pProc->p_vmspace->vm_map;
745 pmap_t pPhysicalMap = pProcMap->pmap;
746
747 return pmap_extract(pPhysicalMap, pb);
748 }
749
750 case RTR0MEMOBJTYPE_PAGE:
751 {
752 vm_offset_t pb = (vm_offset_t)pMemFreeBSD->Core.pv + (iPage << PAGE_SHIFT);
753 return vtophys(pb);
754 }
755
756 case RTR0MEMOBJTYPE_MAPPING:
757 {
758 vm_offset_t pb = (vm_offset_t)pMemFreeBSD->Core.pv + (iPage << PAGE_SHIFT);
759
760 if (pMemFreeBSD->Core.u.Mapping.R0Process != NIL_RTR0PROCESS)
761 {
762 struct proc *pProc = (struct proc *)pMemFreeBSD->Core.u.Mapping.R0Process;
763 struct vm_map *pProcMap = &pProc->p_vmspace->vm_map;
764 pmap_t pPhysicalMap = pProcMap->pmap;
765
766 return pmap_extract(pPhysicalMap, pb);
767 }
768 return vtophys(pb);
769 }
770
771 case RTR0MEMOBJTYPE_CONT:
772 return pMemFreeBSD->Core.u.Cont.Phys + (iPage << PAGE_SHIFT);
773
774 case RTR0MEMOBJTYPE_PHYS:
775 return pMemFreeBSD->Core.u.Phys.PhysBase + (iPage << PAGE_SHIFT);
776
777 case RTR0MEMOBJTYPE_PHYS_NC:
778 case RTR0MEMOBJTYPE_RES_VIRT:
779 case RTR0MEMOBJTYPE_LOW:
780 default:
781 return NIL_RTHCPHYS;
782 }
783}
784
Note: See TracBrowser for help on using the repository browser.

© 2024 Oracle Support Privacy / Do Not Sell My Info Terms of Use Trademark Policy Automated Access Etiquette