VirtualBox

source: vbox/trunk/src/VBox/Runtime/r0drv/freebsd/memobj-r0drv-freebsd.c@39521

Last change on this file since 39521 was 39521, checked in by vboxsync, 13 years ago

FreeBSD: Certain fixes to run on CURRENT and clean up the support driver character device handling, thanks to Bernhard Froehlich, Jung-uk Kim and Ed Schouten

/* $Id: memobj-r0drv-freebsd.c 39521 2011-12-03 22:59:46Z vboxsync $ */
/** @file
 * IPRT - Ring-0 Memory Objects, FreeBSD.
 */

/*
 * Copyright (c) 2007 knut st. osmundsen <[email protected]>
 *
 * Permission is hereby granted, free of charge, to any person
 * obtaining a copy of this software and associated documentation
 * files (the "Software"), to deal in the Software without
 * restriction, including without limitation the rights to use,
 * copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following
 * conditions:
 *
 * The above copyright notice and this permission notice shall be
 * included in all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
 * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
 * HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
 * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */


/*******************************************************************************
*   Header Files                                                               *
*******************************************************************************/
#include "the-freebsd-kernel.h"

#include <iprt/memobj.h>
#include <iprt/mem.h>
#include <iprt/err.h>
#include <iprt/assert.h>
#include <iprt/log.h>
#include <iprt/param.h>
#include <iprt/process.h>
#include "internal/memobj.h"

/*******************************************************************************
*   Structures and Typedefs                                                    *
*******************************************************************************/
/**
 * The FreeBSD version of the memory object structure.
 */
typedef struct RTR0MEMOBJFREEBSD
{
    /** The core structure. */
    RTR0MEMOBJINTERNAL Core;
    /** Type dependent data */
    union
    {
        /** Non physical memory allocations */
        struct
        {
            /** The VM object associated with the allocation. */
            vm_object_t         pObject;
        } NonPhys;
        /** Physical memory allocations */
        struct
        {
            /** Number of pages */
            uint32_t            cPages;
            /** Array of pages - variable */
            vm_page_t           apPages[1];
        } Phys;
    } u;
} RTR0MEMOBJFREEBSD, *PRTR0MEMOBJFREEBSD;


MALLOC_DEFINE(M_IPRTMOBJ, "iprtmobj", "IPRT - R0MemObj");


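/*
 * Orientation note: ring-0 code does not call these workers directly; it goes
 * through the generic RTR0MemObj front-end (r0drv/memobj-r0drv.cpp), which
 * dispatches to the rtR0MemObjNative* functions below. A minimal sketch of a
 * typical caller (illustrative only):
 *
 *     RTR0MEMOBJ hMemObj;
 *     int rc = RTR0MemObjAllocPage(&hMemObj, PAGE_SIZE, false);   // fExecutable
 *     if (RT_SUCCESS(rc))
 *     {
 *         RTHCPHYS HCPhys = RTR0MemObjGetPagePhysAddr(hMemObj, 0);
 *         ...
 *         RTR0MemObjFree(hMemObj, false);                         // fFreeMappings
 *     }
 */
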
/**
 * Gets the virtual memory map the specified object is mapped into.
 *
 * @returns VM map handle on success, NULL if no map.
 * @param   pMem                The memory object.
 */
static vm_map_t rtR0MemObjFreeBSDGetMap(PRTR0MEMOBJINTERNAL pMem)
{
    switch (pMem->enmType)
    {
        case RTR0MEMOBJTYPE_PAGE:
        case RTR0MEMOBJTYPE_LOW:
        case RTR0MEMOBJTYPE_CONT:
            return kernel_map;

        case RTR0MEMOBJTYPE_PHYS:
        case RTR0MEMOBJTYPE_PHYS_NC:
            return NULL; /* pretend these have no mapping atm. */

        case RTR0MEMOBJTYPE_LOCK:
            return pMem->u.Lock.R0Process == NIL_RTR0PROCESS
                 ? kernel_map
                 : &((struct proc *)pMem->u.Lock.R0Process)->p_vmspace->vm_map;

        case RTR0MEMOBJTYPE_RES_VIRT:
            return pMem->u.ResVirt.R0Process == NIL_RTR0PROCESS
                 ? kernel_map
                 : &((struct proc *)pMem->u.ResVirt.R0Process)->p_vmspace->vm_map;

        case RTR0MEMOBJTYPE_MAPPING:
            return pMem->u.Mapping.R0Process == NIL_RTR0PROCESS
                 ? kernel_map
                 : &((struct proc *)pMem->u.Mapping.R0Process)->p_vmspace->vm_map;

        default:
            return NULL;
    }
}


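/**
 * Frees the native resources backing a memory object.
 *
 * @returns IPRT status code.
 * @param   pMem                The memory object to free.
 */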
DECLHIDDEN(int) rtR0MemObjNativeFree(RTR0MEMOBJ pMem)
{
    PRTR0MEMOBJFREEBSD pMemFreeBSD = (PRTR0MEMOBJFREEBSD)pMem;
    int rc;

    switch (pMemFreeBSD->Core.enmType)
    {
        case RTR0MEMOBJTYPE_CONT:
            contigfree(pMemFreeBSD->Core.pv, pMemFreeBSD->Core.cb, M_IPRTMOBJ);
            break;

        case RTR0MEMOBJTYPE_PAGE:
        {
            rc = vm_map_remove(kernel_map,
                               (vm_offset_t)pMemFreeBSD->Core.pv,
                               (vm_offset_t)pMemFreeBSD->Core.pv + pMemFreeBSD->Core.cb);
            AssertMsg(rc == KERN_SUCCESS, ("%#x", rc));

            vm_page_lock_queues();
            for (uint32_t iPage = 0; iPage < pMemFreeBSD->u.Phys.cPages; iPage++)
            {
                vm_page_t pPage = pMemFreeBSD->u.Phys.apPages[iPage];
                vm_page_unwire(pPage, 0);
                vm_page_free(pPage);
            }
            vm_page_unlock_queues();
            break;
        }

        case RTR0MEMOBJTYPE_LOCK:
        {
            vm_map_t pMap = kernel_map;

            if (pMemFreeBSD->Core.u.Lock.R0Process != NIL_RTR0PROCESS)
                pMap = &((struct proc *)pMemFreeBSD->Core.u.Lock.R0Process)->p_vmspace->vm_map;

            rc = vm_map_unwire(pMap,
                               (vm_offset_t)pMemFreeBSD->Core.pv,
                               (vm_offset_t)pMemFreeBSD->Core.pv + pMemFreeBSD->Core.cb,
                               VM_MAP_WIRE_SYSTEM | VM_MAP_WIRE_NOHOLES);
            AssertMsg(rc == KERN_SUCCESS, ("%#x", rc));
            break;
        }

        case RTR0MEMOBJTYPE_RES_VIRT:
        {
            vm_map_t pMap = kernel_map;
            if (pMemFreeBSD->Core.u.Lock.R0Process != NIL_RTR0PROCESS)
                pMap = &((struct proc *)pMemFreeBSD->Core.u.Lock.R0Process)->p_vmspace->vm_map;
            rc = vm_map_remove(pMap,
                               (vm_offset_t)pMemFreeBSD->Core.pv,
                               (vm_offset_t)pMemFreeBSD->Core.pv + pMemFreeBSD->Core.cb);
            AssertMsg(rc == KERN_SUCCESS, ("%#x", rc));
            break;
        }

        case RTR0MEMOBJTYPE_MAPPING:
        {
            vm_map_t pMap = kernel_map;

            if (pMemFreeBSD->Core.u.Mapping.R0Process != NIL_RTR0PROCESS)
                pMap = &((struct proc *)pMemFreeBSD->Core.u.Mapping.R0Process)->p_vmspace->vm_map;

            rc = vm_map_remove(pMap,
                               (vm_offset_t)pMemFreeBSD->Core.pv,
                               (vm_offset_t)pMemFreeBSD->Core.pv + pMemFreeBSD->Core.cb);
            AssertMsg(rc == KERN_SUCCESS, ("%#x", rc));
            break;
        }

        case RTR0MEMOBJTYPE_PHYS:
        case RTR0MEMOBJTYPE_PHYS_NC:
        {
            vm_page_lock_queues();
            for (uint32_t iPage = 0; iPage < pMemFreeBSD->u.Phys.cPages; iPage++)
            {
                vm_page_t pPage = pMemFreeBSD->u.Phys.apPages[iPage];
                vm_page_unwire(pPage, 0);
                vm_page_free(pPage);
            }
            vm_page_unlock_queues();
            break;
        }

#ifdef USE_KMEM_ALLOC_ATTR
        case RTR0MEMOBJTYPE_LOW:
        {
            kmem_free(kernel_map, (vm_offset_t)pMemFreeBSD->Core.pv, pMemFreeBSD->Core.cb);
            break;
        }
#else
        case RTR0MEMOBJTYPE_LOW: /* unused */
#endif
        default:
            AssertMsgFailed(("enmType=%d\n", pMemFreeBSD->Core.enmType));
            return VERR_INTERNAL_ERROR;
    }

    return VINF_SUCCESS;
}


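/**
 * Allocates page aligned, wired kernel memory backed by individual pages.
 *
 * @returns IPRT status code.
 * @param   ppMem               Where to store the ring-0 memory object handle.
 * @param   cb                  Number of bytes to allocate, page aligned.
 * @param   fExecutable         Whether the mapping should be executable.
 */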
DECLHIDDEN(int) rtR0MemObjNativeAllocPage(PPRTR0MEMOBJINTERNAL ppMem, size_t cb, bool fExecutable)
{
    int rc;
    size_t cPages = cb >> PAGE_SHIFT;

    /* create the object. */
    PRTR0MEMOBJFREEBSD pMemFreeBSD = (PRTR0MEMOBJFREEBSD)rtR0MemObjNew(RT_OFFSETOF(RTR0MEMOBJFREEBSD, u.Phys.apPages[cPages]),
                                                                       RTR0MEMOBJTYPE_PAGE, NULL, cb);
    if (!pMemFreeBSD)
        return VERR_NO_MEMORY;

    pMemFreeBSD->u.Phys.cPages = cPages;

    vm_offset_t MapAddress = vm_map_min(kernel_map);
    rc = vm_map_find(kernel_map,    /* map */
                     NULL,          /* object */
                     0,             /* offset */
                     &MapAddress,   /* addr (IN/OUT) */
                     cb,            /* length */
                     TRUE,          /* find_space */
                     fExecutable    /* protection */
                     ? VM_PROT_ALL
                     : VM_PROT_RW,
                     VM_PROT_ALL,   /* max(_prot) */
                     0);            /* cow (copy-on-write) */
    if (rc == KERN_SUCCESS)
    {
        rc = VINF_SUCCESS;

        for (size_t iPage = 0; iPage < cPages; iPage++)
        {
            vm_page_t pPage;

            pPage = vm_page_alloc(NULL, iPage,
                                  VM_ALLOC_SYSTEM |
                                  VM_ALLOC_WIRED | VM_ALLOC_NOOBJ);

            if (!pPage)
            {
                /*
                 * Out of pages.
                 * Remove already allocated pages.
                 */
                while (iPage-- > 0)
                {
                    pPage = pMemFreeBSD->u.Phys.apPages[iPage];
                    vm_page_lock_queues();
                    vm_page_unwire(pPage, 0);
                    vm_page_free(pPage);
                    vm_page_unlock_queues();
                }
                rc = VERR_NO_MEMORY;
                break;
            }

            pPage->valid = VM_PAGE_BITS_ALL;
            pMemFreeBSD->u.Phys.apPages[iPage] = pPage;
        }

        if (rc == VINF_SUCCESS)
        {
            vm_offset_t AddressDst = MapAddress;

            for (size_t iPage = 0; iPage < cPages; iPage++)
            {
                vm_page_t pPage = pMemFreeBSD->u.Phys.apPages[iPage];

                MY_PMAP_ENTER(kernel_map->pmap, AddressDst, pPage,
                              fExecutable
                              ? VM_PROT_ALL
                              : VM_PROT_RW,
                              TRUE);

                AddressDst += PAGE_SIZE;
            }

            /* Store start address */
            pMemFreeBSD->Core.pv = (void *)MapAddress;
            *ppMem = &pMemFreeBSD->Core;
            return VINF_SUCCESS;
        }
    }
    rc = VERR_NO_MEMORY; /** @todo fix translation (borrow from darwin) */

    rtR0MemObjDelete(&pMemFreeBSD->Core);
    return rc;
}


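/**
 * Allocates page aligned kernel memory with all physical pages below 4GB.
 *
 * @returns IPRT status code.
 * @param   ppMem               Where to store the ring-0 memory object handle.
 * @param   cb                  Number of bytes to allocate, page aligned.
 * @param   fExecutable         Whether the mapping should be executable.
 */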
DECLHIDDEN(int) rtR0MemObjNativeAllocLow(PPRTR0MEMOBJINTERNAL ppMem, size_t cb, bool fExecutable)
{
#ifdef USE_KMEM_ALLOC_ATTR
    /*
     * Use kmem_alloc_attr; fExecutable is not needed because the
     * memory will be executable by default.
     */
    NOREF(fExecutable);

    /* create the object. */
    PRTR0MEMOBJFREEBSD pMemFreeBSD = (PRTR0MEMOBJFREEBSD)rtR0MemObjNew(sizeof(*pMemFreeBSD), RTR0MEMOBJTYPE_LOW, NULL, cb);
    if (!pMemFreeBSD)
        return VERR_NO_MEMORY;

    pMemFreeBSD->Core.pv = (void *)kmem_alloc_attr(kernel_map,          /* Kernel */
                                                   cb,                  /* Amount */
                                                   M_ZERO,              /* Zero memory */
                                                   0,                   /* Low physical address */
                                                   _4G - PAGE_SIZE,     /* Highest physical address */
                                                   VM_MEMATTR_DEFAULT); /* Default memory attributes */
    if (!pMemFreeBSD->Core.pv)
    {
        rtR0MemObjDelete(&pMemFreeBSD->Core);
        return VERR_NO_MEMORY;
    }

    *ppMem = &pMemFreeBSD->Core;

    return VINF_SUCCESS;
#else
    /*
     * Try a page allocation first and see if we get lucky, if not try contigmalloc.
     * Might wish to try find our own pages or something later if this
     * turns into a problem spot on AMD64 boxes.
     */
    int rc = rtR0MemObjNativeAllocPage(ppMem, cb, fExecutable);
    if (RT_SUCCESS(rc))
    {
        size_t iPage = cb >> PAGE_SHIFT;
        while (iPage-- > 0)
            if (rtR0MemObjNativeGetPagePhysAddr(*ppMem, iPage) > (_4G - PAGE_SIZE))
            {
                RTR0MemObjFree(*ppMem, false);
                *ppMem = NULL;
                rc = VERR_NO_MEMORY;
                break;
            }
    }
    if (RT_FAILURE(rc))
        rc = rtR0MemObjNativeAllocCont(ppMem, cb, fExecutable);
    return rc;
#endif
}


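/**
 * Allocates physically contiguous, page aligned kernel memory below 4GB.
 *
 * @returns IPRT status code.
 * @param   ppMem               Where to store the ring-0 memory object handle.
 * @param   cb                  Number of bytes to allocate, page aligned.
 * @param   fExecutable         Whether the mapping should be executable.
 */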
DECLHIDDEN(int) rtR0MemObjNativeAllocCont(PPRTR0MEMOBJINTERNAL ppMem, size_t cb, bool fExecutable)
{
    /* create the object. */
    PRTR0MEMOBJFREEBSD pMemFreeBSD = (PRTR0MEMOBJFREEBSD)rtR0MemObjNew(sizeof(*pMemFreeBSD), RTR0MEMOBJTYPE_CONT, NULL, cb);
    if (!pMemFreeBSD)
        return VERR_NO_MEMORY;

    /* do the allocation. */
    pMemFreeBSD->Core.pv = contigmalloc(cb,                   /* size */
                                        M_IPRTMOBJ,           /* type */
                                        M_NOWAIT | M_ZERO,    /* flags */
                                        0,                    /* lowest physical address */
                                        _4G-1,                /* highest physical address */
                                        PAGE_SIZE,            /* alignment. */
                                        0);                   /* boundary */
    if (pMemFreeBSD->Core.pv)
    {
        pMemFreeBSD->Core.u.Cont.Phys = vtophys(pMemFreeBSD->Core.pv);
        *ppMem = &pMemFreeBSD->Core;
        return VINF_SUCCESS;
    }

    NOREF(fExecutable);
    rtR0MemObjDelete(&pMemFreeBSD->Core);
    return VERR_NO_MEMORY;
}


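/**
 * Completes the initialization of a page returned by the vm_phys /
 * vm_page_alloc_contig interfaces on older kernels, marking it as a
 * wired, unmanaged page.
 *
 * @param   pPage               The page to initialize.
 * @param   iPage               The page index within the allocation.
 */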
static void rtR0MemObjFreeBSDPhysPageInit(vm_page_t pPage, vm_pindex_t iPage)
{
#if __FreeBSD_version <= 1000000
    pPage->wire_count = 1;
    pPage->pindex     = iPage;
    pPage->act_count  = 0;
    atomic_add_int(&cnt.v_wire_count, 1);

#if __FreeBSD_version >= 900040
    Assert((pPage->oflags & VPO_UNMANAGED) != 0);
#else
    Assert((pPage->flags & PG_UNMANAGED) != 0);
#endif
#endif
}


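/**
 * Worker for the physical page allocation APIs, covering both the
 * contiguous and the non-contiguous variants.
 *
 * @returns IPRT status code.
 * @param   ppMem               Where to store the ring-0 memory object handle.
 * @param   enmType             The object type (PHYS or PHYS_NC).
 * @param   cb                  Number of bytes to allocate, page aligned.
 * @param   PhysHighest         The highest permitted physical address,
 *                              NIL_RTHCPHYS if no restriction.
 * @param   uAlignment          The requested page alignment in bytes.
 * @param   fContiguous         Whether the pages must be physically contiguous.
 */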
static int rtR0MemObjFreeBSDAllocPhysPages(PPRTR0MEMOBJINTERNAL ppMem, RTR0MEMOBJTYPE enmType,
                                           size_t cb,
                                           RTHCPHYS PhysHighest, size_t uAlignment,
                                           bool fContiguous)
{
    int        rc = VINF_SUCCESS;
    uint32_t   cPages = cb >> PAGE_SHIFT;
    vm_paddr_t VmPhysAddrHigh;
#if __FreeBSD_version >= 1000001
    int        pFlags = VM_ALLOC_INTERRUPT | VM_ALLOC_NOOBJ | VM_ALLOC_WIRED;
#endif

    /* create the object. */
    PRTR0MEMOBJFREEBSD pMemFreeBSD = (PRTR0MEMOBJFREEBSD)rtR0MemObjNew(RT_OFFSETOF(RTR0MEMOBJFREEBSD, u.Phys.apPages[cPages]),
                                                                       enmType, NULL, cb);
    if (!pMemFreeBSD)
        return VERR_NO_MEMORY;

    pMemFreeBSD->u.Phys.cPages = cPages;

    if (PhysHighest != NIL_RTHCPHYS)
        VmPhysAddrHigh = PhysHighest;
    else
        VmPhysAddrHigh = ~(vm_paddr_t)0;

    if (fContiguous)
    {
#if __FreeBSD_version >= 1000001
        vm_page_t pPage = vm_page_alloc_contig(NULL, 0, pFlags, cPages, 0, VmPhysAddrHigh, uAlignment, 0, VM_MEMATTR_DEFAULT);
#else
        vm_page_t pPage = vm_phys_alloc_contig(cPages, 0, VmPhysAddrHigh, uAlignment, 0);
#endif

        if (pPage)
            for (uint32_t iPage = 0; iPage < cPages; iPage++)
            {
                rtR0MemObjFreeBSDPhysPageInit(&pPage[iPage], iPage);
                pMemFreeBSD->u.Phys.apPages[iPage] = &pPage[iPage];
            }
        else
            rc = VERR_NO_MEMORY;
    }
    else
    {
        /* Allocate page by page */
        for (uint32_t iPage = 0; iPage < cPages; iPage++)
        {
#if __FreeBSD_version >= 1000001
            vm_page_t pPage = vm_page_alloc_contig(NULL, iPage, pFlags, 1, 0, VmPhysAddrHigh, uAlignment, 0, VM_MEMATTR_DEFAULT);
#else
            vm_page_t pPage = vm_phys_alloc_contig(1, 0, VmPhysAddrHigh, uAlignment, 0);
#endif

            if (!pPage)
            {
                /* Free all allocated pages */
                while (iPage-- > 0)
                {
                    pPage = pMemFreeBSD->u.Phys.apPages[iPage];
                    vm_page_lock_queues();
                    vm_page_unwire(pPage, 0);
                    vm_page_free(pPage);
                    vm_page_unlock_queues();
                }
                rc = VERR_NO_MEMORY;
                break;
            }
            rtR0MemObjFreeBSDPhysPageInit(pPage, iPage);
            pMemFreeBSD->u.Phys.apPages[iPage] = pPage;
        }
    }

    if (RT_FAILURE(rc))
        rtR0MemObjDelete(&pMemFreeBSD->Core);
    else
    {
        if (enmType == RTR0MEMOBJTYPE_PHYS)
        {
            pMemFreeBSD->Core.u.Phys.PhysBase = VM_PAGE_TO_PHYS(pMemFreeBSD->u.Phys.apPages[0]);
            pMemFreeBSD->Core.u.Phys.fAllocated = true;
        }

        *ppMem = &pMemFreeBSD->Core;
    }

    return rc;
}


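/**
 * Allocates contiguous physical memory, not necessarily mapped anywhere.
 *
 * @returns IPRT status code.
 * @param   ppMem               Where to store the ring-0 memory object handle.
 * @param   cb                  Number of bytes to allocate, page aligned.
 * @param   PhysHighest         The highest permitted physical address,
 *                              NIL_RTHCPHYS if no restriction.
 * @param   uAlignment          The requested alignment in bytes.
 */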
DECLHIDDEN(int) rtR0MemObjNativeAllocPhys(PPRTR0MEMOBJINTERNAL ppMem, size_t cb, RTHCPHYS PhysHighest, size_t uAlignment)
{
#if 1
    return rtR0MemObjFreeBSDAllocPhysPages(ppMem, RTR0MEMOBJTYPE_PHYS, cb, PhysHighest, uAlignment, true);
#else
    /* create the object. */
    PRTR0MEMOBJFREEBSD pMemFreeBSD = (PRTR0MEMOBJFREEBSD)rtR0MemObjNew(sizeof(*pMemFreeBSD), RTR0MEMOBJTYPE_CONT, NULL, cb);
    if (!pMemFreeBSD)
        return VERR_NO_MEMORY;

    /* do the allocation. */
    pMemFreeBSD->Core.pv = contigmalloc(cb,                   /* size */
                                        M_IPRTMOBJ,           /* type */
                                        M_NOWAIT | M_ZERO,    /* flags */
                                        0,                    /* lowest physical address */
                                        _4G-1,                /* highest physical address */
                                        uAlignment,           /* alignment. */
                                        0);                   /* boundary */
    if (pMemFreeBSD->Core.pv)
    {
        pMemFreeBSD->Core.u.Cont.Phys = vtophys(pMemFreeBSD->Core.pv);
        *ppMem = &pMemFreeBSD->Core;
        return VINF_SUCCESS;
    }

    rtR0MemObjDelete(&pMemFreeBSD->Core);
    return VERR_NO_MEMORY;
#endif
}


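/**
 * Allocates non-contiguous physical memory, not necessarily mapped anywhere.
 *
 * @returns IPRT status code.
 * @param   ppMem               Where to store the ring-0 memory object handle.
 * @param   cb                  Number of bytes to allocate, page aligned.
 * @param   PhysHighest         The highest permitted physical address,
 *                              NIL_RTHCPHYS if no restriction.
 */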
DECLHIDDEN(int) rtR0MemObjNativeAllocPhysNC(PPRTR0MEMOBJINTERNAL ppMem, size_t cb, RTHCPHYS PhysHighest)
{
#if 1
    return rtR0MemObjFreeBSDAllocPhysPages(ppMem, RTR0MEMOBJTYPE_PHYS_NC, cb, PhysHighest, PAGE_SIZE, false);
#else
    return VERR_NOT_SUPPORTED;
#endif
}


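/**
 * Creates a memory object for a pre-existing physical address range, such
 * as device MMIO; no memory is allocated here.
 *
 * @returns IPRT status code.
 * @param   ppMem               Where to store the ring-0 memory object handle.
 * @param   Phys                The physical base address.
 * @param   cb                  Size of the range, page aligned.
 * @param   uCachePolicy        The caching policy; only
 *                              RTMEM_CACHE_POLICY_DONT_CARE is supported.
 */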
DECLHIDDEN(int) rtR0MemObjNativeEnterPhys(PPRTR0MEMOBJINTERNAL ppMem, RTHCPHYS Phys, size_t cb, uint32_t uCachePolicy)
{
    AssertReturn(uCachePolicy == RTMEM_CACHE_POLICY_DONT_CARE, VERR_NOT_SUPPORTED);

    /* create the object. */
    PRTR0MEMOBJFREEBSD pMemFreeBSD = (PRTR0MEMOBJFREEBSD)rtR0MemObjNew(sizeof(*pMemFreeBSD), RTR0MEMOBJTYPE_PHYS, NULL, cb);
    if (!pMemFreeBSD)
        return VERR_NO_MEMORY;

    /* there is no allocation here, it needs to be mapped somewhere first. */
    pMemFreeBSD->Core.u.Phys.fAllocated = false;
    pMemFreeBSD->Core.u.Phys.PhysBase = Phys;
    pMemFreeBSD->Core.u.Phys.uCachePolicy = uCachePolicy;
    *ppMem = &pMemFreeBSD->Core;
    return VINF_SUCCESS;
}


/**
 * Worker locking the memory in either kernel or user maps.
 */
static int rtR0MemObjNativeLockInMap(PPRTR0MEMOBJINTERNAL ppMem, vm_map_t pVmMap,
                                     vm_offset_t AddrStart, size_t cb, uint32_t fAccess,
                                     RTR0PROCESS R0Process, int fFlags)
{
    int rc;
    NOREF(fAccess);

    /* create the object. */
    PRTR0MEMOBJFREEBSD pMemFreeBSD = (PRTR0MEMOBJFREEBSD)rtR0MemObjNew(sizeof(*pMemFreeBSD), RTR0MEMOBJTYPE_LOCK, (void *)AddrStart, cb);
    if (!pMemFreeBSD)
        return VERR_NO_MEMORY;

    /*
     * We could've used vslock here, but we don't wish to be subject to
     * resource usage restrictions, so we'll call vm_map_wire directly.
     */
    rc = vm_map_wire(pVmMap,            /* the map */
                     AddrStart,         /* start */
                     AddrStart + cb,    /* end */
                     fFlags);           /* flags */
    if (rc == KERN_SUCCESS)
    {
        pMemFreeBSD->Core.u.Lock.R0Process = R0Process;
        *ppMem = &pMemFreeBSD->Core;
        return VINF_SUCCESS;
    }
    rtR0MemObjDelete(&pMemFreeBSD->Core);
    return VERR_NO_MEMORY; /** @todo fix mach -> vbox error conversion for freebsd. */
}


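/**
 * Locks down (wires) a range of user memory in the given process.
 *
 * @returns IPRT status code.
 * @param   ppMem               Where to store the ring-0 memory object handle.
 * @param   R3Ptr               The user address to lock, page aligned.
 * @param   cb                  Size of the range, page aligned.
 * @param   fAccess             Intended access, a RTMEM_PROT_XXX mask (ignored here).
 * @param   R0Process           The process to lock the memory in.
 */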
DECLHIDDEN(int) rtR0MemObjNativeLockUser(PPRTR0MEMOBJINTERNAL ppMem, RTR3PTR R3Ptr, size_t cb, uint32_t fAccess, RTR0PROCESS R0Process)
{
    return rtR0MemObjNativeLockInMap(ppMem,
                                     &((struct proc *)R0Process)->p_vmspace->vm_map,
                                     (vm_offset_t)R3Ptr,
                                     cb,
                                     fAccess,
                                     R0Process,
                                     VM_MAP_WIRE_USER | VM_MAP_WIRE_NOHOLES);
}


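/**
 * Locks down (wires) a range of kernel memory.
 *
 * @returns IPRT status code.
 * @param   ppMem               Where to store the ring-0 memory object handle.
 * @param   pv                  The kernel address to lock, page aligned.
 * @param   cb                  Size of the range, page aligned.
 * @param   fAccess             Intended access, a RTMEM_PROT_XXX mask (ignored here).
 */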
DECLHIDDEN(int) rtR0MemObjNativeLockKernel(PPRTR0MEMOBJINTERNAL ppMem, void *pv, size_t cb, uint32_t fAccess)
{
    return rtR0MemObjNativeLockInMap(ppMem,
                                     kernel_map,
                                     (vm_offset_t)pv,
                                     cb,
                                     fAccess,
                                     NIL_RTR0PROCESS,
                                     VM_MAP_WIRE_SYSTEM | VM_MAP_WIRE_NOHOLES);
}


/**
 * Worker for the two virtual address space reservers.
 *
 * We're leaning on the examples provided by mmap and vm_mmap in vm_mmap.c here.
 */
static int rtR0MemObjNativeReserveInMap(PPRTR0MEMOBJINTERNAL ppMem, void *pvFixed, size_t cb, size_t uAlignment, RTR0PROCESS R0Process, vm_map_t pMap)
{
    int rc;

    /*
     * The pvFixed address range must be within the VM space when specified.
     */
    if (   pvFixed != (void *)-1
        && (   (vm_offset_t)pvFixed < vm_map_min(pMap)
            || (vm_offset_t)pvFixed + cb > vm_map_max(pMap)))
        return VERR_INVALID_PARAMETER;

    /*
     * Check that the specified alignment is supported.
     */
    if (uAlignment > PAGE_SIZE)
        return VERR_NOT_SUPPORTED;

    /*
     * Create the object.
     */
    PRTR0MEMOBJFREEBSD pMemFreeBSD = (PRTR0MEMOBJFREEBSD)rtR0MemObjNew(sizeof(*pMemFreeBSD), RTR0MEMOBJTYPE_RES_VIRT, NULL, cb);
    if (!pMemFreeBSD)
        return VERR_NO_MEMORY;

    /*
     * Allocate an empty VM object and map it into the requested map.
     */
    pMemFreeBSD->u.NonPhys.pObject = vm_object_allocate(OBJT_DEFAULT, cb >> PAGE_SHIFT);
    if (pMemFreeBSD->u.NonPhys.pObject)
    {
        vm_offset_t MapAddress = pvFixed != (void *)-1
                               ? (vm_offset_t)pvFixed
                               : vm_map_min(pMap);
        if (pvFixed != (void *)-1)
            vm_map_remove(pMap,
                          MapAddress,
                          MapAddress + cb);

        rc = vm_map_find(pMap,                              /* map */
                         pMemFreeBSD->u.NonPhys.pObject,    /* object */
                         0,                                 /* offset */
                         &MapAddress,                       /* addr (IN/OUT) */
                         cb,                                /* length */
                         pvFixed == (void *)-1,             /* find_space */
                         VM_PROT_NONE,                      /* protection */
                         VM_PROT_ALL,                       /* max(_prot) ?? */
                         0);                                /* cow (copy-on-write) */
        if (rc == KERN_SUCCESS)
        {
            if (R0Process != NIL_RTR0PROCESS)
            {
                rc = vm_map_inherit(pMap,
                                    MapAddress,
                                    MapAddress + cb,
                                    VM_INHERIT_SHARE);
                AssertMsg(rc == KERN_SUCCESS, ("%#x\n", rc));
            }
            pMemFreeBSD->Core.pv = (void *)MapAddress;
            pMemFreeBSD->Core.u.ResVirt.R0Process = R0Process;
            *ppMem = &pMemFreeBSD->Core;
            return VINF_SUCCESS;
        }
        vm_object_deallocate(pMemFreeBSD->u.NonPhys.pObject);
        rc = VERR_NO_MEMORY; /** @todo fix translation (borrow from darwin) */
    }
    else
        rc = VERR_NO_MEMORY;
    rtR0MemObjDelete(&pMemFreeBSD->Core);
    return rc;
}


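/**
 * Reserves a range of kernel virtual address space without backing it.
 *
 * @returns IPRT status code.
 * @param   ppMem               Where to store the ring-0 memory object handle.
 * @param   pvFixed             Requested address, (void *)-1 if any is fine.
 * @param   cb                  Size of the range, page aligned.
 * @param   uAlignment          The requested alignment, at most PAGE_SIZE.
 */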
DECLHIDDEN(int) rtR0MemObjNativeReserveKernel(PPRTR0MEMOBJINTERNAL ppMem, void *pvFixed, size_t cb, size_t uAlignment)
{
    return rtR0MemObjNativeReserveInMap(ppMem, pvFixed, cb, uAlignment, NIL_RTR0PROCESS, kernel_map);
}


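/**
 * Reserves a range of user virtual address space in the given process.
 *
 * @returns IPRT status code.
 * @param   ppMem               Where to store the ring-0 memory object handle.
 * @param   R3PtrFixed          Requested address, (RTR3PTR)-1 if any is fine.
 * @param   cb                  Size of the range, page aligned.
 * @param   uAlignment          The requested alignment, at most PAGE_SIZE.
 * @param   R0Process           The process to reserve the memory in.
 */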
DECLHIDDEN(int) rtR0MemObjNativeReserveUser(PPRTR0MEMOBJINTERNAL ppMem, RTR3PTR R3PtrFixed, size_t cb, size_t uAlignment, RTR0PROCESS R0Process)
{
    return rtR0MemObjNativeReserveInMap(ppMem, (void *)R3PtrFixed, cb, uAlignment, R0Process,
                                        &((struct proc *)R0Process)->p_vmspace->vm_map);
}


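/**
 * Maps a memory object into kernel virtual address space.
 *
 * @returns IPRT status code; currently always VERR_NOT_SUPPORTED as the
 *          implementation is unfinished.
 * @param   ppMem               Where to store the ring-0 memory object handle.
 * @param   pMemToMap           The object to map.
 * @param   pvFixed             Requested address, (void *)-1 if any is fine.
 * @param   uAlignment          The requested alignment, at most PAGE_SIZE.
 * @param   fProt               Protection flags, a RTMEM_PROT_XXX mask.
 * @param   offSub              Offset of the sub-range to map (must be 0).
 * @param   cbSub               Size of the sub-range to map (must be 0).
 */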
DECLHIDDEN(int) rtR0MemObjNativeMapKernel(PPRTR0MEMOBJINTERNAL ppMem, RTR0MEMOBJ pMemToMap, void *pvFixed, size_t uAlignment,
                                          unsigned fProt, size_t offSub, size_t cbSub)
{
    AssertMsgReturn(!offSub && !cbSub, ("%#x %#x\n", offSub, cbSub), VERR_NOT_SUPPORTED);
    AssertMsgReturn(pvFixed == (void *)-1, ("%p\n", pvFixed), VERR_NOT_SUPPORTED);

    /*
     * Check that the specified alignment is supported.
     */
    if (uAlignment > PAGE_SIZE)
        return VERR_NOT_SUPPORTED;

/* Phys: see pmap_mapdev in i386/i386/pmap.c (http://fxr.watson.org/fxr/source/i386/i386/pmap.c?v=RELENG62#L2860) */
/** @todo finish the implementation. */

    return VERR_NOT_SUPPORTED;
}


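/**
 * Maps a memory object into the current process' user address space.
 *
 * @returns IPRT status code.
 * @param   ppMem               Where to store the ring-0 memory object handle.
 * @param   pMemToMap           The object to map.
 * @param   R3PtrFixed          Requested address, (RTR3PTR)-1 if any is fine.
 * @param   uAlignment          The requested alignment, at most PAGE_SIZE.
 * @param   fProt               Protection flags, a RTMEM_PROT_XXX mask.
 * @param   R0Process           The process to map the memory into; must be
 *                              the current process.
 *
 * @see     http://markmail.org/message/udhq33tefgtyfozs
 */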
DECLHIDDEN(int) rtR0MemObjNativeMapUser(PPRTR0MEMOBJINTERNAL ppMem, RTR0MEMOBJ pMemToMap, RTR3PTR R3PtrFixed, size_t uAlignment, unsigned fProt, RTR0PROCESS R0Process)
{
    /*
     * Check for unsupported stuff.
     */
    AssertMsgReturn(R0Process == RTR0ProcHandleSelf(), ("%p != %p\n", R0Process, RTR0ProcHandleSelf()), VERR_NOT_SUPPORTED);
    AssertMsgReturn(R3PtrFixed == (RTR3PTR)-1, ("%p\n", R3PtrFixed), VERR_NOT_SUPPORTED);
    if (uAlignment > PAGE_SIZE)
        return VERR_NOT_SUPPORTED;

    int rc;
    PRTR0MEMOBJFREEBSD pMemToMapFreeBSD = (PRTR0MEMOBJFREEBSD)pMemToMap;
    struct proc       *pProc            = (struct proc *)R0Process;
    struct vm_map     *pProcMap         = &pProc->p_vmspace->vm_map;

    /* calc protection */
    vm_prot_t ProtectionFlags = 0;
    if ((fProt & RTMEM_PROT_NONE) == RTMEM_PROT_NONE)
        ProtectionFlags = VM_PROT_NONE;
    if ((fProt & RTMEM_PROT_READ) == RTMEM_PROT_READ)
        ProtectionFlags |= VM_PROT_READ;
    if ((fProt & RTMEM_PROT_WRITE) == RTMEM_PROT_WRITE)
        ProtectionFlags |= VM_PROT_WRITE;
    if ((fProt & RTMEM_PROT_EXEC) == RTMEM_PROT_EXEC)
        ProtectionFlags |= VM_PROT_EXECUTE;

    /* calc mapping address */
    PROC_LOCK(pProc);
    vm_offset_t AddrR3 = round_page((vm_offset_t)pProc->p_vmspace->vm_daddr + lim_max(pProc, RLIMIT_DATA));
    PROC_UNLOCK(pProc);

    /* Insert the object in the map. */
    rc = vm_map_find(pProcMap,          /* Map to insert the object in */
                     NULL,              /* Object to map */
                     0,                 /* Start offset in the object */
                     &AddrR3,           /* Start address IN/OUT */
                     pMemToMap->cb,     /* Size of the mapping */
                     TRUE,              /* Whether a suitable address should be searched for first */
                     ProtectionFlags,   /* protection flags */
                     VM_PROT_ALL,       /* Maximum protection flags */
                     0);                /* Copy on write */

    /* Map the memory page by page into the destination map. */
    if (rc == KERN_SUCCESS)
    {
        size_t         cPages       = pMemToMap->cb >> PAGE_SHIFT;
        pmap_t         pPhysicalMap = pProcMap->pmap;
        vm_offset_t    AddrR3Dst    = AddrR3;

        if (   pMemToMap->enmType == RTR0MEMOBJTYPE_PHYS
            || pMemToMap->enmType == RTR0MEMOBJTYPE_PHYS_NC
            || pMemToMap->enmType == RTR0MEMOBJTYPE_PAGE)
        {
            /* Mapping physical allocations */
            Assert(cPages == pMemToMapFreeBSD->u.Phys.cPages);

            /* Insert the memory page by page into the mapping. */
            for (uint32_t iPage = 0; iPage < cPages; iPage++)
            {
                vm_page_t pPage = pMemToMapFreeBSD->u.Phys.apPages[iPage];

                MY_PMAP_ENTER(pPhysicalMap, AddrR3Dst, pPage, ProtectionFlags, TRUE);
                AddrR3Dst += PAGE_SIZE;
            }
        }
        else
        {
            /* Mapping cont or low memory types */
            vm_offset_t AddrToMap = (vm_offset_t)pMemToMap->pv;

            for (uint32_t iPage = 0; iPage < cPages; iPage++)
            {
                vm_page_t pPage = PHYS_TO_VM_PAGE(vtophys(AddrToMap));

                MY_PMAP_ENTER(pPhysicalMap, AddrR3Dst, pPage, ProtectionFlags, TRUE);
                AddrR3Dst += PAGE_SIZE;
                AddrToMap += PAGE_SIZE;
            }
        }
    }

    if (rc == KERN_SUCCESS) /* not RT_SUCCESS: positive KERN_* failure codes must not pass. */
    {
        /*
         * Create a mapping object for it.
         */
        PRTR0MEMOBJFREEBSD pMemFreeBSD = (PRTR0MEMOBJFREEBSD)rtR0MemObjNew(sizeof(RTR0MEMOBJFREEBSD),
                                                                           RTR0MEMOBJTYPE_MAPPING,
                                                                           (void *)AddrR3,
                                                                           pMemToMap->cb);
        if (pMemFreeBSD)
        {
            Assert((vm_offset_t)pMemFreeBSD->Core.pv == AddrR3);
            pMemFreeBSD->Core.u.Mapping.R0Process = R0Process;
            *ppMem = &pMemFreeBSD->Core;
            return VINF_SUCCESS;
        }

        rc = vm_map_remove(pProcMap, ((vm_offset_t)AddrR3), ((vm_offset_t)AddrR3) + pMemToMap->cb);
        AssertMsg(rc == KERN_SUCCESS, ("Deleting mapping failed\n"));
    }

    return VERR_NO_MEMORY;
}


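/**
 * Changes the page level protection of a memory object or a sub-range thereof.
 *
 * @returns IPRT status code.
 * @param   pMem                The memory object.
 * @param   offSub              Offset of the sub-range, page aligned.
 * @param   cbSub               Size of the sub-range, page aligned.
 * @param   fProt               New protection, a RTMEM_PROT_XXX mask.
 */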
DECLHIDDEN(int) rtR0MemObjNativeProtect(PRTR0MEMOBJINTERNAL pMem, size_t offSub, size_t cbSub, uint32_t fProt)
{
    vm_prot_t   ProtectionFlags = 0;
    vm_offset_t AddrStart       = (uintptr_t)pMem->pv + offSub;
    vm_offset_t AddrEnd         = AddrStart + cbSub;
    vm_map_t    pVmMap          = rtR0MemObjFreeBSDGetMap(pMem);

    if (!pVmMap)
        return VERR_NOT_SUPPORTED;

    if ((fProt & RTMEM_PROT_NONE) == RTMEM_PROT_NONE)
        ProtectionFlags = VM_PROT_NONE;
    if ((fProt & RTMEM_PROT_READ) == RTMEM_PROT_READ)
        ProtectionFlags |= VM_PROT_READ;
    if ((fProt & RTMEM_PROT_WRITE) == RTMEM_PROT_WRITE)
        ProtectionFlags |= VM_PROT_WRITE;
    if ((fProt & RTMEM_PROT_EXEC) == RTMEM_PROT_EXEC)
        ProtectionFlags |= VM_PROT_EXECUTE;

    int krc = vm_map_protect(pVmMap, AddrStart, AddrEnd, ProtectionFlags, FALSE);
    if (krc == KERN_SUCCESS)
        return VINF_SUCCESS;

    return VERR_NOT_SUPPORTED;
}


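/**
 * Gets the host physical address of a page within a memory object.
 *
 * @returns The physical address, NIL_RTHCPHYS if not available.
 * @param   pMem                The memory object.
 * @param   iPage               The page index.
 */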
DECLHIDDEN(RTHCPHYS) rtR0MemObjNativeGetPagePhysAddr(PRTR0MEMOBJINTERNAL pMem, size_t iPage)
{
    PRTR0MEMOBJFREEBSD pMemFreeBSD = (PRTR0MEMOBJFREEBSD)pMem;

    switch (pMemFreeBSD->Core.enmType)
    {
        case RTR0MEMOBJTYPE_LOCK:
        {
            if (    pMemFreeBSD->Core.u.Lock.R0Process != NIL_RTR0PROCESS
                &&  pMemFreeBSD->Core.u.Lock.R0Process != (RTR0PROCESS)curproc)
            {
                /* later */
                return NIL_RTHCPHYS;
            }

            vm_offset_t pb = (vm_offset_t)pMemFreeBSD->Core.pv + (iPage << PAGE_SHIFT);

            struct proc    *pProc        = (struct proc *)pMemFreeBSD->Core.u.Lock.R0Process;
            struct vm_map  *pProcMap     = &pProc->p_vmspace->vm_map;
            pmap_t          pPhysicalMap = pProcMap->pmap;

            return pmap_extract(pPhysicalMap, pb);
        }

        case RTR0MEMOBJTYPE_MAPPING:
        {
            vm_offset_t pb = (vm_offset_t)pMemFreeBSD->Core.pv + (iPage << PAGE_SHIFT);

            if (pMemFreeBSD->Core.u.Mapping.R0Process != NIL_RTR0PROCESS)
            {
                struct proc    *pProc        = (struct proc *)pMemFreeBSD->Core.u.Mapping.R0Process;
                struct vm_map  *pProcMap     = &pProc->p_vmspace->vm_map;
                pmap_t          pPhysicalMap = pProcMap->pmap;

                return pmap_extract(pPhysicalMap, pb);
            }
            return vtophys(pb);
        }

        case RTR0MEMOBJTYPE_CONT:
            return pMemFreeBSD->Core.u.Cont.Phys + (iPage << PAGE_SHIFT);

        case RTR0MEMOBJTYPE_PHYS:
            return pMemFreeBSD->Core.u.Phys.PhysBase + (iPage << PAGE_SHIFT);

        case RTR0MEMOBJTYPE_PAGE:
        case RTR0MEMOBJTYPE_PHYS_NC:
            return VM_PAGE_TO_PHYS(pMemFreeBSD->u.Phys.apPages[iPage]);

#ifdef USE_KMEM_ALLOC_ATTR
        case RTR0MEMOBJTYPE_LOW:
        {
            vm_offset_t pb = (vm_offset_t)pMemFreeBSD->Core.pv + (iPage << PAGE_SHIFT);
            return vtophys(pb);
        }
#else
        case RTR0MEMOBJTYPE_LOW:
#endif
        case RTR0MEMOBJTYPE_RES_VIRT:
        default:
            return NIL_RTHCPHYS;
    }
}