VirtualBox

source: vbox/trunk/src/VBox/Runtime/r0drv/freebsd/memobj-r0drv-freebsd.c@26899

Last change on this file since 26899 was 26899, checked in by vboxsync, 15 years ago

R0/MemObj/FreeBSD: Various changes

  • Don't wire allocated pages twice in AllocPage
  • More work on the physical page allocation stuff. Still disabled by default because the host sometimes crashes in weird ways after a VM terminates
  • Property svn:eol-style set to native
  • Property svn:keywords set to Author Date Id Revision
File size: 30.7 KB
/* $Id: memobj-r0drv-freebsd.c 26899 2010-02-28 21:44:02Z vboxsync $ */
/** @file
 * IPRT - Ring-0 Memory Objects, FreeBSD.
 */

/*
 * Copyright (c) 2007 knut st. osmundsen <[email protected]>
 *
 * Permission is hereby granted, free of charge, to any person
 * obtaining a copy of this software and associated documentation
 * files (the "Software"), to deal in the Software without
 * restriction, including without limitation the rights to use,
 * copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following
 * conditions:
 *
 * The above copyright notice and this permission notice shall be
 * included in all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
 * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
 * HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
 * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */


/*******************************************************************************
*   Header Files                                                               *
*******************************************************************************/
#include "the-freebsd-kernel.h"

#include <iprt/memobj.h>
#include <iprt/mem.h>
#include <iprt/err.h>
#include <iprt/assert.h>
#include <iprt/log.h>
#include <iprt/param.h>
#include <iprt/process.h>
#include "internal/memobj.h"


/*******************************************************************************
*   Structures and Typedefs                                                    *
*******************************************************************************/
/**
 * The FreeBSD version of the memory object structure.
 */
typedef struct RTR0MEMOBJFREEBSD
{
    /** The core structure. */
    RTR0MEMOBJINTERNAL Core;
    /** Type dependent data */
    union
    {
        /** Non physical memory allocations */
        struct
        {
            /** The VM object associated with the allocation. */
            vm_object_t pObject;
        } NonPhys;
        /** Physical memory allocations */
        struct
        {
            /** Number of pages */
            uint32_t cPages;
            /** Array of pages - variable */
            vm_page_t apPages[1];
        } Phys;
    } u;
} RTR0MEMOBJFREEBSD, *PRTR0MEMOBJFREEBSD;


MALLOC_DEFINE(M_IPRTMOBJ, "iprtmobj", "IPRT - R0MemObj");

/*******************************************************************************
*   Internal Functions                                                         *
*******************************************************************************/

int rtR0MemObjNativeFree(RTR0MEMOBJ pMem)
{
    PRTR0MEMOBJFREEBSD pMemFreeBSD = (PRTR0MEMOBJFREEBSD)pMem;
    int rc;

    switch (pMemFreeBSD->Core.enmType)
    {
        case RTR0MEMOBJTYPE_CONT:
            contigfree(pMemFreeBSD->Core.pv, pMemFreeBSD->Core.cb, M_IPRTMOBJ);
            break;

        case RTR0MEMOBJTYPE_PAGE:
            if (pMemFreeBSD->u.NonPhys.pObject)
            {
                rc = vm_map_remove(kernel_map,
                                   (vm_offset_t)pMemFreeBSD->Core.pv,
                                   (vm_offset_t)pMemFreeBSD->Core.pv + pMemFreeBSD->Core.cb);
                AssertMsg(rc == KERN_SUCCESS, ("%#x", rc));
            }
            else
            {
                contigfree(pMemFreeBSD->Core.pv, pMemFreeBSD->Core.cb, M_IPRTMOBJ);
                rc = vm_map_remove(kernel_map,
                                   (vm_offset_t)pMemFreeBSD->Core.pv,
                                   (vm_offset_t)pMemFreeBSD->Core.pv + pMemFreeBSD->Core.cb);
                AssertMsg(rc == KERN_SUCCESS, ("%#x", rc));
            }
            break;

        case RTR0MEMOBJTYPE_LOCK:
        {
            int fFlags = VM_MAP_WIRE_NOHOLES;
            vm_map_t pMap = kernel_map;

            if (pMemFreeBSD->Core.u.Lock.R0Process != NIL_RTR0PROCESS)
            {
                pMap = &((struct proc *)pMemFreeBSD->Core.u.Lock.R0Process)->p_vmspace->vm_map;
                fFlags |= VM_MAP_WIRE_USER;
            }
            else
                fFlags |= VM_MAP_WIRE_SYSTEM;

            rc = vm_map_unwire(pMap,
                               (vm_offset_t)pMemFreeBSD->Core.pv,
                               (vm_offset_t)pMemFreeBSD->Core.pv + pMemFreeBSD->Core.cb,
                               fFlags);
            AssertMsg(rc == KERN_SUCCESS, ("%#x", rc));
            break;
        }

        case RTR0MEMOBJTYPE_RES_VIRT:
        {
            vm_map_t pMap = kernel_map;
            if (pMemFreeBSD->Core.u.Lock.R0Process != NIL_RTR0PROCESS)
                pMap = &((struct proc *)pMemFreeBSD->Core.u.Lock.R0Process)->p_vmspace->vm_map;
            rc = vm_map_remove(pMap,
                               (vm_offset_t)pMemFreeBSD->Core.pv,
                               (vm_offset_t)pMemFreeBSD->Core.pv + pMemFreeBSD->Core.cb);
            AssertMsg(rc == KERN_SUCCESS, ("%#x", rc));
            break;
        }

        case RTR0MEMOBJTYPE_MAPPING:
        {
            vm_map_t pMap = kernel_map;

            if (pMemFreeBSD->Core.u.Mapping.R0Process != NIL_RTR0PROCESS)
                pMap = &((struct proc *)pMemFreeBSD->Core.u.Mapping.R0Process)->p_vmspace->vm_map;

            rc = vm_map_remove(pMap,
                               (vm_offset_t)pMemFreeBSD->Core.pv,
                               (vm_offset_t)pMemFreeBSD->Core.pv + pMemFreeBSD->Core.cb);
            AssertMsg(rc == KERN_SUCCESS, ("%#x", rc));
            break;
        }

        case RTR0MEMOBJTYPE_PHYS:
        case RTR0MEMOBJTYPE_PHYS_NC:
        {
            for (uint32_t iPage = 0; iPage < pMemFreeBSD->u.Phys.cPages; iPage++)
                vm_page_free_toq(pMemFreeBSD->u.Phys.apPages[iPage]);
            break;
        }

        /* unused: */
        case RTR0MEMOBJTYPE_LOW:
        default:
            AssertMsgFailed(("enmType=%d\n", pMemFreeBSD->Core.enmType));
            return VERR_INTERNAL_ERROR;
    }

    return VINF_SUCCESS;
}


int rtR0MemObjNativeAllocPage(PPRTR0MEMOBJINTERNAL ppMem, size_t cb, bool fExecutable)
{
    int rc;
    size_t cPages = cb >> PAGE_SHIFT;

    /* create the object. */
    PRTR0MEMOBJFREEBSD pMemFreeBSD = (PRTR0MEMOBJFREEBSD)rtR0MemObjNew(sizeof(*pMemFreeBSD), RTR0MEMOBJTYPE_PAGE, NULL, cb);
    if (!pMemFreeBSD)
        return VERR_NO_MEMORY;

    pMemFreeBSD->u.NonPhys.pObject = vm_object_allocate(OBJT_DEFAULT, cPages);
    if (pMemFreeBSD->u.NonPhys.pObject)
    {
        vm_offset_t MapAddress = vm_map_min(kernel_map);
        rc = vm_map_find(kernel_map,                     /* map */
                         pMemFreeBSD->u.NonPhys.pObject, /* object */
                         0,                              /* offset */
                         &MapAddress,                    /* addr (IN/OUT) */
                         cb,                             /* length */
                         TRUE,                           /* find_space */
                         fExecutable                     /* protection */
                         ? VM_PROT_ALL
                         : VM_PROT_RW,
                         VM_PROT_ALL,                    /* max(_prot) */
                         FALSE);                         /* cow (copy-on-write) */
        if (rc == KERN_SUCCESS)
        {
            rc = VINF_SUCCESS;

            VM_OBJECT_LOCK(pMemFreeBSD->u.NonPhys.pObject);
            for (size_t iPage = 0; iPage < cPages; iPage++)
            {
                vm_page_t pPage;

                pPage = vm_page_alloc(pMemFreeBSD->u.NonPhys.pObject, iPage,
                                      VM_ALLOC_NOBUSY | VM_ALLOC_SYSTEM |
                                      VM_ALLOC_WIRED);

                if (!pPage)
                {
                    /*
                     * Out of pages.
                     * Remove the pages we already allocated.
                     */
                    while (iPage-- > 0)
                    {
                        vm_map_lock(kernel_map);
                        pPage = vm_page_lookup(pMemFreeBSD->u.NonPhys.pObject, iPage);
                        vm_page_lock_queues();
                        vm_page_unwire(pPage, 0);
                        vm_page_free(pPage);
                        vm_page_unlock_queues();
                        vm_map_unlock(kernel_map); /* balance the vm_map_lock taken above */
                    }
                    rc = VERR_NO_MEMORY;
                    break;
                }

                pPage->valid = VM_PAGE_BITS_ALL;
            }
            VM_OBJECT_UNLOCK(pMemFreeBSD->u.NonPhys.pObject);

            if (rc == VINF_SUCCESS)
            {
                vm_map_entry_t pMapEntry;
                boolean_t fEntryFound;

                fEntryFound = vm_map_lookup_entry(kernel_map, MapAddress, &pMapEntry);
                if (fEntryFound)
                {
                    pMapEntry->wired_count = 1;
                    vm_map_simplify_entry(kernel_map, pMapEntry);

                    /* Put the pages into the page table now. */
                    VM_OBJECT_LOCK(pMemFreeBSD->u.NonPhys.pObject);
                    vm_offset_t AddressDst = MapAddress;

                    for (size_t iPage = 0; iPage < cPages; iPage++)
                    {
                        vm_page_t pPage;

                        pPage = vm_page_lookup(pMemFreeBSD->u.NonPhys.pObject, iPage);

#if __FreeBSD_version >= 701105
                        pmap_enter(kernel_map->pmap, AddressDst, VM_PROT_NONE, pPage,
                                   fExecutable
                                   ? VM_PROT_ALL
                                   : VM_PROT_RW,
                                   TRUE);
#else
                        pmap_enter(kernel_map->pmap, AddressDst, pPage,
                                   fExecutable
                                   ? VM_PROT_ALL
                                   : VM_PROT_RW,
                                   TRUE);
#endif

                        AddressDst += PAGE_SIZE;
                    }
                    VM_OBJECT_UNLOCK(pMemFreeBSD->u.NonPhys.pObject);

                    /* Store start address */
                    pMemFreeBSD->Core.pv = (void *)MapAddress;
                    *ppMem = &pMemFreeBSD->Core;
                    return VINF_SUCCESS;
                }
                else
                {
                    AssertFailed();
                }
            }
        }
        rc = VERR_NO_MEMORY; /** @todo fix translation (borrow from darwin) */
    }
    else
        rc = VERR_NO_MEMORY;

    rtR0MemObjDelete(&pMemFreeBSD->Core);
    return rc;
}


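/*
 * Illustrative sketch only (hedged; not code from this revision): ring-0
 * callers reach the page allocator above through the generic IPRT front-end
 * declared in iprt/memobj.h, roughly like this:
 *
 *     RTR0MEMOBJ hMemObj;
 *     int rc = RTR0MemObjAllocPage(&hMemObj, 4 * PAGE_SIZE, false);
 *     if (RT_SUCCESS(rc))
 *     {
 *         void *pv = RTR0MemObjAddress(hMemObj);
 *         ...
 *         RTR0MemObjFree(hMemObj, false);
 *     }
 */
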
int rtR0MemObjNativeAllocLow(PPRTR0MEMOBJINTERNAL ppMem, size_t cb, bool fExecutable)
{
    /*
     * Try an AllocPage first and see if we get lucky; if not, try
     * contigmalloc. Might wish to try find our own pages or something later
     * if this turns into a problem spot on AMD64 boxes.
     */
    int rc = rtR0MemObjNativeAllocPage(ppMem, cb, fExecutable);
    if (RT_SUCCESS(rc))
    {
        size_t iPage = cb >> PAGE_SHIFT;
        while (iPage-- > 0)
            if (rtR0MemObjNativeGetPagePhysAddr(*ppMem, iPage) > (_4G - PAGE_SIZE))
            {
                RTR0MemObjFree(*ppMem, false);
                *ppMem = NULL;
                rc = VERR_NO_MEMORY;
                break;
            }
    }
    if (RT_FAILURE(rc))
        rc = rtR0MemObjNativeAllocCont(ppMem, cb, fExecutable);
    return rc;
}


int rtR0MemObjNativeAllocCont(PPRTR0MEMOBJINTERNAL ppMem, size_t cb, bool fExecutable)
{
    /* create the object. */
    PRTR0MEMOBJFREEBSD pMemFreeBSD = (PRTR0MEMOBJFREEBSD)rtR0MemObjNew(sizeof(*pMemFreeBSD), RTR0MEMOBJTYPE_CONT, NULL, cb);
    if (!pMemFreeBSD)
        return VERR_NO_MEMORY;

    /* do the allocation. */
    pMemFreeBSD->Core.pv = contigmalloc(cb,                /* size */
                                        M_IPRTMOBJ,        /* type */
                                        M_NOWAIT | M_ZERO, /* flags */
                                        0,                 /* lowest physical address */
                                        _4G-1,             /* highest physical address */
                                        PAGE_SIZE,         /* alignment */
                                        0);                /* boundary */
    if (pMemFreeBSD->Core.pv)
    {
        pMemFreeBSD->Core.u.Cont.Phys = vtophys(pMemFreeBSD->Core.pv);
        *ppMem = &pMemFreeBSD->Core;
        return VINF_SUCCESS;
    }

    NOREF(fExecutable);
    rtR0MemObjDelete(&pMemFreeBSD->Core);
    return VERR_NO_MEMORY;
}

static int rtR0MemObjFreeBSDAllocPhysPages(PPRTR0MEMOBJINTERNAL ppMem, RTR0MEMOBJTYPE enmType,
                                           size_t cb,
                                           RTHCPHYS PhysHighest, size_t uAlignment,
                                           bool fContiguous)
{
    int rc = VINF_SUCCESS;
    uint32_t cPages = cb >> PAGE_SHIFT;
    vm_paddr_t VmPhysAddrHigh;

    /* create the object. */
    PRTR0MEMOBJFREEBSD pMemFreeBSD = (PRTR0MEMOBJFREEBSD)rtR0MemObjNew(RT_OFFSETOF(RTR0MEMOBJFREEBSD, u.Phys.apPages[cPages]),
                                                                       enmType, NULL, cb);
    if (!pMemFreeBSD)
        return VERR_NO_MEMORY;

    pMemFreeBSD->u.Phys.cPages = cPages;

    if (PhysHighest != NIL_RTHCPHYS)
        VmPhysAddrHigh = PhysHighest;
    else
        VmPhysAddrHigh = ~(vm_paddr_t)0;

    if (fContiguous)
    {
        vm_page_t pPage = vm_phys_alloc_contig(cPages, 0, VmPhysAddrHigh, uAlignment, 0);

        if (pPage)
            for (uint32_t iPage = 0; iPage < cPages; iPage++)
                pMemFreeBSD->u.Phys.apPages[iPage] = &pPage[iPage];
        else
            rc = VERR_NO_MEMORY;
    }
    else
    {
        /* Allocate page by page */
        for (uint32_t iPage = 0; iPage < cPages; iPage++)
        {
            vm_page_t pPage = vm_phys_alloc_contig(1, 0, VmPhysAddrHigh, uAlignment, 0);

            if (!pPage)
            {
                /* Free all allocated pages */
                while (iPage-- > 0)
                    vm_page_free_toq(pMemFreeBSD->u.Phys.apPages[iPage]);
                rc = VERR_NO_MEMORY;
                break;
            }
            pMemFreeBSD->u.Phys.apPages[iPage] = pPage;
        }
    }

    if (RT_FAILURE(rc))
        rtR0MemObjDelete(&pMemFreeBSD->Core);
    else
    {
        if (enmType == RTR0MEMOBJTYPE_PHYS)
        {
            pMemFreeBSD->Core.u.Phys.PhysBase = VM_PAGE_TO_PHYS(pMemFreeBSD->u.Phys.apPages[0]);
            pMemFreeBSD->Core.u.Phys.fAllocated = true;
        }

        *ppMem = &pMemFreeBSD->Core;
    }

    return rc;
}

int rtR0MemObjNativeAllocPhys(PPRTR0MEMOBJINTERNAL ppMem, size_t cb, RTHCPHYS PhysHighest, size_t uAlignment)
{
#if 0
    return rtR0MemObjFreeBSDAllocPhysPages(ppMem, RTR0MEMOBJTYPE_PHYS, cb, PhysHighest, uAlignment, true);
#else
    /* create the object. */
    PRTR0MEMOBJFREEBSD pMemFreeBSD = (PRTR0MEMOBJFREEBSD)rtR0MemObjNew(sizeof(*pMemFreeBSD), RTR0MEMOBJTYPE_CONT, NULL, cb);
    if (!pMemFreeBSD)
        return VERR_NO_MEMORY;

    /* do the allocation. */
    pMemFreeBSD->Core.pv = contigmalloc(cb,                /* size */
                                        M_IPRTMOBJ,        /* type */
                                        M_NOWAIT | M_ZERO, /* flags */
                                        0,                 /* lowest physical address */
                                        _4G-1,             /* highest physical address */
                                        uAlignment,        /* alignment */
                                        0);                /* boundary */
    if (pMemFreeBSD->Core.pv)
    {
        pMemFreeBSD->Core.u.Cont.Phys = vtophys(pMemFreeBSD->Core.pv);
        *ppMem = &pMemFreeBSD->Core;
        return VINF_SUCCESS;
    }

    rtR0MemObjDelete(&pMemFreeBSD->Core);
    return VERR_NO_MEMORY;
#endif
}

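/*
 * Hedged caller-side sketch (not code from this revision): the generic
 * front-end for the routine above is RTR0MemObjAllocPhys() from
 * iprt/memobj.h, and the result can then be inspected page by page:
 *
 *     RTR0MEMOBJ hMemObj;
 *     int rc = RTR0MemObjAllocPhys(&hMemObj, _2M, _4G - 1);
 *     if (RT_SUCCESS(rc))
 *     {
 *         RTHCPHYS Phys = RTR0MemObjGetPagePhysAddr(hMemObj, 0);
 *         ...
 *     }
 */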

int rtR0MemObjNativeAllocPhysNC(PPRTR0MEMOBJINTERNAL ppMem, size_t cb, RTHCPHYS PhysHighest)
{
#if 0
    return rtR0MemObjFreeBSDAllocPhysPages(ppMem, RTR0MEMOBJTYPE_PHYS_NC, cb, PhysHighest, PAGE_SIZE, false);
#else
    return VERR_NOT_SUPPORTED;
#endif
}


int rtR0MemObjNativeEnterPhys(PPRTR0MEMOBJINTERNAL ppMem, RTHCPHYS Phys, size_t cb)
{
    /* create the object. */
    PRTR0MEMOBJFREEBSD pMemFreeBSD = (PRTR0MEMOBJFREEBSD)rtR0MemObjNew(sizeof(*pMemFreeBSD), RTR0MEMOBJTYPE_PHYS, NULL, cb);
    if (!pMemFreeBSD)
        return VERR_NO_MEMORY;

    /* there is no allocation here, it needs to be mapped somewhere first. */
    pMemFreeBSD->Core.u.Phys.fAllocated = false;
    pMemFreeBSD->Core.u.Phys.PhysBase = Phys;
    *ppMem = &pMemFreeBSD->Core;
    return VINF_SUCCESS;
}


int rtR0MemObjNativeLockUser(PPRTR0MEMOBJINTERNAL ppMem, RTR3PTR R3Ptr, size_t cb, uint32_t fAccess, RTR0PROCESS R0Process)
{
    int rc;
    NOREF(fAccess);

    /* create the object. */
    PRTR0MEMOBJFREEBSD pMemFreeBSD = (PRTR0MEMOBJFREEBSD)rtR0MemObjNew(sizeof(*pMemFreeBSD), RTR0MEMOBJTYPE_LOCK, (void *)R3Ptr, cb);
    if (!pMemFreeBSD)
        return VERR_NO_MEMORY;

    /*
     * We could've used vslock here, but we don't wish to be subject to
     * resource usage restrictions, so we'll call vm_map_wire directly.
     */
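    /* For reference, the vslock(9) route rejected above would be roughly
     * (hedged sketch; vslock is a real FreeBSD interface, but it enforces
     * the per-process locked-memory limits this function wants to avoid):
     *     rc = vslock((void *)R3Ptr, cb);
     */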
    rc = vm_map_wire(&((struct proc *)R0Process)->p_vmspace->vm_map, /* the map */
                     (vm_offset_t)R3Ptr,                             /* start */
                     (vm_offset_t)R3Ptr + cb,                        /* end */
                     VM_MAP_WIRE_USER | VM_MAP_WIRE_NOHOLES);        /* flags */
    if (rc == KERN_SUCCESS)
    {
        pMemFreeBSD->Core.u.Lock.R0Process = R0Process;
        *ppMem = &pMemFreeBSD->Core;
        return VINF_SUCCESS;
    }
    rtR0MemObjDelete(&pMemFreeBSD->Core);
    return VERR_NO_MEMORY; /** @todo fix mach -> vbox error conversion for freebsd. */
}


int rtR0MemObjNativeLockKernel(PPRTR0MEMOBJINTERNAL ppMem, void *pv, size_t cb, uint32_t fAccess)
{
    int rc;
    NOREF(fAccess);

    /* create the object. */
    PRTR0MEMOBJFREEBSD pMemFreeBSD = (PRTR0MEMOBJFREEBSD)rtR0MemObjNew(sizeof(*pMemFreeBSD), RTR0MEMOBJTYPE_LOCK, pv, cb);
    if (!pMemFreeBSD)
        return VERR_NO_MEMORY;

    /* lock the memory */
    rc = vm_map_wire(kernel_map,                                /* the map */
                     (vm_offset_t)pv,                           /* start */
                     (vm_offset_t)pv + cb,                      /* end */
                     VM_MAP_WIRE_SYSTEM | VM_MAP_WIRE_NOHOLES); /* flags - SYSTEM? */
    if (rc == KERN_SUCCESS)
    {
        pMemFreeBSD->Core.u.Lock.R0Process = NIL_RTR0PROCESS;
        *ppMem = &pMemFreeBSD->Core;
        return VINF_SUCCESS;
    }
    rtR0MemObjDelete(&pMemFreeBSD->Core);
    return VERR_NO_MEMORY; /** @todo fix mach -> vbox error conversion for freebsd. */
}


/**
 * Worker for the two virtual address space reservers.
 *
 * We're leaning on the examples provided by mmap and vm_mmap in vm_mmap.c here.
 */
static int rtR0MemObjNativeReserveInMap(PPRTR0MEMOBJINTERNAL ppMem, void *pvFixed, size_t cb, size_t uAlignment, RTR0PROCESS R0Process, vm_map_t pMap)
{
    int rc;

    /*
     * The pvFixed address range must be within the VM space when specified.
     */
    if (   pvFixed != (void *)-1
        && (   (vm_offset_t)pvFixed < vm_map_min(pMap)
            || (vm_offset_t)pvFixed + cb > vm_map_max(pMap)))
        return VERR_INVALID_PARAMETER;

    /*
     * Check that the specified alignment is supported.
     */
    if (uAlignment > PAGE_SIZE)
        return VERR_NOT_SUPPORTED;

    /*
     * Create the object.
     */
    PRTR0MEMOBJFREEBSD pMemFreeBSD = (PRTR0MEMOBJFREEBSD)rtR0MemObjNew(sizeof(*pMemFreeBSD), RTR0MEMOBJTYPE_RES_VIRT, NULL, cb);
    if (!pMemFreeBSD)
        return VERR_NO_MEMORY;

    /*
     * Allocate an empty VM object and map it into the requested map.
     */
    pMemFreeBSD->u.NonPhys.pObject = vm_object_allocate(OBJT_DEFAULT, cb >> PAGE_SHIFT);
    if (pMemFreeBSD->u.NonPhys.pObject)
    {
        vm_offset_t MapAddress = pvFixed != (void *)-1
                               ? (vm_offset_t)pvFixed
                               : vm_map_min(pMap);
        if (pvFixed != (void *)-1)
            vm_map_remove(pMap,
                          MapAddress,
                          MapAddress + cb);

        rc = vm_map_find(pMap,                           /* map */
                         pMemFreeBSD->u.NonPhys.pObject, /* object */
                         0,                              /* offset */
                         &MapAddress,                    /* addr (IN/OUT) */
                         cb,                             /* length */
                         pvFixed == (void *)-1,          /* find_space */
                         VM_PROT_NONE,                   /* protection */
                         VM_PROT_ALL,                    /* max(_prot) ?? */
                         0);                             /* cow (copy-on-write) */
        if (rc == KERN_SUCCESS)
        {
            if (R0Process != NIL_RTR0PROCESS)
            {
                rc = vm_map_inherit(pMap,
                                    MapAddress,
                                    MapAddress + cb,
                                    VM_INHERIT_SHARE);
                AssertMsg(rc == KERN_SUCCESS, ("%#x\n", rc));
            }
            pMemFreeBSD->Core.pv = (void *)MapAddress;
            pMemFreeBSD->Core.u.ResVirt.R0Process = R0Process;
            *ppMem = &pMemFreeBSD->Core;
            return VINF_SUCCESS;
        }
        vm_object_deallocate(pMemFreeBSD->u.NonPhys.pObject);
        rc = VERR_NO_MEMORY; /** @todo fix translation (borrow from darwin) */
    }
    else
        rc = VERR_NO_MEMORY;
    rtR0MemObjDelete(&pMemFreeBSD->Core);
    return rc;
}


int rtR0MemObjNativeReserveKernel(PPRTR0MEMOBJINTERNAL ppMem, void *pvFixed, size_t cb, size_t uAlignment)
{
    return rtR0MemObjNativeReserveInMap(ppMem, pvFixed, cb, uAlignment, NIL_RTR0PROCESS, kernel_map);
}


int rtR0MemObjNativeReserveUser(PPRTR0MEMOBJINTERNAL ppMem, RTR3PTR R3PtrFixed, size_t cb, size_t uAlignment, RTR0PROCESS R0Process)
{
    return rtR0MemObjNativeReserveInMap(ppMem, (void *)R3PtrFixed, cb, uAlignment, R0Process,
                                        &((struct proc *)R0Process)->p_vmspace->vm_map);
}


int rtR0MemObjNativeMapKernel(PPRTR0MEMOBJINTERNAL ppMem, RTR0MEMOBJ pMemToMap, void *pvFixed, size_t uAlignment,
                              unsigned fProt, size_t offSub, size_t cbSub)
{
    AssertMsgReturn(!offSub && !cbSub, ("%#x %#x\n", offSub, cbSub), VERR_NOT_SUPPORTED);
    AssertMsgReturn(pvFixed == (void *)-1, ("%p\n", pvFixed), VERR_NOT_SUPPORTED);

    /*
     * Check that the specified alignment is supported.
     */
    if (uAlignment > PAGE_SIZE)
        return VERR_NOT_SUPPORTED;

/* Phys: see pmap_mapdev in i386/i386/pmap.c (http://fxr.watson.org/fxr/source/i386/i386/pmap.c?v=RELENG62#L2860) */
/** @todo finish the implementation. */
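/* One possible shape for the RTR0MEMOBJTYPE_PHYS case, following the
 * pmap_mapdev() route referenced above (hedged, untested sketch; an
 * assumption, not code from this revision):
 *     void *pv = pmap_mapdev(((PRTR0MEMOBJFREEBSD)pMemToMap)->Core.u.Phys.PhysBase, pMemToMap->cb);
 * The result would then be wrapped in an RTR0MEMOBJTYPE_MAPPING object much
 * like rtR0MemObjNativeMapUser() does below. */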

    return VERR_NOT_IMPLEMENTED;
}


/* see http://markmail.org/message/udhq33tefgtyfozs */
int rtR0MemObjNativeMapUser(PPRTR0MEMOBJINTERNAL ppMem, RTR0MEMOBJ pMemToMap, RTR3PTR R3PtrFixed, size_t uAlignment, unsigned fProt, RTR0PROCESS R0Process)
{
    /*
     * Check for unsupported stuff.
     */
    AssertMsgReturn(R0Process == RTR0ProcHandleSelf(), ("%p != %p\n", R0Process, RTR0ProcHandleSelf()), VERR_NOT_SUPPORTED);
    AssertMsgReturn(R3PtrFixed == (RTR3PTR)-1, ("%p\n", R3PtrFixed), VERR_NOT_SUPPORTED);
    if (uAlignment > PAGE_SIZE)
        return VERR_NOT_SUPPORTED;

    int rc;
    PRTR0MEMOBJFREEBSD pMemToMapFreeBSD = (PRTR0MEMOBJFREEBSD)pMemToMap;
    struct proc *pProc = (struct proc *)R0Process;
    struct vm_map *pProcMap = &pProc->p_vmspace->vm_map;

    /* calc protection */
    vm_prot_t ProtectionFlags = 0;
    if ((fProt & RTMEM_PROT_NONE) == RTMEM_PROT_NONE)
        ProtectionFlags = VM_PROT_NONE;
    if ((fProt & RTMEM_PROT_READ) == RTMEM_PROT_READ)
        ProtectionFlags |= VM_PROT_READ;
    if ((fProt & RTMEM_PROT_WRITE) == RTMEM_PROT_WRITE)
        ProtectionFlags |= VM_PROT_WRITE;
    if ((fProt & RTMEM_PROT_EXEC) == RTMEM_PROT_EXEC)
        ProtectionFlags |= VM_PROT_EXECUTE;

    /* calc mapping address */
    PROC_LOCK(pProc);
    vm_offset_t AddrR3 = round_page((vm_offset_t)pProc->p_vmspace->vm_daddr + lim_max(pProc, RLIMIT_DATA));
    PROC_UNLOCK(pProc);

    vm_object_t pObjectNew = vm_object_allocate(OBJT_DEFAULT, pMemToMap->cb >> PAGE_SHIFT);
    if (RT_UNLIKELY(!pObjectNew))
        return VERR_NO_MEMORY;

    /* Insert the object in the map. */
    rc = vm_map_find(pProcMap,        /* Map to insert the object in */
                     pObjectNew,      /* Object to map */
                     0,               /* Start offset in the object */
                     &AddrR3,         /* Start address IN/OUT */
                     pMemToMap->cb,   /* Size of the mapping */
                     TRUE,            /* Whether a suitable address should be searched for first */
                     ProtectionFlags, /* protection flags */
                     VM_PROT_ALL,     /* Maximum protection flags */
                     0);              /* Copy on write */

    /* Map the memory page by page into the destination map. */
    if (rc == KERN_SUCCESS)
    {
        size_t cPages = pMemToMap->cb >> PAGE_SHIFT;
        pmap_t pPhysicalMap = pProcMap->pmap;
        vm_offset_t AddrR3Dst = AddrR3;

        if (   pMemToMap->enmType == RTR0MEMOBJTYPE_PHYS
            || pMemToMap->enmType == RTR0MEMOBJTYPE_PHYS_NC)
        {
            /* Mapping physical allocations */
            Assert(cPages == pMemToMapFreeBSD->u.Phys.cPages);

            /* Insert the memory page by page into the mapping. */
            for (uint32_t iPage = 0; iPage < cPages; iPage++)
            {
                vm_page_t pPage = pMemToMapFreeBSD->u.Phys.apPages[iPage];

#if __FreeBSD_version >= 701105
                pmap_enter(pPhysicalMap, AddrR3Dst, VM_PROT_NONE, pPage, ProtectionFlags, TRUE);
#else
                pmap_enter(pPhysicalMap, AddrR3Dst, pPage, ProtectionFlags, TRUE);
#endif
                AddrR3Dst += PAGE_SIZE;
            }
        }
        else if (pMemToMapFreeBSD->u.NonPhys.pObject)
        {
            /* Mapping page memory object */
            VM_OBJECT_LOCK(pMemToMapFreeBSD->u.NonPhys.pObject);

            /* Insert the memory page by page into the mapping. */
            for (uint32_t iPage = 0; iPage < cPages; iPage++)
            {
                vm_page_t pPage = vm_page_lookup(pMemToMapFreeBSD->u.NonPhys.pObject, iPage);

#if __FreeBSD_version >= 701105
                pmap_enter(pPhysicalMap, AddrR3Dst, VM_PROT_NONE, pPage, ProtectionFlags, TRUE);
#else
                pmap_enter(pPhysicalMap, AddrR3Dst, pPage, ProtectionFlags, TRUE);
#endif
                AddrR3Dst += PAGE_SIZE;
            }
            VM_OBJECT_UNLOCK(pMemToMapFreeBSD->u.NonPhys.pObject);
        }
        else
        {
            /* Mapping cont or low memory types */
            vm_offset_t AddrToMap = (vm_offset_t)pMemToMap->pv;

            for (uint32_t iPage = 0; iPage < cPages; iPage++)
            {
                vm_page_t pPage = PHYS_TO_VM_PAGE(vtophys(AddrToMap));

#if __FreeBSD_version >= 701105
                pmap_enter(pPhysicalMap, AddrR3Dst, VM_PROT_NONE, pPage, ProtectionFlags, TRUE);
#else
                pmap_enter(pPhysicalMap, AddrR3Dst, pPage, ProtectionFlags, TRUE);
#endif
                AddrR3Dst += PAGE_SIZE;
                AddrToMap += PAGE_SIZE;
            }
        }
    }

    if (rc == KERN_SUCCESS)
    {
        /*
         * Create a mapping object for it.
         */
        PRTR0MEMOBJFREEBSD pMemFreeBSD = (PRTR0MEMOBJFREEBSD)rtR0MemObjNew(sizeof(RTR0MEMOBJFREEBSD),
                                                                           RTR0MEMOBJTYPE_MAPPING,
                                                                           (void *)AddrR3,
                                                                           pMemToMap->cb);
        if (pMemFreeBSD)
        {
            Assert((vm_offset_t)pMemFreeBSD->Core.pv == AddrR3);
            pMemFreeBSD->u.NonPhys.pObject = pObjectNew;
            pMemFreeBSD->Core.u.Mapping.R0Process = R0Process;
            *ppMem = &pMemFreeBSD->Core;
            return VINF_SUCCESS;
        }

        rc = vm_map_remove(pProcMap, ((vm_offset_t)AddrR3), ((vm_offset_t)AddrR3) + pMemToMap->cb);
        AssertMsg(rc == KERN_SUCCESS, ("Deleting mapping failed\n"));
    }
    else
        vm_object_deallocate(pObjectNew);

    return VERR_NO_MEMORY;
}


int rtR0MemObjNativeProtect(PRTR0MEMOBJINTERNAL pMem, size_t offSub, size_t cbSub, uint32_t fProt)
{
    NOREF(pMem);
    NOREF(offSub);
    NOREF(cbSub);
    NOREF(fProt);
    return VERR_NOT_SUPPORTED;
}


RTHCPHYS rtR0MemObjNativeGetPagePhysAddr(PRTR0MEMOBJINTERNAL pMem, size_t iPage)
{
    PRTR0MEMOBJFREEBSD pMemFreeBSD = (PRTR0MEMOBJFREEBSD)pMem;

    switch (pMemFreeBSD->Core.enmType)
    {
        case RTR0MEMOBJTYPE_LOCK:
        {
            if (   pMemFreeBSD->Core.u.Lock.R0Process != NIL_RTR0PROCESS
                && pMemFreeBSD->Core.u.Lock.R0Process != (RTR0PROCESS)curproc)
            {
                /* later */
                return NIL_RTHCPHYS;
            }

            vm_offset_t pb = (vm_offset_t)pMemFreeBSD->Core.pv + (iPage << PAGE_SHIFT);

            struct proc *pProc = (struct proc *)pMemFreeBSD->Core.u.Lock.R0Process;
            struct vm_map *pProcMap = &pProc->p_vmspace->vm_map;
            pmap_t pPhysicalMap = pProcMap->pmap;

            return pmap_extract(pPhysicalMap, pb);
        }

        case RTR0MEMOBJTYPE_PAGE:
        {
            vm_offset_t pb = (vm_offset_t)pMemFreeBSD->Core.pv + (iPage << PAGE_SHIFT);
            return vtophys(pb);
        }

        case RTR0MEMOBJTYPE_MAPPING:
        {
            vm_offset_t pb = (vm_offset_t)pMemFreeBSD->Core.pv + (iPage << PAGE_SHIFT);

            if (pMemFreeBSD->Core.u.Mapping.R0Process != NIL_RTR0PROCESS)
            {
                struct proc *pProc = (struct proc *)pMemFreeBSD->Core.u.Mapping.R0Process;
                struct vm_map *pProcMap = &pProc->p_vmspace->vm_map;
                pmap_t pPhysicalMap = pProcMap->pmap;

                return pmap_extract(pPhysicalMap, pb);
            }
            return vtophys(pb);
        }

        case RTR0MEMOBJTYPE_CONT:
            return pMemFreeBSD->Core.u.Cont.Phys + (iPage << PAGE_SHIFT);

        case RTR0MEMOBJTYPE_PHYS:
            return pMemFreeBSD->Core.u.Phys.PhysBase + (iPage << PAGE_SHIFT);

        case RTR0MEMOBJTYPE_PHYS_NC:
        {
            return VM_PAGE_TO_PHYS(pMemFreeBSD->u.Phys.apPages[iPage]);
        }

        case RTR0MEMOBJTYPE_RES_VIRT:
        case RTR0MEMOBJTYPE_LOW:
        default:
            return NIL_RTHCPHYS;
    }
}