VirtualBox

source: vbox/trunk/src/VBox/Runtime/r0drv/freebsd/memobj-r0drv-freebsd.c

Last change on this file was 106061, checked in by vboxsync, 3 months ago

Copyright year updates by scm.

  • Property svn:eol-style set to native
  • Property svn:keywords set to Author Date Id Revision
File size: 34.9 KB
Line 
1/* $Id: memobj-r0drv-freebsd.c 106061 2024-09-16 14:03:52Z vboxsync $ */
2/** @file
3 * IPRT - Ring-0 Memory Objects, FreeBSD.
4 */
5
6/*
7 * Contributed by knut st. osmundsen, Andriy Gapon.
8 *
9 * Copyright (C) 2007-2024 Oracle and/or its affiliates.
10 *
11 * This file is part of VirtualBox base platform packages, as
12 * available from https://www.virtualbox.org.
13 *
14 * This program is free software; you can redistribute it and/or
15 * modify it under the terms of the GNU General Public License
16 * as published by the Free Software Foundation, in version 3 of the
17 * License.
18 *
19 * This program is distributed in the hope that it will be useful, but
20 * WITHOUT ANY WARRANTY; without even the implied warranty of
21 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
22 * General Public License for more details.
23 *
24 * You should have received a copy of the GNU General Public License
25 * along with this program; if not, see <https://www.gnu.org/licenses>.
26 *
27 * The contents of this file may alternatively be used under the terms
28 * of the Common Development and Distribution License Version 1.0
29 * (CDDL), a copy of it is provided in the "COPYING.CDDL" file included
30 * in the VirtualBox distribution, in which case the provisions of the
31 * CDDL are applicable instead of those of the GPL.
32 *
33 * You may elect to license modified versions of this file under the
34 * terms and conditions of either the GPL or the CDDL or both.
35 *
36 * SPDX-License-Identifier: GPL-3.0-only OR CDDL-1.0
37 * --------------------------------------------------------------------
38 *
39 * This code is based on:
40 *
41 * Copyright (c) 2007 knut st. osmundsen <[email protected]>
42 * Copyright (c) 2011 Andriy Gapon <[email protected]>
43 *
44 * Permission is hereby granted, free of charge, to any person
45 * obtaining a copy of this software and associated documentation
46 * files (the "Software"), to deal in the Software without
47 * restriction, including without limitation the rights to use,
48 * copy, modify, merge, publish, distribute, sublicense, and/or sell
49 * copies of the Software, and to permit persons to whom the
50 * Software is furnished to do so, subject to the following
51 * conditions:
52 *
53 * The above copyright notice and this permission notice shall be
54 * included in all copies or substantial portions of the Software.
55 *
56 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
57 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
58 * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
59 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
60 * HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
61 * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
62 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
63 * OTHER DEALINGS IN THE SOFTWARE.
64 */
65
66
67/*********************************************************************************************************************************
68* Header Files *
69*********************************************************************************************************************************/
70#include "the-freebsd-kernel.h"
71
72#include <iprt/memobj.h>
73#include <iprt/mem.h>
74#include <iprt/err.h>
75#include <iprt/assert.h>
76#include <iprt/log.h>
77#include <iprt/param.h>
78#include <iprt/process.h>
79#include <iprt/string.h>
80#include "internal/memobj.h"
81
82
83/*********************************************************************************************************************************
84* Structures and Typedefs *
85*********************************************************************************************************************************/
/**
 * The FreeBSD version of the memory object structure.
 */
typedef struct RTR0MEMOBJFREEBSD
{
    /** The core structure. */
    RTR0MEMOBJINTERNAL Core;
    /** The VM object associated with the allocation.
     * Backs PAGE/LOW/CONT/PHYS/PHYS_NC allocations; its reference is dropped
     * (directly or via vm_map_remove) in rtR0MemObjNativeFree. */
    vm_object_t pObject;
} RTR0MEMOBJFREEBSD, *PRTR0MEMOBJFREEBSD;
96
97
/** Kernel malloc(9) type tag for IPRT ring-0 memory object allocations. */
MALLOC_DEFINE(M_IPRTMOBJ, "iprtmobj", "IPRT - R0MemObj");
99
100
101/**
102 * Gets the virtual memory map the specified object is mapped into.
103 *
104 * @returns VM map handle on success, NULL if no map.
105 * @param pMem The memory object.
106 */
107static vm_map_t rtR0MemObjFreeBSDGetMap(PRTR0MEMOBJINTERNAL pMem)
108{
109 switch (pMem->enmType)
110 {
111 case RTR0MEMOBJTYPE_PAGE:
112 case RTR0MEMOBJTYPE_LOW:
113 case RTR0MEMOBJTYPE_CONT:
114 return kernel_map;
115
116 case RTR0MEMOBJTYPE_PHYS:
117 case RTR0MEMOBJTYPE_PHYS_NC:
118 return NULL; /* pretend these have no mapping atm. */
119
120 case RTR0MEMOBJTYPE_LOCK:
121 return pMem->u.Lock.R0Process == NIL_RTR0PROCESS
122 ? kernel_map
123 : &((struct proc *)pMem->u.Lock.R0Process)->p_vmspace->vm_map;
124
125 case RTR0MEMOBJTYPE_RES_VIRT:
126 return pMem->u.ResVirt.R0Process == NIL_RTR0PROCESS
127 ? kernel_map
128 : &((struct proc *)pMem->u.ResVirt.R0Process)->p_vmspace->vm_map;
129
130 case RTR0MEMOBJTYPE_MAPPING:
131 return pMem->u.Mapping.R0Process == NIL_RTR0PROCESS
132 ? kernel_map
133 : &((struct proc *)pMem->u.Mapping.R0Process)->p_vmspace->vm_map;
134
135 default:
136 return NULL;
137 }
138}
139
140
141DECLHIDDEN(int) rtR0MemObjNativeFree(RTR0MEMOBJ pMem)
142{
143 PRTR0MEMOBJFREEBSD pMemFreeBSD = (PRTR0MEMOBJFREEBSD)pMem;
144 int rc;
145
146 switch (pMemFreeBSD->Core.enmType)
147 {
148 case RTR0MEMOBJTYPE_PAGE:
149 case RTR0MEMOBJTYPE_LOW:
150 case RTR0MEMOBJTYPE_CONT:
151 rc = vm_map_remove(kernel_map,
152 (vm_offset_t)pMemFreeBSD->Core.pv,
153 (vm_offset_t)pMemFreeBSD->Core.pv + pMemFreeBSD->Core.cb);
154 AssertMsg(rc == KERN_SUCCESS, ("%#x", rc));
155 break;
156
157 case RTR0MEMOBJTYPE_LOCK:
158 {
159 vm_map_t pMap = kernel_map;
160
161 if (pMemFreeBSD->Core.u.Lock.R0Process != NIL_RTR0PROCESS)
162 pMap = &((struct proc *)pMemFreeBSD->Core.u.Lock.R0Process)->p_vmspace->vm_map;
163
164 rc = vm_map_unwire(pMap,
165 (vm_offset_t)pMemFreeBSD->Core.pv,
166 (vm_offset_t)pMemFreeBSD->Core.pv + pMemFreeBSD->Core.cb,
167 VM_MAP_WIRE_SYSTEM | VM_MAP_WIRE_NOHOLES);
168 AssertMsg(rc == KERN_SUCCESS, ("%#x", rc));
169 break;
170 }
171
172 case RTR0MEMOBJTYPE_RES_VIRT:
173 {
174 vm_map_t pMap = kernel_map;
175 if (pMemFreeBSD->Core.u.ResVirt.R0Process != NIL_RTR0PROCESS)
176 pMap = &((struct proc *)pMemFreeBSD->Core.u.ResVirt.R0Process)->p_vmspace->vm_map;
177 rc = vm_map_remove(pMap,
178 (vm_offset_t)pMemFreeBSD->Core.pv,
179 (vm_offset_t)pMemFreeBSD->Core.pv + pMemFreeBSD->Core.cb);
180 AssertMsg(rc == KERN_SUCCESS, ("%#x", rc));
181 break;
182 }
183
184 case RTR0MEMOBJTYPE_MAPPING:
185 {
186 vm_map_t pMap = kernel_map;
187
188 if (pMemFreeBSD->Core.u.Mapping.R0Process != NIL_RTR0PROCESS)
189 pMap = &((struct proc *)pMemFreeBSD->Core.u.Mapping.R0Process)->p_vmspace->vm_map;
190 rc = vm_map_remove(pMap,
191 (vm_offset_t)pMemFreeBSD->Core.pv,
192 (vm_offset_t)pMemFreeBSD->Core.pv + pMemFreeBSD->Core.cb);
193 AssertMsg(rc == KERN_SUCCESS, ("%#x", rc));
194 break;
195 }
196
197 case RTR0MEMOBJTYPE_PHYS:
198 case RTR0MEMOBJTYPE_PHYS_NC:
199 {
200 VM_OBJECT_WLOCK(pMemFreeBSD->pObject);
201 vm_page_t pPage = vm_page_find_least(pMemFreeBSD->pObject, 0);
202#if __FreeBSD_version < 1000000
203 vm_page_lock_queues();
204#endif
205 for (vm_page_t pPage = vm_page_find_least(pMemFreeBSD->pObject, 0);
206 pPage != NULL;
207 pPage = vm_page_next(pPage))
208 {
209 vm_page_unwire(pPage, 0);
210 }
211#if __FreeBSD_version < 1000000
212 vm_page_unlock_queues();
213#endif
214 VM_OBJECT_WUNLOCK(pMemFreeBSD->pObject);
215 vm_object_deallocate(pMemFreeBSD->pObject);
216 break;
217 }
218
219 default:
220 AssertMsgFailed(("enmType=%d\n", pMemFreeBSD->Core.enmType));
221 return VERR_INTERNAL_ERROR;
222 }
223
224 return VINF_SUCCESS;
225}
226
227
/**
 * Allocates a run of physically contiguous pages and inserts them into the
 * given VM object starting at page index iPIndex.
 *
 * @returns Pointer to the first page on success, NULL on failure.
 * @param   pObject         The VM object to back with the pages.
 * @param   iPIndex         Object page index of the first page.
 * @param   cPages          Number of pages to allocate.
 * @param   VmPhysAddrHigh  Highest acceptable physical address (inclusive).
 * @param   uAlignment      Required physical alignment of the run.
 * @param   fWire           Whether to wire the pages down.
 */
static vm_page_t rtR0MemObjFreeBSDContigPhysAllocHelper(vm_object_t pObject, vm_pindex_t iPIndex,
                                                        u_long cPages, vm_paddr_t VmPhysAddrHigh,
                                                        u_long uAlignment, bool fWire)
{
    vm_page_t pPages;
    int cTries = 0;

#if __FreeBSD_version > 1000000
    /* Modern path: vm_page_alloc_contig() allocates and inserts into the
       object in one operation.  NOTE(review): this condition is '>' while the
       sibling version checks below use '<'/'>=' 1000000 - confirm the
       intended behavior for __FreeBSD_version == 1000000. */
    int fFlags = VM_ALLOC_INTERRUPT | VM_ALLOC_NOBUSY;
    if (fWire)
        fFlags |= VM_ALLOC_WIRED;

    /* Try at most twice, asking the page reclaim/pageout code for help
       between attempts. */
    while (cTries <= 1)
    {
        VM_OBJECT_WLOCK(pObject);
        pPages = vm_page_alloc_contig(pObject, iPIndex, fFlags, cPages, 0, VmPhysAddrHigh, uAlignment, 0, VM_MEMATTR_DEFAULT);
        VM_OBJECT_WUNLOCK(pObject);
        if (pPages)
            break;
#if __FreeBSD_version >= 1100092
        if (!vm_page_reclaim_contig(cTries, cPages, 0, VmPhysAddrHigh, PAGE_SIZE, 0))
            break; /* Reclaiming cannot free a suitable run; give up early. */
#else
        vm_pageout_grow_cache(cTries, 0, VmPhysAddrHigh);
#endif
        cTries++;
    }

    return pPages;
#else
    /* Legacy path: allocate raw physical pages, then insert them into the
       object (and wire them) by hand. */
    while (cTries <= 1)
    {
        pPages = vm_phys_alloc_contig(cPages, 0, VmPhysAddrHigh, uAlignment, 0);
        if (pPages)
            break;
        vm_contig_grow_cache(cTries, 0, VmPhysAddrHigh);
        cTries++;
    }

    if (!pPages)
        return pPages;
    VM_OBJECT_WLOCK(pObject);
    for (vm_pindex_t iPage = 0; iPage < cPages; iPage++)
    {
        vm_page_t pPage = pPages + iPage;
        vm_page_insert(pPage, pObject, iPIndex + iPage);
        pPage->valid = VM_PAGE_BITS_ALL;
        if (fWire)
        {
            /* Manual wiring: the legacy allocator has no VM_ALLOC_WIRED. */
            pPage->wire_count = 1;
            atomic_add_int(&cnt.v_wire_count, 1);
        }
    }
    VM_OBJECT_WUNLOCK(pObject);
    return pPages;
#endif
}
285
/**
 * Backs a VM object with cPages of physical memory, either as a single
 * contiguous run or page by page.
 *
 * On failure all pages allocated so far are freed again.
 *
 * @returns VINF_SUCCESS or rcNoMem.
 * @param   pObject         The VM object to back with the pages.
 * @param   cPages          Number of pages to allocate.
 * @param   VmPhysAddrHigh  Highest acceptable physical address (inclusive).
 * @param   uAlignment      Physical alignment (of each page / of the run).
 * @param   fContiguous     Whether the pages must be physically contiguous.
 * @param   fWire           Whether to wire the pages down.
 * @param   rcNoMem         Status code to return on allocation failure.
 */
static int rtR0MemObjFreeBSDPhysAllocHelper(vm_object_t pObject, u_long cPages,
                                            vm_paddr_t VmPhysAddrHigh, u_long uAlignment,
                                            bool fContiguous, bool fWire, int rcNoMem)
{
    if (fContiguous)
    {
        /* Single all-or-nothing contiguous run. */
        if (rtR0MemObjFreeBSDContigPhysAllocHelper(pObject, 0, cPages, VmPhysAddrHigh, uAlignment, fWire) != NULL)
            return VINF_SUCCESS;
        return rcNoMem;
    }

    /* Page-by-page allocation with rollback on failure. */
    for (vm_pindex_t iPage = 0; iPage < cPages; iPage++)
    {
        vm_page_t pPage = rtR0MemObjFreeBSDContigPhysAllocHelper(pObject, iPage, 1, VmPhysAddrHigh, uAlignment, fWire);
        if (pPage)
        { /* likely */ }
        else
        {
            /* Free all allocated pages */
            VM_OBJECT_WLOCK(pObject);
            while (iPage-- > 0)
            {
                pPage = vm_page_lookup(pObject, iPage);
#if __FreeBSD_version < 1000000
                vm_page_lock_queues();
#endif
                if (fWire)
                    vm_page_unwire(pPage, 0);
                vm_page_free(pPage);
#if __FreeBSD_version < 1000000
                vm_page_unlock_queues();
#endif
            }
            VM_OBJECT_WUNLOCK(pObject);
            return rcNoMem;
        }
    }
    return VINF_SUCCESS;
}
325
326static int rtR0MemObjFreeBSDAllocHelper(PRTR0MEMOBJFREEBSD pMemFreeBSD, bool fExecutable,
327 vm_paddr_t VmPhysAddrHigh, bool fContiguous, int rcNoMem)
328{
329 vm_offset_t MapAddress = vm_map_min(kernel_map);
330 size_t cPages = atop(pMemFreeBSD->Core.cb);
331 int rc;
332
333 pMemFreeBSD->pObject = vm_object_allocate(OBJT_PHYS, cPages);
334
335 /* No additional object reference for auto-deallocation upon unmapping. */
336#if __FreeBSD_version >= 1000055
337 rc = vm_map_find(kernel_map, pMemFreeBSD->pObject, 0,
338 &MapAddress, pMemFreeBSD->Core.cb, 0, VMFS_ANY_SPACE,
339 fExecutable ? VM_PROT_ALL : VM_PROT_RW, VM_PROT_ALL, 0);
340#else
341 rc = vm_map_find(kernel_map, pMemFreeBSD->pObject, 0,
342 &MapAddress, pMemFreeBSD->Core.cb, VMFS_ANY_SPACE,
343 fExecutable ? VM_PROT_ALL : VM_PROT_RW, VM_PROT_ALL, 0);
344#endif
345
346 if (rc == KERN_SUCCESS)
347 {
348 rc = rtR0MemObjFreeBSDPhysAllocHelper(pMemFreeBSD->pObject, cPages, VmPhysAddrHigh, PAGE_SIZE,
349 fContiguous, false /*fWire*/, rcNoMem);
350 if (RT_SUCCESS(rc))
351 {
352 vm_map_wire(kernel_map, MapAddress, MapAddress + pMemFreeBSD->Core.cb, VM_MAP_WIRE_SYSTEM | VM_MAP_WIRE_NOHOLES);
353
354 /* Store start address */
355 pMemFreeBSD->Core.pv = (void *)MapAddress;
356 pMemFreeBSD->Core.fFlags |= RTR0MEMOBJ_FLAGS_UNINITIALIZED_AT_ALLOC;
357 return VINF_SUCCESS;
358 }
359
360 vm_map_remove(kernel_map, MapAddress, MapAddress + pMemFreeBSD->Core.cb);
361 }
362 else
363 {
364 rc = rcNoMem; /** @todo fix translation (borrow from darwin) */
365 vm_object_deallocate(pMemFreeBSD->pObject);
366 }
367
368 rtR0MemObjDelete(&pMemFreeBSD->Core);
369 return rc;
370}
371
372
373DECLHIDDEN(int) rtR0MemObjNativeAllocPage(PPRTR0MEMOBJINTERNAL ppMem, size_t cb, bool fExecutable, const char *pszTag)
374{
375 PRTR0MEMOBJFREEBSD pMemFreeBSD = (PRTR0MEMOBJFREEBSD)rtR0MemObjNew(sizeof(*pMemFreeBSD), RTR0MEMOBJTYPE_PAGE,
376 NULL, cb, pszTag);
377 if (pMemFreeBSD)
378 {
379 int rc = rtR0MemObjFreeBSDAllocHelper(pMemFreeBSD, fExecutable, ~(vm_paddr_t)0, false /*fContiguous*/, VERR_NO_MEMORY);
380 if (RT_SUCCESS(rc))
381 *ppMem = &pMemFreeBSD->Core;
382 else
383 rtR0MemObjDelete(&pMemFreeBSD->Core);
384 return rc;
385 }
386 return VERR_NO_MEMORY;
387}
388
389
/**
 * No native large-page support in this backend; defer to the generic
 * ring-0 fallback implementation.
 */
DECLHIDDEN(int) rtR0MemObjNativeAllocLarge(PPRTR0MEMOBJINTERNAL ppMem, size_t cb, size_t cbLargePage, uint32_t fFlags,
                                           const char *pszTag)
{
    return rtR0MemObjFallbackAllocLarge(ppMem, cb, cbLargePage, fFlags, pszTag);
}
395
396
397DECLHIDDEN(int) rtR0MemObjNativeAllocLow(PPRTR0MEMOBJINTERNAL ppMem, size_t cb, bool fExecutable, const char *pszTag)
398{
399 PRTR0MEMOBJFREEBSD pMemFreeBSD = (PRTR0MEMOBJFREEBSD)rtR0MemObjNew(sizeof(*pMemFreeBSD), RTR0MEMOBJTYPE_LOW, NULL, cb, pszTag);
400 if (pMemFreeBSD)
401 {
402 int rc = rtR0MemObjFreeBSDAllocHelper(pMemFreeBSD, fExecutable, _4G - 1, false /*fContiguous*/, VERR_NO_LOW_MEMORY);
403 if (RT_SUCCESS(rc))
404 *ppMem = &pMemFreeBSD->Core;
405 else
406 rtR0MemObjDelete(&pMemFreeBSD->Core);
407 return rc;
408 }
409 return VERR_NO_MEMORY;
410}
411
412
413DECLHIDDEN(int) rtR0MemObjNativeAllocCont(PPRTR0MEMOBJINTERNAL ppMem, size_t cb, RTHCPHYS PhysHigest,
414 bool fExecutable, const char *pszTag)
415{
416 PRTR0MEMOBJFREEBSD pMemFreeBSD = (PRTR0MEMOBJFREEBSD)rtR0MemObjNew(sizeof(*pMemFreeBSD), RTR0MEMOBJTYPE_CONT,
417 NULL, cb, pszTag);
418 if (pMemFreeBSD)
419 {
420 int rc = rtR0MemObjFreeBSDAllocHelper(pMemFreeBSD, fExecutable, PhysHigest, true /*fContiguous*/, VERR_NO_CONT_MEMORY);
421 if (RT_SUCCESS(rc))
422 {
423 pMemFreeBSD->Core.u.Cont.Phys = vtophys(pMemFreeBSD->Core.pv);
424 *ppMem = &pMemFreeBSD->Core;
425 }
426 else
427 rtR0MemObjDelete(&pMemFreeBSD->Core);
428 return rc;
429 }
430 return VERR_NO_MEMORY;
431}
432
433
/**
 * Worker for the physical page allocators: allocates wired physical pages
 * into a fresh VM object without mapping them anywhere.
 *
 * @returns IPRT status code.
 * @param   ppMem       Where to return the memory object on success.
 * @param   enmType     RTR0MEMOBJTYPE_PHYS or RTR0MEMOBJTYPE_PHYS_NC.
 * @param   cb          Number of bytes to allocate.
 * @param   PhysHighest Highest acceptable physical address or NIL_RTHCPHYS.
 * @param   uAlignment  Required physical alignment.
 * @param   fContiguous Whether the pages must be physically contiguous.
 * @param   rcNoMem     Status code to return on allocation failure.
 * @param   pszTag      Allocation tag used for statistics and such.
 */
static int rtR0MemObjFreeBSDAllocPhysPages(PPRTR0MEMOBJINTERNAL ppMem, RTR0MEMOBJTYPE enmType, size_t cb, RTHCPHYS PhysHighest,
                                           size_t uAlignment, bool fContiguous, int rcNoMem, const char *pszTag)
{
    /* create the object. */
    PRTR0MEMOBJFREEBSD pMemFreeBSD = (PRTR0MEMOBJFREEBSD)rtR0MemObjNew(sizeof(*pMemFreeBSD), enmType, NULL, cb, pszTag);
    if (pMemFreeBSD)
    {
        /* NIL means no upper physical address restriction. */
        vm_paddr_t const VmPhysAddrHigh = PhysHighest != NIL_RTHCPHYS ? PhysHighest : ~(vm_paddr_t)0;
        u_long const cPages = atop(cb);

        pMemFreeBSD->pObject = vm_object_allocate(OBJT_PHYS, cPages);

        int rc = rtR0MemObjFreeBSDPhysAllocHelper(pMemFreeBSD->pObject, cPages, VmPhysAddrHigh,
                                                  uAlignment, fContiguous, true, rcNoMem);
        if (RT_SUCCESS(rc))
        {
            if (fContiguous)
            {
                /* Contiguous run: the first page's physical address is the
                   base address of the entire allocation. */
                Assert(enmType == RTR0MEMOBJTYPE_PHYS);
                VM_OBJECT_WLOCK(pMemFreeBSD->pObject);
                pMemFreeBSD->Core.u.Phys.PhysBase = VM_PAGE_TO_PHYS(vm_page_find_least(pMemFreeBSD->pObject, 0));
                VM_OBJECT_WUNLOCK(pMemFreeBSD->pObject);
                pMemFreeBSD->Core.u.Phys.fAllocated = true;
            }

            pMemFreeBSD->Core.fFlags |= RTR0MEMOBJ_FLAGS_UNINITIALIZED_AT_ALLOC;
            *ppMem = &pMemFreeBSD->Core;
        }
        else
        {
            vm_object_deallocate(pMemFreeBSD->pObject);
            rtR0MemObjDelete(&pMemFreeBSD->Core);
        }
        return rc;
    }
    return VERR_NO_MEMORY;
}
471
472
473DECLHIDDEN(int) rtR0MemObjNativeAllocPhys(PPRTR0MEMOBJINTERNAL ppMem, size_t cb, RTHCPHYS PhysHighest, size_t uAlignment,
474 const char *pszTag)
475{
476 return rtR0MemObjFreeBSDAllocPhysPages(ppMem, RTR0MEMOBJTYPE_PHYS, cb, PhysHighest, uAlignment, true, VERR_NO_MEMORY, pszTag);
477}
478
479
480DECLHIDDEN(int) rtR0MemObjNativeAllocPhysNC(PPRTR0MEMOBJINTERNAL ppMem, size_t cb, RTHCPHYS PhysHighest, const char *pszTag)
481{
482 return rtR0MemObjFreeBSDAllocPhysPages(ppMem, RTR0MEMOBJTYPE_PHYS_NC, cb, PhysHighest, PAGE_SIZE, false,
483 VERR_NO_PHYS_MEMORY, pszTag);
484}
485
486
487DECLHIDDEN(int) rtR0MemObjNativeEnterPhys(PPRTR0MEMOBJINTERNAL ppMem, RTHCPHYS Phys, size_t cb, uint32_t uCachePolicy,
488 const char *pszTag)
489{
490 AssertReturn(uCachePolicy == RTMEM_CACHE_POLICY_DONT_CARE, VERR_NOT_SUPPORTED);
491
492 /* create the object. */
493 PRTR0MEMOBJFREEBSD pMemFreeBSD = (PRTR0MEMOBJFREEBSD)rtR0MemObjNew(sizeof(*pMemFreeBSD), RTR0MEMOBJTYPE_PHYS,
494 NULL, cb, pszTag);
495 if (pMemFreeBSD)
496 {
497 /* there is no allocation here, it needs to be mapped somewhere first. */
498 pMemFreeBSD->Core.u.Phys.fAllocated = false;
499 pMemFreeBSD->Core.u.Phys.PhysBase = Phys;
500 pMemFreeBSD->Core.u.Phys.uCachePolicy = uCachePolicy;
501 *ppMem = &pMemFreeBSD->Core;
502 return VINF_SUCCESS;
503 }
504 return VERR_NO_MEMORY;
505}
506
507
508/**
509 * Worker locking the memory in either kernel or user maps.
510 */
511static int rtR0MemObjNativeLockInMap(PPRTR0MEMOBJINTERNAL ppMem, vm_map_t pVmMap,
512 vm_offset_t AddrStart, size_t cb, uint32_t fAccess,
513 RTR0PROCESS R0Process, int fFlags, const char *pszTag)
514{
515 int rc;
516 NOREF(fAccess);
517
518 /* create the object. */
519 PRTR0MEMOBJFREEBSD pMemFreeBSD = (PRTR0MEMOBJFREEBSD)rtR0MemObjNew(sizeof(*pMemFreeBSD), RTR0MEMOBJTYPE_LOCK,
520 (void *)AddrStart, cb, pszTag);
521 if (!pMemFreeBSD)
522 return VERR_NO_MEMORY;
523
524 /*
525 * We could've used vslock here, but we don't wish to be subject to
526 * resource usage restrictions, so we'll call vm_map_wire directly.
527 */
528 rc = vm_map_wire(pVmMap, /* the map */
529 AddrStart, /* start */
530 AddrStart + cb, /* end */
531 fFlags); /* flags */
532 if (rc == KERN_SUCCESS)
533 {
534 pMemFreeBSD->Core.u.Lock.R0Process = R0Process;
535 *ppMem = &pMemFreeBSD->Core;
536 return VINF_SUCCESS;
537 }
538 rtR0MemObjDelete(&pMemFreeBSD->Core);
539 return VERR_NO_MEMORY;/** @todo fix mach -> vbox error conversion for freebsd. */
540}
541
542
543DECLHIDDEN(int) rtR0MemObjNativeLockUser(PPRTR0MEMOBJINTERNAL ppMem, RTR3PTR R3Ptr, size_t cb, uint32_t fAccess,
544 RTR0PROCESS R0Process, const char *pszTag)
545{
546 return rtR0MemObjNativeLockInMap(ppMem,
547 &((struct proc *)R0Process)->p_vmspace->vm_map,
548 (vm_offset_t)R3Ptr,
549 cb,
550 fAccess,
551 R0Process,
552 VM_MAP_WIRE_USER | VM_MAP_WIRE_NOHOLES,
553 pszTag);
554}
555
556
557DECLHIDDEN(int) rtR0MemObjNativeLockKernel(PPRTR0MEMOBJINTERNAL ppMem, void *pv, size_t cb, uint32_t fAccess, const char *pszTag)
558{
559 return rtR0MemObjNativeLockInMap(ppMem,
560 kernel_map,
561 (vm_offset_t)pv,
562 cb,
563 fAccess,
564 NIL_RTR0PROCESS,
565 VM_MAP_WIRE_SYSTEM | VM_MAP_WIRE_NOHOLES,
566 pszTag);
567}
568
569
/**
 * Worker for the two virtual address space reservers.
 *
 * We're leaning on the examples provided by mmap and vm_mmap in vm_mmap.c here.
 *
 * @returns IPRT status code.
 * @param   ppMem       Where to return the memory object on success.
 * @param   pvFixed     Fixed address to reserve at, or (void *)-1 for any.
 * @param   cb          Number of bytes to reserve.
 * @param   uAlignment  Requested alignment; only up to PAGE_SIZE is supported.
 * @param   R0Process   Owning process handle or NIL_RTR0PROCESS for kernel.
 * @param   pMap        The VM map to reserve the range in.
 * @param   pszTag      Allocation tag used for statistics and such.
 */
static int rtR0MemObjNativeReserveInMap(PPRTR0MEMOBJINTERNAL ppMem, void *pvFixed, size_t cb, size_t uAlignment,
                                        RTR0PROCESS R0Process, vm_map_t pMap, const char *pszTag)
{
    int rc;

    /*
     * The pvFixed address range must be within the VM space when specified.
     */
    if (   pvFixed != (void *)-1
        && (   (vm_offset_t)pvFixed < vm_map_min(pMap)
            || (vm_offset_t)pvFixed + cb > vm_map_max(pMap)))
        return VERR_INVALID_PARAMETER;

    /*
     * Check that the specified alignment is supported.
     */
    if (uAlignment > PAGE_SIZE)
        return VERR_NOT_SUPPORTED;

    /*
     * Create the object.
     */
    PRTR0MEMOBJFREEBSD pMemFreeBSD = (PRTR0MEMOBJFREEBSD)rtR0MemObjNew(sizeof(*pMemFreeBSD), RTR0MEMOBJTYPE_RES_VIRT,
                                                                       NULL, cb, pszTag);
    if (!pMemFreeBSD)
        return VERR_NO_MEMORY;

    /* When a fixed address was requested, evict whatever is currently mapped
       there before reserving (mirrors the MAP_FIXED behavior of mmap). */
    vm_offset_t MapAddress = pvFixed != (void *)-1
                           ? (vm_offset_t)pvFixed
                           : vm_map_min(pMap);
    if (pvFixed != (void *)-1)
        vm_map_remove(pMap,
                      MapAddress,
                      MapAddress + cb);

    rc = vm_map_find(pMap,                          /* map */
                     NULL,                          /* object */
                     0,                             /* offset */
                     &MapAddress,                   /* addr (IN/OUT) */
                     cb,                            /* length */
#if __FreeBSD_version >= 1000055
                     0,                             /* max addr */
#endif
                     pvFixed == (void *)-1 ? VMFS_ANY_SPACE : VMFS_NO_SPACE,
                                                    /* find_space */
                     VM_PROT_NONE,                  /* protection */
                     VM_PROT_ALL,                   /* max(_prot) ?? */
                     0);                            /* cow (copy-on-write) */
    if (rc == KERN_SUCCESS)
    {
        /* Userland reservations are marked shared so forked children inherit them. */
        if (R0Process != NIL_RTR0PROCESS)
        {
            rc = vm_map_inherit(pMap,
                                MapAddress,
                                MapAddress + cb,
                                VM_INHERIT_SHARE);
            AssertMsg(rc == KERN_SUCCESS, ("%#x\n", rc));
        }
        pMemFreeBSD->Core.pv = (void *)MapAddress;
        pMemFreeBSD->Core.u.ResVirt.R0Process = R0Process;
        *ppMem = &pMemFreeBSD->Core;
        return VINF_SUCCESS;
    }

    rc = VERR_NO_MEMORY; /** @todo fix translation (borrow from darwin) */
    rtR0MemObjDelete(&pMemFreeBSD->Core);
    return rc;

}
644
645
/**
 * Reserves kernel virtual address space (no backing memory).
 */
DECLHIDDEN(int) rtR0MemObjNativeReserveKernel(PPRTR0MEMOBJINTERNAL ppMem, void *pvFixed, size_t cb, size_t uAlignment,
                                              const char *pszTag)
{
    return rtR0MemObjNativeReserveInMap(ppMem, pvFixed, cb, uAlignment, NIL_RTR0PROCESS, kernel_map, pszTag);
}
651
652
653DECLHIDDEN(int) rtR0MemObjNativeReserveUser(PPRTR0MEMOBJINTERNAL ppMem, RTR3PTR R3PtrFixed, size_t cb, size_t uAlignment,
654 RTR0PROCESS R0Process, const char *pszTag)
655{
656 return rtR0MemObjNativeReserveInMap(ppMem, (void *)R3PtrFixed, cb, uAlignment, R0Process,
657 &((struct proc *)R0Process)->p_vmspace->vm_map, pszTag);
658}
659
660
/**
 * Maps a memory object (or a sub range of it) into the kernel map.
 *
 * @returns IPRT status code.
 * @param   ppMem       Where to return the new mapping object.
 * @param   pMemToMap   The memory object to map.
 * @param   pvFixed     Must be (void *)-1; fixed kernel mappings unsupported.
 * @param   uAlignment  Requested alignment; only up to PAGE_SIZE supported.
 * @param   fProt       RTMEM_PROT_XXX flags for the mapping.
 * @param   offSub      Offset into the object of the sub range to map.
 * @param   cbSub       Size of the sub range; 0 means to the end of the object.
 * @param   pszTag      Allocation tag used for statistics and such.
 */
DECLHIDDEN(int) rtR0MemObjNativeMapKernel(PPRTR0MEMOBJINTERNAL ppMem, RTR0MEMOBJ pMemToMap, void *pvFixed, size_t uAlignment,
                                          unsigned fProt, size_t offSub, size_t cbSub, const char *pszTag)
{
//  AssertMsgReturn(!offSub && !cbSub, ("%#x %#x\n", offSub, cbSub), VERR_NOT_SUPPORTED);
    AssertMsgReturn(pvFixed == (void *)-1, ("%p\n", pvFixed), VERR_NOT_SUPPORTED);

    /*
     * Check that the specified alignment is supported.
     */
    if (uAlignment > PAGE_SIZE)
        return VERR_NOT_SUPPORTED;
    Assert(!offSub || cbSub);

    int rc;
    PRTR0MEMOBJFREEBSD pMemToMapFreeBSD = (PRTR0MEMOBJFREEBSD)pMemToMap;

    /* calc protection */
    vm_prot_t ProtectionFlags = 0;
    if ((fProt & RTMEM_PROT_NONE) == RTMEM_PROT_NONE)
        ProtectionFlags = VM_PROT_NONE;
    if ((fProt & RTMEM_PROT_READ) == RTMEM_PROT_READ)
        ProtectionFlags |= VM_PROT_READ;
    if ((fProt & RTMEM_PROT_WRITE) == RTMEM_PROT_WRITE)
        ProtectionFlags |= VM_PROT_WRITE;
    if ((fProt & RTMEM_PROT_EXEC) == RTMEM_PROT_EXEC)
        ProtectionFlags |= VM_PROT_EXECUTE;

    vm_offset_t Addr = vm_map_min(kernel_map);
    if (cbSub == 0)
        cbSub = pMemToMap->cb - offSub;

    /* Extra object reference; consumed by vm_map_remove when unmapping. */
    vm_object_reference(pMemToMapFreeBSD->pObject);
    rc = vm_map_find(kernel_map,            /* Map to insert the object in */
                     pMemToMapFreeBSD->pObject, /* Object to map */
                     offSub,                /* Start offset in the object */
                     &Addr,                 /* Start address IN/OUT */
                     cbSub,                 /* Size of the mapping */
#if __FreeBSD_version >= 1000055
                     0,                     /* Upper bound of mapping */
#endif
                     VMFS_ANY_SPACE,        /* Whether a suitable address should be searched for first */
                     ProtectionFlags,       /* protection flags */
                     VM_PROT_ALL,           /* Maximum protection flags */
                     0);                    /* copy-on-write and similar flags */

    if (rc == KERN_SUCCESS)
    {
        /* Wire the mapped range so it cannot be paged out from under us. */
        rc = vm_map_wire(kernel_map, Addr, Addr + cbSub, VM_MAP_WIRE_SYSTEM | VM_MAP_WIRE_NOHOLES);
        AssertMsg(rc == KERN_SUCCESS, ("%#x\n", rc));

        PRTR0MEMOBJFREEBSD pMemFreeBSD = (PRTR0MEMOBJFREEBSD)rtR0MemObjNew(sizeof(RTR0MEMOBJFREEBSD), RTR0MEMOBJTYPE_MAPPING,
                                                                           (void *)Addr, cbSub, pszTag);
        if (pMemFreeBSD)
        {
            Assert((vm_offset_t)pMemFreeBSD->Core.pv == Addr);
            pMemFreeBSD->Core.u.Mapping.R0Process = NIL_RTR0PROCESS;
            *ppMem = &pMemFreeBSD->Core;
            return VINF_SUCCESS;
        }
        /* Out of memory for the object: undo the mapping (also drops the object reference). */
        rc = vm_map_remove(kernel_map, Addr, Addr + cbSub);
        AssertMsg(rc == KERN_SUCCESS, ("Deleting mapping failed\n"));
    }
    else
        vm_object_deallocate(pMemToMapFreeBSD->pObject);

    return VERR_NO_MEMORY;
}
728
729
/**
 * Maps a memory object (or a sub range of it) into the calling process'
 * user address space.
 *
 * @returns IPRT status code.
 * @param   ppMem       Where to return the new mapping object.
 * @param   pMemToMap   The memory object to map.
 * @param   R3PtrFixed  Fixed ring-3 address or (RTR3PTR)-1 for any.
 * @param   uAlignment  Requested alignment; only up to PAGE_SIZE supported.
 * @param   fProt       RTMEM_PROT_XXX flags for the mapping.
 * @param   R0Process   Target process; must be the current process.
 * @param   offSub      Offset into the object of the sub range to map.
 * @param   cbSub       Size of the sub range; 0 means to the end of the object.
 * @param   pszTag      Allocation tag used for statistics and such.
 */
DECLHIDDEN(int) rtR0MemObjNativeMapUser(PPRTR0MEMOBJINTERNAL ppMem, RTR0MEMOBJ pMemToMap, RTR3PTR R3PtrFixed, size_t uAlignment,
                                        unsigned fProt, RTR0PROCESS R0Process, size_t offSub, size_t cbSub, const char *pszTag)
{
    /*
     * Check for unsupported stuff.
     */
    AssertMsgReturn(R0Process == RTR0ProcHandleSelf(), ("%p != %p\n", R0Process, RTR0ProcHandleSelf()), VERR_NOT_SUPPORTED);
    if (uAlignment > PAGE_SIZE)
        return VERR_NOT_SUPPORTED;
    Assert(!offSub || cbSub);

    int rc;
    PRTR0MEMOBJFREEBSD pMemToMapFreeBSD = (PRTR0MEMOBJFREEBSD)pMemToMap;
    struct proc *pProc = (struct proc *)R0Process;
    struct vm_map *pProcMap = &pProc->p_vmspace->vm_map;

    /* calc protection */
    vm_prot_t ProtectionFlags = 0;
    if ((fProt & RTMEM_PROT_NONE) == RTMEM_PROT_NONE)
        ProtectionFlags = VM_PROT_NONE;
    if ((fProt & RTMEM_PROT_READ) == RTMEM_PROT_READ)
        ProtectionFlags |= VM_PROT_READ;
    if ((fProt & RTMEM_PROT_WRITE) == RTMEM_PROT_WRITE)
        ProtectionFlags |= VM_PROT_WRITE;
    if ((fProt & RTMEM_PROT_EXEC) == RTMEM_PROT_EXEC)
        ProtectionFlags |= VM_PROT_EXECUTE;

    /* calc mapping address */
    vm_offset_t AddrR3;
    if (R3PtrFixed == (RTR3PTR)-1)
    {
        /** @todo is this needed?. */
        /* Start searching above the process' data segment limit. */
        PROC_LOCK(pProc);
        AddrR3 = round_page((vm_offset_t)pProc->p_vmspace->vm_daddr + MY_LIM_MAX_PROC(pProc, RLIMIT_DATA));
        PROC_UNLOCK(pProc);
    }
    else
        AddrR3 = (vm_offset_t)R3PtrFixed;

    if (cbSub == 0)
        cbSub = pMemToMap->cb - offSub;

    /* Insert the pObject in the map. */
    vm_object_reference(pMemToMapFreeBSD->pObject);
    rc = vm_map_find(pProcMap,              /* Map to insert the object in */
                     pMemToMapFreeBSD->pObject, /* Object to map */
                     offSub,                /* Start offset in the object */
                     &AddrR3,               /* Start address IN/OUT */
                     cbSub,                 /* Size of the mapping */
#if __FreeBSD_version >= 1000055
                     0,                     /* Upper bound of the mapping */
#endif
                     R3PtrFixed == (RTR3PTR)-1 ? VMFS_ANY_SPACE : VMFS_NO_SPACE,
                                            /* Whether a suitable address should be searched for first */
                     ProtectionFlags,       /* protection flags */
                     VM_PROT_ALL,           /* Maximum protection flags */
                     0);                    /* copy-on-write and similar flags */

    if (rc == KERN_SUCCESS)
    {
        /* NOTE(review): the wire/inherit calls and the mapping object below
           use pMemToMap->cb, while only cbSub bytes were mapped above -
           verify the intended behavior for sub-range mappings. */
        rc = vm_map_wire(pProcMap, AddrR3, AddrR3 + pMemToMap->cb, VM_MAP_WIRE_USER|VM_MAP_WIRE_NOHOLES);
        AssertMsg(rc == KERN_SUCCESS, ("%#x\n", rc));

        rc = vm_map_inherit(pProcMap, AddrR3, AddrR3 + pMemToMap->cb, VM_INHERIT_SHARE);
        AssertMsg(rc == KERN_SUCCESS, ("%#x\n", rc));

        /*
         * Create a mapping object for it.
         */
        PRTR0MEMOBJFREEBSD pMemFreeBSD = (PRTR0MEMOBJFREEBSD)rtR0MemObjNew(sizeof(RTR0MEMOBJFREEBSD), RTR0MEMOBJTYPE_MAPPING,
                                                                           (void *)AddrR3, pMemToMap->cb, pszTag);
        if (pMemFreeBSD)
        {
            Assert((vm_offset_t)pMemFreeBSD->Core.pv == AddrR3);
            pMemFreeBSD->Core.u.Mapping.R0Process = R0Process;
            *ppMem = &pMemFreeBSD->Core;
            return VINF_SUCCESS;
        }

        /* Out of memory for the object: undo the mapping (also drops the object reference). */
        rc = vm_map_remove(pProcMap, AddrR3, AddrR3 + pMemToMap->cb);
        AssertMsg(rc == KERN_SUCCESS, ("Deleting mapping failed\n"));
    }
    else
        vm_object_deallocate(pMemToMapFreeBSD->pObject);

    return VERR_NO_MEMORY;
}
817
818
819DECLHIDDEN(int) rtR0MemObjNativeProtect(PRTR0MEMOBJINTERNAL pMem, size_t offSub, size_t cbSub, uint32_t fProt)
820{
821 vm_prot_t ProtectionFlags = 0;
822 vm_offset_t AddrStart = (uintptr_t)pMem->pv + offSub;
823 vm_offset_t AddrEnd = AddrStart + cbSub;
824 vm_map_t pVmMap = rtR0MemObjFreeBSDGetMap(pMem);
825
826 if (!pVmMap)
827 return VERR_NOT_SUPPORTED;
828
829 if ((fProt & RTMEM_PROT_NONE) == RTMEM_PROT_NONE)
830 ProtectionFlags = VM_PROT_NONE;
831 if ((fProt & RTMEM_PROT_READ) == RTMEM_PROT_READ)
832 ProtectionFlags |= VM_PROT_READ;
833 if ((fProt & RTMEM_PROT_WRITE) == RTMEM_PROT_WRITE)
834 ProtectionFlags |= VM_PROT_WRITE;
835 if ((fProt & RTMEM_PROT_EXEC) == RTMEM_PROT_EXEC)
836 ProtectionFlags |= VM_PROT_EXECUTE;
837
838 int krc = vm_map_protect(pVmMap, AddrStart, AddrEnd, ProtectionFlags, FALSE);
839 if (krc == KERN_SUCCESS)
840 return VINF_SUCCESS;
841
842 return VERR_NOT_SUPPORTED;
843}
844
845
846DECLHIDDEN(RTHCPHYS) rtR0MemObjNativeGetPagePhysAddr(PRTR0MEMOBJINTERNAL pMem, size_t iPage)
847{
848 PRTR0MEMOBJFREEBSD pMemFreeBSD = (PRTR0MEMOBJFREEBSD)pMem;
849
850 switch (pMemFreeBSD->Core.enmType)
851 {
852 case RTR0MEMOBJTYPE_LOCK:
853 {
854 if ( pMemFreeBSD->Core.u.Lock.R0Process != NIL_RTR0PROCESS
855 && pMemFreeBSD->Core.u.Lock.R0Process != (RTR0PROCESS)curproc)
856 {
857 /* later */
858 return NIL_RTHCPHYS;
859 }
860
861 vm_offset_t pb = (vm_offset_t)pMemFreeBSD->Core.pv + ptoa(iPage);
862
863 struct proc *pProc = (struct proc *)pMemFreeBSD->Core.u.Lock.R0Process;
864 struct vm_map *pProcMap = &pProc->p_vmspace->vm_map;
865 pmap_t pPhysicalMap = vm_map_pmap(pProcMap);
866
867 return pmap_extract(pPhysicalMap, pb);
868 }
869
870 case RTR0MEMOBJTYPE_MAPPING:
871 {
872 vm_offset_t pb = (vm_offset_t)pMemFreeBSD->Core.pv + ptoa(iPage);
873
874 if (pMemFreeBSD->Core.u.Mapping.R0Process != NIL_RTR0PROCESS)
875 {
876 struct proc *pProc = (struct proc *)pMemFreeBSD->Core.u.Mapping.R0Process;
877 struct vm_map *pProcMap = &pProc->p_vmspace->vm_map;
878 pmap_t pPhysicalMap = vm_map_pmap(pProcMap);
879
880 return pmap_extract(pPhysicalMap, pb);
881 }
882 return vtophys(pb);
883 }
884
885 case RTR0MEMOBJTYPE_PAGE:
886 case RTR0MEMOBJTYPE_LOW:
887 case RTR0MEMOBJTYPE_PHYS_NC:
888 {
889 RTHCPHYS addr;
890
891 VM_OBJECT_WLOCK(pMemFreeBSD->pObject);
892 addr = VM_PAGE_TO_PHYS(vm_page_lookup(pMemFreeBSD->pObject, iPage));
893 VM_OBJECT_WUNLOCK(pMemFreeBSD->pObject);
894 return addr;
895 }
896
897 case RTR0MEMOBJTYPE_PHYS:
898 return pMemFreeBSD->Core.u.Cont.Phys + ptoa(iPage);
899
900 case RTR0MEMOBJTYPE_CONT:
901 return pMemFreeBSD->Core.u.Phys.PhysBase + ptoa(iPage);
902
903 case RTR0MEMOBJTYPE_RES_VIRT:
904 default:
905 return NIL_RTHCPHYS;
906 }
907}
908
909
910DECLHIDDEN(int) rtR0MemObjNativeZeroInitWithoutMapping(PRTR0MEMOBJINTERNAL pMem)
911{
912 PRTR0MEMOBJFREEBSD const pMemFreeBsd = (PRTR0MEMOBJFREEBSD)pMem;
913 size_t const cPages = pMemSolaris->Core.cb >> PAGE_SHIFT;
914 size_t iPage;
915 for (iPage = 0; iPage < cPages; iPage++)
916 {
917 void *pvPage;
918
919 /* Get the physical address of the page. */
920 RTHCPHYS const HCPhys = rtR0MemObjNativeGetPagePhysAddr(&pMemFreeBsd->Core, iPage);
921 AssertReturn(HCPhys != NIL_RTHCPHYS, VERR_INTERNAL_ERROR_3);
922 Assert(!(HCPhys & PAGE_OFFSET_MASK));
923
924 /* Map it. */
925 pvPage = (void *)(uintptr_t)pmap_map(NULL, HCPhys, (HCPhys | PAGE_OFFSET_MASK) + 1, VM_PROT_WRITE | VM_PROT_READ);
926 AssertPtrReturn(pvPage, VERR_INTERNAL_ERROR_3);
927
928 /* Zero it. */
929 RT_BZERO(pvPage, PAGE_SIZE);
930 }
931 return VINF_SUCCESS;
932}
933
Note: See TracBrowser for help on using the repository browser.

© 2024 Oracle Support Privacy / Do Not Sell My Info Terms of Use Trademark Policy Automated Access Etiquette