VirtualBox

source: vbox/trunk/src/VBox/Runtime/r0drv/netbsd/memobj-r0drv-netbsd.c@78381

Last change on this file since 78381 was 78120, checked in by vboxsync, 6 years ago:

IPRT: Started adding a RTR0MemObjMapUserEx function that takes offSub and cbSub. bugref:9217

/* $Id: memobj-r0drv-netbsd.c 78120 2019-04-12 13:20:50Z vboxsync $ */
/** @file
 * IPRT - Ring-0 Memory Objects, NetBSD.
 */

/*
 * Contributed by knut st. osmundsen, Andriy Gapon, Arto Huusko.
 *
 * Copyright (C) 2007-2019 Oracle Corporation
 *
 * This file is part of VirtualBox Open Source Edition (OSE), as
 * available from http://www.virtualbox.org. This file is free software;
 * you can redistribute it and/or modify it under the terms of the GNU
 * General Public License (GPL) as published by the Free Software
 * Foundation, in version 2 as it comes in the "COPYING" file of the
 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
 *
 * The contents of this file may alternatively be used under the terms
 * of the Common Development and Distribution License Version 1.0
 * (CDDL) only, as it comes in the "COPYING.CDDL" file of the
 * VirtualBox OSE distribution, in which case the provisions of the
 * CDDL are applicable instead of those of the GPL.
 *
 * You may elect to license modified versions of this file under the
 * terms and conditions of either the GPL or the CDDL or both.
 *
 * --------------------------------------------------------------------
 *
 * Copyright (c) 2007 knut st. osmundsen <[email protected]>
 * Copyright (c) 2011 Andriy Gapon <[email protected]>
 * Copyright (c) 2014 Arto Huusko
 *
 * Permission is hereby granted, free of charge, to any person
 * obtaining a copy of this software and associated documentation
 * files (the "Software"), to deal in the Software without
 * restriction, including without limitation the rights to use,
 * copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following
 * conditions:
 *
 * The above copyright notice and this permission notice shall be
 * included in all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
 * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
 * HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
 * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */


/*********************************************************************************************************************************
*   Header Files                                                                                                                 *
*********************************************************************************************************************************/
#include "the-netbsd-kernel.h"

#include <iprt/memobj.h>
#include <iprt/mem.h>
#include <iprt/err.h>
#include <iprt/assert.h>
#include <iprt/log.h>
#include <iprt/param.h>
#include <iprt/process.h>
#include "internal/memobj.h"


/*********************************************************************************************************************************
*   Structures and Typedefs                                                                                                      *
*********************************************************************************************************************************/
/**
 * The NetBSD version of the memory object structure.
 */
typedef struct RTR0MEMOBJNETBSD
{
    /** The core structure. */
    RTR0MEMOBJINTERNAL  Core;
    /** The size of the allocation. */
    size_t              size;
    /** The list of physical pages backing the object. */
    struct pglist       pglist;
} RTR0MEMOBJNETBSD, *PRTR0MEMOBJNETBSD;


typedef struct vm_map *vm_map_t;

/**
 * Gets the virtual memory map the specified object is mapped into.
 *
 * @returns VM map handle on success, NULL if no map.
 * @param   pMem        The memory object.
 */
static vm_map_t rtR0MemObjNetBSDGetMap(PRTR0MEMOBJINTERNAL pMem)
{
    switch (pMem->enmType)
    {
        case RTR0MEMOBJTYPE_PAGE:
        case RTR0MEMOBJTYPE_LOW:
        case RTR0MEMOBJTYPE_CONT:
            return kernel_map;

        case RTR0MEMOBJTYPE_PHYS:
        case RTR0MEMOBJTYPE_PHYS_NC:
            return NULL; /* pretend these have no mapping atm. */

        case RTR0MEMOBJTYPE_LOCK:
            return pMem->u.Lock.R0Process == NIL_RTR0PROCESS
                 ? kernel_map
                 : &((struct proc *)pMem->u.Lock.R0Process)->p_vmspace->vm_map;

        case RTR0MEMOBJTYPE_RES_VIRT:
            return pMem->u.ResVirt.R0Process == NIL_RTR0PROCESS
                 ? kernel_map
                 : &((struct proc *)pMem->u.ResVirt.R0Process)->p_vmspace->vm_map;

        case RTR0MEMOBJTYPE_MAPPING:
            return pMem->u.Mapping.R0Process == NIL_RTR0PROCESS
                 ? kernel_map
                 : &((struct proc *)pMem->u.Mapping.R0Process)->p_vmspace->vm_map;

        default:
            return NULL;
    }
}


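/**
 * Frees the native NetBSD resources backing a memory object.
 *
 * @returns IPRT status code.
 * @param   pMem        The memory object to free.
 */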
DECLHIDDEN(int) rtR0MemObjNativeFree(RTR0MEMOBJ pMem)
{
    PRTR0MEMOBJNETBSD pMemNetBSD = (PRTR0MEMOBJNETBSD)pMem;

    switch (pMemNetBSD->Core.enmType)
    {
        case RTR0MEMOBJTYPE_PAGE:
        {
            kmem_free(pMemNetBSD->Core.pv, pMemNetBSD->Core.cb);
            break;
        }
        case RTR0MEMOBJTYPE_LOW:
        case RTR0MEMOBJTYPE_CONT:
        {
            /* Unmap */
            pmap_kremove((vaddr_t)pMemNetBSD->Core.pv, pMemNetBSD->Core.cb);
            /* Free the virtual space */
            uvm_km_free(kernel_map, (vaddr_t)pMemNetBSD->Core.pv, pMemNetBSD->Core.cb, UVM_KMF_VAONLY);
            /* Free the physical pages */
            uvm_pglistfree(&pMemNetBSD->pglist);
            break;
        }
        case RTR0MEMOBJTYPE_PHYS:
        case RTR0MEMOBJTYPE_PHYS_NC:
        {
            /* Free the physical pages */
            uvm_pglistfree(&pMemNetBSD->pglist);
            break;
        }
        case RTR0MEMOBJTYPE_LOCK:
            if (pMemNetBSD->Core.u.Lock.R0Process != NIL_RTR0PROCESS)
            {
                /* Re-enable paging for the previously wired user range. */
                uvm_map_pageable(
                    &((struct proc *)pMemNetBSD->Core.u.Lock.R0Process)->p_vmspace->vm_map,
                    (vaddr_t)pMemNetBSD->Core.pv,
                    (vaddr_t)pMemNetBSD->Core.pv + pMemNetBSD->Core.cb,
                    1, 0);
            }
            break;
        case RTR0MEMOBJTYPE_RES_VIRT:
            if (pMemNetBSD->Core.u.ResVirt.R0Process == NIL_RTR0PROCESS)
            {
                uvm_km_free(kernel_map, (vaddr_t)pMemNetBSD->Core.pv, pMemNetBSD->Core.cb, UVM_KMF_VAONLY);
            }
            break;
        case RTR0MEMOBJTYPE_MAPPING:
            if (pMemNetBSD->Core.u.Mapping.R0Process == NIL_RTR0PROCESS)
            {
                pmap_kremove((vaddr_t)pMemNetBSD->Core.pv, pMemNetBSD->Core.cb);
                uvm_km_free(kernel_map, (vaddr_t)pMemNetBSD->Core.pv, pMemNetBSD->Core.cb, UVM_KMF_VAONLY);
            }
            break;

        default:
            AssertMsgFailed(("enmType=%d\n", pMemNetBSD->Core.enmType));
            return VERR_INTERNAL_ERROR;
    }

    return VINF_SUCCESS;
}

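/**
 * Worker that allocates physical pages up to VmPhysAddrHigh and maps them
 * into the kernel map.
 *
 * @returns IPRT status code.
 * @param   pMemNetBSD      The memory object to back with pages.
 * @param   cb              The number of bytes to allocate.
 * @param   fExecutable     Whether the mapping should be executable.
 * @param   VmPhysAddrHigh  The highest acceptable physical address.
 * @param   fContiguous     Whether the pages must be physically contiguous.
 */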
static int rtR0MemObjNetBSDAllocHelper(PRTR0MEMOBJNETBSD pMemNetBSD, size_t cb, bool fExecutable,
                                       paddr_t VmPhysAddrHigh, bool fContiguous)
{
    /* Virtual space first */
    vaddr_t virt = uvm_km_alloc(kernel_map, cb, 0, UVM_KMF_VAONLY | UVM_KMF_WAITVA | UVM_KMF_CANFAIL);
    if (virt == 0)
        return VERR_NO_MEMORY;

    struct pglist *rlist = &pMemNetBSD->pglist;

    int nsegs = fContiguous ? 1 : INT_MAX;

    /* Physical pages */
    if (uvm_pglistalloc(cb, 0, VmPhysAddrHigh, PAGE_SIZE, 0, rlist, nsegs, 1) != 0)
    {
        uvm_km_free(kernel_map, virt, cb, UVM_KMF_VAONLY);
        return VERR_NO_MEMORY;
    }

    /* Map */
    struct vm_page *page;
    vm_prot_t prot = VM_PROT_READ | VM_PROT_WRITE;
    if (fExecutable)
        prot |= VM_PROT_EXECUTE;
    vaddr_t virt2 = virt;
    TAILQ_FOREACH(page, rlist, pageq.queue)
    {
        pmap_kenter_pa(virt2, VM_PAGE_TO_PHYS(page), prot, 0);
        virt2 += PAGE_SIZE;
    }

    pMemNetBSD->Core.pv = (void *)virt;
    if (fContiguous)
    {
        page = TAILQ_FIRST(rlist);
        pMemNetBSD->Core.u.Cont.Phys = VM_PAGE_TO_PHYS(page);
    }
    return VINF_SUCCESS;
}

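/**
 * Allocates wired kernel memory via kmem_alloc (RTR0MEMOBJTYPE_PAGE).
 */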
DECLHIDDEN(int) rtR0MemObjNativeAllocPage(PPRTR0MEMOBJINTERNAL ppMem, size_t cb, bool fExecutable)
{
    PRTR0MEMOBJNETBSD pMemNetBSD = (PRTR0MEMOBJNETBSD)rtR0MemObjNew(sizeof(*pMemNetBSD),
                                                                    RTR0MEMOBJTYPE_PAGE, NULL, cb);
    if (!pMemNetBSD)
        return VERR_NO_MEMORY;

    void *pvMem = kmem_alloc(cb, KM_SLEEP);
    if (RT_UNLIKELY(!pvMem))
    {
        rtR0MemObjDelete(&pMemNetBSD->Core);
        return VERR_NO_PAGE_MEMORY;
    }
    if (fExecutable)
    {
        pmap_protect(pmap_kernel(), (vaddr_t)pvMem, (vaddr_t)pvMem + cb,
                     VM_PROT_READ | VM_PROT_WRITE | VM_PROT_EXECUTE);
    }

    pMemNetBSD->Core.pv = pvMem;
    *ppMem = &pMemNetBSD->Core;
    return VINF_SUCCESS;
}


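/**
 * Allocates kernel memory with all pages below 4GB (RTR0MEMOBJTYPE_LOW).
 */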
DECLHIDDEN(int) rtR0MemObjNativeAllocLow(PPRTR0MEMOBJINTERNAL ppMem, size_t cb, bool fExecutable)
{
    PRTR0MEMOBJNETBSD pMemNetBSD = (PRTR0MEMOBJNETBSD)rtR0MemObjNew(sizeof(*pMemNetBSD),
                                                                    RTR0MEMOBJTYPE_LOW, NULL, cb);
    if (!pMemNetBSD)
        return VERR_NO_MEMORY;

    int rc = rtR0MemObjNetBSDAllocHelper(pMemNetBSD, cb, fExecutable, _4G - 1, false /*fContiguous*/);
    if (rc)
    {
        rtR0MemObjDelete(&pMemNetBSD->Core);
        return rc;
    }

    *ppMem = &pMemNetBSD->Core;
    return VINF_SUCCESS;
}


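/**
 * Allocates physically contiguous kernel memory below 4GB (RTR0MEMOBJTYPE_CONT).
 */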
DECLHIDDEN(int) rtR0MemObjNativeAllocCont(PPRTR0MEMOBJINTERNAL ppMem, size_t cb, bool fExecutable)
{
    PRTR0MEMOBJNETBSD pMemNetBSD = (PRTR0MEMOBJNETBSD)rtR0MemObjNew(sizeof(*pMemNetBSD),
                                                                    RTR0MEMOBJTYPE_CONT, NULL, cb);
    if (!pMemNetBSD)
        return VERR_NO_MEMORY;

    int rc = rtR0MemObjNetBSDAllocHelper(pMemNetBSD, cb, fExecutable, _4G - 1, true /*fContiguous*/);
    if (rc)
    {
        rtR0MemObjDelete(&pMemNetBSD->Core);
        return rc;
    }

    *ppMem = &pMemNetBSD->Core;
    return VINF_SUCCESS;
}


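/**
 * Worker for rtR0MemObjNativeAllocPhys and rtR0MemObjNativeAllocPhysNC that
 * allocates physical pages without mapping them.
 *
 * @returns IPRT status code.
 * @param   ppMem       Where to return the memory object.
 * @param   enmType     RTR0MEMOBJTYPE_PHYS or RTR0MEMOBJTYPE_PHYS_NC.
 * @param   cb          The number of bytes to allocate.
 * @param   PhysHighest The highest acceptable physical address, NIL_RTHCPHYS for no limit.
 * @param   uAlignment  The alignment of the allocation.
 * @param   fContiguous Whether the pages must be physically contiguous.
 */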
static int rtR0MemObjNetBSDAllocPhysPages(PPRTR0MEMOBJINTERNAL ppMem, RTR0MEMOBJTYPE enmType,
                                          size_t cb, RTHCPHYS PhysHighest, size_t uAlignment,
                                          bool fContiguous)
{
    /* create the object. */
    PRTR0MEMOBJNETBSD pMemNetBSD = (PRTR0MEMOBJNETBSD)rtR0MemObjNew(sizeof(*pMemNetBSD),
                                                                    enmType, NULL, cb);
    if (!pMemNetBSD)
        return VERR_NO_MEMORY;

    paddr_t VmPhysAddrHigh;
    if (PhysHighest != NIL_RTHCPHYS)
        VmPhysAddrHigh = PhysHighest;
    else
        VmPhysAddrHigh = ~(paddr_t)0;

    int nsegs = fContiguous ? 1 : INT_MAX;

    int error = uvm_pglistalloc(cb, 0, VmPhysAddrHigh, uAlignment, 0, &pMemNetBSD->pglist, nsegs, 1);
    if (error)
    {
        rtR0MemObjDelete(&pMemNetBSD->Core);
        return VERR_NO_MEMORY;
    }

    if (fContiguous)
    {
        Assert(enmType == RTR0MEMOBJTYPE_PHYS);
        const struct vm_page * const pg = TAILQ_FIRST(&pMemNetBSD->pglist);
        pMemNetBSD->Core.u.Phys.PhysBase   = VM_PAGE_TO_PHYS(pg);
        pMemNetBSD->Core.u.Phys.fAllocated = true;
    }
    *ppMem = &pMemNetBSD->Core;

    return VINF_SUCCESS;
}


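/**
 * Allocates contiguous physical memory without mapping it (RTR0MEMOBJTYPE_PHYS).
 */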
DECLHIDDEN(int) rtR0MemObjNativeAllocPhys(PPRTR0MEMOBJINTERNAL ppMem, size_t cb, RTHCPHYS PhysHighest, size_t uAlignment)
{
    return rtR0MemObjNetBSDAllocPhysPages(ppMem, RTR0MEMOBJTYPE_PHYS, cb, PhysHighest, uAlignment, true /*fContiguous*/);
}


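/**
 * Allocates non-contiguous physical memory without mapping it (RTR0MEMOBJTYPE_PHYS_NC).
 */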
DECLHIDDEN(int) rtR0MemObjNativeAllocPhysNC(PPRTR0MEMOBJINTERNAL ppMem, size_t cb, RTHCPHYS PhysHighest)
{
    return rtR0MemObjNetBSDAllocPhysPages(ppMem, RTR0MEMOBJTYPE_PHYS_NC, cb, PhysHighest, PAGE_SIZE, false /*fContiguous*/);
}


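/**
 * Creates a memory object for a pre-existing physical address range
 * (RTR0MEMOBJTYPE_PHYS); nothing is allocated or mapped here.
 */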
DECLHIDDEN(int) rtR0MemObjNativeEnterPhys(PPRTR0MEMOBJINTERNAL ppMem, RTHCPHYS Phys, size_t cb, uint32_t uCachePolicy)
{
    AssertReturn(uCachePolicy == RTMEM_CACHE_POLICY_DONT_CARE, VERR_NOT_SUPPORTED);

    /* create the object. */
    PRTR0MEMOBJNETBSD pMemNetBSD = (PRTR0MEMOBJNETBSD)rtR0MemObjNew(sizeof(*pMemNetBSD), RTR0MEMOBJTYPE_PHYS, NULL, cb);
    if (!pMemNetBSD)
        return VERR_NO_MEMORY;

    /* there is no allocation here, it needs to be mapped somewhere first. */
    pMemNetBSD->Core.u.Phys.fAllocated   = false;
    pMemNetBSD->Core.u.Phys.PhysBase     = Phys;
    pMemNetBSD->Core.u.Phys.uCachePolicy = uCachePolicy;
    TAILQ_INIT(&pMemNetBSD->pglist);
    *ppMem = &pMemNetBSD->Core;
    return VINF_SUCCESS;
}


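/**
 * Locks down (wires) a range of user memory via uvm_map_pageable
 * (RTR0MEMOBJTYPE_LOCK); fAccess is not used on NetBSD.
 */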
DECLHIDDEN(int) rtR0MemObjNativeLockUser(PPRTR0MEMOBJINTERNAL ppMem, RTR3PTR R3Ptr, size_t cb, uint32_t fAccess, RTR0PROCESS R0Process)
{
    PRTR0MEMOBJNETBSD pMemNetBSD = (PRTR0MEMOBJNETBSD)rtR0MemObjNew(sizeof(*pMemNetBSD), RTR0MEMOBJTYPE_LOCK, (void *)R3Ptr, cb);
    if (!pMemNetBSD)
        return VERR_NO_MEMORY;

    /* Wire down the range so it cannot be paged out. */
    int rc = uvm_map_pageable(&((struct proc *)R0Process)->p_vmspace->vm_map, R3Ptr, R3Ptr + cb, 0, 0);
    if (rc)
    {
        rtR0MemObjDelete(&pMemNetBSD->Core);
        return VERR_NO_MEMORY;
    }

    pMemNetBSD->Core.u.Lock.R0Process = R0Process;
    *ppMem = &pMemNetBSD->Core;
    return VINF_SUCCESS;
}


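/**
 * Locks down a range of kernel memory (RTR0MEMOBJTYPE_LOCK).
 */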
DECLHIDDEN(int) rtR0MemObjNativeLockKernel(PPRTR0MEMOBJINTERNAL ppMem, void *pv, size_t cb, uint32_t fAccess)
{
    /* Kernel memory is assumed to be wired already; all memory allocated by VBox code should be. */
    PRTR0MEMOBJNETBSD pMemNetBSD = (PRTR0MEMOBJNETBSD)rtR0MemObjNew(sizeof(*pMemNetBSD), RTR0MEMOBJTYPE_LOCK, pv, cb);
    if (!pMemNetBSD)
        return VERR_NO_MEMORY;

    pMemNetBSD->Core.u.Lock.R0Process = NIL_RTR0PROCESS;
    pMemNetBSD->Core.pv = pv;
    *ppMem = &pMemNetBSD->Core;
    return VINF_SUCCESS;
}

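/**
 * Reserves kernel virtual address space (RTR0MEMOBJTYPE_RES_VIRT); only
 * non-fixed (pvFixed == (void *)-1) reservations are supported.
 */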
DECLHIDDEN(int) rtR0MemObjNativeReserveKernel(PPRTR0MEMOBJINTERNAL ppMem, void *pvFixed, size_t cb, size_t uAlignment)
{
    if (pvFixed != (void *)-1)
    {
        /* can we support this? or can we assume the virtual space is already reserved? */
        printf("reserve specified kernel virtual address not supported\n");
        return VERR_NOT_SUPPORTED;
    }

    PRTR0MEMOBJNETBSD pMemNetBSD = (PRTR0MEMOBJNETBSD)rtR0MemObjNew(sizeof(*pMemNetBSD), RTR0MEMOBJTYPE_RES_VIRT, NULL, cb);
    if (!pMemNetBSD)
        return VERR_NO_MEMORY;

    vaddr_t virt = uvm_km_alloc(kernel_map, cb, uAlignment, UVM_KMF_VAONLY | UVM_KMF_WAITVA | UVM_KMF_CANFAIL);
    if (virt == 0)
    {
        rtR0MemObjDelete(&pMemNetBSD->Core);
        return VERR_NO_MEMORY;
    }

    pMemNetBSD->Core.u.ResVirt.R0Process = NIL_RTR0PROCESS;
    pMemNetBSD->Core.pv = (void *)virt;
    *ppMem = &pMemNetBSD->Core;
    return VINF_SUCCESS;
}


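/**
 * Reserves user virtual address space; not implemented on NetBSD.
 */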
DECLHIDDEN(int) rtR0MemObjNativeReserveUser(PPRTR0MEMOBJINTERNAL ppMem, RTR3PTR R3PtrFixed, size_t cb, size_t uAlignment, RTR0PROCESS R0Process)
{
    printf("NativeReserveUser\n");
    return VERR_NOT_SUPPORTED;
}


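/**
 * Maps a (sub-range of a) physical memory object into the kernel map
 * (RTR0MEMOBJTYPE_MAPPING); only RTR0MEMOBJTYPE_PHYS and RTR0MEMOBJTYPE_PHYS_NC
 * objects can be mapped, and only at a non-fixed address.
 */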
DECLHIDDEN(int) rtR0MemObjNativeMapKernel(PPRTR0MEMOBJINTERNAL ppMem, RTR0MEMOBJ pMemToMap, void *pvFixed, size_t uAlignment,
                                          unsigned fProt, size_t offSub, size_t cbSub)
{
    if (pvFixed != (void *)-1)
    {
        /* can we support this? or can we assume the virtual space is already reserved? */
        printf("map to specified kernel virtual address not supported\n");
        return VERR_NOT_SUPPORTED;
    }

    PRTR0MEMOBJNETBSD pMemNetBSD0 = (PRTR0MEMOBJNETBSD)pMemToMap;
    if (   pMemNetBSD0->Core.enmType != RTR0MEMOBJTYPE_PHYS
        && pMemNetBSD0->Core.enmType != RTR0MEMOBJTYPE_PHYS_NC)
    {
        printf("memory to map is not physical\n");
        return VERR_NOT_SUPPORTED;
    }
    size_t sz = cbSub > 0 ? cbSub : pMemNetBSD0->Core.cb;

    PRTR0MEMOBJNETBSD pMemNetBSD = (PRTR0MEMOBJNETBSD)rtR0MemObjNew(sizeof(*pMemNetBSD), RTR0MEMOBJTYPE_MAPPING, NULL, sz);
    if (!pMemNetBSD)
        return VERR_NO_MEMORY;

    vaddr_t virt = uvm_km_alloc(kernel_map, sz, uAlignment, UVM_KMF_VAONLY | UVM_KMF_WAITVA | UVM_KMF_CANFAIL);
    if (virt == 0)
    {
        rtR0MemObjDelete(&pMemNetBSD->Core);
        return VERR_NO_MEMORY;
    }

    vm_prot_t prot = 0;
    if ((fProt & RTMEM_PROT_READ) == RTMEM_PROT_READ)
        prot |= VM_PROT_READ;
    if ((fProt & RTMEM_PROT_WRITE) == RTMEM_PROT_WRITE)
        prot |= VM_PROT_WRITE;
    if ((fProt & RTMEM_PROT_EXEC) == RTMEM_PROT_EXEC)
        prot |= VM_PROT_EXECUTE;

    /* Enter the pages covering [offSub, offSub + sz) into the kernel pmap. */
    struct vm_page *page;
    vaddr_t virt2 = virt;
    size_t map_pos = 0;
    TAILQ_FOREACH(page, &pMemNetBSD0->pglist, pageq.queue)
    {
        if (map_pos >= offSub)
        {
            if (cbSub > 0 && map_pos >= offSub + cbSub)
                break;

            pmap_kenter_pa(virt2, VM_PAGE_TO_PHYS(page), prot, 0);
            virt2 += PAGE_SIZE;
        }
        map_pos += PAGE_SIZE;
    }

    pMemNetBSD->Core.pv = (void *)virt;
    pMemNetBSD->Core.u.Mapping.R0Process = NIL_RTR0PROCESS;
    *ppMem = &pMemNetBSD->Core;

    return VINF_SUCCESS;
}


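/**
 * Maps a memory object into a user process; not implemented on NetBSD.
 */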
DECLHIDDEN(int) rtR0MemObjNativeMapUser(PPRTR0MEMOBJINTERNAL ppMem, RTR0MEMOBJ pMemToMap, RTR3PTR R3PtrFixed, size_t uAlignment,
                                        unsigned fProt, RTR0PROCESS R0Process, size_t offSub, size_t cbSub)
{
    printf("NativeMapUser\n");
    return VERR_NOT_SUPPORTED;
}


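/**
 * Changes the protection of a sub-range of a memory object via uvm_map_protect.
 */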
DECLHIDDEN(int) rtR0MemObjNativeProtect(PRTR0MEMOBJINTERNAL pMem, size_t offSub, size_t cbSub, uint32_t fProt)
{
    vm_prot_t ProtectionFlags = 0;
    vaddr_t   AddrStart       = (vaddr_t)pMem->pv + offSub;
    vm_map_t  pVmMap          = rtR0MemObjNetBSDGetMap(pMem);

    if (!pVmMap)
        return VERR_NOT_SUPPORTED;

    if ((fProt & RTMEM_PROT_READ) == RTMEM_PROT_READ)
        ProtectionFlags |= UVM_PROT_R;
    if ((fProt & RTMEM_PROT_WRITE) == RTMEM_PROT_WRITE)
        ProtectionFlags |= UVM_PROT_W;
    if ((fProt & RTMEM_PROT_EXEC) == RTMEM_PROT_EXEC)
        ProtectionFlags |= UVM_PROT_X;

    int error = uvm_map_protect(pVmMap, AddrStart, AddrStart + cbSub, ProtectionFlags, 0);
    if (!error)
        return VINF_SUCCESS;

    return VERR_NOT_SUPPORTED;
}


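/**
 * Gets the host physical address of the given page within the memory object,
 * using the page list or the relevant pmap depending on the object type.
 *
 * @returns The page's physical address, NIL_RTHCPHYS if not applicable.
 * @param   pMem        The memory object.
 * @param   iPage       The page index.
 */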
DECLHIDDEN(RTHCPHYS) rtR0MemObjNativeGetPagePhysAddr(PRTR0MEMOBJINTERNAL pMem, size_t iPage)
{
    PRTR0MEMOBJNETBSD pMemNetBSD = (PRTR0MEMOBJNETBSD)pMem;

    switch (pMemNetBSD->Core.enmType)
    {
        case RTR0MEMOBJTYPE_PAGE:
        case RTR0MEMOBJTYPE_LOW:
        {
            vaddr_t va = (vaddr_t)pMemNetBSD->Core.pv + ptoa(iPage);
            paddr_t pa = 0;
            pmap_extract(pmap_kernel(), va, &pa);
            return pa;
        }
        case RTR0MEMOBJTYPE_CONT:
            return pMemNetBSD->Core.u.Cont.Phys + ptoa(iPage);
        case RTR0MEMOBJTYPE_PHYS:
            return pMemNetBSD->Core.u.Phys.PhysBase + ptoa(iPage);
        case RTR0MEMOBJTYPE_PHYS_NC:
        {
            /* Walk the page list to the iPage'th entry. */
            struct vm_page *page;
            size_t i = 0;
            TAILQ_FOREACH(page, &pMemNetBSD->pglist, pageq.queue)
            {
                if (i == iPage)
                    break;
                i++;
            }
            if (!page)
                return NIL_RTHCPHYS;
            return VM_PAGE_TO_PHYS(page);
        }
        case RTR0MEMOBJTYPE_LOCK:
        case RTR0MEMOBJTYPE_MAPPING:
        {
            pmap_t pmap;
            if (pMem->u.Lock.R0Process == NIL_RTR0PROCESS)
                pmap = pmap_kernel();
            else
                pmap = ((struct proc *)pMem->u.Lock.R0Process)->p_vmspace->vm_map.pmap;
            vaddr_t va = (vaddr_t)pMemNetBSD->Core.pv + ptoa(iPage);
            paddr_t pa = 0;
            pmap_extract(pmap, va, &pa);
            return pa;
        }
        case RTR0MEMOBJTYPE_RES_VIRT:
            return NIL_RTHCPHYS;
        default:
            return NIL_RTHCPHYS;
    }
}