VirtualBox

source: vbox/trunk/src/VBox/Runtime/r0drv/memobj-r0drv.cpp@ 272

Last change on this file since 272 was 217, checked in by vboxsync, 18 years ago

hacking darwin memory objects.

  • Property svn:keywords set to Id
File size: 24.4 KB
Line 
1/* $Id: memobj-r0drv.cpp 217 2007-01-21 21:41:29Z vboxsync $ */
2/** @file
3 * InnoTek Portable Runtime - Ring-0 Memory Objects, Common Code.
4 */
5
6/*
7 * Copyright (C) 2006 InnoTek Systemberatung GmbH
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License as published by the Free Software Foundation,
13 * in version 2 as it comes in the "COPYING" file of the VirtualBox OSE
14 * distribution. VirtualBox OSE is distributed in the hope that it will
15 * be useful, but WITHOUT ANY WARRANTY of any kind.
16 *
17 * If you received this file as part of a commercial VirtualBox
18 * distribution, then only the terms of your commercial VirtualBox
19 * license agreement apply instead of the previous paragraph.
20 */
21
22
23/*******************************************************************************
24* Header Files *
25*******************************************************************************/
26#define LOG_GROUP RTLOGGROUP_DEFAULT ///@todo RTLOGGROUP_MEM
27#include <iprt/memobj.h>
28#include <iprt/alloc.h>
29#include <iprt/assert.h>
30#include <iprt/err.h>
31#include <iprt/log.h>
32#include <iprt/param.h>
33#include "internal/memobj.h"
34
35
36/**
37 * Internal function for allocating a new memory object.
38 *
39 * @returns The allocated and initialized handle.
40 * @param cbSelf The size of the memory object handle. 0 mean default size.
41 * @param enmType The memory object type.
42 * @param pv The memory object mapping.
43 * @param cb The size of the memory object.
44 */
45PRTR0MEMOBJINTERNAL rtR0MemObjNew(size_t cbSelf, RTR0MEMOBJTYPE enmType, void *pv, size_t cb)
46{
47 PRTR0MEMOBJINTERNAL pNew;
48
49 /* validate the size */
50 if (!cbSelf)
51 cbSelf = sizeof(*pNew);
52 Assert(cbSelf >= sizeof(*pNew));
53
54 /*
55 * Allocate and initialize the object.
56 */
57 pNew = (PRTR0MEMOBJINTERNAL)RTMemAllocZ(cb);
58 if (pNew)
59 {
60 pNew->u32Magic = RTR0MEMOBJ_MAGIC;
61 pNew->cbSelf = cbSelf;
62 pNew->enmType = enmType;
63 pNew->cb = cb;
64 pNew->pv = pv;
65 }
66 return pNew;
67}
68
69
70/**
71 * Links a mapping object to a primary object.
72 *
73 * @returns IPRT status code.
74 * @retval VINF_SUCCESS on success.
75 * @retval VINF_NO_MEMORY if we couldn't expand the mapping array of the parent.
76 * @param pParent The parent (primary) memory object.
77 * @param pChild The child (mapping) memory object.
78 */
79static int rtR0MemObjLink(PRTR0MEMOBJINTERNAL pParent, PRTR0MEMOBJINTERNAL pChild)
80{
81 /* sanity */
82 Assert(rtR0MemObjIsMapping(pChild));
83 Assert(!rtR0MemObjIsMapping(pParent));
84
85 /* expand the array? */
86 const uint32_t i = pParent->uRel.Parent.cMappings;
87 if (i >= pParent->uRel.Parent.cMappingsAllocated)
88 {
89 void *pv = RTMemRealloc(pParent->uRel.Parent.papMappings,
90 (i + 32) * sizeof(pParent->uRel.Parent.papMappings[0]));
91 if (!pv)
92 return VERR_NO_MEMORY;
93 pParent->uRel.Parent.papMappings = (PPRTR0MEMOBJINTERNAL)pv;
94 pParent->uRel.Parent.cMappingsAllocated = i + 32;
95 Assert(i == pParent->uRel.Parent.cMappings);
96 }
97
98 /* do the linking. */
99 pParent->uRel.Parent.papMappings[i] = pChild;
100 pParent->uRel.Parent.cMappings++;
101 pChild->uRel.Child.pParent = pParent;
102
103 return VINF_SUCCESS;
104}
105
106
107/**
108 * Checks if this is mapping or not.
109 *
110 * @returns true if it's a mapping, otherwise false.
111 * @param MemObj The ring-0 memory object handle.
112 */
113RTR0DECL(bool) RTR0MemObjIsMapping(RTR0MEMOBJ MemObj)
114{
115 /* Validate the object handle. */
116 AssertPtrReturn(MemObj, false);
117 PRTR0MEMOBJINTERNAL pMem = (PRTR0MEMOBJINTERNAL)MemObj;
118 AssertReturn(pMem->u32Magic == RTR0MEMOBJ_MAGIC, false);
119 AssertReturn(pMem->enmType > RTR0MEMOBJTYPE_INVALID && pMem->enmType < RTR0MEMOBJTYPE_END, false);
120
121 /* hand it on to the inlined worker. */
122 return rtR0MemObjIsMapping(pMem);
123}
124
125
126/**
127 * Gets the address of a ring-0 memory object.
128 *
129 * @returns The address of the memory object.
130 * @returns NULL if the handle is invalid (asserts in strict builds) or if there isn't any mapping.
131 * @param MemObj The ring-0 memory object handle.
132 */
133RTR0DECL(void *) RTR0MemObjAddress(RTR0MEMOBJ MemObj)
134{
135 /* Validate the object handle. */
136 AssertPtrReturn(MemObj, 0);
137 PRTR0MEMOBJINTERNAL pMem = (PRTR0MEMOBJINTERNAL)MemObj;
138 AssertReturn(pMem->u32Magic == RTR0MEMOBJ_MAGIC, 0);
139 AssertReturn(pMem->enmType > RTR0MEMOBJTYPE_INVALID && pMem->enmType < RTR0MEMOBJTYPE_END, 0);
140
141 /* return the mapping address. */
142 return pMem->pv;
143}
144
145
146/**
147 * Gets the size of a ring-0 memory object.
148 *
149 * @returns The address of the memory object.
150 * @returns NULL if the handle is invalid (asserts in strict builds) or if there isn't any mapping.
151 * @param MemObj The ring-0 memory object handle.
152 */
153RTR0DECL(size_t) RTR0MemObjSize(RTR0MEMOBJ MemObj)
154{
155 /* Validate the object handle. */
156 AssertPtrReturn(MemObj, 0);
157 PRTR0MEMOBJINTERNAL pMem = (PRTR0MEMOBJINTERNAL)MemObj;
158 AssertReturn(pMem->u32Magic == RTR0MEMOBJ_MAGIC, 0);
159 AssertReturn(pMem->enmType > RTR0MEMOBJTYPE_INVALID && pMem->enmType < RTR0MEMOBJTYPE_END, 0);
160
161 /* return the size. */
162 return pMem->cb;
163}
164
165
166/**
167 * Get the physical address of an page in the memory object.
168 *
169 * @returns The physical address.
170 * @returns NIL_RTHCPHYS if the object doesn't contain fixed physical pages.
171 * @returns NIL_RTHCPHYS if the iPage is out of range.
172 * @returns NIL_RTHCPHYS if the object handle isn't valid.
173 * @param MemObj The ring-0 memory object handle.
174 * @param iPage The page number within the object.
175 */
176RTR0DECL(RTHCPHYS) RTR0MemObjGetPagePhysAddr(RTR0MEMOBJ MemObj, unsigned iPage)
177{
178 /* Validate the object handle. */
179 AssertPtrReturn(MemObj, NIL_RTHCPHYS);
180 PRTR0MEMOBJINTERNAL pMem = (PRTR0MEMOBJINTERNAL)MemObj;
181 AssertReturn(pMem->u32Magic == RTR0MEMOBJ_MAGIC, NIL_RTHCPHYS);
182 AssertReturn(pMem->enmType > RTR0MEMOBJTYPE_INVALID && pMem->enmType < RTR0MEMOBJTYPE_END, NIL_RTHCPHYS);
183 const unsigned cPages = (pMem->cb >> PAGE_SHIFT);
184 if (iPage >= cPages)
185 {
186 /* permit: while (RTR0MemObjGetPagePhysAddr(pMem, iPage++) != NIL_RTHCPHYS) {} */
187 if (iPage == cPages)
188 return NIL_RTHCPHYS;
189 AssertReturn(iPage < (pMem->cb >> PAGE_SHIFT), NIL_RTHCPHYS);
190 }
191
192 /*
193 * We know the address of physically contiguous allocations and mappings.
194 */
195 if (pMem->enmType == RTR0MEMOBJTYPE_CONT)
196 return pMem->u.Cont.Phys + iPage * PAGE_SIZE;
197 if (pMem->enmType == RTR0MEMOBJTYPE_PHYS)
198 return pMem->u.Phys.PhysBase + iPage * PAGE_SIZE;
199
200 /*
201 * Do the job.
202 */
203 return rtR0MemObjNativeGetPagePhysAddr(pMem, iPage);
204}
205
206
207/**
208 * Frees a ring-0 memory object.
209 *
210 * @returns IPRT status code.
211 * @retval VERR_INVALID_HANDLE if
212 * @param MemObj The ring-0 memory object to be freed. NULL is accepted.
213 * @param fFreeMappings Whether or not to free mappings of the object.
214 */
215RTR0DECL(int) RTR0MemObjFree(RTR0MEMOBJ MemObj, bool fFreeMappings)
216{
217 /*
218 * Validate the object handle.
219 */
220 if (MemObj == NIL_RTR0MEMOBJ)
221 return VINF_SUCCESS;
222 AssertPtrReturn(MemObj, VERR_INVALID_HANDLE);
223 PRTR0MEMOBJINTERNAL pMem = (PRTR0MEMOBJINTERNAL)MemObj;
224 AssertReturn(pMem->u32Magic == RTR0MEMOBJ_MAGIC, VERR_INVALID_HANDLE);
225 AssertReturn(pMem->enmType > RTR0MEMOBJTYPE_INVALID && pMem->enmType < RTR0MEMOBJTYPE_END, VERR_INVALID_HANDLE);
226
227 /*
228 * Deal with mapings according to fFreeMappings.
229 */
230 if ( !rtR0MemObjIsMapping(pMem)
231 && pMem->uRel.Parent.cMappings > 0)
232 {
233 /* fail if not requested to free mappings. */
234 if (!fFreeMappings)
235 return VERR_MEMORY_BUSY;
236
237 while (pMem->uRel.Parent.cMappings > 0)
238 {
239 PRTR0MEMOBJINTERNAL pChild = pMem->uRel.Parent.papMappings[--pMem->uRel.Parent.cMappings];
240 pMem->uRel.Parent.papMappings[pMem->uRel.Parent.cMappings] = NULL;
241
242 /* sanity checks. */
243 AssertPtr(pChild);
244 AssertFatal(pChild->u32Magic == RTR0MEMOBJ_MAGIC);
245 AssertFatal(pChild->enmType > RTR0MEMOBJTYPE_INVALID && pChild->enmType < RTR0MEMOBJTYPE_END);
246 AssertFatal(rtR0MemObjIsMapping(pChild));
247
248 /* free the mapping. */
249 int rc = rtR0MemObjNativeFree(pChild);
250 if (RT_FAILURE(rc))
251 {
252 Log(("RTR0MemObjFree: failed to free mapping %p: %p %#zx; rc=%Vrc\n", pChild, pChild->pv, pChild->cb, rc));
253 pMem->uRel.Parent.papMappings[pMem->uRel.Parent.cMappings++] = pChild;
254 return rc;
255 }
256 }
257 }
258
259 /*
260 * Free this object.
261 */
262 int rc = rtR0MemObjNativeFree(pMem);
263 if (RT_SUCCESS(rc))
264 {
265 /*
266 * Ok, it was freed just fine. Now, if it's a mapping we'll have to remove it from the parent.
267 */
268 if (rtR0MemObjIsMapping(pMem))
269 {
270 PRTR0MEMOBJINTERNAL pParent = pMem->uRel.Child.pParent;
271
272 /* sanity checks */
273 AssertPtr(pParent);
274 AssertFatal(pParent->u32Magic == RTR0MEMOBJ_MAGIC);
275 AssertFatal(pParent->enmType > RTR0MEMOBJTYPE_INVALID && pParent->enmType < RTR0MEMOBJTYPE_END);
276 AssertFatal(!rtR0MemObjIsMapping(pParent));
277 AssertFatal(pParent->uRel.Parent.cMappings > 0);
278 AssertPtr(pParent->uRel.Parent.papMappings);
279
280 /* locate and remove from the array of mappings. */
281 uint32_t i = pParent->uRel.Parent.cMappings;
282 while (i-- > 0)
283 {
284 if (pParent->uRel.Parent.papMappings[i] == pMem)
285 {
286 pParent->uRel.Parent.papMappings[i] = pParent->uRel.Parent.papMappings[--pParent->uRel.Parent.cMappings];
287 break;
288 }
289 }
290 Assert(i != UINT32_MAX);
291 }
292 else
293 Assert(pMem->uRel.Parent.cMappings == 0);
294
295 /*
296 * Finally, destroy the handle.
297 */
298 pMem->u32Magic++;
299 pMem->enmType = RTR0MEMOBJTYPE_END;
300 if (!rtR0MemObjIsMapping(pMem))
301 RTMemFree(pMem->uRel.Parent.papMappings);
302 RTMemFree(pMem);
303 }
304 else
305 Log(("RTR0MemObjFree: failed to free %p: %d %p %#zx; rc=%Vrc\n",
306 pMem, pMem->enmType, pMem->pv, pMem->cb, rc));
307 return rc;
308}
309
310
311
312/**
313 * Allocates page aligned virtual kernel memory.
314 *
315 * The memory is taken from a non paged (= fixed physical memory backing) pool.
316 *
317 * @returns IPRT status code.
318 * @param pMemObj Where to store the ring-0 memory object handle.
319 * @param cb Number of bytes to allocate. This is rounded up to nearest page.
320 * @param fExecutable Flag indicating whether it should be permitted to executed code in the memory object.
321 */
322RTR0DECL(int) RTR0MemObjAllocPage(PRTR0MEMOBJ pMemObj, size_t cb, bool fExecutable)
323{
324 /* sanity checks. */
325 AssertPtrReturn(pMemObj, VERR_INVALID_POINTER);
326 *pMemObj = NIL_RTR0MEMOBJ;
327 AssertReturn(cb > 0, VERR_INVALID_PARAMETER);
328 const size_t cbAligned = RT_ALIGN_Z(cb, PAGE_SIZE);
329 AssertReturn(cb <= cbAligned, VERR_INVALID_PARAMETER);
330
331 /* do the allocation. */
332 return rtR0MemObjNativeAllocPage(pMemObj, cbAligned, fExecutable);
333}
334
335
336/**
337 * Allocates page aligned virtual kernel memory with physical backing below 4GB.
338 *
339 * The physical memory backing the allocation is fixed.
340 *
341 * @returns IPRT status code.
342 * @param pMemObj Where to store the ring-0 memory object handle.
343 * @param cb Number of bytes to allocate. This is rounded up to nearest page.
344 * @param fExecutable Flag indicating whether it should be permitted to executed code in the memory object.
345 */
346RTR0DECL(int) RTR0MemObjAllocLow(PRTR0MEMOBJ pMemObj, size_t cb, bool fExecutable)
347{
348 /* sanity checks. */
349 AssertPtrReturn(pMemObj, VERR_INVALID_POINTER);
350 *pMemObj = NIL_RTR0MEMOBJ;
351 AssertReturn(cb > 0, VERR_INVALID_PARAMETER);
352 const size_t cbAligned = RT_ALIGN_Z(cb, PAGE_SIZE);
353 AssertReturn(cb <= cbAligned, VERR_INVALID_PARAMETER);
354
355 /* do the allocation. */
356 return rtR0MemObjNativeAllocLow(pMemObj, cbAligned, fExecutable);
357}
358
359
360/**
361 * Allocates page aligned virtual kernel memory with contiguous physical backing below 4GB.
362 *
363 * The physical memory backing the allocation is fixed.
364 *
365 * @returns IPRT status code.
366 * @param pMemObj Where to store the ring-0 memory object handle.
367 * @param cb Number of bytes to allocate. This is rounded up to nearest page.
368 * @param fExecutable Flag indicating whether it should be permitted to executed code in the memory object.
369 */
370RTR0DECL(int) RTR0MemObjAllocCont(PRTR0MEMOBJ pMemObj, size_t cb, bool fExecutable)
371{
372 /* sanity checks. */
373 AssertPtrReturn(pMemObj, VERR_INVALID_POINTER);
374 *pMemObj = NIL_RTR0MEMOBJ;
375 AssertReturn(cb > 0, VERR_INVALID_PARAMETER);
376 const size_t cbAligned = RT_ALIGN_Z(cb, PAGE_SIZE);
377 AssertReturn(cb <= cbAligned, VERR_INVALID_PARAMETER);
378
379 /* do the allocation. */
380 return rtR0MemObjNativeAllocCont(pMemObj, cbAligned, fExecutable);
381}
382
383
384/**
385 * Locks a range of user virtual memory.
386 *
387 * @returns IPRT status code.
388 * @param pMemObj Where to store the ring-0 memory object handle.
389 * @param pv User virtual address. This is rounded down to a page boundrary.
390 * @param cb Number of bytes to lock. This is rounded up to nearest page boundrary.
391 *
392 * @remark RTR0MemObjGetAddress() will return the rounded down address.
393 */
394RTR0DECL(int) RTR0MemObjLockUser(PRTR0MEMOBJ pMemObj, void *pv, size_t cb)
395{
396 /* sanity checks. */
397 AssertPtrReturn(pMemObj, VERR_INVALID_POINTER);
398 *pMemObj = NIL_RTR0MEMOBJ;
399 AssertReturn(cb > 0, VERR_INVALID_PARAMETER);
400 const size_t cbAligned = RT_ALIGN_Z(cb + ((uintptr_t)pv & PAGE_OFFSET_MASK), PAGE_SIZE);
401 AssertReturn(cb <= cbAligned, VERR_INVALID_PARAMETER);
402 void * const pvAligned = (void *)((uintptr_t)pv & ~(uintptr_t)PAGE_OFFSET_MASK);
403
404 /* do the allocation. */
405 return rtR0MemObjNativeLockUser(pMemObj, pvAligned, cbAligned);
406}
407
408
409/**
410 * Locks a range of kernel virtual memory.
411 *
412 * @returns IPRT status code.
413 * @param pMemObj Where to store the ring-0 memory object handle.
414 * @param pv Kernel virtual address. This is rounded down to a page boundrary.
415 * @param cb Number of bytes to lock. This is rounded up to nearest page boundrary.
416 *
417 * @remark RTR0MemObjGetAddress() will return the rounded down address.
418 */
419RTR0DECL(int) RTR0MemObjLockKernel(PRTR0MEMOBJ pMemObj, void *pv, size_t cb)
420{
421 /* sanity checks. */
422 AssertPtrReturn(pMemObj, VERR_INVALID_POINTER);
423 *pMemObj = NIL_RTR0MEMOBJ;
424 AssertReturn(cb > 0, VERR_INVALID_PARAMETER);
425 const size_t cbAligned = RT_ALIGN_Z(cb + ((uintptr_t)pv & PAGE_OFFSET_MASK), PAGE_SIZE);
426 AssertReturn(cb <= cbAligned, VERR_INVALID_PARAMETER);
427 void * const pvAligned = (void *)((uintptr_t)pv & ~(uintptr_t)PAGE_OFFSET_MASK);
428 AssertPtrReturn(pvAligned, VERR_INVALID_POINTER);
429
430 /* do the allocation. */
431 return rtR0MemObjNativeLockKernel(pMemObj, pvAligned, cbAligned);
432}
433
434
435/**
436 * Allocates page aligned physical memory without (necessarily) any kernel mapping.
437 *
438 * @returns IPRT status code.
439 * @param pMemObj Where to store the ring-0 memory object handle.
440 * @param cb Number of bytes to allocate. This is rounded up to nearest page.
441 * @param PhysHighest The highest permittable address (inclusive).
442 * Pass NIL_RTHCPHYS if any address is acceptable.
443 */
444RTR0DECL(int) RTR0MemObjAllocPhys(PRTR0MEMOBJ pMemObj, size_t cb, RTHCPHYS PhysHighest)
445{
446 /* sanity checks. */
447 AssertPtrReturn(pMemObj, VERR_INVALID_POINTER);
448 *pMemObj = NIL_RTR0MEMOBJ;
449 AssertReturn(cb > 0, VERR_INVALID_PARAMETER);
450 const size_t cbAligned = RT_ALIGN_Z(cb, PAGE_SIZE);
451 AssertReturn(cb <= cbAligned, VERR_INVALID_PARAMETER);
452 AssertReturn(PhysHighest >= cb, VERR_INVALID_PARAMETER);
453
454 /* do the allocation. */
455 return rtR0MemObjNativeAllocPhys(pMemObj, cbAligned, PhysHighest);
456}
457
458
459/**
460 * Creates a page aligned, contiguous, physical memory object.
461 *
462 * No physical memory is allocated, we trust you do know what you're doing.
463 *
464 * @returns IPRT status code.
465 * @param pMemObj Where to store the ring-0 memory object handle.
466 * @param Phys The physical address to start at. This is rounded down to the
467 * nearest page boundrary.
468 * @param cb The size of the object in bytes. This is rounded up to nearest page boundrary.
469 */
470RTR0DECL(int) RTR0MemObjEnterPhys(PRTR0MEMOBJ pMemObj, RTHCPHYS Phys, size_t cb)
471{
472 /* sanity checks. */
473 AssertPtrReturn(pMemObj, VERR_INVALID_POINTER);
474 *pMemObj = NIL_RTR0MEMOBJ;
475 AssertReturn(cb > 0, VERR_INVALID_PARAMETER);
476 const size_t cbAligned = RT_ALIGN_Z(cb + (Phys & PAGE_OFFSET_MASK), PAGE_SIZE);
477 AssertReturn(cb <= cbAligned, VERR_INVALID_PARAMETER);
478 AssertReturn(Phys != NIL_RTHCPHYS, VERR_INVALID_PARAMETER);
479 const RTHCPHYS PhysAligned = Phys & ~(RTHCPHYS)PAGE_OFFSET_MASK;
480
481 /* do the allocation. */
482 return rtR0MemObjNativeEnterPhys(pMemObj, PhysAligned, cbAligned);
483}
484
485
486/**
487 * Reserves kernel virtual address space.
488 *
489 * @returns IPRT status code.
490 * @param pMemObj Where to store the ring-0 memory object handle.
491 * @param pvFixed Requested address. (void *)-1 means any address. This must match the alignment.
492 * @param cb The number of bytes to reserve. This is rounded up to nearest page.
493 * @param uAlignment The alignment of the reserved memory.
494 * Supported values are 0 (alias for PAGE_SIZE), PAGE_SIZE, _2M and _4M.
495 */
496RTR0DECL(int) RTR0MemObjReserveKernel(PRTR0MEMOBJ pMemObj, void *pvFixed, size_t cb, size_t uAlignment)
497{
498 /* sanity checks. */
499 AssertPtrReturn(pMemObj, VERR_INVALID_POINTER);
500 *pMemObj = NIL_RTR0MEMOBJ;
501 if (uAlignment == 0)
502 uAlignment = PAGE_SIZE;
503 AssertReturn(uAlignment == PAGE_SIZE || uAlignment == _2M || uAlignment == _4M, VERR_INVALID_PARAMETER);
504 AssertReturn(cb > 0, VERR_INVALID_PARAMETER);
505 const size_t cbAligned = RT_ALIGN_Z(cb, PAGE_SIZE);
506 AssertReturn(cb <= cbAligned, VERR_INVALID_PARAMETER);
507 if (pvFixed != (void *)-1)
508 AssertReturn(!((uintptr_t)pvFixed & (uAlignment - 1)), VERR_INVALID_PARAMETER);
509
510 /* do the reservation. */
511 return rtR0MemObjNativeReserveKernel(pMemObj, pvFixed, cbAligned, uAlignment);
512}
513
514
515/**
516 * Reserves user virtual address space in the current process.
517 *
518 * @returns IPRT status code.
519 * @param pMemObj Where to store the ring-0 memory object handle.
520 * @param pvFixed Requested address. (void *)-1 means any address. This must match the alignment.
521 * @param cb The number of bytes to reserve. This is rounded up to nearest PAGE_SIZE.
522 * @param uAlignment The alignment of the reserved memory.
523 * Supported values are 0 (alias for PAGE_SIZE), PAGE_SIZE, _2M and _4M.
524 */
525RTR0DECL(int) RTR0MemObjReserveUser(PRTR0MEMOBJ pMemObj, void *pvFixed, size_t cb, size_t uAlignment)
526{
527 /* sanity checks. */
528 AssertPtrReturn(pMemObj, VERR_INVALID_POINTER);
529 *pMemObj = NIL_RTR0MEMOBJ;
530 if (uAlignment == 0)
531 uAlignment = PAGE_SIZE;
532 AssertReturn(uAlignment == PAGE_SIZE || uAlignment == _2M || uAlignment == _4M, VERR_INVALID_PARAMETER);
533 AssertReturn(cb > 0, VERR_INVALID_PARAMETER);
534 const size_t cbAligned = RT_ALIGN_Z(cb, PAGE_SIZE);
535 AssertReturn(cb <= cbAligned, VERR_INVALID_PARAMETER);
536 if (pvFixed != (void *)-1)
537 AssertReturn(!((uintptr_t)pvFixed & (uAlignment - 1)), VERR_INVALID_PARAMETER);
538
539 /* do the reservation. */
540 return rtR0MemObjNativeReserveUser(pMemObj, pvFixed, cbAligned, uAlignment);
541}
542
543
544/**
545 * Maps a memory object into kernel virtual address space.
546 *
547 * @returns IPRT status code.
548 * @param pMemObj Where to store the ring-0 memory object handle of the mapping object.
549 * @param MemObjToMap The object to be map.
550 * @param pvFixed Requested address. (void *)-1 means any address. This must match the alignment.
551 * @param uAlignment The alignment of the reserved memory.
552 * Supported values are 0 (alias for PAGE_SIZE), PAGE_SIZE, _2M and _4M.
553 * @param fProt Combination of RTMEM_PROT_* flags (except RTMEM_PROT_NONE).
554 */
555RTR0DECL(int) RTR0MemObjMapKernel(PRTR0MEMOBJ pMemObj, PRTR0MEMOBJ MemObjToMap, void *pvFixed, size_t uAlignment, unsigned fProt)
556{
557 /* sanity checks. */
558 AssertPtrReturn(pMemObj, VERR_INVALID_POINTER);
559 *pMemObj = NIL_RTR0MEMOBJ;
560 AssertPtrReturn(MemObjToMap, VERR_INVALID_HANDLE);
561 PRTR0MEMOBJINTERNAL pMemToMap = (PRTR0MEMOBJINTERNAL)MemObjToMap;
562 AssertReturn(pMemToMap->u32Magic == RTR0MEMOBJ_MAGIC, VERR_INVALID_HANDLE);
563 AssertReturn(pMemToMap->enmType > RTR0MEMOBJTYPE_INVALID && pMemToMap->enmType < RTR0MEMOBJTYPE_END, VERR_INVALID_HANDLE);
564 AssertReturn(!rtR0MemObjIsMapping(pMemToMap), VERR_INVALID_PARAMETER);
565 AssertReturn(pMemToMap->enmType != RTR0MEMOBJTYPE_RES_VIRT, VERR_INVALID_PARAMETER);
566 if (uAlignment == 0)
567 uAlignment = PAGE_SIZE;
568 AssertReturn(uAlignment == PAGE_SIZE || uAlignment == _2M || uAlignment == _4M, VERR_INVALID_PARAMETER);
569 if (pvFixed != (void *)-1)
570 AssertReturn(!((uintptr_t)pvFixed & (uAlignment - 1)), VERR_INVALID_PARAMETER);
571 AssertReturn(fProt != RTMEM_PROT_NONE, VERR_INVALID_PARAMETER);
572 AssertReturn(!(fProt & ~(RTMEM_PROT_READ | RTMEM_PROT_WRITE | RTMEM_PROT_EXEC)), VERR_INVALID_PARAMETER);
573
574
575 /* do the mapping. */
576 PRTR0MEMOBJINTERNAL pNew;
577 int rc = rtR0MemObjNativeMapKernel(&pNew, pMemToMap, pvFixed, uAlignment, fProt);
578 if (RT_SUCCESS(rc))
579 {
580 /* link it. */
581 rc = rtR0MemObjLink(pMemToMap, pNew);
582 if (RT_SUCCESS(rc))
583 *pMemObj = pNew;
584 else
585 {
586 /* damn, out of memory. bail out. */
587 int rc2 = rtR0MemObjNativeFree(pNew);
588 AssertRC(rc2);
589 pNew->u32Magic++;
590 pNew->enmType = RTR0MEMOBJTYPE_END;
591 RTMemFree(pNew);
592 }
593 }
594
595 return rc;
596}
597
598
599/**
600 * Maps a memory object into user virtual address space in the current process.
601 *
602 * @returns IPRT status code.
603 * @param pMemObj Where to store the ring-0 memory object handle of the mapping object.
604 * @param MemObjToMap The object to be map.
605 * @param pvFixed Requested address. (void *)-1 means any address. This must match the alignment.
606 * @param uAlignment The alignment of the reserved memory.
607 * Supported values are 0 (alias for PAGE_SIZE), PAGE_SIZE, _2M and _4M.
608 * @param fProt Combination of RTMEM_PROT_* flags (except RTMEM_PROT_NONE).
609 */
610RTR0DECL(int) RTR0MemObjMapUser(PRTR0MEMOBJ pMemObj, RTR0MEMOBJ MemObjToMap, void *pvFixed, size_t uAlignment, unsigned fProt)
611{
612 /* sanity checks. */
613 AssertPtrReturn(pMemObj, VERR_INVALID_POINTER);
614 *pMemObj = NIL_RTR0MEMOBJ;
615 AssertPtrReturn(MemObjToMap, VERR_INVALID_HANDLE);
616 PRTR0MEMOBJINTERNAL pMemToMap = (PRTR0MEMOBJINTERNAL)MemObjToMap;
617 AssertReturn(pMemToMap->u32Magic == RTR0MEMOBJ_MAGIC, VERR_INVALID_HANDLE);
618 AssertReturn(pMemToMap->enmType > RTR0MEMOBJTYPE_INVALID && pMemToMap->enmType < RTR0MEMOBJTYPE_END, VERR_INVALID_HANDLE);
619 AssertReturn(!rtR0MemObjIsMapping(pMemToMap), VERR_INVALID_PARAMETER);
620 AssertReturn(pMemToMap->enmType != RTR0MEMOBJTYPE_RES_VIRT, VERR_INVALID_PARAMETER);
621 if (uAlignment == 0)
622 uAlignment = PAGE_SIZE;
623 AssertReturn(uAlignment == PAGE_SIZE || uAlignment == _2M || uAlignment == _4M, VERR_INVALID_PARAMETER);
624 if (pvFixed != (void *)-1)
625 AssertReturn(!((uintptr_t)pvFixed & (uAlignment - 1)), VERR_INVALID_PARAMETER);
626 AssertReturn(fProt != RTMEM_PROT_NONE, VERR_INVALID_PARAMETER);
627 AssertReturn(!(fProt & ~(RTMEM_PROT_READ | RTMEM_PROT_WRITE | RTMEM_PROT_EXEC)), VERR_INVALID_PARAMETER);
628
629
630 /* do the mapping. */
631 PRTR0MEMOBJINTERNAL pNew;
632 int rc = rtR0MemObjNativeMapUser(&pNew, pMemToMap, pvFixed, uAlignment, fProt);
633 if (RT_SUCCESS(rc))
634 {
635 /* link it. */
636 rc = rtR0MemObjLink(pMemToMap, pNew);
637 if (RT_SUCCESS(rc))
638 *pMemObj = pNew;
639 else
640 {
641 /* damn, out of memory. bail out. */
642 int rc2 = rtR0MemObjNativeFree(pNew);
643 AssertRC(rc2);
644 pNew->u32Magic++;
645 pNew->enmType = RTR0MEMOBJTYPE_END;
646 RTMemFree(pNew);
647 }
648 }
649
650 return rc;
651}
652
Note: See TracBrowser for help on using the repository browser.

© 2025 Oracle Support Privacy / Do Not Sell My Info Terms of Use Trademark Policy Automated Access Etiquette