VirtualBox

source: vbox/trunk/src/VBox/Runtime/r0drv/memobj-r0drv.cpp@ 207

Last change: r207, checked in by vboxsync (2007-01-21): cleaning up some header stuff.

/* $Id: memobj-r0drv.cpp 207 2007-01-21 10:42:48Z vboxsync $ */
/** @file
 * InnoTek Portable Runtime - Ring-0 Memory Objects, Common Code.
 */

/*
 * Copyright (C) 2006 InnoTek Systemberatung GmbH
 *
 * This file is part of VirtualBox Open Source Edition (OSE), as
 * available from http://www.virtualbox.org. This file is free software;
 * you can redistribute it and/or modify it under the terms of the GNU
 * General Public License as published by the Free Software Foundation,
 * in version 2 as it comes in the "COPYING" file of the VirtualBox OSE
 * distribution. VirtualBox OSE is distributed in the hope that it will
 * be useful, but WITHOUT ANY WARRANTY of any kind.
 *
 * If you received this file as part of a commercial VirtualBox
 * distribution, then only the terms of your commercial VirtualBox
 * license agreement apply instead of the previous paragraph.
 */


/*******************************************************************************
*   Header Files                                                               *
*******************************************************************************/
#define LOG_GROUP RTLOGGROUP_DEFAULT ///@todo RTLOGGROUP_MEM
#include <iprt/memobj.h>
#include <iprt/alloc.h>
#include <iprt/assert.h>
#include <iprt/err.h>
#include <iprt/log.h>
#include <iprt/param.h>
#include "internal/memobj.h"


/**
 * Internal function for allocating a new memory object.
 *
 * @returns The allocated and initialized handle.
 * @param   cbSelf      The size of the memory object handle. 0 means default size.
 * @param   enmType     The memory object type.
 * @param   pv          The memory object mapping.
 * @param   cb          The size of the memory object.
 */
PRTR0MEMOBJINTERNAL rtR0MemObjNew(size_t cbSelf, RTR0MEMOBJTYPE enmType, void *pv, size_t cb)
{
    PRTR0MEMOBJINTERNAL pNew;

    /* validate the size */
    if (!cbSelf)
        cbSelf = sizeof(*pNew);
    Assert(cbSelf >= sizeof(*pNew));

    /*
     * Allocate and initialize the object.
     */
    pNew = (PRTR0MEMOBJINTERNAL)RTMemAllocZ(cbSelf);
    if (pNew)
    {
        pNew->u32Magic = RTR0MEMOBJ_MAGIC;
        pNew->cbSelf   = cbSelf;
        pNew->enmType  = enmType;
        pNew->cb       = cb;
        pNew->pv       = pv;
    }
    return pNew;
}


/**
 * Links a mapping object to a primary object.
 *
 * @returns IPRT status code.
 * @retval  VINF_SUCCESS on success.
 * @retval  VERR_NO_MEMORY if we couldn't expand the mapping array of the parent.
 * @param   pParent     The parent (primary) memory object.
 * @param   pChild      The child (mapping) memory object.
 */
static int rtR0MemObjLink(PRTR0MEMOBJINTERNAL pParent, PRTR0MEMOBJINTERNAL pChild)
{
    /* sanity */
    Assert(rtR0MemObjIsMapping(pChild));
    Assert(!rtR0MemObjIsMapping(pParent));

    /* expand the array? */
    const uint32_t i = pParent->uRel.Parent.cMappings;
    if (i >= pParent->uRel.Parent.cMappingsAllocated)
    {
        void *pv = RTMemRealloc(pParent->uRel.Parent.papMappings,
                                (i + 32) * sizeof(pParent->uRel.Parent.papMappings[0]));
        if (!pv)
            return VERR_NO_MEMORY;
        pParent->uRel.Parent.papMappings = (PPRTR0MEMOBJINTERNAL)pv;
        pParent->uRel.Parent.cMappingsAllocated = i + 32;
        Assert(i == pParent->uRel.Parent.cMappings);
    }

    /* do the linking. */
    pParent->uRel.Parent.papMappings[i] = pChild;
    pParent->uRel.Parent.cMappings++;
    pChild->uRel.Child.pParent = pParent;

    return VINF_SUCCESS;
}


/**
 * Checks if this is a mapping or not.
 *
 * @returns true if it's a mapping, otherwise false.
 * @param   MemObj      The ring-0 memory object handle.
 */
RTR0DECL(bool) RTR0MemObjIsMapping(RTR0MEMOBJ MemObj)
{
    /* Validate the object handle. */
    AssertPtrReturn(MemObj, false);
    PRTR0MEMOBJINTERNAL pMem = (PRTR0MEMOBJINTERNAL)MemObj;
    AssertReturn(pMem->u32Magic == RTR0MEMOBJ_MAGIC, false);
    AssertReturn(pMem->enmType > RTR0MEMOBJTYPE_INVALID && pMem->enmType < RTR0MEMOBJTYPE_END, false);

    /* hand it on to the inlined worker. */
    return rtR0MemObjIsMapping(pMem);
}


/**
 * Gets the address of a ring-0 memory object.
 *
 * @returns The address of the memory object.
 * @returns NULL if the handle is invalid (asserts in strict builds) or if there isn't any mapping.
 * @param   MemObj      The ring-0 memory object handle.
 */
RTR0DECL(void *) RTR0MemObjAddress(RTR0MEMOBJ MemObj)
{
    /* Validate the object handle. */
    AssertPtrReturn(MemObj, 0);
    PRTR0MEMOBJINTERNAL pMem = (PRTR0MEMOBJINTERNAL)MemObj;
    AssertReturn(pMem->u32Magic == RTR0MEMOBJ_MAGIC, 0);
    AssertReturn(pMem->enmType > RTR0MEMOBJTYPE_INVALID && pMem->enmType < RTR0MEMOBJTYPE_END, 0);

    /* return the mapping address. */
    return pMem->pv;
}


/**
 * Gets the size of a ring-0 memory object.
 *
 * @returns The size of the memory object.
 * @returns 0 if the handle is invalid (asserts in strict builds).
 * @param   MemObj      The ring-0 memory object handle.
 */
RTR0DECL(size_t) RTR0MemObjSize(RTR0MEMOBJ MemObj)
{
    /* Validate the object handle. */
    AssertPtrReturn(MemObj, 0);
    PRTR0MEMOBJINTERNAL pMem = (PRTR0MEMOBJINTERNAL)MemObj;
    AssertReturn(pMem->u32Magic == RTR0MEMOBJ_MAGIC, 0);
    AssertReturn(pMem->enmType > RTR0MEMOBJTYPE_INVALID && pMem->enmType < RTR0MEMOBJTYPE_END, 0);

    /* return the size. */
    return pMem->cb;
}


/**
 * Gets the physical address of a page in the memory object.
 *
 * @returns The physical address.
 * @returns NIL_RTHCPHYS if the object doesn't contain fixed physical pages.
 * @returns NIL_RTHCPHYS if the iPage is out of range.
 * @returns NIL_RTHCPHYS if the object handle isn't valid.
 * @param   MemObj      The ring-0 memory object handle.
 * @param   iPage       The page number within the object.
 */
RTR0DECL(RTHCPHYS) RTR0MemObjGetPagePhysAddr(RTR0MEMOBJ MemObj, unsigned iPage)
{
    /* Validate the object handle. */
    AssertPtrReturn(MemObj, NIL_RTHCPHYS);
    PRTR0MEMOBJINTERNAL pMem = (PRTR0MEMOBJINTERNAL)MemObj;
    AssertReturn(pMem->u32Magic == RTR0MEMOBJ_MAGIC, NIL_RTHCPHYS);
    AssertReturn(pMem->enmType > RTR0MEMOBJTYPE_INVALID && pMem->enmType < RTR0MEMOBJTYPE_END, NIL_RTHCPHYS);
    const unsigned cPages = (pMem->cb >> PAGE_SHIFT);
    if (iPage >= cPages)
    {
        /* permit: while (RTR0MemObjGetPagePhysAddr(pMem, iPage++) != NIL_RTHCPHYS) {} */
        if (iPage == cPages)
            return NIL_RTHCPHYS;
        AssertReturn(iPage < (pMem->cb >> PAGE_SHIFT), NIL_RTHCPHYS);
    }

    /* return the physical address of the page. */
    return rtR0MemObjNativeGetPagePhysAddr(pMem, iPage);
}


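/*
 * Illustrative usage sketch (not part of the original file): the comment in
 * RTR0MemObjGetPagePhysAddr above explicitly permits asking for one page past
 * the end, so a caller can enumerate the physical pages of an object by simply
 * stopping at the first NIL_RTHCPHYS. The function name exampleLogPhysPages is
 * made up; only the RTR0MemObj* APIs used in this file are assumed.
 */
#if 0 /* example only, not built */
static void exampleLogPhysPages(RTR0MEMOBJ hMemObj)
{
    unsigned iPage = 0;
    RTHCPHYS HCPhys;
    while ((HCPhys = RTR0MemObjGetPagePhysAddr(hMemObj, iPage)) != NIL_RTHCPHYS)
    {
        Log(("page %u: %#llx\n", iPage, (unsigned long long)HCPhys));
        iPage++;
    }
    Log(("%u pages in total (%zu bytes)\n", iPage, RTR0MemObjSize(hMemObj)));
}
#endif
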
/**
 * Frees a ring-0 memory object.
 *
 * @returns IPRT status code.
 * @retval  VERR_INVALID_HANDLE if the handle isn't valid.
 * @param   MemObj          The ring-0 memory object to be freed. NULL is accepted.
 * @param   fFreeMappings   Whether or not to free mappings of the object.
 */
RTR0DECL(int) RTR0MemObjFree(RTR0MEMOBJ MemObj, bool fFreeMappings)
{
    /*
     * Validate the object handle.
     */
    if (MemObj == NIL_RTR0MEMOBJ)
        return VINF_SUCCESS;
    AssertPtrReturn(MemObj, VERR_INVALID_HANDLE);
    PRTR0MEMOBJINTERNAL pMem = (PRTR0MEMOBJINTERNAL)MemObj;
    AssertReturn(pMem->u32Magic == RTR0MEMOBJ_MAGIC, VERR_INVALID_HANDLE);
    AssertReturn(pMem->enmType > RTR0MEMOBJTYPE_INVALID && pMem->enmType < RTR0MEMOBJTYPE_END, VERR_INVALID_HANDLE);

    /*
     * Deal with mappings according to fFreeMappings.
     */
    if (    !rtR0MemObjIsMapping(pMem)
        &&  pMem->uRel.Parent.cMappings > 0)
    {
        /* fail if not requested to free mappings. */
        if (!fFreeMappings)
            return VERR_MEMORY_BUSY;

        while (pMem->uRel.Parent.cMappings > 0)
        {
            PRTR0MEMOBJINTERNAL pChild = pMem->uRel.Parent.papMappings[--pMem->uRel.Parent.cMappings];
            pMem->uRel.Parent.papMappings[pMem->uRel.Parent.cMappings] = NULL;

            /* sanity checks. */
            AssertPtr(pChild);
            AssertFatal(pChild->u32Magic == RTR0MEMOBJ_MAGIC);
            AssertFatal(pChild->enmType > RTR0MEMOBJTYPE_INVALID && pChild->enmType < RTR0MEMOBJTYPE_END);
            AssertFatal(rtR0MemObjIsMapping(pChild));

            /* free the mapping. */
            int rc = rtR0MemObjNativeFree(pChild);
            if (RT_FAILURE(rc))
            {
                Log(("RTR0MemObjFree: failed to free mapping %p: %p %#zx; rc=%Vrc\n", pChild, pChild->pv, pChild->cb, rc));
                pMem->uRel.Parent.papMappings[pMem->uRel.Parent.cMappings++] = pChild;
                return rc;
            }
        }
    }

    /*
     * Free this object.
     */
    int rc = rtR0MemObjNativeFree(pMem);
    if (RT_SUCCESS(rc))
    {
        /*
         * Ok, it was freed just fine. Now, if it's a mapping we'll have to remove it from the parent.
         */
        if (rtR0MemObjIsMapping(pMem))
        {
            PRTR0MEMOBJINTERNAL pParent = pMem->uRel.Child.pParent;

            /* sanity checks */
            AssertPtr(pParent);
            AssertFatal(pParent->u32Magic == RTR0MEMOBJ_MAGIC);
            AssertFatal(pParent->enmType > RTR0MEMOBJTYPE_INVALID && pParent->enmType < RTR0MEMOBJTYPE_END);
            AssertFatal(!rtR0MemObjIsMapping(pParent));

            /* locate and remove from the array of mappings. */
            uint32_t i = pParent->uRel.Parent.cMappings;
            while (i-- > 0)
            {
                if (pParent->uRel.Parent.papMappings[i] == pMem)
                {
                    pParent->uRel.Parent.papMappings[i] = pParent->uRel.Parent.papMappings[--pParent->uRel.Parent.cMappings];
                    break;
                }
            }
            Assert(i != UINT32_MAX);
        }
        else
            Assert(pMem->uRel.Parent.cMappings == 0);

        /*
         * Finally, destroy the handle.
         */
        pMem->u32Magic++;
        pMem->enmType = RTR0MEMOBJTYPE_END;
        if (!rtR0MemObjIsMapping(pMem))
            RTMemFree(pMem->uRel.Parent.papMappings);
        RTMemFree(pMem);
    }
    else
        Log(("RTR0MemObjFree: failed to free %p: %d %p %#zx; rc=%Vrc\n",
             pMem, pMem->enmType, pMem->pv, pMem->cb, rc));
    return rc;
}


/**
 * Allocates page aligned virtual kernel memory.
 *
 * The memory is taken from a non paged (= fixed physical memory backing) pool.
 *
 * @returns IPRT status code.
 * @param   pMemObj     Where to store the ring-0 memory object handle.
 * @param   cb          Number of bytes to allocate. This is rounded up to nearest page.
 * @param   fExecutable Flag indicating whether it should be permitted to execute code in the memory object.
 */
RTR0DECL(int) RTR0MemObjAllocPage(PRTR0MEMOBJ pMemObj, size_t cb, bool fExecutable)
{
    /* sanity checks. */
    AssertPtrReturn(pMemObj, VERR_INVALID_POINTER);
    *pMemObj = NIL_RTR0MEMOBJ;
    AssertReturn(cb > 0, VERR_INVALID_PARAMETER);
    const size_t cbAligned = RT_ALIGN_Z(cb, PAGE_SIZE);
    AssertReturn(cb <= cbAligned, VERR_INVALID_PARAMETER);

    /* do the allocation. */
    return rtR0MemObjNativeAllocPage(pMemObj, cbAligned, fExecutable);
}


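/*
 * Illustrative usage sketch (not part of the original file): a typical
 * allocate / use / free cycle for a page backed allocation. The function
 * name exampleAllocTwoPages is made up; the calls and status code handling
 * follow the RTR0MemObj* APIs defined in this file.
 */
#if 0 /* example only, not built */
static int exampleAllocTwoPages(void)
{
    RTR0MEMOBJ hMemObj;
    int rc = RTR0MemObjAllocPage(&hMemObj, 2 * PAGE_SIZE, false /* fExecutable */);
    if (RT_SUCCESS(rc))
    {
        uint8_t *pb = (uint8_t *)RTR0MemObjAddress(hMemObj);
        size_t   cb = RTR0MemObjSize(hMemObj);
        pb[0]      = 0x42;              /* the memory is kernel mapped and writable */
        pb[cb - 1] = 0x42;
        rc = RTR0MemObjFree(hMemObj, false /* fFreeMappings */);
    }
    return rc;
}
#endif
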
/**
 * Allocates page aligned virtual kernel memory with physical backing below 4GB.
 *
 * The physical memory backing the allocation is fixed.
 *
 * @returns IPRT status code.
 * @param   pMemObj     Where to store the ring-0 memory object handle.
 * @param   cb          Number of bytes to allocate. This is rounded up to nearest page.
 * @param   fExecutable Flag indicating whether it should be permitted to execute code in the memory object.
 */
RTR0DECL(int) RTR0MemObjAllocLow(PRTR0MEMOBJ pMemObj, size_t cb, bool fExecutable)
{
    /* sanity checks. */
    AssertPtrReturn(pMemObj, VERR_INVALID_POINTER);
    *pMemObj = NIL_RTR0MEMOBJ;
    AssertReturn(cb > 0, VERR_INVALID_PARAMETER);
    const size_t cbAligned = RT_ALIGN_Z(cb, PAGE_SIZE);
    AssertReturn(cb <= cbAligned, VERR_INVALID_PARAMETER);

    /* do the allocation. */
    return rtR0MemObjNativeAllocLow(pMemObj, cbAligned, fExecutable);
}


/**
 * Allocates page aligned virtual kernel memory with contiguous physical backing below 4GB.
 *
 * The physical memory backing the allocation is fixed.
 *
 * @returns IPRT status code.
 * @param   pMemObj     Where to store the ring-0 memory object handle.
 * @param   cb          Number of bytes to allocate. This is rounded up to nearest page.
 * @param   fExecutable Flag indicating whether it should be permitted to execute code in the memory object.
 */
RTR0DECL(int) RTR0MemObjAllocCont(PRTR0MEMOBJ pMemObj, size_t cb, bool fExecutable)
{
    /* sanity checks. */
    AssertPtrReturn(pMemObj, VERR_INVALID_POINTER);
    *pMemObj = NIL_RTR0MEMOBJ;
    AssertReturn(cb > 0, VERR_INVALID_PARAMETER);
    const size_t cbAligned = RT_ALIGN_Z(cb, PAGE_SIZE);
    AssertReturn(cb <= cbAligned, VERR_INVALID_PARAMETER);

    /* do the allocation. */
    return rtR0MemObjNativeAllocCont(pMemObj, cbAligned, fExecutable);
}


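/*
 * Illustrative usage sketch (not part of the original file): allocating a
 * physically contiguous buffer (for instance for a device that needs a single
 * DMA region below 4GB) and fetching its physical base address. The name
 * exampleAllocDmaBuffer is made up for the example.
 */
#if 0 /* example only, not built */
static int exampleAllocDmaBuffer(void)
{
    RTR0MEMOBJ hMemObj;
    int rc = RTR0MemObjAllocCont(&hMemObj, 16 * PAGE_SIZE, false /* fExecutable */);
    if (RT_SUCCESS(rc))
    {
        void    *pv     = RTR0MemObjAddress(hMemObj);               /* kernel mapping */
        RTHCPHYS HCPhys = RTR0MemObjGetPagePhysAddr(hMemObj, 0);    /* physical base; pages are contiguous */
        Log(("DMA buffer: virt=%p phys=%#llx\n", pv, (unsigned long long)HCPhys));
        rc = RTR0MemObjFree(hMemObj, false /* fFreeMappings */);
    }
    return rc;
}
#endif
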
/**
 * Locks a range of user virtual memory.
 *
 * @returns IPRT status code.
 * @param   pMemObj     Where to store the ring-0 memory object handle.
 * @param   pv          User virtual address. This is rounded down to a page boundary.
 * @param   cb          Number of bytes to lock. This is rounded up to the nearest page boundary.
 *
 * @remark  RTR0MemObjAddress() will return the rounded down address.
 */
RTR0DECL(int) RTR0MemObjLockUser(PRTR0MEMOBJ pMemObj, void *pv, size_t cb)
{
    /* sanity checks. */
    AssertPtrReturn(pMemObj, VERR_INVALID_POINTER);
    *pMemObj = NIL_RTR0MEMOBJ;
    AssertReturn(cb > 0, VERR_INVALID_PARAMETER);
    const size_t cbAligned = RT_ALIGN_Z(cb + ((uintptr_t)pv & PAGE_OFFSET_MASK), PAGE_SIZE);
    AssertReturn(cb <= cbAligned, VERR_INVALID_PARAMETER);
    void * const pvAligned = (void *)((uintptr_t)pv & ~(uintptr_t)PAGE_OFFSET_MASK);

    /* do the locking. */
    return rtR0MemObjNativeLockUser(pMemObj, pvAligned, cbAligned);
}


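/*
 * Illustrative usage sketch (not part of the original file): locking a user
 * buffer passed down from ring-3 and walking its physical pages, e.g. to build
 * a scatter/gather list. pvUser, cbUser and exampleLockUserBuffer are made-up
 * names; note that RTR0MemObjAddress() returns the rounded down (page aligned)
 * address, as remarked above.
 */
#if 0 /* example only, not built */
static int exampleLockUserBuffer(void *pvUser, size_t cbUser)
{
    RTR0MEMOBJ hMemObj;
    int rc = RTR0MemObjLockUser(&hMemObj, pvUser, cbUser);
    if (RT_SUCCESS(rc))
    {
        unsigned cPages = (unsigned)(RTR0MemObjSize(hMemObj) >> PAGE_SHIFT);
        for (unsigned iPage = 0; iPage < cPages; iPage++)
            Log(("user page %u: %#llx\n", iPage,
                 (unsigned long long)RTR0MemObjGetPagePhysAddr(hMemObj, iPage)));
        rc = RTR0MemObjFree(hMemObj, false /* fFreeMappings */);
    }
    return rc;
}
#endif
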
/**
 * Locks a range of kernel virtual memory.
 *
 * @returns IPRT status code.
 * @param   pMemObj     Where to store the ring-0 memory object handle.
 * @param   pv          Kernel virtual address. This is rounded down to a page boundary.
 * @param   cb          Number of bytes to lock. This is rounded up to the nearest page boundary.
 *
 * @remark  RTR0MemObjAddress() will return the rounded down address.
 */
RTR0DECL(int) RTR0MemObjLockKernel(PRTR0MEMOBJ pMemObj, void *pv, size_t cb)
{
    /* sanity checks. */
    AssertPtrReturn(pMemObj, VERR_INVALID_POINTER);
    *pMemObj = NIL_RTR0MEMOBJ;
    AssertReturn(cb > 0, VERR_INVALID_PARAMETER);
    const size_t cbAligned = RT_ALIGN_Z(cb + ((uintptr_t)pv & PAGE_OFFSET_MASK), PAGE_SIZE);
    AssertReturn(cb <= cbAligned, VERR_INVALID_PARAMETER);
    void * const pvAligned = (void *)((uintptr_t)pv & ~(uintptr_t)PAGE_OFFSET_MASK);
    AssertPtrReturn(pvAligned, VERR_INVALID_POINTER);

    /* do the locking. */
    return rtR0MemObjNativeLockKernel(pMemObj, pvAligned, cbAligned);
}


/**
 * Allocates page aligned physical memory without (necessarily) any kernel mapping.
 *
 * @returns IPRT status code.
 * @param   pMemObj     Where to store the ring-0 memory object handle.
 * @param   cb          Number of bytes to allocate. This is rounded up to nearest page.
 * @param   PhysHighest The highest permissible address (inclusive).
 *                      Pass NIL_RTHCPHYS if any address is acceptable.
 */
RTR0DECL(int) RTR0MemObjAllocPhys(PRTR0MEMOBJ pMemObj, size_t cb, RTHCPHYS PhysHighest)
{
    /* sanity checks. */
    AssertPtrReturn(pMemObj, VERR_INVALID_POINTER);
    *pMemObj = NIL_RTR0MEMOBJ;
    AssertReturn(cb > 0, VERR_INVALID_PARAMETER);
    const size_t cbAligned = RT_ALIGN_Z(cb, PAGE_SIZE);
    AssertReturn(cb <= cbAligned, VERR_INVALID_PARAMETER);

    /* do the allocation. */
    return rtR0MemObjNativeAllocPhys(pMemObj, cbAligned, PhysHighest);
}


/**
 * Creates a page aligned, contiguous, physical memory object.
 *
 * No physical memory is allocated, we trust you know what you're doing.
 *
 * @returns IPRT status code.
 * @param   pMemObj     Where to store the ring-0 memory object handle.
 * @param   Phys        The physical address to start at. This is rounded down to the
 *                      nearest page boundary.
 * @param   cb          The size of the object in bytes. This is rounded up to the nearest page boundary.
 */
RTR0DECL(int) RTR0MemObjEnterPhys(PRTR0MEMOBJ pMemObj, RTHCPHYS Phys, size_t cb)
{
    /* sanity checks. */
    AssertPtrReturn(pMemObj, VERR_INVALID_POINTER);
    *pMemObj = NIL_RTR0MEMOBJ;
    AssertReturn(cb > 0, VERR_INVALID_PARAMETER);
    const size_t cbAligned = RT_ALIGN_Z(cb + (Phys & PAGE_OFFSET_MASK), PAGE_SIZE);
    AssertReturn(cb <= cbAligned, VERR_INVALID_PARAMETER);
    AssertReturn(Phys != NIL_RTHCPHYS, VERR_INVALID_PARAMETER);
    const RTHCPHYS PhysAligned = Phys & ~(RTHCPHYS)PAGE_OFFSET_MASK;

    /* do the allocation. */
    return rtR0MemObjNativeEnterPhys(pMemObj, PhysAligned, cbAligned);
}


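/*
 * Illustrative usage sketch (not part of the original file): wrapping an
 * already known physical range (for instance a device MMIO region whose base
 * address was read from a BAR) in a memory object so it can later be mapped.
 * The 0xfebf0000 address and the exampleEnterMmio name are purely hypothetical.
 */
#if 0 /* example only, not built */
static int exampleEnterMmio(void)
{
    RTR0MEMOBJ hMemObj;
    int rc = RTR0MemObjEnterPhys(&hMemObj, (RTHCPHYS)0xfebf0000, 2 * PAGE_SIZE);
    if (RT_SUCCESS(rc))
    {
        /* ... hand hMemObj to RTR0MemObjMapKernel() or similar ... */
        rc = RTR0MemObjFree(hMemObj, false /* fFreeMappings */);
    }
    return rc;
}
#endif
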
/**
 * Reserves kernel virtual address space.
 *
 * @returns IPRT status code.
 * @param   pMemObj     Where to store the ring-0 memory object handle.
 * @param   pvFixed     Requested address. (void *)-1 means any address. This must match the alignment.
 * @param   cb          The number of bytes to reserve. This is rounded up to nearest page.
 * @param   uAlignment  The alignment of the reserved memory.
 *                      Supported values are 0 (alias for PAGE_SIZE), PAGE_SIZE, _2M and _4M.
 */
RTR0DECL(int) RTR0MemObjReserveKernel(PRTR0MEMOBJ pMemObj, void *pvFixed, size_t cb, size_t uAlignment)
{
    /* sanity checks. */
    AssertPtrReturn(pMemObj, VERR_INVALID_POINTER);
    *pMemObj = NIL_RTR0MEMOBJ;
    if (uAlignment == 0)
        uAlignment = PAGE_SIZE;
    AssertReturn(uAlignment == PAGE_SIZE || uAlignment == _2M || uAlignment == _4M, VERR_INVALID_PARAMETER);
    AssertReturn(cb > 0, VERR_INVALID_PARAMETER);
    const size_t cbAligned = RT_ALIGN_Z(cb, PAGE_SIZE);
    AssertReturn(cb <= cbAligned, VERR_INVALID_PARAMETER);
    if (pvFixed != (void *)-1)
        AssertReturn(!((uintptr_t)pvFixed & (uAlignment - 1)), VERR_INVALID_PARAMETER);

    /* do the reservation. */
    return rtR0MemObjNativeReserveKernel(pMemObj, pvFixed, cbAligned, uAlignment);
}


/**
 * Reserves user virtual address space in the current process.
 *
 * @returns IPRT status code.
 * @param   pMemObj     Where to store the ring-0 memory object handle.
 * @param   pvFixed     Requested address. (void *)-1 means any address. This must match the alignment.
 * @param   cb          The number of bytes to reserve. This is rounded up to nearest PAGE_SIZE.
 * @param   uAlignment  The alignment of the reserved memory.
 *                      Supported values are 0 (alias for PAGE_SIZE), PAGE_SIZE, _2M and _4M.
 */
RTR0DECL(int) RTR0MemObjReserveUser(PRTR0MEMOBJ pMemObj, void *pvFixed, size_t cb, size_t uAlignment)
{
    /* sanity checks. */
    AssertPtrReturn(pMemObj, VERR_INVALID_POINTER);
    *pMemObj = NIL_RTR0MEMOBJ;
    if (uAlignment == 0)
        uAlignment = PAGE_SIZE;
    AssertReturn(uAlignment == PAGE_SIZE || uAlignment == _2M || uAlignment == _4M, VERR_INVALID_PARAMETER);
    AssertReturn(cb > 0, VERR_INVALID_PARAMETER);
    const size_t cbAligned = RT_ALIGN_Z(cb, PAGE_SIZE);
    AssertReturn(cb <= cbAligned, VERR_INVALID_PARAMETER);
    if (pvFixed != (void *)-1)
        AssertReturn(!((uintptr_t)pvFixed & (uAlignment - 1)), VERR_INVALID_PARAMETER);

    /* do the reservation. */
    return rtR0MemObjNativeReserveUser(pMemObj, pvFixed, cbAligned, uAlignment);
}


/**
 * Maps a memory object into kernel virtual address space.
 *
 * @returns IPRT status code.
 * @param   pMemObj     Where to store the ring-0 memory object handle of the mapping object.
 * @param   MemObjToMap The object to be mapped.
 * @param   pvFixed     Requested address. (void *)-1 means any address. This must match the alignment.
 * @param   uAlignment  The alignment of the mapping.
 *                      Supported values are 0 (alias for PAGE_SIZE), PAGE_SIZE, _2M and _4M.
 * @param   fProt       Combination of RTMEM_PROT_* flags (except RTMEM_PROT_NONE).
 */
RTR0DECL(int) RTR0MemObjMapKernel(PRTR0MEMOBJ pMemObj, RTR0MEMOBJ MemObjToMap, void *pvFixed, size_t uAlignment, unsigned fProt)
{
    /* sanity checks. */
    AssertPtrReturn(pMemObj, VERR_INVALID_POINTER);
    *pMemObj = NIL_RTR0MEMOBJ;
    AssertPtrReturn(MemObjToMap, VERR_INVALID_HANDLE);
    PRTR0MEMOBJINTERNAL pMemToMap = (PRTR0MEMOBJINTERNAL)MemObjToMap;
    AssertReturn(pMemToMap->u32Magic == RTR0MEMOBJ_MAGIC, VERR_INVALID_HANDLE);
    AssertReturn(pMemToMap->enmType > RTR0MEMOBJTYPE_INVALID && pMemToMap->enmType < RTR0MEMOBJTYPE_END, VERR_INVALID_HANDLE);
    AssertReturn(!rtR0MemObjIsMapping(pMemToMap), VERR_INVALID_PARAMETER);
    AssertReturn(pMemToMap->enmType != RTR0MEMOBJTYPE_RES_VIRT, VERR_INVALID_PARAMETER);
    if (uAlignment == 0)
        uAlignment = PAGE_SIZE;
    AssertReturn(uAlignment == PAGE_SIZE || uAlignment == _2M || uAlignment == _4M, VERR_INVALID_PARAMETER);
    if (pvFixed != (void *)-1)
        AssertReturn(!((uintptr_t)pvFixed & (uAlignment - 1)), VERR_INVALID_PARAMETER);
    AssertReturn(fProt != RTMEM_PROT_NONE, VERR_INVALID_PARAMETER);
    AssertReturn(!(fProt & ~(RTMEM_PROT_READ | RTMEM_PROT_WRITE | RTMEM_PROT_EXEC)), VERR_INVALID_PARAMETER);

    /* do the mapping. */
    PRTR0MEMOBJINTERNAL pNew;
    int rc = rtR0MemObjNativeMapKernel(&pNew, pMemToMap, pvFixed, uAlignment, fProt);
    if (RT_SUCCESS(rc))
    {
        /* link it. */
        rc = rtR0MemObjLink(pMemToMap, pNew);
        if (RT_SUCCESS(rc))
            *pMemObj = pNew;
        else
        {
            /* damn, out of memory. bail out. */
            int rc2 = rtR0MemObjNativeFree(pNew);
            AssertRC(rc2);
            pNew->u32Magic++;
            pNew->enmType = RTR0MEMOBJTYPE_END;
            RTMemFree(pNew);
        }
    }

    return rc;
}


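/*
 * Illustrative usage sketch (not part of the original file): allocating
 * physical pages without a kernel mapping and then mapping them read/write
 * into kernel space. Freeing the primary object with fFreeMappings = true
 * tears down the mapping object as well, as implemented in RTR0MemObjFree
 * above. exampleMapPhysIntoKernel is a made-up name.
 */
#if 0 /* example only, not built */
static int exampleMapPhysIntoKernel(void)
{
    RTR0MEMOBJ hMemObjPhys;
    int rc = RTR0MemObjAllocPhys(&hMemObjPhys, 4 * PAGE_SIZE, NIL_RTHCPHYS /* any address */);
    if (RT_SUCCESS(rc))
    {
        RTR0MEMOBJ hMemObjMap;
        rc = RTR0MemObjMapKernel(&hMemObjMap, hMemObjPhys, (void *)-1 /* any address */,
                                 0 /* default alignment */, RTMEM_PROT_READ | RTMEM_PROT_WRITE);
        if (RT_SUCCESS(rc))
            Log(("mapped at %p\n", RTR0MemObjAddress(hMemObjMap)));

        /* freeing the primary object with fFreeMappings = true also frees the mapping. */
        int rc2 = RTR0MemObjFree(hMemObjPhys, true /* fFreeMappings */);
        if (RT_SUCCESS(rc))
            rc = rc2;
    }
    return rc;
}
#endif
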
/**
 * Maps a memory object into user virtual address space in the current process.
 *
 * @returns IPRT status code.
 * @param   pMemObj     Where to store the ring-0 memory object handle of the mapping object.
 * @param   MemObjToMap The object to be mapped.
 * @param   pvFixed     Requested address. (void *)-1 means any address. This must match the alignment.
 * @param   uAlignment  The alignment of the mapping.
 *                      Supported values are 0 (alias for PAGE_SIZE), PAGE_SIZE, _2M and _4M.
 * @param   fProt       Combination of RTMEM_PROT_* flags (except RTMEM_PROT_NONE).
 */
RTR0DECL(int) RTR0MemObjMapUser(PRTR0MEMOBJ pMemObj, RTR0MEMOBJ MemObjToMap, void *pvFixed, size_t uAlignment, unsigned fProt)
{
    /* sanity checks. */
    AssertPtrReturn(pMemObj, VERR_INVALID_POINTER);
    *pMemObj = NIL_RTR0MEMOBJ;
    AssertPtrReturn(MemObjToMap, VERR_INVALID_HANDLE);
    PRTR0MEMOBJINTERNAL pMemToMap = (PRTR0MEMOBJINTERNAL)MemObjToMap;
    AssertReturn(pMemToMap->u32Magic == RTR0MEMOBJ_MAGIC, VERR_INVALID_HANDLE);
    AssertReturn(pMemToMap->enmType > RTR0MEMOBJTYPE_INVALID && pMemToMap->enmType < RTR0MEMOBJTYPE_END, VERR_INVALID_HANDLE);
    AssertReturn(!rtR0MemObjIsMapping(pMemToMap), VERR_INVALID_PARAMETER);
    AssertReturn(pMemToMap->enmType != RTR0MEMOBJTYPE_RES_VIRT, VERR_INVALID_PARAMETER);
    if (uAlignment == 0)
        uAlignment = PAGE_SIZE;
    AssertReturn(uAlignment == PAGE_SIZE || uAlignment == _2M || uAlignment == _4M, VERR_INVALID_PARAMETER);
    if (pvFixed != (void *)-1)
        AssertReturn(!((uintptr_t)pvFixed & (uAlignment - 1)), VERR_INVALID_PARAMETER);
    AssertReturn(fProt != RTMEM_PROT_NONE, VERR_INVALID_PARAMETER);
    AssertReturn(!(fProt & ~(RTMEM_PROT_READ | RTMEM_PROT_WRITE | RTMEM_PROT_EXEC)), VERR_INVALID_PARAMETER);

    /* do the mapping. */
    PRTR0MEMOBJINTERNAL pNew;
    int rc = rtR0MemObjNativeMapUser(&pNew, pMemToMap, pvFixed, uAlignment, fProt);
    if (RT_SUCCESS(rc))
    {
        /* link it. */
        rc = rtR0MemObjLink(pMemToMap, pNew);
        if (RT_SUCCESS(rc))
            *pMemObj = pNew;
        else
        {
            /* damn, out of memory. bail out. */
            int rc2 = rtR0MemObjNativeFree(pNew);
            AssertRC(rc2);
            pNew->u32Magic++;
            pNew->enmType = RTR0MEMOBJTYPE_END;
            RTMemFree(pNew);
        }
    }

    return rc;
}

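/*
 * Illustrative usage sketch (not part of the original file): sharing a ring-0
 * allocation with the current ring-3 process by mapping it read-only into user
 * space. exampleShareWithUser is a made-up name; the mapping is torn down
 * together with the allocation by passing fFreeMappings = true to RTR0MemObjFree.
 */
#if 0 /* example only, not built */
static int exampleShareWithUser(void)
{
    RTR0MEMOBJ hMemObj;
    int rc = RTR0MemObjAllocPage(&hMemObj, PAGE_SIZE, false /* fExecutable */);
    if (RT_SUCCESS(rc))
    {
        RTR0MEMOBJ hMapObj;
        rc = RTR0MemObjMapUser(&hMapObj, hMemObj, (void *)-1 /* any address */,
                               0 /* default alignment */, RTMEM_PROT_READ);
        if (RT_SUCCESS(rc))
            Log(("user mapping at %p in the current process\n", RTR0MemObjAddress(hMapObj)));

        int rc2 = RTR0MemObjFree(hMemObj, true /* fFreeMappings */);
        if (RT_SUCCESS(rc))
            rc = rc2;
    }
    return rc;
}
#endif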