VirtualBox

source: vbox/trunk/src/VBox/Runtime/r0drv/memobj-r0drv.cpp@395

Last change on this file since 395 was 393, checked in by vboxsync, 18 years ago

Duh. my bug - a horrible memory leak.

  • Property svn:keywords set to Id
File size: 25.1 KB
/* $Id: memobj-r0drv.cpp 393 2007-01-28 00:01:57Z vboxsync $ */
/** @file
 * InnoTek Portable Runtime - Ring-0 Memory Objects, Common Code.
 */

/*
 * Copyright (C) 2006 InnoTek Systemberatung GmbH
 *
 * This file is part of VirtualBox Open Source Edition (OSE), as
 * available from http://www.virtualbox.org. This file is free software;
 * you can redistribute it and/or modify it under the terms of the GNU
 * General Public License as published by the Free Software Foundation,
 * in version 2 as it comes in the "COPYING" file of the VirtualBox OSE
 * distribution. VirtualBox OSE is distributed in the hope that it will
 * be useful, but WITHOUT ANY WARRANTY of any kind.
 *
 * If you received this file as part of a commercial VirtualBox
 * distribution, then only the terms of your commercial VirtualBox
 * license agreement apply instead of the previous paragraph.
 */


/*******************************************************************************
*   Header Files                                                               *
*******************************************************************************/
#define LOG_GROUP RTLOGGROUP_DEFAULT ///@todo RTLOGGROUP_MEM
#include <iprt/memobj.h>
#include <iprt/alloc.h>
#include <iprt/process.h>
#include <iprt/assert.h>
#include <iprt/err.h>
#include <iprt/log.h>
#include <iprt/param.h>
#include "internal/memobj.h"


/**
 * Internal function for allocating a new memory object.
 *
 * @returns The allocated and initialized handle.
 * @param   cbSelf      The size of the memory object handle. 0 means default size.
 * @param   enmType     The memory object type.
 * @param   pv          The memory object mapping.
 * @param   cb          The size of the memory object.
 */
PRTR0MEMOBJINTERNAL rtR0MemObjNew(size_t cbSelf, RTR0MEMOBJTYPE enmType, void *pv, size_t cb)
{
    PRTR0MEMOBJINTERNAL pNew;

    /* validate the size */
    if (!cbSelf)
        cbSelf = sizeof(*pNew);
    Assert(cbSelf >= sizeof(*pNew));

    /*
     * Allocate and initialize the object.
     */
    pNew = (PRTR0MEMOBJINTERNAL)RTMemAllocZ(cbSelf);
    if (pNew)
    {
        pNew->u32Magic = RTR0MEMOBJ_MAGIC;
        pNew->cbSelf   = cbSelf;
        pNew->enmType  = enmType;
        pNew->cb       = cb;
        pNew->pv       = pv;
    }
    return pNew;
}


/**
 * Links a mapping object to a primary object.
 *
 * @returns IPRT status code.
 * @retval  VINF_SUCCESS on success.
 * @retval  VERR_NO_MEMORY if we couldn't expand the mapping array of the parent.
 * @param   pParent     The parent (primary) memory object.
 * @param   pChild      The child (mapping) memory object.
 */
static int rtR0MemObjLink(PRTR0MEMOBJINTERNAL pParent, PRTR0MEMOBJINTERNAL pChild)
{
    /* sanity */
    Assert(rtR0MemObjIsMapping(pChild));
    Assert(!rtR0MemObjIsMapping(pParent));

    /* expand the array? */
    const uint32_t i = pParent->uRel.Parent.cMappings;
    if (i >= pParent->uRel.Parent.cMappingsAllocated)
    {
        void *pv = RTMemRealloc(pParent->uRel.Parent.papMappings,
                                (i + 32) * sizeof(pParent->uRel.Parent.papMappings[0]));
        if (!pv)
            return VERR_NO_MEMORY;
        pParent->uRel.Parent.papMappings = (PPRTR0MEMOBJINTERNAL)pv;
        pParent->uRel.Parent.cMappingsAllocated = i + 32;
        Assert(i == pParent->uRel.Parent.cMappings);
    }

    /* do the linking. */
    pParent->uRel.Parent.papMappings[i] = pChild;
    pParent->uRel.Parent.cMappings++;
    pChild->uRel.Child.pParent = pParent;

    return VINF_SUCCESS;
}


/**
 * Checks if this is a mapping or not.
 *
 * @returns true if it's a mapping, otherwise false.
 * @param   MemObj  The ring-0 memory object handle.
 */
RTR0DECL(bool) RTR0MemObjIsMapping(RTR0MEMOBJ MemObj)
{
    /* Validate the object handle. */
    AssertPtrReturn(MemObj, false);
    PRTR0MEMOBJINTERNAL pMem = (PRTR0MEMOBJINTERNAL)MemObj;
    AssertReturn(pMem->u32Magic == RTR0MEMOBJ_MAGIC, false);
    AssertReturn(pMem->enmType > RTR0MEMOBJTYPE_INVALID && pMem->enmType < RTR0MEMOBJTYPE_END, false);

    /* hand it on to the inlined worker. */
    return rtR0MemObjIsMapping(pMem);
}


/**
 * Gets the address of a ring-0 memory object.
 *
 * @returns The address of the memory object.
 * @returns NULL if the handle is invalid (asserts in strict builds) or if there isn't any mapping.
 * @param   MemObj  The ring-0 memory object handle.
 */
RTR0DECL(void *) RTR0MemObjAddress(RTR0MEMOBJ MemObj)
{
    /* Validate the object handle. */
    AssertPtrReturn(MemObj, 0);
    PRTR0MEMOBJINTERNAL pMem = (PRTR0MEMOBJINTERNAL)MemObj;
    AssertReturn(pMem->u32Magic == RTR0MEMOBJ_MAGIC, 0);
    AssertReturn(pMem->enmType > RTR0MEMOBJTYPE_INVALID && pMem->enmType < RTR0MEMOBJTYPE_END, 0);

    /* return the mapping address. */
    return pMem->pv;
}


/**
 * Gets the size of a ring-0 memory object.
 *
 * @returns The size of the memory object.
 * @returns 0 if the handle is invalid (asserts in strict builds).
 * @param   MemObj  The ring-0 memory object handle.
 */
RTR0DECL(size_t) RTR0MemObjSize(RTR0MEMOBJ MemObj)
{
    /* Validate the object handle. */
    AssertPtrReturn(MemObj, 0);
    PRTR0MEMOBJINTERNAL pMem = (PRTR0MEMOBJINTERNAL)MemObj;
    AssertReturn(pMem->u32Magic == RTR0MEMOBJ_MAGIC, 0);
    AssertReturn(pMem->enmType > RTR0MEMOBJTYPE_INVALID && pMem->enmType < RTR0MEMOBJTYPE_END, 0);

    /* return the size. */
    return pMem->cb;
}


/**
 * Gets the physical address of a page in the memory object.
 *
 * @returns The physical address.
 * @returns NIL_RTHCPHYS if the object doesn't contain fixed physical pages.
 * @returns NIL_RTHCPHYS if the iPage is out of range.
 * @returns NIL_RTHCPHYS if the object handle isn't valid.
 * @param   MemObj  The ring-0 memory object handle.
 * @param   iPage   The page number within the object.
 */
RTR0DECL(RTHCPHYS) RTR0MemObjGetPagePhysAddr(RTR0MEMOBJ MemObj, unsigned iPage)
{
    /* Validate the object handle. */
    AssertPtrReturn(MemObj, NIL_RTHCPHYS);
    PRTR0MEMOBJINTERNAL pMem = (PRTR0MEMOBJINTERNAL)MemObj;
    AssertReturn(pMem->u32Magic == RTR0MEMOBJ_MAGIC, NIL_RTHCPHYS);
    AssertReturn(pMem->enmType > RTR0MEMOBJTYPE_INVALID && pMem->enmType < RTR0MEMOBJTYPE_END, NIL_RTHCPHYS);
    const unsigned cPages = (pMem->cb >> PAGE_SHIFT);
    if (iPage >= cPages)
    {
        /* permit: while (RTR0MemObjGetPagePhysAddr(pMem, iPage++) != NIL_RTHCPHYS) {} */
        if (iPage == cPages)
            return NIL_RTHCPHYS;
        AssertReturn(iPage < (pMem->cb >> PAGE_SHIFT), NIL_RTHCPHYS);
    }

    /*
     * We know the address of physically contiguous allocations and mappings.
     */
    if (pMem->enmType == RTR0MEMOBJTYPE_CONT)
        return pMem->u.Cont.Phys + iPage * PAGE_SIZE;
    if (pMem->enmType == RTR0MEMOBJTYPE_PHYS)
        return pMem->u.Phys.PhysBase + iPage * PAGE_SIZE;

    /*
     * Do the job.
     */
    return rtR0MemObjNativeGetPagePhysAddr(pMem, iPage);
}
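
/*
 * Usage sketch (illustrative, not part of the original file): the
 * NIL_RTHCPHYS sentinel documented above makes it easy to enumerate the
 * pages backing an object; rtR0MemObjExamplePageWalk is a hypothetical name.
 */
#if 0
static void rtR0MemObjExamplePageWalk(RTR0MEMOBJ hMemObj)
{
    unsigned iPage = 0;
    RTHCPHYS HCPhys;
    while ((HCPhys = RTR0MemObjGetPagePhysAddr(hMemObj, iPage)) != NIL_RTHCPHYS)
    {
        /* cast so the format string needs no RTHCPHYS-specific type. */
        Log(("page %u at %#llx\n", iPage, (unsigned long long)HCPhys));
        iPage++;
    }
}
#endif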


/**
 * Frees a ring-0 memory object.
 *
 * @returns IPRT status code.
 * @retval  VERR_INVALID_HANDLE if the handle is invalid.
 * @param   MemObj          The ring-0 memory object to be freed. NULL is accepted.
 * @param   fFreeMappings   Whether or not to free mappings of the object.
 */
RTR0DECL(int) RTR0MemObjFree(RTR0MEMOBJ MemObj, bool fFreeMappings)
{
    /*
     * Validate the object handle.
     */
    if (MemObj == NIL_RTR0MEMOBJ)
        return VINF_SUCCESS;
    AssertPtrReturn(MemObj, VERR_INVALID_HANDLE);
    PRTR0MEMOBJINTERNAL pMem = (PRTR0MEMOBJINTERNAL)MemObj;
    AssertReturn(pMem->u32Magic == RTR0MEMOBJ_MAGIC, VERR_INVALID_HANDLE);
    AssertReturn(pMem->enmType > RTR0MEMOBJTYPE_INVALID && pMem->enmType < RTR0MEMOBJTYPE_END, VERR_INVALID_HANDLE);

    /*
     * Deal with mappings according to fFreeMappings.
     */
    if (   !rtR0MemObjIsMapping(pMem)
        && pMem->uRel.Parent.cMappings > 0)
    {
        /* fail if not requested to free mappings. */
        if (!fFreeMappings)
            return VERR_MEMORY_BUSY;

        while (pMem->uRel.Parent.cMappings > 0)
        {
            PRTR0MEMOBJINTERNAL pChild = pMem->uRel.Parent.papMappings[--pMem->uRel.Parent.cMappings];
            pMem->uRel.Parent.papMappings[pMem->uRel.Parent.cMappings] = NULL;

            /* sanity checks. */
            AssertPtr(pChild);
            AssertFatal(pChild->u32Magic == RTR0MEMOBJ_MAGIC);
            AssertFatal(pChild->enmType > RTR0MEMOBJTYPE_INVALID && pChild->enmType < RTR0MEMOBJTYPE_END);
            AssertFatal(rtR0MemObjIsMapping(pChild));

            /* free the mapping. */
            int rc = rtR0MemObjNativeFree(pChild);
            if (RT_FAILURE(rc))
            {
                Log(("RTR0MemObjFree: failed to free mapping %p: %p %#zx; rc=%Vrc\n", pChild, pChild->pv, pChild->cb, rc));
                pMem->uRel.Parent.papMappings[pMem->uRel.Parent.cMappings++] = pChild;
                return rc;
            }
        }
    }

    /*
     * Free this object.
     */
    int rc = rtR0MemObjNativeFree(pMem);
    if (RT_SUCCESS(rc))
    {
        /*
         * Ok, it was freed just fine. Now, if it's a mapping we'll have to remove it from the parent.
         */
        if (rtR0MemObjIsMapping(pMem))
        {
            PRTR0MEMOBJINTERNAL pParent = pMem->uRel.Child.pParent;

            /* sanity checks */
            AssertPtr(pParent);
            AssertFatal(pParent->u32Magic == RTR0MEMOBJ_MAGIC);
            AssertFatal(pParent->enmType > RTR0MEMOBJTYPE_INVALID && pParent->enmType < RTR0MEMOBJTYPE_END);
            AssertFatal(!rtR0MemObjIsMapping(pParent));
            AssertFatal(pParent->uRel.Parent.cMappings > 0);
            AssertPtr(pParent->uRel.Parent.papMappings);

            /* locate and remove from the array of mappings. */
            uint32_t i = pParent->uRel.Parent.cMappings;
            while (i-- > 0)
            {
                if (pParent->uRel.Parent.papMappings[i] == pMem)
                {
                    pParent->uRel.Parent.papMappings[i] = pParent->uRel.Parent.papMappings[--pParent->uRel.Parent.cMappings];
                    break;
                }
            }
            Assert(i != UINT32_MAX);
        }
        else
            Assert(pMem->uRel.Parent.cMappings == 0);

        /*
         * Finally, destroy the handle.
         */
        pMem->u32Magic++;
        pMem->enmType = RTR0MEMOBJTYPE_END;
        if (!rtR0MemObjIsMapping(pMem))
            RTMemFree(pMem->uRel.Parent.papMappings);
        RTMemFree(pMem);
    }
    else
        Log(("RTR0MemObjFree: failed to free %p: %d %p %#zx; rc=%Vrc\n",
             pMem, pMem->enmType, pMem->pv, pMem->cb, rc));
    return rc;
}
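
/*
 * Usage sketch (illustrative): the fFreeMappings semantics above ensure a
 * primary object that still has live mappings cannot be freed by accident.
 * hMemObj is a hypothetical handle with mappings linked to it.
 */
#if 0
int rc = RTR0MemObjFree(hMemObj, false /* fFreeMappings */);
if (rc == VERR_MEMORY_BUSY)
    /* mappings still exist; retry, tearing them down as well. */
    rc = RTR0MemObjFree(hMemObj, true /* fFreeMappings */);
#endif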


/**
 * Allocates page aligned virtual kernel memory.
 *
 * The memory is taken from a non-paged (= fixed physical memory backing) pool.
 *
 * @returns IPRT status code.
 * @param   pMemObj     Where to store the ring-0 memory object handle.
 * @param   cb          Number of bytes to allocate. This is rounded up to nearest page.
 * @param   fExecutable Flag indicating whether it should be permitted to execute code in the memory object.
 */
RTR0DECL(int) RTR0MemObjAllocPage(PRTR0MEMOBJ pMemObj, size_t cb, bool fExecutable)
{
    /* sanity checks. */
    AssertPtrReturn(pMemObj, VERR_INVALID_POINTER);
    *pMemObj = NIL_RTR0MEMOBJ;
    AssertReturn(cb > 0, VERR_INVALID_PARAMETER);
    const size_t cbAligned = RT_ALIGN_Z(cb, PAGE_SIZE);
    AssertReturn(cb <= cbAligned, VERR_INVALID_PARAMETER);

    /* do the allocation. */
    return rtR0MemObjNativeAllocPage(pMemObj, cbAligned, fExecutable);
}
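
/*
 * Usage sketch (illustrative): a minimal allocate/use/free cycle. Since cb
 * is rounded up, RTR0MemObjSize() may report more than was requested.
 */
#if 0
RTR0MEMOBJ hMemObj;
int rc = RTR0MemObjAllocPage(&hMemObj, 3 * PAGE_SIZE, false /* fExecutable */);
if (RT_SUCCESS(rc))
{
    void *pv = RTR0MemObjAddress(hMemObj);  /* page aligned kernel address */
    /* ... use the pages at pv ... */
    rc = RTR0MemObjFree(hMemObj, false /* fFreeMappings */);
}
#endif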


/**
 * Allocates page aligned virtual kernel memory with physical backing below 4GB.
 *
 * The physical memory backing the allocation is fixed.
 *
 * @returns IPRT status code.
 * @param   pMemObj     Where to store the ring-0 memory object handle.
 * @param   cb          Number of bytes to allocate. This is rounded up to nearest page.
 * @param   fExecutable Flag indicating whether it should be permitted to execute code in the memory object.
 */
RTR0DECL(int) RTR0MemObjAllocLow(PRTR0MEMOBJ pMemObj, size_t cb, bool fExecutable)
{
    /* sanity checks. */
    AssertPtrReturn(pMemObj, VERR_INVALID_POINTER);
    *pMemObj = NIL_RTR0MEMOBJ;
    AssertReturn(cb > 0, VERR_INVALID_PARAMETER);
    const size_t cbAligned = RT_ALIGN_Z(cb, PAGE_SIZE);
    AssertReturn(cb <= cbAligned, VERR_INVALID_PARAMETER);

    /* do the allocation. */
    return rtR0MemObjNativeAllocLow(pMemObj, cbAligned, fExecutable);
}


/**
 * Allocates page aligned virtual kernel memory with contiguous physical backing below 4GB.
 *
 * The physical memory backing the allocation is fixed.
 *
 * @returns IPRT status code.
 * @param   pMemObj     Where to store the ring-0 memory object handle.
 * @param   cb          Number of bytes to allocate. This is rounded up to nearest page.
 * @param   fExecutable Flag indicating whether it should be permitted to execute code in the memory object.
 */
RTR0DECL(int) RTR0MemObjAllocCont(PRTR0MEMOBJ pMemObj, size_t cb, bool fExecutable)
{
    /* sanity checks. */
    AssertPtrReturn(pMemObj, VERR_INVALID_POINTER);
    *pMemObj = NIL_RTR0MEMOBJ;
    AssertReturn(cb > 0, VERR_INVALID_PARAMETER);
    const size_t cbAligned = RT_ALIGN_Z(cb, PAGE_SIZE);
    AssertReturn(cb <= cbAligned, VERR_INVALID_PARAMETER);

    /* do the allocation. */
    return rtR0MemObjNativeAllocCont(pMemObj, cbAligned, fExecutable);
}
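
/*
 * Usage sketch (illustrative): because the backing of an AllocCont object
 * is physically contiguous and below 4GB, the address of page 0 describes
 * the whole range, the typical pattern for a simple DMA-style buffer.
 */
#if 0
RTR0MEMOBJ hMemObj;
int rc = RTR0MemObjAllocCont(&hMemObj, _64K, false /* fExecutable */);
if (RT_SUCCESS(rc))
{
    RTHCPHYS HCPhysBase = RTR0MemObjGetPagePhysAddr(hMemObj, 0);
    /* ... hand HCPhysBase to the device; use RTR0MemObjAddress() in ring-0 ... */
}
#endif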


/**
 * Locks a range of user virtual memory.
 *
 * @returns IPRT status code.
 * @param   pMemObj     Where to store the ring-0 memory object handle.
 * @param   pv          User virtual address. This is rounded down to a page boundary.
 * @param   cb          Number of bytes to lock. This is rounded up to nearest page boundary.
 * @param   R0Process   The process to lock pages in. NIL_R0PROCESS is an alias for the current one.
 *
 * @remark  RTR0MemObjAddress() will return the rounded down address.
 */
RTR0DECL(int) RTR0MemObjLockUser(PRTR0MEMOBJ pMemObj, void *pv, size_t cb, RTR0PROCESS R0Process)
{
    /* sanity checks. */
    AssertPtrReturn(pMemObj, VERR_INVALID_POINTER);
    *pMemObj = NIL_RTR0MEMOBJ;
    AssertReturn(cb > 0, VERR_INVALID_PARAMETER);
    const size_t cbAligned = RT_ALIGN_Z(cb + ((uintptr_t)pv & PAGE_OFFSET_MASK), PAGE_SIZE);
    AssertReturn(cb <= cbAligned, VERR_INVALID_PARAMETER);
    void * const pvAligned = (void *)((uintptr_t)pv & ~(uintptr_t)PAGE_OFFSET_MASK);
    if (R0Process == NIL_RTR0PROCESS)
        R0Process = RTR0ProcHandleSelf();

    /* do the locking. */
    return rtR0MemObjNativeLockUser(pMemObj, pvAligned, cbAligned, R0Process);
}
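
/*
 * Usage sketch (illustrative): locking a buffer handed in from ring-3 so
 * it stays resident while ring-0 works on it. pvUser and cbUser are
 * hypothetical names.
 */
#if 0
RTR0MEMOBJ hLock;
int rc = RTR0MemObjLockUser(&hLock, pvUser, cbUser, NIL_RTR0PROCESS /* current process */);
if (RT_SUCCESS(rc))
{
    /* RTR0MemObjAddress() returns pvUser rounded down to a page boundary. */
    /* ... access the locked pages ... */
    rc = RTR0MemObjFree(hLock, false /* fFreeMappings */);
}
#endif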


/**
 * Locks a range of kernel virtual memory.
 *
 * @returns IPRT status code.
 * @param   pMemObj     Where to store the ring-0 memory object handle.
 * @param   pv          Kernel virtual address. This is rounded down to a page boundary.
 * @param   cb          Number of bytes to lock. This is rounded up to nearest page boundary.
 *
 * @remark  RTR0MemObjAddress() will return the rounded down address.
 */
RTR0DECL(int) RTR0MemObjLockKernel(PRTR0MEMOBJ pMemObj, void *pv, size_t cb)
{
    /* sanity checks. */
    AssertPtrReturn(pMemObj, VERR_INVALID_POINTER);
    *pMemObj = NIL_RTR0MEMOBJ;
    AssertReturn(cb > 0, VERR_INVALID_PARAMETER);
    const size_t cbAligned = RT_ALIGN_Z(cb + ((uintptr_t)pv & PAGE_OFFSET_MASK), PAGE_SIZE);
    AssertReturn(cb <= cbAligned, VERR_INVALID_PARAMETER);
    void * const pvAligned = (void *)((uintptr_t)pv & ~(uintptr_t)PAGE_OFFSET_MASK);
    AssertPtrReturn(pvAligned, VERR_INVALID_POINTER);

    /* do the locking. */
    return rtR0MemObjNativeLockKernel(pMemObj, pvAligned, cbAligned);
}


/**
 * Allocates page aligned physical memory without (necessarily) any kernel mapping.
 *
 * @returns IPRT status code.
 * @param   pMemObj     Where to store the ring-0 memory object handle.
 * @param   cb          Number of bytes to allocate. This is rounded up to nearest page.
 * @param   PhysHighest The highest permissible address (inclusive).
 *                      Pass NIL_RTHCPHYS if any address is acceptable.
 */
RTR0DECL(int) RTR0MemObjAllocPhys(PRTR0MEMOBJ pMemObj, size_t cb, RTHCPHYS PhysHighest)
{
    /* sanity checks. */
    AssertPtrReturn(pMemObj, VERR_INVALID_POINTER);
    *pMemObj = NIL_RTR0MEMOBJ;
    AssertReturn(cb > 0, VERR_INVALID_PARAMETER);
    const size_t cbAligned = RT_ALIGN_Z(cb, PAGE_SIZE);
    AssertReturn(cb <= cbAligned, VERR_INVALID_PARAMETER);
    AssertReturn(PhysHighest >= cb, VERR_INVALID_PARAMETER);

    /* do the allocation. */
    return rtR0MemObjNativeAllocPhys(pMemObj, cbAligned, PhysHighest);
}


/**
 * Creates a page aligned, contiguous, physical memory object.
 *
 * No physical memory is allocated; we trust you know what you're doing.
 *
 * @returns IPRT status code.
 * @param   pMemObj     Where to store the ring-0 memory object handle.
 * @param   Phys        The physical address to start at. This is rounded down to the
 *                      nearest page boundary.
 * @param   cb          The size of the object in bytes. This is rounded up to nearest page boundary.
 */
RTR0DECL(int) RTR0MemObjEnterPhys(PRTR0MEMOBJ pMemObj, RTHCPHYS Phys, size_t cb)
{
    /* sanity checks. */
    AssertPtrReturn(pMemObj, VERR_INVALID_POINTER);
    *pMemObj = NIL_RTR0MEMOBJ;
    AssertReturn(cb > 0, VERR_INVALID_PARAMETER);
    const size_t cbAligned = RT_ALIGN_Z(cb + (Phys & PAGE_OFFSET_MASK), PAGE_SIZE);
    AssertReturn(cb <= cbAligned, VERR_INVALID_PARAMETER);
    AssertReturn(Phys != NIL_RTHCPHYS, VERR_INVALID_PARAMETER);
    const RTHCPHYS PhysAligned = Phys & ~(RTHCPHYS)PAGE_OFFSET_MASK;

    /* do the job. */
    return rtR0MemObjNativeEnterPhys(pMemObj, PhysAligned, cbAligned);
}
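
/*
 * Usage sketch (illustrative): wrapping a device MMIO range in a memory
 * object. Nothing is allocated; the physical address is hypothetical.
 */
#if 0
RTR0MEMOBJ hPhys;
int rc = RTR0MemObjEnterPhys(&hPhys, 0xfee00000 /* hypothetical MMIO base */, PAGE_SIZE);
/* the object can then be mapped via RTR0MemObjMapKernel()/RTR0MemObjMapUser(). */
#endif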


/**
 * Reserves kernel virtual address space.
 *
 * @returns IPRT status code.
 * @param   pMemObj     Where to store the ring-0 memory object handle.
 * @param   pvFixed     Requested address. (void *)-1 means any address. This must match the alignment.
 * @param   cb          The number of bytes to reserve. This is rounded up to nearest page.
 * @param   uAlignment  The alignment of the reserved memory.
 *                      Supported values are 0 (alias for PAGE_SIZE), PAGE_SIZE, _2M and _4M.
 */
RTR0DECL(int) RTR0MemObjReserveKernel(PRTR0MEMOBJ pMemObj, void *pvFixed, size_t cb, size_t uAlignment)
{
    /* sanity checks. */
    AssertPtrReturn(pMemObj, VERR_INVALID_POINTER);
    *pMemObj = NIL_RTR0MEMOBJ;
    if (uAlignment == 0)
        uAlignment = PAGE_SIZE;
    AssertReturn(uAlignment == PAGE_SIZE || uAlignment == _2M || uAlignment == _4M, VERR_INVALID_PARAMETER);
    AssertReturn(cb > 0, VERR_INVALID_PARAMETER);
    const size_t cbAligned = RT_ALIGN_Z(cb, PAGE_SIZE);
    AssertReturn(cb <= cbAligned, VERR_INVALID_PARAMETER);
    if (pvFixed != (void *)-1)
        AssertReturn(!((uintptr_t)pvFixed & (uAlignment - 1)), VERR_INVALID_PARAMETER);

    /* do the reservation. */
    return rtR0MemObjNativeReserveKernel(pMemObj, pvFixed, cbAligned, uAlignment);
}
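
/*
 * Usage sketch (illustrative): reserving 4MB of kernel address space at a
 * 4MB boundary, anywhere the OS likes.
 */
#if 0
RTR0MEMOBJ hReserved;
int rc = RTR0MemObjReserveKernel(&hReserved, (void *)-1 /* any address */, _4M, _4M);
#endif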


/**
 * Reserves user virtual address space in the current process.
 *
 * @returns IPRT status code.
 * @param   pMemObj     Where to store the ring-0 memory object handle.
 * @param   pvFixed     Requested address. (void *)-1 means any address. This must match the alignment.
 * @param   cb          The number of bytes to reserve. This is rounded up to nearest PAGE_SIZE.
 * @param   uAlignment  The alignment of the reserved memory.
 *                      Supported values are 0 (alias for PAGE_SIZE), PAGE_SIZE, _2M and _4M.
 * @param   R0Process   The process to reserve the memory in. NIL_R0PROCESS is an alias for the current one.
 */
RTR0DECL(int) RTR0MemObjReserveUser(PRTR0MEMOBJ pMemObj, void *pvFixed, size_t cb, size_t uAlignment, RTR0PROCESS R0Process)
{
    /* sanity checks. */
    AssertPtrReturn(pMemObj, VERR_INVALID_POINTER);
    *pMemObj = NIL_RTR0MEMOBJ;
    if (uAlignment == 0)
        uAlignment = PAGE_SIZE;
    AssertReturn(uAlignment == PAGE_SIZE || uAlignment == _2M || uAlignment == _4M, VERR_INVALID_PARAMETER);
    AssertReturn(cb > 0, VERR_INVALID_PARAMETER);
    const size_t cbAligned = RT_ALIGN_Z(cb, PAGE_SIZE);
    AssertReturn(cb <= cbAligned, VERR_INVALID_PARAMETER);
    if (pvFixed != (void *)-1)
        AssertReturn(!((uintptr_t)pvFixed & (uAlignment - 1)), VERR_INVALID_PARAMETER);
    if (R0Process == NIL_RTR0PROCESS)
        R0Process = RTR0ProcHandleSelf();

    /* do the reservation. */
    return rtR0MemObjNativeReserveUser(pMemObj, pvFixed, cbAligned, uAlignment, R0Process);
}


/**
 * Maps a memory object into kernel virtual address space.
 *
 * @returns IPRT status code.
 * @param   pMemObj     Where to store the ring-0 memory object handle of the mapping object.
 * @param   MemObjToMap The object to be mapped.
 * @param   pvFixed     Requested address. (void *)-1 means any address. This must match the alignment.
 * @param   uAlignment  The alignment of the mapping.
 *                      Supported values are 0 (alias for PAGE_SIZE), PAGE_SIZE, _2M and _4M.
 * @param   fProt       Combination of RTMEM_PROT_* flags (except RTMEM_PROT_NONE).
 */
RTR0DECL(int) RTR0MemObjMapKernel(PRTR0MEMOBJ pMemObj, RTR0MEMOBJ MemObjToMap, void *pvFixed, size_t uAlignment, unsigned fProt)
{
    /* sanity checks. */
    AssertPtrReturn(pMemObj, VERR_INVALID_POINTER);
    *pMemObj = NIL_RTR0MEMOBJ;
    AssertPtrReturn(MemObjToMap, VERR_INVALID_HANDLE);
    PRTR0MEMOBJINTERNAL pMemToMap = (PRTR0MEMOBJINTERNAL)MemObjToMap;
    AssertReturn(pMemToMap->u32Magic == RTR0MEMOBJ_MAGIC, VERR_INVALID_HANDLE);
    AssertReturn(pMemToMap->enmType > RTR0MEMOBJTYPE_INVALID && pMemToMap->enmType < RTR0MEMOBJTYPE_END, VERR_INVALID_HANDLE);
    AssertReturn(!rtR0MemObjIsMapping(pMemToMap), VERR_INVALID_PARAMETER);
    AssertReturn(pMemToMap->enmType != RTR0MEMOBJTYPE_RES_VIRT, VERR_INVALID_PARAMETER);
    if (uAlignment == 0)
        uAlignment = PAGE_SIZE;
    AssertReturn(uAlignment == PAGE_SIZE || uAlignment == _2M || uAlignment == _4M, VERR_INVALID_PARAMETER);
    if (pvFixed != (void *)-1)
        AssertReturn(!((uintptr_t)pvFixed & (uAlignment - 1)), VERR_INVALID_PARAMETER);
    AssertReturn(fProt != RTMEM_PROT_NONE, VERR_INVALID_PARAMETER);
    AssertReturn(!(fProt & ~(RTMEM_PROT_READ | RTMEM_PROT_WRITE | RTMEM_PROT_EXEC)), VERR_INVALID_PARAMETER);

    /* do the mapping. */
    PRTR0MEMOBJINTERNAL pNew;
    int rc = rtR0MemObjNativeMapKernel(&pNew, pMemToMap, pvFixed, uAlignment, fProt);
    if (RT_SUCCESS(rc))
    {
        /* link it. */
        rc = rtR0MemObjLink(pMemToMap, pNew);
        if (RT_SUCCESS(rc))
            *pMemObj = pNew;
        else
        {
            /* damn, out of memory. bail out. */
            int rc2 = rtR0MemObjNativeFree(pNew);
            AssertRC(rc2);
            pNew->u32Magic++;
            pNew->enmType = RTR0MEMOBJTYPE_END;
            RTMemFree(pNew);
        }
    }

    return rc;
}
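
/*
 * Usage sketch (illustrative): an RTR0MemObjAllocPhys() object does not
 * necessarily have a kernel mapping of its own, so mapping it is the way
 * to access it from ring-0. The mapping becomes a child object of the
 * hypothetical hPhysAlloc and is torn down with it when fFreeMappings=true.
 */
#if 0
RTR0MEMOBJ hMap;
int rc = RTR0MemObjMapKernel(&hMap, hPhysAlloc, (void *)-1 /* any address */,
                             0 /* alias for PAGE_SIZE */, RTMEM_PROT_READ | RTMEM_PROT_WRITE);
if (RT_SUCCESS(rc))
{
    void *pv = RTR0MemObjAddress(hMap); /* valid until hMap is freed */
    /* ... */
}
#endif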


/**
 * Maps a memory object into user virtual address space in the current process.
 *
 * @returns IPRT status code.
 * @param   pMemObj     Where to store the ring-0 memory object handle of the mapping object.
 * @param   MemObjToMap The object to be mapped.
 * @param   pvFixed     Requested address. (void *)-1 means any address. This must match the alignment.
 * @param   uAlignment  The alignment of the mapping.
 *                      Supported values are 0 (alias for PAGE_SIZE), PAGE_SIZE, _2M and _4M.
 * @param   fProt       Combination of RTMEM_PROT_* flags (except RTMEM_PROT_NONE).
 * @param   R0Process   The process to map the memory into. NIL_R0PROCESS is an alias for the current one.
 */
RTR0DECL(int) RTR0MemObjMapUser(PRTR0MEMOBJ pMemObj, RTR0MEMOBJ MemObjToMap, void *pvFixed, size_t uAlignment, unsigned fProt, RTR0PROCESS R0Process)
{
    /* sanity checks. */
    AssertPtrReturn(pMemObj, VERR_INVALID_POINTER);
    *pMemObj = NIL_RTR0MEMOBJ;
    AssertPtrReturn(MemObjToMap, VERR_INVALID_HANDLE);
    PRTR0MEMOBJINTERNAL pMemToMap = (PRTR0MEMOBJINTERNAL)MemObjToMap;
    AssertReturn(pMemToMap->u32Magic == RTR0MEMOBJ_MAGIC, VERR_INVALID_HANDLE);
    AssertReturn(pMemToMap->enmType > RTR0MEMOBJTYPE_INVALID && pMemToMap->enmType < RTR0MEMOBJTYPE_END, VERR_INVALID_HANDLE);
    AssertReturn(!rtR0MemObjIsMapping(pMemToMap), VERR_INVALID_PARAMETER);
    AssertReturn(pMemToMap->enmType != RTR0MEMOBJTYPE_RES_VIRT, VERR_INVALID_PARAMETER);
    if (uAlignment == 0)
        uAlignment = PAGE_SIZE;
    AssertReturn(uAlignment == PAGE_SIZE || uAlignment == _2M || uAlignment == _4M, VERR_INVALID_PARAMETER);
    if (pvFixed != (void *)-1)
        AssertReturn(!((uintptr_t)pvFixed & (uAlignment - 1)), VERR_INVALID_PARAMETER);
    AssertReturn(fProt != RTMEM_PROT_NONE, VERR_INVALID_PARAMETER);
    AssertReturn(!(fProt & ~(RTMEM_PROT_READ | RTMEM_PROT_WRITE | RTMEM_PROT_EXEC)), VERR_INVALID_PARAMETER);
    if (R0Process == NIL_RTR0PROCESS)
        R0Process = RTR0ProcHandleSelf();

    /* do the mapping. */
    PRTR0MEMOBJINTERNAL pNew;
    int rc = rtR0MemObjNativeMapUser(&pNew, pMemToMap, pvFixed, uAlignment, fProt, R0Process);
    if (RT_SUCCESS(rc))
    {
        /* link it. */
        rc = rtR0MemObjLink(pMemToMap, pNew);
        if (RT_SUCCESS(rc))
            *pMemObj = pNew;
        else
        {
            /* damn, out of memory. bail out. */
            int rc2 = rtR0MemObjNativeFree(pNew);
            AssertRC(rc2);
            pNew->u32Magic++;
            pNew->enmType = RTR0MEMOBJTYPE_END;
            RTMemFree(pNew);
        }
    }

    return rc;
}
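
/*
 * Usage sketch (illustrative): exposing a ring-0 allocation read-only to
 * the calling process. hMemObj is a hypothetical primary object.
 */
#if 0
RTR0MEMOBJ hMapUser;
int rc = RTR0MemObjMapUser(&hMapUser, hMemObj, (void *)-1 /* any address */,
                           0 /* alias for PAGE_SIZE */, RTMEM_PROT_READ,
                           NIL_RTR0PROCESS /* current process */);
#endif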