VirtualBox

source: vbox/trunk/src/VBox/Runtime/r0drv/memobj-r0drv.cpp@1190

Last change on this file since 1190 was 1190, checked in by vboxsync, 18 years ago

Ported IPRT to ring-0 OS/2.

  • Property svn:keywords set to Id
File size: 25.9 KB
/* $Id: memobj-r0drv.cpp 1190 2007-03-04 20:42:13Z vboxsync $ */
/** @file
 * InnoTek Portable Runtime - Ring-0 Memory Objects, Common Code.
 */

/*
 * Copyright (C) 2006 InnoTek Systemberatung GmbH
 *
 * This file is part of VirtualBox Open Source Edition (OSE), as
 * available from http://www.virtualbox.org. This file is free software;
 * you can redistribute it and/or modify it under the terms of the GNU
 * General Public License as published by the Free Software Foundation,
 * in version 2 as it comes in the "COPYING" file of the VirtualBox OSE
 * distribution. VirtualBox OSE is distributed in the hope that it will
 * be useful, but WITHOUT ANY WARRANTY of any kind.
 *
 * If you received this file as part of a commercial VirtualBox
 * distribution, then only the terms of your commercial VirtualBox
 * license agreement apply instead of the previous paragraph.
 */


/*******************************************************************************
*   Header Files                                                               *
*******************************************************************************/
#define LOG_GROUP RTLOGGROUP_DEFAULT ///@todo RTLOGGROUP_MEM
#include <iprt/memobj.h>
#include <iprt/alloc.h>
#include <iprt/process.h>
#include <iprt/assert.h>
#include <iprt/err.h>
#include <iprt/log.h>
#include <iprt/param.h>
#include "internal/memobj.h"

/**
 * Internal function for allocating a new memory object.
 *
 * @returns The allocated and initialized handle.
 * @param   cbSelf      The size of the memory object handle. 0 means default size.
 * @param   enmType     The memory object type.
 * @param   pv          The memory object mapping.
 * @param   cb          The size of the memory object.
 */
PRTR0MEMOBJINTERNAL rtR0MemObjNew(size_t cbSelf, RTR0MEMOBJTYPE enmType, void *pv, size_t cb)
{
    PRTR0MEMOBJINTERNAL pNew;

    /* validate the size */
    if (!cbSelf)
        cbSelf = sizeof(*pNew);
    Assert(cbSelf >= sizeof(*pNew));

    /*
     * Allocate and initialize the object.
     */
    pNew = (PRTR0MEMOBJINTERNAL)RTMemAllocZ(cbSelf);
    if (pNew)
    {
        pNew->u32Magic = RTR0MEMOBJ_MAGIC;
        pNew->cbSelf   = cbSelf;
        pNew->enmType  = enmType;
        pNew->cb       = cb;
        pNew->pv       = pv;
    }
    return pNew;
}

/**
 * Deletes an incomplete memory object.
 *
 * This is for cleaning up after failures during object creation.
 *
 * @param   pMem    The incomplete memory object to delete.
 */
void rtR0MemObjDelete(PRTR0MEMOBJINTERNAL pMem)
{
    if (pMem)
    {
        pMem->u32Magic++;
        pMem->enmType = RTR0MEMOBJTYPE_END;
        RTMemFree(pMem);
    }
}

/**
 * Links a mapping object to a primary object.
 *
 * @returns IPRT status code.
 * @retval  VINF_SUCCESS on success.
 * @retval  VERR_NO_MEMORY if we couldn't expand the mapping array of the parent.
 * @param   pParent     The parent (primary) memory object.
 * @param   pChild      The child (mapping) memory object.
 */
static int rtR0MemObjLink(PRTR0MEMOBJINTERNAL pParent, PRTR0MEMOBJINTERNAL pChild)
{
    /* sanity */
    Assert(rtR0MemObjIsMapping(pChild));
    Assert(!rtR0MemObjIsMapping(pParent));

    /* expand the array? */
    const uint32_t i = pParent->uRel.Parent.cMappings;
    if (i >= pParent->uRel.Parent.cMappingsAllocated)
    {
        void *pv = RTMemRealloc(pParent->uRel.Parent.papMappings,
                                (i + 32) * sizeof(pParent->uRel.Parent.papMappings[0]));
        if (!pv)
            return VERR_NO_MEMORY;
        pParent->uRel.Parent.papMappings = (PPRTR0MEMOBJINTERNAL)pv;
        pParent->uRel.Parent.cMappingsAllocated = i + 32;
        Assert(i == pParent->uRel.Parent.cMappings);
    }

    /* do the linking. */
    pParent->uRel.Parent.papMappings[i] = pChild;
    pParent->uRel.Parent.cMappings++;
    pChild->uRel.Child.pParent = pParent;

    return VINF_SUCCESS;
}

/**
 * Checks if this is a mapping or not.
 *
 * @returns true if it's a mapping, otherwise false.
 * @param   MemObj      The ring-0 memory object handle.
 */
RTR0DECL(bool) RTR0MemObjIsMapping(RTR0MEMOBJ MemObj)
{
    /* Validate the object handle. */
    AssertPtrReturn(MemObj, false);
    PRTR0MEMOBJINTERNAL pMem = (PRTR0MEMOBJINTERNAL)MemObj;
    AssertMsgReturn(pMem->u32Magic == RTR0MEMOBJ_MAGIC, ("%p: %#x\n", pMem, pMem->u32Magic), false);
    AssertMsgReturn(pMem->enmType > RTR0MEMOBJTYPE_INVALID && pMem->enmType < RTR0MEMOBJTYPE_END, ("%p: %d\n", pMem, pMem->enmType), false);

    /* hand it on to the inlined worker. */
    return rtR0MemObjIsMapping(pMem);
}

/**
 * Gets the address of a ring-0 memory object.
 *
 * @returns The address of the memory object.
 * @returns NULL if the handle is invalid (asserts in strict builds) or if there isn't any mapping.
 * @param   MemObj      The ring-0 memory object handle.
 */
RTR0DECL(void *) RTR0MemObjAddress(RTR0MEMOBJ MemObj)
{
    /* Validate the object handle. */
    AssertPtrReturn(MemObj, 0);
    PRTR0MEMOBJINTERNAL pMem = (PRTR0MEMOBJINTERNAL)MemObj;
    AssertMsgReturn(pMem->u32Magic == RTR0MEMOBJ_MAGIC, ("%p: %#x\n", pMem, pMem->u32Magic), 0);
    AssertMsgReturn(pMem->enmType > RTR0MEMOBJTYPE_INVALID && pMem->enmType < RTR0MEMOBJTYPE_END, ("%p: %d\n", pMem, pMem->enmType), 0);

    /* return the mapping address. */
    return pMem->pv;
}

/**
 * Gets the size of a ring-0 memory object.
 *
 * @returns The size of the memory object.
 * @returns 0 if the handle is invalid (asserts in strict builds).
 * @param   MemObj      The ring-0 memory object handle.
 */
RTR0DECL(size_t) RTR0MemObjSize(RTR0MEMOBJ MemObj)
{
    /* Validate the object handle. */
    AssertPtrReturn(MemObj, 0);
    PRTR0MEMOBJINTERNAL pMem = (PRTR0MEMOBJINTERNAL)MemObj;
    AssertMsgReturn(pMem->u32Magic == RTR0MEMOBJ_MAGIC, ("%p: %#x\n", pMem, pMem->u32Magic), 0);
    AssertMsgReturn(pMem->enmType > RTR0MEMOBJTYPE_INVALID && pMem->enmType < RTR0MEMOBJTYPE_END, ("%p: %d\n", pMem, pMem->enmType), 0);

    /* return the size. */
    return pMem->cb;
}

/**
 * Gets the physical address of a page in the memory object.
 *
 * @returns The physical address.
 * @returns NIL_RTHCPHYS if the object doesn't contain fixed physical pages.
 * @returns NIL_RTHCPHYS if the iPage is out of range.
 * @returns NIL_RTHCPHYS if the object handle isn't valid.
 * @param   MemObj      The ring-0 memory object handle.
 * @param   iPage       The page number within the object.
 */
RTR0DECL(RTHCPHYS) RTR0MemObjGetPagePhysAddr(RTR0MEMOBJ MemObj, unsigned iPage)
{
    /* Validate the object handle. */
    AssertPtrReturn(MemObj, NIL_RTHCPHYS);
    PRTR0MEMOBJINTERNAL pMem = (PRTR0MEMOBJINTERNAL)MemObj;
    AssertMsgReturn(pMem->u32Magic == RTR0MEMOBJ_MAGIC, ("%p: %#x\n", pMem, pMem->u32Magic), NIL_RTHCPHYS);
    AssertMsgReturn(pMem->enmType > RTR0MEMOBJTYPE_INVALID && pMem->enmType < RTR0MEMOBJTYPE_END, ("%p: %d\n", pMem, pMem->enmType), NIL_RTHCPHYS);
    const unsigned cPages = (pMem->cb >> PAGE_SHIFT);
    if (iPage >= cPages)
    {
        /* permit: while (RTR0MemObjGetPagePhysAddr(pMem, iPage++) != NIL_RTHCPHYS) {} */
        if (iPage == cPages)
            return NIL_RTHCPHYS;
        AssertReturn(iPage < (pMem->cb >> PAGE_SHIFT), NIL_RTHCPHYS);
    }

    /*
     * We know the address of physically contiguous allocations and mappings.
     */
    if (pMem->enmType == RTR0MEMOBJTYPE_CONT)
        return pMem->u.Cont.Phys + iPage * PAGE_SIZE;
    if (pMem->enmType == RTR0MEMOBJTYPE_PHYS)
        return pMem->u.Phys.PhysBase + iPage * PAGE_SIZE;

    /*
     * Do the job.
     */
    return rtR0MemObjNativeGetPagePhysAddr(pMem, iPage);
}

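/*
 * Example (illustrative sketch, using only the API documented above):
 * counting the pages backing an object via the termination pattern that the
 * out-of-range check explicitly permits.  Objects without fixed physical
 * backing return NIL_RTHCPHYS for page 0 already.
 */
#if 0
static unsigned exampleCountPhysPages(RTR0MEMOBJ hMemObj)
{
    unsigned iPage = 0;
    while (RTR0MemObjGetPagePhysAddr(hMemObj, iPage) != NIL_RTHCPHYS)
        iPage++;
    return iPage; /* number of pages with a known physical address */
}
#endif
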
/**
 * Frees a ring-0 memory object.
 *
 * @returns IPRT status code.
 * @retval  VERR_INVALID_HANDLE if the handle isn't valid.
 * @param   MemObj          The ring-0 memory object to be freed. NULL is accepted.
 * @param   fFreeMappings   Whether or not to free mappings of the object.
 */
RTR0DECL(int) RTR0MemObjFree(RTR0MEMOBJ MemObj, bool fFreeMappings)
{
    /*
     * Validate the object handle.
     */
    if (MemObj == NIL_RTR0MEMOBJ)
        return VINF_SUCCESS;
    AssertPtrReturn(MemObj, VERR_INVALID_HANDLE);
    PRTR0MEMOBJINTERNAL pMem = (PRTR0MEMOBJINTERNAL)MemObj;
    AssertReturn(pMem->u32Magic == RTR0MEMOBJ_MAGIC, VERR_INVALID_HANDLE);
    AssertReturn(pMem->enmType > RTR0MEMOBJTYPE_INVALID && pMem->enmType < RTR0MEMOBJTYPE_END, VERR_INVALID_HANDLE);

    /*
     * Deal with mappings according to fFreeMappings.
     */
    if (    !rtR0MemObjIsMapping(pMem)
        &&  pMem->uRel.Parent.cMappings > 0)
    {
        /* fail if not requested to free mappings. */
        if (!fFreeMappings)
            return VERR_MEMORY_BUSY;

        while (pMem->uRel.Parent.cMappings > 0)
        {
            PRTR0MEMOBJINTERNAL pChild = pMem->uRel.Parent.papMappings[--pMem->uRel.Parent.cMappings];
            pMem->uRel.Parent.papMappings[pMem->uRel.Parent.cMappings] = NULL;

            /* sanity checks. */
            AssertPtr(pChild);
            AssertFatal(pChild->u32Magic == RTR0MEMOBJ_MAGIC);
            AssertFatal(pChild->enmType > RTR0MEMOBJTYPE_INVALID && pChild->enmType < RTR0MEMOBJTYPE_END);
            AssertFatal(rtR0MemObjIsMapping(pChild));

            /* free the mapping. */
            int rc = rtR0MemObjNativeFree(pChild);
            if (RT_FAILURE(rc))
            {
                Log(("RTR0MemObjFree: failed to free mapping %p: %p %#zx; rc=%Vrc\n", pChild, pChild->pv, pChild->cb, rc));
                pMem->uRel.Parent.papMappings[pMem->uRel.Parent.cMappings++] = pChild;
                return rc;
            }
        }
    }

    /*
     * Free this object.
     */
    int rc = rtR0MemObjNativeFree(pMem);
    if (RT_SUCCESS(rc))
    {
        /*
         * Ok, it was freed just fine. Now, if it's a mapping we'll have to remove it from the parent.
         */
        if (rtR0MemObjIsMapping(pMem))
        {
            PRTR0MEMOBJINTERNAL pParent = pMem->uRel.Child.pParent;

            /* sanity checks */
            AssertPtr(pParent);
            AssertFatal(pParent->u32Magic == RTR0MEMOBJ_MAGIC);
            AssertFatal(pParent->enmType > RTR0MEMOBJTYPE_INVALID && pParent->enmType < RTR0MEMOBJTYPE_END);
            AssertFatal(!rtR0MemObjIsMapping(pParent));
            AssertFatal(pParent->uRel.Parent.cMappings > 0);
            AssertPtr(pParent->uRel.Parent.papMappings);

            /* locate and remove from the array of mappings. */
            uint32_t i = pParent->uRel.Parent.cMappings;
            while (i-- > 0)
            {
                if (pParent->uRel.Parent.papMappings[i] == pMem)
                {
                    pParent->uRel.Parent.papMappings[i] = pParent->uRel.Parent.papMappings[--pParent->uRel.Parent.cMappings];
                    break;
                }
            }
            Assert(i != UINT32_MAX);
        }
        else
            Assert(pMem->uRel.Parent.cMappings == 0);

        /*
         * Finally, destroy the handle.
         */
        pMem->u32Magic++;
        pMem->enmType = RTR0MEMOBJTYPE_END;
        if (!rtR0MemObjIsMapping(pMem))
            RTMemFree(pMem->uRel.Parent.papMappings);
        RTMemFree(pMem);
    }
    else
        Log(("RTR0MemObjFree: failed to free %p: %d %p %#zx; rc=%Vrc\n",
             pMem, pMem->enmType, pMem->pv, pMem->cb, rc));
    return rc;
}


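/*
 * Example (illustrative sketch): freeing a primary object that may still
 * have live mappings.  A first attempt with fFreeMappings=false fails with
 * VERR_MEMORY_BUSY if mappings exist; retrying with true tears them down
 * before the primary object itself is freed.
 */
#if 0
static int exampleFreeWithMappings(RTR0MEMOBJ hPrimary)
{
    int rc = RTR0MemObjFree(hPrimary, false /* fFreeMappings */);
    if (rc == VERR_MEMORY_BUSY)
        rc = RTR0MemObjFree(hPrimary, true /* also free mappings */);
    return rc;
}
#endif
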
/**
 * Allocates page aligned virtual kernel memory.
 *
 * The memory is taken from a non paged (= fixed physical memory backing) pool.
 *
 * @returns IPRT status code.
 * @param   pMemObj     Where to store the ring-0 memory object handle.
 * @param   cb          Number of bytes to allocate. This is rounded up to nearest page.
 * @param   fExecutable Flag indicating whether it should be permitted to execute code in the memory object.
 */
RTR0DECL(int) RTR0MemObjAllocPage(PRTR0MEMOBJ pMemObj, size_t cb, bool fExecutable)
{
    /* sanity checks. */
    AssertPtrReturn(pMemObj, VERR_INVALID_POINTER);
    *pMemObj = NIL_RTR0MEMOBJ;
    AssertReturn(cb > 0, VERR_INVALID_PARAMETER);
    const size_t cbAligned = RT_ALIGN_Z(cb, PAGE_SIZE);
    AssertReturn(cb <= cbAligned, VERR_INVALID_PARAMETER);

    /* do the allocation. */
    return rtR0MemObjNativeAllocPage(pMemObj, cbAligned, fExecutable);
}

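/*
 * Example (illustrative sketch): allocating a two-page scratch buffer,
 * touching it through the handle accessors, and freeing it again.
 */
#if 0
static int exampleAllocScratch(void)
{
    RTR0MEMOBJ hMemObj;
    int rc = RTR0MemObjAllocPage(&hMemObj, 2 * PAGE_SIZE, false /* fExecutable */);
    if (RT_SUCCESS(rc))
    {
        uint8_t *pb = (uint8_t *)RTR0MemObjAddress(hMemObj);
        size_t   cb = RTR0MemObjSize(hMemObj); /* whole pages */
        pb[0] = pb[cb - 1] = 0x42;             /* backing is fixed (non-paged) */
        rc = RTR0MemObjFree(hMemObj, false /* fFreeMappings */);
    }
    return rc;
}
#endif
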
/**
 * Allocates page aligned virtual kernel memory with physical backing below 4GB.
 *
 * The physical memory backing the allocation is fixed.
 *
 * @returns IPRT status code.
 * @param   pMemObj     Where to store the ring-0 memory object handle.
 * @param   cb          Number of bytes to allocate. This is rounded up to nearest page.
 * @param   fExecutable Flag indicating whether it should be permitted to execute code in the memory object.
 */
RTR0DECL(int) RTR0MemObjAllocLow(PRTR0MEMOBJ pMemObj, size_t cb, bool fExecutable)
{
    /* sanity checks. */
    AssertPtrReturn(pMemObj, VERR_INVALID_POINTER);
    *pMemObj = NIL_RTR0MEMOBJ;
    AssertReturn(cb > 0, VERR_INVALID_PARAMETER);
    const size_t cbAligned = RT_ALIGN_Z(cb, PAGE_SIZE);
    AssertReturn(cb <= cbAligned, VERR_INVALID_PARAMETER);

    /* do the allocation. */
    return rtR0MemObjNativeAllocLow(pMemObj, cbAligned, fExecutable);
}

/**
 * Allocates page aligned virtual kernel memory with contiguous physical backing below 4GB.
 *
 * The physical memory backing the allocation is fixed.
 *
 * @returns IPRT status code.
 * @param   pMemObj     Where to store the ring-0 memory object handle.
 * @param   cb          Number of bytes to allocate. This is rounded up to nearest page.
 * @param   fExecutable Flag indicating whether it should be permitted to execute code in the memory object.
 */
RTR0DECL(int) RTR0MemObjAllocCont(PRTR0MEMOBJ pMemObj, size_t cb, bool fExecutable)
{
    /* sanity checks. */
    AssertPtrReturn(pMemObj, VERR_INVALID_POINTER);
    *pMemObj = NIL_RTR0MEMOBJ;
    AssertReturn(cb > 0, VERR_INVALID_PARAMETER);
    const size_t cbAligned = RT_ALIGN_Z(cb, PAGE_SIZE);
    AssertReturn(cb <= cbAligned, VERR_INVALID_PARAMETER);

    /* do the allocation. */
    return rtR0MemObjNativeAllocCont(pMemObj, cbAligned, fExecutable);
}

/**
 * Locks a range of user virtual memory.
 *
 * @returns IPRT status code.
 * @param   pMemObj     Where to store the ring-0 memory object handle.
 * @param   pv          User virtual address. This is rounded down to a page boundary.
 * @param   cb          Number of bytes to lock. This is rounded up to nearest page boundary.
 * @param   R0Process   The process to lock pages in. NIL_RTR0PROCESS is an alias for the current one.
 *
 * @remark  RTR0MemObjAddress() will return the rounded down address.
 */
RTR0DECL(int) RTR0MemObjLockUser(PRTR0MEMOBJ pMemObj, void *pv, size_t cb, RTR0PROCESS R0Process)
{
    /* sanity checks. */
    AssertPtrReturn(pMemObj, VERR_INVALID_POINTER);
    *pMemObj = NIL_RTR0MEMOBJ;
    AssertReturn(cb > 0, VERR_INVALID_PARAMETER);
    const size_t cbAligned = RT_ALIGN_Z(cb + ((uintptr_t)pv & PAGE_OFFSET_MASK), PAGE_SIZE);
    AssertReturn(cb <= cbAligned, VERR_INVALID_PARAMETER);
    void * const pvAligned = (void *)((uintptr_t)pv & ~(uintptr_t)PAGE_OFFSET_MASK);
    if (R0Process == NIL_RTR0PROCESS)
        R0Process = RTR0ProcHandleSelf();

    /* do the locking. */
    return rtR0MemObjNativeLockUser(pMemObj, pvAligned, cbAligned, R0Process);
}

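/*
 * Example (illustrative sketch): locking a user buffer in the current
 * process so its pages stay resident while ring-0 code references them,
 * then unlocking it by freeing the lock object.
 */
#if 0
static int exampleLockUserBuffer(void *pvUser, size_t cbUser)
{
    RTR0MEMOBJ hLock;
    int rc = RTR0MemObjLockUser(&hLock, pvUser, cbUser, NIL_RTR0PROCESS /* current process */);
    if (RT_SUCCESS(rc))
    {
        /* physical address of the first locked page (the range was rounded down). */
        RTHCPHYS HCPhysFirst = RTR0MemObjGetPagePhysAddr(hLock, 0);
        NOREF(HCPhysFirst);
        rc = RTR0MemObjFree(hLock, false /* fFreeMappings */);
    }
    return rc;
}
#endif
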
/**
 * Locks a range of kernel virtual memory.
 *
 * @returns IPRT status code.
 * @param   pMemObj     Where to store the ring-0 memory object handle.
 * @param   pv          Kernel virtual address. This is rounded down to a page boundary.
 * @param   cb          Number of bytes to lock. This is rounded up to nearest page boundary.
 *
 * @remark  RTR0MemObjAddress() will return the rounded down address.
 */
RTR0DECL(int) RTR0MemObjLockKernel(PRTR0MEMOBJ pMemObj, void *pv, size_t cb)
{
    /* sanity checks. */
    AssertPtrReturn(pMemObj, VERR_INVALID_POINTER);
    *pMemObj = NIL_RTR0MEMOBJ;
    AssertReturn(cb > 0, VERR_INVALID_PARAMETER);
    const size_t cbAligned = RT_ALIGN_Z(cb + ((uintptr_t)pv & PAGE_OFFSET_MASK), PAGE_SIZE);
    AssertReturn(cb <= cbAligned, VERR_INVALID_PARAMETER);
    void * const pvAligned = (void *)((uintptr_t)pv & ~(uintptr_t)PAGE_OFFSET_MASK);
    AssertPtrReturn(pvAligned, VERR_INVALID_POINTER);

    /* do the locking. */
    return rtR0MemObjNativeLockKernel(pMemObj, pvAligned, cbAligned);
}

/**
 * Allocates page aligned physical memory without (necessarily) any kernel mapping.
 *
 * @returns IPRT status code.
 * @param   pMemObj     Where to store the ring-0 memory object handle.
 * @param   cb          Number of bytes to allocate. This is rounded up to nearest page.
 * @param   PhysHighest The highest permissible address (inclusive).
 *                      Pass NIL_RTHCPHYS if any address is acceptable.
 */
RTR0DECL(int) RTR0MemObjAllocPhys(PRTR0MEMOBJ pMemObj, size_t cb, RTHCPHYS PhysHighest)
{
    /* sanity checks. */
    AssertPtrReturn(pMemObj, VERR_INVALID_POINTER);
    *pMemObj = NIL_RTR0MEMOBJ;
    AssertReturn(cb > 0, VERR_INVALID_PARAMETER);
    const size_t cbAligned = RT_ALIGN_Z(cb, PAGE_SIZE);
    AssertReturn(cb <= cbAligned, VERR_INVALID_PARAMETER);
    AssertReturn(PhysHighest >= cb, VERR_INVALID_PARAMETER);

    /* do the allocation. */
    return rtR0MemObjNativeAllocPhys(pMemObj, cbAligned, PhysHighest);
}

/**
 * Creates a page aligned, contiguous, physical memory object.
 *
 * No physical memory is allocated, we trust you know what you're doing.
 *
 * @returns IPRT status code.
 * @param   pMemObj     Where to store the ring-0 memory object handle.
 * @param   Phys        The physical address to start at. This is rounded down to the
 *                      nearest page boundary.
 * @param   cb          The size of the object in bytes. This is rounded up to nearest page boundary.
 */
RTR0DECL(int) RTR0MemObjEnterPhys(PRTR0MEMOBJ pMemObj, RTHCPHYS Phys, size_t cb)
{
    /* sanity checks. */
    AssertPtrReturn(pMemObj, VERR_INVALID_POINTER);
    *pMemObj = NIL_RTR0MEMOBJ;
    AssertReturn(cb > 0, VERR_INVALID_PARAMETER);
    const size_t cbAligned = RT_ALIGN_Z(cb + (Phys & PAGE_OFFSET_MASK), PAGE_SIZE);
    AssertReturn(cb <= cbAligned, VERR_INVALID_PARAMETER);
    AssertReturn(Phys != NIL_RTHCPHYS, VERR_INVALID_PARAMETER);
    const RTHCPHYS PhysAligned = Phys & ~(RTHCPHYS)PAGE_OFFSET_MASK;

    /* do the allocation. */
    return rtR0MemObjNativeEnterPhys(pMemObj, PhysAligned, cbAligned);
}

/**
 * Reserves kernel virtual address space.
 *
 * @returns IPRT status code.
 * @param   pMemObj     Where to store the ring-0 memory object handle.
 * @param   pvFixed     Requested address. (void *)-1 means any address. This must match the alignment.
 * @param   cb          The number of bytes to reserve. This is rounded up to nearest page.
 * @param   uAlignment  The alignment of the reserved memory.
 *                      Supported values are 0 (alias for PAGE_SIZE), PAGE_SIZE, _2M and _4M.
 */
RTR0DECL(int) RTR0MemObjReserveKernel(PRTR0MEMOBJ pMemObj, void *pvFixed, size_t cb, size_t uAlignment)
{
    /* sanity checks. */
    AssertPtrReturn(pMemObj, VERR_INVALID_POINTER);
    *pMemObj = NIL_RTR0MEMOBJ;
    if (uAlignment == 0)
        uAlignment = PAGE_SIZE;
    AssertReturn(uAlignment == PAGE_SIZE || uAlignment == _2M || uAlignment == _4M, VERR_INVALID_PARAMETER);
    AssertReturn(cb > 0, VERR_INVALID_PARAMETER);
    const size_t cbAligned = RT_ALIGN_Z(cb, PAGE_SIZE);
    AssertReturn(cb <= cbAligned, VERR_INVALID_PARAMETER);
    if (pvFixed != (void *)-1)
        AssertReturn(!((uintptr_t)pvFixed & (uAlignment - 1)), VERR_INVALID_PARAMETER);

    /* do the reservation. */
    return rtR0MemObjNativeReserveKernel(pMemObj, pvFixed, cbAligned, uAlignment);
}

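/*
 * Example (illustrative sketch): reserving 1MB of kernel address space at
 * any address with the default (page) alignment, then releasing it.  The
 * reservation itself allocates no backing memory.
 */
#if 0
static int exampleReserveKernelSpace(void)
{
    RTR0MEMOBJ hReserved;
    int rc = RTR0MemObjReserveKernel(&hReserved, (void *)-1 /* any address */,
                                     0x100000 /* 1MB */, 0 /* uAlignment = PAGE_SIZE */);
    if (RT_SUCCESS(rc))
        rc = RTR0MemObjFree(hReserved, false /* fFreeMappings */);
    return rc;
}
#endif
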
/**
 * Reserves user virtual address space in the current process.
 *
 * @returns IPRT status code.
 * @param   pMemObj     Where to store the ring-0 memory object handle.
 * @param   pvFixed     Requested address. (void *)-1 means any address. This must match the alignment.
 * @param   cb          The number of bytes to reserve. This is rounded up to nearest PAGE_SIZE.
 * @param   uAlignment  The alignment of the reserved memory.
 *                      Supported values are 0 (alias for PAGE_SIZE), PAGE_SIZE, _2M and _4M.
 * @param   R0Process   The process to reserve the memory in. NIL_RTR0PROCESS is an alias for the current one.
 */
RTR0DECL(int) RTR0MemObjReserveUser(PRTR0MEMOBJ pMemObj, void *pvFixed, size_t cb, size_t uAlignment, RTR0PROCESS R0Process)
{
    /* sanity checks. */
    AssertPtrReturn(pMemObj, VERR_INVALID_POINTER);
    *pMemObj = NIL_RTR0MEMOBJ;
    if (uAlignment == 0)
        uAlignment = PAGE_SIZE;
    AssertReturn(uAlignment == PAGE_SIZE || uAlignment == _2M || uAlignment == _4M, VERR_INVALID_PARAMETER);
    AssertReturn(cb > 0, VERR_INVALID_PARAMETER);
    const size_t cbAligned = RT_ALIGN_Z(cb, PAGE_SIZE);
    AssertReturn(cb <= cbAligned, VERR_INVALID_PARAMETER);
    if (pvFixed != (void *)-1)
        AssertReturn(!((uintptr_t)pvFixed & (uAlignment - 1)), VERR_INVALID_PARAMETER);
    if (R0Process == NIL_RTR0PROCESS)
        R0Process = RTR0ProcHandleSelf();

    /* do the reservation. */
    return rtR0MemObjNativeReserveUser(pMemObj, pvFixed, cbAligned, uAlignment, R0Process);
}

/**
 * Maps a memory object into kernel virtual address space.
 *
 * @returns IPRT status code.
 * @param   pMemObj     Where to store the ring-0 memory object handle of the mapping object.
 * @param   MemObjToMap The object to be mapped.
 * @param   pvFixed     Requested address. (void *)-1 means any address. This must match the alignment.
 * @param   uAlignment  The alignment of the reserved memory.
 *                      Supported values are 0 (alias for PAGE_SIZE), PAGE_SIZE, _2M and _4M.
 * @param   fProt       Combination of RTMEM_PROT_* flags (except RTMEM_PROT_NONE).
 */
RTR0DECL(int) RTR0MemObjMapKernel(PRTR0MEMOBJ pMemObj, RTR0MEMOBJ MemObjToMap, void *pvFixed, size_t uAlignment, unsigned fProt)
{
    /* sanity checks. */
    AssertPtrReturn(pMemObj, VERR_INVALID_POINTER);
    *pMemObj = NIL_RTR0MEMOBJ;
    AssertPtrReturn(MemObjToMap, VERR_INVALID_HANDLE);
    PRTR0MEMOBJINTERNAL pMemToMap = (PRTR0MEMOBJINTERNAL)MemObjToMap;
    AssertReturn(pMemToMap->u32Magic == RTR0MEMOBJ_MAGIC, VERR_INVALID_HANDLE);
    AssertReturn(pMemToMap->enmType > RTR0MEMOBJTYPE_INVALID && pMemToMap->enmType < RTR0MEMOBJTYPE_END, VERR_INVALID_HANDLE);
    AssertReturn(!rtR0MemObjIsMapping(pMemToMap), VERR_INVALID_PARAMETER);
    AssertReturn(pMemToMap->enmType != RTR0MEMOBJTYPE_RES_VIRT, VERR_INVALID_PARAMETER);
    if (uAlignment == 0)
        uAlignment = PAGE_SIZE;
    AssertReturn(uAlignment == PAGE_SIZE || uAlignment == _2M || uAlignment == _4M, VERR_INVALID_PARAMETER);
    if (pvFixed != (void *)-1)
        AssertReturn(!((uintptr_t)pvFixed & (uAlignment - 1)), VERR_INVALID_PARAMETER);
    AssertReturn(fProt != RTMEM_PROT_NONE, VERR_INVALID_PARAMETER);
    AssertReturn(!(fProt & ~(RTMEM_PROT_READ | RTMEM_PROT_WRITE | RTMEM_PROT_EXEC)), VERR_INVALID_PARAMETER);

    /* do the mapping. */
    PRTR0MEMOBJINTERNAL pNew;
    int rc = rtR0MemObjNativeMapKernel(&pNew, pMemToMap, pvFixed, uAlignment, fProt);
    if (RT_SUCCESS(rc))
    {
        /* link it. */
        rc = rtR0MemObjLink(pMemToMap, pNew);
        if (RT_SUCCESS(rc))
            *pMemObj = pNew;
        else
        {
            /* damn, out of memory. bail out. */
            int rc2 = rtR0MemObjNativeFree(pNew);
            AssertRC(rc2);
            pNew->u32Magic++;
            pNew->enmType = RTR0MEMOBJTYPE_END;
            RTMemFree(pNew);
        }
    }

    return rc;
}

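/*
 * Example (illustrative sketch): giving a physically contiguous allocation a
 * second, read/write kernel mapping, then tearing both objects down.  The
 * mapping object is freed first; freeing the parent with fFreeMappings=true
 * would also have cleaned up any remaining mappings.
 */
#if 0
static int exampleMapIntoKernel(void)
{
    RTR0MEMOBJ hCont;
    int rc = RTR0MemObjAllocCont(&hCont, PAGE_SIZE, false /* fExecutable */);
    if (RT_SUCCESS(rc))
    {
        RTR0MEMOBJ hMap;
        rc = RTR0MemObjMapKernel(&hMap, hCont, (void *)-1 /* any address */,
                                 0 /* uAlignment */, RTMEM_PROT_READ | RTMEM_PROT_WRITE);
        if (RT_SUCCESS(rc))
            rc = RTR0MemObjFree(hMap, false /* fFreeMappings */);
        int rc2 = RTR0MemObjFree(hCont, true /* free any remaining mappings */);
        if (RT_SUCCESS(rc))
            rc = rc2;
    }
    return rc;
}
#endif
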
/**
 * Maps a memory object into user virtual address space in the current process.
 *
 * @returns IPRT status code.
 * @param   pMemObj     Where to store the ring-0 memory object handle of the mapping object.
 * @param   MemObjToMap The object to be mapped.
 * @param   pvFixed     Requested address. (void *)-1 means any address. This must match the alignment.
 * @param   uAlignment  The alignment of the reserved memory.
 *                      Supported values are 0 (alias for PAGE_SIZE), PAGE_SIZE, _2M and _4M.
 * @param   fProt       Combination of RTMEM_PROT_* flags (except RTMEM_PROT_NONE).
 * @param   R0Process   The process to map the memory into. NIL_RTR0PROCESS is an alias for the current one.
 */
RTR0DECL(int) RTR0MemObjMapUser(PRTR0MEMOBJ pMemObj, RTR0MEMOBJ MemObjToMap, void *pvFixed, size_t uAlignment, unsigned fProt, RTR0PROCESS R0Process)
{
    /* sanity checks. */
    AssertPtrReturn(pMemObj, VERR_INVALID_POINTER);
    *pMemObj = NIL_RTR0MEMOBJ;
    AssertPtrReturn(MemObjToMap, VERR_INVALID_HANDLE);
    PRTR0MEMOBJINTERNAL pMemToMap = (PRTR0MEMOBJINTERNAL)MemObjToMap;
    AssertReturn(pMemToMap->u32Magic == RTR0MEMOBJ_MAGIC, VERR_INVALID_HANDLE);
    AssertReturn(pMemToMap->enmType > RTR0MEMOBJTYPE_INVALID && pMemToMap->enmType < RTR0MEMOBJTYPE_END, VERR_INVALID_HANDLE);
    AssertReturn(!rtR0MemObjIsMapping(pMemToMap), VERR_INVALID_PARAMETER);
    AssertReturn(pMemToMap->enmType != RTR0MEMOBJTYPE_RES_VIRT, VERR_INVALID_PARAMETER);
    if (uAlignment == 0)
        uAlignment = PAGE_SIZE;
    AssertReturn(uAlignment == PAGE_SIZE || uAlignment == _2M || uAlignment == _4M, VERR_INVALID_PARAMETER);
    if (pvFixed != (void *)-1)
        AssertReturn(!((uintptr_t)pvFixed & (uAlignment - 1)), VERR_INVALID_PARAMETER);
    AssertReturn(fProt != RTMEM_PROT_NONE, VERR_INVALID_PARAMETER);
    AssertReturn(!(fProt & ~(RTMEM_PROT_READ | RTMEM_PROT_WRITE | RTMEM_PROT_EXEC)), VERR_INVALID_PARAMETER);
    if (R0Process == NIL_RTR0PROCESS)
        R0Process = RTR0ProcHandleSelf();

    /* do the mapping. */
    PRTR0MEMOBJINTERNAL pNew;
    int rc = rtR0MemObjNativeMapUser(&pNew, pMemToMap, pvFixed, uAlignment, fProt, R0Process);
    if (RT_SUCCESS(rc))
    {
        /* link it. */
        rc = rtR0MemObjLink(pMemToMap, pNew);
        if (RT_SUCCESS(rc))
            *pMemObj = pNew;
        else
        {
            /* damn, out of memory. bail out. */
            int rc2 = rtR0MemObjNativeFree(pNew);
            AssertRC(rc2);
            pNew->u32Magic++;
            pNew->enmType = RTR0MEMOBJTYPE_END;
            RTMemFree(pNew);
        }
    }

    return rc;
}