VirtualBox

source: vbox/trunk/src/VBox/Runtime/r0drv/memobj-r0drv.cpp @ 4755

Last change on this file since 4755 was 4755, checked in by vboxsync, 17 years ago

Reverse allocation for Windows hosts: physical pages are allocated in the support driver and mapped into user space
VMM: Use locked memory for the MM pagepool structures.

/* $Revision: 4755 $ */
/** @file
 * innotek Portable Runtime - Ring-0 Memory Objects, Common Code.
 */

/*
 * Copyright (C) 2006-2007 innotek GmbH
 *
 * This file is part of VirtualBox Open Source Edition (OSE), as
 * available from http://www.virtualbox.org. This file is free software;
 * you can redistribute it and/or modify it under the terms of the GNU
 * General Public License as published by the Free Software Foundation,
 * in version 2 as it comes in the "COPYING" file of the VirtualBox OSE
 * distribution. VirtualBox OSE is distributed in the hope that it will
 * be useful, but WITHOUT ANY WARRANTY of any kind.
 */


/*******************************************************************************
*   Header Files                                                               *
*******************************************************************************/
#define LOG_GROUP RTLOGGROUP_DEFAULT /// @todo RTLOGGROUP_MEM
#include <iprt/memobj.h>
#include <iprt/alloc.h>
#include <iprt/process.h>
#include <iprt/assert.h>
#include <iprt/err.h>
#include <iprt/log.h>
#include <iprt/param.h>
#include "internal/memobj.h"

/**
 * Internal function for allocating a new memory object.
 *
 * @returns The allocated and initialized handle.
 * @param   cbSelf      The size of the memory object handle. 0 means default size.
 * @param   enmType     The memory object type.
 * @param   pv          The memory object mapping.
 * @param   cb          The size of the memory object.
 */
PRTR0MEMOBJINTERNAL rtR0MemObjNew(size_t cbSelf, RTR0MEMOBJTYPE enmType, void *pv, size_t cb)
{
    PRTR0MEMOBJINTERNAL pNew;

    /* validate the size */
    if (!cbSelf)
        cbSelf = sizeof(*pNew);
    Assert(cbSelf >= sizeof(*pNew));
    Assert(cbSelf == (uint32_t)cbSelf);

    /*
     * Allocate and initialize the object.
     */
    pNew = (PRTR0MEMOBJINTERNAL)RTMemAllocZ(cbSelf);
    if (pNew)
    {
        pNew->u32Magic = RTR0MEMOBJ_MAGIC;
        pNew->cbSelf   = (uint32_t)cbSelf;
        pNew->enmType  = enmType;
        pNew->cb       = cb;
        pNew->pv       = pv;
    }
    return pNew;
}


/**
 * Deletes an incomplete memory object.
 *
 * This is for cleaning up after failures during object creation.
 *
 * @param   pMem    The incomplete memory object to delete.
 */
void rtR0MemObjDelete(PRTR0MEMOBJINTERNAL pMem)
{
    if (pMem)
    {
        pMem->u32Magic++;
        pMem->enmType = RTR0MEMOBJTYPE_END;
        RTMemFree(pMem);
    }
}


/**
 * Links a mapping object to a primary object.
 *
 * @returns IPRT status code.
 * @retval  VINF_SUCCESS on success.
 * @retval  VERR_NO_MEMORY if we couldn't expand the mapping array of the parent.
 * @param   pParent     The parent (primary) memory object.
 * @param   pChild      The child (mapping) memory object.
 */
static int rtR0MemObjLink(PRTR0MEMOBJINTERNAL pParent, PRTR0MEMOBJINTERNAL pChild)
{
    uint32_t i;

    /* sanity */
    Assert(rtR0MemObjIsMapping(pChild));
    Assert(!rtR0MemObjIsMapping(pParent));

    /* expand the array? */
    i = pParent->uRel.Parent.cMappings;
    if (i >= pParent->uRel.Parent.cMappingsAllocated)
    {
        void *pv = RTMemRealloc(pParent->uRel.Parent.papMappings,
                                (i + 32) * sizeof(pParent->uRel.Parent.papMappings[0]));
        if (!pv)
            return VERR_NO_MEMORY;
        pParent->uRel.Parent.papMappings = (PPRTR0MEMOBJINTERNAL)pv;
        pParent->uRel.Parent.cMappingsAllocated = i + 32;
        Assert(i == pParent->uRel.Parent.cMappings);
    }

    /* do the linking. */
    pParent->uRel.Parent.papMappings[i] = pChild;
    pParent->uRel.Parent.cMappings++;
    pChild->uRel.Child.pParent = pParent;

    return VINF_SUCCESS;
}


/**
 * Checks whether this is a mapping object or not.
 *
 * @returns true if it's a mapping, otherwise false.
 * @param   MemObj  The ring-0 memory object handle.
 */
RTR0DECL(bool) RTR0MemObjIsMapping(RTR0MEMOBJ MemObj)
{
    /* Validate the object handle. */
    PRTR0MEMOBJINTERNAL pMem;
    AssertPtrReturn(MemObj, false);
    pMem = (PRTR0MEMOBJINTERNAL)MemObj;
    AssertMsgReturn(pMem->u32Magic == RTR0MEMOBJ_MAGIC, ("%p: %#x\n", pMem, pMem->u32Magic), false);
    AssertMsgReturn(pMem->enmType > RTR0MEMOBJTYPE_INVALID && pMem->enmType < RTR0MEMOBJTYPE_END, ("%p: %d\n", pMem, pMem->enmType), false);

    /* hand it on to the inlined worker. */
    return rtR0MemObjIsMapping(pMem);
}


/**
 * Gets the address of a ring-0 memory object.
 *
 * @returns The address of the memory object.
 * @returns NULL if the handle is invalid (asserts in strict builds) or if there isn't any mapping.
 * @param   MemObj  The ring-0 memory object handle.
 */
RTR0DECL(void *) RTR0MemObjAddress(RTR0MEMOBJ MemObj)
{
    /* Validate the object handle. */
    PRTR0MEMOBJINTERNAL pMem;
    AssertPtrReturn(MemObj, 0);
    pMem = (PRTR0MEMOBJINTERNAL)MemObj;
    AssertMsgReturn(pMem->u32Magic == RTR0MEMOBJ_MAGIC, ("%p: %#x\n", pMem, pMem->u32Magic), 0);
    AssertMsgReturn(pMem->enmType > RTR0MEMOBJTYPE_INVALID && pMem->enmType < RTR0MEMOBJTYPE_END, ("%p: %d\n", pMem, pMem->enmType), 0);

    /* return the mapping address. */
    return pMem->pv;
}


/**
 * Gets the ring-3 address of a ring-0 memory object.
 *
 * This only applies to ring-0 memory objects with ring-3 mappings of some kind, i.e.
 * locked user memory, reserved user address space and user mappings. This API should
 * not be used on any other objects.
 *
 * @returns The address of the memory object.
 * @returns NIL_RTR3PTR if the handle is invalid or if it's not an object with a ring-3 mapping.
 *          Strict builds will assert in both cases.
 * @param   MemObj  The ring-0 memory object handle.
 */
RTR0DECL(RTR3PTR) RTR0MemObjAddressR3(RTR0MEMOBJ MemObj)
{
    /* Validate the object handle. */
    PRTR0MEMOBJINTERNAL pMem;
    AssertPtrReturn(MemObj, NIL_RTR3PTR);
    pMem = (PRTR0MEMOBJINTERNAL)MemObj;
    AssertMsgReturn(pMem->u32Magic == RTR0MEMOBJ_MAGIC, ("%p: %#x\n", pMem, pMem->u32Magic), NIL_RTR3PTR);
    AssertMsgReturn(pMem->enmType > RTR0MEMOBJTYPE_INVALID && pMem->enmType < RTR0MEMOBJTYPE_END, ("%p: %d\n", pMem, pMem->enmType), NIL_RTR3PTR);
    AssertMsgReturn(    (   pMem->enmType == RTR0MEMOBJTYPE_MAPPING
                         && pMem->u.Mapping.R0Process != NIL_RTR0PROCESS)
                    ||  (   pMem->enmType == RTR0MEMOBJTYPE_LOCK
                         && pMem->u.Lock.R0Process != NIL_RTR0PROCESS)
                    ||  (   pMem->enmType == RTR0MEMOBJTYPE_PHYS_NC
                         && pMem->u.Lock.R0Process != NIL_RTR0PROCESS)
                    ||  (   pMem->enmType == RTR0MEMOBJTYPE_RES_VIRT
                         && pMem->u.ResVirt.R0Process != NIL_RTR0PROCESS),
                    ("%p: %d\n", pMem, pMem->enmType), NIL_RTR3PTR);

    /* return the mapping address. */
    return (RTR3PTR)pMem->pv;
}


/**
 * Gets the size of a ring-0 memory object.
 *
 * @returns The size of the memory object.
 * @returns 0 if the handle is invalid (asserts in strict builds) or if there isn't any mapping.
 * @param   MemObj  The ring-0 memory object handle.
 */
RTR0DECL(size_t) RTR0MemObjSize(RTR0MEMOBJ MemObj)
{
    /* Validate the object handle. */
    PRTR0MEMOBJINTERNAL pMem;
    AssertPtrReturn(MemObj, 0);
    pMem = (PRTR0MEMOBJINTERNAL)MemObj;
    AssertMsgReturn(pMem->u32Magic == RTR0MEMOBJ_MAGIC, ("%p: %#x\n", pMem, pMem->u32Magic), 0);
    AssertMsgReturn(pMem->enmType > RTR0MEMOBJTYPE_INVALID && pMem->enmType < RTR0MEMOBJTYPE_END, ("%p: %d\n", pMem, pMem->enmType), 0);

    /* return the size. */
    return pMem->cb;
}


/**
 * Gets the physical address of a page in the memory object.
 *
 * @returns The physical address.
 * @returns NIL_RTHCPHYS if the object doesn't contain fixed physical pages.
 * @returns NIL_RTHCPHYS if the iPage is out of range.
 * @returns NIL_RTHCPHYS if the object handle isn't valid.
 * @param   MemObj  The ring-0 memory object handle.
 * @param   iPage   The page number within the object.
 */
RTR0DECL(RTHCPHYS) RTR0MemObjGetPagePhysAddr(RTR0MEMOBJ MemObj, size_t iPage)
{
    /* Validate the object handle. */
    PRTR0MEMOBJINTERNAL pMem;
    size_t cPages;
    AssertPtrReturn(MemObj, NIL_RTHCPHYS);
    pMem = (PRTR0MEMOBJINTERNAL)MemObj;
    AssertMsgReturn(pMem->u32Magic == RTR0MEMOBJ_MAGIC, ("%p: %#x\n", pMem, pMem->u32Magic), NIL_RTHCPHYS);
    AssertMsgReturn(pMem->enmType > RTR0MEMOBJTYPE_INVALID && pMem->enmType < RTR0MEMOBJTYPE_END, ("%p: %d\n", pMem, pMem->enmType), NIL_RTHCPHYS);
    cPages = (pMem->cb >> PAGE_SHIFT);
    if (iPage >= cPages)
    {
        /* permit: while (RTR0MemObjGetPagePhysAddr(pMem, iPage++) != NIL_RTHCPHYS) {} */
        if (iPage == cPages)
            return NIL_RTHCPHYS;
        AssertReturn(iPage < (pMem->cb >> PAGE_SHIFT), NIL_RTHCPHYS);
    }

    /*
     * We know the address of physically contiguous allocations and mappings.
     */
    if (pMem->enmType == RTR0MEMOBJTYPE_CONT)
        return pMem->u.Cont.Phys + iPage * PAGE_SIZE;
    if (pMem->enmType == RTR0MEMOBJTYPE_PHYS)
        return pMem->u.Phys.PhysBase + iPage * PAGE_SIZE;

    /*
     * Do the job.
     */
    return rtR0MemObjNativeGetPagePhysAddr(pMem, iPage);
}

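/*
 * Illustrative usage sketch (not part of the original file): the out-of-range
 * check above deliberately permits querying one page past the end of the
 * object, so a caller can enumerate backed pages without fetching the size
 * first. Something along these lines:
 */
#if 0 /* example only */
static size_t ExampleCountBackedPages(RTR0MEMOBJ hMemObj)
{
    size_t cPages = 0;
    while (RTR0MemObjGetPagePhysAddr(hMemObj, cPages) != NIL_RTHCPHYS)
        cPages++;
    return cPages;
}
#endif
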
/**
 * Frees a ring-0 memory object.
 *
 * @returns IPRT status code.
 * @retval  VERR_INVALID_HANDLE if the handle isn't valid.
 * @param   MemObj          The ring-0 memory object to be freed. NULL is accepted.
 * @param   fFreeMappings   Whether or not to free mappings of the object.
 */
RTR0DECL(int) RTR0MemObjFree(RTR0MEMOBJ MemObj, bool fFreeMappings)
{
    /*
     * Validate the object handle.
     */
    PRTR0MEMOBJINTERNAL pMem;
    int rc;

    if (MemObj == NIL_RTR0MEMOBJ)
        return VINF_SUCCESS;
    AssertPtrReturn(MemObj, VERR_INVALID_HANDLE);
    pMem = (PRTR0MEMOBJINTERNAL)MemObj;
    AssertReturn(pMem->u32Magic == RTR0MEMOBJ_MAGIC, VERR_INVALID_HANDLE);
    AssertReturn(pMem->enmType > RTR0MEMOBJTYPE_INVALID && pMem->enmType < RTR0MEMOBJTYPE_END, VERR_INVALID_HANDLE);

    /*
     * Deal with mappings according to fFreeMappings.
     */
    if (    !rtR0MemObjIsMapping(pMem)
        &&  pMem->uRel.Parent.cMappings > 0)
    {
        /* fail if not requested to free mappings. */
        if (!fFreeMappings)
            return VERR_MEMORY_BUSY;

        while (pMem->uRel.Parent.cMappings > 0)
        {
            PRTR0MEMOBJINTERNAL pChild = pMem->uRel.Parent.papMappings[--pMem->uRel.Parent.cMappings];
            pMem->uRel.Parent.papMappings[pMem->uRel.Parent.cMappings] = NULL;

            /* sanity checks. */
            AssertPtr(pChild);
            AssertFatal(pChild->u32Magic == RTR0MEMOBJ_MAGIC);
            AssertFatal(pChild->enmType > RTR0MEMOBJTYPE_INVALID && pChild->enmType < RTR0MEMOBJTYPE_END);
            AssertFatal(rtR0MemObjIsMapping(pChild));

            /* free the mapping. */
            rc = rtR0MemObjNativeFree(pChild);
            if (RT_FAILURE(rc))
            {
                Log(("RTR0MemObjFree: failed to free mapping %p: %p %#zx; rc=%Vrc\n", pChild, pChild->pv, pChild->cb, rc));
                pMem->uRel.Parent.papMappings[pMem->uRel.Parent.cMappings++] = pChild;
                return rc;
            }
        }
    }

    /*
     * Free this object.
     */
    rc = rtR0MemObjNativeFree(pMem);
    if (RT_SUCCESS(rc))
    {
        /*
         * Ok, it was freed just fine. Now, if it's a mapping we'll have to remove it from the parent.
         */
        if (rtR0MemObjIsMapping(pMem))
        {
            PRTR0MEMOBJINTERNAL pParent = pMem->uRel.Child.pParent;
            uint32_t i;

            /* sanity checks */
            AssertPtr(pParent);
            AssertFatal(pParent->u32Magic == RTR0MEMOBJ_MAGIC);
            AssertFatal(pParent->enmType > RTR0MEMOBJTYPE_INVALID && pParent->enmType < RTR0MEMOBJTYPE_END);
            AssertFatal(!rtR0MemObjIsMapping(pParent));
            AssertFatal(pParent->uRel.Parent.cMappings > 0);
            AssertPtr(pParent->uRel.Parent.papMappings);

            /* locate and remove from the array of mappings. */
            i = pParent->uRel.Parent.cMappings;
            while (i-- > 0)
            {
                if (pParent->uRel.Parent.papMappings[i] == pMem)
                {
                    pParent->uRel.Parent.papMappings[i] = pParent->uRel.Parent.papMappings[--pParent->uRel.Parent.cMappings];
                    break;
                }
            }
            Assert(i != UINT32_MAX);
        }
        else
            Assert(pMem->uRel.Parent.cMappings == 0);

        /*
         * Finally, destroy the handle.
         */
        pMem->u32Magic++;
        pMem->enmType = RTR0MEMOBJTYPE_END;
        if (!rtR0MemObjIsMapping(pMem))
            RTMemFree(pMem->uRel.Parent.papMappings);
        RTMemFree(pMem);
    }
    else
        Log(("RTR0MemObjFree: failed to free %p: %d %p %#zx; rc=%Vrc\n",
             pMem, pMem->enmType, pMem->pv, pMem->cb, rc));
    return rc;
}

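/*
 * Illustrative usage sketch (not part of the original file): freeing a
 * primary object that may still have live mappings. Passing false fails with
 * VERR_MEMORY_BUSY while mappings exist; passing true tears the mappings
 * down first, as the code above shows.
 */
#if 0 /* example only */
static int ExampleTeardown(RTR0MEMOBJ hMemObj)
{
    int rc = RTR0MemObjFree(hMemObj, false /* fFreeMappings */);
    if (rc == VERR_MEMORY_BUSY)
        rc = RTR0MemObjFree(hMemObj, true /* free the mappings too */);
    return rc;
}
#endif
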
/**
 * Allocates page aligned virtual kernel memory.
 *
 * The memory is taken from a non paged (= fixed physical memory backing) pool.
 *
 * @returns IPRT status code.
 * @param   pMemObj     Where to store the ring-0 memory object handle.
 * @param   cb          Number of bytes to allocate. This is rounded up to nearest page.
 * @param   fExecutable Flag indicating whether it should be permitted to execute code in the memory object.
 */
RTR0DECL(int) RTR0MemObjAllocPage(PRTR0MEMOBJ pMemObj, size_t cb, bool fExecutable)
{
    /* sanity checks. */
    const size_t cbAligned = RT_ALIGN_Z(cb, PAGE_SIZE);
    AssertPtrReturn(pMemObj, VERR_INVALID_POINTER);
    *pMemObj = NIL_RTR0MEMOBJ;
    AssertReturn(cb > 0, VERR_INVALID_PARAMETER);
    AssertReturn(cb <= cbAligned, VERR_INVALID_PARAMETER);

    /* do the allocation. */
    return rtR0MemObjNativeAllocPage(pMemObj, cbAligned, fExecutable);
}

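/*
 * Illustrative usage sketch (not part of the original file): a typical
 * allocate/use/free round trip for non-paged kernel memory. The assert holds
 * on targets where PAGE_SIZE is 4KB, since cb is rounded up to a whole page.
 */
#if 0 /* example only */
static int ExampleAllocPage(void)
{
    RTR0MEMOBJ hMemObj;
    int rc = RTR0MemObjAllocPage(&hMemObj, 3000, false /* fExecutable */);
    if (RT_SUCCESS(rc))
    {
        Assert(RTR0MemObjSize(hMemObj) == PAGE_SIZE); /* cb was rounded up. */
        *(uint8_t *)RTR0MemObjAddress(hMemObj) = 0x42;
        rc = RTR0MemObjFree(hMemObj, false /* fFreeMappings */);
    }
    return rc;
}
#endif
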
/**
 * Allocates page aligned virtual kernel memory with physical backing below 4GB.
 *
 * The physical memory backing the allocation is fixed.
 *
 * @returns IPRT status code.
 * @param   pMemObj     Where to store the ring-0 memory object handle.
 * @param   cb          Number of bytes to allocate. This is rounded up to nearest page.
 * @param   fExecutable Flag indicating whether it should be permitted to execute code in the memory object.
 */
RTR0DECL(int) RTR0MemObjAllocLow(PRTR0MEMOBJ pMemObj, size_t cb, bool fExecutable)
{
    /* sanity checks. */
    const size_t cbAligned = RT_ALIGN_Z(cb, PAGE_SIZE);
    AssertPtrReturn(pMemObj, VERR_INVALID_POINTER);
    *pMemObj = NIL_RTR0MEMOBJ;
    AssertReturn(cb > 0, VERR_INVALID_PARAMETER);
    AssertReturn(cb <= cbAligned, VERR_INVALID_PARAMETER);

    /* do the allocation. */
    return rtR0MemObjNativeAllocLow(pMemObj, cbAligned, fExecutable);
}


/**
 * Allocates page aligned virtual kernel memory with contiguous physical backing below 4GB.
 *
 * The physical memory backing the allocation is fixed.
 *
 * @returns IPRT status code.
 * @param   pMemObj     Where to store the ring-0 memory object handle.
 * @param   cb          Number of bytes to allocate. This is rounded up to nearest page.
 * @param   fExecutable Flag indicating whether it should be permitted to execute code in the memory object.
 */
RTR0DECL(int) RTR0MemObjAllocCont(PRTR0MEMOBJ pMemObj, size_t cb, bool fExecutable)
{
    /* sanity checks. */
    const size_t cbAligned = RT_ALIGN_Z(cb, PAGE_SIZE);
    AssertPtrReturn(pMemObj, VERR_INVALID_POINTER);
    *pMemObj = NIL_RTR0MEMOBJ;
    AssertReturn(cb > 0, VERR_INVALID_PARAMETER);
    AssertReturn(cb <= cbAligned, VERR_INVALID_PARAMETER);

    /* do the allocation. */
    return rtR0MemObjNativeAllocCont(pMemObj, cbAligned, fExecutable);
}

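/*
 * Illustrative usage sketch (not part of the original file): with a
 * contiguous allocation, page 0's physical address anchors the whole range
 * (page i lives at Phys + i * PAGE_SIZE, per RTR0MemObjGetPagePhysAddr
 * above), which is what a DMA-style consumer typically wants.
 */
#if 0 /* example only */
static int ExampleAllocCont(void)
{
    RTR0MEMOBJ hMemObj;
    int rc = RTR0MemObjAllocCont(&hMemObj, 2 * PAGE_SIZE, false /* fExecutable */);
    if (RT_SUCCESS(rc))
    {
        RTHCPHYS Phys = RTR0MemObjGetPagePhysAddr(hMemObj, 0);
        Assert(RTR0MemObjGetPagePhysAddr(hMemObj, 1) == Phys + PAGE_SIZE);
        /* ... hand Phys to a (hypothetical) device ... */
        rc = RTR0MemObjFree(hMemObj, false /* fFreeMappings */);
    }
    return rc;
}
#endif
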
/**
 * Locks a range of user virtual memory.
 *
 * @returns IPRT status code.
 * @param   pMemObj     Where to store the ring-0 memory object handle.
 * @param   R3Ptr       User virtual address. This is rounded down to a page boundary.
 * @param   cb          Number of bytes to lock. This is rounded up to nearest page boundary.
 * @param   R0Process   The process to lock pages in. NIL_RTR0PROCESS is an alias for the current one.
 *
 * @remark  RTR0MemObjAddressR3() and RTR0MemObjAddress() will return the rounded down address.
 */
RTR0DECL(int) RTR0MemObjLockUser(PRTR0MEMOBJ pMemObj, RTR3PTR R3Ptr, size_t cb, RTR0PROCESS R0Process)
{
    /* sanity checks. */
    const size_t cbAligned = RT_ALIGN_Z(cb + (R3Ptr & PAGE_OFFSET_MASK), PAGE_SIZE);
    RTR3PTR const R3PtrAligned = (R3Ptr & ~(RTR3PTR)PAGE_OFFSET_MASK);
    AssertPtrReturn(pMemObj, VERR_INVALID_POINTER);
    *pMemObj = NIL_RTR0MEMOBJ;
    AssertReturn(cb > 0, VERR_INVALID_PARAMETER);
    AssertReturn(cb <= cbAligned, VERR_INVALID_PARAMETER);
    if (R0Process == NIL_RTR0PROCESS)
        R0Process = RTR0ProcHandleSelf();

    /* do the locking. */
    return rtR0MemObjNativeLockUser(pMemObj, R3PtrAligned, cbAligned, R0Process);
}

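/*
 * Illustrative usage sketch (not part of the original file): locking an
 * unaligned user buffer in the current process. The object covers the
 * containing pages, so RTR0MemObjAddressR3() returns the rounded-down page
 * address rather than R3Ptr itself.
 */
#if 0 /* example only */
static int ExampleLockUser(RTR3PTR R3Ptr, size_t cb)
{
    RTR0MEMOBJ hMemObj;
    int rc = RTR0MemObjLockUser(&hMemObj, R3Ptr, cb, NIL_RTR0PROCESS);
    if (RT_SUCCESS(rc))
    {
        Assert(RTR0MemObjAddressR3(hMemObj) == (R3Ptr & ~(RTR3PTR)PAGE_OFFSET_MASK));
        /* ... the pages are now resident and pinned ... */
        rc = RTR0MemObjFree(hMemObj, false /* fFreeMappings */);
    }
    return rc;
}
#endif
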
/**
 * Locks a range of kernel virtual memory.
 *
 * @returns IPRT status code.
 * @param   pMemObj     Where to store the ring-0 memory object handle.
 * @param   pv          Kernel virtual address. This is rounded down to a page boundary.
 * @param   cb          Number of bytes to lock. This is rounded up to nearest page boundary.
 *
 * @remark  RTR0MemObjAddress() will return the rounded down address.
 */
RTR0DECL(int) RTR0MemObjLockKernel(PRTR0MEMOBJ pMemObj, void *pv, size_t cb)
{
    /* sanity checks. */
    const size_t cbAligned = RT_ALIGN_Z(cb + ((uintptr_t)pv & PAGE_OFFSET_MASK), PAGE_SIZE);
    void * const pvAligned = (void *)((uintptr_t)pv & ~(uintptr_t)PAGE_OFFSET_MASK);
    AssertPtrReturn(pMemObj, VERR_INVALID_POINTER);
    *pMemObj = NIL_RTR0MEMOBJ;
    AssertReturn(cb > 0, VERR_INVALID_PARAMETER);
    AssertReturn(cb <= cbAligned, VERR_INVALID_PARAMETER);
    AssertPtrReturn(pvAligned, VERR_INVALID_POINTER);

    /* do the locking. */
    return rtR0MemObjNativeLockKernel(pMemObj, pvAligned, cbAligned);
}


/**
 * Allocates contiguous page aligned physical memory without (necessarily) any kernel mapping.
 *
 * @returns IPRT status code.
 * @param   pMemObj     Where to store the ring-0 memory object handle.
 * @param   cb          Number of bytes to allocate. This is rounded up to nearest page.
 * @param   PhysHighest The highest permissible address (inclusive).
 *                      Pass NIL_RTHCPHYS if any address is acceptable.
 */
RTR0DECL(int) RTR0MemObjAllocPhys(PRTR0MEMOBJ pMemObj, size_t cb, RTHCPHYS PhysHighest)
{
    /* sanity checks. */
    const size_t cbAligned = RT_ALIGN_Z(cb, PAGE_SIZE);
    AssertPtrReturn(pMemObj, VERR_INVALID_POINTER);
    *pMemObj = NIL_RTR0MEMOBJ;
    AssertReturn(cb > 0, VERR_INVALID_PARAMETER);
    AssertReturn(cb <= cbAligned, VERR_INVALID_PARAMETER);
    AssertReturn(PhysHighest >= cb, VERR_INVALID_PARAMETER);

    /* do the allocation. */
    return rtR0MemObjNativeAllocPhys(pMemObj, cbAligned, PhysHighest);
}


/**
 * Allocates non-contiguous page aligned physical memory without (necessarily) any kernel mapping.
 *
 * @returns IPRT status code.
 * @param   pMemObj     Where to store the ring-0 memory object handle.
 * @param   cb          Number of bytes to allocate. This is rounded up to nearest page.
 * @param   PhysHighest The highest permissible address (inclusive).
 *                      Pass NIL_RTHCPHYS if any address is acceptable.
 */
RTR0DECL(int) RTR0MemObjAllocPhysNC(PRTR0MEMOBJ pMemObj, size_t cb, RTHCPHYS PhysHighest)
{
    /* sanity checks. */
    const size_t cbAligned = RT_ALIGN_Z(cb, PAGE_SIZE);
    AssertPtrReturn(pMemObj, VERR_INVALID_POINTER);
    *pMemObj = NIL_RTR0MEMOBJ;
    AssertReturn(cb > 0, VERR_INVALID_PARAMETER);
    AssertReturn(cb <= cbAligned, VERR_INVALID_PARAMETER);
    AssertReturn(PhysHighest >= cb, VERR_INVALID_PARAMETER);

    /* do the allocation. */
    return rtR0MemObjNativeAllocPhysNC(pMemObj, cbAligned, PhysHighest);
}


/**
 * Creates a page aligned, contiguous, physical memory object.
 *
 * No physical memory is allocated, we trust you know what you're doing.
 *
 * @returns IPRT status code.
 * @param   pMemObj     Where to store the ring-0 memory object handle.
 * @param   Phys        The physical address to start at. This is rounded down to the
 *                      nearest page boundary.
 * @param   cb          The size of the object in bytes. This is rounded up to nearest page boundary.
 */
RTR0DECL(int) RTR0MemObjEnterPhys(PRTR0MEMOBJ pMemObj, RTHCPHYS Phys, size_t cb)
{
    /* sanity checks. */
    const size_t cbAligned = RT_ALIGN_Z(cb + (Phys & PAGE_OFFSET_MASK), PAGE_SIZE);
    const RTHCPHYS PhysAligned = Phys & ~(RTHCPHYS)PAGE_OFFSET_MASK;
    AssertPtrReturn(pMemObj, VERR_INVALID_POINTER);
    *pMemObj = NIL_RTR0MEMOBJ;
    AssertReturn(cb > 0, VERR_INVALID_PARAMETER);
    AssertReturn(cb <= cbAligned, VERR_INVALID_PARAMETER);
    AssertReturn(Phys != NIL_RTHCPHYS, VERR_INVALID_PARAMETER);

    /* create the object. */
    return rtR0MemObjNativeEnterPhys(pMemObj, PhysAligned, cbAligned);
}

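/*
 * Illustrative usage sketch (not part of the original file): wrapping an
 * existing physical range, e.g. a device's MMIO region at a purely
 * hypothetical address, in a memory object so it can later be mapped. No
 * memory is allocated or validated.
 */
#if 0 /* example only */
static int ExampleEnterPhys(RTR0MEMOBJ *phMemObj)
{
    const RTHCPHYS PhysMmio = UINT32_C(0xfee00000); /* hypothetical device range */
    return RTR0MemObjEnterPhys(phMemObj, PhysMmio, PAGE_SIZE);
}
#endif
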
/**
 * Reserves kernel virtual address space.
 *
 * @returns IPRT status code.
 * @param   pMemObj     Where to store the ring-0 memory object handle.
 * @param   pvFixed     Requested address. (void *)-1 means any address. This must match the alignment.
 * @param   cb          The number of bytes to reserve. This is rounded up to nearest page.
 * @param   uAlignment  The alignment of the reserved memory.
 *                      Supported values are 0 (alias for PAGE_SIZE), PAGE_SIZE, _2M and _4M.
 */
RTR0DECL(int) RTR0MemObjReserveKernel(PRTR0MEMOBJ pMemObj, void *pvFixed, size_t cb, size_t uAlignment)
{
    /* sanity checks. */
    const size_t cbAligned = RT_ALIGN_Z(cb, PAGE_SIZE);
    AssertPtrReturn(pMemObj, VERR_INVALID_POINTER);
    *pMemObj = NIL_RTR0MEMOBJ;
    if (uAlignment == 0)
        uAlignment = PAGE_SIZE;
    AssertReturn(uAlignment == PAGE_SIZE || uAlignment == _2M || uAlignment == _4M, VERR_INVALID_PARAMETER);
    AssertReturn(cb > 0, VERR_INVALID_PARAMETER);
    AssertReturn(cb <= cbAligned, VERR_INVALID_PARAMETER);
    if (pvFixed != (void *)-1)
        AssertReturn(!((uintptr_t)pvFixed & (uAlignment - 1)), VERR_INVALID_PARAMETER);

    /* do the reservation. */
    return rtR0MemObjNativeReserveKernel(pMemObj, pvFixed, cbAligned, uAlignment);
}

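/*
 * Illustrative usage sketch (not part of the original file): reserving a
 * 2MB-aligned window of kernel address space without backing it, e.g. to map
 * something into it later.
 */
#if 0 /* example only */
static int ExampleReserveKernel(RTR0MEMOBJ *phMemObj)
{
    return RTR0MemObjReserveKernel(phMemObj, (void *)-1 /* any address */, _2M, _2M /* uAlignment */);
}
#endif
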
/**
 * Reserves user virtual address space in the current process.
 *
 * @returns IPRT status code.
 * @param   pMemObj     Where to store the ring-0 memory object handle.
 * @param   R3PtrFixed  Requested address. (RTR3PTR)-1 means any address. This must match the alignment.
 * @param   cb          The number of bytes to reserve. This is rounded up to nearest PAGE_SIZE.
 * @param   uAlignment  The alignment of the reserved memory.
 *                      Supported values are 0 (alias for PAGE_SIZE), PAGE_SIZE, _2M and _4M.
 * @param   R0Process   The process to reserve the memory in. NIL_RTR0PROCESS is an alias for the current one.
 */
RTR0DECL(int) RTR0MemObjReserveUser(PRTR0MEMOBJ pMemObj, RTR3PTR R3PtrFixed, size_t cb, size_t uAlignment, RTR0PROCESS R0Process)
{
    /* sanity checks. */
    const size_t cbAligned = RT_ALIGN_Z(cb, PAGE_SIZE);
    AssertPtrReturn(pMemObj, VERR_INVALID_POINTER);
    *pMemObj = NIL_RTR0MEMOBJ;
    if (uAlignment == 0)
        uAlignment = PAGE_SIZE;
    AssertReturn(uAlignment == PAGE_SIZE || uAlignment == _2M || uAlignment == _4M, VERR_INVALID_PARAMETER);
    AssertReturn(cb > 0, VERR_INVALID_PARAMETER);
    AssertReturn(cb <= cbAligned, VERR_INVALID_PARAMETER);
    if (R3PtrFixed != (RTR3PTR)-1)
        AssertReturn(!(R3PtrFixed & (uAlignment - 1)), VERR_INVALID_PARAMETER);
    if (R0Process == NIL_RTR0PROCESS)
        R0Process = RTR0ProcHandleSelf();

    /* do the reservation. */
    return rtR0MemObjNativeReserveUser(pMemObj, R3PtrFixed, cbAligned, uAlignment, R0Process);
}


/**
 * Maps a memory object into kernel virtual address space.
 *
 * @returns IPRT status code.
 * @param   pMemObj     Where to store the ring-0 memory object handle of the mapping object.
 * @param   MemObjToMap The object to be mapped.
 * @param   pvFixed     Requested address. (void *)-1 means any address. This must match the alignment.
 * @param   uAlignment  The alignment of the reserved memory.
 *                      Supported values are 0 (alias for PAGE_SIZE), PAGE_SIZE, _2M and _4M.
 * @param   fProt       Combination of RTMEM_PROT_* flags (except RTMEM_PROT_NONE).
 */
RTR0DECL(int) RTR0MemObjMapKernel(PRTR0MEMOBJ pMemObj, RTR0MEMOBJ MemObjToMap, void *pvFixed, size_t uAlignment, unsigned fProt)
{
    /* sanity checks. */
    PRTR0MEMOBJINTERNAL pMemToMap;
    PRTR0MEMOBJINTERNAL pNew;
    int rc;
    AssertPtrReturn(pMemObj, VERR_INVALID_POINTER);
    *pMemObj = NIL_RTR0MEMOBJ;
    AssertPtrReturn(MemObjToMap, VERR_INVALID_HANDLE);
    pMemToMap = (PRTR0MEMOBJINTERNAL)MemObjToMap;
    AssertReturn(pMemToMap->u32Magic == RTR0MEMOBJ_MAGIC, VERR_INVALID_HANDLE);
    AssertReturn(pMemToMap->enmType > RTR0MEMOBJTYPE_INVALID && pMemToMap->enmType < RTR0MEMOBJTYPE_END, VERR_INVALID_HANDLE);
    AssertReturn(!rtR0MemObjIsMapping(pMemToMap), VERR_INVALID_PARAMETER);
    AssertReturn(pMemToMap->enmType != RTR0MEMOBJTYPE_RES_VIRT, VERR_INVALID_PARAMETER);
    if (uAlignment == 0)
        uAlignment = PAGE_SIZE;
    AssertReturn(uAlignment == PAGE_SIZE || uAlignment == _2M || uAlignment == _4M, VERR_INVALID_PARAMETER);
    if (pvFixed != (void *)-1)
        AssertReturn(!((uintptr_t)pvFixed & (uAlignment - 1)), VERR_INVALID_PARAMETER);
    AssertReturn(fProt != RTMEM_PROT_NONE, VERR_INVALID_PARAMETER);
    AssertReturn(!(fProt & ~(RTMEM_PROT_READ | RTMEM_PROT_WRITE | RTMEM_PROT_EXEC)), VERR_INVALID_PARAMETER);

    /* do the mapping. */
    rc = rtR0MemObjNativeMapKernel(&pNew, pMemToMap, pvFixed, uAlignment, fProt);
    if (RT_SUCCESS(rc))
    {
        /* link it. */
        rc = rtR0MemObjLink(pMemToMap, pNew);
        if (RT_SUCCESS(rc))
            *pMemObj = pNew;
        else
        {
            /* damn, out of memory. bail out. */
            int rc2 = rtR0MemObjNativeFree(pNew);
            AssertRC(rc2);
            pNew->u32Magic++;
            pNew->enmType = RTR0MEMOBJTYPE_END;
            RTMemFree(pNew);
        }
    }

    return rc;
}

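/*
 * Illustrative usage sketch (not part of the original file): giving a
 * physical memory object a ring-0 mapping. The mapping is a child object;
 * freeing the parent with fFreeMappings=true (or freeing the child first)
 * cleans both up.
 */
#if 0 /* example only */
static int ExampleMapKernel(RTR0MEMOBJ hPhysObj)
{
    RTR0MEMOBJ hMapObj;
    int rc = RTR0MemObjMapKernel(&hMapObj, hPhysObj, (void *)-1 /* any address */,
                                 0 /* uAlignment: alias for PAGE_SIZE */,
                                 RTMEM_PROT_READ | RTMEM_PROT_WRITE);
    if (RT_SUCCESS(rc))
    {
        void *pv = RTR0MemObjAddress(hMapObj);
        /* ... use pv ... */
        rc = RTR0MemObjFree(hMapObj, false /* fFreeMappings */);
    }
    return rc;
}
#endif
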
/**
 * Maps a memory object into user virtual address space in the current process.
 *
 * @returns IPRT status code.
 * @param   pMemObj     Where to store the ring-0 memory object handle of the mapping object.
 * @param   MemObjToMap The object to be mapped.
 * @param   R3PtrFixed  Requested address. (RTR3PTR)-1 means any address. This must match the alignment.
 * @param   uAlignment  The alignment of the reserved memory.
 *                      Supported values are 0 (alias for PAGE_SIZE), PAGE_SIZE, _2M and _4M.
 * @param   fProt       Combination of RTMEM_PROT_* flags (except RTMEM_PROT_NONE).
 * @param   R0Process   The process to map the memory into. NIL_RTR0PROCESS is an alias for the current one.
 */
RTR0DECL(int) RTR0MemObjMapUser(PRTR0MEMOBJ pMemObj, RTR0MEMOBJ MemObjToMap, RTR3PTR R3PtrFixed, size_t uAlignment, unsigned fProt, RTR0PROCESS R0Process)
{
    /* sanity checks. */
    PRTR0MEMOBJINTERNAL pMemToMap;
    PRTR0MEMOBJINTERNAL pNew;
    int rc;
    AssertPtrReturn(pMemObj, VERR_INVALID_POINTER);
    *pMemObj = NIL_RTR0MEMOBJ;
    AssertPtrReturn(MemObjToMap, VERR_INVALID_HANDLE);
    pMemToMap = (PRTR0MEMOBJINTERNAL)MemObjToMap;
    AssertReturn(pMemToMap->u32Magic == RTR0MEMOBJ_MAGIC, VERR_INVALID_HANDLE);
    AssertReturn(pMemToMap->enmType > RTR0MEMOBJTYPE_INVALID && pMemToMap->enmType < RTR0MEMOBJTYPE_END, VERR_INVALID_HANDLE);
    AssertReturn(!rtR0MemObjIsMapping(pMemToMap), VERR_INVALID_PARAMETER);
    AssertReturn(pMemToMap->enmType != RTR0MEMOBJTYPE_RES_VIRT, VERR_INVALID_PARAMETER);
    if (uAlignment == 0)
        uAlignment = PAGE_SIZE;
    AssertReturn(uAlignment == PAGE_SIZE || uAlignment == _2M || uAlignment == _4M, VERR_INVALID_PARAMETER);
    if (R3PtrFixed != (RTR3PTR)-1)
        AssertReturn(!(R3PtrFixed & (uAlignment - 1)), VERR_INVALID_PARAMETER);
    AssertReturn(fProt != RTMEM_PROT_NONE, VERR_INVALID_PARAMETER);
    AssertReturn(!(fProt & ~(RTMEM_PROT_READ | RTMEM_PROT_WRITE | RTMEM_PROT_EXEC)), VERR_INVALID_PARAMETER);
    if (R0Process == NIL_RTR0PROCESS)
        R0Process = RTR0ProcHandleSelf();

    /* do the mapping. */
    rc = rtR0MemObjNativeMapUser(&pNew, pMemToMap, R3PtrFixed, uAlignment, fProt, R0Process);
    if (RT_SUCCESS(rc))
    {
        /* link it. */
        rc = rtR0MemObjLink(pMemToMap, pNew);
        if (RT_SUCCESS(rc))
            *pMemObj = pNew;
        else
        {
            /* damn, out of memory. bail out. */
            int rc2 = rtR0MemObjNativeFree(pNew);
            AssertRC(rc2);
            pNew->u32Magic++;
            pNew->enmType = RTR0MEMOBJTYPE_END;
            RTMemFree(pNew);
        }
    }

    return rc;
}

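/*
 * Illustrative usage sketch (not part of the original file): exposing a
 * ring-0 allocation to the current process read-only, then retrieving the
 * ring-3 address of the mapping object.
 */
#if 0 /* example only */
static int ExampleMapUser(RTR0MEMOBJ hMemObj, RTR3PTR *pR3Ptr)
{
    RTR0MEMOBJ hMapObj;
    int rc = RTR0MemObjMapUser(&hMapObj, hMemObj, (RTR3PTR)-1 /* any address */,
                               0 /* uAlignment: alias for PAGE_SIZE */,
                               RTMEM_PROT_READ, NIL_RTR0PROCESS);
    if (RT_SUCCESS(rc))
        *pR3Ptr = RTR0MemObjAddressR3(hMapObj);
    return rc;
}
#endif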