VirtualBox

source: vbox/trunk/src/VBox/Runtime/r0drv/memobj-r0drv.cpp @ 100208

Last change on this file since 100208 was 99758, checked in by vboxsync, 20 months ago

IPRT: Make doxygen 1.9.6 happy. Mostly removing duplicate docs (iprt is documented in the header files). bugref:10442

/* $Id: memobj-r0drv.cpp 99758 2023-05-11 21:37:59Z vboxsync $ */
/** @file
 * IPRT - Ring-0 Memory Objects, Common Code.
 */

/*
 * Copyright (C) 2006-2023 Oracle and/or its affiliates.
 *
 * This file is part of VirtualBox base platform packages, as
 * available from https://www.virtualbox.org.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation, in version 3 of the
 * License.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, see <https://www.gnu.org/licenses>.
 *
 * The contents of this file may alternatively be used under the terms
 * of the Common Development and Distribution License Version 1.0
 * (CDDL), a copy of it is provided in the "COPYING.CDDL" file included
 * in the VirtualBox distribution, in which case the provisions of the
 * CDDL are applicable instead of those of the GPL.
 *
 * You may elect to license modified versions of this file under the
 * terms and conditions of either the GPL or the CDDL or both.
 *
 * SPDX-License-Identifier: GPL-3.0-only OR CDDL-1.0
 */


/*********************************************************************************************************************************
*   Header Files                                                                                                                 *
*********************************************************************************************************************************/
#define LOG_GROUP RTLOGGROUP_DEFAULT /// @todo RTLOGGROUP_MEM
#define RTMEM_NO_WRAP_TO_EF_APIS /* circular dependency otherwise. */
#include <iprt/memobj.h>
#include "internal/iprt.h"

#include <iprt/alloc.h>
#include <iprt/asm.h>
#include <iprt/assert.h>
#include <iprt/err.h>
#include <iprt/log.h>
#include <iprt/mp.h>
#include <iprt/param.h>
#include <iprt/process.h>
#include <iprt/thread.h>

#include "internal/memobj.h"


/**
 * Internal function for allocating a new memory object.
 *
 * @returns The allocated and initialized handle.
 * @param   cbSelf      The size of the memory object handle. 0 means default size.
 * @param   enmType     The memory object type.
 * @param   pv          The memory object mapping.
 * @param   cb          The size of the memory object.
 * @param   pszTag      The tag string.
 */
DECLHIDDEN(PRTR0MEMOBJINTERNAL) rtR0MemObjNew(size_t cbSelf, RTR0MEMOBJTYPE enmType, void *pv, size_t cb, const char *pszTag)
{
    PRTR0MEMOBJINTERNAL pNew;

    /* validate the size */
    if (!cbSelf)
        cbSelf = sizeof(*pNew);
    Assert(cbSelf >= sizeof(*pNew));
    Assert(cbSelf == (uint32_t)cbSelf);
    AssertMsg(RT_ALIGN_Z(cb, PAGE_SIZE) == cb, ("%#zx\n", cb));

    /*
     * Allocate and initialize the object.
     */
    pNew = (PRTR0MEMOBJINTERNAL)RTMemAllocZ(cbSelf);
    if (pNew)
    {
        pNew->u32Magic = RTR0MEMOBJ_MAGIC;
        pNew->cbSelf   = (uint32_t)cbSelf;
        pNew->enmType  = enmType;
        pNew->fFlags   = 0;
        pNew->cb       = cb;
        pNew->pv       = pv;
#ifdef DEBUG
        pNew->pszTag   = pszTag;
#else
        RT_NOREF_PV(pszTag);
#endif
    }
    return pNew;
}


/**
 * Deletes an incomplete memory object.
 *
 * This is for cleaning up after failures during object creation.
 *
 * @param   pMem    The incomplete memory object to delete.
 */
DECLHIDDEN(void) rtR0MemObjDelete(PRTR0MEMOBJINTERNAL pMem)
{
    if (pMem)
    {
        ASMAtomicUoWriteU32(&pMem->u32Magic, ~RTR0MEMOBJ_MAGIC);
        pMem->enmType = RTR0MEMOBJTYPE_END;
        RTMemFree(pMem);
    }
}


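/**
 * A minimal sketch (not code from this file) of how the rtR0MemObjNew /
 * rtR0MemObjDelete pair is meant to bracket a failing creation path; the
 * someNativeAllocation() step is a hypothetical stand-in for backend work:
 * @code
 *     PRTR0MEMOBJINTERNAL pMem = rtR0MemObjNew(0, RTR0MEMOBJTYPE_PAGE, NULL, cb, pszTag);
 *     if (!pMem)
 *         return VERR_NO_MEMORY;
 *     int rc = someNativeAllocation(pMem);   // hypothetical native step
 *     if (RT_FAILURE(rc))
 *         rtR0MemObjDelete(pMem);            // safe on incomplete objects
 *     return rc;
 * @endcode
 */

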
/**
 * Links a mapping object to a primary object.
 *
 * @returns IPRT status code.
 * @retval  VINF_SUCCESS on success.
 * @retval  VINF_NO_MEMORY if we couldn't expand the mapping array of the parent.
 * @param   pParent     The parent (primary) memory object.
 * @param   pChild      The child (mapping) memory object.
 */
static int rtR0MemObjLink(PRTR0MEMOBJINTERNAL pParent, PRTR0MEMOBJINTERNAL pChild)
{
    uint32_t i;

    /* sanity */
    Assert(rtR0MemObjIsMapping(pChild));
    Assert(!rtR0MemObjIsMapping(pParent));

    /* expand the array? */
    i = pParent->uRel.Parent.cMappings;
    if (i >= pParent->uRel.Parent.cMappingsAllocated)
    {
        void *pv = RTMemRealloc(pParent->uRel.Parent.papMappings,
                                (i + 32) * sizeof(pParent->uRel.Parent.papMappings[0]));
        if (!pv)
            return VERR_NO_MEMORY;
        pParent->uRel.Parent.papMappings = (PPRTR0MEMOBJINTERNAL)pv;
        pParent->uRel.Parent.cMappingsAllocated = i + 32;
        Assert(i == pParent->uRel.Parent.cMappings);
    }

    /* do the linking. */
    pParent->uRel.Parent.papMappings[i] = pChild;
    pParent->uRel.Parent.cMappings++;
    pChild->uRel.Child.pParent = pParent;

    return VINF_SUCCESS;
}


RTR0DECL(bool) RTR0MemObjIsMapping(RTR0MEMOBJ MemObj)
{
    /* Validate the object handle. */
    PRTR0MEMOBJINTERNAL pMem;
    AssertPtrReturn(MemObj, false);
    pMem = (PRTR0MEMOBJINTERNAL)MemObj;
    AssertMsgReturn(pMem->u32Magic == RTR0MEMOBJ_MAGIC, ("%p: %#x\n", pMem, pMem->u32Magic), false);
    AssertMsgReturn(pMem->enmType > RTR0MEMOBJTYPE_INVALID && pMem->enmType < RTR0MEMOBJTYPE_END, ("%p: %d\n", pMem, pMem->enmType), false);

    /* hand it on to the inlined worker. */
    return rtR0MemObjIsMapping(pMem);
}
RT_EXPORT_SYMBOL(RTR0MemObjIsMapping);


RTR0DECL(void *) RTR0MemObjAddress(RTR0MEMOBJ MemObj)
{
    /* Validate the object handle. */
    PRTR0MEMOBJINTERNAL pMem;
    if (RT_UNLIKELY(MemObj == NIL_RTR0MEMOBJ))
        return NULL;
    AssertPtrReturn(MemObj, NULL);
    pMem = (PRTR0MEMOBJINTERNAL)MemObj;
    AssertMsgReturn(pMem->u32Magic == RTR0MEMOBJ_MAGIC, ("%p: %#x\n", pMem, pMem->u32Magic), NULL);
    AssertMsgReturn(pMem->enmType > RTR0MEMOBJTYPE_INVALID && pMem->enmType < RTR0MEMOBJTYPE_END, ("%p: %d\n", pMem, pMem->enmType), NULL);

    /* return the mapping address. */
    return pMem->pv;
}
RT_EXPORT_SYMBOL(RTR0MemObjAddress);


RTR0DECL(RTR3PTR) RTR0MemObjAddressR3(RTR0MEMOBJ MemObj)
{
    PRTR0MEMOBJINTERNAL pMem;

    /* Validate the object handle. */
    if (RT_UNLIKELY(MemObj == NIL_RTR0MEMOBJ))
        return NIL_RTR3PTR;
    AssertPtrReturn(MemObj, NIL_RTR3PTR);
    pMem = (PRTR0MEMOBJINTERNAL)MemObj;
    AssertMsgReturn(pMem->u32Magic == RTR0MEMOBJ_MAGIC, ("%p: %#x\n", pMem, pMem->u32Magic), NIL_RTR3PTR);
    AssertMsgReturn(pMem->enmType > RTR0MEMOBJTYPE_INVALID && pMem->enmType < RTR0MEMOBJTYPE_END, ("%p: %d\n", pMem, pMem->enmType), NIL_RTR3PTR);
    if (RT_UNLIKELY(   (   pMem->enmType != RTR0MEMOBJTYPE_MAPPING
                        || pMem->u.Mapping.R0Process == NIL_RTR0PROCESS)
                    && (   pMem->enmType != RTR0MEMOBJTYPE_LOCK
                        || pMem->u.Lock.R0Process == NIL_RTR0PROCESS)
                    && (   pMem->enmType != RTR0MEMOBJTYPE_PHYS_NC
                        || pMem->u.Lock.R0Process == NIL_RTR0PROCESS)
                    && (   pMem->enmType != RTR0MEMOBJTYPE_RES_VIRT
                        || pMem->u.ResVirt.R0Process == NIL_RTR0PROCESS)))
        return NIL_RTR3PTR;

    /* return the mapping address. */
    return (RTR3PTR)pMem->pv;
}
RT_EXPORT_SYMBOL(RTR0MemObjAddressR3);


RTR0DECL(size_t) RTR0MemObjSize(RTR0MEMOBJ MemObj)
{
    PRTR0MEMOBJINTERNAL pMem;

    /* Validate the object handle. */
    if (RT_UNLIKELY(MemObj == NIL_RTR0MEMOBJ))
        return 0;
    AssertPtrReturn(MemObj, 0);
    pMem = (PRTR0MEMOBJINTERNAL)MemObj;
    AssertMsgReturn(pMem->u32Magic == RTR0MEMOBJ_MAGIC, ("%p: %#x\n", pMem, pMem->u32Magic), 0);
    AssertMsgReturn(pMem->enmType > RTR0MEMOBJTYPE_INVALID && pMem->enmType < RTR0MEMOBJTYPE_END, ("%p: %d\n", pMem, pMem->enmType), 0);
    AssertMsg(RT_ALIGN_Z(pMem->cb, PAGE_SIZE) == pMem->cb, ("%#zx\n", pMem->cb));

    /* return the size. */
    return pMem->cb;
}
RT_EXPORT_SYMBOL(RTR0MemObjSize);

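/**
 * A minimal query sketch, assuming hMemObj is a valid handle obtained from
 * one of the allocation APIs below:
 * @code
 *     void  *pv = RTR0MemObjAddress(hMemObj);   // the ring-0 mapping address
 *     size_t cb = RTR0MemObjSize(hMemObj);      // always a multiple of PAGE_SIZE
 *     Log(("memobj %p: %p LB %#zx\n", hMemObj, pv, cb));
 * @endcode
 */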

/* Work around gcc bug 55940 */
#if defined(__GNUC__) && defined(RT_ARCH_X86) && (__GNUC__ * 100 + __GNUC_MINOR__) == 407
 __attribute__((__optimize__ ("no-shrink-wrap")))
#endif
RTR0DECL(RTHCPHYS) RTR0MemObjGetPagePhysAddr(RTR0MEMOBJ MemObj, size_t iPage)
{
    /* Validate the object handle. */
    PRTR0MEMOBJINTERNAL pMem;
    size_t cPages;
    AssertPtrReturn(MemObj, NIL_RTHCPHYS);
    pMem = (PRTR0MEMOBJINTERNAL)MemObj;
    AssertReturn(pMem->u32Magic == RTR0MEMOBJ_MAGIC, NIL_RTHCPHYS);
    AssertReturn(pMem->enmType > RTR0MEMOBJTYPE_INVALID && pMem->enmType < RTR0MEMOBJTYPE_END, NIL_RTHCPHYS);
    AssertMsgReturn(pMem->u32Magic == RTR0MEMOBJ_MAGIC, ("%p: %#x\n", pMem, pMem->u32Magic), NIL_RTHCPHYS);
    AssertMsgReturn(pMem->enmType > RTR0MEMOBJTYPE_INVALID && pMem->enmType < RTR0MEMOBJTYPE_END, ("%p: %d\n", pMem, pMem->enmType), NIL_RTHCPHYS);
    cPages = (pMem->cb >> PAGE_SHIFT);
    if (iPage >= cPages)
    {
        /* permit: while (RTR0MemObjGetPagePhysAddr(pMem, iPage++) != NIL_RTHCPHYS) {} */
        if (iPage == cPages)
            return NIL_RTHCPHYS;
        AssertReturn(iPage < (pMem->cb >> PAGE_SHIFT), NIL_RTHCPHYS);
    }

    /*
     * We know the address of physically contiguous allocations and mappings.
     */
    if (pMem->enmType == RTR0MEMOBJTYPE_CONT)
        return pMem->u.Cont.Phys + iPage * PAGE_SIZE;
    if (pMem->enmType == RTR0MEMOBJTYPE_PHYS)
        return pMem->u.Phys.PhysBase + iPage * PAGE_SIZE;

    /*
     * Do the job.
     */
    return rtR0MemObjNativeGetPagePhysAddr(pMem, iPage);
}
RT_EXPORT_SYMBOL(RTR0MemObjGetPagePhysAddr);


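/**
 * A minimal iteration sketch using the wrap-around convenience noted above
 * (iPage == cPages returns NIL_RTHCPHYS instead of asserting):
 * @code
 *     size_t   iPage = 0;
 *     RTHCPHYS HCPhys;
 *     while ((HCPhys = RTR0MemObjGetPagePhysAddr(hMemObj, iPage)) != NIL_RTHCPHYS)
 *     {
 *         Log(("page %#zx at %RHp\n", iPage, HCPhys));
 *         iPage++;
 *     }
 * @endcode
 */

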
RTR0DECL(bool) RTR0MemObjWasZeroInitialized(RTR0MEMOBJ hMemObj)
{
    PRTR0MEMOBJINTERNAL pMem;

    /* Validate the object handle. */
    if (RT_UNLIKELY(hMemObj == NIL_RTR0MEMOBJ))
        return false;
    AssertPtrReturn(hMemObj, false);
    pMem = (PRTR0MEMOBJINTERNAL)hMemObj;
    AssertMsgReturn(pMem->u32Magic == RTR0MEMOBJ_MAGIC, ("%p: %#x\n", pMem, pMem->u32Magic), false);
    AssertMsgReturn(pMem->enmType > RTR0MEMOBJTYPE_INVALID && pMem->enmType < RTR0MEMOBJTYPE_END, ("%p: %d\n", pMem, pMem->enmType), false);
    Assert(   (pMem->fFlags & (RTR0MEMOBJ_FLAGS_ZERO_AT_ALLOC | RTR0MEMOBJ_FLAGS_UNINITIALIZED_AT_ALLOC))
           != (RTR0MEMOBJ_FLAGS_ZERO_AT_ALLOC | RTR0MEMOBJ_FLAGS_UNINITIALIZED_AT_ALLOC));

    /* return the alloc init state. */
    return (pMem->fFlags & (RTR0MEMOBJ_FLAGS_ZERO_AT_ALLOC | RTR0MEMOBJ_FLAGS_UNINITIALIZED_AT_ALLOC))
        == RTR0MEMOBJ_FLAGS_ZERO_AT_ALLOC;
}
RT_EXPORT_SYMBOL(RTR0MemObjWasZeroInitialized);


RTR0DECL(int) RTR0MemObjFree(RTR0MEMOBJ MemObj, bool fFreeMappings)
{
    /*
     * Validate the object handle.
     */
    PRTR0MEMOBJINTERNAL pMem;
    int rc;

    if (MemObj == NIL_RTR0MEMOBJ)
        return VINF_SUCCESS;
    AssertPtrReturn(MemObj, VERR_INVALID_HANDLE);
    pMem = (PRTR0MEMOBJINTERNAL)MemObj;
    AssertReturn(pMem->u32Magic == RTR0MEMOBJ_MAGIC, VERR_INVALID_HANDLE);
    AssertReturn(pMem->enmType > RTR0MEMOBJTYPE_INVALID && pMem->enmType < RTR0MEMOBJTYPE_END, VERR_INVALID_HANDLE);
    RT_ASSERT_PREEMPTIBLE();

    /*
     * Deal with mappings according to fFreeMappings.
     */
    if (   !rtR0MemObjIsMapping(pMem)
        && pMem->uRel.Parent.cMappings > 0)
    {
        /* fail if not requested to free mappings. */
        if (!fFreeMappings)
            return VERR_MEMORY_BUSY;

        while (pMem->uRel.Parent.cMappings > 0)
        {
            PRTR0MEMOBJINTERNAL pChild = pMem->uRel.Parent.papMappings[--pMem->uRel.Parent.cMappings];
            pMem->uRel.Parent.papMappings[pMem->uRel.Parent.cMappings] = NULL;

            /* sanity checks. */
            AssertPtr(pChild);
            AssertFatal(pChild->u32Magic == RTR0MEMOBJ_MAGIC);
            AssertFatal(pChild->enmType > RTR0MEMOBJTYPE_INVALID && pChild->enmType < RTR0MEMOBJTYPE_END);
            AssertFatal(rtR0MemObjIsMapping(pChild));

            /* free the mapping. */
            rc = rtR0MemObjNativeFree(pChild);
            if (RT_FAILURE(rc))
            {
                Log(("RTR0MemObjFree: failed to free mapping %p: %p %#zx; rc=%Rrc\n", pChild, pChild->pv, pChild->cb, rc));
                pMem->uRel.Parent.papMappings[pMem->uRel.Parent.cMappings++] = pChild;
                return rc;
            }

            pChild->u32Magic++;
            pChild->enmType = RTR0MEMOBJTYPE_END;
            RTMemFree(pChild);
        }
    }

    /*
     * Free this object.
     */
    rc = rtR0MemObjNativeFree(pMem);
    if (RT_SUCCESS(rc))
    {
        /*
         * Ok, it was freed just fine. Now, if it's a mapping we'll have to remove it from the parent.
         */
        if (rtR0MemObjIsMapping(pMem))
        {
            PRTR0MEMOBJINTERNAL pParent = pMem->uRel.Child.pParent;
            uint32_t i;

            /* sanity checks */
            AssertPtr(pParent);
            AssertFatal(pParent->u32Magic == RTR0MEMOBJ_MAGIC);
            AssertFatal(pParent->enmType > RTR0MEMOBJTYPE_INVALID && pParent->enmType < RTR0MEMOBJTYPE_END);
            AssertFatal(!rtR0MemObjIsMapping(pParent));
            AssertFatal(pParent->uRel.Parent.cMappings > 0);
            AssertPtr(pParent->uRel.Parent.papMappings);

            /* locate and remove from the array of mappings. */
            i = pParent->uRel.Parent.cMappings;
            while (i-- > 0)
            {
                if (pParent->uRel.Parent.papMappings[i] == pMem)
                {
                    pParent->uRel.Parent.papMappings[i] = pParent->uRel.Parent.papMappings[--pParent->uRel.Parent.cMappings];
                    break;
                }
            }
            Assert(i != UINT32_MAX);
        }
        else
            Assert(pMem->uRel.Parent.cMappings == 0);

        /*
         * Finally, destroy the handle.
         */
        pMem->u32Magic++;
        pMem->enmType = RTR0MEMOBJTYPE_END;
        if (!rtR0MemObjIsMapping(pMem))
            RTMemFree(pMem->uRel.Parent.papMappings);
        RTMemFree(pMem);
    }
    else
        Log(("RTR0MemObjFree: failed to free %p: %d %p %#zx; rc=%Rrc\n",
             pMem, pMem->enmType, pMem->pv, pMem->cb, rc));
    return rc;
}
RT_EXPORT_SYMBOL(RTR0MemObjFree);



RTR0DECL(int) RTR0MemObjAllocPageTag(PRTR0MEMOBJ pMemObj, size_t cb, bool fExecutable, const char *pszTag)
{
    /* sanity checks. */
    const size_t cbAligned = RT_ALIGN_Z(cb, PAGE_SIZE);
    AssertPtrReturn(pMemObj, VERR_INVALID_POINTER);
    *pMemObj = NIL_RTR0MEMOBJ;
    AssertReturn(cb > 0, VERR_INVALID_PARAMETER);
    AssertReturn(cb <= cbAligned, VERR_INVALID_PARAMETER);
    RT_ASSERT_PREEMPTIBLE();

    /* do the allocation. */
    return rtR0MemObjNativeAllocPage(pMemObj, cbAligned, fExecutable, pszTag);
}
RT_EXPORT_SYMBOL(RTR0MemObjAllocPageTag);


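/**
 * A minimal allocate/use/free sketch, assuming a preemptible ring-0 context:
 * @code
 *     RTR0MEMOBJ hMemObj;
 *     int rc = RTR0MemObjAllocPageTag(&hMemObj, _64K, false, "sample"); // fExecutable=false
 *     if (RT_SUCCESS(rc))
 *     {
 *         void *pv = RTR0MemObjAddress(hMemObj);
 *         memset(pv, 0xcc, RTR0MemObjSize(hMemObj));
 *         rc = RTR0MemObjFree(hMemObj, false);  // no child mappings to free
 *     }
 * @endcode
 */

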
RTR0DECL(int) RTR0MemObjAllocLargeTag(PRTR0MEMOBJ pMemObj, size_t cb, size_t cbLargePage, uint32_t fFlags, const char *pszTag)
{
    /* sanity checks. */
    const size_t cbAligned = RT_ALIGN_Z(cb, cbLargePage);
    AssertPtrReturn(pMemObj, VERR_INVALID_POINTER);
    *pMemObj = NIL_RTR0MEMOBJ;
#ifdef RT_ARCH_AMD64
    AssertReturn(cbLargePage == _2M || cbLargePage == _1G, VERR_OUT_OF_RANGE);
#elif defined(RT_ARCH_X86)
    AssertReturn(cbLargePage == _2M || cbLargePage == _4M, VERR_OUT_OF_RANGE);
#else
    AssertReturn(RT_IS_POWER_OF_TWO(cbLargePage), VERR_NOT_POWER_OF_TWO);
    AssertReturn(cbLargePage > PAGE_SIZE, VERR_OUT_OF_RANGE);
#endif
    AssertReturn(cb > 0, VERR_INVALID_PARAMETER);
    AssertReturn(cb <= cbAligned, VERR_INVALID_PARAMETER);
    AssertReturn(!(fFlags & ~RTMEMOBJ_ALLOC_LARGE_F_VALID_MASK), VERR_INVALID_PARAMETER);
    RT_ASSERT_PREEMPTIBLE();

    /* do the allocation. */
    return rtR0MemObjNativeAllocLarge(pMemObj, cbAligned, cbLargePage, fFlags, pszTag);
}
RT_EXPORT_SYMBOL(RTR0MemObjAllocLargeTag);


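/**
 * A minimal large-page sketch; cbLargePage must match the host architecture
 * (for instance _2M on x86-64), and fFlags of 0 passes none of the
 * RTMEMOBJ_ALLOC_LARGE_F_XXX flags:
 * @code
 *     RTR0MEMOBJ hMemObj;
 *     int rc = RTR0MemObjAllocLargeTag(&hMemObj, _2M, _2M, 0, "large");
 *     if (RT_SUCCESS(rc))
 *     {
 *         // ... use RTR0MemObjAddress(hMemObj) ...
 *         RTR0MemObjFree(hMemObj, false);
 *     }
 * @endcode
 */

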
/**
 * Fallback implementation of rtR0MemObjNativeAllocLarge, implementing
 * single large page allocation by means of rtR0MemObjNativeAllocPhys.
 */
DECLHIDDEN(int) rtR0MemObjFallbackAllocLarge(PPRTR0MEMOBJINTERNAL ppMem, size_t cb, size_t cbLargePage, uint32_t fFlags,
                                             const char *pszTag)
{
    RT_NOREF(pszTag, fFlags);
    if (cb == cbLargePage)
        return rtR0MemObjNativeAllocPhys(ppMem, cb, NIL_RTHCPHYS, cbLargePage, pszTag);
    return VERR_NOT_SUPPORTED;
}


RTR0DECL(int) RTR0MemObjAllocLowTag(PRTR0MEMOBJ pMemObj, size_t cb, bool fExecutable, const char *pszTag)
{
    /* sanity checks. */
    const size_t cbAligned = RT_ALIGN_Z(cb, PAGE_SIZE);
    AssertPtrReturn(pMemObj, VERR_INVALID_POINTER);
    *pMemObj = NIL_RTR0MEMOBJ;
    AssertReturn(cb > 0, VERR_INVALID_PARAMETER);
    AssertReturn(cb <= cbAligned, VERR_INVALID_PARAMETER);
    RT_ASSERT_PREEMPTIBLE();

    /* do the allocation. */
    return rtR0MemObjNativeAllocLow(pMemObj, cbAligned, fExecutable, pszTag);
}
RT_EXPORT_SYMBOL(RTR0MemObjAllocLowTag);


RTR0DECL(int) RTR0MemObjAllocContTag(PRTR0MEMOBJ pMemObj, size_t cb, bool fExecutable, const char *pszTag)
{
    /* sanity checks. */
    const size_t cbAligned = RT_ALIGN_Z(cb, PAGE_SIZE);
    AssertPtrReturn(pMemObj, VERR_INVALID_POINTER);
    *pMemObj = NIL_RTR0MEMOBJ;
    AssertReturn(cb > 0, VERR_INVALID_PARAMETER);
    AssertReturn(cb <= cbAligned, VERR_INVALID_PARAMETER);
    RT_ASSERT_PREEMPTIBLE();

    /* do the allocation. */
    return rtR0MemObjNativeAllocCont(pMemObj, cbAligned, fExecutable, pszTag);
}
RT_EXPORT_SYMBOL(RTR0MemObjAllocContTag);


RTR0DECL(int) RTR0MemObjLockUserTag(PRTR0MEMOBJ pMemObj, RTR3PTR R3Ptr, size_t cb,
                                    uint32_t fAccess, RTR0PROCESS R0Process, const char *pszTag)
{
    /* sanity checks. */
    const size_t cbAligned = RT_ALIGN_Z(cb + (R3Ptr & PAGE_OFFSET_MASK), PAGE_SIZE);
    RTR3PTR const R3PtrAligned = (R3Ptr & ~(RTR3PTR)PAGE_OFFSET_MASK);
    AssertPtrReturn(pMemObj, VERR_INVALID_POINTER);
    *pMemObj = NIL_RTR0MEMOBJ;
    AssertReturn(cb > 0, VERR_INVALID_PARAMETER);
    AssertReturn(cb <= cbAligned, VERR_INVALID_PARAMETER);
    if (R0Process == NIL_RTR0PROCESS)
        R0Process = RTR0ProcHandleSelf();
    AssertReturn(!(fAccess & ~(RTMEM_PROT_READ | RTMEM_PROT_WRITE)), VERR_INVALID_PARAMETER);
    AssertReturn(fAccess, VERR_INVALID_PARAMETER);
    RT_ASSERT_PREEMPTIBLE();

    /* do the locking. */
    return rtR0MemObjNativeLockUser(pMemObj, R3PtrAligned, cbAligned, fAccess, R0Process, pszTag);
}
RT_EXPORT_SYMBOL(RTR0MemObjLockUserTag);


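/**
 * A minimal user-memory locking sketch; R3Ptr and cb are assumed to come
 * from a user request, and NIL_RTR0PROCESS selects the calling process:
 * @code
 *     RTR0MEMOBJ hMemObj;
 *     int rc = RTR0MemObjLockUserTag(&hMemObj, R3Ptr, cb, RTMEM_PROT_READ | RTMEM_PROT_WRITE,
 *                                    NIL_RTR0PROCESS, "userbuf");
 *     if (RT_SUCCESS(rc))
 *     {
 *         // the pages stay resident until the object is freed
 *         rc = RTR0MemObjFree(hMemObj, false);
 *     }
 * @endcode
 */

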
RTR0DECL(int) RTR0MemObjLockKernelTag(PRTR0MEMOBJ pMemObj, void *pv, size_t cb, uint32_t fAccess, const char *pszTag)
{
    /* sanity checks. */
    const size_t cbAligned = RT_ALIGN_Z(cb + ((uintptr_t)pv & PAGE_OFFSET_MASK), PAGE_SIZE);
    void * const pvAligned = (void *)((uintptr_t)pv & ~(uintptr_t)PAGE_OFFSET_MASK);
    AssertPtrReturn(pMemObj, VERR_INVALID_POINTER);
    *pMemObj = NIL_RTR0MEMOBJ;
    AssertReturn(cb > 0, VERR_INVALID_PARAMETER);
    AssertReturn(cb <= cbAligned, VERR_INVALID_PARAMETER);
    AssertPtrReturn(pvAligned, VERR_INVALID_POINTER);
    AssertReturn(!(fAccess & ~(RTMEM_PROT_READ | RTMEM_PROT_WRITE)), VERR_INVALID_PARAMETER);
    AssertReturn(fAccess, VERR_INVALID_PARAMETER);
    RT_ASSERT_PREEMPTIBLE();

    /* do the allocation. */
    return rtR0MemObjNativeLockKernel(pMemObj, pvAligned, cbAligned, fAccess, pszTag);
}
RT_EXPORT_SYMBOL(RTR0MemObjLockKernelTag);


RTR0DECL(int) RTR0MemObjAllocPhysTag(PRTR0MEMOBJ pMemObj, size_t cb, RTHCPHYS PhysHighest, const char *pszTag)
{
    /* sanity checks. */
    const size_t cbAligned = RT_ALIGN_Z(cb, PAGE_SIZE);
    AssertPtrReturn(pMemObj, VERR_INVALID_POINTER);
    *pMemObj = NIL_RTR0MEMOBJ;
    AssertReturn(cb > 0, VERR_INVALID_PARAMETER);
    AssertReturn(cb <= cbAligned, VERR_INVALID_PARAMETER);
    AssertReturn(PhysHighest >= cb, VERR_INVALID_PARAMETER);
    RT_ASSERT_PREEMPTIBLE();

    /* do the allocation. */
    return rtR0MemObjNativeAllocPhys(pMemObj, cbAligned, PhysHighest, PAGE_SIZE /* page aligned */, pszTag);
}
RT_EXPORT_SYMBOL(RTR0MemObjAllocPhysTag);


RTR0DECL(int) RTR0MemObjAllocPhysExTag(PRTR0MEMOBJ pMemObj, size_t cb, RTHCPHYS PhysHighest, size_t uAlignment, const char *pszTag)
{
    /* sanity checks. */
    const size_t cbAligned = RT_ALIGN_Z(cb, PAGE_SIZE);
    AssertPtrReturn(pMemObj, VERR_INVALID_POINTER);
    *pMemObj = NIL_RTR0MEMOBJ;
    AssertReturn(cb > 0, VERR_INVALID_PARAMETER);
    AssertReturn(cb <= cbAligned, VERR_INVALID_PARAMETER);
    AssertReturn(PhysHighest >= cb, VERR_INVALID_PARAMETER);
    if (uAlignment == 0)
        uAlignment = PAGE_SIZE;
    AssertReturn(   uAlignment == PAGE_SIZE
                 || uAlignment == _2M
                 || uAlignment == _4M
                 || uAlignment == _1G,
                 VERR_INVALID_PARAMETER);
#if HC_ARCH_BITS == 32
    /* Memory allocated in this way is typically mapped into kernel space as well; simply
       don't allow this on 32 bits hosts as the kernel space is too crowded already. */
    if (uAlignment != PAGE_SIZE)
        return VERR_NOT_SUPPORTED;
#endif
    RT_ASSERT_PREEMPTIBLE();

    /* do the allocation. */
    return rtR0MemObjNativeAllocPhys(pMemObj, cbAligned, PhysHighest, uAlignment, pszTag);
}
RT_EXPORT_SYMBOL(RTR0MemObjAllocPhysExTag);


RTR0DECL(int) RTR0MemObjAllocPhysNCTag(PRTR0MEMOBJ pMemObj, size_t cb, RTHCPHYS PhysHighest, const char *pszTag)
{
    /* sanity checks. */
    const size_t cbAligned = RT_ALIGN_Z(cb, PAGE_SIZE);
    AssertPtrReturn(pMemObj, VERR_INVALID_POINTER);
    *pMemObj = NIL_RTR0MEMOBJ;
    AssertReturn(cb > 0, VERR_INVALID_PARAMETER);
    AssertReturn(cb <= cbAligned, VERR_INVALID_PARAMETER);
    AssertReturn(PhysHighest >= cb, VERR_INVALID_PARAMETER);
    RT_ASSERT_PREEMPTIBLE();

    /* do the allocation. */
    return rtR0MemObjNativeAllocPhysNC(pMemObj, cbAligned, PhysHighest, pszTag);
}
RT_EXPORT_SYMBOL(RTR0MemObjAllocPhysNCTag);


RTR0DECL(int) RTR0MemObjEnterPhysTag(PRTR0MEMOBJ pMemObj, RTHCPHYS Phys, size_t cb, uint32_t uCachePolicy, const char *pszTag)
{
    /* sanity checks. */
    const size_t cbAligned = RT_ALIGN_Z(cb + (Phys & PAGE_OFFSET_MASK), PAGE_SIZE);
    const RTHCPHYS PhysAligned = Phys & ~(RTHCPHYS)PAGE_OFFSET_MASK;
    AssertPtrReturn(pMemObj, VERR_INVALID_POINTER);
    *pMemObj = NIL_RTR0MEMOBJ;
    AssertReturn(cb > 0, VERR_INVALID_PARAMETER);
    AssertReturn(cb <= cbAligned, VERR_INVALID_PARAMETER);
    AssertReturn(Phys != NIL_RTHCPHYS, VERR_INVALID_PARAMETER);
    AssertReturn(   uCachePolicy == RTMEM_CACHE_POLICY_DONT_CARE
                 || uCachePolicy == RTMEM_CACHE_POLICY_MMIO,
                 VERR_INVALID_PARAMETER);
    RT_ASSERT_PREEMPTIBLE();

    /* do the allocation. */
    return rtR0MemObjNativeEnterPhys(pMemObj, PhysAligned, cbAligned, uCachePolicy, pszTag);
}
RT_EXPORT_SYMBOL(RTR0MemObjEnterPhysTag);


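/**
 * A minimal MMIO sketch: enter an existing physical range (the BAR address
 * here is a made-up example) and map it into the kernel with the mapping
 * APIs below:
 * @code
 *     RTR0MEMOBJ hPhys;
 *     int rc = RTR0MemObjEnterPhysTag(&hPhys, 0xfe000000, _4K, RTMEM_CACHE_POLICY_MMIO, "bar0");
 *     if (RT_SUCCESS(rc))
 *     {
 *         RTR0MEMOBJ hMap;
 *         rc = RTR0MemObjMapKernelTag(&hMap, hPhys, (void *)-1, 0, RTMEM_PROT_READ | RTMEM_PROT_WRITE, "bar0map");
 *         // ... access the device registers via RTR0MemObjAddress(hMap) ...
 *     }
 * @endcode
 */

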
RTR0DECL(int) RTR0MemObjReserveKernelTag(PRTR0MEMOBJ pMemObj, void *pvFixed, size_t cb, size_t uAlignment, const char *pszTag)
{
    /* sanity checks. */
    const size_t cbAligned = RT_ALIGN_Z(cb, PAGE_SIZE);
    AssertPtrReturn(pMemObj, VERR_INVALID_POINTER);
    *pMemObj = NIL_RTR0MEMOBJ;
    if (uAlignment == 0)
        uAlignment = PAGE_SIZE;
    AssertReturn(uAlignment == PAGE_SIZE || uAlignment == _2M || uAlignment == _4M, VERR_INVALID_PARAMETER);
    AssertReturn(cb > 0, VERR_INVALID_PARAMETER);
    AssertReturn(cb <= cbAligned, VERR_INVALID_PARAMETER);
    if (pvFixed != (void *)-1)
        AssertReturn(!((uintptr_t)pvFixed & (uAlignment - 1)), VERR_INVALID_PARAMETER);
    RT_ASSERT_PREEMPTIBLE();

    /* do the reservation. */
    return rtR0MemObjNativeReserveKernel(pMemObj, pvFixed, cbAligned, uAlignment, pszTag);
}
RT_EXPORT_SYMBOL(RTR0MemObjReserveKernelTag);


RTR0DECL(int) RTR0MemObjReserveUserTag(PRTR0MEMOBJ pMemObj, RTR3PTR R3PtrFixed, size_t cb,
                                       size_t uAlignment, RTR0PROCESS R0Process, const char *pszTag)
{
    /* sanity checks. */
    const size_t cbAligned = RT_ALIGN_Z(cb, PAGE_SIZE);
    AssertPtrReturn(pMemObj, VERR_INVALID_POINTER);
    *pMemObj = NIL_RTR0MEMOBJ;
    if (uAlignment == 0)
        uAlignment = PAGE_SIZE;
    AssertReturn(uAlignment == PAGE_SIZE || uAlignment == _2M || uAlignment == _4M, VERR_INVALID_PARAMETER);
    AssertReturn(cb > 0, VERR_INVALID_PARAMETER);
    AssertReturn(cb <= cbAligned, VERR_INVALID_PARAMETER);
    if (R3PtrFixed != (RTR3PTR)-1)
        AssertReturn(!(R3PtrFixed & (uAlignment - 1)), VERR_INVALID_PARAMETER);
    if (R0Process == NIL_RTR0PROCESS)
        R0Process = RTR0ProcHandleSelf();
    RT_ASSERT_PREEMPTIBLE();

    /* do the reservation. */
    return rtR0MemObjNativeReserveUser(pMemObj, R3PtrFixed, cbAligned, uAlignment, R0Process, pszTag);
}
RT_EXPORT_SYMBOL(RTR0MemObjReserveUserTag);


RTR0DECL(int) RTR0MemObjMapKernelTag(PRTR0MEMOBJ pMemObj, RTR0MEMOBJ MemObjToMap, void *pvFixed,
                                     size_t uAlignment, unsigned fProt, const char *pszTag)
{
    return RTR0MemObjMapKernelExTag(pMemObj, MemObjToMap, pvFixed, uAlignment, fProt, 0, 0, pszTag);
}
RT_EXPORT_SYMBOL(RTR0MemObjMapKernelTag);


RTR0DECL(int) RTR0MemObjMapKernelExTag(PRTR0MEMOBJ pMemObj, RTR0MEMOBJ MemObjToMap, void *pvFixed, size_t uAlignment,
                                       unsigned fProt, size_t offSub, size_t cbSub, const char *pszTag)
{
    PRTR0MEMOBJINTERNAL pMemToMap;
    PRTR0MEMOBJINTERNAL pNew;
    int rc;

    /* sanity checks. */
    AssertPtrReturn(pMemObj, VERR_INVALID_POINTER);
    *pMemObj = NIL_RTR0MEMOBJ;
    AssertPtrReturn(MemObjToMap, VERR_INVALID_HANDLE);
    pMemToMap = (PRTR0MEMOBJINTERNAL)MemObjToMap;
    AssertReturn(pMemToMap->u32Magic == RTR0MEMOBJ_MAGIC, VERR_INVALID_HANDLE);
    AssertReturn(pMemToMap->enmType > RTR0MEMOBJTYPE_INVALID && pMemToMap->enmType < RTR0MEMOBJTYPE_END, VERR_INVALID_HANDLE);
    AssertReturn(!rtR0MemObjIsMapping(pMemToMap), VERR_INVALID_PARAMETER);
    AssertReturn(pMemToMap->enmType != RTR0MEMOBJTYPE_RES_VIRT, VERR_INVALID_PARAMETER);
    if (uAlignment == 0)
        uAlignment = PAGE_SIZE;
    AssertReturn(uAlignment == PAGE_SIZE || uAlignment == _2M || uAlignment == _4M, VERR_INVALID_PARAMETER);
    if (pvFixed != (void *)-1)
        AssertReturn(!((uintptr_t)pvFixed & (uAlignment - 1)), VERR_INVALID_PARAMETER);
    AssertReturn(fProt != RTMEM_PROT_NONE, VERR_INVALID_PARAMETER);
    AssertReturn(!(fProt & ~(RTMEM_PROT_READ | RTMEM_PROT_WRITE | RTMEM_PROT_EXEC)), VERR_INVALID_PARAMETER);
    AssertReturn(!(offSub & PAGE_OFFSET_MASK), VERR_INVALID_PARAMETER);
    AssertReturn(offSub < pMemToMap->cb, VERR_INVALID_PARAMETER);
    AssertReturn(!(cbSub & PAGE_OFFSET_MASK), VERR_INVALID_PARAMETER);
    AssertReturn(cbSub <= pMemToMap->cb, VERR_INVALID_PARAMETER);
    AssertReturn((!offSub && !cbSub) || (offSub + cbSub) <= pMemToMap->cb, VERR_INVALID_PARAMETER);
    RT_ASSERT_PREEMPTIBLE();

    /* adjust the request to simplify the native code. */
    if (offSub == 0 && cbSub == pMemToMap->cb)
        cbSub = 0;

    /* do the mapping. */
    rc = rtR0MemObjNativeMapKernel(&pNew, pMemToMap, pvFixed, uAlignment, fProt, offSub, cbSub, pszTag);
    if (RT_SUCCESS(rc))
    {
        /* link it. */
        rc = rtR0MemObjLink(pMemToMap, pNew);
        if (RT_SUCCESS(rc))
            *pMemObj = pNew;
        else
        {
            /* damn, out of memory. bail out. */
            int rc2 = rtR0MemObjNativeFree(pNew);
            AssertRC(rc2);
            pNew->u32Magic++;
            pNew->enmType = RTR0MEMOBJTYPE_END;
            RTMemFree(pNew);
        }
    }

    return rc;
}
RT_EXPORT_SYMBOL(RTR0MemObjMapKernelExTag);


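/**
 * A minimal sub-range sketch: map only the second page of a parent object
 * (assumed to span at least two pages) read-only into kernel space at an
 * arbitrary address:
 * @code
 *     RTR0MEMOBJ hMap;
 *     int rc = RTR0MemObjMapKernelExTag(&hMap, hParent, (void *)-1, 0, RTMEM_PROT_READ,
 *                                       PAGE_SIZE, PAGE_SIZE, "subview"); // offSub = cbSub = PAGE_SIZE
 * @endcode
 */

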
RTR0DECL(int) RTR0MemObjMapUserTag(PRTR0MEMOBJ pMemObj, RTR0MEMOBJ MemObjToMap, RTR3PTR R3PtrFixed,
                                   size_t uAlignment, unsigned fProt, RTR0PROCESS R0Process, const char *pszTag)
{
    return RTR0MemObjMapUserExTag(pMemObj, MemObjToMap, R3PtrFixed, uAlignment, fProt, R0Process, 0, 0, pszTag);
}
RT_EXPORT_SYMBOL(RTR0MemObjMapUserTag);


RTR0DECL(int) RTR0MemObjMapUserExTag(PRTR0MEMOBJ pMemObj, RTR0MEMOBJ MemObjToMap, RTR3PTR R3PtrFixed, size_t uAlignment,
                                     unsigned fProt, RTR0PROCESS R0Process, size_t offSub, size_t cbSub, const char *pszTag)
{
    /* sanity checks. */
    PRTR0MEMOBJINTERNAL pMemToMap;
    PRTR0MEMOBJINTERNAL pNew;
    int rc;
    AssertPtrReturn(pMemObj, VERR_INVALID_POINTER);
    pMemToMap = (PRTR0MEMOBJINTERNAL)MemObjToMap;
    *pMemObj = NIL_RTR0MEMOBJ;
    AssertPtrReturn(MemObjToMap, VERR_INVALID_HANDLE);
    AssertReturn(pMemToMap->u32Magic == RTR0MEMOBJ_MAGIC, VERR_INVALID_HANDLE);
    AssertReturn(pMemToMap->enmType > RTR0MEMOBJTYPE_INVALID && pMemToMap->enmType < RTR0MEMOBJTYPE_END, VERR_INVALID_HANDLE);
    AssertReturn(!rtR0MemObjIsMapping(pMemToMap), VERR_INVALID_PARAMETER);
    AssertReturn(pMemToMap->enmType != RTR0MEMOBJTYPE_RES_VIRT, VERR_INVALID_PARAMETER);
    if (uAlignment == 0)
        uAlignment = PAGE_SIZE;
    AssertReturn(uAlignment == PAGE_SIZE || uAlignment == _2M || uAlignment == _4M, VERR_INVALID_PARAMETER);
    if (R3PtrFixed != (RTR3PTR)-1)
        AssertReturn(!(R3PtrFixed & (uAlignment - 1)), VERR_INVALID_PARAMETER);
    AssertReturn(fProt != RTMEM_PROT_NONE, VERR_INVALID_PARAMETER);
    AssertReturn(!(fProt & ~(RTMEM_PROT_READ | RTMEM_PROT_WRITE | RTMEM_PROT_EXEC)), VERR_INVALID_PARAMETER);
    AssertReturn(!(offSub & PAGE_OFFSET_MASK), VERR_INVALID_PARAMETER);
    AssertReturn(offSub < pMemToMap->cb, VERR_INVALID_PARAMETER);
    AssertReturn(!(cbSub & PAGE_OFFSET_MASK), VERR_INVALID_PARAMETER);
    AssertReturn(cbSub <= pMemToMap->cb, VERR_INVALID_PARAMETER);
    AssertReturn((!offSub && !cbSub) || (offSub + cbSub) <= pMemToMap->cb, VERR_INVALID_PARAMETER);
    if (R0Process == NIL_RTR0PROCESS)
        R0Process = RTR0ProcHandleSelf();
    RT_ASSERT_PREEMPTIBLE();

    /* adjust the request to simplify the native code. */
    if (offSub == 0 && cbSub == pMemToMap->cb)
        cbSub = 0;

    /* do the mapping. */
    rc = rtR0MemObjNativeMapUser(&pNew, pMemToMap, R3PtrFixed, uAlignment, fProt, R0Process, offSub, cbSub, pszTag);
    if (RT_SUCCESS(rc))
    {
        /* link it. */
        rc = rtR0MemObjLink(pMemToMap, pNew);
        if (RT_SUCCESS(rc))
            *pMemObj = pNew;
        else
        {
            /* damn, out of memory. bail out. */
            int rc2 = rtR0MemObjNativeFree(pNew);
            AssertRC(rc2);
            pNew->u32Magic++;
            pNew->enmType = RTR0MEMOBJTYPE_END;
            RTMemFree(pNew);
        }
    }

    return rc;
}
RT_EXPORT_SYMBOL(RTR0MemObjMapUserExTag);


RTR0DECL(int) RTR0MemObjProtect(RTR0MEMOBJ hMemObj, size_t offSub, size_t cbSub, uint32_t fProt)
{
    PRTR0MEMOBJINTERNAL pMemObj;
    int rc;

    /* sanity checks. */
    pMemObj = (PRTR0MEMOBJINTERNAL)hMemObj;
    AssertPtrReturn(pMemObj, VERR_INVALID_HANDLE);
    AssertReturn(pMemObj->u32Magic == RTR0MEMOBJ_MAGIC, VERR_INVALID_HANDLE);
    AssertReturn(pMemObj->enmType > RTR0MEMOBJTYPE_INVALID && pMemObj->enmType < RTR0MEMOBJTYPE_END, VERR_INVALID_HANDLE);
    AssertReturn(rtR0MemObjIsProtectable(pMemObj), VERR_INVALID_PARAMETER);
    AssertReturn(!(offSub & PAGE_OFFSET_MASK), VERR_INVALID_PARAMETER);
    AssertReturn(offSub < pMemObj->cb, VERR_INVALID_PARAMETER);
    AssertReturn(!(cbSub & PAGE_OFFSET_MASK), VERR_INVALID_PARAMETER);
    AssertReturn(cbSub <= pMemObj->cb, VERR_INVALID_PARAMETER);
    AssertReturn(offSub + cbSub <= pMemObj->cb, VERR_INVALID_PARAMETER);
    AssertReturn(!(fProt & ~(RTMEM_PROT_NONE | RTMEM_PROT_READ | RTMEM_PROT_WRITE | RTMEM_PROT_EXEC)), VERR_INVALID_PARAMETER);
    RT_ASSERT_PREEMPTIBLE();

    /* do the job */
    rc = rtR0MemObjNativeProtect(pMemObj, offSub, cbSub, fProt);
    if (RT_SUCCESS(rc))
        pMemObj->fFlags |= RTR0MEMOBJ_FLAGS_PROT_CHANGED; /* record it */

    return rc;
}
RT_EXPORT_SYMBOL(RTR0MemObjProtect);
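

/**
 * A minimal protection sketch, flipping the first page of a mapping to
 * read-only; hMap is assumed to be a protectable mapping object:
 * @code
 *     int rc = RTR0MemObjProtect(hMap, 0, PAGE_SIZE, RTMEM_PROT_READ);
 *     AssertRC(rc);
 * @endcode
 */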