VirtualBox

source: vbox/trunk/include/iprt/memobj.h@ 92250

Last change on this file since 92250 was 92250, checked in by vboxsync, 3 years ago

IPRT/RTR0MemObj: Added RTR0MemObjWasZeroInitialized and a couple of flags with which the backend can feed it the necessary info. It would be good to try to avoid zeroing memory twice when we can. [fix] bugref:10093

  • Property eol-style set to native
  • Property svn:eol-style set to native
  • Property svn:keywords set to Id Revision
File size: 37.8 KB
1/** @file
2 * IPRT - Memory Objects (Ring-0).
3 */
4
5/*
6 * Copyright (C) 2006-2020 Oracle Corporation
7 *
8 * This file is part of VirtualBox Open Source Edition (OSE), as
9 * available from http://www.virtualbox.org. This file is free software;
10 * you can redistribute it and/or modify it under the terms of the GNU
11 * General Public License (GPL) as published by the Free Software
12 * Foundation, in version 2 as it comes in the "COPYING" file of the
13 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
14 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
15 *
16 * The contents of this file may alternatively be used under the terms
17 * of the Common Development and Distribution License Version 1.0
18 * (CDDL) only, as it comes in the "COPYING.CDDL" file of the
19 * VirtualBox OSE distribution, in which case the provisions of the
20 * CDDL are applicable instead of those of the GPL.
21 *
22 * You may elect to license modified versions of this file under the
23 * terms and conditions of either the GPL or the CDDL or both.
24 */
25
26#ifndef IPRT_INCLUDED_memobj_h
27#define IPRT_INCLUDED_memobj_h
28#ifndef RT_WITHOUT_PRAGMA_ONCE
29# pragma once
30#endif
31
32#include <iprt/cdefs.h>
33#include <iprt/types.h>
34
35RT_C_DECLS_BEGIN
36
37/** @defgroup grp_rt_memobj RTMemObj - Memory Object Manipulation (Ring-0)
38 * @ingroup grp_rt
39 * @{
40 */
41
42/** @def RTMEM_TAG
43 * The default allocation tag used by the RTMem allocation APIs.
44 *
45 * When not defined before the inclusion of iprt/memobj.h or iprt/mem.h, this
45 * will default to a pointer to the current file name. The memory API will
46 * make use of this as a pointer to a volatile but read-only string.
48 */
49#ifndef RTMEM_TAG
50# define RTMEM_TAG (__FILE__)
51#endif
52
53#ifdef IN_RING0
54
55/**
56 * Checks whether this is a mapping or not.
57 *
58 * @returns true if it's a mapping, otherwise false.
59 * @param MemObj The ring-0 memory object handle.
60 */
61RTR0DECL(bool) RTR0MemObjIsMapping(RTR0MEMOBJ MemObj);
62
63/**
64 * Gets the address of a ring-0 memory object.
65 *
66 * @returns The address of the memory object.
67 * @returns NULL if the handle is invalid (asserts in strict builds) or if there isn't any mapping.
68 * @param MemObj The ring-0 memory object handle.
69 */
70RTR0DECL(void *) RTR0MemObjAddress(RTR0MEMOBJ MemObj);
71
72/**
73 * Gets the ring-3 address of a ring-0 memory object.
74 *
75 * This only applies to ring-0 memory objects with ring-3 mappings of some kind, i.e.
76 * locked user memory, reserved user address space and user mappings. This API should
77 * not be used on any other objects.
78 *
79 * @returns The address of the memory object.
80 * @returns NIL_RTR3PTR if the handle is invalid or if it's not an object with a ring-3 mapping.
81 * Strict builds will assert in both cases.
82 * @param MemObj The ring-0 memory object handle.
83 */
84RTR0DECL(RTR3PTR) RTR0MemObjAddressR3(RTR0MEMOBJ MemObj);
85
86/**
87 * Gets the size of a ring-0 memory object.
88 *
89 * The returned value may differ from the one specified to the API creating the
90 * object because of alignment adjustments. The minimal alignment currently
91 * employed by any API is PAGE_SIZE, so the result can safely be shifted by
92 * PAGE_SHIFT to calculate a page count.
93 *
94 * @returns The object size.
95 * @returns 0 if the handle is invalid (asserts in strict builds) or if there isn't any mapping.
96 * @param MemObj The ring-0 memory object handle.
97 */
98RTR0DECL(size_t) RTR0MemObjSize(RTR0MEMOBJ MemObj);
99
100/**
101 * Gets the physical address of a page in the memory object.
102 *
103 * @returns The physical address.
104 * @returns NIL_RTHCPHYS if the object doesn't contain fixed physical pages.
105 * @returns NIL_RTHCPHYS if the iPage is out of range.
106 * @returns NIL_RTHCPHYS if the object handle isn't valid.
107 * @param MemObj The ring-0 memory object handle.
108 * @param iPage The page number within the object.
109 */
110RTR0DECL(RTHCPHYS) RTR0MemObjGetPagePhysAddr(RTR0MEMOBJ MemObj, size_t iPage);
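/* Illustrative sketch (not part of the original header): walking the pages of
 * an object with RTR0MemObjSize() and RTR0MemObjGetPagePhysAddr(); the size is
 * always a PAGE_SIZE multiple, so shifting by PAGE_SHIFT gives the page count.
 * Assumes <iprt/param.h> for PAGE_SHIFT; the helper name is made up. */
static bool exampleAllPagesBelow(RTR0MEMOBJ hMemObj, RTHCPHYS PhysLimit)
{
    size_t const cPages = RTR0MemObjSize(hMemObj) >> PAGE_SHIFT;
    size_t       iPage;
    for (iPage = 0; iPage < cPages; iPage++)
    {
        RTHCPHYS const Phys = RTR0MemObjGetPagePhysAddr(hMemObj, iPage);
        if (Phys == NIL_RTHCPHYS || Phys >= PhysLimit)
            return false; /* not backed by fixed pages, or above the limit */
    }
    return true;
}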
111
112/**
113 * Checks whether the allocation was zero initialized or not.
114 *
115 * This only works on allocations. It is not meaningful for mappings, reserved
116 * memory and entered physical addresses, and will return false for these.
117 *
118 * @returns true if the allocation was initialized to zero at allocation time,
119 * false if not or if the query is not meaningful for the object type.
120 * @param hMemObj The ring-0 memory object handle.
121 *
122 * @remarks It can be expected that memory allocated in the same fashion will
123 * have the same initialization state. So, if this returns true for
124 * one allocation it will return true for all other similarly made
125 * allocations.
126 */
127RTR0DECL(bool) RTR0MemObjWasZeroInitialized(RTR0MEMOBJ hMemObj);
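/* Illustrative sketch (not part of the original header): only clear an
 * allocation by hand when the backend did not already hand out zeroed pages,
 * which is the double zeroing the changelog above wants to avoid. Assumes
 * RT_BZERO from <iprt/string.h>; the helper name is made up. */
static void exampleZeroIfNeeded(RTR0MEMOBJ hMemObj)
{
    if (!RTR0MemObjWasZeroInitialized(hMemObj))
        RT_BZERO(RTR0MemObjAddress(hMemObj), RTR0MemObjSize(hMemObj));
}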
128
129/**
130 * Frees a ring-0 memory object.
131 *
132 * @returns IPRT status code.
133 * @retval VERR_INVALID_HANDLE if the handle isn't valid.
134 * @param MemObj The ring-0 memory object to be freed. NULL is accepted.
135 * @param fFreeMappings Whether or not to free mappings of the object.
136 */
137RTR0DECL(int) RTR0MemObjFree(RTR0MEMOBJ MemObj, bool fFreeMappings);
138
139/**
140 * Allocates page aligned virtual kernel memory (default tag).
141 *
142 * The memory is taken from a non paged (= fixed physical memory backing) pool.
143 *
144 * @returns IPRT status code.
145 * @param pMemObj Where to store the ring-0 memory object handle.
146 * @param cb Number of bytes to allocate. This is rounded up to nearest page.
147 * @param fExecutable Flag indicating whether it should be permitted to
148 * execute code in the memory object. The user must
149 * use RTR0MemObjProtect after initializing the
150 * allocation to actually make it executable.
151 */
152#define RTR0MemObjAllocPage(pMemObj, cb, fExecutable) \
153 RTR0MemObjAllocPageTag((pMemObj), (cb), (fExecutable), RTMEM_TAG)
154
155/**
156 * Allocates page aligned virtual kernel memory (custom tag).
157 *
158 * The memory is taken from a non paged (= fixed physical memory backing) pool.
159 *
160 * @returns IPRT status code.
161 * @param pMemObj Where to store the ring-0 memory object handle.
162 * @param cb Number of bytes to allocate. This is rounded up to nearest page.
163 * @param fExecutable Flag indicating whether it should be permitted to
164 * execute code in the memory object. The user must
165 * use RTR0MemObjProtect after initializing the
166 * allocation to actually make it executable.
167 * @param pszTag Allocation tag used for statistics and such.
168 */
169RTR0DECL(int) RTR0MemObjAllocPageTag(PRTR0MEMOBJ pMemObj, size_t cb, bool fExecutable, const char *pszTag);
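/* Illustrative sketch (not part of the original header): the usual
 * allocate / address / free lifecycle for a page allocation. Assumes
 * <iprt/errcore.h> for RT_SUCCESS; names are made up for illustration. */
static int exampleUseScratchPages(size_t cb)
{
    RTR0MEMOBJ hMemObj = NIL_RTR0MEMOBJ;
    int rc = RTR0MemObjAllocPage(&hMemObj, cb, false /*fExecutable*/);
    if (RT_SUCCESS(rc))
    {
        void *pv = RTR0MemObjAddress(hMemObj); /* kernel mapping of the pages */
        /* ... use pv ... */
        NOREF(pv);
        rc = RTR0MemObjFree(hMemObj, false /*fFreeMappings*/);
    }
    return rc;
}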
170
171/**
172 * Allocates large page aligned virtual kernel memory (default tag).
173 *
174 * Each large page in the allocation is backed by a contiguous chunk of physical
175 * memory aligned to the page size. The memory is taken from a non paged (=
176 * fixed physical memory backing) pool.
177 *
178 * On some hosts only a single large page can be allocated at a time; these
179 * hosts will return VERR_NOT_SUPPORTED if @a cb is larger than @a cbLargePage.
180 *
181 * @returns IPRT status code.
182 * @retval VERR_TRY_AGAIN instead of VERR_NO_MEMORY when
183 * RTMEMOBJ_ALLOC_LARGE_F_FAST is set and supported.
184 * @param pMemObj Where to store the ring-0 memory object handle.
185 * @param cb Number of bytes to allocate. This is rounded up to
186 * nearest large page.
187 * @param cbLargePage The large page size. The allowed values vary from
188 * architecture to architecture and with the paging mode
189 * used by the OS.
190 * @param fFlags Flags, RTMEMOBJ_ALLOC_LARGE_F_XXX.
191 *
192 * @note The implicit kernel mapping of this allocation does not necessarily
193 * have to be aligned on a @a cbLargePage boundary.
194 */
195#define RTR0MemObjAllocLarge(pMemObj, cb, cbLargePage, fFlags) \
196 RTR0MemObjAllocLargeTag((pMemObj), (cb), (cbLargePage), (fFlags), RTMEM_TAG)
197
198/**
199 * Allocates large page aligned virtual kernel memory (custom tag).
200 *
201 * Each large page in the allocation is backed by a contiguous chunk of physical
202 * memory aligned to the page size. The memory is taken from a non paged (=
203 * fixed physical memory backing) pool.
204 *
205 * On some hosts only a single large page can be allocated at a time; these
206 * hosts will return VERR_NOT_SUPPORTED if @a cb is larger than @a cbLargePage.
207 *
208 * @returns IPRT status code.
209 * @retval VERR_TRY_AGAIN instead of VERR_NO_MEMORY when
210 * RTMEMOBJ_ALLOC_LARGE_F_FAST is set and supported.
211 * @param pMemObj Where to store the ring-0 memory object handle.
212 * @param cb Number of bytes to allocate. This is rounded up to
213 * nearest large page.
214 * @param cbLargePage The large page size. The allowed values vary from
215 * architecture to architecture and with the paging mode
216 * used by the OS.
217 * @param fFlags Flags, RTMEMOBJ_ALLOC_LARGE_F_XXX.
218 * @param pszTag Allocation tag used for statistics and such.
219 *
220 * @note The implicit kernel mapping of this allocation does not necessarily
221 * have to be aligned on a @a cbLargePage boundary.
222 */
223RTR0DECL(int) RTR0MemObjAllocLargeTag(PRTR0MEMOBJ pMemObj, size_t cb, size_t cbLargePage, uint32_t fFlags, const char *pszTag);
224
225/** @name RTMEMOBJ_ALLOC_LARGE_F_XXX
226 * @{ */
227/** Indicates that it is okay to fail if there aren't enough large pages handy,
228 * cancelling any expensive search and reshuffling of memory (when supported).
229 * @note This flag can't be realized on all OSes. (Those who do support it
230 * will return VERR_TRY_AGAIN instead of VERR_NO_MEMORY if they
231 * cannot satisfy the request.) */
232#define RTMEMOBJ_ALLOC_LARGE_F_FAST RT_BIT_32(0)
233/** Mask with valid bits. */
234#define RTMEMOBJ_ALLOC_LARGE_F_VALID_MASK UINT32_C(0x00000001)
235/** @} */
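/* Illustrative sketch (not part of the original header): try a fast large page
 * allocation first and fall back to normal pages when the host either cannot
 * do it cheaply (VERR_TRY_AGAIN with RTMEMOBJ_ALLOC_LARGE_F_FAST) or not at
 * all. Assumes <iprt/errcore.h> and _2M from <iprt/cdefs.h>; the helper name
 * is made up. */
static int exampleAllocBacking(RTR0MEMOBJ *phMemObj, size_t cb)
{
    int rc = RTR0MemObjAllocLarge(phMemObj, cb, _2M, RTMEMOBJ_ALLOC_LARGE_F_FAST);
    if (rc == VERR_TRY_AGAIN || rc == VERR_NOT_SUPPORTED || rc == VERR_NO_MEMORY)
        rc = RTR0MemObjAllocPage(phMemObj, cb, false /*fExecutable*/);
    return rc;
}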
236
237/**
238 * Allocates page aligned virtual kernel memory with physical backing below 4GB
239 * (default tag).
240 *
241 * The physical memory backing the allocation is fixed.
242 *
243 * @returns IPRT status code.
244 * @param pMemObj Where to store the ring-0 memory object handle.
245 * @param cb Number of bytes to allocate. This is rounded up to nearest page.
246 * @param fExecutable Flag indicating whether it should be permitted to
247 * execute code in the memory object. The user must
248 * use RTR0MemObjProtect after initializing the
249 * allocation to actually make it executable.
250 */
251#define RTR0MemObjAllocLow(pMemObj, cb, fExecutable) \
252 RTR0MemObjAllocLowTag((pMemObj), (cb), (fExecutable), RTMEM_TAG)
253
254/**
255 * Allocates page aligned virtual kernel memory with physical backing below 4GB
256 * (custom tag).
257 *
258 * The physical memory backing the allocation is fixed.
259 *
260 * @returns IPRT status code.
261 * @param pMemObj Where to store the ring-0 memory object handle.
262 * @param cb Number of bytes to allocate. This is rounded up to nearest page.
263 * @param fExecutable Flag indicating whether it should be permitted to
264 * execute code in the memory object. The user must
265 * use RTR0MemObjProtect after initializing the
266 * allocation to actually make it executable.
267 * @param pszTag Allocation tag used for statistics and such.
268 */
269RTR0DECL(int) RTR0MemObjAllocLowTag(PRTR0MEMOBJ pMemObj, size_t cb, bool fExecutable, const char *pszTag);
270
271/**
272 * Allocates page aligned virtual kernel memory with contiguous physical backing
273 * below 4GB (default tag).
274 *
275 * The physical memory backing the allocation is fixed.
276 *
277 * @returns IPRT status code.
278 * @param pMemObj Where to store the ring-0 memory object handle.
279 * @param cb Number of bytes to allocate. This is rounded up to nearest page.
280 * @param fExecutable Flag indicating whether it should be permitted to
281 * execute code in the memory object. The user must
282 * use RTR0MemObjProtect after initializing the
283 * allocation to actually make it executable.
284 */
285#define RTR0MemObjAllocCont(pMemObj, cb, fExecutable) \
286 RTR0MemObjAllocContTag((pMemObj), (cb), (fExecutable), RTMEM_TAG)
287
288/**
289 * Allocates page aligned virtual kernel memory with contiguous physical backing
290 * below 4GB (custom tag).
291 *
292 * The physical memory backing the allocation is fixed.
293 *
294 * @returns IPRT status code.
295 * @param pMemObj Where to store the ring-0 memory object handle.
296 * @param cb Number of bytes to allocate. This is rounded up to nearest page.
297 * @param fExecutable Flag indicating whether it should be permitted to
298 * execute code in the memory object. The user must
299 * use RTR0MemObjProtect after initializing the
300 * allocation to actually make it executable.
301 * @param pszTag Allocation tag used for statistics and such.
302 */
303RTR0DECL(int) RTR0MemObjAllocContTag(PRTR0MEMOBJ pMemObj, size_t cb, bool fExecutable, const char *pszTag);
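/* Illustrative sketch (not part of the original header): a contiguous
 * allocation has a single physical base address, so querying page 0 is enough
 * to program a DMA-capable device. Assumes <iprt/errcore.h>; names are made
 * up for illustration. */
static int exampleAllocDmaBuffer(RTR0MEMOBJ *phMemObj, size_t cb, void **ppv, RTHCPHYS *pPhys)
{
    int rc = RTR0MemObjAllocCont(phMemObj, cb, false /*fExecutable*/);
    if (RT_SUCCESS(rc))
    {
        *ppv   = RTR0MemObjAddress(*phMemObj);
        *pPhys = RTR0MemObjGetPagePhysAddr(*phMemObj, 0 /*iPage*/);
    }
    return rc;
}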
304
305/**
306 * Locks a range of user virtual memory (default tag).
307 *
308 * @returns IPRT status code.
309 * @param pMemObj Where to store the ring-0 memory object handle.
310 * @param R3Ptr User virtual address. This is rounded down to a page
311 * boundary.
312 * @param cb Number of bytes to lock. This is rounded up to
313 * nearest page boundary.
314 * @param fAccess The desired access, a combination of RTMEM_PROT_READ
315 * and RTMEM_PROT_WRITE.
316 * @param R0Process The process to lock pages in. NIL_RTR0PROCESS is an
317 * alias for the current one.
318 *
319 * @remarks RTR0MemObjAddressR3() and RTR0MemObjAddress() will return the rounded
320 * down address.
321 *
322 * @remarks Linux: This API requires that the memory being locked is in a memory
323 * mapping that is not required in any forked-off child process. This
324 * is not intended as a permanent restriction; feel free to help out
325 * lifting it.
326 */
327#define RTR0MemObjLockUser(pMemObj, R3Ptr, cb, fAccess, R0Process) \
328 RTR0MemObjLockUserTag((pMemObj), (R3Ptr), (cb), (fAccess), (R0Process), RTMEM_TAG)
329
330/**
331 * Locks a range of user virtual memory (custom tag).
332 *
333 * @returns IPRT status code.
334 * @param pMemObj Where to store the ring-0 memory object handle.
335 * @param R3Ptr User virtual address. This is rounded down to a page
336 * boundary.
337 * @param cb Number of bytes to lock. This is rounded up to
338 * nearest page boundary.
339 * @param fAccess The desired access, a combination of RTMEM_PROT_READ
340 * and RTMEM_PROT_WRITE.
341 * @param R0Process The process to lock pages in. NIL_RTR0PROCESS is an
342 * alias for the current one.
343 * @param pszTag Allocation tag used for statistics and such.
344 *
345 * @remarks RTR0MemObjAddressR3() and RTR0MemObjAddress() will return the rounded
346 * down address.
347 *
348 * @remarks Linux: This API requires that the memory being locked is in a memory
349 * mapping that is not required in any forked-off child process. This
350 * is not intended as a permanent restriction; feel free to help out
351 * lifting it.
352 */
353RTR0DECL(int) RTR0MemObjLockUserTag(PRTR0MEMOBJ pMemObj, RTR3PTR R3Ptr, size_t cb, uint32_t fAccess,
354 RTR0PROCESS R0Process, const char *pszTag);
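/* Illustrative sketch (not part of the original header): lock a user buffer in
 * the calling process for read/write access; RTR0MemObjAddressR3() then returns
 * the page-rounded-down ring-3 address mentioned in the remarks above. Assumes
 * <iprt/errcore.h> and RTMEM_PROT_* from <iprt/mem.h>; names are made up. */
static int exampleLockUserBuffer(RTR0MEMOBJ *phMemObj, RTR3PTR R3Ptr, size_t cb)
{
    int rc = RTR0MemObjLockUser(phMemObj, R3Ptr, cb, RTMEM_PROT_READ | RTMEM_PROT_WRITE,
                                NIL_RTR0PROCESS /* current process */);
    if (RT_SUCCESS(rc))
    {
        RTR3PTR const R3PtrRounded = RTR0MemObjAddressR3(*phMemObj);
        NOREF(R3PtrRounded);
    }
    return rc;
}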
355
356/**
357 * Locks a range of kernel virtual memory (default tag).
358 *
359 * @returns IPRT status code.
360 * @param pMemObj Where to store the ring-0 memory object handle.
361 * @param pv Kernel virtual address. This is rounded down to a page boundary.
362 * @param cb Number of bytes to lock. This is rounded up to nearest page boundary.
363 * @param fAccess The desired access, a combination of RTMEM_PROT_READ
364 * and RTMEM_PROT_WRITE.
365 *
366 * @remark RTR0MemGetAddress() will return the rounded down address.
367 */
368#define RTR0MemObjLockKernel(pMemObj, pv, cb, fAccess) \
369 RTR0MemObjLockKernelTag((pMemObj), (pv), (cb), (fAccess), RTMEM_TAG)
370
371/**
372 * Locks a range of kernel virtual memory (custom tag).
373 *
374 * @returns IPRT status code.
375 * @param pMemObj Where to store the ring-0 memory object handle.
376 * @param pv Kernel virtual address. This is rounded down to a page boundary.
377 * @param cb Number of bytes to lock. This is rounded up to nearest page boundary.
378 * @param fAccess The desired access, a combination of RTMEM_PROT_READ
379 * and RTMEM_PROT_WRITE.
380 * @param pszTag Allocation tag used for statistics and such.
381 *
382 * @remark RTR0MemGetAddress() will return the rounded down address.
383 */
384RTR0DECL(int) RTR0MemObjLockKernelTag(PRTR0MEMOBJ pMemObj, void *pv, size_t cb, uint32_t fAccess, const char *pszTag);
385
386/**
387 * Allocates contiguous page aligned physical memory without (necessarily) any
388 * kernel mapping (default tag).
389 *
390 * @returns IPRT status code.
391 * @param pMemObj Where to store the ring-0 memory object handle.
392 * @param cb Number of bytes to allocate. This is rounded up to nearest page.
393 * @param PhysHighest The highest permissible address (inclusive).
394 * Pass NIL_RTHCPHYS if any address is acceptable.
395 */
396#define RTR0MemObjAllocPhys(pMemObj, cb, PhysHighest) \
397 RTR0MemObjAllocPhysTag((pMemObj), (cb), (PhysHighest), RTMEM_TAG)
398
399/**
400 * Allocates contiguous page aligned physical memory without (necessarily) any
401 * kernel mapping (custom tag).
402 *
403 * @returns IPRT status code.
404 * @param pMemObj Where to store the ring-0 memory object handle.
405 * @param cb Number of bytes to allocate. This is rounded up to nearest page.
406 * @param PhysHighest The highest permissible address (inclusive).
407 * Pass NIL_RTHCPHYS if any address is acceptable.
408 * @param pszTag Allocation tag used for statistics and such.
409 */
410RTR0DECL(int) RTR0MemObjAllocPhysTag(PRTR0MEMOBJ pMemObj, size_t cb, RTHCPHYS PhysHighest, const char *pszTag);
411
412/**
413 * Allocates contiguous physical memory without (necessarily) any kernel mapping
414 * (default tag).
415 *
416 * @returns IPRT status code.
417 * @param pMemObj Where to store the ring-0 memory object handle.
418 * @param cb Number of bytes to allocate. This is rounded up to nearest page.
419 * @param PhysHighest The highest permissible address (inclusive).
420 * Pass NIL_RTHCPHYS if any address is acceptable.
421 * @param uAlignment The alignment of the reserved memory.
422 * Supported values are 0 (alias for PAGE_SIZE), PAGE_SIZE, _2M, _4M and _1G.
423 */
424#define RTR0MemObjAllocPhysEx(pMemObj, cb, PhysHighest, uAlignment) \
425 RTR0MemObjAllocPhysExTag((pMemObj), (cb), (PhysHighest), (uAlignment), RTMEM_TAG)
426
427/**
428 * Allocates contiguous physical memory without (necessarily) any kernel mapping
429 * (custom tag).
430 *
431 * @returns IPRT status code.
432 * @param pMemObj Where to store the ring-0 memory object handle.
433 * @param cb Number of bytes to allocate. This is rounded up to nearest page.
434 * @param PhysHighest The highest permissible address (inclusive).
435 * Pass NIL_RTHCPHYS if any address is acceptable.
436 * @param uAlignment The alignment of the reserved memory.
437 * Supported values are 0 (alias for PAGE_SIZE), PAGE_SIZE, _2M, _4M and _1G.
438 * @param pszTag Allocation tag used for statistics and such.
439 */
440RTR0DECL(int) RTR0MemObjAllocPhysExTag(PRTR0MEMOBJ pMemObj, size_t cb, RTHCPHYS PhysHighest, size_t uAlignment, const char *pszTag);
441
442/**
443 * Allocates non-contiguous page aligned physical memory without (necessarily)
444 * any kernel mapping (default tag).
445 *
446 * This API is for allocating huge amounts of pages and will return
447 * VERR_NOT_SUPPORTED if this cannot be implemented in a satisfactory
448 * manner.
449 *
450 * @returns IPRT status code.
451 * @retval VERR_NOT_SUPPORTED if it's not possible to allocate unmapped
452 * physical memory on this platform. The caller should expect
453 * this error and have a fallback strategy for it.
454 *
455 * @param pMemObj Where to store the ring-0 memory object handle.
456 * @param cb Number of bytes to allocate. This is rounded up to nearest page.
457 * @param PhysHighest The highest permissible address (inclusive).
458 * Pass NIL_RTHCPHYS if any address is acceptable.
459 */
460#define RTR0MemObjAllocPhysNC(pMemObj, cb, PhysHighest) \
461 RTR0MemObjAllocPhysNCTag((pMemObj), (cb), (PhysHighest), RTMEM_TAG)
462
463/**
464 * Allocates non-contiguous page aligned physical memory without (necessarily)
465 * any kernel mapping (custom tag).
466 *
467 * This API is for allocating huge amounts of pages and will return
468 * VERR_NOT_SUPPORTED if this cannot be implemented in a satisfactory
469 * manner.
470 *
471 * @returns IPRT status code.
472 * @retval VERR_NOT_SUPPORTED if it's not possible to allocate unmapped
473 * physical memory on this platform. The caller should expect
474 * this error and have a fallback strategy for it.
475 *
476 * @param pMemObj Where to store the ring-0 memory object handle.
477 * @param cb Number of bytes to allocate. This is rounded up to nearest page.
478 * @param PhysHighest The highest permissible address (inclusive).
479 * Pass NIL_RTHCPHYS if any address is acceptable.
480 * @param pszTag Allocation tag used for statistics and such.
481 */
482RTR0DECL(int) RTR0MemObjAllocPhysNCTag(PRTR0MEMOBJ pMemObj, size_t cb, RTHCPHYS PhysHighest, const char *pszTag);
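/* Illustrative sketch (not part of the original header): the fallback strategy
 * the note above asks callers to have when non-contiguous, unmapped physical
 * allocation is not supported on the host. Assumes <iprt/errcore.h>; the
 * helper name is made up. */
static int exampleAllocGuestPages(RTR0MEMOBJ *phMemObj, size_t cb)
{
    int rc = RTR0MemObjAllocPhysNC(phMemObj, cb, NIL_RTHCPHYS /* no address limit */);
    if (rc == VERR_NOT_SUPPORTED)
        rc = RTR0MemObjAllocPage(phMemObj, cb, false /*fExecutable*/); /* mapped, but works everywhere */
    return rc;
}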
483
484/** Memory cache policy for RTR0MemObjEnterPhys.
485 * @{
486 */
487/** Default caching policy -- don't care. */
488#define RTMEM_CACHE_POLICY_DONT_CARE UINT32_C(0)
489/** MMIO caching policy -- uncachable. */
490#define RTMEM_CACHE_POLICY_MMIO UINT32_C(1)
491/** @} */
492
493/**
494 * Creates a page aligned, contiguous, physical memory object (default tag).
495 *
496 * No physical memory is allocated; we trust that you know what you're doing.
497 *
498 * @returns IPRT status code.
499 * @param pMemObj Where to store the ring-0 memory object handle.
500 * @param Phys The physical address to start at. This is rounded down to the
501 * nearest page boundary.
502 * @param cb The size of the object in bytes. This is rounded up to nearest page boundary.
503 * @param uCachePolicy One of the RTMEM_CACHE_XXX modes.
504 */
505#define RTR0MemObjEnterPhys(pMemObj, Phys, cb, uCachePolicy) \
506 RTR0MemObjEnterPhysTag((pMemObj), (Phys), (cb), (uCachePolicy), RTMEM_TAG)
507
508/**
509 * Creates a page aligned, contiguous, physical memory object (custom tag).
510 *
511 * No physical memory is allocated; we trust that you know what you're doing.
512 *
513 * @returns IPRT status code.
514 * @param pMemObj Where to store the ring-0 memory object handle.
515 * @param Phys The physical address to start at. This is rounded down to the
516 * nearest page boundary.
517 * @param cb The size of the object in bytes. This is rounded up to nearest page boundary.
518 * @param uCachePolicy One of the RTMEM_CACHE_XXX modes.
519 * @param pszTag Allocation tag used for statistics and such.
520 */
521RTR0DECL(int) RTR0MemObjEnterPhysTag(PRTR0MEMOBJ pMemObj, RTHCPHYS Phys, size_t cb, uint32_t uCachePolicy, const char *pszTag);
522
523/**
524 * Reserves kernel virtual address space (default tag).
525 *
526 * If this function fails with VERR_NOT_SUPPORTED, the idea is that you
527 * can use RTR0MemObjEnterPhys() + RTR0MemObjMapKernel() as a fallback if
528 * you have a safe physical address range to make use of...
529 *
530 * @returns IPRT status code.
531 * @param pMemObj Where to store the ring-0 memory object handle.
532 * @param pvFixed Requested address. (void *)-1 means any address. This must match the alignment.
533 * @param cb The number of bytes to reserve. This is rounded up to nearest page.
534 * @param uAlignment The alignment of the reserved memory.
535 * Supported values are 0 (alias for PAGE_SIZE), PAGE_SIZE, _2M and _4M.
536 */
537#define RTR0MemObjReserveKernel(pMemObj, pvFixed, cb, uAlignment) \
538 RTR0MemObjReserveKernelTag((pMemObj), (pvFixed), (cb), (uAlignment), RTMEM_TAG)
539
540/**
541 * Reserves kernel virtual address space (custom tag).
542 *
543 * If this function fails with VERR_NOT_SUPPORTED, the idea is that you
544 * can use RTR0MemObjEnterPhys() + RTR0MemObjMapKernel() as a fallback if
545 * you have a safe physical address range to make use of...
546 *
547 * @returns IPRT status code.
548 * @param pMemObj Where to store the ring-0 memory object handle.
549 * @param pvFixed Requested address. (void *)-1 means any address. This must match the alignment.
550 * @param cb The number of bytes to reserve. This is rounded up to nearest page.
551 * @param uAlignment The alignment of the reserved memory.
552 * Supported values are 0 (alias for PAGE_SIZE), PAGE_SIZE, _2M and _4M.
553 * @param pszTag Allocation tag used for statistics and such.
554 */
555RTR0DECL(int) RTR0MemObjReserveKernelTag(PRTR0MEMOBJ pMemObj, void *pvFixed, size_t cb, size_t uAlignment, const char *pszTag);
556
557/**
558 * Reserves user virtual address space in the current process (default tag).
559 *
560 * @returns IPRT status code.
561 * @param pMemObj Where to store the ring-0 memory object handle.
562 * @param R3PtrFixed Requested address. (RTR3PTR)-1 means any address. This must match the alignment.
563 * @param cb The number of bytes to reserve. This is rounded up to nearest PAGE_SIZE.
564 * @param uAlignment The alignment of the reserved memory.
565 * Supported values are 0 (alias for PAGE_SIZE), PAGE_SIZE, _2M and _4M.
566 * @param R0Process The process to reserve the memory in.
567 * NIL_RTR0PROCESS is an alias for the current one.
568 */
569#define RTR0MemObjReserveUser(pMemObj, R3PtrFixed, cb, uAlignment, R0Process) \
570 RTR0MemObjReserveUserTag((pMemObj), (R3PtrFixed), (cb), (uAlignment), (R0Process), RTMEM_TAG)
571
572/**
573 * Reserves user virtual address space in the current process (custom tag).
574 *
575 * @returns IPRT status code.
576 * @param pMemObj Where to store the ring-0 memory object handle.
577 * @param R3PtrFixed Requested address. (RTR3PTR)-1 means any address. This must match the alignment.
578 * @param cb The number of bytes to reserve. This is rounded up to nearest PAGE_SIZE.
579 * @param uAlignment The alignment of the reserved memory.
580 * Supported values are 0 (alias for PAGE_SIZE), PAGE_SIZE, _2M and _4M.
581 * @param R0Process The process to reserve the memory in.
582 * NIL_RTR0PROCESS is an alias for the current one.
583 * @param pszTag Allocation tag used for statistics and such.
584 */
585RTR0DECL(int) RTR0MemObjReserveUserTag(PRTR0MEMOBJ pMemObj, RTR3PTR R3PtrFixed, size_t cb, size_t uAlignment,
586 RTR0PROCESS R0Process, const char *pszTag);
587
588/**
589 * Maps a memory object into kernel virtual address space (default tag).
590 *
591 * This is the same as calling RTR0MemObjMapKernelEx with cbSub and offSub set
592 * to zero.
593 *
594 * @returns IPRT status code.
595 * @param pMemObj Where to store the ring-0 memory object handle of the mapping object.
596 * @param MemObjToMap The object to be mapped.
597 * @param pvFixed Requested address. (void *)-1 means any address. This must match the alignment.
598 * @param uAlignment The alignment of the reserved memory.
599 * Supported values are 0 (alias for PAGE_SIZE), PAGE_SIZE, _2M and _4M.
600 * @param fProt Combination of RTMEM_PROT_* flags (except RTMEM_PROT_NONE).
601 */
602#define RTR0MemObjMapKernel(pMemObj, MemObjToMap, pvFixed, uAlignment, fProt) \
603 RTR0MemObjMapKernelTag((pMemObj), (MemObjToMap), (pvFixed), (uAlignment), (fProt), RTMEM_TAG)
604
605/**
606 * Maps a memory object into kernel virtual address space (custom tag).
607 *
608 * This is the same as calling RTR0MemObjMapKernelEx with cbSub and offSub set
609 * to zero.
610 *
611 * @returns IPRT status code.
612 * @param pMemObj Where to store the ring-0 memory object handle of the mapping object.
613 * @param MemObjToMap The object to be mapped.
614 * @param pvFixed Requested address. (void *)-1 means any address. This must match the alignment.
615 * @param uAlignment The alignment of the reserved memory.
616 * Supported values are 0 (alias for PAGE_SIZE), PAGE_SIZE, _2M and _4M.
617 * @param fProt Combination of RTMEM_PROT_* flags (except RTMEM_PROT_NONE).
618 * @param pszTag Allocation tag used for statistics and such.
619 */
620RTR0DECL(int) RTR0MemObjMapKernelTag(PRTR0MEMOBJ pMemObj, RTR0MEMOBJ MemObjToMap, void *pvFixed,
621 size_t uAlignment, unsigned fProt, const char *pszTag);
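/* Illustrative sketch (not part of the original header): give a device's MMIO
 * range a kernel mapping by combining RTR0MemObjEnterPhys() with
 * RTR0MemObjMapKernel(), roughly the fallback suggested for
 * RTR0MemObjReserveKernel() above. Assumes <iprt/errcore.h> and RTMEM_PROT_*
 * from <iprt/mem.h>; names are made up for illustration. */
static int exampleMapMmio(RTHCPHYS PhysMmio, size_t cb, RTR0MEMOBJ *phPhys, RTR0MEMOBJ *phMap, void **ppv)
{
    int rc = RTR0MemObjEnterPhys(phPhys, PhysMmio, cb, RTMEM_CACHE_POLICY_MMIO);
    if (RT_SUCCESS(rc))
    {
        rc = RTR0MemObjMapKernel(phMap, *phPhys, (void *)-1 /* any address */,
                                 0 /*uAlignment*/, RTMEM_PROT_READ | RTMEM_PROT_WRITE);
        if (RT_SUCCESS(rc))
            *ppv = RTR0MemObjAddress(*phMap);
        else
            RTR0MemObjFree(*phPhys, false /*fFreeMappings*/);
    }
    return rc;
}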
622
623/**
624 * Maps a memory object into kernel virtual address space (default tag).
625 *
626 * The ability to map subsections of the object into kernel space is currently
627 * not implemented on all platforms. All/Most of platforms supports mapping the
628 * whole object into kernel space.
629 *
630 * @returns IPRT status code.
631 * @retval VERR_NOT_SUPPORTED if it's not possible to map a subsection of a
632 * memory object on this platform. When you hit this, try implementing it.
633 *
634 * @param pMemObj Where to store the ring-0 memory object handle of the mapping object.
635 * @param MemObjToMap The object to be mapped.
636 * @param pvFixed Requested address. (void *)-1 means any address. This must match the alignment.
637 * @param uAlignment The alignment of the reserved memory.
638 * Supported values are 0 (alias for PAGE_SIZE), PAGE_SIZE, _2M and _4M.
639 * @param fProt Combination of RTMEM_PROT_* flags (except RTMEM_PROT_NONE).
640 * @param offSub Where in the object to start mapping. If non-zero
641 * the value must be page aligned and cbSub must be
642 * non-zero as well.
643 * @param cbSub The size of the part of the object to be mapped. If
644 * zero the entire object is mapped. The value must be
645 * page aligned.
646 */
647#define RTR0MemObjMapKernelEx(pMemObj, MemObjToMap, pvFixed, uAlignment, fProt, offSub, cbSub) \
648 RTR0MemObjMapKernelExTag((pMemObj), (MemObjToMap), (pvFixed), (uAlignment), (fProt), (offSub), (cbSub), RTMEM_TAG)
649
650/**
651 * Maps a memory object into kernel virtual address space (custom tag).
652 *
653 * The ability to map subsections of the object into kernel space is currently
654 * not implemented on all platforms. Most platforms support mapping the
655 * whole object into kernel space.
656 *
657 * @returns IPRT status code.
658 * @retval VERR_NOT_SUPPORTED if it's not possible to map a subsection of a
659 * memory object on this platform. When you hit this, try implementing it.
660 *
661 * @param pMemObj Where to store the ring-0 memory object handle of the mapping object.
662 * @param MemObjToMap The object to be mapped.
663 * @param pvFixed Requested address. (void *)-1 means any address. This must match the alignment.
664 * @param uAlignment The alignment of the reserved memory.
665 * Supported values are 0 (alias for PAGE_SIZE), PAGE_SIZE, _2M and _4M.
666 * @param fProt Combination of RTMEM_PROT_* flags (except RTMEM_PROT_NONE).
667 * @param offSub Where in the object to start mapping. If non-zero
668 * the value must be page aligned and cbSub must be
669 * non-zero as well.
670 * @param cbSub The size of the part of the object to be mapped. If
671 * zero the entire object is mapped. The value must be
672 * page aligned.
673 * @param pszTag Allocation tag used for statistics and such.
674 */
675RTR0DECL(int) RTR0MemObjMapKernelExTag(PRTR0MEMOBJ pMemObj, RTR0MEMOBJ MemObjToMap, void *pvFixed, size_t uAlignment,
676 unsigned fProt, size_t offSub, size_t cbSub, const char *pszTag);
677
678/**
679 * Maps a memory object into user virtual address space in the current process
680 * (default tag).
681 *
682 * @returns IPRT status code.
683 * @param pMemObj Where to store the ring-0 memory object handle of the mapping object.
684 * @param MemObjToMap The object to be mapped.
685 * @param R3PtrFixed Requested address. (RTR3PTR)-1 means any address. This must match the alignment.
686 * @param uAlignment The alignment of the reserved memory.
687 * Supported values are 0 (alias for PAGE_SIZE), PAGE_SIZE, _2M and _4M.
688 * @param fProt Combination of RTMEM_PROT_* flags (except RTMEM_PROT_NONE).
689 * @param R0Process The process to map the memory into. NIL_RTR0PROCESS
690 * is an alias for the current one.
691 */
692#define RTR0MemObjMapUser(pMemObj, MemObjToMap, R3PtrFixed, uAlignment, fProt, R0Process) \
693 RTR0MemObjMapUserTag((pMemObj), (MemObjToMap), (R3PtrFixed), (uAlignment), (fProt), (R0Process), RTMEM_TAG)
694
695/**
696 * Maps a memory object into user virtual address space in the current process
697 * (custom tag).
698 *
699 * @returns IPRT status code.
700 * @param pMemObj Where to store the ring-0 memory object handle of the mapping object.
701 * @param MemObjToMap The object to be mapped.
702 * @param R3PtrFixed Requested address. (RTR3PTR)-1 means any address. This must match the alignment.
703 * @param uAlignment The alignment of the reserved memory.
704 * Supported values are 0 (alias for PAGE_SIZE), PAGE_SIZE, _2M and _4M.
705 * @param fProt Combination of RTMEM_PROT_* flags (except RTMEM_PROT_NONE).
706 * @param R0Process The process to map the memory into. NIL_RTR0PROCESS
707 * is an alias for the current one.
708 * @param pszTag Allocation tag used for statistics and such.
709 */
710RTR0DECL(int) RTR0MemObjMapUserTag(PRTR0MEMOBJ pMemObj, RTR0MEMOBJ MemObjToMap, RTR3PTR R3PtrFixed,
711 size_t uAlignment, unsigned fProt, RTR0PROCESS R0Process, const char *pszTag);
712
713/**
714 * Maps a memory object into user virtual address space in the current process
715 * (default tag).
716 *
717 * @returns IPRT status code.
718 * @param pMemObj Where to store the ring-0 memory object handle of the mapping object.
719 * @param MemObjToMap The object to be mapped.
720 * @param R3PtrFixed Requested address. (RTR3PTR)-1 means any address. This must match the alignment.
721 * @param uAlignment The alignment of the reserved memory.
722 * Supported values are 0 (alias for PAGE_SIZE), PAGE_SIZE, _2M and _4M.
723 * @param fProt Combination of RTMEM_PROT_* flags (except RTMEM_PROT_NONE).
724 * @param R0Process The process to map the memory into. NIL_RTR0PROCESS
725 * is an alias for the current one.
726 * @param offSub Where in the object to start mapping. If non-zero
727 * the value must be page aligned and cbSub must be
728 * non-zero as well.
729 * @param cbSub The size of the part of the object to be mapped. If
730 * zero the entire object is mapped. The value must be
731 * page aligned.
732 */
733#define RTR0MemObjMapUserEx(pMemObj, MemObjToMap, R3PtrFixed, uAlignment, fProt, R0Process, offSub, cbSub) \
734 RTR0MemObjMapUserExTag((pMemObj), (MemObjToMap), (R3PtrFixed), (uAlignment), (fProt), (R0Process), \
735 (offSub), (cbSub), RTMEM_TAG)
736
737/**
738 * Maps a memory object into user virtual address space in the current process
739 * (custom tag).
740 *
741 * @returns IPRT status code.
742 * @param pMemObj Where to store the ring-0 memory object handle of the mapping object.
743 * @param MemObjToMap The object to be mapped.
744 * @param R3PtrFixed Requested address. (RTR3PTR)-1 means any address. This must match the alignment.
745 * @param uAlignment The alignment of the reserved memory.
746 * Supported values are 0 (alias for PAGE_SIZE), PAGE_SIZE, _2M and _4M.
747 * @param fProt Combination of RTMEM_PROT_* flags (except RTMEM_PROT_NONE).
748 * @param R0Process The process to map the memory into. NIL_RTR0PROCESS
749 * is an alias for the current one.
750 * @param offSub Where in the object to start mapping. If non-zero
751 * the value must be page aligned and cbSub must be
752 * non-zero as well.
753 * @param cbSub The size of the part of the object to be mapped. If
754 * zero the entire object is mapped. The value must be
755 * page aligned.
756 * @param pszTag Allocation tag used for statistics and such.
757 */
758RTR0DECL(int) RTR0MemObjMapUserExTag(PRTR0MEMOBJ pMemObj, RTR0MEMOBJ MemObjToMap, RTR3PTR R3PtrFixed, size_t uAlignment,
759 unsigned fProt, RTR0PROCESS R0Process, size_t offSub, size_t cbSub, const char *pszTag);
760
761/**
762 * Change the page level protection of one or more pages in a memory object.
763 *
764 * @returns IPRT status code.
765 * @retval VERR_NOT_SUPPORTED if the OS doesn't provide any way to manipulate
766 * page level protection. The caller must handle this status code
767 * gracefully. (Note that it may also occur if the implementation is
768 * missing, in which case just go ahead and implement it.)
769 *
770 * @param hMemObj Memory object handle.
771 * @param offSub Offset into the memory object. Must be page aligned.
772 * @param cbSub Number of bytes to change the protection of. Must be
773 * page aligned.
774 * @param fProt Combination of RTMEM_PROT_* flags.
775 */
776RTR0DECL(int) RTR0MemObjProtect(RTR0MEMOBJ hMemObj, size_t offSub, size_t cbSub, uint32_t fProt);
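/* Illustrative sketch (not part of the original header): the RTR0MemObjProtect()
 * step the fExecutable parameter documentation above refers to: fill the pages
 * with code first, then flip them to read+execute. Assumes <iprt/errcore.h> and
 * RTMEM_PROT_* from <iprt/mem.h>; the helper name is made up. */
static int exampleMakeExecutable(RTR0MEMOBJ hMemObj)
{
    /* The whole object: offSub = 0, cbSub = object size (both page aligned). */
    return RTR0MemObjProtect(hMemObj, 0 /*offSub*/, RTR0MemObjSize(hMemObj),
                             RTMEM_PROT_READ | RTMEM_PROT_EXEC);
}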
777
778#endif /* IN_RING0 */
779
780/** @} */
781
782RT_C_DECLS_END
783
784#endif /* !IPRT_INCLUDED_memobj_h */
785