VirtualBox

source: vbox/trunk/src/VBox/Runtime/r0drv/memobj-r0drv.cpp@ 28112

Last change on this file since 28112 was 26994, checked in by vboxsync, 15 years ago

r0drv: comment typo

  • Property svn:eol-style set to native
  • Property svn:keywords set to Id Rev
File size: 36.7 KB
Line 
1/* $Revision: 26994 $ */
2/** @file
3 * IPRT - Ring-0 Memory Objects, Common Code.
4 */
5
6/*
7 * Copyright (C) 2006-2007 Sun Microsystems, Inc.
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 *
17 * The contents of this file may alternatively be used under the terms
18 * of the Common Development and Distribution License Version 1.0
19 * (CDDL) only, as it comes in the "COPYING.CDDL" file of the
20 * VirtualBox OSE distribution, in which case the provisions of the
21 * CDDL are applicable instead of those of the GPL.
22 *
23 * You may elect to license modified versions of this file under the
24 * terms and conditions of either the GPL or the CDDL or both.
25 *
26 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa
27 * Clara, CA 95054 USA or visit http://www.sun.com if you need
28 * additional information or have any questions.
29 */
30
31
32/*******************************************************************************
33* Header Files *
34*******************************************************************************/
35#define LOG_GROUP RTLOGGROUP_DEFAULT ///@todo RTLOGGROUP_MEM
36#include <iprt/memobj.h>
37#include "internal/iprt.h"
38
39#include <iprt/alloc.h>
40#include <iprt/asm.h>
41#include <iprt/assert.h>
42#include <iprt/err.h>
43#include <iprt/log.h>
44#include <iprt/mp.h>
45#include <iprt/param.h>
46#include <iprt/process.h>
47#include <iprt/thread.h>
48
49#include "internal/memobj.h"
50
51
52/**
53 * Internal function for allocating a new memory object.
54 *
55 * @returns The allocated and initialized handle.
56 * @param cbSelf The size of the memory object handle. 0 mean default size.
57 * @param enmType The memory object type.
58 * @param pv The memory object mapping.
59 * @param cb The size of the memory object.
60 */
61PRTR0MEMOBJINTERNAL rtR0MemObjNew(size_t cbSelf, RTR0MEMOBJTYPE enmType, void *pv, size_t cb)
62{
63 PRTR0MEMOBJINTERNAL pNew;
64
65 /* validate the size */
66 if (!cbSelf)
67 cbSelf = sizeof(*pNew);
68 Assert(cbSelf >= sizeof(*pNew));
69 Assert(cbSelf == (uint32_t)cbSelf);
70
71 /*
72 * Allocate and initialize the object.
73 */
74 pNew = (PRTR0MEMOBJINTERNAL)RTMemAllocZ(cbSelf);
75 if (pNew)
76 {
77 pNew->u32Magic = RTR0MEMOBJ_MAGIC;
78 pNew->cbSelf = (uint32_t)cbSelf;
79 pNew->enmType = enmType;
80 pNew->fFlags = 0;
81 pNew->cb = cb;
82 pNew->pv = pv;
83 }
84 return pNew;
85}
86
87
88/**
89 * Deletes an incomplete memory object.
90 *
91 * This is for cleaning up after failures during object creation.
92 *
93 * @param pMem The incomplete memory object to delete.
94 */
95void rtR0MemObjDelete(PRTR0MEMOBJINTERNAL pMem)
96{
97 if (pMem)
98 {
99 ASMAtomicUoWriteU32(&pMem->u32Magic, ~RTR0MEMOBJ_MAGIC);
100 pMem->enmType = RTR0MEMOBJTYPE_END;
101 RTMemFree(pMem);
102 }
103}
104
105
106/**
107 * Links a mapping object to a primary object.
108 *
109 * @returns IPRT status code.
110 * @retval VINF_SUCCESS on success.
111 * @retval VINF_NO_MEMORY if we couldn't expand the mapping array of the parent.
112 * @param pParent The parent (primary) memory object.
113 * @param pChild The child (mapping) memory object.
114 */
115static int rtR0MemObjLink(PRTR0MEMOBJINTERNAL pParent, PRTR0MEMOBJINTERNAL pChild)
116{
117 uint32_t i;
118
119 /* sanity */
120 Assert(rtR0MemObjIsMapping(pChild));
121 Assert(!rtR0MemObjIsMapping(pParent));
122
123 /* expand the array? */
124 i = pParent->uRel.Parent.cMappings;
125 if (i >= pParent->uRel.Parent.cMappingsAllocated)
126 {
127 void *pv = RTMemRealloc(pParent->uRel.Parent.papMappings,
128 (i + 32) * sizeof(pParent->uRel.Parent.papMappings[0]));
129 if (!pv)
130 return VERR_NO_MEMORY;
131 pParent->uRel.Parent.papMappings = (PPRTR0MEMOBJINTERNAL)pv;
132 pParent->uRel.Parent.cMappingsAllocated = i + 32;
133 Assert(i == pParent->uRel.Parent.cMappings);
134 }
135
136 /* do the linking. */
137 pParent->uRel.Parent.papMappings[i] = pChild;
138 pParent->uRel.Parent.cMappings++;
139 pChild->uRel.Child.pParent = pParent;
140
141 return VINF_SUCCESS;
142}
143
144
145/**
146 * Checks if this is mapping or not.
147 *
148 * @returns true if it's a mapping, otherwise false.
149 * @param MemObj The ring-0 memory object handle.
150 */
151RTR0DECL(bool) RTR0MemObjIsMapping(RTR0MEMOBJ MemObj)
152{
153 /* Validate the object handle. */
154 PRTR0MEMOBJINTERNAL pMem;
155 AssertPtrReturn(MemObj, false);
156 pMem = (PRTR0MEMOBJINTERNAL)MemObj;
157 AssertMsgReturn(pMem->u32Magic == RTR0MEMOBJ_MAGIC, ("%p: %#x\n", pMem, pMem->u32Magic), false);
158 AssertMsgReturn(pMem->enmType > RTR0MEMOBJTYPE_INVALID && pMem->enmType < RTR0MEMOBJTYPE_END, ("%p: %d\n", pMem, pMem->enmType), false);
159
160 /* hand it on to the inlined worker. */
161 return rtR0MemObjIsMapping(pMem);
162}
163RT_EXPORT_SYMBOL(RTR0MemObjIsMapping);
164
165
166/**
167 * Gets the address of a ring-0 memory object.
168 *
169 * @returns The address of the memory object.
170 * @returns NULL if the handle is invalid (asserts in strict builds) or if there isn't any mapping.
171 * @param MemObj The ring-0 memory object handle.
172 */
173RTR0DECL(void *) RTR0MemObjAddress(RTR0MEMOBJ MemObj)
174{
175 /* Validate the object handle. */
176 PRTR0MEMOBJINTERNAL pMem;
177 if (RT_UNLIKELY(MemObj == NIL_RTR0MEMOBJ))
178 return NULL;
179 AssertPtrReturn(MemObj, NULL);
180 pMem = (PRTR0MEMOBJINTERNAL)MemObj;
181 AssertMsgReturn(pMem->u32Magic == RTR0MEMOBJ_MAGIC, ("%p: %#x\n", pMem, pMem->u32Magic), NULL);
182 AssertMsgReturn(pMem->enmType > RTR0MEMOBJTYPE_INVALID && pMem->enmType < RTR0MEMOBJTYPE_END, ("%p: %d\n", pMem, pMem->enmType), NULL);
183
184 /* return the mapping address. */
185 return pMem->pv;
186}
187RT_EXPORT_SYMBOL(RTR0MemObjAddress);
188
189
/**
 * Gets the ring-3 address of a ring-0 memory object.
 *
 * This only applies to ring-0 memory objects with ring-3 mappings of some kind, i.e.
 * locked user memory, reserved user address space and user mappings. This API should
 * not be used on any other objects.
 *
 * @returns The address of the memory object.
 * @returns NIL_RTR3PTR if the handle is invalid or if it's not an object with a ring-3 mapping.
 *          Strict builds will assert in both cases.
 * @param   MemObj  The ring-0 memory object handle.
 */
RTR0DECL(RTR3PTR) RTR0MemObjAddressR3(RTR0MEMOBJ MemObj)
{
    PRTR0MEMOBJINTERNAL pMem;

    /* Validate the object handle (nil is tolerated quietly). */
    if (RT_UNLIKELY(MemObj == NIL_RTR0MEMOBJ))
        return NIL_RTR3PTR;
    AssertPtrReturn(MemObj, NIL_RTR3PTR);
    pMem = (PRTR0MEMOBJINTERNAL)MemObj;
    AssertMsgReturn(pMem->u32Magic == RTR0MEMOBJ_MAGIC, ("%p: %#x\n", pMem, pMem->u32Magic), NIL_RTR3PTR);
    AssertMsgReturn(pMem->enmType > RTR0MEMOBJTYPE_INVALID && pMem->enmType < RTR0MEMOBJTYPE_END, ("%p: %d\n", pMem, pMem->enmType), NIL_RTR3PTR);
    /* Only mapping, lock, phys-nc and reserved-virtual objects associated with a
       ring-0 process have a meaningful ring-3 address; reject everything else.
       NOTE(review): the RTR0MEMOBJTYPE_PHYS_NC arm reads u.Lock.R0Process -
       presumably the union members overlay identically; confirm against the
       RTR0MEMOBJINTERNAL declaration in internal/memobj.h. */
    if (RT_UNLIKELY(    (   pMem->enmType != RTR0MEMOBJTYPE_MAPPING
                         || pMem->u.Mapping.R0Process == NIL_RTR0PROCESS)
                    &&  (   pMem->enmType != RTR0MEMOBJTYPE_LOCK
                         || pMem->u.Lock.R0Process == NIL_RTR0PROCESS)
                    &&  (   pMem->enmType != RTR0MEMOBJTYPE_PHYS_NC
                         || pMem->u.Lock.R0Process == NIL_RTR0PROCESS)
                    &&  (   pMem->enmType != RTR0MEMOBJTYPE_RES_VIRT
                         || pMem->u.ResVirt.R0Process == NIL_RTR0PROCESS)))
        return NIL_RTR3PTR;

    /* return the mapping address. */
    return (RTR3PTR)pMem->pv;
}
RT_EXPORT_SYMBOL(RTR0MemObjAddressR3);
227
228
229/**
230 * Gets the size of a ring-0 memory object.
231 *
232 * @returns The address of the memory object.
233 * @returns 0 if the handle is invalid (asserts in strict builds) or if there isn't any mapping.
234 * @param MemObj The ring-0 memory object handle.
235 */
236RTR0DECL(size_t) RTR0MemObjSize(RTR0MEMOBJ MemObj)
237{
238 PRTR0MEMOBJINTERNAL pMem;
239
240 /* Validate the object handle. */
241 if (RT_UNLIKELY(MemObj == NIL_RTR0MEMOBJ))
242 return 0;
243 AssertPtrReturn(MemObj, 0);
244 pMem = (PRTR0MEMOBJINTERNAL)MemObj;
245 AssertMsgReturn(pMem->u32Magic == RTR0MEMOBJ_MAGIC, ("%p: %#x\n", pMem, pMem->u32Magic), 0);
246 AssertMsgReturn(pMem->enmType > RTR0MEMOBJTYPE_INVALID && pMem->enmType < RTR0MEMOBJTYPE_END, ("%p: %d\n", pMem, pMem->enmType), 0);
247
248 /* return the size. */
249 return pMem->cb;
250}
251RT_EXPORT_SYMBOL(RTR0MemObjSize);
252
253
254/**
255 * Get the physical address of an page in the memory object.
256 *
257 * @returns The physical address.
258 * @returns NIL_RTHCPHYS if the object doesn't contain fixed physical pages.
259 * @returns NIL_RTHCPHYS if the iPage is out of range.
260 * @returns NIL_RTHCPHYS if the object handle isn't valid.
261 * @param MemObj The ring-0 memory object handle.
262 * @param iPage The page number within the object.
263 */
264RTR0DECL(RTHCPHYS) RTR0MemObjGetPagePhysAddr(RTR0MEMOBJ MemObj, size_t iPage)
265{
266 /* Validate the object handle. */
267 PRTR0MEMOBJINTERNAL pMem;
268 size_t cPages;
269 AssertPtrReturn(MemObj, NIL_RTHCPHYS);
270 pMem = (PRTR0MEMOBJINTERNAL)MemObj;
271 AssertReturn(pMem->u32Magic == RTR0MEMOBJ_MAGIC, NIL_RTHCPHYS);
272 AssertReturn(pMem->enmType > RTR0MEMOBJTYPE_INVALID && pMem->enmType < RTR0MEMOBJTYPE_END, NIL_RTHCPHYS);
273 AssertMsgReturn(pMem->u32Magic == RTR0MEMOBJ_MAGIC, ("%p: %#x\n", pMem, pMem->u32Magic), NIL_RTHCPHYS);
274 AssertMsgReturn(pMem->enmType > RTR0MEMOBJTYPE_INVALID && pMem->enmType < RTR0MEMOBJTYPE_END, ("%p: %d\n", pMem, pMem->enmType), NIL_RTHCPHYS);
275 cPages = (pMem->cb >> PAGE_SHIFT);
276 if (iPage >= cPages)
277 {
278 /* permit: while (RTR0MemObjGetPagePhysAddr(pMem, iPage++) != NIL_RTHCPHYS) {} */
279 if (iPage == cPages)
280 return NIL_RTHCPHYS;
281 AssertReturn(iPage < (pMem->cb >> PAGE_SHIFT), NIL_RTHCPHYS);
282 }
283
284 /*
285 * We know the address of physically contiguous allocations and mappings.
286 */
287 if (pMem->enmType == RTR0MEMOBJTYPE_CONT)
288 return pMem->u.Cont.Phys + iPage * PAGE_SIZE;
289 if (pMem->enmType == RTR0MEMOBJTYPE_PHYS)
290 return pMem->u.Phys.PhysBase + iPage * PAGE_SIZE;
291
292 /*
293 * Do the job.
294 */
295 return rtR0MemObjNativeGetPagePhysAddr(pMem, iPage);
296}
297RT_EXPORT_SYMBOL(RTR0MemObjGetPagePhysAddr);
298
299
/**
 * Frees a ring-0 memory object.
 *
 * @returns IPRT status code.
 * @retval  VERR_INVALID_HANDLE if the handle is invalid.
 * @retval  VERR_MEMORY_BUSY if the object has mappings and fFreeMappings is false.
 * @param   MemObj          The ring-0 memory object to be freed. NIL is accepted (returns VINF_SUCCESS).
 * @param   fFreeMappings   Whether or not to free mappings of the object.
 */
RTR0DECL(int) RTR0MemObjFree(RTR0MEMOBJ MemObj, bool fFreeMappings)
{
    /*
     * Validate the object handle.
     */
    PRTR0MEMOBJINTERNAL pMem;
    int rc;

    if (MemObj == NIL_RTR0MEMOBJ)
        return VINF_SUCCESS;
    AssertPtrReturn(MemObj, VERR_INVALID_HANDLE);
    pMem = (PRTR0MEMOBJINTERNAL)MemObj;
    AssertReturn(pMem->u32Magic == RTR0MEMOBJ_MAGIC, VERR_INVALID_HANDLE);
    AssertReturn(pMem->enmType > RTR0MEMOBJTYPE_INVALID && pMem->enmType < RTR0MEMOBJTYPE_END, VERR_INVALID_HANDLE);
    RT_ASSERT_PREEMPTIBLE();

    /*
     * Deal with mappings according to fFreeMappings.
     */
    if (   !rtR0MemObjIsMapping(pMem)
        && pMem->uRel.Parent.cMappings > 0)
    {
        /* fail if not requested to free mappings. */
        if (!fFreeMappings)
            return VERR_MEMORY_BUSY;

        /* Pop children off the array one by one; on failure the child is pushed
           back so the object stays consistent and the caller may retry. */
        while (pMem->uRel.Parent.cMappings > 0)
        {
            PRTR0MEMOBJINTERNAL pChild = pMem->uRel.Parent.papMappings[--pMem->uRel.Parent.cMappings];
            pMem->uRel.Parent.papMappings[pMem->uRel.Parent.cMappings] = NULL;

            /* sanity checks. */
            AssertPtr(pChild);
            AssertFatal(pChild->u32Magic == RTR0MEMOBJ_MAGIC);
            AssertFatal(pChild->enmType > RTR0MEMOBJTYPE_INVALID && pChild->enmType < RTR0MEMOBJTYPE_END);
            AssertFatal(rtR0MemObjIsMapping(pChild));

            /* free the mapping. */
            rc = rtR0MemObjNativeFree(pChild);
            if (RT_FAILURE(rc))
            {
                Log(("RTR0MemObjFree: failed to free mapping %p: %p %#zx; rc=%Rrc\n", pChild, pChild->pv, pChild->cb, rc));
                pMem->uRel.Parent.papMappings[pMem->uRel.Parent.cMappings++] = pChild;
                return rc;
            }
        }
    }

    /*
     * Free this object.
     */
    rc = rtR0MemObjNativeFree(pMem);
    if (RT_SUCCESS(rc))
    {
        /*
         * Ok, it was freed just fine. Now, if it's a mapping we'll have to remove it from the parent.
         */
        if (rtR0MemObjIsMapping(pMem))
        {
            PRTR0MEMOBJINTERNAL pParent = pMem->uRel.Child.pParent;
            uint32_t i;

            /* sanity checks */
            AssertPtr(pParent);
            AssertFatal(pParent->u32Magic == RTR0MEMOBJ_MAGIC);
            AssertFatal(pParent->enmType > RTR0MEMOBJTYPE_INVALID && pParent->enmType < RTR0MEMOBJTYPE_END);
            AssertFatal(!rtR0MemObjIsMapping(pParent));
            AssertFatal(pParent->uRel.Parent.cMappings > 0);
            AssertPtr(pParent->uRel.Parent.papMappings);

            /* locate and remove from the array of mappings; the last entry is
               moved into the vacated slot (order is not preserved). */
            i = pParent->uRel.Parent.cMappings;
            while (i-- > 0)
            {
                if (pParent->uRel.Parent.papMappings[i] == pMem)
                {
                    pParent->uRel.Parent.papMappings[i] = pParent->uRel.Parent.papMappings[--pParent->uRel.Parent.cMappings];
                    break;
                }
            }
            Assert(i != UINT32_MAX); /* i wrapped => pMem wasn't in the parent's array. */
        }
        else
            Assert(pMem->uRel.Parent.cMappings == 0);

        /*
         * Finally, destroy the handle (invalidate the magic first so stale users assert).
         */
        pMem->u32Magic++;
        pMem->enmType = RTR0MEMOBJTYPE_END;
        if (!rtR0MemObjIsMapping(pMem))
            RTMemFree(pMem->uRel.Parent.papMappings);
        RTMemFree(pMem);
    }
    else
        Log(("RTR0MemObjFree: failed to free %p: %d %p %#zx; rc=%Rrc\n",
             pMem, pMem->enmType, pMem->pv, pMem->cb, rc));
    return rc;
}
RT_EXPORT_SYMBOL(RTR0MemObjFree);
408
409
410
411/**
412 * Allocates page aligned virtual kernel memory.
413 *
414 * The memory is taken from a non paged (= fixed physical memory backing) pool.
415 *
416 * @returns IPRT status code.
417 * @param pMemObj Where to store the ring-0 memory object handle.
418 * @param cb Number of bytes to allocate. This is rounded up to nearest page.
419 * @param fExecutable Flag indicating whether it should be permitted to executed code in the memory object.
420 */
421RTR0DECL(int) RTR0MemObjAllocPage(PRTR0MEMOBJ pMemObj, size_t cb, bool fExecutable)
422{
423 /* sanity checks. */
424 const size_t cbAligned = RT_ALIGN_Z(cb, PAGE_SIZE);
425 AssertPtrReturn(pMemObj, VERR_INVALID_POINTER);
426 *pMemObj = NIL_RTR0MEMOBJ;
427 AssertReturn(cb > 0, VERR_INVALID_PARAMETER);
428 AssertReturn(cb <= cbAligned, VERR_INVALID_PARAMETER);
429 RT_ASSERT_PREEMPTIBLE();
430
431 /* do the allocation. */
432 return rtR0MemObjNativeAllocPage(pMemObj, cbAligned, fExecutable);
433}
434RT_EXPORT_SYMBOL(RTR0MemObjAllocPage);
435
436
437/**
438 * Allocates page aligned virtual kernel memory with physical backing below 4GB.
439 *
440 * The physical memory backing the allocation is fixed.
441 *
442 * @returns IPRT status code.
443 * @param pMemObj Where to store the ring-0 memory object handle.
444 * @param cb Number of bytes to allocate. This is rounded up to nearest page.
445 * @param fExecutable Flag indicating whether it should be permitted to executed code in the memory object.
446 */
447RTR0DECL(int) RTR0MemObjAllocLow(PRTR0MEMOBJ pMemObj, size_t cb, bool fExecutable)
448{
449 /* sanity checks. */
450 const size_t cbAligned = RT_ALIGN_Z(cb, PAGE_SIZE);
451 AssertPtrReturn(pMemObj, VERR_INVALID_POINTER);
452 *pMemObj = NIL_RTR0MEMOBJ;
453 AssertReturn(cb > 0, VERR_INVALID_PARAMETER);
454 AssertReturn(cb <= cbAligned, VERR_INVALID_PARAMETER);
455 RT_ASSERT_PREEMPTIBLE();
456
457 /* do the allocation. */
458 return rtR0MemObjNativeAllocLow(pMemObj, cbAligned, fExecutable);
459}
460RT_EXPORT_SYMBOL(RTR0MemObjAllocLow);
461
462
463/**
464 * Allocates page aligned virtual kernel memory with contiguous physical backing below 4GB.
465 *
466 * The physical memory backing the allocation is fixed.
467 *
468 * @returns IPRT status code.
469 * @param pMemObj Where to store the ring-0 memory object handle.
470 * @param cb Number of bytes to allocate. This is rounded up to nearest page.
471 * @param fExecutable Flag indicating whether it should be permitted to executed code in the memory object.
472 */
473RTR0DECL(int) RTR0MemObjAllocCont(PRTR0MEMOBJ pMemObj, size_t cb, bool fExecutable)
474{
475 /* sanity checks. */
476 const size_t cbAligned = RT_ALIGN_Z(cb, PAGE_SIZE);
477 AssertPtrReturn(pMemObj, VERR_INVALID_POINTER);
478 *pMemObj = NIL_RTR0MEMOBJ;
479 AssertReturn(cb > 0, VERR_INVALID_PARAMETER);
480 AssertReturn(cb <= cbAligned, VERR_INVALID_PARAMETER);
481 RT_ASSERT_PREEMPTIBLE();
482
483 /* do the allocation. */
484 return rtR0MemObjNativeAllocCont(pMemObj, cbAligned, fExecutable);
485}
486RT_EXPORT_SYMBOL(RTR0MemObjAllocCont);
487
488
/**
 * Locks a range of user virtual memory.
 *
 * @returns IPRT status code.
 * @param   pMemObj     Where to store the ring-0 memory object handle.
 * @param   R3Ptr       User virtual address. This is rounded down to a page
 *                      boundary.
 * @param   cb          Number of bytes to lock. This is rounded up to
 *                      the nearest page boundary.
 * @param   fAccess     The desired access, a combination of RTMEM_PROT_READ
 *                      and RTMEM_PROT_WRITE.
 * @param   R0Process   The process to lock pages in. NIL_RTR0PROCESS is an
 *                      alias for the current one.
 *
 * @remarks RTR0MemGetAddressR3() and RTR0MemGetAddress() will return the rounded
 *          down address.
 *
 * @remarks Linux: This API requires that the memory being locked is in a memory
 *          mapping that is not required in any forked off child process. This
 *          is not intended as a permanent restriction, feel free to help out
 *          lifting it.
 */
RTR0DECL(int) RTR0MemObjLockUser(PRTR0MEMOBJ pMemObj, RTR3PTR R3Ptr, size_t cb, uint32_t fAccess, RTR0PROCESS R0Process)
{
    /* sanity checks. */
    /* Round the range outwards: base down to a page boundary, size up to cover
       the sub-page offset that was folded into the size. */
    const size_t cbAligned = RT_ALIGN_Z(cb + (R3Ptr & PAGE_OFFSET_MASK), PAGE_SIZE);
    RTR3PTR const R3PtrAligned = (R3Ptr & ~(RTR3PTR)PAGE_OFFSET_MASK);
    AssertPtrReturn(pMemObj, VERR_INVALID_POINTER);
    *pMemObj = NIL_RTR0MEMOBJ;
    AssertReturn(cb > 0, VERR_INVALID_PARAMETER);
    AssertReturn(cb <= cbAligned, VERR_INVALID_PARAMETER);
    if (R0Process == NIL_RTR0PROCESS)
        R0Process = RTR0ProcHandleSelf();
    AssertReturn(!(fAccess & ~(RTMEM_PROT_READ | RTMEM_PROT_WRITE)), VERR_INVALID_PARAMETER);
    AssertReturn(fAccess, VERR_INVALID_PARAMETER);
    RT_ASSERT_PREEMPTIBLE();

    /* do the locking. */
    return rtR0MemObjNativeLockUser(pMemObj, R3PtrAligned, cbAligned, fAccess, R0Process);
}
RT_EXPORT_SYMBOL(RTR0MemObjLockUser);
530
531
/**
 * Locks a range of kernel virtual memory.
 *
 * @returns IPRT status code.
 * @param   pMemObj     Where to store the ring-0 memory object handle.
 * @param   pv          Kernel virtual address. This is rounded down to a page boundary.
 * @param   cb          Number of bytes to lock. This is rounded up to the nearest page boundary.
 * @param   fAccess     The desired access, a combination of RTMEM_PROT_READ
 *                      and RTMEM_PROT_WRITE.
 *
 * @remark  RTR0MemGetAddress() will return the rounded down address.
 */
RTR0DECL(int) RTR0MemObjLockKernel(PRTR0MEMOBJ pMemObj, void *pv, size_t cb, uint32_t fAccess)
{
    /* sanity checks. */
    /* Round the range outwards: base down to a page boundary, size up to cover
       the sub-page offset that was folded into the size. */
    const size_t cbAligned = RT_ALIGN_Z(cb + ((uintptr_t)pv & PAGE_OFFSET_MASK), PAGE_SIZE);
    void * const pvAligned = (void *)((uintptr_t)pv & ~(uintptr_t)PAGE_OFFSET_MASK);
    AssertPtrReturn(pMemObj, VERR_INVALID_POINTER);
    *pMemObj = NIL_RTR0MEMOBJ;
    AssertReturn(cb > 0, VERR_INVALID_PARAMETER);
    AssertReturn(cb <= cbAligned, VERR_INVALID_PARAMETER);
    AssertPtrReturn(pvAligned, VERR_INVALID_POINTER);
    AssertReturn(!(fAccess & ~(RTMEM_PROT_READ | RTMEM_PROT_WRITE)), VERR_INVALID_PARAMETER);
    AssertReturn(fAccess, VERR_INVALID_PARAMETER);
    RT_ASSERT_PREEMPTIBLE();

    /* do the locking. */
    return rtR0MemObjNativeLockKernel(pMemObj, pvAligned, cbAligned, fAccess);
}
RT_EXPORT_SYMBOL(RTR0MemObjLockKernel);
562
563
564/**
565 * Allocates contiguous page aligned physical memory without (necessarily) any kernel mapping.
566 *
567 * @returns IPRT status code.
568 * @param pMemObj Where to store the ring-0 memory object handle.
569 * @param cb Number of bytes to allocate. This is rounded up to nearest page.
570 * @param PhysHighest The highest permittable address (inclusive).
571 * Pass NIL_RTHCPHYS if any address is acceptable.
572 */
573RTR0DECL(int) RTR0MemObjAllocPhys(PRTR0MEMOBJ pMemObj, size_t cb, RTHCPHYS PhysHighest)
574{
575 /* sanity checks. */
576 const size_t cbAligned = RT_ALIGN_Z(cb, PAGE_SIZE);
577 AssertPtrReturn(pMemObj, VERR_INVALID_POINTER);
578 *pMemObj = NIL_RTR0MEMOBJ;
579 AssertReturn(cb > 0, VERR_INVALID_PARAMETER);
580 AssertReturn(cb <= cbAligned, VERR_INVALID_PARAMETER);
581 AssertReturn(PhysHighest >= cb, VERR_INVALID_PARAMETER);
582 RT_ASSERT_PREEMPTIBLE();
583
584 /* do the allocation. */
585 return rtR0MemObjNativeAllocPhys(pMemObj, cbAligned, PhysHighest, PAGE_SIZE /* page aligned */);
586}
587RT_EXPORT_SYMBOL(RTR0MemObjAllocPhys);
588
589
/**
 * Allocates contiguous physical memory without (necessarily) any kernel mapping.
 *
 * @returns IPRT status code.
 * @param   pMemObj     Where to store the ring-0 memory object handle.
 * @param   cb          Number of bytes to allocate. This is rounded up to nearest page.
 * @param   PhysHighest The highest permittable address (inclusive).
 *                      Pass NIL_RTHCPHYS if any address is acceptable.
 * @param   uAlignment  The alignment of the physical memory to allocate.
 *                      Supported values are 0 (alias for PAGE_SIZE), PAGE_SIZE, _2M, _4M and _1G.
 */
RTR0DECL(int) RTR0MemObjAllocPhysEx(PRTR0MEMOBJ pMemObj, size_t cb, RTHCPHYS PhysHighest, size_t uAlignment)
{
    /* sanity checks. */
    const size_t cbAligned = RT_ALIGN_Z(cb, PAGE_SIZE);
    AssertPtrReturn(pMemObj, VERR_INVALID_POINTER);
    *pMemObj = NIL_RTR0MEMOBJ;
    AssertReturn(cb > 0, VERR_INVALID_PARAMETER);
    AssertReturn(cb <= cbAligned, VERR_INVALID_PARAMETER); /* catches size overflow from rounding. */
    AssertReturn(PhysHighest >= cb, VERR_INVALID_PARAMETER);
    if (uAlignment == 0)
        uAlignment = PAGE_SIZE; /* 0 is documented as an alias for PAGE_SIZE. */
    AssertReturn(    uAlignment == PAGE_SIZE
                 ||  uAlignment == _2M
                 ||  uAlignment == _4M
                 ||  uAlignment == _1G,
                 VERR_INVALID_PARAMETER);
#if HC_ARCH_BITS == 32
    /* Memory allocated in this way is typically mapped into kernel space as well; simply
       don't allow this on 32 bits hosts as the kernel space is too crowded already. */
    if (uAlignment != PAGE_SIZE)
        return VERR_NOT_SUPPORTED;
#endif
    RT_ASSERT_PREEMPTIBLE();

    /* do the allocation. */
    return rtR0MemObjNativeAllocPhys(pMemObj, cbAligned, PhysHighest, uAlignment);
}
RT_EXPORT_SYMBOL(RTR0MemObjAllocPhysEx);
629
630
631/**
632 * Allocates non-contiguous page aligned physical memory without (necessarily) any kernel mapping.
633 *
634 * @returns IPRT status code.
635 * @param pMemObj Where to store the ring-0 memory object handle.
636 * @param cb Number of bytes to allocate. This is rounded up to nearest page.
637 * @param PhysHighest The highest permittable address (inclusive).
638 * Pass NIL_RTHCPHYS if any address is acceptable.
639 */
640RTR0DECL(int) RTR0MemObjAllocPhysNC(PRTR0MEMOBJ pMemObj, size_t cb, RTHCPHYS PhysHighest)
641{
642 /* sanity checks. */
643 const size_t cbAligned = RT_ALIGN_Z(cb, PAGE_SIZE);
644 AssertPtrReturn(pMemObj, VERR_INVALID_POINTER);
645 *pMemObj = NIL_RTR0MEMOBJ;
646 AssertReturn(cb > 0, VERR_INVALID_PARAMETER);
647 AssertReturn(cb <= cbAligned, VERR_INVALID_PARAMETER);
648 AssertReturn(PhysHighest >= cb, VERR_INVALID_PARAMETER);
649 RT_ASSERT_PREEMPTIBLE();
650
651 /* do the allocation. */
652 return rtR0MemObjNativeAllocPhysNC(pMemObj, cbAligned, PhysHighest);
653}
654RT_EXPORT_SYMBOL(RTR0MemObjAllocPhysNC);
655
656
657/**
658 * Creates a page aligned, contiguous, physical memory object.
659 *
660 * No physical memory is allocated, we trust you do know what you're doing.
661 *
662 * @returns IPRT status code.
663 * @param pMemObj Where to store the ring-0 memory object handle.
664 * @param Phys The physical address to start at. This is rounded down to the
665 * nearest page boundrary.
666 * @param cb The size of the object in bytes. This is rounded up to nearest page boundrary.
667 */
668RTR0DECL(int) RTR0MemObjEnterPhys(PRTR0MEMOBJ pMemObj, RTHCPHYS Phys, size_t cb)
669{
670 /* sanity checks. */
671 const size_t cbAligned = RT_ALIGN_Z(cb + (Phys & PAGE_OFFSET_MASK), PAGE_SIZE);
672 const RTHCPHYS PhysAligned = Phys & ~(RTHCPHYS)PAGE_OFFSET_MASK;
673 AssertPtrReturn(pMemObj, VERR_INVALID_POINTER);
674 *pMemObj = NIL_RTR0MEMOBJ;
675 AssertReturn(cb > 0, VERR_INVALID_PARAMETER);
676 AssertReturn(cb <= cbAligned, VERR_INVALID_PARAMETER);
677 AssertReturn(Phys != NIL_RTHCPHYS, VERR_INVALID_PARAMETER);
678 RT_ASSERT_PREEMPTIBLE();
679
680 /* do the allocation. */
681 return rtR0MemObjNativeEnterPhys(pMemObj, PhysAligned, cbAligned);
682}
683RT_EXPORT_SYMBOL(RTR0MemObjEnterPhys);
684
685
686/**
687 * Reserves kernel virtual address space.
688 *
689 * @returns IPRT status code.
690 * @param pMemObj Where to store the ring-0 memory object handle.
691 * @param pvFixed Requested address. (void *)-1 means any address. This must match the alignment.
692 * @param cb The number of bytes to reserve. This is rounded up to nearest page.
693 * @param uAlignment The alignment of the reserved memory.
694 * Supported values are 0 (alias for PAGE_SIZE), PAGE_SIZE, _2M and _4M.
695 */
696RTR0DECL(int) RTR0MemObjReserveKernel(PRTR0MEMOBJ pMemObj, void *pvFixed, size_t cb, size_t uAlignment)
697{
698 /* sanity checks. */
699 const size_t cbAligned = RT_ALIGN_Z(cb, PAGE_SIZE);
700 AssertPtrReturn(pMemObj, VERR_INVALID_POINTER);
701 *pMemObj = NIL_RTR0MEMOBJ;
702 if (uAlignment == 0)
703 uAlignment = PAGE_SIZE;
704 AssertReturn(uAlignment == PAGE_SIZE || uAlignment == _2M || uAlignment == _4M, VERR_INVALID_PARAMETER);
705 AssertReturn(cb > 0, VERR_INVALID_PARAMETER);
706 AssertReturn(cb <= cbAligned, VERR_INVALID_PARAMETER);
707 if (pvFixed != (void *)-1)
708 AssertReturn(!((uintptr_t)pvFixed & (uAlignment - 1)), VERR_INVALID_PARAMETER);
709 RT_ASSERT_PREEMPTIBLE();
710
711 /* do the reservation. */
712 return rtR0MemObjNativeReserveKernel(pMemObj, pvFixed, cbAligned, uAlignment);
713}
714RT_EXPORT_SYMBOL(RTR0MemObjReserveKernel);
715
716
717/**
718 * Reserves user virtual address space in the current process.
719 *
720 * @returns IPRT status code.
721 * @param pMemObj Where to store the ring-0 memory object handle.
722 * @param R3PtrFixed Requested address. (RTR3PTR)-1 means any address. This must match the alignment.
723 * @param cb The number of bytes to reserve. This is rounded up to nearest PAGE_SIZE.
724 * @param uAlignment The alignment of the reserved memory.
725 * Supported values are 0 (alias for PAGE_SIZE), PAGE_SIZE, _2M and _4M.
726 * @param R0Process The process to reserve the memory in. NIL_RTR0PROCESS is an alias for the current one.
727 */
728RTR0DECL(int) RTR0MemObjReserveUser(PRTR0MEMOBJ pMemObj, RTR3PTR R3PtrFixed, size_t cb, size_t uAlignment, RTR0PROCESS R0Process)
729{
730 /* sanity checks. */
731 const size_t cbAligned = RT_ALIGN_Z(cb, PAGE_SIZE);
732 AssertPtrReturn(pMemObj, VERR_INVALID_POINTER);
733 *pMemObj = NIL_RTR0MEMOBJ;
734 if (uAlignment == 0)
735 uAlignment = PAGE_SIZE;
736 AssertReturn(uAlignment == PAGE_SIZE || uAlignment == _2M || uAlignment == _4M, VERR_INVALID_PARAMETER);
737 AssertReturn(cb > 0, VERR_INVALID_PARAMETER);
738 AssertReturn(cb <= cbAligned, VERR_INVALID_PARAMETER);
739 if (R3PtrFixed != (RTR3PTR)-1)
740 AssertReturn(!(R3PtrFixed & (uAlignment - 1)), VERR_INVALID_PARAMETER);
741 if (R0Process == NIL_RTR0PROCESS)
742 R0Process = RTR0ProcHandleSelf();
743 RT_ASSERT_PREEMPTIBLE();
744
745 /* do the reservation. */
746 return rtR0MemObjNativeReserveUser(pMemObj, R3PtrFixed, cbAligned, uAlignment, R0Process);
747}
748RT_EXPORT_SYMBOL(RTR0MemObjReserveUser);
749
750
751/**
752 * Maps a memory object into kernel virtual address space.
753 *
754 * @returns IPRT status code.
755 * @param pMemObj Where to store the ring-0 memory object handle of the mapping object.
756 * @param MemObjToMap The object to be map.
757 * @param pvFixed Requested address. (void *)-1 means any address. This must match the alignment.
758 * @param uAlignment The alignment of the reserved memory.
759 * Supported values are 0 (alias for PAGE_SIZE), PAGE_SIZE, _2M and _4M.
760 * @param fProt Combination of RTMEM_PROT_* flags (except RTMEM_PROT_NONE).
761 */
762RTR0DECL(int) RTR0MemObjMapKernel(PRTR0MEMOBJ pMemObj, RTR0MEMOBJ MemObjToMap, void *pvFixed, size_t uAlignment, unsigned fProt)
763{
764 return RTR0MemObjMapKernelEx(pMemObj, MemObjToMap, pvFixed, uAlignment, fProt, 0, 0);
765}
766RT_EXPORT_SYMBOL(RTR0MemObjMapKernel);
767
768
/**
 * Maps a memory object into kernel virtual address space.
 *
 * The ability to map subsections of the object into kernel space is currently
 * not implemented on all platforms. All/Most of platforms supports mapping the
 * whole object into kernel space.
 *
 * @returns IPRT status code.
 * @retval  VERR_NOT_SUPPORTED if it's not possible to map a subsection of a
 *          memory object on this platform. When you hit this, try implement it.
 *
 * @param   pMemObj     Where to store the ring-0 memory object handle of the mapping object.
 * @param   MemObjToMap The object to be mapped.
 * @param   pvFixed     Requested address. (void *)-1 means any address. This must match the alignment.
 * @param   uAlignment  The alignment of the reserved memory.
 *                      Supported values are 0 (alias for PAGE_SIZE), PAGE_SIZE, _2M and _4M.
 * @param   fProt       Combination of RTMEM_PROT_* flags (except RTMEM_PROT_NONE).
 * @param   offSub      Where in the object to start mapping. If non-zero
 *                      the value must be page aligned and cbSub must be
 *                      non-zero as well.
 * @param   cbSub       The size of the part of the object to be mapped. If
 *                      zero the entire object is mapped. The value must be
 *                      page aligned.
 */
RTR0DECL(int) RTR0MemObjMapKernelEx(PRTR0MEMOBJ pMemObj, RTR0MEMOBJ MemObjToMap, void *pvFixed, size_t uAlignment,
                                    unsigned fProt, size_t offSub, size_t cbSub)
{
    PRTR0MEMOBJINTERNAL pMemToMap;
    PRTR0MEMOBJINTERNAL pNew;
    int                 rc;

    /* sanity checks. */
    AssertPtrReturn(pMemObj, VERR_INVALID_POINTER);
    *pMemObj = NIL_RTR0MEMOBJ;
    AssertPtrReturn(MemObjToMap, VERR_INVALID_HANDLE);
    pMemToMap = (PRTR0MEMOBJINTERNAL)MemObjToMap;
    AssertReturn(pMemToMap->u32Magic == RTR0MEMOBJ_MAGIC, VERR_INVALID_HANDLE);
    AssertReturn(pMemToMap->enmType > RTR0MEMOBJTYPE_INVALID && pMemToMap->enmType < RTR0MEMOBJTYPE_END, VERR_INVALID_HANDLE);
    /* Mapping a mapping, or a pure address-space reservation, is not allowed. */
    AssertReturn(!rtR0MemObjIsMapping(pMemToMap), VERR_INVALID_PARAMETER);
    AssertReturn(pMemToMap->enmType != RTR0MEMOBJTYPE_RES_VIRT, VERR_INVALID_PARAMETER);
    if (uAlignment == 0)
        uAlignment = PAGE_SIZE; /* 0 is an alias for the default page alignment. */
    AssertReturn(uAlignment == PAGE_SIZE || uAlignment == _2M || uAlignment == _4M, VERR_INVALID_PARAMETER);
    if (pvFixed != (void *)-1)
        AssertReturn(!((uintptr_t)pvFixed & (uAlignment - 1)), VERR_INVALID_PARAMETER);
    AssertReturn(fProt != RTMEM_PROT_NONE, VERR_INVALID_PARAMETER);
    AssertReturn(!(fProt & ~(RTMEM_PROT_READ | RTMEM_PROT_WRITE | RTMEM_PROT_EXEC)), VERR_INVALID_PARAMETER);
    /* The sub-range must be page aligned and fall entirely within the object. */
    AssertReturn(!(offSub & PAGE_OFFSET_MASK), VERR_INVALID_PARAMETER);
    AssertReturn(offSub < pMemToMap->cb, VERR_INVALID_PARAMETER);
    AssertReturn(!(cbSub & PAGE_OFFSET_MASK), VERR_INVALID_PARAMETER);
    AssertReturn(cbSub <= pMemToMap->cb, VERR_INVALID_PARAMETER);
    AssertReturn((!offSub && !cbSub) || (offSub + cbSub) <= pMemToMap->cb, VERR_INVALID_PARAMETER);
    RT_ASSERT_PREEMPTIBLE();

    /* adjust the request to simplify the native code: a full-object sub-range
       is canonicalized to cbSub = 0 ("map everything"). */
    if (offSub == 0 && cbSub == pMemToMap->cb)
        cbSub = 0;

    /* do the mapping. */
    rc = rtR0MemObjNativeMapKernel(&pNew, pMemToMap, pvFixed, uAlignment, fProt, offSub, cbSub);
    if (RT_SUCCESS(rc))
    {
        /* link it. */
        rc = rtR0MemObjLink(pMemToMap, pNew);
        if (RT_SUCCESS(rc))
            *pMemObj = pNew;
        else
        {
            /* damn, out of memory. bail out: undo the native mapping and
               destroy the unlinked handle (magic invalidated first). */
            int rc2 = rtR0MemObjNativeFree(pNew);
            AssertRC(rc2);
            pNew->u32Magic++;
            pNew->enmType = RTR0MEMOBJTYPE_END;
            RTMemFree(pNew);
        }
    }

    return rc;
}
RT_EXPORT_SYMBOL(RTR0MemObjMapKernelEx);
849
850
851/**
852 * Maps a memory object into user virtual address space in the current process.
853 *
854 * @returns IPRT status code.
855 * @param pMemObj Where to store the ring-0 memory object handle of the mapping object.
 * @param MemObjToMap The object to be mapped.
857 * @param R3PtrFixed Requested address. (RTR3PTR)-1 means any address. This must match the alignment.
858 * @param uAlignment The alignment of the reserved memory.
859 * Supported values are 0 (alias for PAGE_SIZE), PAGE_SIZE, _2M and _4M.
860 * @param fProt Combination of RTMEM_PROT_* flags (except RTMEM_PROT_NONE).
861 * @param R0Process The process to map the memory into. NIL_RTR0PROCESS is an alias for the current one.
862 */
863RTR0DECL(int) RTR0MemObjMapUser(PRTR0MEMOBJ pMemObj, RTR0MEMOBJ MemObjToMap, RTR3PTR R3PtrFixed, size_t uAlignment, unsigned fProt, RTR0PROCESS R0Process)
864{
865 /* sanity checks. */
866 PRTR0MEMOBJINTERNAL pMemToMap;
867 PRTR0MEMOBJINTERNAL pNew;
868 int rc;
869 AssertPtrReturn(pMemObj, VERR_INVALID_POINTER);
870 pMemToMap = (PRTR0MEMOBJINTERNAL)MemObjToMap;
871 *pMemObj = NIL_RTR0MEMOBJ;
872 AssertPtrReturn(MemObjToMap, VERR_INVALID_HANDLE);
873 AssertReturn(pMemToMap->u32Magic == RTR0MEMOBJ_MAGIC, VERR_INVALID_HANDLE);
874 AssertReturn(pMemToMap->enmType > RTR0MEMOBJTYPE_INVALID && pMemToMap->enmType < RTR0MEMOBJTYPE_END, VERR_INVALID_HANDLE);
875 AssertReturn(!rtR0MemObjIsMapping(pMemToMap), VERR_INVALID_PARAMETER);
876 AssertReturn(pMemToMap->enmType != RTR0MEMOBJTYPE_RES_VIRT, VERR_INVALID_PARAMETER);
877 if (uAlignment == 0)
878 uAlignment = PAGE_SIZE;
879 AssertReturn(uAlignment == PAGE_SIZE || uAlignment == _2M || uAlignment == _4M, VERR_INVALID_PARAMETER);
880 if (R3PtrFixed != (RTR3PTR)-1)
881 AssertReturn(!(R3PtrFixed & (uAlignment - 1)), VERR_INVALID_PARAMETER);
882 AssertReturn(fProt != RTMEM_PROT_NONE, VERR_INVALID_PARAMETER);
883 AssertReturn(!(fProt & ~(RTMEM_PROT_READ | RTMEM_PROT_WRITE | RTMEM_PROT_EXEC)), VERR_INVALID_PARAMETER);
884 if (R0Process == NIL_RTR0PROCESS)
885 R0Process = RTR0ProcHandleSelf();
886 RT_ASSERT_PREEMPTIBLE();
887
888 /* do the mapping. */
889 rc = rtR0MemObjNativeMapUser(&pNew, pMemToMap, R3PtrFixed, uAlignment, fProt, R0Process);
890 if (RT_SUCCESS(rc))
891 {
892 /* link it. */
893 rc = rtR0MemObjLink(pMemToMap, pNew);
894 if (RT_SUCCESS(rc))
895 *pMemObj = pNew;
896 else
897 {
898 /* damn, out of memory. bail out. */
899 int rc2 = rtR0MemObjNativeFree(pNew);
900 AssertRC(rc2);
901 pNew->u32Magic++;
902 pNew->enmType = RTR0MEMOBJTYPE_END;
903 RTMemFree(pNew);
904 }
905 }
906
907 return rc;
908}
909RT_EXPORT_SYMBOL(RTR0MemObjMapUser);
910
911
912RTR0DECL(int) RTR0MemObjProtect(RTR0MEMOBJ hMemObj, size_t offSub, size_t cbSub, uint32_t fProt)
913{
914 PRTR0MEMOBJINTERNAL pMemObj;
915 int rc;
916
917 /* sanity checks. */
918 pMemObj = (PRTR0MEMOBJINTERNAL)hMemObj;
919 AssertPtrReturn(pMemObj, VERR_INVALID_HANDLE);
920 AssertReturn(pMemObj->u32Magic == RTR0MEMOBJ_MAGIC, VERR_INVALID_HANDLE);
921 AssertReturn(pMemObj->enmType > RTR0MEMOBJTYPE_INVALID && pMemObj->enmType < RTR0MEMOBJTYPE_END, VERR_INVALID_HANDLE);
922 AssertReturn(rtR0MemObjIsProtectable(pMemObj), VERR_INVALID_PARAMETER);
923 AssertReturn(!(offSub & PAGE_OFFSET_MASK), VERR_INVALID_PARAMETER);
924 AssertReturn(offSub < pMemObj->cb, VERR_INVALID_PARAMETER);
925 AssertReturn(!(cbSub & PAGE_OFFSET_MASK), VERR_INVALID_PARAMETER);
926 AssertReturn(cbSub <= pMemObj->cb, VERR_INVALID_PARAMETER);
927 AssertReturn(offSub + cbSub <= pMemObj->cb, VERR_INVALID_PARAMETER);
928 AssertReturn(!(fProt & ~(RTMEM_PROT_NONE | RTMEM_PROT_READ | RTMEM_PROT_WRITE | RTMEM_PROT_EXEC)), VERR_INVALID_PARAMETER);
929 RT_ASSERT_PREEMPTIBLE();
930
931 /* do the job */
932 rc = rtR0MemObjNativeProtect(pMemObj, offSub, cbSub, fProt);
933 if (RT_SUCCESS(rc))
934 pMemObj->fFlags |= RTR0MEMOBJ_FLAGS_PROT_CHANGED; /* record it */
935
936 return rc;
937}
938RT_EXPORT_SYMBOL(RTR0MemObjProtect);
939
Note: See TracBrowser for help on using the repository browser.

© 2024 Oracle Support Privacy / Do Not Sell My Info Terms of Use Trademark Policy Automated Access Etiquette