VirtualBox

source: vbox/trunk/src/VBox/Runtime/r0drv/memobj-r0drv.cpp@ 92250

Last change on this file since 92250 was 92250, checked in by vboxsync, 3 years ago

IPRT/RTR0MemObj: Added RTR0MemObjWasZeroInitialized and a couple of flags with which the backend can feed it the necessary info. It would be good to try to avoid zeroing memory twice when we can. bugref:10093

  • Property svn:eol-style set to native
  • Property svn:keywords set to Id Rev Revision
File size: 33.8 KB
/* $Id: memobj-r0drv.cpp 92250 2021-11-06 15:58:20Z vboxsync $ */
/** @file
 * IPRT - Ring-0 Memory Objects, Common Code.
 */

/*
 * Copyright (C) 2006-2020 Oracle Corporation
 *
 * This file is part of VirtualBox Open Source Edition (OSE), as
 * available from http://www.virtualbox.org. This file is free software;
 * you can redistribute it and/or modify it under the terms of the GNU
 * General Public License (GPL) as published by the Free Software
 * Foundation, in version 2 as it comes in the "COPYING" file of the
 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
 *
 * The contents of this file may alternatively be used under the terms
 * of the Common Development and Distribution License Version 1.0
 * (CDDL) only, as it comes in the "COPYING.CDDL" file of the
 * VirtualBox OSE distribution, in which case the provisions of the
 * CDDL are applicable instead of those of the GPL.
 *
 * You may elect to license modified versions of this file under the
 * terms and conditions of either the GPL or the CDDL or both.
 */


/*********************************************************************************************************************************
*   Header Files                                                                                                                  *
*********************************************************************************************************************************/
#define LOG_GROUP RTLOGGROUP_DEFAULT /// @todo RTLOGGROUP_MEM
#define RTMEM_NO_WRAP_TO_EF_APIS /* circular dependency otherwise. */
#include <iprt/memobj.h>
#include "internal/iprt.h"

#include <iprt/alloc.h>
#include <iprt/asm.h>
#include <iprt/assert.h>
#include <iprt/err.h>
#include <iprt/log.h>
#include <iprt/mp.h>
#include <iprt/param.h>
#include <iprt/process.h>
#include <iprt/thread.h>

#include "internal/memobj.h"


/**
 * Internal function for allocating a new memory object.
 *
 * @returns The allocated and initialized handle.
 * @param   cbSelf      The size of the memory object handle. 0 means default size.
 * @param   enmType     The memory object type.
 * @param   pv          The memory object mapping.
 * @param   cb          The size of the memory object.
 * @param   pszTag      The tag string.
 */
DECLHIDDEN(PRTR0MEMOBJINTERNAL) rtR0MemObjNew(size_t cbSelf, RTR0MEMOBJTYPE enmType, void *pv, size_t cb, const char *pszTag)
{
    PRTR0MEMOBJINTERNAL pNew;

    /* validate the size */
    if (!cbSelf)
        cbSelf = sizeof(*pNew);
    Assert(cbSelf >= sizeof(*pNew));
    Assert(cbSelf == (uint32_t)cbSelf);
    AssertMsg(RT_ALIGN_Z(cb, PAGE_SIZE) == cb, ("%#zx\n", cb));

    /*
     * Allocate and initialize the object.
     */
    pNew = (PRTR0MEMOBJINTERNAL)RTMemAllocZ(cbSelf);
    if (pNew)
    {
        pNew->u32Magic = RTR0MEMOBJ_MAGIC;
        pNew->cbSelf   = (uint32_t)cbSelf;
        pNew->enmType  = enmType;
        pNew->fFlags   = 0;
        pNew->cb       = cb;
        pNew->pv       = pv;
#ifdef DEBUG
        pNew->pszTag   = pszTag;
#else
        RT_NOREF_PV(pszTag);
#endif
    }
    return pNew;
}


/**
 * Deletes an incomplete memory object.
 *
 * This is for cleaning up after failures during object creation.
 *
 * @param   pMem    The incomplete memory object to delete.
 */
DECLHIDDEN(void) rtR0MemObjDelete(PRTR0MEMOBJINTERNAL pMem)
{
    if (pMem)
    {
        ASMAtomicUoWriteU32(&pMem->u32Magic, ~RTR0MEMOBJ_MAGIC);
        pMem->enmType = RTR0MEMOBJTYPE_END;
        RTMemFree(pMem);
    }
}


/**
 * Links a mapping object to a primary object.
 *
 * @returns IPRT status code.
 * @retval  VINF_SUCCESS on success.
 * @retval  VINF_NO_MEMORY if we couldn't expand the mapping array of the parent.
 * @param   pParent     The parent (primary) memory object.
 * @param   pChild      The child (mapping) memory object.
 */
static int rtR0MemObjLink(PRTR0MEMOBJINTERNAL pParent, PRTR0MEMOBJINTERNAL pChild)
{
    uint32_t i;

    /* sanity */
    Assert(rtR0MemObjIsMapping(pChild));
    Assert(!rtR0MemObjIsMapping(pParent));

    /* expand the array? */
    i = pParent->uRel.Parent.cMappings;
    if (i >= pParent->uRel.Parent.cMappingsAllocated)
    {
        void *pv = RTMemRealloc(pParent->uRel.Parent.papMappings,
                                (i + 32) * sizeof(pParent->uRel.Parent.papMappings[0]));
        if (!pv)
            return VERR_NO_MEMORY;
        pParent->uRel.Parent.papMappings = (PPRTR0MEMOBJINTERNAL)pv;
        pParent->uRel.Parent.cMappingsAllocated = i + 32;
        Assert(i == pParent->uRel.Parent.cMappings);
    }

    /* do the linking. */
    pParent->uRel.Parent.papMappings[i] = pChild;
    pParent->uRel.Parent.cMappings++;
    pChild->uRel.Child.pParent = pParent;

    return VINF_SUCCESS;
}


/**
 * Checks if this is a mapping or not.
 *
 * @returns true if it's a mapping, otherwise false.
 * @param   MemObj      The ring-0 memory object handle.
 */
RTR0DECL(bool) RTR0MemObjIsMapping(RTR0MEMOBJ MemObj)
{
    /* Validate the object handle. */
    PRTR0MEMOBJINTERNAL pMem;
    AssertPtrReturn(MemObj, false);
    pMem = (PRTR0MEMOBJINTERNAL)MemObj;
    AssertMsgReturn(pMem->u32Magic == RTR0MEMOBJ_MAGIC, ("%p: %#x\n", pMem, pMem->u32Magic), false);
    AssertMsgReturn(pMem->enmType > RTR0MEMOBJTYPE_INVALID && pMem->enmType < RTR0MEMOBJTYPE_END, ("%p: %d\n", pMem, pMem->enmType), false);

    /* hand it on to the inlined worker. */
    return rtR0MemObjIsMapping(pMem);
}
RT_EXPORT_SYMBOL(RTR0MemObjIsMapping);


/**
 * Gets the address of a ring-0 memory object.
 *
 * @returns The address of the memory object.
 * @returns NULL if the handle is invalid (asserts in strict builds) or if there isn't any mapping.
 * @param   MemObj      The ring-0 memory object handle.
 */
RTR0DECL(void *) RTR0MemObjAddress(RTR0MEMOBJ MemObj)
{
    /* Validate the object handle. */
    PRTR0MEMOBJINTERNAL pMem;
    if (RT_UNLIKELY(MemObj == NIL_RTR0MEMOBJ))
        return NULL;
    AssertPtrReturn(MemObj, NULL);
    pMem = (PRTR0MEMOBJINTERNAL)MemObj;
    AssertMsgReturn(pMem->u32Magic == RTR0MEMOBJ_MAGIC, ("%p: %#x\n", pMem, pMem->u32Magic), NULL);
    AssertMsgReturn(pMem->enmType > RTR0MEMOBJTYPE_INVALID && pMem->enmType < RTR0MEMOBJTYPE_END, ("%p: %d\n", pMem, pMem->enmType), NULL);

    /* return the mapping address. */
    return pMem->pv;
}
RT_EXPORT_SYMBOL(RTR0MemObjAddress);
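
/*
 * Illustrative usage (not part of the original file): a minimal sketch of how a
 * ring-0 caller might allocate a page-aligned buffer, obtain its kernel address
 * and free it again.  The function and tag names below are made up for the
 * example; error handling is abbreviated.
 */
#if 0
static int rtR0MemObjExampleAlloc(void)
{
    RTR0MEMOBJ hMemObj;
    int rc = RTR0MemObjAllocPageTag(&hMemObj, _64K, false /*fExecutable*/, "example");
    if (RT_SUCCESS(rc))
    {
        void *pv = RTR0MemObjAddress(hMemObj);  /* ring-0 mapping of the allocation */
        Assert(pv != NULL);
        /* ... use the buffer ... */
        rc = RTR0MemObjFree(hMemObj, false /*fFreeMappings*/);
    }
    return rc;
}
#endif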


/**
 * Gets the ring-3 address of a ring-0 memory object.
 *
 * This only applies to ring-0 memory objects with ring-3 mappings of some kind, i.e.
 * locked user memory, reserved user address space and user mappings. This API should
 * not be used on any other objects.
 *
 * @returns The address of the memory object.
 * @returns NIL_RTR3PTR if the handle is invalid or if it's not an object with a ring-3 mapping.
 *          Strict builds will assert in both cases.
 * @param   MemObj      The ring-0 memory object handle.
 */
RTR0DECL(RTR3PTR) RTR0MemObjAddressR3(RTR0MEMOBJ MemObj)
{
    PRTR0MEMOBJINTERNAL pMem;

    /* Validate the object handle. */
    if (RT_UNLIKELY(MemObj == NIL_RTR0MEMOBJ))
        return NIL_RTR3PTR;
    AssertPtrReturn(MemObj, NIL_RTR3PTR);
    pMem = (PRTR0MEMOBJINTERNAL)MemObj;
    AssertMsgReturn(pMem->u32Magic == RTR0MEMOBJ_MAGIC, ("%p: %#x\n", pMem, pMem->u32Magic), NIL_RTR3PTR);
    AssertMsgReturn(pMem->enmType > RTR0MEMOBJTYPE_INVALID && pMem->enmType < RTR0MEMOBJTYPE_END, ("%p: %d\n", pMem, pMem->enmType), NIL_RTR3PTR);
    if (RT_UNLIKELY(    (   pMem->enmType != RTR0MEMOBJTYPE_MAPPING
                         || pMem->u.Mapping.R0Process == NIL_RTR0PROCESS)
                    &&  (   pMem->enmType != RTR0MEMOBJTYPE_LOCK
                         || pMem->u.Lock.R0Process == NIL_RTR0PROCESS)
                    &&  (   pMem->enmType != RTR0MEMOBJTYPE_PHYS_NC
                         || pMem->u.Lock.R0Process == NIL_RTR0PROCESS)
                    &&  (   pMem->enmType != RTR0MEMOBJTYPE_RES_VIRT
                         || pMem->u.ResVirt.R0Process == NIL_RTR0PROCESS)))
        return NIL_RTR3PTR;

    /* return the mapping address. */
    return (RTR3PTR)pMem->pv;
}
RT_EXPORT_SYMBOL(RTR0MemObjAddressR3);


/**
 * Gets the size of a ring-0 memory object.
 *
 * The returned value may differ from the one specified to the API creating the
 * object because of alignment adjustments.  The minimal alignment currently
 * employed by any API is PAGE_SIZE, so the result can safely be shifted by
 * PAGE_SHIFT to calculate a page count.
 *
 * @returns The object size.
 * @returns 0 if the handle is invalid (asserts in strict builds) or if there isn't any mapping.
 * @param   MemObj      The ring-0 memory object handle.
 */
RTR0DECL(size_t) RTR0MemObjSize(RTR0MEMOBJ MemObj)
{
    PRTR0MEMOBJINTERNAL pMem;

    /* Validate the object handle. */
    if (RT_UNLIKELY(MemObj == NIL_RTR0MEMOBJ))
        return 0;
    AssertPtrReturn(MemObj, 0);
    pMem = (PRTR0MEMOBJINTERNAL)MemObj;
    AssertMsgReturn(pMem->u32Magic == RTR0MEMOBJ_MAGIC, ("%p: %#x\n", pMem, pMem->u32Magic), 0);
    AssertMsgReturn(pMem->enmType > RTR0MEMOBJTYPE_INVALID && pMem->enmType < RTR0MEMOBJTYPE_END, ("%p: %d\n", pMem, pMem->enmType), 0);
    AssertMsg(RT_ALIGN_Z(pMem->cb, PAGE_SIZE) == pMem->cb, ("%#zx\n", pMem->cb));

    /* return the size. */
    return pMem->cb;
}
RT_EXPORT_SYMBOL(RTR0MemObjSize);
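
/*
 * Illustrative usage (not part of the original file): because RTR0MemObjSize
 * always returns a PAGE_SIZE aligned value, a caller can derive a page count by
 * shifting, as the doc comment above notes.  A minimal sketch:
 */
#if 0
static size_t rtR0MemObjExamplePageCount(RTR0MEMOBJ hMemObj)
{
    size_t const cb = RTR0MemObjSize(hMemObj);
    return cb >> PAGE_SHIFT;    /* safe: cb is always a multiple of PAGE_SIZE */
}
#endif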


/**
 * Gets the physical address of a page in the memory object.
 *
 * @returns The physical address.
 * @returns NIL_RTHCPHYS if the object doesn't contain fixed physical pages.
 * @returns NIL_RTHCPHYS if the iPage is out of range.
 * @returns NIL_RTHCPHYS if the object handle isn't valid.
 * @param   MemObj      The ring-0 memory object handle.
 * @param   iPage       The page number within the object.
 */
/* Work around gcc bug 55940 */
#if defined(__GNUC__) && defined(RT_ARCH_X86) && (__GNUC__ * 100 + __GNUC_MINOR__) == 407
 __attribute__((__optimize__ ("no-shrink-wrap")))
#endif
RTR0DECL(RTHCPHYS) RTR0MemObjGetPagePhysAddr(RTR0MEMOBJ MemObj, size_t iPage)
{
    /* Validate the object handle. */
    PRTR0MEMOBJINTERNAL pMem;
    size_t cPages;
    AssertPtrReturn(MemObj, NIL_RTHCPHYS);
    pMem = (PRTR0MEMOBJINTERNAL)MemObj;
    AssertReturn(pMem->u32Magic == RTR0MEMOBJ_MAGIC, NIL_RTHCPHYS);
    AssertReturn(pMem->enmType > RTR0MEMOBJTYPE_INVALID && pMem->enmType < RTR0MEMOBJTYPE_END, NIL_RTHCPHYS);
    AssertMsgReturn(pMem->u32Magic == RTR0MEMOBJ_MAGIC, ("%p: %#x\n", pMem, pMem->u32Magic), NIL_RTHCPHYS);
    AssertMsgReturn(pMem->enmType > RTR0MEMOBJTYPE_INVALID && pMem->enmType < RTR0MEMOBJTYPE_END, ("%p: %d\n", pMem, pMem->enmType), NIL_RTHCPHYS);
    cPages = (pMem->cb >> PAGE_SHIFT);
    if (iPage >= cPages)
    {
        /* permit: while (RTR0MemObjGetPagePhysAddr(pMem, iPage++) != NIL_RTHCPHYS) {} */
        if (iPage == cPages)
            return NIL_RTHCPHYS;
        AssertReturn(iPage < (pMem->cb >> PAGE_SHIFT), NIL_RTHCPHYS);
    }

    /*
     * We know the address of physically contiguous allocations and mappings.
     */
    if (pMem->enmType == RTR0MEMOBJTYPE_CONT)
        return pMem->u.Cont.Phys + iPage * PAGE_SIZE;
    if (pMem->enmType == RTR0MEMOBJTYPE_PHYS)
        return pMem->u.Phys.PhysBase + iPage * PAGE_SIZE;

    /*
     * Do the job.
     */
    return rtR0MemObjNativeGetPagePhysAddr(pMem, iPage);
}
RT_EXPORT_SYMBOL(RTR0MemObjGetPagePhysAddr);
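
/*
 * Illustrative usage (not part of the original file): walking the physical
 * pages backing an object.  The loop relies on the behaviour noted in the
 * function above: querying the index one past the last page returns
 * NIL_RTHCPHYS instead of asserting.
 */
#if 0
static void rtR0MemObjExampleDumpPages(RTR0MEMOBJ hMemObj)
{
    size_t   iPage = 0;
    RTHCPHYS HCPhys;
    while ((HCPhys = RTR0MemObjGetPagePhysAddr(hMemObj, iPage)) != NIL_RTHCPHYS)
    {
        Log(("page %zu: %RHp\n", iPage, HCPhys));
        iPage++;
    }
}
#endif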


/**
 * Checks whether the allocation was zero initialized or not.
 *
 * This only works on allocations.  It is not meaningful for mappings, reserved
 * memory and entered physical addresses, and will return false for these.
 *
 * @returns true if the allocation was initialized to zero at allocation time,
 *          false if not or if the query isn't meaningful for the object type.
 * @param   hMemObj     The ring-0 memory object handle.
 *
 * @remarks It can be expected that memory allocated in the same fashion will
 *          have the same initialization state.  So, if this returns true for
 *          one allocation it will return true for all other similarly made
 *          allocations.
 */
RTR0DECL(bool) RTR0MemObjWasZeroInitialized(RTR0MEMOBJ hMemObj)
{
    PRTR0MEMOBJINTERNAL pMem;

    /* Validate the object handle. */
    if (RT_UNLIKELY(hMemObj == NIL_RTR0MEMOBJ))
        return false;
    AssertPtrReturn(hMemObj, false);
    pMem = (PRTR0MEMOBJINTERNAL)hMemObj;
    AssertMsgReturn(pMem->u32Magic == RTR0MEMOBJ_MAGIC, ("%p: %#x\n", pMem, pMem->u32Magic), false);
    AssertMsgReturn(pMem->enmType > RTR0MEMOBJTYPE_INVALID && pMem->enmType < RTR0MEMOBJTYPE_END, ("%p: %d\n", pMem, pMem->enmType), false);
    Assert(   (pMem->fFlags & (RTR0MEMOBJ_FLAGS_ZERO_AT_ALLOC | RTR0MEMOBJ_FLAGS_UNINITIALIZED_AT_ALLOC))
           != (RTR0MEMOBJ_FLAGS_ZERO_AT_ALLOC | RTR0MEMOBJ_FLAGS_UNINITIALIZED_AT_ALLOC));

    /* return the alloc init state. */
    return (pMem->fFlags & (RTR0MEMOBJ_FLAGS_ZERO_AT_ALLOC | RTR0MEMOBJ_FLAGS_UNINITIALIZED_AT_ALLOC))
        == RTR0MEMOBJ_FLAGS_ZERO_AT_ALLOC;
}
RT_EXPORT_SYMBOL(RTR0MemObjWasZeroInitialized);
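
/*
 * Illustrative usage (not part of the original file): the motivation for this
 * query (see the r92250 change description) is avoiding zeroing memory twice.
 * A caller might use it as sketched below; note that RT_BZERO comes from
 * iprt/string.h, which this file does not include.
 */
#if 0
static int rtR0MemObjExampleAllocZeroed(PRTR0MEMOBJ phMemObj, size_t cb)
{
    int rc = RTR0MemObjAllocPageTag(phMemObj, cb, false /*fExecutable*/, "example-zero");
    if (RT_SUCCESS(rc) && !RTR0MemObjWasZeroInitialized(*phMemObj))
        RT_BZERO(RTR0MemObjAddress(*phMemObj), RTR0MemObjSize(*phMemObj)); /* only zero if the backend didn't */
    return rc;
}
#endif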


/**
 * Frees a ring-0 memory object.
 *
 * @returns IPRT status code.
 * @retval  VERR_INVALID_HANDLE if the handle isn't valid.
 * @param   MemObj          The ring-0 memory object to be freed. NIL is
 *                          accepted.
 * @param   fFreeMappings   Whether or not to free mappings of the object.
 */
RTR0DECL(int) RTR0MemObjFree(RTR0MEMOBJ MemObj, bool fFreeMappings)
{
    /*
     * Validate the object handle.
     */
    PRTR0MEMOBJINTERNAL pMem;
    int rc;

    if (MemObj == NIL_RTR0MEMOBJ)
        return VINF_SUCCESS;
    AssertPtrReturn(MemObj, VERR_INVALID_HANDLE);
    pMem = (PRTR0MEMOBJINTERNAL)MemObj;
    AssertReturn(pMem->u32Magic == RTR0MEMOBJ_MAGIC, VERR_INVALID_HANDLE);
    AssertReturn(pMem->enmType > RTR0MEMOBJTYPE_INVALID && pMem->enmType < RTR0MEMOBJTYPE_END, VERR_INVALID_HANDLE);
    RT_ASSERT_PREEMPTIBLE();

    /*
     * Deal with mappings according to fFreeMappings.
     */
    if (   !rtR0MemObjIsMapping(pMem)
        && pMem->uRel.Parent.cMappings > 0)
    {
        /* fail if not requested to free mappings. */
        if (!fFreeMappings)
            return VERR_MEMORY_BUSY;

        while (pMem->uRel.Parent.cMappings > 0)
        {
            PRTR0MEMOBJINTERNAL pChild = pMem->uRel.Parent.papMappings[--pMem->uRel.Parent.cMappings];
            pMem->uRel.Parent.papMappings[pMem->uRel.Parent.cMappings] = NULL;

            /* sanity checks. */
            AssertPtr(pChild);
            AssertFatal(pChild->u32Magic == RTR0MEMOBJ_MAGIC);
            AssertFatal(pChild->enmType > RTR0MEMOBJTYPE_INVALID && pChild->enmType < RTR0MEMOBJTYPE_END);
            AssertFatal(rtR0MemObjIsMapping(pChild));

            /* free the mapping. */
            rc = rtR0MemObjNativeFree(pChild);
            if (RT_FAILURE(rc))
            {
                Log(("RTR0MemObjFree: failed to free mapping %p: %p %#zx; rc=%Rrc\n", pChild, pChild->pv, pChild->cb, rc));
                pMem->uRel.Parent.papMappings[pMem->uRel.Parent.cMappings++] = pChild;
                return rc;
            }

            pChild->u32Magic++;
            pChild->enmType = RTR0MEMOBJTYPE_END;
            RTMemFree(pChild);
        }
    }

    /*
     * Free this object.
     */
    rc = rtR0MemObjNativeFree(pMem);
    if (RT_SUCCESS(rc))
    {
        /*
         * Ok, it was freed just fine. Now, if it's a mapping we'll have to remove it from the parent.
         */
        if (rtR0MemObjIsMapping(pMem))
        {
            PRTR0MEMOBJINTERNAL pParent = pMem->uRel.Child.pParent;
            uint32_t i;

            /* sanity checks */
            AssertPtr(pParent);
            AssertFatal(pParent->u32Magic == RTR0MEMOBJ_MAGIC);
            AssertFatal(pParent->enmType > RTR0MEMOBJTYPE_INVALID && pParent->enmType < RTR0MEMOBJTYPE_END);
            AssertFatal(!rtR0MemObjIsMapping(pParent));
            AssertFatal(pParent->uRel.Parent.cMappings > 0);
            AssertPtr(pParent->uRel.Parent.papMappings);

            /* locate and remove from the array of mappings. */
            i = pParent->uRel.Parent.cMappings;
            while (i-- > 0)
            {
                if (pParent->uRel.Parent.papMappings[i] == pMem)
                {
                    pParent->uRel.Parent.papMappings[i] = pParent->uRel.Parent.papMappings[--pParent->uRel.Parent.cMappings];
                    break;
                }
            }
            Assert(i != UINT32_MAX);
        }
        else
            Assert(pMem->uRel.Parent.cMappings == 0);

        /*
         * Finally, destroy the handle.
         */
        pMem->u32Magic++;
        pMem->enmType = RTR0MEMOBJTYPE_END;
        if (!rtR0MemObjIsMapping(pMem))
            RTMemFree(pMem->uRel.Parent.papMappings);
        RTMemFree(pMem);
    }
    else
        Log(("RTR0MemObjFree: failed to free %p: %d %p %#zx; rc=%Rrc\n",
             pMem, pMem->enmType, pMem->pv, pMem->cb, rc));
    return rc;
}
RT_EXPORT_SYMBOL(RTR0MemObjFree);
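
/*
 * Illustrative usage (not part of the original file): if a primary object still
 * has live mappings, RTR0MemObjFree(hMemObj, false) fails with VERR_MEMORY_BUSY,
 * while passing true tears the mappings down first.  A caller holding both
 * handles would typically free them in the order sketched below instead.
 */
#if 0
static void rtR0MemObjExampleFreeOrder(RTR0MEMOBJ hMapObj, RTR0MEMOBJ hMemObj)
{
    int rc = RTR0MemObjFree(hMapObj, false /*fFreeMappings*/);  /* the mapping first */
    AssertRC(rc);
    rc = RTR0MemObjFree(hMemObj, false /*fFreeMappings*/);      /* then the backing object */
    AssertRC(rc);
}
#endif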



RTR0DECL(int) RTR0MemObjAllocPageTag(PRTR0MEMOBJ pMemObj, size_t cb, bool fExecutable, const char *pszTag)
{
    /* sanity checks. */
    const size_t cbAligned = RT_ALIGN_Z(cb, PAGE_SIZE);
    AssertPtrReturn(pMemObj, VERR_INVALID_POINTER);
    *pMemObj = NIL_RTR0MEMOBJ;
    AssertReturn(cb > 0, VERR_INVALID_PARAMETER);
    AssertReturn(cb <= cbAligned, VERR_INVALID_PARAMETER);
    RT_ASSERT_PREEMPTIBLE();

    /* do the allocation. */
    return rtR0MemObjNativeAllocPage(pMemObj, cbAligned, fExecutable, pszTag);
}
RT_EXPORT_SYMBOL(RTR0MemObjAllocPageTag);


RTR0DECL(int) RTR0MemObjAllocLargeTag(PRTR0MEMOBJ pMemObj, size_t cb, size_t cbLargePage, uint32_t fFlags, const char *pszTag)
{
    /* sanity checks. */
    const size_t cbAligned = RT_ALIGN_Z(cb, cbLargePage);
    AssertPtrReturn(pMemObj, VERR_INVALID_POINTER);
    *pMemObj = NIL_RTR0MEMOBJ;
#ifdef RT_ARCH_AMD64
    AssertReturn(cbLargePage == _2M || cbLargePage == _1G, VERR_OUT_OF_RANGE);
#elif defined(RT_ARCH_X86)
    AssertReturn(cbLargePage == _2M || cbLargePage == _4M, VERR_OUT_OF_RANGE);
#else
    AssertReturn(RT_IS_POWER_OF_TWO(cbLargePage), VERR_NOT_POWER_OF_TWO);
    AssertReturn(cbLargePage > PAGE_SIZE, VERR_OUT_OF_RANGE);
#endif
    AssertReturn(cb > 0, VERR_INVALID_PARAMETER);
    AssertReturn(cb <= cbAligned, VERR_INVALID_PARAMETER);
    AssertReturn(!(fFlags & ~RTMEMOBJ_ALLOC_LARGE_F_VALID_MASK), VERR_INVALID_PARAMETER);
    RT_ASSERT_PREEMPTIBLE();

    /* do the allocation. */
    return rtR0MemObjNativeAllocLarge(pMemObj, cbAligned, cbLargePage, fFlags, pszTag);
}
RT_EXPORT_SYMBOL(RTR0MemObjAllocLargeTag);
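
/*
 * Illustrative usage (not part of the original file): a sketch of requesting a
 * buffer backed by 2 MB large pages.  fFlags is left at 0 here; which flag bits
 * are useful depends on the host backend, so none are assumed.
 */
#if 0
static int rtR0MemObjExampleAllocLarge(PRTR0MEMOBJ phMemObj)
{
    return RTR0MemObjAllocLargeTag(phMemObj, 4 * _2M /*cb*/, _2M /*cbLargePage*/, 0 /*fFlags*/, "example-large");
}
#endif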


/**
 * Fallback implementation of rtR0MemObjNativeAllocLarge, implementing the
 * single large page case using rtR0MemObjNativeAllocPhys.
 */
DECLHIDDEN(int) rtR0MemObjFallbackAllocLarge(PPRTR0MEMOBJINTERNAL ppMem, size_t cb, size_t cbLargePage, uint32_t fFlags,
                                             const char *pszTag)
{
    RT_NOREF(pszTag, fFlags);
    if (cb == cbLargePage)
        return rtR0MemObjNativeAllocPhys(ppMem, cb, NIL_RTHCPHYS, cbLargePage, pszTag);
    return VERR_NOT_SUPPORTED;
}


RTR0DECL(int) RTR0MemObjAllocLowTag(PRTR0MEMOBJ pMemObj, size_t cb, bool fExecutable, const char *pszTag)
{
    /* sanity checks. */
    const size_t cbAligned = RT_ALIGN_Z(cb, PAGE_SIZE);
    AssertPtrReturn(pMemObj, VERR_INVALID_POINTER);
    *pMemObj = NIL_RTR0MEMOBJ;
    AssertReturn(cb > 0, VERR_INVALID_PARAMETER);
    AssertReturn(cb <= cbAligned, VERR_INVALID_PARAMETER);
    RT_ASSERT_PREEMPTIBLE();

    /* do the allocation. */
    return rtR0MemObjNativeAllocLow(pMemObj, cbAligned, fExecutable, pszTag);
}
RT_EXPORT_SYMBOL(RTR0MemObjAllocLowTag);


RTR0DECL(int) RTR0MemObjAllocContTag(PRTR0MEMOBJ pMemObj, size_t cb, bool fExecutable, const char *pszTag)
{
    /* sanity checks. */
    const size_t cbAligned = RT_ALIGN_Z(cb, PAGE_SIZE);
    AssertPtrReturn(pMemObj, VERR_INVALID_POINTER);
    *pMemObj = NIL_RTR0MEMOBJ;
    AssertReturn(cb > 0, VERR_INVALID_PARAMETER);
    AssertReturn(cb <= cbAligned, VERR_INVALID_PARAMETER);
    RT_ASSERT_PREEMPTIBLE();

    /* do the allocation. */
    return rtR0MemObjNativeAllocCont(pMemObj, cbAligned, fExecutable, pszTag);
}
RT_EXPORT_SYMBOL(RTR0MemObjAllocContTag);


RTR0DECL(int) RTR0MemObjLockUserTag(PRTR0MEMOBJ pMemObj, RTR3PTR R3Ptr, size_t cb,
                                    uint32_t fAccess, RTR0PROCESS R0Process, const char *pszTag)
{
    /* sanity checks. */
    const size_t cbAligned = RT_ALIGN_Z(cb + (R3Ptr & PAGE_OFFSET_MASK), PAGE_SIZE);
    RTR3PTR const R3PtrAligned = (R3Ptr & ~(RTR3PTR)PAGE_OFFSET_MASK);
    AssertPtrReturn(pMemObj, VERR_INVALID_POINTER);
    *pMemObj = NIL_RTR0MEMOBJ;
    AssertReturn(cb > 0, VERR_INVALID_PARAMETER);
    AssertReturn(cb <= cbAligned, VERR_INVALID_PARAMETER);
    if (R0Process == NIL_RTR0PROCESS)
        R0Process = RTR0ProcHandleSelf();
    AssertReturn(!(fAccess & ~(RTMEM_PROT_READ | RTMEM_PROT_WRITE)), VERR_INVALID_PARAMETER);
    AssertReturn(fAccess, VERR_INVALID_PARAMETER);
    RT_ASSERT_PREEMPTIBLE();

    /* do the locking. */
    return rtR0MemObjNativeLockUser(pMemObj, R3PtrAligned, cbAligned, fAccess, R0Process, pszTag);
}
RT_EXPORT_SYMBOL(RTR0MemObjLockUserTag);
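
/*
 * Illustrative usage (not part of the original file): a sketch of locking a
 * user-mode buffer of the calling process for read/write access and querying
 * the physical address of its first page.  Variable names are made up for the
 * example.
 */
#if 0
static int rtR0MemObjExampleLockUser(RTR3PTR R3PtrBuf, size_t cbBuf)
{
    RTR0MEMOBJ hLockObj;
    int rc = RTR0MemObjLockUserTag(&hLockObj, R3PtrBuf, cbBuf, RTMEM_PROT_READ | RTMEM_PROT_WRITE,
                                   NIL_RTR0PROCESS /* current process */, "example-lock");
    if (RT_SUCCESS(rc))
    {
        RTHCPHYS const HCPhysFirst = RTR0MemObjGetPagePhysAddr(hLockObj, 0);
        Log(("first page at %RHp\n", HCPhysFirst));
        rc = RTR0MemObjFree(hLockObj, false /*fFreeMappings*/);
    }
    return rc;
}
#endif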


RTR0DECL(int) RTR0MemObjLockKernelTag(PRTR0MEMOBJ pMemObj, void *pv, size_t cb, uint32_t fAccess, const char *pszTag)
{
    /* sanity checks. */
    const size_t cbAligned = RT_ALIGN_Z(cb + ((uintptr_t)pv & PAGE_OFFSET_MASK), PAGE_SIZE);
    void * const pvAligned = (void *)((uintptr_t)pv & ~(uintptr_t)PAGE_OFFSET_MASK);
    AssertPtrReturn(pMemObj, VERR_INVALID_POINTER);
    *pMemObj = NIL_RTR0MEMOBJ;
    AssertReturn(cb > 0, VERR_INVALID_PARAMETER);
    AssertReturn(cb <= cbAligned, VERR_INVALID_PARAMETER);
    AssertPtrReturn(pvAligned, VERR_INVALID_POINTER);
    AssertReturn(!(fAccess & ~(RTMEM_PROT_READ | RTMEM_PROT_WRITE)), VERR_INVALID_PARAMETER);
    AssertReturn(fAccess, VERR_INVALID_PARAMETER);
    RT_ASSERT_PREEMPTIBLE();

    /* do the locking. */
    return rtR0MemObjNativeLockKernel(pMemObj, pvAligned, cbAligned, fAccess, pszTag);
}
RT_EXPORT_SYMBOL(RTR0MemObjLockKernelTag);


RTR0DECL(int) RTR0MemObjAllocPhysTag(PRTR0MEMOBJ pMemObj, size_t cb, RTHCPHYS PhysHighest, const char *pszTag)
{
    /* sanity checks. */
    const size_t cbAligned = RT_ALIGN_Z(cb, PAGE_SIZE);
    AssertPtrReturn(pMemObj, VERR_INVALID_POINTER);
    *pMemObj = NIL_RTR0MEMOBJ;
    AssertReturn(cb > 0, VERR_INVALID_PARAMETER);
    AssertReturn(cb <= cbAligned, VERR_INVALID_PARAMETER);
    AssertReturn(PhysHighest >= cb, VERR_INVALID_PARAMETER);
    RT_ASSERT_PREEMPTIBLE();

    /* do the allocation. */
    return rtR0MemObjNativeAllocPhys(pMemObj, cbAligned, PhysHighest, PAGE_SIZE /* page aligned */, pszTag);
}
RT_EXPORT_SYMBOL(RTR0MemObjAllocPhysTag);


RTR0DECL(int) RTR0MemObjAllocPhysExTag(PRTR0MEMOBJ pMemObj, size_t cb, RTHCPHYS PhysHighest, size_t uAlignment, const char *pszTag)
{
    /* sanity checks. */
    const size_t cbAligned = RT_ALIGN_Z(cb, PAGE_SIZE);
    AssertPtrReturn(pMemObj, VERR_INVALID_POINTER);
    *pMemObj = NIL_RTR0MEMOBJ;
    AssertReturn(cb > 0, VERR_INVALID_PARAMETER);
    AssertReturn(cb <= cbAligned, VERR_INVALID_PARAMETER);
    AssertReturn(PhysHighest >= cb, VERR_INVALID_PARAMETER);
    if (uAlignment == 0)
        uAlignment = PAGE_SIZE;
    AssertReturn(    uAlignment == PAGE_SIZE
                 ||  uAlignment == _2M
                 ||  uAlignment == _4M
                 ||  uAlignment == _1G,
                 VERR_INVALID_PARAMETER);
#if HC_ARCH_BITS == 32
    /* Memory allocated in this way is typically mapped into kernel space as well; simply
       don't allow this on 32 bits hosts as the kernel space is too crowded already. */
    if (uAlignment != PAGE_SIZE)
        return VERR_NOT_SUPPORTED;
#endif
    RT_ASSERT_PREEMPTIBLE();

    /* do the allocation. */
    return rtR0MemObjNativeAllocPhys(pMemObj, cbAligned, PhysHighest, uAlignment, pszTag);
}
RT_EXPORT_SYMBOL(RTR0MemObjAllocPhysExTag);


RTR0DECL(int) RTR0MemObjAllocPhysNCTag(PRTR0MEMOBJ pMemObj, size_t cb, RTHCPHYS PhysHighest, const char *pszTag)
{
    /* sanity checks. */
    const size_t cbAligned = RT_ALIGN_Z(cb, PAGE_SIZE);
    AssertPtrReturn(pMemObj, VERR_INVALID_POINTER);
    *pMemObj = NIL_RTR0MEMOBJ;
    AssertReturn(cb > 0, VERR_INVALID_PARAMETER);
    AssertReturn(cb <= cbAligned, VERR_INVALID_PARAMETER);
    AssertReturn(PhysHighest >= cb, VERR_INVALID_PARAMETER);
    RT_ASSERT_PREEMPTIBLE();

    /* do the allocation. */
    return rtR0MemObjNativeAllocPhysNC(pMemObj, cbAligned, PhysHighest, pszTag);
}
RT_EXPORT_SYMBOL(RTR0MemObjAllocPhysNCTag);


RTR0DECL(int) RTR0MemObjEnterPhysTag(PRTR0MEMOBJ pMemObj, RTHCPHYS Phys, size_t cb, uint32_t uCachePolicy, const char *pszTag)
{
    /* sanity checks. */
    const size_t cbAligned = RT_ALIGN_Z(cb + (Phys & PAGE_OFFSET_MASK), PAGE_SIZE);
    const RTHCPHYS PhysAligned = Phys & ~(RTHCPHYS)PAGE_OFFSET_MASK;
    AssertPtrReturn(pMemObj, VERR_INVALID_POINTER);
    *pMemObj = NIL_RTR0MEMOBJ;
    AssertReturn(cb > 0, VERR_INVALID_PARAMETER);
    AssertReturn(cb <= cbAligned, VERR_INVALID_PARAMETER);
    AssertReturn(Phys != NIL_RTHCPHYS, VERR_INVALID_PARAMETER);
    AssertReturn(   uCachePolicy == RTMEM_CACHE_POLICY_DONT_CARE
                 || uCachePolicy == RTMEM_CACHE_POLICY_MMIO,
                 VERR_INVALID_PARAMETER);
    RT_ASSERT_PREEMPTIBLE();

    /* do the allocation. */
    return rtR0MemObjNativeEnterPhys(pMemObj, PhysAligned, cbAligned, uCachePolicy, pszTag);
}
RT_EXPORT_SYMBOL(RTR0MemObjEnterPhysTag);


RTR0DECL(int) RTR0MemObjReserveKernelTag(PRTR0MEMOBJ pMemObj, void *pvFixed, size_t cb, size_t uAlignment, const char *pszTag)
{
    /* sanity checks. */
    const size_t cbAligned = RT_ALIGN_Z(cb, PAGE_SIZE);
    AssertPtrReturn(pMemObj, VERR_INVALID_POINTER);
    *pMemObj = NIL_RTR0MEMOBJ;
    if (uAlignment == 0)
        uAlignment = PAGE_SIZE;
    AssertReturn(uAlignment == PAGE_SIZE || uAlignment == _2M || uAlignment == _4M, VERR_INVALID_PARAMETER);
    AssertReturn(cb > 0, VERR_INVALID_PARAMETER);
    AssertReturn(cb <= cbAligned, VERR_INVALID_PARAMETER);
    if (pvFixed != (void *)-1)
        AssertReturn(!((uintptr_t)pvFixed & (uAlignment - 1)), VERR_INVALID_PARAMETER);
    RT_ASSERT_PREEMPTIBLE();

    /* do the reservation. */
    return rtR0MemObjNativeReserveKernel(pMemObj, pvFixed, cbAligned, uAlignment, pszTag);
}
RT_EXPORT_SYMBOL(RTR0MemObjReserveKernelTag);


RTR0DECL(int) RTR0MemObjReserveUserTag(PRTR0MEMOBJ pMemObj, RTR3PTR R3PtrFixed, size_t cb,
                                       size_t uAlignment, RTR0PROCESS R0Process, const char *pszTag)
{
    /* sanity checks. */
    const size_t cbAligned = RT_ALIGN_Z(cb, PAGE_SIZE);
    AssertPtrReturn(pMemObj, VERR_INVALID_POINTER);
    *pMemObj = NIL_RTR0MEMOBJ;
    if (uAlignment == 0)
        uAlignment = PAGE_SIZE;
    AssertReturn(uAlignment == PAGE_SIZE || uAlignment == _2M || uAlignment == _4M, VERR_INVALID_PARAMETER);
    AssertReturn(cb > 0, VERR_INVALID_PARAMETER);
    AssertReturn(cb <= cbAligned, VERR_INVALID_PARAMETER);
    if (R3PtrFixed != (RTR3PTR)-1)
        AssertReturn(!(R3PtrFixed & (uAlignment - 1)), VERR_INVALID_PARAMETER);
    if (R0Process == NIL_RTR0PROCESS)
        R0Process = RTR0ProcHandleSelf();
    RT_ASSERT_PREEMPTIBLE();

    /* do the reservation. */
    return rtR0MemObjNativeReserveUser(pMemObj, R3PtrFixed, cbAligned, uAlignment, R0Process, pszTag);
}
RT_EXPORT_SYMBOL(RTR0MemObjReserveUserTag);


RTR0DECL(int) RTR0MemObjMapKernelTag(PRTR0MEMOBJ pMemObj, RTR0MEMOBJ MemObjToMap, void *pvFixed,
                                     size_t uAlignment, unsigned fProt, const char *pszTag)
{
    return RTR0MemObjMapKernelExTag(pMemObj, MemObjToMap, pvFixed, uAlignment, fProt, 0, 0, pszTag);
}
RT_EXPORT_SYMBOL(RTR0MemObjMapKernelTag);


RTR0DECL(int) RTR0MemObjMapKernelExTag(PRTR0MEMOBJ pMemObj, RTR0MEMOBJ MemObjToMap, void *pvFixed, size_t uAlignment,
                                       unsigned fProt, size_t offSub, size_t cbSub, const char *pszTag)
{
    PRTR0MEMOBJINTERNAL pMemToMap;
    PRTR0MEMOBJINTERNAL pNew;
    int rc;

    /* sanity checks. */
    AssertPtrReturn(pMemObj, VERR_INVALID_POINTER);
    *pMemObj = NIL_RTR0MEMOBJ;
    AssertPtrReturn(MemObjToMap, VERR_INVALID_HANDLE);
    pMemToMap = (PRTR0MEMOBJINTERNAL)MemObjToMap;
    AssertReturn(pMemToMap->u32Magic == RTR0MEMOBJ_MAGIC, VERR_INVALID_HANDLE);
    AssertReturn(pMemToMap->enmType > RTR0MEMOBJTYPE_INVALID && pMemToMap->enmType < RTR0MEMOBJTYPE_END, VERR_INVALID_HANDLE);
    AssertReturn(!rtR0MemObjIsMapping(pMemToMap), VERR_INVALID_PARAMETER);
    AssertReturn(pMemToMap->enmType != RTR0MEMOBJTYPE_RES_VIRT, VERR_INVALID_PARAMETER);
    if (uAlignment == 0)
        uAlignment = PAGE_SIZE;
    AssertReturn(uAlignment == PAGE_SIZE || uAlignment == _2M || uAlignment == _4M, VERR_INVALID_PARAMETER);
    if (pvFixed != (void *)-1)
        AssertReturn(!((uintptr_t)pvFixed & (uAlignment - 1)), VERR_INVALID_PARAMETER);
    AssertReturn(fProt != RTMEM_PROT_NONE, VERR_INVALID_PARAMETER);
    AssertReturn(!(fProt & ~(RTMEM_PROT_READ | RTMEM_PROT_WRITE | RTMEM_PROT_EXEC)), VERR_INVALID_PARAMETER);
    AssertReturn(!(offSub & PAGE_OFFSET_MASK), VERR_INVALID_PARAMETER);
    AssertReturn(offSub < pMemToMap->cb, VERR_INVALID_PARAMETER);
    AssertReturn(!(cbSub & PAGE_OFFSET_MASK), VERR_INVALID_PARAMETER);
    AssertReturn(cbSub <= pMemToMap->cb, VERR_INVALID_PARAMETER);
    AssertReturn((!offSub && !cbSub) || (offSub + cbSub) <= pMemToMap->cb, VERR_INVALID_PARAMETER);
    RT_ASSERT_PREEMPTIBLE();

    /* adjust the request to simplify the native code. */
    if (offSub == 0 && cbSub == pMemToMap->cb)
        cbSub = 0;

    /* do the mapping. */
    rc = rtR0MemObjNativeMapKernel(&pNew, pMemToMap, pvFixed, uAlignment, fProt, offSub, cbSub, pszTag);
    if (RT_SUCCESS(rc))
    {
        /* link it. */
        rc = rtR0MemObjLink(pMemToMap, pNew);
        if (RT_SUCCESS(rc))
            *pMemObj = pNew;
        else
        {
            /* damn, out of memory. bail out. */
            int rc2 = rtR0MemObjNativeFree(pNew);
            AssertRC(rc2);
            pNew->u32Magic++;
            pNew->enmType = RTR0MEMOBJTYPE_END;
            RTMemFree(pNew);
        }
    }

    return rc;
}
RT_EXPORT_SYMBOL(RTR0MemObjMapKernelExTag);
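
/*
 * Illustrative usage (not part of the original file): a sketch of mapping the
 * first page of an existing (at least page-sized) allocation read-only into
 * kernel space and tearing the mapping down again.  Handle names are made up
 * for the example.
 */
#if 0
static int rtR0MemObjExampleMapKernel(RTR0MEMOBJ hMemObj)
{
    RTR0MEMOBJ hMapObj;
    int rc = RTR0MemObjMapKernelExTag(&hMapObj, hMemObj, (void *)-1 /* no fixed address */, 0 /* default alignment */,
                                      RTMEM_PROT_READ, 0 /*offSub*/, PAGE_SIZE /*cbSub*/, "example-map");
    if (RT_SUCCESS(rc))
    {
        void const *pvRO = RTR0MemObjAddress(hMapObj);
        Log(("read-only view at %p\n", pvRO));
        rc = RTR0MemObjFree(hMapObj, false /*fFreeMappings*/);
    }
    return rc;
}
#endif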


RTR0DECL(int) RTR0MemObjMapUserTag(PRTR0MEMOBJ pMemObj, RTR0MEMOBJ MemObjToMap, RTR3PTR R3PtrFixed,
                                   size_t uAlignment, unsigned fProt, RTR0PROCESS R0Process, const char *pszTag)
{
    return RTR0MemObjMapUserExTag(pMemObj, MemObjToMap, R3PtrFixed, uAlignment, fProt, R0Process, 0, 0, pszTag);
}
RT_EXPORT_SYMBOL(RTR0MemObjMapUserTag);


RTR0DECL(int) RTR0MemObjMapUserExTag(PRTR0MEMOBJ pMemObj, RTR0MEMOBJ MemObjToMap, RTR3PTR R3PtrFixed, size_t uAlignment,
                                     unsigned fProt, RTR0PROCESS R0Process, size_t offSub, size_t cbSub, const char *pszTag)
{
    /* sanity checks. */
    PRTR0MEMOBJINTERNAL pMemToMap;
    PRTR0MEMOBJINTERNAL pNew;
    int rc;
    AssertPtrReturn(pMemObj, VERR_INVALID_POINTER);
    pMemToMap = (PRTR0MEMOBJINTERNAL)MemObjToMap;
    *pMemObj = NIL_RTR0MEMOBJ;
    AssertPtrReturn(MemObjToMap, VERR_INVALID_HANDLE);
    AssertReturn(pMemToMap->u32Magic == RTR0MEMOBJ_MAGIC, VERR_INVALID_HANDLE);
    AssertReturn(pMemToMap->enmType > RTR0MEMOBJTYPE_INVALID && pMemToMap->enmType < RTR0MEMOBJTYPE_END, VERR_INVALID_HANDLE);
    AssertReturn(!rtR0MemObjIsMapping(pMemToMap), VERR_INVALID_PARAMETER);
    AssertReturn(pMemToMap->enmType != RTR0MEMOBJTYPE_RES_VIRT, VERR_INVALID_PARAMETER);
    if (uAlignment == 0)
        uAlignment = PAGE_SIZE;
    AssertReturn(uAlignment == PAGE_SIZE || uAlignment == _2M || uAlignment == _4M, VERR_INVALID_PARAMETER);
    if (R3PtrFixed != (RTR3PTR)-1)
        AssertReturn(!(R3PtrFixed & (uAlignment - 1)), VERR_INVALID_PARAMETER);
    AssertReturn(fProt != RTMEM_PROT_NONE, VERR_INVALID_PARAMETER);
    AssertReturn(!(fProt & ~(RTMEM_PROT_READ | RTMEM_PROT_WRITE | RTMEM_PROT_EXEC)), VERR_INVALID_PARAMETER);
    AssertReturn(!(offSub & PAGE_OFFSET_MASK), VERR_INVALID_PARAMETER);
    AssertReturn(offSub < pMemToMap->cb, VERR_INVALID_PARAMETER);
    AssertReturn(!(cbSub & PAGE_OFFSET_MASK), VERR_INVALID_PARAMETER);
    AssertReturn(cbSub <= pMemToMap->cb, VERR_INVALID_PARAMETER);
    AssertReturn((!offSub && !cbSub) || (offSub + cbSub) <= pMemToMap->cb, VERR_INVALID_PARAMETER);
    if (R0Process == NIL_RTR0PROCESS)
        R0Process = RTR0ProcHandleSelf();
    RT_ASSERT_PREEMPTIBLE();

    /* adjust the request to simplify the native code. */
    if (offSub == 0 && cbSub == pMemToMap->cb)
        cbSub = 0;

    /* do the mapping. */
    rc = rtR0MemObjNativeMapUser(&pNew, pMemToMap, R3PtrFixed, uAlignment, fProt, R0Process, offSub, cbSub, pszTag);
    if (RT_SUCCESS(rc))
    {
        /* link it. */
        rc = rtR0MemObjLink(pMemToMap, pNew);
        if (RT_SUCCESS(rc))
            *pMemObj = pNew;
        else
        {
            /* damn, out of memory. bail out. */
            int rc2 = rtR0MemObjNativeFree(pNew);
            AssertRC(rc2);
            pNew->u32Magic++;
            pNew->enmType = RTR0MEMOBJTYPE_END;
            RTMemFree(pNew);
        }
    }

    return rc;
}
RT_EXPORT_SYMBOL(RTR0MemObjMapUserExTag);
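
/*
 * Illustrative usage (not part of the original file): a sketch of mapping a
 * ring-0 allocation read/write into the calling user process and retrieving the
 * ring-3 address of the new mapping via RTR0MemObjAddressR3.
 */
#if 0
static int rtR0MemObjExampleMapUser(RTR0MEMOBJ hMemObj, PRTR0MEMOBJ phMapObj, RTR3PTR *pR3Ptr)
{
    int rc = RTR0MemObjMapUserTag(phMapObj, hMemObj, (RTR3PTR)-1 /* no fixed address */, 0 /* default alignment */,
                                  RTMEM_PROT_READ | RTMEM_PROT_WRITE, NIL_RTR0PROCESS /* current process */,
                                  "example-usermap");
    if (RT_SUCCESS(rc))
        *pR3Ptr = RTR0MemObjAddressR3(*phMapObj);   /* the user-mode address of the mapping */
    return rc;
}
#endif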


RTR0DECL(int) RTR0MemObjProtect(RTR0MEMOBJ hMemObj, size_t offSub, size_t cbSub, uint32_t fProt)
{
    PRTR0MEMOBJINTERNAL pMemObj;
    int rc;

    /* sanity checks. */
    pMemObj = (PRTR0MEMOBJINTERNAL)hMemObj;
    AssertPtrReturn(pMemObj, VERR_INVALID_HANDLE);
    AssertReturn(pMemObj->u32Magic == RTR0MEMOBJ_MAGIC, VERR_INVALID_HANDLE);
    AssertReturn(pMemObj->enmType > RTR0MEMOBJTYPE_INVALID && pMemObj->enmType < RTR0MEMOBJTYPE_END, VERR_INVALID_HANDLE);
    AssertReturn(rtR0MemObjIsProtectable(pMemObj), VERR_INVALID_PARAMETER);
    AssertReturn(!(offSub & PAGE_OFFSET_MASK), VERR_INVALID_PARAMETER);
    AssertReturn(offSub < pMemObj->cb, VERR_INVALID_PARAMETER);
    AssertReturn(!(cbSub & PAGE_OFFSET_MASK), VERR_INVALID_PARAMETER);
    AssertReturn(cbSub <= pMemObj->cb, VERR_INVALID_PARAMETER);
    AssertReturn(offSub + cbSub <= pMemObj->cb, VERR_INVALID_PARAMETER);
    AssertReturn(!(fProt & ~(RTMEM_PROT_NONE | RTMEM_PROT_READ | RTMEM_PROT_WRITE | RTMEM_PROT_EXEC)), VERR_INVALID_PARAMETER);
    RT_ASSERT_PREEMPTIBLE();

    /* do the job */
    rc = rtR0MemObjNativeProtect(pMemObj, offSub, cbSub, fProt);
    if (RT_SUCCESS(rc))
        pMemObj->fFlags |= RTR0MEMOBJ_FLAGS_PROT_CHANGED; /* record it */

    return rc;
}
RT_EXPORT_SYMBOL(RTR0MemObjProtect);
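
/*
 * Illustrative usage (not part of the original file): a sketch of turning the
 * first page of a protectable allocation read-only, e.g. to guard a header
 * structure against stray writes.
 */
#if 0
static int rtR0MemObjExampleProtect(RTR0MEMOBJ hMemObj)
{
    return RTR0MemObjProtect(hMemObj, 0 /*offSub*/, PAGE_SIZE /*cbSub*/, RTMEM_PROT_READ);
}
#endif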