
source: vbox/trunk/src/VBox/Runtime/common/alloc/memcache.cpp@26421

Last change on this file since 26421 was 26421, checked in by vboxsync, 15 years ago

dbg build fix

  • Property svn:eol-style set to native
  • Property svn:keywords set to Author Date Id Revision
File size: 15.1 KB
/* $Id: memcache.cpp 26421 2010-02-11 07:30:17Z vboxsync $ */
/** @file
 * IPRT - Memory Object Allocation Cache.
 */

/*
 * Copyright (C) 2006-2010 Sun Microsystems, Inc.
 *
 * This file is part of VirtualBox Open Source Edition (OSE), as
 * available from http://www.virtualbox.org. This file is free software;
 * you can redistribute it and/or modify it under the terms of the GNU
 * General Public License (GPL) as published by the Free Software
 * Foundation, in version 2 as it comes in the "COPYING" file of the
 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
 *
 * The contents of this file may alternatively be used under the terms
 * of the Common Development and Distribution License Version 1.0
 * (CDDL) only, as it comes in the "COPYING.CDDL" file of the
 * VirtualBox OSE distribution, in which case the provisions of the
 * CDDL are applicable instead of those of the GPL.
 *
 * You may elect to license modified versions of this file under the
 * terms and conditions of either the GPL or the CDDL or both.
 *
 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa
 * Clara, CA 95054 USA or visit http://www.sun.com if you need
 * additional information or have any questions.
 */


/*******************************************************************************
*   Header Files                                                               *
*******************************************************************************/
#include <iprt/memcache.h>
#include "internal/iprt.h"

#include <iprt/assert.h>
#include <iprt/asm.h>
#include <iprt/critsect.h>
#include <iprt/err.h>
#include <iprt/mem.h>
#include <iprt/param.h>

#include "internal/magics.h"


/*******************************************************************************
*   Structures and Typedefs                                                    *
*******************************************************************************/
/** Pointer to a cache instance. */
typedef struct RTMEMCACHEINT *PRTMEMCACHEINT;
/** Pointer to a cache page. */
typedef struct RTMEMCACHEPAGE *PRTMEMCACHEPAGE;


/**
 * A cache page.
 *
 * This is a page of memory that we split up into a bunch of object-sized
 * chunks and hand out to the cache users.  The bitmap is updated in an atomic
 * fashion so that we don't have to take any locks when freeing or allocating
 * memory.
 */
typedef struct RTMEMCACHEPAGE
{
    /** Pointer to the cache owning this page.
     * This is used for validation purposes only. */
    PRTMEMCACHEINT              pCache;
    /** Pointer to the next page.
     * This is marked as volatile since we'll be adding new entries to the list
     * without taking any locks. */
    PRTMEMCACHEPAGE volatile    pNext;
    /** Bitmap tracking allocated blocks. */
    void volatile              *pbmAlloc;
    /** Bitmap tracking which blocks have been through the constructor. */
    void volatile              *pbmCtor;
    /** Pointer to the object array. */
    uint8_t                    *pbObjects;
    /** The number of objects on this page. */
    uint32_t                    cObjects;

    /** Padding to force cFree into the next cache line. (ASSUMES CL = 64) */
    uint8_t                     abPadding[ARCH_BITS == 32 ? 64 - 6*4 : 64 - 5*8 - 4];
    /** The number of free objects. */
    int32_t volatile            cFree;
} RTMEMCACHEPAGE;
AssertCompileMemberOffset(RTMEMCACHEPAGE, cFree, 64);
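
/* Worked example of the padding arithmetic above (illustrative; it assumes the
   usual 4/8 byte pointer sizes, the file itself only asserts the resulting
   offset):  on a 64-bit build the five pointers and cObjects occupy
   5*8 + 4 = 44 bytes, so abPadding is 64 - 44 = 20 bytes; on a 32-bit build the
   same fields occupy 6*4 = 24 bytes and abPadding is 64 - 24 = 40 bytes.  Either
   way cFree lands at offset 64, i.e. on its own cache line when cache lines are
   64 bytes, which is what the AssertCompileMemberOffset statement verifies. */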


/**
 * Memory object cache instance.
 */
typedef struct RTMEMCACHEINT
{
    /** Magic value (RTMEMCACHE_MAGIC). */
    uint32_t                    u32Magic;
    /** The object size. */
    uint32_t                    cbObject;
    /** Object alignment. */
    uint32_t                    cbAlignment;
    /** The per page object count. */
    uint32_t                    cPerPage;
    /** Number of bits in the bitmap.
     * @remarks This is greater than or equal to cPerPage and it is aligned such
     *          that the search operation will be most efficient on x86/AMD64. */
    uint32_t                    cBits;
    /** The maximum number of objects. */
    uint32_t                    cMax;
    /** Head of the page list. */
    PRTMEMCACHEPAGE             pPageHead;
    /** Constructor callback. */
    PFNMEMCACHECTOR             pfnCtor;
    /** Destructor callback. */
    PFNMEMCACHEDTOR             pfnDtor;
    /** Callback argument. */
    void                       *pvUser;
    /** Critical section serializing page allocation and similar. */
    RTCRITSECT                  CritSect;

    /** The total object count. */
    uint32_t volatile           cTotal;
    /** The number of free objects. */
    int32_t volatile            cFree;
    /** This may point to a page with free entries. */
    PRTMEMCACHEPAGE volatile    pPageHint;
} RTMEMCACHEINT;

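/*
 * A minimal creation sketch (illustrative only; MYOBJ, myObjCtor and myObjDtor
 * are made-up names and the callback shapes are assumed from the way pfnCtor
 * and pfnDtor are invoked further down in this file):
 *
 *      static DECLCALLBACK(int) myObjCtor(RTMEMCACHE hMemCache, void *pvObj, void *pvUser)
 *      {
 *          MYOBJ *pObj = (MYOBJ *)pvObj;
 *          pObj->fInUse = false;                   // one-time initialization
 *          return VINF_SUCCESS;
 *      }
 *
 *      static DECLCALLBACK(void) myObjDtor(RTMEMCACHE hMemCache, void *pvObj, void *pvUser)
 *      {
 *          // undo whatever myObjCtor did
 *      }
 *
 *      RTMEMCACHE hCache;
 *      int rc = RTMemCacheCreate(&hCache, sizeof(MYOBJ), 0 /*cbAlignment*/,
 *                                4096 /*cMaxObjects*/, myObjCtor, myObjDtor, NULL /*pvUser*/);
 *
 * Both callbacks are optional; note that the constructor only runs the first
 * time a given slot is handed out (tracked by pbmCtor), not on every allocation.
 */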


RTDECL(int) RTMemCacheCreate(PRTMEMCACHE phMemCache, size_t cbObject, size_t cbAlignment, uint32_t cMaxObjects,
                             PFNMEMCACHECTOR pfnCtor, PFNMEMCACHEDTOR pfnDtor, void *pvUser)
{
    AssertPtr(phMemCache);
    AssertPtrNull(pfnCtor);
    AssertPtrNull(pfnDtor);
    AssertReturn(cbObject > 0, VERR_INVALID_PARAMETER);
    AssertReturn(cbObject <= PAGE_SIZE / 8, VERR_INVALID_PARAMETER);

    if (cbAlignment == 0)
    {
        cbAlignment = UINT32_C(1) << ASMBitLastSetU32((uint32_t)cbObject);
        if (cbAlignment > 64)
            cbAlignment = 64;
    }
    else
    {
        AssertReturn(!((cbAlignment - 1) & cbAlignment), VERR_NOT_POWER_OF_TWO);
        AssertReturn(cbAlignment <= 64, VERR_OUT_OF_RANGE);
    }

    /*
     * Allocate and initialize the instance memory.
     */
    RTMEMCACHEINT *pThis = (RTMEMCACHEINT *)RTMemAlloc(sizeof(*pThis));
    if (!pThis)
        return VERR_NO_MEMORY;
    int rc = RTCritSectInit(&pThis->CritSect);
    if (RT_FAILURE(rc))
    {
        RTMemFree(pThis);
        return rc;
    }

    pThis->u32Magic     = RTMEMCACHE_MAGIC;
    pThis->cbObject     = RT_ALIGN_Z(cbObject, cbAlignment);
    pThis->cbAlignment  = cbAlignment;
    pThis->cPerPage     = (PAGE_SIZE - RT_ALIGN_Z(sizeof(RTMEMCACHEPAGE), cbAlignment)) / pThis->cbObject;
    while (   RT_ALIGN_Z(sizeof(RTMEMCACHEPAGE), RT_MIN(cbAlignment, 8))
            + pThis->cPerPage * pThis->cbObject
            + RT_ALIGN(pThis->cPerPage, 64) / 8 * 2
          > PAGE_SIZE)
        pThis->cPerPage--;
    pThis->cBits        = RT_ALIGN(pThis->cPerPage, 64);
    pThis->cMax         = cMaxObjects;
    pThis->cTotal       = 0;
    pThis->cFree        = 0;
    pThis->pPageHead    = NULL;
    pThis->pPageHint    = NULL;
    pThis->pfnCtor      = pfnCtor;
    pThis->pfnDtor      = pfnDtor;
    pThis->pvUser       = pvUser;

    *phMemCache = pThis;
    return VINF_SUCCESS;
}
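
/*
 * Worked example of the sizing above (illustrative; assumes PAGE_SIZE is 4096
 * and sizeof(RTMEMCACHEPAGE) is 72 on a 64-bit build):  for cbObject = 56 and
 * cbAlignment = 0 the default alignment becomes 1 << ASMBitLastSetU32(56) = 64,
 * so cbObject is rounded up to 64.  cPerPage starts out as
 * (4096 - RT_ALIGN_Z(72, 64)) / 64 = (4096 - 128) / 64 = 62, the two bitmaps
 * need RT_ALIGN(62, 64) / 8 * 2 = 16 bytes, and since
 * RT_ALIGN_Z(72, 8) + 62*64 + 16 = 4056 <= 4096 the while loop does not have to
 * shrink it.  cBits ends up as RT_ALIGN(62, 64) = 64.
 */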


RTDECL(int) RTMemCacheDestroy(RTMEMCACHE hMemCache)
{
    RTMEMCACHEINT *pThis = hMemCache;
    if (!pThis)
        return VINF_SUCCESS;
    AssertPtrReturn(pThis, VERR_INVALID_HANDLE);
    AssertReturn(pThis->u32Magic == RTMEMCACHE_MAGIC, VERR_INVALID_HANDLE);
    AssertMsg((uint32_t)pThis->cFree == pThis->cTotal, ("cFree=%u cTotal=%u\n", pThis->cFree, pThis->cTotal));

    /*
     * Destroy it.
     */
    AssertReturn(ASMAtomicCmpXchgU32(&pThis->u32Magic, RTMEMCACHE_MAGIC_DEAD, RTMEMCACHE_MAGIC), VERR_INVALID_HANDLE);
    RTCritSectDelete(&pThis->CritSect);

    while (pThis->pPageHead)
    {
        PRTMEMCACHEPAGE pPage = pThis->pPageHead;
        pThis->pPageHead = pPage->pNext;
        pPage->cFree = 0;

        if (pThis->pfnDtor)
        {
            uint32_t iObj = pPage->cObjects;
            while (iObj-- > 0)
                if (ASMBitTestAndClear(pPage->pbmCtor, iObj))
                    pThis->pfnDtor(hMemCache, pPage->pbObjects + iObj * pThis->cbObject, pThis->pvUser);
        }

        RTMemPageFree(pPage);
    }

    RTMemFree(pThis);
    return VINF_SUCCESS;
}
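
/*
 * Illustrative teardown (a sketch; hCache and pObj are the made-up names from
 * the creation sketch above).  Every object must have been freed first, which
 * is what the cFree == cTotal assertion above checks in strict builds:
 *
 *      RTMemCacheFree(hCache, pObj);       // return any outstanding objects
 *      rc = RTMemCacheDestroy(hCache);     // runs the dtor for every slot the ctor touched
 *      hCache = NULL;
 */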


/**
 * Grows the cache.
 *
 * @returns IPRT status code.
 * @param   pThis       The memory cache instance.
 */
static int rtMemCacheGrow(RTMEMCACHEINT *pThis)
{
    /*
     * Enter the critical section here to avoid allocation races leading to
     * wasted memory (++) and make it easier to link in the new page.
     */
    RTCritSectEnter(&pThis->CritSect);
    int rc = VINF_SUCCESS;
    if (pThis->cFree < 0)
    {
        /*
         * Allocate and initialize the new page.
         *
         * We put the constructor bitmap at the lower end right after cFree.
         * We then push the object array to the end of the page and place the
         * allocation bitmap below it.  The hope is to increase the chance that
         * the allocation bitmap is in a different cache line than cFree since
         * this increases performance markedly when lots of threads are beating
         * on the cache.
         */
        PRTMEMCACHEPAGE pPage = (PRTMEMCACHEPAGE)RTMemPageAlloc(PAGE_SIZE);
        if (pPage)
        {
            uint32_t const cObjects = RT_MIN(pThis->cPerPage, pThis->cMax - pThis->cTotal);

            ASMMemZeroPage(pPage);
            pPage->pCache = pThis;
            pPage->pNext = NULL;
            pPage->cFree = cObjects;
            pPage->cObjects = cObjects;
            uint8_t *pb = (uint8_t *)(pPage + 1);
            pb = RT_ALIGN_PT(pb, 8, uint8_t *);
            pPage->pbmCtor = pb;
            pb += pThis->cBits / 8 * 2;
            pb = (uint8_t *)pPage + PAGE_SIZE - pThis->cbObject * cObjects;
            pPage->pbObjects = pb;   Assert(RT_ALIGN_P(pb, pThis->cbAlignment) == pb);
            pb -= pThis->cBits / 8;
            pb = (uint8_t *)((uintptr_t)pb & ~(uintptr_t)7);
            pPage->pbmAlloc = pb;
//            Assert((uintptr_t)pPage->pbmCtor + pThis->cBits / 8 < (uintptr_t)pPage->pdmCtor);

            /* Mark the bitmap padding and any unused objects as allocated. */
            for (uint32_t iBit = cObjects; iBit < pThis->cBits; iBit++)
                ASMBitSet(pPage->pbmAlloc, iBit);

            /* Make it the hint. */
            ASMAtomicWritePtr((void * volatile *)&pThis->pPageHint, pPage);

            /* Link the page. */
            PRTMEMCACHEPAGE pPrevPage = pThis->pPageHead;
            if (!pPrevPage)
                ASMAtomicWritePtr((void * volatile *)&pThis->pPageHead, pPage);
            else
            {
                while (pPrevPage->pNext)
                    pPrevPage = pPrevPage->pNext;
                ASMAtomicWritePtr((void * volatile *)&pPrevPage->pNext, pPage);
            }

            /* Add it to the page counts. */
            ASMAtomicAddS32(&pThis->cFree, cObjects);
            ASMAtomicAddU32(&pThis->cTotal, cObjects);
        }
        else
            rc = VERR_NO_MEMORY;
    }
    RTCritSectLeave(&pThis->CritSect);
    return rc;
}
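
/*
 * Resulting page layout (a sketch derived from the code above; exact offsets
 * vary with the object size and alignment):
 *
 *      +---------------------+  0
 *      | RTMEMCACHEPAGE      |  page header, cFree on its own cache line
 *      +---------------------+  first 8 byte boundary after the header
 *      | pbmCtor bitmap      |  cBits / 8 bytes
 *      | (unused slack)      |
 *      +---------------------+  8 byte aligned, immediately below the objects
 *      | pbmAlloc bitmap     |  cBits / 8 bytes
 *      +---------------------+
 *      | object 0 .. N-1     |  cObjects * cbObject bytes, ending at PAGE_SIZE
 *      +---------------------+  PAGE_SIZE
 *
 * Keeping pbmAlloc next to the objects rather than next to the header is what
 * the comment above means by getting the allocation bitmap out of cFree's
 * cache line.
 */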


/**
 * Grabs an object in a page.
 * @returns New cFree value on success (0 or higher), -1 on failure.
 * @param   pPage       Pointer to the page.
 */
DECL_FORCE_INLINE(int32_t) rtMemCacheGrabObj(PRTMEMCACHEPAGE pPage)
{
    int32_t cFreeNew = ASMAtomicDecS32(&pPage->cFree);
    if (cFreeNew < 0)
    {
        ASMAtomicIncS32(&pPage->cFree);
        return -1;
    }
    return cFreeNew;
}


RTDECL(int) RTMemCacheAllocEx(RTMEMCACHE hMemCache, void **ppvObj)
{
    RTMEMCACHEINT *pThis = hMemCache;
    AssertPtrReturn(pThis, VERR_INVALID_PARAMETER);
    AssertReturn(pThis->u32Magic == RTMEMCACHE_MAGIC, VERR_INVALID_PARAMETER);

    /*
     * Try to grab a free object at the cache level.
     */
    int32_t cNewFree = ASMAtomicDecS32(&pThis->cFree);
    if (RT_LIKELY(cNewFree < 0))
    {
        uint32_t cTotal = ASMAtomicUoReadU32(&pThis->cTotal);
        if (   (uint32_t)(cTotal + -cNewFree) > pThis->cMax
            || (uint32_t)(cTotal + -cNewFree) <= cTotal)
        {
            ASMAtomicIncS32(&pThis->cFree);
            return VERR_MEM_CACHE_MAX_SIZE;
        }

        int rc = rtMemCacheGrow(pThis);
        if (RT_FAILURE(rc))
        {
            ASMAtomicIncS32(&pThis->cFree);
            return rc;
        }
    }

    /*
     * Grab a free object at the page level.
     */
    PRTMEMCACHEPAGE pPage = (PRTMEMCACHEPAGE)ASMAtomicReadPtr((void * volatile *)&pThis->pPageHint);
    int32_t iObj = pPage ? rtMemCacheGrabObj(pPage) : -1;
    if (iObj < 0)
    {
        for (unsigned cLoops = 0; ; cLoops++)
        {
            for (pPage = pThis->pPageHead; pPage; pPage = pPage->pNext)
            {
                iObj = rtMemCacheGrabObj(pPage);
                if (iObj >= 0)
                {
                    if (iObj > 0)
                        ASMAtomicWritePtr((void * volatile *)&pThis->pPageHint, pPage);
                    break;
                }
            }
            if (iObj >= 0)
                break;
            Assert(cLoops != 2);
            Assert(cLoops < 10);
        }
    }
    Assert(iObj >= 0);
    Assert((uint32_t)iObj < pThis->cMax);

    /*
     * Find a free object in the allocation bitmap.  Use the new cFree count
     * as a hint.
     */
    if (ASMAtomicBitTestAndSet(pPage->pbmAlloc, iObj))
    {
        for (unsigned cLoops2 = 0;; cLoops2++)
        {
            iObj = ASMBitFirstClear(pPage->pbmAlloc, pThis->cBits);
            if (RT_LIKELY(iObj >= 0))
            {
                if (!ASMAtomicBitTestAndSet(pPage->pbmAlloc, iObj))
                    break;
            }
            else
                ASMMemoryFence();
            Assert(cLoops2 != 40);
        }
        Assert(iObj >= 0);
    }
    void *pvObj = &pPage->pbObjects[iObj * pThis->cbObject];
    Assert((uintptr_t)pvObj - (uintptr_t)pPage < PAGE_SIZE);

    /*
     * Call the constructor?
     */
    if (   pThis->pfnCtor
        && !ASMAtomicBitTestAndSet(pPage->pbmCtor, iObj))
    {
        int rc = pThis->pfnCtor(hMemCache, pvObj, pThis->pvUser);
        if (RT_FAILURE(rc))
        {
            ASMAtomicBitClear(pPage->pbmCtor, iObj);
            RTMemCacheFree(pThis, pvObj);
            return rc;
        }
    }

    *ppvObj = pvObj;
    return VINF_SUCCESS;
}
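
/*
 * Illustrative call pattern (MYOBJ again being a made-up caller type):
 *
 *      MYOBJ *pObj;
 *      int rc = RTMemCacheAllocEx(hCache, (void **)&pObj);
 *      if (RT_SUCCESS(rc))
 *      {
 *          // use pObj...
 *          RTMemCacheFree(hCache, pObj);
 *      }
 *
 * rc is VERR_MEM_CACHE_MAX_SIZE once cMaxObjects are outstanding, VERR_NO_MEMORY
 * if a new page cannot be allocated, or whatever a failing constructor returned.
 * RTMemCacheAlloc below is the status-code-free variant and returns NULL on
 * failure.
 */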


RTDECL(void *) RTMemCacheAlloc(RTMEMCACHE hMemCache)
{
    void *pvObj;
    int rc = RTMemCacheAllocEx(hMemCache, &pvObj);
    if (RT_SUCCESS(rc))
        return pvObj;
    return NULL;
}


RTDECL(void) RTMemCacheFree(RTMEMCACHE hMemCache, void *pvObj)
{
    if (!pvObj)
        return;

    RTMEMCACHEINT *pThis = hMemCache;
    AssertPtrReturnVoid(pThis);
    AssertReturnVoid(pThis->u32Magic == RTMEMCACHE_MAGIC);

    AssertPtr(pvObj);
    Assert(RT_ALIGN_P(pvObj, pThis->cbAlignment) == pvObj);

    /* Note: Do *NOT* attempt to poison the object if we have a constructor
             and/or destructor! */

    /*
     * Find the cache page.  The page structure is at the start of the page.
     */
    PRTMEMCACHEPAGE pPage = (PRTMEMCACHEPAGE)(((uintptr_t)pvObj) & ~(uintptr_t)PAGE_OFFSET_MASK);
    Assert(pPage->pCache == pThis);
    Assert(ASMAtomicUoReadS32(&pPage->cFree) < (int32_t)pThis->cPerPage);

    /*
     * Clear the bitmap bit and update the two object counters.  Order matters!
     */
    uintptr_t offObj = (uintptr_t)pvObj - (uintptr_t)pPage->pbObjects;
    uintptr_t iObj = offObj / pThis->cbObject;
    Assert(iObj * pThis->cbObject == offObj);
    Assert(iObj < pThis->cPerPage);
    AssertReturnVoid(ASMAtomicBitTestAndClear(pPage->pbmAlloc, iObj));

    ASMAtomicIncS32(&pPage->cFree);
    ASMAtomicIncS32(&pThis->cFree);
}

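/*
 * Worked example of the address math in RTMemCacheFree (illustrative numbers,
 * reusing the 64 byte objects and 62 objects per page from the sizing example
 * above, with the page mapped at 0x7f1200010000):  pbObjects then sits at
 * offset 4096 - 62*64 = 128, so freeing the object at 0x7f1200010a80 masks the
 * address down to the page start 0x7f1200010000, giving
 * offObj = 0xa80 - 0x80 = 0xa00 and iObj = 0xa00 / 64 = 40; bit 40 of pbmAlloc
 * is cleared and both cFree counters are incremented.
 */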
