VirtualBox

source: vbox/trunk/src/VBox/VMM/PDMAsyncCompletionFileCache.cpp@27836

Last change on this file since 27836 was 27563, checked in by vboxsync, 15 years ago

I/O cache: Remove a bit of code which is not used very often anymore after the commit delay was added. Reduces complexity and fixes a bug causing file corruption

  • Property svn:eol-style set to native
  • Property svn:keywords set to Author Date Id Revision
File size: 82.1 KB
1/* $Id: PDMAsyncCompletionFileCache.cpp 27563 2010-03-21 16:27:56Z vboxsync $ */
2/** @file
3 * PDM Async I/O - Transport data asynchronous in R3 using EMT.
4 * File data cache.
5 */
6
7/*
8 * Copyright (C) 2006-2008 Sun Microsystems, Inc.
9 *
10 * This file is part of VirtualBox Open Source Edition (OSE), as
11 * available from http://www.virtualbox.org. This file is free software;
12 * you can redistribute it and/or modify it under the terms of the GNU
13 * General Public License (GPL) as published by the Free Software
14 * Foundation, in version 2 as it comes in the "COPYING" file of the
15 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
16 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
17 *
18 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa
19 * Clara, CA 95054 USA or visit http://www.sun.com if you need
20 * additional information or have any questions.
21 */
22
23/** @page pg_pdm_async_completion_cache PDM Async Completion Cache - The file I/O cache
24 * This component implements an I/O cache for file endpoints based on the 2Q cache algorithm.
25 */
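/*
 * Note on the 2Q structure used below: LruRecentlyUsedIn is the A1in queue
 * holding newly inserted entries which still have data buffers,
 * LruRecentlyUsedOut is the A1out ghost queue holding entries whose buffers
 * were already evicted (no data), and LruFrequentlyUsed is the Am queue for
 * entries which were accessed again while cached. pdmacFileCacheInit() sizes
 * A1in to 25% and A1out to 50% of the configured cache size.
 */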
26
27/*******************************************************************************
28* Header Files *
29*******************************************************************************/
30#define LOG_GROUP LOG_GROUP_PDM_ASYNC_COMPLETION
31#include <iprt/types.h>
32#include <iprt/mem.h>
33#include <iprt/path.h>
34#include <VBox/log.h>
35#include <VBox/stam.h>
36
37#include "PDMAsyncCompletionFileInternal.h"
38
39/**
40 * An I/O memory context.
41 */
42typedef struct PDMIOMEMCTX
43{
44 /** Pointer to the scatter/gather list. */
45 PCPDMDATASEG paDataSeg;
46 /** Number of segments. */
47 size_t cSegments;
48 /** Current segment we are in. */
49 unsigned iSegIdx;
50 /** Pointer to the current buffer. */
51 uint8_t *pbBuf;
52 /** Number of bytes left in the current buffer. */
53 size_t cbBufLeft;
54} PDMIOMEMCTX, *PPDMIOMEMCTX;
55
56#ifdef VBOX_STRICT
57# define PDMACFILECACHE_IS_CRITSECT_OWNER(Cache) \
58 do \
59 { \
60 AssertMsg(RTCritSectIsOwner(&Cache->CritSect), \
61 ("Thread does not own critical section\n"));\
62 } while(0)
63
64# define PDMACFILECACHE_EP_IS_SEMRW_WRITE_OWNER(pEpCache) \
65 do \
66 { \
67 AssertMsg(RTSemRWIsWriteOwner(pEpCache->SemRWEntries), \
68 ("Thread is not exclusive owner of the per endpoint RW semaphore\n")); \
69 } while(0)
70
71# define PDMACFILECACHE_EP_IS_SEMRW_READ_OWNER(pEpCache) \
72 do \
73 { \
74 AssertMsg(RTSemRWIsReadOwner(pEpCache->SemRWEntries), \
75 ("Thread is not read owner of the per endpoint RW semaphore\n")); \
76 } while(0)
77
78#else
79# define PDMACFILECACHE_IS_CRITSECT_OWNER(Cache) do { } while(0)
80# define PDMACFILECACHE_EP_IS_SEMRW_WRITE_OWNER(pEpCache) do { } while(0)
81# define PDMACFILECACHE_EP_IS_SEMRW_READ_OWNER(pEpCache) do { } while(0)
82#endif
83
84/*******************************************************************************
85* Internal Functions *
86*******************************************************************************/
87static void pdmacFileCacheTaskCompleted(PPDMACTASKFILE pTask, void *pvUser);
88
89/**
90 * Decrement the reference counter of the given cache entry.
91 *
92 * @returns nothing.
93 * @param pEntry The entry to release.
94 */
95DECLINLINE(void) pdmacFileEpCacheEntryRelease(PPDMACFILECACHEENTRY pEntry)
96{
97 AssertMsg(pEntry->cRefs > 0, ("Trying to release a not referenced entry\n"));
98 ASMAtomicDecU32(&pEntry->cRefs);
99}
100
101/**
102 * Increment the reference counter of the given cache entry.
103 *
104 * @returns nothing.
105 * @param pEntry The entry to reference.
106 */
107DECLINLINE(void) pdmacFileEpCacheEntryRef(PPDMACFILECACHEENTRY pEntry)
108{
109 ASMAtomicIncU32(&pEntry->cRefs);
110}
111
112/**
113 * Initialize an I/O memory context.
114 *
115 * @returns nothing.
116 * @param pIoMemCtx Pointer to an uninitialized I/O memory context.
117 * @param paDataSeg Pointer to the S/G list.
118 * @param cSegments Number of segments in the S/G list.
119 */
120DECLINLINE(void) pdmIoMemCtxInit(PPDMIOMEMCTX pIoMemCtx, PCPDMDATASEG paDataSeg, size_t cSegments)
121{
122 AssertMsg((cSegments > 0) && paDataSeg, ("Trying to initialize a I/O memory context without a S/G list\n"));
123
124 pIoMemCtx->paDataSeg = paDataSeg;
125 pIoMemCtx->cSegments = cSegments;
126 pIoMemCtx->iSegIdx = 0;
127 pIoMemCtx->pbBuf = (uint8_t *)paDataSeg[0].pvSeg;
128 pIoMemCtx->cbBufLeft = paDataSeg[0].cbSeg;
129}
130
131/**
132 * Return a buffer from the I/O memory context.
133 *
134 * @returns Pointer to the buffer.
135 * @param pIoMemCtx Pointer to the I/O memory context.
136 * @param pcbData Pointer to the number of bytes requested.
137 * If the current buffer doesn't have enough bytes left,
138 * the available amount is returned in the variable.
139 */
140DECLINLINE(uint8_t *) pdmIoMemCtxGetBuffer(PPDMIOMEMCTX pIoMemCtx, size_t *pcbData)
141{
142 size_t cbData = RT_MIN(*pcbData, pIoMemCtx->cbBufLeft);
143 uint8_t *pbBuf = pIoMemCtx->pbBuf;
144
145 pIoMemCtx->cbBufLeft -= cbData;
146
147 /* Advance to the next segment if required. */
148 if (!pIoMemCtx->cbBufLeft)
149 {
150 pIoMemCtx->iSegIdx++;
151
152 if (RT_UNLIKELY(pIoMemCtx->iSegIdx == pIoMemCtx->cSegments))
153 {
154 pIoMemCtx->cbBufLeft = 0;
155 pIoMemCtx->pbBuf = NULL;
156 }
157 else
158 {
159 pIoMemCtx->pbBuf = (uint8_t *)pIoMemCtx->paDataSeg[pIoMemCtx->iSegIdx].pvSeg;
160 pIoMemCtx->cbBufLeft = pIoMemCtx->paDataSeg[pIoMemCtx->iSegIdx].cbSeg;
161 }
162
163 *pcbData = cbData;
164 }
165 else
166 pIoMemCtx->pbBuf += cbData;
167
168 return pbBuf;
169}
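/*
 * Illustrative use of the I/O memory context (this mirrors the copy helpers
 * pdmacFileEpCacheCopyToIoMemCtx/FromIoMemCtx further below): request chunks
 * in a loop until all data is processed; a request may return less than asked
 * for whenever a segment boundary is crossed.
 *
 *     while (cbData)
 *     {
 *         size_t cbCopy = cbData;
 *         uint8_t *pbBuf = pdmIoMemCtxGetBuffer(&IoMemCtx, &cbCopy);
 *         memcpy(pbBuf, pbSrc, cbCopy);   // or copy in the other direction
 *         cbData -= cbCopy;
 *         pbSrc  += cbCopy;
 *     }
 */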
170
171#ifdef DEBUG
172static void pdmacFileCacheValidate(PPDMACFILECACHEGLOBAL pCache)
173{
174 /* Amount of cached data should never exceed the maximum amount. */
175 AssertMsg(pCache->cbCached <= pCache->cbMax,
176 ("Current amount of cached data exceeds maximum\n"));
177
178 /* The amount of cached data in the recently used and frequently used lists should match cbCached. */
179 AssertMsg(pCache->LruRecentlyUsedIn.cbCached + pCache->LruFrequentlyUsed.cbCached == pCache->cbCached,
180 ("Amount of cached data doesn't match\n"));
181
182 AssertMsg(pCache->LruRecentlyUsedOut.cbCached <= pCache->cbRecentlyUsedOutMax,
183 ("Paged out list exceeds maximum\n"));
184}
185#endif
186
187DECLINLINE(void) pdmacFileCacheLockEnter(PPDMACFILECACHEGLOBAL pCache)
188{
189 RTCritSectEnter(&pCache->CritSect);
190#ifdef DEBUG
191 pdmacFileCacheValidate(pCache);
192#endif
193}
194
195DECLINLINE(void) pdmacFileCacheLockLeave(PPDMACFILECACHEGLOBAL pCache)
196{
197#ifdef DEBUG
198 pdmacFileCacheValidate(pCache);
199#endif
200 RTCritSectLeave(&pCache->CritSect);
201}
202
203DECLINLINE(void) pdmacFileCacheSub(PPDMACFILECACHEGLOBAL pCache, uint32_t cbAmount)
204{
205 PDMACFILECACHE_IS_CRITSECT_OWNER(pCache);
206 pCache->cbCached -= cbAmount;
207}
208
209DECLINLINE(void) pdmacFileCacheAdd(PPDMACFILECACHEGLOBAL pCache, uint32_t cbAmount)
210{
211 PDMACFILECACHE_IS_CRITSECT_OWNER(pCache);
212 pCache->cbCached += cbAmount;
213}
214
215DECLINLINE(void) pdmacFileCacheListAdd(PPDMACFILELRULIST pList, uint32_t cbAmount)
216{
217 pList->cbCached += cbAmount;
218}
219
220DECLINLINE(void) pdmacFileCacheListSub(PPDMACFILELRULIST pList, uint32_t cbAmount)
221{
222 pList->cbCached -= cbAmount;
223}
224
225#ifdef PDMACFILECACHE_WITH_LRULIST_CHECKS
226/**
227 * Checks consistency of an LRU list.
228 *
229 * @returns nothing
230 * @param pList The LRU list to check.
231 * @param pNotInList Element which is not allowed to occur in the list.
232 */
233static void pdmacFileCacheCheckList(PPDMACFILELRULIST pList, PPDMACFILECACHEENTRY pNotInList)
234{
235 PPDMACFILECACHEENTRY pCurr = pList->pHead;
236
237 /* Check that there are no double entries and no cycles in the list. */
238 while (pCurr)
239 {
240 PPDMACFILECACHEENTRY pNext = pCurr->pNext;
241
242 while (pNext)
243 {
244 AssertMsg(pCurr != pNext,
245 ("Entry %#p is at least two times in list %#p or there is a cycle in the list\n",
246 pCurr, pList));
247 pNext = pNext->pNext;
248 }
249
250 AssertMsg(pCurr != pNotInList, ("Not allowed entry %#p is in list\n", pCurr));
251
252 if (!pCurr->pNext)
253 AssertMsg(pCurr == pList->pTail, ("End of list reached but last element is not list tail\n"));
254
255 pCurr = pCurr->pNext;
256 }
257}
258#endif
259
260/**
261 * Unlinks a cache entry from the LRU list it is assigned to.
262 *
263 * @returns nothing.
264 * @param pEntry The entry to unlink.
265 */
266static void pdmacFileCacheEntryRemoveFromList(PPDMACFILECACHEENTRY pEntry)
267{
268 PPDMACFILELRULIST pList = pEntry->pList;
269 PPDMACFILECACHEENTRY pPrev, pNext;
270
271 LogFlowFunc((": Deleting entry %#p from list %#p\n", pEntry, pList));
272
273 AssertPtr(pList);
274
275#ifdef PDMACFILECACHE_WITH_LRULIST_CHECKS
276 pdmacFileCacheCheckList(pList, NULL);
277#endif
278
279 pPrev = pEntry->pPrev;
280 pNext = pEntry->pNext;
281
282 AssertMsg(pEntry != pPrev, ("Entry links to itself as previous element\n"));
283 AssertMsg(pEntry != pNext, ("Entry links to itself as next element\n"));
284
285 if (pPrev)
286 pPrev->pNext = pNext;
287 else
288 {
289 pList->pHead = pNext;
290
291 if (pNext)
292 pNext->pPrev = NULL;
293 }
294
295 if (pNext)
296 pNext->pPrev = pPrev;
297 else
298 {
299 pList->pTail = pPrev;
300
301 if (pPrev)
302 pPrev->pNext = NULL;
303 }
304
305 pEntry->pList = NULL;
306 pEntry->pPrev = NULL;
307 pEntry->pNext = NULL;
308 pdmacFileCacheListSub(pList, pEntry->cbData);
309#ifdef PDMACFILECACHE_WITH_LRULIST_CHECKS
310 pdmacFileCacheCheckList(pList, pEntry);
311#endif
312}
313
314/**
315 * Adds a cache entry to the given LRU list unlinking it from the currently
316 * assigned list if needed.
317 *
318 * @returns nothing.
319 * @param pList List to add the entry to.
320 * @param pEntry Entry to add.
321 */
322static void pdmacFileCacheEntryAddToList(PPDMACFILELRULIST pList, PPDMACFILECACHEENTRY pEntry)
323{
324 LogFlowFunc((": Adding entry %#p to list %#p\n", pEntry, pList));
325#ifdef PDMACFILECACHE_WITH_LRULIST_CHECKS
326 pdmacFileCacheCheckList(pList, NULL);
327#endif
328
329 /* Remove from old list if needed */
330 if (pEntry->pList)
331 pdmacFileCacheEntryRemoveFromList(pEntry);
332
333 pEntry->pNext = pList->pHead;
334 if (pList->pHead)
335 pList->pHead->pPrev = pEntry;
336 else
337 {
338 Assert(!pList->pTail);
339 pList->pTail = pEntry;
340 }
341
342 pEntry->pPrev = NULL;
343 pList->pHead = pEntry;
344 pdmacFileCacheListAdd(pList, pEntry->cbData);
345 pEntry->pList = pList;
346#ifdef PDMACFILECACHE_WITH_LRULIST_CHECKS
347 pdmacFileCacheCheckList(pList, NULL);
348#endif
349}
350
351/**
352 * Destroys an LRU list, freeing all entries.
353 *
354 * @returns nothing
355 * @param pList Pointer to the LRU list to destroy.
356 *
357 * @note The caller must own the critical section of the cache.
358 */
359static void pdmacFileCacheDestroyList(PPDMACFILELRULIST pList)
360{
361 while (pList->pHead)
362 {
363 PPDMACFILECACHEENTRY pEntry = pList->pHead;
364
365 pList->pHead = pEntry->pNext;
366
367 AssertMsg(!(pEntry->fFlags & (PDMACFILECACHE_ENTRY_IO_IN_PROGRESS | PDMACFILECACHE_ENTRY_IS_DIRTY)),
368 ("Entry is dirty and/or still in progress fFlags=%#x\n", pEntry->fFlags));
369
370 RTMemPageFree(pEntry->pbData);
371 RTMemFree(pEntry);
372 }
373}
374
375/**
376 * Tries to remove the given amount of bytes from a given list in the cache,
377 * moving the entries to one of the given ghost lists.
378 *
379 * @returns Amount of data which could be freed.
380 * @param pCache Pointer to the global cache data.
381 * @param cbData The amount of data to free.
382 * @param pListSrc The source list to evict data from.
383 * @param pGhostListDst The ghost list removed entries should be moved to.
384 * NULL if the entries should be freed instead.
385 * @param fReuseBuffer Flag whether a buffer should be reused if it has the same size.
386 * @param ppbBuffer Where to store the address of the buffer if an entry with the
387 * same size was found and fReuseBuffer is true.
388 *
389 * @note This function may return fewer bytes than requested because entries
390 * may be marked as non-evictable if they are used for I/O at the
391 * moment.
392 */
393static size_t pdmacFileCacheEvictPagesFrom(PPDMACFILECACHEGLOBAL pCache, size_t cbData,
394 PPDMACFILELRULIST pListSrc, PPDMACFILELRULIST pGhostListDst,
395 bool fReuseBuffer, uint8_t **ppbBuffer)
396{
397 size_t cbEvicted = 0;
398
399 PDMACFILECACHE_IS_CRITSECT_OWNER(pCache);
400
401 AssertMsg(cbData > 0, ("Evicting 0 bytes not possible\n"));
402 AssertMsg( !pGhostListDst
403 || (pGhostListDst == &pCache->LruRecentlyUsedOut),
404 ("Destination list must be NULL or the recently used but paged out list\n"));
405
406 if (fReuseBuffer)
407 {
408 AssertPtr(ppbBuffer);
409 *ppbBuffer = NULL;
410 }
411
412 /* Start deleting from the tail. */
413 PPDMACFILECACHEENTRY pEntry = pListSrc->pTail;
414
415 while ((cbEvicted < cbData) && pEntry)
416 {
417 PPDMACFILECACHEENTRY pCurr = pEntry;
418
419 pEntry = pEntry->pPrev;
420
421 /* We can't evict entries which have I/O in progress or are dirty (even if no I/O is in progress for them). */
422 if ( !(pCurr->fFlags & PDMACFILECACHE_NOT_EVICTABLE)
423 && (ASMAtomicReadU32(&pCurr->cRefs) == 0))
424 {
425 /* Ok eviction candidate. Grab the endpoint semaphore and check again
426 * because somebody else might have raced us. */
427 PPDMACFILEENDPOINTCACHE pEndpointCache = &pCurr->pEndpoint->DataCache;
428 RTSemRWRequestWrite(pEndpointCache->SemRWEntries, RT_INDEFINITE_WAIT);
429
430 if (!(pCurr->fFlags & PDMACFILECACHE_NOT_EVICTABLE)
431 && (ASMAtomicReadU32(&pCurr->cRefs) == 0))
432 {
433 LogFlow(("Evicting entry %#p (%u bytes)\n", pCurr, pCurr->cbData));
434
435 if (fReuseBuffer && (pCurr->cbData == cbData))
436 {
437 STAM_COUNTER_INC(&pCache->StatBuffersReused);
438 *ppbBuffer = pCurr->pbData;
439 }
440 else if (pCurr->pbData)
441 RTMemPageFree(pCurr->pbData);
442
443 pCurr->pbData = NULL;
444 cbEvicted += pCurr->cbData;
445
446 pdmacFileCacheEntryRemoveFromList(pCurr);
447 pdmacFileCacheSub(pCache, pCurr->cbData);
448
449 if (pGhostListDst)
450 {
451 RTSemRWReleaseWrite(pEndpointCache->SemRWEntries);
452
453 PPDMACFILECACHEENTRY pGhostEntFree = pGhostListDst->pTail;
454
455 /* We have to remove the last entries from the paged out list. */
456 while ( ((pGhostListDst->cbCached + pCurr->cbData) > pCache->cbRecentlyUsedOutMax)
457 && pGhostEntFree)
458 {
459 PPDMACFILECACHEENTRY pFree = pGhostEntFree;
460 PPDMACFILEENDPOINTCACHE pEndpointCacheFree = &pFree->pEndpoint->DataCache;
461
462 pGhostEntFree = pGhostEntFree->pPrev;
463
464 RTSemRWRequestWrite(pEndpointCacheFree->SemRWEntries, RT_INDEFINITE_WAIT);
465
466 if (ASMAtomicReadU32(&pFree->cRefs) == 0)
467 {
468 pdmacFileCacheEntryRemoveFromList(pFree);
469
470 STAM_PROFILE_ADV_START(&pCache->StatTreeRemove, Cache);
471 RTAvlrFileOffsetRemove(pEndpointCacheFree->pTree, pFree->Core.Key);
472 STAM_PROFILE_ADV_STOP(&pCache->StatTreeRemove, Cache);
473
474 RTMemFree(pFree);
475 }
476
477 RTSemRWReleaseWrite(pEndpointCacheFree->SemRWEntries);
478 }
479
480 if (pGhostListDst->cbCached + pCurr->cbData > pCache->cbRecentlyUsedOutMax)
481 {
482 /* Couldn't remove enough entries from the ghost list. Delete the entry instead of moving it. */
483 STAM_PROFILE_ADV_START(&pCache->StatTreeRemove, Cache);
484 RTAvlrFileOffsetRemove(pCurr->pEndpoint->DataCache.pTree, pCurr->Core.Key);
485 STAM_PROFILE_ADV_STOP(&pCache->StatTreeRemove, Cache);
486
487 RTMemFree(pCurr);
488 }
489 else
490 pdmacFileCacheEntryAddToList(pGhostListDst, pCurr);
491 }
492 else
493 {
494 /* Delete the entry from the AVL tree it is assigned to. */
495 STAM_PROFILE_ADV_START(&pCache->StatTreeRemove, Cache);
496 RTAvlrFileOffsetRemove(pCurr->pEndpoint->DataCache.pTree, pCurr->Core.Key);
497 STAM_PROFILE_ADV_STOP(&pCache->StatTreeRemove, Cache);
498
499 RTSemRWReleaseWrite(pEndpointCache->SemRWEntries);
500 RTMemFree(pCurr);
501 }
502 }
503
504 }
505 else
506 LogFlow(("Entry %#p (%u bytes) is still in progress and can't be evicted\n", pCurr, pCurr->cbData));
507 }
508
509 return cbEvicted;
510}
511
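/**
 * Makes room in the cache for the given amount of data, evicting entries
 * according to the 2Q policy: if the recently used (A1in) list grew beyond its
 * share it is evicted into the ghost list first, otherwise entries are evicted
 * from the frequently used list.
 *
 * @returns true if the requested amount fits into the cache afterwards,
 * false otherwise.
 * @param pCache Pointer to the global cache data.
 * @param cbData Number of bytes which need to fit into the cache.
 * @param fReuseBuffer Flag whether an evicted buffer of exactly cbData bytes
 * may be handed back for reuse.
 * @param ppbBuffer Where to store the address of a reusable buffer if one
 * was found.
 */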
512static bool pdmacFileCacheReclaim(PPDMACFILECACHEGLOBAL pCache, size_t cbData, bool fReuseBuffer, uint8_t **ppbBuffer)
513{
514 size_t cbRemoved = 0;
515
516 if ((pCache->cbCached + cbData) < pCache->cbMax)
517 return true;
518 else if ((pCache->LruRecentlyUsedIn.cbCached + cbData) > pCache->cbRecentlyUsedInMax)
519 {
520 /* Try to evict as many bytes as possible from A1in */
521 cbRemoved = pdmacFileCacheEvictPagesFrom(pCache, cbData, &pCache->LruRecentlyUsedIn,
522 &pCache->LruRecentlyUsedOut, fReuseBuffer, ppbBuffer);
523
524 /*
525 * If it was not possible to remove enough entries,
526 * try the frequently used list.
527 */
528 if (cbRemoved < cbData)
529 {
530 Assert(!fReuseBuffer || !*ppbBuffer); /* It is not possible that we got a buffer with the correct size but didn't free enough data. */
531
532 /*
533 * If we already removed something we can't pass the reuse-buffer flag anymore because
534 * the remaining amount to evict no longer matches the requested buffer size.
535 */
536 if (!cbRemoved)
537 cbRemoved += pdmacFileCacheEvictPagesFrom(pCache, cbData, &pCache->LruFrequentlyUsed,
538 NULL, fReuseBuffer, ppbBuffer);
539 else
540 cbRemoved += pdmacFileCacheEvictPagesFrom(pCache, cbData - cbRemoved, &pCache->LruFrequentlyUsed,
541 NULL, false, NULL);
542 }
543 }
544 else
545 {
546 /* We have to remove entries from the frequently used list. */
547 cbRemoved = pdmacFileCacheEvictPagesFrom(pCache, cbData, &pCache->LruFrequentlyUsed,
548 NULL, fReuseBuffer, ppbBuffer);
549 }
550
551 LogFlowFunc((": removed %u bytes, requested %u\n", cbRemoved, cbData));
552 return (cbRemoved >= cbData);
553}
554
555/**
556 * Initiates a read I/O task for the given entry.
557 *
558 * @returns nothing.
559 * @param pEntry The entry to fetch the data to.
560 */
561static void pdmacFileCacheReadFromEndpoint(PPDMACFILECACHEENTRY pEntry)
562{
563 LogFlowFunc((": Reading data into cache entry %#p\n", pEntry));
564
565 /* Make sure no one evicts the entry while it is accessed. */
566 pEntry->fFlags |= PDMACFILECACHE_ENTRY_IO_IN_PROGRESS;
567
568 PPDMACTASKFILE pIoTask = pdmacFileTaskAlloc(pEntry->pEndpoint);
569 AssertPtr(pIoTask);
570
571 AssertMsg(pEntry->pbData, ("Entry is in ghost state\n"));
572
573 pIoTask->pEndpoint = pEntry->pEndpoint;
574 pIoTask->enmTransferType = PDMACTASKFILETRANSFER_READ;
575 pIoTask->Off = pEntry->Core.Key;
576 pIoTask->DataSeg.cbSeg = pEntry->cbData;
577 pIoTask->DataSeg.pvSeg = pEntry->pbData;
578 pIoTask->pvUser = pEntry;
579 pIoTask->pfnCompleted = pdmacFileCacheTaskCompleted;
580
581 /* Send it off to the I/O manager. */
582 pdmacFileEpAddTask(pEntry->pEndpoint, pIoTask);
583}
584
585/**
586 * Initiates a write I/O task for the given entry.
587 *
588 * @returns nothing.
589 * @param pEntry The entry holding the data to write.
590 */
591static void pdmacFileCacheWriteToEndpoint(PPDMACFILECACHEENTRY pEntry)
592{
593 LogFlowFunc((": Writing data from cache entry %#p\n", pEntry));
594
595 /* Make sure no one evicts the entry while it is accessed. */
596 pEntry->fFlags |= PDMACFILECACHE_ENTRY_IO_IN_PROGRESS;
597
598 PPDMACTASKFILE pIoTask = pdmacFileTaskAlloc(pEntry->pEndpoint);
599 AssertPtr(pIoTask);
600
601 AssertMsg(pEntry->pbData, ("Entry is in ghost state\n"));
602
603 pIoTask->pEndpoint = pEntry->pEndpoint;
604 pIoTask->enmTransferType = PDMACTASKFILETRANSFER_WRITE;
605 pIoTask->Off = pEntry->Core.Key;
606 pIoTask->DataSeg.cbSeg = pEntry->cbData;
607 pIoTask->DataSeg.pvSeg = pEntry->pbData;
608 pIoTask->pvUser = pEntry;
609 pIoTask->pfnCompleted = pdmacFileCacheTaskCompleted;
610 ASMAtomicIncU32(&pEntry->pEndpoint->DataCache.cWritesOutstanding);
611
612 /* Send it off to the I/O manager. */
613 pdmacFileEpAddTask(pEntry->pEndpoint, pIoTask);
614}
615
616/**
617 * Commit a single dirty entry to the endpoint.
618 *
619 * @returns nothing.
620 * @param pEntry The entry to commit.
621 */
622static void pdmacFileCacheEntryCommit(PPDMACFILEENDPOINTCACHE pEndpointCache, PPDMACFILECACHEENTRY pEntry)
623{
624 NOREF(pEndpointCache);
625 AssertMsg( (pEntry->fFlags & PDMACFILECACHE_ENTRY_IS_DIRTY)
626 && !(pEntry->fFlags & PDMACFILECACHE_ENTRY_IO_IN_PROGRESS),
627 ("Invalid flags set for entry %#p\n", pEntry));
628
629 pdmacFileCacheWriteToEndpoint(pEntry);
630}
631
632/**
633 * Commit all dirty entries for a single endpoint.
634 *
635 * @returns nothing.
636 * @param pEndpointCache The endpoint cache to commit.
637 */
638static void pdmacFileCacheEndpointCommit(PPDMACFILEENDPOINTCACHE pEndpointCache)
639{
640 uint32_t cbCommitted = 0;
641 RTSemRWRequestWrite(pEndpointCache->SemRWEntries, RT_INDEFINITE_WAIT);
642
643 /* The list is moved to a local list header to reduce locking overhead. */
644 RTLISTNODE ListDirtyNotCommitted;
645 RTSPINLOCKTMP Tmp;
646
647 RTListInit(&ListDirtyNotCommitted);
648 RTSpinlockAcquire(pEndpointCache->LockList, &Tmp);
649 RTListMove(&ListDirtyNotCommitted, &pEndpointCache->ListDirtyNotCommitted);
650 RTSpinlockRelease(pEndpointCache->LockList, &Tmp);
651
652 if (!RTListIsEmpty(&ListDirtyNotCommitted))
653 {
654 PPDMACFILECACHEENTRY pEntry = RTListNodeGetFirst(&ListDirtyNotCommitted,
655 PDMACFILECACHEENTRY,
656 NodeNotCommitted);
657
658 while (!RTListNodeIsLast(&ListDirtyNotCommitted, &pEntry->NodeNotCommitted))
659 {
660 PPDMACFILECACHEENTRY pNext = RTListNodeGetNext(&pEntry->NodeNotCommitted, PDMACFILECACHEENTRY,
661 NodeNotCommitted);
662 pdmacFileCacheEntryCommit(pEndpointCache, pEntry);
663 cbCommitted += pEntry->cbData;
664 RTListNodeRemove(&pEntry->NodeNotCommitted);
665 pEntry = pNext;
666 }
667
668 /* Commit the last entry */
669 Assert(RTListNodeIsLast(&ListDirtyNotCommitted, &pEntry->NodeNotCommitted));
670 pdmacFileCacheEntryCommit(pEndpointCache, pEntry);
671 RTListNodeRemove(&pEntry->NodeNotCommitted);
672 AssertMsg(RTListIsEmpty(&ListDirtyNotCommitted),
673 ("Committed all entries but list is not empty\n"));
674 }
675
676 RTSemRWReleaseWrite(pEndpointCache->SemRWEntries);
677 AssertMsg(pEndpointCache->pCache->cbDirty >= cbCommitted,
678 ("Number of committed bytes exceeds number of dirty bytes\n"));
679 ASMAtomicSubU32(&pEndpointCache->pCache->cbDirty, cbCommitted);
680}
681
682/**
683 * Commit all dirty entries in the cache.
684 *
685 * @returns nothing.
686 * @param pCache The global cache instance.
687 */
688static void pdmacFileCacheCommitDirtyEntries(PPDMACFILECACHEGLOBAL pCache)
689{
690 bool fCommitInProgress = ASMAtomicXchgBool(&pCache->fCommitInProgress, true);
691
692 if (!fCommitInProgress)
693 {
694 pdmacFileCacheLockEnter(pCache);
695 Assert(!RTListIsEmpty(&pCache->ListEndpoints));
696
697 PPDMACFILEENDPOINTCACHE pEndpointCache = RTListNodeGetFirst(&pCache->ListEndpoints,
698 PDMACFILEENDPOINTCACHE,
699 NodeCacheEndpoint);
700 AssertPtr(pEndpointCache);
701
702 while (!RTListNodeIsLast(&pCache->ListEndpoints, &pEndpointCache->NodeCacheEndpoint))
703 {
704 pdmacFileCacheEndpointCommit(pEndpointCache);
705
706 pEndpointCache = RTListNodeGetNext(&pEndpointCache->NodeCacheEndpoint, PDMACFILEENDPOINTCACHE,
707 NodeCacheEndpoint);
708 }
709
710 /* Commit the last endpoint */
711 Assert(RTListNodeIsLast(&pCache->ListEndpoints, &pEndpointCache->NodeCacheEndpoint));
712 pdmacFileCacheEndpointCommit(pEndpointCache);
713
714 pdmacFileCacheLockLeave(pCache);
715 ASMAtomicWriteBool(&pCache->fCommitInProgress, false);
716 }
717}
718
719/**
720 * Adds the given entry as dirty to the cache.
721 *
722 * @returns Flag whether the amount of dirty bytes in the cache exceeds the threshold.
723 * @param pEndpointCache The endpoint cache the entry belongs to.
724 * @param pEntry The entry to add.
725 */
726static bool pdmacFileCacheAddDirtyEntry(PPDMACFILEENDPOINTCACHE pEndpointCache, PPDMACFILECACHEENTRY pEntry)
727{
728 bool fDirtyBytesExceeded = false;
729 PPDMACFILECACHEGLOBAL pCache = pEndpointCache->pCache;
730
731 /* If the commit timer is disabled we commit right away. */
732 if (pCache->u32CommitTimeoutMs == 0)
733 {
734 pEntry->fFlags |= PDMACFILECACHE_ENTRY_IS_DIRTY;
735 pdmacFileCacheEntryCommit(pEndpointCache, pEntry);
736 }
737 else if (!(pEntry->fFlags & PDMACFILECACHE_ENTRY_IS_DIRTY))
738 {
739 pEntry->fFlags |= PDMACFILECACHE_ENTRY_IS_DIRTY;
740
741 RTSPINLOCKTMP Tmp;
742 RTSpinlockAcquire(pEndpointCache->LockList, &Tmp);
743 RTListAppend(&pEndpointCache->ListDirtyNotCommitted, &pEntry->NodeNotCommitted);
744 RTSpinlockRelease(pEndpointCache->LockList, &Tmp);
745
746 uint32_t cbDirty = ASMAtomicAddU32(&pCache->cbDirty, pEntry->cbData);
747
748 fDirtyBytesExceeded = (cbDirty >= pCache->cbCommitDirtyThreshold);
749 }
750
751 return fDirtyBytesExceeded;
752}
753
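/*
 * Note on the write-back policy implemented above: a write first lands in the
 * cache entry which is marked dirty. Dirty entries are collected on the per
 * endpoint ListDirtyNotCommitted list and are written back to the endpoint
 * either when the commit timer fires (u32CommitTimeoutMs), when the total
 * amount of dirty data crosses cbCommitDirtyThreshold, or immediately if the
 * commit timer is disabled.
 */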
754
755/**
756 * Completes a task segment, freeing all resources, and completes the task handle
757 * if everything was transferred.
758 *
759 * @returns Next task segment handle.
760 * @param pEndpointCache The endpoint cache.
761 * @param pTaskSeg Task segment to complete.
762 */
763static PPDMACFILETASKSEG pdmacFileCacheTaskComplete(PPDMACFILEENDPOINTCACHE pEndpointCache, PPDMACFILETASKSEG pTaskSeg)
764{
765 PPDMACFILETASKSEG pNext = pTaskSeg->pNext;
766
767 uint32_t uOld = ASMAtomicSubS32(&pTaskSeg->pTask->cbTransferLeft, pTaskSeg->cbTransfer);
768 AssertMsg(uOld >= pTaskSeg->cbTransfer, ("New value would overflow\n"));
769 if (!(uOld - pTaskSeg->cbTransfer)
770 && !ASMAtomicXchgBool(&pTaskSeg->pTask->fCompleted, true))
771 pdmR3AsyncCompletionCompleteTask(&pTaskSeg->pTask->Core, true);
772
773 RTMemFree(pTaskSeg);
774
775 return pNext;
776}
777
778/**
779 * Completion callback for I/O tasks.
780 *
781 * @returns nothing.
782 * @param pTask The completed task.
783 * @param pvUser Opaque user data.
784 */
785static void pdmacFileCacheTaskCompleted(PPDMACTASKFILE pTask, void *pvUser)
786{
787 PPDMACFILECACHEENTRY pEntry = (PPDMACFILECACHEENTRY)pvUser;
788 PPDMACFILECACHEGLOBAL pCache = pEntry->pCache;
789 PPDMASYNCCOMPLETIONENDPOINTFILE pEndpoint = pEntry->pEndpoint;
790 PPDMACFILEENDPOINTCACHE pEndpointCache = &pEndpoint->DataCache;
791
792 /* Reference the entry now as we are clearing the I/O in progress flag
793 * which protected the entry until now. */
794 pdmacFileEpCacheEntryRef(pEntry);
795
796 RTSemRWRequestWrite(pEndpoint->DataCache.SemRWEntries, RT_INDEFINITE_WAIT);
797 pEntry->fFlags &= ~PDMACFILECACHE_ENTRY_IO_IN_PROGRESS;
798
799 /* Process the waiting segment list. The data in the entry might have changed in the meantime. */
800 bool fDirty = false;
801 PPDMACFILETASKSEG pCurr = pEntry->pWaitingHead;
802
803 AssertMsg((pCurr && pEntry->pWaitingTail) || (!pCurr && !pEntry->pWaitingTail),
804 ("The list tail was not updated correctly\n"));
805 pEntry->pWaitingTail = NULL;
806 pEntry->pWaitingHead = NULL;
807
808 if (pTask->enmTransferType == PDMACTASKFILETRANSFER_WRITE)
809 {
810 AssertMsg(pEndpointCache->cWritesOutstanding > 0, ("Completed write request but outstanding task count is 0\n"));
811 ASMAtomicDecU32(&pEndpointCache->cWritesOutstanding);
812
813 pEntry->fFlags &= ~PDMACFILECACHE_ENTRY_IS_DIRTY;
814
815 while (pCurr)
816 {
817 AssertMsg(pCurr->fWrite, ("Completed write entries should never have read tasks attached\n"));
818
819 memcpy(pEntry->pbData + pCurr->uBufOffset, pCurr->pvBuf, pCurr->cbTransfer);
820 fDirty = true;
821
822 pCurr = pdmacFileCacheTaskComplete(pEndpointCache, pCurr);
823 }
824 }
825 else
826 {
827 AssertMsg(pTask->enmTransferType == PDMACTASKFILETRANSFER_READ, ("Invalid transfer type\n"));
828 AssertMsg(!(pEntry->fFlags & PDMACFILECACHE_ENTRY_IS_DIRTY),
829 ("Invalid flags set\n"));
830
831 while (pCurr)
832 {
833 if (pCurr->fWrite)
834 {
835 memcpy(pEntry->pbData + pCurr->uBufOffset, pCurr->pvBuf, pCurr->cbTransfer);
836 fDirty = true;
837 }
838 else
839 memcpy(pCurr->pvBuf, pEntry->pbData + pCurr->uBufOffset, pCurr->cbTransfer);
840
841 pCurr = pdmacFileCacheTaskComplete(pEndpointCache, pCurr);
842 }
843 }
844
845 bool fCommit = false;
846 if (fDirty)
847 fCommit = pdmacFileCacheAddDirtyEntry(pEndpointCache, pEntry);
848
849 /* Complete a pending flush if all writes have completed */
850 if (!ASMAtomicReadU32(&pEndpointCache->cWritesOutstanding))
851 {
852 PPDMASYNCCOMPLETIONTASKFILE pTaskFlush = (PPDMASYNCCOMPLETIONTASKFILE)ASMAtomicXchgPtr((void * volatile *)&pEndpointCache->pTaskFlush, NULL);
853 if (pTaskFlush)
854 pdmR3AsyncCompletionCompleteTask(&pTaskFlush->Core, true);
855 }
856
857 RTSemRWReleaseWrite(pEndpoint->DataCache.SemRWEntries);
858
859 /* Dereference the entry so that it isn't protected anymore, unless another write was issued for it. */
860 pdmacFileEpCacheEntryRelease(pEntry);
861
862 if (fCommit)
863 pdmacFileCacheCommitDirtyEntries(pCache);
864}
865
866/**
867 * Commit timer callback.
868 */
869static void pdmacFileCacheCommitTimerCallback(PVM pVM, PTMTIMER pTimer, void *pvUser)
870{
871 PPDMASYNCCOMPLETIONEPCLASSFILE pClassFile = (PPDMASYNCCOMPLETIONEPCLASSFILE)pvUser;
872 PPDMACFILECACHEGLOBAL pCache = &pClassFile->Cache;
873
874 LogFlowFunc(("Commit interval expired, committing dirty entries\n"));
875
876 if (ASMAtomicReadU32(&pCache->cbDirty) > 0)
877 pdmacFileCacheCommitDirtyEntries(pCache);
878
879 TMTimerSetMillies(pTimer, pCache->u32CommitTimeoutMs);
880 LogFlowFunc(("Entries committed, going to sleep\n"));
881}
882
883/**
884 * Initializes the I/O cache.
885 *
886 * @returns VBox status code.
887 * @param pClassFile The global class data for file endpoints.
888 * @param pCfgNode CFGM node to query configuration data from.
889 */
890int pdmacFileCacheInit(PPDMASYNCCOMPLETIONEPCLASSFILE pClassFile, PCFGMNODE pCfgNode)
891{
892 int rc = VINF_SUCCESS;
893 PPDMACFILECACHEGLOBAL pCache = &pClassFile->Cache;
894
895 rc = CFGMR3QueryU32Def(pCfgNode, "CacheSize", &pCache->cbMax, 5 * _1M);
896 AssertLogRelRCReturn(rc, rc);
897
898 RTListInit(&pCache->ListEndpoints);
899 pCache->cRefs = 0;
900 pCache->cbCached = 0;
901 pCache->fCommitInProgress = 0;
902 LogFlowFunc((": Maximum number of bytes cached %u\n", pCache->cbMax));
903
904 /* Initialize members */
905 pCache->LruRecentlyUsedIn.pHead = NULL;
906 pCache->LruRecentlyUsedIn.pTail = NULL;
907 pCache->LruRecentlyUsedIn.cbCached = 0;
908
909 pCache->LruRecentlyUsedOut.pHead = NULL;
910 pCache->LruRecentlyUsedOut.pTail = NULL;
911 pCache->LruRecentlyUsedOut.cbCached = 0;
912
913 pCache->LruFrequentlyUsed.pHead = NULL;
914 pCache->LruFrequentlyUsed.pTail = NULL;
915 pCache->LruFrequentlyUsed.cbCached = 0;
916
917 pCache->cbRecentlyUsedInMax = (pCache->cbMax / 100) * 25; /* 25% of the buffer size */
918 pCache->cbRecentlyUsedOutMax = (pCache->cbMax / 100) * 50; /* 50% of the buffer size */
919 LogFlowFunc((": cbRecentlyUsedInMax=%u cbRecentlyUsedOutMax=%u\n", pCache->cbRecentlyUsedInMax, pCache->cbRecentlyUsedOutMax));
920
921 /** @todo r=aeichner: Experiment to find optimal default values */
922 rc = CFGMR3QueryU32Def(pCfgNode, "CacheCommitIntervalMs", &pCache->u32CommitTimeoutMs, 10000 /* 10sec */);
923 AssertLogRelRCReturn(rc, rc);
924 rc = CFGMR3QueryU32(pCfgNode, "CacheCommitThreshold", &pCache->cbCommitDirtyThreshold);
925 if ( rc == VERR_CFGM_VALUE_NOT_FOUND
926 || rc == VERR_CFGM_NO_PARENT)
927 {
928 /* Start committing after 50% of the cache is dirty */
929 pCache->cbCommitDirtyThreshold = pCache->cbMax / 2;
930 }
931 else
932 return rc;
933
934 STAMR3Register(pClassFile->Core.pVM, &pCache->cbMax,
935 STAMTYPE_U32, STAMVISIBILITY_ALWAYS,
936 "/PDM/AsyncCompletion/File/cbMax",
937 STAMUNIT_BYTES,
938 "Maximum cache size");
939 STAMR3Register(pClassFile->Core.pVM, &pCache->cbCached,
940 STAMTYPE_U32, STAMVISIBILITY_ALWAYS,
941 "/PDM/AsyncCompletion/File/cbCached",
942 STAMUNIT_BYTES,
943 "Currently used cache");
944 STAMR3Register(pClassFile->Core.pVM, &pCache->LruRecentlyUsedIn.cbCached,
945 STAMTYPE_U32, STAMVISIBILITY_ALWAYS,
946 "/PDM/AsyncCompletion/File/cbCachedMruIn",
947 STAMUNIT_BYTES,
948 "Number of bytes cached in MRU list");
949 STAMR3Register(pClassFile->Core.pVM, &pCache->LruRecentlyUsedOut.cbCached,
950 STAMTYPE_U32, STAMVISIBILITY_ALWAYS,
951 "/PDM/AsyncCompletion/File/cbCachedMruOut",
952 STAMUNIT_BYTES,
953 "Number of bytes cached in FRU list");
954 STAMR3Register(pClassFile->Core.pVM, &pCache->LruFrequentlyUsed.cbCached,
955 STAMTYPE_U32, STAMVISIBILITY_ALWAYS,
956 "/PDM/AsyncCompletion/File/cbCachedFru",
957 STAMUNIT_BYTES,
958 "Number of bytes cached in FRU ghost list");
959
960#ifdef VBOX_WITH_STATISTICS
961 STAMR3Register(pClassFile->Core.pVM, &pCache->cHits,
962 STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS,
963 "/PDM/AsyncCompletion/File/CacheHits",
964 STAMUNIT_COUNT, "Number of hits in the cache");
965 STAMR3Register(pClassFile->Core.pVM, &pCache->cPartialHits,
966 STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS,
967 "/PDM/AsyncCompletion/File/CachePartialHits",
968 STAMUNIT_COUNT, "Number of partial hits in the cache");
969 STAMR3Register(pClassFile->Core.pVM, &pCache->cMisses,
970 STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS,
971 "/PDM/AsyncCompletion/File/CacheMisses",
972 STAMUNIT_COUNT, "Number of misses when accessing the cache");
973 STAMR3Register(pClassFile->Core.pVM, &pCache->StatRead,
974 STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS,
975 "/PDM/AsyncCompletion/File/CacheRead",
976 STAMUNIT_BYTES, "Number of bytes read from the cache");
977 STAMR3Register(pClassFile->Core.pVM, &pCache->StatWritten,
978 STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS,
979 "/PDM/AsyncCompletion/File/CacheWritten",
980 STAMUNIT_BYTES, "Number of bytes written to the cache");
981 STAMR3Register(pClassFile->Core.pVM, &pCache->StatTreeGet,
982 STAMTYPE_PROFILE_ADV, STAMVISIBILITY_ALWAYS,
983 "/PDM/AsyncCompletion/File/CacheTreeGet",
984 STAMUNIT_TICKS_PER_CALL, "Time taken to access an entry in the tree");
985 STAMR3Register(pClassFile->Core.pVM, &pCache->StatTreeInsert,
986 STAMTYPE_PROFILE_ADV, STAMVISIBILITY_ALWAYS,
987 "/PDM/AsyncCompletion/File/CacheTreeInsert",
988 STAMUNIT_TICKS_PER_CALL, "Time taken to insert an entry in the tree");
989 STAMR3Register(pClassFile->Core.pVM, &pCache->StatTreeRemove,
990 STAMTYPE_PROFILE_ADV, STAMVISIBILITY_ALWAYS,
991 "/PDM/AsyncCompletion/File/CacheTreeRemove",
992 STAMUNIT_TICKS_PER_CALL, "Time taken to remove an entry from the tree");
993 STAMR3Register(pClassFile->Core.pVM, &pCache->StatBuffersReused,
994 STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS,
995 "/PDM/AsyncCompletion/File/CacheBuffersReused",
996 STAMUNIT_COUNT, "Number of times a buffer could be reused");
997#endif
998
999 /* Initialize the critical section */
1000 rc = RTCritSectInit(&pCache->CritSect);
1001
1002 if (RT_SUCCESS(rc))
1003 {
1004 /* Create the commit timer */
1005 if (pCache->u32CommitTimeoutMs > 0)
1006 rc = TMR3TimerCreateInternal(pClassFile->Core.pVM, TMCLOCK_REAL,
1007 pdmacFileCacheCommitTimerCallback,
1008 pClassFile,
1009 "Cache-Commit",
1010 &pClassFile->Cache.pTimerCommit);
1011
1012 if (RT_SUCCESS(rc))
1013 {
1014 LogRel(("AIOMgr: Cache successfully initialised. Cache size is %u bytes\n", pCache->cbMax));
1015 LogRel(("AIOMgr: Cache commit interval is %u ms\n", pCache->u32CommitTimeoutMs));
1016 LogRel(("AIOMgr: Cache commit threshold is %u bytes\n", pCache->cbCommitDirtyThreshold));
1017 return VINF_SUCCESS;
1018 }
1019
1020 RTCritSectDelete(&pCache->CritSect);
1021 }
1022
1023 return rc;
1024}
1025
1026/**
1027 * Destroys the cache, freeing all data.
1028 *
1029 * @returns nothing.
1030 * @param pClassFile The global class data for file endpoints.
1031 */
1032void pdmacFileCacheDestroy(PPDMASYNCCOMPLETIONEPCLASSFILE pClassFile)
1033{
1034 PPDMACFILECACHEGLOBAL pCache = &pClassFile->Cache;
1035
1036 /* Make sure no one else uses the cache now */
1037 pdmacFileCacheLockEnter(pCache);
1038
1039 /* Clean up, deleting all cache entries and waiting for in-progress entries to finish. */
1040 pdmacFileCacheDestroyList(&pCache->LruRecentlyUsedIn);
1041 pdmacFileCacheDestroyList(&pCache->LruRecentlyUsedOut);
1042 pdmacFileCacheDestroyList(&pCache->LruFrequentlyUsed);
1043
1044 pdmacFileCacheLockLeave(pCache);
1045
1046 RTCritSectDelete(&pCache->CritSect);
1047}
1048
1049/**
1050 * Initializes per endpoint cache data
1051 * like the AVL tree used to access cached entries.
1052 *
1053 * @returns VBox status code.
1054 * @param pEndpoint The endpoint to init the cache for.
1055 * @param pClassFile The global class data for file endpoints.
1056 */
1057int pdmacFileEpCacheInit(PPDMASYNCCOMPLETIONENDPOINTFILE pEndpoint, PPDMASYNCCOMPLETIONEPCLASSFILE pClassFile)
1058{
1059 PPDMACFILEENDPOINTCACHE pEndpointCache = &pEndpoint->DataCache;
1060
1061 pEndpointCache->pCache = &pClassFile->Cache;
1062 RTListInit(&pEndpointCache->ListDirtyNotCommitted);
1063 int rc = RTSpinlockCreate(&pEndpointCache->LockList);
1064
1065 if (RT_SUCCESS(rc))
1066 {
1067 rc = RTSemRWCreate(&pEndpointCache->SemRWEntries);
1068 if (RT_SUCCESS(rc))
1069 {
1070 pEndpointCache->pTree = (PAVLRFOFFTREE)RTMemAllocZ(sizeof(AVLRFOFFTREE));
1071 if (pEndpointCache->pTree)
1072 {
1073 pClassFile->Cache.cRefs++;
1074 RTListAppend(&pClassFile->Cache.ListEndpoints, &pEndpointCache->NodeCacheEndpoint);
1075
1076 /* Arm the timer if this is the first endpoint. */
1077 if ( pClassFile->Cache.cRefs == 1
1078 && pClassFile->Cache.u32CommitTimeoutMs > 0)
1079 rc = TMTimerSetMillies(pClassFile->Cache.pTimerCommit, pClassFile->Cache.u32CommitTimeoutMs);
1080 }
1081 else
1082 rc = VERR_NO_MEMORY;
1083
1084 if (RT_FAILURE(rc))
1085 RTSemRWDestroy(pEndpointCache->SemRWEntries);
1086 }
1087
1088 if (RT_FAILURE(rc))
1089 RTSpinlockDestroy(pEndpointCache->LockList);
1090 }
1091
1092#ifdef VBOX_WITH_STATISTICS
1093 if (RT_SUCCESS(rc))
1094 {
1095 STAMR3RegisterF(pClassFile->Core.pVM, &pEndpointCache->StatWriteDeferred,
1096 STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS,
1097 STAMUNIT_COUNT, "Number of deferred writes",
1098 "/PDM/AsyncCompletion/File/%s/Cache/DeferredWrites", RTPathFilename(pEndpoint->Core.pszUri));
1099 }
1100#endif
1101
1102 LogFlowFunc(("Leave rc=%Rrc\n", rc));
1103 return rc;
1104}
1105
1106/**
1107 * Callback for the AVL destroy routine. Frees a cache entry for this endpoint.
1108 *
1109 * @returns IPRT status code.
1110 * @param pNode The node to destroy.
1111 * @param pvUser Opaque user data.
1112 */
1113static int pdmacFileEpCacheEntryDestroy(PAVLRFOFFNODECORE pNode, void *pvUser)
1114{
1115 PPDMACFILECACHEENTRY pEntry = (PPDMACFILECACHEENTRY)pNode;
1116 PPDMACFILECACHEGLOBAL pCache = (PPDMACFILECACHEGLOBAL)pvUser;
1117 PPDMACFILEENDPOINTCACHE pEndpointCache = &pEntry->pEndpoint->DataCache;
1118
1119 while (ASMAtomicReadU32(&pEntry->fFlags) & (PDMACFILECACHE_ENTRY_IO_IN_PROGRESS | PDMACFILECACHE_ENTRY_IS_DIRTY))
1120 {
1121 RTSemRWReleaseWrite(pEndpointCache->SemRWEntries);
1122 RTThreadSleep(250);
1123 RTSemRWRequestWrite(pEndpointCache->SemRWEntries, RT_INDEFINITE_WAIT);
1124 }
1125
1126 AssertMsg(!(pEntry->fFlags & (PDMACFILECACHE_ENTRY_IO_IN_PROGRESS | PDMACFILECACHE_ENTRY_IS_DIRTY)),
1127 ("Entry is dirty and/or still in progress fFlags=%#x\n", pEntry->fFlags));
1128
1129 bool fUpdateCache = pEntry->pList == &pCache->LruFrequentlyUsed
1130 || pEntry->pList == &pCache->LruRecentlyUsedIn;
1131
1132 pdmacFileCacheEntryRemoveFromList(pEntry);
1133
1134 if (fUpdateCache)
1135 pdmacFileCacheSub(pCache, pEntry->cbData);
1136
1137 RTMemPageFree(pEntry->pbData);
1138 RTMemFree(pEntry);
1139
1140 return VINF_SUCCESS;
1141}
1142
1143/**
1144 * Destroys all cache resources used by the given endpoint.
1145 *
1146 * @returns nothing.
1147 * @param pEndpoint The endpoint to destroy the cache for.
1148 */
1149void pdmacFileEpCacheDestroy(PPDMASYNCCOMPLETIONENDPOINTFILE pEndpoint)
1150{
1151 PPDMACFILEENDPOINTCACHE pEndpointCache = &pEndpoint->DataCache;
1152 PPDMACFILECACHEGLOBAL pCache = pEndpointCache->pCache;
1153
1154 /* Make sure nobody is accessing the cache while we delete the tree. */
1155 pdmacFileCacheLockEnter(pCache);
1156 RTSemRWRequestWrite(pEndpointCache->SemRWEntries, RT_INDEFINITE_WAIT);
1157 RTAvlrFileOffsetDestroy(pEndpointCache->pTree, pdmacFileEpCacheEntryDestroy, pCache);
1158 RTSemRWReleaseWrite(pEndpointCache->SemRWEntries);
1159
1160 RTSpinlockDestroy(pEndpointCache->LockList);
1161
1162 pCache->cRefs--;
1163 RTListNodeRemove(&pEndpointCache->NodeCacheEndpoint);
1164
1165 if ( !pCache->cRefs
1166 && pCache->u32CommitTimeoutMs > 0)
1167 TMTimerStop(pCache->pTimerCommit);
1168
1169 pdmacFileCacheLockLeave(pCache);
1170
1171 RTSemRWDestroy(pEndpointCache->SemRWEntries);
1172
1173#ifdef VBOX_WITH_STATISTICS
1174 PPDMASYNCCOMPLETIONEPCLASSFILE pEpClassFile = (PPDMASYNCCOMPLETIONEPCLASSFILE)pEndpoint->Core.pEpClass;
1175
1176 STAMR3Deregister(pEpClassFile->Core.pVM, &pEndpointCache->StatWriteDeferred);
1177#endif
1178}
1179
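/**
 * Looks up the cache entry covering the given file offset, referencing it on
 * success. The caller has to release the entry again.
 *
 * @returns Pointer to the referenced cache entry or NULL if no entry covers
 * the offset.
 * @param pEndpointCache The endpoint cache to search in.
 * @param off The file offset to look up.
 */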
1180static PPDMACFILECACHEENTRY pdmacFileEpCacheGetCacheEntryByOffset(PPDMACFILEENDPOINTCACHE pEndpointCache, RTFOFF off)
1181{
1182 PPDMACFILECACHEGLOBAL pCache = pEndpointCache->pCache;
1183 PPDMACFILECACHEENTRY pEntry = NULL;
1184
1185 STAM_PROFILE_ADV_START(&pCache->StatTreeGet, Cache);
1186
1187 RTSemRWRequestRead(pEndpointCache->SemRWEntries, RT_INDEFINITE_WAIT);
1188 pEntry = (PPDMACFILECACHEENTRY)RTAvlrFileOffsetRangeGet(pEndpointCache->pTree, off);
1189 if (pEntry)
1190 pdmacFileEpCacheEntryRef(pEntry);
1191 RTSemRWReleaseRead(pEndpointCache->SemRWEntries);
1192
1193 STAM_PROFILE_ADV_STOP(&pCache->StatTreeGet, Cache);
1194
1195 return pEntry;
1196}
1197
1198/**
1199 * Return the best fit cache entries for the given offset.
1200 *
1201 * @returns nothing.
1202 * @param pEndpointCache The endpoint cache.
1203 * @param off The offset.
1204 * @param ppEntryAbove Where to store the pointer to the best fit entry above
1205 * the given offset. NULL if not required.
1206 * @param ppEntryBelow Where to store the pointer to the best fit entry below
1207 * the given offset. NULL if not required.
1208 */
1209static void pdmacFileEpCacheGetCacheBestFitEntryByOffset(PPDMACFILEENDPOINTCACHE pEndpointCache, RTFOFF off,
1210 PPDMACFILECACHEENTRY *ppEntryAbove,
1211 PPDMACFILECACHEENTRY *ppEntryBelow)
1212{
1213 PPDMACFILECACHEGLOBAL pCache = pEndpointCache->pCache;
1214
1215 STAM_PROFILE_ADV_START(&pCache->StatTreeGet, Cache);
1216
1217 RTSemRWRequestRead(pEndpointCache->SemRWEntries, RT_INDEFINITE_WAIT);
1218 if (ppEntryAbove)
1219 {
1220 *ppEntryAbove = (PPDMACFILECACHEENTRY)RTAvlrFileOffsetGetBestFit(pEndpointCache->pTree, off, true /*fAbove*/);
1221 if (*ppEntryAbove)
1222 pdmacFileEpCacheEntryRef(*ppEntryAbove);
1223 }
1224
1225 if (ppEntryBelow)
1226 {
1227 *ppEntryBelow = (PPDMACFILECACHEENTRY)RTAvlrFileOffsetGetBestFit(pEndpointCache->pTree, off, false /*fAbove*/);
1228 if (*ppEntryBelow)
1229 pdmacFileEpCacheEntryRef(*ppEntryBelow);
1230 }
1231 RTSemRWReleaseRead(pEndpointCache->SemRWEntries);
1232
1233 STAM_PROFILE_ADV_STOP(&pCache->StatTreeGet, Cache);
1234}
1235
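/**
 * Inserts the given entry into the AVL tree of the endpoint cache.
 *
 * @returns nothing.
 * @param pEndpointCache The endpoint cache to insert the entry into.
 * @param pEntry The cache entry to insert.
 */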
1236static void pdmacFileEpCacheInsertEntry(PPDMACFILEENDPOINTCACHE pEndpointCache, PPDMACFILECACHEENTRY pEntry)
1237{
1238 PPDMACFILECACHEGLOBAL pCache = pEndpointCache->pCache;
1239
1240 STAM_PROFILE_ADV_START(&pCache->StatTreeInsert, Cache);
1241 RTSemRWRequestWrite(pEndpointCache->SemRWEntries, RT_INDEFINITE_WAIT);
1242 bool fInserted = RTAvlrFileOffsetInsert(pEndpointCache->pTree, &pEntry->Core);
1243 AssertMsg(fInserted, ("Node was not inserted into tree\n"));
1244 STAM_PROFILE_ADV_STOP(&pCache->StatTreeInsert, Cache);
1245 RTSemRWReleaseWrite(pEndpointCache->SemRWEntries);
1246}
1247
1248/**
1249 * Allocates and initializes a new entry for the cache.
1250 * The entry has a reference count of 1.
1251 *
1252 * @returns Pointer to the new cache entry or NULL if out of memory.
1253 * @param pCache The cache the entry belongs to.
1254 * @param pEndpoint The endpoint the entry holds data for.
1255 * @param off Start offset.
1256 * @param cbData Size of the cache entry.
1257 * @param pbBuffer Pointer to the buffer to use.
1258 * NULL if a new buffer should be allocated.
1259 * The buffer needs to have the same size as the entry.
1260 */
1261static PPDMACFILECACHEENTRY pdmacFileCacheEntryAlloc(PPDMACFILECACHEGLOBAL pCache,
1262 PPDMASYNCCOMPLETIONENDPOINTFILE pEndpoint,
1263 RTFOFF off, size_t cbData, uint8_t *pbBuffer)
1264{
1265 PPDMACFILECACHEENTRY pEntryNew = (PPDMACFILECACHEENTRY)RTMemAllocZ(sizeof(PDMACFILECACHEENTRY));
1266
1267 if (RT_UNLIKELY(!pEntryNew))
1268 return NULL;
1269
1270 pEntryNew->Core.Key = off;
1271 pEntryNew->Core.KeyLast = off + cbData - 1;
1272 pEntryNew->pEndpoint = pEndpoint;
1273 pEntryNew->pCache = pCache;
1274 pEntryNew->fFlags = 0;
1275 pEntryNew->cRefs = 1; /* We are using it now. */
1276 pEntryNew->pList = NULL;
1277 pEntryNew->cbData = cbData;
1278 pEntryNew->pWaitingHead = NULL;
1279 pEntryNew->pWaitingTail = NULL;
1280 if (pbBuffer)
1281 pEntryNew->pbData = pbBuffer;
1282 else
1283 pEntryNew->pbData = (uint8_t *)RTMemPageAlloc(cbData);
1284
1285 if (RT_UNLIKELY(!pEntryNew->pbData))
1286 {
1287 RTMemFree(pEntryNew);
1288 return NULL;
1289 }
1290
1291 return pEntryNew;
1292}
1293
1294/**
1295 * Adds a segment to the waiting list for a cache entry
1296 * which is currently in progress.
1297 *
1298 * @returns nothing.
1299 * @param pEntry The cache entry to add the segment to.
1300 * @param pSeg The segment to add.
1301 */
1302DECLINLINE(void) pdmacFileEpCacheEntryAddWaitingSegment(PPDMACFILECACHEENTRY pEntry, PPDMACFILETASKSEG pSeg)
1303{
1304 pSeg->pNext = NULL;
1305
1306 if (pEntry->pWaitingHead)
1307 {
1308 AssertPtr(pEntry->pWaitingTail);
1309
1310 pEntry->pWaitingTail->pNext = pSeg;
1311 pEntry->pWaitingTail = pSeg;
1312 }
1313 else
1314 {
1315 Assert(!pEntry->pWaitingTail);
1316
1317 pEntry->pWaitingHead = pSeg;
1318 pEntry->pWaitingTail = pSeg;
1319 }
1320}
1321
1322/**
1323 * Checks that a set of flags is set/clear acquiring the R/W semaphore
1324 * in exclusive mode.
1325 *
1326 * @returns true if the flag in fSet is set and the one in fClear is clear.
1327 * false otherwise.
1328 * The R/W semaphore is only held if true is returned.
1329 *
1330 * @param pEndpointCache The endpoint cache instance data.
1331 * @param pEntry The entry to check the flags for.
1332 * @param fSet The flag which is tested to be set.
1333 * @param fClear The flag which is tested to be clear.
1334 */
1335DECLINLINE(bool) pdmacFileEpCacheEntryFlagIsSetClearAcquireLock(PPDMACFILEENDPOINTCACHE pEndpointCache,
1336 PPDMACFILECACHEENTRY pEntry,
1337 uint32_t fSet, uint32_t fClear)
1338{
1339 uint32_t fFlags = ASMAtomicReadU32(&pEntry->fFlags);
1340 bool fPassed = ((fFlags & fSet) && !(fFlags & fClear));
1341
1342 if (fPassed)
1343 {
1344 /* Acquire the lock and check again because the completion callback might have raced us. */
1345 RTSemRWRequestWrite(pEndpointCache->SemRWEntries, RT_INDEFINITE_WAIT);
1346
1347 fFlags = ASMAtomicReadU32(&pEntry->fFlags);
1348 fPassed = ((fFlags & fSet) && !(fFlags & fClear));
1349
1350 /* Drop the lock if we didn't pass the test. */
1351 if (!fPassed)
1352 RTSemRWReleaseWrite(pEndpointCache->SemRWEntries);
1353 }
1354
1355 return fPassed;
1356}
1357
1358/**
1359 * Copies data to a buffer described by an I/O memory context.
1360 *
1361 * @returns nothing.
1362 * @param pIoMemCtx The I/O memory context to copy the data into.
1363 * @param pbData Pointer to the data to copy.
1364 * @param cbData Amount of data to copy.
1365 */
1366static void pdmacFileEpCacheCopyToIoMemCtx(PPDMIOMEMCTX pIoMemCtx,
1367 uint8_t *pbData,
1368 size_t cbData)
1369{
1370 while (cbData)
1371 {
1372 size_t cbCopy = cbData;
1373 uint8_t *pbBuf = pdmIoMemCtxGetBuffer(pIoMemCtx, &cbCopy);
1374
1375 AssertPtr(pbBuf);
1376
1377 memcpy(pbBuf, pbData, cbCopy);
1378
1379 cbData -= cbCopy;
1380 pbData += cbCopy;
1381 }
1382}
1383
1384/**
1385 * Copies data from a buffer described by an I/O memory context.
1386 *
1387 * @returns nothing.
1388 * @param pIoMemCtx The I/O memory context to copy the data from.
1389 * @param pbData Pointer to the destination buffer.
1390 * @param cbData Amount of data to copy.
1391 */
1392static void pdmacFileEpCacheCopyFromIoMemCtx(PPDMIOMEMCTX pIoMemCtx,
1393 uint8_t *pbData,
1394 size_t cbData)
1395{
1396 while (cbData)
1397 {
1398 size_t cbCopy = cbData;
1399 uint8_t *pbBuf = pdmIoMemCtxGetBuffer(pIoMemCtx, &cbCopy);
1400
1401 AssertPtr(pbBuf);
1402
1403 memcpy(pbData, pbBuf, cbCopy);
1404
1405 cbData -= cbCopy;
1406 pbData += cbCopy;
1407 }
1408}
1409
1410/**
1411 * Add a buffer described by the I/O memory context
1412 * to the entry waiting for completion.
1413 *
1414 * @returns nothing.
1415 * @param pEntry The entry to add the buffer to.
1416 * @param pTask Task associated with the buffer.
1417 * @param pIoMemCtx The memory context to use.
1418 * @param OffDiff Offset from the start of the buffer
1419 * in the entry.
1420 * @param cbData Amount of data to wait for on this entry.
1421 * @param fWrite Flag whether the task waits because it wants to write
1422 * to the cache entry.
1423 */
1424static void pdmacFileEpCacheEntryWaitersAdd(PPDMACFILECACHEENTRY pEntry,
1425 PPDMASYNCCOMPLETIONTASKFILE pTask,
1426 PPDMIOMEMCTX pIoMemCtx,
1427 RTFOFF OffDiff,
1428 size_t cbData,
1429 bool fWrite)
1430{
1431 while (cbData)
1432 {
1433 PPDMACFILETASKSEG pSeg = (PPDMACFILETASKSEG)RTMemAllocZ(sizeof(PDMACFILETASKSEG));
1434 size_t cbSeg = cbData;
1435 uint8_t *pbBuf = pdmIoMemCtxGetBuffer(pIoMemCtx, &cbSeg);
1436
1437 pSeg->pTask = pTask;
1438 pSeg->uBufOffset = OffDiff;
1439 pSeg->cbTransfer = cbSeg;
1440 pSeg->pvBuf = pbBuf;
1441 pSeg->fWrite = fWrite;
1442
1443 pdmacFileEpCacheEntryAddWaitingSegment(pEntry, pSeg);
1444
1445 cbData -= cbSeg;
1446 OffDiff += cbSeg;
1447 }
1448}
1449
1450/**
1451 * Passes a part of a request directly through to the I/O manager
1452 * handling the endpoint.
1453 *
1454 * @returns nothing.
1455 * @param pEndpoint The endpoint.
1456 * @param pTask The task.
1457 * @param pIoMemCtx The I/O memory context to use.
1458 * @param offStart Offset to start transfer from.
1459 * @param cbData Amount of data to transfer.
1460 * @param enmTransferType The transfer type (read/write)
1461 */
1462static void pdmacFileEpCacheRequestPassthrough(PPDMASYNCCOMPLETIONENDPOINTFILE pEndpoint,
1463 PPDMASYNCCOMPLETIONTASKFILE pTask,
1464 PPDMIOMEMCTX pIoMemCtx,
1465 RTFOFF offStart, size_t cbData,
1466 PDMACTASKFILETRANSFER enmTransferType)
1467{
1468 while (cbData)
1469 {
1470 size_t cbSeg = cbData;
1471 uint8_t *pbBuf = pdmIoMemCtxGetBuffer(pIoMemCtx, &cbSeg);
1472 PPDMACTASKFILE pIoTask = pdmacFileTaskAlloc(pEndpoint);
1473 AssertPtr(pIoTask);
1474
1475 pIoTask->pEndpoint = pEndpoint;
1476 pIoTask->enmTransferType = enmTransferType;
1477 pIoTask->Off = offStart;
1478 pIoTask->DataSeg.cbSeg = cbSeg;
1479 pIoTask->DataSeg.pvSeg = pbBuf;
1480 pIoTask->pvUser = pTask;
1481 pIoTask->pfnCompleted = pdmacFileEpTaskCompleted;
1482
1483 offStart += cbSeg;
1484 cbData -= cbSeg;
1485
1486 /* Send it off to the I/O manager. */
1487 pdmacFileEpAddTask(pEndpoint, pIoTask);
1488 }
1489}
1490
1491/**
1492 * Calculates the aligned offset and size for a new cache entry
1493 * which do not intersect with an already existing entry or exceed
1494 * the file end.
1495 *
1496 * @returns The number of bytes the entry can hold of the requested amount
1497 * of bytes.
1498 * @param pEndpoint The endpoint.
1499 * @param pEndpointCache The endpoint cache.
1500 * @param off The start offset.
1501 * @param cb The number of bytes the entry needs to hold at least.
1502 * @param uAlignment Alignment of the boundary sizes.
1503 * @param poffAligned Where to store the aligned offset.
1504 * @param pcbAligned Where to store the aligned size of the entry.
1505 */
1506static size_t pdmacFileEpCacheEntryBoundariesCalc(PPDMASYNCCOMPLETIONENDPOINTFILE pEndpoint,
1507 PPDMACFILEENDPOINTCACHE pEndpointCache,
1508 RTFOFF off, size_t cb,
1509 unsigned uAlignment,
1510 RTFOFF *poffAligned, size_t *pcbAligned)
1511{
1512 size_t cbAligned;
1513 size_t cbInEntry = 0;
1514 RTFOFF offAligned;
1515 PPDMACFILECACHEENTRY pEntryAbove = NULL;
1516 PPDMACFILECACHEENTRY pEntryBelow = NULL;
1517
1518 /* Get the best fit entries around the offset */
1519 pdmacFileEpCacheGetCacheBestFitEntryByOffset(pEndpointCache, off,
1520 &pEntryAbove, &pEntryBelow);
1521
1522 /* Log the info */
1523 LogFlow(("%sest fit entry below off=%RTfoff (BestFit=%RTfoff BestFitEnd=%RTfoff BestFitSize=%u)\n",
1524 pEntryBelow ? "B" : "No b",
1525 off,
1526 pEntryBelow ? pEntryBelow->Core.Key : 0,
1527 pEntryBelow ? pEntryBelow->Core.KeyLast : 0,
1528 pEntryBelow ? pEntryBelow->cbData : 0));
1529
1530 LogFlow(("%sest fit entry above off=%RTfoff (BestFit=%RTfoff BestFitEnd=%RTfoff BestFitSize=%u)\n",
1531 pEntryAbove ? "B" : "No b",
1532 off,
1533 pEntryAbove ? pEntryAbove->Core.Key : 0,
1534 pEntryAbove ? pEntryAbove->Core.KeyLast : 0,
1535 pEntryAbove ? pEntryAbove->cbData : 0));
1536
1537 /* Align the offset first. */
1538 offAligned = off & ~(RTFOFF)(512-1);
1539 if ( pEntryBelow
1540 && offAligned <= pEntryBelow->Core.KeyLast)
1541 offAligned = pEntryBelow->Core.KeyLast;
1542
1543 if ( pEntryAbove
1544 && off + (RTFOFF)cb > pEntryAbove->Core.Key)
1545 {
1546 cbInEntry = pEntryAbove->Core.Key - off;
1547 cbAligned = pEntryAbove->Core.Key - offAligned;
1548 }
1549 else
1550 {
1551 /*
1552 * Align the size to a 4KB boundary.
1553 * Memory size is aligned to a page boundary
1554 * and memory is wasted if the size is rather small.
1555 * (For example reads with a size of 512 bytes).
1556 */
1557 cbInEntry = cb;
1558 cbAligned = RT_ALIGN_Z(cb + (off - offAligned), uAlignment);
1559
1560 /*
1561 * Clip to file size if the original request doesn't
1562 * exceed the file (not an appending write)
1563 */
1564 uint64_t cbReq = off + (RTFOFF)cb;
1565 if (cbReq >= pEndpoint->cbFile)
1566 cbAligned = cbReq - offAligned;
1567 else
1568 cbAligned = RT_MIN(pEndpoint->cbFile - offAligned, cbAligned);
1569 if (pEntryAbove)
1570 {
1571 Assert(pEntryAbove->Core.Key >= off);
1572 cbAligned = RT_MIN(cbAligned, (uint64_t)pEntryAbove->Core.Key - offAligned);
1573 }
1574 }
1575
1576 /* A few sanity checks */
1577 AssertMsg(!pEntryBelow || pEntryBelow->Core.KeyLast < offAligned,
1578 ("Aligned start offset intersects with another cache entry\n"));
1579 AssertMsg(!pEntryAbove || (offAligned + (RTFOFF)cbAligned) <= pEntryAbove->Core.Key,
1580 ("Aligned size intersects with another cache entry\n"));
1581 Assert(cbInEntry <= cbAligned);
1582 AssertMsg( ( offAligned + (RTFOFF)cbAligned <= (RTFOFF)pEndpoint->cbFile
1583 && off + (RTFOFF)cb <= (RTFOFF)pEndpoint->cbFile)
1584 || (offAligned + (RTFOFF)cbAligned <= off + (RTFOFF)cb),
1585 ("Unwanted file size increase\n"));
1586
1587 if (pEntryBelow)
1588 pdmacFileEpCacheEntryRelease(pEntryBelow);
1589 if (pEntryAbove)
1590 pdmacFileEpCacheEntryRelease(pEntryAbove);
1591
1592 LogFlow(("offAligned=%RTfoff cbAligned=%u\n", offAligned, cbAligned));
1593
1594 *poffAligned = offAligned;
1595 *pcbAligned = cbAligned;
1596
1597 return cbInEntry;
1598}
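/*
 * Worked example for the calculation above (values chosen for illustration,
 * assuming a 4KB alignment, a sufficiently large file and no neighbouring
 * entries): off=4660, cb=512 gives offAligned = 4660 & ~511 = 4608 and
 * cbAligned = RT_ALIGN_Z(512 + 52, 4096) = 4096, i.e. the new entry covers
 * the file range [4608, 8704) and holds the complete request (cbInEntry = 512).
 */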
1599
1600/**
1601 * Create a new cache entry evicting data from the cache if required.
1602 *
1603 * @returns Pointer to the new cache entry or NULL
1604 * if not enough bytes could be evicted from the cache.
1605 * @param pEndpoint The endpoint.
1606 * @param pEndpointCache The endpoint cache.
1607 * @param off The offset.
1608 * @param cb Number of bytes the cache entry should have.
1609 * @param uAlignment Alignment the size of the entry should have.
1610 * @param pcbData Where to store the number of bytes the new
1611 * entry can hold. May be lower than actually requested
1612 * due to another entry intersecting the access range.
1613 */
1614static PPDMACFILECACHEENTRY pdmacFileEpCacheEntryCreate(PPDMASYNCCOMPLETIONENDPOINTFILE pEndpoint,
1615 PPDMACFILEENDPOINTCACHE pEndpointCache,
1616 RTFOFF off, size_t cb,
1617 unsigned uAlignment,
1618 size_t *pcbData)
1619{
1620 RTFOFF offStart = 0;
1621 size_t cbEntry = 0;
1622 PPDMACFILECACHEENTRY pEntryNew = NULL;
1623 PPDMACFILECACHEGLOBAL pCache = pEndpointCache->pCache;
1624 uint8_t *pbBuffer = NULL;
1625
1626 *pcbData = pdmacFileEpCacheEntryBoundariesCalc(pEndpoint,
1627 pEndpointCache,
1628 off, cb,
1629 uAlignment,
1630 &offStart, &cbEntry);
1631
1632 pdmacFileCacheLockEnter(pCache);
1633 bool fEnough = pdmacFileCacheReclaim(pCache, cbEntry, true, &pbBuffer);
1634
1635 if (fEnough)
1636 {
1637 LogFlow(("Evicted enough bytes (%u requested). Creating new cache entry\n", cbEntry));
1638
1639 pEntryNew = pdmacFileCacheEntryAlloc(pCache, pEndpoint,
1640 offStart, cbEntry,
1641 pbBuffer);
1642 if (RT_LIKELY(pEntryNew))
1643 {
1644 pdmacFileCacheEntryAddToList(&pCache->LruRecentlyUsedIn, pEntryNew);
1645 pdmacFileCacheAdd(pCache, cbEntry);
1646 pdmacFileCacheLockLeave(pCache);
1647
1648 pdmacFileEpCacheInsertEntry(pEndpointCache, pEntryNew);
1649
1650 AssertMsg( (off >= pEntryNew->Core.Key)
1651 && (off + (RTFOFF)*pcbData <= pEntryNew->Core.KeyLast + 1),
1652 ("Overflow in calculation off=%RTfoff OffsetAligned=%RTfoff\n",
1653 off, pEntryNew->Core.Key));
1654 }
1655 else
1656 pdmacFileCacheLockLeave(pCache);
1657 }
1658 else
1659 pdmacFileCacheLockLeave(pCache);
1660
1661 return pEntryNew;
1662}
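/*
 * Editor's note -- illustrative sketch, not part of the original source.
 * Typical use of pdmacFileEpCacheEntryCreate() as seen in the read and write
 * paths below; cbLeft is a hypothetical name for the remaining request size.
 * The entry may cover fewer bytes than requested, so the caller loops and
 * passes the remainder through if creation fails:
 *
 *     size_t cbData = 0;
 *     PPDMACFILECACHEENTRY pEntry = pdmacFileEpCacheEntryCreate(pEndpoint, pEndpointCache,
 *                                                               off, cbLeft, PAGE_SIZE, &cbData);
 *     if (pEntry)
 *     {
 *         // add the task as a waiter, fetch or fill the entry, then release it
 *     }
 *     else
 *     {
 *         // not enough space could be evicted: hand this chunk to the I/O manager
 *     }
 *     cbLeft -= cbData;
 *     off    += cbData;
 */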
1663
1664/**
1665 * Reads the specified data from the endpoint using the cache if possible.
1666 *
1667 * @returns VBox status code.
1668 * @param pEndpoint The endpoint to read from.
1669 * @param pTask The task structure used as identifier for this request.
1670 * @param off The offset to start reading from.
1671 * @param paSegments Pointer to the array holding the destination buffers.
1672 * @param cSegments Number of segments in the array.
1673 * @param cbRead Number of bytes to read.
1674 */
1675int pdmacFileEpCacheRead(PPDMASYNCCOMPLETIONENDPOINTFILE pEndpoint, PPDMASYNCCOMPLETIONTASKFILE pTask,
1676 RTFOFF off, PCPDMDATASEG paSegments, size_t cSegments,
1677 size_t cbRead)
1678{
1679 int rc = VINF_SUCCESS;
1680 PPDMACFILEENDPOINTCACHE pEndpointCache = &pEndpoint->DataCache;
1681 PPDMACFILECACHEGLOBAL pCache = pEndpointCache->pCache;
1682 PPDMACFILECACHEENTRY pEntry;
1683
1684 LogFlowFunc((": pEndpoint=%#p{%s} pTask=%#p off=%RTfoff paSegments=%#p cSegments=%u cbRead=%u\n",
1685 pEndpoint, pEndpoint->Core.pszUri, pTask, off, paSegments, cSegments, cbRead));
1686
1687 pTask->cbTransferLeft = cbRead;
1688 /* Set to completed to make sure that the task is valid while we access it. */
1689 ASMAtomicWriteBool(&pTask->fCompleted, true);
1690
1691 /* Init the I/O memory context */
1692 PDMIOMEMCTX IoMemCtx;
1693 pdmIoMemCtxInit(&IoMemCtx, paSegments, cSegments);
1694
1695 while (cbRead)
1696 {
1697 size_t cbToRead;
1698
1699 pEntry = pdmacFileEpCacheGetCacheEntryByOffset(pEndpointCache, off);
1700
 1701        /*
 1702         * If there is no entry we try to create a new one, evicting unused pages
 1703         * if the cache is full. If this is not possible we pass the request through
 1704         * and skip the caching (all entries may still be in progress, so they can't
 1705         * be evicted).
 1706         * If we have an entry it can be in one of the LRU lists holding data
 1707         * (recently used or frequently used LRU), so we just read the data we need
 1708         * and put the entry at the head of the frequently used LRU list.
 1709         * If the entry is in one of the ghost lists it doesn't contain any data;
 1710         * we have to fetch it again, evicting pages from either T1 or T2 to make room.
 1711         */
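        /*
         * Editor's note -- not part of the original source: in 2Q terms the
         * lists used below presumably map as follows. LruRecentlyUsedIn is
         * the A1in queue for freshly cached data, LruFrequentlyUsed is the
         * Am queue for re-referenced data, and the ghost list(s) (A1out)
         * remember evicted ranges without keeping their data buffers.
         */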
1712 if (pEntry)
1713 {
1714 RTFOFF OffDiff = off - pEntry->Core.Key;
1715
1716 AssertMsg(off >= pEntry->Core.Key,
1717 ("Overflow in calculation off=%RTfoff OffsetAligned=%RTfoff\n",
1718 off, pEntry->Core.Key));
1719
1720 AssertPtr(pEntry->pList);
1721
1722 cbToRead = RT_MIN(pEntry->cbData - OffDiff, cbRead);
1723
 1724            AssertMsg(off + (RTFOFF)cbToRead <= pEntry->Core.KeyLast + 1,
1725 ("Buffer of cache entry exceeded off=%RTfoff cbToRead=%d\n",
1726 off, cbToRead));
1727
1728 cbRead -= cbToRead;
1729
1730 if (!cbRead)
1731 STAM_COUNTER_INC(&pCache->cHits);
1732 else
1733 STAM_COUNTER_INC(&pCache->cPartialHits);
1734
1735 STAM_COUNTER_ADD(&pCache->StatRead, cbToRead);
1736
1737 /* Ghost lists contain no data. */
1738 if ( (pEntry->pList == &pCache->LruRecentlyUsedIn)
1739 || (pEntry->pList == &pCache->LruFrequentlyUsed))
1740 {
1741 if (pdmacFileEpCacheEntryFlagIsSetClearAcquireLock(pEndpointCache, pEntry,
1742 PDMACFILECACHE_ENTRY_IO_IN_PROGRESS,
1743 PDMACFILECACHE_ENTRY_IS_DIRTY))
1744 {
 1745                    /* Entry hasn't completed yet. Append the request to the list of waiters. */
1746 pdmacFileEpCacheEntryWaitersAdd(pEntry, pTask,
1747 &IoMemCtx,
1748 OffDiff, cbToRead,
1749 false /* fWrite */);
1750 RTSemRWReleaseWrite(pEndpointCache->SemRWEntries);
1751 }
1752 else
1753 {
1754 /* Read as much as we can from the entry. */
1755 pdmacFileEpCacheCopyToIoMemCtx(&IoMemCtx, pEntry->pbData + OffDiff, cbToRead);
1756 ASMAtomicSubS32(&pTask->cbTransferLeft, cbToRead);
1757 }
1758
1759 /* Move this entry to the top position */
1760 if (pEntry->pList == &pCache->LruFrequentlyUsed)
1761 {
1762 pdmacFileCacheLockEnter(pCache);
1763 pdmacFileCacheEntryAddToList(&pCache->LruFrequentlyUsed, pEntry);
1764 pdmacFileCacheLockLeave(pCache);
1765 }
1766 /* Release the entry */
1767 pdmacFileEpCacheEntryRelease(pEntry);
1768 }
1769 else
1770 {
1771 uint8_t *pbBuffer = NULL;
1772
1773 LogFlow(("Fetching data for ghost entry %#p from file\n", pEntry));
1774
1775 pdmacFileCacheLockEnter(pCache);
1776 pdmacFileCacheEntryRemoveFromList(pEntry); /* Remove it before we remove data, otherwise it may get freed when evicting data. */
1777 bool fEnough = pdmacFileCacheReclaim(pCache, pEntry->cbData, true, &pbBuffer);
1778
1779 /* Move the entry to Am and fetch it to the cache. */
1780 if (fEnough)
1781 {
1782 pdmacFileCacheEntryAddToList(&pCache->LruFrequentlyUsed, pEntry);
1783 pdmacFileCacheAdd(pCache, pEntry->cbData);
1784 pdmacFileCacheLockLeave(pCache);
1785
1786 if (pbBuffer)
1787 pEntry->pbData = pbBuffer;
1788 else
1789 pEntry->pbData = (uint8_t *)RTMemPageAlloc(pEntry->cbData);
1790 AssertPtr(pEntry->pbData);
1791
1792 pdmacFileEpCacheEntryWaitersAdd(pEntry, pTask,
1793 &IoMemCtx,
1794 OffDiff, cbToRead,
1795 false /* fWrite */);
1796 pdmacFileCacheReadFromEndpoint(pEntry);
1797 /* Release the entry */
1798 pdmacFileEpCacheEntryRelease(pEntry);
1799 }
1800 else
1801 {
1802 RTSemRWRequestWrite(pEndpointCache->SemRWEntries, RT_INDEFINITE_WAIT);
1803 STAM_PROFILE_ADV_START(&pCache->StatTreeRemove, Cache);
1804 RTAvlrFileOffsetRemove(pEndpointCache->pTree, pEntry->Core.Key);
1805 STAM_PROFILE_ADV_STOP(&pCache->StatTreeRemove, Cache);
1806 RTSemRWReleaseWrite(pEndpointCache->SemRWEntries);
1807
1808 pdmacFileCacheLockLeave(pCache);
1809
1810 RTMemFree(pEntry);
1811
1812 pdmacFileEpCacheRequestPassthrough(pEndpoint, pTask,
1813 &IoMemCtx, off, cbToRead,
1814 PDMACTASKFILETRANSFER_READ);
1815 }
1816 }
1817 }
1818 else
1819 {
1820#ifdef VBOX_WITH_IO_READ_CACHE
1821 /* No entry found for this offset. Create a new entry and fetch the data to the cache. */
1822 PPDMACFILECACHEENTRY pEntryNew = pdmacFileEpCacheEntryCreate(pEndpoint,
1823 pEndpointCache,
1824 off, cbRead,
1825 PAGE_SIZE,
1826 &cbToRead);
1827
1828 cbRead -= cbToRead;
1829
1830 if (pEntryNew)
1831 {
1832 if (!cbRead)
1833 STAM_COUNTER_INC(&pCache->cMisses);
1834 else
1835 STAM_COUNTER_INC(&pCache->cPartialHits);
1836
1837 pdmacFileEpCacheEntryWaitersAdd(pEntryNew, pTask,
1838 &IoMemCtx,
1839 off - pEntryNew->Core.Key,
1840 cbToRead,
1841 false /* fWrite */);
1842 pdmacFileCacheReadFromEndpoint(pEntryNew);
1843 pdmacFileEpCacheEntryRelease(pEntryNew); /* it is protected by the I/O in progress flag now. */
1844 }
1845 else
1846 {
1847 /*
1848 * There is not enough free space in the cache.
1849 * Pass the request directly to the I/O manager.
1850 */
1851 LogFlow(("Couldn't evict %u bytes from the cache. Remaining request will be passed through\n", cbToRead));
1852
1853 pdmacFileEpCacheRequestPassthrough(pEndpoint, pTask,
1854 &IoMemCtx, off, cbToRead,
1855 PDMACTASKFILETRANSFER_READ);
1856 }
1857#else
 1858            /* Clip the read size if necessary. */
1859 PPDMACFILECACHEENTRY pEntryAbove;
1860 pdmacFileEpCacheGetCacheBestFitEntryByOffset(pEndpointCache, off,
1861 &pEntryAbove, NULL);
1862
1863 if (pEntryAbove)
1864 {
1865 if (off + (RTFOFF)cbRead > pEntryAbove->Core.Key)
1866 cbToRead = pEntryAbove->Core.Key - off;
1867 else
1868 cbToRead = cbRead;
1869
1870 pdmacFileEpCacheEntryRelease(pEntryAbove);
1871 }
1872 else
1873 cbToRead = cbRead;
1874
1875 cbRead -= cbToRead;
1876 pdmacFileEpCacheRequestPassthrough(pEndpoint, pTask,
1877 &IoMemCtx, off, cbToRead,
1878 PDMACTASKFILETRANSFER_READ);
1879#endif
1880 }
1881 off += cbToRead;
1882 }
1883
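    /*
     * Editor's note -- explanatory comment, not part of the original source.
     * fCompleted was set to true before the loop so that worker threads
     * finishing cache fills cannot complete the task while it is still being
     * set up. Dropping it back to false here and atomically exchanging it
     * decides who completes the task: if everything was satisfied from the
     * cache (cbTransferLeft == 0) and nobody raced us, we complete it here;
     * otherwise the completion callback presumably performs the same exchange
     * when the last transfer finishes.
     */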
1884 ASMAtomicWriteBool(&pTask->fCompleted, false);
1885
1886 if (ASMAtomicReadS32(&pTask->cbTransferLeft) == 0
1887 && !ASMAtomicXchgBool(&pTask->fCompleted, true))
1888 pdmR3AsyncCompletionCompleteTask(&pTask->Core, false);
1889 else
1890 rc = VINF_AIO_TASK_PENDING;
1891
1892 LogFlowFunc((": Leave rc=%Rrc\n", rc));
1893
1894 return rc;
1895}
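/*
 * Editor's note -- illustrative sketch, not part of the original source.
 * A caller treats VINF_AIO_TASK_PENDING as "completion will be signalled
 * asynchronously". Hypothetical invocation; the PDMDATASEG field names
 * pvSeg/cbSeg are assumed here, not taken from this file:
 *
 *     PDMDATASEG aSegs[1];
 *     aSegs[0].pvSeg = pvBuf;
 *     aSegs[0].cbSeg = cbBuf;
 *     int rc = pdmacFileEpCacheRead(pEndpoint, pTask, offFile, &aSegs[0], 1, cbBuf);
 *     if (rc == VINF_AIO_TASK_PENDING)
 *     {
 *         // pdmR3AsyncCompletionCompleteTask() will be called for pTask later
 *     }
 */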
1896
1897/**
1898 * Writes the given data to the endpoint using the cache if possible.
1899 *
1900 * @returns VBox status code.
1901 * @param pEndpoint The endpoint to write to.
1902 * @param pTask The task structure used as identifier for this request.
 1903 * @param off The offset to start writing to.
1904 * @param paSegments Pointer to the array holding the source buffers.
1905 * @param cSegments Number of segments in the array.
1906 * @param cbWrite Number of bytes to write.
1907 */
1908int pdmacFileEpCacheWrite(PPDMASYNCCOMPLETIONENDPOINTFILE pEndpoint, PPDMASYNCCOMPLETIONTASKFILE pTask,
1909 RTFOFF off, PCPDMDATASEG paSegments, size_t cSegments,
1910 size_t cbWrite)
1911{
1912 int rc = VINF_SUCCESS;
1913 PPDMACFILEENDPOINTCACHE pEndpointCache = &pEndpoint->DataCache;
1914 PPDMACFILECACHEGLOBAL pCache = pEndpointCache->pCache;
1915 PPDMACFILECACHEENTRY pEntry;
1916
1917 LogFlowFunc((": pEndpoint=%#p{%s} pTask=%#p off=%RTfoff paSegments=%#p cSegments=%u cbWrite=%u\n",
1918 pEndpoint, pEndpoint->Core.pszUri, pTask, off, paSegments, cSegments, cbWrite));
1919
1920 pTask->cbTransferLeft = cbWrite;
1921 /* Set to completed to make sure that the task is valid while we access it. */
1922 ASMAtomicWriteBool(&pTask->fCompleted, true);
1923
1924 /* Init the I/O memory context */
1925 PDMIOMEMCTX IoMemCtx;
1926 pdmIoMemCtxInit(&IoMemCtx, paSegments, cSegments);
1927
1928 while (cbWrite)
1929 {
1930 size_t cbToWrite;
1931
1932 pEntry = pdmacFileEpCacheGetCacheEntryByOffset(pEndpointCache, off);
1933
1934 if (pEntry)
1935 {
1936 /* Write the data into the entry and mark it as dirty */
1937 AssertPtr(pEntry->pList);
1938
1939 RTFOFF OffDiff = off - pEntry->Core.Key;
1940
1941 AssertMsg(off >= pEntry->Core.Key,
1942 ("Overflow in calculation off=%RTfoff OffsetAligned=%RTfoff\n",
1943 off, pEntry->Core.Key));
1944
1945 cbToWrite = RT_MIN(pEntry->cbData - OffDiff, cbWrite);
1946 cbWrite -= cbToWrite;
1947
1948 if (!cbWrite)
1949 STAM_COUNTER_INC(&pCache->cHits);
1950 else
1951 STAM_COUNTER_INC(&pCache->cPartialHits);
1952
1953 STAM_COUNTER_ADD(&pCache->StatWritten, cbToWrite);
1954
1955 /* Ghost lists contain no data. */
1956 if ( (pEntry->pList == &pCache->LruRecentlyUsedIn)
1957 || (pEntry->pList == &pCache->LruFrequentlyUsed))
1958 {
1959 /* Check if the entry is dirty. */
1960 if(pdmacFileEpCacheEntryFlagIsSetClearAcquireLock(pEndpointCache, pEntry,
1961 PDMACFILECACHE_ENTRY_IS_DIRTY,
1962 0))
1963 {
 1964                    /* If it is dirty but not in progress just update the data. */
1965 if (!(pEntry->fFlags & PDMACFILECACHE_ENTRY_IO_IN_PROGRESS))
1966 {
1967 pdmacFileEpCacheCopyFromIoMemCtx(&IoMemCtx,
1968 pEntry->pbData + OffDiff,
1969 cbToWrite);
1970 ASMAtomicSubS32(&pTask->cbTransferLeft, cbToWrite);
1971 }
1972 else
1973 {
1974 /* The data isn't written to the file yet */
1975 pdmacFileEpCacheEntryWaitersAdd(pEntry, pTask,
1976 &IoMemCtx,
1977 OffDiff, cbToWrite,
1978 true /* fWrite */);
1979 STAM_COUNTER_INC(&pEndpointCache->StatWriteDeferred);
1980 }
1981
1982 RTSemRWReleaseWrite(pEndpointCache->SemRWEntries);
1983 }
1984 else /* Dirty bit not set */
1985 {
1986 /*
1987 * Check if a read is in progress for this entry.
1988 * We have to defer processing in that case.
1989 */
1990 if(pdmacFileEpCacheEntryFlagIsSetClearAcquireLock(pEndpointCache, pEntry,
1991 PDMACFILECACHE_ENTRY_IO_IN_PROGRESS,
1992 0))
1993 {
1994 pdmacFileEpCacheEntryWaitersAdd(pEntry, pTask,
1995 &IoMemCtx,
1996 OffDiff, cbToWrite,
1997 true /* fWrite */);
1998 STAM_COUNTER_INC(&pEndpointCache->StatWriteDeferred);
1999 RTSemRWReleaseWrite(pEndpointCache->SemRWEntries);
2000 }
2001 else /* I/O in progress flag not set */
2002 {
2003 /* Write as much as we can into the entry and update the file. */
2004 pdmacFileEpCacheCopyFromIoMemCtx(&IoMemCtx,
2005 pEntry->pbData + OffDiff,
2006 cbToWrite);
2007 ASMAtomicSubS32(&pTask->cbTransferLeft, cbToWrite);
2008
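                        /*
                         * Editor's note -- not part of the original source:
                         * pdmacFileCacheAddDirtyEntry() appears to queue the
                         * entry on the endpoint's dirty list and returns true
                         * once enough dirty data has accumulated, at which
                         * point all dirty entries are committed to the file
                         * in one go below.
                         */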
2009 bool fCommit = pdmacFileCacheAddDirtyEntry(pEndpointCache, pEntry);
2010 if (fCommit)
2011 pdmacFileCacheCommitDirtyEntries(pCache);
2012 }
2013 } /* Dirty bit not set */
2014
2015 /* Move this entry to the top position */
2016 if (pEntry->pList == &pCache->LruFrequentlyUsed)
2017 {
2018 pdmacFileCacheLockEnter(pCache);
2019 pdmacFileCacheEntryAddToList(&pCache->LruFrequentlyUsed, pEntry);
2020 pdmacFileCacheLockLeave(pCache);
2021 }
2022
2023 pdmacFileEpCacheEntryRelease(pEntry);
2024 }
2025 else /* Entry is on the ghost list */
2026 {
2027 uint8_t *pbBuffer = NULL;
2028
2029 pdmacFileCacheLockEnter(pCache);
2030 pdmacFileCacheEntryRemoveFromList(pEntry); /* Remove it before we remove data, otherwise it may get freed when evicting data. */
2031 bool fEnough = pdmacFileCacheReclaim(pCache, pEntry->cbData, true, &pbBuffer);
2032
2033 if (fEnough)
2034 {
2035 /* Move the entry to Am and fetch it to the cache. */
2036 pdmacFileCacheEntryAddToList(&pCache->LruFrequentlyUsed, pEntry);
2037 pdmacFileCacheAdd(pCache, pEntry->cbData);
2038 pdmacFileCacheLockLeave(pCache);
2039
2040 if (pbBuffer)
2041 pEntry->pbData = pbBuffer;
2042 else
2043 pEntry->pbData = (uint8_t *)RTMemPageAlloc(pEntry->cbData);
2044 AssertPtr(pEntry->pbData);
2045
2046 pdmacFileEpCacheEntryWaitersAdd(pEntry, pTask,
2047 &IoMemCtx,
2048 OffDiff, cbToWrite,
2049 true /* fWrite */);
2050 STAM_COUNTER_INC(&pEndpointCache->StatWriteDeferred);
2051 pdmacFileCacheReadFromEndpoint(pEntry);
2052
2053 /* Release the reference. If it is still needed the I/O in progress flag should protect it now. */
2054 pdmacFileEpCacheEntryRelease(pEntry);
2055 }
2056 else
2057 {
2058 RTSemRWRequestWrite(pEndpointCache->SemRWEntries, RT_INDEFINITE_WAIT);
2059 STAM_PROFILE_ADV_START(&pCache->StatTreeRemove, Cache);
2060 RTAvlrFileOffsetRemove(pEndpointCache->pTree, pEntry->Core.Key);
2061 STAM_PROFILE_ADV_STOP(&pCache->StatTreeRemove, Cache);
2062 RTSemRWReleaseWrite(pEndpointCache->SemRWEntries);
2063
2064 pdmacFileCacheLockLeave(pCache);
2065
2066 RTMemFree(pEntry);
2067 pdmacFileEpCacheRequestPassthrough(pEndpoint, pTask,
2068 &IoMemCtx, off, cbToWrite,
2069 PDMACTASKFILETRANSFER_WRITE);
2070 }
2071 }
2072 }
2073 else /* No entry found */
2074 {
2075 /*
2076 * No entry found. Try to create a new cache entry to store the data in and if that fails
2077 * write directly to the file.
2078 */
2079 PPDMACFILECACHEENTRY pEntryNew = pdmacFileEpCacheEntryCreate(pEndpoint,
2080 pEndpointCache,
2081 off, cbWrite,
2082 512,
2083 &cbToWrite);
2084
2085 cbWrite -= cbToWrite;
2086
2087 if (pEntryNew)
2088 {
2089 RTFOFF offDiff = off - pEntryNew->Core.Key;
2090
2091 STAM_COUNTER_INC(&pCache->cHits);
2092
2093 /*
2094 * Check if it is possible to just write the data without waiting
2095 * for it to get fetched first.
2096 */
2097 if (!offDiff && pEntryNew->cbData == cbToWrite)
2098 {
2099 pdmacFileEpCacheCopyFromIoMemCtx(&IoMemCtx,
2100 pEntryNew->pbData,
2101 cbToWrite);
2102 ASMAtomicSubS32(&pTask->cbTransferLeft, cbToWrite);
2103
2104 bool fCommit = pdmacFileCacheAddDirtyEntry(pEndpointCache, pEntryNew);
2105 if (fCommit)
2106 pdmacFileCacheCommitDirtyEntries(pCache);
2107 STAM_COUNTER_ADD(&pCache->StatWritten, cbToWrite);
2108 }
2109 else
2110 {
2111 /* Defer the write and fetch the data from the endpoint. */
2112 pdmacFileEpCacheEntryWaitersAdd(pEntryNew, pTask,
2113 &IoMemCtx,
2114 offDiff, cbToWrite,
2115 true /* fWrite */);
2116 STAM_COUNTER_INC(&pEndpointCache->StatWriteDeferred);
2117 pdmacFileCacheReadFromEndpoint(pEntryNew);
2118 }
2119
2120 pdmacFileEpCacheEntryRelease(pEntryNew);
2121 }
2122 else
2123 {
2124 /*
2125 * There is not enough free space in the cache.
2126 * Pass the request directly to the I/O manager.
2127 */
2128 LogFlow(("Couldn't evict %u bytes from the cache. Remaining request will be passed through\n", cbToWrite));
2129
2130 STAM_COUNTER_INC(&pCache->cMisses);
2131
2132 pdmacFileEpCacheRequestPassthrough(pEndpoint, pTask,
2133 &IoMemCtx, off, cbToWrite,
2134 PDMACTASKFILETRANSFER_WRITE);
2135 }
2136 }
2137
2138 off += cbToWrite;
2139 }
2140
2141 ASMAtomicWriteBool(&pTask->fCompleted, false);
2142
2143 if (ASMAtomicReadS32(&pTask->cbTransferLeft) == 0
2144 && !ASMAtomicXchgBool(&pTask->fCompleted, true))
2145 pdmR3AsyncCompletionCompleteTask(&pTask->Core, false);
2146 else
2147 rc = VINF_AIO_TASK_PENDING;
2148
2149 LogFlowFunc((": Leave rc=%Rrc\n", rc));
2150
2151 return rc;
2152}
2153
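/**
 * Flushes the endpoint, committing any dirty cache entries to the file.
 *
 * @returns VBox status code. VERR_RESOURCE_BUSY if another flush request is
 *          still pending, VINF_AIO_TASK_PENDING if outstanding writes have to
 *          complete first (the task is completed once they do).
 * @param pEndpoint The endpoint to flush.
 * @param pTask The task structure used as identifier for this request.
 */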
2154int pdmacFileEpCacheFlush(PPDMASYNCCOMPLETIONENDPOINTFILE pEndpoint, PPDMASYNCCOMPLETIONTASKFILE pTask)
2155{
2156 int rc = VINF_SUCCESS;
2157
2158 LogFlowFunc((": pEndpoint=%#p{%s} pTask=%#p\n",
2159 pEndpoint, pEndpoint->Core.pszUri, pTask));
2160
2161 if (ASMAtomicReadPtr((void * volatile *)&pEndpoint->DataCache.pTaskFlush))
2162 rc = VERR_RESOURCE_BUSY;
2163 else
2164 {
2165 /* Check for dirty entries in the cache. */
2166 pdmacFileCacheEndpointCommit(&pEndpoint->DataCache);
2167 if (ASMAtomicReadU32(&pEndpoint->DataCache.cWritesOutstanding) > 0)
2168 {
2169 ASMAtomicWritePtr((void * volatile *)&pEndpoint->DataCache.pTaskFlush, pTask);
2170 rc = VINF_AIO_TASK_PENDING;
2171 }
2172 else
2173 pdmR3AsyncCompletionCompleteTask(&pTask->Core, false);
2174 }
2175
2176 LogFlowFunc((": Leave rc=%Rrc\n", rc));
2177 return rc;
2178}
2179