VirtualBox

source: vbox/trunk/src/VBox/Storage/VD.cpp @ 66140

Last change on this file since 66140 was 66140, checked in by vboxsync, 8 years ago

Storage: Add backend to handle CUE/BIN images (not yet working as the previous infrastructure changes need to be wired up to the CD/DVD drive emulations)

  • Property svn:eol-style set to native
  • Property svn:keywords set to Author Date Id Revision
File size: 403.1 KB
1/* $Id: VD.cpp 66140 2017-03-16 17:29:31Z vboxsync $ */
2/** @file
3 * VBoxHDD - VBox HDD Container implementation.
4 */
5
6/*
7 * Copyright (C) 2006-2016 Oracle Corporation
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 */
17
18
19/*********************************************************************************************************************************
20* Header Files *
21*********************************************************************************************************************************/
22#define LOG_GROUP LOG_GROUP_VD
23#include <VBox/vd.h>
24#include <VBox/err.h>
25#include <VBox/sup.h>
26#include <VBox/log.h>
27
28#include <iprt/alloc.h>
29#include <iprt/assert.h>
30#include <iprt/uuid.h>
31#include <iprt/file.h>
32#include <iprt/string.h>
33#include <iprt/asm.h>
34#include <iprt/ldr.h>
35#include <iprt/dir.h>
36#include <iprt/path.h>
37#include <iprt/param.h>
38#include <iprt/memcache.h>
39#include <iprt/sg.h>
40#include <iprt/list.h>
41#include <iprt/avl.h>
42#include <iprt/semaphore.h>
43
44#include <VBox/vd-plugin.h>
45
46#include "VDBackends.h"
47
48/** Disable dynamic backends on non-x86 architectures. This feature
49 * requires the SUPR3 library which is not available there.
50 */
51#if !defined(VBOX_HDD_NO_DYNAMIC_BACKENDS) && !defined(RT_ARCH_X86) && !defined(RT_ARCH_AMD64)
52# define VBOX_HDD_NO_DYNAMIC_BACKENDS
53#endif
54
55#define VBOXHDDDISK_SIGNATURE 0x6f0e2a7d
56
57/** Buffer size used for merging images. */
58#define VD_MERGE_BUFFER_SIZE (16 * _1M)
59
60/** Maximum number of segments in one I/O task. */
61#define VD_IO_TASK_SEGMENTS_MAX 64
62
63/** Threshold after which not recently used blocks are removed from the list. */
64#define VD_DISCARD_REMOVE_THRESHOLD (10 * _1M) /** @todo experiment */
65
66/**
67 * VD async I/O interface storage descriptor.
68 */
69typedef struct VDIIOFALLBACKSTORAGE
70{
71 /** File handle. */
72 RTFILE File;
73 /** Completion callback. */
74 PFNVDCOMPLETED pfnCompleted;
75 /** Thread for async access. */
76 RTTHREAD ThreadAsync;
77} VDIIOFALLBACKSTORAGE, *PVDIIOFALLBACKSTORAGE;
78
79/**
80 * Structure containing everything I/O related
81 * for the image and cache descriptors.
82 */
83typedef struct VDIO
84{
85 /** I/O interface to the upper layer. */
86 PVDINTERFACEIO pInterfaceIo;
87
88 /** Per image internal I/O interface. */
89 VDINTERFACEIOINT VDIfIoInt;
90
91 /** Fallback I/O interface, only used if the caller doesn't provide it. */
92 VDINTERFACEIO VDIfIo;
93
94 /** Opaque backend data. */
95 void *pBackendData;
96 /** Disk this image is part of */
97 PVBOXHDD pDisk;
98 /** Flag whether to ignore flush requests. */
99 bool fIgnoreFlush;
100} VDIO, *PVDIO;
101
102/** Forward declaration of an I/O task */
103typedef struct VDIOTASK *PVDIOTASK;
104
105/**
106 * VBox HDD Container image descriptor.
107 */
108typedef struct VDIMAGE
109{
110 /** Link to parent image descriptor, if any. */
111 struct VDIMAGE *pPrev;
112 /** Link to child image descriptor, if any. */
113 struct VDIMAGE *pNext;
114 /** Container base filename. (UTF-8) */
115 char *pszFilename;
116 /** Data managed by the backend which keeps the actual info. */
117 void *pBackendData;
118 /** Cached sanitized image flags. */
119 unsigned uImageFlags;
120 /** Image open flags (only those handled generically in this code and which
121 * the backends will never ever see). */
122 unsigned uOpenFlags;
123
124 /** Function pointers for the various backend methods. */
125 PCVDIMAGEBACKEND Backend;
126 /** Pointer to list of VD interfaces, per-image. */
127 PVDINTERFACE pVDIfsImage;
128 /** I/O related things. */
129 VDIO VDIo;
130} VDIMAGE, *PVDIMAGE;
131
132/**
133 * uModified bit flags.
134 */
135#define VD_IMAGE_MODIFIED_FLAG RT_BIT(0)
136#define VD_IMAGE_MODIFIED_FIRST RT_BIT(1)
137#define VD_IMAGE_MODIFIED_DISABLE_UUID_UPDATE RT_BIT(2)
138
139
140/**
141 * VBox HDD Cache image descriptor.
142 */
143typedef struct VDCACHE
144{
145 /** Cache base filename. (UTF-8) */
146 char *pszFilename;
147 /** Data managed by the backend which keeps the actual info. */
148 void *pBackendData;
149 /** Cached sanitized image flags. */
150 unsigned uImageFlags;
151 /** Image open flags (only those handled generically in this code and which
152 * the backends will never ever see). */
153 unsigned uOpenFlags;
154
155 /** Function pointers for the various backend methods. */
156 PCVDCACHEBACKEND Backend;
157
158 /** Pointer to list of VD interfaces, per-cache. */
159 PVDINTERFACE pVDIfsCache;
160 /** I/O related things. */
161 VDIO VDIo;
162} VDCACHE, *PVDCACHE;
163
164/**
165 * A block waiting for a discard.
166 */
167typedef struct VDDISCARDBLOCK
168{
169 /** AVL core. */
170 AVLRU64NODECORE Core;
171 /** LRU list node. */
172 RTLISTNODE NodeLru;
173 /** Number of bytes to discard. */
174 size_t cbDiscard;
175 /** Bitmap of allocated sectors. */
176 void *pbmAllocated;
177} VDDISCARDBLOCK, *PVDDISCARDBLOCK;
178
179/**
180 * VD discard state.
181 */
182typedef struct VDDISCARDSTATE
183{
184 /** Number of bytes waiting for a discard. */
185 size_t cbDiscarding;
186 /** AVL tree with blocks waiting for a discard.
187 * The uOffset + cbDiscard range is the search key. */
188 PAVLRU64TREE pTreeBlocks;
189 /** LRU list of the least frequently discarded blocks.
190 * If there are too many blocks waiting, the least frequently used
191 * will be removed and the range will be set to 0.
192 */
193 RTLISTNODE ListLru;
194} VDDISCARDSTATE, *PVDDISCARDSTATE;
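
/*
 * Illustrative sketch only (not referenced by the code in this file): a block
 * pending discard is looked up in the AVL tree by offset and kept warm in the
 * LRU list, roughly like this. The discard helpers further down implement the
 * real logic and may manage the LRU differently.
 *
 *     PVDDISCARDBLOCK pBlock = (PVDDISCARDBLOCK)RTAvlrU64RangeGet(pDiscard->pTreeBlocks, uOffset);
 *     if (pBlock)
 *     {
 *         RTListNodeRemove(&pBlock->NodeLru);
 *         RTListPrepend(&pDiscard->ListLru, &pBlock->NodeLru);
 *     }
 */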
195
196/**
197 * VD filter instance.
198 */
199typedef struct VDFILTER
200{
201 /** List node for the read filter chain. */
202 RTLISTNODE ListNodeChainRead;
203 /** List node for the write filter chain. */
204 RTLISTNODE ListNodeChainWrite;
205 /** Number of references to this filter. */
206 uint32_t cRefs;
207 /** Opaque VD filter backend instance data. */
208 void *pvBackendData;
209 /** Pointer to the filter backend interface. */
210 PCVDFILTERBACKEND pBackend;
211 /** Pointer to list of VD interfaces, per-filter. */
212 PVDINTERFACE pVDIfsFilter;
213 /** I/O related things. */
214 VDIO VDIo;
215} VDFILTER;
216/** Pointer to a VD filter instance. */
217typedef VDFILTER *PVDFILTER;
218
219/**
220 * VBox HDD Container main structure, private part.
221 */
222struct VBOXHDD
223{
224 /** Structure signature (VBOXHDDDISK_SIGNATURE). */
225 uint32_t u32Signature;
226
227 /** Image type. */
228 VDTYPE enmType;
229
230 /** Number of opened images. */
231 unsigned cImages;
232
233 /** Base image. */
234 PVDIMAGE pBase;
235
236 /** Last opened image in the chain.
237 * The same as pBase if only one image is used. */
238 PVDIMAGE pLast;
239
240 /** If a merge to one of the parents is running this may be non-NULL
241 * to indicate to what image the writes should be additionally relayed. */
242 PVDIMAGE pImageRelay;
243
244 /** Flags representing the modification state. */
245 unsigned uModified;
246
247 /** Cached size of this disk. */
248 uint64_t cbSize;
249 /** Cached PCHS geometry for this disk. */
250 VDGEOMETRY PCHSGeometry;
251 /** Cached LCHS geometry for this disk. */
252 VDGEOMETRY LCHSGeometry;
253
254 /** Pointer to list of VD interfaces, per-disk. */
255 PVDINTERFACE pVDIfsDisk;
256 /** Pointer to the common interface structure for error reporting. */
257 PVDINTERFACEERROR pInterfaceError;
258 /** Pointer to the optional thread synchronization callbacks. */
259 PVDINTERFACETHREADSYNC pInterfaceThreadSync;
260
261 /** Memory cache for I/O contexts */
262 RTMEMCACHE hMemCacheIoCtx;
263 /** Memory cache for I/O tasks. */
264 RTMEMCACHE hMemCacheIoTask;
265 /** An I/O context is currently using the disk structures.
266 * Every I/O context must be placed on one of the lists below. */
267 volatile bool fLocked;
268 /** Head of pending I/O tasks waiting for completion - LIFO order. */
269 volatile PVDIOTASK pIoTasksPendingHead;
270 /** Head of newly queued I/O contexts - LIFO order. */
271 volatile PVDIOCTX pIoCtxHead;
272 /** Head of halted I/O contexts which are given back to generic
273 * disk framework by the backend. - LIFO order. */
274 volatile PVDIOCTX pIoCtxHaltedHead;
275
276 /** Head of blocked I/O contexts, processed only
277 * after pIoCtxLockOwner was freed - LIFO order. */
278 volatile PVDIOCTX pIoCtxBlockedHead;
279 /** I/O context which locked the disk for a growing write or flush request.
280 * Other flush or growing write requests need to wait until
281 * the current one completes. - NIL_VDIOCTX if unlocked. */
282 volatile PVDIOCTX pIoCtxLockOwner;
283 /** If the disk was locked by a growing write, flush or discard request this
284 * contains the start offset to check for interfering I/O while it is in progress. */
285 uint64_t uOffsetStartLocked;
286 /** If the disk was locked by a growing write, flush or discard request this contains
287 * the first non affected offset to check for interfering I/O while it is in progress. */
288 uint64_t uOffsetEndLocked;
289
290 /** Pointer to the L2 disk cache if any. */
291 PVDCACHE pCache;
292 /** Pointer to the discard state if any. */
293 PVDDISCARDSTATE pDiscard;
294
295 /** Read filter chain - PVDFILTER. */
296 RTLISTANCHOR ListFilterChainRead;
297 /** Write filter chain - PVDFILTER. */
298 RTLISTANCHOR ListFilterChainWrite;
299};
300
301# define VD_IS_LOCKED(a_pDisk) \
302 do \
303 { \
304 NOREF(a_pDisk); \
305 AssertMsg((a_pDisk)->fLocked, \
306 ("Lock not held\n"));\
307 } while(0)
308
309/**
310 * VBox parent read descriptor, used internally for compaction.
311 */
312typedef struct VDPARENTSTATEDESC
313{
314 /** Pointer to disk descriptor. */
315 PVBOXHDD pDisk;
316 /** Pointer to image descriptor. */
317 PVDIMAGE pImage;
318} VDPARENTSTATEDESC, *PVDPARENTSTATEDESC;
319
320/**
321 * Transfer direction.
322 */
323typedef enum VDIOCTXTXDIR
324{
325 /** Read */
326 VDIOCTXTXDIR_READ = 0,
327 /** Write */
328 VDIOCTXTXDIR_WRITE,
329 /** Flush */
330 VDIOCTXTXDIR_FLUSH,
331 /** Discard */
332 VDIOCTXTXDIR_DISCARD,
333 /** 32bit hack */
334 VDIOCTXTXDIR_32BIT_HACK = 0x7fffffff
335} VDIOCTXTXDIR, *PVDIOCTXTXDIR;
336
337/** Transfer function */
338typedef DECLCALLBACK(int) FNVDIOCTXTRANSFER (PVDIOCTX pIoCtx);
339/** Pointer to a transfer function. */
340typedef FNVDIOCTXTRANSFER *PFNVDIOCTXTRANSFER;
341
342/**
343 * I/O context
344 */
345typedef struct VDIOCTX
346{
347 /** Pointer to the next I/O context. */
348 struct VDIOCTX * volatile pIoCtxNext;
349 /** Disk this request is for. */
350 PVBOXHDD pDisk;
351 /** Return code. */
352 int rcReq;
353 /** Various flags for the I/O context. */
354 uint32_t fFlags;
355 /** Number of data transfers currently pending. */
356 volatile uint32_t cDataTransfersPending;
357 /** How many meta data transfers are pending. */
358 volatile uint32_t cMetaTransfersPending;
359 /** Flag whether the request finished */
360 volatile bool fComplete;
361 /** Temporary allocated memory which is freed
362 * when the context completes. */
363 void *pvAllocation;
364 /** Transfer function. */
365 PFNVDIOCTXTRANSFER pfnIoCtxTransfer;
366 /** Next transfer part after the current one completed. */
367 PFNVDIOCTXTRANSFER pfnIoCtxTransferNext;
368 /** Transfer direction */
369 VDIOCTXTXDIR enmTxDir;
370 /** Request type dependent data. */
371 union
372 {
373 /** I/O request (read/write). */
374 struct
375 {
376 /** Number of bytes left until this context completes. */
377 volatile uint32_t cbTransferLeft;
378 /** Current offset */
379 volatile uint64_t uOffset;
380 /** Number of bytes to transfer */
381 volatile size_t cbTransfer;
382 /** Current image in the chain. */
383 PVDIMAGE pImageCur;
384 /** Start image to read from. pImageCur is reset to this
385 * value after it reached the first image in the chain. */
386 PVDIMAGE pImageStart;
387 /** S/G buffer */
388 RTSGBUF SgBuf;
389 /** Number of bytes to clear in the buffer before the current read. */
390 size_t cbBufClear;
391 /** Number of images to read. */
392 unsigned cImagesRead;
393 /** Override for the parent image to start reading from. */
394 PVDIMAGE pImageParentOverride;
395 /** Original offset of the transfer - required for filtering read requests. */
396 uint64_t uOffsetXferOrig;
398 * Original size of the transfer - required for filtering read requests.
398 size_t cbXferOrig;
399 } Io;
400 /** Discard requests. */
401 struct
402 {
403 /** Pointer to the range descriptor array. */
404 PCRTRANGE paRanges;
405 /** Number of ranges in the array. */
406 unsigned cRanges;
407 /** Range descriptor index which is processed. */
408 unsigned idxRange;
409 /** Start offset to discard currently. */
410 uint64_t offCur;
411 /** How many bytes left to discard in the current range. */
412 size_t cbDiscardLeft;
413 /** How many bytes to discard in the current block (<= cbDiscardLeft). */
414 size_t cbThisDiscard;
415 /** Discard block handled currently. */
416 PVDDISCARDBLOCK pBlock;
417 } Discard;
418 } Req;
419 /** Parent I/O context if any. Sets the type of the context (root/child) */
420 PVDIOCTX pIoCtxParent;
421 /** Type dependent data (root/child) */
422 union
423 {
424 /** Root data */
425 struct
426 {
427 /** Completion callback */
428 PFNVDASYNCTRANSFERCOMPLETE pfnComplete;
429 /** User argument 1 passed on completion. */
430 void *pvUser1;
431 /** User argument 2 passed on completion. */
432 void *pvUser2;
433 } Root;
434 /** Child data */
435 struct
436 {
437 /** Saved start offset */
438 uint64_t uOffsetSaved;
439 /** Saved transfer size */
440 size_t cbTransferLeftSaved;
441 /** Number of bytes transferred from the parent if this context completes. */
442 size_t cbTransferParent;
443 /** Number of bytes to pre read */
444 size_t cbPreRead;
445 /** Number of bytes to post read. */
446 size_t cbPostRead;
447 /** Number of bytes to write left in the parent. */
448 size_t cbWriteParent;
449 /** Write type dependent data. */
450 union
451 {
452 /** Optimized */
453 struct
454 {
455 /** Bytes to fill to satisfy the block size. Not part of the virtual disk. */
456 size_t cbFill;
457 /** Bytes to copy instead of reading from the parent */
458 size_t cbWriteCopy;
459 /** Bytes to read from the image. */
460 size_t cbReadImage;
461 } Optimized;
462 } Write;
463 } Child;
464 } Type;
465} VDIOCTX;
466
467/** Default flags for an I/O context, i.e. unblocked and async. */
468#define VDIOCTX_FLAGS_DEFAULT (0)
469/** Flag whether the context is blocked. */
470#define VDIOCTX_FLAGS_BLOCKED RT_BIT_32(0)
471/** Flag whether the I/O context is using synchronous I/O. */
472#define VDIOCTX_FLAGS_SYNC RT_BIT_32(1)
473/** Flag whether the read should update the cache. */
474#define VDIOCTX_FLAGS_READ_UPDATE_CACHE RT_BIT_32(2)
475/** Flag whether free blocks should be zeroed.
476 * If false and no image has data for the specified
477 * range, VERR_VD_BLOCK_FREE is returned for the I/O context.
478 * Note that unallocated blocks are still zeroed
479 * if at least one image has valid data for a part
480 * of the range.
481 */
482#define VDIOCTX_FLAGS_ZERO_FREE_BLOCKS RT_BIT_32(3)
483/** Don't free the I/O context when complete because
484 * it was allocated elsewhere (stack, ...). */
485#define VDIOCTX_FLAGS_DONT_FREE RT_BIT_32(4)
486/** Don't set the modified flag for this I/O context when writing. */
487#define VDIOCTX_FLAGS_DONT_SET_MODIFIED_FLAG RT_BIT_32(5)
488/** The write filter was applied already and shouldn't be applied a second time.
489 * Used at the beginning of vdWriteHelperAsync() because it might be called
490 * multiple times.
491 */
492#define VDIOCTX_FLAGS_WRITE_FILTER_APPLIED RT_BIT_32(6)
493
494/** NIL I/O context pointer value. */
495#define NIL_VDIOCTX ((PVDIOCTX)0)
496
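/*
 * For illustration, one plausible combination of the flags above (a sketch,
 * not taken from this file): an internal synchronous request placed on the
 * stack that wants unallocated ranges to read back as zeros could use
 *
 *     uint32_t fFlags = VDIOCTX_FLAGS_SYNC
 *                     | VDIOCTX_FLAGS_DONT_FREE
 *                     | VDIOCTX_FLAGS_ZERO_FREE_BLOCKS;
 *
 * whereas VDIOCTX_FLAGS_DEFAULT covers the common unblocked asynchronous case.
 */
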
497/**
498 * List node for deferred I/O contexts.
499 */
500typedef struct VDIOCTXDEFERRED
501{
502 /** Node in the list of deferred requests.
503 * A request can be deferred if the image is growing
504 * and the request accesses the same range or if
505 * the backend needs to read or write metadata from the disk
506 * before it can continue. */
507 RTLISTNODE NodeDeferred;
508 /** I/O context this entry points to. */
509 PVDIOCTX pIoCtx;
510} VDIOCTXDEFERRED, *PVDIOCTXDEFERRED;
511
512/**
513 * I/O task.
514 */
515typedef struct VDIOTASK
516{
517 /** Next I/O task waiting in the list. */
518 struct VDIOTASK * volatile pNext;
519 /** Storage this task belongs to. */
520 PVDIOSTORAGE pIoStorage;
521 /** Optional completion callback. */
522 PFNVDXFERCOMPLETED pfnComplete;
523 /** Opaque user data. */
524 void *pvUser;
525 /** Completion status code for the task. */
526 int rcReq;
527 /** Flag whether this is a meta data transfer. */
528 bool fMeta;
529 /** Type dependent data. */
530 union
531 {
532 /** User data transfer. */
533 struct
534 {
535 /** Number of bytes this task transferred. */
536 uint32_t cbTransfer;
537 /** Pointer to the I/O context the task belongs to. */
538 PVDIOCTX pIoCtx;
539 } User;
540 /** Meta data transfer. */
541 struct
542 {
543 /** Meta transfer this task is for. */
544 PVDMETAXFER pMetaXfer;
545 } Meta;
546 } Type;
547} VDIOTASK;
548
549/**
550 * Storage handle.
551 */
552typedef struct VDIOSTORAGE
553{
554 /** Image I/O state this storage handle belongs to. */
555 PVDIO pVDIo;
556 /** AVL tree for pending async metadata transfers. */
557 PAVLRFOFFTREE pTreeMetaXfers;
558 /** Storage handle */
559 void *pStorage;
560} VDIOSTORAGE;
561
562/**
563 * Metadata transfer.
564 *
565 * @note This entry can't be freed if either the list is not empty or
566 * the reference counter is not 0.
567 * The assumption is that the backends don't need to read huge amounts of
568 * metadata to complete a transfer so the additional memory overhead should
569 * be relatively small.
570 */
571typedef struct VDMETAXFER
572{
573 /** AVL core for fast search (the file offset is the key) */
574 AVLRFOFFNODECORE Core;
575 /** I/O storage for this transfer. */
576 PVDIOSTORAGE pIoStorage;
577 /** Flags. */
578 uint32_t fFlags;
579 /** List of I/O contexts waiting for this metadata transfer to complete. */
580 RTLISTNODE ListIoCtxWaiting;
581 /** Number of references to this entry. */
582 unsigned cRefs;
583 /** Size of the data stored with this entry. */
584 size_t cbMeta;
585 /** Shadow buffer which is used in case a write is still active and other
586 * writes update the shadow buffer. */
587 uint8_t *pbDataShw;
588 /** List of I/O contexts updating the shadow buffer while there is a write
589 * in progress. */
590 RTLISTNODE ListIoCtxShwWrites;
591 /** Data stored - variable size. */
592 uint8_t abData[1];
593} VDMETAXFER;
594
595/**
596 * The transfer direction for the metadata.
597 */
598#define VDMETAXFER_TXDIR_MASK 0x3
599#define VDMETAXFER_TXDIR_NONE 0x0
600#define VDMETAXFER_TXDIR_WRITE 0x1
601#define VDMETAXFER_TXDIR_READ 0x2
602#define VDMETAXFER_TXDIR_FLUSH 0x3
603#define VDMETAXFER_TXDIR_GET(flags) ((flags) & VDMETAXFER_TXDIR_MASK)
604#define VDMETAXFER_TXDIR_SET(flags, dir) ((flags) = (flags & ~VDMETAXFER_TXDIR_MASK) | (dir))
605
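/*
 * Usage sketch for the accessors above: the transfer direction lives in the
 * low two bits of VDMETAXFER::fFlags, so starting a metadata write and testing
 * for it later looks roughly like this:
 *
 *     VDMETAXFER_TXDIR_SET(pMetaXfer->fFlags, VDMETAXFER_TXDIR_WRITE);
 *     ...
 *     if (VDMETAXFER_TXDIR_GET(pMetaXfer->fFlags) == VDMETAXFER_TXDIR_WRITE)
 *         ... a write is still in flight, further updates go to the shadow buffer ...
 */
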
606/**
607 * Plugin structure.
608 */
609typedef struct VDPLUGIN
610{
611 /** Pointer to the next plugin structure. */
612 RTLISTNODE NodePlugin;
613 /** Handle of loaded plugin library. */
614 RTLDRMOD hPlugin;
615 /** Filename of the loaded plugin. */
616 char *pszFilename;
617} VDPLUGIN;
618/** Pointer to a plugin structure. */
619typedef VDPLUGIN *PVDPLUGIN;
620
621/** Head of loaded plugin list. */
622static RTLISTANCHOR g_ListPluginsLoaded;
623
624/** Number of image backends supported. */
625static unsigned g_cBackends = 0;
626/** Array of pointers to the image backends. */
627static PCVDIMAGEBACKEND *g_apBackends = NULL;
628/** Array of handles to the corresponding plugin. */
629static RTLDRMOD *g_ahBackendPlugins = NULL;
630/** Builtin image backends. */
631static PCVDIMAGEBACKEND aStaticBackends[] =
632{
633 &g_VmdkBackend,
634 &g_VDIBackend,
635 &g_VhdBackend,
636 &g_ParallelsBackend,
637 &g_DmgBackend,
638 &g_QedBackend,
639 &g_QCowBackend,
640 &g_VhdxBackend,
641 &g_RawBackend,
642 &g_CueBackend,
643 &g_ISCSIBackend
644};
645
646/** Number of supported cache backends. */
647static unsigned g_cCacheBackends = 0;
648/** Array of pointers to the cache backends. */
649static PCVDCACHEBACKEND *g_apCacheBackends = NULL;
650/** Array of handles to the corresponding plugin. */
651static RTLDRMOD *g_ahCacheBackendPlugins = NULL;
652/** Builtin cache backends. */
653static PCVDCACHEBACKEND aStaticCacheBackends[] =
654{
655 &g_VciCacheBackend
656};
657
658/** Number of supported filter backends. */
659static unsigned g_cFilterBackends = 0;
660/** Array of pointers to the filters backends. */
661static PCVDFILTERBACKEND *g_apFilterBackends = NULL;
662#ifndef VBOX_HDD_NO_DYNAMIC_BACKENDS
663/** Array of handles to the corresponding plugin. */
664static PRTLDRMOD g_pahFilterBackendPlugins = NULL;
665#endif
666
667/** Forward declarations of internal helper functions. */
668static DECLCALLBACK(int) vdDiscardHelperAsync(PVDIOCTX pIoCtx);
669static DECLCALLBACK(int) vdWriteHelperAsync(PVDIOCTX pIoCtx);
670static void vdDiskProcessBlockedIoCtx(PVBOXHDD pDisk);
671static int vdDiskUnlock(PVBOXHDD pDisk, PVDIOCTX pIoCtxRc);
672static DECLCALLBACK(void) vdIoCtxSyncComplete(void *pvUser1, void *pvUser2, int rcReq);
673
674/**
675 * internal: add several backends.
676 */
677static int vdAddBackends(RTLDRMOD hPlugin, PCVDIMAGEBACKEND *ppBackends, unsigned cBackends)
678{
679 PCVDIMAGEBACKEND *pTmp = (PCVDIMAGEBACKEND *)RTMemRealloc(g_apBackends,
680 (g_cBackends + cBackends) * sizeof(PCVDIMAGEBACKEND));
681 if (RT_UNLIKELY(!pTmp))
682 return VERR_NO_MEMORY;
683 g_apBackends = pTmp;
684
685 RTLDRMOD *pTmpPlugins = (RTLDRMOD*)RTMemRealloc(g_ahBackendPlugins,
686 (g_cBackends + cBackends) * sizeof(RTLDRMOD));
687 if (RT_UNLIKELY(!pTmpPlugins))
688 return VERR_NO_MEMORY;
689 g_ahBackendPlugins = pTmpPlugins;
690 memcpy(&g_apBackends[g_cBackends], ppBackends, cBackends * sizeof(PCVDIMAGEBACKEND));
691 for (unsigned i = g_cBackends; i < g_cBackends + cBackends; i++)
692 g_ahBackendPlugins[i] = hPlugin;
693 g_cBackends += cBackends;
694 return VINF_SUCCESS;
695}
696
697#ifndef VBOX_HDD_NO_DYNAMIC_BACKENDS
698/**
699 * internal: add single backend.
700 */
701DECLINLINE(int) vdAddBackend(RTLDRMOD hPlugin, PCVDIMAGEBACKEND pBackend)
702{
703 return vdAddBackends(hPlugin, &pBackend, 1);
704}
705#endif
706
707/**
708 * internal: add several cache backends.
709 */
710static int vdAddCacheBackends(RTLDRMOD hPlugin, PCVDCACHEBACKEND *ppBackends, unsigned cBackends)
711{
712 PCVDCACHEBACKEND *pTmp = (PCVDCACHEBACKEND*)RTMemRealloc(g_apCacheBackends,
713 (g_cCacheBackends + cBackends) * sizeof(PCVDCACHEBACKEND));
714 if (RT_UNLIKELY(!pTmp))
715 return VERR_NO_MEMORY;
716 g_apCacheBackends = pTmp;
717
718 RTLDRMOD *pTmpPlugins = (RTLDRMOD*)RTMemRealloc(g_ahCacheBackendPlugins,
719 (g_cCacheBackends + cBackends) * sizeof(RTLDRMOD));
720 if (RT_UNLIKELY(!pTmpPlugins))
721 return VERR_NO_MEMORY;
722 g_ahCacheBackendPlugins = pTmpPlugins;
723 memcpy(&g_apCacheBackends[g_cCacheBackends], ppBackends, cBackends * sizeof(PCVDCACHEBACKEND));
724 for (unsigned i = g_cCacheBackends; i < g_cCacheBackends + cBackends; i++)
725 g_ahCacheBackendPlugins[i] = hPlugin;
726 g_cCacheBackends += cBackends;
727 return VINF_SUCCESS;
728}
729
730#ifndef VBOX_HDD_NO_DYNAMIC_BACKENDS
731
732/**
733 * internal: add single cache backend.
734 */
735DECLINLINE(int) vdAddCacheBackend(RTLDRMOD hPlugin, PCVDCACHEBACKEND pBackend)
736{
737 return vdAddCacheBackends(hPlugin, &pBackend, 1);
738}
739
740
741/**
742 * Add several filter backends.
743 *
744 * @returns VBox status code.
745 * @param hPlugin Plugin handle to add.
746 * @param ppBackends Array of filter backends to add.
747 * @param cBackends Number of backends to add.
748 */
749static int vdAddFilterBackends(RTLDRMOD hPlugin, PCVDFILTERBACKEND *ppBackends, unsigned cBackends)
750{
751 PCVDFILTERBACKEND *pTmp = (PCVDFILTERBACKEND *)RTMemRealloc(g_apFilterBackends,
752 (g_cFilterBackends + cBackends) * sizeof(PCVDFILTERBACKEND));
753 if (RT_UNLIKELY(!pTmp))
754 return VERR_NO_MEMORY;
755 g_apFilterBackends = pTmp;
756
757 PRTLDRMOD pTmpPlugins = (PRTLDRMOD)RTMemRealloc(g_pahFilterBackendPlugins,
758 (g_cFilterBackends + cBackends) * sizeof(RTLDRMOD));
759 if (RT_UNLIKELY(!pTmpPlugins))
760 return VERR_NO_MEMORY;
761
762 g_pahFilterBackendPlugins = pTmpPlugins;
763 memcpy(&g_apFilterBackends[g_cFilterBackends], ppBackends, cBackends * sizeof(PCVDFILTERBACKEND));
764 for (unsigned i = g_cFilterBackends; i < g_cFilterBackends + cBackends; i++)
765 g_pahFilterBackendPlugins[i] = hPlugin;
766 g_cFilterBackends += cBackends;
767 return VINF_SUCCESS;
768}
769
770
771/**
772 * Add a single filter backend to the list of supported filters.
773 *
774 * @returns VBox status code.
775 * @param hPlugin Plugin handle to add.
776 * @param pBackend The backend to add.
777 */
778DECLINLINE(int) vdAddFilterBackend(RTLDRMOD hPlugin, PCVDFILTERBACKEND pBackend)
779{
780 return vdAddFilterBackends(hPlugin, &pBackend, 1);
781}
782
783#endif /* VBOX_HDD_NO_DYNAMIC_BACKENDS */
784
785/**
786 * internal: issue error message.
787 */
788static int vdError(PVBOXHDD pDisk, int rc, RT_SRC_POS_DECL,
789 const char *pszFormat, ...)
790{
791 va_list va;
792 va_start(va, pszFormat);
793 if (pDisk->pInterfaceError)
794 pDisk->pInterfaceError->pfnError(pDisk->pInterfaceError->Core.pvUser, rc, RT_SRC_POS_ARGS, pszFormat, va);
795 va_end(va);
796 return rc;
797}
798
799/**
800 * internal: thread synchronization, start read.
801 */
802DECLINLINE(int) vdThreadStartRead(PVBOXHDD pDisk)
803{
804 int rc = VINF_SUCCESS;
805 if (RT_UNLIKELY(pDisk->pInterfaceThreadSync))
806 rc = pDisk->pInterfaceThreadSync->pfnStartRead(pDisk->pInterfaceThreadSync->Core.pvUser);
807 return rc;
808}
809
810/**
811 * internal: thread synchronization, finish read.
812 */
813DECLINLINE(int) vdThreadFinishRead(PVBOXHDD pDisk)
814{
815 int rc = VINF_SUCCESS;
816 if (RT_UNLIKELY(pDisk->pInterfaceThreadSync))
817 rc = pDisk->pInterfaceThreadSync->pfnFinishRead(pDisk->pInterfaceThreadSync->Core.pvUser);
818 return rc;
819}
820
821/**
822 * internal: thread synchronization, start write.
823 */
824DECLINLINE(int) vdThreadStartWrite(PVBOXHDD pDisk)
825{
826 int rc = VINF_SUCCESS;
827 if (RT_UNLIKELY(pDisk->pInterfaceThreadSync))
828 rc = pDisk->pInterfaceThreadSync->pfnStartWrite(pDisk->pInterfaceThreadSync->Core.pvUser);
829 return rc;
830}
831
832/**
833 * internal: thread synchronization, finish write.
834 */
835DECLINLINE(int) vdThreadFinishWrite(PVBOXHDD pDisk)
836{
837 int rc = VINF_SUCCESS;
838 if (RT_UNLIKELY(pDisk->pInterfaceThreadSync))
839 rc = pDisk->pInterfaceThreadSync->pfnFinishWrite(pDisk->pInterfaceThreadSync->Core.pvUser);
840 return rc;
841}
842
843/**
844 * internal: find image format backend.
845 */
846static int vdFindBackend(const char *pszBackend, PCVDIMAGEBACKEND *ppBackend)
847{
848 int rc = VINF_SUCCESS;
849 PCVDIMAGEBACKEND pBackend = NULL;
850
851 if (!g_apBackends)
852 VDInit();
853
854 for (unsigned i = 0; i < g_cBackends; i++)
855 {
856 if (!RTStrICmp(pszBackend, g_apBackends[i]->pszBackendName))
857 {
858 pBackend = g_apBackends[i];
859 break;
860 }
861 }
862 *ppBackend = pBackend;
863 return rc;
864}
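
/*
 * Usage sketch (variable names hypothetical): look up an image backend by its
 * registered name and treat an unknown name as an error.
 *
 *     PCVDIMAGEBACKEND pBackend = NULL;
 *     int rc = vdFindBackend("VMDK", &pBackend);
 *     if (RT_SUCCESS(rc) && !pBackend)
 *         rc = VERR_INVALID_PARAMETER; // no backend with that name registered
 */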
865
866/**
867 * internal: find cache format backend.
868 */
869static int vdFindCacheBackend(const char *pszBackend, PCVDCACHEBACKEND *ppBackend)
870{
871 int rc = VINF_SUCCESS;
872 PCVDCACHEBACKEND pBackend = NULL;
873
874 if (!g_apCacheBackends)
875 VDInit();
876
877 for (unsigned i = 0; i < g_cCacheBackends; i++)
878 {
879 if (!RTStrICmp(pszBackend, g_apCacheBackends[i]->pszBackendName))
880 {
881 pBackend = g_apCacheBackends[i];
882 break;
883 }
884 }
885 *ppBackend = pBackend;
886 return rc;
887}
888
889/**
890 * internal: find filter backend.
891 */
892static int vdFindFilterBackend(const char *pszFilter, PCVDFILTERBACKEND *ppBackend)
893{
894 int rc = VINF_SUCCESS;
895 PCVDFILTERBACKEND pBackend = NULL;
896
897 for (unsigned i = 0; i < g_cFilterBackends; i++)
898 {
899 if (!RTStrICmp(pszFilter, g_apFilterBackends[i]->pszBackendName))
900 {
901 pBackend = g_apFilterBackends[i];
902 break;
903 }
904 }
905 *ppBackend = pBackend;
906 return rc;
907}
908
909
910/**
911 * internal: add image structure to the end of the images list.
912 */
913static void vdAddImageToList(PVBOXHDD pDisk, PVDIMAGE pImage)
914{
915 pImage->pPrev = NULL;
916 pImage->pNext = NULL;
917
918 if (pDisk->pBase)
919 {
920 Assert(pDisk->cImages > 0);
921 pImage->pPrev = pDisk->pLast;
922 pDisk->pLast->pNext = pImage;
923 pDisk->pLast = pImage;
924 }
925 else
926 {
927 Assert(pDisk->cImages == 0);
928 pDisk->pBase = pImage;
929 pDisk->pLast = pImage;
930 }
931
932 pDisk->cImages++;
933}
934
935/**
936 * internal: remove image structure from the images list.
937 */
938static void vdRemoveImageFromList(PVBOXHDD pDisk, PVDIMAGE pImage)
939{
940 Assert(pDisk->cImages > 0);
941
942 if (pImage->pPrev)
943 pImage->pPrev->pNext = pImage->pNext;
944 else
945 pDisk->pBase = pImage->pNext;
946
947 if (pImage->pNext)
948 pImage->pNext->pPrev = pImage->pPrev;
949 else
950 pDisk->pLast = pImage->pPrev;
951
952 pImage->pPrev = NULL;
953 pImage->pNext = NULL;
954
955 pDisk->cImages--;
956}
957
958/**
959 * Release a reference to the filter, decrementing the counter and destroying the filter
960 * when the counter reaches zero.
961 *
962 * @returns The new reference count.
963 * @param pFilter The filter to release.
964 */
965static uint32_t vdFilterRelease(PVDFILTER pFilter)
966{
967 uint32_t cRefs = ASMAtomicDecU32(&pFilter->cRefs);
968 if (!cRefs)
969 {
970 pFilter->pBackend->pfnDestroy(pFilter->pvBackendData);
971 RTMemFree(pFilter);
972 }
973
974 return cRefs;
975}
976
977/**
978 * Increments the reference counter of the given filter.
979 *
980 * @return The new reference count.
981 * @param pFilter The filter.
982 */
983static uint32_t vdFilterRetain(PVDFILTER pFilter)
984{
985 return ASMAtomicIncU32(&pFilter->cRefs);
986}
987
988/**
989 * internal: find image by index into the images list.
990 */
991static PVDIMAGE vdGetImageByNumber(PVBOXHDD pDisk, unsigned nImage)
992{
993 PVDIMAGE pImage = pDisk->pBase;
994 if (nImage == VD_LAST_IMAGE)
995 return pDisk->pLast;
996 while (pImage && nImage)
997 {
998 pImage = pImage->pNext;
999 nImage--;
1000 }
1001 return pImage;
1002}
1003
1004/**
1005 * Applies the filter chain to the given write request.
1006 *
1007 * @returns VBox status code.
1008 * @param pDisk The HDD container.
1009 * @param uOffset The start offset of the write.
1010 * @param cbWrite Number of bytes to write.
1011 * @param pIoCtx The I/O context associated with the request.
1012 */
1013static int vdFilterChainApplyWrite(PVBOXHDD pDisk, uint64_t uOffset, size_t cbWrite,
1014 PVDIOCTX pIoCtx)
1015{
1016 int rc = VINF_SUCCESS;
1017
1018 VD_IS_LOCKED(pDisk);
1019
1020 PVDFILTER pFilter;
1021 RTListForEach(&pDisk->ListFilterChainWrite, pFilter, VDFILTER, ListNodeChainWrite)
1022 {
1023 rc = pFilter->pBackend->pfnFilterWrite(pFilter->pvBackendData, uOffset, cbWrite, pIoCtx);
1024 if (RT_FAILURE(rc))
1025 break;
1026 /* Reset S/G buffer for the next filter. */
1027 RTSgBufReset(&pIoCtx->Req.Io.SgBuf);
1028 }
1029
1030 return rc;
1031}
1032
1033/**
1034 * Applies the filter chain to the given read request.
1035 *
1036 * @returns VBox status code.
1037 * @param pDisk The HDD container.
1038 * @param uOffset The start offset of the read.
1039 * @param cbRead Number of bytes read.
1040 * @param pIoCtx The I/O context associated with the request.
1041 */
1042static int vdFilterChainApplyRead(PVBOXHDD pDisk, uint64_t uOffset, size_t cbRead,
1043 PVDIOCTX pIoCtx)
1044{
1045 int rc = VINF_SUCCESS;
1046
1047 VD_IS_LOCKED(pDisk);
1048
1049 /* Reset buffer before starting. */
1050 RTSgBufReset(&pIoCtx->Req.Io.SgBuf);
1051
1052 PVDFILTER pFilter;
1053 RTListForEach(&pDisk->ListFilterChainRead, pFilter, VDFILTER, ListNodeChainRead)
1054 {
1055 rc = pFilter->pBackend->pfnFilterRead(pFilter->pvBackendData, uOffset, cbRead, pIoCtx);
1056 if (RT_FAILURE(rc))
1057 break;
1058 /* Reset S/G buffer for the next filter. */
1059 RTSgBufReset(&pIoCtx->Req.Io.SgBuf);
1060 }
1061
1062 return rc;
1063}
1064
1065DECLINLINE(void) vdIoCtxRootComplete(PVBOXHDD pDisk, PVDIOCTX pIoCtx)
1066{
1067 if ( RT_SUCCESS(pIoCtx->rcReq)
1068 && pIoCtx->enmTxDir == VDIOCTXTXDIR_READ)
1069 pIoCtx->rcReq = vdFilterChainApplyRead(pDisk, pIoCtx->Req.Io.uOffsetXferOrig,
1070 pIoCtx->Req.Io.cbXferOrig, pIoCtx);
1071
1072 pIoCtx->Type.Root.pfnComplete(pIoCtx->Type.Root.pvUser1,
1073 pIoCtx->Type.Root.pvUser2,
1074 pIoCtx->rcReq);
1075}
1076
1077/**
1078 * Initialize the structure members of a given I/O context.
1079 */
1080DECLINLINE(void) vdIoCtxInit(PVDIOCTX pIoCtx, PVBOXHDD pDisk, VDIOCTXTXDIR enmTxDir,
1081 uint64_t uOffset, size_t cbTransfer, PVDIMAGE pImageStart,
1082 PCRTSGBUF pcSgBuf, void *pvAllocation,
1083 PFNVDIOCTXTRANSFER pfnIoCtxTransfer, uint32_t fFlags)
1084{
1085 pIoCtx->pDisk = pDisk;
1086 pIoCtx->enmTxDir = enmTxDir;
1087 pIoCtx->Req.Io.cbTransferLeft = (uint32_t)cbTransfer; Assert((uint32_t)cbTransfer == cbTransfer);
1088 pIoCtx->Req.Io.uOffset = uOffset;
1089 pIoCtx->Req.Io.cbTransfer = cbTransfer;
1090 pIoCtx->Req.Io.pImageStart = pImageStart;
1091 pIoCtx->Req.Io.pImageCur = pImageStart;
1092 pIoCtx->Req.Io.cbBufClear = 0;
1093 pIoCtx->Req.Io.pImageParentOverride = NULL;
1094 pIoCtx->Req.Io.uOffsetXferOrig = uOffset;
1095 pIoCtx->Req.Io.cbXferOrig = cbTransfer;
1096 pIoCtx->cDataTransfersPending = 0;
1097 pIoCtx->cMetaTransfersPending = 0;
1098 pIoCtx->fComplete = false;
1099 pIoCtx->fFlags = fFlags;
1100 pIoCtx->pvAllocation = pvAllocation;
1101 pIoCtx->pfnIoCtxTransfer = pfnIoCtxTransfer;
1102 pIoCtx->pfnIoCtxTransferNext = NULL;
1103 pIoCtx->rcReq = VINF_SUCCESS;
1104 pIoCtx->pIoCtxParent = NULL;
1105
1106 /* There is no S/G list for a flush request. */
1107 if ( enmTxDir != VDIOCTXTXDIR_FLUSH
1108 && enmTxDir != VDIOCTXTXDIR_DISCARD)
1109 RTSgBufClone(&pIoCtx->Req.Io.SgBuf, pcSgBuf);
1110 else
1111 memset(&pIoCtx->Req.Io.SgBuf, 0, sizeof(RTSGBUF));
1112}
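
/*
 * Sketch of typical use: internal synchronous requests put the context on the
 * stack and initialize it directly with this helper (compare the discard
 * helper further down, which does exactly this); the exact parameters depend
 * on the request type.
 *
 *     VDIOCTX IoCtx;
 *     vdIoCtxInit(&IoCtx, pDisk, VDIOCTXTXDIR_FLUSH, 0, 0, NULL,
 *                 NULL, NULL, NULL, VDIOCTX_FLAGS_SYNC);
 */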
1113
1114/**
1115 * Internal: Tries to read the desired range from the given cache.
1116 *
1117 * @returns VBox status code.
1118 * @retval VERR_VD_BLOCK_FREE if the block is not in the cache.
1119 * pcbRead will be set to the number of bytes not in the cache.
1120 * Everything thereafter might be in the cache.
1121 * @param pCache The cache to read from.
1122 * @param uOffset Offset of the virtual disk to read.
1123 * @param cbRead How much to read.
1124 * @param pIoCtx The I/O context to read into.
1125 * @param pcbRead Where to store the number of bytes actually read.
1126 * On success this indicates the number of bytes read from the cache.
1127 * If VERR_VD_BLOCK_FREE is returned this gives the number of bytes
1128 * which are not in the cache.
1129 * In both cases everything beyond this value
1130 * might or might not be in the cache.
1131 */
1132static int vdCacheReadHelper(PVDCACHE pCache, uint64_t uOffset,
1133 size_t cbRead, PVDIOCTX pIoCtx, size_t *pcbRead)
1134{
1135 int rc = VINF_SUCCESS;
1136
1137 LogFlowFunc(("pCache=%#p uOffset=%llu pIoCtx=%p cbRead=%zu pcbRead=%#p\n",
1138 pCache, uOffset, pIoCtx, cbRead, pcbRead));
1139
1140 AssertPtr(pCache);
1141 AssertPtr(pcbRead);
1142
1143 rc = pCache->Backend->pfnRead(pCache->pBackendData, uOffset, cbRead,
1144 pIoCtx, pcbRead);
1145
1146 LogFlowFunc(("returns rc=%Rrc pcbRead=%zu\n", rc, *pcbRead));
1147 return rc;
1148}
1149
1150/**
1151 * Internal: Writes data for the given block into the cache.
1152 *
1153 * @returns VBox status code.
1154 * @param pCache The cache to write to.
1155 * @param uOffset Offset of the virtual disk to write to the cache.
1156 * @param cbWrite How much to write.
1157 * @param pIoCtx The I/O context to write from.
1158 * @param pcbWritten How much data could be written, optional.
1159 */
1160static int vdCacheWriteHelper(PVDCACHE pCache, uint64_t uOffset, size_t cbWrite,
1161 PVDIOCTX pIoCtx, size_t *pcbWritten)
1162{
1163 int rc = VINF_SUCCESS;
1164
1165 LogFlowFunc(("pCache=%#p uOffset=%llu pIoCtx=%p cbWrite=%zu pcbWritten=%#p\n",
1166 pCache, uOffset, pIoCtx, cbWrite, pcbWritten));
1167
1168 AssertPtr(pCache);
1169 AssertPtr(pIoCtx);
1170 Assert(cbWrite > 0);
1171
1172 if (pcbWritten)
1173 rc = pCache->Backend->pfnWrite(pCache->pBackendData, uOffset, cbWrite,
1174 pIoCtx, pcbWritten);
1175 else
1176 {
1177 size_t cbWritten = 0;
1178
1179 do
1180 {
1181 rc = pCache->Backend->pfnWrite(pCache->pBackendData, uOffset, cbWrite,
1182 pIoCtx, &cbWritten);
1183 uOffset += cbWritten;
1184 cbWrite -= cbWritten;
1185 } while ( cbWrite
1186 && ( RT_SUCCESS(rc)
1187 || rc == VERR_VD_ASYNC_IO_IN_PROGRESS));
1188 }
1189
1190 LogFlowFunc(("returns rc=%Rrc pcbWritten=%zu\n",
1191 rc, pcbWritten ? *pcbWritten : cbWrite));
1192 return rc;
1193}
1194
1195/**
1196 * Creates a new empty discard state.
1197 *
1198 * @returns Pointer to the new discard state or NULL if out of memory.
1199 */
1200static PVDDISCARDSTATE vdDiscardStateCreate(void)
1201{
1202 PVDDISCARDSTATE pDiscard = (PVDDISCARDSTATE)RTMemAllocZ(sizeof(VDDISCARDSTATE));
1203
1204 if (pDiscard)
1205 {
1206 RTListInit(&pDiscard->ListLru);
1207 pDiscard->pTreeBlocks = (PAVLRU64TREE)RTMemAllocZ(sizeof(AVLRU64TREE));
1208 if (!pDiscard->pTreeBlocks)
1209 {
1210 RTMemFree(pDiscard);
1211 pDiscard = NULL;
1212 }
1213 }
1214
1215 return pDiscard;
1216}
1217
1218/**
1219 * Removes the least recently used blocks from the waiting list until
1220 * the new value is reached.
1221 *
1222 * @returns VBox status code.
1223 * @param pDisk VD disk container.
1224 * @param pDiscard The discard state.
1225 * @param cbDiscardingNew How many bytes should be waiting on success.
1226 * The number of bytes waiting can be less.
1227 */
1228static int vdDiscardRemoveBlocks(PVBOXHDD pDisk, PVDDISCARDSTATE pDiscard, size_t cbDiscardingNew)
1229{
1230 int rc = VINF_SUCCESS;
1231
1232 LogFlowFunc(("pDisk=%#p pDiscard=%#p cbDiscardingNew=%zu\n",
1233 pDisk, pDiscard, cbDiscardingNew));
1234
1235 while (pDiscard->cbDiscarding > cbDiscardingNew)
1236 {
1237 PVDDISCARDBLOCK pBlock = RTListGetLast(&pDiscard->ListLru, VDDISCARDBLOCK, NodeLru);
1238
1239 Assert(!RTListIsEmpty(&pDiscard->ListLru));
1240
1241 /* Go over the allocation bitmap and mark all discarded sectors as unused. */
1242 uint64_t offStart = pBlock->Core.Key;
1243 uint32_t idxStart = 0;
1244 size_t cbLeft = pBlock->cbDiscard;
1245 bool fAllocated = ASMBitTest(pBlock->pbmAllocated, idxStart);
1246 uint32_t cSectors = (uint32_t)(pBlock->cbDiscard / 512);
1247
1248 while (cbLeft > 0)
1249 {
1250 int32_t idxEnd;
1251 size_t cbThis = cbLeft;
1252
1253 if (fAllocated)
1254 {
1255 /* Check for the first unallocated bit. */
1256 idxEnd = ASMBitNextClear(pBlock->pbmAllocated, cSectors, idxStart);
1257 if (idxEnd != -1)
1258 {
1259 cbThis = (idxEnd - idxStart) * 512;
1260 fAllocated = false;
1261 }
1262 }
1263 else
1264 {
1265 /* Mark as unused and check for the first set bit. */
1266 idxEnd = ASMBitNextSet(pBlock->pbmAllocated, cSectors, idxStart);
1267 if (idxEnd != -1)
1268 cbThis = (idxEnd - idxStart) * 512;
1269
1270
1271 VDIOCTX IoCtx;
1272 vdIoCtxInit(&IoCtx, pDisk, VDIOCTXTXDIR_DISCARD, 0, 0, NULL,
1273 NULL, NULL, NULL, VDIOCTX_FLAGS_SYNC);
1274 rc = pDisk->pLast->Backend->pfnDiscard(pDisk->pLast->pBackendData,
1275 &IoCtx, offStart, cbThis, NULL,
1276 NULL, &cbThis, NULL,
1277 VD_DISCARD_MARK_UNUSED);
1278 if (RT_FAILURE(rc))
1279 break;
1280
1281 fAllocated = true;
1282 }
1283
1284 idxStart = idxEnd;
1285 offStart += cbThis;
1286 cbLeft -= cbThis;
1287 }
1288
1289 if (RT_FAILURE(rc))
1290 break;
1291
1292 PVDDISCARDBLOCK pBlockRemove = (PVDDISCARDBLOCK)RTAvlrU64RangeRemove(pDiscard->pTreeBlocks, pBlock->Core.Key);
1293 Assert(pBlockRemove == pBlock); NOREF(pBlockRemove);
1294 RTListNodeRemove(&pBlock->NodeLru);
1295
1296 pDiscard->cbDiscarding -= pBlock->cbDiscard;
1297 RTMemFree(pBlock->pbmAllocated);
1298 RTMemFree(pBlock);
1299 }
1300
1301 Assert(RT_FAILURE(rc) || pDiscard->cbDiscarding <= cbDiscardingNew);
1302
1303 LogFlowFunc(("returns rc=%Rrc\n", rc));
1304 return rc;
1305}
1306
1307/**
1308 * Destroys the current discard state, writing any waiting blocks to the image.
1309 *
1310 * @returns VBox status code.
1311 * @param pDisk VD disk container.
1312 */
1313static int vdDiscardStateDestroy(PVBOXHDD pDisk)
1314{
1315 int rc = VINF_SUCCESS;
1316
1317 if (pDisk->pDiscard)
1318 {
1319 rc = vdDiscardRemoveBlocks(pDisk, pDisk->pDiscard, 0 /* Remove all blocks. */);
1320 AssertRC(rc);
1321 RTMemFree(pDisk->pDiscard->pTreeBlocks);
1322 RTMemFree(pDisk->pDiscard);
1323 pDisk->pDiscard = NULL;
1324 }
1325
1326 return rc;
1327}
1328
1329/**
1330 * Marks the given range as allocated in the image.
1331 * Required if there are discards in progress and a block which can get discarded
1332 * is written to.
1333 *
1334 * @returns VBox status code.
1335 * @param pDisk VD container data.
1336 * @param uOffset First byte to mark as allocated.
1337 * @param cbRange Number of bytes to mark as allocated.
1338 */
1339static int vdDiscardSetRangeAllocated(PVBOXHDD pDisk, uint64_t uOffset, size_t cbRange)
1340{
1341 PVDDISCARDSTATE pDiscard = pDisk->pDiscard;
1342 int rc = VINF_SUCCESS;
1343
1344 if (pDiscard)
1345 {
1346 do
1347 {
1348 size_t cbThisRange = cbRange;
1349 PVDDISCARDBLOCK pBlock = (PVDDISCARDBLOCK)RTAvlrU64RangeGet(pDiscard->pTreeBlocks, uOffset);
1350
1351 if (pBlock)
1352 {
1353 int32_t idxStart, idxEnd;
1354
1355 Assert(!(cbThisRange % 512));
1356 Assert(!((uOffset - pBlock->Core.Key) % 512));
1357
1358 cbThisRange = RT_MIN(cbThisRange, pBlock->Core.KeyLast - uOffset + 1);
1359
1360 idxStart = (uOffset - pBlock->Core.Key) / 512;
1361 idxEnd = idxStart + (int32_t)(cbThisRange / 512);
1362 ASMBitSetRange(pBlock->pbmAllocated, idxStart, idxEnd);
1363 }
1364 else
1365 {
1366 pBlock = (PVDDISCARDBLOCK)RTAvlrU64GetBestFit(pDiscard->pTreeBlocks, uOffset, true);
1367 if (pBlock)
1368 cbThisRange = RT_MIN(cbThisRange, pBlock->Core.Key - uOffset);
1369 }
1370
1371 Assert(cbRange >= cbThisRange);
1372
1373 uOffset += cbThisRange;
1374 cbRange -= cbThisRange;
1375 } while (cbRange != 0);
1376 }
1377
1378 return rc;
1379}
1380
1381DECLINLINE(PVDIOCTX) vdIoCtxAlloc(PVBOXHDD pDisk, VDIOCTXTXDIR enmTxDir,
1382 uint64_t uOffset, size_t cbTransfer,
1383 PVDIMAGE pImageStart,PCRTSGBUF pcSgBuf,
1384 void *pvAllocation, PFNVDIOCTXTRANSFER pfnIoCtxTransfer,
1385 uint32_t fFlags)
1386{
1387 PVDIOCTX pIoCtx = NULL;
1388
1389 pIoCtx = (PVDIOCTX)RTMemCacheAlloc(pDisk->hMemCacheIoCtx);
1390 if (RT_LIKELY(pIoCtx))
1391 {
1392 vdIoCtxInit(pIoCtx, pDisk, enmTxDir, uOffset, cbTransfer, pImageStart,
1393 pcSgBuf, pvAllocation, pfnIoCtxTransfer, fFlags);
1394 }
1395
1396 return pIoCtx;
1397}
1398
1399DECLINLINE(PVDIOCTX) vdIoCtxRootAlloc(PVBOXHDD pDisk, VDIOCTXTXDIR enmTxDir,
1400 uint64_t uOffset, size_t cbTransfer,
1401 PVDIMAGE pImageStart, PCRTSGBUF pcSgBuf,
1402 PFNVDASYNCTRANSFERCOMPLETE pfnComplete,
1403 void *pvUser1, void *pvUser2,
1404 void *pvAllocation,
1405 PFNVDIOCTXTRANSFER pfnIoCtxTransfer,
1406 uint32_t fFlags)
1407{
1408 PVDIOCTX pIoCtx = vdIoCtxAlloc(pDisk, enmTxDir, uOffset, cbTransfer, pImageStart,
1409 pcSgBuf, pvAllocation, pfnIoCtxTransfer, fFlags);
1410
1411 if (RT_LIKELY(pIoCtx))
1412 {
1413 pIoCtx->pIoCtxParent = NULL;
1414 pIoCtx->Type.Root.pfnComplete = pfnComplete;
1415 pIoCtx->Type.Root.pvUser1 = pvUser1;
1416 pIoCtx->Type.Root.pvUser2 = pvUser2;
1417 }
1418
1419 LogFlow(("Allocated root I/O context %#p\n", pIoCtx));
1420 return pIoCtx;
1421}
1422
1423DECLINLINE(void) vdIoCtxDiscardInit(PVDIOCTX pIoCtx, PVBOXHDD pDisk, PCRTRANGE paRanges,
1424 unsigned cRanges, PFNVDASYNCTRANSFERCOMPLETE pfnComplete,
1425 void *pvUser1, void *pvUser2, void *pvAllocation,
1426 PFNVDIOCTXTRANSFER pfnIoCtxTransfer, uint32_t fFlags)
1427{
1428 pIoCtx->pIoCtxNext = NULL;
1429 pIoCtx->pDisk = pDisk;
1430 pIoCtx->enmTxDir = VDIOCTXTXDIR_DISCARD;
1431 pIoCtx->cDataTransfersPending = 0;
1432 pIoCtx->cMetaTransfersPending = 0;
1433 pIoCtx->fComplete = false;
1434 pIoCtx->fFlags = fFlags;
1435 pIoCtx->pvAllocation = pvAllocation;
1436 pIoCtx->pfnIoCtxTransfer = pfnIoCtxTransfer;
1437 pIoCtx->pfnIoCtxTransferNext = NULL;
1438 pIoCtx->rcReq = VINF_SUCCESS;
1439 pIoCtx->Req.Discard.paRanges = paRanges;
1440 pIoCtx->Req.Discard.cRanges = cRanges;
1441 pIoCtx->Req.Discard.idxRange = 0;
1442 pIoCtx->Req.Discard.cbDiscardLeft = 0;
1443 pIoCtx->Req.Discard.offCur = 0;
1444 pIoCtx->Req.Discard.cbThisDiscard = 0;
1445
1446 pIoCtx->pIoCtxParent = NULL;
1447 pIoCtx->Type.Root.pfnComplete = pfnComplete;
1448 pIoCtx->Type.Root.pvUser1 = pvUser1;
1449 pIoCtx->Type.Root.pvUser2 = pvUser2;
1450}
1451
1452DECLINLINE(PVDIOCTX) vdIoCtxDiscardAlloc(PVBOXHDD pDisk, PCRTRANGE paRanges,
1453 unsigned cRanges,
1454 PFNVDASYNCTRANSFERCOMPLETE pfnComplete,
1455 void *pvUser1, void *pvUser2,
1456 void *pvAllocation,
1457 PFNVDIOCTXTRANSFER pfnIoCtxTransfer,
1458 uint32_t fFlags)
1459{
1460 PVDIOCTX pIoCtx = NULL;
1461
1462 pIoCtx = (PVDIOCTX)RTMemCacheAlloc(pDisk->hMemCacheIoCtx);
1463 if (RT_LIKELY(pIoCtx))
1464 {
1465 vdIoCtxDiscardInit(pIoCtx, pDisk, paRanges, cRanges, pfnComplete, pvUser1,
1466 pvUser2, pvAllocation, pfnIoCtxTransfer, fFlags);
1467 }
1468
1469 LogFlow(("Allocated discard I/O context %#p\n", pIoCtx));
1470 return pIoCtx;
1471}
1472
1473DECLINLINE(PVDIOCTX) vdIoCtxChildAlloc(PVBOXHDD pDisk, VDIOCTXTXDIR enmTxDir,
1474 uint64_t uOffset, size_t cbTransfer,
1475 PVDIMAGE pImageStart, PCRTSGBUF pcSgBuf,
1476 PVDIOCTX pIoCtxParent, size_t cbTransferParent,
1477 size_t cbWriteParent, void *pvAllocation,
1478 PFNVDIOCTXTRANSFER pfnIoCtxTransfer)
1479{
1480 PVDIOCTX pIoCtx = vdIoCtxAlloc(pDisk, enmTxDir, uOffset, cbTransfer, pImageStart,
1481 pcSgBuf, pvAllocation, pfnIoCtxTransfer, pIoCtxParent->fFlags & ~VDIOCTX_FLAGS_DONT_FREE);
1482
1483 AssertPtr(pIoCtxParent);
1484 Assert(!pIoCtxParent->pIoCtxParent);
1485
1486 if (RT_LIKELY(pIoCtx))
1487 {
1488 pIoCtx->pIoCtxParent = pIoCtxParent;
1489 pIoCtx->Type.Child.uOffsetSaved = uOffset;
1490 pIoCtx->Type.Child.cbTransferLeftSaved = cbTransfer;
1491 pIoCtx->Type.Child.cbTransferParent = cbTransferParent;
1492 pIoCtx->Type.Child.cbWriteParent = cbWriteParent;
1493 }
1494
1495 LogFlow(("Allocated child I/O context %#p\n", pIoCtx));
1496 return pIoCtx;
1497}
1498
1499DECLINLINE(PVDIOTASK) vdIoTaskUserAlloc(PVDIOSTORAGE pIoStorage, PFNVDXFERCOMPLETED pfnComplete, void *pvUser, PVDIOCTX pIoCtx, uint32_t cbTransfer)
1500{
1501 PVDIOTASK pIoTask = NULL;
1502
1503 pIoTask = (PVDIOTASK)RTMemCacheAlloc(pIoStorage->pVDIo->pDisk->hMemCacheIoTask);
1504 if (pIoTask)
1505 {
1506 pIoTask->pIoStorage = pIoStorage;
1507 pIoTask->pfnComplete = pfnComplete;
1508 pIoTask->pvUser = pvUser;
1509 pIoTask->fMeta = false;
1510 pIoTask->Type.User.cbTransfer = cbTransfer;
1511 pIoTask->Type.User.pIoCtx = pIoCtx;
1512 }
1513
1514 return pIoTask;
1515}
1516
1517DECLINLINE(PVDIOTASK) vdIoTaskMetaAlloc(PVDIOSTORAGE pIoStorage, PFNVDXFERCOMPLETED pfnComplete, void *pvUser, PVDMETAXFER pMetaXfer)
1518{
1519 PVDIOTASK pIoTask = NULL;
1520
1521 pIoTask = (PVDIOTASK)RTMemCacheAlloc(pIoStorage->pVDIo->pDisk->hMemCacheIoTask);
1522 if (pIoTask)
1523 {
1524 pIoTask->pIoStorage = pIoStorage;
1525 pIoTask->pfnComplete = pfnComplete;
1526 pIoTask->pvUser = pvUser;
1527 pIoTask->fMeta = true;
1528 pIoTask->Type.Meta.pMetaXfer = pMetaXfer;
1529 }
1530
1531 return pIoTask;
1532}
1533
1534DECLINLINE(void) vdIoCtxFree(PVBOXHDD pDisk, PVDIOCTX pIoCtx)
1535{
1536 Log(("Freeing I/O context %#p\n", pIoCtx));
1537
1538 if (!(pIoCtx->fFlags & VDIOCTX_FLAGS_DONT_FREE))
1539 {
1540 if (pIoCtx->pvAllocation)
1541 RTMemFree(pIoCtx->pvAllocation);
1542#ifdef DEBUG
1543 memset(&pIoCtx->pDisk, 0xff, sizeof(void *));
1544#endif
1545 RTMemCacheFree(pDisk->hMemCacheIoCtx, pIoCtx);
1546 }
1547}
1548
1549DECLINLINE(void) vdIoTaskFree(PVBOXHDD pDisk, PVDIOTASK pIoTask)
1550{
1551#ifdef DEBUG
1552 memset(pIoTask, 0xff, sizeof(VDIOTASK));
1553#endif
1554 RTMemCacheFree(pDisk->hMemCacheIoTask, pIoTask);
1555}
1556
1557DECLINLINE(void) vdIoCtxChildReset(PVDIOCTX pIoCtx)
1558{
1559 AssertPtr(pIoCtx->pIoCtxParent);
1560
1561 RTSgBufReset(&pIoCtx->Req.Io.SgBuf);
1562 pIoCtx->Req.Io.uOffset = pIoCtx->Type.Child.uOffsetSaved;
1563 pIoCtx->Req.Io.cbTransferLeft = (uint32_t)pIoCtx->Type.Child.cbTransferLeftSaved;
1564 Assert((uint32_t)pIoCtx->Type.Child.cbTransferLeftSaved == pIoCtx->Type.Child.cbTransferLeftSaved);
1565}
1566
1567DECLINLINE(PVDMETAXFER) vdMetaXferAlloc(PVDIOSTORAGE pIoStorage, uint64_t uOffset, size_t cb)
1568{
1569 PVDMETAXFER pMetaXfer = (PVDMETAXFER)RTMemAlloc(RT_OFFSETOF(VDMETAXFER, abData[cb]));
1570
1571 if (RT_LIKELY(pMetaXfer))
1572 {
1573 pMetaXfer->Core.Key = uOffset;
1574 pMetaXfer->Core.KeyLast = uOffset + cb - 1;
1575 pMetaXfer->fFlags = VDMETAXFER_TXDIR_NONE;
1576 pMetaXfer->cbMeta = cb;
1577 pMetaXfer->pIoStorage = pIoStorage;
1578 pMetaXfer->cRefs = 0;
1579 pMetaXfer->pbDataShw = NULL;
1580 RTListInit(&pMetaXfer->ListIoCtxWaiting);
1581 RTListInit(&pMetaXfer->ListIoCtxShwWrites);
1582 }
1583 return pMetaXfer;
1584}
1585
1586DECLINLINE(void) vdIoCtxAddToWaitingList(volatile PVDIOCTX *ppList, PVDIOCTX pIoCtx)
1587{
1588 /* Put it on the waiting list. */
1589 PVDIOCTX pNext = ASMAtomicUoReadPtrT(ppList, PVDIOCTX);
1590 PVDIOCTX pHeadOld;
1591 pIoCtx->pIoCtxNext = pNext;
1592 while (!ASMAtomicCmpXchgExPtr(ppList, pIoCtx, pNext, &pHeadOld))
1593 {
1594 pNext = pHeadOld;
1595 Assert(pNext != pIoCtx);
1596 pIoCtx->pIoCtxNext = pNext;
1597 ASMNopPause();
1598 }
1599}
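
/*
 * Note: producers push onto these lists with the compare-and-swap loop above,
 * which yields LIFO order; the consumer later grabs the whole list with an
 * atomic exchange and reverses it to restore FIFO processing order, see
 * vdDiskProcessWaitingIoCtx() and vdDiskProcessBlockedIoCtx() below.
 */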
1600
1601DECLINLINE(void) vdIoCtxDefer(PVBOXHDD pDisk, PVDIOCTX pIoCtx)
1602{
1603 LogFlowFunc(("Deferring I/O context pIoCtx=%#p\n", pIoCtx));
1604
1605 Assert(!pIoCtx->pIoCtxParent && !(pIoCtx->fFlags & VDIOCTX_FLAGS_BLOCKED));
1606 pIoCtx->fFlags |= VDIOCTX_FLAGS_BLOCKED;
1607 vdIoCtxAddToWaitingList(&pDisk->pIoCtxBlockedHead, pIoCtx);
1608}
1609
1610static size_t vdIoCtxCopy(PVDIOCTX pIoCtxDst, PVDIOCTX pIoCtxSrc, size_t cbData)
1611{
1612 return RTSgBufCopy(&pIoCtxDst->Req.Io.SgBuf, &pIoCtxSrc->Req.Io.SgBuf, cbData);
1613}
1614
1615#if 0 /* unused */
1616static int vdIoCtxCmp(PVDIOCTX pIoCtx1, PVDIOCTX pIoCtx2, size_t cbData)
1617{
1618 return RTSgBufCmp(&pIoCtx1->Req.Io.SgBuf, &pIoCtx2->Req.Io.SgBuf, cbData);
1619}
1620#endif
1621
1622static size_t vdIoCtxCopyTo(PVDIOCTX pIoCtx, const uint8_t *pbData, size_t cbData)
1623{
1624 return RTSgBufCopyFromBuf(&pIoCtx->Req.Io.SgBuf, pbData, cbData);
1625}
1626
1627static size_t vdIoCtxCopyFrom(PVDIOCTX pIoCtx, uint8_t *pbData, size_t cbData)
1628{
1629 return RTSgBufCopyToBuf(&pIoCtx->Req.Io.SgBuf, pbData, cbData);
1630}
1631
1632static size_t vdIoCtxSet(PVDIOCTX pIoCtx, uint8_t ch, size_t cbData)
1633{
1634 return RTSgBufSet(&pIoCtx->Req.Io.SgBuf, ch, cbData);
1635}
1636
1637/**
1638 * Returns whether the given I/O context has completed.
1639 *
1640 * @returns Flag whether the I/O context is complete.
1641 * @param pIoCtx The I/O context to check.
1642 */
1643DECLINLINE(bool) vdIoCtxIsComplete(PVDIOCTX pIoCtx)
1644{
1645 if ( !pIoCtx->cMetaTransfersPending
1646 && !pIoCtx->cDataTransfersPending
1647 && !pIoCtx->pfnIoCtxTransfer)
1648 return true;
1649
1650 /*
1651 * We complete the I/O context in case of an error
1652 * if there is no I/O task pending.
1653 */
1654 if ( RT_FAILURE(pIoCtx->rcReq)
1655 && !pIoCtx->cMetaTransfersPending
1656 && !pIoCtx->cDataTransfersPending)
1657 return true;
1658
1659 return false;
1660}
1661
1662/**
1663 * Returns whether the given I/O context is blocked due to a metadata transfer
1664 * or because the backend blocked it.
1665 *
1666 * @returns Flag whether the I/O context is blocked.
1667 * @param pIoCtx The I/O context to check.
1668 */
1669DECLINLINE(bool) vdIoCtxIsBlocked(PVDIOCTX pIoCtx)
1670{
1671 /* Don't change anything if there is a metadata transfer pending or we are blocked. */
1672 if ( pIoCtx->cMetaTransfersPending
1673 || (pIoCtx->fFlags & VDIOCTX_FLAGS_BLOCKED))
1674 return true;
1675
1676 return false;
1677}
1678
1679/**
1680 * Process the I/O context, core method which assumes that the I/O context
1681 * acquired the lock.
1682 *
1683 * @returns VBox status code.
1684 * @param pIoCtx I/O context to process.
1685 */
1686static int vdIoCtxProcessLocked(PVDIOCTX pIoCtx)
1687{
1688 int rc = VINF_SUCCESS;
1689
1690 VD_IS_LOCKED(pIoCtx->pDisk);
1691
1692 LogFlowFunc(("pIoCtx=%#p\n", pIoCtx));
1693
1694 if (!vdIoCtxIsComplete(pIoCtx))
1695 {
1696 if (!vdIoCtxIsBlocked(pIoCtx))
1697 {
1698 if (pIoCtx->pfnIoCtxTransfer)
1699 {
1700 /* Call the transfer function advancing to the next while there is no error. */
1701 while ( pIoCtx->pfnIoCtxTransfer
1702 && !pIoCtx->cMetaTransfersPending
1703 && RT_SUCCESS(rc))
1704 {
1705 LogFlowFunc(("calling transfer function %#p\n", pIoCtx->pfnIoCtxTransfer));
1706 rc = pIoCtx->pfnIoCtxTransfer(pIoCtx);
1707
1708 /* Advance to the next part of the transfer if the current one succeeded. */
1709 if (RT_SUCCESS(rc))
1710 {
1711 pIoCtx->pfnIoCtxTransfer = pIoCtx->pfnIoCtxTransferNext;
1712 pIoCtx->pfnIoCtxTransferNext = NULL;
1713 }
1714 }
1715 }
1716
1717 if ( RT_SUCCESS(rc)
1718 && !pIoCtx->cMetaTransfersPending
1719 && !pIoCtx->cDataTransfersPending
1720 && !(pIoCtx->fFlags & VDIOCTX_FLAGS_BLOCKED))
1721 rc = VINF_VD_ASYNC_IO_FINISHED;
1722 else if ( RT_SUCCESS(rc)
1723 || rc == VERR_VD_NOT_ENOUGH_METADATA
1724 || rc == VERR_VD_IOCTX_HALT)
1725 rc = VERR_VD_ASYNC_IO_IN_PROGRESS;
1726 else if ( RT_FAILURE(rc)
1727 && (rc != VERR_VD_ASYNC_IO_IN_PROGRESS))
1728 {
1729 ASMAtomicCmpXchgS32(&pIoCtx->rcReq, rc, VINF_SUCCESS);
1730
1731 /*
1732 * The I/O context completed if we have an error and there is no data
1733 * or meta data transfer pending.
1734 */
1735 if ( !pIoCtx->cMetaTransfersPending
1736 && !pIoCtx->cDataTransfersPending)
1737 rc = VINF_VD_ASYNC_IO_FINISHED;
1738 else
1739 rc = VERR_VD_ASYNC_IO_IN_PROGRESS;
1740 }
1741 }
1742 else
1743 rc = VERR_VD_ASYNC_IO_IN_PROGRESS;
1744 }
1745 else
1746 rc = VINF_VD_ASYNC_IO_FINISHED;
1747
1748 LogFlowFunc(("pIoCtx=%#p rc=%Rrc cDataTransfersPending=%u cMetaTransfersPending=%u fComplete=%RTbool\n",
1749 pIoCtx, rc, pIoCtx->cDataTransfersPending, pIoCtx->cMetaTransfersPending,
1750 pIoCtx->fComplete));
1751
1752 return rc;
1753}
1754
1755/**
1756 * Processes the list of waiting I/O contexts.
1757 *
1758 * @returns VBox status code, only valid if pIoCtxRc is not NULL, treat as void
1759 * function otherwise.
1760 * @param pDisk The disk structure.
1761 * @param pIoCtxRc An I/O context handle which waits on the list. When processed,
1762 * the status code is returned. NULL if there is no I/O context
1763 * to return the status code for.
1764 */
1765static int vdDiskProcessWaitingIoCtx(PVBOXHDD pDisk, PVDIOCTX pIoCtxRc)
1766{
1767 int rc = VERR_VD_ASYNC_IO_IN_PROGRESS;
1768
1769 LogFlowFunc(("pDisk=%#p pIoCtxRc=%#p\n", pDisk, pIoCtxRc));
1770
1771 VD_IS_LOCKED(pDisk);
1772
1773 /* Get the waiting list and process it in FIFO order. */
1774 PVDIOCTX pIoCtxHead = ASMAtomicXchgPtrT(&pDisk->pIoCtxHead, NULL, PVDIOCTX);
1775
1776 /* Reverse it. */
1777 PVDIOCTX pCur = pIoCtxHead;
1778 pIoCtxHead = NULL;
1779 while (pCur)
1780 {
1781 PVDIOCTX pInsert = pCur;
1782 pCur = pCur->pIoCtxNext;
1783 pInsert->pIoCtxNext = pIoCtxHead;
1784 pIoCtxHead = pInsert;
1785 }
1786
1787 /* Process now. */
1788 pCur = pIoCtxHead;
1789 while (pCur)
1790 {
1791 int rcTmp;
1792 PVDIOCTX pTmp = pCur;
1793
1794 pCur = pCur->pIoCtxNext;
1795 pTmp->pIoCtxNext = NULL;
1796
1797 /*
1798 * Need to clear the sync flag here if there is a new I/O context
1799 * with it set and the context is not given in pIoCtxRc.
1800 * This happens most likely on a different thread and that one shouldn't
1801 * process the context synchronously.
1802 *
1803 * The thread who issued the context will wait on the event semaphore
1804 * anyway which is signalled when the completion handler is called.
1805 */
1806 if ( pTmp->fFlags & VDIOCTX_FLAGS_SYNC
1807 && pTmp != pIoCtxRc)
1808 pTmp->fFlags &= ~VDIOCTX_FLAGS_SYNC;
1809
1810 rcTmp = vdIoCtxProcessLocked(pTmp);
1811 if (pTmp == pIoCtxRc)
1812 {
1813 if ( rcTmp == VINF_VD_ASYNC_IO_FINISHED
1814 && RT_SUCCESS(pTmp->rcReq)
1815 && pTmp->enmTxDir == VDIOCTXTXDIR_READ)
1816 {
1817 int rc2 = vdFilterChainApplyRead(pDisk, pTmp->Req.Io.uOffsetXferOrig,
1818 pTmp->Req.Io.cbXferOrig, pTmp);
1819 if (RT_FAILURE(rc2))
1820 rcTmp = rc2;
1821 }
1822
1823 /* The given I/O context was processed, pass the return code to the caller. */
1824 if ( rcTmp == VINF_VD_ASYNC_IO_FINISHED
1825 && (pTmp->fFlags & VDIOCTX_FLAGS_SYNC))
1826 rc = pTmp->rcReq;
1827 else
1828 rc = rcTmp;
1829 }
1830 else if ( rcTmp == VINF_VD_ASYNC_IO_FINISHED
1831 && ASMAtomicCmpXchgBool(&pTmp->fComplete, true, false))
1832 {
1833 LogFlowFunc(("Waiting I/O context completed pTmp=%#p\n", pTmp));
1834 vdThreadFinishWrite(pDisk);
1835 vdIoCtxRootComplete(pDisk, pTmp);
1836 vdIoCtxFree(pDisk, pTmp);
1837 }
1838 }
1839
1840 LogFlowFunc(("returns rc=%Rrc\n", rc));
1841 return rc;
1842}
1843
1844/**
1845 * Processes the list of blocked I/O contexts.
1846 *
1847 * @returns nothing.
1848 * @param pDisk The disk structure.
1849 */
1850static void vdDiskProcessBlockedIoCtx(PVBOXHDD pDisk)
1851{
1852 LogFlowFunc(("pDisk=%#p\n", pDisk));
1853
1854 VD_IS_LOCKED(pDisk);
1855
1856 /* Get the waiting list and process it in FIFO order. */
1857 PVDIOCTX pIoCtxHead = ASMAtomicXchgPtrT(&pDisk->pIoCtxBlockedHead, NULL, PVDIOCTX);
1858
1859 /* Reverse it. */
1860 PVDIOCTX pCur = pIoCtxHead;
1861 pIoCtxHead = NULL;
1862 while (pCur)
1863 {
1864 PVDIOCTX pInsert = pCur;
1865 pCur = pCur->pIoCtxNext;
1866 pInsert->pIoCtxNext = pIoCtxHead;
1867 pIoCtxHead = pInsert;
1868 }
1869
1870 /* Process now. */
1871 pCur = pIoCtxHead;
1872 while (pCur)
1873 {
1874 int rc;
1875 PVDIOCTX pTmp = pCur;
1876
1877 pCur = pCur->pIoCtxNext;
1878 pTmp->pIoCtxNext = NULL;
1879
1880 Assert(!pTmp->pIoCtxParent);
1881 Assert(pTmp->fFlags & VDIOCTX_FLAGS_BLOCKED);
1882 pTmp->fFlags &= ~VDIOCTX_FLAGS_BLOCKED;
1883
1884 rc = vdIoCtxProcessLocked(pTmp);
1885 if ( rc == VINF_VD_ASYNC_IO_FINISHED
1886 && ASMAtomicCmpXchgBool(&pTmp->fComplete, true, false))
1887 {
1888 LogFlowFunc(("Waiting I/O context completed pTmp=%#p\n", pTmp));
1889 vdThreadFinishWrite(pDisk);
1890 vdIoCtxRootComplete(pDisk, pTmp);
1891 vdIoCtxFree(pDisk, pTmp);
1892 }
1893 }
1894
1895 LogFlowFunc(("returns\n"));
1896}
1897
1898/**
1899 * Processes the I/O context trying to lock the critical section.
1900 * The context is deferred if the critical section is busy.
1901 *
1902 * @returns VBox status code.
1903 * @param pIoCtx The I/O context to process.
1904 */
1905static int vdIoCtxProcessTryLockDefer(PVDIOCTX pIoCtx)
1906{
1907 int rc = VINF_SUCCESS;
1908 PVBOXHDD pDisk = pIoCtx->pDisk;
1909
1910 Log(("Defer pIoCtx=%#p\n", pIoCtx));
1911
1912 /* Put it on the waiting list first. */
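    /* If we manage to take the lock below the context is picked up again and processed
     * together with the other waiting contexts just before the lock is released. */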
1913 vdIoCtxAddToWaitingList(&pDisk->pIoCtxHead, pIoCtx);
1914
1915 if (ASMAtomicCmpXchgBool(&pDisk->fLocked, true, false))
1916 {
1917        /* Release it again; the context will be processed right before the lock is dropped. */
1918 LogFlowFunc(("Successfully acquired the lock\n"));
1919 rc = vdDiskUnlock(pDisk, pIoCtx);
1920 }
1921 else
1922 {
1923 LogFlowFunc(("Lock is held\n"));
1924 rc = VERR_VD_ASYNC_IO_IN_PROGRESS;
1925 }
1926
1927 return rc;
1928}
1929
1930/**
1931 * Process the I/O context in a synchronous manner, waiting
1932 * for it to complete.
1933 *
1934 * @returns VBox status code of the completed request.
1935 * @param pIoCtx The sync I/O context.
1936 * @param   hEventComplete Event semaphore to wait on for completion.
1937 */
1938static int vdIoCtxProcessSync(PVDIOCTX pIoCtx, RTSEMEVENT hEventComplete)
1939{
1940 int rc = VINF_SUCCESS;
1941 PVBOXHDD pDisk = pIoCtx->pDisk;
1942
1943 LogFlowFunc(("pIoCtx=%p\n", pIoCtx));
1944
1945 AssertMsg(pIoCtx->fFlags & (VDIOCTX_FLAGS_SYNC | VDIOCTX_FLAGS_DONT_FREE),
1946 ("I/O context is not marked as synchronous\n"));
1947
1948 rc = vdIoCtxProcessTryLockDefer(pIoCtx);
1949 if (rc == VINF_VD_ASYNC_IO_FINISHED)
1950 rc = VINF_SUCCESS;
1951
1952 if (rc == VERR_VD_ASYNC_IO_IN_PROGRESS)
1953 {
1954 rc = RTSemEventWait(hEventComplete, RT_INDEFINITE_WAIT);
1955 AssertRC(rc);
1956 }
1957
1958 rc = pIoCtx->rcReq;
1959 vdIoCtxFree(pDisk, pIoCtx);
1960
1961 return rc;
1962}
1963
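/**
 * Returns whether the given I/O context is the current owner of the disk lock.
 *
 * @returns true if the context holds the lock, false otherwise.
 * @param   pDisk    The disk structure.
 * @param   pIoCtx   The I/O context to check.
 */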
1964DECLINLINE(bool) vdIoCtxIsDiskLockOwner(PVBOXHDD pDisk, PVDIOCTX pIoCtx)
1965{
1966 return pDisk->pIoCtxLockOwner == pIoCtx;
1967}
1968
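/**
 * Tries to make the given I/O context the exclusive owner of the disk
 * (pIoCtxLockOwner). If another context already holds the lock the context is
 * deferred and VERR_VD_ASYNC_IO_IN_PROGRESS is returned.
 *
 * @returns VBox status code.
 * @param   pDisk    The disk structure.
 * @param   pIoCtx   The I/O context trying to take the lock.
 */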
1969static int vdIoCtxLockDisk(PVBOXHDD pDisk, PVDIOCTX pIoCtx)
1970{
1971 int rc = VINF_SUCCESS;
1972
1973 VD_IS_LOCKED(pDisk);
1974
1975 LogFlowFunc(("pDisk=%#p pIoCtx=%#p\n", pDisk, pIoCtx));
1976
1977 if (!ASMAtomicCmpXchgPtr(&pDisk->pIoCtxLockOwner, pIoCtx, NIL_VDIOCTX))
1978 {
1979 Assert(pDisk->pIoCtxLockOwner != pIoCtx); /* No nesting allowed. */
1980 vdIoCtxDefer(pDisk, pIoCtx);
1981 rc = VERR_VD_ASYNC_IO_IN_PROGRESS;
1982 }
1983
1984 LogFlowFunc(("returns -> %Rrc\n", rc));
1985 return rc;
1986}
1987
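/**
 * Releases the disk ownership held by the given I/O context and optionally
 * processes the I/O contexts which were blocked in the meantime.
 *
 * @returns nothing.
 * @param   pDisk               The disk structure.
 * @param   pIoCtx              The I/O context releasing the lock.
 * @param   fProcessBlockedReqs Flag whether to process the blocked I/O contexts.
 */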
1988static void vdIoCtxUnlockDisk(PVBOXHDD pDisk, PVDIOCTX pIoCtx, bool fProcessBlockedReqs)
1989{
1990 RT_NOREF1(pIoCtx);
1991 LogFlowFunc(("pDisk=%#p pIoCtx=%#p fProcessBlockedReqs=%RTbool\n",
1992 pDisk, pIoCtx, fProcessBlockedReqs));
1993
1994 VD_IS_LOCKED(pDisk);
1995
1996 LogFlow(("Unlocking disk lock owner is %#p\n", pDisk->pIoCtxLockOwner));
1997 Assert(pDisk->pIoCtxLockOwner == pIoCtx);
1998 ASMAtomicXchgPtrT(&pDisk->pIoCtxLockOwner, NIL_VDIOCTX, PVDIOCTX);
1999
2000 if (fProcessBlockedReqs)
2001 {
2002        /* Process any blocked writes if the current request didn't cause another growing operation. */
2003 vdDiskProcessBlockedIoCtx(pDisk);
2004 }
2005
2006 LogFlowFunc(("returns\n"));
2007}
2008
2009/**
2010 * Internal: Reads a given amount of data from the image chain of the disk.
2011 **/
2012static int vdDiskReadHelper(PVBOXHDD pDisk, PVDIMAGE pImage, PVDIMAGE pImageParentOverride,
2013 uint64_t uOffset, size_t cbRead, PVDIOCTX pIoCtx, size_t *pcbThisRead)
2014{
2015 RT_NOREF1(pDisk);
2016 int rc = VINF_SUCCESS;
2017 size_t cbThisRead = cbRead;
2018
2019 AssertPtr(pcbThisRead);
2020
2021 *pcbThisRead = 0;
2022
2023 /*
2024 * Try to read from the given image.
2025     * If the block is not allocated, read from the override chain if present.
2026 */
2027 rc = pImage->Backend->pfnRead(pImage->pBackendData,
2028 uOffset, cbThisRead, pIoCtx,
2029 &cbThisRead);
2030
2031 if (rc == VERR_VD_BLOCK_FREE)
2032 {
2033 for (PVDIMAGE pCurrImage = pImageParentOverride ? pImageParentOverride : pImage->pPrev;
2034 pCurrImage != NULL && rc == VERR_VD_BLOCK_FREE;
2035 pCurrImage = pCurrImage->pPrev)
2036 {
2037 rc = pCurrImage->Backend->pfnRead(pCurrImage->pBackendData,
2038 uOffset, cbThisRead, pIoCtx,
2039 &cbThisRead);
2040 }
2041 }
2042
2043 if (RT_SUCCESS(rc) || rc == VERR_VD_BLOCK_FREE)
2044 *pcbThisRead = cbThisRead;
2045
2046 return rc;
2047}
2048
2049/**
2050 * internal: read the specified amount of data in whatever blocks the backend
2051 * will give us - async version.
2052 */
2053static DECLCALLBACK(int) vdReadHelperAsync(PVDIOCTX pIoCtx)
2054{
2055 int rc;
2056 PVBOXHDD pDisk = pIoCtx->pDisk;
2057 size_t cbToRead = pIoCtx->Req.Io.cbTransfer;
2058 uint64_t uOffset = pIoCtx->Req.Io.uOffset;
2059 PVDIMAGE pCurrImage = pIoCtx->Req.Io.pImageCur;
2060 PVDIMAGE pImageParentOverride = pIoCtx->Req.Io.pImageParentOverride;
2061 unsigned cImagesRead = pIoCtx->Req.Io.cImagesRead;
2062 size_t cbThisRead;
2063
2064 /*
2065 * Check whether there is a full block write in progress which was not allocated.
2066 * Defer I/O if the range interferes but only if it does not belong to the
2067 * write doing the allocation.
2068 */
2069 if ( pDisk->pIoCtxLockOwner != NIL_VDIOCTX
2070 && uOffset >= pDisk->uOffsetStartLocked
2071 && uOffset < pDisk->uOffsetEndLocked
2072 && ( !pIoCtx->pIoCtxParent
2073 || pIoCtx->pIoCtxParent != pDisk->pIoCtxLockOwner))
2074 {
2075        Log(("Interfering read while allocating a new block => deferring read\n"));
2076 vdIoCtxDefer(pDisk, pIoCtx);
2077 return VERR_VD_ASYNC_IO_IN_PROGRESS;
2078 }
2079
2080 /* Loop until all reads started or we have a backend which needs to read metadata. */
2081 do
2082 {
2083 /* Search for image with allocated block. Do not attempt to read more
2084 * than the previous reads marked as valid. Otherwise this would return
2085 * stale data when different block sizes are used for the images. */
2086 cbThisRead = cbToRead;
2087
2088 if ( pDisk->pCache
2089 && !pImageParentOverride)
2090 {
2091 rc = vdCacheReadHelper(pDisk->pCache, uOffset, cbThisRead,
2092 pIoCtx, &cbThisRead);
2093 if (rc == VERR_VD_BLOCK_FREE)
2094 {
2095 rc = vdDiskReadHelper(pDisk, pCurrImage, NULL, uOffset, cbThisRead,
2096 pIoCtx, &cbThisRead);
2097
2098 /* If the read was successful, write the data back into the cache. */
2099 if ( RT_SUCCESS(rc)
2100 && pIoCtx->fFlags & VDIOCTX_FLAGS_READ_UPDATE_CACHE)
2101 {
2102 rc = vdCacheWriteHelper(pDisk->pCache, uOffset, cbThisRead,
2103 pIoCtx, NULL);
2104 }
2105 }
2106 }
2107 else
2108 {
2109 /*
2110 * Try to read from the given image.
2111             * If the block is not allocated, read from the override chain if present.
2112 */
2113 rc = pCurrImage->Backend->pfnRead(pCurrImage->pBackendData,
2114 uOffset, cbThisRead, pIoCtx,
2115 &cbThisRead);
2116
2117 if ( rc == VERR_VD_BLOCK_FREE
2118 && cImagesRead != 1)
2119 {
2120 unsigned cImagesToProcess = cImagesRead;
2121
2122 pCurrImage = pImageParentOverride ? pImageParentOverride : pCurrImage->pPrev;
2123 pIoCtx->Req.Io.pImageParentOverride = NULL;
2124
2125 while (pCurrImage && rc == VERR_VD_BLOCK_FREE)
2126 {
2127 rc = pCurrImage->Backend->pfnRead(pCurrImage->pBackendData,
2128 uOffset, cbThisRead,
2129 pIoCtx, &cbThisRead);
2130 if (cImagesToProcess == 1)
2131 break;
2132 else if (cImagesToProcess > 0)
2133 cImagesToProcess--;
2134
2135 if (rc == VERR_VD_BLOCK_FREE)
2136 pCurrImage = pCurrImage->pPrev;
2137 }
2138 }
2139 }
2140
2141        /* The task state will be updated on success already, don't do it here! */
2142 if (rc == VERR_VD_BLOCK_FREE)
2143 {
2144 /* No image in the chain contains the data for the block. */
2145 ASMAtomicSubU32(&pIoCtx->Req.Io.cbTransferLeft, (uint32_t)cbThisRead); Assert(cbThisRead == (uint32_t)cbThisRead);
2146
2147 /* Fill the free space with 0 if we are told to do so
2148 * or a previous read returned valid data. */
2149 if (pIoCtx->fFlags & VDIOCTX_FLAGS_ZERO_FREE_BLOCKS)
2150 vdIoCtxSet(pIoCtx, '\0', cbThisRead);
2151 else
2152 pIoCtx->Req.Io.cbBufClear += cbThisRead;
2153
2154 if (pIoCtx->Req.Io.pImageCur->uOpenFlags & VD_OPEN_FLAGS_INFORM_ABOUT_ZERO_BLOCKS)
2155 rc = VINF_VD_NEW_ZEROED_BLOCK;
2156 else
2157 rc = VINF_SUCCESS;
2158 }
2159 else if (rc == VERR_VD_IOCTX_HALT)
2160 {
2161 uOffset += cbThisRead;
2162 cbToRead -= cbThisRead;
2163 pIoCtx->fFlags |= VDIOCTX_FLAGS_BLOCKED;
2164 }
2165 else if ( RT_SUCCESS(rc)
2166 || rc == VERR_VD_ASYNC_IO_IN_PROGRESS)
2167 {
2168            /* First non-free block; fill the space before it with 0. */
2169 if ( pIoCtx->Req.Io.cbBufClear
2170 && !(pIoCtx->fFlags & VDIOCTX_FLAGS_ZERO_FREE_BLOCKS))
2171 {
2172 RTSGBUF SgBuf;
2173 RTSgBufClone(&SgBuf, &pIoCtx->Req.Io.SgBuf);
2174 RTSgBufReset(&SgBuf);
2175 RTSgBufSet(&SgBuf, 0, pIoCtx->Req.Io.cbBufClear);
2176 pIoCtx->Req.Io.cbBufClear = 0;
2177 pIoCtx->fFlags |= VDIOCTX_FLAGS_ZERO_FREE_BLOCKS;
2178 }
2179 rc = VINF_SUCCESS;
2180 }
2181
2182 if (RT_FAILURE(rc))
2183 break;
2184
2185 cbToRead -= cbThisRead;
2186 uOffset += cbThisRead;
2187 pCurrImage = pIoCtx->Req.Io.pImageStart; /* Start with the highest image in the chain. */
2188 } while (cbToRead != 0 && RT_SUCCESS(rc));
2189
2190 if ( rc == VERR_VD_NOT_ENOUGH_METADATA
2191 || rc == VERR_VD_IOCTX_HALT)
2192 {
2193 /* Save the current state. */
2194 pIoCtx->Req.Io.uOffset = uOffset;
2195 pIoCtx->Req.Io.cbTransfer = cbToRead;
2196 pIoCtx->Req.Io.pImageCur = pCurrImage ? pCurrImage : pIoCtx->Req.Io.pImageStart;
2197 }
2198
2199 return (!(pIoCtx->fFlags & VDIOCTX_FLAGS_ZERO_FREE_BLOCKS))
2200 ? VERR_VD_BLOCK_FREE
2201 : rc;
2202}
2203
2204/**
2205 * internal: parent image read wrapper for compacting.
2206 */
2207static DECLCALLBACK(int) vdParentRead(void *pvUser, uint64_t uOffset, void *pvBuf,
2208 size_t cbRead)
2209{
2210 PVDPARENTSTATEDESC pParentState = (PVDPARENTSTATEDESC)pvUser;
2211
2212 /** @todo
2213 * Only used for compaction so far which is not possible to mix with async I/O.
2214 * Needs to be changed if we want to support online compaction of images.
2215 */
2216 bool fLocked = ASMAtomicXchgBool(&pParentState->pDisk->fLocked, true);
2217 AssertMsgReturn(!fLocked,
2218 ("Calling synchronous parent read while another thread holds the disk lock\n"),
2219 VERR_VD_INVALID_STATE);
2220
2221 /* Fake an I/O context. */
2222 RTSGSEG Segment;
2223 RTSGBUF SgBuf;
2224 VDIOCTX IoCtx;
2225
2226 Segment.pvSeg = pvBuf;
2227 Segment.cbSeg = cbRead;
2228 RTSgBufInit(&SgBuf, &Segment, 1);
2229 vdIoCtxInit(&IoCtx, pParentState->pDisk, VDIOCTXTXDIR_READ, uOffset, cbRead, pParentState->pImage,
2230 &SgBuf, NULL, NULL, VDIOCTX_FLAGS_SYNC | VDIOCTX_FLAGS_ZERO_FREE_BLOCKS);
2231 int rc = vdReadHelperAsync(&IoCtx);
2232 ASMAtomicXchgBool(&pParentState->pDisk->fLocked, false);
2233 return rc;
2234}
2235
2236/**
2237 * Extended version of vdReadHelper(), implementing certain optimizations
2238 * for image cloning.
2239 *
2240 * @returns VBox status code.
2241 * @param pDisk The disk to read from.
2242 * @param pImage The image to start reading from.
2243 * @param pImageParentOverride The parent image to read from
2244 * if the starting image returns a free block.
2245 * If NULL is passed the real parent of the image
2246 * in the chain is used.
2247 * @param uOffset Offset in the disk to start reading from.
2248 * @param pvBuf Where to store the read data.
2249 * @param cbRead How much to read.
2250 * @param fZeroFreeBlocks Flag whether free blocks should be zeroed.
2251 *                          If false and no image has data for the specified
2252 *                          range, VERR_VD_BLOCK_FREE is returned.
2253 * Note that unallocated blocks are still zeroed
2254 * if at least one image has valid data for a part
2255 * of the range.
2256 * @param fUpdateCache Flag whether to update the attached cache if
2257 * available.
2258 * @param cImagesRead Number of images in the chain to read until
2259 * the read is cut off. A value of 0 disables the cut off.
2260 */
2261static int vdReadHelperEx(PVBOXHDD pDisk, PVDIMAGE pImage, PVDIMAGE pImageParentOverride,
2262 uint64_t uOffset, void *pvBuf, size_t cbRead,
2263 bool fZeroFreeBlocks, bool fUpdateCache, unsigned cImagesRead)
2264{
2265 int rc = VINF_SUCCESS;
2266 uint32_t fFlags = VDIOCTX_FLAGS_SYNC | VDIOCTX_FLAGS_DONT_FREE;
2267 RTSGSEG Segment;
2268 RTSGBUF SgBuf;
2269 VDIOCTX IoCtx;
2270 RTSEMEVENT hEventComplete = NIL_RTSEMEVENT;
2271
2272 rc = RTSemEventCreate(&hEventComplete);
2273 if (RT_FAILURE(rc))
2274 return rc;
2275
2276 if (fZeroFreeBlocks)
2277 fFlags |= VDIOCTX_FLAGS_ZERO_FREE_BLOCKS;
2278 if (fUpdateCache)
2279 fFlags |= VDIOCTX_FLAGS_READ_UPDATE_CACHE;
2280
2281 Segment.pvSeg = pvBuf;
2282 Segment.cbSeg = cbRead;
2283 RTSgBufInit(&SgBuf, &Segment, 1);
2284 vdIoCtxInit(&IoCtx, pDisk, VDIOCTXTXDIR_READ, uOffset, cbRead, pImage, &SgBuf,
2285 NULL, vdReadHelperAsync, fFlags);
2286
2287 IoCtx.Req.Io.pImageParentOverride = pImageParentOverride;
2288 IoCtx.Req.Io.cImagesRead = cImagesRead;
2289 IoCtx.Type.Root.pfnComplete = vdIoCtxSyncComplete;
2290 IoCtx.Type.Root.pvUser1 = pDisk;
2291 IoCtx.Type.Root.pvUser2 = hEventComplete;
2292 rc = vdIoCtxProcessSync(&IoCtx, hEventComplete);
2293 RTSemEventDestroy(hEventComplete);
2294 return rc;
2295}
2296
2297/**
2298 * internal: read the specified amount of data in whatever blocks the backend
2299 * will give us.
2300 */
2301static int vdReadHelper(PVBOXHDD pDisk, PVDIMAGE pImage, uint64_t uOffset,
2302 void *pvBuf, size_t cbRead, bool fUpdateCache)
2303{
2304 return vdReadHelperEx(pDisk, pImage, NULL, uOffset, pvBuf, cbRead,
2305 true /* fZeroFreeBlocks */, fUpdateCache, 0);
2306}
2307
2308/**
2309 * internal: mark the disk as not modified.
2310 */
2311static void vdResetModifiedFlag(PVBOXHDD pDisk)
2312{
2313 if (pDisk->uModified & VD_IMAGE_MODIFIED_FLAG)
2314 {
2315 /* generate new last-modified uuid */
2316 if (!(pDisk->uModified & VD_IMAGE_MODIFIED_DISABLE_UUID_UPDATE))
2317 {
2318 RTUUID Uuid;
2319
2320 RTUuidCreate(&Uuid);
2321 pDisk->pLast->Backend->pfnSetModificationUuid(pDisk->pLast->pBackendData,
2322 &Uuid);
2323
2324 if (pDisk->pCache)
2325 pDisk->pCache->Backend->pfnSetModificationUuid(pDisk->pCache->pBackendData,
2326 &Uuid);
2327 }
2328
2329 pDisk->uModified &= ~VD_IMAGE_MODIFIED_FLAG;
2330 }
2331}
2332
2333/**
2334 * internal: mark the disk as modified.
2335 */
2336static void vdSetModifiedFlag(PVBOXHDD pDisk)
2337{
2338 pDisk->uModified |= VD_IMAGE_MODIFIED_FLAG;
2339 if (pDisk->uModified & VD_IMAGE_MODIFIED_FIRST)
2340 {
2341 pDisk->uModified &= ~VD_IMAGE_MODIFIED_FIRST;
2342
2343 /* First modify, so create a UUID and ensure it's written to disk. */
2344 vdResetModifiedFlag(pDisk);
2345
2346 if (!(pDisk->uModified & VD_IMAGE_MODIFIED_DISABLE_UUID_UPDATE))
2347 {
2348 VDIOCTX IoCtx;
2349 vdIoCtxInit(&IoCtx, pDisk, VDIOCTXTXDIR_FLUSH, 0, 0, NULL,
2350 NULL, NULL, NULL, VDIOCTX_FLAGS_SYNC);
2351 pDisk->pLast->Backend->pfnFlush(pDisk->pLast->pBackendData, &IoCtx);
2352 }
2353 }
2354}
2355
2356/**
2357 * internal: write buffer to the image, taking care of block boundaries and
2358 * write optimizations.
2359 */
2360static int vdWriteHelperEx(PVBOXHDD pDisk, PVDIMAGE pImage,
2361 PVDIMAGE pImageParentOverride, uint64_t uOffset,
2362 const void *pvBuf, size_t cbWrite,
2363 uint32_t fFlags, unsigned cImagesRead)
2364{
2365 int rc = VINF_SUCCESS;
2366 RTSGSEG Segment;
2367 RTSGBUF SgBuf;
2368 VDIOCTX IoCtx;
2369 RTSEMEVENT hEventComplete = NIL_RTSEMEVENT;
2370
2371 rc = RTSemEventCreate(&hEventComplete);
2372 if (RT_FAILURE(rc))
2373 return rc;
2374
2375 fFlags |= VDIOCTX_FLAGS_SYNC | VDIOCTX_FLAGS_DONT_FREE;
2376
2377 Segment.pvSeg = (void *)pvBuf;
2378 Segment.cbSeg = cbWrite;
2379 RTSgBufInit(&SgBuf, &Segment, 1);
2380 vdIoCtxInit(&IoCtx, pDisk, VDIOCTXTXDIR_WRITE, uOffset, cbWrite, pImage, &SgBuf,
2381 NULL, vdWriteHelperAsync, fFlags);
2382
2383 IoCtx.Req.Io.pImageParentOverride = pImageParentOverride;
2384 IoCtx.Req.Io.cImagesRead = cImagesRead;
2385 IoCtx.pIoCtxParent = NULL;
2386 IoCtx.Type.Root.pfnComplete = vdIoCtxSyncComplete;
2387 IoCtx.Type.Root.pvUser1 = pDisk;
2388 IoCtx.Type.Root.pvUser2 = hEventComplete;
2389 if (RT_SUCCESS(rc))
2390 rc = vdIoCtxProcessSync(&IoCtx, hEventComplete);
2391
2392 RTSemEventDestroy(hEventComplete);
2393 return rc;
2394}
2395
2396/**
2397 * internal: write buffer to the image, taking care of block boundaries and
2398 * write optimizations.
2399 */
2400static int vdWriteHelper(PVBOXHDD pDisk, PVDIMAGE pImage, uint64_t uOffset,
2401 const void *pvBuf, size_t cbWrite, uint32_t fFlags)
2402{
2403 return vdWriteHelperEx(pDisk, pImage, NULL, uOffset, pvBuf, cbWrite,
2404 fFlags, 0);
2405}
2406
2407/**
2408 * Internal: Copies the content of one disk to another one applying optimizations
2409 * to speed up the copy process if possible.
2410 */
2411static int vdCopyHelper(PVBOXHDD pDiskFrom, PVDIMAGE pImageFrom, PVBOXHDD pDiskTo,
2412 uint64_t cbSize, unsigned cImagesFromRead, unsigned cImagesToRead,
2413 bool fSuppressRedundantIo, PVDINTERFACEPROGRESS pIfProgress,
2414 PVDINTERFACEPROGRESS pDstIfProgress)
2415{
2416 int rc = VINF_SUCCESS;
2417 int rc2;
2418 uint64_t uOffset = 0;
2419 uint64_t cbRemaining = cbSize;
2420 void *pvBuf = NULL;
2421 bool fLockReadFrom = false;
2422 bool fLockWriteTo = false;
2423 bool fBlockwiseCopy = false;
2424 unsigned uProgressOld = 0;
2425
2426 LogFlowFunc(("pDiskFrom=%#p pImageFrom=%#p pDiskTo=%#p cbSize=%llu cImagesFromRead=%u cImagesToRead=%u fSuppressRedundantIo=%RTbool pIfProgress=%#p pDstIfProgress=%#p\n",
2427                 pDiskFrom, pImageFrom, pDiskTo, cbSize, cImagesFromRead, cImagesToRead, fSuppressRedundantIo, pIfProgress, pDstIfProgress));
2428
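    /* Copy block by block (preserving unallocated blocks) only when the caller either
     * wants redundant I/O suppressed or limits the source image chain, and no read
     * filter is attached which would have to see the data. */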
2429 if ( (fSuppressRedundantIo || (cImagesFromRead > 0))
2430 && RTListIsEmpty(&pDiskFrom->ListFilterChainRead))
2431 fBlockwiseCopy = true;
2432
2433 /* Allocate tmp buffer. */
2434 pvBuf = RTMemTmpAlloc(VD_MERGE_BUFFER_SIZE);
2435 if (!pvBuf)
2436        return VERR_NO_MEMORY;
2437
2438 do
2439 {
2440 size_t cbThisRead = RT_MIN(VD_MERGE_BUFFER_SIZE, cbRemaining);
2441
2442 /* Note that we don't attempt to synchronize cross-disk accesses.
2443 * It wouldn't be very difficult to do, just the lock order would
2444 * need to be defined somehow to prevent deadlocks. Postpone such
2445 * magic as there is no use case for this. */
2446
2447 rc2 = vdThreadStartRead(pDiskFrom);
2448 AssertRC(rc2);
2449 fLockReadFrom = true;
2450
2451 if (fBlockwiseCopy)
2452 {
2453 RTSGSEG SegmentBuf;
2454 RTSGBUF SgBuf;
2455 VDIOCTX IoCtx;
2456
2457 SegmentBuf.pvSeg = pvBuf;
2458 SegmentBuf.cbSeg = VD_MERGE_BUFFER_SIZE;
2459 RTSgBufInit(&SgBuf, &SegmentBuf, 1);
2460 vdIoCtxInit(&IoCtx, pDiskFrom, VDIOCTXTXDIR_READ, 0, 0, NULL,
2461 &SgBuf, NULL, NULL, VDIOCTX_FLAGS_SYNC);
2462
2463 /* Read the source data. */
2464 rc = pImageFrom->Backend->pfnRead(pImageFrom->pBackendData,
2465 uOffset, cbThisRead, &IoCtx,
2466 &cbThisRead);
2467
2468 if ( rc == VERR_VD_BLOCK_FREE
2469 && cImagesFromRead != 1)
2470 {
2471 unsigned cImagesToProcess = cImagesFromRead;
2472
2473 for (PVDIMAGE pCurrImage = pImageFrom->pPrev;
2474 pCurrImage != NULL && rc == VERR_VD_BLOCK_FREE;
2475 pCurrImage = pCurrImage->pPrev)
2476 {
2477 rc = pCurrImage->Backend->pfnRead(pCurrImage->pBackendData,
2478 uOffset, cbThisRead,
2479 &IoCtx, &cbThisRead);
2480 if (cImagesToProcess == 1)
2481 break;
2482 else if (cImagesToProcess > 0)
2483 cImagesToProcess--;
2484 }
2485 }
2486 }
2487 else
2488 rc = vdReadHelper(pDiskFrom, pImageFrom, uOffset, pvBuf, cbThisRead,
2489 false /* fUpdateCache */);
2490
2491 if (RT_FAILURE(rc) && rc != VERR_VD_BLOCK_FREE)
2492 break;
2493
2494 rc2 = vdThreadFinishRead(pDiskFrom);
2495 AssertRC(rc2);
2496 fLockReadFrom = false;
2497
2498 if (rc != VERR_VD_BLOCK_FREE)
2499 {
2500 rc2 = vdThreadStartWrite(pDiskTo);
2501 AssertRC(rc2);
2502 fLockWriteTo = true;
2503
2504 /* Only do collapsed I/O if we are copying the data blockwise. */
2505 rc = vdWriteHelperEx(pDiskTo, pDiskTo->pLast, NULL, uOffset, pvBuf,
2506 cbThisRead, VDIOCTX_FLAGS_DONT_SET_MODIFIED_FLAG /* fFlags */,
2507 fBlockwiseCopy ? cImagesToRead : 0);
2508 if (RT_FAILURE(rc))
2509 break;
2510
2511 rc2 = vdThreadFinishWrite(pDiskTo);
2512 AssertRC(rc2);
2513 fLockWriteTo = false;
2514 }
2515 else /* Don't propagate the error to the outside */
2516 rc = VINF_SUCCESS;
2517
2518 uOffset += cbThisRead;
2519 cbRemaining -= cbThisRead;
2520
2521 unsigned uProgressNew = uOffset * 99 / cbSize;
2522 if (uProgressNew != uProgressOld)
2523 {
2524 uProgressOld = uProgressNew;
2525
2526 if (pIfProgress && pIfProgress->pfnProgress)
2527 {
2528 rc = pIfProgress->pfnProgress(pIfProgress->Core.pvUser,
2529 uProgressOld);
2530 if (RT_FAILURE(rc))
2531 break;
2532 }
2533 if (pDstIfProgress && pDstIfProgress->pfnProgress)
2534 {
2535 rc = pDstIfProgress->pfnProgress(pDstIfProgress->Core.pvUser,
2536 uProgressOld);
2537 if (RT_FAILURE(rc))
2538 break;
2539 }
2540 }
2541 } while (uOffset < cbSize);
2542
2543    RTMemTmpFree(pvBuf);
2544
2545 if (fLockReadFrom)
2546 {
2547 rc2 = vdThreadFinishRead(pDiskFrom);
2548 AssertRC(rc2);
2549 }
2550
2551 if (fLockWriteTo)
2552 {
2553 rc2 = vdThreadFinishWrite(pDiskTo);
2554 AssertRC(rc2);
2555 }
2556
2557 LogFlowFunc(("returns rc=%Rrc\n", rc));
2558 return rc;
2559}
2560
2561/**
2562 * Flush helper for the async set-modified path, flushing the last opened image so the new modification UUID reaches the disk.
2563 */
2564static DECLCALLBACK(int) vdSetModifiedHelperAsync(PVDIOCTX pIoCtx)
2565{
2566 int rc = VINF_SUCCESS;
2567 PVDIMAGE pImage = pIoCtx->Req.Io.pImageCur;
2568
2569 rc = pImage->Backend->pfnFlush(pImage->pBackendData, pIoCtx);
2570 if (rc == VERR_VD_ASYNC_IO_IN_PROGRESS)
2571 rc = VINF_SUCCESS;
2572
2573 return rc;
2574}
2575
2576/**
2577 * internal: mark the disk as modified - async version.
2578 */
2579static int vdSetModifiedFlagAsync(PVBOXHDD pDisk, PVDIOCTX pIoCtx)
2580{
2581 int rc = VINF_SUCCESS;
2582
2583 VD_IS_LOCKED(pDisk);
2584
2585 pDisk->uModified |= VD_IMAGE_MODIFIED_FLAG;
2586 if (pDisk->uModified & VD_IMAGE_MODIFIED_FIRST)
2587 {
2588 rc = vdIoCtxLockDisk(pDisk, pIoCtx);
2589 if (RT_SUCCESS(rc))
2590 {
2591 pDisk->uModified &= ~VD_IMAGE_MODIFIED_FIRST;
2592
2593 /* First modify, so create a UUID and ensure it's written to disk. */
2594 vdResetModifiedFlag(pDisk);
2595
2596 if (!(pDisk->uModified & VD_IMAGE_MODIFIED_DISABLE_UUID_UPDATE))
2597 {
2598 PVDIOCTX pIoCtxFlush = vdIoCtxChildAlloc(pDisk, VDIOCTXTXDIR_FLUSH,
2599 0, 0, pDisk->pLast,
2600 NULL, pIoCtx, 0, 0, NULL,
2601 vdSetModifiedHelperAsync);
2602
2603 if (pIoCtxFlush)
2604 {
2605 rc = vdIoCtxProcessLocked(pIoCtxFlush);
2606 if (rc == VINF_VD_ASYNC_IO_FINISHED)
2607 {
2608                        vdIoCtxUnlockDisk(pDisk, pIoCtx, false /* fProcessBlockedReqs */);
2609 vdIoCtxFree(pDisk, pIoCtxFlush);
2610 }
2611 else if (rc == VERR_VD_ASYNC_IO_IN_PROGRESS)
2612 {
2613 ASMAtomicIncU32(&pIoCtx->cDataTransfersPending);
2614 pIoCtx->fFlags |= VDIOCTX_FLAGS_BLOCKED;
2615 }
2616 else /* Another error */
2617 vdIoCtxFree(pDisk, pIoCtxFlush);
2618 }
2619 else
2620 rc = VERR_NO_MEMORY;
2621 }
2622 }
2623 }
2624
2625 return rc;
2626}
2627
2628static DECLCALLBACK(int) vdWriteHelperCommitAsync(PVDIOCTX pIoCtx)
2629{
2630 int rc = VINF_SUCCESS;
2631 PVDIMAGE pImage = pIoCtx->Req.Io.pImageStart;
2632 size_t cbPreRead = pIoCtx->Type.Child.cbPreRead;
2633 size_t cbPostRead = pIoCtx->Type.Child.cbPostRead;
2634 size_t cbThisWrite = pIoCtx->Type.Child.cbTransferParent;
2635
2636 LogFlowFunc(("pIoCtx=%#p\n", pIoCtx));
2637 rc = pImage->Backend->pfnWrite(pImage->pBackendData,
2638 pIoCtx->Req.Io.uOffset - cbPreRead,
2639 cbPreRead + cbThisWrite + cbPostRead,
2640 pIoCtx, NULL, &cbPreRead, &cbPostRead, 0);
2641 Assert(rc != VERR_VD_BLOCK_FREE);
2642 Assert(rc == VERR_VD_NOT_ENOUGH_METADATA || cbPreRead == 0);
2643 Assert(rc == VERR_VD_NOT_ENOUGH_METADATA || cbPostRead == 0);
2644 if (rc == VERR_VD_ASYNC_IO_IN_PROGRESS)
2645 rc = VINF_SUCCESS;
2646 else if (rc == VERR_VD_IOCTX_HALT)
2647 {
2648 pIoCtx->fFlags |= VDIOCTX_FLAGS_BLOCKED;
2649 rc = VINF_SUCCESS;
2650 }
2651
2652 LogFlowFunc(("returns rc=%Rrc\n", rc));
2653 return rc;
2654}
2655
2656static DECLCALLBACK(int) vdWriteHelperOptimizedCmpAndWriteAsync(PVDIOCTX pIoCtx)
2657{
2658 int rc = VINF_SUCCESS;
2659 size_t cbThisWrite = 0;
2660 size_t cbPreRead = pIoCtx->Type.Child.cbPreRead;
2661 size_t cbPostRead = pIoCtx->Type.Child.cbPostRead;
2662 size_t cbWriteCopy = pIoCtx->Type.Child.Write.Optimized.cbWriteCopy;
2663 size_t cbFill = pIoCtx->Type.Child.Write.Optimized.cbFill;
2664 size_t cbReadImage = pIoCtx->Type.Child.Write.Optimized.cbReadImage;
2665 PVDIOCTX pIoCtxParent = pIoCtx->pIoCtxParent;
2666
2667 LogFlowFunc(("pIoCtx=%#p\n", pIoCtx));
2668
2669 AssertPtr(pIoCtxParent);
2670 Assert(!pIoCtxParent->pIoCtxParent);
2671 Assert(!pIoCtx->Req.Io.cbTransferLeft && !pIoCtx->cMetaTransfersPending);
2672
2673 vdIoCtxChildReset(pIoCtx);
2674 cbThisWrite = pIoCtx->Type.Child.cbTransferParent;
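    /* The child context buffer holds the whole block read in the pre-read step; skip
     * the cbPreRead part so the comparison below starts at the offset the parent
     * actually writes to. */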
2675 RTSgBufAdvance(&pIoCtx->Req.Io.SgBuf, cbPreRead);
2676
2677 /* Check if the write would modify anything in this block. */
2678 if (!RTSgBufCmp(&pIoCtx->Req.Io.SgBuf, &pIoCtxParent->Req.Io.SgBuf, cbThisWrite))
2679 {
2680 RTSGBUF SgBufSrcTmp;
2681
2682 RTSgBufClone(&SgBufSrcTmp, &pIoCtxParent->Req.Io.SgBuf);
2683 RTSgBufAdvance(&SgBufSrcTmp, cbThisWrite);
2684 RTSgBufAdvance(&pIoCtx->Req.Io.SgBuf, cbThisWrite);
2685
2686 if (!cbWriteCopy || !RTSgBufCmp(&pIoCtx->Req.Io.SgBuf, &SgBufSrcTmp, cbWriteCopy))
2687 {
2688 /* Block is completely unchanged, so no need to write anything. */
2689            LogFlowFunc(("Block didn't change\n"));
2690 ASMAtomicWriteU32(&pIoCtx->Req.Io.cbTransferLeft, 0);
2691 RTSgBufAdvance(&pIoCtxParent->Req.Io.SgBuf, cbThisWrite);
2692 return VINF_VD_ASYNC_IO_FINISHED;
2693 }
2694 }
2695
2696 /* Copy the data to the right place in the buffer. */
2697 RTSgBufReset(&pIoCtx->Req.Io.SgBuf);
2698 RTSgBufAdvance(&pIoCtx->Req.Io.SgBuf, cbPreRead);
2699 vdIoCtxCopy(pIoCtx, pIoCtxParent, cbThisWrite);
2700
2701 /* Handle the data that goes after the write to fill the block. */
2702 if (cbPostRead)
2703 {
2704 /* Now assemble the remaining data. */
2705 if (cbWriteCopy)
2706 {
2707 /*
2708 * The S/G buffer of the parent needs to be cloned because
2709 * it is not allowed to modify the state.
2710 */
2711 RTSGBUF SgBufParentTmp;
2712
2713 RTSgBufClone(&SgBufParentTmp, &pIoCtxParent->Req.Io.SgBuf);
2714 RTSgBufCopy(&pIoCtx->Req.Io.SgBuf, &SgBufParentTmp, cbWriteCopy);
2715 }
2716
2717 /* Zero out the remainder of this block. Will never be visible, as this
2718 * is beyond the limit of the image. */
2719 if (cbFill)
2720 {
2721 RTSgBufAdvance(&pIoCtx->Req.Io.SgBuf, cbReadImage);
2722 vdIoCtxSet(pIoCtx, '\0', cbFill);
2723 }
2724 }
2725
2726 /* Write the full block to the virtual disk. */
2727 RTSgBufReset(&pIoCtx->Req.Io.SgBuf);
2728 pIoCtx->pfnIoCtxTransferNext = vdWriteHelperCommitAsync;
2729
2730 return rc;
2731}
2732
2733static DECLCALLBACK(int) vdWriteHelperOptimizedPreReadAsync(PVDIOCTX pIoCtx)
2734{
2735 int rc = VINF_SUCCESS;
2736
2737 LogFlowFunc(("pIoCtx=%#p\n", pIoCtx));
2738
2739 pIoCtx->fFlags |= VDIOCTX_FLAGS_ZERO_FREE_BLOCKS;
2740
2741 if ( pIoCtx->Req.Io.cbTransferLeft
2742 && !pIoCtx->cDataTransfersPending)
2743 rc = vdReadHelperAsync(pIoCtx);
2744
2745 if ( ( RT_SUCCESS(rc)
2746 || (rc == VERR_VD_ASYNC_IO_IN_PROGRESS))
2747 && ( pIoCtx->Req.Io.cbTransferLeft
2748 || pIoCtx->cMetaTransfersPending))
2749 rc = VERR_VD_ASYNC_IO_IN_PROGRESS;
2750 else
2751 pIoCtx->pfnIoCtxTransferNext = vdWriteHelperOptimizedCmpAndWriteAsync;
2752
2753 return rc;
2754}
2755
2756/**
2757 * internal: write a complete block (only used for diff images), taking the
2758 * remaining data from parent images. This implementation optimizes out writes
2759 * that do not change the data relative to the state as of the parent images.
2760 * All backends which support differential/growing images support this - async version.
2761 */
2762static DECLCALLBACK(int) vdWriteHelperOptimizedAsync(PVDIOCTX pIoCtx)
2763{
2764 PVBOXHDD pDisk = pIoCtx->pDisk;
2765 uint64_t uOffset = pIoCtx->Type.Child.uOffsetSaved;
2766 size_t cbThisWrite = pIoCtx->Type.Child.cbTransferParent;
2767 size_t cbPreRead = pIoCtx->Type.Child.cbPreRead;
2768 size_t cbPostRead = pIoCtx->Type.Child.cbPostRead;
2769 size_t cbWrite = pIoCtx->Type.Child.cbWriteParent;
2770 size_t cbFill = 0;
2771 size_t cbWriteCopy = 0;
2772 size_t cbReadImage = 0;
2773
2774 LogFlowFunc(("pIoCtx=%#p\n", pIoCtx));
2775
2776 AssertPtr(pIoCtx->pIoCtxParent);
2777 Assert(!pIoCtx->pIoCtxParent->pIoCtxParent);
2778
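    /*
     * The block written to the image is laid out as [cbPreRead][cbThisWrite][cbPostRead],
     * where cbPostRead consists of cbWriteCopy (taken from the parent write), cbReadImage
     * (read from the image chain) and cbFill (zeroed space beyond the nominal disk size).
     */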
2779 if (cbPostRead)
2780 {
2781 /* Figure out how much we cannot read from the image, because
2782 * the last block to write might exceed the nominal size of the
2783 * image for technical reasons. */
2784 if (uOffset + cbThisWrite + cbPostRead > pDisk->cbSize)
2785 cbFill = uOffset + cbThisWrite + cbPostRead - pDisk->cbSize;
2786
2787 /* If we have data to be written, use that instead of reading
2788 * data from the image. */
2789 if (cbWrite > cbThisWrite)
2790 cbWriteCopy = RT_MIN(cbWrite - cbThisWrite, cbPostRead);
2791
2792 /* The rest must be read from the image. */
2793 cbReadImage = cbPostRead - cbWriteCopy - cbFill;
2794 }
2795
2796 pIoCtx->Type.Child.Write.Optimized.cbFill = cbFill;
2797 pIoCtx->Type.Child.Write.Optimized.cbWriteCopy = cbWriteCopy;
2798 pIoCtx->Type.Child.Write.Optimized.cbReadImage = cbReadImage;
2799
2800 /* Read the entire data of the block so that we can compare whether it will
2801 * be modified by the write or not. */
2802 size_t cbTmp = cbPreRead + cbThisWrite + cbPostRead - cbFill; Assert(cbTmp == (uint32_t)cbTmp);
2803 pIoCtx->Req.Io.cbTransferLeft = (uint32_t)cbTmp;
2804 pIoCtx->Req.Io.cbTransfer = pIoCtx->Req.Io.cbTransferLeft;
2805 pIoCtx->Req.Io.uOffset -= cbPreRead;
2806
2807 /* Next step */
2808 pIoCtx->pfnIoCtxTransferNext = vdWriteHelperOptimizedPreReadAsync;
2809 return VINF_SUCCESS;
2810}
2811
2812static DECLCALLBACK(int) vdWriteHelperStandardReadImageAsync(PVDIOCTX pIoCtx)
2813{
2814 int rc = VINF_SUCCESS;
2815
2816 LogFlowFunc(("pIoCtx=%#p\n", pIoCtx));
2817
2818 pIoCtx->fFlags |= VDIOCTX_FLAGS_ZERO_FREE_BLOCKS;
2819
2820 if ( pIoCtx->Req.Io.cbTransferLeft
2821 && !pIoCtx->cDataTransfersPending)
2822 rc = vdReadHelperAsync(pIoCtx);
2823
2824 if ( RT_SUCCESS(rc)
2825 && ( pIoCtx->Req.Io.cbTransferLeft
2826 || pIoCtx->cMetaTransfersPending))
2827 rc = VERR_VD_ASYNC_IO_IN_PROGRESS;
2828 else
2829 {
2830 size_t cbFill = pIoCtx->Type.Child.Write.Optimized.cbFill;
2831
2832 /* Zero out the remainder of this block. Will never be visible, as this
2833 * is beyond the limit of the image. */
2834 if (cbFill)
2835 vdIoCtxSet(pIoCtx, '\0', cbFill);
2836
2837 /* Write the full block to the virtual disk. */
2838 RTSgBufReset(&pIoCtx->Req.Io.SgBuf);
2839
2840 vdIoCtxChildReset(pIoCtx);
2841 pIoCtx->pfnIoCtxTransferNext = vdWriteHelperCommitAsync;
2842 }
2843
2844 return rc;
2845}
2846
2847static DECLCALLBACK(int) vdWriteHelperStandardAssemble(PVDIOCTX pIoCtx)
2848{
2849 int rc = VINF_SUCCESS;
2850 size_t cbPostRead = pIoCtx->Type.Child.cbPostRead;
2851 size_t cbThisWrite = pIoCtx->Type.Child.cbTransferParent;
2852 PVDIOCTX pIoCtxParent = pIoCtx->pIoCtxParent;
2853
2854 LogFlowFunc(("pIoCtx=%#p\n", pIoCtx));
2855
2856 vdIoCtxCopy(pIoCtx, pIoCtxParent, cbThisWrite);
2857 if (cbPostRead)
2858 {
2859 size_t cbFill = pIoCtx->Type.Child.Write.Optimized.cbFill;
2860 size_t cbWriteCopy = pIoCtx->Type.Child.Write.Optimized.cbWriteCopy;
2861 size_t cbReadImage = pIoCtx->Type.Child.Write.Optimized.cbReadImage;
2862
2863 /* Now assemble the remaining data. */
2864 if (cbWriteCopy)
2865 {
2866 /*
2867 * The S/G buffer of the parent needs to be cloned because
2868 * it is not allowed to modify the state.
2869 */
2870 RTSGBUF SgBufParentTmp;
2871
2872 RTSgBufClone(&SgBufParentTmp, &pIoCtxParent->Req.Io.SgBuf);
2873 RTSgBufCopy(&pIoCtx->Req.Io.SgBuf, &SgBufParentTmp, cbWriteCopy);
2874 }
2875
2876 if (cbReadImage)
2877 {
2878 /* Read remaining data. */
2879 pIoCtx->pfnIoCtxTransferNext = vdWriteHelperStandardReadImageAsync;
2880
2881 /* Read the data that goes before the write to fill the block. */
2882 pIoCtx->Req.Io.cbTransferLeft = (uint32_t)cbReadImage; Assert(cbReadImage == (uint32_t)cbReadImage);
2883 pIoCtx->Req.Io.cbTransfer = pIoCtx->Req.Io.cbTransferLeft;
2884 pIoCtx->Req.Io.uOffset += cbWriteCopy;
2885 }
2886 else
2887 {
2888 /* Zero out the remainder of this block. Will never be visible, as this
2889 * is beyond the limit of the image. */
2890 if (cbFill)
2891 vdIoCtxSet(pIoCtx, '\0', cbFill);
2892
2893 /* Write the full block to the virtual disk. */
2894 RTSgBufReset(&pIoCtx->Req.Io.SgBuf);
2895 vdIoCtxChildReset(pIoCtx);
2896 pIoCtx->pfnIoCtxTransferNext = vdWriteHelperCommitAsync;
2897 }
2898 }
2899 else
2900 {
2901 /* Write the full block to the virtual disk. */
2902 RTSgBufReset(&pIoCtx->Req.Io.SgBuf);
2903 vdIoCtxChildReset(pIoCtx);
2904 pIoCtx->pfnIoCtxTransferNext = vdWriteHelperCommitAsync;
2905 }
2906
2907 return rc;
2908}
2909
2910static DECLCALLBACK(int) vdWriteHelperStandardPreReadAsync(PVDIOCTX pIoCtx)
2911{
2912 int rc = VINF_SUCCESS;
2913
2914 LogFlowFunc(("pIoCtx=%#p\n", pIoCtx));
2915
2916 pIoCtx->fFlags |= VDIOCTX_FLAGS_ZERO_FREE_BLOCKS;
2917
2918 if ( pIoCtx->Req.Io.cbTransferLeft
2919 && !pIoCtx->cDataTransfersPending)
2920 rc = vdReadHelperAsync(pIoCtx);
2921
2922 if ( RT_SUCCESS(rc)
2923 && ( pIoCtx->Req.Io.cbTransferLeft
2924 || pIoCtx->cMetaTransfersPending))
2925 rc = VERR_VD_ASYNC_IO_IN_PROGRESS;
2926 else
2927 pIoCtx->pfnIoCtxTransferNext = vdWriteHelperStandardAssemble;
2928
2929 return rc;
2930}
2931
2932static DECLCALLBACK(int) vdWriteHelperStandardAsync(PVDIOCTX pIoCtx)
2933{
2934 PVBOXHDD pDisk = pIoCtx->pDisk;
2935 uint64_t uOffset = pIoCtx->Type.Child.uOffsetSaved;
2936 size_t cbThisWrite = pIoCtx->Type.Child.cbTransferParent;
2937 size_t cbPreRead = pIoCtx->Type.Child.cbPreRead;
2938 size_t cbPostRead = pIoCtx->Type.Child.cbPostRead;
2939 size_t cbWrite = pIoCtx->Type.Child.cbWriteParent;
2940 size_t cbFill = 0;
2941 size_t cbWriteCopy = 0;
2942 size_t cbReadImage = 0;
2943
2944 LogFlowFunc(("pIoCtx=%#p\n", pIoCtx));
2945
2946 AssertPtr(pIoCtx->pIoCtxParent);
2947 Assert(!pIoCtx->pIoCtxParent->pIoCtxParent);
2948
2949 /* Calculate the amount of data to read that goes after the write to fill the block. */
2950 if (cbPostRead)
2951 {
2952 /* If we have data to be written, use that instead of reading
2953 * data from the image. */
2954 if (cbWrite > cbThisWrite)
2955 cbWriteCopy = RT_MIN(cbWrite - cbThisWrite, cbPostRead);
2956 else
2957 cbWriteCopy = 0;
2958
2959 /* Figure out how much we cannot read from the image, because
2960 * the last block to write might exceed the nominal size of the
2961 * image for technical reasons. */
2962 if (uOffset + cbThisWrite + cbPostRead > pDisk->cbSize)
2963 cbFill = uOffset + cbThisWrite + cbPostRead - pDisk->cbSize;
2964
2965 /* The rest must be read from the image. */
2966 cbReadImage = cbPostRead - cbWriteCopy - cbFill;
2967 }
2968
2969 pIoCtx->Type.Child.Write.Optimized.cbFill = cbFill;
2970 pIoCtx->Type.Child.Write.Optimized.cbWriteCopy = cbWriteCopy;
2971 pIoCtx->Type.Child.Write.Optimized.cbReadImage = cbReadImage;
2972
2973 /* Next step */
2974 if (cbPreRead)
2975 {
2976 pIoCtx->pfnIoCtxTransferNext = vdWriteHelperStandardPreReadAsync;
2977
2978 /* Read the data that goes before the write to fill the block. */
2979 pIoCtx->Req.Io.cbTransferLeft = (uint32_t)cbPreRead; Assert(cbPreRead == (uint32_t)cbPreRead);
2980 pIoCtx->Req.Io.cbTransfer = pIoCtx->Req.Io.cbTransferLeft;
2981 pIoCtx->Req.Io.uOffset -= cbPreRead;
2982 }
2983 else
2984 pIoCtx->pfnIoCtxTransferNext = vdWriteHelperStandardAssemble;
2985
2986 return VINF_SUCCESS;
2987}
2988
2989/**
2990 * internal: write buffer to the image, taking care of block boundaries and
2991 * write optimizations - async version.
2992 */
2993static DECLCALLBACK(int) vdWriteHelperAsync(PVDIOCTX pIoCtx)
2994{
2995 int rc;
2996 size_t cbWrite = pIoCtx->Req.Io.cbTransfer;
2997 uint64_t uOffset = pIoCtx->Req.Io.uOffset;
2998 PVDIMAGE pImage = pIoCtx->Req.Io.pImageCur;
2999 PVBOXHDD pDisk = pIoCtx->pDisk;
3000 unsigned fWrite;
3001 size_t cbThisWrite;
3002 size_t cbPreRead, cbPostRead;
3003
3004 /* Apply write filter chain here if it was not done already. */
3005 if (!(pIoCtx->fFlags & VDIOCTX_FLAGS_WRITE_FILTER_APPLIED))
3006 {
3007 rc = vdFilterChainApplyWrite(pDisk, uOffset, cbWrite, pIoCtx);
3008 if (RT_FAILURE(rc))
3009 return rc;
3010 pIoCtx->fFlags |= VDIOCTX_FLAGS_WRITE_FILTER_APPLIED;
3011 }
3012
3013 if (!(pIoCtx->fFlags & VDIOCTX_FLAGS_DONT_SET_MODIFIED_FLAG))
3014 {
3015 rc = vdSetModifiedFlagAsync(pDisk, pIoCtx);
3016 if (RT_FAILURE(rc)) /* Includes I/O in progress. */
3017 return rc;
3018 }
3019
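    /* The write makes the range allocated again, so update the pending discard bookkeeping. */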
3020 rc = vdDiscardSetRangeAllocated(pDisk, uOffset, cbWrite);
3021 if (RT_FAILURE(rc))
3022 return rc;
3023
3024 /* Loop until all written. */
3025 do
3026 {
3027 /* Try to write the possibly partial block to the last opened image.
3028 * This works when the block is already allocated in this image or
3029 * if it is a full-block write (and allocation isn't suppressed below).
3030 * For image formats which don't support zero blocks, it's beneficial
3031 * to avoid unnecessarily allocating unchanged blocks. This prevents
3032 * unwanted expanding of images. VMDK is an example. */
3033 cbThisWrite = cbWrite;
3034
3035 /*
3036 * Check whether there is a full block write in progress which was not allocated.
3037 * Defer I/O if the range interferes.
3038 */
3039 if ( pDisk->pIoCtxLockOwner != NIL_VDIOCTX
3040 && uOffset >= pDisk->uOffsetStartLocked
3041 && uOffset < pDisk->uOffsetEndLocked)
3042 {
3043            Log(("Interfering write while allocating a new block => deferring write\n"));
3044 vdIoCtxDefer(pDisk, pIoCtx);
3045 rc = VERR_VD_ASYNC_IO_IN_PROGRESS;
3046 break;
3047 }
3048
3049 fWrite = (pImage->uOpenFlags & VD_OPEN_FLAGS_HONOR_SAME)
3050 ? 0 : VD_WRITE_NO_ALLOC;
3051 rc = pImage->Backend->pfnWrite(pImage->pBackendData, uOffset, cbThisWrite,
3052 pIoCtx, &cbThisWrite, &cbPreRead, &cbPostRead,
3053 fWrite);
3054 if (rc == VERR_VD_BLOCK_FREE)
3055 {
3056            /* Lock the disk. */
3057 rc = vdIoCtxLockDisk(pDisk, pIoCtx);
3058 if (RT_SUCCESS(rc))
3059 {
3060 /*
3061 * Allocate segment and buffer in one go.
3062 * A bit hackish but avoids the need to allocate memory twice.
3063 */
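                /* Resulting layout: [RTSGBUF][RTSGSEG][cbPreRead + cbThisWrite + cbPostRead data bytes]. */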
3064 PRTSGBUF pTmp = (PRTSGBUF)RTMemAlloc(cbPreRead + cbThisWrite + cbPostRead + sizeof(RTSGSEG) + sizeof(RTSGBUF));
3065 AssertBreakStmt(pTmp, rc = VERR_NO_MEMORY);
3066 PRTSGSEG pSeg = (PRTSGSEG)(pTmp + 1);
3067
3068 pSeg->pvSeg = pSeg + 1;
3069 pSeg->cbSeg = cbPreRead + cbThisWrite + cbPostRead;
3070 RTSgBufInit(pTmp, pSeg, 1);
3071
3072 PVDIOCTX pIoCtxWrite = vdIoCtxChildAlloc(pDisk, VDIOCTXTXDIR_WRITE,
3073 uOffset, pSeg->cbSeg, pImage,
3074 pTmp,
3075 pIoCtx, cbThisWrite,
3076 cbWrite,
3077 pTmp,
3078 (pImage->uOpenFlags & VD_OPEN_FLAGS_HONOR_SAME)
3079 ? vdWriteHelperStandardAsync
3080 : vdWriteHelperOptimizedAsync);
3081 if (!VALID_PTR(pIoCtxWrite))
3082 {
3083                    RTMemFree(pTmp);
3084 rc = VERR_NO_MEMORY;
3085 break;
3086 }
3087
3088 LogFlowFunc(("Disk is growing because of pIoCtx=%#p pIoCtxWrite=%#p\n",
3089 pIoCtx, pIoCtxWrite));
3090
3091 /* Save the current range for the growing operation to check for intersecting requests later. */
3092 pDisk->uOffsetStartLocked = uOffset - cbPreRead;
3093 pDisk->uOffsetEndLocked = uOffset + cbThisWrite + cbPostRead;
3094
3095 pIoCtxWrite->Type.Child.cbPreRead = cbPreRead;
3096 pIoCtxWrite->Type.Child.cbPostRead = cbPostRead;
3097 pIoCtxWrite->Req.Io.pImageParentOverride = pIoCtx->Req.Io.pImageParentOverride;
3098
3099 /* Process the write request */
3100 rc = vdIoCtxProcessLocked(pIoCtxWrite);
3101
3102 if (RT_FAILURE(rc) && (rc != VERR_VD_ASYNC_IO_IN_PROGRESS))
3103 {
3104                    vdIoCtxUnlockDisk(pDisk, pIoCtx, false /* fProcessBlockedReqs */);
3105 vdIoCtxFree(pDisk, pIoCtxWrite);
3106 break;
3107 }
3108 else if ( rc == VINF_VD_ASYNC_IO_FINISHED
3109 && ASMAtomicCmpXchgBool(&pIoCtxWrite->fComplete, true, false))
3110 {
3111 LogFlow(("Child write request completed\n"));
3112 Assert(pIoCtx->Req.Io.cbTransferLeft >= cbThisWrite);
3113 Assert(cbThisWrite == (uint32_t)cbThisWrite);
3114 rc = pIoCtxWrite->rcReq;
3115 ASMAtomicSubU32(&pIoCtx->Req.Io.cbTransferLeft, (uint32_t)cbThisWrite);
3116                    vdIoCtxUnlockDisk(pDisk, pIoCtx, false /* fProcessBlockedReqs */);
3117 vdIoCtxFree(pDisk, pIoCtxWrite);
3118 }
3119 else
3120 {
3121 LogFlow(("Child write pending\n"));
3122 ASMAtomicIncU32(&pIoCtx->cDataTransfersPending);
3123 pIoCtx->fFlags |= VDIOCTX_FLAGS_BLOCKED;
3124 rc = VERR_VD_ASYNC_IO_IN_PROGRESS;
3125 cbWrite -= cbThisWrite;
3126 uOffset += cbThisWrite;
3127 break;
3128 }
3129 }
3130 else
3131 {
3132 rc = VERR_VD_ASYNC_IO_IN_PROGRESS;
3133 break;
3134 }
3135 }
3136
3137 if (rc == VERR_VD_IOCTX_HALT)
3138 {
3139 cbWrite -= cbThisWrite;
3140 uOffset += cbThisWrite;
3141 pIoCtx->fFlags |= VDIOCTX_FLAGS_BLOCKED;
3142 break;
3143 }
3144 else if (rc == VERR_VD_NOT_ENOUGH_METADATA)
3145 break;
3146
3147 cbWrite -= cbThisWrite;
3148 uOffset += cbThisWrite;
3149 } while (cbWrite != 0 && (RT_SUCCESS(rc) || rc == VERR_VD_ASYNC_IO_IN_PROGRESS));
3150
3151 if ( rc == VERR_VD_ASYNC_IO_IN_PROGRESS
3152 || rc == VERR_VD_NOT_ENOUGH_METADATA
3153 || rc == VERR_VD_IOCTX_HALT)
3154 {
3155 /*
3156 * Tell the caller that we don't need to go back here because all
3157 * writes are initiated.
3158 */
3159 if ( !cbWrite
3160 && rc != VERR_VD_IOCTX_HALT)
3161 rc = VINF_SUCCESS;
3162
3163 pIoCtx->Req.Io.uOffset = uOffset;
3164 pIoCtx->Req.Io.cbTransfer = cbWrite;
3165 }
3166
3167 return rc;
3168}
3169
3170/**
3171 * Flush helper async version.
3172 */
3173static DECLCALLBACK(int) vdFlushHelperAsync(PVDIOCTX pIoCtx)
3174{
3175 int rc = VINF_SUCCESS;
3176 PVBOXHDD pDisk = pIoCtx->pDisk;
3177 PVDIMAGE pImage = pIoCtx->Req.Io.pImageCur;
3178
3179 rc = vdIoCtxLockDisk(pDisk, pIoCtx);
3180 if (RT_SUCCESS(rc))
3181 {
3182 /* Mark the whole disk as locked. */
3183 pDisk->uOffsetStartLocked = 0;
3184 pDisk->uOffsetEndLocked = UINT64_C(0xffffffffffffffff);
3185
3186 vdResetModifiedFlag(pDisk);
3187 rc = pImage->Backend->pfnFlush(pImage->pBackendData, pIoCtx);
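        /* Flush the attached cache as well when the image flush succeeded or is still
         * pending. The disk is only unlocked here once the flush has really finished
         * (or failed); for pending/halted requests it stays locked until completion. */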
3188 if ( ( RT_SUCCESS(rc)
3189 || rc == VERR_VD_ASYNC_IO_IN_PROGRESS
3190 || rc == VERR_VD_IOCTX_HALT)
3191 && pDisk->pCache)
3192 {
3193 rc = pDisk->pCache->Backend->pfnFlush(pDisk->pCache->pBackendData, pIoCtx);
3194 if ( RT_SUCCESS(rc)
3195 || ( rc != VERR_VD_ASYNC_IO_IN_PROGRESS
3196 && rc != VERR_VD_IOCTX_HALT))
3197 vdIoCtxUnlockDisk(pDisk, pIoCtx, true /* fProcessBlockedReqs */);
3198 else if (rc != VERR_VD_IOCTX_HALT)
3199 rc = VINF_SUCCESS;
3200 }
3201 else if (rc == VERR_VD_ASYNC_IO_IN_PROGRESS)
3202 rc = VINF_SUCCESS;
3203        else if (rc != VERR_VD_IOCTX_HALT) /* Some other error. */
3204 vdIoCtxUnlockDisk(pDisk, pIoCtx, true /* fProcessBlockedReqs */);
3205 }
3206
3207 return rc;
3208}
3209
3210/**
3211 * Async discard helper - discards a whole block which is recorded in the block
3212 * tree.
3213 *
3214 * @returns VBox status code.
3215 * @param pIoCtx The I/O context to operate on.
3216 */
3217static DECLCALLBACK(int) vdDiscardWholeBlockAsync(PVDIOCTX pIoCtx)
3218{
3219 int rc = VINF_SUCCESS;
3220 PVBOXHDD pDisk = pIoCtx->pDisk;
3221 PVDDISCARDSTATE pDiscard = pDisk->pDiscard;
3222 PVDDISCARDBLOCK pBlock = pIoCtx->Req.Discard.pBlock;
3223 size_t cbPreAllocated, cbPostAllocated, cbActuallyDiscarded;
3224
3225 LogFlowFunc(("pIoCtx=%#p\n", pIoCtx));
3226
3227 AssertPtr(pBlock);
3228
3229 rc = pDisk->pLast->Backend->pfnDiscard(pDisk->pLast->pBackendData, pIoCtx,
3230 pBlock->Core.Key, pBlock->cbDiscard,
3231 &cbPreAllocated, &cbPostAllocated,
3232 &cbActuallyDiscarded, NULL, 0);
3233 Assert(rc != VERR_VD_DISCARD_ALIGNMENT_NOT_MET);
3234 Assert(!cbPreAllocated);
3235 Assert(!cbPostAllocated);
3236 Assert(cbActuallyDiscarded == pBlock->cbDiscard || RT_FAILURE(rc));
3237
3238 /* Remove the block on success. */
3239 if ( RT_SUCCESS(rc)
3240 || rc == VERR_VD_ASYNC_IO_IN_PROGRESS)
3241 {
3242 PVDDISCARDBLOCK pBlockRemove = (PVDDISCARDBLOCK)RTAvlrU64RangeRemove(pDiscard->pTreeBlocks, pBlock->Core.Key);
3243 Assert(pBlockRemove == pBlock); RT_NOREF1(pBlockRemove);
3244
3245 pDiscard->cbDiscarding -= pBlock->cbDiscard;
3246 RTListNodeRemove(&pBlock->NodeLru);
3247 RTMemFree(pBlock->pbmAllocated);
3248 RTMemFree(pBlock);
3249            pIoCtx->Req.Discard.pBlock = NULL; /* Safety precaution. */
3250 pIoCtx->pfnIoCtxTransferNext = vdDiscardHelperAsync; /* Next part. */
3251 rc = VINF_SUCCESS;
3252 }
3253
3254 LogFlowFunc(("returns rc=%Rrc\n", rc));
3255 return rc;
3256}
3257
3258/**
3259 * Removes the least recently used blocks from the waiting list until
3260 * the new value is reached - version for async I/O.
3261 *
3262 * @returns VBox status code.
3263 * @param pDisk VD disk container.
3264 * @param pIoCtx The I/O context associated with this discard operation.
3265 * @param cbDiscardingNew How many bytes should be waiting on success.
3266 * The number of bytes waiting can be less.
3267 */
3268static int vdDiscardRemoveBlocksAsync(PVBOXHDD pDisk, PVDIOCTX pIoCtx, size_t cbDiscardingNew)
3269{
3270 int rc = VINF_SUCCESS;
3271 PVDDISCARDSTATE pDiscard = pDisk->pDiscard;
3272
3273 LogFlowFunc(("pDisk=%#p pDiscard=%#p cbDiscardingNew=%zu\n",
3274 pDisk, pDiscard, cbDiscardingNew));
3275
3276 while (pDiscard->cbDiscarding > cbDiscardingNew)
3277 {
3278 PVDDISCARDBLOCK pBlock = RTListGetLast(&pDiscard->ListLru, VDDISCARDBLOCK, NodeLru);
3279
3280 Assert(!RTListIsEmpty(&pDiscard->ListLru));
3281
3282 /* Go over the allocation bitmap and mark all discarded sectors as unused. */
3283 uint64_t offStart = pBlock->Core.Key;
3284 uint32_t idxStart = 0;
3285 size_t cbLeft = pBlock->cbDiscard;
3286 bool fAllocated = ASMBitTest(pBlock->pbmAllocated, idxStart);
3287 uint32_t cSectors = (uint32_t)(pBlock->cbDiscard / 512);
3288
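        /* Walk the allocation bitmap of the block: runs of sectors still marked as
         * allocated are skipped, runs of unallocated sectors are handed to the backend
         * with VD_DISCARD_MARK_UNUSED before the block is dropped from the LRU list. */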
3289 while (cbLeft > 0)
3290 {
3291 int32_t idxEnd;
3292 size_t cbThis = cbLeft;
3293
3294 if (fAllocated)
3295 {
3296 /* Check for the first unallocated bit. */
3297 idxEnd = ASMBitNextClear(pBlock->pbmAllocated, cSectors, idxStart);
3298 if (idxEnd != -1)
3299 {
3300 cbThis = (idxEnd - idxStart) * 512;
3301 fAllocated = false;
3302 }
3303 }
3304 else
3305 {
3306 /* Mark as unused and check for the first set bit. */
3307 idxEnd = ASMBitNextSet(pBlock->pbmAllocated, cSectors, idxStart);
3308 if (idxEnd != -1)
3309 cbThis = (idxEnd - idxStart) * 512;
3310
3311 rc = pDisk->pLast->Backend->pfnDiscard(pDisk->pLast->pBackendData, pIoCtx,
3312 offStart, cbThis, NULL, NULL, &cbThis,
3313 NULL, VD_DISCARD_MARK_UNUSED);
3314 if ( RT_FAILURE(rc)
3315 && rc != VERR_VD_ASYNC_IO_IN_PROGRESS)
3316 break;
3317
3318 fAllocated = true;
3319 }
3320
3321 idxStart = idxEnd;
3322 offStart += cbThis;
3323 cbLeft -= cbThis;
3324 }
3325
3326 if ( RT_FAILURE(rc)
3327 && rc != VERR_VD_ASYNC_IO_IN_PROGRESS)
3328 break;
3329
3330 PVDDISCARDBLOCK pBlockRemove = (PVDDISCARDBLOCK)RTAvlrU64RangeRemove(pDiscard->pTreeBlocks, pBlock->Core.Key);
3331 Assert(pBlockRemove == pBlock); NOREF(pBlockRemove);
3332 RTListNodeRemove(&pBlock->NodeLru);
3333
3334 pDiscard->cbDiscarding -= pBlock->cbDiscard;
3335 RTMemFree(pBlock->pbmAllocated);
3336 RTMemFree(pBlock);
3337 }
3338
3339 if (rc == VERR_VD_ASYNC_IO_IN_PROGRESS)
3340 rc = VINF_SUCCESS;
3341
3342 Assert(RT_FAILURE(rc) || pDiscard->cbDiscarding <= cbDiscardingNew);
3343
3344 LogFlowFunc(("returns rc=%Rrc\n", rc));
3345 return rc;
3346}
3347
3348/**
3349 * Async discard helper - discards the current range if there is no matching
3350 * block in the tree.
3351 *
3352 * @returns VBox status code.
3353 * @param pIoCtx The I/O context to operate on.
3354 */
3355static DECLCALLBACK(int) vdDiscardCurrentRangeAsync(PVDIOCTX pIoCtx)
3356{
3357 PVBOXHDD pDisk = pIoCtx->pDisk;
3358 PVDDISCARDSTATE pDiscard = pDisk->pDiscard;
3359 uint64_t offStart = pIoCtx->Req.Discard.offCur;
3360 size_t cbThisDiscard = pIoCtx->Req.Discard.cbThisDiscard;
3361 void *pbmAllocated = NULL;
3362 size_t cbPreAllocated, cbPostAllocated;
3363 int rc = VINF_SUCCESS;
3364
3365 LogFlowFunc(("pIoCtx=%#p\n", pIoCtx));
3366
3367 /* No block found, try to discard using the backend first. */
3368 rc = pDisk->pLast->Backend->pfnDiscard(pDisk->pLast->pBackendData, pIoCtx,
3369 offStart, cbThisDiscard, &cbPreAllocated,
3370 &cbPostAllocated, &cbThisDiscard,
3371 &pbmAllocated, 0);
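    /* VERR_VD_DISCARD_ALIGNMENT_NOT_MET means the range does not cover a full block of
     * the image; remember it together with the allocation bitmap returned by the backend
     * and discard the whole block once the remaining sectors got discarded as well. */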
3372 if (rc == VERR_VD_DISCARD_ALIGNMENT_NOT_MET)
3373 {
3374 /* Create new discard block. */
3375 PVDDISCARDBLOCK pBlock = (PVDDISCARDBLOCK)RTMemAllocZ(sizeof(VDDISCARDBLOCK));
3376 if (pBlock)
3377 {
3378 pBlock->Core.Key = offStart - cbPreAllocated;
3379 pBlock->Core.KeyLast = offStart + cbThisDiscard + cbPostAllocated - 1;
3380 pBlock->cbDiscard = cbPreAllocated + cbThisDiscard + cbPostAllocated;
3381 pBlock->pbmAllocated = pbmAllocated;
3382 bool fInserted = RTAvlrU64Insert(pDiscard->pTreeBlocks, &pBlock->Core);
3383 Assert(fInserted); NOREF(fInserted);
3384
3385 RTListPrepend(&pDiscard->ListLru, &pBlock->NodeLru);
3386 pDiscard->cbDiscarding += pBlock->cbDiscard;
3387
3388 Assert(pIoCtx->Req.Discard.cbDiscardLeft >= cbThisDiscard);
3389 pIoCtx->Req.Discard.cbDiscardLeft -= cbThisDiscard;
3390 pIoCtx->Req.Discard.offCur += cbThisDiscard;
3391 pIoCtx->Req.Discard.cbThisDiscard = cbThisDiscard;
3392
3393 if (pDiscard->cbDiscarding > VD_DISCARD_REMOVE_THRESHOLD)
3394 rc = vdDiscardRemoveBlocksAsync(pDisk, pIoCtx, VD_DISCARD_REMOVE_THRESHOLD);
3395 else
3396 rc = VINF_SUCCESS;
3397
3398 if (RT_SUCCESS(rc))
3399 pIoCtx->pfnIoCtxTransferNext = vdDiscardHelperAsync; /* Next part. */
3400 }
3401 else
3402 {
3403 RTMemFree(pbmAllocated);
3404 rc = VERR_NO_MEMORY;
3405 }
3406 }
3407 else if ( RT_SUCCESS(rc)
3408             || rc == VERR_VD_ASYNC_IO_IN_PROGRESS) /* Save state and advance to next range. */
3409 {
3410 Assert(pIoCtx->Req.Discard.cbDiscardLeft >= cbThisDiscard);
3411 pIoCtx->Req.Discard.cbDiscardLeft -= cbThisDiscard;
3412 pIoCtx->Req.Discard.offCur += cbThisDiscard;
3413 pIoCtx->Req.Discard.cbThisDiscard = cbThisDiscard;
3414 pIoCtx->pfnIoCtxTransferNext = vdDiscardHelperAsync;
3415 rc = VINF_SUCCESS;
3416 }
3417
3418 LogFlowFunc(("returns rc=%Rrc\n", rc));
3419 return rc;
3420}
3421
3422/**
3423 * Async discard helper - entry point.
3424 *
3425 * @returns VBox status code.
3426 * @param pIoCtx The I/O context to operate on.
3427 */
3428static DECLCALLBACK(int) vdDiscardHelperAsync(PVDIOCTX pIoCtx)
3429{
3430 int rc = VINF_SUCCESS;
3431 PVBOXHDD pDisk = pIoCtx->pDisk;
3432 PCRTRANGE paRanges = pIoCtx->Req.Discard.paRanges;
3433 unsigned cRanges = pIoCtx->Req.Discard.cRanges;
3434 PVDDISCARDSTATE pDiscard = pDisk->pDiscard;
3435
3436 LogFlowFunc(("pIoCtx=%#p\n", pIoCtx));
3437
3438 /* Check if the I/O context processed all ranges. */
3439 if ( pIoCtx->Req.Discard.idxRange == cRanges
3440 && !pIoCtx->Req.Discard.cbDiscardLeft)
3441 {
3442 LogFlowFunc(("All ranges discarded, completing\n"));
3443        vdIoCtxUnlockDisk(pDisk, pIoCtx, true /* fProcessBlockedReqs */);
3444 return VINF_SUCCESS;
3445 }
3446
3447 if (pDisk->pIoCtxLockOwner != pIoCtx)
3448 rc = vdIoCtxLockDisk(pDisk, pIoCtx);
3449
3450 if (RT_SUCCESS(rc))
3451 {
3452 uint64_t offStart = pIoCtx->Req.Discard.offCur;
3453 size_t cbDiscardLeft = pIoCtx->Req.Discard.cbDiscardLeft;
3454 size_t cbThisDiscard;
3455
3456 pDisk->uOffsetStartLocked = offStart;
3457 pDisk->uOffsetEndLocked = offStart + cbDiscardLeft;
3458
3459 if (RT_UNLIKELY(!pDiscard))
3460 {
3461 pDiscard = vdDiscardStateCreate();
3462 if (!pDiscard)
3463 return VERR_NO_MEMORY;
3464
3465 pDisk->pDiscard = pDiscard;
3466 }
3467
3468 if (!pIoCtx->Req.Discard.cbDiscardLeft)
3469 {
3470 offStart = paRanges[pIoCtx->Req.Discard.idxRange].offStart;
3471 cbDiscardLeft = paRanges[pIoCtx->Req.Discard.idxRange].cbRange;
3472 LogFlowFunc(("New range descriptor loaded (%u) offStart=%llu cbDiscard=%zu\n",
3473 pIoCtx->Req.Discard.idxRange, offStart, cbDiscardLeft));
3474 pIoCtx->Req.Discard.idxRange++;
3475 }
3476
3477 /* Look for a matching block in the AVL tree first. */
3478 PVDDISCARDBLOCK pBlock = (PVDDISCARDBLOCK)RTAvlrU64GetBestFit(pDiscard->pTreeBlocks, offStart, false);
3479 if (!pBlock || pBlock->Core.KeyLast < offStart)
3480 {
3481 PVDDISCARDBLOCK pBlockAbove = (PVDDISCARDBLOCK)RTAvlrU64GetBestFit(pDiscard->pTreeBlocks, offStart, true);
3482
3483 /* Clip range to remain in the current block. */
3484 if (pBlockAbove)
3485 cbThisDiscard = RT_MIN(cbDiscardLeft, pBlockAbove->Core.KeyLast - offStart + 1);
3486 else
3487 cbThisDiscard = cbDiscardLeft;
3488
3489 Assert(!(cbThisDiscard % 512));
3490 pIoCtx->Req.Discard.pBlock = NULL;
3491 pIoCtx->pfnIoCtxTransferNext = vdDiscardCurrentRangeAsync;
3492 }
3493 else
3494 {
3495 /* Range lies partly in the block, update allocation bitmap. */
3496 int32_t idxStart, idxEnd;
3497
3498 cbThisDiscard = RT_MIN(cbDiscardLeft, pBlock->Core.KeyLast - offStart + 1);
3499
3500 AssertPtr(pBlock);
3501
3502 Assert(!(cbThisDiscard % 512));
3503 Assert(!((offStart - pBlock->Core.Key) % 512));
3504
3505 idxStart = (offStart - pBlock->Core.Key) / 512;
3506 idxEnd = idxStart + (int32_t)(cbThisDiscard / 512);
3507
3508 ASMBitClearRange(pBlock->pbmAllocated, idxStart, idxEnd);
3509
3510 cbDiscardLeft -= cbThisDiscard;
3511 offStart += cbThisDiscard;
3512
3513 /* Call the backend to discard the block if it is completely unallocated now. */
3514 if (ASMBitFirstSet((volatile void *)pBlock->pbmAllocated, (uint32_t)(pBlock->cbDiscard / 512)) == -1)
3515 {
3516 pIoCtx->Req.Discard.pBlock = pBlock;
3517 pIoCtx->pfnIoCtxTransferNext = vdDiscardWholeBlockAsync;
3518 rc = VINF_SUCCESS;
3519 }
3520 else
3521 {
3522 RTListNodeRemove(&pBlock->NodeLru);
3523 RTListPrepend(&pDiscard->ListLru, &pBlock->NodeLru);
3524
3525 /* Start with next range. */
3526 pIoCtx->pfnIoCtxTransferNext = vdDiscardHelperAsync;
3527 rc = VINF_SUCCESS;
3528 }
3529 }
3530
3531 /* Save state in the context. */
3532 pIoCtx->Req.Discard.offCur = offStart;
3533 pIoCtx->Req.Discard.cbDiscardLeft = cbDiscardLeft;
3534 pIoCtx->Req.Discard.cbThisDiscard = cbThisDiscard;
3535 }
3536
3537 LogFlowFunc(("returns rc=%Rrc\n", rc));
3538 return rc;
3539}
3540
3541#ifndef VBOX_HDD_NO_DYNAMIC_BACKENDS
3542
3543/**
3544 * @interface_method_impl{VDBACKENDREGISTER,pfnRegisterImage}
3545 */
3546static DECLCALLBACK(int) vdPluginRegisterImage(void *pvUser, PCVDIMAGEBACKEND pBackend)
3547{
3548 int rc = VINF_SUCCESS;
3549
3550 if (VD_VERSION_ARE_COMPATIBLE(VD_IMGBACKEND_VERSION, pBackend->u32Version))
3551 vdAddBackend((RTLDRMOD)pvUser, pBackend);
3552 else
3553 {
3554        rc = VERR_IGNORED;
3555        LogFunc(("ignored plugin: pBackend->u32Version=%u rc=%Rrc\n", pBackend->u32Version, rc));
3556 }
3557
3558 return rc;
3559}
3560
3561/**
3562 * @interface_method_impl{VDBACKENDREGISTER,pfnRegisterCache}
3563 */
3564static DECLCALLBACK(int) vdPluginRegisterCache(void *pvUser, PCVDCACHEBACKEND pBackend)
3565{
3566 int rc = VINF_SUCCESS;
3567
3568 if (VD_VERSION_ARE_COMPATIBLE(VD_CACHEBACKEND_VERSION, pBackend->u32Version))
3569 vdAddCacheBackend((RTLDRMOD)pvUser, pBackend);
3570 else
3571 {
3572 LogFunc(("ignored plugin: pBackend->u32Version=%u rc=%Rrc\n", pBackend->u32Version, rc));
3573 rc = VERR_IGNORED;
3574 }
3575
3576 return rc;
3577}
3578
3579/**
3580 * @interface_method_impl{VDBACKENDREGISTER,pfnRegisterFilter}
3581 */
3582static DECLCALLBACK(int) vdPluginRegisterFilter(void *pvUser, PCVDFILTERBACKEND pBackend)
3583{
3584 int rc = VINF_SUCCESS;
3585
3586 if (VD_VERSION_ARE_COMPATIBLE(VD_FLTBACKEND_VERSION, pBackend->u32Version))
3587 vdAddFilterBackend((RTLDRMOD)pvUser, pBackend);
3588 else
3589 {
3590 LogFunc(("ignored plugin: pBackend->u32Version=%u rc=%Rrc\n", pBackend->u32Version, rc));
3591 rc = VERR_IGNORED;
3592 }
3593
3594 return rc;
3595}
3596
3597/**
3598 * Checks whether a plugin with the given filename has already been loaded.
3599 *
3600 * @returns Pointer to already loaded plugin, NULL if not found.
3601 * @param pszFilename The filename to check.
3602 */
3603static PVDPLUGIN vdPluginFind(const char *pszFilename)
3604{
3605 PVDPLUGIN pIt;
3606 RTListForEach(&g_ListPluginsLoaded, pIt, VDPLUGIN, NodePlugin)
3607 {
3608 if (!RTStrCmp(pIt->pszFilename, pszFilename))
3609 return pIt;
3610 }
3611
3612 return NULL;
3613}
3614
3615/**
3616 * Adds a plugin to the list of loaded plugins.
3617 *
3618 * @returns VBox status code.
3619 * @param hPlugin Plugin handle to add.
3620 * @param pszFilename The associated filename, used for finding duplicates.
3621 */
3622static int vdAddPlugin(RTLDRMOD hPlugin, const char *pszFilename)
3623{
3624 int rc = VINF_SUCCESS;
3625 PVDPLUGIN pPlugin = (PVDPLUGIN)RTMemAllocZ(sizeof(VDPLUGIN));
3626
3627 if (pPlugin)
3628 {
3629 pPlugin->hPlugin = hPlugin;
3630 pPlugin->pszFilename = RTStrDup(pszFilename);
3631 if (pPlugin->pszFilename)
3632 RTListAppend(&g_ListPluginsLoaded, &pPlugin->NodePlugin);
3633 else
3634 {
3635 RTMemFree(pPlugin);
3636 rc = VERR_NO_MEMORY;
3637 }
3638 }
3639 else
3640 rc = VERR_NO_MEMORY;
3641
3642 return rc;
3643}
3644
3645static int vdRemovePlugin(const char *pszFilename)
3646{
3647 /* Find plugin to be removed from the list. */
3648 PVDPLUGIN pIt = vdPluginFind(pszFilename);
3649 if (!pIt)
3650 return VINF_SUCCESS;
3651
3652 /** @todo r=klaus: need to add a plugin entry point for unregistering the
3653 * backends. Only if this doesn't exist (or fails to work) we should fall
3654 * back to the following uncoordinated backend cleanup. */
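    /* Compact each backend table in place, removing every entry that was registered by this plugin. */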
3655 for (unsigned i = 0; i < g_cBackends; i++)
3656 {
3657 while (i < g_cBackends && g_ahBackendPlugins[i] == pIt->hPlugin)
3658 {
3659 memcpy(&g_apBackends[i], &g_apBackends[i + 1], (g_cBackends - i - 1) * sizeof(PCVDIMAGEBACKEND));
3660 memcpy(&g_ahBackendPlugins[i], &g_ahBackendPlugins[i + 1], (g_cBackends - i - 1) * sizeof(RTLDRMOD));
3661 /** @todo for now skip reallocating, doesn't save much */
3662 g_cBackends--;
3663 }
3664 }
3665 for (unsigned i = 0; i < g_cCacheBackends; i++)
3666 {
3667 while (i < g_cCacheBackends && g_ahCacheBackendPlugins[i] == pIt->hPlugin)
3668 {
3669 memcpy(&g_apCacheBackends[i], &g_apCacheBackends[i + 1], (g_cCacheBackends - i - 1) * sizeof(PCVDCACHEBACKEND));
3670 memcpy(&g_ahCacheBackendPlugins[i], &g_ahCacheBackendPlugins[i + 1], (g_cCacheBackends - i - 1) * sizeof(RTLDRMOD));
3671 /** @todo for now skip reallocating, doesn't save much */
3672 g_cCacheBackends--;
3673 }
3674 }
3675 for (unsigned i = 0; i < g_cFilterBackends; i++)
3676 {
3677 while (i < g_cFilterBackends && g_pahFilterBackendPlugins[i] == pIt->hPlugin)
3678 {
3679 memcpy(&g_apFilterBackends[i], &g_apFilterBackends[i + 1], (g_cFilterBackends - i - 1) * sizeof(PCVDFILTERBACKEND));
3680 memcpy(&g_pahFilterBackendPlugins[i], &g_pahFilterBackendPlugins[i + 1], (g_cFilterBackends - i - 1) * sizeof(RTLDRMOD));
3681 /** @todo for now skip reallocating, doesn't save much */
3682 g_cFilterBackends--;
3683 }
3684 }
3685
3686 /* Remove the plugin node now, all traces of it are gone. */
3687 RTListNodeRemove(&pIt->NodePlugin);
3688 RTLdrClose(pIt->hPlugin);
3689 RTStrFree(pIt->pszFilename);
3690 RTMemFree(pIt);
3691
3692 return VINF_SUCCESS;
3693}
3694
3695#endif /* !VBOX_HDD_NO_DYNAMIC_BACKENDS */
3696
3697/**
3698 * Worker for VDPluginLoadFromFilename() and vdPluginLoadFromPath().
3699 *
3700 * @returns VBox status code.
3701 * @param pszFilename The plugin filename to load.
3702 */
3703static int vdPluginLoadFromFilename(const char *pszFilename)
3704{
3705#ifndef VBOX_HDD_NO_DYNAMIC_BACKENDS
3706 /* Plugin loaded? Nothing to do. */
3707 if (vdPluginFind(pszFilename))
3708 return VINF_SUCCESS;
3709
3710 RTLDRMOD hPlugin = NIL_RTLDRMOD;
3711 int rc = SUPR3HardenedLdrLoadPlugIn(pszFilename, &hPlugin, NULL);
3712 if (RT_SUCCESS(rc))
3713 {
3714 VDBACKENDREGISTER BackendRegister;
3715 PFNVDPLUGINLOAD pfnVDPluginLoad = NULL;
3716
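        /* Set up the registration callback table which the plugin's load entry point uses to register its image, cache and filter backends. */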
3717 BackendRegister.u32Version = VD_BACKENDREG_CB_VERSION;
3718 BackendRegister.pfnRegisterImage = vdPluginRegisterImage;
3719 BackendRegister.pfnRegisterCache = vdPluginRegisterCache;
3720 BackendRegister.pfnRegisterFilter = vdPluginRegisterFilter;
3721
3722 rc = RTLdrGetSymbol(hPlugin, VD_PLUGIN_LOAD_NAME, (void**)&pfnVDPluginLoad);
3723 if (RT_FAILURE(rc) || !pfnVDPluginLoad)
3724 {
3725 LogFunc(("error resolving the entry point %s in plugin %s, rc=%Rrc, pfnVDPluginLoad=%#p\n",
3726 VD_PLUGIN_LOAD_NAME, pszFilename, rc, pfnVDPluginLoad));
3727 if (RT_SUCCESS(rc))
3728 rc = VERR_SYMBOL_NOT_FOUND;
3729 }
3730
3731 if (RT_SUCCESS(rc))
3732 {
3733 /* Get the function table. */
3734 rc = pfnVDPluginLoad(hPlugin, &BackendRegister);
3735 }
3736 else
3737 LogFunc(("ignored plugin '%s': rc=%Rrc\n", pszFilename, rc));
3738
3739 /* Create a plugin entry on success. */
3740 if (RT_SUCCESS(rc))
3741 vdAddPlugin(hPlugin, pszFilename);
3742 else
3743 RTLdrClose(hPlugin);
3744 }
3745
3746 return rc;
3747#else
3748 RT_NOREF1(pszFilename);
3749 return VERR_NOT_IMPLEMENTED;
3750#endif
3751}
3752
3753/**
3754 * Worker for VDPluginLoadFromPath() and vdLoadDynamicBackends().
3755 *
3756 * @returns VBox status code.
3757 * @param pszPath The path to load plugins from.
3758 */
3759static int vdPluginLoadFromPath(const char *pszPath)
3760{
3761#ifndef VBOX_HDD_NO_DYNAMIC_BACKENDS
3762 /* To get all entries with VBoxHDD as prefix. */
3763 char *pszPluginFilter = RTPathJoinA(pszPath, VD_PLUGIN_PREFIX "*");
3764 if (!pszPluginFilter)
3765 return VERR_NO_STR_MEMORY;
3766
3767 PRTDIRENTRYEX pPluginDirEntry = NULL;
3768 PRTDIR pPluginDir = NULL;
3769 size_t cbPluginDirEntry = sizeof(RTDIRENTRYEX);
3770 int rc = RTDirOpenFiltered(&pPluginDir, pszPluginFilter, RTDIRFILTER_WINNT, 0);
3771 if (RT_SUCCESS(rc))
3772 {
3773 pPluginDirEntry = (PRTDIRENTRYEX)RTMemAllocZ(sizeof(RTDIRENTRYEX));
3774 if (pPluginDirEntry)
3775 {
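            /* Enumerate the matching directory entries, growing the entry buffer on VERR_BUFFER_OVERFLOW. */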
3776 while ( (rc = RTDirReadEx(pPluginDir, pPluginDirEntry, &cbPluginDirEntry, RTFSOBJATTRADD_NOTHING, RTPATH_F_ON_LINK))
3777 != VERR_NO_MORE_FILES)
3778 {
3779 char *pszPluginPath = NULL;
3780
3781 if (rc == VERR_BUFFER_OVERFLOW)
3782 {
3783 /* allocate new buffer. */
3784 RTMemFree(pPluginDirEntry);
3785 pPluginDirEntry = (PRTDIRENTRYEX)RTMemAllocZ(cbPluginDirEntry);
3786 if (!pPluginDirEntry)
3787 {
3788 rc = VERR_NO_MEMORY;
3789 break;
3790 }
3791 /* Retry. */
3792 rc = RTDirReadEx(pPluginDir, pPluginDirEntry, &cbPluginDirEntry, RTFSOBJATTRADD_NOTHING, RTPATH_F_ON_LINK);
3793 if (RT_FAILURE(rc))
3794 break;
3795 }
3796 else if (RT_FAILURE(rc))
3797 break;
3798
3799 /* We got the new entry. */
3800 if (!RTFS_IS_FILE(pPluginDirEntry->Info.Attr.fMode))
3801 continue;
3802
3803 /* Prepend the path to the libraries. */
3804 pszPluginPath = RTPathJoinA(pszPath, pPluginDirEntry->szName);
3805 if (!pszPluginPath)
3806 {
3807 rc = VERR_NO_STR_MEMORY;
3808 break;
3809 }
3810
3811 rc = vdPluginLoadFromFilename(pszPluginPath);
3812 RTStrFree(pszPluginPath);
3813 }
3814
3815 RTMemFree(pPluginDirEntry);
3816 }
3817 else
3818 rc = VERR_NO_MEMORY;
3819
3820 RTDirClose(pPluginDir);
3821 }
3822 else
3823 {
3824 /* On Windows the above immediately signals that there are no
3825 * files matching, while on other platforms enumerating the
3826 * files below fails. Either way: no plugins. */
3827 }
3828
3829 if (rc == VERR_NO_MORE_FILES)
3830 rc = VINF_SUCCESS;
3831 RTStrFree(pszPluginFilter);
3832 return rc;
3833#else
3834 RT_NOREF1(pszPath);
3835 return VERR_NOT_IMPLEMENTED;
3836#endif
3837}
3838
3839/**
3840 * internal: scans the plugin directory and loads any plugins found there.
3841 */
3842static int vdLoadDynamicBackends(void)
3843{
3844#ifndef VBOX_HDD_NO_DYNAMIC_BACKENDS
3845 /*
3846 * Enumerate plugin backends from the application directory where the other
3847 * shared libraries are.
3848 */
3849 char szPath[RTPATH_MAX];
3850 int rc = RTPathAppPrivateArch(szPath, sizeof(szPath));
3851 if (RT_FAILURE(rc))
3852 return rc;
3853
3854 return vdPluginLoadFromPath(szPath);
3855#else
3856 return VINF_SUCCESS;
3857#endif
3858}
3859
3860/**
3861 * Worker for VDPluginUnloadFromFilename() and vdPluginUnloadFromPath().
3862 *
3863 * @returns VBox status code.
3864 * @param pszFilename The plugin filename to unload.
3865 */
3866static int vdPluginUnloadFromFilename(const char *pszFilename)
3867{
3868#ifndef VBOX_HDD_NO_DYNAMIC_BACKENDS
3869 return vdRemovePlugin(pszFilename);
3870#else
3871 RT_NOREF1(pszFilename);
3872 return VERR_NOT_IMPLEMENTED;
3873#endif
3874}
3875
3876/**
3877 * Worker for VDPluginUnloadFromPath().
3878 *
3879 * @returns VBox status code.
3880 * @param pszPath The path to unload plugins from.
3881 */
3882static int vdPluginUnloadFromPath(const char *pszPath)
3883{
3884#ifndef VBOX_HDD_NO_DYNAMIC_BACKENDS
3885 /* To get all entries with VBoxHDD as prefix. */
3886 char *pszPluginFilter = RTPathJoinA(pszPath, VD_PLUGIN_PREFIX "*");
3887 if (!pszPluginFilter)
3888 return VERR_NO_STR_MEMORY;
3889
3890 PRTDIRENTRYEX pPluginDirEntry = NULL;
3891 PRTDIR pPluginDir = NULL;
3892 size_t cbPluginDirEntry = sizeof(RTDIRENTRYEX);
3893 int rc = RTDirOpenFiltered(&pPluginDir, pszPluginFilter, RTDIRFILTER_WINNT, 0);
3894 if (RT_SUCCESS(rc))
3895 {
3896 pPluginDirEntry = (PRTDIRENTRYEX)RTMemAllocZ(sizeof(RTDIRENTRYEX));
3897 if (pPluginDirEntry)
3898 {
3899 while ((rc = RTDirReadEx(pPluginDir, pPluginDirEntry, &cbPluginDirEntry, RTFSOBJATTRADD_NOTHING, RTPATH_F_ON_LINK)) != VERR_NO_MORE_FILES)
3900 {
3901 char *pszPluginPath = NULL;
3902
3903 if (rc == VERR_BUFFER_OVERFLOW)
3904 {
3905 /* allocate new buffer. */
3906 RTMemFree(pPluginDirEntry);
3907 pPluginDirEntry = (PRTDIRENTRYEX)RTMemAllocZ(cbPluginDirEntry);
3908 if (!pPluginDirEntry)
3909 {
3910 rc = VERR_NO_MEMORY;
3911 break;
3912 }
3913 /* Retry. */
3914 rc = RTDirReadEx(pPluginDir, pPluginDirEntry, &cbPluginDirEntry, RTFSOBJATTRADD_NOTHING, RTPATH_F_ON_LINK);
3915 if (RT_FAILURE(rc))
3916 break;
3917 }
3918 else if (RT_FAILURE(rc))
3919 break;
3920
3921 /* We got the new entry. */
3922 if (!RTFS_IS_FILE(pPluginDirEntry->Info.Attr.fMode))
3923 continue;
3924
3925 /* Prepend the path to the libraries. */
3926 pszPluginPath = RTPathJoinA(pszPath, pPluginDirEntry->szName);
3927 if (!pszPluginPath)
3928 {
3929 rc = VERR_NO_STR_MEMORY;
3930 break;
3931 }
3932
3933 rc = vdPluginUnloadFromFilename(pszPluginPath);
3934 RTStrFree(pszPluginPath);
3935 }
3936
3937 RTMemFree(pPluginDirEntry);
3938 }
3939 else
3940 rc = VERR_NO_MEMORY;
3941
3942 RTDirClose(pPluginDir);
3943 }
3944 else
3945 {
3946 /* On Windows the above immediately signals that there are no
3947 * files matching, while on other platforms enumerating the
3948 * files below fails. Either way: no plugins. */
3949 }
3950
3951 if (rc == VERR_NO_MORE_FILES)
3952 rc = VINF_SUCCESS;
3953 RTStrFree(pszPluginFilter);
3954 return rc;
3955#else
3956 RT_NOREF1(pszPath);
3957 return VERR_NOT_IMPLEMENTED;
3958#endif
3959}
3960
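/*
 * The fallback callbacks below implement the VD I/O interface on top of RTFile.
 * Only the synchronous operations are supported; the asynchronous read, write
 * and flush variants simply return VERR_NOT_IMPLEMENTED.
 */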
3961/**
3962 * VD async I/O interface open callback.
3963 */
3964static DECLCALLBACK(int) vdIOOpenFallback(void *pvUser, const char *pszLocation,
3965 uint32_t fOpen, PFNVDCOMPLETED pfnCompleted,
3966 void **ppStorage)
3967{
3968 RT_NOREF1(pvUser);
3969 PVDIIOFALLBACKSTORAGE pStorage = (PVDIIOFALLBACKSTORAGE)RTMemAllocZ(sizeof(VDIIOFALLBACKSTORAGE));
3970
3971 if (!pStorage)
3972 return VERR_NO_MEMORY;
3973
3974 pStorage->pfnCompleted = pfnCompleted;
3975
3976 /* Open the file. */
3977 int rc = RTFileOpen(&pStorage->File, pszLocation, fOpen);
3978 if (RT_SUCCESS(rc))
3979 {
3980 *ppStorage = pStorage;
3981 return VINF_SUCCESS;
3982 }
3983
3984 RTMemFree(pStorage);
3985 return rc;
3986}
3987
3988/**
3989 * VD async I/O interface close callback.
3990 */
3991static DECLCALLBACK(int) vdIOCloseFallback(void *pvUser, void *pvStorage)
3992{
3993 RT_NOREF1(pvUser);
3994 PVDIIOFALLBACKSTORAGE pStorage = (PVDIIOFALLBACKSTORAGE)pvStorage;
3995
3996 RTFileClose(pStorage->File);
3997 RTMemFree(pStorage);
3998 return VINF_SUCCESS;
3999}
4000
4001static DECLCALLBACK(int) vdIODeleteFallback(void *pvUser, const char *pcszFilename)
4002{
4003 RT_NOREF1(pvUser);
4004 return RTFileDelete(pcszFilename);
4005}
4006
4007static DECLCALLBACK(int) vdIOMoveFallback(void *pvUser, const char *pcszSrc, const char *pcszDst, unsigned fMove)
4008{
4009 RT_NOREF1(pvUser);
4010 return RTFileMove(pcszSrc, pcszDst, fMove);
4011}
4012
4013static DECLCALLBACK(int) vdIOGetFreeSpaceFallback(void *pvUser, const char *pcszFilename, int64_t *pcbFreeSpace)
4014{
4015 RT_NOREF1(pvUser);
4016 return RTFsQuerySizes(pcszFilename, NULL, pcbFreeSpace, NULL, NULL);
4017}
4018
4019static DECLCALLBACK(int) vdIOGetModificationTimeFallback(void *pvUser, const char *pcszFilename, PRTTIMESPEC pModificationTime)
4020{
4021 RT_NOREF1(pvUser);
4022 RTFSOBJINFO info;
4023 int rc = RTPathQueryInfo(pcszFilename, &info, RTFSOBJATTRADD_NOTHING);
4024 if (RT_SUCCESS(rc))
4025 *pModificationTime = info.ModificationTime;
4026 return rc;
4027}
4028
4029/**
4030 * VD async I/O interface callback for retrieving the file size.
4031 */
4032static DECLCALLBACK(int) vdIOGetSizeFallback(void *pvUser, void *pvStorage, uint64_t *pcbSize)
4033{
4034 RT_NOREF1(pvUser);
4035 PVDIIOFALLBACKSTORAGE pStorage = (PVDIIOFALLBACKSTORAGE)pvStorage;
4036
4037 return RTFileGetSize(pStorage->File, pcbSize);
4038}
4039
4040/**
4041 * VD async I/O interface callback for setting the file size.
4042 */
4043static DECLCALLBACK(int) vdIOSetSizeFallback(void *pvUser, void *pvStorage, uint64_t cbSize)
4044{
4045 RT_NOREF1(pvUser);
4046 PVDIIOFALLBACKSTORAGE pStorage = (PVDIIOFALLBACKSTORAGE)pvStorage;
4047
4048 return RTFileSetSize(pStorage->File, cbSize);
4049}
4050
4051/**
4052 * VD async I/O interface callback for setting the file allocation size.
4053 */
4054static DECLCALLBACK(int) vdIOSetAllocationSizeFallback(void *pvUser, void *pvStorage, uint64_t cbSize,
4055 uint32_t fFlags)
4056{
4057 RT_NOREF2(pvUser, fFlags);
4058 PVDIIOFALLBACKSTORAGE pStorage = (PVDIIOFALLBACKSTORAGE)pvStorage;
4059
4060 return RTFileSetAllocationSize(pStorage->File, cbSize, RTFILE_ALLOC_SIZE_F_DEFAULT);
4061}
4062
4063/**
4064 * VD async I/O interface callback for a synchronous write to the file.
4065 */
4066static DECLCALLBACK(int) vdIOWriteSyncFallback(void *pvUser, void *pvStorage, uint64_t uOffset,
4067 const void *pvBuf, size_t cbWrite, size_t *pcbWritten)
4068{
4069 RT_NOREF1(pvUser);
4070 PVDIIOFALLBACKSTORAGE pStorage = (PVDIIOFALLBACKSTORAGE)pvStorage;
4071
4072 return RTFileWriteAt(pStorage->File, uOffset, pvBuf, cbWrite, pcbWritten);
4073}
4074
4075/**
4076 * VD async I/O interface callback for a synchronous read from the file.
4077 */
4078static DECLCALLBACK(int) vdIOReadSyncFallback(void *pvUser, void *pvStorage, uint64_t uOffset,
4079 void *pvBuf, size_t cbRead, size_t *pcbRead)
4080{
4081 RT_NOREF1(pvUser);
4082 PVDIIOFALLBACKSTORAGE pStorage = (PVDIIOFALLBACKSTORAGE)pvStorage;
4083
4084 return RTFileReadAt(pStorage->File, uOffset, pvBuf, cbRead, pcbRead);
4085}
4086
4087/**
4088 * VD async I/O interface callback for a synchronous flush of the file data.
4089 */
4090static DECLCALLBACK(int) vdIOFlushSyncFallback(void *pvUser, void *pvStorage)
4091{
4092 RT_NOREF1(pvUser);
4093 PVDIIOFALLBACKSTORAGE pStorage = (PVDIIOFALLBACKSTORAGE)pvStorage;
4094
4095 return RTFileFlush(pStorage->File);
4096}
4097
4098/**
4099 * VD async I/O interface callback for an asynchronous read from the file.
4100 */
4101static DECLCALLBACK(int) vdIOReadAsyncFallback(void *pvUser, void *pStorage, uint64_t uOffset,
4102 PCRTSGSEG paSegments, size_t cSegments,
4103 size_t cbRead, void *pvCompletion,
4104 void **ppTask)
4105{
4106 RT_NOREF8(pvUser, pStorage, uOffset, paSegments, cSegments, cbRead, pvCompletion, ppTask);
4107 return VERR_NOT_IMPLEMENTED;
4108}
4109
4110/**
4111 * VD async I/O interface callback for an asynchronous write to the file.
4112 */
4113static DECLCALLBACK(int) vdIOWriteAsyncFallback(void *pvUser, void *pStorage, uint64_t uOffset,
4114 PCRTSGSEG paSegments, size_t cSegments,
4115 size_t cbWrite, void *pvCompletion,
4116 void **ppTask)
4117{
4118 RT_NOREF8(pvUser, pStorage, uOffset, paSegments, cSegments, cbWrite, pvCompletion, ppTask);
4119 return VERR_NOT_IMPLEMENTED;
4120}
4121
4122/**
4123 * VD async I/O interface callback for an asynchronous flush of the file data.
4124 */
4125static DECLCALLBACK(int) vdIOFlushAsyncFallback(void *pvUser, void *pStorage,
4126 void *pvCompletion, void **ppTask)
4127{
4128 RT_NOREF4(pvUser, pStorage, pvCompletion, ppTask);
4129 return VERR_NOT_IMPLEMENTED;
4130}
4131
4132/**
4133 * Internal - Continues an I/O context after
4134 * it was halted because of an active transfer.
4135 */
4136static int vdIoCtxContinue(PVDIOCTX pIoCtx, int rcReq)
4137{
4138 PVBOXHDD pDisk = pIoCtx->pDisk;
4139 int rc = VINF_SUCCESS;
4140
4141 VD_IS_LOCKED(pDisk);
4142
4143 if (RT_FAILURE(rcReq))
4144 ASMAtomicCmpXchgS32(&pIoCtx->rcReq, rcReq, VINF_SUCCESS);
4145
4146 if (!(pIoCtx->fFlags & VDIOCTX_FLAGS_BLOCKED))
4147 {
4148 /* Continue the transfer */
4149 rc = vdIoCtxProcessLocked(pIoCtx);
4150
4151 if ( rc == VINF_VD_ASYNC_IO_FINISHED
4152 && ASMAtomicCmpXchgBool(&pIoCtx->fComplete, true, false))
4153 {
4154 LogFlowFunc(("I/O context completed pIoCtx=%#p\n", pIoCtx));
4155 if (pIoCtx->pIoCtxParent)
4156 {
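            /* This is a child I/O context (e.g. a write spawned to grow the image); propagate the status and transferred bytes to the parent and resume it. */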
4157 PVDIOCTX pIoCtxParent = pIoCtx->pIoCtxParent;
4158
4159 Assert(!pIoCtxParent->pIoCtxParent);
4160 if (RT_FAILURE(pIoCtx->rcReq))
4161 ASMAtomicCmpXchgS32(&pIoCtxParent->rcReq, pIoCtx->rcReq, VINF_SUCCESS);
4162
4163 ASMAtomicDecU32(&pIoCtxParent->cDataTransfersPending);
4164
4165 if (pIoCtx->enmTxDir == VDIOCTXTXDIR_WRITE)
4166 {
4167 LogFlowFunc(("I/O context transferred %u bytes for the parent pIoCtxParent=%p\n",
4168 pIoCtx->Type.Child.cbTransferParent, pIoCtxParent));
4169
4170 /* Update the parent state. */
4171 Assert(pIoCtxParent->Req.Io.cbTransferLeft >= pIoCtx->Type.Child.cbTransferParent);
4172 ASMAtomicSubU32(&pIoCtxParent->Req.Io.cbTransferLeft, (uint32_t)pIoCtx->Type.Child.cbTransferParent);
4173 }
4174 else
4175 Assert(pIoCtx->enmTxDir == VDIOCTXTXDIR_FLUSH);
4176
4177 /*
4178 * A completed child write means that we finished growing the image.
4179 * We have to process any pending writes now.
4180 */
4181 vdIoCtxUnlockDisk(pDisk, pIoCtxParent, false /* fProcessDeferredReqs */);
4182
4183 /* Unblock the parent */
4184 pIoCtxParent->fFlags &= ~VDIOCTX_FLAGS_BLOCKED;
4185
4186 rc = vdIoCtxProcessLocked(pIoCtxParent);
4187
4188 if ( rc == VINF_VD_ASYNC_IO_FINISHED
4189 && ASMAtomicCmpXchgBool(&pIoCtxParent->fComplete, true, false))
4190 {
4191 LogFlowFunc(("Parent I/O context completed pIoCtxParent=%#p rcReq=%Rrc\n", pIoCtxParent, pIoCtxParent->rcReq));
4192 vdIoCtxRootComplete(pDisk, pIoCtxParent);
4193 vdThreadFinishWrite(pDisk);
4194 vdIoCtxFree(pDisk, pIoCtxParent);
4195 vdDiskProcessBlockedIoCtx(pDisk);
4196 }
4197 else if (!vdIoCtxIsDiskLockOwner(pDisk, pIoCtx))
4198 {
4199                    /* Process any pending writes if the current request didn't cause the image to grow again. */
4200 vdDiskProcessBlockedIoCtx(pDisk);
4201 }
4202 }
4203 else
4204 {
4205 if (pIoCtx->enmTxDir == VDIOCTXTXDIR_FLUSH)
4206 {
4207                    vdIoCtxUnlockDisk(pDisk, pIoCtx, true /* fProcessDeferredReqs */);
4208 vdThreadFinishWrite(pDisk);
4209 }
4210 else if ( pIoCtx->enmTxDir == VDIOCTXTXDIR_WRITE
4211 || pIoCtx->enmTxDir == VDIOCTXTXDIR_DISCARD)
4212 vdThreadFinishWrite(pDisk);
4213 else
4214 {
4215 Assert(pIoCtx->enmTxDir == VDIOCTXTXDIR_READ);
4216 vdThreadFinishRead(pDisk);
4217 }
4218
4219 LogFlowFunc(("I/O context completed pIoCtx=%#p rcReq=%Rrc\n", pIoCtx, pIoCtx->rcReq));
4220 vdIoCtxRootComplete(pDisk, pIoCtx);
4221 }
4222
4223 vdIoCtxFree(pDisk, pIoCtx);
4224 }
4225 }
4226
4227 return VINF_SUCCESS;
4228}
4229
4230/**
4231 * Internal - Called when a user transfer completed.
4232 */
4233static int vdUserXferCompleted(PVDIOSTORAGE pIoStorage, PVDIOCTX pIoCtx,
4234 PFNVDXFERCOMPLETED pfnComplete, void *pvUser,
4235 size_t cbTransfer, int rcReq)
4236{
4237 int rc = VINF_SUCCESS;
4238 PVBOXHDD pDisk = pIoCtx->pDisk;
4239
4240 LogFlowFunc(("pIoStorage=%#p pIoCtx=%#p pfnComplete=%#p pvUser=%#p cbTransfer=%zu rcReq=%Rrc\n",
4241 pIoStorage, pIoCtx, pfnComplete, pvUser, cbTransfer, rcReq));
4242
4243 VD_IS_LOCKED(pDisk);
4244
4245 Assert(pIoCtx->Req.Io.cbTransferLeft >= cbTransfer);
4246 ASMAtomicSubU32(&pIoCtx->Req.Io.cbTransferLeft, (uint32_t)cbTransfer); Assert(cbTransfer == (uint32_t)cbTransfer);
4247 ASMAtomicDecU32(&pIoCtx->cDataTransfersPending);
4248
4249 if (pfnComplete)
4250 rc = pfnComplete(pIoStorage->pVDIo->pBackendData, pIoCtx, pvUser, rcReq);
4251
4252 if (RT_SUCCESS(rc))
4253 rc = vdIoCtxContinue(pIoCtx, rcReq);
4254 else if (rc == VERR_VD_ASYNC_IO_IN_PROGRESS)
4255 rc = VINF_SUCCESS;
4256
4257 return rc;
4258}
4259
4260static void vdIoCtxContinueDeferredList(PVDIOSTORAGE pIoStorage, PRTLISTANCHOR pListWaiting,
4261 PFNVDXFERCOMPLETED pfnComplete, void *pvUser, int rcReq)
4262{
4263 LogFlowFunc(("pIoStorage=%#p pListWaiting=%#p pfnComplete=%#p pvUser=%#p rcReq=%Rrc\n",
4264 pIoStorage, pListWaiting, pfnComplete, pvUser, rcReq));
4265
4266 /* Go through the waiting list and continue the I/O contexts. */
4267 while (!RTListIsEmpty(pListWaiting))
4268 {
4269 int rc = VINF_SUCCESS;
4270 PVDIOCTXDEFERRED pDeferred = RTListGetFirst(pListWaiting, VDIOCTXDEFERRED, NodeDeferred);
4271 PVDIOCTX pIoCtx = pDeferred->pIoCtx;
4272 RTListNodeRemove(&pDeferred->NodeDeferred);
4273
4274 RTMemFree(pDeferred);
4275 ASMAtomicDecU32(&pIoCtx->cMetaTransfersPending);
4276
4277 if (pfnComplete)
4278 rc = pfnComplete(pIoStorage->pVDIo->pBackendData, pIoCtx, pvUser, rcReq);
4279
4280 LogFlow(("Completion callback for I/O context %#p returned %Rrc\n", pIoCtx, rc));
4281
4282 if (RT_SUCCESS(rc))
4283 {
4284 rc = vdIoCtxContinue(pIoCtx, rcReq);
4285 AssertRC(rc);
4286 }
4287 else
4288 Assert(rc == VERR_VD_ASYNC_IO_IN_PROGRESS);
4289 }
4290}
4291
4292/**
4293 * Internal - Called when a meta transfer completed.
4294 */
4295static int vdMetaXferCompleted(PVDIOSTORAGE pIoStorage, PFNVDXFERCOMPLETED pfnComplete, void *pvUser,
4296 PVDMETAXFER pMetaXfer, int rcReq)
4297{
4298 PVBOXHDD pDisk = pIoStorage->pVDIo->pDisk;
4299 RTLISTNODE ListIoCtxWaiting;
4300 bool fFlush;
4301
4302 LogFlowFunc(("pIoStorage=%#p pfnComplete=%#p pvUser=%#p pMetaXfer=%#p rcReq=%Rrc\n",
4303 pIoStorage, pfnComplete, pvUser, pMetaXfer, rcReq));
4304
4305 VD_IS_LOCKED(pDisk);
4306
4307 fFlush = VDMETAXFER_TXDIR_GET(pMetaXfer->fFlags) == VDMETAXFER_TXDIR_FLUSH;
4308
4309 if (!fFlush)
4310 {
4311 RTListMove(&ListIoCtxWaiting, &pMetaXfer->ListIoCtxWaiting);
4312
4313 if (RT_FAILURE(rcReq))
4314 {
4315 /* Remove from the AVL tree. */
4316 LogFlow(("Removing meta xfer=%#p\n", pMetaXfer));
4317 bool fRemoved = RTAvlrFileOffsetRemove(pIoStorage->pTreeMetaXfers, pMetaXfer->Core.Key) != NULL;
4318 Assert(fRemoved); NOREF(fRemoved);
4319 /* If this was a write check if there is a shadow buffer with updated data. */
4320 if (pMetaXfer->pbDataShw)
4321 {
4322 Assert(VDMETAXFER_TXDIR_GET(pMetaXfer->fFlags) == VDMETAXFER_TXDIR_WRITE);
4323 Assert(!RTListIsEmpty(&pMetaXfer->ListIoCtxShwWrites));
4324 RTListConcatenate(&ListIoCtxWaiting, &pMetaXfer->ListIoCtxShwWrites);
4325 RTMemFree(pMetaXfer->pbDataShw);
4326 pMetaXfer->pbDataShw = NULL;
4327 }
4328 RTMemFree(pMetaXfer);
4329 }
4330 else
4331 {
4332 /* Increase the reference counter to make sure it doesn't go away before the last context is processed. */
4333 pMetaXfer->cRefs++;
4334 }
4335 }
4336 else
4337 RTListMove(&ListIoCtxWaiting, &pMetaXfer->ListIoCtxWaiting);
4338
4339 VDMETAXFER_TXDIR_SET(pMetaXfer->fFlags, VDMETAXFER_TXDIR_NONE);
4340 vdIoCtxContinueDeferredList(pIoStorage, &ListIoCtxWaiting, pfnComplete, pvUser, rcReq);
4341
4342 /*
4343 * If there is a shadow buffer and the previous write was successful, update the
4344 * buffer with the new data and trigger a new write.
4345 */
4346 if ( pMetaXfer->pbDataShw
4347 && RT_SUCCESS(rcReq)
4348 && VDMETAXFER_TXDIR_GET(pMetaXfer->fFlags) == VDMETAXFER_TXDIR_NONE)
4349 {
4350 LogFlowFunc(("pMetaXfer=%#p Updating from shadow buffer and triggering new write\n", pMetaXfer));
4351 memcpy(pMetaXfer->abData, pMetaXfer->pbDataShw, pMetaXfer->cbMeta);
4352 RTMemFree(pMetaXfer->pbDataShw);
4353 pMetaXfer->pbDataShw = NULL;
4354 Assert(!RTListIsEmpty(&pMetaXfer->ListIoCtxShwWrites));
4355
4356 /* Setup a new I/O write. */
4357 PVDIOTASK pIoTask = vdIoTaskMetaAlloc(pIoStorage, pfnComplete, pvUser, pMetaXfer);
4358 if (RT_LIKELY(pIoTask))
4359 {
4360 void *pvTask = NULL;
4361 RTSGSEG Seg;
4362
4363 Seg.cbSeg = pMetaXfer->cbMeta;
4364 Seg.pvSeg = pMetaXfer->abData;
4365
4366 VDMETAXFER_TXDIR_SET(pMetaXfer->fFlags, VDMETAXFER_TXDIR_WRITE);
4367 rcReq = pIoStorage->pVDIo->pInterfaceIo->pfnWriteAsync(pIoStorage->pVDIo->pInterfaceIo->Core.pvUser,
4368 pIoStorage->pStorage,
4369 pMetaXfer->Core.Key, &Seg, 1,
4370 pMetaXfer->cbMeta, pIoTask,
4371 &pvTask);
4372 if ( RT_SUCCESS(rcReq)
4373 || rcReq != VERR_VD_ASYNC_IO_IN_PROGRESS)
4374 {
4375 VDMETAXFER_TXDIR_SET(pMetaXfer->fFlags, VDMETAXFER_TXDIR_NONE);
4376 vdIoTaskFree(pDisk, pIoTask);
4377 }
4378 else
4379 RTListMove(&pMetaXfer->ListIoCtxWaiting, &pMetaXfer->ListIoCtxShwWrites);
4380 }
4381 else
4382 rcReq = VERR_NO_MEMORY;
4383
4384 /* Cleanup if there was an error or the request completed already. */
4385 if (rcReq != VERR_VD_ASYNC_IO_IN_PROGRESS)
4386 vdIoCtxContinueDeferredList(pIoStorage, &pMetaXfer->ListIoCtxShwWrites, pfnComplete, pvUser, rcReq);
4387 }
4388
4389 /* Remove if not used anymore. */
4390 if (!fFlush)
4391 {
4392 pMetaXfer->cRefs--;
4393 if (!pMetaXfer->cRefs && RTListIsEmpty(&pMetaXfer->ListIoCtxWaiting))
4394 {
4395 /* Remove from the AVL tree. */
4396 LogFlow(("Removing meta xfer=%#p\n", pMetaXfer));
4397 bool fRemoved = RTAvlrFileOffsetRemove(pIoStorage->pTreeMetaXfers, pMetaXfer->Core.Key) != NULL;
4398 Assert(fRemoved); NOREF(fRemoved);
4399 RTMemFree(pMetaXfer);
4400 }
4401 }
4402 else if (fFlush)
4403 RTMemFree(pMetaXfer);
4404
4405 return VINF_SUCCESS;
4406}
4407
4408/**
4409 * Processes the list of waiting I/O tasks. The disk lock must be held by the caller.
4410 *
4411 * @returns nothing.
4412 * @param pDisk The disk to process the list for.
4413 */
4414static void vdIoTaskProcessWaitingList(PVBOXHDD pDisk)
4415{
4416 LogFlowFunc(("pDisk=%#p\n", pDisk));
4417
4418 VD_IS_LOCKED(pDisk);
4419
4420 PVDIOTASK pHead = ASMAtomicXchgPtrT(&pDisk->pIoTasksPendingHead, NULL, PVDIOTASK);
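    /* The pending list was detached atomically, so other threads can queue new tasks while we process this snapshot. */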
4421
4422 Log(("I/O task list cleared\n"));
4423
4424    /* Reverse the list so the tasks are processed in submission (FIFO) order. */
4425 PVDIOTASK pCur = pHead;
4426 pHead = NULL;
4427 while (pCur)
4428 {
4429 PVDIOTASK pInsert = pCur;
4430 pCur = pCur->pNext;
4431 pInsert->pNext = pHead;
4432 pHead = pInsert;
4433 }
4434
4435 while (pHead)
4436 {
4437 PVDIOSTORAGE pIoStorage = pHead->pIoStorage;
4438
4439 if (!pHead->fMeta)
4440 vdUserXferCompleted(pIoStorage, pHead->Type.User.pIoCtx,
4441 pHead->pfnComplete, pHead->pvUser,
4442 pHead->Type.User.cbTransfer, pHead->rcReq);
4443 else
4444 vdMetaXferCompleted(pIoStorage, pHead->pfnComplete, pHead->pvUser,
4445 pHead->Type.Meta.pMetaXfer, pHead->rcReq);
4446
4447 pCur = pHead;
4448 pHead = pHead->pNext;
4449 vdIoTaskFree(pDisk, pCur);
4450 }
4451}
4452
4453/**
4454 * Process any I/O context on the halted list.
4455 *
4456 * @returns nothing.
4457 * @param pDisk The disk.
4458 */
4459static void vdIoCtxProcessHaltedList(PVBOXHDD pDisk)
4460{
4461 LogFlowFunc(("pDisk=%#p\n", pDisk));
4462
4463 VD_IS_LOCKED(pDisk);
4464
4465 /* Get the waiting list and process it in FIFO order. */
4466 PVDIOCTX pIoCtxHead = ASMAtomicXchgPtrT(&pDisk->pIoCtxHaltedHead, NULL, PVDIOCTX);
4467
4468 /* Reverse it. */
4469 PVDIOCTX pCur = pIoCtxHead;
4470 pIoCtxHead = NULL;
4471 while (pCur)
4472 {
4473 PVDIOCTX pInsert = pCur;
4474 pCur = pCur->pIoCtxNext;
4475 pInsert->pIoCtxNext = pIoCtxHead;
4476 pIoCtxHead = pInsert;
4477 }
4478
4479 /* Process now. */
4480 pCur = pIoCtxHead;
4481 while (pCur)
4482 {
4483 PVDIOCTX pTmp = pCur;
4484
4485 pCur = pCur->pIoCtxNext;
4486 pTmp->pIoCtxNext = NULL;
4487
4488 /* Continue */
4489 pTmp->fFlags &= ~VDIOCTX_FLAGS_BLOCKED;
4490 vdIoCtxContinue(pTmp, pTmp->rcReq);
4491 }
4492}
4493
4494/**
4495 * Unlock the disk and process pending tasks.
4496 *
4497 * @returns VBox status code.
4498 * @param pDisk The disk to unlock.
4499 * @param pIoCtxRc The I/O context to get the status code from, optional.
4500 */
4501static int vdDiskUnlock(PVBOXHDD pDisk, PVDIOCTX pIoCtxRc)
4502{
4503 int rc = VINF_SUCCESS;
4504
4505 VD_IS_LOCKED(pDisk);
4506
4507 /*
4508 * Process the list of waiting I/O tasks first
4509 * because they might complete I/O contexts.
4510 * Same for the list of halted I/O contexts.
4511 * Afterwards comes the list of new I/O contexts.
4512 */
4513 vdIoTaskProcessWaitingList(pDisk);
4514 vdIoCtxProcessHaltedList(pDisk);
4515 rc = vdDiskProcessWaitingIoCtx(pDisk, pIoCtxRc);
4516 ASMAtomicXchgBool(&pDisk->fLocked, false);
4517
4518 /*
4519     * Need to check for new I/O tasks and waiting I/O contexts again
4520     * because other threads might have added them while we were
4521     * processing the previous lists.
4522 */
4523 while ( ASMAtomicUoReadPtrT(&pDisk->pIoCtxHead, PVDIOCTX) != NULL
4524 || ASMAtomicUoReadPtrT(&pDisk->pIoTasksPendingHead, PVDIOTASK) != NULL
4525 || ASMAtomicUoReadPtrT(&pDisk->pIoCtxHaltedHead, PVDIOCTX) != NULL)
4526 {
4527 /* Try lock disk again. */
4528 if (ASMAtomicCmpXchgBool(&pDisk->fLocked, true, false))
4529 {
4530 vdIoTaskProcessWaitingList(pDisk);
4531 vdIoCtxProcessHaltedList(pDisk);
4532 vdDiskProcessWaitingIoCtx(pDisk, NULL);
4533 ASMAtomicXchgBool(&pDisk->fLocked, false);
4534 }
4535        else /* Let the other thread process everything when it unlocks the disk. */
4536 break;
4537 }
4538
4539 return rc;
4540}
4541
4542/**
4543 * Tries to lock the disk to complete processing of the I/O task.
4544 * The completion is deferred if the disk is locked already.
4545 *
4546 * @returns nothing.
4547 * @param pIoTask The I/O task to complete.
4548 */
4549static void vdXferTryLockDiskDeferIoTask(PVDIOTASK pIoTask)
4550{
4551 PVDIOSTORAGE pIoStorage = pIoTask->pIoStorage;
4552 PVBOXHDD pDisk = pIoStorage->pVDIo->pDisk;
4553
4554 Log(("Deferring I/O task pIoTask=%p\n", pIoTask));
4555
4556 /* Put it on the waiting list. */
4557 PVDIOTASK pNext = ASMAtomicUoReadPtrT(&pDisk->pIoTasksPendingHead, PVDIOTASK);
4558 PVDIOTASK pHeadOld;
4559 pIoTask->pNext = pNext;
4560 while (!ASMAtomicCmpXchgExPtr(&pDisk->pIoTasksPendingHead, pIoTask, pNext, &pHeadOld))
4561 {
4562 pNext = pHeadOld;
4563 Assert(pNext != pIoTask);
4564 pIoTask->pNext = pNext;
4565 ASMNopPause();
4566 }
4567
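    /* If we can grab the disk lock now, process the pending lists immediately; otherwise the current lock holder will pick the task up when it unlocks the disk. */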
4568 if (ASMAtomicCmpXchgBool(&pDisk->fLocked, true, false))
4569 {
4570 /* Release disk lock, it will take care of processing all lists. */
4571 vdDiskUnlock(pDisk, NULL);
4572 }
4573}
4574
4575static DECLCALLBACK(int) vdIOIntReqCompleted(void *pvUser, int rcReq)
4576{
4577 PVDIOTASK pIoTask = (PVDIOTASK)pvUser;
4578
4579 LogFlowFunc(("Task completed pIoTask=%#p\n", pIoTask));
4580
4581 pIoTask->rcReq = rcReq;
4582 vdXferTryLockDiskDeferIoTask(pIoTask);
4583 return VINF_SUCCESS;
4584}
4585
4586/**
4587 * VD I/O interface callback for opening a file.
4588 */
4589static DECLCALLBACK(int) vdIOIntOpen(void *pvUser, const char *pszLocation,
4590 unsigned uOpenFlags, PPVDIOSTORAGE ppIoStorage)
4591{
4592 int rc = VINF_SUCCESS;
4593 PVDIO pVDIo = (PVDIO)pvUser;
4594 PVDIOSTORAGE pIoStorage = (PVDIOSTORAGE)RTMemAllocZ(sizeof(VDIOSTORAGE));
4595
4596 if (!pIoStorage)
4597 return VERR_NO_MEMORY;
4598
4599    /* Create the AVL tree used to track metadata transfers for this storage. */
4600 pIoStorage->pTreeMetaXfers = (PAVLRFOFFTREE)RTMemAllocZ(sizeof(AVLRFOFFTREE));
4601 if (pIoStorage->pTreeMetaXfers)
4602 {
4603 rc = pVDIo->pInterfaceIo->pfnOpen(pVDIo->pInterfaceIo->Core.pvUser,
4604 pszLocation, uOpenFlags,
4605 vdIOIntReqCompleted,
4606 &pIoStorage->pStorage);
4607 if (RT_SUCCESS(rc))
4608 {
4609 pIoStorage->pVDIo = pVDIo;
4610 *ppIoStorage = pIoStorage;
4611 return VINF_SUCCESS;
4612 }
4613
4614 RTMemFree(pIoStorage->pTreeMetaXfers);
4615 }
4616 else
4617 rc = VERR_NO_MEMORY;
4618
4619 RTMemFree(pIoStorage);
4620 return rc;
4621}
4622
4623static DECLCALLBACK(int) vdIOIntTreeMetaXferDestroy(PAVLRFOFFNODECORE pNode, void *pvUser)
4624{
4625 RT_NOREF2(pNode, pvUser);
4626 AssertMsgFailed(("Tree should be empty at this point!\n"));
4627 return VINF_SUCCESS;
4628}
4629
4630static DECLCALLBACK(int) vdIOIntClose(void *pvUser, PVDIOSTORAGE pIoStorage)
4631{
4632 int rc = VINF_SUCCESS;
4633 PVDIO pVDIo = (PVDIO)pvUser;
4634
4635 /* We free everything here, even if closing the file failed for some reason. */
4636 rc = pVDIo->pInterfaceIo->pfnClose(pVDIo->pInterfaceIo->Core.pvUser, pIoStorage->pStorage);
4637 RTAvlrFileOffsetDestroy(pIoStorage->pTreeMetaXfers, vdIOIntTreeMetaXferDestroy, NULL);
4638 RTMemFree(pIoStorage->pTreeMetaXfers);
4639 RTMemFree(pIoStorage);
4640 return rc;
4641}
4642
4643static DECLCALLBACK(int) vdIOIntDelete(void *pvUser, const char *pcszFilename)
4644{
4645 PVDIO pVDIo = (PVDIO)pvUser;
4646 return pVDIo->pInterfaceIo->pfnDelete(pVDIo->pInterfaceIo->Core.pvUser,
4647 pcszFilename);
4648}
4649
4650static DECLCALLBACK(int) vdIOIntMove(void *pvUser, const char *pcszSrc, const char *pcszDst,
4651 unsigned fMove)
4652{
4653 PVDIO pVDIo = (PVDIO)pvUser;
4654 return pVDIo->pInterfaceIo->pfnMove(pVDIo->pInterfaceIo->Core.pvUser,
4655 pcszSrc, pcszDst, fMove);
4656}
4657
4658static DECLCALLBACK(int) vdIOIntGetFreeSpace(void *pvUser, const char *pcszFilename,
4659 int64_t *pcbFreeSpace)
4660{
4661 PVDIO pVDIo = (PVDIO)pvUser;
4662 return pVDIo->pInterfaceIo->pfnGetFreeSpace(pVDIo->pInterfaceIo->Core.pvUser,
4663 pcszFilename, pcbFreeSpace);
4664}
4665
4666static DECLCALLBACK(int) vdIOIntGetModificationTime(void *pvUser, const char *pcszFilename,
4667 PRTTIMESPEC pModificationTime)
4668{
4669 PVDIO pVDIo = (PVDIO)pvUser;
4670 return pVDIo->pInterfaceIo->pfnGetModificationTime(pVDIo->pInterfaceIo->Core.pvUser,
4671 pcszFilename, pModificationTime);
4672}
4673
4674static DECLCALLBACK(int) vdIOIntGetSize(void *pvUser, PVDIOSTORAGE pIoStorage,
4675 uint64_t *pcbSize)
4676{
4677 PVDIO pVDIo = (PVDIO)pvUser;
4678 return pVDIo->pInterfaceIo->pfnGetSize(pVDIo->pInterfaceIo->Core.pvUser,
4679 pIoStorage->pStorage, pcbSize);
4680}
4681
4682static DECLCALLBACK(int) vdIOIntSetSize(void *pvUser, PVDIOSTORAGE pIoStorage,
4683 uint64_t cbSize)
4684{
4685 PVDIO pVDIo = (PVDIO)pvUser;
4686 return pVDIo->pInterfaceIo->pfnSetSize(pVDIo->pInterfaceIo->Core.pvUser,
4687 pIoStorage->pStorage, cbSize);
4688}
4689
4690static DECLCALLBACK(int) vdIOIntSetAllocationSize(void *pvUser, PVDIOSTORAGE pIoStorage,
4691 uint64_t cbSize, uint32_t fFlags,
4692 PVDINTERFACEPROGRESS pIfProgress,
4693 unsigned uPercentStart, unsigned uPercentSpan)
4694{
4695 PVDIO pVDIo = (PVDIO)pvUser;
4696 int rc = pVDIo->pInterfaceIo->pfnSetAllocationSize(pVDIo->pInterfaceIo->Core.pvUser,
4697 pIoStorage->pStorage, cbSize, fFlags);
4698 if (rc == VERR_NOT_SUPPORTED)
4699 {
4700 /* Fallback if the underlying medium does not support optimized storage allocation. */
4701 uint64_t cbSizeCur = 0;
4702 rc = pVDIo->pInterfaceIo->pfnGetSize(pVDIo->pInterfaceIo->Core.pvUser,
4703 pIoStorage->pStorage, &cbSizeCur);
4704 if (RT_SUCCESS(rc))
4705 {
4706 if (cbSizeCur < cbSize)
4707 {
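                /* Grow the file by writing zero-filled chunks until the requested size is reached, reporting progress along the way. */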
4708 const size_t cbBuf = 128 * _1K;
4709 void *pvBuf = RTMemTmpAllocZ(cbBuf);
4710 if (RT_LIKELY(pvBuf))
4711 {
4712 uint64_t cbFill = cbSize - cbSizeCur;
4713 uint64_t uOff = 0;
4714
4715 /* Write data to all blocks. */
4716 while ( uOff < cbFill
4717 && RT_SUCCESS(rc))
4718 {
4719 size_t cbChunk = (size_t)RT_MIN(cbFill - uOff, cbBuf);
4720
4721 rc = pVDIo->pInterfaceIo->pfnWriteSync(pVDIo->pInterfaceIo->Core.pvUser,
4722 pIoStorage->pStorage, cbSizeCur + uOff,
4723 pvBuf, cbChunk, NULL);
4724 if (RT_SUCCESS(rc))
4725 {
4726 uOff += cbChunk;
4727
4728 rc = vdIfProgress(pIfProgress, uPercentStart + uOff * uPercentSpan / cbFill);
4729 }
4730 }
4731
4732 RTMemTmpFree(pvBuf);
4733 }
4734 else
4735 rc = VERR_NO_MEMORY;
4736 }
4737 else if (cbSizeCur > cbSize)
4738 rc = pVDIo->pInterfaceIo->pfnSetSize(pVDIo->pInterfaceIo->Core.pvUser,
4739 pIoStorage->pStorage, cbSize);
4740 }
4741 }
4742
4743 if (RT_SUCCESS(rc))
4744 rc = vdIfProgress(pIfProgress, uPercentStart + uPercentSpan);
4745
4746 return rc;
4747}
4748
4749static DECLCALLBACK(int) vdIOIntReadUser(void *pvUser, PVDIOSTORAGE pIoStorage, uint64_t uOffset,
4750 PVDIOCTX pIoCtx, size_t cbRead)
4751{
4752 int rc = VINF_SUCCESS;
4753 PVDIO pVDIo = (PVDIO)pvUser;
4754 PVBOXHDD pDisk = pVDIo->pDisk;
4755
4756 LogFlowFunc(("pvUser=%#p pIoStorage=%#p uOffset=%llu pIoCtx=%#p cbRead=%u\n",
4757 pvUser, pIoStorage, uOffset, pIoCtx, cbRead));
4758
4759 /** @todo Enable check for sync I/O later. */
4760 if (!(pIoCtx->fFlags & VDIOCTX_FLAGS_SYNC))
4761 VD_IS_LOCKED(pDisk);
4762
4763 Assert(cbRead > 0);
4764
4765 if (pIoCtx->fFlags & VDIOCTX_FLAGS_SYNC)
4766 {
4767 RTSGSEG Seg;
4768 unsigned cSegments = 1;
4769 size_t cbTaskRead = 0;
4770
4771 /* Synchronous I/O contexts only have one buffer segment. */
4772 AssertMsgReturn(pIoCtx->Req.Io.SgBuf.cSegs == 1,
4773 ("Invalid number of buffer segments for synchronous I/O context"),
4774 VERR_INVALID_PARAMETER);
4775
4776 cbTaskRead = RTSgBufSegArrayCreate(&pIoCtx->Req.Io.SgBuf, &Seg, &cSegments, cbRead);
4777 Assert(cbRead == cbTaskRead);
4778 Assert(cSegments == 1);
4779 rc = pVDIo->pInterfaceIo->pfnReadSync(pVDIo->pInterfaceIo->Core.pvUser,
4780 pIoStorage->pStorage, uOffset,
4781 Seg.pvSeg, cbRead, NULL);
4782 if (RT_SUCCESS(rc))
4783 {
4784 Assert(cbRead == (uint32_t)cbRead);
4785 ASMAtomicSubU32(&pIoCtx->Req.Io.cbTransferLeft, (uint32_t)cbRead);
4786 }
4787 }
4788 else
4789 {
4790 /* Build the S/G array and spawn a new I/O task */
4791 while (cbRead)
4792 {
4793 RTSGSEG aSeg[VD_IO_TASK_SEGMENTS_MAX];
4794 unsigned cSegments = VD_IO_TASK_SEGMENTS_MAX;
4795 size_t cbTaskRead = RTSgBufSegArrayCreate(&pIoCtx->Req.Io.SgBuf, aSeg, &cSegments, cbRead);
4796
4797 Assert(cSegments > 0);
4798 Assert(cbTaskRead > 0);
4799 AssertMsg(cbTaskRead <= cbRead, ("Invalid number of bytes to read\n"));
4800
4801 LogFlow(("Reading %u bytes into %u segments\n", cbTaskRead, cSegments));
4802
4803#ifdef RT_STRICT
4804 for (unsigned i = 0; i < cSegments; i++)
4805 AssertMsg(aSeg[i].pvSeg && !(aSeg[i].cbSeg % 512),
4806 ("Segment %u is invalid\n", i));
4807#endif
4808
4809 Assert(cbTaskRead == (uint32_t)cbTaskRead);
4810 PVDIOTASK pIoTask = vdIoTaskUserAlloc(pIoStorage, NULL, NULL, pIoCtx, (uint32_t)cbTaskRead);
4811
4812 if (!pIoTask)
4813 return VERR_NO_MEMORY;
4814
4815 ASMAtomicIncU32(&pIoCtx->cDataTransfersPending);
4816
4817 void *pvTask;
4818 Log(("Spawning pIoTask=%p pIoCtx=%p\n", pIoTask, pIoCtx));
4819 rc = pVDIo->pInterfaceIo->pfnReadAsync(pVDIo->pInterfaceIo->Core.pvUser,
4820 pIoStorage->pStorage, uOffset,
4821 aSeg, cSegments, cbTaskRead, pIoTask,
4822 &pvTask);
4823 if (RT_SUCCESS(rc))
4824 {
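                /* The request completed synchronously; account for the transferred data and free the task right away. */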
4825 AssertMsg(cbTaskRead <= pIoCtx->Req.Io.cbTransferLeft, ("Impossible!\n"));
4826 ASMAtomicSubU32(&pIoCtx->Req.Io.cbTransferLeft, (uint32_t)cbTaskRead);
4827 ASMAtomicDecU32(&pIoCtx->cDataTransfersPending);
4828 vdIoTaskFree(pDisk, pIoTask);
4829 }
4830 else if (rc != VERR_VD_ASYNC_IO_IN_PROGRESS)
4831 {
4832 ASMAtomicDecU32(&pIoCtx->cDataTransfersPending);
4833 vdIoTaskFree(pDisk, pIoTask);
4834 break;
4835 }
4836
4837 uOffset += cbTaskRead;
4838 cbRead -= cbTaskRead;
4839 }
4840 }
4841
4842 LogFlowFunc(("returns rc=%Rrc\n", rc));
4843 return rc;
4844}
4845
4846static DECLCALLBACK(int) vdIOIntWriteUser(void *pvUser, PVDIOSTORAGE pIoStorage, uint64_t uOffset,
4847 PVDIOCTX pIoCtx, size_t cbWrite, PFNVDXFERCOMPLETED pfnComplete,
4848 void *pvCompleteUser)
4849{
4850 int rc = VINF_SUCCESS;
4851 PVDIO pVDIo = (PVDIO)pvUser;
4852 PVBOXHDD pDisk = pVDIo->pDisk;
4853
4854 LogFlowFunc(("pvUser=%#p pIoStorage=%#p uOffset=%llu pIoCtx=%#p cbWrite=%u\n",
4855 pvUser, pIoStorage, uOffset, pIoCtx, cbWrite));
4856
4857 /** @todo Enable check for sync I/O later. */
4858 if (!(pIoCtx->fFlags & VDIOCTX_FLAGS_SYNC))
4859 VD_IS_LOCKED(pDisk);
4860
4861 Assert(cbWrite > 0);
4862
4863 if (pIoCtx->fFlags & VDIOCTX_FLAGS_SYNC)
4864 {
4865 RTSGSEG Seg;
4866 unsigned cSegments = 1;
4867 size_t cbTaskWrite = 0;
4868
4869 /* Synchronous I/O contexts only have one buffer segment. */
4870 AssertMsgReturn(pIoCtx->Req.Io.SgBuf.cSegs == 1,
4871 ("Invalid number of buffer segments for synchronous I/O context"),
4872 VERR_INVALID_PARAMETER);
4873
4874 cbTaskWrite = RTSgBufSegArrayCreate(&pIoCtx->Req.Io.SgBuf, &Seg, &cSegments, cbWrite);
4875 Assert(cbWrite == cbTaskWrite);
4876 Assert(cSegments == 1);
4877 rc = pVDIo->pInterfaceIo->pfnWriteSync(pVDIo->pInterfaceIo->Core.pvUser,
4878 pIoStorage->pStorage, uOffset,
4879 Seg.pvSeg, cbWrite, NULL);
4880 if (RT_SUCCESS(rc))
4881 {
4882 Assert(pIoCtx->Req.Io.cbTransferLeft >= cbWrite);
4883 ASMAtomicSubU32(&pIoCtx->Req.Io.cbTransferLeft, (uint32_t)cbWrite);
4884 }
4885 }
4886 else
4887 {
4888 /* Build the S/G array and spawn a new I/O task */
4889 while (cbWrite)
4890 {
4891 RTSGSEG aSeg[VD_IO_TASK_SEGMENTS_MAX];
4892 unsigned cSegments = VD_IO_TASK_SEGMENTS_MAX;
4893 size_t cbTaskWrite = 0;
4894
4895 cbTaskWrite = RTSgBufSegArrayCreate(&pIoCtx->Req.Io.SgBuf, aSeg, &cSegments, cbWrite);
4896
4897 Assert(cSegments > 0);
4898 Assert(cbTaskWrite > 0);
4899 AssertMsg(cbTaskWrite <= cbWrite, ("Invalid number of bytes to write\n"));
4900
4901 LogFlow(("Writing %u bytes from %u segments\n", cbTaskWrite, cSegments));
4902
4903#ifdef DEBUG
4904 for (unsigned i = 0; i < cSegments; i++)
4905 AssertMsg(aSeg[i].pvSeg && !(aSeg[i].cbSeg % 512),
4906 ("Segment %u is invalid\n", i));
4907#endif
4908
4909 Assert(cbTaskWrite == (uint32_t)cbTaskWrite);
4910 PVDIOTASK pIoTask = vdIoTaskUserAlloc(pIoStorage, pfnComplete, pvCompleteUser, pIoCtx, (uint32_t)cbTaskWrite);
4911
4912 if (!pIoTask)
4913 return VERR_NO_MEMORY;
4914
4915 ASMAtomicIncU32(&pIoCtx->cDataTransfersPending);
4916
4917 void *pvTask;
4918 Log(("Spawning pIoTask=%p pIoCtx=%p\n", pIoTask, pIoCtx));
4919 rc = pVDIo->pInterfaceIo->pfnWriteAsync(pVDIo->pInterfaceIo->Core.pvUser,
4920 pIoStorage->pStorage,
4921 uOffset, aSeg, cSegments,
4922 cbTaskWrite, pIoTask, &pvTask);
4923 if (RT_SUCCESS(rc))
4924 {
4925 AssertMsg(cbTaskWrite <= pIoCtx->Req.Io.cbTransferLeft, ("Impossible!\n"));
4926 ASMAtomicSubU32(&pIoCtx->Req.Io.cbTransferLeft, (uint32_t)cbTaskWrite);
4927 ASMAtomicDecU32(&pIoCtx->cDataTransfersPending);
4928 vdIoTaskFree(pDisk, pIoTask);
4929 }
4930 else if (rc != VERR_VD_ASYNC_IO_IN_PROGRESS)
4931 {
4932 ASMAtomicDecU32(&pIoCtx->cDataTransfersPending);
4933 vdIoTaskFree(pDisk, pIoTask);
4934 break;
4935 }
4936
4937 uOffset += cbTaskWrite;
4938 cbWrite -= cbTaskWrite;
4939 }
4940 }
4941
4942 LogFlowFunc(("returns rc=%Rrc\n", rc));
4943 return rc;
4944}
4945
4946static DECLCALLBACK(int) vdIOIntReadMeta(void *pvUser, PVDIOSTORAGE pIoStorage, uint64_t uOffset,
4947 void *pvBuf, size_t cbRead, PVDIOCTX pIoCtx,
4948 PPVDMETAXFER ppMetaXfer, PFNVDXFERCOMPLETED pfnComplete,
4949 void *pvCompleteUser)
4950{
4951 PVDIO pVDIo = (PVDIO)pvUser;
4952 PVBOXHDD pDisk = pVDIo->pDisk;
4953 int rc = VINF_SUCCESS;
4954 RTSGSEG Seg;
4955 PVDIOTASK pIoTask;
4956 PVDMETAXFER pMetaXfer = NULL;
4957 void *pvTask = NULL;
4958
4959 LogFlowFunc(("pvUser=%#p pIoStorage=%#p uOffset=%llu pvBuf=%#p cbRead=%u\n",
4960 pvUser, pIoStorage, uOffset, pvBuf, cbRead));
4961
4962 AssertMsgReturn( pIoCtx
4963 || (!ppMetaXfer && !pfnComplete && !pvCompleteUser),
4964 ("A synchronous metadata read is requested but the parameters are wrong\n"),
4965 VERR_INVALID_POINTER);
4966
4967 /** @todo Enable check for sync I/O later. */
4968 if ( pIoCtx
4969 && !(pIoCtx->fFlags & VDIOCTX_FLAGS_SYNC))
4970 VD_IS_LOCKED(pDisk);
4971
4972 if ( !pIoCtx
4973 || pIoCtx->fFlags & VDIOCTX_FLAGS_SYNC)
4974 {
4975 /* Handle synchronous metadata I/O. */
4976 /** @todo Integrate with metadata transfers below. */
4977 rc = pVDIo->pInterfaceIo->pfnReadSync(pVDIo->pInterfaceIo->Core.pvUser,
4978 pIoStorage->pStorage, uOffset,
4979 pvBuf, cbRead, NULL);
4980 if (ppMetaXfer)
4981 *ppMetaXfer = NULL;
4982 }
4983 else
4984 {
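        /* Asynchronous path: look up an existing metadata transfer for this offset or start a new one. */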
4985 pMetaXfer = (PVDMETAXFER)RTAvlrFileOffsetGet(pIoStorage->pTreeMetaXfers, uOffset);
4986 if (!pMetaXfer)
4987 {
4988#ifdef RT_STRICT
4989 pMetaXfer = (PVDMETAXFER)RTAvlrFileOffsetGetBestFit(pIoStorage->pTreeMetaXfers, uOffset, false /* fAbove */);
4990 AssertMsg(!pMetaXfer || (pMetaXfer->Core.Key + (RTFOFF)pMetaXfer->cbMeta <= (RTFOFF)uOffset),
4991 ("Overlapping meta transfers!\n"));
4992#endif
4993
4994 /* Allocate a new meta transfer. */
4995 pMetaXfer = vdMetaXferAlloc(pIoStorage, uOffset, cbRead);
4996 if (!pMetaXfer)
4997 return VERR_NO_MEMORY;
4998
4999 pIoTask = vdIoTaskMetaAlloc(pIoStorage, pfnComplete, pvCompleteUser, pMetaXfer);
5000 if (!pIoTask)
5001 {
5002 RTMemFree(pMetaXfer);
5003 return VERR_NO_MEMORY;
5004 }
5005
5006 Seg.cbSeg = cbRead;
5007 Seg.pvSeg = pMetaXfer->abData;
5008
5009 VDMETAXFER_TXDIR_SET(pMetaXfer->fFlags, VDMETAXFER_TXDIR_READ);
5010 rc = pVDIo->pInterfaceIo->pfnReadAsync(pVDIo->pInterfaceIo->Core.pvUser,
5011 pIoStorage->pStorage,
5012 uOffset, &Seg, 1,
5013 cbRead, pIoTask, &pvTask);
5014
5015 if (RT_SUCCESS(rc) || rc == VERR_VD_ASYNC_IO_IN_PROGRESS)
5016 {
5017 bool fInserted = RTAvlrFileOffsetInsert(pIoStorage->pTreeMetaXfers, &pMetaXfer->Core);
5018 Assert(fInserted); NOREF(fInserted);
5019 }
5020 else
5021 RTMemFree(pMetaXfer);
5022
5023 if (RT_SUCCESS(rc))
5024 {
5025 VDMETAXFER_TXDIR_SET(pMetaXfer->fFlags, VDMETAXFER_TXDIR_NONE);
5026 vdIoTaskFree(pDisk, pIoTask);
5027 }
5028 else if (rc == VERR_VD_ASYNC_IO_IN_PROGRESS && !pfnComplete)
5029 rc = VERR_VD_NOT_ENOUGH_METADATA;
5030 }
5031
5032 Assert(VALID_PTR(pMetaXfer) || RT_FAILURE(rc));
5033
5034 if (RT_SUCCESS(rc) || rc == VERR_VD_NOT_ENOUGH_METADATA || rc == VERR_VD_ASYNC_IO_IN_PROGRESS)
5035 {
5036            /* The transfer is still pending; queue this context on the waiting list and report that the metadata isn't available yet. */
5037 if (VDMETAXFER_TXDIR_GET(pMetaXfer->fFlags) == VDMETAXFER_TXDIR_READ)
5038 {
5039 PVDIOCTXDEFERRED pDeferred = (PVDIOCTXDEFERRED)RTMemAllocZ(sizeof(VDIOCTXDEFERRED));
5040 AssertPtr(pDeferred);
5041
5042 RTListInit(&pDeferred->NodeDeferred);
5043 pDeferred->pIoCtx = pIoCtx;
5044
5045 ASMAtomicIncU32(&pIoCtx->cMetaTransfersPending);
5046 RTListAppend(&pMetaXfer->ListIoCtxWaiting, &pDeferred->NodeDeferred);
5047 rc = VERR_VD_NOT_ENOUGH_METADATA;
5048 }
5049 else
5050 {
5051 /* Transfer the data. */
5052 pMetaXfer->cRefs++;
5053 Assert(pMetaXfer->cbMeta >= cbRead);
5054 Assert(pMetaXfer->Core.Key == (RTFOFF)uOffset);
5055 if (pMetaXfer->pbDataShw)
5056 memcpy(pvBuf, pMetaXfer->pbDataShw, cbRead);
5057 else
5058 memcpy(pvBuf, pMetaXfer->abData, cbRead);
5059 *ppMetaXfer = pMetaXfer;
5060 }
5061 }
5062 }
5063
5064 LogFlowFunc(("returns rc=%Rrc\n", rc));
5065 return rc;
5066}
5067
5068static DECLCALLBACK(int) vdIOIntWriteMeta(void *pvUser, PVDIOSTORAGE pIoStorage, uint64_t uOffset,
5069 const void *pvBuf, size_t cbWrite, PVDIOCTX pIoCtx,
5070 PFNVDXFERCOMPLETED pfnComplete, void *pvCompleteUser)
5071{
5072 PVDIO pVDIo = (PVDIO)pvUser;
5073 PVBOXHDD pDisk = pVDIo->pDisk;
5074 int rc = VINF_SUCCESS;
5075 RTSGSEG Seg;
5076 PVDIOTASK pIoTask;
5077 PVDMETAXFER pMetaXfer = NULL;
5078 bool fInTree = false;
5079 void *pvTask = NULL;
5080
5081 LogFlowFunc(("pvUser=%#p pIoStorage=%#p uOffset=%llu pvBuf=%#p cbWrite=%u\n",
5082 pvUser, pIoStorage, uOffset, pvBuf, cbWrite));
5083
5084 AssertMsgReturn( pIoCtx
5085 || (!pfnComplete && !pvCompleteUser),
5086 ("A synchronous metadata write is requested but the parameters are wrong\n"),
5087 VERR_INVALID_POINTER);
5088
5089 /** @todo Enable check for sync I/O later. */
5090 if ( pIoCtx
5091 && !(pIoCtx->fFlags & VDIOCTX_FLAGS_SYNC))
5092 VD_IS_LOCKED(pDisk);
5093
5094 if ( !pIoCtx
5095 || pIoCtx->fFlags & VDIOCTX_FLAGS_SYNC)
5096 {
5097 /* Handle synchronous metadata I/O. */
5098 /** @todo Integrate with metadata transfers below. */
5099 rc = pVDIo->pInterfaceIo->pfnWriteSync(pVDIo->pInterfaceIo->Core.pvUser,
5100 pIoStorage->pStorage, uOffset,
5101 pvBuf, cbWrite, NULL);
5102 }
5103 else
5104 {
5105 pMetaXfer = (PVDMETAXFER)RTAvlrFileOffsetGet(pIoStorage->pTreeMetaXfers, uOffset);
5106 if (!pMetaXfer)
5107 {
5108 /* Allocate a new meta transfer. */
5109 pMetaXfer = vdMetaXferAlloc(pIoStorage, uOffset, cbWrite);
5110 if (!pMetaXfer)
5111 return VERR_NO_MEMORY;
5112 }
5113 else
5114 {
5115 Assert(pMetaXfer->cbMeta >= cbWrite);
5116 Assert(pMetaXfer->Core.Key == (RTFOFF)uOffset);
5117 fInTree = true;
5118 }
5119
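        /* No transfer in flight for this metadata; copy the data into the transfer buffer and start the write. */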
5120 if (VDMETAXFER_TXDIR_GET(pMetaXfer->fFlags) == VDMETAXFER_TXDIR_NONE)
5121 {
5122 pIoTask = vdIoTaskMetaAlloc(pIoStorage, pfnComplete, pvCompleteUser, pMetaXfer);
5123 if (!pIoTask)
5124 {
5125 RTMemFree(pMetaXfer);
5126 return VERR_NO_MEMORY;
5127 }
5128
5129 memcpy(pMetaXfer->abData, pvBuf, cbWrite);
5130 Seg.cbSeg = cbWrite;
5131 Seg.pvSeg = pMetaXfer->abData;
5132
5133 ASMAtomicIncU32(&pIoCtx->cMetaTransfersPending);
5134
5135 VDMETAXFER_TXDIR_SET(pMetaXfer->fFlags, VDMETAXFER_TXDIR_WRITE);
5136 rc = pVDIo->pInterfaceIo->pfnWriteAsync(pVDIo->pInterfaceIo->Core.pvUser,
5137 pIoStorage->pStorage,
5138 uOffset, &Seg, 1, cbWrite, pIoTask,
5139 &pvTask);
5140 if (RT_SUCCESS(rc))
5141 {
5142 VDMETAXFER_TXDIR_SET(pMetaXfer->fFlags, VDMETAXFER_TXDIR_NONE);
5143 ASMAtomicDecU32(&pIoCtx->cMetaTransfersPending);
5144 vdIoTaskFree(pDisk, pIoTask);
5145 if (fInTree && !pMetaXfer->cRefs)
5146 {
5147 LogFlow(("Removing meta xfer=%#p\n", pMetaXfer));
5148 bool fRemoved = RTAvlrFileOffsetRemove(pIoStorage->pTreeMetaXfers, pMetaXfer->Core.Key) != NULL;
5149 AssertMsg(fRemoved, ("Metadata transfer wasn't removed\n")); NOREF(fRemoved);
5150 RTMemFree(pMetaXfer);
5151 pMetaXfer = NULL;
5152 }
5153 }
5154 else if (rc == VERR_VD_ASYNC_IO_IN_PROGRESS)
5155 {
5156 PVDIOCTXDEFERRED pDeferred = (PVDIOCTXDEFERRED)RTMemAllocZ(sizeof(VDIOCTXDEFERRED));
5157 AssertPtr(pDeferred);
5158
5159 RTListInit(&pDeferred->NodeDeferred);
5160 pDeferred->pIoCtx = pIoCtx;
5161
5162 if (!fInTree)
5163 {
5164 bool fInserted = RTAvlrFileOffsetInsert(pIoStorage->pTreeMetaXfers, &pMetaXfer->Core);
5165 Assert(fInserted); NOREF(fInserted);
5166 }
5167
5168 RTListAppend(&pMetaXfer->ListIoCtxWaiting, &pDeferred->NodeDeferred);
5169 }
5170 else
5171 {
5172 RTMemFree(pMetaXfer);
5173 pMetaXfer = NULL;
5174 }
5175 }
5176 else
5177 {
5178 /* I/O is in progress, update shadow buffer and add to waiting list. */
5179 Assert(VDMETAXFER_TXDIR_GET(pMetaXfer->fFlags) == VDMETAXFER_TXDIR_WRITE);
5180 if (!pMetaXfer->pbDataShw)
5181 {
5182 /* Allocate shadow buffer and set initial state. */
5183 LogFlowFunc(("pMetaXfer=%#p Creating shadow buffer\n", pMetaXfer));
5184 pMetaXfer->pbDataShw = (uint8_t *)RTMemAlloc(pMetaXfer->cbMeta);
5185 if (RT_LIKELY(pMetaXfer->pbDataShw))
5186 memcpy(pMetaXfer->pbDataShw, pMetaXfer->abData, pMetaXfer->cbMeta);
5187 else
5188 rc = VERR_NO_MEMORY;
5189 }
5190
5191 if (RT_SUCCESS(rc))
5192 {
5193 /* Update with written data and append to waiting list. */
5194 PVDIOCTXDEFERRED pDeferred = (PVDIOCTXDEFERRED)RTMemAllocZ(sizeof(VDIOCTXDEFERRED));
5195 if (pDeferred)
5196 {
5197 LogFlowFunc(("pMetaXfer=%#p Updating shadow buffer\n", pMetaXfer));
5198
5199 RTListInit(&pDeferred->NodeDeferred);
5200 pDeferred->pIoCtx = pIoCtx;
5201 ASMAtomicIncU32(&pIoCtx->cMetaTransfersPending);
5202 memcpy(pMetaXfer->pbDataShw, pvBuf, cbWrite);
5203 RTListAppend(&pMetaXfer->ListIoCtxShwWrites, &pDeferred->NodeDeferred);
5204 }
5205 else
5206 {
5207 /*
5208 * Free shadow buffer if there is no one depending on it, i.e.
5209 * we just allocated it.
5210 */
5211 if (RTListIsEmpty(&pMetaXfer->ListIoCtxShwWrites))
5212 {
5213 RTMemFree(pMetaXfer->pbDataShw);
5214 pMetaXfer->pbDataShw = NULL;
5215 }
5216 rc = VERR_NO_MEMORY;
5217 }
5218 }
5219 }
5220 }
5221
5222 LogFlowFunc(("returns rc=%Rrc\n", rc));
5223 return rc;
5224}
5225
5226static DECLCALLBACK(void) vdIOIntMetaXferRelease(void *pvUser, PVDMETAXFER pMetaXfer)
5227{
5228 PVDIO pVDIo = (PVDIO)pvUser;
5229 PVBOXHDD pDisk = pVDIo->pDisk;
5230 PVDIOSTORAGE pIoStorage;
5231
5232 /*
5233 * It is possible that we get called with a NULL metadata xfer handle
5234 * for synchronous I/O. Just exit.
5235 */
5236 if (!pMetaXfer)
5237 return;
5238
5239 pIoStorage = pMetaXfer->pIoStorage;
5240
5241 VD_IS_LOCKED(pDisk);
5242
5243 Assert( VDMETAXFER_TXDIR_GET(pMetaXfer->fFlags) == VDMETAXFER_TXDIR_NONE
5244 || VDMETAXFER_TXDIR_GET(pMetaXfer->fFlags) == VDMETAXFER_TXDIR_WRITE);
5245 Assert(pMetaXfer->cRefs > 0);
5246
5247 pMetaXfer->cRefs--;
5248 if ( !pMetaXfer->cRefs
5249 && RTListIsEmpty(&pMetaXfer->ListIoCtxWaiting)
5250 && VDMETAXFER_TXDIR_GET(pMetaXfer->fFlags) == VDMETAXFER_TXDIR_NONE)
5251 {
5252 /* Free the meta data entry. */
5253 LogFlow(("Removing meta xfer=%#p\n", pMetaXfer));
5254 bool fRemoved = RTAvlrFileOffsetRemove(pIoStorage->pTreeMetaXfers, pMetaXfer->Core.Key) != NULL;
5255 AssertMsg(fRemoved, ("Metadata transfer wasn't removed\n")); NOREF(fRemoved);
5256
5257 RTMemFree(pMetaXfer);
5258 }
5259}
5260
5261static DECLCALLBACK(int) vdIOIntFlush(void *pvUser, PVDIOSTORAGE pIoStorage, PVDIOCTX pIoCtx,
5262 PFNVDXFERCOMPLETED pfnComplete, void *pvCompleteUser)
5263{
5264 PVDIO pVDIo = (PVDIO)pvUser;
5265 PVBOXHDD pDisk = pVDIo->pDisk;
5266 int rc = VINF_SUCCESS;
5267 PVDIOTASK pIoTask;
5268 PVDMETAXFER pMetaXfer = NULL;
5269 void *pvTask = NULL;
5270
5271 LogFlowFunc(("pvUser=%#p pIoStorage=%#p pIoCtx=%#p\n",
5272 pvUser, pIoStorage, pIoCtx));
5273
5274 AssertMsgReturn( pIoCtx
5275 || (!pfnComplete && !pvCompleteUser),
5276                     ("A synchronous flush is requested but the parameters are wrong\n"),
5277 VERR_INVALID_POINTER);
5278
5279 /** @todo Enable check for sync I/O later. */
5280 if ( pIoCtx
5281 && !(pIoCtx->fFlags & VDIOCTX_FLAGS_SYNC))
5282 VD_IS_LOCKED(pDisk);
5283
5284 if (pVDIo->fIgnoreFlush)
5285 return VINF_SUCCESS;
5286
5287 if ( !pIoCtx
5288 || pIoCtx->fFlags & VDIOCTX_FLAGS_SYNC)
5289 {
5290 /* Handle synchronous flushes. */
5291 /** @todo Integrate with metadata transfers below. */
5292 rc = pVDIo->pInterfaceIo->pfnFlushSync(pVDIo->pInterfaceIo->Core.pvUser,
5293 pIoStorage->pStorage);
5294 }
5295 else
5296 {
5297 /* Allocate a new meta transfer. */
5298 pMetaXfer = vdMetaXferAlloc(pIoStorage, 0, 0);
5299 if (!pMetaXfer)
5300 return VERR_NO_MEMORY;
5301
5302 pIoTask = vdIoTaskMetaAlloc(pIoStorage, pfnComplete, pvUser, pMetaXfer);
5303 if (!pIoTask)
5304 {
5305 RTMemFree(pMetaXfer);
5306 return VERR_NO_MEMORY;
5307 }
5308
5309 ASMAtomicIncU32(&pIoCtx->cMetaTransfersPending);
5310
5311 PVDIOCTXDEFERRED pDeferred = (PVDIOCTXDEFERRED)RTMemAllocZ(sizeof(VDIOCTXDEFERRED));
5312 AssertPtr(pDeferred);
5313
5314 RTListInit(&pDeferred->NodeDeferred);
5315 pDeferred->pIoCtx = pIoCtx;
5316
5317 RTListAppend(&pMetaXfer->ListIoCtxWaiting, &pDeferred->NodeDeferred);
5318 VDMETAXFER_TXDIR_SET(pMetaXfer->fFlags, VDMETAXFER_TXDIR_FLUSH);
5319 rc = pVDIo->pInterfaceIo->pfnFlushAsync(pVDIo->pInterfaceIo->Core.pvUser,
5320 pIoStorage->pStorage,
5321 pIoTask, &pvTask);
5322 if (RT_SUCCESS(rc))
5323 {
5324 VDMETAXFER_TXDIR_SET(pMetaXfer->fFlags, VDMETAXFER_TXDIR_NONE);
5325 ASMAtomicDecU32(&pIoCtx->cMetaTransfersPending);
5326 vdIoTaskFree(pDisk, pIoTask);
5327 RTMemFree(pDeferred);
5328 RTMemFree(pMetaXfer);
5329 }
5330 else if (rc != VERR_VD_ASYNC_IO_IN_PROGRESS)
5331 RTMemFree(pMetaXfer);
5332 }
5333
5334 LogFlowFunc(("returns rc=%Rrc\n", rc));
5335 return rc;
5336}
5337
5338static DECLCALLBACK(size_t) vdIOIntIoCtxCopyTo(void *pvUser, PVDIOCTX pIoCtx,
5339 const void *pvBuf, size_t cbBuf)
5340{
5341 PVDIO pVDIo = (PVDIO)pvUser;
5342 PVBOXHDD pDisk = pVDIo->pDisk;
5343 size_t cbCopied = 0;
5344
5345 /** @todo Enable check for sync I/O later. */
5346 if (!(pIoCtx->fFlags & VDIOCTX_FLAGS_SYNC))
5347 VD_IS_LOCKED(pDisk);
5348
5349 cbCopied = vdIoCtxCopyTo(pIoCtx, (uint8_t *)pvBuf, cbBuf);
5350 Assert(cbCopied == cbBuf);
5351
5352 /// @todo Assert(pIoCtx->Req.Io.cbTransferLeft >= cbCopied); - triggers with vdCopyHelper/dmgRead.
5353 ASMAtomicSubU32(&pIoCtx->Req.Io.cbTransferLeft, (uint32_t)cbCopied);
5354
5355 return cbCopied;
5356}
5357
5358static DECLCALLBACK(size_t) vdIOIntIoCtxCopyFrom(void *pvUser, PVDIOCTX pIoCtx,
5359 void *pvBuf, size_t cbBuf)
5360{
5361 PVDIO pVDIo = (PVDIO)pvUser;
5362 PVBOXHDD pDisk = pVDIo->pDisk;
5363 size_t cbCopied = 0;
5364
5365 /** @todo Enable check for sync I/O later. */
5366 if (!(pIoCtx->fFlags & VDIOCTX_FLAGS_SYNC))
5367 VD_IS_LOCKED(pDisk);
5368
5369 cbCopied = vdIoCtxCopyFrom(pIoCtx, (uint8_t *)pvBuf, cbBuf);
5370 Assert(cbCopied == cbBuf);
5371
5372 /// @todo Assert(pIoCtx->Req.Io.cbTransferLeft > cbCopied); - triggers with vdCopyHelper/dmgRead.
5373 ASMAtomicSubU32(&pIoCtx->Req.Io.cbTransferLeft, (uint32_t)cbCopied);
5374
5375 return cbCopied;
5376}
5377
5378static DECLCALLBACK(size_t) vdIOIntIoCtxSet(void *pvUser, PVDIOCTX pIoCtx, int ch, size_t cb)
5379{
5380 PVDIO pVDIo = (PVDIO)pvUser;
5381 PVBOXHDD pDisk = pVDIo->pDisk;
5382 size_t cbSet = 0;
5383
5384 /** @todo Enable check for sync I/O later. */
5385 if (!(pIoCtx->fFlags & VDIOCTX_FLAGS_SYNC))
5386 VD_IS_LOCKED(pDisk);
5387
5388 cbSet = vdIoCtxSet(pIoCtx, ch, cb);
5389 Assert(cbSet == cb);
5390
5391 /// @todo Assert(pIoCtx->Req.Io.cbTransferLeft >= cbSet); - triggers with vdCopyHelper/dmgRead.
5392 ASMAtomicSubU32(&pIoCtx->Req.Io.cbTransferLeft, (uint32_t)cbSet);
5393
5394 return cbSet;
5395}
5396
5397static DECLCALLBACK(size_t) vdIOIntIoCtxSegArrayCreate(void *pvUser, PVDIOCTX pIoCtx,
5398 PRTSGSEG paSeg, unsigned *pcSeg,
5399 size_t cbData)
5400{
5401 PVDIO pVDIo = (PVDIO)pvUser;
5402 PVBOXHDD pDisk = pVDIo->pDisk;
5403 size_t cbCreated = 0;
5404
5405 /** @todo It is possible that this gets called from a filter plugin
5406 * outside of the disk lock. Refine assertion or remove completely. */
5407#if 0
5408 /** @todo Enable check for sync I/O later. */
5409 if (!(pIoCtx->fFlags & VDIOCTX_FLAGS_SYNC))
5410 VD_IS_LOCKED(pDisk);
5411#else
5412 NOREF(pDisk);
5413#endif
5414
5415 cbCreated = RTSgBufSegArrayCreate(&pIoCtx->Req.Io.SgBuf, paSeg, pcSeg, cbData);
5416 Assert(!paSeg || cbData == cbCreated);
5417
5418 return cbCreated;
5419}
5420
5421static DECLCALLBACK(void) vdIOIntIoCtxCompleted(void *pvUser, PVDIOCTX pIoCtx, int rcReq,
5422 size_t cbCompleted)
5423{
5424 PVDIO pVDIo = (PVDIO)pvUser;
5425 PVBOXHDD pDisk = pVDIo->pDisk;
5426
5427 LogFlowFunc(("pvUser=%#p pIoCtx=%#p rcReq=%Rrc cbCompleted=%zu\n",
5428 pvUser, pIoCtx, rcReq, cbCompleted));
5429
5430 /*
5431 * Grab the disk critical section to avoid races with other threads which
5432 * might still modify the I/O context.
5433 * Example is that iSCSI is doing an asynchronous write but calls us already
5434 * while the other thread is still hanging in vdWriteHelperAsync and couldn't update
5435 * the blocked state yet.
5436 * It can overwrite the state to true before we call vdIoCtxContinue and
5437 * the request would hang indefinitely.
5438 */
5439 ASMAtomicCmpXchgS32(&pIoCtx->rcReq, rcReq, VINF_SUCCESS);
5440 Assert(pIoCtx->Req.Io.cbTransferLeft >= cbCompleted);
5441 ASMAtomicSubU32(&pIoCtx->Req.Io.cbTransferLeft, (uint32_t)cbCompleted);
5442
5443 /* Set next transfer function if the current one finished.
5444 * @todo: Find a better way to prevent vdIoCtxContinue from calling the current helper again. */
5445 if (!pIoCtx->Req.Io.cbTransferLeft)
5446 {
5447 pIoCtx->pfnIoCtxTransfer = pIoCtx->pfnIoCtxTransferNext;
5448 pIoCtx->pfnIoCtxTransferNext = NULL;
5449 }
5450
5451 vdIoCtxAddToWaitingList(&pDisk->pIoCtxHaltedHead, pIoCtx);
5452 if (ASMAtomicCmpXchgBool(&pDisk->fLocked, true, false))
5453 {
5454 /* Immediately drop the lock again, it will take care of processing the list. */
5455 vdDiskUnlock(pDisk, NULL);
5456 }
5457}
5458
5459static DECLCALLBACK(bool) vdIOIntIoCtxIsSynchronous(void *pvUser, PVDIOCTX pIoCtx)
5460{
5461 NOREF(pvUser);
5462 return !!(pIoCtx->fFlags & VDIOCTX_FLAGS_SYNC);
5463}
5464
5465static DECLCALLBACK(bool) vdIOIntIoCtxIsZero(void *pvUser, PVDIOCTX pIoCtx, size_t cbCheck,
5466 bool fAdvance)
5467{
5468 NOREF(pvUser);
5469
5470 bool fIsZero = RTSgBufIsZero(&pIoCtx->Req.Io.SgBuf, cbCheck);
5471 if (fIsZero && fAdvance)
5472 RTSgBufAdvance(&pIoCtx->Req.Io.SgBuf, cbCheck);
5473
5474 return fIsZero;
5475}
5476
5477static DECLCALLBACK(size_t) vdIOIntIoCtxGetDataUnitSize(void *pvUser, PVDIOCTX pIoCtx)
5478{
5479 RT_NOREF1(pIoCtx);
5480 PVDIO pVDIo = (PVDIO)pvUser;
5481 PVBOXHDD pDisk = pVDIo->pDisk;
5482
5483 PVDIMAGE pImage = vdGetImageByNumber(pDisk, VD_LAST_IMAGE);
5484 AssertPtrReturn(pImage, 0);
5485 return pImage->Backend->pfnGetSectorSize(pImage->pBackendData);
5486}
5487
5488/**
5489 * VD I/O interface callback for opening a file (limited version for VDGetFormat).
5490 */
5491static DECLCALLBACK(int) vdIOIntOpenLimited(void *pvUser, const char *pszLocation,
5492 uint32_t fOpen, PPVDIOSTORAGE ppIoStorage)
5493{
5494 int rc = VINF_SUCCESS;
5495 PVDINTERFACEIO pInterfaceIo = (PVDINTERFACEIO)pvUser;
5496 PVDIOSTORAGE pIoStorage = (PVDIOSTORAGE)RTMemAllocZ(sizeof(VDIOSTORAGE));
5497
5498 if (!pIoStorage)
5499 return VERR_NO_MEMORY;
5500
5501 rc = pInterfaceIo->pfnOpen(NULL, pszLocation, fOpen, NULL, &pIoStorage->pStorage);
5502 if (RT_SUCCESS(rc))
5503 *ppIoStorage = pIoStorage;
5504 else
5505 RTMemFree(pIoStorage);
5506
5507 return rc;
5508}
5509
5510static DECLCALLBACK(int) vdIOIntCloseLimited(void *pvUser, PVDIOSTORAGE pIoStorage)
5511{
5512 PVDINTERFACEIO pInterfaceIo = (PVDINTERFACEIO)pvUser;
5513 int rc = pInterfaceIo->pfnClose(NULL, pIoStorage->pStorage);
5514
5515 RTMemFree(pIoStorage);
5516 return rc;
5517}
5518
5519static DECLCALLBACK(int) vdIOIntDeleteLimited(void *pvUser, const char *pcszFilename)
5520{
5521 PVDINTERFACEIO pInterfaceIo = (PVDINTERFACEIO)pvUser;
5522 return pInterfaceIo->pfnDelete(NULL, pcszFilename);
5523}
5524
5525static DECLCALLBACK(int) vdIOIntMoveLimited(void *pvUser, const char *pcszSrc,
5526 const char *pcszDst, unsigned fMove)
5527{
5528 PVDINTERFACEIO pInterfaceIo = (PVDINTERFACEIO)pvUser;
5529 return pInterfaceIo->pfnMove(NULL, pcszSrc, pcszDst, fMove);
5530}
5531
5532static DECLCALLBACK(int) vdIOIntGetFreeSpaceLimited(void *pvUser, const char *pcszFilename,
5533 int64_t *pcbFreeSpace)
5534{
5535 PVDINTERFACEIO pInterfaceIo = (PVDINTERFACEIO)pvUser;
5536 return pInterfaceIo->pfnGetFreeSpace(NULL, pcszFilename, pcbFreeSpace);
5537}
5538
5539static DECLCALLBACK(int) vdIOIntGetModificationTimeLimited(void *pvUser,
5540 const char *pcszFilename,
5541 PRTTIMESPEC pModificationTime)
5542{
5543 PVDINTERFACEIO pInterfaceIo = (PVDINTERFACEIO)pvUser;
5544 return pInterfaceIo->pfnGetModificationTime(NULL, pcszFilename, pModificationTime);
5545}
5546
5547static DECLCALLBACK(int) vdIOIntGetSizeLimited(void *pvUser, PVDIOSTORAGE pIoStorage,
5548 uint64_t *pcbSize)
5549{
5550 PVDINTERFACEIO pInterfaceIo = (PVDINTERFACEIO)pvUser;
5551 return pInterfaceIo->pfnGetSize(NULL, pIoStorage->pStorage, pcbSize);
5552}
5553
5554static DECLCALLBACK(int) vdIOIntSetSizeLimited(void *pvUser, PVDIOSTORAGE pIoStorage,
5555 uint64_t cbSize)
5556{
5557 PVDINTERFACEIO pInterfaceIo = (PVDINTERFACEIO)pvUser;
5558 return pInterfaceIo->pfnSetSize(NULL, pIoStorage->pStorage, cbSize);
5559}
5560
5561static DECLCALLBACK(int) vdIOIntWriteUserLimited(void *pvUser, PVDIOSTORAGE pStorage,
5562 uint64_t uOffset, PVDIOCTX pIoCtx,
5563 size_t cbWrite,
5564 PFNVDXFERCOMPLETED pfnComplete,
5565 void *pvCompleteUser)
5566{
5567 NOREF(pvUser);
5568 NOREF(pStorage);
5569 NOREF(uOffset);
5570 NOREF(pIoCtx);
5571 NOREF(cbWrite);
5572 NOREF(pfnComplete);
5573 NOREF(pvCompleteUser);
5574 AssertMsgFailedReturn(("This needs to be implemented when called\n"), VERR_NOT_IMPLEMENTED);
5575}
5576
5577static DECLCALLBACK(int) vdIOIntReadUserLimited(void *pvUser, PVDIOSTORAGE pStorage,
5578 uint64_t uOffset, PVDIOCTX pIoCtx,
5579 size_t cbRead)
5580{
5581 NOREF(pvUser);
5582 NOREF(pStorage);
5583 NOREF(uOffset);
5584 NOREF(pIoCtx);
5585 NOREF(cbRead);
5586 AssertMsgFailedReturn(("This needs to be implemented when called\n"), VERR_NOT_IMPLEMENTED);
5587}
5588
5589static DECLCALLBACK(int) vdIOIntWriteMetaLimited(void *pvUser, PVDIOSTORAGE pStorage,
5590 uint64_t uOffset, const void *pvBuffer,
5591 size_t cbBuffer, PVDIOCTX pIoCtx,
5592 PFNVDXFERCOMPLETED pfnComplete,
5593 void *pvCompleteUser)
5594{
5595 PVDINTERFACEIO pInterfaceIo = (PVDINTERFACEIO)pvUser;
5596
5597 AssertMsgReturn(!pIoCtx && !pfnComplete && !pvCompleteUser,
5598 ("Async I/O not implemented for the limited interface"),
5599 VERR_NOT_SUPPORTED);
5600
5601 return pInterfaceIo->pfnWriteSync(NULL, pStorage->pStorage, uOffset, pvBuffer, cbBuffer, NULL);
5602}
5603
5604static DECLCALLBACK(int) vdIOIntReadMetaLimited(void *pvUser, PVDIOSTORAGE pStorage,
5605 uint64_t uOffset, void *pvBuffer,
5606 size_t cbBuffer, PVDIOCTX pIoCtx,
5607 PPVDMETAXFER ppMetaXfer,
5608 PFNVDXFERCOMPLETED pfnComplete,
5609 void *pvCompleteUser)
5610{
5611 PVDINTERFACEIO pInterfaceIo = (PVDINTERFACEIO)pvUser;
5612
5613 AssertMsgReturn(!pIoCtx && !ppMetaXfer && !pfnComplete && !pvCompleteUser,
5614 ("Async I/O not implemented for the limited interface"),
5615 VERR_NOT_SUPPORTED);
5616
5617 return pInterfaceIo->pfnReadSync(NULL, pStorage->pStorage, uOffset, pvBuffer, cbBuffer, NULL);
5618}
5619
5620#if 0 /* unused */
5621static int vdIOIntMetaXferReleaseLimited(void *pvUser, PVDMETAXFER pMetaXfer)
5622{
5623 /* This is a NOP in this case. */
5624 NOREF(pvUser);
5625 NOREF(pMetaXfer);
5626 return VINF_SUCCESS;
5627}
5628#endif
5629
5630static DECLCALLBACK(int) vdIOIntFlushLimited(void *pvUser, PVDIOSTORAGE pStorage,
5631 PVDIOCTX pIoCtx,
5632 PFNVDXFERCOMPLETED pfnComplete,
5633 void *pvCompleteUser)
5634{
5635 PVDINTERFACEIO pInterfaceIo = (PVDINTERFACEIO)pvUser;
5636
5637 AssertMsgReturn(!pIoCtx && !pfnComplete && !pvCompleteUser,
5638 ("Async I/O not implemented for the limited interface"),
5639 VERR_NOT_SUPPORTED);
5640
5641 return pInterfaceIo->pfnFlushSync(NULL, pStorage->pStorage);
5642}
5643
5644/**
5645 * internal: send output to the log (unconditionally).
5646 */
5647static DECLCALLBACK(int) vdLogMessage(void *pvUser, const char *pszFormat, va_list args)
5648{
5649 NOREF(pvUser);
5650 RTLogPrintfV(pszFormat, args);
5651 return VINF_SUCCESS;
5652}
5653
5654DECLINLINE(int) vdMessageWrapper(PVBOXHDD pDisk, const char *pszFormat, ...)
5655{
5656 va_list va;
5657 va_start(va, pszFormat);
5658 int rc = pDisk->pInterfaceError->pfnMessage(pDisk->pInterfaceError->Core.pvUser,
5659 pszFormat, va);
5660 va_end(va);
5661 return rc;
5662}
5663
5664
5665/**
5666 * internal: adjust PCHS geometry
5667 */
5668static void vdFixupPCHSGeometry(PVDGEOMETRY pPCHS, uint64_t cbSize)
5669{
5670 /* Fix broken PCHS geometry. Can happen for two reasons: either the backend
5671 * mixes up PCHS and LCHS, or the application used to create the source
5672 * image has put garbage in it. Additionally, if the PCHS geometry covers
5673 * more than the image size, set it back to the default. */
5674 if ( pPCHS->cHeads > 16
5675 || pPCHS->cSectors > 63
5676 || pPCHS->cCylinders == 0
5677 || (uint64_t)pPCHS->cHeads * pPCHS->cSectors * pPCHS->cCylinders * 512 > cbSize)
5678 {
5679 Assert(!(RT_MIN(cbSize / 512 / 16 / 63, 16383) - (uint32_t)RT_MIN(cbSize / 512 / 16 / 63, 16383)));
5680 pPCHS->cCylinders = (uint32_t)RT_MIN(cbSize / 512 / 16 / 63, 16383);
5681 pPCHS->cHeads = 16;
5682 pPCHS->cSectors = 63;
5683 }
5684}
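/*
 * Worked example (illustrative only): for a 1 GiB image with a bogus PCHS
 * geometry, the fallback above uses 16 heads and 63 sectors and derives the
 * cylinder count from the image size, clipped to the ATA limit of 16383:
 *
 *     cCylinders = min(1 GiB / 512 / 16 / 63, 16383) = min(2080, 16383) = 2080
 *
 * yielding a geometry of 2080/16/63.
 */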
5685
5686/**
5687 * internal: adjust LCHS geometry
5688 */
5689static void vdFixupLCHSGeometry(PVDGEOMETRY pLCHS, uint64_t cbSize)
5690{
5691 /* Fix broken LCHS geometry. Can happen for two reasons: either the backend
5692 * mixes up PCHS and LCHS, or the application used to create the source
5693 * image has put garbage in it. The fix in this case is to clear the LCHS
5694 * geometry to trigger autodetection when it is used next. If the geometry
5695 * already says "please autodetect" (cylinders=0) keep it. */
5696 if ( ( pLCHS->cHeads > 255
5697 || pLCHS->cHeads == 0
5698 || pLCHS->cSectors > 63
5699 || pLCHS->cSectors == 0)
5700 && pLCHS->cCylinders != 0)
5701 {
5702 pLCHS->cCylinders = 0;
5703 pLCHS->cHeads = 0;
5704 pLCHS->cSectors = 0;
5705 }
5706 /* Always recompute the number of cylinders stored in the LCHS
5707 * geometry if it isn't set to "autodetect" at the moment.
5708 * This is very useful if the destination image size is
5709 * larger or smaller than the source image size. Do not modify
5710 * the number of heads and sectors. Windows guests hate it. */
5711 if ( pLCHS->cCylinders != 0
5712 && pLCHS->cHeads != 0 /* paranoia */
5713 && pLCHS->cSectors != 0 /* paranoia */)
5714 {
5715 Assert(!(RT_MIN(cbSize / 512 / pLCHS->cHeads / pLCHS->cSectors, 1024) - (uint32_t)RT_MIN(cbSize / 512 / pLCHS->cHeads / pLCHS->cSectors, 1024)));
5716 pLCHS->cCylinders = (uint32_t)RT_MIN(cbSize / 512 / pLCHS->cHeads / pLCHS->cSectors, 1024);
5717 }
5718}
5719
5720/**
5721 * Sets the I/O callbacks of the given interface to the fallback methods
5722 *
5723 * @returns nothing.
5724 * @param pIfIo The I/O interface to setup.
5725 */
5726static void vdIfIoFallbackCallbacksSetup(PVDINTERFACEIO pIfIo)
5727{
5728 pIfIo->pfnOpen = vdIOOpenFallback;
5729 pIfIo->pfnClose = vdIOCloseFallback;
5730 pIfIo->pfnDelete = vdIODeleteFallback;
5731 pIfIo->pfnMove = vdIOMoveFallback;
5732 pIfIo->pfnGetFreeSpace = vdIOGetFreeSpaceFallback;
5733 pIfIo->pfnGetModificationTime = vdIOGetModificationTimeFallback;
5734 pIfIo->pfnGetSize = vdIOGetSizeFallback;
5735 pIfIo->pfnSetSize = vdIOSetSizeFallback;
5736 pIfIo->pfnSetAllocationSize = vdIOSetAllocationSizeFallback;
5737 pIfIo->pfnReadSync = vdIOReadSyncFallback;
5738 pIfIo->pfnWriteSync = vdIOWriteSyncFallback;
5739 pIfIo->pfnFlushSync = vdIOFlushSyncFallback;
5740 pIfIo->pfnReadAsync = vdIOReadAsyncFallback;
5741 pIfIo->pfnWriteAsync = vdIOWriteAsyncFallback;
5742 pIfIo->pfnFlushAsync = vdIOFlushAsyncFallback;
5743}
5744
5745/**
5746 * Sets the internal I/O callbacks of the given interface.
5747 *
5748 * @returns nothing.
5749 * @param pIfIoInt The internal I/O interface to setup.
5750 */
5751static void vdIfIoIntCallbacksSetup(PVDINTERFACEIOINT pIfIoInt)
5752{
5753 pIfIoInt->pfnOpen = vdIOIntOpen;
5754 pIfIoInt->pfnClose = vdIOIntClose;
5755 pIfIoInt->pfnDelete = vdIOIntDelete;
5756 pIfIoInt->pfnMove = vdIOIntMove;
5757 pIfIoInt->pfnGetFreeSpace = vdIOIntGetFreeSpace;
5758 pIfIoInt->pfnGetModificationTime = vdIOIntGetModificationTime;
5759 pIfIoInt->pfnGetSize = vdIOIntGetSize;
5760 pIfIoInt->pfnSetSize = vdIOIntSetSize;
5761 pIfIoInt->pfnSetAllocationSize = vdIOIntSetAllocationSize;
5762 pIfIoInt->pfnReadUser = vdIOIntReadUser;
5763 pIfIoInt->pfnWriteUser = vdIOIntWriteUser;
5764 pIfIoInt->pfnReadMeta = vdIOIntReadMeta;
5765 pIfIoInt->pfnWriteMeta = vdIOIntWriteMeta;
5766 pIfIoInt->pfnMetaXferRelease = vdIOIntMetaXferRelease;
5767 pIfIoInt->pfnFlush = vdIOIntFlush;
5768 pIfIoInt->pfnIoCtxCopyFrom = vdIOIntIoCtxCopyFrom;
5769 pIfIoInt->pfnIoCtxCopyTo = vdIOIntIoCtxCopyTo;
5770 pIfIoInt->pfnIoCtxSet = vdIOIntIoCtxSet;
5771 pIfIoInt->pfnIoCtxSegArrayCreate = vdIOIntIoCtxSegArrayCreate;
5772 pIfIoInt->pfnIoCtxCompleted = vdIOIntIoCtxCompleted;
5773 pIfIoInt->pfnIoCtxIsSynchronous = vdIOIntIoCtxIsSynchronous;
5774 pIfIoInt->pfnIoCtxIsZero = vdIOIntIoCtxIsZero;
5775 pIfIoInt->pfnIoCtxGetDataUnitSize = vdIOIntIoCtxGetDataUnitSize;
5776}
5777
5778/**
5779 * Internally used completion handler for synchronous I/O contexts.
5780 */
5781static DECLCALLBACK(void) vdIoCtxSyncComplete(void *pvUser1, void *pvUser2, int rcReq)
5782{
5783 RT_NOREF2(pvUser1, rcReq);
5784 RTSEMEVENT hEvent = (RTSEMEVENT)pvUser2;
5785
5786 RTSemEventSignal(hEvent);
5787}
5788
5789/**
5790 * Creates a new region list from the given one, converting it to match the flags if necessary.
5791 *
5792 * @returns VBox status code.
5793 * @param pRegionList The region list to convert from.
5794 * @param fFlags The flags for the new region list.
5795 * @param ppRegionList Where to store the new region list on success.
5796 */
5797static int vdRegionListConv(PCVDREGIONLIST pRegionList, uint32_t fFlags, PPVDREGIONLIST ppRegionList)
5798{
5799 int rc = VINF_SUCCESS;
5800 PVDREGIONLIST pRegionListNew = (PVDREGIONLIST)RTMemDup(pRegionList, RT_UOFFSETOF(VDREGIONLIST, aRegions[pRegionList->cRegions]));
5801 if (RT_LIKELY(pRegionListNew))
5802 {
5803 /* Do we have to convert anything? */
5804 if (pRegionList->fFlags != fFlags)
5805 {
5806 uint64_t offRegionNext = 0;
5807
5808 pRegionListNew->fFlags = fFlags;
5809 for (unsigned i = 0; i < pRegionListNew->cRegions; i++)
5810 {
5811 PVDREGIONDESC pRegion = &pRegionListNew->aRegions[i];
5812
5813 if ( (fFlags & VD_REGION_LIST_F_LOC_SIZE_BLOCKS)
5814 && !(pRegionList->fFlags & VD_REGION_LIST_F_LOC_SIZE_BLOCKS))
5815 {
5816 Assert(!(pRegion->cRegionBlocksOrBytes % pRegion->cbBlock));
5817
5818 /* Convert from bytes to logical blocks. */
5819 pRegion->offRegion = offRegionNext;
5820 pRegion->cRegionBlocksOrBytes = pRegion->cRegionBlocksOrBytes / pRegion->cbBlock;
5821 offRegionNext += pRegion->cRegionBlocksOrBytes;
5822 }
5823 else
5824 {
5825 /* Convert from logical blocks to bytes. */
5826 pRegion->offRegion = offRegionNext;
5827 pRegion->cRegionBlocksOrBytes = pRegion->cRegionBlocksOrBytes * pRegion->cbBlock;
5828 offRegionNext += pRegion->cRegionBlocksOrBytes;
5829 }
5830 }
5831 }
5832
5833 *ppRegionList = pRegionListNew;
5834 }
5835 else
5836 rc = VERR_NO_MEMORY;
5837
5838 return rc;
5839}
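/*
 * Conversion example (illustrative only, numbers are made up): a region list
 * given in byte units with a single 1 MiB region of 512 byte blocks, when
 * converted to VD_REGION_LIST_F_LOC_SIZE_BLOCKS, ends up with
 * cRegionBlocksOrBytes = 1 MiB / 512 = 2048 blocks; converting the other way
 * multiplies by cbBlock again. offRegion is rebuilt cumulatively in the target
 * unit, so consecutive regions stay contiguous after the conversion.
 */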
5840
5841/**
5842 * Initializes HDD backends.
5843 *
5844 * @returns VBox status code.
5845 */
5846VBOXDDU_DECL(int) VDInit(void)
5847{
5848 int rc = vdAddBackends(NIL_RTLDRMOD, aStaticBackends, RT_ELEMENTS(aStaticBackends));
5849 if (RT_SUCCESS(rc))
5850 {
5851 rc = vdAddCacheBackends(NIL_RTLDRMOD, aStaticCacheBackends, RT_ELEMENTS(aStaticCacheBackends));
5852 if (RT_SUCCESS(rc))
5853 {
5854 RTListInit(&g_ListPluginsLoaded);
5855 rc = vdLoadDynamicBackends();
5856 }
5857 }
5858 LogRel(("VD: VDInit finished\n"));
5859 return rc;
5860}
5861
5862/**
5863 * Destroys loaded HDD backends.
5864 *
5865 * @returns VBox status code.
5866 */
5867VBOXDDU_DECL(int) VDShutdown(void)
5868{
5869 if (!g_apBackends)
5870 return VERR_INTERNAL_ERROR;
5871
5872 if (g_apCacheBackends)
5873 RTMemFree(g_apCacheBackends);
5874 RTMemFree(g_apBackends);
5875
5876 g_cBackends = 0;
5877 g_apBackends = NULL;
5878
5879 /* Clear the supported cache backends. */
5880 g_cCacheBackends = 0;
5881 g_apCacheBackends = NULL;
5882
5883#ifndef VBOX_HDD_NO_DYNAMIC_BACKENDS
5884 PVDPLUGIN pPlugin, pPluginNext;
5885 RTListForEachSafe(&g_ListPluginsLoaded, pPlugin, pPluginNext, VDPLUGIN, NodePlugin)
5886 {
5887 RTLdrClose(pPlugin->hPlugin);
5888 RTStrFree(pPlugin->pszFilename);
5889 RTListNodeRemove(&pPlugin->NodePlugin);
5890 RTMemFree(pPlugin);
5891 }
5892#endif
5893
5894 return VINF_SUCCESS;
5895}
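/*
 * Usage sketch (illustrative only, error handling trimmed): the typical
 * library lifecycle as seen by a caller. VDInit() registers the static image
 * and cache backends and loads the dynamic plugins, VDShutdown() releases them.
 *
 *     int rc = VDInit();
 *     if (RT_SUCCESS(rc))
 *     {
 *         // ... create containers, open images, do I/O ...
 *         rc = VDShutdown();
 *     }
 */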
5896
5897/**
5898 * Loads a single plugin given by filename.
5899 *
5900 * @returns VBox status code.
5901 * @param pszFilename The plugin filename to load.
5902 */
5903VBOXDDU_DECL(int) VDPluginLoadFromFilename(const char *pszFilename)
5904{
5905 if (!g_apBackends)
5906 {
5907 int rc = VDInit();
5908 if (RT_FAILURE(rc))
5909 return rc;
5910 }
5911
5912 return vdPluginLoadFromFilename(pszFilename);
5913}
5914
5915/**
5916 * Loads all plugins from a given path.
5917 *
5918 * @returns VBox status code.
5919 * @param pszPath The path to load plugins from.
5920 */
5921VBOXDDU_DECL(int) VDPluginLoadFromPath(const char *pszPath)
5922{
5923 if (!g_apBackends)
5924 {
5925 int rc = VDInit();
5926 if (RT_FAILURE(rc))
5927 return rc;
5928 }
5929
5930 return vdPluginLoadFromPath(pszPath);
5931}
5932
5933/**
5934 * Unloads a single plugin given by filename.
5935 *
5936 * @returns VBox status code.
5937 * @param pszFilename The plugin filename to unload.
5938 */
5939VBOXDDU_DECL(int) VDPluginUnloadFromFilename(const char *pszFilename)
5940{
5941 if (!g_apBackends)
5942 {
5943 int rc = VDInit();
5944 if (RT_FAILURE(rc))
5945 return rc;
5946 }
5947
5948 return vdPluginUnloadFromFilename(pszFilename);
5949}
5950
5951/**
5952 * Unloads all plugins from a given path.
5953 *
5954 * @returns VBox status code.
5955 * @param pszPath The path to unload plugins from.
5956 */
5957VBOXDDU_DECL(int) VDPluginUnloadFromPath(const char *pszPath)
5958{
5959 if (!g_apBackends)
5960 {
5961 int rc = VDInit();
5962 if (RT_FAILURE(rc))
5963 return rc;
5964 }
5965
5966 return vdPluginUnloadFromPath(pszPath);
5967}
5968
5969/**
5970 * Lists all HDD backends and their capabilities in a caller-provided buffer.
5971 *
5972 * @returns VBox status code.
5973 * VERR_BUFFER_OVERFLOW if not enough space is passed.
5974 * @param cEntriesAlloc Number of list entries available.
5975 * @param pEntries Pointer to array for the entries.
5976 * @param pcEntriesUsed Number of entries returned.
5977 */
5978VBOXDDU_DECL(int) VDBackendInfo(unsigned cEntriesAlloc, PVDBACKENDINFO pEntries,
5979 unsigned *pcEntriesUsed)
5980{
5981 int rc = VINF_SUCCESS;
5982
5983 LogFlowFunc(("cEntriesAlloc=%u pEntries=%#p pcEntriesUsed=%#p\n", cEntriesAlloc, pEntries, pcEntriesUsed));
5984 /* Check arguments. */
5985 AssertMsgReturn(cEntriesAlloc,
5986 ("cEntriesAlloc=%u\n", cEntriesAlloc),
5987 VERR_INVALID_PARAMETER);
5988 AssertMsgReturn(VALID_PTR(pEntries),
5989 ("pEntries=%#p\n", pEntries),
5990 VERR_INVALID_PARAMETER);
5991 AssertMsgReturn(VALID_PTR(pcEntriesUsed),
5992 ("pcEntriesUsed=%#p\n", pcEntriesUsed),
5993 VERR_INVALID_PARAMETER);
5994 if (!g_apBackends)
5995 VDInit();
5996
5997 if (cEntriesAlloc < g_cBackends)
5998 {
5999 *pcEntriesUsed = g_cBackends;
6000 return VERR_BUFFER_OVERFLOW;
6001 }
6002
6003 for (unsigned i = 0; i < g_cBackends; i++)
6004 {
6005 pEntries[i].pszBackend = g_apBackends[i]->pszBackendName;
6006 pEntries[i].uBackendCaps = g_apBackends[i]->uBackendCaps;
6007 pEntries[i].paFileExtensions = g_apBackends[i]->paFileExtensions;
6008 pEntries[i].paConfigInfo = g_apBackends[i]->paConfigInfo;
6009 pEntries[i].pfnComposeLocation = g_apBackends[i]->pfnComposeLocation;
6010 pEntries[i].pfnComposeName = g_apBackends[i]->pfnComposeName;
6011 }
6012
6013 LogFlowFunc(("returns %Rrc *pcEntriesUsed=%u\n", rc, g_cBackends));
6014 *pcEntriesUsed = g_cBackends;
6015 return rc;
6016}
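/*
 * Usage sketch (illustrative only, error handling trimmed): enumerating the
 * registered backends with the two-call pattern implied by
 * VERR_BUFFER_OVERFLOW. The initial size of 8 entries is an arbitrary choice
 * for this example.
 *
 *     unsigned cEntries = 8, cUsed = 0;
 *     PVDBACKENDINFO paEntries = (PVDBACKENDINFO)RTMemAllocZ(cEntries * sizeof(VDBACKENDINFO));
 *     int rc = VDBackendInfo(cEntries, paEntries, &cUsed);
 *     if (rc == VERR_BUFFER_OVERFLOW)
 *     {
 *         // cUsed now holds the required entry count; reallocate and retry.
 *     }
 *     for (unsigned i = 0; RT_SUCCESS(rc) && i < cUsed; i++)
 *         LogRel(("backend: %s\n", paEntries[i].pszBackend));
 *     RTMemFree(paEntries);
 */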
6017
6018/**
6019 * Lists the capabilities of a backend identified by its name.
6020 *
6021 * @returns VBox status code.
6022 * @param pszBackend The backend name.
6023 * @param pEntry Pointer to an entry.
6024 */
6025VBOXDDU_DECL(int) VDBackendInfoOne(const char *pszBackend, PVDBACKENDINFO pEntry)
6026{
6027 LogFlowFunc(("pszBackend=%#p pEntry=%#p\n", pszBackend, pEntry));
6028 /* Check arguments. */
6029 AssertMsgReturn(VALID_PTR(pszBackend),
6030 ("pszBackend=%#p\n", pszBackend),
6031 VERR_INVALID_PARAMETER);
6032 AssertMsgReturn(VALID_PTR(pEntry),
6033 ("pEntry=%#p\n", pEntry),
6034 VERR_INVALID_PARAMETER);
6035 if (!g_apBackends)
6036 VDInit();
6037
6038 /* Go through loaded backends. */
6039 for (unsigned i = 0; i < g_cBackends; i++)
6040 {
6041 if (!RTStrICmp(pszBackend, g_apBackends[i]->pszBackendName))
6042 {
6043 pEntry->pszBackend = g_apBackends[i]->pszBackendName;
6044 pEntry->uBackendCaps = g_apBackends[i]->uBackendCaps;
6045 pEntry->paFileExtensions = g_apBackends[i]->paFileExtensions;
6046 pEntry->paConfigInfo = g_apBackends[i]->paConfigInfo;
6047 return VINF_SUCCESS;
6048 }
6049 }
6050
6051 return VERR_NOT_FOUND;
6052}
6053
6054/**
6055 * Lists all filters and their capabilities in a caller-provided buffer.
6056 *
6057 * @return VBox status code.
6058 * VERR_BUFFER_OVERFLOW if not enough space is passed.
6059 * @param cEntriesAlloc Number of list entries available.
6060 * @param pEntries Pointer to array for the entries.
6061 * @param pcEntriesUsed Number of entries returned.
6062 */
6063VBOXDDU_DECL(int) VDFilterInfo(unsigned cEntriesAlloc, PVDFILTERINFO pEntries,
6064 unsigned *pcEntriesUsed)
6065{
6066 int rc = VINF_SUCCESS;
6067
6068 LogFlowFunc(("cEntriesAlloc=%u pEntries=%#p pcEntriesUsed=%#p\n", cEntriesAlloc, pEntries, pcEntriesUsed));
6069 /* Check arguments. */
6070 AssertMsgReturn(cEntriesAlloc,
6071 ("cEntriesAlloc=%u\n", cEntriesAlloc),
6072 VERR_INVALID_PARAMETER);
6073 AssertMsgReturn(VALID_PTR(pEntries),
6074 ("pEntries=%#p\n", pEntries),
6075 VERR_INVALID_PARAMETER);
6076 AssertMsgReturn(VALID_PTR(pcEntriesUsed),
6077 ("pcEntriesUsed=%#p\n", pcEntriesUsed),
6078 VERR_INVALID_PARAMETER);
6079 if (!g_apBackends)
6080 VDInit();
6081
6082 if (cEntriesAlloc < g_cFilterBackends)
6083 {
6084 *pcEntriesUsed = g_cFilterBackends;
6085 return VERR_BUFFER_OVERFLOW;
6086 }
6087
6088 for (unsigned i = 0; i < g_cFilterBackends; i++)
6089 {
6090 pEntries[i].pszFilter = g_apFilterBackends[i]->pszBackendName;
6091 pEntries[i].paConfigInfo = g_apFilterBackends[i]->paConfigInfo;
6092 }
6093
6094 LogFlowFunc(("returns %Rrc *pcEntriesUsed=%u\n", rc, g_cFilterBackends));
6095 *pcEntriesUsed = g_cFilterBackends;
6096 return rc;
6097}
6098
6099/**
6100 * Lists the capabilities of a filter identified by its name.
6101 *
6102 * @return VBox status code.
6103 * @param pszFilter The filter name (case insensitive).
6104 * @param pEntry Pointer to an entry.
6105 */
6106VBOXDDU_DECL(int) VDFilterInfoOne(const char *pszFilter, PVDFILTERINFO pEntry)
6107{
6108 LogFlowFunc(("pszFilter=%#p pEntry=%#p\n", pszFilter, pEntry));
6109 /* Check arguments. */
6110 AssertMsgReturn(VALID_PTR(pszFilter),
6111 ("pszFilter=%#p\n", pszFilter),
6112 VERR_INVALID_PARAMETER);
6113 AssertMsgReturn(VALID_PTR(pEntry),
6114 ("pEntry=%#p\n", pEntry),
6115 VERR_INVALID_PARAMETER);
6116 if (!g_apBackends)
6117 VDInit();
6118
6119 /* Go through loaded backends. */
6120 for (unsigned i = 0; i < g_cFilterBackends; i++)
6121 {
6122 if (!RTStrICmp(pszFilter, g_apFilterBackends[i]->pszBackendName))
6123 {
6124 pEntry->pszFilter = g_apFilterBackends[i]->pszBackendName;
6125 pEntry->paConfigInfo = g_apFilterBackends[i]->paConfigInfo;
6126 return VINF_SUCCESS;
6127 }
6128 }
6129
6130 return VERR_NOT_FOUND;
6131}
6132
6133/**
6134 * Allocates and initializes an empty HDD container.
6135 * No image files are opened.
6136 *
6137 * @returns VBox status code.
6138 * @param pVDIfsDisk Pointer to the per-disk VD interface list.
6139 * @param enmType Type of the image container.
6140 * @param ppDisk Where to store the reference to HDD container.
6141 */
6142VBOXDDU_DECL(int) VDCreate(PVDINTERFACE pVDIfsDisk, VDTYPE enmType, PVBOXHDD *ppDisk)
6143{
6144 int rc = VINF_SUCCESS;
6145 PVBOXHDD pDisk = NULL;
6146
6147 LogFlowFunc(("pVDIfsDisk=%#p\n", pVDIfsDisk));
6148 do
6149 {
6150 /* Check arguments. */
6151 AssertMsgBreakStmt(VALID_PTR(ppDisk),
6152 ("ppDisk=%#p\n", ppDisk),
6153 rc = VERR_INVALID_PARAMETER);
6154
6155 pDisk = (PVBOXHDD)RTMemAllocZ(sizeof(VBOXHDD));
6156 if (pDisk)
6157 {
6158 pDisk->u32Signature = VBOXHDDDISK_SIGNATURE;
6159 pDisk->enmType = enmType;
6160 pDisk->cImages = 0;
6161 pDisk->pBase = NULL;
6162 pDisk->pLast = NULL;
6163 pDisk->cbSize = 0;
6164 pDisk->PCHSGeometry.cCylinders = 0;
6165 pDisk->PCHSGeometry.cHeads = 0;
6166 pDisk->PCHSGeometry.cSectors = 0;
6167 pDisk->LCHSGeometry.cCylinders = 0;
6168 pDisk->LCHSGeometry.cHeads = 0;
6169 pDisk->LCHSGeometry.cSectors = 0;
6170 pDisk->pVDIfsDisk = pVDIfsDisk;
6171 pDisk->pInterfaceError = NULL;
6172 pDisk->pInterfaceThreadSync = NULL;
6173 pDisk->pIoCtxLockOwner = NULL;
6174 pDisk->pIoCtxHead = NULL;
6175 pDisk->fLocked = false;
6176 pDisk->hMemCacheIoCtx = NIL_RTMEMCACHE;
6177 pDisk->hMemCacheIoTask = NIL_RTMEMCACHE;
6178 RTListInit(&pDisk->ListFilterChainWrite);
6179 RTListInit(&pDisk->ListFilterChainRead);
6180
6181 /* Create the I/O ctx cache */
6182 rc = RTMemCacheCreate(&pDisk->hMemCacheIoCtx, sizeof(VDIOCTX), 0, UINT32_MAX,
6183 NULL, NULL, NULL, 0);
6184 if (RT_FAILURE(rc))
6185 break;
6186
6187 /* Create the I/O task cache */
6188 rc = RTMemCacheCreate(&pDisk->hMemCacheIoTask, sizeof(VDIOTASK), 0, UINT32_MAX,
6189 NULL, NULL, NULL, 0);
6190 if (RT_FAILURE(rc))
6191 break;
6192
6193 pDisk->pInterfaceError = VDIfErrorGet(pVDIfsDisk);
6194 pDisk->pInterfaceThreadSync = VDIfThreadSyncGet(pVDIfsDisk);
6195
6196 *ppDisk = pDisk;
6197 }
6198 else
6199 {
6200 rc = VERR_NO_MEMORY;
6201 break;
6202 }
6203 } while (0);
6204
6205 if ( RT_FAILURE(rc)
6206 && pDisk)
6207 {
6208 if (pDisk->hMemCacheIoCtx != NIL_RTMEMCACHE)
6209 RTMemCacheDestroy(pDisk->hMemCacheIoCtx);
6210 if (pDisk->hMemCacheIoTask != NIL_RTMEMCACHE)
6211 RTMemCacheDestroy(pDisk->hMemCacheIoTask);
6212 }
6213
6214 LogFlowFunc(("returns %Rrc (pDisk=%#p)\n", rc, pDisk));
6215 return rc;
6216}
6217
6218/**
6219 * Destroys HDD container.
6220 * If the container has open image files, they will be closed.
6221 *
6222 * @returns VBox status code.
6223 * @param pDisk Pointer to HDD container.
6224 */
6225VBOXDDU_DECL(int) VDDestroy(PVBOXHDD pDisk)
6226{
6227 int rc = VINF_SUCCESS;
6228 LogFlowFunc(("pDisk=%#p\n", pDisk));
6229 do
6230 {
6231 /* sanity check */
6232 AssertPtrBreak(pDisk);
6233 AssertMsg(pDisk->u32Signature == VBOXHDDDISK_SIGNATURE, ("u32Signature=%08x\n", pDisk->u32Signature));
6234 Assert(!pDisk->fLocked);
6235
6236 rc = VDCloseAll(pDisk);
6237 int rc2 = VDFilterRemoveAll(pDisk);
6238 if (RT_SUCCESS(rc))
6239 rc = rc2;
6240
6241 RTMemCacheDestroy(pDisk->hMemCacheIoCtx);
6242 RTMemCacheDestroy(pDisk->hMemCacheIoTask);
6243 RTMemFree(pDisk);
6244 } while (0);
6245 LogFlowFunc(("returns %Rrc\n", rc));
6246 return rc;
6247}
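/*
 * Usage sketch (illustrative only, error handling trimmed): creating an empty
 * HDD container, opening an image into it and tearing everything down again.
 * The file name and the "VDI" backend name are just examples.
 *
 *     PVBOXHDD pDisk = NULL;
 *     int rc = VDCreate(NULL, VDTYPE_HDD, &pDisk);
 *     if (RT_SUCCESS(rc))
 *     {
 *         rc = VDOpen(pDisk, "VDI", "/path/to/image.vdi", VD_OPEN_FLAGS_NORMAL, NULL);
 *         // ... VDRead()/VDWrite() ...
 *         VDDestroy(pDisk); // closes any still opened images
 *     }
 */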
6248
6249/**
6250 * Try to get the backend name which can use this image.
6251 *
6252 * @returns VBox status code.
6253 * VINF_SUCCESS if a plugin was found.
6254 * ppszFormat contains the string which can be used as backend name.
6255 * VERR_NOT_SUPPORTED if no backend was found.
6256 * @param pVDIfsDisk Pointer to the per-disk VD interface list.
6257 * @param pVDIfsImage Pointer to the per-image VD interface list.
6258 * @param pszFilename Name of the image file for which the backend is queried.
6259 * @param ppszFormat Receives pointer to the UTF-8 string which contains the format name.
6260 * The returned pointer must be freed using RTStrFree().
 * @param penmType Where to store the detected image type on success.
6261 */
6262VBOXDDU_DECL(int) VDGetFormat(PVDINTERFACE pVDIfsDisk, PVDINTERFACE pVDIfsImage,
6263 const char *pszFilename, char **ppszFormat, VDTYPE *penmType)
6264{
6265 int rc = VERR_NOT_SUPPORTED;
6266 VDINTERFACEIOINT VDIfIoInt;
6267 VDINTERFACEIO VDIfIoFallback;
6268 PVDINTERFACEIO pInterfaceIo;
6269
6270 LogFlowFunc(("pszFilename=\"%s\"\n", pszFilename));
6271 /* Check arguments. */
6272 AssertMsgReturn(VALID_PTR(pszFilename) && *pszFilename,
6273 ("pszFilename=%#p \"%s\"\n", pszFilename, pszFilename),
6274 VERR_INVALID_PARAMETER);
6275 AssertMsgReturn(VALID_PTR(ppszFormat),
6276 ("ppszFormat=%#p\n", ppszFormat),
6277 VERR_INVALID_PARAMETER);
6278 AssertMsgReturn(VALID_PTR(penmType),
6279 ("penmType=%#p\n", penmType),
6280 VERR_INVALID_PARAMETER);
6281
6282 if (!g_apBackends)
6283 VDInit();
6284
6285 pInterfaceIo = VDIfIoGet(pVDIfsImage);
6286 if (!pInterfaceIo)
6287 {
6288 /*
6289 * Caller doesn't provide an I/O interface, create our own using the
6290 * native file API.
6291 */
6292 vdIfIoFallbackCallbacksSetup(&VDIfIoFallback);
6293 pInterfaceIo = &VDIfIoFallback;
6294 }
6295
6296 /* Set up the internal I/O interface. */
6297 AssertReturn(!VDIfIoIntGet(pVDIfsImage), VERR_INVALID_PARAMETER);
6298 VDIfIoInt.pfnOpen = vdIOIntOpenLimited;
6299 VDIfIoInt.pfnClose = vdIOIntCloseLimited;
6300 VDIfIoInt.pfnDelete = vdIOIntDeleteLimited;
6301 VDIfIoInt.pfnMove = vdIOIntMoveLimited;
6302 VDIfIoInt.pfnGetFreeSpace = vdIOIntGetFreeSpaceLimited;
6303 VDIfIoInt.pfnGetModificationTime = vdIOIntGetModificationTimeLimited;
6304 VDIfIoInt.pfnGetSize = vdIOIntGetSizeLimited;
6305 VDIfIoInt.pfnSetSize = vdIOIntSetSizeLimited;
6306 VDIfIoInt.pfnReadUser = vdIOIntReadUserLimited;
6307 VDIfIoInt.pfnWriteUser = vdIOIntWriteUserLimited;
6308 VDIfIoInt.pfnReadMeta = vdIOIntReadMetaLimited;
6309 VDIfIoInt.pfnWriteMeta = vdIOIntWriteMetaLimited;
6310 VDIfIoInt.pfnFlush = vdIOIntFlushLimited;
6311 rc = VDInterfaceAdd(&VDIfIoInt.Core, "VD_IOINT", VDINTERFACETYPE_IOINT,
6312 pInterfaceIo, sizeof(VDINTERFACEIOINT), &pVDIfsImage);
6313 AssertRC(rc);
6314
6315 /* Find the backend supporting this file format. */
6316 for (unsigned i = 0; i < g_cBackends; i++)
6317 {
6318 if (g_apBackends[i]->pfnProbe)
6319 {
6320 rc = g_apBackends[i]->pfnProbe(pszFilename, pVDIfsDisk, pVDIfsImage, penmType);
6321 if ( RT_SUCCESS(rc)
6322 /* The correct backend has been found, but there is a small
6323 * incompatibility so that the file cannot be used. Stop here
6324 * and signal success - the actual open will of course fail,
6325 * but that will create a really sensible error message. */
6326 || ( rc != VERR_VD_GEN_INVALID_HEADER
6327 && rc != VERR_VD_VDI_INVALID_HEADER
6328 && rc != VERR_VD_VMDK_INVALID_HEADER
6329 && rc != VERR_VD_ISCSI_INVALID_HEADER
6330 && rc != VERR_VD_VHD_INVALID_HEADER
6331 && rc != VERR_VD_RAW_INVALID_HEADER
6332 && rc != VERR_VD_RAW_SIZE_MODULO_512
6333 && rc != VERR_VD_RAW_SIZE_MODULO_2048
6334 && rc != VERR_VD_RAW_SIZE_OPTICAL_TOO_SMALL
6335 && rc != VERR_VD_RAW_SIZE_FLOPPY_TOO_BIG
6336 && rc != VERR_VD_PARALLELS_INVALID_HEADER
6337 && rc != VERR_VD_DMG_INVALID_HEADER))
6338 {
6339 /* Copy the name into the new string. */
6340 char *pszFormat = RTStrDup(g_apBackends[i]->pszBackendName);
6341 if (!pszFormat)
6342 {
6343 rc = VERR_NO_MEMORY;
6344 break;
6345 }
6346 *ppszFormat = pszFormat;
6347 /* Do not consider the typical file access errors as success,
6348 * which allows the caller to deal with such issues. */
6349 if ( rc != VERR_ACCESS_DENIED
6350 && rc != VERR_PATH_NOT_FOUND
6351 && rc != VERR_FILE_NOT_FOUND)
6352 rc = VINF_SUCCESS;
6353 break;
6354 }
6355 rc = VERR_NOT_SUPPORTED;
6356 }
6357 }
6358
6359 /* Try the cache backends. */
6360 if (rc == VERR_NOT_SUPPORTED)
6361 {
6362 for (unsigned i = 0; i < g_cCacheBackends; i++)
6363 {
6364 if (g_apCacheBackends[i]->pfnProbe)
6365 {
6366 rc = g_apCacheBackends[i]->pfnProbe(pszFilename, pVDIfsDisk,
6367 pVDIfsImage);
6368 if ( RT_SUCCESS(rc)
6369 || (rc != VERR_VD_GEN_INVALID_HEADER))
6370 {
6371 /* Copy the name into the new string. */
6372 char *pszFormat = RTStrDup(g_apCacheBackends[i]->pszBackendName);
6373 if (!pszFormat)
6374 {
6375 rc = VERR_NO_MEMORY;
6376 break;
6377 }
6378 *ppszFormat = pszFormat;
6379 rc = VINF_SUCCESS;
6380 break;
6381 }
6382 rc = VERR_NOT_SUPPORTED;
6383 }
6384 }
6385 }
6386
6387 LogFlowFunc(("returns %Rrc *ppszFormat=\"%s\"\n", rc, *ppszFormat));
6388 return rc;
6389}
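/*
 * Usage sketch (illustrative only, error handling trimmed): probing an image
 * of unknown format and opening it with the detected backend. The file name is
 * an example; the returned format string must be freed with RTStrFree().
 *
 *     char *pszFormat = NULL;
 *     VDTYPE enmType = VDTYPE_INVALID;
 *     int rc = VDGetFormat(NULL, NULL, "/path/to/disk.img", &pszFormat, &enmType);
 *     if (RT_SUCCESS(rc))
 *     {
 *         PVBOXHDD pDisk = NULL;
 *         rc = VDCreate(NULL, enmType, &pDisk);
 *         if (RT_SUCCESS(rc))
 *             rc = VDOpen(pDisk, pszFormat, "/path/to/disk.img", VD_OPEN_FLAGS_NORMAL, NULL);
 *         RTStrFree(pszFormat);
 *     }
 */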
6390
6391/**
6392 * Opens an image file.
6393 *
6394 * The first image file opened in an HDD container must have a base image type;
6395 * all subsequently opened images must be differencing or undo images.
6396 * For differencing images the linkage is checked for consistency with the previously opened image.
6397 * When another differencing image is opened and the last image was opened in read/write access
6398 * mode, then the last image is reopened in read-only with deny write sharing mode. This allows
6399 * other processes to use images in read-only mode too.
6400 *
6401 * Note that the image is opened in read-only mode if a read/write open is not possible.
6402 * Use VDIsReadOnly to check open mode.
6403 *
6404 * @returns VBox status code.
6405 * @param pDisk Pointer to HDD container.
6406 * @param pszBackend Name of the image file backend to use.
6407 * @param pszFilename Name of the image file to open.
6408 * @param uOpenFlags Image file open mode, see VD_OPEN_FLAGS_* constants.
6409 * @param pVDIfsImage Pointer to the per-image VD interface list.
6410 */
6411VBOXDDU_DECL(int) VDOpen(PVBOXHDD pDisk, const char *pszBackend,
6412 const char *pszFilename, unsigned uOpenFlags,
6413 PVDINTERFACE pVDIfsImage)
6414{
6415 int rc = VINF_SUCCESS;
6416 int rc2;
6417 bool fLockWrite = false;
6418 PVDIMAGE pImage = NULL;
6419
6420 LogFlowFunc(("pDisk=%#p pszBackend=\"%s\" pszFilename=\"%s\" uOpenFlags=%#x, pVDIfsImage=%#p\n",
6421 pDisk, pszBackend, pszFilename, uOpenFlags, pVDIfsImage));
6422
6423 do
6424 {
6425 /* sanity check */
6426 AssertPtrBreakStmt(pDisk, rc = VERR_INVALID_PARAMETER);
6427 AssertMsg(pDisk->u32Signature == VBOXHDDDISK_SIGNATURE, ("u32Signature=%08x\n", pDisk->u32Signature));
6428
6429 /* Check arguments. */
6430 AssertMsgBreakStmt(VALID_PTR(pszBackend) && *pszBackend,
6431 ("pszBackend=%#p \"%s\"\n", pszBackend, pszBackend),
6432 rc = VERR_INVALID_PARAMETER);
6433 AssertMsgBreakStmt(VALID_PTR(pszFilename) && *pszFilename,
6434 ("pszFilename=%#p \"%s\"\n", pszFilename, pszFilename),
6435 rc = VERR_INVALID_PARAMETER);
6436 AssertMsgBreakStmt((uOpenFlags & ~VD_OPEN_FLAGS_MASK) == 0,
6437 ("uOpenFlags=%#x\n", uOpenFlags),
6438 rc = VERR_INVALID_PARAMETER);
6439 AssertMsgBreakStmt( !(uOpenFlags & VD_OPEN_FLAGS_SKIP_CONSISTENCY_CHECKS)
6440 || (uOpenFlags & VD_OPEN_FLAGS_READONLY),
6441 ("uOpenFlags=%#x\n", uOpenFlags),
6442 rc = VERR_INVALID_PARAMETER);
6443
6444 /*
6445 * Destroy the current discard state first; it might still have pending blocks
6446 * for the currently opened image, which will be switched to read-only mode.
6447 */
6448 /* Lock disk for writing, as we modify pDisk information below. */
6449 rc2 = vdThreadStartWrite(pDisk);
6450 AssertRC(rc2);
6451 fLockWrite = true;
6452 rc = vdDiscardStateDestroy(pDisk);
6453 if (RT_FAILURE(rc))
6454 break;
6455 rc2 = vdThreadFinishWrite(pDisk);
6456 AssertRC(rc2);
6457 fLockWrite = false;
6458
6459 /* Set up image descriptor. */
6460 pImage = (PVDIMAGE)RTMemAllocZ(sizeof(VDIMAGE));
6461 if (!pImage)
6462 {
6463 rc = VERR_NO_MEMORY;
6464 break;
6465 }
6466 pImage->pszFilename = RTStrDup(pszFilename);
6467 if (!pImage->pszFilename)
6468 {
6469 rc = VERR_NO_MEMORY;
6470 break;
6471 }
6472
6473 pImage->VDIo.pDisk = pDisk;
6474 pImage->pVDIfsImage = pVDIfsImage;
6475
6476 rc = vdFindBackend(pszBackend, &pImage->Backend);
6477 if (RT_FAILURE(rc))
6478 break;
6479 if (!pImage->Backend)
6480 {
6481 rc = vdError(pDisk, VERR_INVALID_PARAMETER, RT_SRC_POS,
6482 N_("VD: unknown backend name '%s'"), pszBackend);
6483 break;
6484 }
6485
6486 /*
6487 * Fail if the backend can't do async I/O but the
6488 * flag is set.
6489 */
6490 if ( !(pImage->Backend->uBackendCaps & VD_CAP_ASYNC)
6491 && (uOpenFlags & VD_OPEN_FLAGS_ASYNC_IO))
6492 {
6493 rc = vdError(pDisk, VERR_NOT_SUPPORTED, RT_SRC_POS,
6494 N_("VD: Backend '%s' does not support async I/O"), pszBackend);
6495 break;
6496 }
6497
6498 /*
6499 * Fail if the backend doesn't support the discard operation but the
6500 * flag is set.
6501 */
6502 if ( !(pImage->Backend->uBackendCaps & VD_CAP_DISCARD)
6503 && (uOpenFlags & VD_OPEN_FLAGS_DISCARD))
6504 {
6505 rc = vdError(pDisk, VERR_VD_DISCARD_NOT_SUPPORTED, RT_SRC_POS,
6506 N_("VD: Backend '%s' does not support discard"), pszBackend);
6507 break;
6508 }
6509
6510 /* Set up the I/O interface. */
6511 pImage->VDIo.pInterfaceIo = VDIfIoGet(pVDIfsImage);
6512 if (!pImage->VDIo.pInterfaceIo)
6513 {
6514 vdIfIoFallbackCallbacksSetup(&pImage->VDIo.VDIfIo);
6515 rc = VDInterfaceAdd(&pImage->VDIo.VDIfIo.Core, "VD_IO", VDINTERFACETYPE_IO,
6516 pDisk, sizeof(VDINTERFACEIO), &pVDIfsImage);
6517 pImage->VDIo.pInterfaceIo = &pImage->VDIo.VDIfIo;
6518 }
6519
6520 /* Set up the internal I/O interface. */
6521 AssertBreakStmt(!VDIfIoIntGet(pVDIfsImage), rc = VERR_INVALID_PARAMETER);
6522 vdIfIoIntCallbacksSetup(&pImage->VDIo.VDIfIoInt);
6523 rc = VDInterfaceAdd(&pImage->VDIo.VDIfIoInt.Core, "VD_IOINT", VDINTERFACETYPE_IOINT,
6524 &pImage->VDIo, sizeof(VDINTERFACEIOINT), &pImage->pVDIfsImage);
6525 AssertRC(rc);
6526
6527 pImage->uOpenFlags = uOpenFlags & (VD_OPEN_FLAGS_HONOR_SAME | VD_OPEN_FLAGS_DISCARD | VD_OPEN_FLAGS_IGNORE_FLUSH | VD_OPEN_FLAGS_INFORM_ABOUT_ZERO_BLOCKS);
6528 pImage->VDIo.fIgnoreFlush = (uOpenFlags & VD_OPEN_FLAGS_IGNORE_FLUSH) != 0;
6529 rc = pImage->Backend->pfnOpen(pImage->pszFilename,
6530 uOpenFlags & ~(VD_OPEN_FLAGS_HONOR_SAME | VD_OPEN_FLAGS_IGNORE_FLUSH | VD_OPEN_FLAGS_INFORM_ABOUT_ZERO_BLOCKS),
6531 pDisk->pVDIfsDisk,
6532 pImage->pVDIfsImage,
6533 pDisk->enmType,
6534 &pImage->pBackendData);
6535 /*
6536 * If the image is corrupted and there is a repair method, try to repair it
6537 * first (if it was opened in read-write mode) and open it again afterwards.
6538 */
6539 if ( RT_UNLIKELY(rc == VERR_VD_IMAGE_CORRUPTED)
6540 && !(uOpenFlags & VD_OPEN_FLAGS_READONLY)
6541 && pImage->Backend->pfnRepair)
6542 {
6543 rc = pImage->Backend->pfnRepair(pszFilename, pDisk->pVDIfsDisk, pImage->pVDIfsImage, 0 /* fFlags */);
6544 if (RT_SUCCESS(rc))
6545 rc = pImage->Backend->pfnOpen(pImage->pszFilename,
6546 uOpenFlags & ~(VD_OPEN_FLAGS_HONOR_SAME | VD_OPEN_FLAGS_IGNORE_FLUSH | VD_OPEN_FLAGS_INFORM_ABOUT_ZERO_BLOCKS),
6547 pDisk->pVDIfsDisk,
6548 pImage->pVDIfsImage,
6549 pDisk->enmType,
6550 &pImage->pBackendData);
6551 else
6552 {
6553 rc = vdError(pDisk, rc, RT_SRC_POS,
6554 N_("VD: error %Rrc repairing corrupted image file '%s'"), rc, pszFilename);
6555 break;
6556 }
6557 }
6558 else if (RT_UNLIKELY(rc == VERR_VD_IMAGE_CORRUPTED))
6559 {
6560 rc = vdError(pDisk, rc, RT_SRC_POS,
6561 N_("VD: Image file '%s' is corrupted and can't be opened"), pszFilename);
6562 break;
6563 }
6564
6565 /* If the open in read-write mode failed, retry in read-only mode. */
6566 if (RT_FAILURE(rc))
6567 {
6568 if (!(uOpenFlags & VD_OPEN_FLAGS_READONLY)
6569 && ( rc == VERR_ACCESS_DENIED
6570 || rc == VERR_PERMISSION_DENIED
6571 || rc == VERR_WRITE_PROTECT
6572 || rc == VERR_SHARING_VIOLATION
6573 || rc == VERR_FILE_LOCK_FAILED))
6574 rc = pImage->Backend->pfnOpen(pImage->pszFilename,
6575 (uOpenFlags & ~(VD_OPEN_FLAGS_HONOR_SAME | VD_OPEN_FLAGS_INFORM_ABOUT_ZERO_BLOCKS))
6576 | VD_OPEN_FLAGS_READONLY,
6577 pDisk->pVDIfsDisk,
6578 pImage->pVDIfsImage,
6579 pDisk->enmType,
6580 &pImage->pBackendData);
6581 if (RT_FAILURE(rc))
6582 {
6583 rc = vdError(pDisk, rc, RT_SRC_POS,
6584 N_("VD: error %Rrc opening image file '%s'"), rc, pszFilename);
6585 break;
6586 }
6587 }
6588
6589 /* Lock disk for writing, as we modify pDisk information below. */
6590 rc2 = vdThreadStartWrite(pDisk);
6591 AssertRC(rc2);
6592 fLockWrite = true;
6593
6594 pImage->VDIo.pBackendData = pImage->pBackendData;
6595
6596 /* Check image type. As the image itself has only partial knowledge
6597 * whether it's a base image or not, this info is derived here. The
6598 * base image can be fixed or normal, all others must be normal or
6599 * diff images. Some image formats don't distinguish between normal
6600 * and diff images, so this must be corrected here. */
6601 unsigned uImageFlags;
6602 uImageFlags = pImage->Backend->pfnGetImageFlags(pImage->pBackendData);
6603 if (RT_FAILURE(rc))
6604 uImageFlags = VD_IMAGE_FLAGS_NONE;
6605 if ( RT_SUCCESS(rc)
6606 && !(uOpenFlags & VD_OPEN_FLAGS_INFO))
6607 {
6608 if ( pDisk->cImages == 0
6609 && (uImageFlags & VD_IMAGE_FLAGS_DIFF))
6610 {
6611 rc = VERR_VD_INVALID_TYPE;
6612 break;
6613 }
6614 else if (pDisk->cImages != 0)
6615 {
6616 if (uImageFlags & VD_IMAGE_FLAGS_FIXED)
6617 {
6618 rc = VERR_VD_INVALID_TYPE;
6619 break;
6620 }
6621 else
6622 uImageFlags |= VD_IMAGE_FLAGS_DIFF;
6623 }
6624 }
6625
6626 /* Ensure we always get correct diff information, even if the backend
6627 * doesn't actually have a stored flag for this. It must not return
6628 * bogus information for the parent UUID if it is not a diff image. */
6629 RTUUID parentUuid;
6630 RTUuidClear(&parentUuid);
6631 rc2 = pImage->Backend->pfnGetParentUuid(pImage->pBackendData, &parentUuid);
6632 if (RT_SUCCESS(rc2) && !RTUuidIsNull(&parentUuid))
6633 uImageFlags |= VD_IMAGE_FLAGS_DIFF;
6634
6635 pImage->uImageFlags = uImageFlags;
6636
6637 /* Force sane optimization settings. It's not worth avoiding writes
6638 * to fixed size images. The overhead would have almost no payback. */
6639 if (uImageFlags & VD_IMAGE_FLAGS_FIXED)
6640 pImage->uOpenFlags |= VD_OPEN_FLAGS_HONOR_SAME;
6641
6642 /** @todo optionally check UUIDs */
6643
6644 /* Cache disk information. */
6645 pDisk->cbSize = pImage->Backend->pfnGetSize(pImage->pBackendData);
6646
6647 /* Cache PCHS geometry. */
6648 rc2 = pImage->Backend->pfnGetPCHSGeometry(pImage->pBackendData,
6649 &pDisk->PCHSGeometry);
6650 if (RT_FAILURE(rc2))
6651 {
6652 pDisk->PCHSGeometry.cCylinders = 0;
6653 pDisk->PCHSGeometry.cHeads = 0;
6654 pDisk->PCHSGeometry.cSectors = 0;
6655 }
6656 else
6657 {
6658 /* Make sure the PCHS geometry is properly clipped. */
6659 pDisk->PCHSGeometry.cCylinders = RT_MIN(pDisk->PCHSGeometry.cCylinders, 16383);
6660 pDisk->PCHSGeometry.cHeads = RT_MIN(pDisk->PCHSGeometry.cHeads, 16);
6661 pDisk->PCHSGeometry.cSectors = RT_MIN(pDisk->PCHSGeometry.cSectors, 63);
6662 }
6663
6664 /* Cache LCHS geometry. */
6665 rc2 = pImage->Backend->pfnGetLCHSGeometry(pImage->pBackendData,
6666 &pDisk->LCHSGeometry);
6667 if (RT_FAILURE(rc2))
6668 {
6669 pDisk->LCHSGeometry.cCylinders = 0;
6670 pDisk->LCHSGeometry.cHeads = 0;
6671 pDisk->LCHSGeometry.cSectors = 0;
6672 }
6673 else
6674 {
6675 /* Make sure the LCHS geometry is properly clipped. */
6676 pDisk->LCHSGeometry.cHeads = RT_MIN(pDisk->LCHSGeometry.cHeads, 255);
6677 pDisk->LCHSGeometry.cSectors = RT_MIN(pDisk->LCHSGeometry.cSectors, 63);
6678 }
6679
6680 if (pDisk->cImages != 0)
6681 {
6682 /* Switch previous image to read-only mode. */
6683 unsigned uOpenFlagsPrevImg;
6684 uOpenFlagsPrevImg = pDisk->pLast->Backend->pfnGetOpenFlags(pDisk->pLast->pBackendData);
6685 if (!(uOpenFlagsPrevImg & VD_OPEN_FLAGS_READONLY))
6686 {
6687 uOpenFlagsPrevImg |= VD_OPEN_FLAGS_READONLY;
6688 rc = pDisk->pLast->Backend->pfnSetOpenFlags(pDisk->pLast->pBackendData, uOpenFlagsPrevImg);
6689 }
6690 }
6691
6692 if (RT_SUCCESS(rc))
6693 {
6694 /* Image successfully opened, make it the last image. */
6695 vdAddImageToList(pDisk, pImage);
6696 if (!(uOpenFlags & VD_OPEN_FLAGS_READONLY))
6697 pDisk->uModified = VD_IMAGE_MODIFIED_FIRST;
6698 }
6699 else
6700 {
6701 /* Error detected, but image opened. Close image. */
6702 rc2 = pImage->Backend->pfnClose(pImage->pBackendData, false);
6703 AssertRC(rc2);
6704 pImage->pBackendData = NULL;
6705 }
6706 } while (0);
6707
6708 if (RT_UNLIKELY(fLockWrite))
6709 {
6710 rc2 = vdThreadFinishWrite(pDisk);
6711 AssertRC(rc2);
6712 }
6713
6714 if (RT_FAILURE(rc))
6715 {
6716 if (pImage)
6717 {
6718 if (pImage->pszFilename)
6719 RTStrFree(pImage->pszFilename);
6720 RTMemFree(pImage);
6721 }
6722 }
6723
6724 LogFlowFunc(("returns %Rrc\n", rc));
6725 return rc;
6726}
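/*
 * Usage sketch (illustrative only): opening a base image followed by a
 * differencing image on a container created with VDCreate (pDisk). As
 * described above, the previously opened image is switched to read-only mode
 * once the diff is opened on top of it. The file names and the "VMDK" backend
 * are just examples.
 *
 *     int rc = VDOpen(pDisk, "VMDK", "/path/base.vmdk", VD_OPEN_FLAGS_NORMAL, NULL);
 *     if (RT_SUCCESS(rc))
 *         rc = VDOpen(pDisk, "VMDK", "/path/diff.vmdk", VD_OPEN_FLAGS_NORMAL, NULL);
 *     // all new writes now go to the diff image; the base image stays untouched
 */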
6727
6728/**
6729 * Opens a cache image.
6730 *
6731 * @return VBox status code.
6732 * @param pDisk Pointer to the HDD container which should use the cache image.
6733 * @param pszBackend Name of the cache file backend to use (case insensitive).
6734 * @param pszFilename Name of the cache image to open.
6735 * @param uOpenFlags Image file open mode, see VD_OPEN_FLAGS_* constants.
6736 * @param pVDIfsCache Pointer to the per-cache VD interface list.
6737 */
6738VBOXDDU_DECL(int) VDCacheOpen(PVBOXHDD pDisk, const char *pszBackend,
6739 const char *pszFilename, unsigned uOpenFlags,
6740 PVDINTERFACE pVDIfsCache)
6741{
6742 int rc = VINF_SUCCESS;
6743 int rc2;
6744 bool fLockWrite = false;
6745 PVDCACHE pCache = NULL;
6746
6747 LogFlowFunc(("pDisk=%#p pszBackend=\"%s\" pszFilename=\"%s\" uOpenFlags=%#x, pVDIfsCache=%#p\n",
6748 pDisk, pszBackend, pszFilename, uOpenFlags, pVDIfsCache));
6749
6750 do
6751 {
6752 /* sanity check */
6753 AssertPtrBreakStmt(pDisk, rc = VERR_INVALID_PARAMETER);
6754 AssertMsg(pDisk->u32Signature == VBOXHDDDISK_SIGNATURE, ("u32Signature=%08x\n", pDisk->u32Signature));
6755
6756 /* Check arguments. */
6757 AssertMsgBreakStmt(VALID_PTR(pszBackend) && *pszBackend,
6758 ("pszBackend=%#p \"%s\"\n", pszBackend, pszBackend),
6759 rc = VERR_INVALID_PARAMETER);
6760 AssertMsgBreakStmt(VALID_PTR(pszFilename) && *pszFilename,
6761 ("pszFilename=%#p \"%s\"\n", pszFilename, pszFilename),
6762 rc = VERR_INVALID_PARAMETER);
6763 AssertMsgBreakStmt((uOpenFlags & ~VD_OPEN_FLAGS_MASK) == 0,
6764 ("uOpenFlags=%#x\n", uOpenFlags),
6765 rc = VERR_INVALID_PARAMETER);
6766
6767 /* Set up image descriptor. */
6768 pCache = (PVDCACHE)RTMemAllocZ(sizeof(VDCACHE));
6769 if (!pCache)
6770 {
6771 rc = VERR_NO_MEMORY;
6772 break;
6773 }
6774 pCache->pszFilename = RTStrDup(pszFilename);
6775 if (!pCache->pszFilename)
6776 {
6777 rc = VERR_NO_MEMORY;
6778 break;
6779 }
6780
6781 pCache->VDIo.pDisk = pDisk;
6782 pCache->pVDIfsCache = pVDIfsCache;
6783
6784 rc = vdFindCacheBackend(pszBackend, &pCache->Backend);
6785 if (RT_FAILURE(rc))
6786 break;
6787 if (!pCache->Backend)
6788 {
6789 rc = vdError(pDisk, VERR_INVALID_PARAMETER, RT_SRC_POS,
6790 N_("VD: unknown backend name '%s'"), pszBackend);
6791 break;
6792 }
6793
6794 /* Set up the I/O interface. */
6795 pCache->VDIo.pInterfaceIo = VDIfIoGet(pVDIfsCache);
6796 if (!pCache->VDIo.pInterfaceIo)
6797 {
6798 vdIfIoFallbackCallbacksSetup(&pCache->VDIo.VDIfIo);
6799 rc = VDInterfaceAdd(&pCache->VDIo.VDIfIo.Core, "VD_IO", VDINTERFACETYPE_IO,
6800 pDisk, sizeof(VDINTERFACEIO), &pVDIfsCache);
6801 pCache->VDIo.pInterfaceIo = &pCache->VDIo.VDIfIo;
6802 }
6803
6804 /* Set up the internal I/O interface. */
6805 AssertBreakStmt(!VDIfIoIntGet(pVDIfsCache), rc = VERR_INVALID_PARAMETER);
6806 vdIfIoIntCallbacksSetup(&pCache->VDIo.VDIfIoInt);
6807 rc = VDInterfaceAdd(&pCache->VDIo.VDIfIoInt.Core, "VD_IOINT", VDINTERFACETYPE_IOINT,
6808 &pCache->VDIo, sizeof(VDINTERFACEIOINT), &pCache->pVDIfsCache);
6809 AssertRC(rc);
6810
6811 pCache->uOpenFlags = uOpenFlags & VD_OPEN_FLAGS_HONOR_SAME;
6812 rc = pCache->Backend->pfnOpen(pCache->pszFilename,
6813 uOpenFlags & ~VD_OPEN_FLAGS_HONOR_SAME,
6814 pDisk->pVDIfsDisk,
6815 pCache->pVDIfsCache,
6816 &pCache->pBackendData);
6817 /* If the open in read-write mode failed, retry in read-only mode. */
6818 if (RT_FAILURE(rc))
6819 {
6820 if (!(uOpenFlags & VD_OPEN_FLAGS_READONLY)
6821 && ( rc == VERR_ACCESS_DENIED
6822 || rc == VERR_PERMISSION_DENIED
6823 || rc == VERR_WRITE_PROTECT
6824 || rc == VERR_SHARING_VIOLATION
6825 || rc == VERR_FILE_LOCK_FAILED))
6826 rc = pCache->Backend->pfnOpen(pCache->pszFilename,
6827 (uOpenFlags & ~VD_OPEN_FLAGS_HONOR_SAME)
6828 | VD_OPEN_FLAGS_READONLY,
6829 pDisk->pVDIfsDisk,
6830 pCache->pVDIfsCache,
6831 &pCache->pBackendData);
6832 if (RT_FAILURE(rc))
6833 {
6834 rc = vdError(pDisk, rc, RT_SRC_POS,
6835 N_("VD: error %Rrc opening image file '%s'"), rc, pszFilename);
6836 break;
6837 }
6838 }
6839
6840 /* Lock disk for writing, as we modify pDisk information below. */
6841 rc2 = vdThreadStartWrite(pDisk);
6842 AssertRC(rc2);
6843 fLockWrite = true;
6844
6845 /*
6846 * Check that the modification UUID of the cache and last image
6847 * match. If they don't, the image was modified in between without the cache.
6848 * The cache might contain stale data.
6849 */
6850 RTUUID UuidImage, UuidCache;
6851
6852 rc = pCache->Backend->pfnGetModificationUuid(pCache->pBackendData,
6853 &UuidCache);
6854 if (RT_SUCCESS(rc))
6855 {
6856 rc = pDisk->pLast->Backend->pfnGetModificationUuid(pDisk->pLast->pBackendData,
6857 &UuidImage);
6858 if (RT_SUCCESS(rc))
6859 {
6860 if (RTUuidCompare(&UuidImage, &UuidCache))
6861 rc = VERR_VD_CACHE_NOT_UP_TO_DATE;
6862 }
6863 }
6864
6865 /*
6866 * We assume that the user knows what he is doing if one of the images
6867 * doesn't support the modification uuid.
6868 */
6869 if (rc == VERR_NOT_SUPPORTED)
6870 rc = VINF_SUCCESS;
6871
6872 if (RT_SUCCESS(rc))
6873 {
6874 /* Cache successfully opened, make it the current one. */
6875 if (!pDisk->pCache)
6876 pDisk->pCache = pCache;
6877 else
6878 rc = VERR_VD_CACHE_ALREADY_EXISTS;
6879 }
6880
6881 if (RT_FAILURE(rc))
6882 {
6883 /* Error detected, but image opened. Close image. */
6884 rc2 = pCache->Backend->pfnClose(pCache->pBackendData, false);
6885 AssertRC(rc2);
6886 pCache->pBackendData = NULL;
6887 }
6888 } while (0);
6889
6890 if (RT_UNLIKELY(fLockWrite))
6891 {
6892 rc2 = vdThreadFinishWrite(pDisk);
6893 AssertRC(rc2);
6894 }
6895
6896 if (RT_FAILURE(rc))
6897 {
6898 if (pCache)
6899 {
6900 if (pCache->pszFilename)
6901 RTStrFree(pCache->pszFilename);
6902 RTMemFree(pCache);
6903 }
6904 }
6905
6906 LogFlowFunc(("returns %Rrc\n", rc));
6907 return rc;
6908}
6909
6910VBOXDDU_DECL(int) VDFilterAdd(PVBOXHDD pDisk, const char *pszFilter, uint32_t fFlags,
6911 PVDINTERFACE pVDIfsFilter)
6912{
6913 int rc = VINF_SUCCESS;
6914 int rc2;
6915 bool fLockWrite = false;
6916 PVDFILTER pFilter = NULL;
6917
6918 LogFlowFunc(("pDisk=%#p pszFilter=\"%s\" pVDIfsFilter=%#p\n",
6919 pDisk, pszFilter, pVDIfsFilter));
6920
6921 do
6922 {
6923 /* sanity check */
6924 AssertPtrBreakStmt(pDisk, rc = VERR_INVALID_PARAMETER);
6925 AssertMsg(pDisk->u32Signature == VBOXHDDDISK_SIGNATURE, ("u32Signature=%08x\n", pDisk->u32Signature));
6926
6927 /* Check arguments. */
6928 AssertMsgBreakStmt(VALID_PTR(pszFilter) && *pszFilter,
6929 ("pszFilter=%#p \"%s\"\n", pszFilter, pszFilter),
6930 rc = VERR_INVALID_PARAMETER);
6931
6932 AssertMsgBreakStmt(!(fFlags & ~VD_FILTER_FLAGS_MASK),
6933 ("Invalid flags set (fFlags=%#x)\n", fFlags),
6934 rc = VERR_INVALID_PARAMETER);
6935
6936 /* Set up image descriptor. */
6937 pFilter = (PVDFILTER)RTMemAllocZ(sizeof(VDFILTER));
6938 if (!pFilter)
6939 {
6940 rc = VERR_NO_MEMORY;
6941 break;
6942 }
6943
6944 rc = vdFindFilterBackend(pszFilter, &pFilter->pBackend);
6945 if (RT_FAILURE(rc))
6946 break;
6947 if (!pFilter->pBackend)
6948 {
6949 rc = vdError(pDisk, VERR_INVALID_PARAMETER, RT_SRC_POS,
6950 N_("VD: unknown filter backend name '%s'"), pszFilter);
6951 break;
6952 }
6953
6954 pFilter->VDIo.pDisk = pDisk;
6955 pFilter->pVDIfsFilter = pVDIfsFilter;
6956
6957 /* Set up the internal I/O interface. */
6958 AssertBreakStmt(!VDIfIoIntGet(pVDIfsFilter), rc = VERR_INVALID_PARAMETER);
6959 vdIfIoIntCallbacksSetup(&pFilter->VDIo.VDIfIoInt);
6960 rc = VDInterfaceAdd(&pFilter->VDIo.VDIfIoInt.Core, "VD_IOINT", VDINTERFACETYPE_IOINT,
6961 &pFilter->VDIo, sizeof(VDINTERFACEIOINT), &pFilter->pVDIfsFilter);
6962 AssertRC(rc);
6963
6964 rc = pFilter->pBackend->pfnCreate(pDisk->pVDIfsDisk, fFlags & VD_FILTER_FLAGS_INFO,
6965 pFilter->pVDIfsFilter, &pFilter->pvBackendData);
6966 if (RT_FAILURE(rc))
6967 break;
6968
6969 /* Lock disk for writing, as we modify pDisk information below. */
6970 rc2 = vdThreadStartWrite(pDisk);
6971 AssertRC(rc2);
6972 fLockWrite = true;
6973
6974 /* Add filter to chains. */
6975 if (fFlags & VD_FILTER_FLAGS_WRITE)
6976 {
6977 RTListAppend(&pDisk->ListFilterChainWrite, &pFilter->ListNodeChainWrite);
6978 vdFilterRetain(pFilter);
6979 }
6980
6981 if (fFlags & VD_FILTER_FLAGS_READ)
6982 {
6983 RTListAppend(&pDisk->ListFilterChainRead, &pFilter->ListNodeChainRead);
6984 vdFilterRetain(pFilter);
6985 }
6986 } while (0);
6987
6988 if (RT_UNLIKELY(fLockWrite))
6989 {
6990 rc2 = vdThreadFinishWrite(pDisk);
6991 AssertRC(rc2);
6992 }
6993
6994 if (RT_FAILURE(rc))
6995 {
6996 if (pFilter)
6997 RTMemFree(pFilter);
6998 }
6999
7000 LogFlowFunc(("returns %Rrc\n", rc));
7001 return rc;
7002}
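/*
 * Usage sketch (illustrative only): attaching a filter to both the read and
 * the write chain of an existing container (pDisk). The "DIGEST" filter name
 * is made up for this example; real names depend on the loaded filter plugins.
 *
 *     int rc = VDFilterAdd(pDisk, "DIGEST", VD_FILTER_FLAGS_READ | VD_FILTER_FLAGS_WRITE, NULL);
 */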
7003
7004/**
7005 * Creates and opens a new base image file.
7006 *
7007 * @returns VBox status code.
7008 * @param pDisk Pointer to HDD container.
7009 * @param pszBackend Name of the image file backend to use.
7010 * @param pszFilename Name of the image file to create.
7011 * @param cbSize Image size in bytes.
7012 * @param uImageFlags Flags specifying special image features.
7013 * @param pszComment Pointer to image comment. NULL is ok.
7014 * @param pPCHSGeometry Pointer to physical disk geometry <= (16383,16,63). Not NULL.
7015 * @param pLCHSGeometry Pointer to logical disk geometry <= (x,255,63). Not NULL.
7016 * @param pUuid New UUID of the image. If NULL, a new UUID is created.
7017 * @param uOpenFlags Image file open mode, see VD_OPEN_FLAGS_* constants.
7018 * @param pVDIfsImage Pointer to the per-image VD interface list.
7019 * @param pVDIfsOperation Pointer to the per-operation VD interface list.
7020 */
7021VBOXDDU_DECL(int) VDCreateBase(PVBOXHDD pDisk, const char *pszBackend,
7022 const char *pszFilename, uint64_t cbSize,
7023 unsigned uImageFlags, const char *pszComment,
7024 PCVDGEOMETRY pPCHSGeometry,
7025 PCVDGEOMETRY pLCHSGeometry,
7026 PCRTUUID pUuid, unsigned uOpenFlags,
7027 PVDINTERFACE pVDIfsImage,
7028 PVDINTERFACE pVDIfsOperation)
7029{
7030 int rc = VINF_SUCCESS;
7031 int rc2;
7032 bool fLockWrite = false, fLockRead = false;
7033 PVDIMAGE pImage = NULL;
7034 RTUUID uuid;
7035
7036 LogFlowFunc(("pDisk=%#p pszBackend=\"%s\" pszFilename=\"%s\" cbSize=%llu uImageFlags=%#x pszComment=\"%s\" PCHS=%u/%u/%u LCHS=%u/%u/%u Uuid=%RTuuid uOpenFlags=%#x pVDIfsImage=%#p pVDIfsOperation=%#p\n",
7037 pDisk, pszBackend, pszFilename, cbSize, uImageFlags, pszComment,
7038 pPCHSGeometry->cCylinders, pPCHSGeometry->cHeads,
7039 pPCHSGeometry->cSectors, pLCHSGeometry->cCylinders,
7040 pLCHSGeometry->cHeads, pLCHSGeometry->cSectors, pUuid,
7041 uOpenFlags, pVDIfsImage, pVDIfsOperation));
7042
7043 PVDINTERFACEPROGRESS pIfProgress = VDIfProgressGet(pVDIfsOperation);
7044
7045 do
7046 {
7047 /* sanity check */
7048 AssertPtrBreakStmt(pDisk, rc = VERR_INVALID_PARAMETER);
7049 AssertMsg(pDisk->u32Signature == VBOXHDDDISK_SIGNATURE, ("u32Signature=%08x\n", pDisk->u32Signature));
7050
7051 /* Check arguments. */
7052 AssertMsgBreakStmt(VALID_PTR(pszBackend) && *pszBackend,
7053 ("pszBackend=%#p \"%s\"\n", pszBackend, pszBackend),
7054 rc = VERR_INVALID_PARAMETER);
7055 AssertMsgBreakStmt(VALID_PTR(pszFilename) && *pszFilename,
7056 ("pszFilename=%#p \"%s\"\n", pszFilename, pszFilename),
7057 rc = VERR_INVALID_PARAMETER);
7058 AssertMsgBreakStmt(cbSize,
7059 ("cbSize=%llu\n", cbSize),
7060 rc = VERR_INVALID_PARAMETER);
7061 if (cbSize % 512)
7062 {
7063 rc = vdError(pDisk, VERR_VD_INVALID_SIZE, RT_SRC_POS,
7064 N_("VD: The given disk size %llu is not aligned on a sector boundary (512 bytes)"), cbSize);
7065 break;
7066 }
7067 AssertMsgBreakStmt( ((uImageFlags & ~VD_IMAGE_FLAGS_MASK) == 0)
7068 || ((uImageFlags & (VD_IMAGE_FLAGS_FIXED | VD_IMAGE_FLAGS_DIFF)) != VD_IMAGE_FLAGS_FIXED),
7069 ("uImageFlags=%#x\n", uImageFlags),
7070 rc = VERR_INVALID_PARAMETER);
7071 /* The PCHS geometry fields may be 0 to leave it for later. */
7072 AssertMsgBreakStmt( VALID_PTR(pPCHSGeometry)
7073 && pPCHSGeometry->cHeads <= 16
7074 && pPCHSGeometry->cSectors <= 63,
7075 ("pPCHSGeometry=%#p PCHS=%u/%u/%u\n", pPCHSGeometry,
7076 pPCHSGeometry->cCylinders, pPCHSGeometry->cHeads,
7077 pPCHSGeometry->cSectors),
7078 rc = VERR_INVALID_PARAMETER);
7079 /* The LCHS geometry fields may be 0 to leave it to later autodetection. */
7080 AssertMsgBreakStmt( VALID_PTR(pLCHSGeometry)
7081 && pLCHSGeometry->cHeads <= 255
7082 && pLCHSGeometry->cSectors <= 63,
7083 ("pLCHSGeometry=%#p LCHS=%u/%u/%u\n", pLCHSGeometry,
7084 pLCHSGeometry->cCylinders, pLCHSGeometry->cHeads,
7085 pLCHSGeometry->cSectors),
7086 rc = VERR_INVALID_PARAMETER);
7087 /* The UUID may be NULL. */
7088 AssertMsgBreakStmt(pUuid == NULL || VALID_PTR(pUuid),
7089 ("pUuid=%#p UUID=%RTuuid\n", pUuid, pUuid),
7090 rc = VERR_INVALID_PARAMETER);
7091 AssertMsgBreakStmt((uOpenFlags & ~VD_OPEN_FLAGS_MASK) == 0,
7092 ("uOpenFlags=%#x\n", uOpenFlags),
7093 rc = VERR_INVALID_PARAMETER);
7094
7095 /* Check state. Needs a temporary read lock. Holding the write lock
7096 * all the time would be blocking other activities for too long. */
7097 rc2 = vdThreadStartRead(pDisk);
7098 AssertRC(rc2);
7099 fLockRead = true;
7100 AssertMsgBreakStmt(pDisk->cImages == 0,
7101 ("Create base image cannot be done with other images open\n"),
7102 rc = VERR_VD_INVALID_STATE);
7103 rc2 = vdThreadFinishRead(pDisk);
7104 AssertRC(rc2);
7105 fLockRead = false;
7106
7107 /* Set up image descriptor. */
7108 pImage = (PVDIMAGE)RTMemAllocZ(sizeof(VDIMAGE));
7109 if (!pImage)
7110 {
7111 rc = VERR_NO_MEMORY;
7112 break;
7113 }
7114 pImage->pszFilename = RTStrDup(pszFilename);
7115 if (!pImage->pszFilename)
7116 {
7117 rc = VERR_NO_MEMORY;
7118 break;
7119 }
7120 pImage->VDIo.pDisk = pDisk;
7121 pImage->pVDIfsImage = pVDIfsImage;
7122
7123 /* Set up the I/O interface. */
7124 pImage->VDIo.pInterfaceIo = VDIfIoGet(pVDIfsImage);
7125 if (!pImage->VDIo.pInterfaceIo)
7126 {
7127 vdIfIoFallbackCallbacksSetup(&pImage->VDIo.VDIfIo);
7128 rc = VDInterfaceAdd(&pImage->VDIo.VDIfIo.Core, "VD_IO", VDINTERFACETYPE_IO,
7129 pDisk, sizeof(VDINTERFACEIO), &pVDIfsImage);
7130 pImage->VDIo.pInterfaceIo = &pImage->VDIo.VDIfIo;
7131 }
7132
7133 /* Set up the internal I/O interface. */
7134 AssertBreakStmt(!VDIfIoIntGet(pVDIfsImage), rc = VERR_INVALID_PARAMETER);
7135 vdIfIoIntCallbacksSetup(&pImage->VDIo.VDIfIoInt);
7136 rc = VDInterfaceAdd(&pImage->VDIo.VDIfIoInt.Core, "VD_IOINT", VDINTERFACETYPE_IOINT,
7137 &pImage->VDIo, sizeof(VDINTERFACEIOINT), &pImage->pVDIfsImage);
7138 AssertRC(rc);
7139
7140 rc = vdFindBackend(pszBackend, &pImage->Backend);
7141 if (RT_FAILURE(rc))
7142 break;
7143 if (!pImage->Backend)
7144 {
7145 rc = vdError(pDisk, VERR_INVALID_PARAMETER, RT_SRC_POS,
7146 N_("VD: unknown backend name '%s'"), pszBackend);
7147 break;
7148 }
7149 if (!(pImage->Backend->uBackendCaps & ( VD_CAP_CREATE_FIXED
7150 | VD_CAP_CREATE_DYNAMIC)))
7151 {
7152 rc = vdError(pDisk, VERR_INVALID_PARAMETER, RT_SRC_POS,
7153 N_("VD: backend '%s' cannot create base images"), pszBackend);
7154 break;
7155 }
7156 if ( ( (uImageFlags & VD_VMDK_IMAGE_FLAGS_SPLIT_2G)
7157 && !(pImage->Backend->uBackendCaps & VD_CAP_CREATE_SPLIT_2G))
7158 || ( (uImageFlags & VD_VMDK_IMAGE_FLAGS_STREAM_OPTIMIZED)
7159 && RTStrICmp(pszBackend, "VMDK")))
7160 {
7161 rc = vdError(pDisk, VERR_INVALID_PARAMETER, RT_SRC_POS,
7162 N_("VD: backend '%s' does not support the selected image variant"), pszBackend);
7163 break;
7164 }
7165
7166 /* Create UUID if the caller didn't specify one. */
7167 if (!pUuid)
7168 {
7169 rc = RTUuidCreate(&uuid);
7170 if (RT_FAILURE(rc))
7171 {
7172 rc = vdError(pDisk, rc, RT_SRC_POS,
7173 N_("VD: cannot generate UUID for image '%s'"),
7174 pszFilename);
7175 break;
7176 }
7177 pUuid = &uuid;
7178 }
7179
7180 pImage->uOpenFlags = uOpenFlags & VD_OPEN_FLAGS_HONOR_SAME;
7181 uImageFlags &= ~VD_IMAGE_FLAGS_DIFF;
7182 pImage->VDIo.fIgnoreFlush = (uOpenFlags & VD_OPEN_FLAGS_IGNORE_FLUSH) != 0;
7183 rc = pImage->Backend->pfnCreate(pImage->pszFilename, cbSize,
7184 uImageFlags, pszComment, pPCHSGeometry,
7185 pLCHSGeometry, pUuid,
7186 uOpenFlags & ~VD_OPEN_FLAGS_HONOR_SAME,
7187 0, 99,
7188 pDisk->pVDIfsDisk,
7189 pImage->pVDIfsImage,
7190 pVDIfsOperation,
7191 pDisk->enmType,
7192 &pImage->pBackendData);
7193
7194 if (RT_SUCCESS(rc))
7195 {
7196 pImage->VDIo.pBackendData = pImage->pBackendData;
7197 pImage->uImageFlags = uImageFlags;
7198
7199 /* Force sane optimization settings. It's not worth avoiding writes
7200 * to fixed size images. The overhead would have almost no payback. */
7201 if (uImageFlags & VD_IMAGE_FLAGS_FIXED)
7202 pImage->uOpenFlags |= VD_OPEN_FLAGS_HONOR_SAME;
7203
7204 /* Lock disk for writing, as we modify pDisk information below. */
7205 rc2 = vdThreadStartWrite(pDisk);
7206 AssertRC(rc2);
7207 fLockWrite = true;
7208
7209 /** @todo optionally check UUIDs */
7210
7211 /* Re-check state, as the lock wasn't held and another image
7212 * creation call could have been done by another thread. */
7213 AssertMsgStmt(pDisk->cImages == 0,
7214 ("Create base image cannot be done with other images open\n"),
7215 rc = VERR_VD_INVALID_STATE);
7216 }
7217
7218 if (RT_SUCCESS(rc))
7219 {
7220 /* Cache disk information. */
7221 pDisk->cbSize = pImage->Backend->pfnGetSize(pImage->pBackendData);
7222
7223 /* Cache PCHS geometry. */
7224 rc2 = pImage->Backend->pfnGetPCHSGeometry(pImage->pBackendData,
7225 &pDisk->PCHSGeometry);
7226 if (RT_FAILURE(rc2))
7227 {
7228 pDisk->PCHSGeometry.cCylinders = 0;
7229 pDisk->PCHSGeometry.cHeads = 0;
7230 pDisk->PCHSGeometry.cSectors = 0;
7231 }
7232 else
7233 {
7234 /* Make sure the CHS geometry is properly clipped. */
7235 pDisk->PCHSGeometry.cCylinders = RT_MIN(pDisk->PCHSGeometry.cCylinders, 16383);
7236 pDisk->PCHSGeometry.cHeads = RT_MIN(pDisk->PCHSGeometry.cHeads, 16);
7237 pDisk->PCHSGeometry.cSectors = RT_MIN(pDisk->PCHSGeometry.cSectors, 63);
7238 }
7239
7240 /* Cache LCHS geometry. */
7241 rc2 = pImage->Backend->pfnGetLCHSGeometry(pImage->pBackendData,
7242 &pDisk->LCHSGeometry);
7243 if (RT_FAILURE(rc2))
7244 {
7245 pDisk->LCHSGeometry.cCylinders = 0;
7246 pDisk->LCHSGeometry.cHeads = 0;
7247 pDisk->LCHSGeometry.cSectors = 0;
7248 }
7249 else
7250 {
7251 /* Make sure the CHS geometry is properly clipped. */
7252 pDisk->LCHSGeometry.cHeads = RT_MIN(pDisk->LCHSGeometry.cHeads, 255);
7253 pDisk->LCHSGeometry.cSectors = RT_MIN(pDisk->LCHSGeometry.cSectors, 63);
7254 }
7255
7256 /* Image successfully opened, make it the last image. */
7257 vdAddImageToList(pDisk, pImage);
7258 if (!(uOpenFlags & VD_OPEN_FLAGS_READONLY))
7259 pDisk->uModified = VD_IMAGE_MODIFIED_FIRST;
7260 }
7261 else
7262 {
7263 /* Error detected, image may or may not be opened. Close and delete
7264 * image if it was opened. */
7265 if (pImage->pBackendData)
7266 {
7267 rc2 = pImage->Backend->pfnClose(pImage->pBackendData, true);
7268 AssertRC(rc2);
7269 pImage->pBackendData = NULL;
7270 }
7271 }
7272 } while (0);
7273
7274 if (RT_UNLIKELY(fLockWrite))
7275 {
7276 rc2 = vdThreadFinishWrite(pDisk);
7277 AssertRC(rc2);
7278 }
7279 else if (RT_UNLIKELY(fLockRead))
7280 {
7281 rc2 = vdThreadFinishRead(pDisk);
7282 AssertRC(rc2);
7283 }
7284
7285 if (RT_FAILURE(rc))
7286 {
7287 if (pImage)
7288 {
7289 if (pImage->pszFilename)
7290 RTStrFree(pImage->pszFilename);
7291 RTMemFree(pImage);
7292 }
7293 }
7294
7295 if (RT_SUCCESS(rc) && pIfProgress && pIfProgress->pfnProgress)
7296 pIfProgress->pfnProgress(pIfProgress->Core.pvUser, 100);
7297
7298 LogFlowFunc(("returns %Rrc\n", rc));
7299 return rc;
7300}
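/*
 * Illustrative usage sketch for VDCreateBase(), added for documentation
 * purposes and not part of the original source. It assumes an already
 * created, still empty HDD container in pDisk; the backend name, path and
 * size below are hypothetical example values.
 */
#if 0
static int vdExampleCreateBase(PVBOXHDD pDisk)
{
    /* All-zero geometries: leave PCHS/LCHS for later (auto)detection. */
    VDGEOMETRY PCHSGeo = { 0, 0, 0 };
    VDGEOMETRY LCHSGeo = { 0, 0, 0 };

    /* Create a 2 GiB dynamically allocated base image. The NULL UUID lets
     * VDCreateBase() generate a fresh one, and the trailing NULLs mean no
     * per-image and no per-operation interface lists. */
    return VDCreateBase(pDisk, "VDI", "/tmp/example-base.vdi", 2ULL * _1G,
                        VD_IMAGE_FLAGS_NONE, NULL /* pszComment */,
                        &PCHSGeo, &LCHSGeo, NULL /* pUuid */,
                        VD_OPEN_FLAGS_NORMAL, NULL, NULL);
}
#endif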
7301
7302/**
7303 * Creates and opens a new differencing image file in HDD container.
7304 * See comments for VDOpen function about differencing images.
7305 *
7306 * @returns VBox status code.
7307 * @param pDisk Pointer to HDD container.
7308 * @param pszBackend Name of the image file backend to use.
7309 * @param pszFilename Name of the differencing image file to create.
7310 * @param uImageFlags Flags specifying special image features.
7311 * @param pszComment Pointer to image comment. NULL is ok.
7312 * @param pUuid New UUID of the image. If NULL, a new UUID is created.
7313 * @param pParentUuid New parent UUID of the image. If NULL, the UUID is queried automatically.
7314 * @param uOpenFlags Image file open mode, see VD_OPEN_FLAGS_* constants.
7315 * @param pVDIfsImage Pointer to the per-image VD interface list.
7316 * @param pVDIfsOperation Pointer to the per-operation VD interface list.
7317 */
7318VBOXDDU_DECL(int) VDCreateDiff(PVBOXHDD pDisk, const char *pszBackend,
7319 const char *pszFilename, unsigned uImageFlags,
7320 const char *pszComment, PCRTUUID pUuid,
7321 PCRTUUID pParentUuid, unsigned uOpenFlags,
7322 PVDINTERFACE pVDIfsImage,
7323 PVDINTERFACE pVDIfsOperation)
7324{
7325 int rc = VINF_SUCCESS;
7326 int rc2;
7327 bool fLockWrite = false, fLockRead = false;
7328 PVDIMAGE pImage = NULL;
7329 RTUUID uuid;
7330
7331 LogFlowFunc(("pDisk=%#p pszBackend=\"%s\" pszFilename=\"%s\" uImageFlags=%#x pszComment=\"%s\" Uuid=%RTuuid uOpenFlags=%#x pVDIfsImage=%#p pVDIfsOperation=%#p\n",
7332 pDisk, pszBackend, pszFilename, uImageFlags, pszComment, pUuid, uOpenFlags, pVDIfsImage, pVDIfsOperation));
7333
7334 PVDINTERFACEPROGRESS pIfProgress = VDIfProgressGet(pVDIfsOperation);
7335
7336 do
7337 {
7338 /* sanity check */
7339 AssertPtrBreakStmt(pDisk, rc = VERR_INVALID_PARAMETER);
7340 AssertMsg(pDisk->u32Signature == VBOXHDDDISK_SIGNATURE, ("u32Signature=%08x\n", pDisk->u32Signature));
7341
7342 /* Check arguments. */
7343 AssertMsgBreakStmt(VALID_PTR(pszBackend) && *pszBackend,
7344 ("pszBackend=%#p \"%s\"\n", pszBackend, pszBackend),
7345 rc = VERR_INVALID_PARAMETER);
7346 AssertMsgBreakStmt(VALID_PTR(pszFilename) && *pszFilename,
7347 ("pszFilename=%#p \"%s\"\n", pszFilename, pszFilename),
7348 rc = VERR_INVALID_PARAMETER);
7349 AssertMsgBreakStmt((uImageFlags & ~VD_IMAGE_FLAGS_MASK) == 0,
7350 ("uImageFlags=%#x\n", uImageFlags),
7351 rc = VERR_INVALID_PARAMETER);
7352 /* The UUID may be NULL. */
7353 AssertMsgBreakStmt(pUuid == NULL || VALID_PTR(pUuid),
7354 ("pUuid=%#p UUID=%RTuuid\n", pUuid, pUuid),
7355 rc = VERR_INVALID_PARAMETER);
7356 /* The parent UUID may be NULL. */
7357 AssertMsgBreakStmt(pParentUuid == NULL || VALID_PTR(pParentUuid),
7358 ("pParentUuid=%#p ParentUUID=%RTuuid\n", pParentUuid, pParentUuid),
7359 rc = VERR_INVALID_PARAMETER);
7360 AssertMsgBreakStmt((uOpenFlags & ~VD_OPEN_FLAGS_MASK) == 0,
7361 ("uOpenFlags=%#x\n", uOpenFlags),
7362 rc = VERR_INVALID_PARAMETER);
7363
7364 /* Check state. Needs a temporary read lock. Holding the write lock
7365 * all the time would be blocking other activities for too long. */
7366 rc2 = vdThreadStartRead(pDisk);
7367 AssertRC(rc2);
7368 fLockRead = true;
7369 AssertMsgBreakStmt(pDisk->cImages != 0,
7370 ("Create diff image cannot be done without other images open\n"),
7371 rc = VERR_VD_INVALID_STATE);
7372 rc2 = vdThreadFinishRead(pDisk);
7373 AssertRC(rc2);
7374 fLockRead = false;
7375
7376 /*
7377 * Destroy the current discard state first; it might still have pending blocks
7378 * for the currently opened image, which will be switched to read-only mode.
7379 */
7380 /* Lock disk for writing, as we modify pDisk information below. */
7381 rc2 = vdThreadStartWrite(pDisk);
7382 AssertRC(rc2);
7383 fLockWrite = true;
7384 rc = vdDiscardStateDestroy(pDisk);
7385 if (RT_FAILURE(rc))
7386 break;
7387 rc2 = vdThreadFinishWrite(pDisk);
7388 AssertRC(rc2);
7389 fLockWrite = false;
7390
7391 /* Set up image descriptor. */
7392 pImage = (PVDIMAGE)RTMemAllocZ(sizeof(VDIMAGE));
7393 if (!pImage)
7394 {
7395 rc = VERR_NO_MEMORY;
7396 break;
7397 }
7398 pImage->pszFilename = RTStrDup(pszFilename);
7399 if (!pImage->pszFilename)
7400 {
7401 rc = VERR_NO_MEMORY;
7402 break;
7403 }
7404
7405 rc = vdFindBackend(pszBackend, &pImage->Backend);
7406 if (RT_FAILURE(rc))
7407 break;
7408 if (!pImage->Backend)
7409 {
7410 rc = vdError(pDisk, VERR_INVALID_PARAMETER, RT_SRC_POS,
7411 N_("VD: unknown backend name '%s'"), pszBackend);
7412 break;
7413 }
7414 if ( !(pImage->Backend->uBackendCaps & VD_CAP_DIFF)
7415 || !(pImage->Backend->uBackendCaps & ( VD_CAP_CREATE_FIXED
7416 | VD_CAP_CREATE_DYNAMIC)))
7417 {
7418 rc = vdError(pDisk, VERR_INVALID_PARAMETER, RT_SRC_POS,
7419 N_("VD: backend '%s' cannot create diff images"), pszBackend);
7420 break;
7421 }
7422
7423 pImage->VDIo.pDisk = pDisk;
7424 pImage->pVDIfsImage = pVDIfsImage;
7425
7426 /* Set up the I/O interface. */
7427 pImage->VDIo.pInterfaceIo = VDIfIoGet(pVDIfsImage);
7428 if (!pImage->VDIo.pInterfaceIo)
7429 {
7430 vdIfIoFallbackCallbacksSetup(&pImage->VDIo.VDIfIo);
7431 rc = VDInterfaceAdd(&pImage->VDIo.VDIfIo.Core, "VD_IO", VDINTERFACETYPE_IO,
7432 pDisk, sizeof(VDINTERFACEIO), &pVDIfsImage);
7433 pImage->VDIo.pInterfaceIo = &pImage->VDIo.VDIfIo;
7434 }
7435
7436 /* Set up the internal I/O interface. */
7437 AssertBreakStmt(!VDIfIoIntGet(pVDIfsImage), rc = VERR_INVALID_PARAMETER);
7438 vdIfIoIntCallbacksSetup(&pImage->VDIo.VDIfIoInt);
7439 rc = VDInterfaceAdd(&pImage->VDIo.VDIfIoInt.Core, "VD_IOINT", VDINTERFACETYPE_IOINT,
7440 &pImage->VDIo, sizeof(VDINTERFACEIOINT), &pImage->pVDIfsImage);
7441 AssertRC(rc);
7442
7443 /* Create UUID if the caller didn't specify one. */
7444 if (!pUuid)
7445 {
7446 rc = RTUuidCreate(&uuid);
7447 if (RT_FAILURE(rc))
7448 {
7449 rc = vdError(pDisk, rc, RT_SRC_POS,
7450 N_("VD: cannot generate UUID for image '%s'"),
7451 pszFilename);
7452 break;
7453 }
7454 pUuid = &uuid;
7455 }
7456
7457 pImage->uOpenFlags = uOpenFlags & VD_OPEN_FLAGS_HONOR_SAME;
7458 pImage->VDIo.fIgnoreFlush = (uOpenFlags & VD_OPEN_FLAGS_IGNORE_FLUSH) != 0;
7459 uImageFlags |= VD_IMAGE_FLAGS_DIFF;
7460 rc = pImage->Backend->pfnCreate(pImage->pszFilename, pDisk->cbSize,
7461 uImageFlags | VD_IMAGE_FLAGS_DIFF,
7462 pszComment, &pDisk->PCHSGeometry,
7463 &pDisk->LCHSGeometry, pUuid,
7464 uOpenFlags & ~VD_OPEN_FLAGS_HONOR_SAME,
7465 0, 99,
7466 pDisk->pVDIfsDisk,
7467 pImage->pVDIfsImage,
7468 pVDIfsOperation,
7469 pDisk->enmType,
7470 &pImage->pBackendData);
7471
7472 if (RT_SUCCESS(rc))
7473 {
7474 pImage->VDIo.pBackendData = pImage->pBackendData;
7475 pImage->uImageFlags = uImageFlags;
7476
7477 /* Lock disk for writing, as we modify pDisk information below. */
7478 rc2 = vdThreadStartWrite(pDisk);
7479 AssertRC(rc2);
7480 fLockWrite = true;
7481
7482 /* Switch previous image to read-only mode. */
7483 unsigned uOpenFlagsPrevImg;
7484 uOpenFlagsPrevImg = pDisk->pLast->Backend->pfnGetOpenFlags(pDisk->pLast->pBackendData);
7485 if (!(uOpenFlagsPrevImg & VD_OPEN_FLAGS_READONLY))
7486 {
7487 uOpenFlagsPrevImg |= VD_OPEN_FLAGS_READONLY;
7488 rc = pDisk->pLast->Backend->pfnSetOpenFlags(pDisk->pLast->pBackendData, uOpenFlagsPrevImg);
7489 }
7490
7491 /** @todo optionally check UUIDs */
7492
7493 /* Re-check state, as the lock wasn't held and another image
7494 * creation call could have been done by another thread. */
7495 AssertMsgStmt(pDisk->cImages != 0,
7496 ("Create diff image cannot be done without other images open\n"),
7497 rc = VERR_VD_INVALID_STATE);
7498 }
7499
7500 if (RT_SUCCESS(rc))
7501 {
7502 RTUUID Uuid;
7503 RTTIMESPEC ts;
7504
7505 if (pParentUuid && !RTUuidIsNull(pParentUuid))
7506 {
7507 Uuid = *pParentUuid;
7508 pImage->Backend->pfnSetParentUuid(pImage->pBackendData, &Uuid);
7509 }
7510 else
7511 {
7512 rc2 = pDisk->pLast->Backend->pfnGetUuid(pDisk->pLast->pBackendData,
7513 &Uuid);
7514 if (RT_SUCCESS(rc2))
7515 pImage->Backend->pfnSetParentUuid(pImage->pBackendData, &Uuid);
7516 }
7517 rc2 = pDisk->pLast->Backend->pfnGetModificationUuid(pDisk->pLast->pBackendData,
7518 &Uuid);
7519 if (RT_SUCCESS(rc2))
7520 pImage->Backend->pfnSetParentModificationUuid(pImage->pBackendData,
7521 &Uuid);
7522 if (pDisk->pLast->Backend->pfnGetTimestamp)
7523 rc2 = pDisk->pLast->Backend->pfnGetTimestamp(pDisk->pLast->pBackendData,
7524 &ts);
7525 else
7526 rc2 = VERR_NOT_IMPLEMENTED;
7527 if (RT_SUCCESS(rc2) && pImage->Backend->pfnSetParentTimestamp)
7528 pImage->Backend->pfnSetParentTimestamp(pImage->pBackendData, &ts);
7529
7530 if (pImage->Backend->pfnSetParentFilename)
7531 rc2 = pImage->Backend->pfnSetParentFilename(pImage->pBackendData, pDisk->pLast->pszFilename);
7532 }
7533
7534 if (RT_SUCCESS(rc))
7535 {
7536 /* Image successfully opened, make it the last image. */
7537 vdAddImageToList(pDisk, pImage);
7538 if (!(uOpenFlags & VD_OPEN_FLAGS_READONLY))
7539 pDisk->uModified = VD_IMAGE_MODIFIED_FIRST;
7540 }
7541 else
7542 {
7543 /* Error detected, but image opened. Close and delete image. */
7544 rc2 = pImage->Backend->pfnClose(pImage->pBackendData, true);
7545 AssertRC(rc2);
7546 pImage->pBackendData = NULL;
7547 }
7548 } while (0);
7549
7550 if (RT_UNLIKELY(fLockWrite))
7551 {
7552 rc2 = vdThreadFinishWrite(pDisk);
7553 AssertRC(rc2);
7554 }
7555 else if (RT_UNLIKELY(fLockRead))
7556 {
7557 rc2 = vdThreadFinishRead(pDisk);
7558 AssertRC(rc2);
7559 }
7560
7561 if (RT_FAILURE(rc))
7562 {
7563 if (pImage)
7564 {
7565 if (pImage->pszFilename)
7566 RTStrFree(pImage->pszFilename);
7567 RTMemFree(pImage);
7568 }
7569 }
7570
7571 if (RT_SUCCESS(rc) && pIfProgress && pIfProgress->pfnProgress)
7572 pIfProgress->pfnProgress(pIfProgress->Core.pvUser, 100);
7573
7574 LogFlowFunc(("returns %Rrc\n", rc));
7575 return rc;
7576}
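/*
 * Illustrative usage sketch for VDCreateDiff(), not part of the original
 * source. It assumes pDisk already contains at least one opened image;
 * backend name and path are hypothetical example values.
 */
#if 0
static int vdExampleCreateDiff(PVBOXHDD pDisk)
{
    /* NULL pUuid/pParentUuid: generate a new image UUID and take the parent
     * UUID from the previously last image automatically. */
    return VDCreateDiff(pDisk, "VDI", "/tmp/example-diff.vdi",
                        VD_IMAGE_FLAGS_NONE, NULL /* pszComment */,
                        NULL /* pUuid */, NULL /* pParentUuid */,
                        VD_OPEN_FLAGS_NORMAL, NULL, NULL);
}
#endif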
7577
7578
7579/**
7580 * Creates and opens a new cache image file in HDD container.
7581 *
7582 * @return VBox status code.
7583 * @param pDisk Pointer to HDD container.
7584 * @param pszBackend Name of the cache file backend to use (case insensitive).
     * @param pszFilename Name of the cache file to create.
7585 * @param cbSize Maximum size of the cache.
7586 * @param uImageFlags Flags specifying special cache features.
7587 * @param pszComment Pointer to image comment. NULL is ok.
7588 * @param pUuid New UUID of the image. If NULL, a new UUID is created.
7589 * @param uOpenFlags Image file open mode, see VD_OPEN_FLAGS_* constants.
7590 * @param pVDIfsCache Pointer to the per-cache VD interface list.
7591 * @param pVDIfsOperation Pointer to the per-operation VD interface list.
7592 */
7593VBOXDDU_DECL(int) VDCreateCache(PVBOXHDD pDisk, const char *pszBackend,
7594 const char *pszFilename, uint64_t cbSize,
7595 unsigned uImageFlags, const char *pszComment,
7596 PCRTUUID pUuid, unsigned uOpenFlags,
7597 PVDINTERFACE pVDIfsCache, PVDINTERFACE pVDIfsOperation)
7598{
7599 int rc = VINF_SUCCESS;
7600 int rc2;
7601 bool fLockWrite = false, fLockRead = false;
7602 PVDCACHE pCache = NULL;
7603 RTUUID uuid;
7604
7605 LogFlowFunc(("pDisk=%#p pszBackend=\"%s\" pszFilename=\"%s\" cbSize=%llu uImageFlags=%#x pszComment=\"%s\" Uuid=%RTuuid uOpenFlags=%#x pVDIfsImage=%#p pVDIfsOperation=%#p\n",
7606 pDisk, pszBackend, pszFilename, cbSize, uImageFlags, pszComment, pUuid, uOpenFlags, pVDIfsCache, pVDIfsOperation));
7607
7608 PVDINTERFACEPROGRESS pIfProgress = VDIfProgressGet(pVDIfsOperation);
7609
7610 do
7611 {
7612 /* sanity check */
7613 AssertPtrBreakStmt(pDisk, rc = VERR_INVALID_PARAMETER);
7614 AssertMsg(pDisk->u32Signature == VBOXHDDDISK_SIGNATURE, ("u32Signature=%08x\n", pDisk->u32Signature));
7615
7616 /* Check arguments. */
7617 AssertMsgBreakStmt(VALID_PTR(pszBackend) && *pszBackend,
7618 ("pszBackend=%#p \"%s\"\n", pszBackend, pszBackend),
7619 rc = VERR_INVALID_PARAMETER);
7620 AssertMsgBreakStmt(VALID_PTR(pszFilename) && *pszFilename,
7621 ("pszFilename=%#p \"%s\"\n", pszFilename, pszFilename),
7622 rc = VERR_INVALID_PARAMETER);
7623 AssertMsgBreakStmt(cbSize,
7624 ("cbSize=%llu\n", cbSize),
7625 rc = VERR_INVALID_PARAMETER);
7626 AssertMsgBreakStmt((uImageFlags & ~VD_IMAGE_FLAGS_MASK) == 0,
7627 ("uImageFlags=%#x\n", uImageFlags),
7628 rc = VERR_INVALID_PARAMETER);
7629 /* The UUID may be NULL. */
7630 AssertMsgBreakStmt(pUuid == NULL || VALID_PTR(pUuid),
7631 ("pUuid=%#p UUID=%RTuuid\n", pUuid, pUuid),
7632 rc = VERR_INVALID_PARAMETER);
7633 AssertMsgBreakStmt((uOpenFlags & ~VD_OPEN_FLAGS_MASK) == 0,
7634 ("uOpenFlags=%#x\n", uOpenFlags),
7635 rc = VERR_INVALID_PARAMETER);
7636
7637 /* Check state. Needs a temporary read lock. Holding the write lock
7638 * all the time would be blocking other activities for too long. */
7639 rc2 = vdThreadStartRead(pDisk);
7640 AssertRC(rc2);
7641 fLockRead = true;
7642 AssertMsgBreakStmt(!pDisk->pCache,
7643 ("Create cache image cannot be done with a cache already attached\n"),
7644 rc = VERR_VD_CACHE_ALREADY_EXISTS);
7645 rc2 = vdThreadFinishRead(pDisk);
7646 AssertRC(rc2);
7647 fLockRead = false;
7648
7649 /* Set up image descriptor. */
7650 pCache = (PVDCACHE)RTMemAllocZ(sizeof(VDCACHE));
7651 if (!pCache)
7652 {
7653 rc = VERR_NO_MEMORY;
7654 break;
7655 }
7656 pCache->pszFilename = RTStrDup(pszFilename);
7657 if (!pCache->pszFilename)
7658 {
7659 rc = VERR_NO_MEMORY;
7660 break;
7661 }
7662
7663 rc = vdFindCacheBackend(pszBackend, &pCache->Backend);
7664 if (RT_FAILURE(rc))
7665 break;
7666 if (!pCache->Backend)
7667 {
7668 rc = vdError(pDisk, VERR_INVALID_PARAMETER, RT_SRC_POS,
7669 N_("VD: unknown backend name '%s'"), pszBackend);
7670 break;
7671 }
7672
7673 pCache->VDIo.pDisk = pDisk;
7674 pCache->pVDIfsCache = pVDIfsCache;
7675
7676 /* Set up the I/O interface. */
7677 pCache->VDIo.pInterfaceIo = VDIfIoGet(pVDIfsCache);
7678 if (!pCache->VDIo.pInterfaceIo)
7679 {
7680 vdIfIoFallbackCallbacksSetup(&pCache->VDIo.VDIfIo);
7681 rc = VDInterfaceAdd(&pCache->VDIo.VDIfIo.Core, "VD_IO", VDINTERFACETYPE_IO,
7682 pDisk, sizeof(VDINTERFACEIO), &pVDIfsCache);
7683 pCache->VDIo.pInterfaceIo = &pCache->VDIo.VDIfIo;
7684 }
7685
7686 /* Set up the internal I/O interface. */
7687 AssertBreakStmt(!VDIfIoIntGet(pVDIfsCache), rc = VERR_INVALID_PARAMETER);
7688 vdIfIoIntCallbacksSetup(&pCache->VDIo.VDIfIoInt);
7689 rc = VDInterfaceAdd(&pCache->VDIo.VDIfIoInt.Core, "VD_IOINT", VDINTERFACETYPE_IOINT,
7690 &pCache->VDIo, sizeof(VDINTERFACEIOINT), &pCache->pVDIfsCache);
7691 AssertRC(rc);
7692
7693 /* Create UUID if the caller didn't specify one. */
7694 if (!pUuid)
7695 {
7696 rc = RTUuidCreate(&uuid);
7697 if (RT_FAILURE(rc))
7698 {
7699 rc = vdError(pDisk, rc, RT_SRC_POS,
7700 N_("VD: cannot generate UUID for image '%s'"),
7701 pszFilename);
7702 break;
7703 }
7704 pUuid = &uuid;
7705 }
7706
7707 pCache->uOpenFlags = uOpenFlags & VD_OPEN_FLAGS_HONOR_SAME;
7708 pCache->VDIo.fIgnoreFlush = (uOpenFlags & VD_OPEN_FLAGS_IGNORE_FLUSH) != 0;
7709 rc = pCache->Backend->pfnCreate(pCache->pszFilename, cbSize,
7710 uImageFlags,
7711 pszComment, pUuid,
7712 uOpenFlags & ~VD_OPEN_FLAGS_HONOR_SAME,
7713 0, 99,
7714 pDisk->pVDIfsDisk,
7715 pCache->pVDIfsCache,
7716 pVDIfsOperation,
7717 &pCache->pBackendData);
7718
7719 if (RT_SUCCESS(rc))
7720 {
7721 /* Lock disk for writing, as we modify pDisk information below. */
7722 rc2 = vdThreadStartWrite(pDisk);
7723 AssertRC(rc2);
7724 fLockWrite = true;
7725
7726 pCache->VDIo.pBackendData = pCache->pBackendData;
7727
7728 /* Re-check state, as the lock wasn't held and another image
7729 * creation call could have been done by another thread. */
7730 AssertMsgStmt(!pDisk->pCache,
7731 ("Create cache image cannot be done with another cache open\n"),
7732 rc = VERR_VD_CACHE_ALREADY_EXISTS);
7733 }
7734
7735 if ( RT_SUCCESS(rc)
7736 && pDisk->pLast)
7737 {
7738 RTUUID UuidModification;
7739
7740 /* Set same modification Uuid as the last image. */
7741 rc = pDisk->pLast->Backend->pfnGetModificationUuid(pDisk->pLast->pBackendData,
7742 &UuidModification);
7743 if (RT_SUCCESS(rc))
7744 {
7745 rc = pCache->Backend->pfnSetModificationUuid(pCache->pBackendData,
7746 &UuidModification);
7747 }
7748
7749 if (rc == VERR_NOT_SUPPORTED)
7750 rc = VINF_SUCCESS;
7751 }
7752
7753 if (RT_SUCCESS(rc))
7754 {
7755 /* Cache successfully created. */
7756 pDisk->pCache = pCache;
7757 }
7758 else
7759 {
7760 /* Error detected, but image opened. Close and delete image. */
7761 rc2 = pCache->Backend->pfnClose(pCache->pBackendData, true);
7762 AssertRC(rc2);
7763 pCache->pBackendData = NULL;
7764 }
7765 } while (0);
7766
7767 if (RT_UNLIKELY(fLockWrite))
7768 {
7769 rc2 = vdThreadFinishWrite(pDisk);
7770 AssertRC(rc2);
7771 }
7772 else if (RT_UNLIKELY(fLockRead))
7773 {
7774 rc2 = vdThreadFinishRead(pDisk);
7775 AssertRC(rc2);
7776 }
7777
7778 if (RT_FAILURE(rc))
7779 {
7780 if (pCache)
7781 {
7782 if (pCache->pszFilename)
7783 RTStrFree(pCache->pszFilename);
7784 RTMemFree(pCache);
7785 }
7786 }
7787
7788 if (RT_SUCCESS(rc) && pIfProgress && pIfProgress->pfnProgress)
7789 pIfProgress->pfnProgress(pIfProgress->Core.pvUser, 100);
7790
7791 LogFlowFunc(("returns %Rrc\n", rc));
7792 return rc;
7793}
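/*
 * Illustrative usage sketch for VDCreateCache(), not part of the original
 * source. The cache backend name "VCI" is an assumption, and the path and
 * size are hypothetical example values; at most one cache can be attached
 * to a container.
 */
#if 0
static int vdExampleCreateCache(PVBOXHDD pDisk)
{
    return VDCreateCache(pDisk, "VCI", "/tmp/example-cache.vci", 256 * _1M,
                         VD_IMAGE_FLAGS_NONE, NULL /* pszComment */,
                         NULL /* pUuid */, VD_OPEN_FLAGS_NORMAL,
                         NULL, NULL);
}
#endif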
7794
7795/**
7796 * Merges two images (not necessarily with direct parent/child relationship).
7797 * As a side effect the source image and potentially the other images which
7798 * are merged into the destination are deleted: their files are removed from
7799 * disk and the images are removed from the HDD container.
7800 *
7801 * @returns VBox status code.
7802 * @returns VERR_VD_IMAGE_NOT_FOUND if image with specified number was not opened.
7803 * @param pDisk Pointer to HDD container.
7804 * @param nImageFrom Number of the image to merge from, counts from 0.
7805 * @param nImageTo Number of the image to merge to, counts from 0.
7806 * @param pVDIfsOperation Pointer to the per-operation VD interface list.
7807 */
7808VBOXDDU_DECL(int) VDMerge(PVBOXHDD pDisk, unsigned nImageFrom,
7809 unsigned nImageTo, PVDINTERFACE pVDIfsOperation)
7810{
7811 int rc = VINF_SUCCESS;
7812 int rc2;
7813 bool fLockWrite = false, fLockRead = false;
7814 void *pvBuf = NULL;
7815
7816 LogFlowFunc(("pDisk=%#p nImageFrom=%u nImageTo=%u pVDIfsOperation=%#p\n",
7817 pDisk, nImageFrom, nImageTo, pVDIfsOperation));
7818
7819 PVDINTERFACEPROGRESS pIfProgress = VDIfProgressGet(pVDIfsOperation);
7820
7821 do
7822 {
7823 /* sanity check */
7824 AssertPtrBreakStmt(pDisk, rc = VERR_INVALID_PARAMETER);
7825 AssertMsg(pDisk->u32Signature == VBOXHDDDISK_SIGNATURE, ("u32Signature=%08x\n", pDisk->u32Signature));
7826
7827 /* For simplicity, lock for writing, as the image reopen below might
7828 * need it. After all, the reopen is usually needed. */
7829 rc2 = vdThreadStartWrite(pDisk);
7830 AssertRC(rc2);
7831 fLockWrite = true;
7832 PVDIMAGE pImageFrom = vdGetImageByNumber(pDisk, nImageFrom);
7833 PVDIMAGE pImageTo = vdGetImageByNumber(pDisk, nImageTo);
7834 if (!pImageFrom || !pImageTo)
7835 {
7836 rc = VERR_VD_IMAGE_NOT_FOUND;
7837 break;
7838 }
7839 AssertBreakStmt(pImageFrom != pImageTo, rc = VERR_INVALID_PARAMETER);
7840
7841 /* Make sure destination image is writable. */
7842 unsigned uOpenFlags = pImageTo->Backend->pfnGetOpenFlags(pImageTo->pBackendData);
7843 if (uOpenFlags & VD_OPEN_FLAGS_READONLY)
7844 {
7845 /*
7846 * Clear skip consistency checks because the image is made writable now and
7847 * skipping consistency checks is only possible for readonly images.
7848 */
7849 uOpenFlags &= ~(VD_OPEN_FLAGS_READONLY | VD_OPEN_FLAGS_SKIP_CONSISTENCY_CHECKS);
7850 rc = pImageTo->Backend->pfnSetOpenFlags(pImageTo->pBackendData,
7851 uOpenFlags);
7852 if (RT_FAILURE(rc))
7853 break;
7854 }
7855
7856 /* Get size of destination image. */
7857 uint64_t cbSize = pImageTo->Backend->pfnGetSize(pImageTo->pBackendData);
7858 rc2 = vdThreadFinishWrite(pDisk);
7859 AssertRC(rc2);
7860 fLockWrite = false;
7861
7862 /* Allocate tmp buffer. */
7863 pvBuf = RTMemTmpAlloc(VD_MERGE_BUFFER_SIZE);
7864 if (!pvBuf)
7865 {
7866 rc = VERR_NO_MEMORY;
7867 break;
7868 }
7869
7870 /* Merging is done directly on the images themselves. This potentially
7871 * causes trouble if the disk runs full in the middle of the operation. */
7872 if (nImageFrom < nImageTo)
7873 {
7874 /* Merge parent state into child. This means writing all not
7875 * allocated blocks in the destination image which are allocated in
7876 * the images to be merged. */
7877 uint64_t uOffset = 0;
7878 uint64_t cbRemaining = cbSize;
7879 do
7880 {
7881 size_t cbThisRead = RT_MIN(VD_MERGE_BUFFER_SIZE, cbRemaining);
7882 RTSGSEG SegmentBuf;
7883 RTSGBUF SgBuf;
7884 VDIOCTX IoCtx;
7885
7886 SegmentBuf.pvSeg = pvBuf;
7887 SegmentBuf.cbSeg = VD_MERGE_BUFFER_SIZE;
7888 RTSgBufInit(&SgBuf, &SegmentBuf, 1);
7889 vdIoCtxInit(&IoCtx, pDisk, VDIOCTXTXDIR_READ, 0, 0, NULL,
7890 &SgBuf, NULL, NULL, VDIOCTX_FLAGS_SYNC);
7891
7892 /* Need to hold the write lock during a read-write operation. */
7893 rc2 = vdThreadStartWrite(pDisk);
7894 AssertRC(rc2);
7895 fLockWrite = true;
7896
7897 rc = pImageTo->Backend->pfnRead(pImageTo->pBackendData,
7898 uOffset, cbThisRead,
7899 &IoCtx, &cbThisRead);
7900 if (rc == VERR_VD_BLOCK_FREE)
7901 {
7902 /* Search for image with allocated block. Do not attempt to
7903 * read more than the previous reads marked as valid.
7904 * Otherwise this would return stale data when different
7905 * block sizes are used for the images. */
7906 for (PVDIMAGE pCurrImage = pImageTo->pPrev;
7907 pCurrImage != NULL && pCurrImage != pImageFrom->pPrev && rc == VERR_VD_BLOCK_FREE;
7908 pCurrImage = pCurrImage->pPrev)
7909 {
7910 rc = pCurrImage->Backend->pfnRead(pCurrImage->pBackendData,
7911 uOffset, cbThisRead,
7912 &IoCtx, &cbThisRead);
7913 }
7914
7915 if (rc != VERR_VD_BLOCK_FREE)
7916 {
7917 if (RT_FAILURE(rc))
7918 break;
7919 /* Updating the cache is required because this might be a live merge. */
7920 rc = vdWriteHelperEx(pDisk, pImageTo, pImageFrom->pPrev,
7921 uOffset, pvBuf, cbThisRead,
7922 VDIOCTX_FLAGS_READ_UPDATE_CACHE, 0);
7923 if (RT_FAILURE(rc))
7924 break;
7925 }
7926 else
7927 rc = VINF_SUCCESS;
7928 }
7929 else if (RT_FAILURE(rc))
7930 break;
7931
7932 rc2 = vdThreadFinishWrite(pDisk);
7933 AssertRC(rc2);
7934 fLockWrite = false;
7935
7936 uOffset += cbThisRead;
7937 cbRemaining -= cbThisRead;
7938
7939 if (pIfProgress && pIfProgress->pfnProgress)
7940 {
7941 /** @todo r=klaus: this can update the progress to the same
7942 * percentage over and over again if the image format makes
7943 * relatively small increments. */
7944 rc = pIfProgress->pfnProgress(pIfProgress->Core.pvUser,
7945 uOffset * 99 / cbSize);
7946 if (RT_FAILURE(rc))
7947 break;
7948 }
7949 } while (uOffset < cbSize);
7950 }
7951 else
7952 {
7953 /*
7954 * We may need to update the parent uuid of the child coming after
7955 * the last image to be merged. We have to reopen it read/write.
7956 *
7957 * This is done before we do the actual merge to prevent an
7958 * inconsistent chain if the mode change fails for some reason.
7959 */
7960 if (pImageFrom->pNext)
7961 {
7962 PVDIMAGE pImageChild = pImageFrom->pNext;
7963
7964 /* Take the write lock. */
7965 rc2 = vdThreadStartWrite(pDisk);
7966 AssertRC(rc2);
7967 fLockWrite = true;
7968
7969 /* We need to open the image in read/write mode. */
7970 uOpenFlags = pImageChild->Backend->pfnGetOpenFlags(pImageChild->pBackendData);
7971
7972 if (uOpenFlags & VD_OPEN_FLAGS_READONLY)
7973 {
7974 uOpenFlags &= ~VD_OPEN_FLAGS_READONLY;
7975 rc = pImageChild->Backend->pfnSetOpenFlags(pImageChild->pBackendData,
7976 uOpenFlags);
7977 if (RT_FAILURE(rc))
7978 break;
7979 }
7980
7981 rc2 = vdThreadFinishWrite(pDisk);
7982 AssertRC(rc2);
7983 fLockWrite = false;
7984 }
7985
7986 /* If the merge is from the last image we have to relay all writes
7987 * to the merge destination as well, so that concurrent writes
7988 * (in case of a live merge) are handled correctly. */
7989 if (!pImageFrom->pNext)
7990 {
7991 /* Take the write lock. */
7992 rc2 = vdThreadStartWrite(pDisk);
7993 AssertRC(rc2);
7994 fLockWrite = true;
7995
7996 pDisk->pImageRelay = pImageTo;
7997
7998 rc2 = vdThreadFinishWrite(pDisk);
7999 AssertRC(rc2);
8000 fLockWrite = false;
8001 }
8002
8003 /* Merge child state into parent. This means writing all blocks
8004 * which are allocated in the image up to the source image to the
8005 * destination image. */
8006 uint64_t uOffset = 0;
8007 uint64_t cbRemaining = cbSize;
8008 do
8009 {
8010 size_t cbThisRead = RT_MIN(VD_MERGE_BUFFER_SIZE, cbRemaining);
8011 RTSGSEG SegmentBuf;
8012 RTSGBUF SgBuf;
8013 VDIOCTX IoCtx;
8014
8015 rc = VERR_VD_BLOCK_FREE;
8016
8017 SegmentBuf.pvSeg = pvBuf;
8018 SegmentBuf.cbSeg = VD_MERGE_BUFFER_SIZE;
8019 RTSgBufInit(&SgBuf, &SegmentBuf, 1);
8020 vdIoCtxInit(&IoCtx, pDisk, VDIOCTXTXDIR_READ, 0, 0, NULL,
8021 &SgBuf, NULL, NULL, VDIOCTX_FLAGS_SYNC);
8022
8023 /* Need to hold the write lock during a read-write operation. */
8024 rc2 = vdThreadStartWrite(pDisk);
8025 AssertRC(rc2);
8026 fLockWrite = true;
8027
8028 /* Search for image with allocated block. Do not attempt to
8029 * read more than the previous reads marked as valid. Otherwise
8030 * this would return stale data when different block sizes are
8031 * used for the images. */
8032 for (PVDIMAGE pCurrImage = pImageFrom;
8033 pCurrImage != NULL && pCurrImage != pImageTo && rc == VERR_VD_BLOCK_FREE;
8034 pCurrImage = pCurrImage->pPrev)
8035 {
8036 rc = pCurrImage->Backend->pfnRead(pCurrImage->pBackendData,
8037 uOffset, cbThisRead,
8038 &IoCtx, &cbThisRead);
8039 }
8040
8041 if (rc != VERR_VD_BLOCK_FREE)
8042 {
8043 if (RT_FAILURE(rc))
8044 break;
8045 rc = vdWriteHelper(pDisk, pImageTo, uOffset, pvBuf,
8046 cbThisRead, VDIOCTX_FLAGS_READ_UPDATE_CACHE);
8047 if (RT_FAILURE(rc))
8048 break;
8049 }
8050 else
8051 rc = VINF_SUCCESS;
8052
8053 rc2 = vdThreadFinishWrite(pDisk);
8054 AssertRC(rc2);
8055 fLockWrite = false;
8056
8057 uOffset += cbThisRead;
8058 cbRemaining -= cbThisRead;
8059
8060 if (pIfProgress && pIfProgress->pfnProgress)
8061 {
8062 /** @todo r=klaus: this can update the progress to the same
8063 * percentage over and over again if the image format makes
8064 * relatively small increments. */
8065 rc = pIfProgress->pfnProgress(pIfProgress->Core.pvUser,
8066 uOffset * 99 / cbSize);
8067 if (RT_FAILURE(rc))
8068 break;
8069 }
8070 } while (uOffset < cbSize);
8071
8072 /* In case we set up a "write proxy" image above we must clear
8073 * this again now to prevent stray writes. Failure or not. */
8074 if (!pImageFrom->pNext)
8075 {
8076 /* Take the write lock. */
8077 rc2 = vdThreadStartWrite(pDisk);
8078 AssertRC(rc2);
8079 fLockWrite = true;
8080
8081 pDisk->pImageRelay = NULL;
8082
8083 rc2 = vdThreadFinishWrite(pDisk);
8084 AssertRC(rc2);
8085 fLockWrite = false;
8086 }
8087 }
8088
8089 /*
8090 * Leave in case of an error to avoid corrupted data in the image chain
8091 * (includes cancelling the operation by the user).
8092 */
8093 if (RT_FAILURE(rc))
8094 break;
8095
8096 /* Need to hold the write lock while finishing the merge. */
8097 rc2 = vdThreadStartWrite(pDisk);
8098 AssertRC(rc2);
8099 fLockWrite = true;
8100
8101 /* Update parent UUID so that image chain is consistent.
8102 * The two attempts work around the problem that some backends
8103 * (e.g. iSCSI) do not support UUIDs, so we exploit the fact that
8104 * so far there can only be one such image in the chain. */
8105 /** @todo needs a better long-term solution, passing the UUID
8106 * knowledge from the caller or some such */
8107 RTUUID Uuid;
8108 PVDIMAGE pImageChild = NULL;
8109 if (nImageFrom < nImageTo)
8110 {
8111 if (pImageFrom->pPrev)
8112 {
8113 /* plan A: ask the parent itself for its UUID */
8114 rc = pImageFrom->pPrev->Backend->pfnGetUuid(pImageFrom->pPrev->pBackendData,
8115 &Uuid);
8116 if (RT_FAILURE(rc))
8117 {
8118 /* plan B: ask the child of the parent for parent UUID */
8119 rc = pImageFrom->Backend->pfnGetParentUuid(pImageFrom->pBackendData,
8120 &Uuid);
8121 }
8122 AssertRC(rc);
8123 }
8124 else
8125 RTUuidClear(&Uuid);
8126 rc = pImageTo->Backend->pfnSetParentUuid(pImageTo->pBackendData,
8127 &Uuid);
8128 AssertRC(rc);
8129 }
8130 else
8131 {
8132 /* Update the parent uuid of the child of the last merged image. */
8133 if (pImageFrom->pNext)
8134 {
8135 /* plan A: ask the parent itself for its UUID */
8136 rc = pImageTo->Backend->pfnGetUuid(pImageTo->pBackendData,
8137 &Uuid);
8138 if (RT_FAILURE(rc))
8139 {
8140 /* plan B: ask the child of the parent for parent UUID */
8141 rc = pImageTo->pNext->Backend->pfnGetParentUuid(pImageTo->pNext->pBackendData,
8142 &Uuid);
8143 }
8144 AssertRC(rc);
8145
8146 rc = pImageFrom->Backend->pfnSetParentUuid(pImageFrom->pNext->pBackendData,
8147 &Uuid);
8148 AssertRC(rc);
8149
8150 pImageChild = pImageFrom->pNext;
8151 }
8152 }
8153
8154 /* Delete the no longer needed images. */
8155 PVDIMAGE pImg = pImageFrom, pTmp;
8156 while (pImg != pImageTo)
8157 {
8158 if (nImageFrom < nImageTo)
8159 pTmp = pImg->pNext;
8160 else
8161 pTmp = pImg->pPrev;
8162 vdRemoveImageFromList(pDisk, pImg);
8163 pImg->Backend->pfnClose(pImg->pBackendData, true);
8164 RTMemFree(pImg->pszFilename);
8165 RTMemFree(pImg);
8166 pImg = pTmp;
8167 }
8168
8169 /* Make sure destination image is back to read only if necessary. */
8170 if (pImageTo != pDisk->pLast)
8171 {
8172 uOpenFlags = pImageTo->Backend->pfnGetOpenFlags(pImageTo->pBackendData);
8173 uOpenFlags |= VD_OPEN_FLAGS_READONLY;
8174 rc = pImageTo->Backend->pfnSetOpenFlags(pImageTo->pBackendData,
8175 uOpenFlags);
8176 if (RT_FAILURE(rc))
8177 break;
8178 }
8179
8180 /*
8181 * Make sure the child is readonly
8182 * for the child -> parent merge direction
8183 * if necessary.
8184 */
8185 if ( nImageFrom > nImageTo
8186 && pImageChild
8187 && pImageChild != pDisk->pLast)
8188 {
8189 uOpenFlags = pImageChild->Backend->pfnGetOpenFlags(pImageChild->pBackendData);
8190 uOpenFlags |= VD_OPEN_FLAGS_READONLY;
8191 rc = pImageChild->Backend->pfnSetOpenFlags(pImageChild->pBackendData,
8192 uOpenFlags);
8193 if (RT_FAILURE(rc))
8194 break;
8195 }
8196 } while (0);
8197
8198 if (RT_UNLIKELY(fLockWrite))
8199 {
8200 rc2 = vdThreadFinishWrite(pDisk);
8201 AssertRC(rc2);
8202 }
8203 else if (RT_UNLIKELY(fLockRead))
8204 {
8205 rc2 = vdThreadFinishRead(pDisk);
8206 AssertRC(rc2);
8207 }
8208
8209 if (pvBuf)
8210 RTMemTmpFree(pvBuf);
8211
8212 if (RT_SUCCESS(rc) && pIfProgress && pIfProgress->pfnProgress)
8213 pIfProgress->pfnProgress(pIfProgress->Core.pvUser, 100);
8214
8215 LogFlowFunc(("returns %Rrc\n", rc));
8216 return rc;
8217}
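/*
 * Illustrative usage sketch for VDMerge(), not part of the original source.
 * It assumes a container with a base image (0) and one differencing image (1).
 */
#if 0
static int vdExampleMerge(PVBOXHDD pDisk)
{
    /* Merge the child image 1 down into the base image 0 (child -> parent
     * direction); on success image 1 is closed and its file deleted. */
    return VDMerge(pDisk, 1 /* nImageFrom */, 0 /* nImageTo */,
                   NULL /* pVDIfsOperation */);
}
#endif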
8218
8219/**
8220 * Copies an image from one HDD container to another - extended version.
8221 * The copy is opened in the target HDD container.
8222 * It is possible to convert between different image formats, because the
8223 * backend for the destination may be different from the source.
8224 * If both the source and destination reference the same HDD container,
8225 * then the image is moved (by copying/deleting or renaming) to the new location.
8226 * The source container is unchanged if the move operation fails, otherwise
8227 * the image at the new location is opened in the same way as the old one was.
8228 *
8229 * @note The read/write accesses across disks are not synchronized, just the
8230 * accesses to each disk. Once there is a use case which requires a defined
8231 * read/write behavior in this situation this needs to be extended.
8232 *
8233 * @returns VBox status code.
8234 * @retval VERR_VD_IMAGE_NOT_FOUND if image with specified number was not opened.
8235 * @param pDiskFrom Pointer to source HDD container.
8236 * @param nImage Image number, counts from 0. 0 is always base image of container.
8237 * @param pDiskTo Pointer to destination HDD container.
8238 * @param pszBackend Name of the image file backend to use (may be NULL to use the same as the source, case insensitive).
8239 * @param pszFilename New name of the image (may be NULL to specify that the
8240 * copy destination is the destination container, or
8241 * if pDiskFrom == pDiskTo, i.e. when moving).
8242 * @param fMoveByRename If true, attempt to perform a move by renaming (if successful the new size is ignored).
8243 * @param cbSize New image size (0 means leave unchanged).
8244 * @param nImageFromSame Last image in the source chain with the same content as the destination chain, or VD_IMAGE_CONTENT_UNKNOWN if not known.
8245 * @param nImageToSame Last image in the destination chain with the same content as the source chain, or VD_IMAGE_CONTENT_UNKNOWN if not known.
8246 * @param uImageFlags Flags specifying special destination image features.
8247 * @param pDstUuid New UUID of the destination image. If NULL, a new UUID is created.
8248 * This parameter is used if and only if a true copy is created.
8249 * In all rename/move cases or copy to existing image cases the modification UUIDs are copied over.
8250 * @param uOpenFlags Image file open mode, see VD_OPEN_FLAGS_* constants.
8251 * Only used if the destination image is created.
8252 * @param pVDIfsOperation Pointer to the per-operation VD interface list.
8253 * @param pDstVDIfsImage Pointer to the per-image VD interface list, for the
8254 * destination image.
8255 * @param pDstVDIfsOperation Pointer to the per-operation VD interface list,
8256 * for the destination operation.
8257 */
8258VBOXDDU_DECL(int) VDCopyEx(PVBOXHDD pDiskFrom, unsigned nImage, PVBOXHDD pDiskTo,
8259 const char *pszBackend, const char *pszFilename,
8260 bool fMoveByRename, uint64_t cbSize,
8261 unsigned nImageFromSame, unsigned nImageToSame,
8262 unsigned uImageFlags, PCRTUUID pDstUuid,
8263 unsigned uOpenFlags, PVDINTERFACE pVDIfsOperation,
8264 PVDINTERFACE pDstVDIfsImage,
8265 PVDINTERFACE pDstVDIfsOperation)
8266{
8267 int rc = VINF_SUCCESS;
8268 int rc2;
8269 bool fLockReadFrom = false, fLockWriteFrom = false, fLockWriteTo = false;
8270 PVDIMAGE pImageTo = NULL;
8271
8272 LogFlowFunc(("pDiskFrom=%#p nImage=%u pDiskTo=%#p pszBackend=\"%s\" pszFilename=\"%s\" fMoveByRename=%d cbSize=%llu nImageFromSame=%u nImageToSame=%u uImageFlags=%#x pDstUuid=%#p uOpenFlags=%#x pVDIfsOperation=%#p pDstVDIfsImage=%#p pDstVDIfsOperation=%#p\n",
8273 pDiskFrom, nImage, pDiskTo, pszBackend, pszFilename, fMoveByRename, cbSize, nImageFromSame, nImageToSame, uImageFlags, pDstUuid, uOpenFlags, pVDIfsOperation, pDstVDIfsImage, pDstVDIfsOperation));
8274
8275 PVDINTERFACEPROGRESS pIfProgress = VDIfProgressGet(pVDIfsOperation);
8276 PVDINTERFACEPROGRESS pDstIfProgress = VDIfProgressGet(pDstVDIfsOperation);
8277
8278 do {
8279 /* Check arguments. */
8280 AssertMsgBreakStmt(VALID_PTR(pDiskFrom), ("pDiskFrom=%#p\n", pDiskFrom),
8281 rc = VERR_INVALID_PARAMETER);
8282 AssertMsg(pDiskFrom->u32Signature == VBOXHDDDISK_SIGNATURE,
8283 ("u32Signature=%08x\n", pDiskFrom->u32Signature));
8284
8285 rc2 = vdThreadStartRead(pDiskFrom);
8286 AssertRC(rc2);
8287 fLockReadFrom = true;
8288 PVDIMAGE pImageFrom = vdGetImageByNumber(pDiskFrom, nImage);
8289 AssertPtrBreakStmt(pImageFrom, rc = VERR_VD_IMAGE_NOT_FOUND);
8290 AssertMsgBreakStmt(VALID_PTR(pDiskTo), ("pDiskTo=%#p\n", pDiskTo),
8291 rc = VERR_INVALID_PARAMETER);
8292 AssertMsg(pDiskTo->u32Signature == VBOXHDDDISK_SIGNATURE,
8293 ("u32Signature=%08x\n", pDiskTo->u32Signature));
8294 AssertMsgBreakStmt( (nImageFromSame < nImage || nImageFromSame == VD_IMAGE_CONTENT_UNKNOWN)
8295 && (nImageToSame < pDiskTo->cImages || nImageToSame == VD_IMAGE_CONTENT_UNKNOWN)
8296 && ( (nImageFromSame == VD_IMAGE_CONTENT_UNKNOWN && nImageToSame == VD_IMAGE_CONTENT_UNKNOWN)
8297 || (nImageFromSame != VD_IMAGE_CONTENT_UNKNOWN && nImageToSame != VD_IMAGE_CONTENT_UNKNOWN)),
8298 ("nImageFromSame=%u nImageToSame=%u\n", nImageFromSame, nImageToSame),
8299 rc = VERR_INVALID_PARAMETER);
8300
8301 /* Move the image. */
8302 if (pDiskFrom == pDiskTo)
8303 {
8304 /* Rename only works when backends are the same, are file based
8305 * and the rename method is implemented. */
8306 if ( fMoveByRename
8307 && !RTStrICmp(pszBackend, pImageFrom->Backend->pszBackendName)
8308 && pImageFrom->Backend->uBackendCaps & VD_CAP_FILE
8309 && pImageFrom->Backend->pfnRename)
8310 {
8311 rc2 = vdThreadFinishRead(pDiskFrom);
8312 AssertRC(rc2);
8313 fLockReadFrom = false;
8314
8315 rc2 = vdThreadStartWrite(pDiskFrom);
8316 AssertRC(rc2);
8317 fLockWriteFrom = true;
8318 rc = pImageFrom->Backend->pfnRename(pImageFrom->pBackendData, pszFilename ? pszFilename : pImageFrom->pszFilename);
8319 break;
8320 }
8321
8322 /** @todo Moving (including shrinking/growing) of the image is
8323 * requested, but the rename attempt failed or it wasn't possible.
8324 * Must now copy image to temp location. */
8325 AssertReleaseMsgFailed(("VDCopy: moving by copy/delete not implemented\n"));
8326 }
8327
8328 /* pszFilename is allowed to be NULL, as this indicates copy to the existing image. */
8329 AssertMsgBreakStmt(pszFilename == NULL || (VALID_PTR(pszFilename) && *pszFilename),
8330 ("pszFilename=%#p \"%s\"\n", pszFilename, pszFilename),
8331 rc = VERR_INVALID_PARAMETER);
8332
8333 uint64_t cbSizeFrom;
8334 cbSizeFrom = pImageFrom->Backend->pfnGetSize(pImageFrom->pBackendData);
8335 if (cbSizeFrom == 0)
8336 {
8337 rc = VERR_VD_VALUE_NOT_FOUND;
8338 break;
8339 }
8340
8341 VDGEOMETRY PCHSGeometryFrom = {0, 0, 0};
8342 VDGEOMETRY LCHSGeometryFrom = {0, 0, 0};
8343 pImageFrom->Backend->pfnGetPCHSGeometry(pImageFrom->pBackendData, &PCHSGeometryFrom);
8344 pImageFrom->Backend->pfnGetLCHSGeometry(pImageFrom->pBackendData, &LCHSGeometryFrom);
8345
8346 RTUUID ImageUuid, ImageModificationUuid;
8347 if (pDiskFrom != pDiskTo)
8348 {
8349 if (pDstUuid)
8350 ImageUuid = *pDstUuid;
8351 else
8352 RTUuidCreate(&ImageUuid);
8353 }
8354 else
8355 {
8356 rc = pImageFrom->Backend->pfnGetUuid(pImageFrom->pBackendData, &ImageUuid);
8357 if (RT_FAILURE(rc))
8358 RTUuidCreate(&ImageUuid);
8359 }
8360 rc = pImageFrom->Backend->pfnGetModificationUuid(pImageFrom->pBackendData, &ImageModificationUuid);
8361 if (RT_FAILURE(rc))
8362 RTUuidClear(&ImageModificationUuid);
8363
8364 char szComment[1024];
8365 rc = pImageFrom->Backend->pfnGetComment(pImageFrom->pBackendData, szComment, sizeof(szComment));
8366 if (RT_FAILURE(rc))
8367 szComment[0] = '\0';
8368 else
8369 szComment[sizeof(szComment) - 1] = '\0';
8370
8371 rc2 = vdThreadFinishRead(pDiskFrom);
8372 AssertRC(rc2);
8373 fLockReadFrom = false;
8374
8375 rc2 = vdThreadStartRead(pDiskTo);
8376 AssertRC(rc2);
8377 unsigned cImagesTo = pDiskTo->cImages;
8378 rc2 = vdThreadFinishRead(pDiskTo);
8379 AssertRC(rc2);
8380
8381 if (pszFilename)
8382 {
8383 if (cbSize == 0)
8384 cbSize = cbSizeFrom;
8385
8386 /* Create destination image with the properties of source image. */
8387 /** @todo replace the VDCreateDiff/VDCreateBase calls by direct
8388 * calls to the backend. Unifies the code and reduces the API
8389 * dependencies. Would also make the synchronization explicit. */
8390 if (cImagesTo > 0)
8391 {
8392 rc = VDCreateDiff(pDiskTo, pszBackend, pszFilename,
8393 uImageFlags, szComment, &ImageUuid,
8394 NULL /* pParentUuid */,
8395 uOpenFlags & ~VD_OPEN_FLAGS_READONLY,
8396 pDstVDIfsImage, NULL);
8397
8398 rc2 = vdThreadStartWrite(pDiskTo);
8399 AssertRC(rc2);
8400 fLockWriteTo = true;
8401 } else {
8402 /** @todo hack to force creation of a fixed image for
8403 * the RAW backend, which can't handle anything else. */
8404 if (!RTStrICmp(pszBackend, "RAW"))
8405 uImageFlags |= VD_IMAGE_FLAGS_FIXED;
8406
8407 vdFixupPCHSGeometry(&PCHSGeometryFrom, cbSize);
8408 vdFixupLCHSGeometry(&LCHSGeometryFrom, cbSize);
8409
8410 rc = VDCreateBase(pDiskTo, pszBackend, pszFilename, cbSize,
8411 uImageFlags, szComment,
8412 &PCHSGeometryFrom, &LCHSGeometryFrom,
8413 NULL, uOpenFlags & ~VD_OPEN_FLAGS_READONLY,
8414 pDstVDIfsImage, NULL);
8415
8416 rc2 = vdThreadStartWrite(pDiskTo);
8417 AssertRC(rc2);
8418 fLockWriteTo = true;
8419
8420 if (RT_SUCCESS(rc) && !RTUuidIsNull(&ImageUuid))
8421 pDiskTo->pLast->Backend->pfnSetUuid(pDiskTo->pLast->pBackendData, &ImageUuid);
8422 }
8423 if (RT_FAILURE(rc))
8424 break;
8425
8426 pImageTo = pDiskTo->pLast;
8427 AssertPtrBreakStmt(pImageTo, rc = VERR_VD_IMAGE_NOT_FOUND);
8428
8429 cbSize = RT_MIN(cbSize, cbSizeFrom);
8430 }
8431 else
8432 {
8433 pImageTo = pDiskTo->pLast;
8434 AssertPtrBreakStmt(pImageTo, rc = VERR_VD_IMAGE_NOT_FOUND);
8435
8436 uint64_t cbSizeTo;
8437 cbSizeTo = pImageTo->Backend->pfnGetSize(pImageTo->pBackendData);
8438 if (cbSizeTo == 0)
8439 {
8440 rc = VERR_VD_VALUE_NOT_FOUND;
8441 break;
8442 }
8443
8444 if (cbSize == 0)
8445 cbSize = RT_MIN(cbSizeFrom, cbSizeTo);
8446
8447 vdFixupPCHSGeometry(&PCHSGeometryFrom, cbSize);
8448 vdFixupLCHSGeometry(&LCHSGeometryFrom, cbSize);
8449
8450 /* Update the geometry in the destination image. */
8451 pImageTo->Backend->pfnSetPCHSGeometry(pImageTo->pBackendData, &PCHSGeometryFrom);
8452 pImageTo->Backend->pfnSetLCHSGeometry(pImageTo->pBackendData, &LCHSGeometryFrom);
8453 }
8454
8455 rc2 = vdThreadFinishWrite(pDiskTo);
8456 AssertRC(rc2);
8457 fLockWriteTo = false;
8458
8459 /* Whether we can take the optimized copy path (false) or not.
8460 * Don't optimize if the image existed or if it is a child image. */
8461 bool fSuppressRedundantIo = ( !(pszFilename == NULL || cImagesTo > 0)
8462 || (nImageToSame != VD_IMAGE_CONTENT_UNKNOWN));
8463 unsigned cImagesFromReadBack, cImagesToReadBack;
8464
8465 if (nImageFromSame == VD_IMAGE_CONTENT_UNKNOWN)
8466 cImagesFromReadBack = 0;
8467 else
8468 {
8469 if (nImage == VD_LAST_IMAGE)
8470 cImagesFromReadBack = pDiskFrom->cImages - nImageFromSame - 1;
8471 else
8472 cImagesFromReadBack = nImage - nImageFromSame;
8473 }
8474
8475 if (nImageToSame == VD_IMAGE_CONTENT_UNKNOWN)
8476 cImagesToReadBack = 0;
8477 else
8478 cImagesToReadBack = pDiskTo->cImages - nImageToSame - 1;
8479
8480 /* Copy the data. */
8481 rc = vdCopyHelper(pDiskFrom, pImageFrom, pDiskTo, cbSize,
8482 cImagesFromReadBack, cImagesToReadBack,
8483 fSuppressRedundantIo, pIfProgress, pDstIfProgress);
8484
8485 if (RT_SUCCESS(rc))
8486 {
8487 rc2 = vdThreadStartWrite(pDiskTo);
8488 AssertRC(rc2);
8489 fLockWriteTo = true;
8490
8491 /* Only set modification UUID if it is non-null, since the source
8492 * backend might not provide a valid modification UUID. */
8493 if (!RTUuidIsNull(&ImageModificationUuid))
8494 pImageTo->Backend->pfnSetModificationUuid(pImageTo->pBackendData, &ImageModificationUuid);
8495
8496 /* Set the requested open flags if they differ from the value
8497 * required for creating the image and copying the contents. */
8498 if ( pImageTo && pszFilename
8499 && uOpenFlags != (uOpenFlags & ~VD_OPEN_FLAGS_READONLY))
8500 rc = pImageTo->Backend->pfnSetOpenFlags(pImageTo->pBackendData,
8501 uOpenFlags);
8502 }
8503 } while (0);
8504
8505 if (RT_FAILURE(rc) && pImageTo && pszFilename)
8506 {
8507 /* Take the write lock only if it is not taken. Not worth making the
8508 * above code even more complicated. */
8509 if (RT_UNLIKELY(!fLockWriteTo))
8510 {
8511 rc2 = vdThreadStartWrite(pDiskTo);
8512 AssertRC(rc2);
8513 fLockWriteTo = true;
8514 }
8515 /* Error detected, but new image created. Remove image from list. */
8516 vdRemoveImageFromList(pDiskTo, pImageTo);
8517
8518 /* Close and delete image. */
8519 rc2 = pImageTo->Backend->pfnClose(pImageTo->pBackendData, true);
8520 AssertRC(rc2);
8521 pImageTo->pBackendData = NULL;
8522
8523 /* Free remaining resources. */
8524 if (pImageTo->pszFilename)
8525 RTStrFree(pImageTo->pszFilename);
8526
8527 RTMemFree(pImageTo);
8528 }
8529
8530 if (RT_UNLIKELY(fLockWriteTo))
8531 {
8532 rc2 = vdThreadFinishWrite(pDiskTo);
8533 AssertRC(rc2);
8534 }
8535 if (RT_UNLIKELY(fLockWriteFrom))
8536 {
8537 rc2 = vdThreadFinishWrite(pDiskFrom);
8538 AssertRC(rc2);
8539 }
8540 else if (RT_UNLIKELY(fLockReadFrom))
8541 {
8542 rc2 = vdThreadFinishRead(pDiskFrom);
8543 AssertRC(rc2);
8544 }
8545
8546 if (RT_SUCCESS(rc))
8547 {
8548 if (pIfProgress && pIfProgress->pfnProgress)
8549 pIfProgress->pfnProgress(pIfProgress->Core.pvUser, 100);
8550 if (pDstIfProgress && pDstIfProgress->pfnProgress)
8551 pDstIfProgress->pfnProgress(pDstIfProgress->Core.pvUser, 100);
8552 }
8553
8554 LogFlowFunc(("returns %Rrc\n", rc));
8555 return rc;
8556}
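/*
 * Illustrative usage sketch for VDCopyEx(), not part of the original source.
 * It copies the merged contents of the whole source chain into a newly
 * created VMDK image in the destination container; backend name and path
 * are hypothetical example values.
 */
#if 0
static int vdExampleCopyEx(PVBOXHDD pDiskFrom, PVBOXHDD pDiskTo)
{
    /* VD_IMAGE_CONTENT_UNKNOWN for both "same" parameters: no assumptions
     * about shared content, so nothing is skipped during the copy. */
    return VDCopyEx(pDiskFrom, VD_LAST_IMAGE, pDiskTo, "VMDK",
                    "/tmp/example-copy.vmdk", false /* fMoveByRename */,
                    0 /* cbSize: keep source size */,
                    VD_IMAGE_CONTENT_UNKNOWN, VD_IMAGE_CONTENT_UNKNOWN,
                    VD_IMAGE_FLAGS_NONE, NULL /* pDstUuid */,
                    VD_OPEN_FLAGS_NORMAL, NULL, NULL, NULL);
}
#endif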
8557
8558/**
8559 * Copies an image from one HDD container to another.
8560 * The copy is opened in the target HDD container.
8561 * It is possible to convert between different image formats, because the
8562 * backend for the destination may be different from the source.
8563 * If both the source and destination reference the same HDD container,
8564 * then the image is moved (by copying/deleting or renaming) to the new location.
8565 * The source container is unchanged if the move operation fails, otherwise
8566 * the image at the new location is opened in the same way as the old one was.
8567 *
8568 * @returns VBox status code.
8569 * @returns VERR_VD_IMAGE_NOT_FOUND if image with specified number was not opened.
8570 * @param pDiskFrom Pointer to source HDD container.
8571 * @param nImage Image number, counts from 0. 0 is always base image of container.
8572 * @param pDiskTo Pointer to destination HDD container.
8573 * @param pszBackend Name of the image file backend to use.
8574 * @param pszFilename New name of the image (may be NULL if pDiskFrom == pDiskTo).
8575 * @param fMoveByRename If true, attempt to perform a move by renaming (if successful the new size is ignored).
8576 * @param cbSize New image size (0 means leave unchanged).
8577 * @param uImageFlags Flags specifying special destination image features.
8578 * @param pDstUuid New UUID of the destination image. If NULL, a new UUID is created.
8579 * This parameter is used if and only if a true copy is created.
8580 * In all rename/move cases the UUIDs are copied over.
8581 * @param uOpenFlags Image file open mode, see VD_OPEN_FLAGS_* constants.
8582 * Only used if the destination image is created.
8583 * @param pVDIfsOperation Pointer to the per-operation VD interface list.
8584 * @param pDstVDIfsImage Pointer to the per-image VD interface list, for the
8585 * destination image.
8586 * @param pDstVDIfsOperation Pointer to the per-operation VD interface list,
8587 * for the destination operation.
8588 */
8589VBOXDDU_DECL(int) VDCopy(PVBOXHDD pDiskFrom, unsigned nImage, PVBOXHDD pDiskTo,
8590 const char *pszBackend, const char *pszFilename,
8591 bool fMoveByRename, uint64_t cbSize,
8592 unsigned uImageFlags, PCRTUUID pDstUuid,
8593 unsigned uOpenFlags, PVDINTERFACE pVDIfsOperation,
8594 PVDINTERFACE pDstVDIfsImage,
8595 PVDINTERFACE pDstVDIfsOperation)
8596{
8597 return VDCopyEx(pDiskFrom, nImage, pDiskTo, pszBackend, pszFilename, fMoveByRename,
8598 cbSize, VD_IMAGE_CONTENT_UNKNOWN, VD_IMAGE_CONTENT_UNKNOWN,
8599 uImageFlags, pDstUuid, uOpenFlags, pVDIfsOperation,
8600 pDstVDIfsImage, pDstVDIfsOperation);
8601}
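/*
 * Illustrative usage sketch for VDCopy(), not part of the original source:
 * a typical format conversion of the base image into a new VDI image in the
 * destination container. Backend name and path are hypothetical values.
 */
#if 0
static int vdExampleConvert(PVBOXHDD pDiskFrom, PVBOXHDD pDiskTo)
{
    return VDCopy(pDiskFrom, 0 /* nImage */, pDiskTo, "VDI",
                  "/tmp/example-converted.vdi", false /* fMoveByRename */,
                  0 /* cbSize: keep source size */, VD_IMAGE_FLAGS_NONE,
                  NULL /* pDstUuid */, VD_OPEN_FLAGS_NORMAL,
                  NULL, NULL, NULL);
}
#endif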
8602
8603/**
8604 * Optimizes the storage consumption of an image. Typically the unused blocks
8605 * have to be wiped with zeroes to achieve a substantially reduced storage use.
8606 * Another optimization done is reordering the image blocks, which can provide
8607 * a significant performance boost, as reads and writes tend to use less random
8608 * file offsets.
8609 *
8610 * @return VBox status code.
8611 * @return VERR_VD_IMAGE_NOT_FOUND if image with specified number was not opened.
8612 * @return VERR_VD_IMAGE_READ_ONLY if image is not writable.
8613 * @return VERR_NOT_SUPPORTED if this kind of image can be compacted, but
8614 * the code for this isn't implemented yet.
8615 * @param pDisk Pointer to HDD container.
8616 * @param nImage Image number, counts from 0. 0 is always base image of container.
8617 * @param pVDIfsOperation Pointer to the per-operation VD interface list.
8618 */
8619VBOXDDU_DECL(int) VDCompact(PVBOXHDD pDisk, unsigned nImage,
8620 PVDINTERFACE pVDIfsOperation)
8621{
8622 int rc = VINF_SUCCESS;
8623 int rc2;
8624 bool fLockRead = false, fLockWrite = false;
8625 void *pvBuf = NULL;
8626 void *pvTmp = NULL;
8627
8628 LogFlowFunc(("pDisk=%#p nImage=%u pVDIfsOperation=%#p\n",
8629 pDisk, nImage, pVDIfsOperation));
8630
8631 PVDINTERFACEPROGRESS pIfProgress = VDIfProgressGet(pVDIfsOperation);
8632
8633 do {
8634 /* Check arguments. */
8635 AssertMsgBreakStmt(VALID_PTR(pDisk), ("pDisk=%#p\n", pDisk),
8636 rc = VERR_INVALID_PARAMETER);
8637 AssertMsg(pDisk->u32Signature == VBOXHDDDISK_SIGNATURE,
8638 ("u32Signature=%08x\n", pDisk->u32Signature));
8639
8640 rc2 = vdThreadStartRead(pDisk);
8641 AssertRC(rc2);
8642 fLockRead = true;
8643
8644 PVDIMAGE pImage = vdGetImageByNumber(pDisk, nImage);
8645 AssertPtrBreakStmt(pImage, rc = VERR_VD_IMAGE_NOT_FOUND);
8646
8647 /* If there is no compact callback, non-file-based backends don't need
8648 * compaction and there is no need to make much fuss about this. For
8649 * file-based ones signal this as not yet supported. */
8650 if (!pImage->Backend->pfnCompact)
8651 {
8652 if (pImage->Backend->uBackendCaps & VD_CAP_FILE)
8653 rc = VERR_NOT_SUPPORTED;
8654 else
8655 rc = VINF_SUCCESS;
8656 break;
8657 }
8658
8659 /* Insert interface for reading parent state into per-operation list,
8660 * if there is a parent image. */
8661 VDINTERFACEPARENTSTATE VDIfParent;
8662 VDPARENTSTATEDESC ParentUser;
8663 if (pImage->pPrev)
8664 {
8665 VDIfParent.pfnParentRead = vdParentRead;
8666 ParentUser.pDisk = pDisk;
8667 ParentUser.pImage = pImage->pPrev;
8668 rc = VDInterfaceAdd(&VDIfParent.Core, "VDCompact_ParentState", VDINTERFACETYPE_PARENTSTATE,
8669 &ParentUser, sizeof(VDINTERFACEPARENTSTATE), &pVDIfsOperation);
8670 AssertRC(rc);
8671 }
8672
8673 rc2 = vdThreadFinishRead(pDisk);
8674 AssertRC(rc2);
8675 fLockRead = false;
8676
8677 rc2 = vdThreadStartWrite(pDisk);
8678 AssertRC(rc2);
8679 fLockWrite = true;
8680
8681 rc = pImage->Backend->pfnCompact(pImage->pBackendData,
8682 0, 99,
8683 pDisk->pVDIfsDisk,
8684 pImage->pVDIfsImage,
8685 pVDIfsOperation);
8686 } while (0);
8687
8688 if (RT_UNLIKELY(fLockWrite))
8689 {
8690 rc2 = vdThreadFinishWrite(pDisk);
8691 AssertRC(rc2);
8692 }
8693 else if (RT_UNLIKELY(fLockRead))
8694 {
8695 rc2 = vdThreadFinishRead(pDisk);
8696 AssertRC(rc2);
8697 }
8698
8699 if (pvBuf)
8700 RTMemTmpFree(pvBuf);
8701 if (pvTmp)
8702 RTMemTmpFree(pvTmp);
8703
8704 if (RT_SUCCESS(rc))
8705 {
8706 if (pIfProgress && pIfProgress->pfnProgress)
8707 pIfProgress->pfnProgress(pIfProgress->Core.pvUser, 100);
8708 }
8709
8710 LogFlowFunc(("returns %Rrc\n", rc));
8711 return rc;
8712}
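/*
 * Illustrative usage sketch (not part of the original source): compact the
 * topmost, writable image of an opened container.  VERR_NOT_SUPPORTED from a
 * backend without compaction support is treated as "nothing to do" here.
 */
#if 0
static int exampleCompactTopImage(PVBOXHDD pDisk)
{
    unsigned cImages = VDGetCount(pDisk);
    if (!cImages)
        return VERR_VD_NOT_OPENED;

    int rc = VDCompact(pDisk, cImages - 1, NULL /* pVDIfsOperation */);
    if (rc == VERR_NOT_SUPPORTED)
        rc = VINF_SUCCESS;
    return rc;
}
#endif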
8713
8714/**
8715 * Resizes the given disk image to the given size.
8716 *
 8717 * @return VBox status code.
 8718 * @return VERR_VD_IMAGE_READ_ONLY if image is not writable.
 8719 * @return VERR_NOT_SUPPORTED if this kind of image can be resized, but
 8720 *         the code for this isn't implemented yet.
8721 * @param pDisk Pointer to the HDD container.
8722 * @param cbSize New size of the image.
8723 * @param pPCHSGeometry Pointer to the new physical disk geometry <= (16383,16,63). Not NULL.
8724 * @param pLCHSGeometry Pointer to the new logical disk geometry <= (x,255,63). Not NULL.
8725 * @param pVDIfsOperation Pointer to the per-operation VD interface list.
8726 */
8727VBOXDDU_DECL(int) VDResize(PVBOXHDD pDisk, uint64_t cbSize,
8728 PCVDGEOMETRY pPCHSGeometry,
8729 PCVDGEOMETRY pLCHSGeometry,
8730 PVDINTERFACE pVDIfsOperation)
8731{
8732 /** @todo r=klaus resizing was designed to be part of VDCopy, so having a separate function is not desirable. */
8733 int rc = VINF_SUCCESS;
8734 int rc2;
8735 bool fLockRead = false, fLockWrite = false;
8736
8737 LogFlowFunc(("pDisk=%#p cbSize=%llu pVDIfsOperation=%#p\n",
8738 pDisk, cbSize, pVDIfsOperation));
8739
8740 PVDINTERFACEPROGRESS pIfProgress = VDIfProgressGet(pVDIfsOperation);
8741
8742 do {
8743 /* Check arguments. */
8744 AssertMsgBreakStmt(VALID_PTR(pDisk), ("pDisk=%#p\n", pDisk),
8745 rc = VERR_INVALID_PARAMETER);
8746 AssertMsg(pDisk->u32Signature == VBOXHDDDISK_SIGNATURE,
8747 ("u32Signature=%08x\n", pDisk->u32Signature));
8748
8749 rc2 = vdThreadStartRead(pDisk);
8750 AssertRC(rc2);
8751 fLockRead = true;
8752
8753 /* Must have at least one image in the chain, will resize last. */
8754 AssertMsgBreakStmt(pDisk->cImages >= 1, ("cImages=%u\n", pDisk->cImages),
8755 rc = VERR_NOT_SUPPORTED);
8756
8757 PVDIMAGE pImage = pDisk->pLast;
8758
 8759 /* If there is no resize callback for non file based backends then
 8760 * the backend doesn't support resizing. No need to make much fuss about
 8761 * this. For file based ones signal this as not yet supported. */
8762 if (!pImage->Backend->pfnResize)
8763 {
8764 if (pImage->Backend->uBackendCaps & VD_CAP_FILE)
8765 rc = VERR_NOT_SUPPORTED;
8766 else
8767 rc = VINF_SUCCESS;
8768 break;
8769 }
8770
8771 rc2 = vdThreadFinishRead(pDisk);
8772 AssertRC(rc2);
8773 fLockRead = false;
8774
8775 rc2 = vdThreadStartWrite(pDisk);
8776 AssertRC(rc2);
8777 fLockWrite = true;
8778
8779 VDGEOMETRY PCHSGeometryOld;
8780 VDGEOMETRY LCHSGeometryOld;
8781 PCVDGEOMETRY pPCHSGeometryNew;
8782 PCVDGEOMETRY pLCHSGeometryNew;
8783
8784 if (pPCHSGeometry->cCylinders == 0)
8785 {
8786 /* Auto-detect marker, calculate new value ourself. */
8787 rc = pImage->Backend->pfnGetPCHSGeometry(pImage->pBackendData, &PCHSGeometryOld);
8788 if (RT_SUCCESS(rc) && (PCHSGeometryOld.cCylinders != 0))
8789 PCHSGeometryOld.cCylinders = RT_MIN(cbSize / 512 / PCHSGeometryOld.cHeads / PCHSGeometryOld.cSectors, 16383);
8790 else if (rc == VERR_VD_GEOMETRY_NOT_SET)
8791 rc = VINF_SUCCESS;
8792
8793 pPCHSGeometryNew = &PCHSGeometryOld;
8794 }
8795 else
8796 pPCHSGeometryNew = pPCHSGeometry;
8797
8798 if (pLCHSGeometry->cCylinders == 0)
8799 {
8800 /* Auto-detect marker, calculate new value ourself. */
8801 rc = pImage->Backend->pfnGetLCHSGeometry(pImage->pBackendData, &LCHSGeometryOld);
8802 if (RT_SUCCESS(rc) && (LCHSGeometryOld.cCylinders != 0))
8803 LCHSGeometryOld.cCylinders = cbSize / 512 / LCHSGeometryOld.cHeads / LCHSGeometryOld.cSectors;
8804 else if (rc == VERR_VD_GEOMETRY_NOT_SET)
8805 rc = VINF_SUCCESS;
8806
8807 pLCHSGeometryNew = &LCHSGeometryOld;
8808 }
8809 else
8810 pLCHSGeometryNew = pLCHSGeometry;
8811
8812 if (RT_SUCCESS(rc))
8813 rc = pImage->Backend->pfnResize(pImage->pBackendData,
8814 cbSize,
8815 pPCHSGeometryNew,
8816 pLCHSGeometryNew,
8817 0, 99,
8818 pDisk->pVDIfsDisk,
8819 pImage->pVDIfsImage,
8820 pVDIfsOperation);
8821 } while (0);
8822
8823 if (RT_UNLIKELY(fLockWrite))
8824 {
8825 rc2 = vdThreadFinishWrite(pDisk);
8826 AssertRC(rc2);
8827 }
8828 else if (RT_UNLIKELY(fLockRead))
8829 {
8830 rc2 = vdThreadFinishRead(pDisk);
8831 AssertRC(rc2);
8832 }
8833
8834 if (RT_SUCCESS(rc))
8835 {
8836 if (pIfProgress && pIfProgress->pfnProgress)
8837 pIfProgress->pfnProgress(pIfProgress->Core.pvUser, 100);
8838
8839 pDisk->cbSize = cbSize;
8840 }
8841
8842 LogFlowFunc(("returns %Rrc\n", rc));
8843 return rc;
8844}
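/*
 * Illustrative usage sketch (not part of the original source): grow the
 * container to 4 GiB and let VDResize recompute both geometries via the
 * cCylinders == 0 auto-detect marker handled above.
 */
#if 0
static int exampleResizeTo4GiB(PVBOXHDD pDisk)
{
    VDGEOMETRY PCHSGeometry = { 0, 0, 0 }; /* cCylinders == 0: auto-detect */
    VDGEOMETRY LCHSGeometry = { 0, 0, 0 };
    return VDResize(pDisk, 4ULL * _1G, &PCHSGeometry, &LCHSGeometry,
                    NULL /* pVDIfsOperation */);
}
#endif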
8845
8846VBOXDDU_DECL(int) VDPrepareWithFilters(PVBOXHDD pDisk, PVDINTERFACE pVDIfsOperation)
8847{
8848 int rc = VINF_SUCCESS;
8849 int rc2;
8850 bool fLockRead = false, fLockWrite = false;
8851
8852 LogFlowFunc(("pDisk=%#p pVDIfsOperation=%#p\n", pDisk, pVDIfsOperation));
8853
8854 PVDINTERFACEPROGRESS pIfProgress = VDIfProgressGet(pVDIfsOperation);
8855
8856 do {
8857 /* Check arguments. */
8858 AssertMsgBreakStmt(VALID_PTR(pDisk), ("pDisk=%#p\n", pDisk),
8859 rc = VERR_INVALID_PARAMETER);
8860 AssertMsg(pDisk->u32Signature == VBOXHDDDISK_SIGNATURE,
8861 ("u32Signature=%08x\n", pDisk->u32Signature));
8862
8863 rc2 = vdThreadStartRead(pDisk);
8864 AssertRC(rc2);
8865 fLockRead = true;
8866
8867 /* Must have at least one image in the chain. */
8868 AssertMsgBreakStmt(pDisk->cImages >= 1, ("cImages=%u\n", pDisk->cImages),
8869 rc = VERR_VD_NOT_OPENED);
8870
8871 unsigned uOpenFlags = pDisk->pLast->Backend->pfnGetOpenFlags(pDisk->pLast->pBackendData);
8872 AssertMsgBreakStmt(!(uOpenFlags & VD_OPEN_FLAGS_READONLY),
8873 ("Last image should be read write"),
8874 rc = VERR_VD_IMAGE_READ_ONLY);
8875
8876 rc2 = vdThreadFinishRead(pDisk);
8877 AssertRC(rc2);
8878 fLockRead = false;
8879
8880 rc2 = vdThreadStartWrite(pDisk);
8881 AssertRC(rc2);
8882 fLockWrite = true;
8883
8884 /*
8885 * Open all images in the chain in read write mode first to avoid running
8886 * into an error in the middle of the process.
8887 */
8888 PVDIMAGE pImage = pDisk->pBase;
8889
8890 while (pImage)
8891 {
8892 uOpenFlags = pImage->Backend->pfnGetOpenFlags(pImage->pBackendData);
8893 if (uOpenFlags & VD_OPEN_FLAGS_READONLY)
8894 {
8895 /*
8896 * Clear skip consistency checks because the image is made writable now and
8897 * skipping consistency checks is only possible for readonly images.
8898 */
8899 uOpenFlags &= ~(VD_OPEN_FLAGS_READONLY | VD_OPEN_FLAGS_SKIP_CONSISTENCY_CHECKS);
8900 rc = pImage->Backend->pfnSetOpenFlags(pImage->pBackendData, uOpenFlags);
8901 if (RT_FAILURE(rc))
8902 break;
8903 }
8904 pImage = pImage->pNext;
8905 }
8906
8907 if (RT_SUCCESS(rc))
8908 {
8909 unsigned cImgCur = 0;
8910 unsigned uPercentStart = 0;
8911 unsigned uPercentSpan = 100 / pDisk->cImages - 1;
8912
8913 /* Allocate tmp buffer. */
8914 void *pvBuf = RTMemTmpAlloc(VD_MERGE_BUFFER_SIZE);
8915 if (!pvBuf)
8916 {
8917 rc = VERR_NO_MEMORY;
8918 break;
8919 }
8920
8921 pImage = pDisk->pBase;
8922 pDisk->fLocked = true;
8923
8924 while ( pImage
8925 && RT_SUCCESS(rc))
8926 {
8927 /* Get size of image. */
8928 uint64_t cbSize = pImage->Backend->pfnGetSize(pImage->pBackendData);
8929 uint64_t cbSizeFile = pImage->Backend->pfnGetFileSize(pImage->pBackendData);
8930 uint64_t cbFileWritten = 0;
8931 uint64_t uOffset = 0;
8932 uint64_t cbRemaining = cbSize;
8933
8934 do
8935 {
8936 size_t cbThisRead = RT_MIN(VD_MERGE_BUFFER_SIZE, cbRemaining);
8937 RTSGSEG SegmentBuf;
8938 RTSGBUF SgBuf;
8939 VDIOCTX IoCtx;
8940
8941 SegmentBuf.pvSeg = pvBuf;
8942 SegmentBuf.cbSeg = VD_MERGE_BUFFER_SIZE;
8943 RTSgBufInit(&SgBuf, &SegmentBuf, 1);
8944 vdIoCtxInit(&IoCtx, pDisk, VDIOCTXTXDIR_READ, 0, 0, NULL,
8945 &SgBuf, NULL, NULL, VDIOCTX_FLAGS_SYNC);
8946
8947 rc = pImage->Backend->pfnRead(pImage->pBackendData, uOffset,
8948 cbThisRead, &IoCtx, &cbThisRead);
8949 if (rc != VERR_VD_BLOCK_FREE)
8950 {
8951 if (RT_FAILURE(rc))
8952 break;
8953
8954 /* Apply filter chains. */
8955 rc = vdFilterChainApplyRead(pDisk, uOffset, cbThisRead, &IoCtx);
8956 if (RT_FAILURE(rc))
8957 break;
8958
8959 rc = vdFilterChainApplyWrite(pDisk, uOffset, cbThisRead, &IoCtx);
8960 if (RT_FAILURE(rc))
8961 break;
8962
8963 RTSgBufReset(&SgBuf);
8964 size_t cbThisWrite = 0;
8965 size_t cbPreRead = 0;
8966 size_t cbPostRead = 0;
8967 rc = pImage->Backend->pfnWrite(pImage->pBackendData, uOffset,
8968 cbThisRead, &IoCtx, &cbThisWrite,
8969 &cbPreRead, &cbPostRead, 0);
8970 if (RT_FAILURE(rc))
8971 break;
8972 Assert(cbThisWrite == cbThisRead);
8973 cbFileWritten += cbThisWrite;
8974 }
8975 else
8976 rc = VINF_SUCCESS;
8977
8978 uOffset += cbThisRead;
8979 cbRemaining -= cbThisRead;
8980
8981 if (pIfProgress && pIfProgress->pfnProgress)
8982 {
8983 rc2 = pIfProgress->pfnProgress(pIfProgress->Core.pvUser,
8984 uPercentStart + cbFileWritten * uPercentSpan / cbSizeFile);
8985 AssertRC(rc2); /* Cancelling this operation without leaving an inconsistent state is not possible. */
8986 }
8987 } while (uOffset < cbSize);
8988
8989 pImage = pImage->pNext;
8990 cImgCur++;
8991 uPercentStart += uPercentSpan;
8992 }
8993
8994 pDisk->fLocked = false;
8995 if (pvBuf)
8996 RTMemTmpFree(pvBuf);
8997 }
8998
8999 /* Change images except last one back to readonly. */
9000 pImage = pDisk->pBase;
9001 while ( pImage != pDisk->pLast
9002 && pImage)
9003 {
9004 uOpenFlags = pImage->Backend->pfnGetOpenFlags(pImage->pBackendData);
9005 uOpenFlags |= VD_OPEN_FLAGS_READONLY;
9006 rc2 = pImage->Backend->pfnSetOpenFlags(pImage->pBackendData, uOpenFlags);
9007 if (RT_FAILURE(rc2))
9008 {
9009 if (RT_SUCCESS(rc))
9010 rc = rc2;
9011 break;
9012 }
9013 pImage = pImage->pNext;
9014 }
9015 } while (0);
9016
9017 if (RT_UNLIKELY(fLockWrite))
9018 {
9019 rc2 = vdThreadFinishWrite(pDisk);
9020 AssertRC(rc2);
9021 }
9022 else if (RT_UNLIKELY(fLockRead))
9023 {
9024 rc2 = vdThreadFinishRead(pDisk);
9025 AssertRC(rc2);
9026 }
9027
9028 if ( RT_SUCCESS(rc)
9029 && pIfProgress
9030 && pIfProgress->pfnProgress)
9031 pIfProgress->pfnProgress(pIfProgress->Core.pvUser, 100);
9032
9033 LogFlowFunc(("returns %Rrc\n", rc));
9034 return rc;
9035}
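/*
 * Illustrative usage sketch (not part of the original source): push all data
 * of an opened container through the attached filter chains (e.g. after an
 * encryption filter has been configured elsewhere, which is not shown here)
 * and drop the filters again afterwards.
 */
#if 0
static int exampleApplyFilters(PVBOXHDD pDisk)
{
    int rc  = VDPrepareWithFilters(pDisk, NULL /* pVDIfsOperation */);
    int rc2 = VDFilterRemoveAll(pDisk);
    return RT_SUCCESS(rc) ? rc2 : rc;
}
#endif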
9036
9037/**
9038 * Closes the last opened image file in HDD container.
 9039 * If the previous image file was opened in read-only mode (the normal case)
 9040 * and the closed image was opened in read-write mode, the previous image will
 9041 * be reopened in read/write mode.
9042 *
9043 * @returns VBox status code.
9044 * @returns VERR_VD_NOT_OPENED if no image is opened in HDD container.
9045 * @param pDisk Pointer to HDD container.
9046 * @param fDelete If true, delete the image from the host disk.
9047 */
9048VBOXDDU_DECL(int) VDClose(PVBOXHDD pDisk, bool fDelete)
9049{
9050 int rc = VINF_SUCCESS;
9051 int rc2;
9052 bool fLockWrite = false;
9053
9054 LogFlowFunc(("pDisk=%#p fDelete=%d\n", pDisk, fDelete));
9055 do
9056 {
9057 /* sanity check */
9058 AssertPtrBreakStmt(pDisk, rc = VERR_INVALID_PARAMETER);
9059 AssertMsg(pDisk->u32Signature == VBOXHDDDISK_SIGNATURE, ("u32Signature=%08x\n", pDisk->u32Signature));
9060
9061 /* Not worth splitting this up into a read lock phase and write
9062 * lock phase, as closing an image is a relatively fast operation
9063 * dominated by the part which needs the write lock. */
9064 rc2 = vdThreadStartWrite(pDisk);
9065 AssertRC(rc2);
9066 fLockWrite = true;
9067
9068 PVDIMAGE pImage = pDisk->pLast;
9069 if (!pImage)
9070 {
9071 rc = VERR_VD_NOT_OPENED;
9072 break;
9073 }
9074
9075 /* Destroy the current discard state first which might still have pending blocks. */
9076 rc = vdDiscardStateDestroy(pDisk);
9077 if (RT_FAILURE(rc))
9078 break;
9079
9080 unsigned uOpenFlags = pImage->Backend->pfnGetOpenFlags(pImage->pBackendData);
9081 /* Remove image from list of opened images. */
9082 vdRemoveImageFromList(pDisk, pImage);
9083 /* Close (and optionally delete) image. */
9084 rc = pImage->Backend->pfnClose(pImage->pBackendData, fDelete);
9085 /* Free remaining resources related to the image. */
9086 RTStrFree(pImage->pszFilename);
9087 RTMemFree(pImage);
9088
9089 pImage = pDisk->pLast;
9090 if (!pImage)
9091 break;
9092
9093 /* If disk was previously in read/write mode, make sure it will stay
9094 * like this (if possible) after closing this image. Set the open flags
9095 * accordingly. */
9096 if (!(uOpenFlags & VD_OPEN_FLAGS_READONLY))
9097 {
9098 uOpenFlags = pImage->Backend->pfnGetOpenFlags(pImage->pBackendData);
 9099 uOpenFlags &= ~VD_OPEN_FLAGS_READONLY;
9100 rc = pImage->Backend->pfnSetOpenFlags(pImage->pBackendData, uOpenFlags);
9101 }
9102
9103 /* Cache disk information. */
9104 pDisk->cbSize = pImage->Backend->pfnGetSize(pImage->pBackendData);
9105
9106 /* Cache PCHS geometry. */
9107 rc2 = pImage->Backend->pfnGetPCHSGeometry(pImage->pBackendData,
9108 &pDisk->PCHSGeometry);
9109 if (RT_FAILURE(rc2))
9110 {
9111 pDisk->PCHSGeometry.cCylinders = 0;
9112 pDisk->PCHSGeometry.cHeads = 0;
9113 pDisk->PCHSGeometry.cSectors = 0;
9114 }
9115 else
9116 {
9117 /* Make sure the PCHS geometry is properly clipped. */
9118 pDisk->PCHSGeometry.cCylinders = RT_MIN(pDisk->PCHSGeometry.cCylinders, 16383);
9119 pDisk->PCHSGeometry.cHeads = RT_MIN(pDisk->PCHSGeometry.cHeads, 16);
9120 pDisk->PCHSGeometry.cSectors = RT_MIN(pDisk->PCHSGeometry.cSectors, 63);
9121 }
9122
9123 /* Cache LCHS geometry. */
9124 rc2 = pImage->Backend->pfnGetLCHSGeometry(pImage->pBackendData,
9125 &pDisk->LCHSGeometry);
9126 if (RT_FAILURE(rc2))
9127 {
9128 pDisk->LCHSGeometry.cCylinders = 0;
9129 pDisk->LCHSGeometry.cHeads = 0;
9130 pDisk->LCHSGeometry.cSectors = 0;
9131 }
9132 else
9133 {
9134 /* Make sure the LCHS geometry is properly clipped. */
9135 pDisk->LCHSGeometry.cHeads = RT_MIN(pDisk->LCHSGeometry.cHeads, 255);
9136 pDisk->LCHSGeometry.cSectors = RT_MIN(pDisk->LCHSGeometry.cSectors, 63);
9137 }
9138 } while (0);
9139
9140 if (RT_UNLIKELY(fLockWrite))
9141 {
9142 rc2 = vdThreadFinishWrite(pDisk);
9143 AssertRC(rc2);
9144 }
9145
9146 LogFlowFunc(("returns %Rrc\n", rc));
9147 return rc;
9148}
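/*
 * Illustrative usage sketch (not part of the original source): discard the
 * topmost image (deleting its file) and then close the remaining images of
 * the container without deleting them.
 */
#if 0
static int exampleDiscardTopImage(PVBOXHDD pDisk)
{
    int rc = VDClose(pDisk, true /* fDelete */);
    if (RT_SUCCESS(rc))
        rc = VDCloseAll(pDisk);
    return rc;
}
#endif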
9149
9150/**
9151 * Closes the currently opened cache image file in HDD container.
9152 *
9153 * @return VBox status code.
9154 * @return VERR_VD_NOT_OPENED if no cache is opened in HDD container.
9155 * @param pDisk Pointer to HDD container.
9156 * @param fDelete If true, delete the image from the host disk.
9157 */
9158VBOXDDU_DECL(int) VDCacheClose(PVBOXHDD pDisk, bool fDelete)
9159{
9160 int rc = VINF_SUCCESS;
9161 int rc2;
9162 bool fLockWrite = false;
9163 PVDCACHE pCache = NULL;
9164
9165 LogFlowFunc(("pDisk=%#p fDelete=%d\n", pDisk, fDelete));
9166
9167 do
9168 {
9169 /* sanity check */
9170 AssertPtrBreakStmt(pDisk, rc = VERR_INVALID_PARAMETER);
9171 AssertMsg(pDisk->u32Signature == VBOXHDDDISK_SIGNATURE, ("u32Signature=%08x\n", pDisk->u32Signature));
9172
9173 rc2 = vdThreadStartWrite(pDisk);
9174 AssertRC(rc2);
9175 fLockWrite = true;
9176
9177 AssertPtrBreakStmt(pDisk->pCache, rc = VERR_VD_CACHE_NOT_FOUND);
9178
9179 pCache = pDisk->pCache;
9180 pDisk->pCache = NULL;
9181
9182 pCache->Backend->pfnClose(pCache->pBackendData, fDelete);
9183 if (pCache->pszFilename)
9184 RTStrFree(pCache->pszFilename);
9185 RTMemFree(pCache);
9186 } while (0);
9187
9188 if (RT_LIKELY(fLockWrite))
9189 {
9190 rc2 = vdThreadFinishWrite(pDisk);
9191 AssertRC(rc2);
9192 }
9193
9194 LogFlowFunc(("returns %Rrc\n", rc));
9195 return rc;
9196}
9197
9198VBOXDDU_DECL(int) VDFilterRemove(PVBOXHDD pDisk, uint32_t fFlags)
9199{
9200 int rc = VINF_SUCCESS;
9201 int rc2;
9202 bool fLockWrite = false;
9203 PVDFILTER pFilter = NULL;
9204
9205 LogFlowFunc(("pDisk=%#p\n", pDisk));
9206
9207 do
9208 {
9209 /* sanity check */
9210 AssertPtrBreakStmt(pDisk, rc = VERR_INVALID_PARAMETER);
9211 AssertMsg(pDisk->u32Signature == VBOXHDDDISK_SIGNATURE, ("u32Signature=%08x\n", pDisk->u32Signature));
9212
9213 AssertMsgBreakStmt(!(fFlags & ~VD_FILTER_FLAGS_MASK),
9214 ("Invalid flags set (fFlags=%#x)\n", fFlags),
9215 rc = VERR_INVALID_PARAMETER);
9216
9217 rc2 = vdThreadStartWrite(pDisk);
9218 AssertRC(rc2);
9219 fLockWrite = true;
9220
9221 if (fFlags & VD_FILTER_FLAGS_WRITE)
9222 {
9223 AssertBreakStmt(!RTListIsEmpty(&pDisk->ListFilterChainWrite), rc = VERR_VD_NOT_OPENED);
9224 pFilter = RTListGetLast(&pDisk->ListFilterChainWrite, VDFILTER, ListNodeChainWrite);
9225 AssertPtr(pFilter);
9226 RTListNodeRemove(&pFilter->ListNodeChainWrite);
9227 vdFilterRelease(pFilter);
9228 }
9229
9230 if (fFlags & VD_FILTER_FLAGS_READ)
9231 {
9232 AssertBreakStmt(!RTListIsEmpty(&pDisk->ListFilterChainRead), rc = VERR_VD_NOT_OPENED);
9233 pFilter = RTListGetLast(&pDisk->ListFilterChainRead, VDFILTER, ListNodeChainRead);
9234 AssertPtr(pFilter);
9235 RTListNodeRemove(&pFilter->ListNodeChainRead);
9236 vdFilterRelease(pFilter);
9237 }
9238 } while (0);
9239
9240 if (RT_LIKELY(fLockWrite))
9241 {
9242 rc2 = vdThreadFinishWrite(pDisk);
9243 AssertRC(rc2);
9244 }
9245
9246 LogFlowFunc(("returns %Rrc\n", rc));
9247 return rc;
9248}
9249
9250/**
9251 * Closes all opened image files in HDD container.
9252 *
9253 * @returns VBox status code.
9254 * @param pDisk Pointer to HDD container.
9255 */
9256VBOXDDU_DECL(int) VDCloseAll(PVBOXHDD pDisk)
9257{
9258 int rc = VINF_SUCCESS;
9259 int rc2;
9260 bool fLockWrite = false;
9261
9262 LogFlowFunc(("pDisk=%#p\n", pDisk));
9263 do
9264 {
9265 /* sanity check */
9266 AssertPtrBreakStmt(pDisk, rc = VERR_INVALID_PARAMETER);
9267 AssertMsg(pDisk->u32Signature == VBOXHDDDISK_SIGNATURE, ("u32Signature=%08x\n", pDisk->u32Signature));
9268
9269 /* Lock the entire operation. */
9270 rc2 = vdThreadStartWrite(pDisk);
9271 AssertRC(rc2);
9272 fLockWrite = true;
9273
9274 PVDCACHE pCache = pDisk->pCache;
9275 if (pCache)
9276 {
9277 rc2 = pCache->Backend->pfnClose(pCache->pBackendData, false);
9278 if (RT_FAILURE(rc2) && RT_SUCCESS(rc))
9279 rc = rc2;
9280
9281 if (pCache->pszFilename)
9282 RTStrFree(pCache->pszFilename);
9283 RTMemFree(pCache);
9284 }
9285
9286 PVDIMAGE pImage = pDisk->pLast;
9287 while (VALID_PTR(pImage))
9288 {
9289 PVDIMAGE pPrev = pImage->pPrev;
9290 /* Remove image from list of opened images. */
9291 vdRemoveImageFromList(pDisk, pImage);
9292 /* Close image. */
9293 rc2 = pImage->Backend->pfnClose(pImage->pBackendData, false);
9294 if (RT_FAILURE(rc2) && RT_SUCCESS(rc))
9295 rc = rc2;
9296 /* Free remaining resources related to the image. */
9297 RTStrFree(pImage->pszFilename);
9298 RTMemFree(pImage);
9299 pImage = pPrev;
9300 }
9301 Assert(!VALID_PTR(pDisk->pLast));
9302 } while (0);
9303
9304 if (RT_UNLIKELY(fLockWrite))
9305 {
9306 rc2 = vdThreadFinishWrite(pDisk);
9307 AssertRC(rc2);
9308 }
9309
9310 LogFlowFunc(("returns %Rrc\n", rc));
9311 return rc;
9312}
9313
9314/**
9315 * Removes all filters of the given HDD container.
9316 *
9317 * @return VBox status code.
9318 * @param pDisk Pointer to HDD container.
9319 */
9320VBOXDDU_DECL(int) VDFilterRemoveAll(PVBOXHDD pDisk)
9321{
9322 int rc = VINF_SUCCESS;
9323 int rc2;
9324 bool fLockWrite = false;
9325
9326 LogFlowFunc(("pDisk=%#p\n", pDisk));
9327 do
9328 {
9329 /* sanity check */
9330 AssertPtrBreakStmt(pDisk, rc = VERR_INVALID_PARAMETER);
9331 AssertMsg(pDisk->u32Signature == VBOXHDDDISK_SIGNATURE, ("u32Signature=%08x\n", pDisk->u32Signature));
9332
9333 /* Lock the entire operation. */
9334 rc2 = vdThreadStartWrite(pDisk);
9335 AssertRC(rc2);
9336 fLockWrite = true;
9337
9338 PVDFILTER pFilter, pFilterNext;
9339 RTListForEachSafe(&pDisk->ListFilterChainWrite, pFilter, pFilterNext, VDFILTER, ListNodeChainWrite)
9340 {
9341 RTListNodeRemove(&pFilter->ListNodeChainWrite);
9342 vdFilterRelease(pFilter);
9343 }
9344
9345 RTListForEachSafe(&pDisk->ListFilterChainRead, pFilter, pFilterNext, VDFILTER, ListNodeChainRead)
9346 {
9347 RTListNodeRemove(&pFilter->ListNodeChainRead);
9348 vdFilterRelease(pFilter);
9349 }
9350 Assert(RTListIsEmpty(&pDisk->ListFilterChainRead));
9351 Assert(RTListIsEmpty(&pDisk->ListFilterChainWrite));
9352 } while (0);
9353
9354 if (RT_UNLIKELY(fLockWrite))
9355 {
9356 rc2 = vdThreadFinishWrite(pDisk);
9357 AssertRC(rc2);
9358 }
9359
9360 LogFlowFunc(("returns %Rrc\n", rc));
9361 return rc;
9362}
9363
9364/**
9365 * Read data from virtual HDD.
9366 *
9367 * @returns VBox status code.
9368 * @retval VERR_VD_NOT_OPENED if no image is opened in HDD container.
9369 * @param pDisk Pointer to HDD container.
9370 * @param uOffset Offset of first reading byte from start of disk.
9371 * @param pvBuf Pointer to buffer for reading data.
9372 * @param cbRead Number of bytes to read.
9373 */
9374VBOXDDU_DECL(int) VDRead(PVBOXHDD pDisk, uint64_t uOffset, void *pvBuf,
9375 size_t cbRead)
9376{
9377 int rc = VINF_SUCCESS;
9378 int rc2;
9379 bool fLockRead = false;
9380
9381 LogFlowFunc(("pDisk=%#p uOffset=%llu pvBuf=%p cbRead=%zu\n",
9382 pDisk, uOffset, pvBuf, cbRead));
9383 do
9384 {
9385 /* sanity check */
9386 AssertPtrBreakStmt(pDisk, rc = VERR_INVALID_PARAMETER);
9387 AssertMsg(pDisk->u32Signature == VBOXHDDDISK_SIGNATURE, ("u32Signature=%08x\n", pDisk->u32Signature));
9388
9389 /* Check arguments. */
9390 AssertMsgBreakStmt(VALID_PTR(pvBuf),
9391 ("pvBuf=%#p\n", pvBuf),
9392 rc = VERR_INVALID_PARAMETER);
9393 AssertMsgBreakStmt(cbRead,
9394 ("cbRead=%zu\n", cbRead),
9395 rc = VERR_INVALID_PARAMETER);
9396
9397 rc2 = vdThreadStartRead(pDisk);
9398 AssertRC(rc2);
9399 fLockRead = true;
9400
9401 PVDIMAGE pImage = pDisk->pLast;
9402 AssertPtrBreakStmt(pImage, rc = VERR_VD_NOT_OPENED);
9403
9404 if (uOffset + cbRead > pDisk->cbSize)
9405 {
9406 /* Floppy images might be smaller than the standard expected by
9407 the floppy controller code. So, we won't fail here. */
9408 AssertMsgBreakStmt(pDisk->enmType == VDTYPE_FLOPPY,
9409 ("uOffset=%llu cbRead=%zu pDisk->cbSize=%llu\n",
9410 uOffset, cbRead, pDisk->cbSize),
9411 rc = VERR_EOF);
9412 memset(pvBuf, 0xf6, cbRead); /* f6h = format.com filler byte */
9413 if (uOffset >= pDisk->cbSize)
9414 break;
9415 cbRead = pDisk->cbSize - uOffset;
9416 }
9417
9418 rc = vdReadHelper(pDisk, pImage, uOffset, pvBuf, cbRead,
9419 true /* fUpdateCache */);
9420 } while (0);
9421
9422 if (RT_UNLIKELY(fLockRead))
9423 {
9424 rc2 = vdThreadFinishRead(pDisk);
9425 AssertRC(rc2);
9426 }
9427
9428 LogFlowFunc(("returns %Rrc\n", rc));
9429 return rc;
9430}
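/*
 * Illustrative usage sketch (not part of the original source): read the first
 * sector of the disk into a stack buffer, using the sector size reported for
 * the topmost image (512 bytes is the common case and the assumed maximum).
 */
#if 0
static int exampleReadFirstSector(PVBOXHDD pDisk)
{
    uint8_t  abSector[512];
    uint32_t cbSector = VDGetSectorSize(pDisk, VDGetCount(pDisk) - 1);
    if (cbSector == 0 || cbSector > sizeof(abSector))
        return VERR_INVALID_STATE;
    return VDRead(pDisk, 0 /* uOffset */, abSector, cbSector);
}
#endif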
9431
9432/**
9433 * Write data to virtual HDD.
9434 *
9435 * @returns VBox status code.
9436 * @retval VERR_VD_NOT_OPENED if no image is opened in HDD container.
9437 * @param pDisk Pointer to HDD container.
9438 * @param uOffset Offset of the first byte being
9439 * written from start of disk.
9440 * @param pvBuf Pointer to buffer for writing data.
9441 * @param cbWrite Number of bytes to write.
9442 */
9443VBOXDDU_DECL(int) VDWrite(PVBOXHDD pDisk, uint64_t uOffset, const void *pvBuf,
9444 size_t cbWrite)
9445{
9446 int rc = VINF_SUCCESS;
9447 int rc2;
9448 bool fLockWrite = false;
9449
9450 LogFlowFunc(("pDisk=%#p uOffset=%llu pvBuf=%p cbWrite=%zu\n",
9451 pDisk, uOffset, pvBuf, cbWrite));
9452 do
9453 {
9454 /* sanity check */
9455 AssertPtrBreakStmt(pDisk, rc = VERR_INVALID_PARAMETER);
9456 AssertMsg(pDisk->u32Signature == VBOXHDDDISK_SIGNATURE, ("u32Signature=%08x\n", pDisk->u32Signature));
9457
9458 /* Check arguments. */
9459 AssertMsgBreakStmt(VALID_PTR(pvBuf),
9460 ("pvBuf=%#p\n", pvBuf),
9461 rc = VERR_INVALID_PARAMETER);
9462 AssertMsgBreakStmt(cbWrite,
9463 ("cbWrite=%zu\n", cbWrite),
9464 rc = VERR_INVALID_PARAMETER);
9465
9466 rc2 = vdThreadStartWrite(pDisk);
9467 AssertRC(rc2);
9468 fLockWrite = true;
9469
9470 AssertMsgBreakStmt(uOffset + cbWrite <= pDisk->cbSize,
9471 ("uOffset=%llu cbWrite=%zu pDisk->cbSize=%llu\n",
9472 uOffset, cbWrite, pDisk->cbSize),
9473 rc = VERR_INVALID_PARAMETER);
9474
9475 PVDIMAGE pImage = pDisk->pLast;
9476 AssertPtrBreakStmt(pImage, rc = VERR_VD_NOT_OPENED);
9477
9478 vdSetModifiedFlag(pDisk);
9479 rc = vdWriteHelper(pDisk, pImage, uOffset, pvBuf, cbWrite,
9480 VDIOCTX_FLAGS_READ_UPDATE_CACHE);
9481 if (RT_FAILURE(rc))
9482 break;
9483
9484 /* If there is a merge (in the direction towards a parent) running
9485 * concurrently then we have to also "relay" the write to this parent,
9486 * as the merge position might be already past the position where
9487 * this write is going. The "context" of the write can come from the
9488 * natural chain, since merging either already did or will take care
 8489 * of the "other" content which might be needed to fill the block
9490 * to a full allocation size. The cache doesn't need to be touched
9491 * as this write is covered by the previous one. */
9492 if (RT_UNLIKELY(pDisk->pImageRelay))
9493 rc = vdWriteHelper(pDisk, pDisk->pImageRelay, uOffset,
9494 pvBuf, cbWrite, VDIOCTX_FLAGS_DEFAULT);
9495 } while (0);
9496
9497 if (RT_UNLIKELY(fLockWrite))
9498 {
9499 rc2 = vdThreadFinishWrite(pDisk);
9500 AssertRC(rc2);
9501 }
9502
9503 LogFlowFunc(("returns %Rrc\n", rc));
9504 return rc;
9505}
9506
9507/**
9508 * Make sure the on disk representation of a virtual HDD is up to date.
9509 *
9510 * @returns VBox status code.
9511 * @retval VERR_VD_NOT_OPENED if no image is opened in HDD container.
9512 * @param pDisk Pointer to HDD container.
9513 */
9514VBOXDDU_DECL(int) VDFlush(PVBOXHDD pDisk)
9515{
9516 int rc = VINF_SUCCESS;
9517 int rc2;
9518 bool fLockWrite = false;
9519
9520 LogFlowFunc(("pDisk=%#p\n", pDisk));
9521 do
9522 {
9523 /* sanity check */
9524 AssertPtrBreakStmt(pDisk, rc = VERR_INVALID_PARAMETER);
9525 AssertMsg(pDisk->u32Signature == VBOXHDDDISK_SIGNATURE, ("u32Signature=%08x\n", pDisk->u32Signature));
9526
9527 rc2 = vdThreadStartWrite(pDisk);
9528 AssertRC(rc2);
9529 fLockWrite = true;
9530
9531 PVDIMAGE pImage = pDisk->pLast;
9532 AssertPtrBreakStmt(pImage, rc = VERR_VD_NOT_OPENED);
9533
9534 VDIOCTX IoCtx;
9535 RTSEMEVENT hEventComplete = NIL_RTSEMEVENT;
9536
9537 rc = RTSemEventCreate(&hEventComplete);
9538 if (RT_FAILURE(rc))
9539 break;
9540
9541 vdIoCtxInit(&IoCtx, pDisk, VDIOCTXTXDIR_FLUSH, 0, 0, pImage, NULL,
9542 NULL, vdFlushHelperAsync, VDIOCTX_FLAGS_SYNC | VDIOCTX_FLAGS_DONT_FREE);
9543
9544 IoCtx.Type.Root.pfnComplete = vdIoCtxSyncComplete;
9545 IoCtx.Type.Root.pvUser1 = pDisk;
9546 IoCtx.Type.Root.pvUser2 = hEventComplete;
9547 rc = vdIoCtxProcessSync(&IoCtx, hEventComplete);
9548
9549 RTSemEventDestroy(hEventComplete);
9550 } while (0);
9551
9552 if (RT_UNLIKELY(fLockWrite))
9553 {
9554 rc2 = vdThreadFinishWrite(pDisk);
9555 AssertRC(rc2);
9556 }
9557
9558 LogFlowFunc(("returns %Rrc\n", rc));
9559 return rc;
9560}
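/*
 * Illustrative usage sketch (not part of the original source): zero the first
 * 512 bytes of a writable container and flush so the change reaches the
 * backing storage.
 */
#if 0
static int exampleZeroFirstSector(PVBOXHDD pDisk)
{
    uint8_t abZero[512];
    memset(abZero, 0, sizeof(abZero));
    int rc = VDWrite(pDisk, 0 /* uOffset */, abZero, sizeof(abZero));
    if (RT_SUCCESS(rc))
        rc = VDFlush(pDisk);
    return rc;
}
#endif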
9561
9562/**
9563 * Get number of opened images in HDD container.
9564 *
9565 * @returns Number of opened images for HDD container. 0 if no images have been opened.
9566 * @param pDisk Pointer to HDD container.
9567 */
9568VBOXDDU_DECL(unsigned) VDGetCount(PVBOXHDD pDisk)
9569{
9570 unsigned cImages;
9571 int rc2;
9572 bool fLockRead = false;
9573
9574 LogFlowFunc(("pDisk=%#p\n", pDisk));
9575 do
9576 {
9577 /* sanity check */
9578 AssertPtrBreakStmt(pDisk, cImages = 0);
9579 AssertMsg(pDisk->u32Signature == VBOXHDDDISK_SIGNATURE, ("u32Signature=%08x\n", pDisk->u32Signature));
9580
9581 rc2 = vdThreadStartRead(pDisk);
9582 AssertRC(rc2);
9583 fLockRead = true;
9584
9585 cImages = pDisk->cImages;
9586 } while (0);
9587
9588 if (RT_UNLIKELY(fLockRead))
9589 {
9590 rc2 = vdThreadFinishRead(pDisk);
9591 AssertRC(rc2);
9592 }
9593
9594 LogFlowFunc(("returns %u\n", cImages));
9595 return cImages;
9596}
9597
9598/**
9599 * Get read/write mode of HDD container.
9600 *
9601 * @returns Virtual disk ReadOnly status.
9602 * @returns true if no image is opened in HDD container.
9603 * @param pDisk Pointer to HDD container.
9604 */
9605VBOXDDU_DECL(bool) VDIsReadOnly(PVBOXHDD pDisk)
9606{
9607 bool fReadOnly;
9608 int rc2;
9609 bool fLockRead = false;
9610
9611 LogFlowFunc(("pDisk=%#p\n", pDisk));
9612 do
9613 {
9614 /* sanity check */
9615 AssertPtrBreakStmt(pDisk, fReadOnly = false);
9616 AssertMsg(pDisk->u32Signature == VBOXHDDDISK_SIGNATURE, ("u32Signature=%08x\n", pDisk->u32Signature));
9617
9618 rc2 = vdThreadStartRead(pDisk);
9619 AssertRC(rc2);
9620 fLockRead = true;
9621
9622 PVDIMAGE pImage = pDisk->pLast;
9623 AssertPtrBreakStmt(pImage, fReadOnly = true);
9624
9625 unsigned uOpenFlags;
9626 uOpenFlags = pDisk->pLast->Backend->pfnGetOpenFlags(pDisk->pLast->pBackendData);
9627 fReadOnly = !!(uOpenFlags & VD_OPEN_FLAGS_READONLY);
9628 } while (0);
9629
9630 if (RT_UNLIKELY(fLockRead))
9631 {
9632 rc2 = vdThreadFinishRead(pDisk);
9633 AssertRC(rc2);
9634 }
9635
9636 LogFlowFunc(("returns %d\n", fReadOnly));
9637 return fReadOnly;
9638}
9639
9640/**
9641 * Get sector size of an image in HDD container.
9642 *
9643 * @return Virtual disk sector size in bytes.
9644 * @return 0 if image with specified number was not opened.
9645 * @param pDisk Pointer to HDD container.
9646 * @param nImage Image number, counts from 0. 0 is always base image of container.
9647 */
9648VBOXDDU_DECL(uint32_t) VDGetSectorSize(PVBOXHDD pDisk, unsigned nImage)
9649{
 9650 uint32_t cbSector;
9651 int rc2;
9652 bool fLockRead = false;
9653
9654 LogFlowFunc(("pDisk=%#p nImage=%u\n", pDisk, nImage));
9655 do
9656 {
9657 /* sanity check */
9658 AssertPtrBreakStmt(pDisk, cbSector = 0);
9659 AssertMsg(pDisk->u32Signature == VBOXHDDDISK_SIGNATURE, ("u32Signature=%08x\n", pDisk->u32Signature));
9660
9661 rc2 = vdThreadStartRead(pDisk);
9662 AssertRC(rc2);
9663 fLockRead = true;
9664
9665 PVDIMAGE pImage = vdGetImageByNumber(pDisk, nImage);
9666 AssertPtrBreakStmt(pImage, cbSector = 0);
9667 cbSector = pImage->Backend->pfnGetSectorSize(pImage->pBackendData);
9668 } while (0);
9669
9670 if (RT_UNLIKELY(fLockRead))
9671 {
9672 rc2 = vdThreadFinishRead(pDisk);
9673 AssertRC(rc2);
9674 }
9675
9676 LogFlowFunc(("returns %u\n", cbSector));
9677 return cbSector;
9678}
9679
9680/**
9681 * Get total capacity of an image in HDD container.
9682 *
9683 * @returns Virtual disk size in bytes.
 9684 * @returns 0 if image with specified number was not opened.
9685 * @param pDisk Pointer to HDD container.
9686 * @param nImage Image number, counts from 0. 0 is always base image of container.
9687 */
9688VBOXDDU_DECL(uint64_t) VDGetSize(PVBOXHDD pDisk, unsigned nImage)
9689{
9690 uint64_t cbSize;
9691 int rc2;
9692 bool fLockRead = false;
9693
9694 LogFlowFunc(("pDisk=%#p nImage=%u\n", pDisk, nImage));
9695 do
9696 {
9697 /* sanity check */
9698 AssertPtrBreakStmt(pDisk, cbSize = 0);
9699 AssertMsg(pDisk->u32Signature == VBOXHDDDISK_SIGNATURE, ("u32Signature=%08x\n", pDisk->u32Signature));
9700
9701 rc2 = vdThreadStartRead(pDisk);
9702 AssertRC(rc2);
9703 fLockRead = true;
9704
9705 PVDIMAGE pImage = vdGetImageByNumber(pDisk, nImage);
9706 AssertPtrBreakStmt(pImage, cbSize = 0);
9707 cbSize = pImage->Backend->pfnGetSize(pImage->pBackendData);
9708 } while (0);
9709
9710 if (RT_UNLIKELY(fLockRead))
9711 {
9712 rc2 = vdThreadFinishRead(pDisk);
9713 AssertRC(rc2);
9714 }
9715
9716 LogFlowFunc(("returns %llu\n", cbSize));
9717 return cbSize;
9718}
9719
9720/**
9721 * Get total file size of an image in HDD container.
9722 *
 9723 * @returns Image file size in bytes.
 9724 * @returns 0 if image with specified number was not opened.
9725 * @param pDisk Pointer to HDD container.
9726 * @param nImage Image number, counts from 0. 0 is always base image of container.
9727 */
9728VBOXDDU_DECL(uint64_t) VDGetFileSize(PVBOXHDD pDisk, unsigned nImage)
9729{
9730 uint64_t cbSize;
9731 int rc2;
9732 bool fLockRead = false;
9733
9734 LogFlowFunc(("pDisk=%#p nImage=%u\n", pDisk, nImage));
9735 do
9736 {
9737 /* sanity check */
9738 AssertPtrBreakStmt(pDisk, cbSize = 0);
9739 AssertMsg(pDisk->u32Signature == VBOXHDDDISK_SIGNATURE, ("u32Signature=%08x\n", pDisk->u32Signature));
9740
9741 rc2 = vdThreadStartRead(pDisk);
9742 AssertRC(rc2);
9743 fLockRead = true;
9744
9745 PVDIMAGE pImage = vdGetImageByNumber(pDisk, nImage);
9746 AssertPtrBreakStmt(pImage, cbSize = 0);
9747 cbSize = pImage->Backend->pfnGetFileSize(pImage->pBackendData);
9748 } while (0);
9749
9750 if (RT_UNLIKELY(fLockRead))
9751 {
9752 rc2 = vdThreadFinishRead(pDisk);
9753 AssertRC(rc2);
9754 }
9755
9756 LogFlowFunc(("returns %llu\n", cbSize));
9757 return cbSize;
9758}
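/*
 * Illustrative usage sketch (not part of the original source): log the
 * virtual capacity and the on-disk footprint of every opened image in the
 * chain.
 */
#if 0
static void exampleLogImageSizes(PVBOXHDD pDisk)
{
    unsigned cImages = VDGetCount(pDisk);
    for (unsigned i = 0; i < cImages; i++)
        LogRel(("Image %u: capacity=%llu bytes, file size=%llu bytes\n",
                i, VDGetSize(pDisk, i), VDGetFileSize(pDisk, i)));
}
#endif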
9759
9760/**
9761 * Get virtual disk PCHS geometry stored in HDD container.
9762 *
9763 * @returns VBox status code.
9764 * @returns VERR_VD_IMAGE_NOT_FOUND if image with specified number was not opened.
9765 * @returns VERR_VD_GEOMETRY_NOT_SET if no geometry present in the HDD container.
9766 * @param pDisk Pointer to HDD container.
9767 * @param nImage Image number, counts from 0. 0 is always base image of container.
9768 * @param pPCHSGeometry Where to store PCHS geometry. Not NULL.
9769 */
9770VBOXDDU_DECL(int) VDGetPCHSGeometry(PVBOXHDD pDisk, unsigned nImage,
9771 PVDGEOMETRY pPCHSGeometry)
9772{
9773 int rc = VINF_SUCCESS;
9774 int rc2;
9775 bool fLockRead = false;
9776
9777 LogFlowFunc(("pDisk=%#p nImage=%u pPCHSGeometry=%#p\n",
9778 pDisk, nImage, pPCHSGeometry));
9779 do
9780 {
9781 /* sanity check */
9782 AssertPtrBreakStmt(pDisk, rc = VERR_INVALID_PARAMETER);
9783 AssertMsg(pDisk->u32Signature == VBOXHDDDISK_SIGNATURE, ("u32Signature=%08x\n", pDisk->u32Signature));
9784
9785 /* Check arguments. */
9786 AssertMsgBreakStmt(VALID_PTR(pPCHSGeometry),
9787 ("pPCHSGeometry=%#p\n", pPCHSGeometry),
9788 rc = VERR_INVALID_PARAMETER);
9789
9790 rc2 = vdThreadStartRead(pDisk);
9791 AssertRC(rc2);
9792 fLockRead = true;
9793
9794 PVDIMAGE pImage = vdGetImageByNumber(pDisk, nImage);
9795 AssertPtrBreakStmt(pImage, rc = VERR_VD_IMAGE_NOT_FOUND);
9796
9797 if (pImage == pDisk->pLast)
9798 {
9799 /* Use cached information if possible. */
9800 if (pDisk->PCHSGeometry.cCylinders != 0)
9801 *pPCHSGeometry = pDisk->PCHSGeometry;
9802 else
9803 rc = VERR_VD_GEOMETRY_NOT_SET;
9804 }
9805 else
9806 rc = pImage->Backend->pfnGetPCHSGeometry(pImage->pBackendData,
9807 pPCHSGeometry);
9808 } while (0);
9809
9810 if (RT_UNLIKELY(fLockRead))
9811 {
9812 rc2 = vdThreadFinishRead(pDisk);
9813 AssertRC(rc2);
9814 }
9815
 9816 LogFlowFunc(("returns %Rrc (PCHS=%u/%u/%u)\n", rc,
9817 pDisk->PCHSGeometry.cCylinders, pDisk->PCHSGeometry.cHeads,
9818 pDisk->PCHSGeometry.cSectors));
9819 return rc;
9820}
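/*
 * Illustrative usage sketch (not part of the original source): query the
 * cached PCHS geometry of the topmost image and, if none was ever set, derive
 * one from the disk size using the same clipping limits as the code above.
 */
#if 0
static int exampleQueryPCHSGeometry(PVBOXHDD pDisk, PVDGEOMETRY pGeometry)
{
    unsigned nImage = VDGetCount(pDisk) - 1;
    int rc = VDGetPCHSGeometry(pDisk, nImage, pGeometry);
    if (rc == VERR_VD_GEOMETRY_NOT_SET)
    {
        pGeometry->cHeads     = 16;
        pGeometry->cSectors   = 63;
        pGeometry->cCylinders = (uint32_t)RT_MIN(VDGetSize(pDisk, nImage) / 512 / 16 / 63, 16383);
        rc = VINF_SUCCESS;
    }
    return rc;
}
#endif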
9821
9822/**
9823 * Store virtual disk PCHS geometry in HDD container.
9824 *
9825 * Note that in case of unrecoverable error all images in HDD container will be closed.
9826 *
9827 * @returns VBox status code.
9828 * @returns VERR_VD_IMAGE_NOT_FOUND if image with specified number was not opened.
9829 * @returns VERR_VD_GEOMETRY_NOT_SET if no geometry present in the HDD container.
9830 * @param pDisk Pointer to HDD container.
9831 * @param nImage Image number, counts from 0. 0 is always base image of container.
9832 * @param pPCHSGeometry Where to load PCHS geometry from. Not NULL.
9833 */
9834VBOXDDU_DECL(int) VDSetPCHSGeometry(PVBOXHDD pDisk, unsigned nImage,
9835 PCVDGEOMETRY pPCHSGeometry)
9836{
9837 int rc = VINF_SUCCESS;
9838 int rc2;
9839 bool fLockWrite = false;
9840
9841 LogFlowFunc(("pDisk=%#p nImage=%u pPCHSGeometry=%#p PCHS=%u/%u/%u\n",
9842 pDisk, nImage, pPCHSGeometry, pPCHSGeometry->cCylinders,
9843 pPCHSGeometry->cHeads, pPCHSGeometry->cSectors));
9844 do
9845 {
9846 /* sanity check */
9847 AssertPtrBreakStmt(pDisk, rc = VERR_INVALID_PARAMETER);
9848 AssertMsg(pDisk->u32Signature == VBOXHDDDISK_SIGNATURE, ("u32Signature=%08x\n", pDisk->u32Signature));
9849
9850 /* Check arguments. */
9851 AssertMsgBreakStmt( VALID_PTR(pPCHSGeometry)
9852 && pPCHSGeometry->cHeads <= 16
9853 && pPCHSGeometry->cSectors <= 63,
9854 ("pPCHSGeometry=%#p PCHS=%u/%u/%u\n", pPCHSGeometry,
9855 pPCHSGeometry->cCylinders, pPCHSGeometry->cHeads,
9856 pPCHSGeometry->cSectors),
9857 rc = VERR_INVALID_PARAMETER);
9858
9859 rc2 = vdThreadStartWrite(pDisk);
9860 AssertRC(rc2);
9861 fLockWrite = true;
9862
9863 PVDIMAGE pImage = vdGetImageByNumber(pDisk, nImage);
9864 AssertPtrBreakStmt(pImage, rc = VERR_VD_IMAGE_NOT_FOUND);
9865
9866 if (pImage == pDisk->pLast)
9867 {
9868 if ( pPCHSGeometry->cCylinders != pDisk->PCHSGeometry.cCylinders
9869 || pPCHSGeometry->cHeads != pDisk->PCHSGeometry.cHeads
9870 || pPCHSGeometry->cSectors != pDisk->PCHSGeometry.cSectors)
9871 {
9872 /* Only update geometry if it is changed. Avoids similar checks
9873 * in every backend. Most of the time the new geometry is set
9874 * to the previous values, so no need to go through the hassle
9875 * of updating an image which could be opened in read-only mode
9876 * right now. */
9877 rc = pImage->Backend->pfnSetPCHSGeometry(pImage->pBackendData,
9878 pPCHSGeometry);
9879
9880 /* Cache new geometry values in any case. */
9881 rc2 = pImage->Backend->pfnGetPCHSGeometry(pImage->pBackendData,
9882 &pDisk->PCHSGeometry);
9883 if (RT_FAILURE(rc2))
9884 {
9885 pDisk->PCHSGeometry.cCylinders = 0;
9886 pDisk->PCHSGeometry.cHeads = 0;
9887 pDisk->PCHSGeometry.cSectors = 0;
9888 }
9889 else
9890 {
 9891 /* Make sure the PCHS geometry is properly clipped. */
 9892 pDisk->PCHSGeometry.cHeads = RT_MIN(pDisk->PCHSGeometry.cHeads, 16);
 9893 pDisk->PCHSGeometry.cSectors = RT_MIN(pDisk->PCHSGeometry.cSectors, 63);
9894 }
9895 }
9896 }
9897 else
9898 {
9899 VDGEOMETRY PCHS;
9900 rc = pImage->Backend->pfnGetPCHSGeometry(pImage->pBackendData,
9901 &PCHS);
9902 if ( RT_FAILURE(rc)
9903 || pPCHSGeometry->cCylinders != PCHS.cCylinders
9904 || pPCHSGeometry->cHeads != PCHS.cHeads
9905 || pPCHSGeometry->cSectors != PCHS.cSectors)
9906 {
9907 /* Only update geometry if it is changed. Avoids similar checks
9908 * in every backend. Most of the time the new geometry is set
9909 * to the previous values, so no need to go through the hassle
9910 * of updating an image which could be opened in read-only mode
9911 * right now. */
9912 rc = pImage->Backend->pfnSetPCHSGeometry(pImage->pBackendData,
9913 pPCHSGeometry);
9914 }
9915 }
9916 } while (0);
9917
9918 if (RT_UNLIKELY(fLockWrite))
9919 {
9920 rc2 = vdThreadFinishWrite(pDisk);
9921 AssertRC(rc2);
9922 }
9923
9924 LogFlowFunc(("returns %Rrc\n", rc));
9925 return rc;
9926}
9927
9928/**
9929 * Get virtual disk LCHS geometry stored in HDD container.
9930 *
9931 * @returns VBox status code.
9932 * @returns VERR_VD_IMAGE_NOT_FOUND if image with specified number was not opened.
9933 * @returns VERR_VD_GEOMETRY_NOT_SET if no geometry present in the HDD container.
9934 * @param pDisk Pointer to HDD container.
9935 * @param nImage Image number, counts from 0. 0 is always base image of container.
9936 * @param pLCHSGeometry Where to store LCHS geometry. Not NULL.
9937 */
9938VBOXDDU_DECL(int) VDGetLCHSGeometry(PVBOXHDD pDisk, unsigned nImage,
9939 PVDGEOMETRY pLCHSGeometry)
9940{
9941 int rc = VINF_SUCCESS;
9942 int rc2;
9943 bool fLockRead = false;
9944
9945 LogFlowFunc(("pDisk=%#p nImage=%u pLCHSGeometry=%#p\n",
9946 pDisk, nImage, pLCHSGeometry));
9947 do
9948 {
9949 /* sanity check */
9950 AssertPtrBreakStmt(pDisk, rc = VERR_INVALID_PARAMETER);
9951 AssertMsg(pDisk->u32Signature == VBOXHDDDISK_SIGNATURE, ("u32Signature=%08x\n", pDisk->u32Signature));
9952
9953 /* Check arguments. */
9954 AssertMsgBreakStmt(VALID_PTR(pLCHSGeometry),
9955 ("pLCHSGeometry=%#p\n", pLCHSGeometry),
9956 rc = VERR_INVALID_PARAMETER);
9957
9958 rc2 = vdThreadStartRead(pDisk);
9959 AssertRC(rc2);
9960 fLockRead = true;
9961
9962 PVDIMAGE pImage = vdGetImageByNumber(pDisk, nImage);
9963 AssertPtrBreakStmt(pImage, rc = VERR_VD_IMAGE_NOT_FOUND);
9964
9965 if (pImage == pDisk->pLast)
9966 {
9967 /* Use cached information if possible. */
9968 if (pDisk->LCHSGeometry.cCylinders != 0)
9969 *pLCHSGeometry = pDisk->LCHSGeometry;
9970 else
9971 rc = VERR_VD_GEOMETRY_NOT_SET;
9972 }
9973 else
9974 rc = pImage->Backend->pfnGetLCHSGeometry(pImage->pBackendData,
9975 pLCHSGeometry);
9976 } while (0);
9977
9978 if (RT_UNLIKELY(fLockRead))
9979 {
9980 rc2 = vdThreadFinishRead(pDisk);
9981 AssertRC(rc2);
9982 }
9983
 9984 LogFlowFunc(("returns %Rrc (LCHS=%u/%u/%u)\n", rc,
9985 pDisk->LCHSGeometry.cCylinders, pDisk->LCHSGeometry.cHeads,
9986 pDisk->LCHSGeometry.cSectors));
9987 return rc;
9988}
9989
9990/**
9991 * Store virtual disk LCHS geometry in HDD container.
9992 *
9993 * Note that in case of unrecoverable error all images in HDD container will be closed.
9994 *
9995 * @returns VBox status code.
9996 * @returns VERR_VD_IMAGE_NOT_FOUND if image with specified number was not opened.
9997 * @returns VERR_VD_GEOMETRY_NOT_SET if no geometry present in the HDD container.
9998 * @param pDisk Pointer to HDD container.
9999 * @param nImage Image number, counts from 0. 0 is always base image of container.
10000 * @param pLCHSGeometry Where to load LCHS geometry from. Not NULL.
10001 */
10002VBOXDDU_DECL(int) VDSetLCHSGeometry(PVBOXHDD pDisk, unsigned nImage,
10003 PCVDGEOMETRY pLCHSGeometry)
10004{
10005 int rc = VINF_SUCCESS;
10006 int rc2;
10007 bool fLockWrite = false;
10008
10009 LogFlowFunc(("pDisk=%#p nImage=%u pLCHSGeometry=%#p LCHS=%u/%u/%u\n",
10010 pDisk, nImage, pLCHSGeometry, pLCHSGeometry->cCylinders,
10011 pLCHSGeometry->cHeads, pLCHSGeometry->cSectors));
10012 do
10013 {
10014 /* sanity check */
10015 AssertPtrBreakStmt(pDisk, rc = VERR_INVALID_PARAMETER);
10016 AssertMsg(pDisk->u32Signature == VBOXHDDDISK_SIGNATURE, ("u32Signature=%08x\n", pDisk->u32Signature));
10017
10018 /* Check arguments. */
10019 AssertMsgBreakStmt( VALID_PTR(pLCHSGeometry)
10020 && pLCHSGeometry->cHeads <= 255
10021 && pLCHSGeometry->cSectors <= 63,
10022 ("pLCHSGeometry=%#p LCHS=%u/%u/%u\n", pLCHSGeometry,
10023 pLCHSGeometry->cCylinders, pLCHSGeometry->cHeads,
10024 pLCHSGeometry->cSectors),
10025 rc = VERR_INVALID_PARAMETER);
10026
10027 rc2 = vdThreadStartWrite(pDisk);
10028 AssertRC(rc2);
10029 fLockWrite = true;
10030
10031 PVDIMAGE pImage = vdGetImageByNumber(pDisk, nImage);
10032 AssertPtrBreakStmt(pImage, rc = VERR_VD_IMAGE_NOT_FOUND);
10033
10034 if (pImage == pDisk->pLast)
10035 {
10036 if ( pLCHSGeometry->cCylinders != pDisk->LCHSGeometry.cCylinders
10037 || pLCHSGeometry->cHeads != pDisk->LCHSGeometry.cHeads
10038 || pLCHSGeometry->cSectors != pDisk->LCHSGeometry.cSectors)
10039 {
10040 /* Only update geometry if it is changed. Avoids similar checks
10041 * in every backend. Most of the time the new geometry is set
10042 * to the previous values, so no need to go through the hassle
10043 * of updating an image which could be opened in read-only mode
10044 * right now. */
10045 rc = pImage->Backend->pfnSetLCHSGeometry(pImage->pBackendData,
10046 pLCHSGeometry);
10047
10048 /* Cache new geometry values in any case. */
10049 rc2 = pImage->Backend->pfnGetLCHSGeometry(pImage->pBackendData,
10050 &pDisk->LCHSGeometry);
10051 if (RT_FAILURE(rc2))
10052 {
10053 pDisk->LCHSGeometry.cCylinders = 0;
10054 pDisk->LCHSGeometry.cHeads = 0;
10055 pDisk->LCHSGeometry.cSectors = 0;
10056 }
10057 else
10058 {
10059 /* Make sure the CHS geometry is properly clipped. */
10060 pDisk->LCHSGeometry.cHeads = RT_MIN(pDisk->LCHSGeometry.cHeads, 255);
10061 pDisk->LCHSGeometry.cSectors = RT_MIN(pDisk->LCHSGeometry.cSectors, 63);
10062 }
10063 }
10064 }
10065 else
10066 {
10067 VDGEOMETRY LCHS;
10068 rc = pImage->Backend->pfnGetLCHSGeometry(pImage->pBackendData,
10069 &LCHS);
10070 if ( RT_FAILURE(rc)
10071 || pLCHSGeometry->cCylinders != LCHS.cCylinders
10072 || pLCHSGeometry->cHeads != LCHS.cHeads
10073 || pLCHSGeometry->cSectors != LCHS.cSectors)
10074 {
10075 /* Only update geometry if it is changed. Avoids similar checks
10076 * in every backend. Most of the time the new geometry is set
10077 * to the previous values, so no need to go through the hassle
10078 * of updating an image which could be opened in read-only mode
10079 * right now. */
10080 rc = pImage->Backend->pfnSetLCHSGeometry(pImage->pBackendData,
10081 pLCHSGeometry);
10082 }
10083 }
10084 } while (0);
10085
10086 if (RT_UNLIKELY(fLockWrite))
10087 {
10088 rc2 = vdThreadFinishWrite(pDisk);
10089 AssertRC(rc2);
10090 }
10091
10092 LogFlowFunc(("returns %Rrc\n", rc));
10093 return rc;
10094}
10095
10096/**
10097 * Queries the available regions of an image in the given VD container.
10098 *
10099 * @return VBox status code.
10100 * @retval VERR_VD_IMAGE_NOT_FOUND if image with specified number was not opened.
10101 * @retval VERR_NOT_SUPPORTED if the image backend doesn't support region lists.
10102 * @param pDisk Pointer to HDD container.
10103 * @param nImage Image number, counts from 0. 0 is always base image of container.
10104 * @param fFlags Combination of VD_REGION_LIST_F_* flags.
10105 * @param ppRegionList Where to store the pointer to the region list on success, must be freed
10106 * with VDRegionListFree().
10107 */
10108VBOXDDU_DECL(int) VDQueryRegions(PVBOXHDD pDisk, unsigned nImage, uint32_t fFlags,
10109 PPVDREGIONLIST ppRegionList)
10110{
10111 int rc = VINF_SUCCESS;
10112 int rc2;
10113 bool fLockRead = false;
10114
10115 LogFlowFunc(("pDisk=%#p nImage=%u fFlags=%#x ppRegionList=%#p\n",
10116 pDisk, nImage, fFlags, ppRegionList));
10117 do
10118 {
10119 /* sanity check */
10120 AssertPtrBreakStmt(pDisk, rc = VERR_INVALID_PARAMETER);
10121 AssertMsg(pDisk->u32Signature == VBOXHDDDISK_SIGNATURE, ("u32Signature=%08x\n", pDisk->u32Signature));
10122
10123 /* Check arguments. */
10124 AssertMsgBreakStmt(VALID_PTR(ppRegionList),
10125 ("ppRegionList=%#p\n", ppRegionList),
10126 rc = VERR_INVALID_PARAMETER);
10127
10128 rc2 = vdThreadStartRead(pDisk);
10129 AssertRC(rc2);
10130 fLockRead = true;
10131
10132 PVDIMAGE pImage = vdGetImageByNumber(pDisk, nImage);
10133 AssertPtrBreakStmt(pImage, rc = VERR_VD_IMAGE_NOT_FOUND);
10134
10135 if (pImage->Backend->pfnQueryRegions)
10136 {
10137 PCVDREGIONLIST pRegionList = NULL;
10138 rc = pImage->Backend->pfnQueryRegions(pImage->pBackendData, &pRegionList);
10139 if (RT_SUCCESS(rc))
10140 {
10141 rc = vdRegionListConv(pRegionList, fFlags, ppRegionList);
10142
10143 AssertPtr(pImage->Backend->pfnRegionListRelease);
10144 pImage->Backend->pfnRegionListRelease(pImage->pBackendData, pRegionList);
10145 }
10146 }
10147 else
10148 rc = VERR_NOT_SUPPORTED;
10149
10150 if (rc == VERR_NOT_SUPPORTED)
10151 {
10152 /*
10153 * Create a list with a single region containing the data gathered from the
10154 * image and sector size.
10155 */
10156 PVDREGIONLIST pRegionList = (PVDREGIONLIST)RTMemAllocZ(RT_UOFFSETOF(VDREGIONLIST, aRegions[1]));
10157 if (RT_LIKELY(pRegionList))
10158 {
10159 uint32_t cbSector = pImage->Backend->pfnGetSectorSize(pImage->pBackendData);
10160 uint64_t cbImage = pImage->Backend->pfnGetSize(pImage->pBackendData);
10161
10162 pRegionList->cRegions = 1;
10163 pRegionList->fFlags = fFlags;
10164
10165 /*
10166 * Single region starting at the first byte/block covering the whole image,
10167 * block size equals sector size and contains no metadata.
10168 */
10169 PVDREGIONDESC pRegion = &pRegionList->aRegions[0];
10170 pRegion->offRegion = 0; /* Disk start. */
10171 pRegion->cbBlock = cbSector;
10172 pRegion->enmDataForm = VDREGIONDATAFORM_RAW;
10173 pRegion->enmMetadataForm = VDREGIONMETADATAFORM_NONE;
10174 pRegion->cbData = cbSector;
10175 pRegion->cbMetadata = 0;
10176 if (fFlags & VD_REGION_LIST_F_LOC_SIZE_BLOCKS)
10177 pRegion->cRegionBlocksOrBytes = cbImage / cbSector;
10178 else
10179 pRegion->cRegionBlocksOrBytes = cbImage;
10180
10181 *ppRegionList = pRegionList;
10182 }
10183 else
10184 rc = VERR_NO_MEMORY;
10185 }
10186 } while (0);
10187
10188 if (RT_UNLIKELY(fLockRead))
10189 {
10190 rc2 = vdThreadFinishRead(pDisk);
10191 AssertRC(rc2);
10192 }
10193
10194 LogFlowFunc(("returns %Rrc\n", rc));
10195 return rc;
10196}
10197
10198/**
10199 * Frees a region list previously queried with VDQueryRegions().
10200 *
10201 * @return nothing.
10202 * @param pRegionList The region list to free.
10203 */
10204VBOXDDU_DECL(void) VDRegionListFree(PVDREGIONLIST pRegionList)
10205{
10206 RTMemFree(pRegionList);
10207}
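/*
 * Illustrative usage sketch (not part of the original source): enumerate the
 * regions of the base image in byte units and release the list again.
 */
#if 0
static int exampleDumpRegions(PVBOXHDD pDisk)
{
    PVDREGIONLIST pRegionList = NULL;
    int rc = VDQueryRegions(pDisk, 0 /* nImage */, 0 /* fFlags: byte based units */,
                            &pRegionList);
    if (RT_SUCCESS(rc))
    {
        for (uint32_t i = 0; i < pRegionList->cRegions; i++)
            LogRel(("Region %u: offset=%llu blockSize=%llu size=%llu bytes\n",
                    i, pRegionList->aRegions[i].offRegion,
                    pRegionList->aRegions[i].cbBlock,
                    pRegionList->aRegions[i].cRegionBlocksOrBytes));
        VDRegionListFree(pRegionList);
    }
    return rc;
}
#endif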
10208
10209/**
10210 * Get version of image in HDD container.
10211 *
10212 * @returns VBox status code.
10213 * @returns VERR_VD_IMAGE_NOT_FOUND if image with specified number was not opened.
10214 * @param pDisk Pointer to HDD container.
10215 * @param nImage Image number, counts from 0. 0 is always base image of container.
10216 * @param puVersion Where to store the image version.
10217 */
10218VBOXDDU_DECL(int) VDGetVersion(PVBOXHDD pDisk, unsigned nImage,
10219 unsigned *puVersion)
10220{
10221 int rc = VINF_SUCCESS;
10222 int rc2;
10223 bool fLockRead = false;
10224
10225 LogFlowFunc(("pDisk=%#p nImage=%u puVersion=%#p\n",
10226 pDisk, nImage, puVersion));
10227 do
10228 {
10229 /* sanity check */
10230 AssertPtrBreakStmt(pDisk, rc = VERR_INVALID_PARAMETER);
10231 AssertMsg(pDisk->u32Signature == VBOXHDDDISK_SIGNATURE, ("u32Signature=%08x\n", pDisk->u32Signature));
10232
10233 /* Check arguments. */
10234 AssertMsgBreakStmt(VALID_PTR(puVersion),
10235 ("puVersion=%#p\n", puVersion),
10236 rc = VERR_INVALID_PARAMETER);
10237
10238 rc2 = vdThreadStartRead(pDisk);
10239 AssertRC(rc2);
10240 fLockRead = true;
10241
10242 PVDIMAGE pImage = vdGetImageByNumber(pDisk, nImage);
10243 AssertPtrBreakStmt(pImage, rc = VERR_VD_IMAGE_NOT_FOUND);
10244
10245 *puVersion = pImage->Backend->pfnGetVersion(pImage->pBackendData);
10246 } while (0);
10247
10248 if (RT_UNLIKELY(fLockRead))
10249 {
10250 rc2 = vdThreadFinishRead(pDisk);
10251 AssertRC(rc2);
10252 }
10253
10254 LogFlowFunc(("returns %Rrc uVersion=%#x\n", rc, *puVersion));
10255 return rc;
10256}
10257
10258/**
10259 * List the capabilities of image backend in HDD container.
10260 *
10261 * @returns VBox status code.
10262 * @retval VERR_VD_IMAGE_NOT_FOUND if image with specified number was not opened.
10263 * @param pDisk Pointer to the HDD container.
10264 * @param nImage Image number, counts from 0. 0 is always base image of container.
10265 * @param pBackendInfo Where to store the backend information.
10266 */
10267VBOXDDU_DECL(int) VDBackendInfoSingle(PVBOXHDD pDisk, unsigned nImage,
10268 PVDBACKENDINFO pBackendInfo)
10269{
10270 int rc = VINF_SUCCESS;
10271 int rc2;
10272 bool fLockRead = false;
10273
10274 LogFlowFunc(("pDisk=%#p nImage=%u pBackendInfo=%#p\n",
10275 pDisk, nImage, pBackendInfo));
10276 do
10277 {
10278 /* sanity check */
10279 AssertPtrBreakStmt(pDisk, rc = VERR_INVALID_PARAMETER);
10280 AssertMsg(pDisk->u32Signature == VBOXHDDDISK_SIGNATURE, ("u32Signature=%08x\n", pDisk->u32Signature));
10281
10282 /* Check arguments. */
10283 AssertMsgBreakStmt(VALID_PTR(pBackendInfo),
10284 ("pBackendInfo=%#p\n", pBackendInfo),
10285 rc = VERR_INVALID_PARAMETER);
10286
10287 rc2 = vdThreadStartRead(pDisk);
10288 AssertRC(rc2);
10289 fLockRead = true;
10290
10291 PVDIMAGE pImage = vdGetImageByNumber(pDisk, nImage);
10292 AssertPtrBreakStmt(pImage, rc = VERR_VD_IMAGE_NOT_FOUND);
10293
10294 pBackendInfo->pszBackend = pImage->Backend->pszBackendName;
10295 pBackendInfo->uBackendCaps = pImage->Backend->uBackendCaps;
10296 pBackendInfo->paFileExtensions = pImage->Backend->paFileExtensions;
10297 pBackendInfo->paConfigInfo = pImage->Backend->paConfigInfo;
10298 } while (0);
10299
10300 if (RT_UNLIKELY(fLockRead))
10301 {
10302 rc2 = vdThreadFinishRead(pDisk);
10303 AssertRC(rc2);
10304 }
10305
10306 LogFlowFunc(("returns %Rrc\n", rc));
10307 return rc;
10308}
10309
10310/**
10311 * Get flags of image in HDD container.
10312 *
10313 * @returns VBox status code.
10314 * @returns VERR_VD_IMAGE_NOT_FOUND if image with specified number was not opened.
10315 * @param pDisk Pointer to HDD container.
10316 * @param nImage Image number, counts from 0. 0 is always base image of container.
10317 * @param puImageFlags Where to store the image flags.
10318 */
10319VBOXDDU_DECL(int) VDGetImageFlags(PVBOXHDD pDisk, unsigned nImage,
10320 unsigned *puImageFlags)
10321{
10322 int rc = VINF_SUCCESS;
10323 int rc2;
10324 bool fLockRead = false;
10325
10326 LogFlowFunc(("pDisk=%#p nImage=%u puImageFlags=%#p\n",
10327 pDisk, nImage, puImageFlags));
10328 do
10329 {
10330 /* sanity check */
10331 AssertPtrBreakStmt(pDisk, rc = VERR_INVALID_PARAMETER);
10332 AssertMsg(pDisk->u32Signature == VBOXHDDDISK_SIGNATURE, ("u32Signature=%08x\n", pDisk->u32Signature));
10333
10334 /* Check arguments. */
10335 AssertMsgBreakStmt(VALID_PTR(puImageFlags),
10336 ("puImageFlags=%#p\n", puImageFlags),
10337 rc = VERR_INVALID_PARAMETER);
10338
10339 rc2 = vdThreadStartRead(pDisk);
10340 AssertRC(rc2);
10341 fLockRead = true;
10342
10343 PVDIMAGE pImage = vdGetImageByNumber(pDisk, nImage);
10344 AssertPtrBreakStmt(pImage, rc = VERR_VD_IMAGE_NOT_FOUND);
10345
10346 *puImageFlags = pImage->uImageFlags;
10347 } while (0);
10348
10349 if (RT_UNLIKELY(fLockRead))
10350 {
10351 rc2 = vdThreadFinishRead(pDisk);
10352 AssertRC(rc2);
10353 }
10354
10355 LogFlowFunc(("returns %Rrc uImageFlags=%#x\n", rc, *puImageFlags));
10356 return rc;
10357}
10358
10359/**
10360 * Get open flags of image in HDD container.
10361 *
10362 * @returns VBox status code.
10363 * @returns VERR_VD_IMAGE_NOT_FOUND if image with specified number was not opened.
10364 * @param pDisk Pointer to HDD container.
10365 * @param nImage Image number, counts from 0. 0 is always base image of container.
10366 * @param puOpenFlags Where to store the image open flags.
10367 */
10368VBOXDDU_DECL(int) VDGetOpenFlags(PVBOXHDD pDisk, unsigned nImage,
10369 unsigned *puOpenFlags)
10370{
10371 int rc = VINF_SUCCESS;
10372 int rc2;
10373 bool fLockRead = false;
10374
10375 LogFlowFunc(("pDisk=%#p nImage=%u puOpenFlags=%#p\n",
10376 pDisk, nImage, puOpenFlags));
10377 do
10378 {
10379 /* sanity check */
10380 AssertPtrBreakStmt(pDisk, rc = VERR_INVALID_PARAMETER);
10381 AssertMsg(pDisk->u32Signature == VBOXHDDDISK_SIGNATURE, ("u32Signature=%08x\n", pDisk->u32Signature));
10382
10383 /* Check arguments. */
10384 AssertMsgBreakStmt(VALID_PTR(puOpenFlags),
10385 ("puOpenFlags=%#p\n", puOpenFlags),
10386 rc = VERR_INVALID_PARAMETER);
10387
10388 rc2 = vdThreadStartRead(pDisk);
10389 AssertRC(rc2);
10390 fLockRead = true;
10391
10392 PVDIMAGE pImage = vdGetImageByNumber(pDisk, nImage);
10393 AssertPtrBreakStmt(pImage, rc = VERR_VD_IMAGE_NOT_FOUND);
10394
10395 *puOpenFlags = pImage->Backend->pfnGetOpenFlags(pImage->pBackendData);
10396 } while (0);
10397
10398 if (RT_UNLIKELY(fLockRead))
10399 {
10400 rc2 = vdThreadFinishRead(pDisk);
10401 AssertRC(rc2);
10402 }
10403
10404 LogFlowFunc(("returns %Rrc uOpenFlags=%#x\n", rc, *puOpenFlags));
10405 return rc;
10406}
10407
10408/**
10409 * Set open flags of image in HDD container.
10410 * This operation may cause file locking changes and/or files being reopened.
10411 * Note that in case of unrecoverable error all images in HDD container will be closed.
10412 *
10413 * @returns VBox status code.
10414 * @returns VERR_VD_IMAGE_NOT_FOUND if image with specified number was not opened.
10415 * @param pDisk Pointer to HDD container.
10416 * @param nImage Image number, counts from 0. 0 is always base image of container.
10417 * @param uOpenFlags Image file open mode, see VD_OPEN_FLAGS_* constants.
10418 */
10419VBOXDDU_DECL(int) VDSetOpenFlags(PVBOXHDD pDisk, unsigned nImage,
10420 unsigned uOpenFlags)
10421{
10422 int rc;
10423 int rc2;
10424 bool fLockWrite = false;
10425
10426 LogFlowFunc(("pDisk=%#p nImage=%u uOpenFlags=%#x\n", pDisk, nImage, uOpenFlags));
10427 do
10428 {
10429 /* sanity check */
10430 AssertPtrBreakStmt(pDisk, rc = VERR_INVALID_PARAMETER);
10431 AssertMsg(pDisk->u32Signature == VBOXHDDDISK_SIGNATURE, ("u32Signature=%08x\n", pDisk->u32Signature));
10432
10433 /* Check arguments. */
10434 AssertMsgBreakStmt((uOpenFlags & ~VD_OPEN_FLAGS_MASK) == 0,
10435 ("uOpenFlags=%#x\n", uOpenFlags),
10436 rc = VERR_INVALID_PARAMETER);
10437
10438 rc2 = vdThreadStartWrite(pDisk);
10439 AssertRC(rc2);
10440 fLockWrite = true;
10441
10442 /* Destroy any discard state because the image might be changed to readonly mode. */
10443 rc = vdDiscardStateDestroy(pDisk);
10444 if (RT_FAILURE(rc))
10445 break;
10446
10447 PVDIMAGE pImage = vdGetImageByNumber(pDisk, nImage);
10448 AssertPtrBreakStmt(pImage, rc = VERR_VD_IMAGE_NOT_FOUND);
10449
10450 rc = pImage->Backend->pfnSetOpenFlags(pImage->pBackendData,
10451 uOpenFlags & ~(VD_OPEN_FLAGS_HONOR_SAME | VD_OPEN_FLAGS_IGNORE_FLUSH | VD_OPEN_FLAGS_INFORM_ABOUT_ZERO_BLOCKS));
10452 if (RT_SUCCESS(rc))
10453 pImage->uOpenFlags = uOpenFlags & (VD_OPEN_FLAGS_HONOR_SAME | VD_OPEN_FLAGS_DISCARD | VD_OPEN_FLAGS_IGNORE_FLUSH | VD_OPEN_FLAGS_INFORM_ABOUT_ZERO_BLOCKS);
10454 } while (0);
10455
10456 if (RT_UNLIKELY(fLockWrite))
10457 {
10458 rc2 = vdThreadFinishWrite(pDisk);
10459 AssertRC(rc2);
10460 }
10461
10462 LogFlowFunc(("returns %Rrc\n", rc));
10463 return rc;
10464}
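
/*
 * Illustrative sketch (not part of the build): toggling the read-only state of
 * the topmost image by combining VDGetOpenFlags and VDSetOpenFlags. The helper
 * name is hypothetical; VD_LAST_IMAGE and VD_OPEN_FLAGS_READONLY come from
 * VBox/vd.h. VDSetOpenFlags may reopen the image file, so callers should be
 * prepared for the container to end up closed on unrecoverable errors.
 */
#if 0
static int vdExampleToggleReadOnly(PVBOXHDD pDisk)
{
    unsigned uOpenFlags = 0;
    /* Query the current flags of the last (topmost) image. */
    int rc = VDGetOpenFlags(pDisk, VD_LAST_IMAGE, &uOpenFlags);
    if (RT_SUCCESS(rc))
    {
        /* Flip the read-only bit and apply the new flags. */
        uOpenFlags ^= VD_OPEN_FLAGS_READONLY;
        rc = VDSetOpenFlags(pDisk, VD_LAST_IMAGE, uOpenFlags);
    }
    return rc;
}
#endif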
10465
10466/**
10467 * Get base filename of image in HDD container. Some image formats use
10468 * other filenames as well, so don't use this for anything but informational
10469 * purposes.
10470 *
10471 * @returns VBox status code.
10472 * @returns VERR_VD_IMAGE_NOT_FOUND if image with specified number was not opened.
10473 * @returns VERR_BUFFER_OVERFLOW if pszFilename buffer too small to hold filename.
10474 * @param pDisk Pointer to HDD container.
10475 * @param nImage Image number, counts from 0. 0 is always base image of container.
10476 * @param pszFilename Where to store the image file name.
10477 * @param cbFilename Size of buffer pszFilename points to.
10478 */
10479VBOXDDU_DECL(int) VDGetFilename(PVBOXHDD pDisk, unsigned nImage,
10480 char *pszFilename, unsigned cbFilename)
10481{
10482 int rc;
10483 int rc2;
10484 bool fLockRead = false;
10485
10486 LogFlowFunc(("pDisk=%#p nImage=%u pszFilename=%#p cbFilename=%u\n",
10487 pDisk, nImage, pszFilename, cbFilename));
10488 do
10489 {
10490 /* sanity check */
10491 AssertPtrBreakStmt(pDisk, rc = VERR_INVALID_PARAMETER);
10492 AssertMsg(pDisk->u32Signature == VBOXHDDDISK_SIGNATURE, ("u32Signature=%08x\n", pDisk->u32Signature));
10493
10494 /* Check arguments. */
10495 AssertMsgBreakStmt(VALID_PTR(pszFilename) && *pszFilename,
10496 ("pszFilename=%#p \"%s\"\n", pszFilename, pszFilename),
10497 rc = VERR_INVALID_PARAMETER);
10498 AssertMsgBreakStmt(cbFilename,
10499 ("cbFilename=%u\n", cbFilename),
10500 rc = VERR_INVALID_PARAMETER);
10501
10502 rc2 = vdThreadStartRead(pDisk);
10503 AssertRC(rc2);
10504 fLockRead = true;
10505
10506 PVDIMAGE pImage = vdGetImageByNumber(pDisk, nImage);
10507 AssertPtrBreakStmt(pImage, rc = VERR_VD_IMAGE_NOT_FOUND);
10508
10509 size_t cb = strlen(pImage->pszFilename);
10510 if (cb < cbFilename) /* need room for the terminating '\0' */
10511 {
10512 strcpy(pszFilename, pImage->pszFilename);
10513 rc = VINF_SUCCESS;
10514 }
10515 else
10516 {
10517 strncpy(pszFilename, pImage->pszFilename, cbFilename - 1);
10518 pszFilename[cbFilename - 1] = '\0';
10519 rc = VERR_BUFFER_OVERFLOW;
10520 }
10521 } while (0);
10522
10523 if (RT_UNLIKELY(fLockRead))
10524 {
10525 rc2 = vdThreadFinishRead(pDisk);
10526 AssertRC(rc2);
10527 }
10528
10529 LogFlowFunc(("returns %Rrc, pszFilename=\"%s\"\n", rc, pszFilename));
10530 return rc;
10531}
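
/*
 * Illustrative sketch (not part of the build): fetching the base image's file
 * name into a fixed-size buffer and treating VERR_BUFFER_OVERFLOW as a
 * truncated but still usable result. RTPATH_MAX comes from iprt/param.h; the
 * helper name is hypothetical.
 */
#if 0
static int vdExampleLogBaseFilename(PVBOXHDD pDisk)
{
    char szFilename[RTPATH_MAX];
    int rc = VDGetFilename(pDisk, 0 /* base image */, szFilename, sizeof(szFilename));
    if (RT_SUCCESS(rc) || rc == VERR_BUFFER_OVERFLOW)
        LogRel(("Base image file: %s%s\n", szFilename,
                rc == VERR_BUFFER_OVERFLOW ? " (truncated)" : ""));
    return rc;
}
#endif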
10532
10533/**
10534 * Get the comment line of image in HDD container.
10535 *
10536 * @returns VBox status code.
10537 * @returns VERR_VD_IMAGE_NOT_FOUND if image with specified number was not opened.
10538 * @returns VERR_BUFFER_OVERFLOW if pszComment buffer too small to hold comment text.
10539 * @param pDisk Pointer to HDD container.
10540 * @param nImage Image number, counts from 0. 0 is always base image of container.
10541 * @param pszComment Where to store the comment string of image. Must not be NULL.
10542 * @param cbComment The size of pszComment buffer. Must be greater than 0.
10543 */
10544VBOXDDU_DECL(int) VDGetComment(PVBOXHDD pDisk, unsigned nImage,
10545 char *pszComment, unsigned cbComment)
10546{
10547 int rc;
10548 int rc2;
10549 bool fLockRead = false;
10550
10551 LogFlowFunc(("pDisk=%#p nImage=%u pszComment=%#p cbComment=%u\n",
10552 pDisk, nImage, pszComment, cbComment));
10553 do
10554 {
10555 /* sanity check */
10556 AssertPtrBreakStmt(pDisk, rc = VERR_INVALID_PARAMETER);
10557 AssertMsg(pDisk->u32Signature == VBOXHDDDISK_SIGNATURE, ("u32Signature=%08x\n", pDisk->u32Signature));
10558
10559 /* Check arguments. */
10560 AssertMsgBreakStmt(VALID_PTR(pszComment),
10561 ("pszComment=%#p \"%s\"\n", pszComment, pszComment),
10562 rc = VERR_INVALID_PARAMETER);
10563 AssertMsgBreakStmt(cbComment,
10564 ("cbComment=%u\n", cbComment),
10565 rc = VERR_INVALID_PARAMETER);
10566
10567 rc2 = vdThreadStartRead(pDisk);
10568 AssertRC(rc2);
10569 fLockRead = true;
10570
10571 PVDIMAGE pImage = vdGetImageByNumber(pDisk, nImage);
10572 AssertPtrBreakStmt(pImage, rc = VERR_VD_IMAGE_NOT_FOUND);
10573
10574 rc = pImage->Backend->pfnGetComment(pImage->pBackendData, pszComment,
10575 cbComment);
10576 } while (0);
10577
10578 if (RT_UNLIKELY(fLockRead))
10579 {
10580 rc2 = vdThreadFinishRead(pDisk);
10581 AssertRC(rc2);
10582 }
10583
10584 LogFlowFunc(("returns %Rrc, pszComment=\"%s\"\n", rc, pszComment));
10585 return rc;
10586}
10587
10588/**
10589 * Changes the comment line of image in HDD container.
10590 *
10591 * @returns VBox status code.
10592 * @returns VERR_VD_IMAGE_NOT_FOUND if image with specified number was not opened.
10593 * @param pDisk Pointer to HDD container.
10594 * @param nImage Image number, counts from 0. 0 is always base image of container.
10595 * @param pszComment New comment string (UTF-8). NULL is allowed to reset the comment.
10596 */
10597VBOXDDU_DECL(int) VDSetComment(PVBOXHDD pDisk, unsigned nImage,
10598 const char *pszComment)
10599{
10600 int rc;
10601 int rc2;
10602 bool fLockWrite = false;
10603
10604 LogFlowFunc(("pDisk=%#p nImage=%u pszComment=%#p \"%s\"\n",
10605 pDisk, nImage, pszComment, pszComment));
10606 do
10607 {
10608 /* sanity check */
10609 AssertPtrBreakStmt(pDisk, rc = VERR_INVALID_PARAMETER);
10610 AssertMsg(pDisk->u32Signature == VBOXHDDDISK_SIGNATURE, ("u32Signature=%08x\n", pDisk->u32Signature));
10611
10612 /* Check arguments. */
10613 AssertMsgBreakStmt(VALID_PTR(pszComment) || pszComment == NULL,
10614 ("pszComment=%#p \"%s\"\n", pszComment, pszComment),
10615 rc = VERR_INVALID_PARAMETER);
10616
10617 rc2 = vdThreadStartWrite(pDisk);
10618 AssertRC(rc2);
10619 fLockWrite = true;
10620
10621 PVDIMAGE pImage = vdGetImageByNumber(pDisk, nImage);
10622 AssertPtrBreakStmt(pImage, rc = VERR_VD_IMAGE_NOT_FOUND);
10623
10624 rc = pImage->Backend->pfnSetComment(pImage->pBackendData, pszComment);
10625 } while (0);
10626
10627 if (RT_UNLIKELY(fLockWrite))
10628 {
10629 rc2 = vdThreadFinishWrite(pDisk);
10630 AssertRC(rc2);
10631 }
10632
10633 LogFlowFunc(("returns %Rrc\n", rc));
10634 return rc;
10635}
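
/*
 * Illustrative sketch (not part of the build): reading the current comment of
 * an image and then replacing or clearing it. Passing NULL to VDSetComment
 * resets the comment; the helper name and buffer size are hypothetical.
 */
#if 0
static int vdExampleUpdateComment(PVBOXHDD pDisk, unsigned nImage, const char *pszNewComment)
{
    char szOldComment[256];
    int rc = VDGetComment(pDisk, nImage, szOldComment, sizeof(szOldComment));
    if (RT_SUCCESS(rc))
        LogRel(("Previous comment: \"%s\"\n", szOldComment));
    /* pszNewComment == NULL clears the comment stored in the image. */
    return VDSetComment(pDisk, nImage, pszNewComment);
}
#endif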
10636
10637
10638/**
10639 * Get UUID of image in HDD container.
10640 *
10641 * @returns VBox status code.
10642 * @returns VERR_VD_IMAGE_NOT_FOUND if image with specified number was not opened.
10643 * @param pDisk Pointer to HDD container.
10644 * @param nImage Image number, counts from 0. 0 is always base image of container.
10645 * @param pUuid Where to store the image creation UUID.
10646 */
10647VBOXDDU_DECL(int) VDGetUuid(PVBOXHDD pDisk, unsigned nImage, PRTUUID pUuid)
10648{
10649 int rc;
10650 int rc2;
10651 bool fLockRead = false;
10652
10653 LogFlowFunc(("pDisk=%#p nImage=%u pUuid=%#p\n", pDisk, nImage, pUuid));
10654 do
10655 {
10656 /* sanity check */
10657 AssertPtrBreakStmt(pDisk, rc = VERR_INVALID_PARAMETER);
10658 AssertMsg(pDisk->u32Signature == VBOXHDDDISK_SIGNATURE, ("u32Signature=%08x\n", pDisk->u32Signature));
10659
10660 /* Check arguments. */
10661 AssertMsgBreakStmt(VALID_PTR(pUuid),
10662 ("pUuid=%#p\n", pUuid),
10663 rc = VERR_INVALID_PARAMETER);
10664
10665 rc2 = vdThreadStartRead(pDisk);
10666 AssertRC(rc2);
10667 fLockRead = true;
10668
10669 PVDIMAGE pImage = vdGetImageByNumber(pDisk, nImage);
10670 AssertPtrBreakStmt(pImage, rc = VERR_VD_IMAGE_NOT_FOUND);
10671
10672 rc = pImage->Backend->pfnGetUuid(pImage->pBackendData, pUuid);
10673 } while (0);
10674
10675 if (RT_UNLIKELY(fLockRead))
10676 {
10677 rc2 = vdThreadFinishRead(pDisk);
10678 AssertRC(rc2);
10679 }
10680
10681 LogFlowFunc(("returns %Rrc, Uuid={%RTuuid}\n", rc, pUuid));
10682 return rc;
10683}
10684
10685/**
10686 * Set the image's UUID. Should not be used by normal applications.
10687 *
10688 * @returns VBox status code.
10689 * @returns VERR_VD_IMAGE_NOT_FOUND if image with specified number was not opened.
10690 * @param pDisk Pointer to HDD container.
10691 * @param nImage Image number, counts from 0. 0 is always base image of container.
10692 * @param pUuid New UUID of the image. If NULL, a new UUID is created.
10693 */
10694VBOXDDU_DECL(int) VDSetUuid(PVBOXHDD pDisk, unsigned nImage, PCRTUUID pUuid)
10695{
10696 int rc;
10697 int rc2;
10698 bool fLockWrite = false;
10699
10700 LogFlowFunc(("pDisk=%#p nImage=%u pUuid=%#p {%RTuuid}\n",
10701 pDisk, nImage, pUuid, pUuid));
10702 do
10703 {
10704 /* sanity check */
10705 AssertPtrBreakStmt(pDisk, rc = VERR_INVALID_PARAMETER);
10706 AssertMsg(pDisk->u32Signature == VBOXHDDDISK_SIGNATURE, ("u32Signature=%08x\n", pDisk->u32Signature));
10707
10708 AssertMsgBreakStmt(VALID_PTR(pUuid) || pUuid == NULL,
10709 ("pUuid=%#p\n", pUuid),
10710 rc = VERR_INVALID_PARAMETER);
10711
10712 rc2 = vdThreadStartWrite(pDisk);
10713 AssertRC(rc2);
10714 fLockWrite = true;
10715
10716 PVDIMAGE pImage = vdGetImageByNumber(pDisk, nImage);
10717 AssertPtrBreakStmt(pImage, rc = VERR_VD_IMAGE_NOT_FOUND);
10718
10719 RTUUID Uuid;
10720 if (!pUuid)
10721 {
10722 RTUuidCreate(&Uuid);
10723 pUuid = &Uuid;
10724 }
10725 rc = pImage->Backend->pfnSetUuid(pImage->pBackendData, pUuid);
10726 } while (0);
10727
10728 if (RT_UNLIKELY(fLockWrite))
10729 {
10730 rc2 = vdThreadFinishWrite(pDisk);
10731 AssertRC(rc2);
10732 }
10733
10734 LogFlowFunc(("returns %Rrc\n", rc));
10735 return rc;
10736}
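
/*
 * Illustrative sketch (not part of the build): reading the creation UUID of the
 * base image and letting VDSetUuid generate a fresh one, as a cloning tool
 * might do to avoid duplicate UUIDs. RTUuidToStr and RTUUID_STR_LENGTH come
 * from iprt/uuid.h; the helper name is hypothetical.
 */
#if 0
static int vdExampleRegenerateUuid(PVBOXHDD pDisk)
{
    RTUUID Uuid;
    char   szUuid[RTUUID_STR_LENGTH];
    int rc = VDGetUuid(pDisk, 0 /* base image */, &Uuid);
    if (RT_SUCCESS(rc))
    {
        RTUuidToStr(&Uuid, szUuid, sizeof(szUuid));
        LogRel(("Old image UUID: %s\n", szUuid));
        /* Passing NULL makes VDSetUuid create a new random UUID itself. */
        rc = VDSetUuid(pDisk, 0 /* base image */, NULL);
    }
    return rc;
}
#endif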
10737
10738/**
10739 * Get last modification UUID of image in HDD container.
10740 *
10741 * @returns VBox status code.
10742 * @returns VERR_VD_IMAGE_NOT_FOUND if image with specified number was not opened.
10743 * @param pDisk Pointer to HDD container.
10744 * @param nImage Image number, counts from 0. 0 is always base image of container.
10745 * @param pUuid Where to store the image modification UUID.
10746 */
10747VBOXDDU_DECL(int) VDGetModificationUuid(PVBOXHDD pDisk, unsigned nImage, PRTUUID pUuid)
10748{
10749 int rc = VINF_SUCCESS;
10750 int rc2;
10751 bool fLockRead = false;
10752
10753 LogFlowFunc(("pDisk=%#p nImage=%u pUuid=%#p\n", pDisk, nImage, pUuid));
10754 do
10755 {
10756 /* sanity check */
10757 AssertPtrBreakStmt(pDisk, rc = VERR_INVALID_PARAMETER);
10758 AssertMsg(pDisk->u32Signature == VBOXHDDDISK_SIGNATURE, ("u32Signature=%08x\n", pDisk->u32Signature));
10759
10760 /* Check arguments. */
10761 AssertMsgBreakStmt(VALID_PTR(pUuid),
10762 ("pUuid=%#p\n", pUuid),
10763 rc = VERR_INVALID_PARAMETER);
10764
10765 rc2 = vdThreadStartRead(pDisk);
10766 AssertRC(rc2);
10767 fLockRead = true;
10768
10769 PVDIMAGE pImage = vdGetImageByNumber(pDisk, nImage);
10770 AssertPtrBreakStmt(pImage, rc = VERR_VD_IMAGE_NOT_FOUND);
10771
10772 rc = pImage->Backend->pfnGetModificationUuid(pImage->pBackendData,
10773 pUuid);
10774 } while (0);
10775
10776 if (RT_UNLIKELY(fLockRead))
10777 {
10778 rc2 = vdThreadFinishRead(pDisk);
10779 AssertRC(rc2);
10780 }
10781
10782 LogFlowFunc(("returns %Rrc, Uuid={%RTuuid}\n", rc, pUuid));
10783 return rc;
10784}
10785
10786/**
10787 * Set the image's last modification UUID. Should not be used by normal applications.
10788 *
10789 * @returns VBox status code.
10790 * @returns VERR_VD_IMAGE_NOT_FOUND if image with specified number was not opened.
10791 * @param pDisk Pointer to HDD container.
10792 * @param nImage Image number, counts from 0. 0 is always base image of container.
10793 * @param pUuid New modification UUID of the image. If NULL, a new UUID is created.
10794 */
10795VBOXDDU_DECL(int) VDSetModificationUuid(PVBOXHDD pDisk, unsigned nImage, PCRTUUID pUuid)
10796{
10797 int rc;
10798 int rc2;
10799 bool fLockWrite = false;
10800
10801 LogFlowFunc(("pDisk=%#p nImage=%u pUuid=%#p {%RTuuid}\n",
10802 pDisk, nImage, pUuid, pUuid));
10803 do
10804 {
10805 /* sanity check */
10806 AssertPtrBreakStmt(pDisk, rc = VERR_INVALID_PARAMETER);
10807 AssertMsg(pDisk->u32Signature == VBOXHDDDISK_SIGNATURE, ("u32Signature=%08x\n", pDisk->u32Signature));
10808
10809 /* Check arguments. */
10810 AssertMsgBreakStmt(VALID_PTR(pUuid) || pUuid == NULL,
10811 ("pUuid=%#p\n", pUuid),
10812 rc = VERR_INVALID_PARAMETER);
10813
10814 rc2 = vdThreadStartWrite(pDisk);
10815 AssertRC(rc2);
10816 fLockWrite = true;
10817
10818 PVDIMAGE pImage = vdGetImageByNumber(pDisk, nImage);
10819 AssertPtrBreakStmt(pImage, rc = VERR_VD_IMAGE_NOT_FOUND);
10820
10821 RTUUID Uuid;
10822 if (!pUuid)
10823 {
10824 RTUuidCreate(&Uuid);
10825 pUuid = &Uuid;
10826 }
10827 rc = pImage->Backend->pfnSetModificationUuid(pImage->pBackendData,
10828 pUuid);
10829 } while (0);
10830
10831 if (RT_UNLIKELY(fLockWrite))
10832 {
10833 rc2 = vdThreadFinishWrite(pDisk);
10834 AssertRC(rc2);
10835 }
10836
10837 LogFlowFunc(("returns %Rrc\n", rc));
10838 return rc;
10839}
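
/*
 * Illustrative sketch (not part of the build): comparing a previously recorded
 * modification UUID against the current one to detect that the image content
 * changed behind the caller's back. RTUuidCompare comes from iprt/uuid.h; the
 * helper name is hypothetical.
 */
#if 0
static int vdExampleCheckModified(PVBOXHDD pDisk, unsigned nImage,
                                  PCRTUUID pUuidLastSeen, bool *pfModified)
{
    RTUUID Uuid;
    int rc = VDGetModificationUuid(pDisk, nImage, &Uuid);
    if (RT_SUCCESS(rc))
        *pfModified = RTUuidCompare(&Uuid, pUuidLastSeen) != 0;
    return rc;
}
#endif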
10840
10841/**
10842 * Get parent UUID of image in HDD container.
10843 *
10844 * @returns VBox status code.
10845 * @returns VERR_VD_IMAGE_NOT_FOUND if image with specified number was not opened.
10846 * @param pDisk Pointer to HDD container.
10847 * @param nImage Image number, counts from 0. 0 is always base image of container.
10848 * @param pUuid Where to store the parent image UUID.
10849 */
10850VBOXDDU_DECL(int) VDGetParentUuid(PVBOXHDD pDisk, unsigned nImage,
10851 PRTUUID pUuid)
10852{
10853 int rc = VINF_SUCCESS;
10854 int rc2;
10855 bool fLockRead = false;
10856
10857 LogFlowFunc(("pDisk=%#p nImage=%u pUuid=%#p\n", pDisk, nImage, pUuid));
10858 do
10859 {
10860 /* sanity check */
10861 AssertPtrBreakStmt(pDisk, rc = VERR_INVALID_PARAMETER);
10862 AssertMsg(pDisk->u32Signature == VBOXHDDDISK_SIGNATURE, ("u32Signature=%08x\n", pDisk->u32Signature));
10863
10864 /* Check arguments. */
10865 AssertMsgBreakStmt(VALID_PTR(pUuid),
10866 ("pUuid=%#p\n", pUuid),
10867 rc = VERR_INVALID_PARAMETER);
10868
10869 rc2 = vdThreadStartRead(pDisk);
10870 AssertRC(rc2);
10871 fLockRead = true;
10872
10873 PVDIMAGE pImage = vdGetImageByNumber(pDisk, nImage);
10874 AssertPtrBreakStmt(pImage, rc = VERR_VD_IMAGE_NOT_FOUND);
10875
10876 rc = pImage->Backend->pfnGetParentUuid(pImage->pBackendData, pUuid);
10877 } while (0);
10878
10879 if (RT_UNLIKELY(fLockRead))
10880 {
10881 rc2 = vdThreadFinishRead(pDisk);
10882 AssertRC(rc2);
10883 }
10884
10885 LogFlowFunc(("returns %Rrc, Uuid={%RTuuid}\n", rc, pUuid));
10886 return rc;
10887}
10888
10889/**
10890 * Set the image's parent UUID. Should not be used by normal applications.
10891 *
10892 * @returns VBox status code.
10893 * @param pDisk Pointer to HDD container.
10894 * @param nImage Image number, counts from 0. 0 is always base image of container.
10895 * @param pUuid New parent UUID of the image. If NULL, a new UUID is created.
10896 */
10897VBOXDDU_DECL(int) VDSetParentUuid(PVBOXHDD pDisk, unsigned nImage,
10898 PCRTUUID pUuid)
10899{
10900 int rc;
10901 int rc2;
10902 bool fLockWrite = false;
10903
10904 LogFlowFunc(("pDisk=%#p nImage=%u pUuid=%#p {%RTuuid}\n",
10905 pDisk, nImage, pUuid, pUuid));
10906 do
10907 {
10908 /* sanity check */
10909 AssertPtrBreakStmt(pDisk, rc = VERR_INVALID_PARAMETER);
10910 AssertMsg(pDisk->u32Signature == VBOXHDDDISK_SIGNATURE, ("u32Signature=%08x\n", pDisk->u32Signature));
10911
10912 /* Check arguments. */
10913 AssertMsgBreakStmt(VALID_PTR(pUuid) || pUuid == NULL,
10914 ("pUuid=%#p\n", pUuid),
10915 rc = VERR_INVALID_PARAMETER);
10916
10917 rc2 = vdThreadStartWrite(pDisk);
10918 AssertRC(rc2);
10919 fLockWrite = true;
10920
10921 PVDIMAGE pImage = vdGetImageByNumber(pDisk, nImage);
10922 AssertPtrBreakStmt(pImage, rc = VERR_VD_IMAGE_NOT_FOUND);
10923
10924 RTUUID Uuid;
10925 if (!pUuid)
10926 {
10927 RTUuidCreate(&Uuid);
10928 pUuid = &Uuid;
10929 }
10930 rc = pImage->Backend->pfnSetParentUuid(pImage->pBackendData, pUuid);
10931 } while (0);
10932
10933 if (RT_UNLIKELY(fLockWrite))
10934 {
10935 rc2 = vdThreadFinishWrite(pDisk);
10936 AssertRC(rc2);
10937 }
10938
10939 LogFlowFunc(("returns %Rrc\n", rc));
10940 return rc;
10941}
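
/*
 * Illustrative sketch (not part of the build): re-linking a differencing image
 * to its parent by copying the parent's creation UUID into the child's parent
 * UUID field, roughly what a snapshot re-parenting operation has to do. The
 * helper name and image numbering are hypothetical.
 */
#if 0
static int vdExampleRelinkParent(PVBOXHDD pDisk, unsigned nParent, unsigned nChild)
{
    RTUUID UuidParent;
    int rc = VDGetUuid(pDisk, nParent, &UuidParent);
    if (RT_SUCCESS(rc))
        rc = VDSetParentUuid(pDisk, nChild, &UuidParent);
    return rc;
}
#endif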
10942
10943
10944/**
10945 * Debug helper - dumps all opened images in HDD container into the log file.
10946 *
10947 * @param pDisk Pointer to HDD container.
10948 */
10949VBOXDDU_DECL(void) VDDumpImages(PVBOXHDD pDisk)
10950{
10951 int rc2;
10952 bool fLockRead = false;
10953
10954 do
10955 {
10956 /* sanity check */
10957 AssertPtrBreak(pDisk);
10958 AssertMsg(pDisk->u32Signature == VBOXHDDDISK_SIGNATURE, ("u32Signature=%08x\n", pDisk->u32Signature));
10959
10960 if (pDisk->pInterfaceError && !VALID_PTR(pDisk->pInterfaceError->pfnMessage))
10961 pDisk->pInterfaceError->pfnMessage = vdLogMessage;
10962
10963 rc2 = vdThreadStartRead(pDisk);
10964 AssertRC(rc2);
10965 fLockRead = true;
10966
10967 vdMessageWrapper(pDisk, "--- Dumping VD Disk, Images=%u\n", pDisk->cImages);
10968 for (PVDIMAGE pImage = pDisk->pBase; pImage; pImage = pImage->pNext)
10969 {
10970 vdMessageWrapper(pDisk, "Dumping VD image \"%s\" (Backend=%s)\n",
10971 pImage->pszFilename, pImage->Backend->pszBackendName);
10972 pImage->Backend->pfnDump(pImage->pBackendData);
10973 }
10974 } while (0);
10975
10976 if (RT_UNLIKELY(fLockRead))
10977 {
10978 rc2 = vdThreadFinishRead(pDisk);
10979 AssertRC(rc2);
10980 }
10981}
10982
10983
10984VBOXDDU_DECL(int) VDDiscardRanges(PVBOXHDD pDisk, PCRTRANGE paRanges, unsigned cRanges)
10985{
10986 int rc;
10987 int rc2;
10988 bool fLockWrite = false;
10989
10990 LogFlowFunc(("pDisk=%#p paRanges=%#p cRanges=%u\n",
10991 pDisk, paRanges, cRanges));
10992 do
10993 {
10994 /* sanity check */
10995 AssertPtrBreakStmt(pDisk, rc = VERR_INVALID_PARAMETER);
10996 AssertMsg(pDisk->u32Signature == VBOXHDDDISK_SIGNATURE, ("u32Signature=%08x\n", pDisk->u32Signature));
10997
10998 /* Check arguments. */
10999 AssertMsgBreakStmt(cRanges,
11000 ("cRanges=%u\n", cRanges),
11001 rc = VERR_INVALID_PARAMETER);
11002 AssertMsgBreakStmt(VALID_PTR(paRanges),
11003 ("paRanges=%#p\n", paRanges),
11004 rc = VERR_INVALID_PARAMETER);
11005
11006 rc2 = vdThreadStartWrite(pDisk);
11007 AssertRC(rc2);
11008 fLockWrite = true;
11009
11010 AssertPtrBreakStmt(pDisk->pLast, rc = VERR_VD_NOT_OPENED);
11011
11012 AssertMsgBreakStmt(pDisk->pLast->uOpenFlags & VD_OPEN_FLAGS_DISCARD,
11013 ("Discarding not supported\n"),
11014 rc = VERR_NOT_SUPPORTED);
11015
11016 VDIOCTX IoCtx;
11017 RTSEMEVENT hEventComplete = NIL_RTSEMEVENT;
11018
11019 rc = RTSemEventCreate(&hEventComplete);
11020 if (RT_FAILURE(rc))
11021 break;
11022
11023 vdIoCtxDiscardInit(&IoCtx, pDisk, paRanges, cRanges,
11024 vdIoCtxSyncComplete, pDisk, hEventComplete, NULL,
11025 vdDiscardHelperAsync, VDIOCTX_FLAGS_SYNC | VDIOCTX_FLAGS_DONT_FREE);
11026 rc = vdIoCtxProcessSync(&IoCtx, hEventComplete);
11027
11028 RTSemEventDestroy(hEventComplete);
11029 } while (0);
11030
11031 if (RT_UNLIKELY(fLockWrite))
11032 {
11033 rc2 = vdThreadFinishWrite(pDisk);
11034 AssertRC(rc2);
11035 }
11036
11037 LogFlowFunc(("returns %Rrc\n", rc));
11038 return rc;
11039}
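
/*
 * Illustrative sketch (not part of the build): discarding two unused regions of
 * the disk. The topmost image must have been opened with VD_OPEN_FLAGS_DISCARD,
 * otherwise VERR_NOT_SUPPORTED is returned as asserted above. The RTRANGE
 * layout (offStart/cbRange) is assumed from iprt/types.h; the helper name and
 * offsets are hypothetical.
 */
#if 0
static int vdExampleDiscard(PVBOXHDD pDisk)
{
    RTRANGE aRanges[2];
    aRanges[0].offStart = 0;
    aRanges[0].cbRange  = 1 * _1M;
    aRanges[1].offStart = 8 * _1M;
    aRanges[1].cbRange  = 2 * _1M;
    return VDDiscardRanges(pDisk, &aRanges[0], RT_ELEMENTS(aRanges));
}
#endif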
11040
11041
11042VBOXDDU_DECL(int) VDAsyncRead(PVBOXHDD pDisk, uint64_t uOffset, size_t cbRead,
11043 PCRTSGBUF pcSgBuf,
11044 PFNVDASYNCTRANSFERCOMPLETE pfnComplete,
11045 void *pvUser1, void *pvUser2)
11046{
11047 int rc = VERR_VD_BLOCK_FREE;
11048 int rc2;
11049 bool fLockRead = false;
11050 PVDIOCTX pIoCtx = NULL;
11051
11052 LogFlowFunc(("pDisk=%#p uOffset=%llu pcSgBuf=%#p cbRead=%zu pvUser1=%#p pvUser2=%#p\n",
11053 pDisk, uOffset, pcSgBuf, cbRead, pvUser1, pvUser2));
11054
11055 do
11056 {
11057 /* sanity check */
11058 AssertPtrBreakStmt(pDisk, rc = VERR_INVALID_PARAMETER);
11059 AssertMsg(pDisk->u32Signature == VBOXHDDDISK_SIGNATURE, ("u32Signature=%08x\n", pDisk->u32Signature));
11060
11061 /* Check arguments. */
11062 AssertMsgBreakStmt(cbRead,
11063 ("cbRead=%zu\n", cbRead),
11064 rc = VERR_INVALID_PARAMETER);
11065 AssertMsgBreakStmt(VALID_PTR(pcSgBuf),
11066 ("pcSgBuf=%#p\n", pcSgBuf),
11067 rc = VERR_INVALID_PARAMETER);
11068
11069 rc2 = vdThreadStartRead(pDisk);
11070 AssertRC(rc2);
11071 fLockRead = true;
11072
11073 AssertMsgBreakStmt(uOffset + cbRead <= pDisk->cbSize,
11074 ("uOffset=%llu cbRead=%zu pDisk->cbSize=%llu\n",
11075 uOffset, cbRead, pDisk->cbSize),
11076 rc = VERR_INVALID_PARAMETER);
11077 AssertPtrBreakStmt(pDisk->pLast, rc = VERR_VD_NOT_OPENED);
11078
11079 pIoCtx = vdIoCtxRootAlloc(pDisk, VDIOCTXTXDIR_READ, uOffset,
11080 cbRead, pDisk->pLast, pcSgBuf,
11081 pfnComplete, pvUser1, pvUser2,
11082 NULL, vdReadHelperAsync,
11083 VDIOCTX_FLAGS_ZERO_FREE_BLOCKS);
11084 if (!pIoCtx)
11085 {
11086 rc = VERR_NO_MEMORY;
11087 break;
11088 }
11089
11090 rc = vdIoCtxProcessTryLockDefer(pIoCtx);
11091 if (rc == VINF_VD_ASYNC_IO_FINISHED)
11092 {
11093 if (ASMAtomicCmpXchgBool(&pIoCtx->fComplete, true, false))
11094 vdIoCtxFree(pDisk, pIoCtx);
11095 else
11096 rc = VERR_VD_ASYNC_IO_IN_PROGRESS; /* Let the other handler complete the request. */
11097 }
11098 else if (rc != VERR_VD_ASYNC_IO_IN_PROGRESS) /* Another error */
11099 vdIoCtxFree(pDisk, pIoCtx);
11100
11101 } while (0);
11102
11103 if (RT_UNLIKELY(fLockRead) && (rc != VERR_VD_ASYNC_IO_IN_PROGRESS))
11104 {
11105 rc2 = vdThreadFinishRead(pDisk);
11106 AssertRC(rc2);
11107 }
11108
11109 LogFlowFunc(("returns %Rrc\n", rc));
11110 return rc;
11111}
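
/*
 * Illustrative sketch (not part of the build): a small synchronous wrapper
 * around VDAsyncRead which builds a single-segment S/G buffer and waits on an
 * event semaphore signalled from the completion callback. The callback
 * signature (pvUser1, pvUser2, rcReq) is assumed from the
 * PFNVDASYNCTRANSFERCOMPLETE declaration in VBox/vd.h; the helper names are
 * hypothetical. The same wrapper shape applies to VDAsyncWrite and
 * VDAsyncFlush below.
 */
#if 0
static DECLCALLBACK(void) vdExampleXferComplete(void *pvUser1, void *pvUser2, int rcReq)
{
    *(int *)pvUser2 = rcReq;                /* Hand the request status back to the waiter. */
    RTSemEventSignal((RTSEMEVENT)pvUser1);  /* Wake up the waiting thread. */
}

static int vdExampleReadSync(PVBOXHDD pDisk, uint64_t uOffset, void *pvBuf, size_t cbRead)
{
    RTSGSEG Seg;
    RTSGBUF SgBuf;
    Seg.pvSeg = pvBuf;
    Seg.cbSeg = cbRead;
    RTSgBufInit(&SgBuf, &Seg, 1);

    RTSEMEVENT hEvent = NIL_RTSEMEVENT;
    int rcReq = VINF_SUCCESS;
    int rc = RTSemEventCreate(&hEvent);
    if (RT_FAILURE(rc))
        return rc;

    rc = VDAsyncRead(pDisk, uOffset, cbRead, &SgBuf, vdExampleXferComplete, hEvent, &rcReq);
    if (rc == VERR_VD_ASYNC_IO_IN_PROGRESS)
    {
        /* The request was queued; block until the completion callback fires. */
        RTSemEventWait(hEvent, RT_INDEFINITE_WAIT);
        rc = rcReq;
    }
    /* Any other status (e.g. VINF_VD_ASYNC_IO_FINISHED) means the request completed inline. */

    RTSemEventDestroy(hEvent);
    return rc;
}
#endif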
11112
11113
11114VBOXDDU_DECL(int) VDAsyncWrite(PVBOXHDD pDisk, uint64_t uOffset, size_t cbWrite,
11115 PCRTSGBUF pcSgBuf,
11116 PFNVDASYNCTRANSFERCOMPLETE pfnComplete,
11117 void *pvUser1, void *pvUser2)
11118{
11119 int rc;
11120 int rc2;
11121 bool fLockWrite = false;
11122 PVDIOCTX pIoCtx = NULL;
11123
11124 LogFlowFunc(("pDisk=%#p uOffset=%llu cSgBuf=%#p cbWrite=%zu pvUser1=%#p pvUser2=%#p\n",
11125 pDisk, uOffset, pcSgBuf, cbWrite, pvUser1, pvUser2));
11126 do
11127 {
11128 /* sanity check */
11129 AssertPtrBreakStmt(pDisk, rc = VERR_INVALID_PARAMETER);
11130 AssertMsg(pDisk->u32Signature == VBOXHDDDISK_SIGNATURE, ("u32Signature=%08x\n", pDisk->u32Signature));
11131
11132 /* Check arguments. */
11133 AssertMsgBreakStmt(cbWrite,
11134 ("cbWrite=%zu\n", cbWrite),
11135 rc = VERR_INVALID_PARAMETER);
11136 AssertMsgBreakStmt(VALID_PTR(pcSgBuf),
11137 ("pcSgBuf=%#p\n", pcSgBuf),
11138 rc = VERR_INVALID_PARAMETER);
11139
11140 rc2 = vdThreadStartWrite(pDisk);
11141 AssertRC(rc2);
11142 fLockWrite = true;
11143
11144 AssertMsgBreakStmt(uOffset + cbWrite <= pDisk->cbSize,
11145 ("uOffset=%llu cbWrite=%zu pDisk->cbSize=%llu\n",
11146 uOffset, cbWrite, pDisk->cbSize),
11147 rc = VERR_INVALID_PARAMETER);
11148 AssertPtrBreakStmt(pDisk->pLast, rc = VERR_VD_NOT_OPENED);
11149
11150 pIoCtx = vdIoCtxRootAlloc(pDisk, VDIOCTXTXDIR_WRITE, uOffset,
11151 cbWrite, pDisk->pLast, pcSgBuf,
11152 pfnComplete, pvUser1, pvUser2,
11153 NULL, vdWriteHelperAsync,
11154 VDIOCTX_FLAGS_DEFAULT);
11155 if (!pIoCtx)
11156 {
11157 rc = VERR_NO_MEMORY;
11158 break;
11159 }
11160
11161 rc = vdIoCtxProcessTryLockDefer(pIoCtx);
11162 if (rc == VINF_VD_ASYNC_IO_FINISHED)
11163 {
11164 if (ASMAtomicCmpXchgBool(&pIoCtx->fComplete, true, false))
11165 vdIoCtxFree(pDisk, pIoCtx);
11166 else
11167 rc = VERR_VD_ASYNC_IO_IN_PROGRESS; /* Let the other handler complete the request. */
11168 }
11169 else if (rc != VERR_VD_ASYNC_IO_IN_PROGRESS) /* Another error */
11170 vdIoCtxFree(pDisk, pIoCtx);
11171 } while (0);
11172
11173 if (RT_UNLIKELY(fLockWrite) && (rc != VERR_VD_ASYNC_IO_IN_PROGRESS))
11174 {
11175 rc2 = vdThreadFinishWrite(pDisk);
11176 AssertRC(rc2);
11177 }
11178
11179 LogFlowFunc(("returns %Rrc\n", rc));
11180 return rc;
11181}
11182
11183
11184VBOXDDU_DECL(int) VDAsyncFlush(PVBOXHDD pDisk, PFNVDASYNCTRANSFERCOMPLETE pfnComplete,
11185 void *pvUser1, void *pvUser2)
11186{
11187 int rc;
11188 int rc2;
11189 bool fLockWrite = false;
11190 PVDIOCTX pIoCtx = NULL;
11191
11192 LogFlowFunc(("pDisk=%#p\n", pDisk));
11193
11194 do
11195 {
11196 /* sanity check */
11197 AssertPtrBreakStmt(pDisk, rc = VERR_INVALID_PARAMETER);
11198 AssertMsg(pDisk->u32Signature == VBOXHDDDISK_SIGNATURE, ("u32Signature=%08x\n", pDisk->u32Signature));
11199
11200 rc2 = vdThreadStartWrite(pDisk);
11201 AssertRC(rc2);
11202 fLockWrite = true;
11203
11204 AssertPtrBreakStmt(pDisk->pLast, rc = VERR_VD_NOT_OPENED);
11205
11206 pIoCtx = vdIoCtxRootAlloc(pDisk, VDIOCTXTXDIR_FLUSH, 0,
11207 0, pDisk->pLast, NULL,
11208 pfnComplete, pvUser1, pvUser2,
11209 NULL, vdFlushHelperAsync,
11210 VDIOCTX_FLAGS_DEFAULT);
11211 if (!pIoCtx)
11212 {
11213 rc = VERR_NO_MEMORY;
11214 break;
11215 }
11216
11217 rc = vdIoCtxProcessTryLockDefer(pIoCtx);
11218 if (rc == VINF_VD_ASYNC_IO_FINISHED)
11219 {
11220 if (ASMAtomicCmpXchgBool(&pIoCtx->fComplete, true, false))
11221 vdIoCtxFree(pDisk, pIoCtx);
11222 else
11223 rc = VERR_VD_ASYNC_IO_IN_PROGRESS; /* Let the other handler complete the request. */
11224 }
11225 else if (rc != VERR_VD_ASYNC_IO_IN_PROGRESS) /* Another error */
11226 vdIoCtxFree(pDisk, pIoCtx);
11227 } while (0);
11228
11229 if (RT_UNLIKELY(fLockWrite) && (rc != VERR_VD_ASYNC_IO_IN_PROGRESS))
11230 {
11231 rc2 = vdThreadFinishWrite(pDisk);
11232 AssertRC(rc2);
11233 }
11234
11235 LogFlowFunc(("returns %Rrc\n", rc));
11236 return rc;
11237}
11238
11239VBOXDDU_DECL(int) VDAsyncDiscardRanges(PVBOXHDD pDisk, PCRTRANGE paRanges, unsigned cRanges,
11240 PFNVDASYNCTRANSFERCOMPLETE pfnComplete,
11241 void *pvUser1, void *pvUser2)
11242{
11243 int rc;
11244 int rc2;
11245 bool fLockWrite = false;
11246 PVDIOCTX pIoCtx = NULL;
11247
11248 LogFlowFunc(("pDisk=%#p\n", pDisk));
11249
11250 do
11251 {
11252 /* sanity check */
11253 AssertPtrBreakStmt(pDisk, rc = VERR_INVALID_PARAMETER);
11254 AssertMsg(pDisk->u32Signature == VBOXHDDDISK_SIGNATURE, ("u32Signature=%08x\n", pDisk->u32Signature));
11255
11256 rc2 = vdThreadStartWrite(pDisk);
11257 AssertRC(rc2);
11258 fLockWrite = true;
11259
11260 AssertPtrBreakStmt(pDisk->pLast, rc = VERR_VD_NOT_OPENED);
11261
11262 pIoCtx = vdIoCtxDiscardAlloc(pDisk, paRanges, cRanges,
11263 pfnComplete, pvUser1, pvUser2, NULL,
11264 vdDiscardHelperAsync,
11265 VDIOCTX_FLAGS_DEFAULT);
11266 if (!pIoCtx)
11267 {
11268 rc = VERR_NO_MEMORY;
11269 break;
11270 }
11271
11272 rc = vdIoCtxProcessTryLockDefer(pIoCtx);
11273 if (rc == VINF_VD_ASYNC_IO_FINISHED)
11274 {
11275 if (ASMAtomicCmpXchgBool(&pIoCtx->fComplete, true, false))
11276 vdIoCtxFree(pDisk, pIoCtx);
11277 else
11278 rc = VERR_VD_ASYNC_IO_IN_PROGRESS; /* Let the other handler complete the request. */
11279 }
11280 else if (rc != VERR_VD_ASYNC_IO_IN_PROGRESS) /* Another error */
11281 vdIoCtxFree(pDisk, pIoCtx);
11282 } while (0);
11283
11284 if (RT_UNLIKELY(fLockWrite) && (rc != VERR_VD_ASYNC_IO_IN_PROGRESS))
11285 {
11286 rc2 = vdThreadFinishWrite(pDisk);
11287 AssertRC(rc2);
11288 }
11289
11290 LogFlowFunc(("returns %Rrc\n", rc));
11291 return rc;
11292}
11293
11294VBOXDDU_DECL(int) VDRepair(PVDINTERFACE pVDIfsDisk, PVDINTERFACE pVDIfsImage,
11295 const char *pszFilename, const char *pszBackend,
11296 uint32_t fFlags)
11297{
11298 int rc = VERR_NOT_SUPPORTED;
11299 PCVDIMAGEBACKEND pBackend = NULL;
11300 VDINTERFACEIOINT VDIfIoInt;
11301 VDINTERFACEIO VDIfIoFallback;
11302 PVDINTERFACEIO pInterfaceIo;
11303
11304 LogFlowFunc(("pszFilename=\"%s\"\n", pszFilename));
11305 /* Check arguments. */
11306 AssertMsgReturn(VALID_PTR(pszFilename) && *pszFilename,
11307 ("pszFilename=%#p \"%s\"\n", pszFilename, pszFilename),
11308 VERR_INVALID_PARAMETER);
11309 AssertMsgReturn(VALID_PTR(pszBackend),
11310 ("pszBackend=%#p\n", pszBackend),
11311 VERR_INVALID_PARAMETER);
11312 AssertMsgReturn((fFlags & ~VD_REPAIR_FLAGS_MASK) == 0,
11313 ("fFlags=%#x\n", fFlags),
11314 VERR_INVALID_PARAMETER);
11315
11316 pInterfaceIo = VDIfIoGet(pVDIfsImage);
11317 if (!pInterfaceIo)
11318 {
11319 /*
11320 * Caller doesn't provide an I/O interface, create our own using the
11321 * native file API.
11322 */
11323 vdIfIoFallbackCallbacksSetup(&VDIfIoFallback);
11324 pInterfaceIo = &VDIfIoFallback;
11325 }
11326
11327 /* Set up the internal I/O interface. */
11328 AssertReturn(!VDIfIoIntGet(pVDIfsImage), VERR_INVALID_PARAMETER);
11329 VDIfIoInt.pfnOpen = vdIOIntOpenLimited;
11330 VDIfIoInt.pfnClose = vdIOIntCloseLimited;
11331 VDIfIoInt.pfnDelete = vdIOIntDeleteLimited;
11332 VDIfIoInt.pfnMove = vdIOIntMoveLimited;
11333 VDIfIoInt.pfnGetFreeSpace = vdIOIntGetFreeSpaceLimited;
11334 VDIfIoInt.pfnGetModificationTime = vdIOIntGetModificationTimeLimited;
11335 VDIfIoInt.pfnGetSize = vdIOIntGetSizeLimited;
11336 VDIfIoInt.pfnSetSize = vdIOIntSetSizeLimited;
11337 VDIfIoInt.pfnReadUser = vdIOIntReadUserLimited;
11338 VDIfIoInt.pfnWriteUser = vdIOIntWriteUserLimited;
11339 VDIfIoInt.pfnReadMeta = vdIOIntReadMetaLimited;
11340 VDIfIoInt.pfnWriteMeta = vdIOIntWriteMetaLimited;
11341 VDIfIoInt.pfnFlush = vdIOIntFlushLimited;
11342 rc = VDInterfaceAdd(&VDIfIoInt.Core, "VD_IOINT", VDINTERFACETYPE_IOINT,
11343 pInterfaceIo, sizeof(VDINTERFACEIOINT), &pVDIfsImage);
11344 AssertRC(rc);
11345
11346 rc = vdFindBackend(pszBackend, &pBackend);
11347 if (RT_SUCCESS(rc))
11348 {
11349 if (pBackend->pfnRepair)
11350 rc = pBackend->pfnRepair(pszFilename, pVDIfsDisk, pVDIfsImage, fFlags);
11351 else
11352 rc = VERR_VD_IMAGE_REPAIR_NOT_SUPPORTED;
11353 }
11354
11355 LogFlowFunc(("returns %Rrc\n", rc));
11356 return rc;
11357}
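
/*
 * Illustrative sketch (not part of the build): running a consistency check on a
 * VDI file without opening it in a container first. Passing NULL interface
 * lists makes VDRepair fall back to the native file I/O set up above; the
 * dry-run flag is assumed to be VD_REPAIR_DRY_RUN from VBox/vd.h and the
 * helper name is hypothetical.
 */
#if 0
static int vdExampleRepairDryRun(const char *pszFilename)
{
    /* Only report problems, don't modify the image. */
    return VDRepair(NULL /* pVDIfsDisk */, NULL /* pVDIfsImage */,
                    pszFilename, "VDI", VD_REPAIR_DRY_RUN);
}
#endif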
11358
11359
11360/*
11361 * generic plugin functions
11362 */
11363
11364/**
11365 * @interface_method_impl{VDIMAGEBACKEND,pfnComposeLocation}
11366 */
11367DECLCALLBACK(int) genericFileComposeLocation(PVDINTERFACE pConfig, char **pszLocation)
11368{
11369 RT_NOREF1(pConfig);
11370 *pszLocation = NULL;
11371 return VINF_SUCCESS;
11372}
11373
11374/**
11375 * @interface_method_impl{VDIMAGEBACKEND,pfnComposeName}
11376 */
11377DECLCALLBACK(int) genericFileComposeName(PVDINTERFACE pConfig, char **pszName)
11378{
11379 RT_NOREF1(pConfig);
11380 *pszName = NULL;
11381 return VINF_SUCCESS;
11382}
11383