VirtualBox

source: vbox/trunk/src/VBox/Storage/VD.cpp@66110

Last change on this file since 66110 was 66110, checked in by vboxsync, 8 years ago

Storage/VD: Implement infrastructure for region lists to be able to support CD/DVD image formats which can contain multiple tracks in the future

  • Property svn:eol-style set to native
  • Property svn:keywords set to Author Date Id Revision
File size: 403.1 KB
1/* $Id: VD.cpp 66110 2017-03-15 12:18:31Z vboxsync $ */
2/** @file
3 * VBoxHDD - VBox HDD Container implementation.
4 */
5
6/*
7 * Copyright (C) 2006-2016 Oracle Corporation
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 */
17
18
19/*********************************************************************************************************************************
20* Header Files *
21*********************************************************************************************************************************/
22#define LOG_GROUP LOG_GROUP_VD
23#include <VBox/vd.h>
24#include <VBox/err.h>
25#include <VBox/sup.h>
26#include <VBox/log.h>
27
28#include <iprt/alloc.h>
29#include <iprt/assert.h>
30#include <iprt/uuid.h>
31#include <iprt/file.h>
32#include <iprt/string.h>
33#include <iprt/asm.h>
34#include <iprt/ldr.h>
35#include <iprt/dir.h>
36#include <iprt/path.h>
37#include <iprt/param.h>
38#include <iprt/memcache.h>
39#include <iprt/sg.h>
40#include <iprt/list.h>
41#include <iprt/avl.h>
42#include <iprt/semaphore.h>
43
44#include <VBox/vd-plugin.h>
45
46#include "VDBackends.h"
47
48/** Disable dynamic backends on non-x86 architectures. This feature
49 * requires the SUPR3 library, which is not available there.
50 */
51#if !defined(VBOX_HDD_NO_DYNAMIC_BACKENDS) && !defined(RT_ARCH_X86) && !defined(RT_ARCH_AMD64)
52# define VBOX_HDD_NO_DYNAMIC_BACKENDS
53#endif
54
55#define VBOXHDDDISK_SIGNATURE 0x6f0e2a7d
56
57/** Buffer size used for merging images. */
58#define VD_MERGE_BUFFER_SIZE (16 * _1M)
59
60/** Maximum number of segments in one I/O task. */
61#define VD_IO_TASK_SEGMENTS_MAX 64
62
63/** Threshold after which not recently used blocks are removed from the list. */
64#define VD_DISCARD_REMOVE_THRESHOLD (10 * _1M) /** @todo experiment */
65
66/**
67 * VD async I/O interface storage descriptor.
68 */
69typedef struct VDIIOFALLBACKSTORAGE
70{
71 /** File handle. */
72 RTFILE File;
73 /** Completion callback. */
74 PFNVDCOMPLETED pfnCompleted;
75 /** Thread for async access. */
76 RTTHREAD ThreadAsync;
77} VDIIOFALLBACKSTORAGE, *PVDIIOFALLBACKSTORAGE;
78
79/**
80 * Structure containing everything I/O related
81 * for the image and cache descriptors.
82 */
83typedef struct VDIO
84{
85 /** I/O interface to the upper layer. */
86 PVDINTERFACEIO pInterfaceIo;
87
88 /** Per image internal I/O interface. */
89 VDINTERFACEIOINT VDIfIoInt;
90
91 /** Fallback I/O interface, only used if the caller doesn't provide it. */
92 VDINTERFACEIO VDIfIo;
93
94 /** Opaque backend data. */
95 void *pBackendData;
96 /** Disk this image is part of */
97 PVBOXHDD pDisk;
98 /** Flag whether to ignore flush requests. */
99 bool fIgnoreFlush;
100} VDIO, *PVDIO;
101
102/** Forward declaration of an I/O task */
103typedef struct VDIOTASK *PVDIOTASK;
104
105/**
106 * VBox HDD Container image descriptor.
107 */
108typedef struct VDIMAGE
109{
110 /** Link to parent image descriptor, if any. */
111 struct VDIMAGE *pPrev;
112 /** Link to child image descriptor, if any. */
113 struct VDIMAGE *pNext;
114 /** Container base filename. (UTF-8) */
115 char *pszFilename;
116 /** Data managed by the backend which keeps the actual info. */
117 void *pBackendData;
118 /** Cached sanitized image flags. */
119 unsigned uImageFlags;
120 /** Image open flags (only those handled generically in this code and which
121 * the backends will never ever see). */
122 unsigned uOpenFlags;
123
124 /** Function pointers for the various backend methods. */
125 PCVDIMAGEBACKEND Backend;
126 /** Pointer to list of VD interfaces, per-image. */
127 PVDINTERFACE pVDIfsImage;
128 /** I/O related things. */
129 VDIO VDIo;
130} VDIMAGE, *PVDIMAGE;
131
132/**
133 * uModified bit flags.
134 */
135#define VD_IMAGE_MODIFIED_FLAG RT_BIT(0)
136#define VD_IMAGE_MODIFIED_FIRST RT_BIT(1)
137#define VD_IMAGE_MODIFIED_DISABLE_UUID_UPDATE RT_BIT(2)
138
139
140/**
141 * VBox HDD Cache image descriptor.
142 */
143typedef struct VDCACHE
144{
145 /** Cache base filename. (UTF-8) */
146 char *pszFilename;
147 /** Data managed by the backend which keeps the actual info. */
148 void *pBackendData;
149 /** Cached sanitized image flags. */
150 unsigned uImageFlags;
151 /** Image open flags (only those handled generically in this code and which
152 * the backends will never ever see). */
153 unsigned uOpenFlags;
154
155 /** Function pointers for the various backend methods. */
156 PCVDCACHEBACKEND Backend;
157
158 /** Pointer to list of VD interfaces, per-cache. */
159 PVDINTERFACE pVDIfsCache;
160 /** I/O related things. */
161 VDIO VDIo;
162} VDCACHE, *PVDCACHE;
163
164/**
165 * A block waiting for a discard.
166 */
167typedef struct VDDISCARDBLOCK
168{
169 /** AVL core. */
170 AVLRU64NODECORE Core;
171 /** LRU list node. */
172 RTLISTNODE NodeLru;
173 /** Number of bytes to discard. */
174 size_t cbDiscard;
175 /** Bitmap of allocated sectors. */
176 void *pbmAllocated;
177} VDDISCARDBLOCK, *PVDDISCARDBLOCK;
178
179/**
180 * VD discard state.
181 */
182typedef struct VDDISCARDSTATE
183{
184 /** Number of bytes waiting for a discard. */
185 size_t cbDiscarding;
186 /** AVL tree with blocks waiting for a discard.
187 * The uOffset + cbDiscard range is the search key. */
188 PAVLRU64TREE pTreeBlocks;
189 /** LRU list of the blocks waiting for a discard.
190 * If there are too many blocks waiting, the least recently used
191 * ones are removed and their ranges are marked as unused in the image.
192 */
193 RTLISTNODE ListLru;
194} VDDISCARDSTATE, *PVDDISCARDSTATE;
195
196/**
197 * VD filter instance.
198 */
199typedef struct VDFILTER
200{
201 /** List node for the read filter chain. */
202 RTLISTNODE ListNodeChainRead;
203 /** List node for the write filter chain. */
204 RTLISTNODE ListNodeChainWrite;
205 /** Number of references to this filter. */
206 uint32_t cRefs;
207 /** Opaque VD filter backend instance data. */
208 void *pvBackendData;
209 /** Pointer to the filter backend interface. */
210 PCVDFILTERBACKEND pBackend;
211 /** Pointer to list of VD interfaces, per-filter. */
212 PVDINTERFACE pVDIfsFilter;
213 /** I/O related things. */
214 VDIO VDIo;
215} VDFILTER;
216/** Pointer to a VD filter instance. */
217typedef VDFILTER *PVDFILTER;
218
219/**
220 * VBox HDD Container main structure, private part.
221 */
222struct VBOXHDD
223{
224 /** Structure signature (VBOXHDDDISK_SIGNATURE). */
225 uint32_t u32Signature;
226
227 /** Image type. */
228 VDTYPE enmType;
229
230 /** Number of opened images. */
231 unsigned cImages;
232
233 /** Base image. */
234 PVDIMAGE pBase;
235
236 /** Last opened image in the chain.
237 * The same as pBase if only one image is used. */
238 PVDIMAGE pLast;
239
240 /** If a merge to one of the parents is running this may be non-NULL
241 * to indicate to what image the writes should be additionally relayed. */
242 PVDIMAGE pImageRelay;
243
244 /** Flags representing the modification state. */
245 unsigned uModified;
246
247 /** Cached size of this disk. */
248 uint64_t cbSize;
249 /** Cached PCHS geometry for this disk. */
250 VDGEOMETRY PCHSGeometry;
251 /** Cached LCHS geometry for this disk. */
252 VDGEOMETRY LCHSGeometry;
253
254 /** Pointer to list of VD interfaces, per-disk. */
255 PVDINTERFACE pVDIfsDisk;
256 /** Pointer to the common interface structure for error reporting. */
257 PVDINTERFACEERROR pInterfaceError;
258 /** Pointer to the optional thread synchronization callbacks. */
259 PVDINTERFACETHREADSYNC pInterfaceThreadSync;
260
261 /** Memory cache for I/O contexts */
262 RTMEMCACHE hMemCacheIoCtx;
263 /** Memory cache for I/O tasks. */
264 RTMEMCACHE hMemCacheIoTask;
265 /** Flag whether an I/O context is currently using the disk structures.
266 * Every I/O context must be placed on one of the lists below. */
267 volatile bool fLocked;
268 /** Head of pending I/O tasks waiting for completion - LIFO order. */
269 volatile PVDIOTASK pIoTasksPendingHead;
270 /** Head of newly queued I/O contexts - LIFO order. */
271 volatile PVDIOCTX pIoCtxHead;
272 /** Head of halted I/O contexts which are given back to the generic
273 * disk framework by the backend - LIFO order. */
274 volatile PVDIOCTX pIoCtxHaltedHead;
275
276 /** Head of blocked I/O contexts, processed only
277 * after pIoCtxLockOwner was freed - LIFO order. */
278 volatile PVDIOCTX pIoCtxBlockedHead;
279 /** I/O context which locked the disk for a growing write or flush request.
280 * Other flush or growing write requests need to wait until
281 * the current one completes. - NIL_VDIOCTX if unlocked. */
282 volatile PVDIOCTX pIoCtxLockOwner;
283 /** If the disk was locked by a growing write, flush or discard request this
284 * contains the start offset to check for interfering I/O while it is in progress. */
285 uint64_t uOffsetStartLocked;
286 /** If the disk was locked by a growing write, flush or discard request this contains
287 * the first non affected offset to check for interfering I/O while it is in progress. */
288 uint64_t uOffsetEndLocked;
289
290 /** Pointer to the L2 disk cache if any. */
291 PVDCACHE pCache;
292 /** Pointer to the discard state if any. */
293 PVDDISCARDSTATE pDiscard;
294
295 /** Read filter chain - PVDFILTER. */
296 RTLISTANCHOR ListFilterChainRead;
297 /** Write filter chain - PVDFILTER. */
298 RTLISTANCHOR ListFilterChainWrite;
299};
300
301# define VD_IS_LOCKED(a_pDisk) \
302 do \
303 { \
304 NOREF(a_pDisk); \
305 AssertMsg((a_pDisk)->fLocked, \
306 ("Lock not held\n"));\
307 } while(0)
308
309/**
310 * VBox parent read descriptor, used internally for compaction.
311 */
312typedef struct VDPARENTSTATEDESC
313{
314 /** Pointer to disk descriptor. */
315 PVBOXHDD pDisk;
316 /** Pointer to image descriptor. */
317 PVDIMAGE pImage;
318} VDPARENTSTATEDESC, *PVDPARENTSTATEDESC;
319
320/**
321 * Transfer direction.
322 */
323typedef enum VDIOCTXTXDIR
324{
325 /** Read */
326 VDIOCTXTXDIR_READ = 0,
327 /** Write */
328 VDIOCTXTXDIR_WRITE,
329 /** Flush */
330 VDIOCTXTXDIR_FLUSH,
331 /** Discard */
332 VDIOCTXTXDIR_DISCARD,
333 /** 32bit hack */
334 VDIOCTXTXDIR_32BIT_HACK = 0x7fffffff
335} VDIOCTXTXDIR, *PVDIOCTXTXDIR;
336
337/** Transfer function */
338typedef DECLCALLBACK(int) FNVDIOCTXTRANSFER (PVDIOCTX pIoCtx);
339/** Pointer to a transfer function. */
340typedef FNVDIOCTXTRANSFER *PFNVDIOCTXTRANSFER;
341
342/**
343 * I/O context
344 */
345typedef struct VDIOCTX
346{
347 /** Pointer to the next I/O context. */
348 struct VDIOCTX * volatile pIoCtxNext;
350 /** Disk this request is for. */
350 PVBOXHDD pDisk;
351 /** Return code. */
352 int rcReq;
353 /** Various flags for the I/O context. */
354 uint32_t fFlags;
355 /** Number of data transfers currently pending. */
356 volatile uint32_t cDataTransfersPending;
357 /** How many meta data transfers are pending. */
358 volatile uint32_t cMetaTransfersPending;
359 /** Flag whether the request finished */
360 volatile bool fComplete;
361 /** Temporary allocated memory which is freed
362 * when the context completes. */
363 void *pvAllocation;
364 /** Transfer function. */
365 PFNVDIOCTXTRANSFER pfnIoCtxTransfer;
366 /** Next transfer part after the current one completed. */
367 PFNVDIOCTXTRANSFER pfnIoCtxTransferNext;
368 /** Transfer direction */
369 VDIOCTXTXDIR enmTxDir;
370 /** Request type dependent data. */
371 union
372 {
373 /** I/O request (read/write). */
374 struct
375 {
376 /** Number of bytes left until this context completes. */
377 volatile uint32_t cbTransferLeft;
378 /** Current offset */
379 volatile uint64_t uOffset;
380 /** Number of bytes to transfer */
381 volatile size_t cbTransfer;
382 /** Current image in the chain. */
383 PVDIMAGE pImageCur;
384 /** Start image to read from. pImageCur is reset to this
385 * value after it reached the first image in the chain. */
386 PVDIMAGE pImageStart;
387 /** S/G buffer */
388 RTSGBUF SgBuf;
389 /** Number of bytes to clear in the buffer before the current read. */
390 size_t cbBufClear;
391 /** Number of images to read. */
392 unsigned cImagesRead;
393 /** Override for the parent image to start reading from. */
394 PVDIMAGE pImageParentOverride;
395 /** Original offset of the transfer - required for filtering read requests. */
396 uint64_t uOffsetXferOrig;
398 * Original size of the transfer - required for filtering read requests. */
398 size_t cbXferOrig;
399 } Io;
400 /** Discard requests. */
401 struct
402 {
403 /** Pointer to the range descriptor array. */
404 PCRTRANGE paRanges;
405 /** Number of ranges in the array. */
406 unsigned cRanges;
407 /** Range descriptor index which is processed. */
408 unsigned idxRange;
409 /** Start offset to discard currently. */
410 uint64_t offCur;
411 /** How many bytes left to discard in the current range. */
412 size_t cbDiscardLeft;
413 /** How many bytes to discard in the current block (<= cbDiscardLeft). */
414 size_t cbThisDiscard;
415 /** Discard block handled currently. */
416 PVDDISCARDBLOCK pBlock;
417 } Discard;
418 } Req;
419 /** Parent I/O context if any. Sets the type of the context (root/child) */
420 PVDIOCTX pIoCtxParent;
421 /** Type dependent data (root/child) */
422 union
423 {
424 /** Root data */
425 struct
426 {
427 /** Completion callback */
428 PFNVDASYNCTRANSFERCOMPLETE pfnComplete;
429 /** User argument 1 passed on completion. */
430 void *pvUser1;
431 /** User argument 2 passed on completion. */
432 void *pvUser2;
433 } Root;
434 /** Child data */
435 struct
436 {
437 /** Saved start offset */
438 uint64_t uOffsetSaved;
439 /** Saved transfer size */
440 size_t cbTransferLeftSaved;
441 /** Number of bytes transferred from the parent if this context completes. */
442 size_t cbTransferParent;
443 /** Number of bytes to pre-read. */
444 size_t cbPreRead;
445 /** Number of bytes to post-read. */
446 size_t cbPostRead;
447 /** Number of bytes to write left in the parent. */
448 size_t cbWriteParent;
449 /** Write type dependent data. */
450 union
451 {
452 /** Optimized */
453 struct
454 {
455 /** Bytes to fill to satisfy the block size. Not part of the virtual disk. */
456 size_t cbFill;
457 /** Bytes to copy instead of reading from the parent */
458 size_t cbWriteCopy;
459 /** Bytes to read from the image. */
460 size_t cbReadImage;
461 } Optimized;
462 } Write;
463 } Child;
464 } Type;
465} VDIOCTX;
466
467/** Default flags for an I/O context, i.e. unblocked and async. */
468#define VDIOCTX_FLAGS_DEFAULT (0)
469/** Flag whether the context is blocked. */
470#define VDIOCTX_FLAGS_BLOCKED RT_BIT_32(0)
471/** Flag whether the I/O context is using synchronous I/O. */
472#define VDIOCTX_FLAGS_SYNC RT_BIT_32(1)
473/** Flag whether the read should update the cache. */
474#define VDIOCTX_FLAGS_READ_UPDATE_CACHE RT_BIT_32(2)
475/** Flag whether free blocks should be zeroed.
476 * If false and no image has data for the specified
477 * range, VERR_VD_BLOCK_FREE is returned for the I/O context.
478 * Note that unallocated blocks are still zeroed
479 * if at least one image has valid data for a part
480 * of the range.
481 */
482#define VDIOCTX_FLAGS_ZERO_FREE_BLOCKS RT_BIT_32(3)
483/** Don't free the I/O context when complete because
484 * it was allocated elsewhere (stack, ...). */
485#define VDIOCTX_FLAGS_DONT_FREE RT_BIT_32(4)
486/** Don't set the modified flag for this I/O context when writing. */
487#define VDIOCTX_FLAGS_DONT_SET_MODIFIED_FLAG RT_BIT_32(5)
488/** The write filter was applied already and shouldn't be applied a second time.
489 * Used at the beginning of vdWriteHelperAsync() because it might be called
490 * multiple times.
491 */
492#define VDIOCTX_FLAGS_WRITE_FILTER_APPLIED RT_BIT_32(6)
493
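/* Usage sketch (assumed, based on the helpers further down): a synchronous request
 * with a caller-provided, stack-allocated I/O context would typically combine the
 * flags above as
 *
 *     uint32_t fFlags = VDIOCTX_FLAGS_SYNC | VDIOCTX_FLAGS_DONT_FREE;
 *
 * VDIOCTX_FLAGS_SYNC selects synchronous processing and VDIOCTX_FLAGS_DONT_FREE
 * keeps vdIoCtxFree() from returning the stack object to the memory cache. */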
494/** NIL I/O context pointer value. */
495#define NIL_VDIOCTX ((PVDIOCTX)0)
496
497/**
498 * List node for deferred I/O contexts.
499 */
500typedef struct VDIOCTXDEFERRED
501{
502 /** Node in the list of deferred requests.
503 * A request can be deferred if the image is growing
504 * and the request accesses the same range or if
505 * the backend needs to read or write metadata from the disk
506 * before it can continue. */
507 RTLISTNODE NodeDeferred;
508 /** I/O context this entry points to. */
509 PVDIOCTX pIoCtx;
510} VDIOCTXDEFERRED, *PVDIOCTXDEFERRED;
511
512/**
513 * I/O task.
514 */
515typedef struct VDIOTASK
516{
517 /** Next I/O task waiting in the list. */
518 struct VDIOTASK * volatile pNext;
519 /** Storage this task belongs to. */
520 PVDIOSTORAGE pIoStorage;
521 /** Optional completion callback. */
522 PFNVDXFERCOMPLETED pfnComplete;
523 /** Opaque user data. */
524 void *pvUser;
525 /** Completion status code for the task. */
526 int rcReq;
527 /** Flag whether this is a meta data transfer. */
528 bool fMeta;
529 /** Type dependent data. */
530 union
531 {
532 /** User data transfer. */
533 struct
534 {
535 /** Number of bytes this task transferred. */
536 uint32_t cbTransfer;
537 /** Pointer to the I/O context the task belongs to. */
538 PVDIOCTX pIoCtx;
539 } User;
540 /** Meta data transfer. */
541 struct
542 {
543 /** Meta transfer this task is for. */
544 PVDMETAXFER pMetaXfer;
545 } Meta;
546 } Type;
547} VDIOTASK;
548
549/**
550 * Storage handle.
551 */
552typedef struct VDIOSTORAGE
553{
554 /** Image I/O state this storage handle belongs to. */
555 PVDIO pVDIo;
556 /** AVL tree for pending async metadata transfers. */
557 PAVLRFOFFTREE pTreeMetaXfers;
558 /** Storage handle */
559 void *pStorage;
560} VDIOSTORAGE;
561
562/**
563 * Metadata transfer.
564 *
565 * @note This entry can't be freed if either the list is not empty or
566 * the reference counter is not 0.
567 * The assumption is that the backends don't need to read huge amounts of
568 * metadata to complete a transfer so the additional memory overhead should
569 * be relatively small.
570 */
571typedef struct VDMETAXFER
572{
573 /** AVL core for fast search (the file offset is the key) */
574 AVLRFOFFNODECORE Core;
575 /** I/O storage for this transfer. */
576 PVDIOSTORAGE pIoStorage;
577 /** Flags. */
578 uint32_t fFlags;
579 /** List of I/O contexts waiting for this metadata transfer to complete. */
580 RTLISTNODE ListIoCtxWaiting;
581 /** Number of references to this entry. */
582 unsigned cRefs;
583 /** Size of the data stored with this entry. */
584 size_t cbMeta;
585 /** Shadow buffer which is used in case a write is still active and other
586 * writes update the shadow buffer. */
587 uint8_t *pbDataShw;
588 /** List of I/O contexts updating the shadow buffer while there is a write
589 * in progress. */
590 RTLISTNODE ListIoCtxShwWrites;
591 /** Data stored - variable size. */
592 uint8_t abData[1];
593} VDMETAXFER;
594
595/**
596 * The transfer direction for the metadata.
597 */
598#define VDMETAXFER_TXDIR_MASK 0x3
599#define VDMETAXFER_TXDIR_NONE 0x0
600#define VDMETAXFER_TXDIR_WRITE 0x1
601#define VDMETAXFER_TXDIR_READ 0x2
602#define VDMETAXFER_TXDIR_FLUSH 0x3
603#define VDMETAXFER_TXDIR_GET(flags) ((flags) & VDMETAXFER_TXDIR_MASK)
604#define VDMETAXFER_TXDIR_SET(flags, dir) ((flags) = (flags & ~VDMETAXFER_TXDIR_MASK) | (dir))
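/* Example of the accessors above on a metadata transfer descriptor:
 *
 *     VDMETAXFER_TXDIR_SET(pMetaXfer->fFlags, VDMETAXFER_TXDIR_WRITE);
 *     Assert(VDMETAXFER_TXDIR_GET(pMetaXfer->fFlags) == VDMETAXFER_TXDIR_WRITE);
 */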
605
606/**
607 * Plugin structure.
608 */
609typedef struct VDPLUGIN
610{
611 /** Pointer to the next plugin structure. */
612 RTLISTNODE NodePlugin;
613 /** Handle of loaded plugin library. */
614 RTLDRMOD hPlugin;
615 /** Filename of the loaded plugin. */
616 char *pszFilename;
617} VDPLUGIN;
618/** Pointer to a plugin structure. */
619typedef VDPLUGIN *PVDPLUGIN;
620
621/** Head of loaded plugin list. */
622static RTLISTANCHOR g_ListPluginsLoaded;
623
624/** Number of image backends supported. */
625static unsigned g_cBackends = 0;
626/** Array of pointers to the image backends. */
627static PCVDIMAGEBACKEND *g_apBackends = NULL;
628/** Array of handles to the corresponding plugin. */
629static RTLDRMOD *g_ahBackendPlugins = NULL;
630/** Builtin image backends. */
631static PCVDIMAGEBACKEND aStaticBackends[] =
632{
633 &g_VmdkBackend,
634 &g_VDIBackend,
635 &g_VhdBackend,
636 &g_ParallelsBackend,
637 &g_DmgBackend,
638 &g_QedBackend,
639 &g_QCowBackend,
640 &g_VhdxBackend,
641 &g_RawBackend,
642 &g_ISCSIBackend
643};
644
645/** Number of supported cache backends. */
646static unsigned g_cCacheBackends = 0;
647/** Array of pointers to the cache backends. */
648static PCVDCACHEBACKEND *g_apCacheBackends = NULL;
649/** Array of handles to the corresponding plugin. */
650static RTLDRMOD *g_ahCacheBackendPlugins = NULL;
651/** Builtin cache backends. */
652static PCVDCACHEBACKEND aStaticCacheBackends[] =
653{
654 &g_VciCacheBackend
655};
656
657/** Number of supported filter backends. */
658static unsigned g_cFilterBackends = 0;
660/** Array of pointers to the filter backends. */
660static PCVDFILTERBACKEND *g_apFilterBackends = NULL;
661#ifndef VBOX_HDD_NO_DYNAMIC_BACKENDS
662/** Array of handles to the corresponding plugin. */
663static PRTLDRMOD g_pahFilterBackendPlugins = NULL;
664#endif
665
666/** Forward declaration of the async discard helper. */
667static DECLCALLBACK(int) vdDiscardHelperAsync(PVDIOCTX pIoCtx);
668static DECLCALLBACK(int) vdWriteHelperAsync(PVDIOCTX pIoCtx);
669static void vdDiskProcessBlockedIoCtx(PVBOXHDD pDisk);
670static int vdDiskUnlock(PVBOXHDD pDisk, PVDIOCTX pIoCtxRc);
671static DECLCALLBACK(void) vdIoCtxSyncComplete(void *pvUser1, void *pvUser2, int rcReq);
672
673/**
674 * internal: add several backends.
675 */
676static int vdAddBackends(RTLDRMOD hPlugin, PCVDIMAGEBACKEND *ppBackends, unsigned cBackends)
677{
678 PCVDIMAGEBACKEND *pTmp = (PCVDIMAGEBACKEND *)RTMemRealloc(g_apBackends,
679 (g_cBackends + cBackends) * sizeof(PCVDIMAGEBACKEND));
680 if (RT_UNLIKELY(!pTmp))
681 return VERR_NO_MEMORY;
682 g_apBackends = pTmp;
683
684 RTLDRMOD *pTmpPlugins = (RTLDRMOD*)RTMemRealloc(g_ahBackendPlugins,
685 (g_cBackends + cBackends) * sizeof(RTLDRMOD));
686 if (RT_UNLIKELY(!pTmpPlugins))
687 return VERR_NO_MEMORY;
688 g_ahBackendPlugins = pTmpPlugins;
689 memcpy(&g_apBackends[g_cBackends], ppBackends, cBackends * sizeof(PCVDIMAGEBACKEND));
690 for (unsigned i = g_cBackends; i < g_cBackends + cBackends; i++)
691 g_ahBackendPlugins[i] = hPlugin;
692 g_cBackends += cBackends;
693 return VINF_SUCCESS;
694}
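/* Sketch of how the builtin image backends above would be registered with this
 * helper (no plugin module involved, hence NIL_RTLDRMOD):
 *
 *     int rc = vdAddBackends(NIL_RTLDRMOD, aStaticBackends,
 *                            RT_ELEMENTS(aStaticBackends));
 */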
695
696#ifndef VBOX_HDD_NO_DYNAMIC_BACKENDS
697/**
698 * internal: add single backend.
699 */
700DECLINLINE(int) vdAddBackend(RTLDRMOD hPlugin, PCVDIMAGEBACKEND pBackend)
701{
702 return vdAddBackends(hPlugin, &pBackend, 1);
703}
704#endif
705
706/**
707 * internal: add several cache backends.
708 */
709static int vdAddCacheBackends(RTLDRMOD hPlugin, PCVDCACHEBACKEND *ppBackends, unsigned cBackends)
710{
711 PCVDCACHEBACKEND *pTmp = (PCVDCACHEBACKEND*)RTMemRealloc(g_apCacheBackends,
712 (g_cCacheBackends + cBackends) * sizeof(PCVDCACHEBACKEND));
713 if (RT_UNLIKELY(!pTmp))
714 return VERR_NO_MEMORY;
715 g_apCacheBackends = pTmp;
716
717 RTLDRMOD *pTmpPlugins = (RTLDRMOD*)RTMemRealloc(g_ahCacheBackendPlugins,
718 (g_cCacheBackends + cBackends) * sizeof(RTLDRMOD));
719 if (RT_UNLIKELY(!pTmpPlugins))
720 return VERR_NO_MEMORY;
721 g_ahCacheBackendPlugins = pTmpPlugins;
722 memcpy(&g_apCacheBackends[g_cCacheBackends], ppBackends, cBackends * sizeof(PCVDCACHEBACKEND));
723 for (unsigned i = g_cCacheBackends; i < g_cCacheBackends + cBackends; i++)
724 g_ahCacheBackendPlugins[i] = hPlugin;
725 g_cCacheBackends += cBackends;
726 return VINF_SUCCESS;
727}
728
729#ifndef VBOX_HDD_NO_DYNAMIC_BACKENDS
730
731/**
732 * internal: add single cache backend.
733 */
734DECLINLINE(int) vdAddCacheBackend(RTLDRMOD hPlugin, PCVDCACHEBACKEND pBackend)
735{
736 return vdAddCacheBackends(hPlugin, &pBackend, 1);
737}
738
739
740/**
741 * Add several filter backends.
742 *
743 * @returns VBox status code.
744 * @param hPlugin Plugin handle to add.
745 * @param ppBackends Array of filter backends to add.
746 * @param cBackends Number of backends to add.
747 */
748static int vdAddFilterBackends(RTLDRMOD hPlugin, PCVDFILTERBACKEND *ppBackends, unsigned cBackends)
749{
750 PCVDFILTERBACKEND *pTmp = (PCVDFILTERBACKEND *)RTMemRealloc(g_apFilterBackends,
751 (g_cFilterBackends + cBackends) * sizeof(PCVDFILTERBACKEND));
752 if (RT_UNLIKELY(!pTmp))
753 return VERR_NO_MEMORY;
754 g_apFilterBackends = pTmp;
755
756 PRTLDRMOD pTmpPlugins = (PRTLDRMOD)RTMemRealloc(g_pahFilterBackendPlugins,
757 (g_cFilterBackends + cBackends) * sizeof(RTLDRMOD));
758 if (RT_UNLIKELY(!pTmpPlugins))
759 return VERR_NO_MEMORY;
760
761 g_pahFilterBackendPlugins = pTmpPlugins;
762 memcpy(&g_apFilterBackends[g_cFilterBackends], ppBackends, cBackends * sizeof(PCVDFILTERBACKEND));
763 for (unsigned i = g_cFilterBackends; i < g_cFilterBackends + cBackends; i++)
764 g_pahFilterBackendPlugins[i] = hPlugin;
765 g_cFilterBackends += cBackends;
766 return VINF_SUCCESS;
767}
768
769
770/**
771 * Add a single filter backend to the list of supported filters.
772 *
773 * @returns VBox status code.
774 * @param hPlugin Plugin handle to add.
775 * @param pBackend The backend to add.
776 */
777DECLINLINE(int) vdAddFilterBackend(RTLDRMOD hPlugin, PCVDFILTERBACKEND pBackend)
778{
779 return vdAddFilterBackends(hPlugin, &pBackend, 1);
780}
781
782#endif /* VBOX_HDD_NO_DYNAMIC_BACKENDS*/
783
784/**
785 * internal: issue error message.
786 */
787static int vdError(PVBOXHDD pDisk, int rc, RT_SRC_POS_DECL,
788 const char *pszFormat, ...)
789{
790 va_list va;
791 va_start(va, pszFormat);
792 if (pDisk->pInterfaceError)
793 pDisk->pInterfaceError->pfnError(pDisk->pInterfaceError->Core.pvUser, rc, RT_SRC_POS_ARGS, pszFormat, va);
794 va_end(va);
795 return rc;
796}
797
798/**
799 * internal: thread synchronization, start read.
800 */
801DECLINLINE(int) vdThreadStartRead(PVBOXHDD pDisk)
802{
803 int rc = VINF_SUCCESS;
804 if (RT_UNLIKELY(pDisk->pInterfaceThreadSync))
805 rc = pDisk->pInterfaceThreadSync->pfnStartRead(pDisk->pInterfaceThreadSync->Core.pvUser);
806 return rc;
807}
808
809/**
810 * internal: thread synchronization, finish read.
811 */
812DECLINLINE(int) vdThreadFinishRead(PVBOXHDD pDisk)
813{
814 int rc = VINF_SUCCESS;
815 if (RT_UNLIKELY(pDisk->pInterfaceThreadSync))
816 rc = pDisk->pInterfaceThreadSync->pfnFinishRead(pDisk->pInterfaceThreadSync->Core.pvUser);
817 return rc;
818}
819
820/**
821 * internal: thread synchronization, start write.
822 */
823DECLINLINE(int) vdThreadStartWrite(PVBOXHDD pDisk)
824{
825 int rc = VINF_SUCCESS;
826 if (RT_UNLIKELY(pDisk->pInterfaceThreadSync))
827 rc = pDisk->pInterfaceThreadSync->pfnStartWrite(pDisk->pInterfaceThreadSync->Core.pvUser);
828 return rc;
829}
830
831/**
832 * internal: thread synchronization, finish write.
833 */
834DECLINLINE(int) vdThreadFinishWrite(PVBOXHDD pDisk)
835{
836 int rc = VINF_SUCCESS;
837 if (RT_UNLIKELY(pDisk->pInterfaceThreadSync))
838 rc = pDisk->pInterfaceThreadSync->pfnFinishWrite(pDisk->pInterfaceThreadSync->Core.pvUser);
839 return rc;
840}
841
842/**
843 * internal: find image format backend.
844 */
845static int vdFindBackend(const char *pszBackend, PCVDIMAGEBACKEND *ppBackend)
846{
847 int rc = VINF_SUCCESS;
848 PCVDIMAGEBACKEND pBackend = NULL;
849
850 if (!g_apBackends)
851 VDInit();
852
853 for (unsigned i = 0; i < g_cBackends; i++)
854 {
855 if (!RTStrICmp(pszBackend, g_apBackends[i]->pszBackendName))
856 {
857 pBackend = g_apBackends[i];
858 break;
859 }
860 }
861 *ppBackend = pBackend;
862 return rc;
863}
864
865/**
866 * internal: find cache format backend.
867 */
868static int vdFindCacheBackend(const char *pszBackend, PCVDCACHEBACKEND *ppBackend)
869{
870 int rc = VINF_SUCCESS;
871 PCVDCACHEBACKEND pBackend = NULL;
872
873 if (!g_apCacheBackends)
874 VDInit();
875
876 for (unsigned i = 0; i < g_cCacheBackends; i++)
877 {
878 if (!RTStrICmp(pszBackend, g_apCacheBackends[i]->pszBackendName))
879 {
880 pBackend = g_apCacheBackends[i];
881 break;
882 }
883 }
884 *ppBackend = pBackend;
885 return rc;
886}
887
888/**
889 * internal: find filter backend.
890 */
891static int vdFindFilterBackend(const char *pszFilter, PCVDFILTERBACKEND *ppBackend)
892{
893 int rc = VINF_SUCCESS;
894 PCVDFILTERBACKEND pBackend = NULL;
895
896 for (unsigned i = 0; i < g_cFilterBackends; i++)
897 {
898 if (!RTStrICmp(pszFilter, g_apFilterBackends[i]->pszBackendName))
899 {
900 pBackend = g_apFilterBackends[i];
901 break;
902 }
903 }
904 *ppBackend = pBackend;
905 return rc;
906}
907
908
909/**
910 * internal: add image structure to the end of images list.
911 */
912static void vdAddImageToList(PVBOXHDD pDisk, PVDIMAGE pImage)
913{
914 pImage->pPrev = NULL;
915 pImage->pNext = NULL;
916
917 if (pDisk->pBase)
918 {
919 Assert(pDisk->cImages > 0);
920 pImage->pPrev = pDisk->pLast;
921 pDisk->pLast->pNext = pImage;
922 pDisk->pLast = pImage;
923 }
924 else
925 {
926 Assert(pDisk->cImages == 0);
927 pDisk->pBase = pImage;
928 pDisk->pLast = pImage;
929 }
930
931 pDisk->cImages++;
932}
933
934/**
935 * internal: remove image structure from the images list.
936 */
937static void vdRemoveImageFromList(PVBOXHDD pDisk, PVDIMAGE pImage)
938{
939 Assert(pDisk->cImages > 0);
940
941 if (pImage->pPrev)
942 pImage->pPrev->pNext = pImage->pNext;
943 else
944 pDisk->pBase = pImage->pNext;
945
946 if (pImage->pNext)
947 pImage->pNext->pPrev = pImage->pPrev;
948 else
949 pDisk->pLast = pImage->pPrev;
950
951 pImage->pPrev = NULL;
952 pImage->pNext = NULL;
953
954 pDisk->cImages--;
955}
956
957/**
958 * Release a reference to the filter, decrementing the counter and destroying the filter
959 * when the counter reaches zero.
960 *
961 * @returns The new reference count.
962 * @param pFilter The filter to release.
963 */
964static uint32_t vdFilterRelease(PVDFILTER pFilter)
965{
966 uint32_t cRefs = ASMAtomicDecU32(&pFilter->cRefs);
967 if (!cRefs)
968 {
969 pFilter->pBackend->pfnDestroy(pFilter->pvBackendData);
970 RTMemFree(pFilter);
971 }
972
973 return cRefs;
974}
975
976/**
977 * Increments the reference counter of the given filter.
978 *
979 * @return The new reference count.
980 * @param pFilter The filter.
981 */
982static uint32_t vdFilterRetain(PVDFILTER pFilter)
983{
984 return ASMAtomicIncU32(&pFilter->cRefs);
985}
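/* Reference counting sketch: a filter attached to both the read and the write
 * chain is expected to hold one reference per chain, e.g.
 *
 *     vdFilterRetain(pFilter);   // reference for ListNodeChainRead
 *     vdFilterRetain(pFilter);   // reference for ListNodeChainWrite
 *     ...
 *     vdFilterRelease(pFilter);  // backend is destroyed once cRefs reaches 0
 */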
986
987/**
988 * internal: find image by index into the images list.
989 */
990static PVDIMAGE vdGetImageByNumber(PVBOXHDD pDisk, unsigned nImage)
991{
992 PVDIMAGE pImage = pDisk->pBase;
993 if (nImage == VD_LAST_IMAGE)
994 return pDisk->pLast;
995 while (pImage && nImage)
996 {
997 pImage = pImage->pNext;
998 nImage--;
999 }
1000 return pImage;
1001}
1002
1003/**
1004 * Applies the filter chain to the given write request.
1005 *
1006 * @returns VBox status code.
1007 * @param pDisk The HDD container.
1008 * @param uOffset The start offset of the write.
1009 * @param cbWrite Number of bytes to write.
1010 * @param pIoCtx The I/O context associated with the request.
1011 */
1012static int vdFilterChainApplyWrite(PVBOXHDD pDisk, uint64_t uOffset, size_t cbWrite,
1013 PVDIOCTX pIoCtx)
1014{
1015 int rc = VINF_SUCCESS;
1016
1017 VD_IS_LOCKED(pDisk);
1018
1019 PVDFILTER pFilter;
1020 RTListForEach(&pDisk->ListFilterChainWrite, pFilter, VDFILTER, ListNodeChainWrite)
1021 {
1022 rc = pFilter->pBackend->pfnFilterWrite(pFilter->pvBackendData, uOffset, cbWrite, pIoCtx);
1023 if (RT_FAILURE(rc))
1024 break;
1025 /* Reset S/G buffer for the next filter. */
1026 RTSgBufReset(&pIoCtx->Req.Io.SgBuf);
1027 }
1028
1029 return rc;
1030}
1031
1032/**
1033 * Applies the filter chain to the given read request.
1034 *
1035 * @returns VBox status code.
1036 * @param pDisk The HDD container.
1037 * @param uOffset The start offset of the read.
1038 * @param cbRead Number of bytes read.
1039 * @param pIoCtx The I/O context associated with the request.
1040 */
1041static int vdFilterChainApplyRead(PVBOXHDD pDisk, uint64_t uOffset, size_t cbRead,
1042 PVDIOCTX pIoCtx)
1043{
1044 int rc = VINF_SUCCESS;
1045
1046 VD_IS_LOCKED(pDisk);
1047
1048 /* Reset buffer before starting. */
1049 RTSgBufReset(&pIoCtx->Req.Io.SgBuf);
1050
1051 PVDFILTER pFilter;
1052 RTListForEach(&pDisk->ListFilterChainRead, pFilter, VDFILTER, ListNodeChainRead)
1053 {
1054 rc = pFilter->pBackend->pfnFilterRead(pFilter->pvBackendData, uOffset, cbRead, pIoCtx);
1055 if (RT_FAILURE(rc))
1056 break;
1057 /* Reset S/G buffer for the next filter. */
1058 RTSgBufReset(&pIoCtx->Req.Io.SgBuf);
1059 }
1060
1061 return rc;
1062}
1063
1064DECLINLINE(void) vdIoCtxRootComplete(PVBOXHDD pDisk, PVDIOCTX pIoCtx)
1065{
1066 if ( RT_SUCCESS(pIoCtx->rcReq)
1067 && pIoCtx->enmTxDir == VDIOCTXTXDIR_READ)
1068 pIoCtx->rcReq = vdFilterChainApplyRead(pDisk, pIoCtx->Req.Io.uOffsetXferOrig,
1069 pIoCtx->Req.Io.cbXferOrig, pIoCtx);
1070
1071 pIoCtx->Type.Root.pfnComplete(pIoCtx->Type.Root.pvUser1,
1072 pIoCtx->Type.Root.pvUser2,
1073 pIoCtx->rcReq);
1074}
1075
1076/**
1077 * Initialize the structure members of a given I/O context.
1078 */
1079DECLINLINE(void) vdIoCtxInit(PVDIOCTX pIoCtx, PVBOXHDD pDisk, VDIOCTXTXDIR enmTxDir,
1080 uint64_t uOffset, size_t cbTransfer, PVDIMAGE pImageStart,
1081 PCRTSGBUF pcSgBuf, void *pvAllocation,
1082 PFNVDIOCTXTRANSFER pfnIoCtxTransfer, uint32_t fFlags)
1083{
1084 pIoCtx->pDisk = pDisk;
1085 pIoCtx->enmTxDir = enmTxDir;
1086 pIoCtx->Req.Io.cbTransferLeft = (uint32_t)cbTransfer; Assert((uint32_t)cbTransfer == cbTransfer);
1087 pIoCtx->Req.Io.uOffset = uOffset;
1088 pIoCtx->Req.Io.cbTransfer = cbTransfer;
1089 pIoCtx->Req.Io.pImageStart = pImageStart;
1090 pIoCtx->Req.Io.pImageCur = pImageStart;
1091 pIoCtx->Req.Io.cbBufClear = 0;
1092 pIoCtx->Req.Io.pImageParentOverride = NULL;
1093 pIoCtx->Req.Io.uOffsetXferOrig = uOffset;
1094 pIoCtx->Req.Io.cbXferOrig = cbTransfer;
1095 pIoCtx->cDataTransfersPending = 0;
1096 pIoCtx->cMetaTransfersPending = 0;
1097 pIoCtx->fComplete = false;
1098 pIoCtx->fFlags = fFlags;
1099 pIoCtx->pvAllocation = pvAllocation;
1100 pIoCtx->pfnIoCtxTransfer = pfnIoCtxTransfer;
1101 pIoCtx->pfnIoCtxTransferNext = NULL;
1102 pIoCtx->rcReq = VINF_SUCCESS;
1103 pIoCtx->pIoCtxParent = NULL;
1104
1105 /* There is no S/G list for a flush request. */
1106 if ( enmTxDir != VDIOCTXTXDIR_FLUSH
1107 && enmTxDir != VDIOCTXTXDIR_DISCARD)
1108 RTSgBufClone(&pIoCtx->Req.Io.SgBuf, pcSgBuf);
1109 else
1110 memset(&pIoCtx->Req.Io.SgBuf, 0, sizeof(RTSGBUF));
1111}
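/* Flush and discard contexts carry no data, so a minimal synchronous setup looks
 * like the one used by vdDiscardRemoveBlocks() further down:
 *
 *     VDIOCTX IoCtx;
 *     vdIoCtxInit(&IoCtx, pDisk, VDIOCTXTXDIR_DISCARD, 0, 0, NULL,
 *                 NULL, NULL, NULL, VDIOCTX_FLAGS_SYNC);
 */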
1112
1113/**
1114 * Internal: Tries to read the desired range from the given cache.
1115 *
1116 * @returns VBox status code.
1117 * @retval VERR_VD_BLOCK_FREE if the block is not in the cache.
1118 * pcbRead will be set to the number of bytes not in the cache.
1119 * Everything thereafter might be in the cache.
1120 * @param pCache The cache to read from.
1121 * @param uOffset Offset of the virtual disk to read.
1122 * @param cbRead How much to read.
1123 * @param pIoCtx The I/O context to read into.
1124 * @param pcbRead Where to store the number of bytes actually read.
1125 * On success this indicates the number of bytes read from the cache.
1126 * If VERR_VD_BLOCK_FREE is returned this gives the number of bytes
1127 * which are not in the cache.
1128 * In both cases everything beyond this value
1129 * might or might not be in the cache.
1130 */
1131static int vdCacheReadHelper(PVDCACHE pCache, uint64_t uOffset,
1132 size_t cbRead, PVDIOCTX pIoCtx, size_t *pcbRead)
1133{
1134 int rc = VINF_SUCCESS;
1135
1136 LogFlowFunc(("pCache=%#p uOffset=%llu pIoCtx=%p cbRead=%zu pcbRead=%#p\n",
1137 pCache, uOffset, pIoCtx, cbRead, pcbRead));
1138
1139 AssertPtr(pCache);
1140 AssertPtr(pcbRead);
1141
1142 rc = pCache->Backend->pfnRead(pCache->pBackendData, uOffset, cbRead,
1143 pIoCtx, pcbRead);
1144
1145 LogFlowFunc(("returns rc=%Rrc pcbRead=%zu\n", rc, *pcbRead));
1146 return rc;
1147}
1148
1149/**
1150 * Internal: Writes data for the given block into the cache.
1151 *
1152 * @returns VBox status code.
1153 * @param pCache The cache to write to.
1154 * @param uOffset Offset of the virtual disk to write to the cache.
1155 * @param cbWrite How much to write.
1156 * @param pIoCtx The I/O context to write from.
1157 * @param pcbWritten How much data could be written, optional.
1158 */
1159static int vdCacheWriteHelper(PVDCACHE pCache, uint64_t uOffset, size_t cbWrite,
1160 PVDIOCTX pIoCtx, size_t *pcbWritten)
1161{
1162 int rc = VINF_SUCCESS;
1163
1164 LogFlowFunc(("pCache=%#p uOffset=%llu pIoCtx=%p cbWrite=%zu pcbWritten=%#p\n",
1165 pCache, uOffset, pIoCtx, cbWrite, pcbWritten));
1166
1167 AssertPtr(pCache);
1168 AssertPtr(pIoCtx);
1169 Assert(cbWrite > 0);
1170
1171 if (pcbWritten)
1172 rc = pCache->Backend->pfnWrite(pCache->pBackendData, uOffset, cbWrite,
1173 pIoCtx, pcbWritten);
1174 else
1175 {
1176 size_t cbWritten = 0;
1177
1178 do
1179 {
1180 rc = pCache->Backend->pfnWrite(pCache->pBackendData, uOffset, cbWrite,
1181 pIoCtx, &cbWritten);
1182 uOffset += cbWritten;
1183 cbWrite -= cbWritten;
1184 } while ( cbWrite
1185 && ( RT_SUCCESS(rc)
1186 || rc == VERR_VD_ASYNC_IO_IN_PROGRESS));
1187 }
1188
1189 LogFlowFunc(("returns rc=%Rrc pcbWritten=%zu\n",
1190 rc, pcbWritten ? *pcbWritten : cbWrite));
1191 return rc;
1192}
1193
1194/**
1195 * Creates a new empty discard state.
1196 *
1197 * @returns Pointer to the new discard state or NULL if out of memory.
1198 */
1199static PVDDISCARDSTATE vdDiscardStateCreate(void)
1200{
1201 PVDDISCARDSTATE pDiscard = (PVDDISCARDSTATE)RTMemAllocZ(sizeof(VDDISCARDSTATE));
1202
1203 if (pDiscard)
1204 {
1205 RTListInit(&pDiscard->ListLru);
1206 pDiscard->pTreeBlocks = (PAVLRU64TREE)RTMemAllocZ(sizeof(AVLRU64TREE));
1207 if (!pDiscard->pTreeBlocks)
1208 {
1209 RTMemFree(pDiscard);
1210 pDiscard = NULL;
1211 }
1212 }
1213
1214 return pDiscard;
1215}
1216
1217/**
1218 * Removes the least recently used blocks from the waiting list until
1219 * the new value is reached.
1220 *
1221 * @returns VBox status code.
1222 * @param pDisk VD disk container.
1223 * @param pDiscard The discard state.
1224 * @param cbDiscardingNew How many bytes should be waiting on success.
1225 * The number of bytes waiting can be less.
1226 */
1227static int vdDiscardRemoveBlocks(PVBOXHDD pDisk, PVDDISCARDSTATE pDiscard, size_t cbDiscardingNew)
1228{
1229 int rc = VINF_SUCCESS;
1230
1231 LogFlowFunc(("pDisk=%#p pDiscard=%#p cbDiscardingNew=%zu\n",
1232 pDisk, pDiscard, cbDiscardingNew));
1233
1234 while (pDiscard->cbDiscarding > cbDiscardingNew)
1235 {
1236 PVDDISCARDBLOCK pBlock = RTListGetLast(&pDiscard->ListLru, VDDISCARDBLOCK, NodeLru);
1237
1238 Assert(!RTListIsEmpty(&pDiscard->ListLru));
1239
1240 /* Go over the allocation bitmap and mark all discarded sectors as unused. */
1241 uint64_t offStart = pBlock->Core.Key;
1242 uint32_t idxStart = 0;
1243 size_t cbLeft = pBlock->cbDiscard;
1244 bool fAllocated = ASMBitTest(pBlock->pbmAllocated, idxStart);
1245 uint32_t cSectors = (uint32_t)(pBlock->cbDiscard / 512);
1246
1247 while (cbLeft > 0)
1248 {
1249 int32_t idxEnd;
1250 size_t cbThis = cbLeft;
1251
1252 if (fAllocated)
1253 {
1254 /* Check for the first unallocated bit. */
1255 idxEnd = ASMBitNextClear(pBlock->pbmAllocated, cSectors, idxStart);
1256 if (idxEnd != -1)
1257 {
1258 cbThis = (idxEnd - idxStart) * 512;
1259 fAllocated = false;
1260 }
1261 }
1262 else
1263 {
1264 /* Mark as unused and check for the first set bit. */
1265 idxEnd = ASMBitNextSet(pBlock->pbmAllocated, cSectors, idxStart);
1266 if (idxEnd != -1)
1267 cbThis = (idxEnd - idxStart) * 512;
1268
1269
1270 VDIOCTX IoCtx;
1271 vdIoCtxInit(&IoCtx, pDisk, VDIOCTXTXDIR_DISCARD, 0, 0, NULL,
1272 NULL, NULL, NULL, VDIOCTX_FLAGS_SYNC);
1273 rc = pDisk->pLast->Backend->pfnDiscard(pDisk->pLast->pBackendData,
1274 &IoCtx, offStart, cbThis, NULL,
1275 NULL, &cbThis, NULL,
1276 VD_DISCARD_MARK_UNUSED);
1277 if (RT_FAILURE(rc))
1278 break;
1279
1280 fAllocated = true;
1281 }
1282
1283 idxStart = idxEnd;
1284 offStart += cbThis;
1285 cbLeft -= cbThis;
1286 }
1287
1288 if (RT_FAILURE(rc))
1289 break;
1290
1291 PVDDISCARDBLOCK pBlockRemove = (PVDDISCARDBLOCK)RTAvlrU64RangeRemove(pDiscard->pTreeBlocks, pBlock->Core.Key);
1292 Assert(pBlockRemove == pBlock); NOREF(pBlockRemove);
1293 RTListNodeRemove(&pBlock->NodeLru);
1294
1295 pDiscard->cbDiscarding -= pBlock->cbDiscard;
1296 RTMemFree(pBlock->pbmAllocated);
1297 RTMemFree(pBlock);
1298 }
1299
1300 Assert(RT_FAILURE(rc) || pDiscard->cbDiscarding <= cbDiscardingNew);
1301
1302 LogFlowFunc(("returns rc=%Rrc\n", rc));
1303 return rc;
1304}
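/* Note on the helper above: the allocation bitmap is walked in runs of set
 * (allocated) and clear (unallocated) bits, one bit per 512 byte sector.
 * Allocated runs are skipped, unallocated runs are handed to the backend with
 * VD_DISCARD_MARK_UNUSED, and once the whole range was processed the block is
 * dropped from the AVL tree and the LRU list. */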
1305
1306/**
1307 * Destroys the current discard state, writing any waiting blocks to the image.
1308 *
1309 * @returns VBox status code.
1310 * @param pDisk VD disk container.
1311 */
1312static int vdDiscardStateDestroy(PVBOXHDD pDisk)
1313{
1314 int rc = VINF_SUCCESS;
1315
1316 if (pDisk->pDiscard)
1317 {
1318 rc = vdDiscardRemoveBlocks(pDisk, pDisk->pDiscard, 0 /* Remove all blocks. */);
1319 AssertRC(rc);
1320 RTMemFree(pDisk->pDiscard->pTreeBlocks);
1321 RTMemFree(pDisk->pDiscard);
1322 pDisk->pDiscard = NULL;
1323 }
1324
1325 return rc;
1326}
1327
1328/**
1329 * Marks the given range as allocated in the image.
1330 * Required if there are discards in progress and a write is made to a block
1331 * which could otherwise get discarded.
1332 *
1333 * @returns VBox status code.
1334 * @param pDisk VD container data.
1335 * @param uOffset First byte to mark as allocated.
1336 * @param cbRange Number of bytes to mark as allocated.
1337 */
1338static int vdDiscardSetRangeAllocated(PVBOXHDD pDisk, uint64_t uOffset, size_t cbRange)
1339{
1340 PVDDISCARDSTATE pDiscard = pDisk->pDiscard;
1341 int rc = VINF_SUCCESS;
1342
1343 if (pDiscard)
1344 {
1345 do
1346 {
1347 size_t cbThisRange = cbRange;
1348 PVDDISCARDBLOCK pBlock = (PVDDISCARDBLOCK)RTAvlrU64RangeGet(pDiscard->pTreeBlocks, uOffset);
1349
1350 if (pBlock)
1351 {
1352 int32_t idxStart, idxEnd;
1353
1354 Assert(!(cbThisRange % 512));
1355 Assert(!((uOffset - pBlock->Core.Key) % 512));
1356
1357 cbThisRange = RT_MIN(cbThisRange, pBlock->Core.KeyLast - uOffset + 1);
1358
1359 idxStart = (uOffset - pBlock->Core.Key) / 512;
1360 idxEnd = idxStart + (int32_t)(cbThisRange / 512);
1361 ASMBitSetRange(pBlock->pbmAllocated, idxStart, idxEnd);
1362 }
1363 else
1364 {
1365 pBlock = (PVDDISCARDBLOCK)RTAvlrU64GetBestFit(pDiscard->pTreeBlocks, uOffset, true);
1366 if (pBlock)
1367 cbThisRange = RT_MIN(cbThisRange, pBlock->Core.Key - uOffset);
1368 }
1369
1370 Assert(cbRange >= cbThisRange);
1371
1372 uOffset += cbThisRange;
1373 cbRange -= cbThisRange;
1374 } while (cbRange != 0);
1375 }
1376
1377 return rc;
1378}
1379
1380DECLINLINE(PVDIOCTX) vdIoCtxAlloc(PVBOXHDD pDisk, VDIOCTXTXDIR enmTxDir,
1381 uint64_t uOffset, size_t cbTransfer,
1382 PVDIMAGE pImageStart,PCRTSGBUF pcSgBuf,
1383 void *pvAllocation, PFNVDIOCTXTRANSFER pfnIoCtxTransfer,
1384 uint32_t fFlags)
1385{
1386 PVDIOCTX pIoCtx = NULL;
1387
1388 pIoCtx = (PVDIOCTX)RTMemCacheAlloc(pDisk->hMemCacheIoCtx);
1389 if (RT_LIKELY(pIoCtx))
1390 {
1391 vdIoCtxInit(pIoCtx, pDisk, enmTxDir, uOffset, cbTransfer, pImageStart,
1392 pcSgBuf, pvAllocation, pfnIoCtxTransfer, fFlags);
1393 }
1394
1395 return pIoCtx;
1396}
1397
1398DECLINLINE(PVDIOCTX) vdIoCtxRootAlloc(PVBOXHDD pDisk, VDIOCTXTXDIR enmTxDir,
1399 uint64_t uOffset, size_t cbTransfer,
1400 PVDIMAGE pImageStart, PCRTSGBUF pcSgBuf,
1401 PFNVDASYNCTRANSFERCOMPLETE pfnComplete,
1402 void *pvUser1, void *pvUser2,
1403 void *pvAllocation,
1404 PFNVDIOCTXTRANSFER pfnIoCtxTransfer,
1405 uint32_t fFlags)
1406{
1407 PVDIOCTX pIoCtx = vdIoCtxAlloc(pDisk, enmTxDir, uOffset, cbTransfer, pImageStart,
1408 pcSgBuf, pvAllocation, pfnIoCtxTransfer, fFlags);
1409
1410 if (RT_LIKELY(pIoCtx))
1411 {
1412 pIoCtx->pIoCtxParent = NULL;
1413 pIoCtx->Type.Root.pfnComplete = pfnComplete;
1414 pIoCtx->Type.Root.pvUser1 = pvUser1;
1415 pIoCtx->Type.Root.pvUser2 = pvUser2;
1416 }
1417
1418 LogFlow(("Allocated root I/O context %#p\n", pIoCtx));
1419 return pIoCtx;
1420}
1421
1422DECLINLINE(void) vdIoCtxDiscardInit(PVDIOCTX pIoCtx, PVBOXHDD pDisk, PCRTRANGE paRanges,
1423 unsigned cRanges, PFNVDASYNCTRANSFERCOMPLETE pfnComplete,
1424 void *pvUser1, void *pvUser2, void *pvAllocation,
1425 PFNVDIOCTXTRANSFER pfnIoCtxTransfer, uint32_t fFlags)
1426{
1427 pIoCtx->pIoCtxNext = NULL;
1428 pIoCtx->pDisk = pDisk;
1429 pIoCtx->enmTxDir = VDIOCTXTXDIR_DISCARD;
1430 pIoCtx->cDataTransfersPending = 0;
1431 pIoCtx->cMetaTransfersPending = 0;
1432 pIoCtx->fComplete = false;
1433 pIoCtx->fFlags = fFlags;
1434 pIoCtx->pvAllocation = pvAllocation;
1435 pIoCtx->pfnIoCtxTransfer = pfnIoCtxTransfer;
1436 pIoCtx->pfnIoCtxTransferNext = NULL;
1437 pIoCtx->rcReq = VINF_SUCCESS;
1438 pIoCtx->Req.Discard.paRanges = paRanges;
1439 pIoCtx->Req.Discard.cRanges = cRanges;
1440 pIoCtx->Req.Discard.idxRange = 0;
1441 pIoCtx->Req.Discard.cbDiscardLeft = 0;
1442 pIoCtx->Req.Discard.offCur = 0;
1443 pIoCtx->Req.Discard.cbThisDiscard = 0;
1444
1445 pIoCtx->pIoCtxParent = NULL;
1446 pIoCtx->Type.Root.pfnComplete = pfnComplete;
1447 pIoCtx->Type.Root.pvUser1 = pvUser1;
1448 pIoCtx->Type.Root.pvUser2 = pvUser2;
1449}
1450
1451DECLINLINE(PVDIOCTX) vdIoCtxDiscardAlloc(PVBOXHDD pDisk, PCRTRANGE paRanges,
1452 unsigned cRanges,
1453 PFNVDASYNCTRANSFERCOMPLETE pfnComplete,
1454 void *pvUser1, void *pvUser2,
1455 void *pvAllocation,
1456 PFNVDIOCTXTRANSFER pfnIoCtxTransfer,
1457 uint32_t fFlags)
1458{
1459 PVDIOCTX pIoCtx = NULL;
1460
1461 pIoCtx = (PVDIOCTX)RTMemCacheAlloc(pDisk->hMemCacheIoCtx);
1462 if (RT_LIKELY(pIoCtx))
1463 {
1464 vdIoCtxDiscardInit(pIoCtx, pDisk, paRanges, cRanges, pfnComplete, pvUser1,
1465 pvUser2, pvAllocation, pfnIoCtxTransfer, fFlags);
1466 }
1467
1468 LogFlow(("Allocated discard I/O context %#p\n", pIoCtx));
1469 return pIoCtx;
1470}
1471
1472DECLINLINE(PVDIOCTX) vdIoCtxChildAlloc(PVBOXHDD pDisk, VDIOCTXTXDIR enmTxDir,
1473 uint64_t uOffset, size_t cbTransfer,
1474 PVDIMAGE pImageStart, PCRTSGBUF pcSgBuf,
1475 PVDIOCTX pIoCtxParent, size_t cbTransferParent,
1476 size_t cbWriteParent, void *pvAllocation,
1477 PFNVDIOCTXTRANSFER pfnIoCtxTransfer)
1478{
1479 PVDIOCTX pIoCtx = vdIoCtxAlloc(pDisk, enmTxDir, uOffset, cbTransfer, pImageStart,
1480 pcSgBuf, pvAllocation, pfnIoCtxTransfer, pIoCtxParent->fFlags & ~VDIOCTX_FLAGS_DONT_FREE);
1481
1482 AssertPtr(pIoCtxParent);
1483 Assert(!pIoCtxParent->pIoCtxParent);
1484
1485 if (RT_LIKELY(pIoCtx))
1486 {
1487 pIoCtx->pIoCtxParent = pIoCtxParent;
1488 pIoCtx->Type.Child.uOffsetSaved = uOffset;
1489 pIoCtx->Type.Child.cbTransferLeftSaved = cbTransfer;
1490 pIoCtx->Type.Child.cbTransferParent = cbTransferParent;
1491 pIoCtx->Type.Child.cbWriteParent = cbWriteParent;
1492 }
1493
1494 LogFlow(("Allocated child I/O context %#p\n", pIoCtx));
1495 return pIoCtx;
1496}
1497
1498DECLINLINE(PVDIOTASK) vdIoTaskUserAlloc(PVDIOSTORAGE pIoStorage, PFNVDXFERCOMPLETED pfnComplete, void *pvUser, PVDIOCTX pIoCtx, uint32_t cbTransfer)
1499{
1500 PVDIOTASK pIoTask = NULL;
1501
1502 pIoTask = (PVDIOTASK)RTMemCacheAlloc(pIoStorage->pVDIo->pDisk->hMemCacheIoTask);
1503 if (pIoTask)
1504 {
1505 pIoTask->pIoStorage = pIoStorage;
1506 pIoTask->pfnComplete = pfnComplete;
1507 pIoTask->pvUser = pvUser;
1508 pIoTask->fMeta = false;
1509 pIoTask->Type.User.cbTransfer = cbTransfer;
1510 pIoTask->Type.User.pIoCtx = pIoCtx;
1511 }
1512
1513 return pIoTask;
1514}
1515
1516DECLINLINE(PVDIOTASK) vdIoTaskMetaAlloc(PVDIOSTORAGE pIoStorage, PFNVDXFERCOMPLETED pfnComplete, void *pvUser, PVDMETAXFER pMetaXfer)
1517{
1518 PVDIOTASK pIoTask = NULL;
1519
1520 pIoTask = (PVDIOTASK)RTMemCacheAlloc(pIoStorage->pVDIo->pDisk->hMemCacheIoTask);
1521 if (pIoTask)
1522 {
1523 pIoTask->pIoStorage = pIoStorage;
1524 pIoTask->pfnComplete = pfnComplete;
1525 pIoTask->pvUser = pvUser;
1526 pIoTask->fMeta = true;
1527 pIoTask->Type.Meta.pMetaXfer = pMetaXfer;
1528 }
1529
1530 return pIoTask;
1531}
1532
1533DECLINLINE(void) vdIoCtxFree(PVBOXHDD pDisk, PVDIOCTX pIoCtx)
1534{
1535 Log(("Freeing I/O context %#p\n", pIoCtx));
1536
1537 if (!(pIoCtx->fFlags & VDIOCTX_FLAGS_DONT_FREE))
1538 {
1539 if (pIoCtx->pvAllocation)
1540 RTMemFree(pIoCtx->pvAllocation);
1541#ifdef DEBUG
1542 memset(&pIoCtx->pDisk, 0xff, sizeof(void *));
1543#endif
1544 RTMemCacheFree(pDisk->hMemCacheIoCtx, pIoCtx);
1545 }
1546}
1547
1548DECLINLINE(void) vdIoTaskFree(PVBOXHDD pDisk, PVDIOTASK pIoTask)
1549{
1550#ifdef DEBUG
1551 memset(pIoTask, 0xff, sizeof(VDIOTASK));
1552#endif
1553 RTMemCacheFree(pDisk->hMemCacheIoTask, pIoTask);
1554}
1555
1556DECLINLINE(void) vdIoCtxChildReset(PVDIOCTX pIoCtx)
1557{
1558 AssertPtr(pIoCtx->pIoCtxParent);
1559
1560 RTSgBufReset(&pIoCtx->Req.Io.SgBuf);
1561 pIoCtx->Req.Io.uOffset = pIoCtx->Type.Child.uOffsetSaved;
1562 pIoCtx->Req.Io.cbTransferLeft = (uint32_t)pIoCtx->Type.Child.cbTransferLeftSaved;
1563 Assert((uint32_t)pIoCtx->Type.Child.cbTransferLeftSaved == pIoCtx->Type.Child.cbTransferLeftSaved);
1564}
1565
1566DECLINLINE(PVDMETAXFER) vdMetaXferAlloc(PVDIOSTORAGE pIoStorage, uint64_t uOffset, size_t cb)
1567{
1568 PVDMETAXFER pMetaXfer = (PVDMETAXFER)RTMemAlloc(RT_OFFSETOF(VDMETAXFER, abData[cb]));
1569
1570 if (RT_LIKELY(pMetaXfer))
1571 {
1572 pMetaXfer->Core.Key = uOffset;
1573 pMetaXfer->Core.KeyLast = uOffset + cb - 1;
1574 pMetaXfer->fFlags = VDMETAXFER_TXDIR_NONE;
1575 pMetaXfer->cbMeta = cb;
1576 pMetaXfer->pIoStorage = pIoStorage;
1577 pMetaXfer->cRefs = 0;
1578 pMetaXfer->pbDataShw = NULL;
1579 RTListInit(&pMetaXfer->ListIoCtxWaiting);
1580 RTListInit(&pMetaXfer->ListIoCtxShwWrites);
1581 }
1582 return pMetaXfer;
1583}
1584
1585DECLINLINE(void) vdIoCtxAddToWaitingList(volatile PVDIOCTX *ppList, PVDIOCTX pIoCtx)
1586{
1587 /* Put it on the waiting list. */
1588 PVDIOCTX pNext = ASMAtomicUoReadPtrT(ppList, PVDIOCTX);
1589 PVDIOCTX pHeadOld;
1590 pIoCtx->pIoCtxNext = pNext;
1591 while (!ASMAtomicCmpXchgExPtr(ppList, pIoCtx, pNext, &pHeadOld))
1592 {
1593 pNext = pHeadOld;
1594 Assert(pNext != pIoCtx);
1595 pIoCtx->pIoCtxNext = pNext;
1596 ASMNopPause();
1597 }
1598}
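/* The helper above is a lock-free LIFO push: the new context points at the current
 * head and a compare-and-swap installs it as the new head, retrying with the
 * updated head on contention. Consumers (vdDiskProcessWaitingIoCtx,
 * vdDiskProcessBlockedIoCtx) grab the whole list with an atomic exchange and
 * reverse it to restore FIFO processing order. */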
1599
1600DECLINLINE(void) vdIoCtxDefer(PVBOXHDD pDisk, PVDIOCTX pIoCtx)
1601{
1602 LogFlowFunc(("Deferring I/O context pIoCtx=%#p\n", pIoCtx));
1603
1604 Assert(!pIoCtx->pIoCtxParent && !(pIoCtx->fFlags & VDIOCTX_FLAGS_BLOCKED));
1605 pIoCtx->fFlags |= VDIOCTX_FLAGS_BLOCKED;
1606 vdIoCtxAddToWaitingList(&pDisk->pIoCtxBlockedHead, pIoCtx);
1607}
1608
1609static size_t vdIoCtxCopy(PVDIOCTX pIoCtxDst, PVDIOCTX pIoCtxSrc, size_t cbData)
1610{
1611 return RTSgBufCopy(&pIoCtxDst->Req.Io.SgBuf, &pIoCtxSrc->Req.Io.SgBuf, cbData);
1612}
1613
1614#if 0 /* unused */
1615static int vdIoCtxCmp(PVDIOCTX pIoCtx1, PVDIOCTX pIoCtx2, size_t cbData)
1616{
1617 return RTSgBufCmp(&pIoCtx1->Req.Io.SgBuf, &pIoCtx2->Req.Io.SgBuf, cbData);
1618}
1619#endif
1620
1621static size_t vdIoCtxCopyTo(PVDIOCTX pIoCtx, const uint8_t *pbData, size_t cbData)
1622{
1623 return RTSgBufCopyFromBuf(&pIoCtx->Req.Io.SgBuf, pbData, cbData);
1624}
1625
1626static size_t vdIoCtxCopyFrom(PVDIOCTX pIoCtx, uint8_t *pbData, size_t cbData)
1627{
1628 return RTSgBufCopyToBuf(&pIoCtx->Req.Io.SgBuf, pbData, cbData);
1629}
1630
1631static size_t vdIoCtxSet(PVDIOCTX pIoCtx, uint8_t ch, size_t cbData)
1632{
1633 return RTSgBufSet(&pIoCtx->Req.Io.SgBuf, ch, cbData);
1634}
1635
1636/**
1637 * Returns whether the given I/O context has completed.
1638 *
1639 * @returns Flag whether the I/O context is complete.
1640 * @param pIoCtx The I/O context to check.
1641 */
1642DECLINLINE(bool) vdIoCtxIsComplete(PVDIOCTX pIoCtx)
1643{
1644 if ( !pIoCtx->cMetaTransfersPending
1645 && !pIoCtx->cDataTransfersPending
1646 && !pIoCtx->pfnIoCtxTransfer)
1647 return true;
1648
1649 /*
1650 * We complete the I/O context in case of an error
1651 * if there is no I/O task pending.
1652 */
1653 if ( RT_FAILURE(pIoCtx->rcReq)
1654 && !pIoCtx->cMetaTransfersPending
1655 && !pIoCtx->cDataTransfersPending)
1656 return true;
1657
1658 return false;
1659}
1660
1661/**
1662 * Returns whether the given I/O context is blocked due to a metadata transfer
1663 * or because the backend blocked it.
1664 *
1665 * @returns Flag whether the I/O context is blocked.
1666 * @param pIoCtx The I/O context to check.
1667 */
1668DECLINLINE(bool) vdIoCtxIsBlocked(PVDIOCTX pIoCtx)
1669{
1670 /* Don't change anything if there is a metadata transfer pending or we are blocked. */
1671 if ( pIoCtx->cMetaTransfersPending
1672 || (pIoCtx->fFlags & VDIOCTX_FLAGS_BLOCKED))
1673 return true;
1674
1675 return false;
1676}
1677
1678/**
1679 * Process the I/O context, core method which assumes that the I/O context
1680 * acquired the lock.
1681 *
1682 * @returns VBox status code.
1683 * @param pIoCtx I/O context to process.
1684 */
1685static int vdIoCtxProcessLocked(PVDIOCTX pIoCtx)
1686{
1687 int rc = VINF_SUCCESS;
1688
1689 VD_IS_LOCKED(pIoCtx->pDisk);
1690
1691 LogFlowFunc(("pIoCtx=%#p\n", pIoCtx));
1692
1693 if (!vdIoCtxIsComplete(pIoCtx))
1694 {
1695 if (!vdIoCtxIsBlocked(pIoCtx))
1696 {
1697 if (pIoCtx->pfnIoCtxTransfer)
1698 {
1699 /* Call the transfer function advancing to the next while there is no error. */
1700 while ( pIoCtx->pfnIoCtxTransfer
1701 && !pIoCtx->cMetaTransfersPending
1702 && RT_SUCCESS(rc))
1703 {
1704 LogFlowFunc(("calling transfer function %#p\n", pIoCtx->pfnIoCtxTransfer));
1705 rc = pIoCtx->pfnIoCtxTransfer(pIoCtx);
1706
1707 /* Advance to the next part of the transfer if the current one succeeded. */
1708 if (RT_SUCCESS(rc))
1709 {
1710 pIoCtx->pfnIoCtxTransfer = pIoCtx->pfnIoCtxTransferNext;
1711 pIoCtx->pfnIoCtxTransferNext = NULL;
1712 }
1713 }
1714 }
1715
1716 if ( RT_SUCCESS(rc)
1717 && !pIoCtx->cMetaTransfersPending
1718 && !pIoCtx->cDataTransfersPending
1719 && !(pIoCtx->fFlags & VDIOCTX_FLAGS_BLOCKED))
1720 rc = VINF_VD_ASYNC_IO_FINISHED;
1721 else if ( RT_SUCCESS(rc)
1722 || rc == VERR_VD_NOT_ENOUGH_METADATA
1723 || rc == VERR_VD_IOCTX_HALT)
1724 rc = VERR_VD_ASYNC_IO_IN_PROGRESS;
1725 else if ( RT_FAILURE(rc)
1726 && (rc != VERR_VD_ASYNC_IO_IN_PROGRESS))
1727 {
1728 ASMAtomicCmpXchgS32(&pIoCtx->rcReq, rc, VINF_SUCCESS);
1729
1730 /*
1731 * The I/O context completed if we have an error and there is no data
1732 * or meta data transfer pending.
1733 */
1734 if ( !pIoCtx->cMetaTransfersPending
1735 && !pIoCtx->cDataTransfersPending)
1736 rc = VINF_VD_ASYNC_IO_FINISHED;
1737 else
1738 rc = VERR_VD_ASYNC_IO_IN_PROGRESS;
1739 }
1740 }
1741 else
1742 rc = VERR_VD_ASYNC_IO_IN_PROGRESS;
1743 }
1744 else
1745 rc = VINF_VD_ASYNC_IO_FINISHED;
1746
1747 LogFlowFunc(("pIoCtx=%#p rc=%Rrc cDataTransfersPending=%u cMetaTransfersPending=%u fComplete=%RTbool\n",
1748 pIoCtx, rc, pIoCtx->cDataTransfersPending, pIoCtx->cMetaTransfersPending,
1749 pIoCtx->fComplete));
1750
1751 return rc;
1752}
1753
1754/**
1755 * Processes the list of waiting I/O contexts.
1756 *
1757 * @returns VBox status code, only valid if pIoCtxRc is not NULL, treat as void
1758 * function otherwise.
1759 * @param pDisk The disk structure.
1760 * @param pIoCtxRc An I/O context handle which waits on the list. When processed
1761 * the status code is returned. NULL if there is no I/O context
1762 * to return the status code for.
1763 */
1764static int vdDiskProcessWaitingIoCtx(PVBOXHDD pDisk, PVDIOCTX pIoCtxRc)
1765{
1766 int rc = VERR_VD_ASYNC_IO_IN_PROGRESS;
1767
1768 LogFlowFunc(("pDisk=%#p pIoCtxRc=%#p\n", pDisk, pIoCtxRc));
1769
1770 VD_IS_LOCKED(pDisk);
1771
1772 /* Get the waiting list and process it in FIFO order. */
1773 PVDIOCTX pIoCtxHead = ASMAtomicXchgPtrT(&pDisk->pIoCtxHead, NULL, PVDIOCTX);
1774
1775 /* Reverse it. */
1776 PVDIOCTX pCur = pIoCtxHead;
1777 pIoCtxHead = NULL;
1778 while (pCur)
1779 {
1780 PVDIOCTX pInsert = pCur;
1781 pCur = pCur->pIoCtxNext;
1782 pInsert->pIoCtxNext = pIoCtxHead;
1783 pIoCtxHead = pInsert;
1784 }
1785
1786 /* Process now. */
1787 pCur = pIoCtxHead;
1788 while (pCur)
1789 {
1790 int rcTmp;
1791 PVDIOCTX pTmp = pCur;
1792
1793 pCur = pCur->pIoCtxNext;
1794 pTmp->pIoCtxNext = NULL;
1795
1796 /*
1797 * Need to clear the sync flag here if there is a new I/O context
1798 * with it set and the context is not given in pIoCtxRc.
1799 * This happens most likely on a different thread and that one shouldn't
1800 * process the context synchronously.
1801 *
1802 * The thread that issued the context will wait on the event semaphore
1803 * anyway which is signalled when the completion handler is called.
1804 */
1805 if ( pTmp->fFlags & VDIOCTX_FLAGS_SYNC
1806 && pTmp != pIoCtxRc)
1807 pTmp->fFlags &= ~VDIOCTX_FLAGS_SYNC;
1808
1809 rcTmp = vdIoCtxProcessLocked(pTmp);
1810 if (pTmp == pIoCtxRc)
1811 {
1812 if ( rcTmp == VINF_VD_ASYNC_IO_FINISHED
1813 && RT_SUCCESS(pTmp->rcReq)
1814 && pTmp->enmTxDir == VDIOCTXTXDIR_READ)
1815 {
1816 int rc2 = vdFilterChainApplyRead(pDisk, pTmp->Req.Io.uOffsetXferOrig,
1817 pTmp->Req.Io.cbXferOrig, pTmp);
1818 if (RT_FAILURE(rc2))
1819 rcTmp = rc2;
1820 }
1821
1822 /* The given I/O context was processed, pass the return code to the caller. */
1823 if ( rcTmp == VINF_VD_ASYNC_IO_FINISHED
1824 && (pTmp->fFlags & VDIOCTX_FLAGS_SYNC))
1825 rc = pTmp->rcReq;
1826 else
1827 rc = rcTmp;
1828 }
1829 else if ( rcTmp == VINF_VD_ASYNC_IO_FINISHED
1830 && ASMAtomicCmpXchgBool(&pTmp->fComplete, true, false))
1831 {
1832 LogFlowFunc(("Waiting I/O context completed pTmp=%#p\n", pTmp));
1833 vdThreadFinishWrite(pDisk);
1834 vdIoCtxRootComplete(pDisk, pTmp);
1835 vdIoCtxFree(pDisk, pTmp);
1836 }
1837 }
1838
1839 LogFlowFunc(("returns rc=%Rrc\n", rc));
1840 return rc;
1841}
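/*
 * Illustrative sketch (hypothetical, not part of VD.cpp): vdDiskProcessWaitingIoCtx()
 * above detaches the waiting list with a single atomic exchange and then reverses
 * the LIFO-ordered chain so the contexts get processed in FIFO (submission) order.
 * The plain C11 program below shows the same grab-and-reverse pattern in isolation;
 * IOCTX, listPush() and listProcessFifo() are made-up stand-ins for VDIOCTX and the
 * code above, not real VD or IPRT APIs.
 *
 *   #include <stdatomic.h>
 *   #include <stddef.h>
 *   #include <stdio.h>
 *
 *   typedef struct IOCTX
 *   {
 *       int           id;
 *       struct IOCTX *pNext;
 *   } IOCTX;
 *
 *   // Producers push with a CAS loop; the newest entry becomes the list head.
 *   static void listPush(_Atomic(IOCTX *) *pHead, IOCTX *pNode)
 *   {
 *       IOCTX *pOld = atomic_load(pHead);
 *       do
 *           pNode->pNext = pOld;
 *       while (!atomic_compare_exchange_weak(pHead, &pOld, pNode));
 *   }
 *
 *   // The consumer detaches the whole chain with one exchange and reverses it,
 *   // turning LIFO push order into FIFO processing order.
 *   static void listProcessFifo(_Atomic(IOCTX *) *pHead)
 *   {
 *       IOCTX *pCur  = atomic_exchange(pHead, NULL);
 *       IOCTX *pFifo = NULL;
 *       while (pCur)
 *       {
 *           IOCTX *pNode = pCur;
 *           pCur         = pCur->pNext;
 *           pNode->pNext = pFifo;
 *           pFifo        = pNode;
 *       }
 *       for (IOCTX *p = pFifo; p; p = p->pNext)
 *           printf("processing context %d\n", p->id);
 *   }
 *
 *   int main(void)
 *   {
 *       static IOCTX aCtx[3] = { { 1, NULL }, { 2, NULL }, { 3, NULL } };
 *       _Atomic(IOCTX *) pHead = NULL;
 *       for (unsigned i = 0; i < 3; i++)
 *           listPush(&pHead, &aCtx[i]);
 *       listProcessFifo(&pHead); // prints 1, 2, 3 in submission order
 *       return 0;
 *   }
 */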
1842
1843/**
1844 * Processes the list of blocked I/O contexts.
1845 *
1846 * @returns nothing.
1847 * @param pDisk The disk structure.
1848 */
1849static void vdDiskProcessBlockedIoCtx(PVBOXHDD pDisk)
1850{
1851 LogFlowFunc(("pDisk=%#p\n", pDisk));
1852
1853 VD_IS_LOCKED(pDisk);
1854
1855 /* Get the waiting list and process it in FIFO order. */
1856 PVDIOCTX pIoCtxHead = ASMAtomicXchgPtrT(&pDisk->pIoCtxBlockedHead, NULL, PVDIOCTX);
1857
1858 /* Reverse it. */
1859 PVDIOCTX pCur = pIoCtxHead;
1860 pIoCtxHead = NULL;
1861 while (pCur)
1862 {
1863 PVDIOCTX pInsert = pCur;
1864 pCur = pCur->pIoCtxNext;
1865 pInsert->pIoCtxNext = pIoCtxHead;
1866 pIoCtxHead = pInsert;
1867 }
1868
1869 /* Process now. */
1870 pCur = pIoCtxHead;
1871 while (pCur)
1872 {
1873 int rc;
1874 PVDIOCTX pTmp = pCur;
1875
1876 pCur = pCur->pIoCtxNext;
1877 pTmp->pIoCtxNext = NULL;
1878
1879 Assert(!pTmp->pIoCtxParent);
1880 Assert(pTmp->fFlags & VDIOCTX_FLAGS_BLOCKED);
1881 pTmp->fFlags &= ~VDIOCTX_FLAGS_BLOCKED;
1882
1883 rc = vdIoCtxProcessLocked(pTmp);
1884 if ( rc == VINF_VD_ASYNC_IO_FINISHED
1885 && ASMAtomicCmpXchgBool(&pTmp->fComplete, true, false))
1886 {
1887 LogFlowFunc(("Waiting I/O context completed pTmp=%#p\n", pTmp));
1888 vdThreadFinishWrite(pDisk);
1889 vdIoCtxRootComplete(pDisk, pTmp);
1890 vdIoCtxFree(pDisk, pTmp);
1891 }
1892 }
1893
1894 LogFlowFunc(("returns\n"));
1895}
1896
1897/**
1898 * Processes the I/O context, trying to lock the critical section.
1899 * The context is deferred if the critical section is busy.
1900 *
1901 * @returns VBox status code.
1902 * @param pIoCtx The I/O context to process.
1903 */
1904static int vdIoCtxProcessTryLockDefer(PVDIOCTX pIoCtx)
1905{
1906 int rc = VINF_SUCCESS;
1907 PVBOXHDD pDisk = pIoCtx->pDisk;
1908
1909 Log(("Defer pIoCtx=%#p\n", pIoCtx));
1910
1911 /* Put it on the waiting list first. */
1912 vdIoCtxAddToWaitingList(&pDisk->pIoCtxHead, pIoCtx);
1913
1914 if (ASMAtomicCmpXchgBool(&pDisk->fLocked, true, false))
1915 {
1916 /* Release it again; the waiting contexts (including this one) are processed just before the lock is dropped. */
1917 LogFlowFunc(("Successfully acquired the lock\n"));
1918 rc = vdDiskUnlock(pDisk, pIoCtx);
1919 }
1920 else
1921 {
1922 LogFlowFunc(("Lock is held\n"));
1923 rc = VERR_VD_ASYNC_IO_IN_PROGRESS;
1924 }
1925
1926 return rc;
1927}
1928
1929/**
1930 * Process the I/O context in a synchronous manner, waiting
1931 * for it to complete.
1932 *
1933 * @returns VBox status code of the completed request.
1934 * @param pIoCtx The sync I/O context.
1935 * @param   hEventComplete Event semaphore to wait on for completion.
1936 */
1937static int vdIoCtxProcessSync(PVDIOCTX pIoCtx, RTSEMEVENT hEventComplete)
1938{
1939 int rc = VINF_SUCCESS;
1940 PVBOXHDD pDisk = pIoCtx->pDisk;
1941
1942 LogFlowFunc(("pIoCtx=%p\n", pIoCtx));
1943
1944 AssertMsg(pIoCtx->fFlags & (VDIOCTX_FLAGS_SYNC | VDIOCTX_FLAGS_DONT_FREE),
1945 ("I/O context is not marked as synchronous\n"));
1946
1947 rc = vdIoCtxProcessTryLockDefer(pIoCtx);
1948 if (rc == VINF_VD_ASYNC_IO_FINISHED)
1949 rc = VINF_SUCCESS;
1950
1951 if (rc == VERR_VD_ASYNC_IO_IN_PROGRESS)
1952 {
1953 rc = RTSemEventWait(hEventComplete, RT_INDEFINITE_WAIT);
1954 AssertRC(rc);
1955 }
1956
1957 rc = pIoCtx->rcReq;
1958 vdIoCtxFree(pDisk, pIoCtx);
1959
1960 return rc;
1961}
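/*
 * Illustrative sketch (hypothetical, not part of VD.cpp): vdIoCtxProcessSync()
 * runs a synchronous request on top of the asynchronous machinery by submitting
 * it and, if it stays in flight, blocking on an event semaphore that the
 * completion callback signals. The plain C sketch below shows that handshake
 * with a POSIX mutex/condvar pair; SYNCEVENT, syncEventSignal() and
 * syncEventWait() are invented names, not VD or IPRT APIs.
 *
 *   #include <pthread.h>
 *   #include <stdbool.h>
 *   #include <stdio.h>
 *
 *   typedef struct SYNCEVENT
 *   {
 *       pthread_mutex_t Mtx;
 *       pthread_cond_t  Cond;
 *       bool            fDone;
 *       int             rcReq;
 *   } SYNCEVENT;
 *
 *   // Called from the asynchronous completion path (compare vdIoCtxSyncComplete).
 *   static void syncEventSignal(SYNCEVENT *pEvt, int rcReq)
 *   {
 *       pthread_mutex_lock(&pEvt->Mtx);
 *       pEvt->rcReq = rcReq;
 *       pEvt->fDone = true;
 *       pthread_cond_signal(&pEvt->Cond);
 *       pthread_mutex_unlock(&pEvt->Mtx);
 *   }
 *
 *   // Called by the submitting thread: wait until the async path has finished.
 *   static int syncEventWait(SYNCEVENT *pEvt)
 *   {
 *       pthread_mutex_lock(&pEvt->Mtx);
 *       while (!pEvt->fDone)
 *           pthread_cond_wait(&pEvt->Cond, &pEvt->Mtx);
 *       int rc = pEvt->rcReq;
 *       pthread_mutex_unlock(&pEvt->Mtx);
 *       return rc;
 *   }
 *
 *   static void *worker(void *pv)
 *   {
 *       syncEventSignal((SYNCEVENT *)pv, 0); // pretend the request succeeded
 *       return NULL;
 *   }
 *
 *   int main(void)
 *   {
 *       SYNCEVENT Evt = { PTHREAD_MUTEX_INITIALIZER, PTHREAD_COND_INITIALIZER, false, -1 };
 *       pthread_t hThread;
 *       pthread_create(&hThread, NULL, worker, &Evt);
 *       printf("request completed with rc=%d\n", syncEventWait(&Evt));
 *       pthread_join(hThread, NULL);
 *       return 0;
 *   }
 */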
1962
1963DECLINLINE(bool) vdIoCtxIsDiskLockOwner(PVBOXHDD pDisk, PVDIOCTX pIoCtx)
1964{
1965 return pDisk->pIoCtxLockOwner == pIoCtx;
1966}
1967
1968static int vdIoCtxLockDisk(PVBOXHDD pDisk, PVDIOCTX pIoCtx)
1969{
1970 int rc = VINF_SUCCESS;
1971
1972 VD_IS_LOCKED(pDisk);
1973
1974 LogFlowFunc(("pDisk=%#p pIoCtx=%#p\n", pDisk, pIoCtx));
1975
1976 if (!ASMAtomicCmpXchgPtr(&pDisk->pIoCtxLockOwner, pIoCtx, NIL_VDIOCTX))
1977 {
1978 Assert(pDisk->pIoCtxLockOwner != pIoCtx); /* No nesting allowed. */
1979 vdIoCtxDefer(pDisk, pIoCtx);
1980 rc = VERR_VD_ASYNC_IO_IN_PROGRESS;
1981 }
1982
1983 LogFlowFunc(("returns -> %Rrc\n", rc));
1984 return rc;
1985}
1986
1987static void vdIoCtxUnlockDisk(PVBOXHDD pDisk, PVDIOCTX pIoCtx, bool fProcessBlockedReqs)
1988{
1989 RT_NOREF1(pIoCtx);
1990 LogFlowFunc(("pDisk=%#p pIoCtx=%#p fProcessBlockedReqs=%RTbool\n",
1991 pDisk, pIoCtx, fProcessBlockedReqs));
1992
1993 VD_IS_LOCKED(pDisk);
1994
1995 LogFlow(("Unlocking disk lock owner is %#p\n", pDisk->pIoCtxLockOwner));
1996 Assert(pDisk->pIoCtxLockOwner == pIoCtx);
1997 ASMAtomicXchgPtrT(&pDisk->pIoCtxLockOwner, NIL_VDIOCTX, PVDIOCTX);
1998
1999 if (fProcessBlockedReqs)
2000 {
2001 /* Process any blocked writes if the current request didn't cause the image to grow again. */
2002 vdDiskProcessBlockedIoCtx(pDisk);
2003 }
2004
2005 LogFlowFunc(("returns\n"));
2006}
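/*
 * Illustrative sketch (hypothetical, not part of VD.cpp): vdIoCtxLockDisk() and
 * vdIoCtxUnlockDisk() above implement a single-owner lock as a compare-and-swap
 * on an owner pointer; a contending request is deferred instead of blocking the
 * calling thread, and the unlock path resumes whatever was deferred. The sketch
 * below uses invented names (DISK, REQ, diskTryLockOrDefer, diskUnlock) and keeps
 * the deferred list non-atomic; as in VD.cpp, where this runs under the per-disk
 * lock, the caller is assumed to serialize access to it.
 *
 *   #include <stdatomic.h>
 *   #include <stdbool.h>
 *   #include <stddef.h>
 *   #include <stdio.h>
 *
 *   typedef struct REQ
 *   {
 *       const char *pszName;
 *       struct REQ *pNext;
 *   } REQ;
 *
 *   typedef struct DISK
 *   {
 *       _Atomic(REQ *) pOwner;    // NULL while the lock is free
 *       REQ           *pDeferred; // requests to retry once the lock is released
 *   } DISK;
 *
 *   // Try to become the single lock owner; on contention the request is deferred.
 *   static bool diskTryLockOrDefer(DISK *pDisk, REQ *pReq)
 *   {
 *       REQ *pExpected = NULL;
 *       if (atomic_compare_exchange_strong(&pDisk->pOwner, &pExpected, pReq))
 *           return true;
 *       pReq->pNext      = pDisk->pDeferred;
 *       pDisk->pDeferred = pReq;
 *       return false;
 *   }
 *
 *   // Drop ownership and resume whatever was deferred in the meantime.
 *   static void diskUnlock(DISK *pDisk)
 *   {
 *       atomic_store(&pDisk->pOwner, NULL);
 *       while (pDisk->pDeferred)
 *       {
 *           REQ *pReq        = pDisk->pDeferred;
 *           pDisk->pDeferred = pReq->pNext;
 *           printf("resuming deferred request %s\n", pReq->pszName);
 *       }
 *   }
 *
 *   int main(void)
 *   {
 *       DISK Disk = { NULL, NULL };
 *       REQ  ReqA = { "A", NULL };
 *       REQ  ReqB = { "B", NULL };
 *       if (diskTryLockOrDefer(&Disk, &ReqA))
 *           printf("request A owns the lock\n");
 *       if (!diskTryLockOrDefer(&Disk, &ReqB))
 *           printf("request B deferred\n");
 *       diskUnlock(&Disk); // resumes request B
 *       return 0;
 *   }
 */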
2007
2008/**
2009 * Internal: Reads a given amount of data from the image chain of the disk.
2010 **/
2011static int vdDiskReadHelper(PVBOXHDD pDisk, PVDIMAGE pImage, PVDIMAGE pImageParentOverride,
2012 uint64_t uOffset, size_t cbRead, PVDIOCTX pIoCtx, size_t *pcbThisRead)
2013{
2014 RT_NOREF1(pDisk);
2015 int rc = VINF_SUCCESS;
2016 size_t cbThisRead = cbRead;
2017
2018 AssertPtr(pcbThisRead);
2019
2020 *pcbThisRead = 0;
2021
2022 /*
2023 * Try to read from the given image.
2024 * If the block is not allocated read from override chain if present.
2025 */
2026 rc = pImage->Backend->pfnRead(pImage->pBackendData,
2027 uOffset, cbThisRead, pIoCtx,
2028 &cbThisRead);
2029
2030 if (rc == VERR_VD_BLOCK_FREE)
2031 {
2032 for (PVDIMAGE pCurrImage = pImageParentOverride ? pImageParentOverride : pImage->pPrev;
2033 pCurrImage != NULL && rc == VERR_VD_BLOCK_FREE;
2034 pCurrImage = pCurrImage->pPrev)
2035 {
2036 rc = pCurrImage->Backend->pfnRead(pCurrImage->pBackendData,
2037 uOffset, cbThisRead, pIoCtx,
2038 &cbThisRead);
2039 }
2040 }
2041
2042 if (RT_SUCCESS(rc) || rc == VERR_VD_BLOCK_FREE)
2043 *pcbThisRead = cbThisRead;
2044
2045 return rc;
2046}
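/*
 * Illustrative sketch (hypothetical, not part of VD.cpp): vdDiskReadHelper() asks
 * the given image first and, whenever a backend reports the block as free, falls
 * back to the parent image until some image in the chain has the data. The toy
 * program below shows that fallback walk; IMAGE, chainRead(), imgRead() and
 * EXAMPLE_BLOCK_FREE are invented stand-ins for the image list and
 * VERR_VD_BLOCK_FREE.
 *
 *   #include <stddef.h>
 *   #include <stdio.h>
 *
 *   #define EXAMPLE_BLOCK_FREE 1 // stand-in for VERR_VD_BLOCK_FREE
 *
 *   typedef struct IMAGE
 *   {
 *       const char   *pszName;
 *       struct IMAGE *pParent;
 *       // Returns 0 and fills *pbData if the block is allocated here,
 *       // EXAMPLE_BLOCK_FREE otherwise.
 *       int         (*pfnRead)(struct IMAGE *pThis, unsigned uBlock, unsigned char *pbData);
 *   } IMAGE;
 *
 *   // Read one block, falling back through the parent chain until some image
 *   // has the data or the chain is exhausted (the block was never written).
 *   static int chainRead(IMAGE *pTop, unsigned uBlock, unsigned char *pbData)
 *   {
 *       int rc = EXAMPLE_BLOCK_FREE;
 *       for (IMAGE *pCur = pTop; pCur && rc == EXAMPLE_BLOCK_FREE; pCur = pCur->pParent)
 *           rc = pCur->pfnRead(pCur, uBlock, pbData);
 *       return rc;
 *   }
 *
 *   static int imgRead(IMAGE *pThis, unsigned uBlock, unsigned char *pbData)
 *   {
 *       // Toy allocation rule: the base image "owns" even blocks, the diff odd ones.
 *       unsigned fDiff = pThis->pParent != NULL;
 *       if ((uBlock & 1) == fDiff)
 *       {
 *           *pbData = (unsigned char)uBlock;
 *           return 0;
 *       }
 *       return EXAMPLE_BLOCK_FREE;
 *   }
 *
 *   int main(void)
 *   {
 *       IMAGE Base = { "base", NULL,  imgRead };
 *       IMAGE Diff = { "diff", &Base, imgRead };
 *       unsigned char bData = 0;
 *       int rc = chainRead(&Diff, 2, &bData); // block 2 is even: only the base has it
 *       printf("rc=%d data=%u\n", rc, bData); // rc=0 data=2
 *       return 0;
 *   }
 */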
2047
2048/**
2049 * internal: read the specified amount of data in whatever blocks the backend
2050 * will give us - async version.
2051 */
2052static DECLCALLBACK(int) vdReadHelperAsync(PVDIOCTX pIoCtx)
2053{
2054 int rc;
2055 PVBOXHDD pDisk = pIoCtx->pDisk;
2056 size_t cbToRead = pIoCtx->Req.Io.cbTransfer;
2057 uint64_t uOffset = pIoCtx->Req.Io.uOffset;
2058 PVDIMAGE pCurrImage = pIoCtx->Req.Io.pImageCur;
2059 PVDIMAGE pImageParentOverride = pIoCtx->Req.Io.pImageParentOverride;
2060 unsigned cImagesRead = pIoCtx->Req.Io.cImagesRead;
2061 size_t cbThisRead;
2062
2063 /*
2064 * Check whether there is a full block write in progress which was not allocated.
2065 * Defer I/O if the range interferes but only if it does not belong to the
2066 * write doing the allocation.
2067 */
2068 if ( pDisk->pIoCtxLockOwner != NIL_VDIOCTX
2069 && uOffset >= pDisk->uOffsetStartLocked
2070 && uOffset < pDisk->uOffsetEndLocked
2071 && ( !pIoCtx->pIoCtxParent
2072 || pIoCtx->pIoCtxParent != pDisk->pIoCtxLockOwner))
2073 {
2074 Log(("Interferring read while allocating a new block => deferring read\n"));
2075 vdIoCtxDefer(pDisk, pIoCtx);
2076 return VERR_VD_ASYNC_IO_IN_PROGRESS;
2077 }
2078
2079 /* Loop until all reads started or we have a backend which needs to read metadata. */
2080 do
2081 {
2082 /* Search for an image with the block allocated. Do not attempt to read more
2083 * than what previous reads marked as valid. Otherwise this would return
2084 * stale data when different block sizes are used for the images. */
2085 cbThisRead = cbToRead;
2086
2087 if ( pDisk->pCache
2088 && !pImageParentOverride)
2089 {
2090 rc = vdCacheReadHelper(pDisk->pCache, uOffset, cbThisRead,
2091 pIoCtx, &cbThisRead);
2092 if (rc == VERR_VD_BLOCK_FREE)
2093 {
2094 rc = vdDiskReadHelper(pDisk, pCurrImage, NULL, uOffset, cbThisRead,
2095 pIoCtx, &cbThisRead);
2096
2097 /* If the read was successful, write the data back into the cache. */
2098 if ( RT_SUCCESS(rc)
2099 && pIoCtx->fFlags & VDIOCTX_FLAGS_READ_UPDATE_CACHE)
2100 {
2101 rc = vdCacheWriteHelper(pDisk->pCache, uOffset, cbThisRead,
2102 pIoCtx, NULL);
2103 }
2104 }
2105 }
2106 else
2107 {
2108 /*
2109 * Try to read from the given image.
2110 * If the block is not allocated read from override chain if present.
2111 */
2112 rc = pCurrImage->Backend->pfnRead(pCurrImage->pBackendData,
2113 uOffset, cbThisRead, pIoCtx,
2114 &cbThisRead);
2115
2116 if ( rc == VERR_VD_BLOCK_FREE
2117 && cImagesRead != 1)
2118 {
2119 unsigned cImagesToProcess = cImagesRead;
2120
2121 pCurrImage = pImageParentOverride ? pImageParentOverride : pCurrImage->pPrev;
2122 pIoCtx->Req.Io.pImageParentOverride = NULL;
2123
2124 while (pCurrImage && rc == VERR_VD_BLOCK_FREE)
2125 {
2126 rc = pCurrImage->Backend->pfnRead(pCurrImage->pBackendData,
2127 uOffset, cbThisRead,
2128 pIoCtx, &cbThisRead);
2129 if (cImagesToProcess == 1)
2130 break;
2131 else if (cImagesToProcess > 0)
2132 cImagesToProcess--;
2133
2134 if (rc == VERR_VD_BLOCK_FREE)
2135 pCurrImage = pCurrImage->pPrev;
2136 }
2137 }
2138 }
2139
2140 /* The task state will already be updated on success, don't do it here! */
2141 if (rc == VERR_VD_BLOCK_FREE)
2142 {
2143 /* No image in the chain contains the data for the block. */
2144 ASMAtomicSubU32(&pIoCtx->Req.Io.cbTransferLeft, (uint32_t)cbThisRead); Assert(cbThisRead == (uint32_t)cbThisRead);
2145
2146 /* Fill the free space with 0 if we are told to do so
2147 * or a previous read returned valid data. */
2148 if (pIoCtx->fFlags & VDIOCTX_FLAGS_ZERO_FREE_BLOCKS)
2149 vdIoCtxSet(pIoCtx, '\0', cbThisRead);
2150 else
2151 pIoCtx->Req.Io.cbBufClear += cbThisRead;
2152
2153 if (pIoCtx->Req.Io.pImageCur->uOpenFlags & VD_OPEN_FLAGS_INFORM_ABOUT_ZERO_BLOCKS)
2154 rc = VINF_VD_NEW_ZEROED_BLOCK;
2155 else
2156 rc = VINF_SUCCESS;
2157 }
2158 else if (rc == VERR_VD_IOCTX_HALT)
2159 {
2160 uOffset += cbThisRead;
2161 cbToRead -= cbThisRead;
2162 pIoCtx->fFlags |= VDIOCTX_FLAGS_BLOCKED;
2163 }
2164 else if ( RT_SUCCESS(rc)
2165 || rc == VERR_VD_ASYNC_IO_IN_PROGRESS)
2166 {
2167 /* First non-free block, fill the space before it with 0. */
2168 if ( pIoCtx->Req.Io.cbBufClear
2169 && !(pIoCtx->fFlags & VDIOCTX_FLAGS_ZERO_FREE_BLOCKS))
2170 {
2171 RTSGBUF SgBuf;
2172 RTSgBufClone(&SgBuf, &pIoCtx->Req.Io.SgBuf);
2173 RTSgBufReset(&SgBuf);
2174 RTSgBufSet(&SgBuf, 0, pIoCtx->Req.Io.cbBufClear);
2175 pIoCtx->Req.Io.cbBufClear = 0;
2176 pIoCtx->fFlags |= VDIOCTX_FLAGS_ZERO_FREE_BLOCKS;
2177 }
2178 rc = VINF_SUCCESS;
2179 }
2180
2181 if (RT_FAILURE(rc))
2182 break;
2183
2184 cbToRead -= cbThisRead;
2185 uOffset += cbThisRead;
2186 pCurrImage = pIoCtx->Req.Io.pImageStart; /* Start with the highest image in the chain. */
2187 } while (cbToRead != 0 && RT_SUCCESS(rc));
2188
2189 if ( rc == VERR_VD_NOT_ENOUGH_METADATA
2190 || rc == VERR_VD_IOCTX_HALT)
2191 {
2192 /* Save the current state. */
2193 pIoCtx->Req.Io.uOffset = uOffset;
2194 pIoCtx->Req.Io.cbTransfer = cbToRead;
2195 pIoCtx->Req.Io.pImageCur = pCurrImage ? pCurrImage : pIoCtx->Req.Io.pImageStart;
2196 }
2197
2198 return (!(pIoCtx->fFlags & VDIOCTX_FLAGS_ZERO_FREE_BLOCKS))
2199 ? VERR_VD_BLOCK_FREE
2200 : rc;
2201}
2202
2203/**
2204 * internal: parent image read wrapper for compacting.
2205 */
2206static DECLCALLBACK(int) vdParentRead(void *pvUser, uint64_t uOffset, void *pvBuf,
2207 size_t cbRead)
2208{
2209 PVDPARENTSTATEDESC pParentState = (PVDPARENTSTATEDESC)pvUser;
2210
2211 /** @todo
2212 * Only used for compaction so far, which cannot be mixed with async I/O.
2213 * Needs to be changed if we want to support online compaction of images.
2214 */
2215 bool fLocked = ASMAtomicXchgBool(&pParentState->pDisk->fLocked, true);
2216 AssertMsgReturn(!fLocked,
2217 ("Calling synchronous parent read while another thread holds the disk lock\n"),
2218 VERR_VD_INVALID_STATE);
2219
2220 /* Fake an I/O context. */
2221 RTSGSEG Segment;
2222 RTSGBUF SgBuf;
2223 VDIOCTX IoCtx;
2224
2225 Segment.pvSeg = pvBuf;
2226 Segment.cbSeg = cbRead;
2227 RTSgBufInit(&SgBuf, &Segment, 1);
2228 vdIoCtxInit(&IoCtx, pParentState->pDisk, VDIOCTXTXDIR_READ, uOffset, cbRead, pParentState->pImage,
2229 &SgBuf, NULL, NULL, VDIOCTX_FLAGS_SYNC | VDIOCTX_FLAGS_ZERO_FREE_BLOCKS);
2230 int rc = vdReadHelperAsync(&IoCtx);
2231 ASMAtomicXchgBool(&pParentState->pDisk->fLocked, false);
2232 return rc;
2233}
2234
2235/**
2236 * Extended version of vdReadHelper(), implementing certain optimizations
2237 * for image cloning.
2238 *
2239 * @returns VBox status code.
2240 * @param pDisk The disk to read from.
2241 * @param pImage The image to start reading from.
2242 * @param pImageParentOverride The parent image to read from
2243 * if the starting image returns a free block.
2244 * If NULL is passed the real parent of the image
2245 * in the chain is used.
2246 * @param uOffset Offset in the disk to start reading from.
2247 * @param pvBuf Where to store the read data.
2248 * @param cbRead How much to read.
2249 * @param fZeroFreeBlocks Flag whether free blocks should be zeroed.
2250 *                          If false and no image has data for the specified
2251 * range VERR_VD_BLOCK_FREE is returned.
2252 * Note that unallocated blocks are still zeroed
2253 * if at least one image has valid data for a part
2254 * of the range.
2255 * @param fUpdateCache Flag whether to update the attached cache if
2256 * available.
2257 * @param cImagesRead Number of images in the chain to read until
2258 * the read is cut off. A value of 0 disables the cut off.
2259 */
2260static int vdReadHelperEx(PVBOXHDD pDisk, PVDIMAGE pImage, PVDIMAGE pImageParentOverride,
2261 uint64_t uOffset, void *pvBuf, size_t cbRead,
2262 bool fZeroFreeBlocks, bool fUpdateCache, unsigned cImagesRead)
2263{
2264 int rc = VINF_SUCCESS;
2265 uint32_t fFlags = VDIOCTX_FLAGS_SYNC | VDIOCTX_FLAGS_DONT_FREE;
2266 RTSGSEG Segment;
2267 RTSGBUF SgBuf;
2268 VDIOCTX IoCtx;
2269 RTSEMEVENT hEventComplete = NIL_RTSEMEVENT;
2270
2271 rc = RTSemEventCreate(&hEventComplete);
2272 if (RT_FAILURE(rc))
2273 return rc;
2274
2275 if (fZeroFreeBlocks)
2276 fFlags |= VDIOCTX_FLAGS_ZERO_FREE_BLOCKS;
2277 if (fUpdateCache)
2278 fFlags |= VDIOCTX_FLAGS_READ_UPDATE_CACHE;
2279
2280 Segment.pvSeg = pvBuf;
2281 Segment.cbSeg = cbRead;
2282 RTSgBufInit(&SgBuf, &Segment, 1);
2283 vdIoCtxInit(&IoCtx, pDisk, VDIOCTXTXDIR_READ, uOffset, cbRead, pImage, &SgBuf,
2284 NULL, vdReadHelperAsync, fFlags);
2285
2286 IoCtx.Req.Io.pImageParentOverride = pImageParentOverride;
2287 IoCtx.Req.Io.cImagesRead = cImagesRead;
2288 IoCtx.Type.Root.pfnComplete = vdIoCtxSyncComplete;
2289 IoCtx.Type.Root.pvUser1 = pDisk;
2290 IoCtx.Type.Root.pvUser2 = hEventComplete;
2291 rc = vdIoCtxProcessSync(&IoCtx, hEventComplete);
2292 RTSemEventDestroy(hEventComplete);
2293 return rc;
2294}
2295
2296/**
2297 * internal: read the specified amount of data in whatever blocks the backend
2298 * will give us.
2299 */
2300static int vdReadHelper(PVBOXHDD pDisk, PVDIMAGE pImage, uint64_t uOffset,
2301 void *pvBuf, size_t cbRead, bool fUpdateCache)
2302{
2303 return vdReadHelperEx(pDisk, pImage, NULL, uOffset, pvBuf, cbRead,
2304 true /* fZeroFreeBlocks */, fUpdateCache, 0);
2305}
2306
2307/**
2308 * internal: mark the disk as not modified.
2309 */
2310static void vdResetModifiedFlag(PVBOXHDD pDisk)
2311{
2312 if (pDisk->uModified & VD_IMAGE_MODIFIED_FLAG)
2313 {
2314 /* generate new last-modified uuid */
2315 if (!(pDisk->uModified & VD_IMAGE_MODIFIED_DISABLE_UUID_UPDATE))
2316 {
2317 RTUUID Uuid;
2318
2319 RTUuidCreate(&Uuid);
2320 pDisk->pLast->Backend->pfnSetModificationUuid(pDisk->pLast->pBackendData,
2321 &Uuid);
2322
2323 if (pDisk->pCache)
2324 pDisk->pCache->Backend->pfnSetModificationUuid(pDisk->pCache->pBackendData,
2325 &Uuid);
2326 }
2327
2328 pDisk->uModified &= ~VD_IMAGE_MODIFIED_FLAG;
2329 }
2330}
2331
2332/**
2333 * internal: mark the disk as modified.
2334 */
2335static void vdSetModifiedFlag(PVBOXHDD pDisk)
2336{
2337 pDisk->uModified |= VD_IMAGE_MODIFIED_FLAG;
2338 if (pDisk->uModified & VD_IMAGE_MODIFIED_FIRST)
2339 {
2340 pDisk->uModified &= ~VD_IMAGE_MODIFIED_FIRST;
2341
2342 /* First modify, so create a UUID and ensure it's written to disk. */
2343 vdResetModifiedFlag(pDisk);
2344
2345 if (!(pDisk->uModified & VD_IMAGE_MODIFIED_DISABLE_UUID_UPDATE))
2346 {
2347 VDIOCTX IoCtx;
2348 vdIoCtxInit(&IoCtx, pDisk, VDIOCTXTXDIR_FLUSH, 0, 0, NULL,
2349 NULL, NULL, NULL, VDIOCTX_FLAGS_SYNC);
2350 pDisk->pLast->Backend->pfnFlush(pDisk->pLast->pBackendData, &IoCtx);
2351 }
2352 }
2353}
2354
2355/**
2356 * internal: write buffer to the image, taking care of block boundaries and
2357 * write optimizations.
2358 */
2359static int vdWriteHelperEx(PVBOXHDD pDisk, PVDIMAGE pImage,
2360 PVDIMAGE pImageParentOverride, uint64_t uOffset,
2361 const void *pvBuf, size_t cbWrite,
2362 uint32_t fFlags, unsigned cImagesRead)
2363{
2364 int rc = VINF_SUCCESS;
2365 RTSGSEG Segment;
2366 RTSGBUF SgBuf;
2367 VDIOCTX IoCtx;
2368 RTSEMEVENT hEventComplete = NIL_RTSEMEVENT;
2369
2370 rc = RTSemEventCreate(&hEventComplete);
2371 if (RT_FAILURE(rc))
2372 return rc;
2373
2374 fFlags |= VDIOCTX_FLAGS_SYNC | VDIOCTX_FLAGS_DONT_FREE;
2375
2376 Segment.pvSeg = (void *)pvBuf;
2377 Segment.cbSeg = cbWrite;
2378 RTSgBufInit(&SgBuf, &Segment, 1);
2379 vdIoCtxInit(&IoCtx, pDisk, VDIOCTXTXDIR_WRITE, uOffset, cbWrite, pImage, &SgBuf,
2380 NULL, vdWriteHelperAsync, fFlags);
2381
2382 IoCtx.Req.Io.pImageParentOverride = pImageParentOverride;
2383 IoCtx.Req.Io.cImagesRead = cImagesRead;
2384 IoCtx.pIoCtxParent = NULL;
2385 IoCtx.Type.Root.pfnComplete = vdIoCtxSyncComplete;
2386 IoCtx.Type.Root.pvUser1 = pDisk;
2387 IoCtx.Type.Root.pvUser2 = hEventComplete;
2388 if (RT_SUCCESS(rc))
2389 rc = vdIoCtxProcessSync(&IoCtx, hEventComplete);
2390
2391 RTSemEventDestroy(hEventComplete);
2392 return rc;
2393}
2394
2395/**
2396 * internal: write buffer to the image, taking care of block boundaries and
2397 * write optimizations.
2398 */
2399static int vdWriteHelper(PVBOXHDD pDisk, PVDIMAGE pImage, uint64_t uOffset,
2400 const void *pvBuf, size_t cbWrite, uint32_t fFlags)
2401{
2402 return vdWriteHelperEx(pDisk, pImage, NULL, uOffset, pvBuf, cbWrite,
2403 fFlags, 0);
2404}
2405
2406/**
2407 * Internal: Copies the content of one disk to another one applying optimizations
2408 * to speed up the copy process if possible.
2409 */
2410static int vdCopyHelper(PVBOXHDD pDiskFrom, PVDIMAGE pImageFrom, PVBOXHDD pDiskTo,
2411 uint64_t cbSize, unsigned cImagesFromRead, unsigned cImagesToRead,
2412 bool fSuppressRedundantIo, PVDINTERFACEPROGRESS pIfProgress,
2413 PVDINTERFACEPROGRESS pDstIfProgress)
2414{
2415 int rc = VINF_SUCCESS;
2416 int rc2;
2417 uint64_t uOffset = 0;
2418 uint64_t cbRemaining = cbSize;
2419 void *pvBuf = NULL;
2420 bool fLockReadFrom = false;
2421 bool fLockWriteTo = false;
2422 bool fBlockwiseCopy = false;
2423 unsigned uProgressOld = 0;
2424
2425 LogFlowFunc(("pDiskFrom=%#p pImageFrom=%#p pDiskTo=%#p cbSize=%llu cImagesFromRead=%u cImagesToRead=%u fSuppressRedundantIo=%RTbool pIfProgress=%#p pDstIfProgress=%#p\n",
2426 pDiskFrom, pImageFrom, pDiskTo, cbSize, cImagesFromRead, cImagesToRead, fSuppressRedundantIo, pIfProgress, pDstIfProgress));
2427
2428 if ( (fSuppressRedundantIo || (cImagesFromRead > 0))
2429 && RTListIsEmpty(&pDiskFrom->ListFilterChainRead))
2430 fBlockwiseCopy = true;
2431
2432 /* Allocate tmp buffer. */
2433 pvBuf = RTMemTmpAlloc(VD_MERGE_BUFFER_SIZE);
2434 if (!pvBuf)
2435 return VERR_NO_MEMORY;
2436
2437 do
2438 {
2439 size_t cbThisRead = RT_MIN(VD_MERGE_BUFFER_SIZE, cbRemaining);
2440
2441 /* Note that we don't attempt to synchronize cross-disk accesses.
2442 * It wouldn't be very difficult to do, just the lock order would
2443 * need to be defined somehow to prevent deadlocks. Postpone such
2444 * magic as there is no use case for this. */
2445
2446 rc2 = vdThreadStartRead(pDiskFrom);
2447 AssertRC(rc2);
2448 fLockReadFrom = true;
2449
2450 if (fBlockwiseCopy)
2451 {
2452 RTSGSEG SegmentBuf;
2453 RTSGBUF SgBuf;
2454 VDIOCTX IoCtx;
2455
2456 SegmentBuf.pvSeg = pvBuf;
2457 SegmentBuf.cbSeg = VD_MERGE_BUFFER_SIZE;
2458 RTSgBufInit(&SgBuf, &SegmentBuf, 1);
2459 vdIoCtxInit(&IoCtx, pDiskFrom, VDIOCTXTXDIR_READ, 0, 0, NULL,
2460 &SgBuf, NULL, NULL, VDIOCTX_FLAGS_SYNC);
2461
2462 /* Read the source data. */
2463 rc = pImageFrom->Backend->pfnRead(pImageFrom->pBackendData,
2464 uOffset, cbThisRead, &IoCtx,
2465 &cbThisRead);
2466
2467 if ( rc == VERR_VD_BLOCK_FREE
2468 && cImagesFromRead != 1)
2469 {
2470 unsigned cImagesToProcess = cImagesFromRead;
2471
2472 for (PVDIMAGE pCurrImage = pImageFrom->pPrev;
2473 pCurrImage != NULL && rc == VERR_VD_BLOCK_FREE;
2474 pCurrImage = pCurrImage->pPrev)
2475 {
2476 rc = pCurrImage->Backend->pfnRead(pCurrImage->pBackendData,
2477 uOffset, cbThisRead,
2478 &IoCtx, &cbThisRead);
2479 if (cImagesToProcess == 1)
2480 break;
2481 else if (cImagesToProcess > 0)
2482 cImagesToProcess--;
2483 }
2484 }
2485 }
2486 else
2487 rc = vdReadHelper(pDiskFrom, pImageFrom, uOffset, pvBuf, cbThisRead,
2488 false /* fUpdateCache */);
2489
2490 if (RT_FAILURE(rc) && rc != VERR_VD_BLOCK_FREE)
2491 break;
2492
2493 rc2 = vdThreadFinishRead(pDiskFrom);
2494 AssertRC(rc2);
2495 fLockReadFrom = false;
2496
2497 if (rc != VERR_VD_BLOCK_FREE)
2498 {
2499 rc2 = vdThreadStartWrite(pDiskTo);
2500 AssertRC(rc2);
2501 fLockWriteTo = true;
2502
2503 /* Only do collapsed I/O if we are copying the data blockwise. */
2504 rc = vdWriteHelperEx(pDiskTo, pDiskTo->pLast, NULL, uOffset, pvBuf,
2505 cbThisRead, VDIOCTX_FLAGS_DONT_SET_MODIFIED_FLAG /* fFlags */,
2506 fBlockwiseCopy ? cImagesToRead : 0);
2507 if (RT_FAILURE(rc))
2508 break;
2509
2510 rc2 = vdThreadFinishWrite(pDiskTo);
2511 AssertRC(rc2);
2512 fLockWriteTo = false;
2513 }
2514 else /* Don't propagate the error to the outside */
2515 rc = VINF_SUCCESS;
2516
2517 uOffset += cbThisRead;
2518 cbRemaining -= cbThisRead;
2519
2520 unsigned uProgressNew = uOffset * 99 / cbSize;
2521 if (uProgressNew != uProgressOld)
2522 {
2523 uProgressOld = uProgressNew;
2524
2525 if (pIfProgress && pIfProgress->pfnProgress)
2526 {
2527 rc = pIfProgress->pfnProgress(pIfProgress->Core.pvUser,
2528 uProgressOld);
2529 if (RT_FAILURE(rc))
2530 break;
2531 }
2532 if (pDstIfProgress && pDstIfProgress->pfnProgress)
2533 {
2534 rc = pDstIfProgress->pfnProgress(pDstIfProgress->Core.pvUser,
2535 uProgressOld);
2536 if (RT_FAILURE(rc))
2537 break;
2538 }
2539 }
2540 } while (uOffset < cbSize);
2541
2542 RTMemTmpFree(pvBuf);
2543
2544 if (fLockReadFrom)
2545 {
2546 rc2 = vdThreadFinishRead(pDiskFrom);
2547 AssertRC(rc2);
2548 }
2549
2550 if (fLockWriteTo)
2551 {
2552 rc2 = vdThreadFinishWrite(pDiskTo);
2553 AssertRC(rc2);
2554 }
2555
2556 LogFlowFunc(("returns rc=%Rrc\n", rc));
2557 return rc;
2558}
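/*
 * Illustrative sketch (hypothetical, not part of VD.cpp): vdCopyHelper() copies
 * the disk in VD_MERGE_BUFFER_SIZE chunks and reports progress only when the
 * integer percentage (offset * 99 / size) actually changes, so the callback is
 * not hammered once per chunk. The plain C sketch below isolates that loop;
 * copyWithProgress(), PFNPROGRESS and printProgress() are invented names and the
 * actual read/write of a chunk is left as a placeholder.
 *
 *   #include <stddef.h>
 *   #include <stdint.h>
 *   #include <stdio.h>
 *
 *   typedef int (*PFNPROGRESS)(void *pvUser, unsigned uPercent);
 *
 *   static int copyWithProgress(uint64_t cbSize, size_t cbChunk,
 *                               PFNPROGRESS pfnProgress, void *pvUser)
 *   {
 *       uint64_t uOffset      = 0;
 *       unsigned uProgressOld = 0;
 *
 *       while (uOffset < cbSize)
 *       {
 *           uint64_t cbThis = cbSize - uOffset < cbChunk ? cbSize - uOffset : cbChunk;
 *
 *           // ... read cbThis bytes at uOffset from the source and write them
 *           //     to the destination here ...
 *
 *           uOffset += cbThis;
 *
 *           unsigned uProgressNew = (unsigned)(uOffset * 99 / cbSize);
 *           if (uProgressNew != uProgressOld && pfnProgress)
 *           {
 *               uProgressOld = uProgressNew;
 *               int rc = pfnProgress(pvUser, uProgressNew);
 *               if (rc != 0)
 *                   return rc; // the callback asked us to abort
 *           }
 *       }
 *       return 0;
 *   }
 *
 *   static int printProgress(void *pvUser, unsigned uPercent)
 *   {
 *       (void)pvUser;
 *       printf("%u%%\n", uPercent);
 *       return 0;
 *   }
 *
 *   int main(void)
 *   {
 *       // 10 MiB in 1 MiB chunks: prints 9%, 19%, ..., 99%.
 *       return copyWithProgress(10u * 1024 * 1024, 1024 * 1024, printProgress, NULL);
 *   }
 */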
2559
2560/**
2561 * Async flush helper used when marking the disk as modified.
2562 */
2563static DECLCALLBACK(int) vdSetModifiedHelperAsync(PVDIOCTX pIoCtx)
2564{
2565 int rc = VINF_SUCCESS;
2566 PVDIMAGE pImage = pIoCtx->Req.Io.pImageCur;
2567
2568 rc = pImage->Backend->pfnFlush(pImage->pBackendData, pIoCtx);
2569 if (rc == VERR_VD_ASYNC_IO_IN_PROGRESS)
2570 rc = VINF_SUCCESS;
2571
2572 return rc;
2573}
2574
2575/**
2576 * internal: mark the disk as modified - async version.
2577 */
2578static int vdSetModifiedFlagAsync(PVBOXHDD pDisk, PVDIOCTX pIoCtx)
2579{
2580 int rc = VINF_SUCCESS;
2581
2582 VD_IS_LOCKED(pDisk);
2583
2584 pDisk->uModified |= VD_IMAGE_MODIFIED_FLAG;
2585 if (pDisk->uModified & VD_IMAGE_MODIFIED_FIRST)
2586 {
2587 rc = vdIoCtxLockDisk(pDisk, pIoCtx);
2588 if (RT_SUCCESS(rc))
2589 {
2590 pDisk->uModified &= ~VD_IMAGE_MODIFIED_FIRST;
2591
2592 /* First modify, so create a UUID and ensure it's written to disk. */
2593 vdResetModifiedFlag(pDisk);
2594
2595 if (!(pDisk->uModified & VD_IMAGE_MODIFIED_DISABLE_UUID_UPDATE))
2596 {
2597 PVDIOCTX pIoCtxFlush = vdIoCtxChildAlloc(pDisk, VDIOCTXTXDIR_FLUSH,
2598 0, 0, pDisk->pLast,
2599 NULL, pIoCtx, 0, 0, NULL,
2600 vdSetModifiedHelperAsync);
2601
2602 if (pIoCtxFlush)
2603 {
2604 rc = vdIoCtxProcessLocked(pIoCtxFlush);
2605 if (rc == VINF_VD_ASYNC_IO_FINISHED)
2606 {
2607 vdIoCtxUnlockDisk(pDisk, pIoCtx, false /* fProcessBlockedReqs */);
2608 vdIoCtxFree(pDisk, pIoCtxFlush);
2609 }
2610 else if (rc == VERR_VD_ASYNC_IO_IN_PROGRESS)
2611 {
2612 ASMAtomicIncU32(&pIoCtx->cDataTransfersPending);
2613 pIoCtx->fFlags |= VDIOCTX_FLAGS_BLOCKED;
2614 }
2615 else /* Another error */
2616 vdIoCtxFree(pDisk, pIoCtxFlush);
2617 }
2618 else
2619 rc = VERR_NO_MEMORY;
2620 }
2621 }
2622 }
2623
2624 return rc;
2625}
2626
2627static DECLCALLBACK(int) vdWriteHelperCommitAsync(PVDIOCTX pIoCtx)
2628{
2629 int rc = VINF_SUCCESS;
2630 PVDIMAGE pImage = pIoCtx->Req.Io.pImageStart;
2631 size_t cbPreRead = pIoCtx->Type.Child.cbPreRead;
2632 size_t cbPostRead = pIoCtx->Type.Child.cbPostRead;
2633 size_t cbThisWrite = pIoCtx->Type.Child.cbTransferParent;
2634
2635 LogFlowFunc(("pIoCtx=%#p\n", pIoCtx));
2636 rc = pImage->Backend->pfnWrite(pImage->pBackendData,
2637 pIoCtx->Req.Io.uOffset - cbPreRead,
2638 cbPreRead + cbThisWrite + cbPostRead,
2639 pIoCtx, NULL, &cbPreRead, &cbPostRead, 0);
2640 Assert(rc != VERR_VD_BLOCK_FREE);
2641 Assert(rc == VERR_VD_NOT_ENOUGH_METADATA || cbPreRead == 0);
2642 Assert(rc == VERR_VD_NOT_ENOUGH_METADATA || cbPostRead == 0);
2643 if (rc == VERR_VD_ASYNC_IO_IN_PROGRESS)
2644 rc = VINF_SUCCESS;
2645 else if (rc == VERR_VD_IOCTX_HALT)
2646 {
2647 pIoCtx->fFlags |= VDIOCTX_FLAGS_BLOCKED;
2648 rc = VINF_SUCCESS;
2649 }
2650
2651 LogFlowFunc(("returns rc=%Rrc\n", rc));
2652 return rc;
2653}
2654
2655static DECLCALLBACK(int) vdWriteHelperOptimizedCmpAndWriteAsync(PVDIOCTX pIoCtx)
2656{
2657 int rc = VINF_SUCCESS;
2658 size_t cbThisWrite = 0;
2659 size_t cbPreRead = pIoCtx->Type.Child.cbPreRead;
2660 size_t cbPostRead = pIoCtx->Type.Child.cbPostRead;
2661 size_t cbWriteCopy = pIoCtx->Type.Child.Write.Optimized.cbWriteCopy;
2662 size_t cbFill = pIoCtx->Type.Child.Write.Optimized.cbFill;
2663 size_t cbReadImage = pIoCtx->Type.Child.Write.Optimized.cbReadImage;
2664 PVDIOCTX pIoCtxParent = pIoCtx->pIoCtxParent;
2665
2666 LogFlowFunc(("pIoCtx=%#p\n", pIoCtx));
2667
2668 AssertPtr(pIoCtxParent);
2669 Assert(!pIoCtxParent->pIoCtxParent);
2670 Assert(!pIoCtx->Req.Io.cbTransferLeft && !pIoCtx->cMetaTransfersPending);
2671
2672 vdIoCtxChildReset(pIoCtx);
2673 cbThisWrite = pIoCtx->Type.Child.cbTransferParent;
2674 RTSgBufAdvance(&pIoCtx->Req.Io.SgBuf, cbPreRead);
2675
2676 /* Check if the write would modify anything in this block. */
2677 if (!RTSgBufCmp(&pIoCtx->Req.Io.SgBuf, &pIoCtxParent->Req.Io.SgBuf, cbThisWrite))
2678 {
2679 RTSGBUF SgBufSrcTmp;
2680
2681 RTSgBufClone(&SgBufSrcTmp, &pIoCtxParent->Req.Io.SgBuf);
2682 RTSgBufAdvance(&SgBufSrcTmp, cbThisWrite);
2683 RTSgBufAdvance(&pIoCtx->Req.Io.SgBuf, cbThisWrite);
2684
2685 if (!cbWriteCopy || !RTSgBufCmp(&pIoCtx->Req.Io.SgBuf, &SgBufSrcTmp, cbWriteCopy))
2686 {
2687 /* Block is completely unchanged, so no need to write anything. */
2688 LogFlowFunc(("Block didn't changed\n"));
2689 ASMAtomicWriteU32(&pIoCtx->Req.Io.cbTransferLeft, 0);
2690 RTSgBufAdvance(&pIoCtxParent->Req.Io.SgBuf, cbThisWrite);
2691 return VINF_VD_ASYNC_IO_FINISHED;
2692 }
2693 }
2694
2695 /* Copy the data to the right place in the buffer. */
2696 RTSgBufReset(&pIoCtx->Req.Io.SgBuf);
2697 RTSgBufAdvance(&pIoCtx->Req.Io.SgBuf, cbPreRead);
2698 vdIoCtxCopy(pIoCtx, pIoCtxParent, cbThisWrite);
2699
2700 /* Handle the data that goes after the write to fill the block. */
2701 if (cbPostRead)
2702 {
2703 /* Now assemble the remaining data. */
2704 if (cbWriteCopy)
2705 {
2706 /*
2707 * The S/G buffer of the parent needs to be cloned because
2708 * we are not allowed to modify its state.
2709 */
2710 RTSGBUF SgBufParentTmp;
2711
2712 RTSgBufClone(&SgBufParentTmp, &pIoCtxParent->Req.Io.SgBuf);
2713 RTSgBufCopy(&pIoCtx->Req.Io.SgBuf, &SgBufParentTmp, cbWriteCopy);
2714 }
2715
2716 /* Zero out the remainder of this block. Will never be visible, as this
2717 * is beyond the limit of the image. */
2718 if (cbFill)
2719 {
2720 RTSgBufAdvance(&pIoCtx->Req.Io.SgBuf, cbReadImage);
2721 vdIoCtxSet(pIoCtx, '\0', cbFill);
2722 }
2723 }
2724
2725 /* Write the full block to the virtual disk. */
2726 RTSgBufReset(&pIoCtx->Req.Io.SgBuf);
2727 pIoCtx->pfnIoCtxTransferNext = vdWriteHelperCommitAsync;
2728
2729 return rc;
2730}
2731
2732static DECLCALLBACK(int) vdWriteHelperOptimizedPreReadAsync(PVDIOCTX pIoCtx)
2733{
2734 int rc = VINF_SUCCESS;
2735
2736 LogFlowFunc(("pIoCtx=%#p\n", pIoCtx));
2737
2738 pIoCtx->fFlags |= VDIOCTX_FLAGS_ZERO_FREE_BLOCKS;
2739
2740 if ( pIoCtx->Req.Io.cbTransferLeft
2741 && !pIoCtx->cDataTransfersPending)
2742 rc = vdReadHelperAsync(pIoCtx);
2743
2744 if ( ( RT_SUCCESS(rc)
2745 || (rc == VERR_VD_ASYNC_IO_IN_PROGRESS))
2746 && ( pIoCtx->Req.Io.cbTransferLeft
2747 || pIoCtx->cMetaTransfersPending))
2748 rc = VERR_VD_ASYNC_IO_IN_PROGRESS;
2749 else
2750 pIoCtx->pfnIoCtxTransferNext = vdWriteHelperOptimizedCmpAndWriteAsync;
2751
2752 return rc;
2753}
2754
2755/**
2756 * internal: write a complete block (only used for diff images), taking the
2757 * remaining data from parent images. This implementation optimizes out writes
2758 * that do not change the data relative to the state as of the parent images.
2759 * All backends which support differential/growing images support this - async version.
2760 */
2761static DECLCALLBACK(int) vdWriteHelperOptimizedAsync(PVDIOCTX pIoCtx)
2762{
2763 PVBOXHDD pDisk = pIoCtx->pDisk;
2764 uint64_t uOffset = pIoCtx->Type.Child.uOffsetSaved;
2765 size_t cbThisWrite = pIoCtx->Type.Child.cbTransferParent;
2766 size_t cbPreRead = pIoCtx->Type.Child.cbPreRead;
2767 size_t cbPostRead = pIoCtx->Type.Child.cbPostRead;
2768 size_t cbWrite = pIoCtx->Type.Child.cbWriteParent;
2769 size_t cbFill = 0;
2770 size_t cbWriteCopy = 0;
2771 size_t cbReadImage = 0;
2772
2773 LogFlowFunc(("pIoCtx=%#p\n", pIoCtx));
2774
2775 AssertPtr(pIoCtx->pIoCtxParent);
2776 Assert(!pIoCtx->pIoCtxParent->pIoCtxParent);
2777
2778 if (cbPostRead)
2779 {
2780 /* Figure out how much we cannot read from the image, because
2781 * the last block to write might exceed the nominal size of the
2782 * image for technical reasons. */
2783 if (uOffset + cbThisWrite + cbPostRead > pDisk->cbSize)
2784 cbFill = uOffset + cbThisWrite + cbPostRead - pDisk->cbSize;
2785
2786 /* If we have data to be written, use that instead of reading
2787 * data from the image. */
2788 if (cbWrite > cbThisWrite)
2789 cbWriteCopy = RT_MIN(cbWrite - cbThisWrite, cbPostRead);
2790
2791 /* The rest must be read from the image. */
2792 cbReadImage = cbPostRead - cbWriteCopy - cbFill;
2793 }
2794
2795 pIoCtx->Type.Child.Write.Optimized.cbFill = cbFill;
2796 pIoCtx->Type.Child.Write.Optimized.cbWriteCopy = cbWriteCopy;
2797 pIoCtx->Type.Child.Write.Optimized.cbReadImage = cbReadImage;
2798
2799 /* Read the entire data of the block so that we can compare whether it will
2800 * be modified by the write or not. */
2801 size_t cbTmp = cbPreRead + cbThisWrite + cbPostRead - cbFill; Assert(cbTmp == (uint32_t)cbTmp);
2802 pIoCtx->Req.Io.cbTransferLeft = (uint32_t)cbTmp;
2803 pIoCtx->Req.Io.cbTransfer = pIoCtx->Req.Io.cbTransferLeft;
2804 pIoCtx->Req.Io.uOffset -= cbPreRead;
2805
2806 /* Next step */
2807 pIoCtx->pfnIoCtxTransferNext = vdWriteHelperOptimizedPreReadAsync;
2808 return VINF_SUCCESS;
2809}
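/*
 * Worked example (hypothetical numbers, not part of VD.cpp): when a partial write
 * hits an unallocated block, the whole block has to be produced, so the post-read
 * part of the block is split into data still covered by the caller's write
 * (cbWriteCopy), padding past the nominal disk size that is simply zeroed (cbFill),
 * and the remainder that must be read from the images (cbReadImage). The sketch
 * below repeats the size arithmetic used by vdWriteHelperOptimizedAsync() and
 * vdWriteHelperStandardAsync() with made-up figures; splitPostRead() is an
 * invented helper.
 *
 *   #include <stdint.h>
 *   #include <stdio.h>
 *
 *   static void splitPostRead(uint64_t uOffset,      // start of this write
 *                             uint64_t cbThisWrite,  // bytes written into this block
 *                             uint64_t cbWrite,      // bytes remaining of the caller's write
 *                             uint64_t cbPostRead,   // block bytes after the written range
 *                             uint64_t cbDisk,       // nominal disk size
 *                             uint64_t *pcbWriteCopy, uint64_t *pcbFill, uint64_t *pcbReadImage)
 *   {
 *       uint64_t cbFill = 0;
 *       if (uOffset + cbThisWrite + cbPostRead > cbDisk)
 *           cbFill = uOffset + cbThisWrite + cbPostRead - cbDisk; // beyond the image end: zeroed
 *
 *       uint64_t cbWriteCopy = 0;
 *       if (cbWrite > cbThisWrite)                                // caller still has data for the tail
 *       {
 *           cbWriteCopy = cbWrite - cbThisWrite;
 *           if (cbWriteCopy > cbPostRead)
 *               cbWriteCopy = cbPostRead;
 *       }
 *
 *       *pcbWriteCopy = cbWriteCopy;
 *       *pcbFill      = cbFill;
 *       *pcbReadImage = cbPostRead - cbWriteCopy - cbFill;        // rest comes from the images
 *   }
 *
 *   int main(void)
 *   {
 *       uint64_t cbWriteCopy, cbFill, cbReadImage;
 *
 *       // Mid-disk block: part of the tail is covered by the caller's own data.
 *       splitPostRead(8192, 4096, 10240, 53248, 1048576, &cbWriteCopy, &cbFill, &cbReadImage);
 *       printf("copy=%llu fill=%llu read=%llu\n", (unsigned long long)cbWriteCopy,
 *              (unsigned long long)cbFill, (unsigned long long)cbReadImage); // 6144 0 47104
 *
 *       // Last block of a 1000000 byte disk: the block sticks out past the disk end.
 *       splitPostRead(995328, 2048, 2048, 51200, 1000000, &cbWriteCopy, &cbFill, &cbReadImage);
 *       printf("copy=%llu fill=%llu read=%llu\n", (unsigned long long)cbWriteCopy,
 *              (unsigned long long)cbFill, (unsigned long long)cbReadImage); // 0 48576 2624
 *       return 0;
 *   }
 */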
2810
2811static DECLCALLBACK(int) vdWriteHelperStandardReadImageAsync(PVDIOCTX pIoCtx)
2812{
2813 int rc = VINF_SUCCESS;
2814
2815 LogFlowFunc(("pIoCtx=%#p\n", pIoCtx));
2816
2817 pIoCtx->fFlags |= VDIOCTX_FLAGS_ZERO_FREE_BLOCKS;
2818
2819 if ( pIoCtx->Req.Io.cbTransferLeft
2820 && !pIoCtx->cDataTransfersPending)
2821 rc = vdReadHelperAsync(pIoCtx);
2822
2823 if ( RT_SUCCESS(rc)
2824 && ( pIoCtx->Req.Io.cbTransferLeft
2825 || pIoCtx->cMetaTransfersPending))
2826 rc = VERR_VD_ASYNC_IO_IN_PROGRESS;
2827 else
2828 {
2829 size_t cbFill = pIoCtx->Type.Child.Write.Optimized.cbFill;
2830
2831 /* Zero out the remainder of this block. Will never be visible, as this
2832 * is beyond the limit of the image. */
2833 if (cbFill)
2834 vdIoCtxSet(pIoCtx, '\0', cbFill);
2835
2836 /* Write the full block to the virtual disk. */
2837 RTSgBufReset(&pIoCtx->Req.Io.SgBuf);
2838
2839 vdIoCtxChildReset(pIoCtx);
2840 pIoCtx->pfnIoCtxTransferNext = vdWriteHelperCommitAsync;
2841 }
2842
2843 return rc;
2844}
2845
2846static DECLCALLBACK(int) vdWriteHelperStandardAssemble(PVDIOCTX pIoCtx)
2847{
2848 int rc = VINF_SUCCESS;
2849 size_t cbPostRead = pIoCtx->Type.Child.cbPostRead;
2850 size_t cbThisWrite = pIoCtx->Type.Child.cbTransferParent;
2851 PVDIOCTX pIoCtxParent = pIoCtx->pIoCtxParent;
2852
2853 LogFlowFunc(("pIoCtx=%#p\n", pIoCtx));
2854
2855 vdIoCtxCopy(pIoCtx, pIoCtxParent, cbThisWrite);
2856 if (cbPostRead)
2857 {
2858 size_t cbFill = pIoCtx->Type.Child.Write.Optimized.cbFill;
2859 size_t cbWriteCopy = pIoCtx->Type.Child.Write.Optimized.cbWriteCopy;
2860 size_t cbReadImage = pIoCtx->Type.Child.Write.Optimized.cbReadImage;
2861
2862 /* Now assemble the remaining data. */
2863 if (cbWriteCopy)
2864 {
2865 /*
2866 * The S/G buffer of the parent needs to be cloned because
2867 * it is not allowed to modify the state.
2868 */
2869 RTSGBUF SgBufParentTmp;
2870
2871 RTSgBufClone(&SgBufParentTmp, &pIoCtxParent->Req.Io.SgBuf);
2872 RTSgBufCopy(&pIoCtx->Req.Io.SgBuf, &SgBufParentTmp, cbWriteCopy);
2873 }
2874
2875 if (cbReadImage)
2876 {
2877 /* Read remaining data. */
2878 pIoCtx->pfnIoCtxTransferNext = vdWriteHelperStandardReadImageAsync;
2879
2880 /* Read the data that goes before the write to fill the block. */
2881 pIoCtx->Req.Io.cbTransferLeft = (uint32_t)cbReadImage; Assert(cbReadImage == (uint32_t)cbReadImage);
2882 pIoCtx->Req.Io.cbTransfer = pIoCtx->Req.Io.cbTransferLeft;
2883 pIoCtx->Req.Io.uOffset += cbWriteCopy;
2884 }
2885 else
2886 {
2887 /* Zero out the remainder of this block. Will never be visible, as this
2888 * is beyond the limit of the image. */
2889 if (cbFill)
2890 vdIoCtxSet(pIoCtx, '\0', cbFill);
2891
2892 /* Write the full block to the virtual disk. */
2893 RTSgBufReset(&pIoCtx->Req.Io.SgBuf);
2894 vdIoCtxChildReset(pIoCtx);
2895 pIoCtx->pfnIoCtxTransferNext = vdWriteHelperCommitAsync;
2896 }
2897 }
2898 else
2899 {
2900 /* Write the full block to the virtual disk. */
2901 RTSgBufReset(&pIoCtx->Req.Io.SgBuf);
2902 vdIoCtxChildReset(pIoCtx);
2903 pIoCtx->pfnIoCtxTransferNext = vdWriteHelperCommitAsync;
2904 }
2905
2906 return rc;
2907}
2908
2909static DECLCALLBACK(int) vdWriteHelperStandardPreReadAsync(PVDIOCTX pIoCtx)
2910{
2911 int rc = VINF_SUCCESS;
2912
2913 LogFlowFunc(("pIoCtx=%#p\n", pIoCtx));
2914
2915 pIoCtx->fFlags |= VDIOCTX_FLAGS_ZERO_FREE_BLOCKS;
2916
2917 if ( pIoCtx->Req.Io.cbTransferLeft
2918 && !pIoCtx->cDataTransfersPending)
2919 rc = vdReadHelperAsync(pIoCtx);
2920
2921 if ( RT_SUCCESS(rc)
2922 && ( pIoCtx->Req.Io.cbTransferLeft
2923 || pIoCtx->cMetaTransfersPending))
2924 rc = VERR_VD_ASYNC_IO_IN_PROGRESS;
2925 else
2926 pIoCtx->pfnIoCtxTransferNext = vdWriteHelperStandardAssemble;
2927
2928 return rc;
2929}
2930
2931static DECLCALLBACK(int) vdWriteHelperStandardAsync(PVDIOCTX pIoCtx)
2932{
2933 PVBOXHDD pDisk = pIoCtx->pDisk;
2934 uint64_t uOffset = pIoCtx->Type.Child.uOffsetSaved;
2935 size_t cbThisWrite = pIoCtx->Type.Child.cbTransferParent;
2936 size_t cbPreRead = pIoCtx->Type.Child.cbPreRead;
2937 size_t cbPostRead = pIoCtx->Type.Child.cbPostRead;
2938 size_t cbWrite = pIoCtx->Type.Child.cbWriteParent;
2939 size_t cbFill = 0;
2940 size_t cbWriteCopy = 0;
2941 size_t cbReadImage = 0;
2942
2943 LogFlowFunc(("pIoCtx=%#p\n", pIoCtx));
2944
2945 AssertPtr(pIoCtx->pIoCtxParent);
2946 Assert(!pIoCtx->pIoCtxParent->pIoCtxParent);
2947
2948 /* Calculate the amount of data to read that goes after the write to fill the block. */
2949 if (cbPostRead)
2950 {
2951 /* If we have data to be written, use that instead of reading
2952 * data from the image. */
2953 if (cbWrite > cbThisWrite)
2954 cbWriteCopy = RT_MIN(cbWrite - cbThisWrite, cbPostRead);
2955 else
2956 cbWriteCopy = 0;
2957
2958 /* Figure out how much we cannot read from the image, because
2959 * the last block to write might exceed the nominal size of the
2960 * image for technical reasons. */
2961 if (uOffset + cbThisWrite + cbPostRead > pDisk->cbSize)
2962 cbFill = uOffset + cbThisWrite + cbPostRead - pDisk->cbSize;
2963
2964 /* The rest must be read from the image. */
2965 cbReadImage = cbPostRead - cbWriteCopy - cbFill;
2966 }
2967
2968 pIoCtx->Type.Child.Write.Optimized.cbFill = cbFill;
2969 pIoCtx->Type.Child.Write.Optimized.cbWriteCopy = cbWriteCopy;
2970 pIoCtx->Type.Child.Write.Optimized.cbReadImage = cbReadImage;
2971
2972 /* Next step */
2973 if (cbPreRead)
2974 {
2975 pIoCtx->pfnIoCtxTransferNext = vdWriteHelperStandardPreReadAsync;
2976
2977 /* Read the data that goes before the write to fill the block. */
2978 pIoCtx->Req.Io.cbTransferLeft = (uint32_t)cbPreRead; Assert(cbPreRead == (uint32_t)cbPreRead);
2979 pIoCtx->Req.Io.cbTransfer = pIoCtx->Req.Io.cbTransferLeft;
2980 pIoCtx->Req.Io.uOffset -= cbPreRead;
2981 }
2982 else
2983 pIoCtx->pfnIoCtxTransferNext = vdWriteHelperStandardAssemble;
2984
2985 return VINF_SUCCESS;
2986}
2987
2988/**
2989 * internal: write buffer to the image, taking care of block boundaries and
2990 * write optimizations - async version.
2991 */
2992static DECLCALLBACK(int) vdWriteHelperAsync(PVDIOCTX pIoCtx)
2993{
2994 int rc;
2995 size_t cbWrite = pIoCtx->Req.Io.cbTransfer;
2996 uint64_t uOffset = pIoCtx->Req.Io.uOffset;
2997 PVDIMAGE pImage = pIoCtx->Req.Io.pImageCur;
2998 PVBOXHDD pDisk = pIoCtx->pDisk;
2999 unsigned fWrite;
3000 size_t cbThisWrite;
3001 size_t cbPreRead, cbPostRead;
3002
3003 /* Apply write filter chain here if it was not done already. */
3004 if (!(pIoCtx->fFlags & VDIOCTX_FLAGS_WRITE_FILTER_APPLIED))
3005 {
3006 rc = vdFilterChainApplyWrite(pDisk, uOffset, cbWrite, pIoCtx);
3007 if (RT_FAILURE(rc))
3008 return rc;
3009 pIoCtx->fFlags |= VDIOCTX_FLAGS_WRITE_FILTER_APPLIED;
3010 }
3011
3012 if (!(pIoCtx->fFlags & VDIOCTX_FLAGS_DONT_SET_MODIFIED_FLAG))
3013 {
3014 rc = vdSetModifiedFlagAsync(pDisk, pIoCtx);
3015 if (RT_FAILURE(rc)) /* Includes I/O in progress. */
3016 return rc;
3017 }
3018
3019 rc = vdDiscardSetRangeAllocated(pDisk, uOffset, cbWrite);
3020 if (RT_FAILURE(rc))
3021 return rc;
3022
3023 /* Loop until all written. */
3024 do
3025 {
3026 /* Try to write the possibly partial block to the last opened image.
3027 * This works when the block is already allocated in this image or
3028 * if it is a full-block write (and allocation isn't suppressed below).
3029 * For image formats which don't support zero blocks, it's beneficial
3030 * to avoid unnecessarily allocating unchanged blocks. This prevents
3031 * unwanted expanding of images. VMDK is an example. */
3032 cbThisWrite = cbWrite;
3033
3034 /*
3035 * Check whether there is a full block write in progress which was not allocated.
3036 * Defer I/O if the range interferes.
3037 */
3038 if ( pDisk->pIoCtxLockOwner != NIL_VDIOCTX
3039 && uOffset >= pDisk->uOffsetStartLocked
3040 && uOffset < pDisk->uOffsetEndLocked)
3041 {
3042 Log(("Interferring write while allocating a new block => deferring write\n"));
3043 vdIoCtxDefer(pDisk, pIoCtx);
3044 rc = VERR_VD_ASYNC_IO_IN_PROGRESS;
3045 break;
3046 }
3047
3048 fWrite = (pImage->uOpenFlags & VD_OPEN_FLAGS_HONOR_SAME)
3049 ? 0 : VD_WRITE_NO_ALLOC;
3050 rc = pImage->Backend->pfnWrite(pImage->pBackendData, uOffset, cbThisWrite,
3051 pIoCtx, &cbThisWrite, &cbPreRead, &cbPostRead,
3052 fWrite);
3053 if (rc == VERR_VD_BLOCK_FREE)
3054 {
3055 /* Lock the disk. */
3056 rc = vdIoCtxLockDisk(pDisk, pIoCtx);
3057 if (RT_SUCCESS(rc))
3058 {
3059 /*
3060 * Allocate segment and buffer in one go.
3061 * A bit hackish but avoids the need to allocate memory twice.
3062 */
3063 PRTSGBUF pTmp = (PRTSGBUF)RTMemAlloc(cbPreRead + cbThisWrite + cbPostRead + sizeof(RTSGSEG) + sizeof(RTSGBUF));
3064 AssertBreakStmt(pTmp, rc = VERR_NO_MEMORY);
3065 PRTSGSEG pSeg = (PRTSGSEG)(pTmp + 1);
3066
3067 pSeg->pvSeg = pSeg + 1;
3068 pSeg->cbSeg = cbPreRead + cbThisWrite + cbPostRead;
3069 RTSgBufInit(pTmp, pSeg, 1);
3070
3071 PVDIOCTX pIoCtxWrite = vdIoCtxChildAlloc(pDisk, VDIOCTXTXDIR_WRITE,
3072 uOffset, pSeg->cbSeg, pImage,
3073 pTmp,
3074 pIoCtx, cbThisWrite,
3075 cbWrite,
3076 pTmp,
3077 (pImage->uOpenFlags & VD_OPEN_FLAGS_HONOR_SAME)
3078 ? vdWriteHelperStandardAsync
3079 : vdWriteHelperOptimizedAsync);
3080 if (!VALID_PTR(pIoCtxWrite))
3081 {
3082 RTMemFree(pTmp);
3083 rc = VERR_NO_MEMORY;
3084 break;
3085 }
3086
3087 LogFlowFunc(("Disk is growing because of pIoCtx=%#p pIoCtxWrite=%#p\n",
3088 pIoCtx, pIoCtxWrite));
3089
3090 /* Save the current range for the growing operation to check for intersecting requests later. */
3091 pDisk->uOffsetStartLocked = uOffset - cbPreRead;
3092 pDisk->uOffsetEndLocked = uOffset + cbThisWrite + cbPostRead;
3093
3094 pIoCtxWrite->Type.Child.cbPreRead = cbPreRead;
3095 pIoCtxWrite->Type.Child.cbPostRead = cbPostRead;
3096 pIoCtxWrite->Req.Io.pImageParentOverride = pIoCtx->Req.Io.pImageParentOverride;
3097
3098 /* Process the write request */
3099 rc = vdIoCtxProcessLocked(pIoCtxWrite);
3100
3101 if (RT_FAILURE(rc) && (rc != VERR_VD_ASYNC_IO_IN_PROGRESS))
3102 {
3103 vdIoCtxUnlockDisk(pDisk, pIoCtx, false /* fProcessBlockedReqs */);
3104 vdIoCtxFree(pDisk, pIoCtxWrite);
3105 break;
3106 }
3107 else if ( rc == VINF_VD_ASYNC_IO_FINISHED
3108 && ASMAtomicCmpXchgBool(&pIoCtxWrite->fComplete, true, false))
3109 {
3110 LogFlow(("Child write request completed\n"));
3111 Assert(pIoCtx->Req.Io.cbTransferLeft >= cbThisWrite);
3112 Assert(cbThisWrite == (uint32_t)cbThisWrite);
3113 rc = pIoCtxWrite->rcReq;
3114 ASMAtomicSubU32(&pIoCtx->Req.Io.cbTransferLeft, (uint32_t)cbThisWrite);
3115 vdIoCtxUnlockDisk(pDisk, pIoCtx, false /* fProcessBlockedReqs */);
3116 vdIoCtxFree(pDisk, pIoCtxWrite);
3117 }
3118 else
3119 {
3120 LogFlow(("Child write pending\n"));
3121 ASMAtomicIncU32(&pIoCtx->cDataTransfersPending);
3122 pIoCtx->fFlags |= VDIOCTX_FLAGS_BLOCKED;
3123 rc = VERR_VD_ASYNC_IO_IN_PROGRESS;
3124 cbWrite -= cbThisWrite;
3125 uOffset += cbThisWrite;
3126 break;
3127 }
3128 }
3129 else
3130 {
3131 rc = VERR_VD_ASYNC_IO_IN_PROGRESS;
3132 break;
3133 }
3134 }
3135
3136 if (rc == VERR_VD_IOCTX_HALT)
3137 {
3138 cbWrite -= cbThisWrite;
3139 uOffset += cbThisWrite;
3140 pIoCtx->fFlags |= VDIOCTX_FLAGS_BLOCKED;
3141 break;
3142 }
3143 else if (rc == VERR_VD_NOT_ENOUGH_METADATA)
3144 break;
3145
3146 cbWrite -= cbThisWrite;
3147 uOffset += cbThisWrite;
3148 } while (cbWrite != 0 && (RT_SUCCESS(rc) || rc == VERR_VD_ASYNC_IO_IN_PROGRESS));
3149
3150 if ( rc == VERR_VD_ASYNC_IO_IN_PROGRESS
3151 || rc == VERR_VD_NOT_ENOUGH_METADATA
3152 || rc == VERR_VD_IOCTX_HALT)
3153 {
3154 /*
3155 * Tell the caller that we don't need to go back here because all
3156 * writes are initiated.
3157 */
3158 if ( !cbWrite
3159 && rc != VERR_VD_IOCTX_HALT)
3160 rc = VINF_SUCCESS;
3161
3162 pIoCtx->Req.Io.uOffset = uOffset;
3163 pIoCtx->Req.Io.cbTransfer = cbWrite;
3164 }
3165
3166 return rc;
3167}
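/*
 * Illustrative sketch (hypothetical, not part of VD.cpp): when vdWriteHelperAsync()
 * has to grow the image it allocates the S/G buffer header, the single segment
 * descriptor and the cbPreRead + cbThisWrite + cbPostRead payload in one go and
 * carves the pointers out of that single allocation, so one free releases
 * everything. The sketch below shows the same layout trick with invented SGBUF and
 * SEG types instead of the real RTSGBUF/RTSGSEG.
 *
 *   #include <stdio.h>
 *   #include <stdlib.h>
 *   #include <string.h>
 *
 *   typedef struct SEG   { void *pvSeg; size_t cbSeg; } SEG;
 *   typedef struct SGBUF { SEG *paSegs; unsigned cSegs; } SGBUF;
 *
 *   static SGBUF *sgbufAllocWithPayload(size_t cbPayload)
 *   {
 *       // One allocation: [SGBUF][SEG][payload bytes]
 *       SGBUF *pSgBuf = (SGBUF *)malloc(sizeof(SGBUF) + sizeof(SEG) + cbPayload);
 *       if (!pSgBuf)
 *           return NULL;
 *       SEG *pSeg   = (SEG *)(pSgBuf + 1); // segment descriptor right after the header
 *       pSeg->pvSeg = pSeg + 1;            // payload right after the descriptor
 *       pSeg->cbSeg = cbPayload;
 *       pSgBuf->paSegs = pSeg;
 *       pSgBuf->cSegs  = 1;
 *       return pSgBuf;
 *   }
 *
 *   int main(void)
 *   {
 *       SGBUF *pSgBuf = sgbufAllocWithPayload(4096);
 *       if (!pSgBuf)
 *           return 1;
 *       memset(pSgBuf->paSegs[0].pvSeg, 0, pSgBuf->paSegs[0].cbSeg);
 *       printf("one allocation, %zu payload bytes\n", pSgBuf->paSegs[0].cbSeg);
 *       free(pSgBuf); // a single free releases header, descriptor and payload
 *       return 0;
 *   }
 */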
3168
3169/**
3170 * Flush helper async version.
3171 */
3172static DECLCALLBACK(int) vdFlushHelperAsync(PVDIOCTX pIoCtx)
3173{
3174 int rc = VINF_SUCCESS;
3175 PVBOXHDD pDisk = pIoCtx->pDisk;
3176 PVDIMAGE pImage = pIoCtx->Req.Io.pImageCur;
3177
3178 rc = vdIoCtxLockDisk(pDisk, pIoCtx);
3179 if (RT_SUCCESS(rc))
3180 {
3181 /* Mark the whole disk as locked. */
3182 pDisk->uOffsetStartLocked = 0;
3183 pDisk->uOffsetEndLocked = UINT64_C(0xffffffffffffffff);
3184
3185 vdResetModifiedFlag(pDisk);
3186 rc = pImage->Backend->pfnFlush(pImage->pBackendData, pIoCtx);
3187 if ( ( RT_SUCCESS(rc)
3188 || rc == VERR_VD_ASYNC_IO_IN_PROGRESS
3189 || rc == VERR_VD_IOCTX_HALT)
3190 && pDisk->pCache)
3191 {
3192 rc = pDisk->pCache->Backend->pfnFlush(pDisk->pCache->pBackendData, pIoCtx);
3193 if ( RT_SUCCESS(rc)
3194 || ( rc != VERR_VD_ASYNC_IO_IN_PROGRESS
3195 && rc != VERR_VD_IOCTX_HALT))
3196 vdIoCtxUnlockDisk(pDisk, pIoCtx, true /* fProcessBlockedReqs */);
3197 else if (rc != VERR_VD_IOCTX_HALT)
3198 rc = VINF_SUCCESS;
3199 }
3200 else if (rc == VERR_VD_ASYNC_IO_IN_PROGRESS)
3201 rc = VINF_SUCCESS;
3202 else if (rc != VERR_VD_IOCTX_HALT)/* Some other error. */
3203 vdIoCtxUnlockDisk(pDisk, pIoCtx, true /* fProcessBlockedReqs */);
3204 }
3205
3206 return rc;
3207}
3208
3209/**
3210 * Async discard helper - discards a whole block which is recorded in the block
3211 * tree.
3212 *
3213 * @returns VBox status code.
3214 * @param pIoCtx The I/O context to operate on.
3215 */
3216static DECLCALLBACK(int) vdDiscardWholeBlockAsync(PVDIOCTX pIoCtx)
3217{
3218 int rc = VINF_SUCCESS;
3219 PVBOXHDD pDisk = pIoCtx->pDisk;
3220 PVDDISCARDSTATE pDiscard = pDisk->pDiscard;
3221 PVDDISCARDBLOCK pBlock = pIoCtx->Req.Discard.pBlock;
3222 size_t cbPreAllocated, cbPostAllocated, cbActuallyDiscarded;
3223
3224 LogFlowFunc(("pIoCtx=%#p\n", pIoCtx));
3225
3226 AssertPtr(pBlock);
3227
3228 rc = pDisk->pLast->Backend->pfnDiscard(pDisk->pLast->pBackendData, pIoCtx,
3229 pBlock->Core.Key, pBlock->cbDiscard,
3230 &cbPreAllocated, &cbPostAllocated,
3231 &cbActuallyDiscarded, NULL, 0);
3232 Assert(rc != VERR_VD_DISCARD_ALIGNMENT_NOT_MET);
3233 Assert(!cbPreAllocated);
3234 Assert(!cbPostAllocated);
3235 Assert(cbActuallyDiscarded == pBlock->cbDiscard || RT_FAILURE(rc));
3236
3237 /* Remove the block on success. */
3238 if ( RT_SUCCESS(rc)
3239 || rc == VERR_VD_ASYNC_IO_IN_PROGRESS)
3240 {
3241 PVDDISCARDBLOCK pBlockRemove = (PVDDISCARDBLOCK)RTAvlrU64RangeRemove(pDiscard->pTreeBlocks, pBlock->Core.Key);
3242 Assert(pBlockRemove == pBlock); RT_NOREF1(pBlockRemove);
3243
3244 pDiscard->cbDiscarding -= pBlock->cbDiscard;
3245 RTListNodeRemove(&pBlock->NodeLru);
3246 RTMemFree(pBlock->pbmAllocated);
3247 RTMemFree(pBlock);
3248 pIoCtx->Req.Discard.pBlock = NULL;/* Safety precaution. */
3249 pIoCtx->pfnIoCtxTransferNext = vdDiscardHelperAsync; /* Next part. */
3250 rc = VINF_SUCCESS;
3251 }
3252
3253 LogFlowFunc(("returns rc=%Rrc\n", rc));
3254 return rc;
3255}
3256
3257/**
3258 * Removes the least recently used blocks from the waiting list until
3259 * the new value is reached - version for async I/O.
3260 *
3261 * @returns VBox status code.
3262 * @param pDisk VD disk container.
3263 * @param pIoCtx The I/O context associated with this discard operation.
3264 * @param cbDiscardingNew How many bytes should be waiting on success.
3265 * The number of bytes waiting can be less.
3266 */
3267static int vdDiscardRemoveBlocksAsync(PVBOXHDD pDisk, PVDIOCTX pIoCtx, size_t cbDiscardingNew)
3268{
3269 int rc = VINF_SUCCESS;
3270 PVDDISCARDSTATE pDiscard = pDisk->pDiscard;
3271
3272 LogFlowFunc(("pDisk=%#p pDiscard=%#p cbDiscardingNew=%zu\n",
3273 pDisk, pDiscard, cbDiscardingNew));
3274
3275 while (pDiscard->cbDiscarding > cbDiscardingNew)
3276 {
3277 PVDDISCARDBLOCK pBlock = RTListGetLast(&pDiscard->ListLru, VDDISCARDBLOCK, NodeLru);
3278
3279 Assert(!RTListIsEmpty(&pDiscard->ListLru));
3280
3281 /* Go over the allocation bitmap and mark all discarded sectors as unused. */
3282 uint64_t offStart = pBlock->Core.Key;
3283 uint32_t idxStart = 0;
3284 size_t cbLeft = pBlock->cbDiscard;
3285 bool fAllocated = ASMBitTest(pBlock->pbmAllocated, idxStart);
3286 uint32_t cSectors = (uint32_t)(pBlock->cbDiscard / 512);
3287
3288 while (cbLeft > 0)
3289 {
3290 int32_t idxEnd;
3291 size_t cbThis = cbLeft;
3292
3293 if (fAllocated)
3294 {
3295 /* Check for the first unallocated bit. */
3296 idxEnd = ASMBitNextClear(pBlock->pbmAllocated, cSectors, idxStart);
3297 if (idxEnd != -1)
3298 {
3299 cbThis = (idxEnd - idxStart) * 512;
3300 fAllocated = false;
3301 }
3302 }
3303 else
3304 {
3305 /* Mark as unused and check for the first set bit. */
3306 idxEnd = ASMBitNextSet(pBlock->pbmAllocated, cSectors, idxStart);
3307 if (idxEnd != -1)
3308 cbThis = (idxEnd - idxStart) * 512;
3309
3310 rc = pDisk->pLast->Backend->pfnDiscard(pDisk->pLast->pBackendData, pIoCtx,
3311 offStart, cbThis, NULL, NULL, &cbThis,
3312 NULL, VD_DISCARD_MARK_UNUSED);
3313 if ( RT_FAILURE(rc)
3314 && rc != VERR_VD_ASYNC_IO_IN_PROGRESS)
3315 break;
3316
3317 fAllocated = true;
3318 }
3319
3320 idxStart = idxEnd;
3321 offStart += cbThis;
3322 cbLeft -= cbThis;
3323 }
3324
3325 if ( RT_FAILURE(rc)
3326 && rc != VERR_VD_ASYNC_IO_IN_PROGRESS)
3327 break;
3328
3329 PVDDISCARDBLOCK pBlockRemove = (PVDDISCARDBLOCK)RTAvlrU64RangeRemove(pDiscard->pTreeBlocks, pBlock->Core.Key);
3330 Assert(pBlockRemove == pBlock); NOREF(pBlockRemove);
3331 RTListNodeRemove(&pBlock->NodeLru);
3332
3333 pDiscard->cbDiscarding -= pBlock->cbDiscard;
3334 RTMemFree(pBlock->pbmAllocated);
3335 RTMemFree(pBlock);
3336 }
3337
3338 if (rc == VERR_VD_ASYNC_IO_IN_PROGRESS)
3339 rc = VINF_SUCCESS;
3340
3341 Assert(RT_FAILURE(rc) || pDiscard->cbDiscarding <= cbDiscardingNew);
3342
3343 LogFlowFunc(("returns rc=%Rrc\n", rc));
3344 return rc;
3345}
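/*
 * Illustrative sketch (hypothetical, not part of VD.cpp): vdDiscardRemoveBlocksAsync()
 * walks the per-sector allocation bitmap of a discard block and splits it into runs
 * of allocated and unallocated sectors, handing only the unallocated runs to the
 * backend. The toy program below performs the same run scan with naive bit helpers;
 * bitTest(), bitNextWithValue() and dumpDiscardRuns() are invented stand-ins for
 * ASMBitTest()/ASMBitNextSet()/ASMBitNextClear() and the loop above.
 *
 *   #include <stdbool.h>
 *   #include <stdint.h>
 *   #include <stdio.h>
 *
 *   #define SECTOR_SIZE 512u
 *
 *   static bool bitTest(const uint8_t *pbm, uint32_t iBit)
 *   {
 *       return (pbm[iBit / 8] >> (iBit % 8)) & 1;
 *   }
 *
 *   static int32_t bitNextWithValue(const uint8_t *pbm, uint32_t cBits, uint32_t iStart, bool fValue)
 *   {
 *       for (uint32_t i = iStart; i < cBits; i++)
 *           if (bitTest(pbm, i) == fValue)
 *               return (int32_t)i;
 *       return -1;
 *   }
 *
 *   // Print the byte ranges of one block that could really be discarded,
 *   // i.e. the runs of sectors whose allocation bit is clear.
 *   static void dumpDiscardRuns(const uint8_t *pbmAllocated, uint32_t cSectors, uint64_t offBlock)
 *   {
 *       uint32_t idx = 0;
 *       while (idx < cSectors)
 *       {
 *           bool     fAllocated = bitTest(pbmAllocated, idx);
 *           int32_t  idxEnd     = bitNextWithValue(pbmAllocated, cSectors, idx, !fAllocated);
 *           uint32_t cThis      = (idxEnd == -1 ? cSectors : (uint32_t)idxEnd) - idx;
 *
 *           if (!fAllocated)
 *               printf("discard %llu + %u bytes\n",
 *                      (unsigned long long)(offBlock + (uint64_t)idx * SECTOR_SIZE),
 *                      cThis * SECTOR_SIZE);
 *           idx += cThis;
 *       }
 *   }
 *
 *   int main(void)
 *   {
 *       // 16 sectors; sectors 0..3 and 10..11 are still allocated.
 *       uint8_t abBitmap[2] = { 0x0f, 0x0c };
 *       dumpDiscardRuns(abBitmap, 16, 1048576); // block starts at 1 MiB
 *       return 0;
 *   }
 */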
3346
3347/**
3348 * Async discard helper - discards the current range if there is no matching
3349 * block in the tree.
3350 *
3351 * @returns VBox status code.
3352 * @param pIoCtx The I/O context to operate on.
3353 */
3354static DECLCALLBACK(int) vdDiscardCurrentRangeAsync(PVDIOCTX pIoCtx)
3355{
3356 PVBOXHDD pDisk = pIoCtx->pDisk;
3357 PVDDISCARDSTATE pDiscard = pDisk->pDiscard;
3358 uint64_t offStart = pIoCtx->Req.Discard.offCur;
3359 size_t cbThisDiscard = pIoCtx->Req.Discard.cbThisDiscard;
3360 void *pbmAllocated = NULL;
3361 size_t cbPreAllocated, cbPostAllocated;
3362 int rc = VINF_SUCCESS;
3363
3364 LogFlowFunc(("pIoCtx=%#p\n", pIoCtx));
3365
3366 /* No block found, try to discard using the backend first. */
3367 rc = pDisk->pLast->Backend->pfnDiscard(pDisk->pLast->pBackendData, pIoCtx,
3368 offStart, cbThisDiscard, &cbPreAllocated,
3369 &cbPostAllocated, &cbThisDiscard,
3370 &pbmAllocated, 0);
3371 if (rc == VERR_VD_DISCARD_ALIGNMENT_NOT_MET)
3372 {
3373 /* Create new discard block. */
3374 PVDDISCARDBLOCK pBlock = (PVDDISCARDBLOCK)RTMemAllocZ(sizeof(VDDISCARDBLOCK));
3375 if (pBlock)
3376 {
3377 pBlock->Core.Key = offStart - cbPreAllocated;
3378 pBlock->Core.KeyLast = offStart + cbThisDiscard + cbPostAllocated - 1;
3379 pBlock->cbDiscard = cbPreAllocated + cbThisDiscard + cbPostAllocated;
3380 pBlock->pbmAllocated = pbmAllocated;
3381 bool fInserted = RTAvlrU64Insert(pDiscard->pTreeBlocks, &pBlock->Core);
3382 Assert(fInserted); NOREF(fInserted);
3383
3384 RTListPrepend(&pDiscard->ListLru, &pBlock->NodeLru);
3385 pDiscard->cbDiscarding += pBlock->cbDiscard;
3386
3387 Assert(pIoCtx->Req.Discard.cbDiscardLeft >= cbThisDiscard);
3388 pIoCtx->Req.Discard.cbDiscardLeft -= cbThisDiscard;
3389 pIoCtx->Req.Discard.offCur += cbThisDiscard;
3390 pIoCtx->Req.Discard.cbThisDiscard = cbThisDiscard;
3391
3392 if (pDiscard->cbDiscarding > VD_DISCARD_REMOVE_THRESHOLD)
3393 rc = vdDiscardRemoveBlocksAsync(pDisk, pIoCtx, VD_DISCARD_REMOVE_THRESHOLD);
3394 else
3395 rc = VINF_SUCCESS;
3396
3397 if (RT_SUCCESS(rc))
3398 pIoCtx->pfnIoCtxTransferNext = vdDiscardHelperAsync; /* Next part. */
3399 }
3400 else
3401 {
3402 RTMemFree(pbmAllocated);
3403 rc = VERR_NO_MEMORY;
3404 }
3405 }
3406 else if ( RT_SUCCESS(rc)
3407 || rc == VERR_VD_ASYNC_IO_IN_PROGRESS) /* Save state and advance to the next range. */
3408 {
3409 Assert(pIoCtx->Req.Discard.cbDiscardLeft >= cbThisDiscard);
3410 pIoCtx->Req.Discard.cbDiscardLeft -= cbThisDiscard;
3411 pIoCtx->Req.Discard.offCur += cbThisDiscard;
3412 pIoCtx->Req.Discard.cbThisDiscard = cbThisDiscard;
3413 pIoCtx->pfnIoCtxTransferNext = vdDiscardHelperAsync;
3414 rc = VINF_SUCCESS;
3415 }
3416
3417 LogFlowFunc(("returns rc=%Rrc\n", rc));
3418 return rc;
3419}
3420
3421/**
3422 * Async discard helper - entry point.
3423 *
3424 * @returns VBox status code.
3425 * @param pIoCtx The I/O context to operate on.
3426 */
3427static DECLCALLBACK(int) vdDiscardHelperAsync(PVDIOCTX pIoCtx)
3428{
3429 int rc = VINF_SUCCESS;
3430 PVBOXHDD pDisk = pIoCtx->pDisk;
3431 PCRTRANGE paRanges = pIoCtx->Req.Discard.paRanges;
3432 unsigned cRanges = pIoCtx->Req.Discard.cRanges;
3433 PVDDISCARDSTATE pDiscard = pDisk->pDiscard;
3434
3435 LogFlowFunc(("pIoCtx=%#p\n", pIoCtx));
3436
3437 /* Check if the I/O context processed all ranges. */
3438 if ( pIoCtx->Req.Discard.idxRange == cRanges
3439 && !pIoCtx->Req.Discard.cbDiscardLeft)
3440 {
3441 LogFlowFunc(("All ranges discarded, completing\n"));
3442 vdIoCtxUnlockDisk(pDisk, pIoCtx, true /* fProcessBlockedReqs */);
3443 return VINF_SUCCESS;
3444 }
3445
3446 if (pDisk->pIoCtxLockOwner != pIoCtx)
3447 rc = vdIoCtxLockDisk(pDisk, pIoCtx);
3448
3449 if (RT_SUCCESS(rc))
3450 {
3451 uint64_t offStart = pIoCtx->Req.Discard.offCur;
3452 size_t cbDiscardLeft = pIoCtx->Req.Discard.cbDiscardLeft;
3453 size_t cbThisDiscard;
3454
3455 pDisk->uOffsetStartLocked = offStart;
3456 pDisk->uOffsetEndLocked = offStart + cbDiscardLeft;
3457
3458 if (RT_UNLIKELY(!pDiscard))
3459 {
3460 pDiscard = vdDiscardStateCreate();
3461 if (!pDiscard)
3462 return VERR_NO_MEMORY;
3463
3464 pDisk->pDiscard = pDiscard;
3465 }
3466
3467 if (!pIoCtx->Req.Discard.cbDiscardLeft)
3468 {
3469 offStart = paRanges[pIoCtx->Req.Discard.idxRange].offStart;
3470 cbDiscardLeft = paRanges[pIoCtx->Req.Discard.idxRange].cbRange;
3471 LogFlowFunc(("New range descriptor loaded (%u) offStart=%llu cbDiscard=%zu\n",
3472 pIoCtx->Req.Discard.idxRange, offStart, cbDiscardLeft));
3473 pIoCtx->Req.Discard.idxRange++;
3474 }
3475
3476 /* Look for a matching block in the AVL tree first. */
3477 PVDDISCARDBLOCK pBlock = (PVDDISCARDBLOCK)RTAvlrU64GetBestFit(pDiscard->pTreeBlocks, offStart, false);
3478 if (!pBlock || pBlock->Core.KeyLast < offStart)
3479 {
3480 PVDDISCARDBLOCK pBlockAbove = (PVDDISCARDBLOCK)RTAvlrU64GetBestFit(pDiscard->pTreeBlocks, offStart, true);
3481
3482 /* Clip the range so it does not extend beyond the end of the next recorded block. */
3483 if (pBlockAbove)
3484 cbThisDiscard = RT_MIN(cbDiscardLeft, pBlockAbove->Core.KeyLast - offStart + 1);
3485 else
3486 cbThisDiscard = cbDiscardLeft;
3487
3488 Assert(!(cbThisDiscard % 512));
3489 pIoCtx->Req.Discard.pBlock = NULL;
3490 pIoCtx->pfnIoCtxTransferNext = vdDiscardCurrentRangeAsync;
3491 }
3492 else
3493 {
3494 /* Range lies partly in the block, update allocation bitmap. */
3495 int32_t idxStart, idxEnd;
3496
3497 cbThisDiscard = RT_MIN(cbDiscardLeft, pBlock->Core.KeyLast - offStart + 1);
3498
3499 AssertPtr(pBlock);
3500
3501 Assert(!(cbThisDiscard % 512));
3502 Assert(!((offStart - pBlock->Core.Key) % 512));
3503
3504 idxStart = (offStart - pBlock->Core.Key) / 512;
3505 idxEnd = idxStart + (int32_t)(cbThisDiscard / 512);
3506
3507 ASMBitClearRange(pBlock->pbmAllocated, idxStart, idxEnd);
3508
3509 cbDiscardLeft -= cbThisDiscard;
3510 offStart += cbThisDiscard;
3511
3512 /* Call the backend to discard the block if it is completely unallocated now. */
3513 if (ASMBitFirstSet((volatile void *)pBlock->pbmAllocated, (uint32_t)(pBlock->cbDiscard / 512)) == -1)
3514 {
3515 pIoCtx->Req.Discard.pBlock = pBlock;
3516 pIoCtx->pfnIoCtxTransferNext = vdDiscardWholeBlockAsync;
3517 rc = VINF_SUCCESS;
3518 }
3519 else
3520 {
3521 RTListNodeRemove(&pBlock->NodeLru);
3522 RTListPrepend(&pDiscard->ListLru, &pBlock->NodeLru);
3523
3524 /* Start with next range. */
3525 pIoCtx->pfnIoCtxTransferNext = vdDiscardHelperAsync;
3526 rc = VINF_SUCCESS;
3527 }
3528 }
3529
3530 /* Save state in the context. */
3531 pIoCtx->Req.Discard.offCur = offStart;
3532 pIoCtx->Req.Discard.cbDiscardLeft = cbDiscardLeft;
3533 pIoCtx->Req.Discard.cbThisDiscard = cbThisDiscard;
3534 }
3535
3536 LogFlowFunc(("returns rc=%Rrc\n", rc));
3537 return rc;
3538}
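/*
 * Illustrative sketch (hypothetical, not part of VD.cpp): vdDiscardHelperAsync()
 * clips each incoming range against the blocks already recorded in the AVL range
 * tree: the piece handled in one step never extends past the end of the first
 * recorded block that ends at or after the range start. The sketch below uses a
 * plain sorted array instead of the AVL tree; RANGE and clipDiscard() are
 * invented names.
 *
 *   #include <stdint.h>
 *   #include <stdio.h>
 *
 *   typedef struct RANGE { uint64_t Key, KeyLast; } RANGE; // inclusive byte range
 *
 *   // Return how many of the cbLeft bytes starting at offStart may be handled in
 *   // one step without crossing the end of a recorded block.
 *   static uint64_t clipDiscard(const RANGE *paBlocks, unsigned cBlocks,
 *                               uint64_t offStart, uint64_t cbLeft)
 *   {
 *       for (unsigned i = 0; i < cBlocks; i++) // paBlocks is sorted by Key
 *           if (offStart <= paBlocks[i].KeyLast)
 *           {
 *               uint64_t cbMax = paBlocks[i].KeyLast - offStart + 1;
 *               return cbLeft < cbMax ? cbLeft : cbMax;
 *           }
 *       return cbLeft; // no recorded block at or above offStart
 *   }
 *
 *   int main(void)
 *   {
 *       const RANGE aBlocks[] = { { 4096, 8191 }, { 16384, 20479 } };
 *       printf("%llu\n", (unsigned long long)clipDiscard(aBlocks, 2,     0, 65536)); // 8192
 *       printf("%llu\n", (unsigned long long)clipDiscard(aBlocks, 2,  6144, 65536)); // 2048
 *       printf("%llu\n", (unsigned long long)clipDiscard(aBlocks, 2, 32768,  4096)); // 4096
 *       return 0;
 *   }
 */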
3539
3540#ifndef VBOX_HDD_NO_DYNAMIC_BACKENDS
3541
3542/**
3543 * @interface_method_impl{VDBACKENDREGISTER,pfnRegisterImage}
3544 */
3545static DECLCALLBACK(int) vdPluginRegisterImage(void *pvUser, PCVDIMAGEBACKEND pBackend)
3546{
3547 int rc = VINF_SUCCESS;
3548
3549 if (VD_VERSION_ARE_COMPATIBLE(VD_IMGBACKEND_VERSION, pBackend->u32Version))
3550 vdAddBackend((RTLDRMOD)pvUser, pBackend);
3551 else
3552 {
3553 LogFunc(("ignored plugin: pBackend->u32Version=%u rc=%Rrc\n", pBackend->u32Version, rc));
3554 rc = VERR_IGNORED;
3555 }
3556
3557 return rc;
3558}
3559
3560/**
3561 * @interface_method_impl{VDBACKENDREGISTER,pfnRegisterCache}
3562 */
3563static DECLCALLBACK(int) vdPluginRegisterCache(void *pvUser, PCVDCACHEBACKEND pBackend)
3564{
3565 int rc = VINF_SUCCESS;
3566
3567 if (VD_VERSION_ARE_COMPATIBLE(VD_CACHEBACKEND_VERSION, pBackend->u32Version))
3568 vdAddCacheBackend((RTLDRMOD)pvUser, pBackend);
3569 else
3570 {
3571 LogFunc(("ignored plugin: pBackend->u32Version=%u rc=%Rrc\n", pBackend->u32Version, rc));
3572 rc = VERR_IGNORED;
3573 }
3574
3575 return rc;
3576}
3577
3578/**
3579 * @interface_method_impl{VDBACKENDREGISTER,pfnRegisterFilter}
3580 */
3581static DECLCALLBACK(int) vdPluginRegisterFilter(void *pvUser, PCVDFILTERBACKEND pBackend)
3582{
3583 int rc = VINF_SUCCESS;
3584
3585 if (VD_VERSION_ARE_COMPATIBLE(VD_FLTBACKEND_VERSION, pBackend->u32Version))
3586 vdAddFilterBackend((RTLDRMOD)pvUser, pBackend);
3587 else
3588 {
3589 LogFunc(("ignored plugin: pBackend->u32Version=%u rc=%Rrc\n", pBackend->u32Version, rc));
3590 rc = VERR_IGNORED;
3591 }
3592
3593 return rc;
3594}
3595
3596/**
3597 * Checks whether the given plugin filename was already loaded.
3598 *
3599 * @returns Pointer to already loaded plugin, NULL if not found.
3600 * @param pszFilename The filename to check.
3601 */
3602static PVDPLUGIN vdPluginFind(const char *pszFilename)
3603{
3604 PVDPLUGIN pIt;
3605 RTListForEach(&g_ListPluginsLoaded, pIt, VDPLUGIN, NodePlugin)
3606 {
3607 if (!RTStrCmp(pIt->pszFilename, pszFilename))
3608 return pIt;
3609 }
3610
3611 return NULL;
3612}
3613
3614/**
3615 * Adds a plugin to the list of loaded plugins.
3616 *
3617 * @returns VBox status code.
3618 * @param hPlugin Plugin handle to add.
3619 * @param pszFilename The associated filename, used for finding duplicates.
3620 */
3621static int vdAddPlugin(RTLDRMOD hPlugin, const char *pszFilename)
3622{
3623 int rc = VINF_SUCCESS;
3624 PVDPLUGIN pPlugin = (PVDPLUGIN)RTMemAllocZ(sizeof(VDPLUGIN));
3625
3626 if (pPlugin)
3627 {
3628 pPlugin->hPlugin = hPlugin;
3629 pPlugin->pszFilename = RTStrDup(pszFilename);
3630 if (pPlugin->pszFilename)
3631 RTListAppend(&g_ListPluginsLoaded, &pPlugin->NodePlugin);
3632 else
3633 {
3634 RTMemFree(pPlugin);
3635 rc = VERR_NO_MEMORY;
3636 }
3637 }
3638 else
3639 rc = VERR_NO_MEMORY;
3640
3641 return rc;
3642}
3643
3644static int vdRemovePlugin(const char *pszFilename)
3645{
3646 /* Find plugin to be removed from the list. */
3647 PVDPLUGIN pIt = vdPluginFind(pszFilename);
3648 if (!pIt)
3649 return VINF_SUCCESS;
3650
3651 /** @todo r=klaus: need to add a plugin entry point for unregistering the
3652     * backends. Only if this doesn't exist (or fails to work) should we fall
3653 * back to the following uncoordinated backend cleanup. */
3654 for (unsigned i = 0; i < g_cBackends; i++)
3655 {
3656 while (i < g_cBackends && g_ahBackendPlugins[i] == pIt->hPlugin)
3657 {
3658            memmove(&g_apBackends[i], &g_apBackends[i + 1], (g_cBackends - i - 1) * sizeof(PCVDIMAGEBACKEND));
3659            memmove(&g_ahBackendPlugins[i], &g_ahBackendPlugins[i + 1], (g_cBackends - i - 1) * sizeof(RTLDRMOD));
3660 /** @todo for now skip reallocating, doesn't save much */
3661 g_cBackends--;
3662 }
3663 }
3664 for (unsigned i = 0; i < g_cCacheBackends; i++)
3665 {
3666 while (i < g_cCacheBackends && g_ahCacheBackendPlugins[i] == pIt->hPlugin)
3667 {
3668            memmove(&g_apCacheBackends[i], &g_apCacheBackends[i + 1], (g_cCacheBackends - i - 1) * sizeof(PCVDCACHEBACKEND));
3669            memmove(&g_ahCacheBackendPlugins[i], &g_ahCacheBackendPlugins[i + 1], (g_cCacheBackends - i - 1) * sizeof(RTLDRMOD));
3670 /** @todo for now skip reallocating, doesn't save much */
3671 g_cCacheBackends--;
3672 }
3673 }
3674 for (unsigned i = 0; i < g_cFilterBackends; i++)
3675 {
3676 while (i < g_cFilterBackends && g_pahFilterBackendPlugins[i] == pIt->hPlugin)
3677 {
3678            memmove(&g_apFilterBackends[i], &g_apFilterBackends[i + 1], (g_cFilterBackends - i - 1) * sizeof(PCVDFILTERBACKEND));
3679            memmove(&g_pahFilterBackendPlugins[i], &g_pahFilterBackendPlugins[i + 1], (g_cFilterBackends - i - 1) * sizeof(RTLDRMOD));
3680 /** @todo for now skip reallocating, doesn't save much */
3681 g_cFilterBackends--;
3682 }
3683 }
3684
3685 /* Remove the plugin node now, all traces of it are gone. */
3686 RTListNodeRemove(&pIt->NodePlugin);
3687 RTLdrClose(pIt->hPlugin);
3688 RTStrFree(pIt->pszFilename);
3689 RTMemFree(pIt);
3690
3691 return VINF_SUCCESS;
3692}
3693
3694#endif /* !VBOX_HDD_NO_DYNAMIC_BACKENDS */
3695
3696/**
3697 * Worker for VDPluginLoadFromFilename() and vdPluginLoadFromPath().
3698 *
3699 * @returns VBox status code.
3700 * @param pszFilename The plugin filename to load.
3701 */
3702static int vdPluginLoadFromFilename(const char *pszFilename)
3703{
3704#ifndef VBOX_HDD_NO_DYNAMIC_BACKENDS
3705 /* Plugin loaded? Nothing to do. */
3706 if (vdPluginFind(pszFilename))
3707 return VINF_SUCCESS;
3708
3709 RTLDRMOD hPlugin = NIL_RTLDRMOD;
3710 int rc = SUPR3HardenedLdrLoadPlugIn(pszFilename, &hPlugin, NULL);
3711 if (RT_SUCCESS(rc))
3712 {
3713 VDBACKENDREGISTER BackendRegister;
3714 PFNVDPLUGINLOAD pfnVDPluginLoad = NULL;
3715
3716 BackendRegister.u32Version = VD_BACKENDREG_CB_VERSION;
3717 BackendRegister.pfnRegisterImage = vdPluginRegisterImage;
3718 BackendRegister.pfnRegisterCache = vdPluginRegisterCache;
3719 BackendRegister.pfnRegisterFilter = vdPluginRegisterFilter;
3720
3721 rc = RTLdrGetSymbol(hPlugin, VD_PLUGIN_LOAD_NAME, (void**)&pfnVDPluginLoad);
3722 if (RT_FAILURE(rc) || !pfnVDPluginLoad)
3723 {
3724 LogFunc(("error resolving the entry point %s in plugin %s, rc=%Rrc, pfnVDPluginLoad=%#p\n",
3725 VD_PLUGIN_LOAD_NAME, pszFilename, rc, pfnVDPluginLoad));
3726 if (RT_SUCCESS(rc))
3727 rc = VERR_SYMBOL_NOT_FOUND;
3728 }
3729
3730 if (RT_SUCCESS(rc))
3731 {
3732 /* Get the function table. */
3733 rc = pfnVDPluginLoad(hPlugin, &BackendRegister);
3734 }
3735 else
3736 LogFunc(("ignored plugin '%s': rc=%Rrc\n", pszFilename, rc));
3737
3738 /* Create a plugin entry on success. */
3739 if (RT_SUCCESS(rc))
3740 vdAddPlugin(hPlugin, pszFilename);
3741 else
3742 RTLdrClose(hPlugin);
3743 }
3744
3745 return rc;
3746#else
3747 RT_NOREF1(pszFilename);
3748 return VERR_NOT_IMPLEMENTED;
3749#endif
3750}
3751
3752/**
3753 * Worker for VDPluginLoadFromPath() and vdLoadDynamicBackends().
3754 *
3755 * @returns VBox status code.
3756 * @param pszPath The path to load plugins from.
3757 */
3758static int vdPluginLoadFromPath(const char *pszPath)
3759{
3760#ifndef VBOX_HDD_NO_DYNAMIC_BACKENDS
3761 /* To get all entries with VBoxHDD as prefix. */
3762 char *pszPluginFilter = RTPathJoinA(pszPath, VD_PLUGIN_PREFIX "*");
3763 if (!pszPluginFilter)
3764 return VERR_NO_STR_MEMORY;
3765
3766 PRTDIRENTRYEX pPluginDirEntry = NULL;
3767 PRTDIR pPluginDir = NULL;
3768 size_t cbPluginDirEntry = sizeof(RTDIRENTRYEX);
3769 int rc = RTDirOpenFiltered(&pPluginDir, pszPluginFilter, RTDIRFILTER_WINNT, 0);
3770 if (RT_SUCCESS(rc))
3771 {
3772 pPluginDirEntry = (PRTDIRENTRYEX)RTMemAllocZ(sizeof(RTDIRENTRYEX));
3773 if (pPluginDirEntry)
3774 {
3775 while ( (rc = RTDirReadEx(pPluginDir, pPluginDirEntry, &cbPluginDirEntry, RTFSOBJATTRADD_NOTHING, RTPATH_F_ON_LINK))
3776 != VERR_NO_MORE_FILES)
3777 {
3778 char *pszPluginPath = NULL;
3779
3780 if (rc == VERR_BUFFER_OVERFLOW)
3781 {
3782 /* allocate new buffer. */
3783 RTMemFree(pPluginDirEntry);
3784 pPluginDirEntry = (PRTDIRENTRYEX)RTMemAllocZ(cbPluginDirEntry);
3785 if (!pPluginDirEntry)
3786 {
3787 rc = VERR_NO_MEMORY;
3788 break;
3789 }
3790 /* Retry. */
3791 rc = RTDirReadEx(pPluginDir, pPluginDirEntry, &cbPluginDirEntry, RTFSOBJATTRADD_NOTHING, RTPATH_F_ON_LINK);
3792 if (RT_FAILURE(rc))
3793 break;
3794 }
3795 else if (RT_FAILURE(rc))
3796 break;
3797
3798 /* We got the new entry. */
3799 if (!RTFS_IS_FILE(pPluginDirEntry->Info.Attr.fMode))
3800 continue;
3801
3802 /* Prepend the path to the libraries. */
3803 pszPluginPath = RTPathJoinA(pszPath, pPluginDirEntry->szName);
3804 if (!pszPluginPath)
3805 {
3806 rc = VERR_NO_STR_MEMORY;
3807 break;
3808 }
3809
3810 rc = vdPluginLoadFromFilename(pszPluginPath);
3811 RTStrFree(pszPluginPath);
3812 }
3813
3814 RTMemFree(pPluginDirEntry);
3815 }
3816 else
3817 rc = VERR_NO_MEMORY;
3818
3819 RTDirClose(pPluginDir);
3820 }
3821 else
3822 {
3823 /* On Windows the above immediately signals that there are no
3824 * files matching, while on other platforms enumerating the
3825 * files below fails. Either way: no plugins. */
3826 }
3827
3828 if (rc == VERR_NO_MORE_FILES)
3829 rc = VINF_SUCCESS;
3830 RTStrFree(pszPluginFilter);
3831 return rc;
3832#else
3833 RT_NOREF1(pszPath);
3834 return VERR_NOT_IMPLEMENTED;
3835#endif
3836}
3837
3838/**
3839 * internal: scans the plugin directory and loads any plugins found.
3840 */
3841static int vdLoadDynamicBackends(void)
3842{
3843#ifndef VBOX_HDD_NO_DYNAMIC_BACKENDS
3844 /*
3845 * Enumerate plugin backends from the application directory where the other
3846 * shared libraries are.
3847 */
3848 char szPath[RTPATH_MAX];
3849 int rc = RTPathAppPrivateArch(szPath, sizeof(szPath));
3850 if (RT_FAILURE(rc))
3851 return rc;
3852
3853 return vdPluginLoadFromPath(szPath);
3854#else
3855 return VINF_SUCCESS;
3856#endif
3857}
3858
3859/**
3860 * Worker for VDPluginUnloadFromFilename() and vdPluginUnloadFromPath().
3861 *
3862 * @returns VBox status code.
3863 * @param pszFilename The plugin filename to unload.
3864 */
3865static int vdPluginUnloadFromFilename(const char *pszFilename)
3866{
3867#ifndef VBOX_HDD_NO_DYNAMIC_BACKENDS
3868 return vdRemovePlugin(pszFilename);
3869#else
3870 RT_NOREF1(pszFilename);
3871 return VERR_NOT_IMPLEMENTED;
3872#endif
3873}
3874
3875/**
3876 * Worker for VDPluginUnloadFromPath().
3877 *
3878 * @returns VBox status code.
3879 * @param pszPath The path to unload plugins from.
3880 */
3881static int vdPluginUnloadFromPath(const char *pszPath)
3882{
3883#ifndef VBOX_HDD_NO_DYNAMIC_BACKENDS
3884 /* To get all entries with VBoxHDD as prefix. */
3885 char *pszPluginFilter = RTPathJoinA(pszPath, VD_PLUGIN_PREFIX "*");
3886 if (!pszPluginFilter)
3887 return VERR_NO_STR_MEMORY;
3888
3889 PRTDIRENTRYEX pPluginDirEntry = NULL;
3890 PRTDIR pPluginDir = NULL;
3891 size_t cbPluginDirEntry = sizeof(RTDIRENTRYEX);
3892 int rc = RTDirOpenFiltered(&pPluginDir, pszPluginFilter, RTDIRFILTER_WINNT, 0);
3893 if (RT_SUCCESS(rc))
3894 {
3895 pPluginDirEntry = (PRTDIRENTRYEX)RTMemAllocZ(sizeof(RTDIRENTRYEX));
3896 if (pPluginDirEntry)
3897 {
3898 while ((rc = RTDirReadEx(pPluginDir, pPluginDirEntry, &cbPluginDirEntry, RTFSOBJATTRADD_NOTHING, RTPATH_F_ON_LINK)) != VERR_NO_MORE_FILES)
3899 {
3900 char *pszPluginPath = NULL;
3901
3902 if (rc == VERR_BUFFER_OVERFLOW)
3903 {
3904 /* allocate new buffer. */
3905 RTMemFree(pPluginDirEntry);
3906 pPluginDirEntry = (PRTDIRENTRYEX)RTMemAllocZ(cbPluginDirEntry);
3907 if (!pPluginDirEntry)
3908 {
3909 rc = VERR_NO_MEMORY;
3910 break;
3911 }
3912 /* Retry. */
3913 rc = RTDirReadEx(pPluginDir, pPluginDirEntry, &cbPluginDirEntry, RTFSOBJATTRADD_NOTHING, RTPATH_F_ON_LINK);
3914 if (RT_FAILURE(rc))
3915 break;
3916 }
3917 else if (RT_FAILURE(rc))
3918 break;
3919
3920 /* We got the new entry. */
3921 if (!RTFS_IS_FILE(pPluginDirEntry->Info.Attr.fMode))
3922 continue;
3923
3924 /* Prepend the path to the libraries. */
3925 pszPluginPath = RTPathJoinA(pszPath, pPluginDirEntry->szName);
3926 if (!pszPluginPath)
3927 {
3928 rc = VERR_NO_STR_MEMORY;
3929 break;
3930 }
3931
3932 rc = vdPluginUnloadFromFilename(pszPluginPath);
3933 RTStrFree(pszPluginPath);
3934 }
3935
3936 RTMemFree(pPluginDirEntry);
3937 }
3938 else
3939 rc = VERR_NO_MEMORY;
3940
3941 RTDirClose(pPluginDir);
3942 }
3943 else
3944 {
3945 /* On Windows the above immediately signals that there are no
3946 * files matching, while on other platforms enumerating the
3947 * files below fails. Either way: no plugins. */
3948 }
3949
3950 if (rc == VERR_NO_MORE_FILES)
3951 rc = VINF_SUCCESS;
3952 RTStrFree(pszPluginFilter);
3953 return rc;
3954#else
3955 RT_NOREF1(pszPath);
3956 return VERR_NOT_IMPLEMENTED;
3957#endif
3958}
3959
3960/**
3961 * VD async I/O interface open callback.
3962 */
3963static DECLCALLBACK(int) vdIOOpenFallback(void *pvUser, const char *pszLocation,
3964 uint32_t fOpen, PFNVDCOMPLETED pfnCompleted,
3965 void **ppStorage)
3966{
3967 RT_NOREF1(pvUser);
3968 PVDIIOFALLBACKSTORAGE pStorage = (PVDIIOFALLBACKSTORAGE)RTMemAllocZ(sizeof(VDIIOFALLBACKSTORAGE));
3969
3970 if (!pStorage)
3971 return VERR_NO_MEMORY;
3972
3973 pStorage->pfnCompleted = pfnCompleted;
3974
3975 /* Open the file. */
3976 int rc = RTFileOpen(&pStorage->File, pszLocation, fOpen);
3977 if (RT_SUCCESS(rc))
3978 {
3979 *ppStorage = pStorage;
3980 return VINF_SUCCESS;
3981 }
3982
3983 RTMemFree(pStorage);
3984 return rc;
3985}
3986
3987/**
3988 * VD async I/O interface close callback.
3989 */
3990static DECLCALLBACK(int) vdIOCloseFallback(void *pvUser, void *pvStorage)
3991{
3992 RT_NOREF1(pvUser);
3993 PVDIIOFALLBACKSTORAGE pStorage = (PVDIIOFALLBACKSTORAGE)pvStorage;
3994
3995 RTFileClose(pStorage->File);
3996 RTMemFree(pStorage);
3997 return VINF_SUCCESS;
3998}
3999
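/**
 * VD async I/O interface callback for deleting a file.
 */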
4000static DECLCALLBACK(int) vdIODeleteFallback(void *pvUser, const char *pcszFilename)
4001{
4002 RT_NOREF1(pvUser);
4003 return RTFileDelete(pcszFilename);
4004}
4005
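/**
 * VD async I/O interface callback for renaming/moving a file.
 */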
4006static DECLCALLBACK(int) vdIOMoveFallback(void *pvUser, const char *pcszSrc, const char *pcszDst, unsigned fMove)
4007{
4008 RT_NOREF1(pvUser);
4009 return RTFileMove(pcszSrc, pcszDst, fMove);
4010}
4011
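/**
 * VD async I/O interface callback for querying the free space on the volume
 * the given file resides on.
 */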
4012static DECLCALLBACK(int) vdIOGetFreeSpaceFallback(void *pvUser, const char *pcszFilename, int64_t *pcbFreeSpace)
4013{
4014 RT_NOREF1(pvUser);
4015 return RTFsQuerySizes(pcszFilename, NULL, pcbFreeSpace, NULL, NULL);
4016}
4017
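/**
 * VD async I/O interface callback for querying the modification time of a file.
 */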
4018static DECLCALLBACK(int) vdIOGetModificationTimeFallback(void *pvUser, const char *pcszFilename, PRTTIMESPEC pModificationTime)
4019{
4020 RT_NOREF1(pvUser);
4021 RTFSOBJINFO info;
4022 int rc = RTPathQueryInfo(pcszFilename, &info, RTFSOBJATTRADD_NOTHING);
4023 if (RT_SUCCESS(rc))
4024 *pModificationTime = info.ModificationTime;
4025 return rc;
4026}
4027
4028/**
4029 * VD async I/O interface callback for retrieving the file size.
4030 */
4031static DECLCALLBACK(int) vdIOGetSizeFallback(void *pvUser, void *pvStorage, uint64_t *pcbSize)
4032{
4033 RT_NOREF1(pvUser);
4034 PVDIIOFALLBACKSTORAGE pStorage = (PVDIIOFALLBACKSTORAGE)pvStorage;
4035
4036 return RTFileGetSize(pStorage->File, pcbSize);
4037}
4038
4039/**
4040 * VD async I/O interface callback for setting the file size.
4041 */
4042static DECLCALLBACK(int) vdIOSetSizeFallback(void *pvUser, void *pvStorage, uint64_t cbSize)
4043{
4044 RT_NOREF1(pvUser);
4045 PVDIIOFALLBACKSTORAGE pStorage = (PVDIIOFALLBACKSTORAGE)pvStorage;
4046
4047 return RTFileSetSize(pStorage->File, cbSize);
4048}
4049
4050/**
4051 * VD async I/O interface callback for setting the file allocation size.
4052 */
4053static DECLCALLBACK(int) vdIOSetAllocationSizeFallback(void *pvUser, void *pvStorage, uint64_t cbSize,
4054 uint32_t fFlags)
4055{
4056 RT_NOREF2(pvUser, fFlags);
4057 PVDIIOFALLBACKSTORAGE pStorage = (PVDIIOFALLBACKSTORAGE)pvStorage;
4058
4059 return RTFileSetAllocationSize(pStorage->File, cbSize, RTFILE_ALLOC_SIZE_F_DEFAULT);
4060}
4061
4062/**
4063 * VD async I/O interface callback for a synchronous write to the file.
4064 */
4065static DECLCALLBACK(int) vdIOWriteSyncFallback(void *pvUser, void *pvStorage, uint64_t uOffset,
4066 const void *pvBuf, size_t cbWrite, size_t *pcbWritten)
4067{
4068 RT_NOREF1(pvUser);
4069 PVDIIOFALLBACKSTORAGE pStorage = (PVDIIOFALLBACKSTORAGE)pvStorage;
4070
4071 return RTFileWriteAt(pStorage->File, uOffset, pvBuf, cbWrite, pcbWritten);
4072}
4073
4074/**
4075 * VD async I/O interface callback for a synchronous read from the file.
4076 */
4077static DECLCALLBACK(int) vdIOReadSyncFallback(void *pvUser, void *pvStorage, uint64_t uOffset,
4078 void *pvBuf, size_t cbRead, size_t *pcbRead)
4079{
4080 RT_NOREF1(pvUser);
4081 PVDIIOFALLBACKSTORAGE pStorage = (PVDIIOFALLBACKSTORAGE)pvStorage;
4082
4083 return RTFileReadAt(pStorage->File, uOffset, pvBuf, cbRead, pcbRead);
4084}
4085
4086/**
4087 * VD async I/O interface callback for a synchronous flush of the file data.
4088 */
4089static DECLCALLBACK(int) vdIOFlushSyncFallback(void *pvUser, void *pvStorage)
4090{
4091 RT_NOREF1(pvUser);
4092 PVDIIOFALLBACKSTORAGE pStorage = (PVDIIOFALLBACKSTORAGE)pvStorage;
4093
4094 return RTFileFlush(pStorage->File);
4095}
4096
4097/**
4098 * VD async I/O interface callback for an asynchronous read from the file.
4099 */
4100static DECLCALLBACK(int) vdIOReadAsyncFallback(void *pvUser, void *pStorage, uint64_t uOffset,
4101 PCRTSGSEG paSegments, size_t cSegments,
4102 size_t cbRead, void *pvCompletion,
4103 void **ppTask)
4104{
4105 RT_NOREF8(pvUser, pStorage, uOffset, paSegments, cSegments, cbRead, pvCompletion, ppTask);
4106 return VERR_NOT_IMPLEMENTED;
4107}
4108
4109/**
4110 * VD async I/O interface callback for an asynchronous write to the file.
4111 */
4112static DECLCALLBACK(int) vdIOWriteAsyncFallback(void *pvUser, void *pStorage, uint64_t uOffset,
4113 PCRTSGSEG paSegments, size_t cSegments,
4114 size_t cbWrite, void *pvCompletion,
4115 void **ppTask)
4116{
4117 RT_NOREF8(pvUser, pStorage, uOffset, paSegments, cSegments, cbWrite, pvCompletion, ppTask);
4118 return VERR_NOT_IMPLEMENTED;
4119}
4120
4121/**
4122 * VD async I/O interface callback for an asynchronous flush of the file data.
4123 */
4124static DECLCALLBACK(int) vdIOFlushAsyncFallback(void *pvUser, void *pStorage,
4125 void *pvCompletion, void **ppTask)
4126{
4127 RT_NOREF4(pvUser, pStorage, pvCompletion, ppTask);
4128 return VERR_NOT_IMPLEMENTED;
4129}
4130
4131/**
4132 * Internal - Continues an I/O context after
4133 * it was halted because of an active transfer.
4134 */
4135static int vdIoCtxContinue(PVDIOCTX pIoCtx, int rcReq)
4136{
4137 PVBOXHDD pDisk = pIoCtx->pDisk;
4138 int rc = VINF_SUCCESS;
4139
4140 VD_IS_LOCKED(pDisk);
4141
4142 if (RT_FAILURE(rcReq))
4143 ASMAtomicCmpXchgS32(&pIoCtx->rcReq, rcReq, VINF_SUCCESS);
4144
4145 if (!(pIoCtx->fFlags & VDIOCTX_FLAGS_BLOCKED))
4146 {
4147 /* Continue the transfer */
4148 rc = vdIoCtxProcessLocked(pIoCtx);
4149
4150 if ( rc == VINF_VD_ASYNC_IO_FINISHED
4151 && ASMAtomicCmpXchgBool(&pIoCtx->fComplete, true, false))
4152 {
4153 LogFlowFunc(("I/O context completed pIoCtx=%#p\n", pIoCtx));
4154 if (pIoCtx->pIoCtxParent)
4155 {
4156 PVDIOCTX pIoCtxParent = pIoCtx->pIoCtxParent;
4157
4158 Assert(!pIoCtxParent->pIoCtxParent);
4159 if (RT_FAILURE(pIoCtx->rcReq))
4160 ASMAtomicCmpXchgS32(&pIoCtxParent->rcReq, pIoCtx->rcReq, VINF_SUCCESS);
4161
4162 ASMAtomicDecU32(&pIoCtxParent->cDataTransfersPending);
4163
4164 if (pIoCtx->enmTxDir == VDIOCTXTXDIR_WRITE)
4165 {
4166 LogFlowFunc(("I/O context transferred %u bytes for the parent pIoCtxParent=%p\n",
4167 pIoCtx->Type.Child.cbTransferParent, pIoCtxParent));
4168
4169 /* Update the parent state. */
4170 Assert(pIoCtxParent->Req.Io.cbTransferLeft >= pIoCtx->Type.Child.cbTransferParent);
4171 ASMAtomicSubU32(&pIoCtxParent->Req.Io.cbTransferLeft, (uint32_t)pIoCtx->Type.Child.cbTransferParent);
4172 }
4173 else
4174 Assert(pIoCtx->enmTxDir == VDIOCTXTXDIR_FLUSH);
4175
4176 /*
4177 * A completed child write means that we finished growing the image.
4178 * We have to process any pending writes now.
4179 */
4180 vdIoCtxUnlockDisk(pDisk, pIoCtxParent, false /* fProcessDeferredReqs */);
4181
4182 /* Unblock the parent */
4183 pIoCtxParent->fFlags &= ~VDIOCTX_FLAGS_BLOCKED;
4184
4185 rc = vdIoCtxProcessLocked(pIoCtxParent);
4186
4187 if ( rc == VINF_VD_ASYNC_IO_FINISHED
4188 && ASMAtomicCmpXchgBool(&pIoCtxParent->fComplete, true, false))
4189 {
4190 LogFlowFunc(("Parent I/O context completed pIoCtxParent=%#p rcReq=%Rrc\n", pIoCtxParent, pIoCtxParent->rcReq));
4191 vdIoCtxRootComplete(pDisk, pIoCtxParent);
4192 vdThreadFinishWrite(pDisk);
4193 vdIoCtxFree(pDisk, pIoCtxParent);
4194 vdDiskProcessBlockedIoCtx(pDisk);
4195 }
4196 else if (!vdIoCtxIsDiskLockOwner(pDisk, pIoCtx))
4197 {
4198                /* Process any pending writes if the current request didn't cause the image to grow again. */
4199 vdDiskProcessBlockedIoCtx(pDisk);
4200 }
4201 }
4202 else
4203 {
4204 if (pIoCtx->enmTxDir == VDIOCTXTXDIR_FLUSH)
4205 {
4206                    vdIoCtxUnlockDisk(pDisk, pIoCtx, true /* fProcessDeferredReqs */);
4207 vdThreadFinishWrite(pDisk);
4208 }
4209 else if ( pIoCtx->enmTxDir == VDIOCTXTXDIR_WRITE
4210 || pIoCtx->enmTxDir == VDIOCTXTXDIR_DISCARD)
4211 vdThreadFinishWrite(pDisk);
4212 else
4213 {
4214 Assert(pIoCtx->enmTxDir == VDIOCTXTXDIR_READ);
4215 vdThreadFinishRead(pDisk);
4216 }
4217
4218 LogFlowFunc(("I/O context completed pIoCtx=%#p rcReq=%Rrc\n", pIoCtx, pIoCtx->rcReq));
4219 vdIoCtxRootComplete(pDisk, pIoCtx);
4220 }
4221
4222 vdIoCtxFree(pDisk, pIoCtx);
4223 }
4224 }
4225
4226 return VINF_SUCCESS;
4227}
4228
4229/**
4230 * Internal - Called when a user transfer completed.
4231 */
4232static int vdUserXferCompleted(PVDIOSTORAGE pIoStorage, PVDIOCTX pIoCtx,
4233 PFNVDXFERCOMPLETED pfnComplete, void *pvUser,
4234 size_t cbTransfer, int rcReq)
4235{
4236 int rc = VINF_SUCCESS;
4237 PVBOXHDD pDisk = pIoCtx->pDisk;
4238
4239 LogFlowFunc(("pIoStorage=%#p pIoCtx=%#p pfnComplete=%#p pvUser=%#p cbTransfer=%zu rcReq=%Rrc\n",
4240 pIoStorage, pIoCtx, pfnComplete, pvUser, cbTransfer, rcReq));
4241
4242 VD_IS_LOCKED(pDisk);
4243
4244 Assert(pIoCtx->Req.Io.cbTransferLeft >= cbTransfer);
4245 ASMAtomicSubU32(&pIoCtx->Req.Io.cbTransferLeft, (uint32_t)cbTransfer); Assert(cbTransfer == (uint32_t)cbTransfer);
4246 ASMAtomicDecU32(&pIoCtx->cDataTransfersPending);
4247
4248 if (pfnComplete)
4249 rc = pfnComplete(pIoStorage->pVDIo->pBackendData, pIoCtx, pvUser, rcReq);
4250
4251 if (RT_SUCCESS(rc))
4252 rc = vdIoCtxContinue(pIoCtx, rcReq);
4253 else if (rc == VERR_VD_ASYNC_IO_IN_PROGRESS)
4254 rc = VINF_SUCCESS;
4255
4256 return rc;
4257}
4258
4259static void vdIoCtxContinueDeferredList(PVDIOSTORAGE pIoStorage, PRTLISTANCHOR pListWaiting,
4260 PFNVDXFERCOMPLETED pfnComplete, void *pvUser, int rcReq)
4261{
4262 LogFlowFunc(("pIoStorage=%#p pListWaiting=%#p pfnComplete=%#p pvUser=%#p rcReq=%Rrc\n",
4263 pIoStorage, pListWaiting, pfnComplete, pvUser, rcReq));
4264
4265 /* Go through the waiting list and continue the I/O contexts. */
4266 while (!RTListIsEmpty(pListWaiting))
4267 {
4268 int rc = VINF_SUCCESS;
4269 PVDIOCTXDEFERRED pDeferred = RTListGetFirst(pListWaiting, VDIOCTXDEFERRED, NodeDeferred);
4270 PVDIOCTX pIoCtx = pDeferred->pIoCtx;
4271 RTListNodeRemove(&pDeferred->NodeDeferred);
4272
4273 RTMemFree(pDeferred);
4274 ASMAtomicDecU32(&pIoCtx->cMetaTransfersPending);
4275
4276 if (pfnComplete)
4277 rc = pfnComplete(pIoStorage->pVDIo->pBackendData, pIoCtx, pvUser, rcReq);
4278
4279 LogFlow(("Completion callback for I/O context %#p returned %Rrc\n", pIoCtx, rc));
4280
4281 if (RT_SUCCESS(rc))
4282 {
4283 rc = vdIoCtxContinue(pIoCtx, rcReq);
4284 AssertRC(rc);
4285 }
4286 else
4287 Assert(rc == VERR_VD_ASYNC_IO_IN_PROGRESS);
4288 }
4289}
4290
4291/**
4292 * Internal - Called when a meta transfer completed.
4293 */
4294static int vdMetaXferCompleted(PVDIOSTORAGE pIoStorage, PFNVDXFERCOMPLETED pfnComplete, void *pvUser,
4295 PVDMETAXFER pMetaXfer, int rcReq)
4296{
4297 PVBOXHDD pDisk = pIoStorage->pVDIo->pDisk;
4298 RTLISTNODE ListIoCtxWaiting;
4299 bool fFlush;
4300
4301 LogFlowFunc(("pIoStorage=%#p pfnComplete=%#p pvUser=%#p pMetaXfer=%#p rcReq=%Rrc\n",
4302 pIoStorage, pfnComplete, pvUser, pMetaXfer, rcReq));
4303
4304 VD_IS_LOCKED(pDisk);
4305
4306 fFlush = VDMETAXFER_TXDIR_GET(pMetaXfer->fFlags) == VDMETAXFER_TXDIR_FLUSH;
4307
4308 if (!fFlush)
4309 {
4310 RTListMove(&ListIoCtxWaiting, &pMetaXfer->ListIoCtxWaiting);
4311
4312 if (RT_FAILURE(rcReq))
4313 {
4314 /* Remove from the AVL tree. */
4315 LogFlow(("Removing meta xfer=%#p\n", pMetaXfer));
4316 bool fRemoved = RTAvlrFileOffsetRemove(pIoStorage->pTreeMetaXfers, pMetaXfer->Core.Key) != NULL;
4317 Assert(fRemoved); NOREF(fRemoved);
4318            /* If this was a write, check if there is a shadow buffer with updated data. */
4319 if (pMetaXfer->pbDataShw)
4320 {
4321 Assert(VDMETAXFER_TXDIR_GET(pMetaXfer->fFlags) == VDMETAXFER_TXDIR_WRITE);
4322 Assert(!RTListIsEmpty(&pMetaXfer->ListIoCtxShwWrites));
4323 RTListConcatenate(&ListIoCtxWaiting, &pMetaXfer->ListIoCtxShwWrites);
4324 RTMemFree(pMetaXfer->pbDataShw);
4325 pMetaXfer->pbDataShw = NULL;
4326 }
4327 RTMemFree(pMetaXfer);
4328 }
4329 else
4330 {
4331 /* Increase the reference counter to make sure it doesn't go away before the last context is processed. */
4332 pMetaXfer->cRefs++;
4333 }
4334 }
4335 else
4336 RTListMove(&ListIoCtxWaiting, &pMetaXfer->ListIoCtxWaiting);
4337
4338 VDMETAXFER_TXDIR_SET(pMetaXfer->fFlags, VDMETAXFER_TXDIR_NONE);
4339 vdIoCtxContinueDeferredList(pIoStorage, &ListIoCtxWaiting, pfnComplete, pvUser, rcReq);
4340
4341 /*
4342     * If there is a shadow buffer and the previous write was successful, update it
4343     * with the new data and trigger a new write.
4344 */
4345 if ( pMetaXfer->pbDataShw
4346 && RT_SUCCESS(rcReq)
4347 && VDMETAXFER_TXDIR_GET(pMetaXfer->fFlags) == VDMETAXFER_TXDIR_NONE)
4348 {
4349 LogFlowFunc(("pMetaXfer=%#p Updating from shadow buffer and triggering new write\n", pMetaXfer));
4350 memcpy(pMetaXfer->abData, pMetaXfer->pbDataShw, pMetaXfer->cbMeta);
4351 RTMemFree(pMetaXfer->pbDataShw);
4352 pMetaXfer->pbDataShw = NULL;
4353 Assert(!RTListIsEmpty(&pMetaXfer->ListIoCtxShwWrites));
4354
4355 /* Setup a new I/O write. */
4356 PVDIOTASK pIoTask = vdIoTaskMetaAlloc(pIoStorage, pfnComplete, pvUser, pMetaXfer);
4357 if (RT_LIKELY(pIoTask))
4358 {
4359 void *pvTask = NULL;
4360 RTSGSEG Seg;
4361
4362 Seg.cbSeg = pMetaXfer->cbMeta;
4363 Seg.pvSeg = pMetaXfer->abData;
4364
4365 VDMETAXFER_TXDIR_SET(pMetaXfer->fFlags, VDMETAXFER_TXDIR_WRITE);
4366 rcReq = pIoStorage->pVDIo->pInterfaceIo->pfnWriteAsync(pIoStorage->pVDIo->pInterfaceIo->Core.pvUser,
4367 pIoStorage->pStorage,
4368 pMetaXfer->Core.Key, &Seg, 1,
4369 pMetaXfer->cbMeta, pIoTask,
4370 &pvTask);
4371 if ( RT_SUCCESS(rcReq)
4372 || rcReq != VERR_VD_ASYNC_IO_IN_PROGRESS)
4373 {
4374 VDMETAXFER_TXDIR_SET(pMetaXfer->fFlags, VDMETAXFER_TXDIR_NONE);
4375 vdIoTaskFree(pDisk, pIoTask);
4376 }
4377 else
4378 RTListMove(&pMetaXfer->ListIoCtxWaiting, &pMetaXfer->ListIoCtxShwWrites);
4379 }
4380 else
4381 rcReq = VERR_NO_MEMORY;
4382
4383 /* Cleanup if there was an error or the request completed already. */
4384 if (rcReq != VERR_VD_ASYNC_IO_IN_PROGRESS)
4385 vdIoCtxContinueDeferredList(pIoStorage, &pMetaXfer->ListIoCtxShwWrites, pfnComplete, pvUser, rcReq);
4386 }
4387
4388 /* Remove if not used anymore. */
4389 if (!fFlush)
4390 {
4391 pMetaXfer->cRefs--;
4392 if (!pMetaXfer->cRefs && RTListIsEmpty(&pMetaXfer->ListIoCtxWaiting))
4393 {
4394 /* Remove from the AVL tree. */
4395 LogFlow(("Removing meta xfer=%#p\n", pMetaXfer));
4396 bool fRemoved = RTAvlrFileOffsetRemove(pIoStorage->pTreeMetaXfers, pMetaXfer->Core.Key) != NULL;
4397 Assert(fRemoved); NOREF(fRemoved);
4398 RTMemFree(pMetaXfer);
4399 }
4400 }
4401    else
4402 RTMemFree(pMetaXfer);
4403
4404 return VINF_SUCCESS;
4405}
4406
4407/**
4408 * Processes a list of waiting I/O tasks. The disk lock must be held by caller.
4409 *
4410 * @returns nothing.
4411 * @param pDisk The disk to process the list for.
4412 */
4413static void vdIoTaskProcessWaitingList(PVBOXHDD pDisk)
4414{
4415 LogFlowFunc(("pDisk=%#p\n", pDisk));
4416
4417 VD_IS_LOCKED(pDisk);
4418
4419 PVDIOTASK pHead = ASMAtomicXchgPtrT(&pDisk->pIoTasksPendingHead, NULL, PVDIOTASK);
4420
4421 Log(("I/O task list cleared\n"));
4422
4423    /* The list was built in LIFO order by the atomic pushes; reverse it so the tasks are processed in submission order. */
4424 PVDIOTASK pCur = pHead;
4425 pHead = NULL;
4426 while (pCur)
4427 {
4428 PVDIOTASK pInsert = pCur;
4429 pCur = pCur->pNext;
4430 pInsert->pNext = pHead;
4431 pHead = pInsert;
4432 }
4433
4434 while (pHead)
4435 {
4436 PVDIOSTORAGE pIoStorage = pHead->pIoStorage;
4437
4438 if (!pHead->fMeta)
4439 vdUserXferCompleted(pIoStorage, pHead->Type.User.pIoCtx,
4440 pHead->pfnComplete, pHead->pvUser,
4441 pHead->Type.User.cbTransfer, pHead->rcReq);
4442 else
4443 vdMetaXferCompleted(pIoStorage, pHead->pfnComplete, pHead->pvUser,
4444 pHead->Type.Meta.pMetaXfer, pHead->rcReq);
4445
4446 pCur = pHead;
4447 pHead = pHead->pNext;
4448 vdIoTaskFree(pDisk, pCur);
4449 }
4450}
4451
4452/**
4453 * Process any I/O context on the halted list.
4454 *
4455 * @returns nothing.
4456 * @param pDisk The disk.
4457 */
4458static void vdIoCtxProcessHaltedList(PVBOXHDD pDisk)
4459{
4460 LogFlowFunc(("pDisk=%#p\n", pDisk));
4461
4462 VD_IS_LOCKED(pDisk);
4463
4464 /* Get the waiting list and process it in FIFO order. */
4465 PVDIOCTX pIoCtxHead = ASMAtomicXchgPtrT(&pDisk->pIoCtxHaltedHead, NULL, PVDIOCTX);
4466
4467 /* Reverse it. */
4468 PVDIOCTX pCur = pIoCtxHead;
4469 pIoCtxHead = NULL;
4470 while (pCur)
4471 {
4472 PVDIOCTX pInsert = pCur;
4473 pCur = pCur->pIoCtxNext;
4474 pInsert->pIoCtxNext = pIoCtxHead;
4475 pIoCtxHead = pInsert;
4476 }
4477
4478 /* Process now. */
4479 pCur = pIoCtxHead;
4480 while (pCur)
4481 {
4482 PVDIOCTX pTmp = pCur;
4483
4484 pCur = pCur->pIoCtxNext;
4485 pTmp->pIoCtxNext = NULL;
4486
4487 /* Continue */
4488 pTmp->fFlags &= ~VDIOCTX_FLAGS_BLOCKED;
4489 vdIoCtxContinue(pTmp, pTmp->rcReq);
4490 }
4491}
4492
4493/**
4494 * Unlock the disk and process pending tasks.
4495 *
4496 * @returns VBox status code.
4497 * @param pDisk The disk to unlock.
4498 * @param pIoCtxRc The I/O context to get the status code from, optional.
4499 */
4500static int vdDiskUnlock(PVBOXHDD pDisk, PVDIOCTX pIoCtxRc)
4501{
4502 int rc = VINF_SUCCESS;
4503
4504 VD_IS_LOCKED(pDisk);
4505
4506 /*
4507 * Process the list of waiting I/O tasks first
4508 * because they might complete I/O contexts.
4509 * Same for the list of halted I/O contexts.
4510 * Afterwards comes the list of new I/O contexts.
4511 */
4512 vdIoTaskProcessWaitingList(pDisk);
4513 vdIoCtxProcessHaltedList(pDisk);
4514 rc = vdDiskProcessWaitingIoCtx(pDisk, pIoCtxRc);
4515 ASMAtomicXchgBool(&pDisk->fLocked, false);
4516
4517 /*
4518 * Need to check for new I/O tasks and waiting I/O contexts now
4519     * again as other threads might have added them while we processed
4520 * previous lists.
4521 */
4522 while ( ASMAtomicUoReadPtrT(&pDisk->pIoCtxHead, PVDIOCTX) != NULL
4523 || ASMAtomicUoReadPtrT(&pDisk->pIoTasksPendingHead, PVDIOTASK) != NULL
4524 || ASMAtomicUoReadPtrT(&pDisk->pIoCtxHaltedHead, PVDIOCTX) != NULL)
4525 {
4526 /* Try lock disk again. */
4527 if (ASMAtomicCmpXchgBool(&pDisk->fLocked, true, false))
4528 {
4529 vdIoTaskProcessWaitingList(pDisk);
4530 vdIoCtxProcessHaltedList(pDisk);
4531 vdDiskProcessWaitingIoCtx(pDisk, NULL);
4532 ASMAtomicXchgBool(&pDisk->fLocked, false);
4533 }
4534        else /* Let the other thread process everything when it unlocks the disk. */
4535 break;
4536 }
4537
4538 return rc;
4539}
4540
4541/**
4542 * Try to lock the disk to complete processing of the I/O task.
4543 * The completion is deferred if the disk is locked already.
4544 *
4545 * @returns nothing.
4546 * @param pIoTask The I/O task to complete.
4547 */
4548static void vdXferTryLockDiskDeferIoTask(PVDIOTASK pIoTask)
4549{
4550 PVDIOSTORAGE pIoStorage = pIoTask->pIoStorage;
4551 PVBOXHDD pDisk = pIoStorage->pVDIo->pDisk;
4552
4553 Log(("Deferring I/O task pIoTask=%p\n", pIoTask));
4554
4555 /* Put it on the waiting list. */
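    /* Lock-free prepend: retry the compare-and-exchange with the currently
     * observed head until this task becomes the new list head. */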
4556 PVDIOTASK pNext = ASMAtomicUoReadPtrT(&pDisk->pIoTasksPendingHead, PVDIOTASK);
4557 PVDIOTASK pHeadOld;
4558 pIoTask->pNext = pNext;
4559 while (!ASMAtomicCmpXchgExPtr(&pDisk->pIoTasksPendingHead, pIoTask, pNext, &pHeadOld))
4560 {
4561 pNext = pHeadOld;
4562 Assert(pNext != pIoTask);
4563 pIoTask->pNext = pNext;
4564 ASMNopPause();
4565 }
4566
4567 if (ASMAtomicCmpXchgBool(&pDisk->fLocked, true, false))
4568 {
4569 /* Release disk lock, it will take care of processing all lists. */
4570 vdDiskUnlock(pDisk, NULL);
4571 }
4572}
4573
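/**
 * Completion callback for I/O tasks created by the internal I/O interface,
 * stores the request status and defers further processing until the disk
 * lock can be acquired.
 */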
4574static DECLCALLBACK(int) vdIOIntReqCompleted(void *pvUser, int rcReq)
4575{
4576 PVDIOTASK pIoTask = (PVDIOTASK)pvUser;
4577
4578 LogFlowFunc(("Task completed pIoTask=%#p\n", pIoTask));
4579
4580 pIoTask->rcReq = rcReq;
4581 vdXferTryLockDiskDeferIoTask(pIoTask);
4582 return VINF_SUCCESS;
4583}
4584
4585/**
4586 * VD I/O interface callback for opening a file.
4587 */
4588static DECLCALLBACK(int) vdIOIntOpen(void *pvUser, const char *pszLocation,
4589 unsigned uOpenFlags, PPVDIOSTORAGE ppIoStorage)
4590{
4591 int rc = VINF_SUCCESS;
4592 PVDIO pVDIo = (PVDIO)pvUser;
4593 PVDIOSTORAGE pIoStorage = (PVDIOSTORAGE)RTMemAllocZ(sizeof(VDIOSTORAGE));
4594
4595 if (!pIoStorage)
4596 return VERR_NO_MEMORY;
4597
4598    /* Create the AVL tree. */
4599 pIoStorage->pTreeMetaXfers = (PAVLRFOFFTREE)RTMemAllocZ(sizeof(AVLRFOFFTREE));
4600 if (pIoStorage->pTreeMetaXfers)
4601 {
4602 rc = pVDIo->pInterfaceIo->pfnOpen(pVDIo->pInterfaceIo->Core.pvUser,
4603 pszLocation, uOpenFlags,
4604 vdIOIntReqCompleted,
4605 &pIoStorage->pStorage);
4606 if (RT_SUCCESS(rc))
4607 {
4608 pIoStorage->pVDIo = pVDIo;
4609 *ppIoStorage = pIoStorage;
4610 return VINF_SUCCESS;
4611 }
4612
4613 RTMemFree(pIoStorage->pTreeMetaXfers);
4614 }
4615 else
4616 rc = VERR_NO_MEMORY;
4617
4618 RTMemFree(pIoStorage);
4619 return rc;
4620}
4621
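/**
 * Destruction callback for the metadata transfer AVL tree, the tree is
 * expected to be empty when the storage is closed.
 */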
4622static DECLCALLBACK(int) vdIOIntTreeMetaXferDestroy(PAVLRFOFFNODECORE pNode, void *pvUser)
4623{
4624 RT_NOREF2(pNode, pvUser);
4625 AssertMsgFailed(("Tree should be empty at this point!\n"));
4626 return VINF_SUCCESS;
4627}
4628
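/**
 * VD I/O interface callback for closing a file, frees all associated
 * resources even if closing the file failed.
 */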
4629static DECLCALLBACK(int) vdIOIntClose(void *pvUser, PVDIOSTORAGE pIoStorage)
4630{
4631 int rc = VINF_SUCCESS;
4632 PVDIO pVDIo = (PVDIO)pvUser;
4633
4634 /* We free everything here, even if closing the file failed for some reason. */
4635 rc = pVDIo->pInterfaceIo->pfnClose(pVDIo->pInterfaceIo->Core.pvUser, pIoStorage->pStorage);
4636 RTAvlrFileOffsetDestroy(pIoStorage->pTreeMetaXfers, vdIOIntTreeMetaXferDestroy, NULL);
4637 RTMemFree(pIoStorage->pTreeMetaXfers);
4638 RTMemFree(pIoStorage);
4639 return rc;
4640}
4641
4642static DECLCALLBACK(int) vdIOIntDelete(void *pvUser, const char *pcszFilename)
4643{
4644 PVDIO pVDIo = (PVDIO)pvUser;
4645 return pVDIo->pInterfaceIo->pfnDelete(pVDIo->pInterfaceIo->Core.pvUser,
4646 pcszFilename);
4647}
4648
4649static DECLCALLBACK(int) vdIOIntMove(void *pvUser, const char *pcszSrc, const char *pcszDst,
4650 unsigned fMove)
4651{
4652 PVDIO pVDIo = (PVDIO)pvUser;
4653 return pVDIo->pInterfaceIo->pfnMove(pVDIo->pInterfaceIo->Core.pvUser,
4654 pcszSrc, pcszDst, fMove);
4655}
4656
4657static DECLCALLBACK(int) vdIOIntGetFreeSpace(void *pvUser, const char *pcszFilename,
4658 int64_t *pcbFreeSpace)
4659{
4660 PVDIO pVDIo = (PVDIO)pvUser;
4661 return pVDIo->pInterfaceIo->pfnGetFreeSpace(pVDIo->pInterfaceIo->Core.pvUser,
4662 pcszFilename, pcbFreeSpace);
4663}
4664
4665static DECLCALLBACK(int) vdIOIntGetModificationTime(void *pvUser, const char *pcszFilename,
4666 PRTTIMESPEC pModificationTime)
4667{
4668 PVDIO pVDIo = (PVDIO)pvUser;
4669 return pVDIo->pInterfaceIo->pfnGetModificationTime(pVDIo->pInterfaceIo->Core.pvUser,
4670 pcszFilename, pModificationTime);
4671}
4672
4673static DECLCALLBACK(int) vdIOIntGetSize(void *pvUser, PVDIOSTORAGE pIoStorage,
4674 uint64_t *pcbSize)
4675{
4676 PVDIO pVDIo = (PVDIO)pvUser;
4677 return pVDIo->pInterfaceIo->pfnGetSize(pVDIo->pInterfaceIo->Core.pvUser,
4678 pIoStorage->pStorage, pcbSize);
4679}
4680
4681static DECLCALLBACK(int) vdIOIntSetSize(void *pvUser, PVDIOSTORAGE pIoStorage,
4682 uint64_t cbSize)
4683{
4684 PVDIO pVDIo = (PVDIO)pvUser;
4685 return pVDIo->pInterfaceIo->pfnSetSize(pVDIo->pInterfaceIo->Core.pvUser,
4686 pIoStorage->pStorage, cbSize);
4687}
4688
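/**
 * VD I/O interface callback for setting the file allocation size. Falls back
 * to growing the file by writing zero-filled blocks (or truncating it) if the
 * underlying storage doesn't support optimized allocation.
 */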
4689static DECLCALLBACK(int) vdIOIntSetAllocationSize(void *pvUser, PVDIOSTORAGE pIoStorage,
4690 uint64_t cbSize, uint32_t fFlags,
4691 PVDINTERFACEPROGRESS pIfProgress,
4692 unsigned uPercentStart, unsigned uPercentSpan)
4693{
4694 PVDIO pVDIo = (PVDIO)pvUser;
4695 int rc = pVDIo->pInterfaceIo->pfnSetAllocationSize(pVDIo->pInterfaceIo->Core.pvUser,
4696 pIoStorage->pStorage, cbSize, fFlags);
4697 if (rc == VERR_NOT_SUPPORTED)
4698 {
4699 /* Fallback if the underlying medium does not support optimized storage allocation. */
4700 uint64_t cbSizeCur = 0;
4701 rc = pVDIo->pInterfaceIo->pfnGetSize(pVDIo->pInterfaceIo->Core.pvUser,
4702 pIoStorage->pStorage, &cbSizeCur);
4703 if (RT_SUCCESS(rc))
4704 {
4705 if (cbSizeCur < cbSize)
4706 {
4707 const size_t cbBuf = 128 * _1K;
4708 void *pvBuf = RTMemTmpAllocZ(cbBuf);
4709 if (RT_LIKELY(pvBuf))
4710 {
4711 uint64_t cbFill = cbSize - cbSizeCur;
4712 uint64_t uOff = 0;
4713
4714 /* Write data to all blocks. */
4715 while ( uOff < cbFill
4716 && RT_SUCCESS(rc))
4717 {
4718 size_t cbChunk = (size_t)RT_MIN(cbFill - uOff, cbBuf);
4719
4720 rc = pVDIo->pInterfaceIo->pfnWriteSync(pVDIo->pInterfaceIo->Core.pvUser,
4721 pIoStorage->pStorage, cbSizeCur + uOff,
4722 pvBuf, cbChunk, NULL);
4723 if (RT_SUCCESS(rc))
4724 {
4725 uOff += cbChunk;
4726
4727 rc = vdIfProgress(pIfProgress, uPercentStart + uOff * uPercentSpan / cbFill);
4728 }
4729 }
4730
4731 RTMemTmpFree(pvBuf);
4732 }
4733 else
4734 rc = VERR_NO_MEMORY;
4735 }
4736 else if (cbSizeCur > cbSize)
4737 rc = pVDIo->pInterfaceIo->pfnSetSize(pVDIo->pInterfaceIo->Core.pvUser,
4738 pIoStorage->pStorage, cbSize);
4739 }
4740 }
4741
4742 if (RT_SUCCESS(rc))
4743 rc = vdIfProgress(pIfProgress, uPercentStart + uPercentSpan);
4744
4745 return rc;
4746}
4747
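/**
 * VD I/O interface callback for a user (payload) read. Synchronous I/O
 * contexts are read from their single buffer segment directly; otherwise S/G
 * arrays are built and asynchronous I/O tasks are spawned until the requested
 * range is covered.
 */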
4748static DECLCALLBACK(int) vdIOIntReadUser(void *pvUser, PVDIOSTORAGE pIoStorage, uint64_t uOffset,
4749 PVDIOCTX pIoCtx, size_t cbRead)
4750{
4751 int rc = VINF_SUCCESS;
4752 PVDIO pVDIo = (PVDIO)pvUser;
4753 PVBOXHDD pDisk = pVDIo->pDisk;
4754
4755 LogFlowFunc(("pvUser=%#p pIoStorage=%#p uOffset=%llu pIoCtx=%#p cbRead=%u\n",
4756 pvUser, pIoStorage, uOffset, pIoCtx, cbRead));
4757
4758 /** @todo Enable check for sync I/O later. */
4759 if (!(pIoCtx->fFlags & VDIOCTX_FLAGS_SYNC))
4760 VD_IS_LOCKED(pDisk);
4761
4762 Assert(cbRead > 0);
4763
4764 if (pIoCtx->fFlags & VDIOCTX_FLAGS_SYNC)
4765 {
4766 RTSGSEG Seg;
4767 unsigned cSegments = 1;
4768 size_t cbTaskRead = 0;
4769
4770 /* Synchronous I/O contexts only have one buffer segment. */
4771 AssertMsgReturn(pIoCtx->Req.Io.SgBuf.cSegs == 1,
4772 ("Invalid number of buffer segments for synchronous I/O context"),
4773 VERR_INVALID_PARAMETER);
4774
4775 cbTaskRead = RTSgBufSegArrayCreate(&pIoCtx->Req.Io.SgBuf, &Seg, &cSegments, cbRead);
4776 Assert(cbRead == cbTaskRead);
4777 Assert(cSegments == 1);
4778 rc = pVDIo->pInterfaceIo->pfnReadSync(pVDIo->pInterfaceIo->Core.pvUser,
4779 pIoStorage->pStorage, uOffset,
4780 Seg.pvSeg, cbRead, NULL);
4781 if (RT_SUCCESS(rc))
4782 {
4783 Assert(cbRead == (uint32_t)cbRead);
4784 ASMAtomicSubU32(&pIoCtx->Req.Io.cbTransferLeft, (uint32_t)cbRead);
4785 }
4786 }
4787 else
4788 {
4789 /* Build the S/G array and spawn a new I/O task */
4790 while (cbRead)
4791 {
4792 RTSGSEG aSeg[VD_IO_TASK_SEGMENTS_MAX];
4793 unsigned cSegments = VD_IO_TASK_SEGMENTS_MAX;
4794 size_t cbTaskRead = RTSgBufSegArrayCreate(&pIoCtx->Req.Io.SgBuf, aSeg, &cSegments, cbRead);
4795
4796 Assert(cSegments > 0);
4797 Assert(cbTaskRead > 0);
4798 AssertMsg(cbTaskRead <= cbRead, ("Invalid number of bytes to read\n"));
4799
4800 LogFlow(("Reading %u bytes into %u segments\n", cbTaskRead, cSegments));
4801
4802#ifdef RT_STRICT
4803 for (unsigned i = 0; i < cSegments; i++)
4804 AssertMsg(aSeg[i].pvSeg && !(aSeg[i].cbSeg % 512),
4805 ("Segment %u is invalid\n", i));
4806#endif
4807
4808 Assert(cbTaskRead == (uint32_t)cbTaskRead);
4809 PVDIOTASK pIoTask = vdIoTaskUserAlloc(pIoStorage, NULL, NULL, pIoCtx, (uint32_t)cbTaskRead);
4810
4811 if (!pIoTask)
4812 return VERR_NO_MEMORY;
4813
4814 ASMAtomicIncU32(&pIoCtx->cDataTransfersPending);
4815
4816 void *pvTask;
4817 Log(("Spawning pIoTask=%p pIoCtx=%p\n", pIoTask, pIoCtx));
4818 rc = pVDIo->pInterfaceIo->pfnReadAsync(pVDIo->pInterfaceIo->Core.pvUser,
4819 pIoStorage->pStorage, uOffset,
4820 aSeg, cSegments, cbTaskRead, pIoTask,
4821 &pvTask);
4822 if (RT_SUCCESS(rc))
4823 {
4824 AssertMsg(cbTaskRead <= pIoCtx->Req.Io.cbTransferLeft, ("Impossible!\n"));
4825 ASMAtomicSubU32(&pIoCtx->Req.Io.cbTransferLeft, (uint32_t)cbTaskRead);
4826 ASMAtomicDecU32(&pIoCtx->cDataTransfersPending);
4827 vdIoTaskFree(pDisk, pIoTask);
4828 }
4829 else if (rc != VERR_VD_ASYNC_IO_IN_PROGRESS)
4830 {
4831 ASMAtomicDecU32(&pIoCtx->cDataTransfersPending);
4832 vdIoTaskFree(pDisk, pIoTask);
4833 break;
4834 }
4835
4836 uOffset += cbTaskRead;
4837 cbRead -= cbTaskRead;
4838 }
4839 }
4840
4841 LogFlowFunc(("returns rc=%Rrc\n", rc));
4842 return rc;
4843}
4844
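/**
 * VD I/O interface callback for a user (payload) write, the asynchronous path
 * mirrors vdIOIntReadUser and spawns one I/O task per S/G array chunk.
 */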
4845static DECLCALLBACK(int) vdIOIntWriteUser(void *pvUser, PVDIOSTORAGE pIoStorage, uint64_t uOffset,
4846 PVDIOCTX pIoCtx, size_t cbWrite, PFNVDXFERCOMPLETED pfnComplete,
4847 void *pvCompleteUser)
4848{
4849 int rc = VINF_SUCCESS;
4850 PVDIO pVDIo = (PVDIO)pvUser;
4851 PVBOXHDD pDisk = pVDIo->pDisk;
4852
4853 LogFlowFunc(("pvUser=%#p pIoStorage=%#p uOffset=%llu pIoCtx=%#p cbWrite=%u\n",
4854 pvUser, pIoStorage, uOffset, pIoCtx, cbWrite));
4855
4856 /** @todo Enable check for sync I/O later. */
4857 if (!(pIoCtx->fFlags & VDIOCTX_FLAGS_SYNC))
4858 VD_IS_LOCKED(pDisk);
4859
4860 Assert(cbWrite > 0);
4861
4862 if (pIoCtx->fFlags & VDIOCTX_FLAGS_SYNC)
4863 {
4864 RTSGSEG Seg;
4865 unsigned cSegments = 1;
4866 size_t cbTaskWrite = 0;
4867
4868 /* Synchronous I/O contexts only have one buffer segment. */
4869 AssertMsgReturn(pIoCtx->Req.Io.SgBuf.cSegs == 1,
4870 ("Invalid number of buffer segments for synchronous I/O context"),
4871 VERR_INVALID_PARAMETER);
4872
4873 cbTaskWrite = RTSgBufSegArrayCreate(&pIoCtx->Req.Io.SgBuf, &Seg, &cSegments, cbWrite);
4874 Assert(cbWrite == cbTaskWrite);
4875 Assert(cSegments == 1);
4876 rc = pVDIo->pInterfaceIo->pfnWriteSync(pVDIo->pInterfaceIo->Core.pvUser,
4877 pIoStorage->pStorage, uOffset,
4878 Seg.pvSeg, cbWrite, NULL);
4879 if (RT_SUCCESS(rc))
4880 {
4881 Assert(pIoCtx->Req.Io.cbTransferLeft >= cbWrite);
4882 ASMAtomicSubU32(&pIoCtx->Req.Io.cbTransferLeft, (uint32_t)cbWrite);
4883 }
4884 }
4885 else
4886 {
4887 /* Build the S/G array and spawn a new I/O task */
4888 while (cbWrite)
4889 {
4890 RTSGSEG aSeg[VD_IO_TASK_SEGMENTS_MAX];
4891 unsigned cSegments = VD_IO_TASK_SEGMENTS_MAX;
4892 size_t cbTaskWrite = 0;
4893
4894 cbTaskWrite = RTSgBufSegArrayCreate(&pIoCtx->Req.Io.SgBuf, aSeg, &cSegments, cbWrite);
4895
4896 Assert(cSegments > 0);
4897 Assert(cbTaskWrite > 0);
4898 AssertMsg(cbTaskWrite <= cbWrite, ("Invalid number of bytes to write\n"));
4899
4900 LogFlow(("Writing %u bytes from %u segments\n", cbTaskWrite, cSegments));
4901
4902#ifdef DEBUG
4903 for (unsigned i = 0; i < cSegments; i++)
4904 AssertMsg(aSeg[i].pvSeg && !(aSeg[i].cbSeg % 512),
4905 ("Segment %u is invalid\n", i));
4906#endif
4907
4908 Assert(cbTaskWrite == (uint32_t)cbTaskWrite);
4909 PVDIOTASK pIoTask = vdIoTaskUserAlloc(pIoStorage, pfnComplete, pvCompleteUser, pIoCtx, (uint32_t)cbTaskWrite);
4910
4911 if (!pIoTask)
4912 return VERR_NO_MEMORY;
4913
4914 ASMAtomicIncU32(&pIoCtx->cDataTransfersPending);
4915
4916 void *pvTask;
4917 Log(("Spawning pIoTask=%p pIoCtx=%p\n", pIoTask, pIoCtx));
4918 rc = pVDIo->pInterfaceIo->pfnWriteAsync(pVDIo->pInterfaceIo->Core.pvUser,
4919 pIoStorage->pStorage,
4920 uOffset, aSeg, cSegments,
4921 cbTaskWrite, pIoTask, &pvTask);
4922 if (RT_SUCCESS(rc))
4923 {
4924 AssertMsg(cbTaskWrite <= pIoCtx->Req.Io.cbTransferLeft, ("Impossible!\n"));
4925 ASMAtomicSubU32(&pIoCtx->Req.Io.cbTransferLeft, (uint32_t)cbTaskWrite);
4926 ASMAtomicDecU32(&pIoCtx->cDataTransfersPending);
4927 vdIoTaskFree(pDisk, pIoTask);
4928 }
4929 else if (rc != VERR_VD_ASYNC_IO_IN_PROGRESS)
4930 {
4931 ASMAtomicDecU32(&pIoCtx->cDataTransfersPending);
4932 vdIoTaskFree(pDisk, pIoTask);
4933 break;
4934 }
4935
4936 uOffset += cbTaskWrite;
4937 cbWrite -= cbTaskWrite;
4938 }
4939 }
4940
4941 LogFlowFunc(("returns rc=%Rrc\n", rc));
4942 return rc;
4943}
4944
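/**
 * VD I/O interface callback for a metadata read. Synchronous requests go
 * straight to the storage backend; asynchronous requests are tracked through
 * VDMETAXFER entries in the AVL tree so waiting I/O contexts can share the
 * same transfer.
 */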
4945static DECLCALLBACK(int) vdIOIntReadMeta(void *pvUser, PVDIOSTORAGE pIoStorage, uint64_t uOffset,
4946 void *pvBuf, size_t cbRead, PVDIOCTX pIoCtx,
4947 PPVDMETAXFER ppMetaXfer, PFNVDXFERCOMPLETED pfnComplete,
4948 void *pvCompleteUser)
4949{
4950 PVDIO pVDIo = (PVDIO)pvUser;
4951 PVBOXHDD pDisk = pVDIo->pDisk;
4952 int rc = VINF_SUCCESS;
4953 RTSGSEG Seg;
4954 PVDIOTASK pIoTask;
4955 PVDMETAXFER pMetaXfer = NULL;
4956 void *pvTask = NULL;
4957
4958 LogFlowFunc(("pvUser=%#p pIoStorage=%#p uOffset=%llu pvBuf=%#p cbRead=%u\n",
4959 pvUser, pIoStorage, uOffset, pvBuf, cbRead));
4960
4961 AssertMsgReturn( pIoCtx
4962 || (!ppMetaXfer && !pfnComplete && !pvCompleteUser),
4963 ("A synchronous metadata read is requested but the parameters are wrong\n"),
4964 VERR_INVALID_POINTER);
4965
4966 /** @todo Enable check for sync I/O later. */
4967 if ( pIoCtx
4968 && !(pIoCtx->fFlags & VDIOCTX_FLAGS_SYNC))
4969 VD_IS_LOCKED(pDisk);
4970
4971 if ( !pIoCtx
4972 || pIoCtx->fFlags & VDIOCTX_FLAGS_SYNC)
4973 {
4974 /* Handle synchronous metadata I/O. */
4975 /** @todo Integrate with metadata transfers below. */
4976 rc = pVDIo->pInterfaceIo->pfnReadSync(pVDIo->pInterfaceIo->Core.pvUser,
4977 pIoStorage->pStorage, uOffset,
4978 pvBuf, cbRead, NULL);
4979 if (ppMetaXfer)
4980 *ppMetaXfer = NULL;
4981 }
4982 else
4983 {
4984 pMetaXfer = (PVDMETAXFER)RTAvlrFileOffsetGet(pIoStorage->pTreeMetaXfers, uOffset);
4985 if (!pMetaXfer)
4986 {
4987#ifdef RT_STRICT
4988 pMetaXfer = (PVDMETAXFER)RTAvlrFileOffsetGetBestFit(pIoStorage->pTreeMetaXfers, uOffset, false /* fAbove */);
4989 AssertMsg(!pMetaXfer || (pMetaXfer->Core.Key + (RTFOFF)pMetaXfer->cbMeta <= (RTFOFF)uOffset),
4990 ("Overlapping meta transfers!\n"));
4991#endif
4992
4993 /* Allocate a new meta transfer. */
4994 pMetaXfer = vdMetaXferAlloc(pIoStorage, uOffset, cbRead);
4995 if (!pMetaXfer)
4996 return VERR_NO_MEMORY;
4997
4998 pIoTask = vdIoTaskMetaAlloc(pIoStorage, pfnComplete, pvCompleteUser, pMetaXfer);
4999 if (!pIoTask)
5000 {
5001 RTMemFree(pMetaXfer);
5002 return VERR_NO_MEMORY;
5003 }
5004
5005 Seg.cbSeg = cbRead;
5006 Seg.pvSeg = pMetaXfer->abData;
5007
5008 VDMETAXFER_TXDIR_SET(pMetaXfer->fFlags, VDMETAXFER_TXDIR_READ);
5009 rc = pVDIo->pInterfaceIo->pfnReadAsync(pVDIo->pInterfaceIo->Core.pvUser,
5010 pIoStorage->pStorage,
5011 uOffset, &Seg, 1,
5012 cbRead, pIoTask, &pvTask);
5013
5014 if (RT_SUCCESS(rc) || rc == VERR_VD_ASYNC_IO_IN_PROGRESS)
5015 {
5016 bool fInserted = RTAvlrFileOffsetInsert(pIoStorage->pTreeMetaXfers, &pMetaXfer->Core);
5017 Assert(fInserted); NOREF(fInserted);
5018 }
5019 else
5020 RTMemFree(pMetaXfer);
5021
5022 if (RT_SUCCESS(rc))
5023 {
5024 VDMETAXFER_TXDIR_SET(pMetaXfer->fFlags, VDMETAXFER_TXDIR_NONE);
5025 vdIoTaskFree(pDisk, pIoTask);
5026 }
5027 else if (rc == VERR_VD_ASYNC_IO_IN_PROGRESS && !pfnComplete)
5028 rc = VERR_VD_NOT_ENOUGH_METADATA;
5029 }
5030
5031 Assert(VALID_PTR(pMetaXfer) || RT_FAILURE(rc));
5032
5033 if (RT_SUCCESS(rc) || rc == VERR_VD_NOT_ENOUGH_METADATA || rc == VERR_VD_ASYNC_IO_IN_PROGRESS)
5034 {
5035 /* If it is pending add the request to the list. */
5036 if (VDMETAXFER_TXDIR_GET(pMetaXfer->fFlags) == VDMETAXFER_TXDIR_READ)
5037 {
5038 PVDIOCTXDEFERRED pDeferred = (PVDIOCTXDEFERRED)RTMemAllocZ(sizeof(VDIOCTXDEFERRED));
5039 AssertPtr(pDeferred);
5040
5041 RTListInit(&pDeferred->NodeDeferred);
5042 pDeferred->pIoCtx = pIoCtx;
5043
5044 ASMAtomicIncU32(&pIoCtx->cMetaTransfersPending);
5045 RTListAppend(&pMetaXfer->ListIoCtxWaiting, &pDeferred->NodeDeferred);
5046 rc = VERR_VD_NOT_ENOUGH_METADATA;
5047 }
5048 else
5049 {
5050 /* Transfer the data. */
5051 pMetaXfer->cRefs++;
5052 Assert(pMetaXfer->cbMeta >= cbRead);
5053 Assert(pMetaXfer->Core.Key == (RTFOFF)uOffset);
5054 if (pMetaXfer->pbDataShw)
5055 memcpy(pvBuf, pMetaXfer->pbDataShw, cbRead);
5056 else
5057 memcpy(pvBuf, pMetaXfer->abData, cbRead);
5058 *ppMetaXfer = pMetaXfer;
5059 }
5060 }
5061 }
5062
5063 LogFlowFunc(("returns rc=%Rrc\n", rc));
5064 return rc;
5065}
5066
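/**
 * VD I/O interface callback for a metadata write. If a write for the same
 * range is still in flight the new data is stored in a shadow buffer and a
 * follow-up write is triggered once the current one completes (see
 * vdMetaXferCompleted).
 */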
5067static DECLCALLBACK(int) vdIOIntWriteMeta(void *pvUser, PVDIOSTORAGE pIoStorage, uint64_t uOffset,
5068 const void *pvBuf, size_t cbWrite, PVDIOCTX pIoCtx,
5069 PFNVDXFERCOMPLETED pfnComplete, void *pvCompleteUser)
5070{
5071 PVDIO pVDIo = (PVDIO)pvUser;
5072 PVBOXHDD pDisk = pVDIo->pDisk;
5073 int rc = VINF_SUCCESS;
5074 RTSGSEG Seg;
5075 PVDIOTASK pIoTask;
5076 PVDMETAXFER pMetaXfer = NULL;
5077 bool fInTree = false;
5078 void *pvTask = NULL;
5079
5080 LogFlowFunc(("pvUser=%#p pIoStorage=%#p uOffset=%llu pvBuf=%#p cbWrite=%u\n",
5081 pvUser, pIoStorage, uOffset, pvBuf, cbWrite));
5082
5083 AssertMsgReturn( pIoCtx
5084 || (!pfnComplete && !pvCompleteUser),
5085 ("A synchronous metadata write is requested but the parameters are wrong\n"),
5086 VERR_INVALID_POINTER);
5087
5088 /** @todo Enable check for sync I/O later. */
5089 if ( pIoCtx
5090 && !(pIoCtx->fFlags & VDIOCTX_FLAGS_SYNC))
5091 VD_IS_LOCKED(pDisk);
5092
5093 if ( !pIoCtx
5094 || pIoCtx->fFlags & VDIOCTX_FLAGS_SYNC)
5095 {
5096 /* Handle synchronous metadata I/O. */
5097 /** @todo Integrate with metadata transfers below. */
5098 rc = pVDIo->pInterfaceIo->pfnWriteSync(pVDIo->pInterfaceIo->Core.pvUser,
5099 pIoStorage->pStorage, uOffset,
5100 pvBuf, cbWrite, NULL);
5101 }
5102 else
5103 {
5104 pMetaXfer = (PVDMETAXFER)RTAvlrFileOffsetGet(pIoStorage->pTreeMetaXfers, uOffset);
5105 if (!pMetaXfer)
5106 {
5107 /* Allocate a new meta transfer. */
5108 pMetaXfer = vdMetaXferAlloc(pIoStorage, uOffset, cbWrite);
5109 if (!pMetaXfer)
5110 return VERR_NO_MEMORY;
5111 }
5112 else
5113 {
5114 Assert(pMetaXfer->cbMeta >= cbWrite);
5115 Assert(pMetaXfer->Core.Key == (RTFOFF)uOffset);
5116 fInTree = true;
5117 }
5118
5119 if (VDMETAXFER_TXDIR_GET(pMetaXfer->fFlags) == VDMETAXFER_TXDIR_NONE)
5120 {
5121 pIoTask = vdIoTaskMetaAlloc(pIoStorage, pfnComplete, pvCompleteUser, pMetaXfer);
5122 if (!pIoTask)
5123 {
5124 RTMemFree(pMetaXfer);
5125 return VERR_NO_MEMORY;
5126 }
5127
5128 memcpy(pMetaXfer->abData, pvBuf, cbWrite);
5129 Seg.cbSeg = cbWrite;
5130 Seg.pvSeg = pMetaXfer->abData;
5131
5132 ASMAtomicIncU32(&pIoCtx->cMetaTransfersPending);
5133
5134 VDMETAXFER_TXDIR_SET(pMetaXfer->fFlags, VDMETAXFER_TXDIR_WRITE);
5135 rc = pVDIo->pInterfaceIo->pfnWriteAsync(pVDIo->pInterfaceIo->Core.pvUser,
5136 pIoStorage->pStorage,
5137 uOffset, &Seg, 1, cbWrite, pIoTask,
5138 &pvTask);
5139 if (RT_SUCCESS(rc))
5140 {
5141 VDMETAXFER_TXDIR_SET(pMetaXfer->fFlags, VDMETAXFER_TXDIR_NONE);
5142 ASMAtomicDecU32(&pIoCtx->cMetaTransfersPending);
5143 vdIoTaskFree(pDisk, pIoTask);
5144 if (fInTree && !pMetaXfer->cRefs)
5145 {
5146 LogFlow(("Removing meta xfer=%#p\n", pMetaXfer));
5147 bool fRemoved = RTAvlrFileOffsetRemove(pIoStorage->pTreeMetaXfers, pMetaXfer->Core.Key) != NULL;
5148 AssertMsg(fRemoved, ("Metadata transfer wasn't removed\n")); NOREF(fRemoved);
5149 RTMemFree(pMetaXfer);
5150 pMetaXfer = NULL;
5151 }
5152 }
5153 else if (rc == VERR_VD_ASYNC_IO_IN_PROGRESS)
5154 {
5155 PVDIOCTXDEFERRED pDeferred = (PVDIOCTXDEFERRED)RTMemAllocZ(sizeof(VDIOCTXDEFERRED));
5156 AssertPtr(pDeferred);
5157
5158 RTListInit(&pDeferred->NodeDeferred);
5159 pDeferred->pIoCtx = pIoCtx;
5160
5161 if (!fInTree)
5162 {
5163 bool fInserted = RTAvlrFileOffsetInsert(pIoStorage->pTreeMetaXfers, &pMetaXfer->Core);
5164 Assert(fInserted); NOREF(fInserted);
5165 }
5166
5167 RTListAppend(&pMetaXfer->ListIoCtxWaiting, &pDeferred->NodeDeferred);
5168 }
5169 else
5170 {
5171 RTMemFree(pMetaXfer);
5172 pMetaXfer = NULL;
5173 }
5174 }
5175 else
5176 {
5177 /* I/O is in progress, update shadow buffer and add to waiting list. */
5178 Assert(VDMETAXFER_TXDIR_GET(pMetaXfer->fFlags) == VDMETAXFER_TXDIR_WRITE);
5179 if (!pMetaXfer->pbDataShw)
5180 {
5181 /* Allocate shadow buffer and set initial state. */
5182 LogFlowFunc(("pMetaXfer=%#p Creating shadow buffer\n", pMetaXfer));
5183 pMetaXfer->pbDataShw = (uint8_t *)RTMemAlloc(pMetaXfer->cbMeta);
5184 if (RT_LIKELY(pMetaXfer->pbDataShw))
5185 memcpy(pMetaXfer->pbDataShw, pMetaXfer->abData, pMetaXfer->cbMeta);
5186 else
5187 rc = VERR_NO_MEMORY;
5188 }
5189
5190 if (RT_SUCCESS(rc))
5191 {
5192 /* Update with written data and append to waiting list. */
5193 PVDIOCTXDEFERRED pDeferred = (PVDIOCTXDEFERRED)RTMemAllocZ(sizeof(VDIOCTXDEFERRED));
5194 if (pDeferred)
5195 {
5196 LogFlowFunc(("pMetaXfer=%#p Updating shadow buffer\n", pMetaXfer));
5197
5198 RTListInit(&pDeferred->NodeDeferred);
5199 pDeferred->pIoCtx = pIoCtx;
5200 ASMAtomicIncU32(&pIoCtx->cMetaTransfersPending);
5201 memcpy(pMetaXfer->pbDataShw, pvBuf, cbWrite);
5202 RTListAppend(&pMetaXfer->ListIoCtxShwWrites, &pDeferred->NodeDeferred);
5203 }
5204 else
5205 {
5206 /*
5207                     * Free the shadow buffer if nothing depends on it, i.e.
5208                     * we just allocated it.
5209 */
5210 if (RTListIsEmpty(&pMetaXfer->ListIoCtxShwWrites))
5211 {
5212 RTMemFree(pMetaXfer->pbDataShw);
5213 pMetaXfer->pbDataShw = NULL;
5214 }
5215 rc = VERR_NO_MEMORY;
5216 }
5217 }
5218 }
5219 }
5220
5221 LogFlowFunc(("returns rc=%Rrc\n", rc));
5222 return rc;
5223}
5224
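/**
 * VD I/O interface callback releasing a metadata transfer handle returned by
 * vdIOIntReadMeta, the transfer is freed when the last reference is dropped.
 */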
5225static DECLCALLBACK(void) vdIOIntMetaXferRelease(void *pvUser, PVDMETAXFER pMetaXfer)
5226{
5227 PVDIO pVDIo = (PVDIO)pvUser;
5228 PVBOXHDD pDisk = pVDIo->pDisk;
5229 PVDIOSTORAGE pIoStorage;
5230
5231 /*
5232 * It is possible that we get called with a NULL metadata xfer handle
5233 * for synchronous I/O. Just exit.
5234 */
5235 if (!pMetaXfer)
5236 return;
5237
5238 pIoStorage = pMetaXfer->pIoStorage;
5239
5240 VD_IS_LOCKED(pDisk);
5241
5242 Assert( VDMETAXFER_TXDIR_GET(pMetaXfer->fFlags) == VDMETAXFER_TXDIR_NONE
5243 || VDMETAXFER_TXDIR_GET(pMetaXfer->fFlags) == VDMETAXFER_TXDIR_WRITE);
5244 Assert(pMetaXfer->cRefs > 0);
5245
5246 pMetaXfer->cRefs--;
5247 if ( !pMetaXfer->cRefs
5248 && RTListIsEmpty(&pMetaXfer->ListIoCtxWaiting)
5249 && VDMETAXFER_TXDIR_GET(pMetaXfer->fFlags) == VDMETAXFER_TXDIR_NONE)
5250 {
5251 /* Free the meta data entry. */
5252 LogFlow(("Removing meta xfer=%#p\n", pMetaXfer));
5253 bool fRemoved = RTAvlrFileOffsetRemove(pIoStorage->pTreeMetaXfers, pMetaXfer->Core.Key) != NULL;
5254 AssertMsg(fRemoved, ("Metadata transfer wasn't removed\n")); NOREF(fRemoved);
5255
5256 RTMemFree(pMetaXfer);
5257 }
5258}
5259
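/**
 * VD I/O interface callback for flushing the underlying storage, implemented
 * as a special metadata transfer unless flushes are ignored for this image.
 */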
5260static DECLCALLBACK(int) vdIOIntFlush(void *pvUser, PVDIOSTORAGE pIoStorage, PVDIOCTX pIoCtx,
5261 PFNVDXFERCOMPLETED pfnComplete, void *pvCompleteUser)
5262{
5263 PVDIO pVDIo = (PVDIO)pvUser;
5264 PVBOXHDD pDisk = pVDIo->pDisk;
5265 int rc = VINF_SUCCESS;
5266 PVDIOTASK pIoTask;
5267 PVDMETAXFER pMetaXfer = NULL;
5268 void *pvTask = NULL;
5269
5270 LogFlowFunc(("pvUser=%#p pIoStorage=%#p pIoCtx=%#p\n",
5271 pvUser, pIoStorage, pIoCtx));
5272
5273 AssertMsgReturn( pIoCtx
5274 || (!pfnComplete && !pvCompleteUser),
5275 ("A synchronous metadata write is requested but the parameters are wrong\n"),
5276 VERR_INVALID_POINTER);
5277
5278 /** @todo Enable check for sync I/O later. */
5279 if ( pIoCtx
5280 && !(pIoCtx->fFlags & VDIOCTX_FLAGS_SYNC))
5281 VD_IS_LOCKED(pDisk);
5282
5283 if (pVDIo->fIgnoreFlush)
5284 return VINF_SUCCESS;
5285
5286 if ( !pIoCtx
5287 || pIoCtx->fFlags & VDIOCTX_FLAGS_SYNC)
5288 {
5289 /* Handle synchronous flushes. */
5290 /** @todo Integrate with metadata transfers below. */
5291 rc = pVDIo->pInterfaceIo->pfnFlushSync(pVDIo->pInterfaceIo->Core.pvUser,
5292 pIoStorage->pStorage);
5293 }
5294 else
5295 {
5296 /* Allocate a new meta transfer. */
5297 pMetaXfer = vdMetaXferAlloc(pIoStorage, 0, 0);
5298 if (!pMetaXfer)
5299 return VERR_NO_MEMORY;
5300
5301 pIoTask = vdIoTaskMetaAlloc(pIoStorage, pfnComplete, pvUser, pMetaXfer);
5302 if (!pIoTask)
5303 {
5304 RTMemFree(pMetaXfer);
5305 return VERR_NO_MEMORY;
5306 }
5307
5308 ASMAtomicIncU32(&pIoCtx->cMetaTransfersPending);
5309
5310 PVDIOCTXDEFERRED pDeferred = (PVDIOCTXDEFERRED)RTMemAllocZ(sizeof(VDIOCTXDEFERRED));
5311 AssertPtr(pDeferred);
5312
5313 RTListInit(&pDeferred->NodeDeferred);
5314 pDeferred->pIoCtx = pIoCtx;
5315
5316 RTListAppend(&pMetaXfer->ListIoCtxWaiting, &pDeferred->NodeDeferred);
5317 VDMETAXFER_TXDIR_SET(pMetaXfer->fFlags, VDMETAXFER_TXDIR_FLUSH);
5318 rc = pVDIo->pInterfaceIo->pfnFlushAsync(pVDIo->pInterfaceIo->Core.pvUser,
5319 pIoStorage->pStorage,
5320 pIoTask, &pvTask);
5321 if (RT_SUCCESS(rc))
5322 {
5323 VDMETAXFER_TXDIR_SET(pMetaXfer->fFlags, VDMETAXFER_TXDIR_NONE);
5324 ASMAtomicDecU32(&pIoCtx->cMetaTransfersPending);
5325 vdIoTaskFree(pDisk, pIoTask);
5326 RTMemFree(pDeferred);
5327 RTMemFree(pMetaXfer);
5328 }
5329 else if (rc != VERR_VD_ASYNC_IO_IN_PROGRESS)
5330 RTMemFree(pMetaXfer);
5331 }
5332
5333 LogFlowFunc(("returns rc=%Rrc\n", rc));
5334 return rc;
5335}
5336
5337static DECLCALLBACK(size_t) vdIOIntIoCtxCopyTo(void *pvUser, PVDIOCTX pIoCtx,
5338 const void *pvBuf, size_t cbBuf)
5339{
5340 PVDIO pVDIo = (PVDIO)pvUser;
5341 PVBOXHDD pDisk = pVDIo->pDisk;
5342 size_t cbCopied = 0;
5343
5344 /** @todo Enable check for sync I/O later. */
5345 if (!(pIoCtx->fFlags & VDIOCTX_FLAGS_SYNC))
5346 VD_IS_LOCKED(pDisk);
5347
5348 cbCopied = vdIoCtxCopyTo(pIoCtx, (uint8_t *)pvBuf, cbBuf);
5349 Assert(cbCopied == cbBuf);
5350
5351 /// @todo Assert(pIoCtx->Req.Io.cbTransferLeft >= cbCopied); - triggers with vdCopyHelper/dmgRead.
5352 ASMAtomicSubU32(&pIoCtx->Req.Io.cbTransferLeft, (uint32_t)cbCopied);
5353
5354 return cbCopied;
5355}
5356
5357static DECLCALLBACK(size_t) vdIOIntIoCtxCopyFrom(void *pvUser, PVDIOCTX pIoCtx,
5358 void *pvBuf, size_t cbBuf)
5359{
5360 PVDIO pVDIo = (PVDIO)pvUser;
5361 PVBOXHDD pDisk = pVDIo->pDisk;
5362 size_t cbCopied = 0;
5363
5364 /** @todo Enable check for sync I/O later. */
5365 if (!(pIoCtx->fFlags & VDIOCTX_FLAGS_SYNC))
5366 VD_IS_LOCKED(pDisk);
5367
5368 cbCopied = vdIoCtxCopyFrom(pIoCtx, (uint8_t *)pvBuf, cbBuf);
5369 Assert(cbCopied == cbBuf);
5370
5371 /// @todo Assert(pIoCtx->Req.Io.cbTransferLeft > cbCopied); - triggers with vdCopyHelper/dmgRead.
5372 ASMAtomicSubU32(&pIoCtx->Req.Io.cbTransferLeft, (uint32_t)cbCopied);
5373
5374 return cbCopied;
5375}
5376
5377static DECLCALLBACK(size_t) vdIOIntIoCtxSet(void *pvUser, PVDIOCTX pIoCtx, int ch, size_t cb)
5378{
5379 PVDIO pVDIo = (PVDIO)pvUser;
5380 PVBOXHDD pDisk = pVDIo->pDisk;
5381 size_t cbSet = 0;
5382
5383 /** @todo Enable check for sync I/O later. */
5384 if (!(pIoCtx->fFlags & VDIOCTX_FLAGS_SYNC))
5385 VD_IS_LOCKED(pDisk);
5386
5387 cbSet = vdIoCtxSet(pIoCtx, ch, cb);
5388 Assert(cbSet == cb);
5389
5390 /// @todo Assert(pIoCtx->Req.Io.cbTransferLeft >= cbSet); - triggers with vdCopyHelper/dmgRead.
5391 ASMAtomicSubU32(&pIoCtx->Req.Io.cbTransferLeft, (uint32_t)cbSet);
5392
5393 return cbSet;
5394}
5395
5396static DECLCALLBACK(size_t) vdIOIntIoCtxSegArrayCreate(void *pvUser, PVDIOCTX pIoCtx,
5397 PRTSGSEG paSeg, unsigned *pcSeg,
5398 size_t cbData)
5399{
5400 PVDIO pVDIo = (PVDIO)pvUser;
5401 PVBOXHDD pDisk = pVDIo->pDisk;
5402 size_t cbCreated = 0;
5403
5404 /** @todo It is possible that this gets called from a filter plugin
5405 * outside of the disk lock. Refine assertion or remove completely. */
5406#if 0
5407 /** @todo Enable check for sync I/O later. */
5408 if (!(pIoCtx->fFlags & VDIOCTX_FLAGS_SYNC))
5409 VD_IS_LOCKED(pDisk);
5410#else
5411 NOREF(pDisk);
5412#endif
5413
5414 cbCreated = RTSgBufSegArrayCreate(&pIoCtx->Req.Io.SgBuf, paSeg, pcSeg, cbData);
5415 Assert(!paSeg || cbData == cbCreated);
5416
5417 return cbCreated;
5418}
5419
5420static DECLCALLBACK(void) vdIOIntIoCtxCompleted(void *pvUser, PVDIOCTX pIoCtx, int rcReq,
5421 size_t cbCompleted)
5422{
5423 PVDIO pVDIo = (PVDIO)pvUser;
5424 PVBOXHDD pDisk = pVDIo->pDisk;
5425
5426 LogFlowFunc(("pvUser=%#p pIoCtx=%#p rcReq=%Rrc cbCompleted=%zu\n",
5427 pvUser, pIoCtx, rcReq, cbCompleted));
5428
5429 /*
5430 * Grab the disk critical section to avoid races with other threads which
5431 * might still modify the I/O context.
5432      * An example is iSCSI doing an asynchronous write but calling us while the
5433      * other thread is still in vdWriteHelperAsync and hasn't updated the blocked
5434      * state yet.
5435      * It could overwrite the state to true before we call vdIoCtxContinue and
5436      * the request would hang indefinitely.
5437 */
5438 ASMAtomicCmpXchgS32(&pIoCtx->rcReq, rcReq, VINF_SUCCESS);
5439 Assert(pIoCtx->Req.Io.cbTransferLeft >= cbCompleted);
5440 ASMAtomicSubU32(&pIoCtx->Req.Io.cbTransferLeft, (uint32_t)cbCompleted);
5441
5442 /* Set next transfer function if the current one finished.
5443 * @todo: Find a better way to prevent vdIoCtxContinue from calling the current helper again. */
5444 if (!pIoCtx->Req.Io.cbTransferLeft)
5445 {
5446 pIoCtx->pfnIoCtxTransfer = pIoCtx->pfnIoCtxTransferNext;
5447 pIoCtx->pfnIoCtxTransferNext = NULL;
5448 }
5449
5450 vdIoCtxAddToWaitingList(&pDisk->pIoCtxHaltedHead, pIoCtx);
5451 if (ASMAtomicCmpXchgBool(&pDisk->fLocked, true, false))
5452 {
5453 /* Immediately drop the lock again, it will take care of processing the list. */
5454 vdDiskUnlock(pDisk, NULL);
5455 }
5456}
5457
5458static DECLCALLBACK(bool) vdIOIntIoCtxIsSynchronous(void *pvUser, PVDIOCTX pIoCtx)
5459{
5460 NOREF(pvUser);
5461 return !!(pIoCtx->fFlags & VDIOCTX_FLAGS_SYNC);
5462}
5463
5464static DECLCALLBACK(bool) vdIOIntIoCtxIsZero(void *pvUser, PVDIOCTX pIoCtx, size_t cbCheck,
5465 bool fAdvance)
5466{
5467 NOREF(pvUser);
5468
5469 bool fIsZero = RTSgBufIsZero(&pIoCtx->Req.Io.SgBuf, cbCheck);
5470 if (fIsZero && fAdvance)
5471 RTSgBufAdvance(&pIoCtx->Req.Io.SgBuf, cbCheck);
5472
5473 return fIsZero;
5474}
5475
5476static DECLCALLBACK(size_t) vdIOIntIoCtxGetDataUnitSize(void *pvUser, PVDIOCTX pIoCtx)
5477{
5478 RT_NOREF1(pIoCtx);
5479 PVDIO pVDIo = (PVDIO)pvUser;
5480 PVBOXHDD pDisk = pVDIo->pDisk;
5481
5482 PVDIMAGE pImage = vdGetImageByNumber(pDisk, VD_LAST_IMAGE);
5483 AssertPtrReturn(pImage, 0);
5484 return pImage->Backend->pfnGetSectorSize(pImage->pBackendData);
5485}
5486
5487/**
5488 * VD I/O interface callback for opening a file (limited version for VDGetFormat).
5489 */
5490static DECLCALLBACK(int) vdIOIntOpenLimited(void *pvUser, const char *pszLocation,
5491 uint32_t fOpen, PPVDIOSTORAGE ppIoStorage)
5492{
5493 int rc = VINF_SUCCESS;
5494 PVDINTERFACEIO pInterfaceIo = (PVDINTERFACEIO)pvUser;
5495 PVDIOSTORAGE pIoStorage = (PVDIOSTORAGE)RTMemAllocZ(sizeof(VDIOSTORAGE));
5496
5497 if (!pIoStorage)
5498 return VERR_NO_MEMORY;
5499
5500 rc = pInterfaceIo->pfnOpen(NULL, pszLocation, fOpen, NULL, &pIoStorage->pStorage);
5501 if (RT_SUCCESS(rc))
5502 *ppIoStorage = pIoStorage;
5503 else
5504 RTMemFree(pIoStorage);
5505
5506 return rc;
5507}
5508
5509static DECLCALLBACK(int) vdIOIntCloseLimited(void *pvUser, PVDIOSTORAGE pIoStorage)
5510{
5511 PVDINTERFACEIO pInterfaceIo = (PVDINTERFACEIO)pvUser;
5512 int rc = pInterfaceIo->pfnClose(NULL, pIoStorage->pStorage);
5513
5514 RTMemFree(pIoStorage);
5515 return rc;
5516}
5517
5518static DECLCALLBACK(int) vdIOIntDeleteLimited(void *pvUser, const char *pcszFilename)
5519{
5520 PVDINTERFACEIO pInterfaceIo = (PVDINTERFACEIO)pvUser;
5521 return pInterfaceIo->pfnDelete(NULL, pcszFilename);
5522}
5523
5524static DECLCALLBACK(int) vdIOIntMoveLimited(void *pvUser, const char *pcszSrc,
5525 const char *pcszDst, unsigned fMove)
5526{
5527 PVDINTERFACEIO pInterfaceIo = (PVDINTERFACEIO)pvUser;
5528 return pInterfaceIo->pfnMove(NULL, pcszSrc, pcszDst, fMove);
5529}
5530
5531static DECLCALLBACK(int) vdIOIntGetFreeSpaceLimited(void *pvUser, const char *pcszFilename,
5532 int64_t *pcbFreeSpace)
5533{
5534 PVDINTERFACEIO pInterfaceIo = (PVDINTERFACEIO)pvUser;
5535 return pInterfaceIo->pfnGetFreeSpace(NULL, pcszFilename, pcbFreeSpace);
5536}
5537
5538static DECLCALLBACK(int) vdIOIntGetModificationTimeLimited(void *pvUser,
5539 const char *pcszFilename,
5540 PRTTIMESPEC pModificationTime)
5541{
5542 PVDINTERFACEIO pInterfaceIo = (PVDINTERFACEIO)pvUser;
5543 return pInterfaceIo->pfnGetModificationTime(NULL, pcszFilename, pModificationTime);
5544}
5545
5546static DECLCALLBACK(int) vdIOIntGetSizeLimited(void *pvUser, PVDIOSTORAGE pIoStorage,
5547 uint64_t *pcbSize)
5548{
5549 PVDINTERFACEIO pInterfaceIo = (PVDINTERFACEIO)pvUser;
5550 return pInterfaceIo->pfnGetSize(NULL, pIoStorage->pStorage, pcbSize);
5551}
5552
5553static DECLCALLBACK(int) vdIOIntSetSizeLimited(void *pvUser, PVDIOSTORAGE pIoStorage,
5554 uint64_t cbSize)
5555{
5556 PVDINTERFACEIO pInterfaceIo = (PVDINTERFACEIO)pvUser;
5557 return pInterfaceIo->pfnSetSize(NULL, pIoStorage->pStorage, cbSize);
5558}
5559
5560static DECLCALLBACK(int) vdIOIntWriteUserLimited(void *pvUser, PVDIOSTORAGE pStorage,
5561 uint64_t uOffset, PVDIOCTX pIoCtx,
5562 size_t cbWrite,
5563 PFNVDXFERCOMPLETED pfnComplete,
5564 void *pvCompleteUser)
5565{
5566 NOREF(pvUser);
5567 NOREF(pStorage);
5568 NOREF(uOffset);
5569 NOREF(pIoCtx);
5570 NOREF(cbWrite);
5571 NOREF(pfnComplete);
5572 NOREF(pvCompleteUser);
5573 AssertMsgFailedReturn(("This needs to be implemented when called\n"), VERR_NOT_IMPLEMENTED);
5574}
5575
5576static DECLCALLBACK(int) vdIOIntReadUserLimited(void *pvUser, PVDIOSTORAGE pStorage,
5577 uint64_t uOffset, PVDIOCTX pIoCtx,
5578 size_t cbRead)
5579{
5580 NOREF(pvUser);
5581 NOREF(pStorage);
5582 NOREF(uOffset);
5583 NOREF(pIoCtx);
5584 NOREF(cbRead);
5585 AssertMsgFailedReturn(("This needs to be implemented when called\n"), VERR_NOT_IMPLEMENTED);
5586}
5587
5588static DECLCALLBACK(int) vdIOIntWriteMetaLimited(void *pvUser, PVDIOSTORAGE pStorage,
5589 uint64_t uOffset, const void *pvBuffer,
5590 size_t cbBuffer, PVDIOCTX pIoCtx,
5591 PFNVDXFERCOMPLETED pfnComplete,
5592 void *pvCompleteUser)
5593{
5594 PVDINTERFACEIO pInterfaceIo = (PVDINTERFACEIO)pvUser;
5595
5596 AssertMsgReturn(!pIoCtx && !pfnComplete && !pvCompleteUser,
5597 ("Async I/O not implemented for the limited interface"),
5598 VERR_NOT_SUPPORTED);
5599
5600 return pInterfaceIo->pfnWriteSync(NULL, pStorage->pStorage, uOffset, pvBuffer, cbBuffer, NULL);
5601}
5602
5603static DECLCALLBACK(int) vdIOIntReadMetaLimited(void *pvUser, PVDIOSTORAGE pStorage,
5604 uint64_t uOffset, void *pvBuffer,
5605 size_t cbBuffer, PVDIOCTX pIoCtx,
5606 PPVDMETAXFER ppMetaXfer,
5607 PFNVDXFERCOMPLETED pfnComplete,
5608 void *pvCompleteUser)
5609{
5610 PVDINTERFACEIO pInterfaceIo = (PVDINTERFACEIO)pvUser;
5611
5612 AssertMsgReturn(!pIoCtx && !ppMetaXfer && !pfnComplete && !pvCompleteUser,
5613 ("Async I/O not implemented for the limited interface"),
5614 VERR_NOT_SUPPORTED);
5615
5616 return pInterfaceIo->pfnReadSync(NULL, pStorage->pStorage, uOffset, pvBuffer, cbBuffer, NULL);
5617}
5618
5619#if 0 /* unused */
5620static int vdIOIntMetaXferReleaseLimited(void *pvUser, PVDMETAXFER pMetaXfer)
5621{
5622 /* This is a NOP in this case. */
5623 NOREF(pvUser);
5624 NOREF(pMetaXfer);
5625 return VINF_SUCCESS;
5626}
5627#endif
5628
5629static DECLCALLBACK(int) vdIOIntFlushLimited(void *pvUser, PVDIOSTORAGE pStorage,
5630 PVDIOCTX pIoCtx,
5631 PFNVDXFERCOMPLETED pfnComplete,
5632 void *pvCompleteUser)
5633{
5634 PVDINTERFACEIO pInterfaceIo = (PVDINTERFACEIO)pvUser;
5635
5636 AssertMsgReturn(!pIoCtx && !pfnComplete && !pvCompleteUser,
5637 ("Async I/O not implemented for the limited interface"),
5638 VERR_NOT_SUPPORTED);
5639
5640 return pInterfaceIo->pfnFlushSync(NULL, pStorage->pStorage);
5641}
5642
5643/**
5644 * internal: send output to the log (unconditionally).
5645 */
5646static DECLCALLBACK(int) vdLogMessage(void *pvUser, const char *pszFormat, va_list args)
5647{
5648 NOREF(pvUser);
5649 RTLogPrintfV(pszFormat, args);
5650 return VINF_SUCCESS;
5651}
5652
5653DECLINLINE(int) vdMessageWrapper(PVBOXHDD pDisk, const char *pszFormat, ...)
5654{
5655 va_list va;
5656 va_start(va, pszFormat);
5657 int rc = pDisk->pInterfaceError->pfnMessage(pDisk->pInterfaceError->Core.pvUser,
5658 pszFormat, va);
5659 va_end(va);
5660 return rc;
5661}
5662
5663
5664/**
5665 * internal: adjust PCHS geometry
5666 */
5667static void vdFixupPCHSGeometry(PVDGEOMETRY pPCHS, uint64_t cbSize)
5668{
5669 /* Fix broken PCHS geometry. Can happen for two reasons: either the backend
5670 * mixes up PCHS and LCHS, or the application used to create the source
5671 * image has put garbage in it. Additionally, if the PCHS geometry covers
5672 * more than the image size, set it back to the default. */
5673 if ( pPCHS->cHeads > 16
5674 || pPCHS->cSectors > 63
5675 || pPCHS->cCylinders == 0
5676 || (uint64_t)pPCHS->cHeads * pPCHS->cSectors * pPCHS->cCylinders * 512 > cbSize)
5677 {
5678 Assert(!(RT_MIN(cbSize / 512 / 16 / 63, 16383) - (uint32_t)RT_MIN(cbSize / 512 / 16 / 63, 16383)));
5679 pPCHS->cCylinders = (uint32_t)RT_MIN(cbSize / 512 / 16 / 63, 16383);
5680 pPCHS->cHeads = 16;
5681 pPCHS->cSectors = 63;
5682 }
5683}
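
/* Illustrative sketch, not compiled in: a worked example of the clamping above for an
 * 8 GiB image. The helper name vdExamplePchsFixup is made up for illustration only. */
#if 0
static void vdExamplePchsFixup(void)
{
    VDGEOMETRY Pchs;
    Pchs.cCylinders = 0;   /* Unset/bogus geometry... */
    Pchs.cHeads     = 255; /* ...cHeads > 16 triggers the fixup above. */
    Pchs.cSectors   = 63;
    vdFixupPCHSGeometry(&Pchs, 8ULL * _1G /* 8 GiB image */);
    /* Result: cCylinders = RT_MIN(8589934592 / 512 / 16 / 63, 16383) = RT_MIN(16644, 16383) = 16383,
     *         cHeads = 16, cSectors = 63. */
}
#endif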
5684
5685/**
5686 * internal: adjust LCHS geometry
5687 */
5688static void vdFixupLCHSGeometry(PVDGEOMETRY pLCHS, uint64_t cbSize)
5689{
5690 /* Fix broken LCHS geometry. Can happen for two reasons: either the backend
5691 * mixes up PCHS and LCHS, or the application used to create the source
5692 * image has put garbage in it. The fix in this case is to clear the LCHS
5693 * geometry to trigger autodetection when it is used next. If the geometry
5694 * already says "please autodetect" (cylinders=0) keep it. */
5695 if ( ( pLCHS->cHeads > 255
5696 || pLCHS->cHeads == 0
5697 || pLCHS->cSectors > 63
5698 || pLCHS->cSectors == 0)
5699 && pLCHS->cCylinders != 0)
5700 {
5701 pLCHS->cCylinders = 0;
5702 pLCHS->cHeads = 0;
5703 pLCHS->cSectors = 0;
5704 }
5705 /* Always recompute the number of cylinders stored in the LCHS
5706 * geometry if it isn't set to "autodetect" at the moment.
5707 * This is very useful if the destination image size is
5708 * larger or smaller than the source image size. Do not modify
5709 * the number of heads and sectors. Windows guests hate it. */
5710 if ( pLCHS->cCylinders != 0
5711 && pLCHS->cHeads != 0 /* paranoia */
5712 && pLCHS->cSectors != 0 /* paranoia */)
5713 {
5714 Assert(!(RT_MIN(cbSize / 512 / pLCHS->cHeads / pLCHS->cSectors, 1024) - (uint32_t)RT_MIN(cbSize / 512 / pLCHS->cHeads / pLCHS->cSectors, 1024)));
5715 pLCHS->cCylinders = (uint32_t)RT_MIN(cbSize / 512 / pLCHS->cHeads / pLCHS->cSectors, 1024);
5716 }
5717}
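
/* Illustrative sketch, not compiled in: shows the cylinder recomputation above after an
 * image was resized to 16 GiB. The helper name vdExampleLchsFixup is made up. */
#if 0
static void vdExampleLchsFixup(void)
{
    VDGEOMETRY Lchs;
    Lchs.cCylinders = 512; /* Not set to "autodetect", so only the cylinders get recomputed. */
    Lchs.cHeads     = 255;
    Lchs.cSectors   = 63;
    vdFixupLCHSGeometry(&Lchs, 16ULL * _1G /* new image size */);
    /* Result: cCylinders = RT_MIN(17179869184 / 512 / 255 / 63, 1024) = RT_MIN(2088, 1024) = 1024,
     *         cHeads and cSectors are left untouched, as explained above. */
}
#endif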
5718
5719/**
5720 * Sets the I/O callbacks of the given interface to the fallback methods
5721 * Sets the I/O callbacks of the given interface to the fallback methods.
5722 * @returns nothing.
5723 * @param pIfIo The I/O interface to setup.
5724 */
5725static void vdIfIoFallbackCallbacksSetup(PVDINTERFACEIO pIfIo)
5726{
5727 pIfIo->pfnOpen = vdIOOpenFallback;
5728 pIfIo->pfnClose = vdIOCloseFallback;
5729 pIfIo->pfnDelete = vdIODeleteFallback;
5730 pIfIo->pfnMove = vdIOMoveFallback;
5731 pIfIo->pfnGetFreeSpace = vdIOGetFreeSpaceFallback;
5732 pIfIo->pfnGetModificationTime = vdIOGetModificationTimeFallback;
5733 pIfIo->pfnGetSize = vdIOGetSizeFallback;
5734 pIfIo->pfnSetSize = vdIOSetSizeFallback;
5735 pIfIo->pfnSetAllocationSize = vdIOSetAllocationSizeFallback;
5736 pIfIo->pfnReadSync = vdIOReadSyncFallback;
5737 pIfIo->pfnWriteSync = vdIOWriteSyncFallback;
5738 pIfIo->pfnFlushSync = vdIOFlushSyncFallback;
5739 pIfIo->pfnReadAsync = vdIOReadAsyncFallback;
5740 pIfIo->pfnWriteAsync = vdIOWriteAsyncFallback;
5741 pIfIo->pfnFlushAsync = vdIOFlushAsyncFallback;
5742}
5743
5744/**
5745 * Sets the internal I/O callbacks of the given interface.
5746 *
5747 * @returns nothing.
5748 * @param pIfIoInt The internal I/O interface to setup.
5749 */
5750static void vdIfIoIntCallbacksSetup(PVDINTERFACEIOINT pIfIoInt)
5751{
5752 pIfIoInt->pfnOpen = vdIOIntOpen;
5753 pIfIoInt->pfnClose = vdIOIntClose;
5754 pIfIoInt->pfnDelete = vdIOIntDelete;
5755 pIfIoInt->pfnMove = vdIOIntMove;
5756 pIfIoInt->pfnGetFreeSpace = vdIOIntGetFreeSpace;
5757 pIfIoInt->pfnGetModificationTime = vdIOIntGetModificationTime;
5758 pIfIoInt->pfnGetSize = vdIOIntGetSize;
5759 pIfIoInt->pfnSetSize = vdIOIntSetSize;
5760 pIfIoInt->pfnSetAllocationSize = vdIOIntSetAllocationSize;
5761 pIfIoInt->pfnReadUser = vdIOIntReadUser;
5762 pIfIoInt->pfnWriteUser = vdIOIntWriteUser;
5763 pIfIoInt->pfnReadMeta = vdIOIntReadMeta;
5764 pIfIoInt->pfnWriteMeta = vdIOIntWriteMeta;
5765 pIfIoInt->pfnMetaXferRelease = vdIOIntMetaXferRelease;
5766 pIfIoInt->pfnFlush = vdIOIntFlush;
5767 pIfIoInt->pfnIoCtxCopyFrom = vdIOIntIoCtxCopyFrom;
5768 pIfIoInt->pfnIoCtxCopyTo = vdIOIntIoCtxCopyTo;
5769 pIfIoInt->pfnIoCtxSet = vdIOIntIoCtxSet;
5770 pIfIoInt->pfnIoCtxSegArrayCreate = vdIOIntIoCtxSegArrayCreate;
5771 pIfIoInt->pfnIoCtxCompleted = vdIOIntIoCtxCompleted;
5772 pIfIoInt->pfnIoCtxIsSynchronous = vdIOIntIoCtxIsSynchronous;
5773 pIfIoInt->pfnIoCtxIsZero = vdIOIntIoCtxIsZero;
5774 pIfIoInt->pfnIoCtxGetDataUnitSize = vdIOIntIoCtxGetDataUnitSize;
5775}
5776
5777/**
5778 * Internally used completion handler for synchronous I/O contexts.
5779 */
5780static DECLCALLBACK(void) vdIoCtxSyncComplete(void *pvUser1, void *pvUser2, int rcReq)
5781{
5782 RT_NOREF2(pvUser1, rcReq);
5783 RTSEMEVENT hEvent = (RTSEMEVENT)pvUser2;
5784
5785 RTSemEventSignal(hEvent);
5786}
5787
5788/**
5789 * Creates a new region list from the given one, converting it to match the given flags if necessary.
5790 *
5791 * @returns VBox status code.
5792 * @param pRegionList The region list to convert from.
5793 * @param fFlags The flags for the new region list.
5794 * @param ppRegionList Where to store the new region list on success.
5795 */
5796static int vdRegionListConv(PCVDREGIONLIST pRegionList, uint32_t fFlags, PPVDREGIONLIST ppRegionList)
5797{
5798 int rc = VINF_SUCCESS;
5799 PVDREGIONLIST pRegionListNew = (PVDREGIONLIST)RTMemDup(pRegionList, RT_UOFFSETOF(VDREGIONLIST, aRegions[pRegionList->cRegions]));
5800 if (RT_LIKELY(pRegionListNew))
5801 {
5802 /* Do we have to convert anything? */
5803 if (pRegionList->fFlags != fFlags)
5804 {
5805 uint64_t offRegionNext = 0;
5806
5807 pRegionListNew->fFlags = fFlags;
5808 for (unsigned i = 0; i < pRegionListNew->cRegions; i++)
5809 {
5810 PVDREGIONDESC pRegion = &pRegionListNew->aRegions[i];
5811
5812 if ( (fFlags & VD_REGION_LIST_F_LOC_SIZE_BLOCKS)
5813 && !(pRegionList->fFlags & VD_REGION_LIST_F_LOC_SIZE_BLOCKS))
5814 {
5815 Assert(!(pRegion->cRegionBlocksOrBytes % pRegion->cbBlock));
5816
5817 /* Convert from bytes to logical blocks. */
5818 pRegion->offRegion = offRegionNext;
5819 pRegion->cRegionBlocksOrBytes = pRegion->cRegionBlocksOrBytes / pRegion->cbBlock;
5820 offRegionNext += pRegion->cRegionBlocksOrBytes;
5821 }
5822 else
5823 {
5824 /* Convert from logical blocks to bytes. */
5825 pRegion->offRegion = offRegionNext;
5826 pRegion->cRegionBlocksOrBytes = pRegion->cRegionBlocksOrBytes * pRegion->cbBlock;
5827 offRegionNext += pRegion->cRegionBlocksOrBytes;
5828 }
5829 }
5830 }
5831
5832 *ppRegionList = pRegionListNew;
5833 }
5834 else
5835 rc = VERR_NO_MEMORY;
5836
5837 return rc;
5838}
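
/* Illustrative sketch, not compiled in: converts a single CD-style region described in
 * bytes to a block based description. Only the fields vdRegionListConv actually touches
 * are filled in; the helper name and the numbers are made up for illustration. */
#if 0
static void vdExampleRegionListConv(void)
{
    PVDREGIONLIST pRegionsBytes = (PVDREGIONLIST)RTMemAllocZ(RT_UOFFSETOF(VDREGIONLIST, aRegions[1]));
    if (pRegionsBytes)
    {
        pRegionsBytes->fFlags                           = 0; /* Offsets/sizes given in bytes. */
        pRegionsBytes->cRegions                         = 1;
        pRegionsBytes->aRegions[0].offRegion            = 0;
        pRegionsBytes->aRegions[0].cbBlock              = 2048;
        pRegionsBytes->aRegions[0].cRegionBlocksOrBytes = 350 * 2048; /* 716800 bytes. */

        PVDREGIONLIST pRegionsBlocks = NULL;
        int rc = vdRegionListConv(pRegionsBytes, VD_REGION_LIST_F_LOC_SIZE_BLOCKS, &pRegionsBlocks);
        /* On success: cRegionBlocksOrBytes = 716800 / 2048 = 350 blocks, offRegion = 0. */
        if (RT_SUCCESS(rc))
            RTMemFree(pRegionsBlocks);
        RTMemFree(pRegionsBytes);
    }
}
#endif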
5839
5840/**
5841 * Initializes HDD backends.
5842 *
5843 * @returns VBox status code.
5844 */
5845VBOXDDU_DECL(int) VDInit(void)
5846{
5847 int rc = vdAddBackends(NIL_RTLDRMOD, aStaticBackends, RT_ELEMENTS(aStaticBackends));
5848 if (RT_SUCCESS(rc))
5849 {
5850 rc = vdAddCacheBackends(NIL_RTLDRMOD, aStaticCacheBackends, RT_ELEMENTS(aStaticCacheBackends));
5851 if (RT_SUCCESS(rc))
5852 {
5853 RTListInit(&g_ListPluginsLoaded);
5854 rc = vdLoadDynamicBackends();
5855 }
5856 }
5857 LogRel(("VD: VDInit finished\n"));
5858 return rc;
5859}
5860
5861/**
5862 * Destroys loaded HDD backends.
5863 *
5864 * @returns VBox status code.
5865 */
5866VBOXDDU_DECL(int) VDShutdown(void)
5867{
5868 if (!g_apBackends)
5869 return VERR_INTERNAL_ERROR;
5870
5871 if (g_apCacheBackends)
5872 RTMemFree(g_apCacheBackends);
5873 RTMemFree(g_apBackends);
5874
5875 g_cBackends = 0;
5876 g_apBackends = NULL;
5877
5878 /* Clear the supported cache backends. */
5879 g_cCacheBackends = 0;
5880 g_apCacheBackends = NULL;
5881
5882#ifndef VBOX_HDD_NO_DYNAMIC_BACKENDS
5883 PVDPLUGIN pPlugin, pPluginNext;
5884 RTListForEachSafe(&g_ListPluginsLoaded, pPlugin, pPluginNext, VDPLUGIN, NodePlugin)
5885 {
5886 RTLdrClose(pPlugin->hPlugin);
5887 RTStrFree(pPlugin->pszFilename);
5888 RTListNodeRemove(&pPlugin->NodePlugin);
5889 RTMemFree(pPlugin);
5890 }
5891#endif
5892
5893 return VINF_SUCCESS;
5894}
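
/* Illustrative sketch, not compiled in: the explicit init/shutdown pairing. Many VD API
 * entry points in this file call VDInit() lazily when g_apBackends is still NULL, so this
 * is only needed by callers which want to control when the backend tables are set up. */
#if 0
static int vdExampleInitShutdown(void)
{
    int rc = VDInit();
    if (RT_SUCCESS(rc))
    {
        /* ... create containers, open images, do I/O ... */
        rc = VDShutdown();
    }
    return rc;
}
#endif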
5895
5896/**
5897 * Loads a single plugin given by filename.
5898 *
5899 * @returns VBox status code.
5900 * @param pszFilename The plugin filename to load.
5901 */
5902VBOXDDU_DECL(int) VDPluginLoadFromFilename(const char *pszFilename)
5903{
5904 if (!g_apBackends)
5905 {
5906 int rc = VDInit();
5907 if (RT_FAILURE(rc))
5908 return rc;
5909 }
5910
5911 return vdPluginLoadFromFilename(pszFilename);
5912}
5913
5914/**
5915 * Load all plugins from a given path.
5916 *
5917 * @returns VBox status code.
5918 * @param pszPath The path to load plugins from.
5919 */
5920VBOXDDU_DECL(int) VDPluginLoadFromPath(const char *pszPath)
5921{
5922 if (!g_apBackends)
5923 {
5924 int rc = VDInit();
5925 if (RT_FAILURE(rc))
5926 return rc;
5927 }
5928
5929 return vdPluginLoadFromPath(pszPath);
5930}
5931
5932/**
5933 * Unloads a single plugin given by filename.
5934 *
5935 * @returns VBox status code.
5936 * @param pszFilename The plugin filename to unload.
5937 */
5938VBOXDDU_DECL(int) VDPluginUnloadFromFilename(const char *pszFilename)
5939{
5940 if (!g_apBackends)
5941 {
5942 int rc = VDInit();
5943 if (RT_FAILURE(rc))
5944 return rc;
5945 }
5946
5947 return vdPluginUnloadFromFilename(pszFilename);
5948}
5949
5950/**
5951 * Unload all plugins from a given path.
5952 *
5953 * @returns VBox status code.
5954 * @param pszPath The path to unload plugins from.
5955 */
5956VBOXDDU_DECL(int) VDPluginUnloadFromPath(const char *pszPath)
5957{
5958 if (!g_apBackends)
5959 {
5960 int rc = VDInit();
5961 if (RT_FAILURE(rc))
5962 return rc;
5963 }
5964
5965 return vdPluginUnloadFromPath(pszPath);
5966}
5967
5968/**
5969 * Lists all HDD backends and their capabilities in a caller-provided buffer.
5970 *
5971 * @returns VBox status code.
5972 * VERR_BUFFER_OVERFLOW if not enough space is passed.
5973 * @param cEntriesAlloc Number of list entries available.
5974 * @param pEntries Pointer to array for the entries.
5975 * @param pcEntriesUsed Number of entries returned.
5976 */
5977VBOXDDU_DECL(int) VDBackendInfo(unsigned cEntriesAlloc, PVDBACKENDINFO pEntries,
5978 unsigned *pcEntriesUsed)
5979{
5980 int rc = VINF_SUCCESS;
5981
5982 LogFlowFunc(("cEntriesAlloc=%u pEntries=%#p pcEntriesUsed=%#p\n", cEntriesAlloc, pEntries, pcEntriesUsed));
5983 /* Check arguments. */
5984 AssertMsgReturn(cEntriesAlloc,
5985 ("cEntriesAlloc=%u\n", cEntriesAlloc),
5986 VERR_INVALID_PARAMETER);
5987 AssertMsgReturn(VALID_PTR(pEntries),
5988 ("pEntries=%#p\n", pEntries),
5989 VERR_INVALID_PARAMETER);
5990 AssertMsgReturn(VALID_PTR(pcEntriesUsed),
5991 ("pcEntriesUsed=%#p\n", pcEntriesUsed),
5992 VERR_INVALID_PARAMETER);
5993 if (!g_apBackends)
5994 VDInit();
5995
5996 if (cEntriesAlloc < g_cBackends)
5997 {
5998 *pcEntriesUsed = g_cBackends;
5999 return VERR_BUFFER_OVERFLOW;
6000 }
6001
6002 for (unsigned i = 0; i < g_cBackends; i++)
6003 {
6004 pEntries[i].pszBackend = g_apBackends[i]->pszBackendName;
6005 pEntries[i].uBackendCaps = g_apBackends[i]->uBackendCaps;
6006 pEntries[i].paFileExtensions = g_apBackends[i]->paFileExtensions;
6007 pEntries[i].paConfigInfo = g_apBackends[i]->paConfigInfo;
6008 pEntries[i].pfnComposeLocation = g_apBackends[i]->pfnComposeLocation;
6009 pEntries[i].pfnComposeName = g_apBackends[i]->pfnComposeName;
6010 }
6011
6012 LogFlowFunc(("returns %Rrc *pcEntriesUsed=%u\n", rc, g_cBackends));
6013 *pcEntriesUsed = g_cBackends;
6014 return rc;
6015}
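
/* Illustrative sketch, not compiled in: the calling convention of VDBackendInfo(). A
 * fixed-size array is tried first and the call is retried with a larger buffer when
 * VERR_BUFFER_OVERFLOW reports the required number of entries. Helper name is made up. */
#if 0
static void vdExampleListBackends(void)
{
    VDBACKENDINFO aInfo[16];
    unsigned cEntries = 0;
    int rc = VDBackendInfo(RT_ELEMENTS(aInfo), aInfo, &cEntries);
    if (rc == VERR_BUFFER_OVERFLOW)
    {
        PVDBACKENDINFO paInfo = (PVDBACKENDINFO)RTMemAllocZ(cEntries * sizeof(VDBACKENDINFO));
        if (paInfo)
        {
            rc = VDBackendInfo(cEntries, paInfo, &cEntries);
            if (RT_SUCCESS(rc))
                for (unsigned i = 0; i < cEntries; i++)
                    LogRel(("VD example: backend '%s'\n", paInfo[i].pszBackend));
            RTMemFree(paInfo);
        }
    }
    else if (RT_SUCCESS(rc))
        for (unsigned i = 0; i < cEntries; i++)
            LogRel(("VD example: backend '%s'\n", aInfo[i].pszBackend));
}
#endif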
6016
6017/**
6018 * Lists the capabilities of a backend identified by its name.
6019 *
6020 * @returns VBox status code.
6021 * @param pszBackend The backend name.
6022 * @param pEntry Pointer to an entry.
6023 */
6024VBOXDDU_DECL(int) VDBackendInfoOne(const char *pszBackend, PVDBACKENDINFO pEntry)
6025{
6026 LogFlowFunc(("pszBackend=%#p pEntry=%#p\n", pszBackend, pEntry));
6027 /* Check arguments. */
6028 AssertMsgReturn(VALID_PTR(pszBackend),
6029 ("pszBackend=%#p\n", pszBackend),
6030 VERR_INVALID_PARAMETER);
6031 AssertMsgReturn(VALID_PTR(pEntry),
6032 ("pEntry=%#p\n", pEntry),
6033 VERR_INVALID_PARAMETER);
6034 if (!g_apBackends)
6035 VDInit();
6036
6037 /* Go through loaded backends. */
6038 for (unsigned i = 0; i < g_cBackends; i++)
6039 {
6040 if (!RTStrICmp(pszBackend, g_apBackends[i]->pszBackendName))
6041 {
6042 pEntry->pszBackend = g_apBackends[i]->pszBackendName;
6043 pEntry->uBackendCaps = g_apBackends[i]->uBackendCaps;
6044 pEntry->paFileExtensions = g_apBackends[i]->paFileExtensions;
6045 pEntry->paConfigInfo = g_apBackends[i]->paConfigInfo;
6046 return VINF_SUCCESS;
6047 }
6048 }
6049
6050 return VERR_NOT_FOUND;
6051}
6052
6053/**
6054 * Lists all filters and their capabilities in a caller-provided buffer.
6055 *
6056 * @return VBox status code.
6057 * VERR_BUFFER_OVERFLOW if not enough space is passed.
6058 * @param cEntriesAlloc Number of list entries available.
6059 * @param pEntries Pointer to array for the entries.
6060 * @param pcEntriesUsed Number of entries returned.
6061 */
6062VBOXDDU_DECL(int) VDFilterInfo(unsigned cEntriesAlloc, PVDFILTERINFO pEntries,
6063 unsigned *pcEntriesUsed)
6064{
6065 int rc = VINF_SUCCESS;
6066
6067 LogFlowFunc(("cEntriesAlloc=%u pEntries=%#p pcEntriesUsed=%#p\n", cEntriesAlloc, pEntries, pcEntriesUsed));
6068 /* Check arguments. */
6069 AssertMsgReturn(cEntriesAlloc,
6070 ("cEntriesAlloc=%u\n", cEntriesAlloc),
6071 VERR_INVALID_PARAMETER);
6072 AssertMsgReturn(VALID_PTR(pEntries),
6073 ("pEntries=%#p\n", pEntries),
6074 VERR_INVALID_PARAMETER);
6075 AssertMsgReturn(VALID_PTR(pcEntriesUsed),
6076 ("pcEntriesUsed=%#p\n", pcEntriesUsed),
6077 VERR_INVALID_PARAMETER);
6078 if (!g_apBackends)
6079 VDInit();
6080
6081 if (cEntriesAlloc < g_cFilterBackends)
6082 {
6083 *pcEntriesUsed = g_cFilterBackends;
6084 return VERR_BUFFER_OVERFLOW;
6085 }
6086
6087 for (unsigned i = 0; i < g_cFilterBackends; i++)
6088 {
6089 pEntries[i].pszFilter = g_apFilterBackends[i]->pszBackendName;
6090 pEntries[i].paConfigInfo = g_apFilterBackends[i]->paConfigInfo;
6091 }
6092
6093 LogFlowFunc(("returns %Rrc *pcEntriesUsed=%u\n", rc, g_cFilterBackends));
6094 *pcEntriesUsed = g_cFilterBackends;
6095 return rc;
6096}
6097
6098/**
6099 * Lists the capabilities of a filter identified by its name.
6100 *
6101 * @return VBox status code.
6102 * @param pszFilter The filter name (case insensitive).
6103 * @param pEntry Pointer to an entry.
6104 */
6105VBOXDDU_DECL(int) VDFilterInfoOne(const char *pszFilter, PVDFILTERINFO pEntry)
6106{
6107 LogFlowFunc(("pszFilter=%#p pEntry=%#p\n", pszFilter, pEntry));
6108 /* Check arguments. */
6109 AssertMsgReturn(VALID_PTR(pszFilter),
6110 ("pszFilter=%#p\n", pszFilter),
6111 VERR_INVALID_PARAMETER);
6112 AssertMsgReturn(VALID_PTR(pEntry),
6113 ("pEntry=%#p\n", pEntry),
6114 VERR_INVALID_PARAMETER);
6115 if (!g_apBackends)
6116 VDInit();
6117
6118 /* Go through loaded backends. */
6119 for (unsigned i = 0; i < g_cFilterBackends; i++)
6120 {
6121 if (!RTStrICmp(pszFilter, g_apFilterBackends[i]->pszBackendName))
6122 {
6123 pEntry->pszFilter = g_apFilterBackends[i]->pszBackendName;
6124 pEntry->paConfigInfo = g_apFilterBackends[i]->paConfigInfo;
6125 return VINF_SUCCESS;
6126 }
6127 }
6128
6129 return VERR_NOT_FOUND;
6130}
6131
6132/**
6133 * Allocates and initializes an empty HDD container.
6134 * No image files are opened.
6135 *
6136 * @returns VBox status code.
6137 * @param pVDIfsDisk Pointer to the per-disk VD interface list.
6138 * @param enmType Type of the image container.
6139 * @param ppDisk Where to store the reference to HDD container.
6140 */
6141VBOXDDU_DECL(int) VDCreate(PVDINTERFACE pVDIfsDisk, VDTYPE enmType, PVBOXHDD *ppDisk)
6142{
6143 int rc = VINF_SUCCESS;
6144 PVBOXHDD pDisk = NULL;
6145
6146 LogFlowFunc(("pVDIfsDisk=%#p\n", pVDIfsDisk));
6147 do
6148 {
6149 /* Check arguments. */
6150 AssertMsgBreakStmt(VALID_PTR(ppDisk),
6151 ("ppDisk=%#p\n", ppDisk),
6152 rc = VERR_INVALID_PARAMETER);
6153
6154 pDisk = (PVBOXHDD)RTMemAllocZ(sizeof(VBOXHDD));
6155 if (pDisk)
6156 {
6157 pDisk->u32Signature = VBOXHDDDISK_SIGNATURE;
6158 pDisk->enmType = enmType;
6159 pDisk->cImages = 0;
6160 pDisk->pBase = NULL;
6161 pDisk->pLast = NULL;
6162 pDisk->cbSize = 0;
6163 pDisk->PCHSGeometry.cCylinders = 0;
6164 pDisk->PCHSGeometry.cHeads = 0;
6165 pDisk->PCHSGeometry.cSectors = 0;
6166 pDisk->LCHSGeometry.cCylinders = 0;
6167 pDisk->LCHSGeometry.cHeads = 0;
6168 pDisk->LCHSGeometry.cSectors = 0;
6169 pDisk->pVDIfsDisk = pVDIfsDisk;
6170 pDisk->pInterfaceError = NULL;
6171 pDisk->pInterfaceThreadSync = NULL;
6172 pDisk->pIoCtxLockOwner = NULL;
6173 pDisk->pIoCtxHead = NULL;
6174 pDisk->fLocked = false;
6175 pDisk->hMemCacheIoCtx = NIL_RTMEMCACHE;
6176 pDisk->hMemCacheIoTask = NIL_RTMEMCACHE;
6177 RTListInit(&pDisk->ListFilterChainWrite);
6178 RTListInit(&pDisk->ListFilterChainRead);
6179
6180 /* Create the I/O ctx cache */
6181 rc = RTMemCacheCreate(&pDisk->hMemCacheIoCtx, sizeof(VDIOCTX), 0, UINT32_MAX,
6182 NULL, NULL, NULL, 0);
6183 if (RT_FAILURE(rc))
6184 break;
6185
6186 /* Create the I/O task cache */
6187 rc = RTMemCacheCreate(&pDisk->hMemCacheIoTask, sizeof(VDIOTASK), 0, UINT32_MAX,
6188 NULL, NULL, NULL, 0);
6189 if (RT_FAILURE(rc))
6190 break;
6191
6192 pDisk->pInterfaceError = VDIfErrorGet(pVDIfsDisk);
6193 pDisk->pInterfaceThreadSync = VDIfThreadSyncGet(pVDIfsDisk);
6194
6195 *ppDisk = pDisk;
6196 }
6197 else
6198 {
6199 rc = VERR_NO_MEMORY;
6200 break;
6201 }
6202 } while (0);
6203
6204 if ( RT_FAILURE(rc)
6205 && pDisk)
6206 {
6207 if (pDisk->hMemCacheIoCtx != NIL_RTMEMCACHE)
6208 RTMemCacheDestroy(pDisk->hMemCacheIoCtx);
6209 if (pDisk->hMemCacheIoTask != NIL_RTMEMCACHE)
6210 RTMemCacheDestroy(pDisk->hMemCacheIoTask);
6211 }
6212
6213 LogFlowFunc(("returns %Rrc (pDisk=%#p)\n", rc, pDisk));
6214 return rc;
6215}
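
/* Illustrative sketch, not compiled in: creating and destroying a hard disk container
 * around its use. Passing NULL interface lists is intended to work; the optional error
 * and thread-sync interfaces then simply resolve to NULL. Helper name is made up. */
#if 0
static int vdExampleCreateContainer(void)
{
    PVBOXHDD pDisk = NULL;
    int rc = VDCreate(NULL /* pVDIfsDisk */, VDTYPE_HDD, &pDisk);
    if (RT_SUCCESS(rc))
    {
        /* ... VDOpen()/VDCreateBase() and I/O go here ... */
        rc = VDDestroy(pDisk); /* Closes any images still open. */
    }
    return rc;
}
#endif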
6216
6217/**
6218 * Destroys HDD container.
6219 * If container has opened image files they will be closed.
6220 *
6221 * @returns VBox status code.
6222 * @param pDisk Pointer to HDD container.
6223 */
6224VBOXDDU_DECL(int) VDDestroy(PVBOXHDD pDisk)
6225{
6226 int rc = VINF_SUCCESS;
6227 LogFlowFunc(("pDisk=%#p\n", pDisk));
6228 do
6229 {
6230 /* sanity check */
6231 AssertPtrBreak(pDisk);
6232 AssertMsg(pDisk->u32Signature == VBOXHDDDISK_SIGNATURE, ("u32Signature=%08x\n", pDisk->u32Signature));
6233 Assert(!pDisk->fLocked);
6234
6235 rc = VDCloseAll(pDisk);
6236 int rc2 = VDFilterRemoveAll(pDisk);
6237 if (RT_SUCCESS(rc))
6238 rc = rc2;
6239
6240 RTMemCacheDestroy(pDisk->hMemCacheIoCtx);
6241 RTMemCacheDestroy(pDisk->hMemCacheIoTask);
6242 RTMemFree(pDisk);
6243 } while (0);
6244 LogFlowFunc(("returns %Rrc\n", rc));
6245 return rc;
6246}
6247
6248/**
6249 * Try to get the backend name which can use this image.
6250 *
6251 * @returns VBox status code.
6252 * VINF_SUCCESS if a plugin was found.
6253 * ppszFormat contains the string which can be used as backend name.
6254 * VERR_NOT_SUPPORTED if no backend was found.
6255 * @param pVDIfsDisk Pointer to the per-disk VD interface list.
6256 * @param pVDIfsImage Pointer to the per-image VD interface list.
6257 * @param pszFilename Name of the image file for which the backend is queried.
6258 * @param   ppszFormat      Where to store the pointer to the UTF-8 string containing the format name.
6259 *                          The returned pointer must be freed using RTStrFree().
 * @param   penmType        Where to store the detected image/device type on success.
6260 */
6261VBOXDDU_DECL(int) VDGetFormat(PVDINTERFACE pVDIfsDisk, PVDINTERFACE pVDIfsImage,
6262 const char *pszFilename, char **ppszFormat, VDTYPE *penmType)
6263{
6264 int rc = VERR_NOT_SUPPORTED;
6265 VDINTERFACEIOINT VDIfIoInt;
6266 VDINTERFACEIO VDIfIoFallback;
6267 PVDINTERFACEIO pInterfaceIo;
6268
6269 LogFlowFunc(("pszFilename=\"%s\"\n", pszFilename));
6270 /* Check arguments. */
6271 AssertMsgReturn(VALID_PTR(pszFilename) && *pszFilename,
6272 ("pszFilename=%#p \"%s\"\n", pszFilename, pszFilename),
6273 VERR_INVALID_PARAMETER);
6274 AssertMsgReturn(VALID_PTR(ppszFormat),
6275 ("ppszFormat=%#p\n", ppszFormat),
6276 VERR_INVALID_PARAMETER);
6277 AssertMsgReturn(VALID_PTR(penmType),
6278 ("penmType=%#p\n", penmType),
6279 VERR_INVALID_PARAMETER);
6280
6281 if (!g_apBackends)
6282 VDInit();
6283
6284 pInterfaceIo = VDIfIoGet(pVDIfsImage);
6285 if (!pInterfaceIo)
6286 {
6287 /*
6288 * Caller doesn't provide an I/O interface, create our own using the
6289 * native file API.
6290 */
6291 vdIfIoFallbackCallbacksSetup(&VDIfIoFallback);
6292 pInterfaceIo = &VDIfIoFallback;
6293 }
6294
6295 /* Set up the internal I/O interface. */
6296 AssertReturn(!VDIfIoIntGet(pVDIfsImage), VERR_INVALID_PARAMETER);
6297 VDIfIoInt.pfnOpen = vdIOIntOpenLimited;
6298 VDIfIoInt.pfnClose = vdIOIntCloseLimited;
6299 VDIfIoInt.pfnDelete = vdIOIntDeleteLimited;
6300 VDIfIoInt.pfnMove = vdIOIntMoveLimited;
6301 VDIfIoInt.pfnGetFreeSpace = vdIOIntGetFreeSpaceLimited;
6302 VDIfIoInt.pfnGetModificationTime = vdIOIntGetModificationTimeLimited;
6303 VDIfIoInt.pfnGetSize = vdIOIntGetSizeLimited;
6304 VDIfIoInt.pfnSetSize = vdIOIntSetSizeLimited;
6305 VDIfIoInt.pfnReadUser = vdIOIntReadUserLimited;
6306 VDIfIoInt.pfnWriteUser = vdIOIntWriteUserLimited;
6307 VDIfIoInt.pfnReadMeta = vdIOIntReadMetaLimited;
6308 VDIfIoInt.pfnWriteMeta = vdIOIntWriteMetaLimited;
6309 VDIfIoInt.pfnFlush = vdIOIntFlushLimited;
6310 rc = VDInterfaceAdd(&VDIfIoInt.Core, "VD_IOINT", VDINTERFACETYPE_IOINT,
6311 pInterfaceIo, sizeof(VDINTERFACEIOINT), &pVDIfsImage);
6312 AssertRC(rc);
6313
6314 /* Find the backend supporting this file format. */
6315 for (unsigned i = 0; i < g_cBackends; i++)
6316 {
6317 if (g_apBackends[i]->pfnProbe)
6318 {
6319 rc = g_apBackends[i]->pfnProbe(pszFilename, pVDIfsDisk, pVDIfsImage, penmType);
6320 if ( RT_SUCCESS(rc)
6321 /* The correct backend has been found, but there is a small
6322 * incompatibility so that the file cannot be used. Stop here
6323 * and signal success - the actual open will of course fail,
6324 * but that will create a really sensible error message. */
6325 || ( rc != VERR_VD_GEN_INVALID_HEADER
6326 && rc != VERR_VD_VDI_INVALID_HEADER
6327 && rc != VERR_VD_VMDK_INVALID_HEADER
6328 && rc != VERR_VD_ISCSI_INVALID_HEADER
6329 && rc != VERR_VD_VHD_INVALID_HEADER
6330 && rc != VERR_VD_RAW_INVALID_HEADER
6331 && rc != VERR_VD_RAW_SIZE_MODULO_512
6332 && rc != VERR_VD_RAW_SIZE_MODULO_2048
6333 && rc != VERR_VD_RAW_SIZE_OPTICAL_TOO_SMALL
6334 && rc != VERR_VD_RAW_SIZE_FLOPPY_TOO_BIG
6335 && rc != VERR_VD_PARALLELS_INVALID_HEADER
6336 && rc != VERR_VD_DMG_INVALID_HEADER))
6337 {
6338 /* Copy the name into the new string. */
6339 char *pszFormat = RTStrDup(g_apBackends[i]->pszBackendName);
6340 if (!pszFormat)
6341 {
6342 rc = VERR_NO_MEMORY;
6343 break;
6344 }
6345 *ppszFormat = pszFormat;
6346 /* Do not consider the typical file access errors as success,
6347 * which allows the caller to deal with such issues. */
6348 if ( rc != VERR_ACCESS_DENIED
6349 && rc != VERR_PATH_NOT_FOUND
6350 && rc != VERR_FILE_NOT_FOUND)
6351 rc = VINF_SUCCESS;
6352 break;
6353 }
6354 rc = VERR_NOT_SUPPORTED;
6355 }
6356 }
6357
6358 /* Try the cache backends. */
6359 if (rc == VERR_NOT_SUPPORTED)
6360 {
6361 for (unsigned i = 0; i < g_cCacheBackends; i++)
6362 {
6363 if (g_apCacheBackends[i]->pfnProbe)
6364 {
6365 rc = g_apCacheBackends[i]->pfnProbe(pszFilename, pVDIfsDisk,
6366 pVDIfsImage);
6367 if ( RT_SUCCESS(rc)
6368 || (rc != VERR_VD_GEN_INVALID_HEADER))
6369 {
6370 /* Copy the name into the new string. */
6371                    char *pszFormat = RTStrDup(g_apCacheBackends[i]->pszBackendName);
6372 if (!pszFormat)
6373 {
6374 rc = VERR_NO_MEMORY;
6375 break;
6376 }
6377 *ppszFormat = pszFormat;
6378 rc = VINF_SUCCESS;
6379 break;
6380 }
6381 rc = VERR_NOT_SUPPORTED;
6382 }
6383 }
6384 }
6385
6386 LogFlowFunc(("returns %Rrc *ppszFormat=\"%s\"\n", rc, *ppszFormat));
6387 return rc;
6388}
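
/* Illustrative sketch, not compiled in: how the probing routine above is typically
 * combined with VDOpen(). The file name is a placeholder and the helper name is made up;
 * the probed type should match the type the container was created with. */
#if 0
static int vdExampleProbeAndOpen(PVBOXHDD pDisk)
{
    char   *pszFormat = NULL;
    VDTYPE  enmType   = VDTYPE_INVALID;
    int rc = VDGetFormat(NULL /* pVDIfsDisk */, NULL /* pVDIfsImage */,
                         "/path/to/some.image", &pszFormat, &enmType);
    if (RT_SUCCESS(rc))
    {
        rc = VDOpen(pDisk, pszFormat, "/path/to/some.image",
                    VD_OPEN_FLAGS_READONLY, NULL /* pVDIfsImage */);
        RTStrFree(pszFormat); /* Required by the doc comment above. */
    }
    return rc;
}
#endif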
6389
6390/**
6391 * Opens an image file.
6392 *
6393 * The first opened image file in HDD container must have a base image type,
6394 * others (subsequently opened images) must be differencing or undo images.
6395 * The linkage of a differencing image is checked for consistency with the previously opened image.
6396 * When another differencing image is opened and the last image was opened in read/write access
6397 * mode, then the last image is reopened in read-only with deny write sharing mode. This allows
6398 * other processes to use images in read-only mode too.
6399 *
6400 * Note that the image is opened in read-only mode if a read/write open is not possible.
6401 * Use VDIsReadOnly to check open mode.
6402 *
6403 * @returns VBox status code.
6404 * @param pDisk Pointer to HDD container.
6405 * @param pszBackend Name of the image file backend to use.
6406 * @param pszFilename Name of the image file to open.
6407 * @param uOpenFlags Image file open mode, see VD_OPEN_FLAGS_* constants.
6408 * @param pVDIfsImage Pointer to the per-image VD interface list.
6409 */
6410VBOXDDU_DECL(int) VDOpen(PVBOXHDD pDisk, const char *pszBackend,
6411 const char *pszFilename, unsigned uOpenFlags,
6412 PVDINTERFACE pVDIfsImage)
6413{
6414 int rc = VINF_SUCCESS;
6415 int rc2;
6416 bool fLockWrite = false;
6417 PVDIMAGE pImage = NULL;
6418
6419 LogFlowFunc(("pDisk=%#p pszBackend=\"%s\" pszFilename=\"%s\" uOpenFlags=%#x, pVDIfsImage=%#p\n",
6420 pDisk, pszBackend, pszFilename, uOpenFlags, pVDIfsImage));
6421
6422 do
6423 {
6424 /* sanity check */
6425 AssertPtrBreakStmt(pDisk, rc = VERR_INVALID_PARAMETER);
6426 AssertMsg(pDisk->u32Signature == VBOXHDDDISK_SIGNATURE, ("u32Signature=%08x\n", pDisk->u32Signature));
6427
6428 /* Check arguments. */
6429 AssertMsgBreakStmt(VALID_PTR(pszBackend) && *pszBackend,
6430 ("pszBackend=%#p \"%s\"\n", pszBackend, pszBackend),
6431 rc = VERR_INVALID_PARAMETER);
6432 AssertMsgBreakStmt(VALID_PTR(pszFilename) && *pszFilename,
6433 ("pszFilename=%#p \"%s\"\n", pszFilename, pszFilename),
6434 rc = VERR_INVALID_PARAMETER);
6435 AssertMsgBreakStmt((uOpenFlags & ~VD_OPEN_FLAGS_MASK) == 0,
6436 ("uOpenFlags=%#x\n", uOpenFlags),
6437 rc = VERR_INVALID_PARAMETER);
6438 AssertMsgBreakStmt( !(uOpenFlags & VD_OPEN_FLAGS_SKIP_CONSISTENCY_CHECKS)
6439 || (uOpenFlags & VD_OPEN_FLAGS_READONLY),
6440 ("uOpenFlags=%#x\n", uOpenFlags),
6441 rc = VERR_INVALID_PARAMETER);
6442
6443 /*
6444 * Destroy the current discard state first which might still have pending blocks
6445 * for the currently opened image which will be switched to readonly mode.
6446 */
6447 /* Lock disk for writing, as we modify pDisk information below. */
6448 rc2 = vdThreadStartWrite(pDisk);
6449 AssertRC(rc2);
6450 fLockWrite = true;
6451 rc = vdDiscardStateDestroy(pDisk);
6452 if (RT_FAILURE(rc))
6453 break;
6454 rc2 = vdThreadFinishWrite(pDisk);
6455 AssertRC(rc2);
6456 fLockWrite = false;
6457
6458 /* Set up image descriptor. */
6459 pImage = (PVDIMAGE)RTMemAllocZ(sizeof(VDIMAGE));
6460 if (!pImage)
6461 {
6462 rc = VERR_NO_MEMORY;
6463 break;
6464 }
6465 pImage->pszFilename = RTStrDup(pszFilename);
6466 if (!pImage->pszFilename)
6467 {
6468 rc = VERR_NO_MEMORY;
6469 break;
6470 }
6471
6472 pImage->VDIo.pDisk = pDisk;
6473 pImage->pVDIfsImage = pVDIfsImage;
6474
6475 rc = vdFindBackend(pszBackend, &pImage->Backend);
6476 if (RT_FAILURE(rc))
6477 break;
6478 if (!pImage->Backend)
6479 {
6480 rc = vdError(pDisk, VERR_INVALID_PARAMETER, RT_SRC_POS,
6481 N_("VD: unknown backend name '%s'"), pszBackend);
6482 break;
6483 }
6484
6485 /*
6486 * Fail if the backend can't do async I/O but the
6487 * flag is set.
6488 */
6489 if ( !(pImage->Backend->uBackendCaps & VD_CAP_ASYNC)
6490 && (uOpenFlags & VD_OPEN_FLAGS_ASYNC_IO))
6491 {
6492 rc = vdError(pDisk, VERR_NOT_SUPPORTED, RT_SRC_POS,
6493 N_("VD: Backend '%s' does not support async I/O"), pszBackend);
6494 break;
6495 }
6496
6497 /*
6498 * Fail if the backend doesn't support the discard operation but the
6499 * flag is set.
6500 */
6501 if ( !(pImage->Backend->uBackendCaps & VD_CAP_DISCARD)
6502 && (uOpenFlags & VD_OPEN_FLAGS_DISCARD))
6503 {
6504 rc = vdError(pDisk, VERR_VD_DISCARD_NOT_SUPPORTED, RT_SRC_POS,
6505 N_("VD: Backend '%s' does not support discard"), pszBackend);
6506 break;
6507 }
6508
6509 /* Set up the I/O interface. */
6510 pImage->VDIo.pInterfaceIo = VDIfIoGet(pVDIfsImage);
6511 if (!pImage->VDIo.pInterfaceIo)
6512 {
6513 vdIfIoFallbackCallbacksSetup(&pImage->VDIo.VDIfIo);
6514 rc = VDInterfaceAdd(&pImage->VDIo.VDIfIo.Core, "VD_IO", VDINTERFACETYPE_IO,
6515 pDisk, sizeof(VDINTERFACEIO), &pVDIfsImage);
6516 pImage->VDIo.pInterfaceIo = &pImage->VDIo.VDIfIo;
6517 }
6518
6519 /* Set up the internal I/O interface. */
6520 AssertBreakStmt(!VDIfIoIntGet(pVDIfsImage), rc = VERR_INVALID_PARAMETER);
6521 vdIfIoIntCallbacksSetup(&pImage->VDIo.VDIfIoInt);
6522 rc = VDInterfaceAdd(&pImage->VDIo.VDIfIoInt.Core, "VD_IOINT", VDINTERFACETYPE_IOINT,
6523 &pImage->VDIo, sizeof(VDINTERFACEIOINT), &pImage->pVDIfsImage);
6524 AssertRC(rc);
6525
6526 pImage->uOpenFlags = uOpenFlags & (VD_OPEN_FLAGS_HONOR_SAME | VD_OPEN_FLAGS_DISCARD | VD_OPEN_FLAGS_IGNORE_FLUSH | VD_OPEN_FLAGS_INFORM_ABOUT_ZERO_BLOCKS);
6527 pImage->VDIo.fIgnoreFlush = (uOpenFlags & VD_OPEN_FLAGS_IGNORE_FLUSH) != 0;
6528 rc = pImage->Backend->pfnOpen(pImage->pszFilename,
6529 uOpenFlags & ~(VD_OPEN_FLAGS_HONOR_SAME | VD_OPEN_FLAGS_IGNORE_FLUSH | VD_OPEN_FLAGS_INFORM_ABOUT_ZERO_BLOCKS),
6530 pDisk->pVDIfsDisk,
6531 pImage->pVDIfsImage,
6532 pDisk->enmType,
6533 &pImage->pBackendData);
6534 /*
6535      * If the image is corrupted and the backend provides a repair method, try to
6536      * repair it first (if it was opened in read-write mode) and open it again afterwards.
6537 */
6538 if ( RT_UNLIKELY(rc == VERR_VD_IMAGE_CORRUPTED)
6539 && !(uOpenFlags & VD_OPEN_FLAGS_READONLY)
6540 && pImage->Backend->pfnRepair)
6541 {
6542 rc = pImage->Backend->pfnRepair(pszFilename, pDisk->pVDIfsDisk, pImage->pVDIfsImage, 0 /* fFlags */);
6543 if (RT_SUCCESS(rc))
6544 rc = pImage->Backend->pfnOpen(pImage->pszFilename,
6545 uOpenFlags & ~(VD_OPEN_FLAGS_HONOR_SAME | VD_OPEN_FLAGS_IGNORE_FLUSH | VD_OPEN_FLAGS_INFORM_ABOUT_ZERO_BLOCKS),
6546 pDisk->pVDIfsDisk,
6547 pImage->pVDIfsImage,
6548 pDisk->enmType,
6549 &pImage->pBackendData);
6550 else
6551 {
6552 rc = vdError(pDisk, rc, RT_SRC_POS,
6553 N_("VD: error %Rrc repairing corrupted image file '%s'"), rc, pszFilename);
6554 break;
6555 }
6556 }
6557 else if (RT_UNLIKELY(rc == VERR_VD_IMAGE_CORRUPTED))
6558 {
6559 rc = vdError(pDisk, rc, RT_SRC_POS,
6560 N_("VD: Image file '%s' is corrupted and can't be opened"), pszFilename);
6561 break;
6562 }
6563
6564 /* If the open in read-write mode failed, retry in read-only mode. */
6565 if (RT_FAILURE(rc))
6566 {
6567 if (!(uOpenFlags & VD_OPEN_FLAGS_READONLY)
6568 && ( rc == VERR_ACCESS_DENIED
6569 || rc == VERR_PERMISSION_DENIED
6570 || rc == VERR_WRITE_PROTECT
6571 || rc == VERR_SHARING_VIOLATION
6572 || rc == VERR_FILE_LOCK_FAILED))
6573 rc = pImage->Backend->pfnOpen(pImage->pszFilename,
6574 (uOpenFlags & ~(VD_OPEN_FLAGS_HONOR_SAME | VD_OPEN_FLAGS_INFORM_ABOUT_ZERO_BLOCKS))
6575 | VD_OPEN_FLAGS_READONLY,
6576 pDisk->pVDIfsDisk,
6577 pImage->pVDIfsImage,
6578 pDisk->enmType,
6579 &pImage->pBackendData);
6580 if (RT_FAILURE(rc))
6581 {
6582 rc = vdError(pDisk, rc, RT_SRC_POS,
6583 N_("VD: error %Rrc opening image file '%s'"), rc, pszFilename);
6584 break;
6585 }
6586 }
6587
6588 /* Lock disk for writing, as we modify pDisk information below. */
6589 rc2 = vdThreadStartWrite(pDisk);
6590 AssertRC(rc2);
6591 fLockWrite = true;
6592
6593 pImage->VDIo.pBackendData = pImage->pBackendData;
6594
6595 /* Check image type. As the image itself has only partial knowledge
6596 * whether it's a base image or not, this info is derived here. The
6597 * base image can be fixed or normal, all others must be normal or
6598 * diff images. Some image formats don't distinguish between normal
6599 * and diff images, so this must be corrected here. */
6600 unsigned uImageFlags;
6601 uImageFlags = pImage->Backend->pfnGetImageFlags(pImage->pBackendData);
6602 if (RT_FAILURE(rc))
6603 uImageFlags = VD_IMAGE_FLAGS_NONE;
6604 if ( RT_SUCCESS(rc)
6605 && !(uOpenFlags & VD_OPEN_FLAGS_INFO))
6606 {
6607 if ( pDisk->cImages == 0
6608 && (uImageFlags & VD_IMAGE_FLAGS_DIFF))
6609 {
6610 rc = VERR_VD_INVALID_TYPE;
6611 break;
6612 }
6613 else if (pDisk->cImages != 0)
6614 {
6615 if (uImageFlags & VD_IMAGE_FLAGS_FIXED)
6616 {
6617 rc = VERR_VD_INVALID_TYPE;
6618 break;
6619 }
6620 else
6621 uImageFlags |= VD_IMAGE_FLAGS_DIFF;
6622 }
6623 }
6624
6625 /* Ensure we always get correct diff information, even if the backend
6626 * doesn't actually have a stored flag for this. It must not return
6627 * bogus information for the parent UUID if it is not a diff image. */
6628 RTUUID parentUuid;
6629 RTUuidClear(&parentUuid);
6630 rc2 = pImage->Backend->pfnGetParentUuid(pImage->pBackendData, &parentUuid);
6631 if (RT_SUCCESS(rc2) && !RTUuidIsNull(&parentUuid))
6632 uImageFlags |= VD_IMAGE_FLAGS_DIFF;
6633
6634 pImage->uImageFlags = uImageFlags;
6635
6636 /* Force sane optimization settings. It's not worth avoiding writes
6637 * to fixed size images. The overhead would have almost no payback. */
6638 if (uImageFlags & VD_IMAGE_FLAGS_FIXED)
6639 pImage->uOpenFlags |= VD_OPEN_FLAGS_HONOR_SAME;
6640
6641 /** @todo optionally check UUIDs */
6642
6643 /* Cache disk information. */
6644 pDisk->cbSize = pImage->Backend->pfnGetSize(pImage->pBackendData);
6645
6646 /* Cache PCHS geometry. */
6647 rc2 = pImage->Backend->pfnGetPCHSGeometry(pImage->pBackendData,
6648 &pDisk->PCHSGeometry);
6649 if (RT_FAILURE(rc2))
6650 {
6651 pDisk->PCHSGeometry.cCylinders = 0;
6652 pDisk->PCHSGeometry.cHeads = 0;
6653 pDisk->PCHSGeometry.cSectors = 0;
6654 }
6655 else
6656 {
6657 /* Make sure the PCHS geometry is properly clipped. */
6658 pDisk->PCHSGeometry.cCylinders = RT_MIN(pDisk->PCHSGeometry.cCylinders, 16383);
6659 pDisk->PCHSGeometry.cHeads = RT_MIN(pDisk->PCHSGeometry.cHeads, 16);
6660 pDisk->PCHSGeometry.cSectors = RT_MIN(pDisk->PCHSGeometry.cSectors, 63);
6661 }
6662
6663 /* Cache LCHS geometry. */
6664 rc2 = pImage->Backend->pfnGetLCHSGeometry(pImage->pBackendData,
6665 &pDisk->LCHSGeometry);
6666 if (RT_FAILURE(rc2))
6667 {
6668 pDisk->LCHSGeometry.cCylinders = 0;
6669 pDisk->LCHSGeometry.cHeads = 0;
6670 pDisk->LCHSGeometry.cSectors = 0;
6671 }
6672 else
6673 {
6674 /* Make sure the LCHS geometry is properly clipped. */
6675 pDisk->LCHSGeometry.cHeads = RT_MIN(pDisk->LCHSGeometry.cHeads, 255);
6676 pDisk->LCHSGeometry.cSectors = RT_MIN(pDisk->LCHSGeometry.cSectors, 63);
6677 }
6678
6679 if (pDisk->cImages != 0)
6680 {
6681 /* Switch previous image to read-only mode. */
6682 unsigned uOpenFlagsPrevImg;
6683 uOpenFlagsPrevImg = pDisk->pLast->Backend->pfnGetOpenFlags(pDisk->pLast->pBackendData);
6684 if (!(uOpenFlagsPrevImg & VD_OPEN_FLAGS_READONLY))
6685 {
6686 uOpenFlagsPrevImg |= VD_OPEN_FLAGS_READONLY;
6687 rc = pDisk->pLast->Backend->pfnSetOpenFlags(pDisk->pLast->pBackendData, uOpenFlagsPrevImg);
6688 }
6689 }
6690
6691 if (RT_SUCCESS(rc))
6692 {
6693 /* Image successfully opened, make it the last image. */
6694 vdAddImageToList(pDisk, pImage);
6695 if (!(uOpenFlags & VD_OPEN_FLAGS_READONLY))
6696 pDisk->uModified = VD_IMAGE_MODIFIED_FIRST;
6697 }
6698 else
6699 {
6700 /* Error detected, but image opened. Close image. */
6701 rc2 = pImage->Backend->pfnClose(pImage->pBackendData, false);
6702 AssertRC(rc2);
6703 pImage->pBackendData = NULL;
6704 }
6705 } while (0);
6706
6707 if (RT_UNLIKELY(fLockWrite))
6708 {
6709 rc2 = vdThreadFinishWrite(pDisk);
6710 AssertRC(rc2);
6711 }
6712
6713 if (RT_FAILURE(rc))
6714 {
6715 if (pImage)
6716 {
6717 if (pImage->pszFilename)
6718 RTStrFree(pImage->pszFilename);
6719 RTMemFree(pImage);
6720 }
6721 }
6722
6723 LogFlowFunc(("returns %Rrc\n", rc));
6724 return rc;
6725}
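
/* Illustrative sketch, not compiled in: opening a small image chain as described above.
 * The file names are placeholders and the 'VDI' backend is just used as an example. When
 * the second image is opened, the code above switches the previously opened image to
 * read-only. */
#if 0
static int vdExampleOpenChain(PVBOXHDD pDisk)
{
    int rc = VDOpen(pDisk, "VDI", "/path/to/base.vdi",
                    VD_OPEN_FLAGS_READONLY, NULL /* pVDIfsImage */);
    if (RT_SUCCESS(rc))
        rc = VDOpen(pDisk, "VDI", "/path/to/diff.vdi",
                    VD_OPEN_FLAGS_NORMAL, NULL /* pVDIfsImage */);
    return rc;
}
#endif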
6726
6727/**
6728 * Opens a cache image.
6729 *
6730 * @return VBox status code.
6731 * @param pDisk Pointer to the HDD container which should use the cache image.
6732 * @param pszBackend Name of the cache file backend to use (case insensitive).
6733 * @param pszFilename Name of the cache image to open.
6734 * @param uOpenFlags Image file open mode, see VD_OPEN_FLAGS_* constants.
6735 * @param pVDIfsCache Pointer to the per-cache VD interface list.
6736 */
6737VBOXDDU_DECL(int) VDCacheOpen(PVBOXHDD pDisk, const char *pszBackend,
6738 const char *pszFilename, unsigned uOpenFlags,
6739 PVDINTERFACE pVDIfsCache)
6740{
6741 int rc = VINF_SUCCESS;
6742 int rc2;
6743 bool fLockWrite = false;
6744 PVDCACHE pCache = NULL;
6745
6746 LogFlowFunc(("pDisk=%#p pszBackend=\"%s\" pszFilename=\"%s\" uOpenFlags=%#x, pVDIfsCache=%#p\n",
6747 pDisk, pszBackend, pszFilename, uOpenFlags, pVDIfsCache));
6748
6749 do
6750 {
6751 /* sanity check */
6752 AssertPtrBreakStmt(pDisk, rc = VERR_INVALID_PARAMETER);
6753 AssertMsg(pDisk->u32Signature == VBOXHDDDISK_SIGNATURE, ("u32Signature=%08x\n", pDisk->u32Signature));
6754
6755 /* Check arguments. */
6756 AssertMsgBreakStmt(VALID_PTR(pszBackend) && *pszBackend,
6757 ("pszBackend=%#p \"%s\"\n", pszBackend, pszBackend),
6758 rc = VERR_INVALID_PARAMETER);
6759 AssertMsgBreakStmt(VALID_PTR(pszFilename) && *pszFilename,
6760 ("pszFilename=%#p \"%s\"\n", pszFilename, pszFilename),
6761 rc = VERR_INVALID_PARAMETER);
6762 AssertMsgBreakStmt((uOpenFlags & ~VD_OPEN_FLAGS_MASK) == 0,
6763 ("uOpenFlags=%#x\n", uOpenFlags),
6764 rc = VERR_INVALID_PARAMETER);
6765
6766 /* Set up image descriptor. */
6767 pCache = (PVDCACHE)RTMemAllocZ(sizeof(VDCACHE));
6768 if (!pCache)
6769 {
6770 rc = VERR_NO_MEMORY;
6771 break;
6772 }
6773 pCache->pszFilename = RTStrDup(pszFilename);
6774 if (!pCache->pszFilename)
6775 {
6776 rc = VERR_NO_MEMORY;
6777 break;
6778 }
6779
6780 pCache->VDIo.pDisk = pDisk;
6781 pCache->pVDIfsCache = pVDIfsCache;
6782
6783 rc = vdFindCacheBackend(pszBackend, &pCache->Backend);
6784 if (RT_FAILURE(rc))
6785 break;
6786 if (!pCache->Backend)
6787 {
6788 rc = vdError(pDisk, VERR_INVALID_PARAMETER, RT_SRC_POS,
6789 N_("VD: unknown backend name '%s'"), pszBackend);
6790 break;
6791 }
6792
6793 /* Set up the I/O interface. */
6794 pCache->VDIo.pInterfaceIo = VDIfIoGet(pVDIfsCache);
6795 if (!pCache->VDIo.pInterfaceIo)
6796 {
6797 vdIfIoFallbackCallbacksSetup(&pCache->VDIo.VDIfIo);
6798 rc = VDInterfaceAdd(&pCache->VDIo.VDIfIo.Core, "VD_IO", VDINTERFACETYPE_IO,
6799 pDisk, sizeof(VDINTERFACEIO), &pVDIfsCache);
6800 pCache->VDIo.pInterfaceIo = &pCache->VDIo.VDIfIo;
6801 }
6802
6803 /* Set up the internal I/O interface. */
6804 AssertBreakStmt(!VDIfIoIntGet(pVDIfsCache), rc = VERR_INVALID_PARAMETER);
6805 vdIfIoIntCallbacksSetup(&pCache->VDIo.VDIfIoInt);
6806 rc = VDInterfaceAdd(&pCache->VDIo.VDIfIoInt.Core, "VD_IOINT", VDINTERFACETYPE_IOINT,
6807 &pCache->VDIo, sizeof(VDINTERFACEIOINT), &pCache->pVDIfsCache);
6808 AssertRC(rc);
6809
6810 pCache->uOpenFlags = uOpenFlags & VD_OPEN_FLAGS_HONOR_SAME;
6811 rc = pCache->Backend->pfnOpen(pCache->pszFilename,
6812 uOpenFlags & ~VD_OPEN_FLAGS_HONOR_SAME,
6813 pDisk->pVDIfsDisk,
6814 pCache->pVDIfsCache,
6815 &pCache->pBackendData);
6816 /* If the open in read-write mode failed, retry in read-only mode. */
6817 if (RT_FAILURE(rc))
6818 {
6819 if (!(uOpenFlags & VD_OPEN_FLAGS_READONLY)
6820 && ( rc == VERR_ACCESS_DENIED
6821 || rc == VERR_PERMISSION_DENIED
6822 || rc == VERR_WRITE_PROTECT
6823 || rc == VERR_SHARING_VIOLATION
6824 || rc == VERR_FILE_LOCK_FAILED))
6825 rc = pCache->Backend->pfnOpen(pCache->pszFilename,
6826 (uOpenFlags & ~VD_OPEN_FLAGS_HONOR_SAME)
6827 | VD_OPEN_FLAGS_READONLY,
6828 pDisk->pVDIfsDisk,
6829 pCache->pVDIfsCache,
6830 &pCache->pBackendData);
6831 if (RT_FAILURE(rc))
6832 {
6833 rc = vdError(pDisk, rc, RT_SRC_POS,
6834 N_("VD: error %Rrc opening image file '%s'"), rc, pszFilename);
6835 break;
6836 }
6837 }
6838
6839 /* Lock disk for writing, as we modify pDisk information below. */
6840 rc2 = vdThreadStartWrite(pDisk);
6841 AssertRC(rc2);
6842 fLockWrite = true;
6843
6844 /*
6845 * Check that the modification UUID of the cache and last image
6846         * match. If not, the image was modified in between without going through the cache.
6847 * The cache might contain stale data.
6848 */
6849 RTUUID UuidImage, UuidCache;
6850
6851 rc = pCache->Backend->pfnGetModificationUuid(pCache->pBackendData,
6852 &UuidCache);
6853 if (RT_SUCCESS(rc))
6854 {
6855 rc = pDisk->pLast->Backend->pfnGetModificationUuid(pDisk->pLast->pBackendData,
6856 &UuidImage);
6857 if (RT_SUCCESS(rc))
6858 {
6859 if (RTUuidCompare(&UuidImage, &UuidCache))
6860 rc = VERR_VD_CACHE_NOT_UP_TO_DATE;
6861 }
6862 }
6863
6864 /*
6865 * We assume that the user knows what he is doing if one of the images
6866 * doesn't support the modification uuid.
6867 */
6868 if (rc == VERR_NOT_SUPPORTED)
6869 rc = VINF_SUCCESS;
6870
6871 if (RT_SUCCESS(rc))
6872 {
6873 /* Cache successfully opened, make it the current one. */
6874 if (!pDisk->pCache)
6875 pDisk->pCache = pCache;
6876 else
6877 rc = VERR_VD_CACHE_ALREADY_EXISTS;
6878 }
6879
6880 if (RT_FAILURE(rc))
6881 {
6882 /* Error detected, but image opened. Close image. */
6883 rc2 = pCache->Backend->pfnClose(pCache->pBackendData, false);
6884 AssertRC(rc2);
6885 pCache->pBackendData = NULL;
6886 }
6887 } while (0);
6888
6889 if (RT_UNLIKELY(fLockWrite))
6890 {
6891 rc2 = vdThreadFinishWrite(pDisk);
6892 AssertRC(rc2);
6893 }
6894
6895 if (RT_FAILURE(rc))
6896 {
6897 if (pCache)
6898 {
6899 if (pCache->pszFilename)
6900 RTStrFree(pCache->pszFilename);
6901 RTMemFree(pCache);
6902 }
6903 }
6904
6905 LogFlowFunc(("returns %Rrc\n", rc));
6906 return rc;
6907}
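
/* Illustrative sketch, not compiled in: attaching a cache image to a container whose
 * image chain is already open (the modification UUID check above compares against the
 * last opened image). The "VCI" backend name and the file name are assumptions made for
 * illustration only. */
#if 0
static int vdExampleAttachCache(PVBOXHDD pDisk)
{
    return VDCacheOpen(pDisk, "VCI", "/path/to/disk.vci",
                       VD_OPEN_FLAGS_NORMAL, NULL /* pVDIfsCache */);
}
#endif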
6908
6909VBOXDDU_DECL(int) VDFilterAdd(PVBOXHDD pDisk, const char *pszFilter, uint32_t fFlags,
6910 PVDINTERFACE pVDIfsFilter)
6911{
6912 int rc = VINF_SUCCESS;
6913 int rc2;
6914 bool fLockWrite = false;
6915 PVDFILTER pFilter = NULL;
6916
6917 LogFlowFunc(("pDisk=%#p pszFilter=\"%s\" pVDIfsFilter=%#p\n",
6918 pDisk, pszFilter, pVDIfsFilter));
6919
6920 do
6921 {
6922 /* sanity check */
6923 AssertPtrBreakStmt(pDisk, rc = VERR_INVALID_PARAMETER);
6924 AssertMsg(pDisk->u32Signature == VBOXHDDDISK_SIGNATURE, ("u32Signature=%08x\n", pDisk->u32Signature));
6925
6926 /* Check arguments. */
6927 AssertMsgBreakStmt(VALID_PTR(pszFilter) && *pszFilter,
6928 ("pszFilter=%#p \"%s\"\n", pszFilter, pszFilter),
6929 rc = VERR_INVALID_PARAMETER);
6930
6931 AssertMsgBreakStmt(!(fFlags & ~VD_FILTER_FLAGS_MASK),
6932 ("Invalid flags set (fFlags=%#x)\n", fFlags),
6933 rc = VERR_INVALID_PARAMETER);
6934
6935 /* Set up image descriptor. */
6936 pFilter = (PVDFILTER)RTMemAllocZ(sizeof(VDFILTER));
6937 if (!pFilter)
6938 {
6939 rc = VERR_NO_MEMORY;
6940 break;
6941 }
6942
6943 rc = vdFindFilterBackend(pszFilter, &pFilter->pBackend);
6944 if (RT_FAILURE(rc))
6945 break;
6946 if (!pFilter->pBackend)
6947 {
6948 rc = vdError(pDisk, VERR_INVALID_PARAMETER, RT_SRC_POS,
6949 N_("VD: unknown filter backend name '%s'"), pszFilter);
6950 break;
6951 }
6952
6953 pFilter->VDIo.pDisk = pDisk;
6954 pFilter->pVDIfsFilter = pVDIfsFilter;
6955
6956 /* Set up the internal I/O interface. */
6957 AssertBreakStmt(!VDIfIoIntGet(pVDIfsFilter), rc = VERR_INVALID_PARAMETER);
6958 vdIfIoIntCallbacksSetup(&pFilter->VDIo.VDIfIoInt);
6959 rc = VDInterfaceAdd(&pFilter->VDIo.VDIfIoInt.Core, "VD_IOINT", VDINTERFACETYPE_IOINT,
6960 &pFilter->VDIo, sizeof(VDINTERFACEIOINT), &pFilter->pVDIfsFilter);
6961 AssertRC(rc);
6962
6963 rc = pFilter->pBackend->pfnCreate(pDisk->pVDIfsDisk, fFlags & VD_FILTER_FLAGS_INFO,
6964 pFilter->pVDIfsFilter, &pFilter->pvBackendData);
6965 if (RT_FAILURE(rc))
6966 break;
6967
6968 /* Lock disk for writing, as we modify pDisk information below. */
6969 rc2 = vdThreadStartWrite(pDisk);
6970 AssertRC(rc2);
6971 fLockWrite = true;
6972
6973 /* Add filter to chains. */
6974 if (fFlags & VD_FILTER_FLAGS_WRITE)
6975 {
6976 RTListAppend(&pDisk->ListFilterChainWrite, &pFilter->ListNodeChainWrite);
6977 vdFilterRetain(pFilter);
6978 }
6979
6980 if (fFlags & VD_FILTER_FLAGS_READ)
6981 {
6982 RTListAppend(&pDisk->ListFilterChainRead, &pFilter->ListNodeChainRead);
6983 vdFilterRetain(pFilter);
6984 }
6985 } while (0);
6986
6987 if (RT_UNLIKELY(fLockWrite))
6988 {
6989 rc2 = vdThreadFinishWrite(pDisk);
6990 AssertRC(rc2);
6991 }
6992
6993 if (RT_FAILURE(rc))
6994 {
6995 if (pFilter)
6996 RTMemFree(pFilter);
6997 }
6998
6999 LogFlowFunc(("returns %Rrc\n", rc));
7000 return rc;
7001}
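
/* Illustrative sketch, not compiled in: adding a filter to both the read and the write
 * chain of a container. "MYFILTER" is a hypothetical backend name; real filters come from
 * plugins loaded via VDPluginLoadFromFilename()/VDPluginLoadFromPath(). */
#if 0
static int vdExampleAddFilter(PVBOXHDD pDisk)
{
    return VDFilterAdd(pDisk, "MYFILTER",
                       VD_FILTER_FLAGS_READ | VD_FILTER_FLAGS_WRITE,
                       NULL /* pVDIfsFilter */);
}
#endif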
7002
7003/**
7004 * Creates and opens a new base image file.
7005 *
7006 * @returns VBox status code.
7007 * @param pDisk Pointer to HDD container.
7008 * @param pszBackend Name of the image file backend to use.
7009 * @param pszFilename Name of the image file to create.
7010 * @param cbSize Image size in bytes.
7011 * @param uImageFlags Flags specifying special image features.
7012 * @param pszComment Pointer to image comment. NULL is ok.
7013 * @param pPCHSGeometry Pointer to physical disk geometry <= (16383,16,63). Not NULL.
7014 * @param pLCHSGeometry Pointer to logical disk geometry <= (x,255,63). Not NULL.
7015 * @param pUuid New UUID of the image. If NULL, a new UUID is created.
7016 * @param uOpenFlags Image file open mode, see VD_OPEN_FLAGS_* constants.
7017 * @param pVDIfsImage Pointer to the per-image VD interface list.
7018 * @param pVDIfsOperation Pointer to the per-operation VD interface list.
7019 */
7020VBOXDDU_DECL(int) VDCreateBase(PVBOXHDD pDisk, const char *pszBackend,
7021 const char *pszFilename, uint64_t cbSize,
7022 unsigned uImageFlags, const char *pszComment,
7023 PCVDGEOMETRY pPCHSGeometry,
7024 PCVDGEOMETRY pLCHSGeometry,
7025 PCRTUUID pUuid, unsigned uOpenFlags,
7026 PVDINTERFACE pVDIfsImage,
7027 PVDINTERFACE pVDIfsOperation)
7028{
7029 int rc = VINF_SUCCESS;
7030 int rc2;
7031 bool fLockWrite = false, fLockRead = false;
7032 PVDIMAGE pImage = NULL;
7033 RTUUID uuid;
7034
7035 LogFlowFunc(("pDisk=%#p pszBackend=\"%s\" pszFilename=\"%s\" cbSize=%llu uImageFlags=%#x pszComment=\"%s\" PCHS=%u/%u/%u LCHS=%u/%u/%u Uuid=%RTuuid uOpenFlags=%#x pVDIfsImage=%#p pVDIfsOperation=%#p\n",
7036 pDisk, pszBackend, pszFilename, cbSize, uImageFlags, pszComment,
7037 pPCHSGeometry->cCylinders, pPCHSGeometry->cHeads,
7038 pPCHSGeometry->cSectors, pLCHSGeometry->cCylinders,
7039 pLCHSGeometry->cHeads, pLCHSGeometry->cSectors, pUuid,
7040 uOpenFlags, pVDIfsImage, pVDIfsOperation));
7041
7042 PVDINTERFACEPROGRESS pIfProgress = VDIfProgressGet(pVDIfsOperation);
7043
7044 do
7045 {
7046 /* sanity check */
7047 AssertPtrBreakStmt(pDisk, rc = VERR_INVALID_PARAMETER);
7048 AssertMsg(pDisk->u32Signature == VBOXHDDDISK_SIGNATURE, ("u32Signature=%08x\n", pDisk->u32Signature));
7049
7050 /* Check arguments. */
7051 AssertMsgBreakStmt(VALID_PTR(pszBackend) && *pszBackend,
7052 ("pszBackend=%#p \"%s\"\n", pszBackend, pszBackend),
7053 rc = VERR_INVALID_PARAMETER);
7054 AssertMsgBreakStmt(VALID_PTR(pszFilename) && *pszFilename,
7055 ("pszFilename=%#p \"%s\"\n", pszFilename, pszFilename),
7056 rc = VERR_INVALID_PARAMETER);
7057 AssertMsgBreakStmt(cbSize,
7058 ("cbSize=%llu\n", cbSize),
7059 rc = VERR_INVALID_PARAMETER);
7060 if (cbSize % 512)
7061 {
7062 rc = vdError(pDisk, VERR_VD_INVALID_SIZE, RT_SRC_POS,
7063 N_("VD: The given disk size %llu is not aligned on a sector boundary (512 bytes)"), cbSize);
7064 break;
7065 }
7066 AssertMsgBreakStmt( ((uImageFlags & ~VD_IMAGE_FLAGS_MASK) == 0)
7067 || ((uImageFlags & (VD_IMAGE_FLAGS_FIXED | VD_IMAGE_FLAGS_DIFF)) != VD_IMAGE_FLAGS_FIXED),
7068 ("uImageFlags=%#x\n", uImageFlags),
7069 rc = VERR_INVALID_PARAMETER);
7070 /* The PCHS geometry fields may be 0 to leave it for later. */
7071 AssertMsgBreakStmt( VALID_PTR(pPCHSGeometry)
7072 && pPCHSGeometry->cHeads <= 16
7073 && pPCHSGeometry->cSectors <= 63,
7074 ("pPCHSGeometry=%#p PCHS=%u/%u/%u\n", pPCHSGeometry,
7075 pPCHSGeometry->cCylinders, pPCHSGeometry->cHeads,
7076 pPCHSGeometry->cSectors),
7077 rc = VERR_INVALID_PARAMETER);
7078 /* The LCHS geometry fields may be 0 to leave it to later autodetection. */
7079 AssertMsgBreakStmt( VALID_PTR(pLCHSGeometry)
7080 && pLCHSGeometry->cHeads <= 255
7081 && pLCHSGeometry->cSectors <= 63,
7082 ("pLCHSGeometry=%#p LCHS=%u/%u/%u\n", pLCHSGeometry,
7083 pLCHSGeometry->cCylinders, pLCHSGeometry->cHeads,
7084 pLCHSGeometry->cSectors),
7085 rc = VERR_INVALID_PARAMETER);
7086 /* The UUID may be NULL. */
7087 AssertMsgBreakStmt(pUuid == NULL || VALID_PTR(pUuid),
7088 ("pUuid=%#p UUID=%RTuuid\n", pUuid, pUuid),
7089 rc = VERR_INVALID_PARAMETER);
7090 AssertMsgBreakStmt((uOpenFlags & ~VD_OPEN_FLAGS_MASK) == 0,
7091 ("uOpenFlags=%#x\n", uOpenFlags),
7092 rc = VERR_INVALID_PARAMETER);
7093
7094 /* Check state. Needs a temporary read lock. Holding the write lock
7095 * all the time would be blocking other activities for too long. */
7096 rc2 = vdThreadStartRead(pDisk);
7097 AssertRC(rc2);
7098 fLockRead = true;
7099 AssertMsgBreakStmt(pDisk->cImages == 0,
7100 ("Create base image cannot be done with other images open\n"),
7101 rc = VERR_VD_INVALID_STATE);
7102 rc2 = vdThreadFinishRead(pDisk);
7103 AssertRC(rc2);
7104 fLockRead = false;
7105
7106 /* Set up image descriptor. */
7107 pImage = (PVDIMAGE)RTMemAllocZ(sizeof(VDIMAGE));
7108 if (!pImage)
7109 {
7110 rc = VERR_NO_MEMORY;
7111 break;
7112 }
7113 pImage->pszFilename = RTStrDup(pszFilename);
7114 if (!pImage->pszFilename)
7115 {
7116 rc = VERR_NO_MEMORY;
7117 break;
7118 }
7119 pImage->VDIo.pDisk = pDisk;
7120 pImage->pVDIfsImage = pVDIfsImage;
7121
7122 /* Set up the I/O interface. */
7123 pImage->VDIo.pInterfaceIo = VDIfIoGet(pVDIfsImage);
7124 if (!pImage->VDIo.pInterfaceIo)
7125 {
7126 vdIfIoFallbackCallbacksSetup(&pImage->VDIo.VDIfIo);
7127 rc = VDInterfaceAdd(&pImage->VDIo.VDIfIo.Core, "VD_IO", VDINTERFACETYPE_IO,
7128 pDisk, sizeof(VDINTERFACEIO), &pVDIfsImage);
7129 pImage->VDIo.pInterfaceIo = &pImage->VDIo.VDIfIo;
7130 }
7131
7132 /* Set up the internal I/O interface. */
7133 AssertBreakStmt(!VDIfIoIntGet(pVDIfsImage), rc = VERR_INVALID_PARAMETER);
7134 vdIfIoIntCallbacksSetup(&pImage->VDIo.VDIfIoInt);
7135 rc = VDInterfaceAdd(&pImage->VDIo.VDIfIoInt.Core, "VD_IOINT", VDINTERFACETYPE_IOINT,
7136 &pImage->VDIo, sizeof(VDINTERFACEIOINT), &pImage->pVDIfsImage);
7137 AssertRC(rc);
7138
7139 rc = vdFindBackend(pszBackend, &pImage->Backend);
7140 if (RT_FAILURE(rc))
7141 break;
7142 if (!pImage->Backend)
7143 {
7144 rc = vdError(pDisk, VERR_INVALID_PARAMETER, RT_SRC_POS,
7145 N_("VD: unknown backend name '%s'"), pszBackend);
7146 break;
7147 }
7148 if (!(pImage->Backend->uBackendCaps & ( VD_CAP_CREATE_FIXED
7149 | VD_CAP_CREATE_DYNAMIC)))
7150 {
7151 rc = vdError(pDisk, VERR_INVALID_PARAMETER, RT_SRC_POS,
7152 N_("VD: backend '%s' cannot create base images"), pszBackend);
7153 break;
7154 }
7155 if ( ( (uImageFlags & VD_VMDK_IMAGE_FLAGS_SPLIT_2G)
7156 && !(pImage->Backend->uBackendCaps & VD_CAP_CREATE_SPLIT_2G))
7157 || ( (uImageFlags & VD_VMDK_IMAGE_FLAGS_STREAM_OPTIMIZED)
7158 && RTStrICmp(pszBackend, "VMDK")))
7159 {
7160 rc = vdError(pDisk, VERR_INVALID_PARAMETER, RT_SRC_POS,
7161 N_("VD: backend '%s' does not support the selected image variant"), pszBackend);
7162 break;
7163 }
7164
7165 /* Create UUID if the caller didn't specify one. */
7166 if (!pUuid)
7167 {
7168 rc = RTUuidCreate(&uuid);
7169 if (RT_FAILURE(rc))
7170 {
7171 rc = vdError(pDisk, rc, RT_SRC_POS,
7172 N_("VD: cannot generate UUID for image '%s'"),
7173 pszFilename);
7174 break;
7175 }
7176 pUuid = &uuid;
7177 }
7178
7179 pImage->uOpenFlags = uOpenFlags & VD_OPEN_FLAGS_HONOR_SAME;
7180 uImageFlags &= ~VD_IMAGE_FLAGS_DIFF;
7181 pImage->VDIo.fIgnoreFlush = (uOpenFlags & VD_OPEN_FLAGS_IGNORE_FLUSH) != 0;
7182 rc = pImage->Backend->pfnCreate(pImage->pszFilename, cbSize,
7183 uImageFlags, pszComment, pPCHSGeometry,
7184 pLCHSGeometry, pUuid,
7185 uOpenFlags & ~VD_OPEN_FLAGS_HONOR_SAME,
7186 0, 99,
7187 pDisk->pVDIfsDisk,
7188 pImage->pVDIfsImage,
7189 pVDIfsOperation,
7190 pDisk->enmType,
7191 &pImage->pBackendData);
7192
7193 if (RT_SUCCESS(rc))
7194 {
7195 pImage->VDIo.pBackendData = pImage->pBackendData;
7196 pImage->uImageFlags = uImageFlags;
7197
7198 /* Force sane optimization settings. It's not worth avoiding writes
7199 * to fixed size images. The overhead would have almost no payback. */
7200 if (uImageFlags & VD_IMAGE_FLAGS_FIXED)
7201 pImage->uOpenFlags |= VD_OPEN_FLAGS_HONOR_SAME;
7202
7203 /* Lock disk for writing, as we modify pDisk information below. */
7204 rc2 = vdThreadStartWrite(pDisk);
7205 AssertRC(rc2);
7206 fLockWrite = true;
7207
7208 /** @todo optionally check UUIDs */
7209
7210 /* Re-check state, as the lock wasn't held and another image
7211 * creation call could have been done by another thread. */
7212 AssertMsgStmt(pDisk->cImages == 0,
7213 ("Create base image cannot be done with other images open\n"),
7214 rc = VERR_VD_INVALID_STATE);
7215 }
7216
7217 if (RT_SUCCESS(rc))
7218 {
7219 /* Cache disk information. */
7220 pDisk->cbSize = pImage->Backend->pfnGetSize(pImage->pBackendData);
7221
7222 /* Cache PCHS geometry. */
7223 rc2 = pImage->Backend->pfnGetPCHSGeometry(pImage->pBackendData,
7224 &pDisk->PCHSGeometry);
7225 if (RT_FAILURE(rc2))
7226 {
7227 pDisk->PCHSGeometry.cCylinders = 0;
7228 pDisk->PCHSGeometry.cHeads = 0;
7229 pDisk->PCHSGeometry.cSectors = 0;
7230 }
7231 else
7232 {
7233 /* Make sure the CHS geometry is properly clipped. */
7234 pDisk->PCHSGeometry.cCylinders = RT_MIN(pDisk->PCHSGeometry.cCylinders, 16383);
7235 pDisk->PCHSGeometry.cHeads = RT_MIN(pDisk->PCHSGeometry.cHeads, 16);
7236 pDisk->PCHSGeometry.cSectors = RT_MIN(pDisk->PCHSGeometry.cSectors, 63);
7237 }
7238
7239 /* Cache LCHS geometry. */
7240 rc2 = pImage->Backend->pfnGetLCHSGeometry(pImage->pBackendData,
7241 &pDisk->LCHSGeometry);
7242 if (RT_FAILURE(rc2))
7243 {
7244 pDisk->LCHSGeometry.cCylinders = 0;
7245 pDisk->LCHSGeometry.cHeads = 0;
7246 pDisk->LCHSGeometry.cSectors = 0;
7247 }
7248 else
7249 {
7250 /* Make sure the CHS geometry is properly clipped. */
7251 pDisk->LCHSGeometry.cHeads = RT_MIN(pDisk->LCHSGeometry.cHeads, 255);
7252 pDisk->LCHSGeometry.cSectors = RT_MIN(pDisk->LCHSGeometry.cSectors, 63);
7253 }
7254
7255 /* Image successfully opened, make it the last image. */
7256 vdAddImageToList(pDisk, pImage);
7257 if (!(uOpenFlags & VD_OPEN_FLAGS_READONLY))
7258 pDisk->uModified = VD_IMAGE_MODIFIED_FIRST;
7259 }
7260 else
7261 {
7262 /* Error detected, image may or may not be opened. Close and delete
7263 * image if it was opened. */
7264 if (pImage->pBackendData)
7265 {
7266 rc2 = pImage->Backend->pfnClose(pImage->pBackendData, true);
7267 AssertRC(rc2);
7268 pImage->pBackendData = NULL;
7269 }
7270 }
7271 } while (0);
7272
7273 if (RT_UNLIKELY(fLockWrite))
7274 {
7275 rc2 = vdThreadFinishWrite(pDisk);
7276 AssertRC(rc2);
7277 }
7278 else if (RT_UNLIKELY(fLockRead))
7279 {
7280 rc2 = vdThreadFinishRead(pDisk);
7281 AssertRC(rc2);
7282 }
7283
7284 if (RT_FAILURE(rc))
7285 {
7286 if (pImage)
7287 {
7288 if (pImage->pszFilename)
7289 RTStrFree(pImage->pszFilename);
7290 RTMemFree(pImage);
7291 }
7292 }
7293
7294 if (RT_SUCCESS(rc) && pIfProgress && pIfProgress->pfnProgress)
7295 pIfProgress->pfnProgress(pIfProgress->Core.pvUser, 100);
7296
7297 LogFlowFunc(("returns %Rrc\n", rc));
7298 return rc;
7299}
7300
7301/**
7302 * Creates and opens a new differencing image file in HDD container.
7303 * See comments for VDOpen function about differencing images.
7304 *
7305 * @returns VBox status code.
7306 * @param pDisk Pointer to HDD container.
7307 * @param pszBackend Name of the image file backend to use.
7308 * @param pszFilename Name of the differencing image file to create.
7309 * @param uImageFlags Flags specifying special image features.
7310 * @param pszComment Pointer to image comment. NULL is ok.
7311 * @param pUuid New UUID of the image. If NULL, a new UUID is created.
7312 * @param pParentUuid New parent UUID of the image. If NULL, the UUID is queried automatically.
7313 * @param uOpenFlags Image file open mode, see VD_OPEN_FLAGS_* constants.
7314 * @param pVDIfsImage Pointer to the per-image VD interface list.
7315 * @param pVDIfsOperation Pointer to the per-operation VD interface list.
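 *
 * @par Example (illustrative sketch)
 *      A hedged sketch of putting a differencing image on top of an already
 *      opened base image. It assumes pDisk was set up with VDCreate(); the
 *      "VDI" backend name and the file paths are placeholders for illustration
 *      only.
 * @code
 *      int rc = VDOpen(pDisk, "VDI", "/path/to/base.vdi", VD_OPEN_FLAGS_NORMAL, NULL);
 *      if (RT_SUCCESS(rc))
 *          rc = VDCreateDiff(pDisk, "VDI", "/path/to/diff.vdi",
 *                            VD_IMAGE_FLAGS_NONE, NULL,
 *                            NULL,                      // new UUID is generated
 *                            NULL,                      // parent UUID queried from the base
 *                            VD_OPEN_FLAGS_NORMAL, NULL, NULL);
 * @endcode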
7316 */
7317VBOXDDU_DECL(int) VDCreateDiff(PVBOXHDD pDisk, const char *pszBackend,
7318 const char *pszFilename, unsigned uImageFlags,
7319 const char *pszComment, PCRTUUID pUuid,
7320 PCRTUUID pParentUuid, unsigned uOpenFlags,
7321 PVDINTERFACE pVDIfsImage,
7322 PVDINTERFACE pVDIfsOperation)
7323{
7324 int rc = VINF_SUCCESS;
7325 int rc2;
7326 bool fLockWrite = false, fLockRead = false;
7327 PVDIMAGE pImage = NULL;
7328 RTUUID uuid;
7329
7330 LogFlowFunc(("pDisk=%#p pszBackend=\"%s\" pszFilename=\"%s\" uImageFlags=%#x pszComment=\"%s\" Uuid=%RTuuid uOpenFlags=%#x pVDIfsImage=%#p pVDIfsOperation=%#p\n",
7331 pDisk, pszBackend, pszFilename, uImageFlags, pszComment, pUuid, uOpenFlags, pVDIfsImage, pVDIfsOperation));
7332
7333 PVDINTERFACEPROGRESS pIfProgress = VDIfProgressGet(pVDIfsOperation);
7334
7335 do
7336 {
7337 /* sanity check */
7338 AssertPtrBreakStmt(pDisk, rc = VERR_INVALID_PARAMETER);
7339 AssertMsg(pDisk->u32Signature == VBOXHDDDISK_SIGNATURE, ("u32Signature=%08x\n", pDisk->u32Signature));
7340
7341 /* Check arguments. */
7342 AssertMsgBreakStmt(VALID_PTR(pszBackend) && *pszBackend,
7343 ("pszBackend=%#p \"%s\"\n", pszBackend, pszBackend),
7344 rc = VERR_INVALID_PARAMETER);
7345 AssertMsgBreakStmt(VALID_PTR(pszFilename) && *pszFilename,
7346 ("pszFilename=%#p \"%s\"\n", pszFilename, pszFilename),
7347 rc = VERR_INVALID_PARAMETER);
7348 AssertMsgBreakStmt((uImageFlags & ~VD_IMAGE_FLAGS_MASK) == 0,
7349 ("uImageFlags=%#x\n", uImageFlags),
7350 rc = VERR_INVALID_PARAMETER);
7351 /* The UUID may be NULL. */
7352 AssertMsgBreakStmt(pUuid == NULL || VALID_PTR(pUuid),
7353 ("pUuid=%#p UUID=%RTuuid\n", pUuid, pUuid),
7354 rc = VERR_INVALID_PARAMETER);
7355 /* The parent UUID may be NULL. */
7356 AssertMsgBreakStmt(pParentUuid == NULL || VALID_PTR(pParentUuid),
7357 ("pParentUuid=%#p ParentUUID=%RTuuid\n", pParentUuid, pParentUuid),
7358 rc = VERR_INVALID_PARAMETER);
7359 AssertMsgBreakStmt((uOpenFlags & ~VD_OPEN_FLAGS_MASK) == 0,
7360 ("uOpenFlags=%#x\n", uOpenFlags),
7361 rc = VERR_INVALID_PARAMETER);
7362
7363 /* Check state. Needs a temporary read lock. Holding the write lock
7364 * all the time would be blocking other activities for too long. */
7365 rc2 = vdThreadStartRead(pDisk);
7366 AssertRC(rc2);
7367 fLockRead = true;
7368 AssertMsgBreakStmt(pDisk->cImages != 0,
7369 ("Create diff image cannot be done without other images open\n"),
7370 rc = VERR_VD_INVALID_STATE);
7371 rc2 = vdThreadFinishRead(pDisk);
7372 AssertRC(rc2);
7373 fLockRead = false;
7374
7375 /*
7376 * Destroy the current discard state first which might still have pending blocks
7377 * for the currently opened image which will be switched to readonly mode.
7378 */
7379 /* Lock disk for writing, as we modify pDisk information below. */
7380 rc2 = vdThreadStartWrite(pDisk);
7381 AssertRC(rc2);
7382 fLockWrite = true;
7383 rc = vdDiscardStateDestroy(pDisk);
7384 if (RT_FAILURE(rc))
7385 break;
7386 rc2 = vdThreadFinishWrite(pDisk);
7387 AssertRC(rc2);
7388 fLockWrite = false;
7389
7390 /* Set up image descriptor. */
7391 pImage = (PVDIMAGE)RTMemAllocZ(sizeof(VDIMAGE));
7392 if (!pImage)
7393 {
7394 rc = VERR_NO_MEMORY;
7395 break;
7396 }
7397 pImage->pszFilename = RTStrDup(pszFilename);
7398 if (!pImage->pszFilename)
7399 {
7400 rc = VERR_NO_MEMORY;
7401 break;
7402 }
7403
7404 rc = vdFindBackend(pszBackend, &pImage->Backend);
7405 if (RT_FAILURE(rc))
7406 break;
7407 if (!pImage->Backend)
7408 {
7409 rc = vdError(pDisk, VERR_INVALID_PARAMETER, RT_SRC_POS,
7410 N_("VD: unknown backend name '%s'"), pszBackend);
7411 break;
7412 }
7413 if ( !(pImage->Backend->uBackendCaps & VD_CAP_DIFF)
7414 || !(pImage->Backend->uBackendCaps & ( VD_CAP_CREATE_FIXED
7415 | VD_CAP_CREATE_DYNAMIC)))
7416 {
7417 rc = vdError(pDisk, VERR_INVALID_PARAMETER, RT_SRC_POS,
7418 N_("VD: backend '%s' cannot create diff images"), pszBackend);
7419 break;
7420 }
7421
7422 pImage->VDIo.pDisk = pDisk;
7423 pImage->pVDIfsImage = pVDIfsImage;
7424
7425 /* Set up the I/O interface. */
7426 pImage->VDIo.pInterfaceIo = VDIfIoGet(pVDIfsImage);
7427 if (!pImage->VDIo.pInterfaceIo)
7428 {
7429 vdIfIoFallbackCallbacksSetup(&pImage->VDIo.VDIfIo);
7430 rc = VDInterfaceAdd(&pImage->VDIo.VDIfIo.Core, "VD_IO", VDINTERFACETYPE_IO,
7431 pDisk, sizeof(VDINTERFACEIO), &pVDIfsImage);
7432 pImage->VDIo.pInterfaceIo = &pImage->VDIo.VDIfIo;
7433 }
7434
7435 /* Set up the internal I/O interface. */
7436 AssertBreakStmt(!VDIfIoIntGet(pVDIfsImage), rc = VERR_INVALID_PARAMETER);
7437 vdIfIoIntCallbacksSetup(&pImage->VDIo.VDIfIoInt);
7438 rc = VDInterfaceAdd(&pImage->VDIo.VDIfIoInt.Core, "VD_IOINT", VDINTERFACETYPE_IOINT,
7439 &pImage->VDIo, sizeof(VDINTERFACEIOINT), &pImage->pVDIfsImage);
7440 AssertRC(rc);
7441
7442 /* Create UUID if the caller didn't specify one. */
7443 if (!pUuid)
7444 {
7445 rc = RTUuidCreate(&uuid);
7446 if (RT_FAILURE(rc))
7447 {
7448 rc = vdError(pDisk, rc, RT_SRC_POS,
7449 N_("VD: cannot generate UUID for image '%s'"),
7450 pszFilename);
7451 break;
7452 }
7453 pUuid = &uuid;
7454 }
7455
7456 pImage->uOpenFlags = uOpenFlags & VD_OPEN_FLAGS_HONOR_SAME;
7457 pImage->VDIo.fIgnoreFlush = (uOpenFlags & VD_OPEN_FLAGS_IGNORE_FLUSH) != 0;
7458 uImageFlags |= VD_IMAGE_FLAGS_DIFF;
7459 rc = pImage->Backend->pfnCreate(pImage->pszFilename, pDisk->cbSize,
7460 uImageFlags | VD_IMAGE_FLAGS_DIFF,
7461 pszComment, &pDisk->PCHSGeometry,
7462 &pDisk->LCHSGeometry, pUuid,
7463 uOpenFlags & ~VD_OPEN_FLAGS_HONOR_SAME,
7464 0, 99,
7465 pDisk->pVDIfsDisk,
7466 pImage->pVDIfsImage,
7467 pVDIfsOperation,
7468 pDisk->enmType,
7469 &pImage->pBackendData);
7470
7471 if (RT_SUCCESS(rc))
7472 {
7473 pImage->VDIo.pBackendData = pImage->pBackendData;
7474 pImage->uImageFlags = uImageFlags;
7475
7476 /* Lock disk for writing, as we modify pDisk information below. */
7477 rc2 = vdThreadStartWrite(pDisk);
7478 AssertRC(rc2);
7479 fLockWrite = true;
7480
7481 /* Switch previous image to read-only mode. */
7482 unsigned uOpenFlagsPrevImg;
7483 uOpenFlagsPrevImg = pDisk->pLast->Backend->pfnGetOpenFlags(pDisk->pLast->pBackendData);
7484 if (!(uOpenFlagsPrevImg & VD_OPEN_FLAGS_READONLY))
7485 {
7486 uOpenFlagsPrevImg |= VD_OPEN_FLAGS_READONLY;
7487 rc = pDisk->pLast->Backend->pfnSetOpenFlags(pDisk->pLast->pBackendData, uOpenFlagsPrevImg);
7488 }
7489
7490 /** @todo optionally check UUIDs */
7491
7492 /* Re-check state, as the lock wasn't held and another image
7493 * creation call could have been done by another thread. */
7494 AssertMsgStmt(pDisk->cImages != 0,
7495 ("Create diff image cannot be done without other images open\n"),
7496 rc = VERR_VD_INVALID_STATE);
7497 }
7498
7499 if (RT_SUCCESS(rc))
7500 {
7501 RTUUID Uuid;
7502 RTTIMESPEC ts;
7503
7504 if (pParentUuid && !RTUuidIsNull(pParentUuid))
7505 {
7506 Uuid = *pParentUuid;
7507 pImage->Backend->pfnSetParentUuid(pImage->pBackendData, &Uuid);
7508 }
7509 else
7510 {
7511 rc2 = pDisk->pLast->Backend->pfnGetUuid(pDisk->pLast->pBackendData,
7512 &Uuid);
7513 if (RT_SUCCESS(rc2))
7514 pImage->Backend->pfnSetParentUuid(pImage->pBackendData, &Uuid);
7515 }
7516 rc2 = pDisk->pLast->Backend->pfnGetModificationUuid(pDisk->pLast->pBackendData,
7517 &Uuid);
7518 if (RT_SUCCESS(rc2))
7519 pImage->Backend->pfnSetParentModificationUuid(pImage->pBackendData,
7520 &Uuid);
7521 if (pDisk->pLast->Backend->pfnGetTimestamp)
7522 rc2 = pDisk->pLast->Backend->pfnGetTimestamp(pDisk->pLast->pBackendData,
7523 &ts);
7524 else
7525 rc2 = VERR_NOT_IMPLEMENTED;
7526 if (RT_SUCCESS(rc2) && pImage->Backend->pfnSetParentTimestamp)
7527 pImage->Backend->pfnSetParentTimestamp(pImage->pBackendData, &ts);
7528
7529 if (pImage->Backend->pfnSetParentFilename)
7530 rc2 = pImage->Backend->pfnSetParentFilename(pImage->pBackendData, pDisk->pLast->pszFilename);
7531 }
7532
7533 if (RT_SUCCESS(rc))
7534 {
7535 /* Image successfully opened, make it the last image. */
7536 vdAddImageToList(pDisk, pImage);
7537 if (!(uOpenFlags & VD_OPEN_FLAGS_READONLY))
7538 pDisk->uModified = VD_IMAGE_MODIFIED_FIRST;
7539 }
7540 else
7541 {
7542 /* Error detected, but image opened. Close and delete image. */
7543 rc2 = pImage->Backend->pfnClose(pImage->pBackendData, true);
7544 AssertRC(rc2);
7545 pImage->pBackendData = NULL;
7546 }
7547 } while (0);
7548
7549 if (RT_UNLIKELY(fLockWrite))
7550 {
7551 rc2 = vdThreadFinishWrite(pDisk);
7552 AssertRC(rc2);
7553 }
7554 else if (RT_UNLIKELY(fLockRead))
7555 {
7556 rc2 = vdThreadFinishRead(pDisk);
7557 AssertRC(rc2);
7558 }
7559
7560 if (RT_FAILURE(rc))
7561 {
7562 if (pImage)
7563 {
7564 if (pImage->pszFilename)
7565 RTStrFree(pImage->pszFilename);
7566 RTMemFree(pImage);
7567 }
7568 }
7569
7570 if (RT_SUCCESS(rc) && pIfProgress && pIfProgress->pfnProgress)
7571 pIfProgress->pfnProgress(pIfProgress->Core.pvUser, 100);
7572
7573 LogFlowFunc(("returns %Rrc\n", rc));
7574 return rc;
7575}
7576
7577
7578/**
 7579	 * Creates and opens a new cache image file in HDD container.
7580 *
7581 * @return VBox status code.
 7582	 * @param   pDisk           Pointer to the HDD container.
 7583	 * @param   pszBackend      Name of the cache file backend to use (case insensitive).
 	 * @param   pszFilename     Name of the cache file to create.
7584 * @param cbSize Maximum size of the cache.
7585 * @param uImageFlags Flags specifying special cache features.
7586 * @param pszComment Pointer to image comment. NULL is ok.
7587 * @param pUuid New UUID of the image. If NULL, a new UUID is created.
7588 * @param uOpenFlags Image file open mode, see VD_OPEN_FLAGS_* constants.
7589 * @param pVDIfsCache Pointer to the per-cache VD interface list.
7590 * @param pVDIfsOperation Pointer to the per-operation VD interface list.
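 *
 * @par Example (illustrative sketch)
 *      A hedged sketch of attaching a 512 MB cache file to an open disk. It
 *      assumes pDisk comes from VDCreate(); the "vci" cache backend name and
 *      the file path are assumptions used purely for illustration.
 * @code
 *      int rc = VDCreateCache(pDisk, "vci", "/path/to/disk.vci",
 *                             512 * _1M,                 // maximum cache size
 *                             VD_IMAGE_FLAGS_NONE, NULL, // no special flags, no comment
 *                             NULL,                      // let VD create a new UUID
 *                             VD_OPEN_FLAGS_NORMAL, NULL, NULL);
 * @endcode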
7591 */
7592VBOXDDU_DECL(int) VDCreateCache(PVBOXHDD pDisk, const char *pszBackend,
7593 const char *pszFilename, uint64_t cbSize,
7594 unsigned uImageFlags, const char *pszComment,
7595 PCRTUUID pUuid, unsigned uOpenFlags,
7596 PVDINTERFACE pVDIfsCache, PVDINTERFACE pVDIfsOperation)
7597{
7598 int rc = VINF_SUCCESS;
7599 int rc2;
7600 bool fLockWrite = false, fLockRead = false;
7601 PVDCACHE pCache = NULL;
7602 RTUUID uuid;
7603
7604 LogFlowFunc(("pDisk=%#p pszBackend=\"%s\" pszFilename=\"%s\" cbSize=%llu uImageFlags=%#x pszComment=\"%s\" Uuid=%RTuuid uOpenFlags=%#x pVDIfsImage=%#p pVDIfsOperation=%#p\n",
7605 pDisk, pszBackend, pszFilename, cbSize, uImageFlags, pszComment, pUuid, uOpenFlags, pVDIfsCache, pVDIfsOperation));
7606
7607 PVDINTERFACEPROGRESS pIfProgress = VDIfProgressGet(pVDIfsOperation);
7608
7609 do
7610 {
7611 /* sanity check */
7612 AssertPtrBreakStmt(pDisk, rc = VERR_INVALID_PARAMETER);
7613 AssertMsg(pDisk->u32Signature == VBOXHDDDISK_SIGNATURE, ("u32Signature=%08x\n", pDisk->u32Signature));
7614
7615 /* Check arguments. */
7616 AssertMsgBreakStmt(VALID_PTR(pszBackend) && *pszBackend,
7617 ("pszBackend=%#p \"%s\"\n", pszBackend, pszBackend),
7618 rc = VERR_INVALID_PARAMETER);
7619 AssertMsgBreakStmt(VALID_PTR(pszFilename) && *pszFilename,
7620 ("pszFilename=%#p \"%s\"\n", pszFilename, pszFilename),
7621 rc = VERR_INVALID_PARAMETER);
7622 AssertMsgBreakStmt(cbSize,
7623 ("cbSize=%llu\n", cbSize),
7624 rc = VERR_INVALID_PARAMETER);
7625 AssertMsgBreakStmt((uImageFlags & ~VD_IMAGE_FLAGS_MASK) == 0,
7626 ("uImageFlags=%#x\n", uImageFlags),
7627 rc = VERR_INVALID_PARAMETER);
7628 /* The UUID may be NULL. */
7629 AssertMsgBreakStmt(pUuid == NULL || VALID_PTR(pUuid),
7630 ("pUuid=%#p UUID=%RTuuid\n", pUuid, pUuid),
7631 rc = VERR_INVALID_PARAMETER);
7632 AssertMsgBreakStmt((uOpenFlags & ~VD_OPEN_FLAGS_MASK) == 0,
7633 ("uOpenFlags=%#x\n", uOpenFlags),
7634 rc = VERR_INVALID_PARAMETER);
7635
7636 /* Check state. Needs a temporary read lock. Holding the write lock
7637 * all the time would be blocking other activities for too long. */
7638 rc2 = vdThreadStartRead(pDisk);
7639 AssertRC(rc2);
7640 fLockRead = true;
7641 AssertMsgBreakStmt(!pDisk->pCache,
7642 ("Create cache image cannot be done with a cache already attached\n"),
7643 rc = VERR_VD_CACHE_ALREADY_EXISTS);
7644 rc2 = vdThreadFinishRead(pDisk);
7645 AssertRC(rc2);
7646 fLockRead = false;
7647
7648 /* Set up image descriptor. */
7649 pCache = (PVDCACHE)RTMemAllocZ(sizeof(VDCACHE));
7650 if (!pCache)
7651 {
7652 rc = VERR_NO_MEMORY;
7653 break;
7654 }
7655 pCache->pszFilename = RTStrDup(pszFilename);
7656 if (!pCache->pszFilename)
7657 {
7658 rc = VERR_NO_MEMORY;
7659 break;
7660 }
7661
7662 rc = vdFindCacheBackend(pszBackend, &pCache->Backend);
7663 if (RT_FAILURE(rc))
7664 break;
7665 if (!pCache->Backend)
7666 {
7667 rc = vdError(pDisk, VERR_INVALID_PARAMETER, RT_SRC_POS,
7668 N_("VD: unknown backend name '%s'"), pszBackend);
7669 break;
7670 }
7671
7672 pCache->VDIo.pDisk = pDisk;
7673 pCache->pVDIfsCache = pVDIfsCache;
7674
7675 /* Set up the I/O interface. */
7676 pCache->VDIo.pInterfaceIo = VDIfIoGet(pVDIfsCache);
7677 if (!pCache->VDIo.pInterfaceIo)
7678 {
7679 vdIfIoFallbackCallbacksSetup(&pCache->VDIo.VDIfIo);
7680 rc = VDInterfaceAdd(&pCache->VDIo.VDIfIo.Core, "VD_IO", VDINTERFACETYPE_IO,
7681 pDisk, sizeof(VDINTERFACEIO), &pVDIfsCache);
7682 pCache->VDIo.pInterfaceIo = &pCache->VDIo.VDIfIo;
7683 }
7684
7685 /* Set up the internal I/O interface. */
7686 AssertBreakStmt(!VDIfIoIntGet(pVDIfsCache), rc = VERR_INVALID_PARAMETER);
7687 vdIfIoIntCallbacksSetup(&pCache->VDIo.VDIfIoInt);
7688 rc = VDInterfaceAdd(&pCache->VDIo.VDIfIoInt.Core, "VD_IOINT", VDINTERFACETYPE_IOINT,
7689 &pCache->VDIo, sizeof(VDINTERFACEIOINT), &pCache->pVDIfsCache);
7690 AssertRC(rc);
7691
7692 /* Create UUID if the caller didn't specify one. */
7693 if (!pUuid)
7694 {
7695 rc = RTUuidCreate(&uuid);
7696 if (RT_FAILURE(rc))
7697 {
7698 rc = vdError(pDisk, rc, RT_SRC_POS,
7699 N_("VD: cannot generate UUID for image '%s'"),
7700 pszFilename);
7701 break;
7702 }
7703 pUuid = &uuid;
7704 }
7705
7706 pCache->uOpenFlags = uOpenFlags & VD_OPEN_FLAGS_HONOR_SAME;
7707 pCache->VDIo.fIgnoreFlush = (uOpenFlags & VD_OPEN_FLAGS_IGNORE_FLUSH) != 0;
7708 rc = pCache->Backend->pfnCreate(pCache->pszFilename, cbSize,
7709 uImageFlags,
7710 pszComment, pUuid,
7711 uOpenFlags & ~VD_OPEN_FLAGS_HONOR_SAME,
7712 0, 99,
7713 pDisk->pVDIfsDisk,
7714 pCache->pVDIfsCache,
7715 pVDIfsOperation,
7716 &pCache->pBackendData);
7717
7718 if (RT_SUCCESS(rc))
7719 {
7720 /* Lock disk for writing, as we modify pDisk information below. */
7721 rc2 = vdThreadStartWrite(pDisk);
7722 AssertRC(rc2);
7723 fLockWrite = true;
7724
7725 pCache->VDIo.pBackendData = pCache->pBackendData;
7726
7727 /* Re-check state, as the lock wasn't held and another image
7728 * creation call could have been done by another thread. */
7729 AssertMsgStmt(!pDisk->pCache,
7730 ("Create cache image cannot be done with another cache open\n"),
7731 rc = VERR_VD_CACHE_ALREADY_EXISTS);
7732 }
7733
7734 if ( RT_SUCCESS(rc)
7735 && pDisk->pLast)
7736 {
7737 RTUUID UuidModification;
7738
7739 /* Set same modification Uuid as the last image. */
7740 rc = pDisk->pLast->Backend->pfnGetModificationUuid(pDisk->pLast->pBackendData,
7741 &UuidModification);
7742 if (RT_SUCCESS(rc))
7743 {
7744 rc = pCache->Backend->pfnSetModificationUuid(pCache->pBackendData,
7745 &UuidModification);
7746 }
7747
7748 if (rc == VERR_NOT_SUPPORTED)
7749 rc = VINF_SUCCESS;
7750 }
7751
7752 if (RT_SUCCESS(rc))
7753 {
7754 /* Cache successfully created. */
7755 pDisk->pCache = pCache;
7756 }
7757 else
7758 {
7759 /* Error detected, but image opened. Close and delete image. */
7760 rc2 = pCache->Backend->pfnClose(pCache->pBackendData, true);
7761 AssertRC(rc2);
7762 pCache->pBackendData = NULL;
7763 }
7764 } while (0);
7765
7766 if (RT_UNLIKELY(fLockWrite))
7767 {
7768 rc2 = vdThreadFinishWrite(pDisk);
7769 AssertRC(rc2);
7770 }
7771 else if (RT_UNLIKELY(fLockRead))
7772 {
7773 rc2 = vdThreadFinishRead(pDisk);
7774 AssertRC(rc2);
7775 }
7776
7777 if (RT_FAILURE(rc))
7778 {
7779 if (pCache)
7780 {
7781 if (pCache->pszFilename)
7782 RTStrFree(pCache->pszFilename);
7783 RTMemFree(pCache);
7784 }
7785 }
7786
7787 if (RT_SUCCESS(rc) && pIfProgress && pIfProgress->pfnProgress)
7788 pIfProgress->pfnProgress(pIfProgress->Core.pvUser, 100);
7789
7790 LogFlowFunc(("returns %Rrc\n", rc));
7791 return rc;
7792}
7793
7794/**
7795 * Merges two images (not necessarily with direct parent/child relationship).
7796 * As a side effect the source image and potentially the other images which
 7797	 * are also merged into the destination are deleted, both from the disk and
 7798	 * from the chain of images in the HDD container.
7799 *
7800 * @returns VBox status code.
7801 * @returns VERR_VD_IMAGE_NOT_FOUND if image with specified number was not opened.
7802 * @param pDisk Pointer to HDD container.
 7803	 * @param   nImageFrom      Number of the image to merge from, counts from 0.
 7804	 * @param   nImageTo        Number of the image to merge to, counts from 0.
7805 * @param pVDIfsOperation Pointer to the per-operation VD interface list.
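 *
 * @par Example (illustrative sketch)
 *      A hedged sketch of merging the first differencing image (number 1)
 *      down into the base image (number 0) of an already opened chain; on
 *      success the differencing image is closed, deleted and removed from
 *      the chain.
 * @code
 *      int rc = VDMerge(pDisk, 1, 0, NULL);   // nImageFrom = 1, nImageTo = 0
 * @endcode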
7806 */
7807VBOXDDU_DECL(int) VDMerge(PVBOXHDD pDisk, unsigned nImageFrom,
7808 unsigned nImageTo, PVDINTERFACE pVDIfsOperation)
7809{
7810 int rc = VINF_SUCCESS;
7811 int rc2;
7812 bool fLockWrite = false, fLockRead = false;
7813 void *pvBuf = NULL;
7814
7815 LogFlowFunc(("pDisk=%#p nImageFrom=%u nImageTo=%u pVDIfsOperation=%#p\n",
7816 pDisk, nImageFrom, nImageTo, pVDIfsOperation));
7817
7818 PVDINTERFACEPROGRESS pIfProgress = VDIfProgressGet(pVDIfsOperation);
7819
7820 do
7821 {
7822 /* sanity check */
7823 AssertPtrBreakStmt(pDisk, rc = VERR_INVALID_PARAMETER);
7824 AssertMsg(pDisk->u32Signature == VBOXHDDDISK_SIGNATURE, ("u32Signature=%08x\n", pDisk->u32Signature));
7825
7826 /* For simplicity reasons lock for writing as the image reopen below
7827 * might need it. After all the reopen is usually needed. */
7828 rc2 = vdThreadStartWrite(pDisk);
7829 AssertRC(rc2);
7830 fLockWrite = true;
7831 PVDIMAGE pImageFrom = vdGetImageByNumber(pDisk, nImageFrom);
7832 PVDIMAGE pImageTo = vdGetImageByNumber(pDisk, nImageTo);
7833 if (!pImageFrom || !pImageTo)
7834 {
7835 rc = VERR_VD_IMAGE_NOT_FOUND;
7836 break;
7837 }
7838 AssertBreakStmt(pImageFrom != pImageTo, rc = VERR_INVALID_PARAMETER);
7839
7840 /* Make sure destination image is writable. */
7841 unsigned uOpenFlags = pImageTo->Backend->pfnGetOpenFlags(pImageTo->pBackendData);
7842 if (uOpenFlags & VD_OPEN_FLAGS_READONLY)
7843 {
7844 /*
7845 * Clear skip consistency checks because the image is made writable now and
7846 * skipping consistency checks is only possible for readonly images.
7847 */
7848 uOpenFlags &= ~(VD_OPEN_FLAGS_READONLY | VD_OPEN_FLAGS_SKIP_CONSISTENCY_CHECKS);
7849 rc = pImageTo->Backend->pfnSetOpenFlags(pImageTo->pBackendData,
7850 uOpenFlags);
7851 if (RT_FAILURE(rc))
7852 break;
7853 }
7854
7855 /* Get size of destination image. */
7856 uint64_t cbSize = pImageTo->Backend->pfnGetSize(pImageTo->pBackendData);
7857 rc2 = vdThreadFinishWrite(pDisk);
7858 AssertRC(rc2);
7859 fLockWrite = false;
7860
7861 /* Allocate tmp buffer. */
7862 pvBuf = RTMemTmpAlloc(VD_MERGE_BUFFER_SIZE);
7863 if (!pvBuf)
7864 {
7865 rc = VERR_NO_MEMORY;
7866 break;
7867 }
7868
 7869	        /* Merging is done directly on the images themselves. This potentially
7870 * causes trouble if the disk is full in the middle of operation. */
7871 if (nImageFrom < nImageTo)
7872 {
7873 /* Merge parent state into child. This means writing all not
7874 * allocated blocks in the destination image which are allocated in
7875 * the images to be merged. */
7876 uint64_t uOffset = 0;
7877 uint64_t cbRemaining = cbSize;
7878 do
7879 {
7880 size_t cbThisRead = RT_MIN(VD_MERGE_BUFFER_SIZE, cbRemaining);
7881 RTSGSEG SegmentBuf;
7882 RTSGBUF SgBuf;
7883 VDIOCTX IoCtx;
7884
7885 SegmentBuf.pvSeg = pvBuf;
7886 SegmentBuf.cbSeg = VD_MERGE_BUFFER_SIZE;
7887 RTSgBufInit(&SgBuf, &SegmentBuf, 1);
7888 vdIoCtxInit(&IoCtx, pDisk, VDIOCTXTXDIR_READ, 0, 0, NULL,
7889 &SgBuf, NULL, NULL, VDIOCTX_FLAGS_SYNC);
7890
7891 /* Need to hold the write lock during a read-write operation. */
7892 rc2 = vdThreadStartWrite(pDisk);
7893 AssertRC(rc2);
7894 fLockWrite = true;
7895
7896 rc = pImageTo->Backend->pfnRead(pImageTo->pBackendData,
7897 uOffset, cbThisRead,
7898 &IoCtx, &cbThisRead);
7899 if (rc == VERR_VD_BLOCK_FREE)
7900 {
7901 /* Search for image with allocated block. Do not attempt to
7902 * read more than the previous reads marked as valid.
7903 * Otherwise this would return stale data when different
7904 * block sizes are used for the images. */
7905 for (PVDIMAGE pCurrImage = pImageTo->pPrev;
7906 pCurrImage != NULL && pCurrImage != pImageFrom->pPrev && rc == VERR_VD_BLOCK_FREE;
7907 pCurrImage = pCurrImage->pPrev)
7908 {
7909 rc = pCurrImage->Backend->pfnRead(pCurrImage->pBackendData,
7910 uOffset, cbThisRead,
7911 &IoCtx, &cbThisRead);
7912 }
7913
7914 if (rc != VERR_VD_BLOCK_FREE)
7915 {
7916 if (RT_FAILURE(rc))
7917 break;
7918 /* Updating the cache is required because this might be a live merge. */
7919 rc = vdWriteHelperEx(pDisk, pImageTo, pImageFrom->pPrev,
7920 uOffset, pvBuf, cbThisRead,
7921 VDIOCTX_FLAGS_READ_UPDATE_CACHE, 0);
7922 if (RT_FAILURE(rc))
7923 break;
7924 }
7925 else
7926 rc = VINF_SUCCESS;
7927 }
7928 else if (RT_FAILURE(rc))
7929 break;
7930
7931 rc2 = vdThreadFinishWrite(pDisk);
7932 AssertRC(rc2);
7933 fLockWrite = false;
7934
7935 uOffset += cbThisRead;
7936 cbRemaining -= cbThisRead;
7937
7938 if (pIfProgress && pIfProgress->pfnProgress)
7939 {
7940 /** @todo r=klaus: this can update the progress to the same
7941 * percentage over and over again if the image format makes
7942 * relatively small increments. */
7943 rc = pIfProgress->pfnProgress(pIfProgress->Core.pvUser,
7944 uOffset * 99 / cbSize);
7945 if (RT_FAILURE(rc))
7946 break;
7947 }
7948 } while (uOffset < cbSize);
7949 }
7950 else
7951 {
7952 /*
7953 * We may need to update the parent uuid of the child coming after
7954 * the last image to be merged. We have to reopen it read/write.
7955 *
7956 * This is done before we do the actual merge to prevent an
7957 * inconsistent chain if the mode change fails for some reason.
7958 */
7959 if (pImageFrom->pNext)
7960 {
7961 PVDIMAGE pImageChild = pImageFrom->pNext;
7962
7963 /* Take the write lock. */
7964 rc2 = vdThreadStartWrite(pDisk);
7965 AssertRC(rc2);
7966 fLockWrite = true;
7967
7968 /* We need to open the image in read/write mode. */
7969 uOpenFlags = pImageChild->Backend->pfnGetOpenFlags(pImageChild->pBackendData);
7970
7971 if (uOpenFlags & VD_OPEN_FLAGS_READONLY)
7972 {
7973 uOpenFlags &= ~VD_OPEN_FLAGS_READONLY;
7974 rc = pImageChild->Backend->pfnSetOpenFlags(pImageChild->pBackendData,
7975 uOpenFlags);
7976 if (RT_FAILURE(rc))
7977 break;
7978 }
7979
7980 rc2 = vdThreadFinishWrite(pDisk);
7981 AssertRC(rc2);
7982 fLockWrite = false;
7983 }
7984
7985 /* If the merge is from the last image we have to relay all writes
7986 * to the merge destination as well, so that concurrent writes
7987 * (in case of a live merge) are handled correctly. */
7988 if (!pImageFrom->pNext)
7989 {
7990 /* Take the write lock. */
7991 rc2 = vdThreadStartWrite(pDisk);
7992 AssertRC(rc2);
7993 fLockWrite = true;
7994
7995 pDisk->pImageRelay = pImageTo;
7996
7997 rc2 = vdThreadFinishWrite(pDisk);
7998 AssertRC(rc2);
7999 fLockWrite = false;
8000 }
8001
8002 /* Merge child state into parent. This means writing all blocks
8003 * which are allocated in the image up to the source image to the
8004 * destination image. */
8005 uint64_t uOffset = 0;
8006 uint64_t cbRemaining = cbSize;
8007 do
8008 {
8009 size_t cbThisRead = RT_MIN(VD_MERGE_BUFFER_SIZE, cbRemaining);
8010 RTSGSEG SegmentBuf;
8011 RTSGBUF SgBuf;
8012 VDIOCTX IoCtx;
8013
8014 rc = VERR_VD_BLOCK_FREE;
8015
8016 SegmentBuf.pvSeg = pvBuf;
8017 SegmentBuf.cbSeg = VD_MERGE_BUFFER_SIZE;
8018 RTSgBufInit(&SgBuf, &SegmentBuf, 1);
8019 vdIoCtxInit(&IoCtx, pDisk, VDIOCTXTXDIR_READ, 0, 0, NULL,
8020 &SgBuf, NULL, NULL, VDIOCTX_FLAGS_SYNC);
8021
8022 /* Need to hold the write lock during a read-write operation. */
8023 rc2 = vdThreadStartWrite(pDisk);
8024 AssertRC(rc2);
8025 fLockWrite = true;
8026
8027 /* Search for image with allocated block. Do not attempt to
8028 * read more than the previous reads marked as valid. Otherwise
8029 * this would return stale data when different block sizes are
8030 * used for the images. */
8031 for (PVDIMAGE pCurrImage = pImageFrom;
8032 pCurrImage != NULL && pCurrImage != pImageTo && rc == VERR_VD_BLOCK_FREE;
8033 pCurrImage = pCurrImage->pPrev)
8034 {
8035 rc = pCurrImage->Backend->pfnRead(pCurrImage->pBackendData,
8036 uOffset, cbThisRead,
8037 &IoCtx, &cbThisRead);
8038 }
8039
8040 if (rc != VERR_VD_BLOCK_FREE)
8041 {
8042 if (RT_FAILURE(rc))
8043 break;
8044 rc = vdWriteHelper(pDisk, pImageTo, uOffset, pvBuf,
8045 cbThisRead, VDIOCTX_FLAGS_READ_UPDATE_CACHE);
8046 if (RT_FAILURE(rc))
8047 break;
8048 }
8049 else
8050 rc = VINF_SUCCESS;
8051
8052 rc2 = vdThreadFinishWrite(pDisk);
8053 AssertRC(rc2);
8054 fLockWrite = false;
8055
8056 uOffset += cbThisRead;
8057 cbRemaining -= cbThisRead;
8058
8059 if (pIfProgress && pIfProgress->pfnProgress)
8060 {
8061 /** @todo r=klaus: this can update the progress to the same
8062 * percentage over and over again if the image format makes
8063 * relatively small increments. */
8064 rc = pIfProgress->pfnProgress(pIfProgress->Core.pvUser,
8065 uOffset * 99 / cbSize);
8066 if (RT_FAILURE(rc))
8067 break;
8068 }
8069 } while (uOffset < cbSize);
8070
8071 /* In case we set up a "write proxy" image above we must clear
8072 * this again now to prevent stray writes. Failure or not. */
8073 if (!pImageFrom->pNext)
8074 {
8075 /* Take the write lock. */
8076 rc2 = vdThreadStartWrite(pDisk);
8077 AssertRC(rc2);
8078 fLockWrite = true;
8079
8080 pDisk->pImageRelay = NULL;
8081
8082 rc2 = vdThreadFinishWrite(pDisk);
8083 AssertRC(rc2);
8084 fLockWrite = false;
8085 }
8086 }
8087
8088 /*
8089 * Leave in case of an error to avoid corrupted data in the image chain
8090 * (includes cancelling the operation by the user).
8091 */
8092 if (RT_FAILURE(rc))
8093 break;
8094
8095 /* Need to hold the write lock while finishing the merge. */
8096 rc2 = vdThreadStartWrite(pDisk);
8097 AssertRC(rc2);
8098 fLockWrite = true;
8099
8100 /* Update parent UUID so that image chain is consistent.
8101 * The two attempts work around the problem that some backends
8102 * (e.g. iSCSI) do not support UUIDs, so we exploit the fact that
8103 * so far there can only be one such image in the chain. */
8104 /** @todo needs a better long-term solution, passing the UUID
8105 * knowledge from the caller or some such */
8106 RTUUID Uuid;
8107 PVDIMAGE pImageChild = NULL;
8108 if (nImageFrom < nImageTo)
8109 {
8110 if (pImageFrom->pPrev)
8111 {
8112 /* plan A: ask the parent itself for its UUID */
8113 rc = pImageFrom->pPrev->Backend->pfnGetUuid(pImageFrom->pPrev->pBackendData,
8114 &Uuid);
8115 if (RT_FAILURE(rc))
8116 {
8117 /* plan B: ask the child of the parent for parent UUID */
8118 rc = pImageFrom->Backend->pfnGetParentUuid(pImageFrom->pBackendData,
8119 &Uuid);
8120 }
8121 AssertRC(rc);
8122 }
8123 else
8124 RTUuidClear(&Uuid);
8125 rc = pImageTo->Backend->pfnSetParentUuid(pImageTo->pBackendData,
8126 &Uuid);
8127 AssertRC(rc);
8128 }
8129 else
8130 {
8131 /* Update the parent uuid of the child of the last merged image. */
8132 if (pImageFrom->pNext)
8133 {
8134 /* plan A: ask the parent itself for its UUID */
8135 rc = pImageTo->Backend->pfnGetUuid(pImageTo->pBackendData,
8136 &Uuid);
8137 if (RT_FAILURE(rc))
8138 {
8139 /* plan B: ask the child of the parent for parent UUID */
8140 rc = pImageTo->pNext->Backend->pfnGetParentUuid(pImageTo->pNext->pBackendData,
8141 &Uuid);
8142 }
8143 AssertRC(rc);
8144
 8145	                rc = pImageFrom->pNext->Backend->pfnSetParentUuid(pImageFrom->pNext->pBackendData,
 8146	                                                                  &Uuid);
8147 AssertRC(rc);
8148
8149 pImageChild = pImageFrom->pNext;
8150 }
8151 }
8152
8153 /* Delete the no longer needed images. */
8154 PVDIMAGE pImg = pImageFrom, pTmp;
8155 while (pImg != pImageTo)
8156 {
8157 if (nImageFrom < nImageTo)
8158 pTmp = pImg->pNext;
8159 else
8160 pTmp = pImg->pPrev;
8161 vdRemoveImageFromList(pDisk, pImg);
8162 pImg->Backend->pfnClose(pImg->pBackendData, true);
8163 RTMemFree(pImg->pszFilename);
8164 RTMemFree(pImg);
8165 pImg = pTmp;
8166 }
8167
8168 /* Make sure destination image is back to read only if necessary. */
8169 if (pImageTo != pDisk->pLast)
8170 {
8171 uOpenFlags = pImageTo->Backend->pfnGetOpenFlags(pImageTo->pBackendData);
8172 uOpenFlags |= VD_OPEN_FLAGS_READONLY;
8173 rc = pImageTo->Backend->pfnSetOpenFlags(pImageTo->pBackendData,
8174 uOpenFlags);
8175 if (RT_FAILURE(rc))
8176 break;
8177 }
8178
8179 /*
8180 * Make sure the child is readonly
8181 * for the child -> parent merge direction
8182 * if necessary.
8183 */
8184 if ( nImageFrom > nImageTo
8185 && pImageChild
8186 && pImageChild != pDisk->pLast)
8187 {
8188 uOpenFlags = pImageChild->Backend->pfnGetOpenFlags(pImageChild->pBackendData);
8189 uOpenFlags |= VD_OPEN_FLAGS_READONLY;
8190 rc = pImageChild->Backend->pfnSetOpenFlags(pImageChild->pBackendData,
8191 uOpenFlags);
8192 if (RT_FAILURE(rc))
8193 break;
8194 }
8195 } while (0);
8196
8197 if (RT_UNLIKELY(fLockWrite))
8198 {
8199 rc2 = vdThreadFinishWrite(pDisk);
8200 AssertRC(rc2);
8201 }
8202 else if (RT_UNLIKELY(fLockRead))
8203 {
8204 rc2 = vdThreadFinishRead(pDisk);
8205 AssertRC(rc2);
8206 }
8207
8208 if (pvBuf)
8209 RTMemTmpFree(pvBuf);
8210
8211 if (RT_SUCCESS(rc) && pIfProgress && pIfProgress->pfnProgress)
8212 pIfProgress->pfnProgress(pIfProgress->Core.pvUser, 100);
8213
8214 LogFlowFunc(("returns %Rrc\n", rc));
8215 return rc;
8216}
8217
8218/**
8219 * Copies an image from one HDD container to another - extended version.
8220 * The copy is opened in the target HDD container.
8221 * It is possible to convert between different image formats, because the
8222 * backend for the destination may be different from the source.
8223 * If both the source and destination reference the same HDD container,
8224 * then the image is moved (by copying/deleting or renaming) to the new location.
8225 * The source container is unchanged if the move operation fails, otherwise
8226 * the image at the new location is opened in the same way as the old one was.
8227 *
8228 * @note The read/write accesses across disks are not synchronized, just the
8229 * accesses to each disk. Once there is a use case which requires a defined
8230 * read/write behavior in this situation this needs to be extended.
8231 *
8232 * @returns VBox status code.
8233 * @retval VERR_VD_IMAGE_NOT_FOUND if image with specified number was not opened.
8234 * @param pDiskFrom Pointer to source HDD container.
8235 * @param nImage Image number, counts from 0. 0 is always base image of container.
8236 * @param pDiskTo Pointer to destination HDD container.
8237 * @param pszBackend Name of the image file backend to use (may be NULL to use the same as the source, case insensitive).
8238 * @param pszFilename New name of the image (may be NULL to specify that the
8239 * copy destination is the destination container, or
8240 * if pDiskFrom == pDiskTo, i.e. when moving).
8241 * @param fMoveByRename If true, attempt to perform a move by renaming (if successful the new size is ignored).
8242 * @param cbSize New image size (0 means leave unchanged).
 8243	 * @param   nImageFromSame  Number of the image in the source chain known to have the same content as nImageToSame in the destination chain, or VD_IMAGE_CONTENT_UNKNOWN.
 8244	 * @param   nImageToSame    Number of the image in the destination chain known to have the same content as nImageFromSame in the source chain, or VD_IMAGE_CONTENT_UNKNOWN.
8245 * @param uImageFlags Flags specifying special destination image features.
8246 * @param pDstUuid New UUID of the destination image. If NULL, a new UUID is created.
8247 * This parameter is used if and only if a true copy is created.
8248 * In all rename/move cases or copy to existing image cases the modification UUIDs are copied over.
8249 * @param uOpenFlags Image file open mode, see VD_OPEN_FLAGS_* constants.
8250 * Only used if the destination image is created.
8251 * @param pVDIfsOperation Pointer to the per-operation VD interface list.
8252 * @param pDstVDIfsImage Pointer to the per-image VD interface list, for the
8253 * destination image.
8254 * @param pDstVDIfsOperation Pointer to the per-operation VD interface list,
8255 * for the destination operation.
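 *
 * @par Example (illustrative sketch)
 *      A hedged sketch of copying the topmost image of one container into a
 *      freshly created VMDK image in another container. Both containers are
 *      assumed to come from VDCreate(); the backend name and file path are
 *      placeholders for illustration only.
 * @code
 *      int rc = VDCopyEx(pDiskFrom, VD_LAST_IMAGE, pDiskTo, "VMDK", "/path/to/copy.vmdk",
 *                        false,                     // no move by rename
 *                        0,                         // keep the source size
 *                        VD_IMAGE_CONTENT_UNKNOWN,  // nImageFromSame
 *                        VD_IMAGE_CONTENT_UNKNOWN,  // nImageToSame
 *                        VD_IMAGE_FLAGS_NONE,
 *                        NULL,                      // create a new destination UUID
 *                        VD_OPEN_FLAGS_NORMAL, NULL, NULL, NULL);
 * @endcode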
8256 */
8257VBOXDDU_DECL(int) VDCopyEx(PVBOXHDD pDiskFrom, unsigned nImage, PVBOXHDD pDiskTo,
8258 const char *pszBackend, const char *pszFilename,
8259 bool fMoveByRename, uint64_t cbSize,
8260 unsigned nImageFromSame, unsigned nImageToSame,
8261 unsigned uImageFlags, PCRTUUID pDstUuid,
8262 unsigned uOpenFlags, PVDINTERFACE pVDIfsOperation,
8263 PVDINTERFACE pDstVDIfsImage,
8264 PVDINTERFACE pDstVDIfsOperation)
8265{
8266 int rc = VINF_SUCCESS;
8267 int rc2;
8268 bool fLockReadFrom = false, fLockWriteFrom = false, fLockWriteTo = false;
8269 PVDIMAGE pImageTo = NULL;
8270
8271 LogFlowFunc(("pDiskFrom=%#p nImage=%u pDiskTo=%#p pszBackend=\"%s\" pszFilename=\"%s\" fMoveByRename=%d cbSize=%llu nImageFromSame=%u nImageToSame=%u uImageFlags=%#x pDstUuid=%#p uOpenFlags=%#x pVDIfsOperation=%#p pDstVDIfsImage=%#p pDstVDIfsOperation=%#p\n",
8272 pDiskFrom, nImage, pDiskTo, pszBackend, pszFilename, fMoveByRename, cbSize, nImageFromSame, nImageToSame, uImageFlags, pDstUuid, uOpenFlags, pVDIfsOperation, pDstVDIfsImage, pDstVDIfsOperation));
8273
8274 PVDINTERFACEPROGRESS pIfProgress = VDIfProgressGet(pVDIfsOperation);
8275 PVDINTERFACEPROGRESS pDstIfProgress = VDIfProgressGet(pDstVDIfsOperation);
8276
8277 do {
8278 /* Check arguments. */
8279 AssertMsgBreakStmt(VALID_PTR(pDiskFrom), ("pDiskFrom=%#p\n", pDiskFrom),
8280 rc = VERR_INVALID_PARAMETER);
8281 AssertMsg(pDiskFrom->u32Signature == VBOXHDDDISK_SIGNATURE,
8282 ("u32Signature=%08x\n", pDiskFrom->u32Signature));
8283
8284 rc2 = vdThreadStartRead(pDiskFrom);
8285 AssertRC(rc2);
8286 fLockReadFrom = true;
8287 PVDIMAGE pImageFrom = vdGetImageByNumber(pDiskFrom, nImage);
8288 AssertPtrBreakStmt(pImageFrom, rc = VERR_VD_IMAGE_NOT_FOUND);
8289 AssertMsgBreakStmt(VALID_PTR(pDiskTo), ("pDiskTo=%#p\n", pDiskTo),
8290 rc = VERR_INVALID_PARAMETER);
8291 AssertMsg(pDiskTo->u32Signature == VBOXHDDDISK_SIGNATURE,
8292 ("u32Signature=%08x\n", pDiskTo->u32Signature));
8293 AssertMsgBreakStmt( (nImageFromSame < nImage || nImageFromSame == VD_IMAGE_CONTENT_UNKNOWN)
8294 && (nImageToSame < pDiskTo->cImages || nImageToSame == VD_IMAGE_CONTENT_UNKNOWN)
8295 && ( (nImageFromSame == VD_IMAGE_CONTENT_UNKNOWN && nImageToSame == VD_IMAGE_CONTENT_UNKNOWN)
8296 || (nImageFromSame != VD_IMAGE_CONTENT_UNKNOWN && nImageToSame != VD_IMAGE_CONTENT_UNKNOWN)),
8297 ("nImageFromSame=%u nImageToSame=%u\n", nImageFromSame, nImageToSame),
8298 rc = VERR_INVALID_PARAMETER);
8299
8300 /* Move the image. */
8301 if (pDiskFrom == pDiskTo)
8302 {
8303 /* Rename only works when backends are the same, are file based
8304 * and the rename method is implemented. */
8305 if ( fMoveByRename
8306 && !RTStrICmp(pszBackend, pImageFrom->Backend->pszBackendName)
8307 && pImageFrom->Backend->uBackendCaps & VD_CAP_FILE
8308 && pImageFrom->Backend->pfnRename)
8309 {
8310 rc2 = vdThreadFinishRead(pDiskFrom);
8311 AssertRC(rc2);
8312 fLockReadFrom = false;
8313
8314 rc2 = vdThreadStartWrite(pDiskFrom);
8315 AssertRC(rc2);
8316 fLockWriteFrom = true;
8317 rc = pImageFrom->Backend->pfnRename(pImageFrom->pBackendData, pszFilename ? pszFilename : pImageFrom->pszFilename);
8318 break;
8319 }
8320
8321 /** @todo Moving (including shrinking/growing) of the image is
8322 * requested, but the rename attempt failed or it wasn't possible.
8323 * Must now copy image to temp location. */
8324 AssertReleaseMsgFailed(("VDCopy: moving by copy/delete not implemented\n"));
8325 }
8326
8327 /* pszFilename is allowed to be NULL, as this indicates copy to the existing image. */
8328 AssertMsgBreakStmt(pszFilename == NULL || (VALID_PTR(pszFilename) && *pszFilename),
8329 ("pszFilename=%#p \"%s\"\n", pszFilename, pszFilename),
8330 rc = VERR_INVALID_PARAMETER);
8331
8332 uint64_t cbSizeFrom;
8333 cbSizeFrom = pImageFrom->Backend->pfnGetSize(pImageFrom->pBackendData);
8334 if (cbSizeFrom == 0)
8335 {
8336 rc = VERR_VD_VALUE_NOT_FOUND;
8337 break;
8338 }
8339
8340 VDGEOMETRY PCHSGeometryFrom = {0, 0, 0};
8341 VDGEOMETRY LCHSGeometryFrom = {0, 0, 0};
8342 pImageFrom->Backend->pfnGetPCHSGeometry(pImageFrom->pBackendData, &PCHSGeometryFrom);
8343 pImageFrom->Backend->pfnGetLCHSGeometry(pImageFrom->pBackendData, &LCHSGeometryFrom);
8344
8345 RTUUID ImageUuid, ImageModificationUuid;
8346 if (pDiskFrom != pDiskTo)
8347 {
8348 if (pDstUuid)
8349 ImageUuid = *pDstUuid;
8350 else
8351 RTUuidCreate(&ImageUuid);
8352 }
8353 else
8354 {
8355 rc = pImageFrom->Backend->pfnGetUuid(pImageFrom->pBackendData, &ImageUuid);
8356 if (RT_FAILURE(rc))
8357 RTUuidCreate(&ImageUuid);
8358 }
8359 rc = pImageFrom->Backend->pfnGetModificationUuid(pImageFrom->pBackendData, &ImageModificationUuid);
8360 if (RT_FAILURE(rc))
8361 RTUuidClear(&ImageModificationUuid);
8362
8363 char szComment[1024];
8364 rc = pImageFrom->Backend->pfnGetComment(pImageFrom->pBackendData, szComment, sizeof(szComment));
8365 if (RT_FAILURE(rc))
8366 szComment[0] = '\0';
8367 else
8368 szComment[sizeof(szComment) - 1] = '\0';
8369
8370 rc2 = vdThreadFinishRead(pDiskFrom);
8371 AssertRC(rc2);
8372 fLockReadFrom = false;
8373
8374 rc2 = vdThreadStartRead(pDiskTo);
8375 AssertRC(rc2);
8376 unsigned cImagesTo = pDiskTo->cImages;
8377 rc2 = vdThreadFinishRead(pDiskTo);
8378 AssertRC(rc2);
8379
8380 if (pszFilename)
8381 {
8382 if (cbSize == 0)
8383 cbSize = cbSizeFrom;
8384
8385 /* Create destination image with the properties of source image. */
8386 /** @todo replace the VDCreateDiff/VDCreateBase calls by direct
8387 * calls to the backend. Unifies the code and reduces the API
8388 * dependencies. Would also make the synchronization explicit. */
8389 if (cImagesTo > 0)
8390 {
8391 rc = VDCreateDiff(pDiskTo, pszBackend, pszFilename,
8392 uImageFlags, szComment, &ImageUuid,
8393 NULL /* pParentUuid */,
8394 uOpenFlags & ~VD_OPEN_FLAGS_READONLY,
8395 pDstVDIfsImage, NULL);
8396
8397 rc2 = vdThreadStartWrite(pDiskTo);
8398 AssertRC(rc2);
8399 fLockWriteTo = true;
8400 } else {
8401 /** @todo hack to force creation of a fixed image for
8402 * the RAW backend, which can't handle anything else. */
8403 if (!RTStrICmp(pszBackend, "RAW"))
8404 uImageFlags |= VD_IMAGE_FLAGS_FIXED;
8405
8406 vdFixupPCHSGeometry(&PCHSGeometryFrom, cbSize);
8407 vdFixupLCHSGeometry(&LCHSGeometryFrom, cbSize);
8408
8409 rc = VDCreateBase(pDiskTo, pszBackend, pszFilename, cbSize,
8410 uImageFlags, szComment,
8411 &PCHSGeometryFrom, &LCHSGeometryFrom,
8412 NULL, uOpenFlags & ~VD_OPEN_FLAGS_READONLY,
8413 pDstVDIfsImage, NULL);
8414
8415 rc2 = vdThreadStartWrite(pDiskTo);
8416 AssertRC(rc2);
8417 fLockWriteTo = true;
8418
8419 if (RT_SUCCESS(rc) && !RTUuidIsNull(&ImageUuid))
8420 pDiskTo->pLast->Backend->pfnSetUuid(pDiskTo->pLast->pBackendData, &ImageUuid);
8421 }
8422 if (RT_FAILURE(rc))
8423 break;
8424
8425 pImageTo = pDiskTo->pLast;
8426 AssertPtrBreakStmt(pImageTo, rc = VERR_VD_IMAGE_NOT_FOUND);
8427
8428 cbSize = RT_MIN(cbSize, cbSizeFrom);
8429 }
8430 else
8431 {
8432 pImageTo = pDiskTo->pLast;
8433 AssertPtrBreakStmt(pImageTo, rc = VERR_VD_IMAGE_NOT_FOUND);
8434
8435 uint64_t cbSizeTo;
8436 cbSizeTo = pImageTo->Backend->pfnGetSize(pImageTo->pBackendData);
8437 if (cbSizeTo == 0)
8438 {
8439 rc = VERR_VD_VALUE_NOT_FOUND;
8440 break;
8441 }
8442
8443 if (cbSize == 0)
8444 cbSize = RT_MIN(cbSizeFrom, cbSizeTo);
8445
8446 vdFixupPCHSGeometry(&PCHSGeometryFrom, cbSize);
8447 vdFixupLCHSGeometry(&LCHSGeometryFrom, cbSize);
8448
8449 /* Update the geometry in the destination image. */
8450 pImageTo->Backend->pfnSetPCHSGeometry(pImageTo->pBackendData, &PCHSGeometryFrom);
8451 pImageTo->Backend->pfnSetLCHSGeometry(pImageTo->pBackendData, &LCHSGeometryFrom);
8452 }
8453
8454 rc2 = vdThreadFinishWrite(pDiskTo);
8455 AssertRC(rc2);
8456 fLockWriteTo = false;
8457
 8458	        /* Whether the optimized copy path, which suppresses redundant I/O, can be taken.
 8459	         * Don't optimize if the destination image already existed or if it is a child image. */
8460 bool fSuppressRedundantIo = ( !(pszFilename == NULL || cImagesTo > 0)
8461 || (nImageToSame != VD_IMAGE_CONTENT_UNKNOWN));
8462 unsigned cImagesFromReadBack, cImagesToReadBack;
8463
8464 if (nImageFromSame == VD_IMAGE_CONTENT_UNKNOWN)
8465 cImagesFromReadBack = 0;
8466 else
8467 {
8468 if (nImage == VD_LAST_IMAGE)
8469 cImagesFromReadBack = pDiskFrom->cImages - nImageFromSame - 1;
8470 else
8471 cImagesFromReadBack = nImage - nImageFromSame;
8472 }
8473
8474 if (nImageToSame == VD_IMAGE_CONTENT_UNKNOWN)
8475 cImagesToReadBack = 0;
8476 else
8477 cImagesToReadBack = pDiskTo->cImages - nImageToSame - 1;
8478
8479 /* Copy the data. */
8480 rc = vdCopyHelper(pDiskFrom, pImageFrom, pDiskTo, cbSize,
8481 cImagesFromReadBack, cImagesToReadBack,
8482 fSuppressRedundantIo, pIfProgress, pDstIfProgress);
8483
8484 if (RT_SUCCESS(rc))
8485 {
8486 rc2 = vdThreadStartWrite(pDiskTo);
8487 AssertRC(rc2);
8488 fLockWriteTo = true;
8489
8490 /* Only set modification UUID if it is non-null, since the source
8491 * backend might not provide a valid modification UUID. */
8492 if (!RTUuidIsNull(&ImageModificationUuid))
8493 pImageTo->Backend->pfnSetModificationUuid(pImageTo->pBackendData, &ImageModificationUuid);
8494
8495 /* Set the requested open flags if they differ from the value
8496 * required for creating the image and copying the contents. */
8497 if ( pImageTo && pszFilename
8498 && uOpenFlags != (uOpenFlags & ~VD_OPEN_FLAGS_READONLY))
8499 rc = pImageTo->Backend->pfnSetOpenFlags(pImageTo->pBackendData,
8500 uOpenFlags);
8501 }
8502 } while (0);
8503
8504 if (RT_FAILURE(rc) && pImageTo && pszFilename)
8505 {
8506 /* Take the write lock only if it is not taken. Not worth making the
8507 * above code even more complicated. */
8508 if (RT_UNLIKELY(!fLockWriteTo))
8509 {
8510 rc2 = vdThreadStartWrite(pDiskTo);
8511 AssertRC(rc2);
8512 fLockWriteTo = true;
8513 }
8514 /* Error detected, but new image created. Remove image from list. */
8515 vdRemoveImageFromList(pDiskTo, pImageTo);
8516
8517 /* Close and delete image. */
8518 rc2 = pImageTo->Backend->pfnClose(pImageTo->pBackendData, true);
8519 AssertRC(rc2);
8520 pImageTo->pBackendData = NULL;
8521
8522 /* Free remaining resources. */
8523 if (pImageTo->pszFilename)
8524 RTStrFree(pImageTo->pszFilename);
8525
8526 RTMemFree(pImageTo);
8527 }
8528
8529 if (RT_UNLIKELY(fLockWriteTo))
8530 {
8531 rc2 = vdThreadFinishWrite(pDiskTo);
8532 AssertRC(rc2);
8533 }
8534 if (RT_UNLIKELY(fLockWriteFrom))
8535 {
8536 rc2 = vdThreadFinishWrite(pDiskFrom);
8537 AssertRC(rc2);
8538 }
8539 else if (RT_UNLIKELY(fLockReadFrom))
8540 {
8541 rc2 = vdThreadFinishRead(pDiskFrom);
8542 AssertRC(rc2);
8543 }
8544
8545 if (RT_SUCCESS(rc))
8546 {
8547 if (pIfProgress && pIfProgress->pfnProgress)
8548 pIfProgress->pfnProgress(pIfProgress->Core.pvUser, 100);
8549 if (pDstIfProgress && pDstIfProgress->pfnProgress)
8550 pDstIfProgress->pfnProgress(pDstIfProgress->Core.pvUser, 100);
8551 }
8552
8553 LogFlowFunc(("returns %Rrc\n", rc));
8554 return rc;
8555}
8556
8557/**
8558 * Copies an image from one HDD container to another.
8559 * The copy is opened in the target HDD container.
8560 * It is possible to convert between different image formats, because the
8561 * backend for the destination may be different from the source.
8562 * If both the source and destination reference the same HDD container,
8563 * then the image is moved (by copying/deleting or renaming) to the new location.
8564 * The source container is unchanged if the move operation fails, otherwise
8565 * the image at the new location is opened in the same way as the old one was.
8566 *
8567 * @returns VBox status code.
8568 * @returns VERR_VD_IMAGE_NOT_FOUND if image with specified number was not opened.
8569 * @param pDiskFrom Pointer to source HDD container.
8570 * @param nImage Image number, counts from 0. 0 is always base image of container.
8571 * @param pDiskTo Pointer to destination HDD container.
8572 * @param pszBackend Name of the image file backend to use.
8573 * @param pszFilename New name of the image (may be NULL if pDiskFrom == pDiskTo).
8574 * @param fMoveByRename If true, attempt to perform a move by renaming (if successful the new size is ignored).
8575 * @param cbSize New image size (0 means leave unchanged).
8576 * @param uImageFlags Flags specifying special destination image features.
8577 * @param pDstUuid New UUID of the destination image. If NULL, a new UUID is created.
8578 * This parameter is used if and only if a true copy is created.
8579 * In all rename/move cases the UUIDs are copied over.
8580 * @param uOpenFlags Image file open mode, see VD_OPEN_FLAGS_* constants.
8581 * Only used if the destination image is created.
8582 * @param pVDIfsOperation Pointer to the per-operation VD interface list.
8583 * @param pDstVDIfsImage Pointer to the per-image VD interface list, for the
8584 * destination image.
 8585	 * @param   pDstVDIfsOperation Pointer to the per-operation VD interface list,
 8586	 *                          for the destination operation.
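 *
 * @par Example (illustrative sketch)
 *      A hedged sketch equivalent to the VDCopyEx sketch above, using this
 *      simpler wrapper; the backend name and file path are placeholders for
 *      illustration only.
 * @code
 *      int rc = VDCopy(pDiskFrom, VD_LAST_IMAGE, pDiskTo, "VDI", "/path/to/clone.vdi",
 *                      false, 0, VD_IMAGE_FLAGS_NONE, NULL,
 *                      VD_OPEN_FLAGS_NORMAL, NULL, NULL, NULL);
 * @endcode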
8587 */
8588VBOXDDU_DECL(int) VDCopy(PVBOXHDD pDiskFrom, unsigned nImage, PVBOXHDD pDiskTo,
8589 const char *pszBackend, const char *pszFilename,
8590 bool fMoveByRename, uint64_t cbSize,
8591 unsigned uImageFlags, PCRTUUID pDstUuid,
8592 unsigned uOpenFlags, PVDINTERFACE pVDIfsOperation,
8593 PVDINTERFACE pDstVDIfsImage,
8594 PVDINTERFACE pDstVDIfsOperation)
8595{
8596 return VDCopyEx(pDiskFrom, nImage, pDiskTo, pszBackend, pszFilename, fMoveByRename,
8597 cbSize, VD_IMAGE_CONTENT_UNKNOWN, VD_IMAGE_CONTENT_UNKNOWN,
8598 uImageFlags, pDstUuid, uOpenFlags, pVDIfsOperation,
8599 pDstVDIfsImage, pDstVDIfsOperation);
8600}
8601
8602/**
8603 * Optimizes the storage consumption of an image. Typically the unused blocks
 8604	 * have to be wiped with zeroes to achieve a substantially reduced storage use.
8605 * Another optimization done is reordering the image blocks, which can provide
8606 * a significant performance boost, as reads and writes tend to use less random
8607 * file offsets.
8608 *
8609 * @return VBox status code.
8610 * @return VERR_VD_IMAGE_NOT_FOUND if image with specified number was not opened.
8611 * @return VERR_VD_IMAGE_READ_ONLY if image is not writable.
8612 * @return VERR_NOT_SUPPORTED if this kind of image can be compacted, but
8613 * the code for this isn't implemented yet.
8614 * @param pDisk Pointer to HDD container.
8615 * @param nImage Image number, counts from 0. 0 is always base image of container.
8616 * @param pVDIfsOperation Pointer to the per-operation VD interface list.
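 *
 * @par Example (illustrative sketch)
 *      A hedged sketch of compacting the topmost (writable) image of an open
 *      chain; a progress interface could be passed instead of NULL.
 * @code
 *      int rc = VDCompact(pDisk, VD_LAST_IMAGE, NULL);
 * @endcode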
8617 */
8618VBOXDDU_DECL(int) VDCompact(PVBOXHDD pDisk, unsigned nImage,
8619 PVDINTERFACE pVDIfsOperation)
8620{
8621 int rc = VINF_SUCCESS;
8622 int rc2;
8623 bool fLockRead = false, fLockWrite = false;
8624 void *pvBuf = NULL;
8625 void *pvTmp = NULL;
8626
8627 LogFlowFunc(("pDisk=%#p nImage=%u pVDIfsOperation=%#p\n",
8628 pDisk, nImage, pVDIfsOperation));
8629
8630 PVDINTERFACEPROGRESS pIfProgress = VDIfProgressGet(pVDIfsOperation);
8631
8632 do {
8633 /* Check arguments. */
8634 AssertMsgBreakStmt(VALID_PTR(pDisk), ("pDisk=%#p\n", pDisk),
8635 rc = VERR_INVALID_PARAMETER);
8636 AssertMsg(pDisk->u32Signature == VBOXHDDDISK_SIGNATURE,
8637 ("u32Signature=%08x\n", pDisk->u32Signature));
8638
8639 rc2 = vdThreadStartRead(pDisk);
8640 AssertRC(rc2);
8641 fLockRead = true;
8642
8643 PVDIMAGE pImage = vdGetImageByNumber(pDisk, nImage);
8644 AssertPtrBreakStmt(pImage, rc = VERR_VD_IMAGE_NOT_FOUND);
8645
 8646	        /* If there is no compact callback, non-file based backends are assumed
 8647	         * not to need compaction, so no fuss is made about it. For file based
 8648	         * ones signal this as not yet supported. */
8649 if (!pImage->Backend->pfnCompact)
8650 {
8651 if (pImage->Backend->uBackendCaps & VD_CAP_FILE)
8652 rc = VERR_NOT_SUPPORTED;
8653 else
8654 rc = VINF_SUCCESS;
8655 break;
8656 }
8657
8658 /* Insert interface for reading parent state into per-operation list,
8659 * if there is a parent image. */
8660 VDINTERFACEPARENTSTATE VDIfParent;
8661 VDPARENTSTATEDESC ParentUser;
8662 if (pImage->pPrev)
8663 {
8664 VDIfParent.pfnParentRead = vdParentRead;
8665 ParentUser.pDisk = pDisk;
8666 ParentUser.pImage = pImage->pPrev;
8667 rc = VDInterfaceAdd(&VDIfParent.Core, "VDCompact_ParentState", VDINTERFACETYPE_PARENTSTATE,
8668 &ParentUser, sizeof(VDINTERFACEPARENTSTATE), &pVDIfsOperation);
8669 AssertRC(rc);
8670 }
8671
8672 rc2 = vdThreadFinishRead(pDisk);
8673 AssertRC(rc2);
8674 fLockRead = false;
8675
8676 rc2 = vdThreadStartWrite(pDisk);
8677 AssertRC(rc2);
8678 fLockWrite = true;
8679
8680 rc = pImage->Backend->pfnCompact(pImage->pBackendData,
8681 0, 99,
8682 pDisk->pVDIfsDisk,
8683 pImage->pVDIfsImage,
8684 pVDIfsOperation);
8685 } while (0);
8686
8687 if (RT_UNLIKELY(fLockWrite))
8688 {
8689 rc2 = vdThreadFinishWrite(pDisk);
8690 AssertRC(rc2);
8691 }
8692 else if (RT_UNLIKELY(fLockRead))
8693 {
8694 rc2 = vdThreadFinishRead(pDisk);
8695 AssertRC(rc2);
8696 }
8697
8698 if (pvBuf)
8699 RTMemTmpFree(pvBuf);
8700 if (pvTmp)
8701 RTMemTmpFree(pvTmp);
8702
8703 if (RT_SUCCESS(rc))
8704 {
8705 if (pIfProgress && pIfProgress->pfnProgress)
8706 pIfProgress->pfnProgress(pIfProgress->Core.pvUser, 100);
8707 }
8708
8709 LogFlowFunc(("returns %Rrc\n", rc));
8710 return rc;
8711}
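
/*
 * Illustrative usage sketch (comment only, not compiled): compacting the base
 * image of an already assembled container. Assumes pDisk refers to a container
 * that was created and whose images were opened elsewhere (e.g. via
 * VDCreate()/VDOpen(), not shown in this part of the file); passing NULL for
 * pVDIfsOperation simply skips progress reporting.
 *
 *    int rc = VDCompact(pDisk, 0, NULL);   // nImage = 0: base image
 *    if (rc == VERR_NOT_SUPPORTED)
 *    {
 *        // The backend provides no compact callback for this file based format.
 *    }
 */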
8712
8713/**
8714 * Resizes the given disk image to the given size.
8715 *
8716 * @return VBox status code.
8717 * @return VERR_VD_IMAGE_READ_ONLY if image is not writable.
8718 * @return VERR_NOT_SUPPORTED if this kind of image cannot be resized because
8719 *         the backend provides no resize callback.
8720 * @param pDisk Pointer to the HDD container.
8721 * @param cbSize New size of the image.
8722 * @param pPCHSGeometry Pointer to the new physical disk geometry <= (16383,16,63). Not NULL.
8723 * @param pLCHSGeometry Pointer to the new logical disk geometry <= (x,255,63). Not NULL.
8724 * @param pVDIfsOperation Pointer to the per-operation VD interface list.
8725 */
8726VBOXDDU_DECL(int) VDResize(PVBOXHDD pDisk, uint64_t cbSize,
8727 PCVDGEOMETRY pPCHSGeometry,
8728 PCVDGEOMETRY pLCHSGeometry,
8729 PVDINTERFACE pVDIfsOperation)
8730{
8731 /** @todo r=klaus resizing was designed to be part of VDCopy, so having a separate function is not desirable. */
8732 int rc = VINF_SUCCESS;
8733 int rc2;
8734 bool fLockRead = false, fLockWrite = false;
8735
8736 LogFlowFunc(("pDisk=%#p cbSize=%llu pVDIfsOperation=%#p\n",
8737 pDisk, cbSize, pVDIfsOperation));
8738
8739 PVDINTERFACEPROGRESS pIfProgress = VDIfProgressGet(pVDIfsOperation);
8740
8741 do {
8742 /* Check arguments. */
8743 AssertMsgBreakStmt(VALID_PTR(pDisk), ("pDisk=%#p\n", pDisk),
8744 rc = VERR_INVALID_PARAMETER);
8745 AssertMsg(pDisk->u32Signature == VBOXHDDDISK_SIGNATURE,
8746 ("u32Signature=%08x\n", pDisk->u32Signature));
8747
8748 rc2 = vdThreadStartRead(pDisk);
8749 AssertRC(rc2);
8750 fLockRead = true;
8751
8752 /* Must have at least one image in the chain, will resize last. */
8753 AssertMsgBreakStmt(pDisk->cImages >= 1, ("cImages=%u\n", pDisk->cImages),
8754 rc = VERR_NOT_SUPPORTED);
8755
8756 PVDIMAGE pImage = pDisk->pLast;
8757
8758        /* If the backend provides no resize callback then non file based
8759         * backends don't need resizing; no need to make much fuss about
8760         * this. For file based ones signal this as not yet supported. */
8761 if (!pImage->Backend->pfnResize)
8762 {
8763 if (pImage->Backend->uBackendCaps & VD_CAP_FILE)
8764 rc = VERR_NOT_SUPPORTED;
8765 else
8766 rc = VINF_SUCCESS;
8767 break;
8768 }
8769
8770 rc2 = vdThreadFinishRead(pDisk);
8771 AssertRC(rc2);
8772 fLockRead = false;
8773
8774 rc2 = vdThreadStartWrite(pDisk);
8775 AssertRC(rc2);
8776 fLockWrite = true;
8777
8778 VDGEOMETRY PCHSGeometryOld;
8779 VDGEOMETRY LCHSGeometryOld;
8780 PCVDGEOMETRY pPCHSGeometryNew;
8781 PCVDGEOMETRY pLCHSGeometryNew;
8782
8783 if (pPCHSGeometry->cCylinders == 0)
8784 {
8785 /* Auto-detect marker, calculate new value ourself. */
8786 rc = pImage->Backend->pfnGetPCHSGeometry(pImage->pBackendData, &PCHSGeometryOld);
8787 if (RT_SUCCESS(rc) && (PCHSGeometryOld.cCylinders != 0))
8788 PCHSGeometryOld.cCylinders = RT_MIN(cbSize / 512 / PCHSGeometryOld.cHeads / PCHSGeometryOld.cSectors, 16383);
8789 else if (rc == VERR_VD_GEOMETRY_NOT_SET)
8790 rc = VINF_SUCCESS;
8791
8792 pPCHSGeometryNew = &PCHSGeometryOld;
8793 }
8794 else
8795 pPCHSGeometryNew = pPCHSGeometry;
8796
8797 if (pLCHSGeometry->cCylinders == 0)
8798 {
8799 /* Auto-detect marker, calculate new value ourself. */
8800 rc = pImage->Backend->pfnGetLCHSGeometry(pImage->pBackendData, &LCHSGeometryOld);
8801 if (RT_SUCCESS(rc) && (LCHSGeometryOld.cCylinders != 0))
8802 LCHSGeometryOld.cCylinders = cbSize / 512 / LCHSGeometryOld.cHeads / LCHSGeometryOld.cSectors;
8803 else if (rc == VERR_VD_GEOMETRY_NOT_SET)
8804 rc = VINF_SUCCESS;
8805
8806 pLCHSGeometryNew = &LCHSGeometryOld;
8807 }
8808 else
8809 pLCHSGeometryNew = pLCHSGeometry;
8810
8811 if (RT_SUCCESS(rc))
8812 rc = pImage->Backend->pfnResize(pImage->pBackendData,
8813 cbSize,
8814 pPCHSGeometryNew,
8815 pLCHSGeometryNew,
8816 0, 99,
8817 pDisk->pVDIfsDisk,
8818 pImage->pVDIfsImage,
8819 pVDIfsOperation);
8820 } while (0);
8821
8822 if (RT_UNLIKELY(fLockWrite))
8823 {
8824 rc2 = vdThreadFinishWrite(pDisk);
8825 AssertRC(rc2);
8826 }
8827 else if (RT_UNLIKELY(fLockRead))
8828 {
8829 rc2 = vdThreadFinishRead(pDisk);
8830 AssertRC(rc2);
8831 }
8832
8833 if (RT_SUCCESS(rc))
8834 {
8835 if (pIfProgress && pIfProgress->pfnProgress)
8836 pIfProgress->pfnProgress(pIfProgress->Core.pvUser, 100);
8837
8838 pDisk->cbSize = cbSize;
8839 }
8840
8841 LogFlowFunc(("returns %Rrc\n", rc));
8842 return rc;
8843}
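
/*
 * Illustrative usage sketch (comment only, not compiled): growing the disk to
 * an arbitrary 2 GiB while letting VDResize() recalculate both geometries.
 * Passing cCylinders = 0 in either geometry is the auto-detect marker handled
 * above; pDisk is assumed to have been set up elsewhere.
 *
 *    VDGEOMETRY PCHS = { 0, 0, 0 };   // auto-detect marker
 *    VDGEOMETRY LCHS = { 0, 0, 0 };   // auto-detect marker
 *    int rc = VDResize(pDisk, 2ULL * _1G, &PCHS, &LCHS, NULL);
 *    if (RT_FAILURE(rc))
 *    {
 *        // VERR_NOT_SUPPORTED: the topmost image format cannot be resized.
 *    }
 */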
8844
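/**
 * Applies the configured read and write filter chains to the data of all
 * opened images in the HDD container, rewriting every allocated block in
 * place. All images are temporarily switched to read/write mode and all but
 * the last one are switched back to read-only afterwards.
 *
 * @returns VBox status code.
 * @retval  VERR_VD_NOT_OPENED if no image is opened in the HDD container.
 * @retval  VERR_VD_IMAGE_READ_ONLY if the last image is not writable.
 * @param   pDisk            Pointer to the HDD container.
 * @param   pVDIfsOperation  Pointer to the per-operation VD interface list.
 */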
8845VBOXDDU_DECL(int) VDPrepareWithFilters(PVBOXHDD pDisk, PVDINTERFACE pVDIfsOperation)
8846{
8847 int rc = VINF_SUCCESS;
8848 int rc2;
8849 bool fLockRead = false, fLockWrite = false;
8850
8851 LogFlowFunc(("pDisk=%#p pVDIfsOperation=%#p\n", pDisk, pVDIfsOperation));
8852
8853 PVDINTERFACEPROGRESS pIfProgress = VDIfProgressGet(pVDIfsOperation);
8854
8855 do {
8856 /* Check arguments. */
8857 AssertMsgBreakStmt(VALID_PTR(pDisk), ("pDisk=%#p\n", pDisk),
8858 rc = VERR_INVALID_PARAMETER);
8859 AssertMsg(pDisk->u32Signature == VBOXHDDDISK_SIGNATURE,
8860 ("u32Signature=%08x\n", pDisk->u32Signature));
8861
8862 rc2 = vdThreadStartRead(pDisk);
8863 AssertRC(rc2);
8864 fLockRead = true;
8865
8866 /* Must have at least one image in the chain. */
8867 AssertMsgBreakStmt(pDisk->cImages >= 1, ("cImages=%u\n", pDisk->cImages),
8868 rc = VERR_VD_NOT_OPENED);
8869
8870 unsigned uOpenFlags = pDisk->pLast->Backend->pfnGetOpenFlags(pDisk->pLast->pBackendData);
8871 AssertMsgBreakStmt(!(uOpenFlags & VD_OPEN_FLAGS_READONLY),
8872 ("Last image should be read write"),
8873 rc = VERR_VD_IMAGE_READ_ONLY);
8874
8875 rc2 = vdThreadFinishRead(pDisk);
8876 AssertRC(rc2);
8877 fLockRead = false;
8878
8879 rc2 = vdThreadStartWrite(pDisk);
8880 AssertRC(rc2);
8881 fLockWrite = true;
8882
8883 /*
8884 * Open all images in the chain in read write mode first to avoid running
8885 * into an error in the middle of the process.
8886 */
8887 PVDIMAGE pImage = pDisk->pBase;
8888
8889 while (pImage)
8890 {
8891 uOpenFlags = pImage->Backend->pfnGetOpenFlags(pImage->pBackendData);
8892 if (uOpenFlags & VD_OPEN_FLAGS_READONLY)
8893 {
8894 /*
8895 * Clear skip consistency checks because the image is made writable now and
8896 * skipping consistency checks is only possible for readonly images.
8897 */
8898 uOpenFlags &= ~(VD_OPEN_FLAGS_READONLY | VD_OPEN_FLAGS_SKIP_CONSISTENCY_CHECKS);
8899 rc = pImage->Backend->pfnSetOpenFlags(pImage->pBackendData, uOpenFlags);
8900 if (RT_FAILURE(rc))
8901 break;
8902 }
8903 pImage = pImage->pNext;
8904 }
8905
8906 if (RT_SUCCESS(rc))
8907 {
8908 unsigned cImgCur = 0;
8909 unsigned uPercentStart = 0;
8910 unsigned uPercentSpan = 100 / pDisk->cImages - 1;
8911
8912 /* Allocate tmp buffer. */
8913 void *pvBuf = RTMemTmpAlloc(VD_MERGE_BUFFER_SIZE);
8914 if (!pvBuf)
8915 {
8916 rc = VERR_NO_MEMORY;
8917 break;
8918 }
8919
8920 pImage = pDisk->pBase;
8921 pDisk->fLocked = true;
8922
8923 while ( pImage
8924 && RT_SUCCESS(rc))
8925 {
8926 /* Get size of image. */
8927 uint64_t cbSize = pImage->Backend->pfnGetSize(pImage->pBackendData);
8928 uint64_t cbSizeFile = pImage->Backend->pfnGetFileSize(pImage->pBackendData);
8929 uint64_t cbFileWritten = 0;
8930 uint64_t uOffset = 0;
8931 uint64_t cbRemaining = cbSize;
8932
8933 do
8934 {
8935 size_t cbThisRead = RT_MIN(VD_MERGE_BUFFER_SIZE, cbRemaining);
8936 RTSGSEG SegmentBuf;
8937 RTSGBUF SgBuf;
8938 VDIOCTX IoCtx;
8939
8940 SegmentBuf.pvSeg = pvBuf;
8941 SegmentBuf.cbSeg = VD_MERGE_BUFFER_SIZE;
8942 RTSgBufInit(&SgBuf, &SegmentBuf, 1);
8943 vdIoCtxInit(&IoCtx, pDisk, VDIOCTXTXDIR_READ, 0, 0, NULL,
8944 &SgBuf, NULL, NULL, VDIOCTX_FLAGS_SYNC);
8945
8946 rc = pImage->Backend->pfnRead(pImage->pBackendData, uOffset,
8947 cbThisRead, &IoCtx, &cbThisRead);
8948 if (rc != VERR_VD_BLOCK_FREE)
8949 {
8950 if (RT_FAILURE(rc))
8951 break;
8952
8953 /* Apply filter chains. */
8954 rc = vdFilterChainApplyRead(pDisk, uOffset, cbThisRead, &IoCtx);
8955 if (RT_FAILURE(rc))
8956 break;
8957
8958 rc = vdFilterChainApplyWrite(pDisk, uOffset, cbThisRead, &IoCtx);
8959 if (RT_FAILURE(rc))
8960 break;
8961
8962 RTSgBufReset(&SgBuf);
8963 size_t cbThisWrite = 0;
8964 size_t cbPreRead = 0;
8965 size_t cbPostRead = 0;
8966 rc = pImage->Backend->pfnWrite(pImage->pBackendData, uOffset,
8967 cbThisRead, &IoCtx, &cbThisWrite,
8968 &cbPreRead, &cbPostRead, 0);
8969 if (RT_FAILURE(rc))
8970 break;
8971 Assert(cbThisWrite == cbThisRead);
8972 cbFileWritten += cbThisWrite;
8973 }
8974 else
8975 rc = VINF_SUCCESS;
8976
8977 uOffset += cbThisRead;
8978 cbRemaining -= cbThisRead;
8979
8980 if (pIfProgress && pIfProgress->pfnProgress)
8981 {
8982 rc2 = pIfProgress->pfnProgress(pIfProgress->Core.pvUser,
8983 uPercentStart + cbFileWritten * uPercentSpan / cbSizeFile);
8984 AssertRC(rc2); /* Cancelling this operation without leaving an inconsistent state is not possible. */
8985 }
8986 } while (uOffset < cbSize);
8987
8988 pImage = pImage->pNext;
8989 cImgCur++;
8990 uPercentStart += uPercentSpan;
8991 }
8992
8993 pDisk->fLocked = false;
8994 if (pvBuf)
8995 RTMemTmpFree(pvBuf);
8996 }
8997
8998 /* Change images except last one back to readonly. */
8999 pImage = pDisk->pBase;
9000 while ( pImage != pDisk->pLast
9001 && pImage)
9002 {
9003 uOpenFlags = pImage->Backend->pfnGetOpenFlags(pImage->pBackendData);
9004 uOpenFlags |= VD_OPEN_FLAGS_READONLY;
9005 rc2 = pImage->Backend->pfnSetOpenFlags(pImage->pBackendData, uOpenFlags);
9006 if (RT_FAILURE(rc2))
9007 {
9008 if (RT_SUCCESS(rc))
9009 rc = rc2;
9010 break;
9011 }
9012 pImage = pImage->pNext;
9013 }
9014 } while (0);
9015
9016 if (RT_UNLIKELY(fLockWrite))
9017 {
9018 rc2 = vdThreadFinishWrite(pDisk);
9019 AssertRC(rc2);
9020 }
9021 else if (RT_UNLIKELY(fLockRead))
9022 {
9023 rc2 = vdThreadFinishRead(pDisk);
9024 AssertRC(rc2);
9025 }
9026
9027 if ( RT_SUCCESS(rc)
9028 && pIfProgress
9029 && pIfProgress->pfnProgress)
9030 pIfProgress->pfnProgress(pIfProgress->Core.pvUser, 100);
9031
9032 LogFlowFunc(("returns %Rrc\n", rc));
9033 return rc;
9034}
9035
9036/**
9037 * Closes the last opened image file in HDD container.
9038 * If the previous image file was opened in read-only mode (the normal case) and
9039 * the closed image was in read-write mode, then the previous image will be
9040 * reopened in read/write mode.
9041 *
9042 * @returns VBox status code.
9043 * @returns VERR_VD_NOT_OPENED if no image is opened in HDD container.
9044 * @param pDisk Pointer to HDD container.
9045 * @param fDelete If true, delete the image from the host disk.
9046 */
9047VBOXDDU_DECL(int) VDClose(PVBOXHDD pDisk, bool fDelete)
9048{
9049 int rc = VINF_SUCCESS;
9050 int rc2;
9051 bool fLockWrite = false;
9052
9053 LogFlowFunc(("pDisk=%#p fDelete=%d\n", pDisk, fDelete));
9054 do
9055 {
9056 /* sanity check */
9057 AssertPtrBreakStmt(pDisk, rc = VERR_INVALID_PARAMETER);
9058 AssertMsg(pDisk->u32Signature == VBOXHDDDISK_SIGNATURE, ("u32Signature=%08x\n", pDisk->u32Signature));
9059
9060 /* Not worth splitting this up into a read lock phase and write
9061 * lock phase, as closing an image is a relatively fast operation
9062 * dominated by the part which needs the write lock. */
9063 rc2 = vdThreadStartWrite(pDisk);
9064 AssertRC(rc2);
9065 fLockWrite = true;
9066
9067 PVDIMAGE pImage = pDisk->pLast;
9068 if (!pImage)
9069 {
9070 rc = VERR_VD_NOT_OPENED;
9071 break;
9072 }
9073
9074 /* Destroy the current discard state first which might still have pending blocks. */
9075 rc = vdDiscardStateDestroy(pDisk);
9076 if (RT_FAILURE(rc))
9077 break;
9078
9079 unsigned uOpenFlags = pImage->Backend->pfnGetOpenFlags(pImage->pBackendData);
9080 /* Remove image from list of opened images. */
9081 vdRemoveImageFromList(pDisk, pImage);
9082 /* Close (and optionally delete) image. */
9083 rc = pImage->Backend->pfnClose(pImage->pBackendData, fDelete);
9084 /* Free remaining resources related to the image. */
9085 RTStrFree(pImage->pszFilename);
9086 RTMemFree(pImage);
9087
9088 pImage = pDisk->pLast;
9089 if (!pImage)
9090 break;
9091
9092 /* If disk was previously in read/write mode, make sure it will stay
9093 * like this (if possible) after closing this image. Set the open flags
9094 * accordingly. */
9095 if (!(uOpenFlags & VD_OPEN_FLAGS_READONLY))
9096 {
9097 uOpenFlags = pImage->Backend->pfnGetOpenFlags(pImage->pBackendData);
9098 uOpenFlags &= ~ VD_OPEN_FLAGS_READONLY;
9099 rc = pImage->Backend->pfnSetOpenFlags(pImage->pBackendData, uOpenFlags);
9100 }
9101
9102 /* Cache disk information. */
9103 pDisk->cbSize = pImage->Backend->pfnGetSize(pImage->pBackendData);
9104
9105 /* Cache PCHS geometry. */
9106 rc2 = pImage->Backend->pfnGetPCHSGeometry(pImage->pBackendData,
9107 &pDisk->PCHSGeometry);
9108 if (RT_FAILURE(rc2))
9109 {
9110 pDisk->PCHSGeometry.cCylinders = 0;
9111 pDisk->PCHSGeometry.cHeads = 0;
9112 pDisk->PCHSGeometry.cSectors = 0;
9113 }
9114 else
9115 {
9116 /* Make sure the PCHS geometry is properly clipped. */
9117 pDisk->PCHSGeometry.cCylinders = RT_MIN(pDisk->PCHSGeometry.cCylinders, 16383);
9118 pDisk->PCHSGeometry.cHeads = RT_MIN(pDisk->PCHSGeometry.cHeads, 16);
9119 pDisk->PCHSGeometry.cSectors = RT_MIN(pDisk->PCHSGeometry.cSectors, 63);
9120 }
9121
9122 /* Cache LCHS geometry. */
9123 rc2 = pImage->Backend->pfnGetLCHSGeometry(pImage->pBackendData,
9124 &pDisk->LCHSGeometry);
9125 if (RT_FAILURE(rc2))
9126 {
9127 pDisk->LCHSGeometry.cCylinders = 0;
9128 pDisk->LCHSGeometry.cHeads = 0;
9129 pDisk->LCHSGeometry.cSectors = 0;
9130 }
9131 else
9132 {
9133 /* Make sure the LCHS geometry is properly clipped. */
9134 pDisk->LCHSGeometry.cHeads = RT_MIN(pDisk->LCHSGeometry.cHeads, 255);
9135 pDisk->LCHSGeometry.cSectors = RT_MIN(pDisk->LCHSGeometry.cSectors, 63);
9136 }
9137 } while (0);
9138
9139 if (RT_UNLIKELY(fLockWrite))
9140 {
9141 rc2 = vdThreadFinishWrite(pDisk);
9142 AssertRC(rc2);
9143 }
9144
9145 LogFlowFunc(("returns %Rrc\n", rc));
9146 return rc;
9147}
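
/*
 * Illustrative usage sketch (comment only, not compiled): dropping the topmost
 * image without deleting its file. Assumes pDisk was set up and populated
 * elsewhere; the previous image is reopened read/write by VDClose() itself if
 * the closed one was writable.
 *
 *    int rc = VDClose(pDisk, false);   // fDelete = false: keep the file on disk
 *    if (rc == VERR_VD_NOT_OPENED)
 *    {
 *        // Nothing was open in the container to begin with.
 *    }
 */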
9148
9149/**
9150 * Closes the currently opened cache image file in HDD container.
9151 *
9152 * @return VBox status code.
9153 * @return VERR_VD_NOT_OPENED if no cache is opened in HDD container.
9154 * @param pDisk Pointer to HDD container.
9155 * @param fDelete If true, delete the image from the host disk.
9156 */
9157VBOXDDU_DECL(int) VDCacheClose(PVBOXHDD pDisk, bool fDelete)
9158{
9159 int rc = VINF_SUCCESS;
9160 int rc2;
9161 bool fLockWrite = false;
9162 PVDCACHE pCache = NULL;
9163
9164 LogFlowFunc(("pDisk=%#p fDelete=%d\n", pDisk, fDelete));
9165
9166 do
9167 {
9168 /* sanity check */
9169 AssertPtrBreakStmt(pDisk, rc = VERR_INVALID_PARAMETER);
9170 AssertMsg(pDisk->u32Signature == VBOXHDDDISK_SIGNATURE, ("u32Signature=%08x\n", pDisk->u32Signature));
9171
9172 rc2 = vdThreadStartWrite(pDisk);
9173 AssertRC(rc2);
9174 fLockWrite = true;
9175
9176 AssertPtrBreakStmt(pDisk->pCache, rc = VERR_VD_CACHE_NOT_FOUND);
9177
9178 pCache = pDisk->pCache;
9179 pDisk->pCache = NULL;
9180
9181 pCache->Backend->pfnClose(pCache->pBackendData, fDelete);
9182 if (pCache->pszFilename)
9183 RTStrFree(pCache->pszFilename);
9184 RTMemFree(pCache);
9185 } while (0);
9186
9187 if (RT_LIKELY(fLockWrite))
9188 {
9189 rc2 = vdThreadFinishWrite(pDisk);
9190 AssertRC(rc2);
9191 }
9192
9193 LogFlowFunc(("returns %Rrc\n", rc));
9194 return rc;
9195}
9196
9197VBOXDDU_DECL(int) VDFilterRemove(PVBOXHDD pDisk, uint32_t fFlags)
9198{
9199 int rc = VINF_SUCCESS;
9200 int rc2;
9201 bool fLockWrite = false;
9202 PVDFILTER pFilter = NULL;
9203
9204 LogFlowFunc(("pDisk=%#p\n", pDisk));
9205
9206 do
9207 {
9208 /* sanity check */
9209 AssertPtrBreakStmt(pDisk, rc = VERR_INVALID_PARAMETER);
9210 AssertMsg(pDisk->u32Signature == VBOXHDDDISK_SIGNATURE, ("u32Signature=%08x\n", pDisk->u32Signature));
9211
9212 AssertMsgBreakStmt(!(fFlags & ~VD_FILTER_FLAGS_MASK),
9213 ("Invalid flags set (fFlags=%#x)\n", fFlags),
9214 rc = VERR_INVALID_PARAMETER);
9215
9216 rc2 = vdThreadStartWrite(pDisk);
9217 AssertRC(rc2);
9218 fLockWrite = true;
9219
9220 if (fFlags & VD_FILTER_FLAGS_WRITE)
9221 {
9222 AssertBreakStmt(!RTListIsEmpty(&pDisk->ListFilterChainWrite), rc = VERR_VD_NOT_OPENED);
9223 pFilter = RTListGetLast(&pDisk->ListFilterChainWrite, VDFILTER, ListNodeChainWrite);
9224 AssertPtr(pFilter);
9225 RTListNodeRemove(&pFilter->ListNodeChainWrite);
9226 vdFilterRelease(pFilter);
9227 }
9228
9229 if (fFlags & VD_FILTER_FLAGS_READ)
9230 {
9231 AssertBreakStmt(!RTListIsEmpty(&pDisk->ListFilterChainRead), rc = VERR_VD_NOT_OPENED);
9232 pFilter = RTListGetLast(&pDisk->ListFilterChainRead, VDFILTER, ListNodeChainRead);
9233 AssertPtr(pFilter);
9234 RTListNodeRemove(&pFilter->ListNodeChainRead);
9235 vdFilterRelease(pFilter);
9236 }
9237 } while (0);
9238
9239 if (RT_LIKELY(fLockWrite))
9240 {
9241 rc2 = vdThreadFinishWrite(pDisk);
9242 AssertRC(rc2);
9243 }
9244
9245 LogFlowFunc(("returns %Rrc\n", rc));
9246 return rc;
9247}
9248
9249/**
9250 * Closes all opened image files in HDD container.
9251 *
9252 * @returns VBox status code.
9253 * @param pDisk Pointer to HDD container.
9254 */
9255VBOXDDU_DECL(int) VDCloseAll(PVBOXHDD pDisk)
9256{
9257 int rc = VINF_SUCCESS;
9258 int rc2;
9259 bool fLockWrite = false;
9260
9261 LogFlowFunc(("pDisk=%#p\n", pDisk));
9262 do
9263 {
9264 /* sanity check */
9265 AssertPtrBreakStmt(pDisk, rc = VERR_INVALID_PARAMETER);
9266 AssertMsg(pDisk->u32Signature == VBOXHDDDISK_SIGNATURE, ("u32Signature=%08x\n", pDisk->u32Signature));
9267
9268 /* Lock the entire operation. */
9269 rc2 = vdThreadStartWrite(pDisk);
9270 AssertRC(rc2);
9271 fLockWrite = true;
9272
9273 PVDCACHE pCache = pDisk->pCache;
9274 if (pCache)
9275 {
9276 rc2 = pCache->Backend->pfnClose(pCache->pBackendData, false);
9277 if (RT_FAILURE(rc2) && RT_SUCCESS(rc))
9278 rc = rc2;
9279
9280 if (pCache->pszFilename)
9281 RTStrFree(pCache->pszFilename);
9282 RTMemFree(pCache);
9283 }
9284
9285 PVDIMAGE pImage = pDisk->pLast;
9286 while (VALID_PTR(pImage))
9287 {
9288 PVDIMAGE pPrev = pImage->pPrev;
9289 /* Remove image from list of opened images. */
9290 vdRemoveImageFromList(pDisk, pImage);
9291 /* Close image. */
9292 rc2 = pImage->Backend->pfnClose(pImage->pBackendData, false);
9293 if (RT_FAILURE(rc2) && RT_SUCCESS(rc))
9294 rc = rc2;
9295 /* Free remaining resources related to the image. */
9296 RTStrFree(pImage->pszFilename);
9297 RTMemFree(pImage);
9298 pImage = pPrev;
9299 }
9300 Assert(!VALID_PTR(pDisk->pLast));
9301 } while (0);
9302
9303 if (RT_UNLIKELY(fLockWrite))
9304 {
9305 rc2 = vdThreadFinishWrite(pDisk);
9306 AssertRC(rc2);
9307 }
9308
9309 LogFlowFunc(("returns %Rrc\n", rc));
9310 return rc;
9311}
9312
9313/**
9314 * Removes all filters of the given HDD container.
9315 *
9316 * @return VBox status code.
9317 * @param pDisk Pointer to HDD container.
9318 */
9319VBOXDDU_DECL(int) VDFilterRemoveAll(PVBOXHDD pDisk)
9320{
9321 int rc = VINF_SUCCESS;
9322 int rc2;
9323 bool fLockWrite = false;
9324
9325 LogFlowFunc(("pDisk=%#p\n", pDisk));
9326 do
9327 {
9328 /* sanity check */
9329 AssertPtrBreakStmt(pDisk, rc = VERR_INVALID_PARAMETER);
9330 AssertMsg(pDisk->u32Signature == VBOXHDDDISK_SIGNATURE, ("u32Signature=%08x\n", pDisk->u32Signature));
9331
9332 /* Lock the entire operation. */
9333 rc2 = vdThreadStartWrite(pDisk);
9334 AssertRC(rc2);
9335 fLockWrite = true;
9336
9337 PVDFILTER pFilter, pFilterNext;
9338 RTListForEachSafe(&pDisk->ListFilterChainWrite, pFilter, pFilterNext, VDFILTER, ListNodeChainWrite)
9339 {
9340 RTListNodeRemove(&pFilter->ListNodeChainWrite);
9341 vdFilterRelease(pFilter);
9342 }
9343
9344 RTListForEachSafe(&pDisk->ListFilterChainRead, pFilter, pFilterNext, VDFILTER, ListNodeChainRead)
9345 {
9346 RTListNodeRemove(&pFilter->ListNodeChainRead);
9347 vdFilterRelease(pFilter);
9348 }
9349 Assert(RTListIsEmpty(&pDisk->ListFilterChainRead));
9350 Assert(RTListIsEmpty(&pDisk->ListFilterChainWrite));
9351 } while (0);
9352
9353 if (RT_UNLIKELY(fLockWrite))
9354 {
9355 rc2 = vdThreadFinishWrite(pDisk);
9356 AssertRC(rc2);
9357 }
9358
9359 LogFlowFunc(("returns %Rrc\n", rc));
9360 return rc;
9361}
9362
9363/**
9364 * Read data from virtual HDD.
9365 *
9366 * @returns VBox status code.
9367 * @retval VERR_VD_NOT_OPENED if no image is opened in HDD container.
9368 * @param pDisk Pointer to HDD container.
9369 * @param uOffset Offset of first reading byte from start of disk.
9370 * @param pvBuf Pointer to buffer for reading data.
9371 * @param cbRead Number of bytes to read.
9372 */
9373VBOXDDU_DECL(int) VDRead(PVBOXHDD pDisk, uint64_t uOffset, void *pvBuf,
9374 size_t cbRead)
9375{
9376 int rc = VINF_SUCCESS;
9377 int rc2;
9378 bool fLockRead = false;
9379
9380 LogFlowFunc(("pDisk=%#p uOffset=%llu pvBuf=%p cbRead=%zu\n",
9381 pDisk, uOffset, pvBuf, cbRead));
9382 do
9383 {
9384 /* sanity check */
9385 AssertPtrBreakStmt(pDisk, rc = VERR_INVALID_PARAMETER);
9386 AssertMsg(pDisk->u32Signature == VBOXHDDDISK_SIGNATURE, ("u32Signature=%08x\n", pDisk->u32Signature));
9387
9388 /* Check arguments. */
9389 AssertMsgBreakStmt(VALID_PTR(pvBuf),
9390 ("pvBuf=%#p\n", pvBuf),
9391 rc = VERR_INVALID_PARAMETER);
9392 AssertMsgBreakStmt(cbRead,
9393 ("cbRead=%zu\n", cbRead),
9394 rc = VERR_INVALID_PARAMETER);
9395
9396 rc2 = vdThreadStartRead(pDisk);
9397 AssertRC(rc2);
9398 fLockRead = true;
9399
9400 PVDIMAGE pImage = pDisk->pLast;
9401 AssertPtrBreakStmt(pImage, rc = VERR_VD_NOT_OPENED);
9402
9403 if (uOffset + cbRead > pDisk->cbSize)
9404 {
9405 /* Floppy images might be smaller than the standard expected by
9406 the floppy controller code. So, we won't fail here. */
9407 AssertMsgBreakStmt(pDisk->enmType == VDTYPE_FLOPPY,
9408 ("uOffset=%llu cbRead=%zu pDisk->cbSize=%llu\n",
9409 uOffset, cbRead, pDisk->cbSize),
9410 rc = VERR_EOF);
9411 memset(pvBuf, 0xf6, cbRead); /* f6h = format.com filler byte */
9412 if (uOffset >= pDisk->cbSize)
9413 break;
9414 cbRead = pDisk->cbSize - uOffset;
9415 }
9416
9417 rc = vdReadHelper(pDisk, pImage, uOffset, pvBuf, cbRead,
9418 true /* fUpdateCache */);
9419 } while (0);
9420
9421 if (RT_UNLIKELY(fLockRead))
9422 {
9423 rc2 = vdThreadFinishRead(pDisk);
9424 AssertRC(rc2);
9425 }
9426
9427 LogFlowFunc(("returns %Rrc\n", rc));
9428 return rc;
9429}
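
/*
 * Illustrative usage sketch (comment only, not compiled): synchronously
 * reading the first sector of the disk. Assumes pDisk was opened elsewhere and
 * is at least 512 bytes large; VDRead() itself checks the range against the
 * disk size.
 *
 *    uint8_t abSector[512];
 *    int rc = VDRead(pDisk, 0, abSector, sizeof(abSector));
 *    if (RT_FAILURE(rc))
 *    {
 *        // Propagate the VBox status code to the caller.
 *    }
 */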
9430
9431/**
9432 * Write data to virtual HDD.
9433 *
9434 * @returns VBox status code.
9435 * @retval VERR_VD_NOT_OPENED if no image is opened in HDD container.
9436 * @param pDisk Pointer to HDD container.
9437 * @param uOffset Offset of the first byte being
9438 * written from start of disk.
9439 * @param pvBuf Pointer to buffer for writing data.
9440 * @param cbWrite Number of bytes to write.
9441 */
9442VBOXDDU_DECL(int) VDWrite(PVBOXHDD pDisk, uint64_t uOffset, const void *pvBuf,
9443 size_t cbWrite)
9444{
9445 int rc = VINF_SUCCESS;
9446 int rc2;
9447 bool fLockWrite = false;
9448
9449 LogFlowFunc(("pDisk=%#p uOffset=%llu pvBuf=%p cbWrite=%zu\n",
9450 pDisk, uOffset, pvBuf, cbWrite));
9451 do
9452 {
9453 /* sanity check */
9454 AssertPtrBreakStmt(pDisk, rc = VERR_INVALID_PARAMETER);
9455 AssertMsg(pDisk->u32Signature == VBOXHDDDISK_SIGNATURE, ("u32Signature=%08x\n", pDisk->u32Signature));
9456
9457 /* Check arguments. */
9458 AssertMsgBreakStmt(VALID_PTR(pvBuf),
9459 ("pvBuf=%#p\n", pvBuf),
9460 rc = VERR_INVALID_PARAMETER);
9461 AssertMsgBreakStmt(cbWrite,
9462 ("cbWrite=%zu\n", cbWrite),
9463 rc = VERR_INVALID_PARAMETER);
9464
9465 rc2 = vdThreadStartWrite(pDisk);
9466 AssertRC(rc2);
9467 fLockWrite = true;
9468
9469 AssertMsgBreakStmt(uOffset + cbWrite <= pDisk->cbSize,
9470 ("uOffset=%llu cbWrite=%zu pDisk->cbSize=%llu\n",
9471 uOffset, cbWrite, pDisk->cbSize),
9472 rc = VERR_INVALID_PARAMETER);
9473
9474 PVDIMAGE pImage = pDisk->pLast;
9475 AssertPtrBreakStmt(pImage, rc = VERR_VD_NOT_OPENED);
9476
9477 vdSetModifiedFlag(pDisk);
9478 rc = vdWriteHelper(pDisk, pImage, uOffset, pvBuf, cbWrite,
9479 VDIOCTX_FLAGS_READ_UPDATE_CACHE);
9480 if (RT_FAILURE(rc))
9481 break;
9482
9483 /* If there is a merge (in the direction towards a parent) running
9484 * concurrently then we have to also "relay" the write to this parent,
9485 * as the merge position might be already past the position where
9486 * this write is going. The "context" of the write can come from the
9487 * natural chain, since merging either already did or will take care
9488          * of the "other" content which might be needed to fill the block
9489 * to a full allocation size. The cache doesn't need to be touched
9490 * as this write is covered by the previous one. */
9491 if (RT_UNLIKELY(pDisk->pImageRelay))
9492 rc = vdWriteHelper(pDisk, pDisk->pImageRelay, uOffset,
9493 pvBuf, cbWrite, VDIOCTX_FLAGS_DEFAULT);
9494 } while (0);
9495
9496 if (RT_UNLIKELY(fLockWrite))
9497 {
9498 rc2 = vdThreadFinishWrite(pDisk);
9499 AssertRC(rc2);
9500 }
9501
9502 LogFlowFunc(("returns %Rrc\n", rc));
9503 return rc;
9504}
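
/*
 * Illustrative usage sketch (comment only, not compiled): overwriting the
 * first sector and making sure the data reaches the image file. Assumes pDisk
 * was opened read/write elsewhere; VDFlush() (below) brings the on-disk
 * representation up to date.
 *
 *    uint8_t abSector[512];
 *    memset(abSector, 0, sizeof(abSector));
 *    int rc = VDWrite(pDisk, 0, abSector, sizeof(abSector));
 *    if (RT_SUCCESS(rc))
 *        rc = VDFlush(pDisk);
 */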
9505
9506/**
9507 * Make sure the on disk representation of a virtual HDD is up to date.
9508 *
9509 * @returns VBox status code.
9510 * @retval VERR_VD_NOT_OPENED if no image is opened in HDD container.
9511 * @param pDisk Pointer to HDD container.
9512 */
9513VBOXDDU_DECL(int) VDFlush(PVBOXHDD pDisk)
9514{
9515 int rc = VINF_SUCCESS;
9516 int rc2;
9517 bool fLockWrite = false;
9518
9519 LogFlowFunc(("pDisk=%#p\n", pDisk));
9520 do
9521 {
9522 /* sanity check */
9523 AssertPtrBreakStmt(pDisk, rc = VERR_INVALID_PARAMETER);
9524 AssertMsg(pDisk->u32Signature == VBOXHDDDISK_SIGNATURE, ("u32Signature=%08x\n", pDisk->u32Signature));
9525
9526 rc2 = vdThreadStartWrite(pDisk);
9527 AssertRC(rc2);
9528 fLockWrite = true;
9529
9530 PVDIMAGE pImage = pDisk->pLast;
9531 AssertPtrBreakStmt(pImage, rc = VERR_VD_NOT_OPENED);
9532
9533 VDIOCTX IoCtx;
9534 RTSEMEVENT hEventComplete = NIL_RTSEMEVENT;
9535
9536 rc = RTSemEventCreate(&hEventComplete);
9537 if (RT_FAILURE(rc))
9538 break;
9539
9540 vdIoCtxInit(&IoCtx, pDisk, VDIOCTXTXDIR_FLUSH, 0, 0, pImage, NULL,
9541 NULL, vdFlushHelperAsync, VDIOCTX_FLAGS_SYNC | VDIOCTX_FLAGS_DONT_FREE);
9542
9543 IoCtx.Type.Root.pfnComplete = vdIoCtxSyncComplete;
9544 IoCtx.Type.Root.pvUser1 = pDisk;
9545 IoCtx.Type.Root.pvUser2 = hEventComplete;
9546 rc = vdIoCtxProcessSync(&IoCtx, hEventComplete);
9547
9548 RTSemEventDestroy(hEventComplete);
9549 } while (0);
9550
9551 if (RT_UNLIKELY(fLockWrite))
9552 {
9553 rc2 = vdThreadFinishWrite(pDisk);
9554 AssertRC(rc2);
9555 }
9556
9557 LogFlowFunc(("returns %Rrc\n", rc));
9558 return rc;
9559}
9560
9561/**
9562 * Get number of opened images in HDD container.
9563 *
9564 * @returns Number of opened images for HDD container. 0 if no images have been opened.
9565 * @param pDisk Pointer to HDD container.
9566 */
9567VBOXDDU_DECL(unsigned) VDGetCount(PVBOXHDD pDisk)
9568{
9569 unsigned cImages;
9570 int rc2;
9571 bool fLockRead = false;
9572
9573 LogFlowFunc(("pDisk=%#p\n", pDisk));
9574 do
9575 {
9576 /* sanity check */
9577 AssertPtrBreakStmt(pDisk, cImages = 0);
9578 AssertMsg(pDisk->u32Signature == VBOXHDDDISK_SIGNATURE, ("u32Signature=%08x\n", pDisk->u32Signature));
9579
9580 rc2 = vdThreadStartRead(pDisk);
9581 AssertRC(rc2);
9582 fLockRead = true;
9583
9584 cImages = pDisk->cImages;
9585 } while (0);
9586
9587 if (RT_UNLIKELY(fLockRead))
9588 {
9589 rc2 = vdThreadFinishRead(pDisk);
9590 AssertRC(rc2);
9591 }
9592
9593 LogFlowFunc(("returns %u\n", cImages));
9594 return cImages;
9595}
9596
9597/**
9598 * Get read/write mode of HDD container.
9599 *
9600 * @returns Virtual disk ReadOnly status.
9601 * @returns true if no image is opened in HDD container.
9602 * @param pDisk Pointer to HDD container.
9603 */
9604VBOXDDU_DECL(bool) VDIsReadOnly(PVBOXHDD pDisk)
9605{
9606 bool fReadOnly;
9607 int rc2;
9608 bool fLockRead = false;
9609
9610 LogFlowFunc(("pDisk=%#p\n", pDisk));
9611 do
9612 {
9613 /* sanity check */
9614 AssertPtrBreakStmt(pDisk, fReadOnly = false);
9615 AssertMsg(pDisk->u32Signature == VBOXHDDDISK_SIGNATURE, ("u32Signature=%08x\n", pDisk->u32Signature));
9616
9617 rc2 = vdThreadStartRead(pDisk);
9618 AssertRC(rc2);
9619 fLockRead = true;
9620
9621 PVDIMAGE pImage = pDisk->pLast;
9622 AssertPtrBreakStmt(pImage, fReadOnly = true);
9623
9624 unsigned uOpenFlags;
9625 uOpenFlags = pDisk->pLast->Backend->pfnGetOpenFlags(pDisk->pLast->pBackendData);
9626 fReadOnly = !!(uOpenFlags & VD_OPEN_FLAGS_READONLY);
9627 } while (0);
9628
9629 if (RT_UNLIKELY(fLockRead))
9630 {
9631 rc2 = vdThreadFinishRead(pDisk);
9632 AssertRC(rc2);
9633 }
9634
9635 LogFlowFunc(("returns %d\n", fReadOnly));
9636 return fReadOnly;
9637}
9638
9639/**
9640 * Get sector size of an image in HDD container.
9641 *
9642 * @return Virtual disk sector size in bytes.
9643 * @return 0 if image with specified number was not opened.
9644 * @param pDisk Pointer to HDD container.
9645 * @param nImage Image number, counts from 0. 0 is always base image of container.
9646 */
9647VBOXDDU_DECL(uint32_t) VDGetSectorSize(PVBOXHDD pDisk, unsigned nImage)
9648{
9649    uint32_t cbSector;
9650 int rc2;
9651 bool fLockRead = false;
9652
9653 LogFlowFunc(("pDisk=%#p nImage=%u\n", pDisk, nImage));
9654 do
9655 {
9656 /* sanity check */
9657 AssertPtrBreakStmt(pDisk, cbSector = 0);
9658 AssertMsg(pDisk->u32Signature == VBOXHDDDISK_SIGNATURE, ("u32Signature=%08x\n", pDisk->u32Signature));
9659
9660 rc2 = vdThreadStartRead(pDisk);
9661 AssertRC(rc2);
9662 fLockRead = true;
9663
9664 PVDIMAGE pImage = vdGetImageByNumber(pDisk, nImage);
9665 AssertPtrBreakStmt(pImage, cbSector = 0);
9666 cbSector = pImage->Backend->pfnGetSectorSize(pImage->pBackendData);
9667 } while (0);
9668
9669 if (RT_UNLIKELY(fLockRead))
9670 {
9671 rc2 = vdThreadFinishRead(pDisk);
9672 AssertRC(rc2);
9673 }
9674
9675 LogFlowFunc(("returns %u\n", cbSector));
9676 return cbSector;
9677}
9678
9679/**
9680 * Get total capacity of an image in HDD container.
9681 *
9682 * @returns Virtual disk size in bytes.
9683 * @returns 0 if image with specified number was not opened.
9684 * @param pDisk Pointer to HDD container.
9685 * @param nImage Image number, counts from 0. 0 is always base image of container.
9686 */
9687VBOXDDU_DECL(uint64_t) VDGetSize(PVBOXHDD pDisk, unsigned nImage)
9688{
9689 uint64_t cbSize;
9690 int rc2;
9691 bool fLockRead = false;
9692
9693 LogFlowFunc(("pDisk=%#p nImage=%u\n", pDisk, nImage));
9694 do
9695 {
9696 /* sanity check */
9697 AssertPtrBreakStmt(pDisk, cbSize = 0);
9698 AssertMsg(pDisk->u32Signature == VBOXHDDDISK_SIGNATURE, ("u32Signature=%08x\n", pDisk->u32Signature));
9699
9700 rc2 = vdThreadStartRead(pDisk);
9701 AssertRC(rc2);
9702 fLockRead = true;
9703
9704 PVDIMAGE pImage = vdGetImageByNumber(pDisk, nImage);
9705 AssertPtrBreakStmt(pImage, cbSize = 0);
9706 cbSize = pImage->Backend->pfnGetSize(pImage->pBackendData);
9707 } while (0);
9708
9709 if (RT_UNLIKELY(fLockRead))
9710 {
9711 rc2 = vdThreadFinishRead(pDisk);
9712 AssertRC(rc2);
9713 }
9714
9715 LogFlowFunc(("returns %llu\n", cbSize));
9716 return cbSize;
9717}
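
/*
 * Illustrative usage sketch (comment only, not compiled): deriving the sector
 * count of the base image from the two getters above. Both return 0 if the
 * image number is invalid, which the sketch treats as an error.
 *
 *    uint64_t cbDisk   = VDGetSize(pDisk, 0);
 *    uint32_t cbSector = VDGetSectorSize(pDisk, 0);
 *    uint64_t cSectors = (cbDisk && cbSector) ? cbDisk / cbSector : 0;
 */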
9718
9719/**
9720 * Get total file size of an image in HDD container.
9721 *
9722 * @returns Image file size in bytes.
9723 * @returns 0 if image with specified number was not opened.
9724 * @param pDisk Pointer to HDD container.
9725 * @param nImage Image number, counts from 0. 0 is always base image of container.
9726 */
9727VBOXDDU_DECL(uint64_t) VDGetFileSize(PVBOXHDD pDisk, unsigned nImage)
9728{
9729 uint64_t cbSize;
9730 int rc2;
9731 bool fLockRead = false;
9732
9733 LogFlowFunc(("pDisk=%#p nImage=%u\n", pDisk, nImage));
9734 do
9735 {
9736 /* sanity check */
9737 AssertPtrBreakStmt(pDisk, cbSize = 0);
9738 AssertMsg(pDisk->u32Signature == VBOXHDDDISK_SIGNATURE, ("u32Signature=%08x\n", pDisk->u32Signature));
9739
9740 rc2 = vdThreadStartRead(pDisk);
9741 AssertRC(rc2);
9742 fLockRead = true;
9743
9744 PVDIMAGE pImage = vdGetImageByNumber(pDisk, nImage);
9745 AssertPtrBreakStmt(pImage, cbSize = 0);
9746 cbSize = pImage->Backend->pfnGetFileSize(pImage->pBackendData);
9747 } while (0);
9748
9749 if (RT_UNLIKELY(fLockRead))
9750 {
9751 rc2 = vdThreadFinishRead(pDisk);
9752 AssertRC(rc2);
9753 }
9754
9755 LogFlowFunc(("returns %llu\n", cbSize));
9756 return cbSize;
9757}
9758
9759/**
9760 * Get virtual disk PCHS geometry stored in HDD container.
9761 *
9762 * @returns VBox status code.
9763 * @returns VERR_VD_IMAGE_NOT_FOUND if image with specified number was not opened.
9764 * @returns VERR_VD_GEOMETRY_NOT_SET if no geometry present in the HDD container.
9765 * @param pDisk Pointer to HDD container.
9766 * @param nImage Image number, counts from 0. 0 is always base image of container.
9767 * @param pPCHSGeometry Where to store PCHS geometry. Not NULL.
9768 */
9769VBOXDDU_DECL(int) VDGetPCHSGeometry(PVBOXHDD pDisk, unsigned nImage,
9770 PVDGEOMETRY pPCHSGeometry)
9771{
9772 int rc = VINF_SUCCESS;
9773 int rc2;
9774 bool fLockRead = false;
9775
9776 LogFlowFunc(("pDisk=%#p nImage=%u pPCHSGeometry=%#p\n",
9777 pDisk, nImage, pPCHSGeometry));
9778 do
9779 {
9780 /* sanity check */
9781 AssertPtrBreakStmt(pDisk, rc = VERR_INVALID_PARAMETER);
9782 AssertMsg(pDisk->u32Signature == VBOXHDDDISK_SIGNATURE, ("u32Signature=%08x\n", pDisk->u32Signature));
9783
9784 /* Check arguments. */
9785 AssertMsgBreakStmt(VALID_PTR(pPCHSGeometry),
9786 ("pPCHSGeometry=%#p\n", pPCHSGeometry),
9787 rc = VERR_INVALID_PARAMETER);
9788
9789 rc2 = vdThreadStartRead(pDisk);
9790 AssertRC(rc2);
9791 fLockRead = true;
9792
9793 PVDIMAGE pImage = vdGetImageByNumber(pDisk, nImage);
9794 AssertPtrBreakStmt(pImage, rc = VERR_VD_IMAGE_NOT_FOUND);
9795
9796 if (pImage == pDisk->pLast)
9797 {
9798 /* Use cached information if possible. */
9799 if (pDisk->PCHSGeometry.cCylinders != 0)
9800 *pPCHSGeometry = pDisk->PCHSGeometry;
9801 else
9802 rc = VERR_VD_GEOMETRY_NOT_SET;
9803 }
9804 else
9805 rc = pImage->Backend->pfnGetPCHSGeometry(pImage->pBackendData,
9806 pPCHSGeometry);
9807 } while (0);
9808
9809 if (RT_UNLIKELY(fLockRead))
9810 {
9811 rc2 = vdThreadFinishRead(pDisk);
9812 AssertRC(rc2);
9813 }
9814
9815 LogFlowFunc(("%Rrc (PCHS=%u/%u/%u)\n", rc,
9816 pDisk->PCHSGeometry.cCylinders, pDisk->PCHSGeometry.cHeads,
9817 pDisk->PCHSGeometry.cSectors));
9818 return rc;
9819}
9820
9821/**
9822 * Store virtual disk PCHS geometry in HDD container.
9823 *
9824 * Note that in case of unrecoverable error all images in HDD container will be closed.
9825 *
9826 * @returns VBox status code.
9827 * @returns VERR_VD_IMAGE_NOT_FOUND if image with specified number was not opened.
9828 * @returns VERR_VD_GEOMETRY_NOT_SET if no geometry present in the HDD container.
9829 * @param pDisk Pointer to HDD container.
9830 * @param nImage Image number, counts from 0. 0 is always base image of container.
9831 * @param pPCHSGeometry Where to load PCHS geometry from. Not NULL.
9832 */
9833VBOXDDU_DECL(int) VDSetPCHSGeometry(PVBOXHDD pDisk, unsigned nImage,
9834 PCVDGEOMETRY pPCHSGeometry)
9835{
9836 int rc = VINF_SUCCESS;
9837 int rc2;
9838 bool fLockWrite = false;
9839
9840 LogFlowFunc(("pDisk=%#p nImage=%u pPCHSGeometry=%#p PCHS=%u/%u/%u\n",
9841 pDisk, nImage, pPCHSGeometry, pPCHSGeometry->cCylinders,
9842 pPCHSGeometry->cHeads, pPCHSGeometry->cSectors));
9843 do
9844 {
9845 /* sanity check */
9846 AssertPtrBreakStmt(pDisk, rc = VERR_INVALID_PARAMETER);
9847 AssertMsg(pDisk->u32Signature == VBOXHDDDISK_SIGNATURE, ("u32Signature=%08x\n", pDisk->u32Signature));
9848
9849 /* Check arguments. */
9850 AssertMsgBreakStmt( VALID_PTR(pPCHSGeometry)
9851 && pPCHSGeometry->cHeads <= 16
9852 && pPCHSGeometry->cSectors <= 63,
9853 ("pPCHSGeometry=%#p PCHS=%u/%u/%u\n", pPCHSGeometry,
9854 pPCHSGeometry->cCylinders, pPCHSGeometry->cHeads,
9855 pPCHSGeometry->cSectors),
9856 rc = VERR_INVALID_PARAMETER);
9857
9858 rc2 = vdThreadStartWrite(pDisk);
9859 AssertRC(rc2);
9860 fLockWrite = true;
9861
9862 PVDIMAGE pImage = vdGetImageByNumber(pDisk, nImage);
9863 AssertPtrBreakStmt(pImage, rc = VERR_VD_IMAGE_NOT_FOUND);
9864
9865 if (pImage == pDisk->pLast)
9866 {
9867 if ( pPCHSGeometry->cCylinders != pDisk->PCHSGeometry.cCylinders
9868 || pPCHSGeometry->cHeads != pDisk->PCHSGeometry.cHeads
9869 || pPCHSGeometry->cSectors != pDisk->PCHSGeometry.cSectors)
9870 {
9871 /* Only update geometry if it is changed. Avoids similar checks
9872 * in every backend. Most of the time the new geometry is set
9873 * to the previous values, so no need to go through the hassle
9874 * of updating an image which could be opened in read-only mode
9875 * right now. */
9876 rc = pImage->Backend->pfnSetPCHSGeometry(pImage->pBackendData,
9877 pPCHSGeometry);
9878
9879 /* Cache new geometry values in any case. */
9880 rc2 = pImage->Backend->pfnGetPCHSGeometry(pImage->pBackendData,
9881 &pDisk->PCHSGeometry);
9882 if (RT_FAILURE(rc2))
9883 {
9884 pDisk->PCHSGeometry.cCylinders = 0;
9885 pDisk->PCHSGeometry.cHeads = 0;
9886 pDisk->PCHSGeometry.cSectors = 0;
9887 }
9888 else
9889 {
9890                    /* Make sure the PCHS geometry is properly clipped. */
9891                    pDisk->PCHSGeometry.cHeads = RT_MIN(pDisk->PCHSGeometry.cHeads, 16);
9892                    pDisk->PCHSGeometry.cSectors = RT_MIN(pDisk->PCHSGeometry.cSectors, 63);
9893 }
9894 }
9895 }
9896 else
9897 {
9898 VDGEOMETRY PCHS;
9899 rc = pImage->Backend->pfnGetPCHSGeometry(pImage->pBackendData,
9900 &PCHS);
9901 if ( RT_FAILURE(rc)
9902 || pPCHSGeometry->cCylinders != PCHS.cCylinders
9903 || pPCHSGeometry->cHeads != PCHS.cHeads
9904 || pPCHSGeometry->cSectors != PCHS.cSectors)
9905 {
9906 /* Only update geometry if it is changed. Avoids similar checks
9907 * in every backend. Most of the time the new geometry is set
9908 * to the previous values, so no need to go through the hassle
9909 * of updating an image which could be opened in read-only mode
9910 * right now. */
9911 rc = pImage->Backend->pfnSetPCHSGeometry(pImage->pBackendData,
9912 pPCHSGeometry);
9913 }
9914 }
9915 } while (0);
9916
9917 if (RT_UNLIKELY(fLockWrite))
9918 {
9919 rc2 = vdThreadFinishWrite(pDisk);
9920 AssertRC(rc2);
9921 }
9922
9923 LogFlowFunc(("returns %Rrc\n", rc));
9924 return rc;
9925}
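
/*
 * Illustrative usage sketch (comment only, not compiled): reading the stored
 * PCHS geometry of the base image and writing back a clipped variant. The
 * setter only touches the image if the values actually changed, as
 * implemented above; pDisk is assumed to have been set up elsewhere.
 *
 *    VDGEOMETRY Pchs;
 *    int rc = VDGetPCHSGeometry(pDisk, 0, &Pchs);
 *    if (RT_SUCCESS(rc))
 *    {
 *        Pchs.cCylinders = RT_MIN(Pchs.cCylinders, 16383);
 *        rc = VDSetPCHSGeometry(pDisk, 0, &Pchs);
 *    }
 *    else if (rc == VERR_VD_GEOMETRY_NOT_SET)
 *    {
 *        // No geometry stored yet; derive one from the disk size instead.
 *    }
 */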
9926
9927/**
9928 * Get virtual disk LCHS geometry stored in HDD container.
9929 *
9930 * @returns VBox status code.
9931 * @returns VERR_VD_IMAGE_NOT_FOUND if image with specified number was not opened.
9932 * @returns VERR_VD_GEOMETRY_NOT_SET if no geometry present in the HDD container.
9933 * @param pDisk Pointer to HDD container.
9934 * @param nImage Image number, counts from 0. 0 is always base image of container.
9935 * @param pLCHSGeometry Where to store LCHS geometry. Not NULL.
9936 */
9937VBOXDDU_DECL(int) VDGetLCHSGeometry(PVBOXHDD pDisk, unsigned nImage,
9938 PVDGEOMETRY pLCHSGeometry)
9939{
9940 int rc = VINF_SUCCESS;
9941 int rc2;
9942 bool fLockRead = false;
9943
9944 LogFlowFunc(("pDisk=%#p nImage=%u pLCHSGeometry=%#p\n",
9945 pDisk, nImage, pLCHSGeometry));
9946 do
9947 {
9948 /* sanity check */
9949 AssertPtrBreakStmt(pDisk, rc = VERR_INVALID_PARAMETER);
9950 AssertMsg(pDisk->u32Signature == VBOXHDDDISK_SIGNATURE, ("u32Signature=%08x\n", pDisk->u32Signature));
9951
9952 /* Check arguments. */
9953 AssertMsgBreakStmt(VALID_PTR(pLCHSGeometry),
9954 ("pLCHSGeometry=%#p\n", pLCHSGeometry),
9955 rc = VERR_INVALID_PARAMETER);
9956
9957 rc2 = vdThreadStartRead(pDisk);
9958 AssertRC(rc2);
9959 fLockRead = true;
9960
9961 PVDIMAGE pImage = vdGetImageByNumber(pDisk, nImage);
9962 AssertPtrBreakStmt(pImage, rc = VERR_VD_IMAGE_NOT_FOUND);
9963
9964 if (pImage == pDisk->pLast)
9965 {
9966 /* Use cached information if possible. */
9967 if (pDisk->LCHSGeometry.cCylinders != 0)
9968 *pLCHSGeometry = pDisk->LCHSGeometry;
9969 else
9970 rc = VERR_VD_GEOMETRY_NOT_SET;
9971 }
9972 else
9973 rc = pImage->Backend->pfnGetLCHSGeometry(pImage->pBackendData,
9974 pLCHSGeometry);
9975 } while (0);
9976
9977 if (RT_UNLIKELY(fLockRead))
9978 {
9979 rc2 = vdThreadFinishRead(pDisk);
9980 AssertRC(rc2);
9981 }
9982
9983 LogFlowFunc((": %Rrc (LCHS=%u/%u/%u)\n", rc,
9984 pDisk->LCHSGeometry.cCylinders, pDisk->LCHSGeometry.cHeads,
9985 pDisk->LCHSGeometry.cSectors));
9986 return rc;
9987}
9988
9989/**
9990 * Store virtual disk LCHS geometry in HDD container.
9991 *
9992 * Note that in case of unrecoverable error all images in HDD container will be closed.
9993 *
9994 * @returns VBox status code.
9995 * @returns VERR_VD_IMAGE_NOT_FOUND if image with specified number was not opened.
9996 * @returns VERR_VD_GEOMETRY_NOT_SET if no geometry present in the HDD container.
9997 * @param pDisk Pointer to HDD container.
9998 * @param nImage Image number, counts from 0. 0 is always base image of container.
9999 * @param pLCHSGeometry Where to load LCHS geometry from. Not NULL.
10000 */
10001VBOXDDU_DECL(int) VDSetLCHSGeometry(PVBOXHDD pDisk, unsigned nImage,
10002 PCVDGEOMETRY pLCHSGeometry)
10003{
10004 int rc = VINF_SUCCESS;
10005 int rc2;
10006 bool fLockWrite = false;
10007
10008 LogFlowFunc(("pDisk=%#p nImage=%u pLCHSGeometry=%#p LCHS=%u/%u/%u\n",
10009 pDisk, nImage, pLCHSGeometry, pLCHSGeometry->cCylinders,
10010 pLCHSGeometry->cHeads, pLCHSGeometry->cSectors));
10011 do
10012 {
10013 /* sanity check */
10014 AssertPtrBreakStmt(pDisk, rc = VERR_INVALID_PARAMETER);
10015 AssertMsg(pDisk->u32Signature == VBOXHDDDISK_SIGNATURE, ("u32Signature=%08x\n", pDisk->u32Signature));
10016
10017 /* Check arguments. */
10018 AssertMsgBreakStmt( VALID_PTR(pLCHSGeometry)
10019 && pLCHSGeometry->cHeads <= 255
10020 && pLCHSGeometry->cSectors <= 63,
10021 ("pLCHSGeometry=%#p LCHS=%u/%u/%u\n", pLCHSGeometry,
10022 pLCHSGeometry->cCylinders, pLCHSGeometry->cHeads,
10023 pLCHSGeometry->cSectors),
10024 rc = VERR_INVALID_PARAMETER);
10025
10026 rc2 = vdThreadStartWrite(pDisk);
10027 AssertRC(rc2);
10028 fLockWrite = true;
10029
10030 PVDIMAGE pImage = vdGetImageByNumber(pDisk, nImage);
10031 AssertPtrBreakStmt(pImage, rc = VERR_VD_IMAGE_NOT_FOUND);
10032
10033 if (pImage == pDisk->pLast)
10034 {
10035 if ( pLCHSGeometry->cCylinders != pDisk->LCHSGeometry.cCylinders
10036 || pLCHSGeometry->cHeads != pDisk->LCHSGeometry.cHeads
10037 || pLCHSGeometry->cSectors != pDisk->LCHSGeometry.cSectors)
10038 {
10039 /* Only update geometry if it is changed. Avoids similar checks
10040 * in every backend. Most of the time the new geometry is set
10041 * to the previous values, so no need to go through the hassle
10042 * of updating an image which could be opened in read-only mode
10043 * right now. */
10044 rc = pImage->Backend->pfnSetLCHSGeometry(pImage->pBackendData,
10045 pLCHSGeometry);
10046
10047 /* Cache new geometry values in any case. */
10048 rc2 = pImage->Backend->pfnGetLCHSGeometry(pImage->pBackendData,
10049 &pDisk->LCHSGeometry);
10050 if (RT_FAILURE(rc2))
10051 {
10052 pDisk->LCHSGeometry.cCylinders = 0;
10053 pDisk->LCHSGeometry.cHeads = 0;
10054 pDisk->LCHSGeometry.cSectors = 0;
10055 }
10056 else
10057 {
10058                    /* Make sure the LCHS geometry is properly clipped. */
10059 pDisk->LCHSGeometry.cHeads = RT_MIN(pDisk->LCHSGeometry.cHeads, 255);
10060 pDisk->LCHSGeometry.cSectors = RT_MIN(pDisk->LCHSGeometry.cSectors, 63);
10061 }
10062 }
10063 }
10064 else
10065 {
10066 VDGEOMETRY LCHS;
10067 rc = pImage->Backend->pfnGetLCHSGeometry(pImage->pBackendData,
10068 &LCHS);
10069 if ( RT_FAILURE(rc)
10070 || pLCHSGeometry->cCylinders != LCHS.cCylinders
10071 || pLCHSGeometry->cHeads != LCHS.cHeads
10072 || pLCHSGeometry->cSectors != LCHS.cSectors)
10073 {
10074 /* Only update geometry if it is changed. Avoids similar checks
10075 * in every backend. Most of the time the new geometry is set
10076 * to the previous values, so no need to go through the hassle
10077 * of updating an image which could be opened in read-only mode
10078 * right now. */
10079 rc = pImage->Backend->pfnSetLCHSGeometry(pImage->pBackendData,
10080 pLCHSGeometry);
10081 }
10082 }
10083 } while (0);
10084
10085 if (RT_UNLIKELY(fLockWrite))
10086 {
10087 rc2 = vdThreadFinishWrite(pDisk);
10088 AssertRC(rc2);
10089 }
10090
10091 LogFlowFunc(("returns %Rrc\n", rc));
10092 return rc;
10093}
10094
10095/**
10096 * Queries the available regions of an image in the given VD container.
10097 *
10098 * @return VBox status code.
10099 * @retval VERR_VD_IMAGE_NOT_FOUND if image with specified number was not opened.
10100 * @retval VERR_NOT_SUPPORTED if the image backend doesn't support region lists.
10101 * @param pDisk Pointer to HDD container.
10102 * @param nImage Image number, counts from 0. 0 is always base image of container.
10103 * @param fFlags Combination of VD_REGION_LIST_F_* flags.
10104 * @param ppRegionList Where to store the pointer to the region list on success, must be freed
10105 * with VDRegionListFree().
10106 */
10107VBOXDDU_DECL(int) VDQueryRegions(PVBOXHDD pDisk, unsigned nImage, uint32_t fFlags,
10108 PPVDREGIONLIST ppRegionList)
10109{
10110 int rc = VINF_SUCCESS;
10111 int rc2;
10112 bool fLockRead = false;
10113
10114 LogFlowFunc(("pDisk=%#p nImage=%u fFlags=%#x ppRegionList=%#p\n",
10115 pDisk, nImage, fFlags, ppRegionList));
10116 do
10117 {
10118 /* sanity check */
10119 AssertPtrBreakStmt(pDisk, rc = VERR_INVALID_PARAMETER);
10120 AssertMsg(pDisk->u32Signature == VBOXHDDDISK_SIGNATURE, ("u32Signature=%08x\n", pDisk->u32Signature));
10121
10122 /* Check arguments. */
10123 AssertMsgBreakStmt(VALID_PTR(ppRegionList),
10124 ("ppRegionList=%#p\n", ppRegionList),
10125 rc = VERR_INVALID_PARAMETER);
10126
10127 rc2 = vdThreadStartRead(pDisk);
10128 AssertRC(rc2);
10129 fLockRead = true;
10130
10131 PVDIMAGE pImage = vdGetImageByNumber(pDisk, nImage);
10132 AssertPtrBreakStmt(pImage, rc = VERR_VD_IMAGE_NOT_FOUND);
10133
10134 if (pImage->Backend->pfnQueryRegions)
10135 {
10136 PCVDREGIONLIST pRegionList = NULL;
10137 rc = pImage->Backend->pfnQueryRegions(pImage->pBackendData, &pRegionList);
10138 if (RT_SUCCESS(rc))
10139 {
10140 rc = vdRegionListConv(pRegionList, fFlags, ppRegionList);
10141
10142 AssertPtr(pImage->Backend->pfnRegionListRelease);
10143 pImage->Backend->pfnRegionListRelease(pImage->pBackendData, pRegionList);
10144 }
10145 }
10146 else
10147 rc = VERR_NOT_SUPPORTED;
10148
10149 if (rc == VERR_NOT_SUPPORTED)
10150 {
10151 /*
10152 * Create a list with a single region containing the data gathered from the
10153 * image and sector size.
10154 */
10155 PVDREGIONLIST pRegionList = (PVDREGIONLIST)RTMemAllocZ(RT_UOFFSETOF(VDREGIONLIST, aRegions[1]));
10156 if (RT_LIKELY(pRegionList))
10157 {
10158 uint32_t cbSector = pImage->Backend->pfnGetSectorSize(pImage->pBackendData);
10159 uint64_t cbImage = pImage->Backend->pfnGetSize(pImage->pBackendData);
10160
10161 pRegionList->cRegions = 1;
10162 pRegionList->fFlags = fFlags;
10163
10164 /*
10165 * Single region starting at the first byte/block covering the whole image,
10166 * block size equals sector size and contains no metadata.
10167 */
10168 PVDREGIONDESC pRegion = &pRegionList->aRegions[0];
10169 pRegion->offRegion = 0; /* Disk start. */
10170 pRegion->cbBlock = cbSector;
10171 pRegion->enmDataForm = VDREGIONDATAFORM_RAW;
10172 pRegion->enmMetadataForm = VDREGIONMETADATAFORM_NONE;
10173 pRegion->cbData = cbSector;
10174 pRegion->cbMetadata = 0;
10175 if (fFlags & VD_REGION_LIST_F_LOC_SIZE_BLOCKS)
10176 pRegion->cRegionBlocksOrBytes = cbImage / cbSector;
10177 else
10178 pRegion->cRegionBlocksOrBytes = cbImage;
10179
10180 *ppRegionList = pRegionList;
10181 }
10182 else
10183 rc = VERR_NO_MEMORY;
10184 }
10185 } while (0);
10186
10187 if (RT_UNLIKELY(fLockRead))
10188 {
10189 rc2 = vdThreadFinishRead(pDisk);
10190 AssertRC(rc2);
10191 }
10192
10193 LogFlowFunc((": %Rrc\n", rc));
10194 return rc;
10195}
10196
10197/**
10198 * Frees a region list previously queried with VDQueryRegions().
10199 *
10200 * @return nothing.
10201 * @param pRegionList The region list to free.
10202 */
10203VBOXDDU_DECL(void) VDRegionListFree(PVDREGIONLIST pRegionList)
10204{
10205 RTMemFree(pRegionList);
10206}
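
/*
 * Illustrative usage sketch (comment only, not compiled): enumerating the
 * regions of the base image and releasing the list again. Passing fFlags = 0
 * requests byte based sizes; VD_REGION_LIST_F_LOC_SIZE_BLOCKS would switch
 * cRegionBlocksOrBytes to block units, as handled above.
 *
 *    PVDREGIONLIST pRegionList = NULL;
 *    int rc = VDQueryRegions(pDisk, 0, 0, &pRegionList);
 *    if (RT_SUCCESS(rc))
 *    {
 *        uint64_t cbTotal = 0;
 *        for (uint32_t i = 0; i < pRegionList->cRegions; i++)
 *            cbTotal += pRegionList->aRegions[i].cRegionBlocksOrBytes;
 *        VDRegionListFree(pRegionList);
 *    }
 */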
10207
10208/**
10209 * Get version of image in HDD container.
10210 *
10211 * @returns VBox status code.
10212 * @returns VERR_VD_IMAGE_NOT_FOUND if image with specified number was not opened.
10213 * @param pDisk Pointer to HDD container.
10214 * @param nImage Image number, counts from 0. 0 is always base image of container.
10215 * @param puVersion Where to store the image version.
10216 */
10217VBOXDDU_DECL(int) VDGetVersion(PVBOXHDD pDisk, unsigned nImage,
10218 unsigned *puVersion)
10219{
10220 int rc = VINF_SUCCESS;
10221 int rc2;
10222 bool fLockRead = false;
10223
10224 LogFlowFunc(("pDisk=%#p nImage=%u puVersion=%#p\n",
10225 pDisk, nImage, puVersion));
10226 do
10227 {
10228 /* sanity check */
10229 AssertPtrBreakStmt(pDisk, rc = VERR_INVALID_PARAMETER);
10230 AssertMsg(pDisk->u32Signature == VBOXHDDDISK_SIGNATURE, ("u32Signature=%08x\n", pDisk->u32Signature));
10231
10232 /* Check arguments. */
10233 AssertMsgBreakStmt(VALID_PTR(puVersion),
10234 ("puVersion=%#p\n", puVersion),
10235 rc = VERR_INVALID_PARAMETER);
10236
10237 rc2 = vdThreadStartRead(pDisk);
10238 AssertRC(rc2);
10239 fLockRead = true;
10240
10241 PVDIMAGE pImage = vdGetImageByNumber(pDisk, nImage);
10242 AssertPtrBreakStmt(pImage, rc = VERR_VD_IMAGE_NOT_FOUND);
10243
10244 *puVersion = pImage->Backend->pfnGetVersion(pImage->pBackendData);
10245 } while (0);
10246
10247 if (RT_UNLIKELY(fLockRead))
10248 {
10249 rc2 = vdThreadFinishRead(pDisk);
10250 AssertRC(rc2);
10251 }
10252
10253 LogFlowFunc(("returns %Rrc uVersion=%#x\n", rc, *puVersion));
10254 return rc;
10255}
10256
10257/**
10258 * List the capabilities of image backend in HDD container.
10259 *
10260 * @returns VBox status code.
10261 * @retval VERR_VD_IMAGE_NOT_FOUND if image with specified number was not opened.
10262 * @param pDisk Pointer to the HDD container.
10263 * @param nImage Image number, counts from 0. 0 is always base image of container.
10264 * @param pBackendInfo Where to store the backend information.
10265 */
10266VBOXDDU_DECL(int) VDBackendInfoSingle(PVBOXHDD pDisk, unsigned nImage,
10267 PVDBACKENDINFO pBackendInfo)
10268{
10269 int rc = VINF_SUCCESS;
10270 int rc2;
10271 bool fLockRead = false;
10272
10273 LogFlowFunc(("pDisk=%#p nImage=%u pBackendInfo=%#p\n",
10274 pDisk, nImage, pBackendInfo));
10275 do
10276 {
10277 /* sanity check */
10278 AssertPtrBreakStmt(pDisk, rc = VERR_INVALID_PARAMETER);
10279 AssertMsg(pDisk->u32Signature == VBOXHDDDISK_SIGNATURE, ("u32Signature=%08x\n", pDisk->u32Signature));
10280
10281 /* Check arguments. */
10282 AssertMsgBreakStmt(VALID_PTR(pBackendInfo),
10283 ("pBackendInfo=%#p\n", pBackendInfo),
10284 rc = VERR_INVALID_PARAMETER);
10285
10286 rc2 = vdThreadStartRead(pDisk);
10287 AssertRC(rc2);
10288 fLockRead = true;
10289
10290 PVDIMAGE pImage = vdGetImageByNumber(pDisk, nImage);
10291 AssertPtrBreakStmt(pImage, rc = VERR_VD_IMAGE_NOT_FOUND);
10292
10293 pBackendInfo->pszBackend = pImage->Backend->pszBackendName;
10294 pBackendInfo->uBackendCaps = pImage->Backend->uBackendCaps;
10295 pBackendInfo->paFileExtensions = pImage->Backend->paFileExtensions;
10296 pBackendInfo->paConfigInfo = pImage->Backend->paConfigInfo;
10297 } while (0);
10298
10299 if (RT_UNLIKELY(fLockRead))
10300 {
10301 rc2 = vdThreadFinishRead(pDisk);
10302 AssertRC(rc2);
10303 }
10304
10305 LogFlowFunc(("returns %Rrc\n", rc));
10306 return rc;
10307}
10308
10309/**
10310 * Get flags of image in HDD container.
10311 *
10312 * @returns VBox status code.
10313 * @returns VERR_VD_IMAGE_NOT_FOUND if image with specified number was not opened.
10314 * @param pDisk Pointer to HDD container.
10315 * @param nImage Image number, counts from 0. 0 is always base image of container.
10316 * @param puImageFlags Where to store the image flags.
10317 */
10318VBOXDDU_DECL(int) VDGetImageFlags(PVBOXHDD pDisk, unsigned nImage,
10319 unsigned *puImageFlags)
10320{
10321 int rc = VINF_SUCCESS;
10322 int rc2;
10323 bool fLockRead = false;
10324
10325 LogFlowFunc(("pDisk=%#p nImage=%u puImageFlags=%#p\n",
10326 pDisk, nImage, puImageFlags));
10327 do
10328 {
10329 /* sanity check */
10330 AssertPtrBreakStmt(pDisk, rc = VERR_INVALID_PARAMETER);
10331 AssertMsg(pDisk->u32Signature == VBOXHDDDISK_SIGNATURE, ("u32Signature=%08x\n", pDisk->u32Signature));
10332
10333 /* Check arguments. */
10334 AssertMsgBreakStmt(VALID_PTR(puImageFlags),
10335 ("puImageFlags=%#p\n", puImageFlags),
10336 rc = VERR_INVALID_PARAMETER);
10337
10338 rc2 = vdThreadStartRead(pDisk);
10339 AssertRC(rc2);
10340 fLockRead = true;
10341
10342 PVDIMAGE pImage = vdGetImageByNumber(pDisk, nImage);
10343 AssertPtrBreakStmt(pImage, rc = VERR_VD_IMAGE_NOT_FOUND);
10344
10345 *puImageFlags = pImage->uImageFlags;
10346 } while (0);
10347
10348 if (RT_UNLIKELY(fLockRead))
10349 {
10350 rc2 = vdThreadFinishRead(pDisk);
10351 AssertRC(rc2);
10352 }
10353
10354 LogFlowFunc(("returns %Rrc uImageFlags=%#x\n", rc, *puImageFlags));
10355 return rc;
10356}
10357
10358/**
10359 * Get open flags of image in HDD container.
10360 *
10361 * @returns VBox status code.
10362 * @returns VERR_VD_IMAGE_NOT_FOUND if image with specified number was not opened.
10363 * @param pDisk Pointer to HDD container.
10364 * @param nImage Image number, counts from 0. 0 is always base image of container.
10365 * @param puOpenFlags Where to store the image open flags.
10366 */
10367VBOXDDU_DECL(int) VDGetOpenFlags(PVBOXHDD pDisk, unsigned nImage,
10368 unsigned *puOpenFlags)
10369{
10370 int rc = VINF_SUCCESS;
10371 int rc2;
10372 bool fLockRead = false;
10373
10374 LogFlowFunc(("pDisk=%#p nImage=%u puOpenFlags=%#p\n",
10375 pDisk, nImage, puOpenFlags));
10376 do
10377 {
10378 /* sanity check */
10379 AssertPtrBreakStmt(pDisk, rc = VERR_INVALID_PARAMETER);
10380 AssertMsg(pDisk->u32Signature == VBOXHDDDISK_SIGNATURE, ("u32Signature=%08x\n", pDisk->u32Signature));
10381
10382 /* Check arguments. */
10383 AssertMsgBreakStmt(VALID_PTR(puOpenFlags),
10384 ("puOpenFlags=%#p\n", puOpenFlags),
10385 rc = VERR_INVALID_PARAMETER);
10386
10387 rc2 = vdThreadStartRead(pDisk);
10388 AssertRC(rc2);
10389 fLockRead = true;
10390
10391 PVDIMAGE pImage = vdGetImageByNumber(pDisk, nImage);
10392 AssertPtrBreakStmt(pImage, rc = VERR_VD_IMAGE_NOT_FOUND);
10393
10394 *puOpenFlags = pImage->Backend->pfnGetOpenFlags(pImage->pBackendData);
10395 } while (0);
10396
10397 if (RT_UNLIKELY(fLockRead))
10398 {
10399 rc2 = vdThreadFinishRead(pDisk);
10400 AssertRC(rc2);
10401 }
10402
10403 LogFlowFunc(("returns %Rrc uOpenFlags=%#x\n", rc, *puOpenFlags));
10404 return rc;
10405}
10406
10407/**
10408 * Set open flags of image in HDD container.
10409 * This operation may cause file locking changes and/or files being reopened.
10410 * Note that in case of unrecoverable error all images in HDD container will be closed.
10411 *
10412 * @returns VBox status code.
10413 * @returns VERR_VD_IMAGE_NOT_FOUND if image with specified number was not opened.
10414 * @param pDisk Pointer to HDD container.
10415 * @param nImage Image number, counts from 0. 0 is always base image of container.
10416 * @param uOpenFlags Image file open mode, see VD_OPEN_FLAGS_* constants.
10417 */
10418VBOXDDU_DECL(int) VDSetOpenFlags(PVBOXHDD pDisk, unsigned nImage,
10419 unsigned uOpenFlags)
10420{
10421 int rc;
10422 int rc2;
10423 bool fLockWrite = false;
10424
10425    LogFlowFunc(("pDisk=%#p nImage=%u uOpenFlags=%#x\n", pDisk, nImage, uOpenFlags));
10426 do
10427 {
10428 /* sanity check */
10429 AssertPtrBreakStmt(pDisk, rc = VERR_INVALID_PARAMETER);
10430 AssertMsg(pDisk->u32Signature == VBOXHDDDISK_SIGNATURE, ("u32Signature=%08x\n", pDisk->u32Signature));
10431
10432 /* Check arguments. */
10433 AssertMsgBreakStmt((uOpenFlags & ~VD_OPEN_FLAGS_MASK) == 0,
10434 ("uOpenFlags=%#x\n", uOpenFlags),
10435 rc = VERR_INVALID_PARAMETER);
10436
10437 rc2 = vdThreadStartWrite(pDisk);
10438 AssertRC(rc2);
10439 fLockWrite = true;
10440
10441 /* Destroy any discard state because the image might be changed to readonly mode. */
10442 rc = vdDiscardStateDestroy(pDisk);
10443 if (RT_FAILURE(rc))
10444 break;
10445
10446 PVDIMAGE pImage = vdGetImageByNumber(pDisk, nImage);
10447 AssertPtrBreakStmt(pImage, rc = VERR_VD_IMAGE_NOT_FOUND);
10448
10449 rc = pImage->Backend->pfnSetOpenFlags(pImage->pBackendData,
10450 uOpenFlags & ~(VD_OPEN_FLAGS_HONOR_SAME | VD_OPEN_FLAGS_IGNORE_FLUSH | VD_OPEN_FLAGS_INFORM_ABOUT_ZERO_BLOCKS));
10451 if (RT_SUCCESS(rc))
10452 pImage->uOpenFlags = uOpenFlags & (VD_OPEN_FLAGS_HONOR_SAME | VD_OPEN_FLAGS_DISCARD | VD_OPEN_FLAGS_IGNORE_FLUSH | VD_OPEN_FLAGS_INFORM_ABOUT_ZERO_BLOCKS);
10453 } while (0);
10454
10455 if (RT_UNLIKELY(fLockWrite))
10456 {
10457 rc2 = vdThreadFinishWrite(pDisk);
10458 AssertRC(rc2);
10459 }
10460
10461 LogFlowFunc(("returns %Rrc\n", rc));
10462 return rc;
10463}
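
/*
 * Usage sketch for VDSetOpenFlags, assuming pDisk is a valid HDD container:
 * reopen the base image writable by clearing the read-only flag.  Keep in
 * mind that the backend may reopen the backing file for this and that an
 * unrecoverable error closes all images of the container.
 *
 *     unsigned fOpenFlags = 0;
 *     int rc = VDGetOpenFlags(pDisk, 0, &fOpenFlags);
 *     if (RT_SUCCESS(rc) && (fOpenFlags & VD_OPEN_FLAGS_READONLY))
 *         rc = VDSetOpenFlags(pDisk, 0, fOpenFlags & ~VD_OPEN_FLAGS_READONLY);
 */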
10464
10465/**
10466 * Get base filename of image in HDD container. Some image formats use
10467 * other filenames as well, so don't use this for anything but informational
10468 * purposes.
10469 *
10470 * @returns VBox status code.
10471 * @returns VERR_VD_IMAGE_NOT_FOUND if image with specified number was not opened.
10472 * @returns VERR_BUFFER_OVERFLOW if pszFilename buffer too small to hold filename.
10473 * @param pDisk Pointer to HDD container.
10474 * @param nImage Image number, counts from 0. 0 is always base image of container.
10475 * @param pszFilename Where to store the image file name.
10476 * @param cbFilename Size of buffer pszFilename points to.
10477 */
10478VBOXDDU_DECL(int) VDGetFilename(PVBOXHDD pDisk, unsigned nImage,
10479 char *pszFilename, unsigned cbFilename)
10480{
10481 int rc;
10482 int rc2;
10483 bool fLockRead = false;
10484
10485 LogFlowFunc(("pDisk=%#p nImage=%u pszFilename=%#p cbFilename=%u\n",
10486 pDisk, nImage, pszFilename, cbFilename));
10487 do
10488 {
10489 /* sanity check */
10490 AssertPtrBreakStmt(pDisk, rc = VERR_INVALID_PARAMETER);
10491 AssertMsg(pDisk->u32Signature == VBOXHDDDISK_SIGNATURE, ("u32Signature=%08x\n", pDisk->u32Signature));
10492
10493 /* Check arguments. */
10494 AssertMsgBreakStmt(VALID_PTR(pszFilename) && *pszFilename,
10495 ("pszFilename=%#p \"%s\"\n", pszFilename, pszFilename),
10496 rc = VERR_INVALID_PARAMETER);
10497 AssertMsgBreakStmt(cbFilename,
10498 ("cbFilename=%u\n", cbFilename),
10499 rc = VERR_INVALID_PARAMETER);
10500
10501 rc2 = vdThreadStartRead(pDisk);
10502 AssertRC(rc2);
10503 fLockRead = true;
10504
10505 PVDIMAGE pImage = vdGetImageByNumber(pDisk, nImage);
10506 AssertPtrBreakStmt(pImage, rc = VERR_VD_IMAGE_NOT_FOUND);
10507
10508 size_t cb = strlen(pImage->pszFilename);
10509 if (cb < cbFilename)
10510 {
10511 strcpy(pszFilename, pImage->pszFilename);
10512 rc = VINF_SUCCESS;
10513 }
10514 else
10515 {
10516 strncpy(pszFilename, pImage->pszFilename, cbFilename - 1);
10517 pszFilename[cbFilename - 1] = '\0';
10518 rc = VERR_BUFFER_OVERFLOW;
10519 }
10520 } while (0);
10521
10522 if (RT_UNLIKELY(fLockRead))
10523 {
10524 rc2 = vdThreadFinishRead(pDisk);
10525 AssertRC(rc2);
10526 }
10527
10528 LogFlowFunc(("returns %Rrc, pszFilename=\"%s\"\n", rc, pszFilename));
10529 return rc;
10530}
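
/*
 * Usage sketch for VDGetFilename, assuming pDisk is a valid HDD container:
 * fetch the base image file name into a fixed-size buffer.  On
 * VERR_BUFFER_OVERFLOW the buffer still holds a truncated, terminated string,
 * so it remains usable for informational output.
 *
 *     char szFilename[RTPATH_MAX];
 *     int rc = VDGetFilename(pDisk, 0, szFilename, sizeof(szFilename));
 *     if (RT_SUCCESS(rc) || rc == VERR_BUFFER_OVERFLOW)
 *         LogRel(("Base image file: %s\n", szFilename));
 */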
10531
10532/**
10533 * Get the comment line of image in HDD container.
10534 *
10535 * @returns VBox status code.
10536 * @returns VERR_VD_IMAGE_NOT_FOUND if image with specified number was not opened.
10537 * @returns VERR_BUFFER_OVERFLOW if pszComment buffer too small to hold comment text.
10538 * @param pDisk Pointer to HDD container.
10539 * @param nImage Image number, counts from 0. 0 is always base image of container.
10540 * @param pszComment Where to store the comment string of image. Must not be NULL.
10541 * @param cbComment The size of pszComment buffer. Must not be 0.
10542 */
10543VBOXDDU_DECL(int) VDGetComment(PVBOXHDD pDisk, unsigned nImage,
10544 char *pszComment, unsigned cbComment)
10545{
10546 int rc;
10547 int rc2;
10548 bool fLockRead = false;
10549
10550 LogFlowFunc(("pDisk=%#p nImage=%u pszComment=%#p cbComment=%u\n",
10551 pDisk, nImage, pszComment, cbComment));
10552 do
10553 {
10554 /* sanity check */
10555 AssertPtrBreakStmt(pDisk, rc = VERR_INVALID_PARAMETER);
10556 AssertMsg(pDisk->u32Signature == VBOXHDDDISK_SIGNATURE, ("u32Signature=%08x\n", pDisk->u32Signature));
10557
10558 /* Check arguments. */
10559 AssertMsgBreakStmt(VALID_PTR(pszComment),
10560 ("pszComment=%#p \"%s\"\n", pszComment, pszComment),
10561 rc = VERR_INVALID_PARAMETER);
10562 AssertMsgBreakStmt(cbComment,
10563 ("cbComment=%u\n", cbComment),
10564 rc = VERR_INVALID_PARAMETER);
10565
10566 rc2 = vdThreadStartRead(pDisk);
10567 AssertRC(rc2);
10568 fLockRead = true;
10569
10570 PVDIMAGE pImage = vdGetImageByNumber(pDisk, nImage);
10571 AssertPtrBreakStmt(pImage, rc = VERR_VD_IMAGE_NOT_FOUND);
10572
10573 rc = pImage->Backend->pfnGetComment(pImage->pBackendData, pszComment,
10574 cbComment);
10575 } while (0);
10576
10577 if (RT_UNLIKELY(fLockRead))
10578 {
10579 rc2 = vdThreadFinishRead(pDisk);
10580 AssertRC(rc2);
10581 }
10582
10583 LogFlowFunc(("returns %Rrc, pszComment=\"%s\"\n", rc, pszComment));
10584 return rc;
10585}
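
/*
 * Usage sketch for VDGetComment, assuming pDisk is a valid HDD container:
 * read the comment of the base image into a caller provided buffer.
 *
 *     char szComment[256];
 *     int rc = VDGetComment(pDisk, 0, szComment, sizeof(szComment));
 *     if (RT_SUCCESS(rc))
 *         LogRel(("Base image comment: %s\n", szComment));
 */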
10586
10587/**
10588 * Changes the comment line of image in HDD container.
10589 *
10590 * @returns VBox status code.
10591 * @returns VERR_VD_IMAGE_NOT_FOUND if image with specified number was not opened.
10592 * @param pDisk Pointer to HDD container.
10593 * @param nImage Image number, counts from 0. 0 is always base image of container.
10594 * @param pszComment New comment string (UTF-8). NULL is allowed to reset the comment.
10595 */
10596VBOXDDU_DECL(int) VDSetComment(PVBOXHDD pDisk, unsigned nImage,
10597 const char *pszComment)
10598{
10599 int rc;
10600 int rc2;
10601 bool fLockWrite = false;
10602
10603 LogFlowFunc(("pDisk=%#p nImage=%u pszComment=%#p \"%s\"\n",
10604 pDisk, nImage, pszComment, pszComment));
10605 do
10606 {
10607 /* sanity check */
10608 AssertPtrBreakStmt(pDisk, rc = VERR_INVALID_PARAMETER);
10609 AssertMsg(pDisk->u32Signature == VBOXHDDDISK_SIGNATURE, ("u32Signature=%08x\n", pDisk->u32Signature));
10610
10611 /* Check arguments. */
10612 AssertMsgBreakStmt(VALID_PTR(pszComment) || pszComment == NULL,
10613 ("pszComment=%#p \"%s\"\n", pszComment, pszComment),
10614 rc = VERR_INVALID_PARAMETER);
10615
10616 rc2 = vdThreadStartWrite(pDisk);
10617 AssertRC(rc2);
10618 fLockWrite = true;
10619
10620 PVDIMAGE pImage = vdGetImageByNumber(pDisk, nImage);
10621 AssertPtrBreakStmt(pImage, rc = VERR_VD_IMAGE_NOT_FOUND);
10622
10623 rc = pImage->Backend->pfnSetComment(pImage->pBackendData, pszComment);
10624 } while (0);
10625
10626 if (RT_UNLIKELY(fLockWrite))
10627 {
10628 rc2 = vdThreadFinishWrite(pDisk);
10629 AssertRC(rc2);
10630 }
10631
10632 LogFlowFunc(("returns %Rrc\n", rc));
10633 return rc;
10634}
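
/*
 * Usage sketch for VDSetComment, assuming pDisk is a valid HDD container:
 * tag the base image with a short description; passing NULL instead would
 * reset the comment again.  Backends which cannot store comments fail here,
 * so the status code should be checked.
 *
 *     int rc = VDSetComment(pDisk, 0, "Imported from legacy server");
 *     if (RT_FAILURE(rc))
 *         LogRel(("Setting the image comment failed: %Rrc\n", rc));
 */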
10635
10636
10637/**
10638 * Get UUID of image in HDD container.
10639 *
10640 * @returns VBox status code.
10641 * @returns VERR_VD_IMAGE_NOT_FOUND if image with specified number was not opened.
10642 * @param pDisk Pointer to HDD container.
10643 * @param nImage Image number, counts from 0. 0 is always base image of container.
10644 * @param pUuid Where to store the image creation UUID.
10645 */
10646VBOXDDU_DECL(int) VDGetUuid(PVBOXHDD pDisk, unsigned nImage, PRTUUID pUuid)
10647{
10648 int rc;
10649 int rc2;
10650 bool fLockRead = false;
10651
10652 LogFlowFunc(("pDisk=%#p nImage=%u pUuid=%#p\n", pDisk, nImage, pUuid));
10653 do
10654 {
10655 /* sanity check */
10656 AssertPtrBreakStmt(pDisk, rc = VERR_INVALID_PARAMETER);
10657 AssertMsg(pDisk->u32Signature == VBOXHDDDISK_SIGNATURE, ("u32Signature=%08x\n", pDisk->u32Signature));
10658
10659 /* Check arguments. */
10660 AssertMsgBreakStmt(VALID_PTR(pUuid),
10661 ("pUuid=%#p\n", pUuid),
10662 rc = VERR_INVALID_PARAMETER);
10663
10664 rc2 = vdThreadStartRead(pDisk);
10665 AssertRC(rc2);
10666 fLockRead = true;
10667
10668 PVDIMAGE pImage = vdGetImageByNumber(pDisk, nImage);
10669 AssertPtrBreakStmt(pImage, rc = VERR_VD_IMAGE_NOT_FOUND);
10670
10671 rc = pImage->Backend->pfnGetUuid(pImage->pBackendData, pUuid);
10672 } while (0);
10673
10674 if (RT_UNLIKELY(fLockRead))
10675 {
10676 rc2 = vdThreadFinishRead(pDisk);
10677 AssertRC(rc2);
10678 }
10679
10680 LogFlowFunc(("returns %Rrc, Uuid={%RTuuid}\n", rc, pUuid));
10681 return rc;
10682}
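
/*
 * Usage sketch for VDGetUuid, assuming pDisk is a valid HDD container: fetch
 * the creation UUID of the base image and render it with RTUuidToStr for
 * informational output.
 *
 *     RTUUID Uuid;
 *     char   szUuid[RTUUID_STR_LENGTH];
 *     int rc = VDGetUuid(pDisk, 0, &Uuid);
 *     if (RT_SUCCESS(rc))
 *     {
 *         RTUuidToStr(&Uuid, szUuid, sizeof(szUuid));
 *         LogRel(("Base image UUID: %s\n", szUuid));
 *     }
 */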
10683
10684/**
10685 * Set the image's UUID. Should not be used by normal applications.
10686 *
10687 * @returns VBox status code.
10688 * @returns VERR_VD_IMAGE_NOT_FOUND if image with specified number was not opened.
10689 * @param pDisk Pointer to HDD container.
10690 * @param nImage Image number, counts from 0. 0 is always base image of container.
10691 * @param pUuid New UUID of the image. If NULL, a new UUID is created.
10692 */
10693VBOXDDU_DECL(int) VDSetUuid(PVBOXHDD pDisk, unsigned nImage, PCRTUUID pUuid)
10694{
10695 int rc;
10696 int rc2;
10697 bool fLockWrite = false;
10698
10699 LogFlowFunc(("pDisk=%#p nImage=%u pUuid=%#p {%RTuuid}\n",
10700 pDisk, nImage, pUuid, pUuid));
10701 do
10702 {
10703 /* sanity check */
10704 AssertPtrBreakStmt(pDisk, rc = VERR_INVALID_PARAMETER);
10705 AssertMsg(pDisk->u32Signature == VBOXHDDDISK_SIGNATURE, ("u32Signature=%08x\n", pDisk->u32Signature));
10706
10707 AssertMsgBreakStmt(VALID_PTR(pUuid) || pUuid == NULL,
10708 ("pUuid=%#p\n", pUuid),
10709 rc = VERR_INVALID_PARAMETER);
10710
10711 rc2 = vdThreadStartWrite(pDisk);
10712 AssertRC(rc2);
10713 fLockWrite = true;
10714
10715 PVDIMAGE pImage = vdGetImageByNumber(pDisk, nImage);
10716 AssertPtrBreakStmt(pImage, rc = VERR_VD_IMAGE_NOT_FOUND);
10717
10718 RTUUID Uuid;
10719 if (!pUuid)
10720 {
10721 RTUuidCreate(&Uuid);
10722 pUuid = &Uuid;
10723 }
10724 rc = pImage->Backend->pfnSetUuid(pImage->pBackendData, pUuid);
10725 } while (0);
10726
10727 if (RT_UNLIKELY(fLockWrite))
10728 {
10729 rc2 = vdThreadFinishWrite(pDisk);
10730 AssertRC(rc2);
10731 }
10732
10733 LogFlowFunc(("returns %Rrc\n", rc));
10734 return rc;
10735}
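
/*
 * Usage sketch for VDSetUuid, assuming pDisk is a valid HDD container and the
 * caller is a maintenance tool: give a copied base image a fresh identity by
 * letting the API generate a new UUID.  Changing the UUID can break existing
 * parent/child references, which is why normal applications should not do this.
 *
 *     int rc = VDSetUuid(pDisk, 0, NULL); // NULL = create a new UUID
 *     AssertRC(rc);
 */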
10736
10737/**
10738 * Get last modification UUID of image in HDD container.
10739 *
10740 * @returns VBox status code.
10741 * @returns VERR_VD_IMAGE_NOT_FOUND if image with specified number was not opened.
10742 * @param pDisk Pointer to HDD container.
10743 * @param nImage Image number, counts from 0. 0 is always base image of container.
10744 * @param pUuid Where to store the image modification UUID.
10745 */
10746VBOXDDU_DECL(int) VDGetModificationUuid(PVBOXHDD pDisk, unsigned nImage, PRTUUID pUuid)
10747{
10748 int rc = VINF_SUCCESS;
10749 int rc2;
10750 bool fLockRead = false;
10751
10752 LogFlowFunc(("pDisk=%#p nImage=%u pUuid=%#p\n", pDisk, nImage, pUuid));
10753 do
10754 {
10755 /* sanity check */
10756 AssertPtrBreakStmt(pDisk, rc = VERR_INVALID_PARAMETER);
10757 AssertMsg(pDisk->u32Signature == VBOXHDDDISK_SIGNATURE, ("u32Signature=%08x\n", pDisk->u32Signature));
10758
10759 /* Check arguments. */
10760 AssertMsgBreakStmt(VALID_PTR(pUuid),
10761 ("pUuid=%#p\n", pUuid),
10762 rc = VERR_INVALID_PARAMETER);
10763
10764 rc2 = vdThreadStartRead(pDisk);
10765 AssertRC(rc2);
10766 fLockRead = true;
10767
10768 PVDIMAGE pImage = vdGetImageByNumber(pDisk, nImage);
10769 AssertPtrBreakStmt(pImage, rc = VERR_VD_IMAGE_NOT_FOUND);
10770
10771 rc = pImage->Backend->pfnGetModificationUuid(pImage->pBackendData,
10772 pUuid);
10773 } while (0);
10774
10775 if (RT_UNLIKELY(fLockRead))
10776 {
10777 rc2 = vdThreadFinishRead(pDisk);
10778 AssertRC(rc2);
10779 }
10780
10781 LogFlowFunc(("returns %Rrc, Uuid={%RTuuid}\n", rc, pUuid));
10782 return rc;
10783}
10784
10785/**
10786 * Set the image's last modification UUID. Should not be used by normal applications.
10787 *
10788 * @returns VBox status code.
10789 * @returns VERR_VD_IMAGE_NOT_FOUND if image with specified number was not opened.
10790 * @param pDisk Pointer to HDD container.
10791 * @param nImage Image number, counts from 0. 0 is always base image of container.
10792 * @param pUuid New modification UUID of the image. If NULL, a new UUID is created.
10793 */
10794VBOXDDU_DECL(int) VDSetModificationUuid(PVBOXHDD pDisk, unsigned nImage, PCRTUUID pUuid)
10795{
10796 int rc;
10797 int rc2;
10798 bool fLockWrite = false;
10799
10800 LogFlowFunc(("pDisk=%#p nImage=%u pUuid=%#p {%RTuuid}\n",
10801 pDisk, nImage, pUuid, pUuid));
10802 do
10803 {
10804 /* sanity check */
10805 AssertPtrBreakStmt(pDisk, rc = VERR_INVALID_PARAMETER);
10806 AssertMsg(pDisk->u32Signature == VBOXHDDDISK_SIGNATURE, ("u32Signature=%08x\n", pDisk->u32Signature));
10807
10808 /* Check arguments. */
10809 AssertMsgBreakStmt(VALID_PTR(pUuid) || pUuid == NULL,
10810 ("pUuid=%#p\n", pUuid),
10811 rc = VERR_INVALID_PARAMETER);
10812
10813 rc2 = vdThreadStartWrite(pDisk);
10814 AssertRC(rc2);
10815 fLockWrite = true;
10816
10817 PVDIMAGE pImage = vdGetImageByNumber(pDisk, nImage);
10818 AssertPtrBreakStmt(pImage, rc = VERR_VD_IMAGE_NOT_FOUND);
10819
10820 RTUUID Uuid;
10821 if (!pUuid)
10822 {
10823 RTUuidCreate(&Uuid);
10824 pUuid = &Uuid;
10825 }
10826 rc = pImage->Backend->pfnSetModificationUuid(pImage->pBackendData,
10827 pUuid);
10828 } while (0);
10829
10830 if (RT_UNLIKELY(fLockWrite))
10831 {
10832 rc2 = vdThreadFinishWrite(pDisk);
10833 AssertRC(rc2);
10834 }
10835
10836 LogFlowFunc(("returns %Rrc\n", rc));
10837 return rc;
10838}
10839
10840/**
10841 * Get parent UUID of image in HDD container.
10842 *
10843 * @returns VBox status code.
10844 * @returns VERR_VD_IMAGE_NOT_FOUND if image with specified number was not opened.
10845 * @param pDisk Pointer to HDD container.
10846 * @param nImage Image number, counts from 0. 0 is always base image of container.
10847 * @param pUuid Where to store the parent image UUID.
10848 */
10849VBOXDDU_DECL(int) VDGetParentUuid(PVBOXHDD pDisk, unsigned nImage,
10850 PRTUUID pUuid)
10851{
10852 int rc = VINF_SUCCESS;
10853 int rc2;
10854 bool fLockRead = false;
10855
10856 LogFlowFunc(("pDisk=%#p nImage=%u pUuid=%#p\n", pDisk, nImage, pUuid));
10857 do
10858 {
10859 /* sanity check */
10860 AssertPtrBreakStmt(pDisk, rc = VERR_INVALID_PARAMETER);
10861 AssertMsg(pDisk->u32Signature == VBOXHDDDISK_SIGNATURE, ("u32Signature=%08x\n", pDisk->u32Signature));
10862
10863 /* Check arguments. */
10864 AssertMsgBreakStmt(VALID_PTR(pUuid),
10865 ("pUuid=%#p\n", pUuid),
10866 rc = VERR_INVALID_PARAMETER);
10867
10868 rc2 = vdThreadStartRead(pDisk);
10869 AssertRC(rc2);
10870 fLockRead = true;
10871
10872 PVDIMAGE pImage = vdGetImageByNumber(pDisk, nImage);
10873 AssertPtrBreakStmt(pImage, rc = VERR_VD_IMAGE_NOT_FOUND);
10874
10875 rc = pImage->Backend->pfnGetParentUuid(pImage->pBackendData, pUuid);
10876 } while (0);
10877
10878 if (RT_UNLIKELY(fLockRead))
10879 {
10880 rc2 = vdThreadFinishRead(pDisk);
10881 AssertRC(rc2);
10882 }
10883
10884 LogFlowFunc(("returns %Rrc, Uuid={%RTuuid}\n", rc, pUuid));
10885 return rc;
10886}
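
/*
 * Usage sketch for VDGetParentUuid, assuming pDisk is a valid HDD container
 * with a base image (0) and one differencing image (1) opened: verify that
 * the differencing image really references the base image by comparing its
 * parent UUID against the base image UUID.
 *
 *     RTUUID UuidBase, UuidParent;
 *     int rc = VDGetUuid(pDisk, 0, &UuidBase);
 *     if (RT_SUCCESS(rc))
 *         rc = VDGetParentUuid(pDisk, 1, &UuidParent);
 *     if (RT_SUCCESS(rc) && RTUuidCompare(&UuidBase, &UuidParent) != 0)
 *         LogRel(("Image chain is inconsistent\n"));
 */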
10887
10888/**
10889 * Set the image's parent UUID. Should not be used by normal applications.
10890 *
10891 * @returns VBox status code.
10892 * @param pDisk Pointer to HDD container.
10893 * @param nImage Image number, counts from 0. 0 is always base image of container.
10894 * @param pUuid New parent UUID of the image. If NULL, a new UUID is created.
10895 */
10896VBOXDDU_DECL(int) VDSetParentUuid(PVBOXHDD pDisk, unsigned nImage,
10897 PCRTUUID pUuid)
10898{
10899 int rc;
10900 int rc2;
10901 bool fLockWrite = false;
10902
10903 LogFlowFunc(("pDisk=%#p nImage=%u pUuid=%#p {%RTuuid}\n",
10904 pDisk, nImage, pUuid, pUuid));
10905 do
10906 {
10907 /* sanity check */
10908 AssertPtrBreakStmt(pDisk, rc = VERR_INVALID_PARAMETER);
10909 AssertMsg(pDisk->u32Signature == VBOXHDDDISK_SIGNATURE, ("u32Signature=%08x\n", pDisk->u32Signature));
10910
10911 /* Check arguments. */
10912 AssertMsgBreakStmt(VALID_PTR(pUuid) || pUuid == NULL,
10913 ("pUuid=%#p\n", pUuid),
10914 rc = VERR_INVALID_PARAMETER);
10915
10916 rc2 = vdThreadStartWrite(pDisk);
10917 AssertRC(rc2);
10918 fLockWrite = true;
10919
10920 PVDIMAGE pImage = vdGetImageByNumber(pDisk, nImage);
10921 AssertPtrBreakStmt(pImage, rc = VERR_VD_IMAGE_NOT_FOUND);
10922
10923 RTUUID Uuid;
10924 if (!pUuid)
10925 {
10926 RTUuidCreate(&Uuid);
10927 pUuid = &Uuid;
10928 }
10929 rc = pImage->Backend->pfnSetParentUuid(pImage->pBackendData, pUuid);
10930 } while (0);
10931
10932 if (RT_UNLIKELY(fLockWrite))
10933 {
10934 rc2 = vdThreadFinishWrite(pDisk);
10935 AssertRC(rc2);
10936 }
10937
10938 LogFlowFunc(("returns %Rrc\n", rc));
10939 return rc;
10940}
10941
10942
10943/**
10944 * Debug helper - dumps all opened images in HDD container into the log file.
10945 *
10946 * @param pDisk Pointer to HDD container.
10947 */
10948VBOXDDU_DECL(void) VDDumpImages(PVBOXHDD pDisk)
10949{
10950 int rc2;
10951 bool fLockRead = false;
10952
10953 do
10954 {
10955 /* sanity check */
10956 AssertPtrBreak(pDisk);
10957 AssertMsg(pDisk->u32Signature == VBOXHDDDISK_SIGNATURE, ("u32Signature=%08x\n", pDisk->u32Signature));
10958
10959 if (pDisk->pInterfaceError && !VALID_PTR(pDisk->pInterfaceError->pfnMessage))
10960 pDisk->pInterfaceError->pfnMessage = vdLogMessage;
10961
10962 rc2 = vdThreadStartRead(pDisk);
10963 AssertRC(rc2);
10964 fLockRead = true;
10965
10966 vdMessageWrapper(pDisk, "--- Dumping VD Disk, Images=%u\n", pDisk->cImages);
10967 for (PVDIMAGE pImage = pDisk->pBase; pImage; pImage = pImage->pNext)
10968 {
10969 vdMessageWrapper(pDisk, "Dumping VD image \"%s\" (Backend=%s)\n",
10970 pImage->pszFilename, pImage->Backend->pszBackendName);
10971 pImage->Backend->pfnDump(pImage->pBackendData);
10972 }
10973 } while (0);
10974
10975 if (RT_UNLIKELY(fLockRead))
10976 {
10977 rc2 = vdThreadFinishRead(pDisk);
10978 AssertRC(rc2);
10979 }
10980}
10981
10982
10983VBOXDDU_DECL(int) VDDiscardRanges(PVBOXHDD pDisk, PCRTRANGE paRanges, unsigned cRanges)
10984{
10985 int rc;
10986 int rc2;
10987 bool fLockWrite = false;
10988
10989 LogFlowFunc(("pDisk=%#p paRanges=%#p cRanges=%u\n",
10990 pDisk, paRanges, cRanges));
10991 do
10992 {
10993 /* sanity check */
10994 AssertPtrBreakStmt(pDisk, rc = VERR_INVALID_PARAMETER);
10995 AssertMsg(pDisk->u32Signature == VBOXHDDDISK_SIGNATURE, ("u32Signature=%08x\n", pDisk->u32Signature));
10996
10997 /* Check arguments. */
10998 AssertMsgBreakStmt(cRanges,
10999 ("cRanges=%u\n", cRanges),
11000 rc = VERR_INVALID_PARAMETER);
11001 AssertMsgBreakStmt(VALID_PTR(paRanges),
11002 ("paRanges=%#p\n", paRanges),
11003 rc = VERR_INVALID_PARAMETER);
11004
11005 rc2 = vdThreadStartWrite(pDisk);
11006 AssertRC(rc2);
11007 fLockWrite = true;
11008
11009 AssertPtrBreakStmt(pDisk->pLast, rc = VERR_VD_NOT_OPENED);
11010
11011 AssertMsgBreakStmt(pDisk->pLast->uOpenFlags & VD_OPEN_FLAGS_DISCARD,
11012 ("Discarding not supported\n"),
11013 rc = VERR_NOT_SUPPORTED);
11014
11015 VDIOCTX IoCtx;
11016 RTSEMEVENT hEventComplete = NIL_RTSEMEVENT;
11017
11018 rc = RTSemEventCreate(&hEventComplete);
11019 if (RT_FAILURE(rc))
11020 break;
11021
11022 vdIoCtxDiscardInit(&IoCtx, pDisk, paRanges, cRanges,
11023 vdIoCtxSyncComplete, pDisk, hEventComplete, NULL,
11024 vdDiscardHelperAsync, VDIOCTX_FLAGS_SYNC | VDIOCTX_FLAGS_DONT_FREE);
11025 rc = vdIoCtxProcessSync(&IoCtx, hEventComplete);
11026
11027 RTSemEventDestroy(hEventComplete);
11028 } while (0);
11029
11030 if (RT_UNLIKELY(fLockWrite))
11031 {
11032 rc2 = vdThreadFinishWrite(pDisk);
11033 AssertRC(rc2);
11034 }
11035
11036 LogFlowFunc(("returns %Rrc\n", rc));
11037 return rc;
11038}
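
/*
 * Usage sketch for VDDiscardRanges, assuming pDisk is a valid HDD container
 * whose last image was opened with VD_OPEN_FLAGS_DISCARD (otherwise the call
 * fails with VERR_NOT_SUPPORTED).  The offsets and sizes are illustrative
 * values only.
 *
 *     RTRANGE aRanges[2];
 *     aRanges[0].offStart = 0;
 *     aRanges[0].cbRange  = _1M;
 *     aRanges[1].offStart = 8 * _1M;
 *     aRanges[1].cbRange  = 2 * _1M;
 *     int rc = VDDiscardRanges(pDisk, &aRanges[0], RT_ELEMENTS(aRanges));
 *     AssertRC(rc);
 */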
11039
11040
11041VBOXDDU_DECL(int) VDAsyncRead(PVBOXHDD pDisk, uint64_t uOffset, size_t cbRead,
11042 PCRTSGBUF pcSgBuf,
11043 PFNVDASYNCTRANSFERCOMPLETE pfnComplete,
11044 void *pvUser1, void *pvUser2)
11045{
11046 int rc = VERR_VD_BLOCK_FREE;
11047 int rc2;
11048 bool fLockRead = false;
11049 PVDIOCTX pIoCtx = NULL;
11050
11051 LogFlowFunc(("pDisk=%#p uOffset=%llu pcSgBuf=%#p cbRead=%zu pvUser1=%#p pvUser2=%#p\n",
11052 pDisk, uOffset, pcSgBuf, cbRead, pvUser1, pvUser2));
11053
11054 do
11055 {
11056 /* sanity check */
11057 AssertPtrBreakStmt(pDisk, rc = VERR_INVALID_PARAMETER);
11058 AssertMsg(pDisk->u32Signature == VBOXHDDDISK_SIGNATURE, ("u32Signature=%08x\n", pDisk->u32Signature));
11059
11060 /* Check arguments. */
11061 AssertMsgBreakStmt(cbRead,
11062 ("cbRead=%zu\n", cbRead),
11063 rc = VERR_INVALID_PARAMETER);
11064 AssertMsgBreakStmt(VALID_PTR(pcSgBuf),
11065 ("pcSgBuf=%#p\n", pcSgBuf),
11066 rc = VERR_INVALID_PARAMETER);
11067
11068 rc2 = vdThreadStartRead(pDisk);
11069 AssertRC(rc2);
11070 fLockRead = true;
11071
11072 AssertMsgBreakStmt(uOffset + cbRead <= pDisk->cbSize,
11073 ("uOffset=%llu cbRead=%zu pDisk->cbSize=%llu\n",
11074 uOffset, cbRead, pDisk->cbSize),
11075 rc = VERR_INVALID_PARAMETER);
11076 AssertPtrBreakStmt(pDisk->pLast, rc = VERR_VD_NOT_OPENED);
11077
11078 pIoCtx = vdIoCtxRootAlloc(pDisk, VDIOCTXTXDIR_READ, uOffset,
11079 cbRead, pDisk->pLast, pcSgBuf,
11080 pfnComplete, pvUser1, pvUser2,
11081 NULL, vdReadHelperAsync,
11082 VDIOCTX_FLAGS_ZERO_FREE_BLOCKS);
11083 if (!pIoCtx)
11084 {
11085 rc = VERR_NO_MEMORY;
11086 break;
11087 }
11088
11089 rc = vdIoCtxProcessTryLockDefer(pIoCtx);
11090 if (rc == VINF_VD_ASYNC_IO_FINISHED)
11091 {
11092 if (ASMAtomicCmpXchgBool(&pIoCtx->fComplete, true, false))
11093 vdIoCtxFree(pDisk, pIoCtx);
11094 else
11095 rc = VERR_VD_ASYNC_IO_IN_PROGRESS; /* Let the other handler complete the request. */
11096 }
11097 else if (rc != VERR_VD_ASYNC_IO_IN_PROGRESS) /* Another error */
11098 vdIoCtxFree(pDisk, pIoCtx);
11099
11100 } while (0);
11101
11102 if (RT_UNLIKELY(fLockRead) && (rc != VERR_VD_ASYNC_IO_IN_PROGRESS))
11103 {
11104 rc2 = vdThreadFinishRead(pDisk);
11105 AssertRC(rc2);
11106 }
11107
11108 LogFlowFunc(("returns %Rrc\n", rc));
11109 return rc;
11110}
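
/*
 * Usage sketch for VDAsyncRead, assuming pDisk is a valid HDD container and a
 * completion callback of the PFNVDASYNCTRANSFERCOMPLETE shape declared in
 * VBox/vd.h: read 64K from offset 0 into a single S/G segment and wait on an
 * IPRT event semaphore if the request does not finish synchronously.
 *
 *     static DECLCALLBACK(void) myXferComplete(void *pvUser1, void *pvUser2, int rcReq)
 *     {
 *         RT_NOREF(pvUser2, rcReq);
 *         RTSemEventSignal((RTSEMEVENT)pvUser1);
 *     }
 *
 *     ...
 *
 *     uint8_t    abBuf[_64K];
 *     RTSGSEG    Seg = { abBuf, sizeof(abBuf) };
 *     RTSGBUF    SgBuf;
 *     RTSEMEVENT hEvent = NIL_RTSEMEVENT;
 *     RTSgBufInit(&SgBuf, &Seg, 1);
 *     int rc = RTSemEventCreate(&hEvent);
 *     if (RT_SUCCESS(rc))
 *     {
 *         rc = VDAsyncRead(pDisk, 0, sizeof(abBuf), &SgBuf,
 *                          myXferComplete, (void *)hEvent, NULL);
 *         if (rc == VERR_VD_ASYNC_IO_IN_PROGRESS)
 *             rc = RTSemEventWait(hEvent, RT_INDEFINITE_WAIT);
 *         RTSemEventDestroy(hEvent);
 *     }
 */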
11111
11112
11113VBOXDDU_DECL(int) VDAsyncWrite(PVBOXHDD pDisk, uint64_t uOffset, size_t cbWrite,
11114 PCRTSGBUF pcSgBuf,
11115 PFNVDASYNCTRANSFERCOMPLETE pfnComplete,
11116 void *pvUser1, void *pvUser2)
11117{
11118 int rc;
11119 int rc2;
11120 bool fLockWrite = false;
11121 PVDIOCTX pIoCtx = NULL;
11122
11123 LogFlowFunc(("pDisk=%#p uOffset=%llu pcSgBuf=%#p cbWrite=%zu pvUser1=%#p pvUser2=%#p\n",
11124 pDisk, uOffset, pcSgBuf, cbWrite, pvUser1, pvUser2));
11125 do
11126 {
11127 /* sanity check */
11128 AssertPtrBreakStmt(pDisk, rc = VERR_INVALID_PARAMETER);
11129 AssertMsg(pDisk->u32Signature == VBOXHDDDISK_SIGNATURE, ("u32Signature=%08x\n", pDisk->u32Signature));
11130
11131 /* Check arguments. */
11132 AssertMsgBreakStmt(cbWrite,
11133 ("cbWrite=%zu\n", cbWrite),
11134 rc = VERR_INVALID_PARAMETER);
11135 AssertMsgBreakStmt(VALID_PTR(pcSgBuf),
11136 ("pcSgBuf=%#p\n", pcSgBuf),
11137 rc = VERR_INVALID_PARAMETER);
11138
11139 rc2 = vdThreadStartWrite(pDisk);
11140 AssertRC(rc2);
11141 fLockWrite = true;
11142
11143 AssertMsgBreakStmt(uOffset + cbWrite <= pDisk->cbSize,
11144 ("uOffset=%llu cbWrite=%zu pDisk->cbSize=%llu\n",
11145 uOffset, cbWrite, pDisk->cbSize),
11146 rc = VERR_INVALID_PARAMETER);
11147 AssertPtrBreakStmt(pDisk->pLast, rc = VERR_VD_NOT_OPENED);
11148
11149 pIoCtx = vdIoCtxRootAlloc(pDisk, VDIOCTXTXDIR_WRITE, uOffset,
11150 cbWrite, pDisk->pLast, pcSgBuf,
11151 pfnComplete, pvUser1, pvUser2,
11152 NULL, vdWriteHelperAsync,
11153 VDIOCTX_FLAGS_DEFAULT);
11154 if (!pIoCtx)
11155 {
11156 rc = VERR_NO_MEMORY;
11157 break;
11158 }
11159
11160 rc = vdIoCtxProcessTryLockDefer(pIoCtx);
11161 if (rc == VINF_VD_ASYNC_IO_FINISHED)
11162 {
11163 if (ASMAtomicCmpXchgBool(&pIoCtx->fComplete, true, false))
11164 vdIoCtxFree(pDisk, pIoCtx);
11165 else
11166 rc = VERR_VD_ASYNC_IO_IN_PROGRESS; /* Let the other handler complete the request. */
11167 }
11168 else if (rc != VERR_VD_ASYNC_IO_IN_PROGRESS) /* Another error */
11169 vdIoCtxFree(pDisk, pIoCtx);
11170 } while (0);
11171
11172 if (RT_UNLIKELY(fLockWrite) && (rc != VERR_VD_ASYNC_IO_IN_PROGRESS))
11173 {
11174 rc2 = vdThreadFinishWrite(pDisk);
11175 AssertRC(rc2);
11176 }
11177
11178 LogFlowFunc(("returns %Rrc\n", rc));
11179 return rc;
11180}
11181
11182
11183VBOXDDU_DECL(int) VDAsyncFlush(PVBOXHDD pDisk, PFNVDASYNCTRANSFERCOMPLETE pfnComplete,
11184 void *pvUser1, void *pvUser2)
11185{
11186 int rc;
11187 int rc2;
11188 bool fLockWrite = false;
11189 PVDIOCTX pIoCtx = NULL;
11190
11191 LogFlowFunc(("pDisk=%#p\n", pDisk));
11192
11193 do
11194 {
11195 /* sanity check */
11196 AssertPtrBreakStmt(pDisk, rc = VERR_INVALID_PARAMETER);
11197 AssertMsg(pDisk->u32Signature == VBOXHDDDISK_SIGNATURE, ("u32Signature=%08x\n", pDisk->u32Signature));
11198
11199 rc2 = vdThreadStartWrite(pDisk);
11200 AssertRC(rc2);
11201 fLockWrite = true;
11202
11203 AssertPtrBreakStmt(pDisk->pLast, rc = VERR_VD_NOT_OPENED);
11204
11205 pIoCtx = vdIoCtxRootAlloc(pDisk, VDIOCTXTXDIR_FLUSH, 0,
11206 0, pDisk->pLast, NULL,
11207 pfnComplete, pvUser1, pvUser2,
11208 NULL, vdFlushHelperAsync,
11209 VDIOCTX_FLAGS_DEFAULT);
11210 if (!pIoCtx)
11211 {
11212 rc = VERR_NO_MEMORY;
11213 break;
11214 }
11215
11216 rc = vdIoCtxProcessTryLockDefer(pIoCtx);
11217 if (rc == VINF_VD_ASYNC_IO_FINISHED)
11218 {
11219 if (ASMAtomicCmpXchgBool(&pIoCtx->fComplete, true, false))
11220 vdIoCtxFree(pDisk, pIoCtx);
11221 else
11222 rc = VERR_VD_ASYNC_IO_IN_PROGRESS; /* Let the other handler complete the request. */
11223 }
11224 else if (rc != VERR_VD_ASYNC_IO_IN_PROGRESS) /* Another error */
11225 vdIoCtxFree(pDisk, pIoCtx);
11226 } while (0);
11227
11228 if (RT_UNLIKELY(fLockWrite) && (rc != VERR_VD_ASYNC_IO_IN_PROGRESS))
11229 {
11230 rc2 = vdThreadFinishWrite(pDisk);
11231 AssertRC(rc2);
11232 }
11233
11234 LogFlowFunc(("returns %Rrc\n", rc));
11235 return rc;
11236}
11237
11238VBOXDDU_DECL(int) VDAsyncDiscardRanges(PVBOXHDD pDisk, PCRTRANGE paRanges, unsigned cRanges,
11239 PFNVDASYNCTRANSFERCOMPLETE pfnComplete,
11240 void *pvUser1, void *pvUser2)
11241{
11242 int rc;
11243 int rc2;
11244 bool fLockWrite = false;
11245 PVDIOCTX pIoCtx = NULL;
11246
11247 LogFlowFunc(("pDisk=%#p paRanges=%#p cRanges=%u\n", pDisk, paRanges, cRanges));
11248
11249 do
11250 {
11251 /* sanity check */
11252 AssertPtrBreakStmt(pDisk, rc = VERR_INVALID_PARAMETER);
11253 AssertMsg(pDisk->u32Signature == VBOXHDDDISK_SIGNATURE, ("u32Signature=%08x\n", pDisk->u32Signature));
11254
11255 rc2 = vdThreadStartWrite(pDisk);
11256 AssertRC(rc2);
11257 fLockWrite = true;
11258
11259 AssertPtrBreakStmt(pDisk->pLast, rc = VERR_VD_NOT_OPENED);
11260
11261 pIoCtx = vdIoCtxDiscardAlloc(pDisk, paRanges, cRanges,
11262 pfnComplete, pvUser1, pvUser2, NULL,
11263 vdDiscardHelperAsync,
11264 VDIOCTX_FLAGS_DEFAULT);
11265 if (!pIoCtx)
11266 {
11267 rc = VERR_NO_MEMORY;
11268 break;
11269 }
11270
11271 rc = vdIoCtxProcessTryLockDefer(pIoCtx);
11272 if (rc == VINF_VD_ASYNC_IO_FINISHED)
11273 {
11274 if (ASMAtomicCmpXchgBool(&pIoCtx->fComplete, true, false))
11275 vdIoCtxFree(pDisk, pIoCtx);
11276 else
11277 rc = VERR_VD_ASYNC_IO_IN_PROGRESS; /* Let the other handler complete the request. */
11278 }
11279 else if (rc != VERR_VD_ASYNC_IO_IN_PROGRESS) /* Another error */
11280 vdIoCtxFree(pDisk, pIoCtx);
11281 } while (0);
11282
11283 if (RT_UNLIKELY(fLockWrite) && (rc != VERR_VD_ASYNC_IO_IN_PROGRESS))
11284 {
11285 rc2 = vdThreadFinishWrite(pDisk);
11286 AssertRC(rc2);
11287 }
11288
11289 LogFlowFunc(("returns %Rrc\n", rc));
11290 return rc;
11291}
11292
11293VBOXDDU_DECL(int) VDRepair(PVDINTERFACE pVDIfsDisk, PVDINTERFACE pVDIfsImage,
11294 const char *pszFilename, const char *pszBackend,
11295 uint32_t fFlags)
11296{
11297 int rc = VERR_NOT_SUPPORTED;
11298 PCVDIMAGEBACKEND pBackend = NULL;
11299 VDINTERFACEIOINT VDIfIoInt;
11300 VDINTERFACEIO VDIfIoFallback;
11301 PVDINTERFACEIO pInterfaceIo;
11302
11303 LogFlowFunc(("pszFilename=\"%s\"\n", pszFilename));
11304 /* Check arguments. */
11305 AssertMsgReturn(VALID_PTR(pszFilename) && *pszFilename,
11306 ("pszFilename=%#p \"%s\"\n", pszFilename, pszFilename),
11307 VERR_INVALID_PARAMETER);
11308 AssertMsgReturn(VALID_PTR(pszBackend),
11309 ("pszBackend=%#p\n", pszBackend),
11310 VERR_INVALID_PARAMETER);
11311 AssertMsgReturn((fFlags & ~VD_REPAIR_FLAGS_MASK) == 0,
11312 ("fFlags=%#x\n", fFlags),
11313 VERR_INVALID_PARAMETER);
11314
11315 pInterfaceIo = VDIfIoGet(pVDIfsImage);
11316 if (!pInterfaceIo)
11317 {
11318 /*
11319 * Caller doesn't provide an I/O interface, create our own using the
11320 * native file API.
11321 */
11322 vdIfIoFallbackCallbacksSetup(&VDIfIoFallback);
11323 pInterfaceIo = &VDIfIoFallback;
11324 }
11325
11326 /* Set up the internal I/O interface. */
11327 AssertReturn(!VDIfIoIntGet(pVDIfsImage), VERR_INVALID_PARAMETER);
11328 VDIfIoInt.pfnOpen = vdIOIntOpenLimited;
11329 VDIfIoInt.pfnClose = vdIOIntCloseLimited;
11330 VDIfIoInt.pfnDelete = vdIOIntDeleteLimited;
11331 VDIfIoInt.pfnMove = vdIOIntMoveLimited;
11332 VDIfIoInt.pfnGetFreeSpace = vdIOIntGetFreeSpaceLimited;
11333 VDIfIoInt.pfnGetModificationTime = vdIOIntGetModificationTimeLimited;
11334 VDIfIoInt.pfnGetSize = vdIOIntGetSizeLimited;
11335 VDIfIoInt.pfnSetSize = vdIOIntSetSizeLimited;
11336 VDIfIoInt.pfnReadUser = vdIOIntReadUserLimited;
11337 VDIfIoInt.pfnWriteUser = vdIOIntWriteUserLimited;
11338 VDIfIoInt.pfnReadMeta = vdIOIntReadMetaLimited;
11339 VDIfIoInt.pfnWriteMeta = vdIOIntWriteMetaLimited;
11340 VDIfIoInt.pfnFlush = vdIOIntFlushLimited;
11341 rc = VDInterfaceAdd(&VDIfIoInt.Core, "VD_IOINT", VDINTERFACETYPE_IOINT,
11342 pInterfaceIo, sizeof(VDINTERFACEIOINT), &pVDIfsImage);
11343 AssertRC(rc);
11344
11345 rc = vdFindBackend(pszBackend, &pBackend);
11346 if (RT_SUCCESS(rc))
11347 {
11348 if (pBackend->pfnRepair)
11349 rc = pBackend->pfnRepair(pszFilename, pVDIfsDisk, pVDIfsImage, fFlags);
11350 else
11351 rc = VERR_VD_IMAGE_REPAIR_NOT_SUPPORTED;
11352 }
11353
11354 LogFlowFunc(("returns %Rrc\n", rc));
11355 return rc;
11356}
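
/*
 * Usage sketch for VDRepair: check an image for consistency problems without
 * modifying it by passing VD_REPAIR_DRY_RUN.  The interface lists may be NULL
 * so the built-in file I/O fallback is used; the path and backend name below
 * are placeholders.
 *
 *     int rc = VDRepair(NULL, NULL, "/path/to/image.vdi", "VDI", VD_REPAIR_DRY_RUN);
 *     if (rc == VERR_VD_IMAGE_REPAIR_NOT_SUPPORTED)
 *         LogRel(("The selected backend cannot repair images\n"));
 */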
11357
11358
11359/*
11360 * generic plugin functions
11361 */
11362
11363/**
11364 * @interface_method_impl{VDIMAGEBACKEND,pfnComposeLocation}
11365 */
11366DECLCALLBACK(int) genericFileComposeLocation(PVDINTERFACE pConfig, char **pszLocation)
11367{
11368 RT_NOREF1(pConfig);
11369 *pszLocation = NULL;
11370 return VINF_SUCCESS;
11371}
11372
11373/**
11374 * @interface_method_impl{VDIMAGEBACKEND,pfnComposeName}
11375 */
11376DECLCALLBACK(int) genericFileComposeName(PVDINTERFACE pConfig, char **pszName)
11377{
11378 RT_NOREF1(pConfig);
11379 *pszName = NULL;
11380 return VINF_SUCCESS;
11381}
11382