VirtualBox

source: vbox/trunk/src/VBox/Devices/Storage/VmdkHDDCore.cpp@ 32536

Last change on this file since 32536 was 32536, checked in by vboxsync, 14 years ago

Storage/VBoxHDD: replace custom open flags with regular IPRT file open flags, introduce user-providable filesystem access interface, eliminate dependency on PGM geometry structure, change pvBuffer/cbBuffer parameter ordering to the usual conventions, eliminate the remains of the old I/O code, make more plugin methods optional to reduce redundancy, lots of cleanups

Storage/DrvVD+testcases,Main/Medium+Frontends: adapt to VBoxHDD changes, logging fixes

Storage/VDI+VMDK+DMG+Raw+VHD+Parallels+VCI: made as similar to each other as possible, added inline VFS wrappers to improve readability, full VFS support, VDI files are now 4K aligned, eliminate the remains of the old I/O code, various more or less severe bugfixes, code sort

Storage/iSCSI: support disks bigger than 2T, streamline the code to be more similar to the file-based backends, memory leak fix, error code usage like file-based backends, code sort

log+err: added new error codes/log groups and eliminated unused old ones

  • Property svn:eol-style set to native
  • Property svn:keywords set to Author Date Id Revision
File size: 250.7 KB
Line 
1/* $Id: VmdkHDDCore.cpp 32536 2010-09-15 18:25:32Z vboxsync $ */
2/** @file
3 * VMDK disk image, core code.
4 */
5
6/*
7 * Copyright (C) 2006-2010 Oracle Corporation
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 */
17
18/*******************************************************************************
19* Header Files *
20*******************************************************************************/
21#define LOG_GROUP LOG_GROUP_VD_VMDK
22#include <VBox/VBoxHDD-Plugin.h>
23#include <VBox/err.h>
24
25#include <VBox/log.h>
26#include <iprt/assert.h>
27#include <iprt/alloc.h>
28#include <iprt/uuid.h>
29#include <iprt/path.h>
30#include <iprt/string.h>
31#include <iprt/rand.h>
32#include <iprt/zip.h>
33
34
35/*******************************************************************************
36* Constants And Macros, Structures and Typedefs *
37*******************************************************************************/
38
39/** Maximum encoded string size (including NUL) we allow for VMDK images.
40 * Deliberately not set high to avoid running out of descriptor space. */
41#define VMDK_ENCODED_COMMENT_MAX 1024
42
43/** VMDK descriptor DDB entry for PCHS cylinders. */
44#define VMDK_DDB_GEO_PCHS_CYLINDERS "ddb.geometry.cylinders"
45
46/** VMDK descriptor DDB entry for PCHS heads. */
47#define VMDK_DDB_GEO_PCHS_HEADS "ddb.geometry.heads"
48
49/** VMDK descriptor DDB entry for PCHS sectors. */
50#define VMDK_DDB_GEO_PCHS_SECTORS "ddb.geometry.sectors"
51
52/** VMDK descriptor DDB entry for LCHS cylinders. */
53#define VMDK_DDB_GEO_LCHS_CYLINDERS "ddb.geometry.biosCylinders"
54
55/** VMDK descriptor DDB entry for LCHS heads. */
56#define VMDK_DDB_GEO_LCHS_HEADS "ddb.geometry.biosHeads"
57
58/** VMDK descriptor DDB entry for LCHS sectors. */
59#define VMDK_DDB_GEO_LCHS_SECTORS "ddb.geometry.biosSectors"
60
61/** VMDK descriptor DDB entry for image UUID. */
62#define VMDK_DDB_IMAGE_UUID "ddb.uuid.image"
63
64/** VMDK descriptor DDB entry for image modification UUID. */
65#define VMDK_DDB_MODIFICATION_UUID "ddb.uuid.modification"
66
67/** VMDK descriptor DDB entry for parent image UUID. */
68#define VMDK_DDB_PARENT_UUID "ddb.uuid.parent"
69
70/** VMDK descriptor DDB entry for parent image modification UUID. */
71#define VMDK_DDB_PARENT_MODIFICATION_UUID "ddb.uuid.parentmodification"
72
73/** No compression for streamOptimized files. */
74#define VMDK_COMPRESSION_NONE 0
75
76/** Deflate compression for streamOptimized files. */
77#define VMDK_COMPRESSION_DEFLATE 1
78
79/** Marker that the actual GD value is stored in the footer. */
80#define VMDK_GD_AT_END 0xffffffffffffffffULL
81
82/** Marker for end-of-stream in streamOptimized images. */
83#define VMDK_MARKER_EOS 0
84
85/** Marker for grain table block in streamOptimized images. */
86#define VMDK_MARKER_GT 1
87
88/** Marker for grain directory block in streamOptimized images. */
89#define VMDK_MARKER_GD 2
90
91/** Marker for footer in streamOptimized images. */
92#define VMDK_MARKER_FOOTER 3
93
94/** Dummy marker for "don't check the marker value". */
95#define VMDK_MARKER_IGNORE 0xffffffffU
96
97/**
98 * Magic number for hosted images created by VMware Workstation 4, VMware
99 * Workstation 5, VMware Server or VMware Player. Not necessarily sparse.
100 */
101#define VMDK_SPARSE_MAGICNUMBER 0x564d444b /* 'V' 'M' 'D' 'K' */
102
/**
 * VMDK hosted binary extent header. The "Sparse" is a total misnomer, as
 * this header is also used for monolithic flat images.
 *
 * Packed (no inter-member padding); the trailing pad[] brings the structure
 * to exactly 512 bytes, i.e. one sector. Multi-byte fields are little-endian
 * on disk per the VMDK format specification.
 */
#pragma pack(1)
typedef struct SparseExtentHeader
{
    /** Magic; VMDK_SPARSE_MAGICNUMBER ('VMDK') for hosted images. */
    uint32_t magicNumber;
    /** Format version of this extent. */
    uint32_t version;
    /** Capability/validity flags. */
    uint32_t flags;
    /** Capacity of the extent, in sectors. */
    uint64_t capacity;
    /** Grain (block) size, in sectors. */
    uint64_t grainSize;
    /** Start of the embedded descriptor, in sectors (0 if none). */
    uint64_t descriptorOffset;
    /** Size of the embedded descriptor, in sectors. */
    uint64_t descriptorSize;
    /** Number of grain table entries per grain table. */
    uint32_t numGTEsPerGT;
    /** Start of the redundant grain directory, in sectors. */
    uint64_t rgdOffset;
    /** Start of the grain directory, in sectors. */
    uint64_t gdOffset;
    /** Number of metadata (overhead) sectors preceding the data area. */
    uint64_t overHead;
    /** Set while the extent is open for writing; consistency check on open. */
    bool uncleanShutdown;
    /** Line-ending probe bytes (detect FTP ASCII-mode corruption). */
    char singleEndLineChar;
    char nonEndLineChar;
    char doubleEndLineChar1;
    char doubleEndLineChar2;
    /** VMDK_COMPRESSION_NONE or VMDK_COMPRESSION_DEFLATE. */
    uint16_t compressAlgorithm;
    /** Padding to make the header exactly 512 bytes. */
    uint8_t pad[433];
} SparseExtentHeader;
#pragma pack()
130
131/** VMDK capacity for a single chunk when 2G splitting is turned on. Should be
132 * divisible by the default grain size (64K) */
133#define VMDK_2G_SPLIT_SIZE (2047 * 1024 * 1024)
134
/** VMDK streamOptimized file format marker. The type field may or may not
 * be actually valid, but there's always data to read there.
 *
 * Stored little-endian on disk (see the RT_LE2H/RT_H2LE conversions at the
 * use sites). For a compressed grain marker cbSize is non-zero and the
 * compressed data starts right after cbSize, overlaying uType. */
#pragma pack(1)
typedef struct VMDKMARKER
{
    /** Grain LBA (compressed grain marker) or payload size in sectors
     * (metadata markers, where cbSize is 0). */
    uint64_t uSector;
    /** Size of the compressed grain data in bytes; 0 for metadata markers. */
    uint32_t cbSize;
    /** Marker type (VMDK_MARKER_*); only meaningful when cbSize is 0. */
    uint32_t uType;
} VMDKMARKER, *PVMDKMARKER;
#pragma pack()
145
146
147#ifdef VBOX_WITH_VMDK_ESX
148
149/** @todo the ESX code is not tested, not used, and lacks error messages. */
150
151/**
152 * Magic number for images created by VMware GSX Server 3 or ESX Server 3.
153 */
154#define VMDK_ESX_SPARSE_MAGICNUMBER 0x44574f43 /* 'C' 'O' 'W' 'D' */
155
#pragma pack(1)
/** On-disk header of an ESX/GSX COWD sparse extent (magic 'COWD').
 * Packed layout; fields here are 32-bit, unlike the hosted sparse header. */
typedef struct COWDisk_Header
{
    /** Magic; VMDK_ESX_SPARSE_MAGICNUMBER ('COWD'). */
    uint32_t magicNumber;
    /** Format version. */
    uint32_t version;
    /** Capability/validity flags. */
    uint32_t flags;
    /** Capacity of the extent, in sectors. */
    uint32_t numSectors;
    /** Grain size, in sectors. */
    uint32_t grainSize;
    /** Start of the grain directory, in sectors. */
    uint32_t gdOffset;
    /** Number of grain directory entries. */
    uint32_t numGDEntries;
    /** Next free sector. */
    uint32_t freeSector;
    /* The spec incompletely documents quite a few further fields, but states
     * that they are unused by the current format. Replace them by padding. */
    char reserved1[1604];
    uint32_t savedGeneration;
    char reserved2[8];
    uint32_t uncleanShutdown;
    char padding[396];
} COWDisk_Header;
#pragma pack()
176#endif /* VBOX_WITH_VMDK_ESX */
177
178
/** Convert sector number/size to byte offset/size (VMDK uses 512-byte sectors). */
#define VMDK_SECTOR2BYTE(u) ((uint64_t)(u) << 9)

/** Convert byte offset/size to sector number/size (truncating division by 512). */
#define VMDK_BYTE2SECTOR(u) ((u) >> 9)
184
/**
 * VMDK extent type.
 *
 * @note Numbering deliberately starts at 1; 0 is left unused.
 */
typedef enum VMDKETYPE
{
    /** Hosted sparse extent. */
    VMDKETYPE_HOSTED_SPARSE = 1,
    /** Flat extent. */
    VMDKETYPE_FLAT,
    /** Zero extent. */
    VMDKETYPE_ZERO,
    /** VMFS extent, used by ESX. */
    VMDKETYPE_VMFS
#ifdef VBOX_WITH_VMDK_ESX
    ,
    /** ESX sparse extent. */
    VMDKETYPE_ESX_SPARSE
#endif /* VBOX_WITH_VMDK_ESX */
} VMDKETYPE, *PVMDKETYPE;
204
/**
 * VMDK access type for an extent.
 */
typedef enum VMDKACCESS
{
    /** No access allowed. */
    VMDKACCESS_NOACCESS = 0,
    /** Read-only access. */
    VMDKACCESS_READONLY,
    /** Read-write access. */
    VMDKACCESS_READWRITE
} VMDKACCESS, *PVMDKACCESS;
217
218/** Forward declaration for PVMDKIMAGE. */
219typedef struct VMDKIMAGE *PVMDKIMAGE;
220
/**
 * Extents files entry. Used for opening a particular file only once.
 *
 * Entries form the doubly linked, reference-counted cache anchored at
 * VMDKIMAGE::pFiles (see vmdkFileOpen/vmdkFileClose).
 */
typedef struct VMDKFILE
{
    /** Pointer to filename. Local copy (RTStrDup'ed, freed on last close). */
    const char *pszFilename;
    /** File open flags for consistency checking on repeated opens. */
    unsigned fOpen;
    /** Flag whether this file has been opened for async I/O. */
    bool fAsyncIO;
    /** Handle for sync/async file abstraction.*/
    PVDIOSTORAGE pStorage;
    /** Reference counter; the entry is destroyed when it drops to 0. */
    unsigned uReferences;
    /** Flag whether the file should be deleted on last close. */
    bool fDelete;
    /** Pointer to the image we belong to (for debugging purposes). */
    PVMDKIMAGE pImage;
    /** Pointer to next file descriptor. */
    struct VMDKFILE *pNext;
    /** Pointer to the previous file descriptor. */
    struct VMDKFILE *pPrev;
} VMDKFILE, *PVMDKFILE;
245
/**
 * VMDK extent data structure.
 *
 * All "sector" quantities use 512-byte sectors (see VMDK_SECTOR2BYTE).
 * The trailing uLastGrain*/uGrainSector/pvGrain members are only used for
 * streamOptimized (compressed) extents.
 */
typedef struct VMDKEXTENT
{
    /** File handle. */
    PVMDKFILE pFile;
    /** Base name of the image extent. */
    const char *pszBasename;
    /** Full name of the image extent. */
    const char *pszFullname;
    /** Number of sectors in this extent. */
    uint64_t cSectors;
    /** Number of sectors per block (grain in VMDK speak). */
    uint64_t cSectorsPerGrain;
    /** Starting sector number of descriptor. */
    uint64_t uDescriptorSector;
    /** Size of descriptor in sectors. */
    uint64_t cDescriptorSectors;
    /** Starting sector number of grain directory. */
    uint64_t uSectorGD;
    /** Starting sector number of redundant grain directory. */
    uint64_t uSectorRGD;
    /** Total number of metadata sectors. */
    uint64_t cOverheadSectors;
    /** Nominal size (i.e. as described by the descriptor) of this extent. */
    uint64_t cNominalSectors;
    /** Sector offset (i.e. as described by the descriptor) of this extent. */
    uint64_t uSectorOffset;
    /** Number of entries in a grain table. */
    uint32_t cGTEntries;
    /** Number of sectors reachable via a grain directory entry. */
    uint32_t cSectorsPerGDE;
    /** Number of entries in the grain directory. */
    uint32_t cGDEntries;
    /** Pointer to the next free sector. Legacy information. Do not use. */
    uint32_t uFreeSector;
    /** Number of this extent in the list of images. */
    uint32_t uExtent;
    /** Pointer to the descriptor (NULL if no descriptor in this extent). */
    char *pDescData;
    /** Pointer to the grain directory. */
    uint32_t *pGD;
    /** Pointer to the redundant grain directory. */
    uint32_t *pRGD;
    /** VMDK version of this extent. 1=1.0/1.1 */
    uint32_t uVersion;
    /** Type of this extent. */
    VMDKETYPE enmType;
    /** Access to this extent. */
    VMDKACCESS enmAccess;
    /** Flag whether this extent is marked as unclean. */
    bool fUncleanShutdown;
    /** Flag whether the metadata in the extent header needs to be updated. */
    bool fMetaDirty;
    /** Flag whether there is a footer in this extent. */
    bool fFooter;
    /** Compression type for this extent (VMDK_COMPRESSION_*). */
    uint16_t uCompression;
    /** Last grain which has been written to. Only for streamOptimized extents. */
    uint32_t uLastGrainWritten;
    /** Sector number of last grain which has been written to. Only for
     * streamOptimized extents. */
    uint32_t uLastGrainSector;
    /** Data size of last grain which has been written to. Only for
     * streamOptimized extents. */
    uint32_t cbLastGrainWritten;
    /** Starting sector of the decompressed grain buffer. */
    uint32_t uGrainSector;
    /** Decompressed grain buffer for streamOptimized extents. */
    void *pvGrain;
    /** Reference to the image in which this extent is used. Do not use this
     * on a regular basis to avoid passing pImage references to functions
     * explicitly. */
    struct VMDKIMAGE *pImage;
} VMDKEXTENT, *PVMDKEXTENT;
322
323/**
324 * Grain table cache size. Allocated per image.
325 */
326#define VMDK_GT_CACHE_SIZE 256
327
328/**
329 * Grain table block size. Smaller than an actual grain table block to allow
330 * more grain table blocks to be cached without having to allocate excessive
331 * amounts of memory for the cache.
332 */
333#define VMDK_GT_CACHELINE_SIZE 128
334
335
336/**
337 * Maximum number of lines in a descriptor file. Not worth the effort of
338 * making it variable. Descriptor files are generally very short (~20 lines),
339 * with the exception of sparse files split in 2G chunks, which need for the
340 * maximum size (almost 2T) exactly 1025 lines for the disk database.
341 */
342#define VMDK_DESCRIPTOR_LINES_MAX 1100U
343
/**
 * Parsed descriptor information. Allows easy access and update of the
 * descriptor (whether separate file or not). Free form text files suck.
 *
 * aLines[] points into a single backing buffer of cbDescAlloc bytes;
 * aNextLines[] implements a skip list over comment lines.
 */
typedef struct VMDKDESCRIPTOR
{
    /** Line number of first entry of the disk descriptor. */
    unsigned uFirstDesc;
    /** Line number of first entry in the extent description. */
    unsigned uFirstExtent;
    /** Line number of first disk database entry. */
    unsigned uFirstDDB;
    /** Total number of lines. */
    unsigned cLines;
    /** Total amount of memory available for the descriptor. */
    size_t cbDescAlloc;
    /** Set if descriptor has been changed and not yet written to disk. */
    bool fDirty;
    /** Array of pointers to the data in the descriptor. */
    char *aLines[VMDK_DESCRIPTOR_LINES_MAX];
    /** Array of line indices pointing to the next non-comment line. */
    unsigned aNextLines[VMDK_DESCRIPTOR_LINES_MAX];
} VMDKDESCRIPTOR, *PVMDKDESCRIPTOR;
367
368
/**
 * Cache entry for translating extent/sector to a sector number in that
 * extent. Caches one VMDK_GT_CACHELINE_SIZE-entry slice of a grain table.
 */
typedef struct VMDKGTCACHEENTRY
{
    /** Extent number for which this entry is valid. */
    uint32_t uExtent;
    /** GT data block number. */
    uint64_t uGTBlock;
    /** Data part of the cache entry (grain table entries). */
    uint32_t aGTData[VMDK_GT_CACHELINE_SIZE];
} VMDKGTCACHEENTRY, *PVMDKGTCACHEENTRY;
382
/**
 * Cache data structure for blocks of grain table entries. For now this is a
 * fixed size direct mapping cache, but this should be adapted to the size of
 * the sparse image and maybe converted to a set-associative cache. The
 * implementation below implements a write-through cache with write allocate.
 */
typedef struct VMDKGTCACHE
{
    /** Cache entries. */
    VMDKGTCACHEENTRY aGTCache[VMDK_GT_CACHE_SIZE];
    /** Number of cache entries (currently unused). */
    unsigned cEntries;
} VMDKGTCACHE, *PVMDKGTCACHE;
396
/**
 * Complete VMDK image data structure. Mainly a collection of extents and a few
 * extra global data fields.
 */
typedef struct VMDKIMAGE
{
    /** Image name. */
    const char *pszFilename;
    /** Descriptor file if applicable. */
    PVMDKFILE pFile;
    /** I/O interface. */
    PVDINTERFACE pInterfaceIO;
    /** I/O interface callbacks. */
    PVDINTERFACEIO pInterfaceIOCallbacks;

    /** Pointer to the per-disk VD interface list. */
    PVDINTERFACE pVDIfsDisk;
    /** Pointer to the per-image VD interface list. */
    PVDINTERFACE pVDIfsImage;

    /** Error interface. */
    PVDINTERFACE pInterfaceError;
    /** Error interface callbacks. */
    PVDINTERFACEERROR pInterfaceErrorCallbacks;

    /** Pointer to the image extents. */
    PVMDKEXTENT pExtents;
    /** Number of image extents. */
    unsigned cExtents;
    /** Pointer to the files list, for opening a file referenced multiple
     * times only once (happens mainly with raw partition access). */
    PVMDKFILE pFiles;

    /**
     * Pointer to an array of segment entries for async I/O.
     * This is an optimization because the task number to submit is not known
     * and allocating/freeing an array in the read/write functions every time
     * is too expensive.
     */
    PPDMDATASEG paSegments;
    /** Entries available in the segments array. */
    unsigned cSegments;

    /** Open flags passed by VBoxHD layer. */
    unsigned uOpenFlags;
    /** Image flags defined during creation or determined during open. */
    unsigned uImageFlags;
    /** Total size of the image, in bytes. */
    uint64_t cbSize;
    /** Physical geometry of this image. */
    VDGEOMETRY PCHSGeometry;
    /** Logical geometry of this image. */
    VDGEOMETRY LCHSGeometry;
    /** Image UUID. */
    RTUUID ImageUuid;
    /** Image modification UUID. */
    RTUUID ModificationUuid;
    /** Parent image UUID. */
    RTUUID ParentUuid;
    /** Parent image modification UUID. */
    RTUUID ParentModificationUuid;

    /** Pointer to grain table cache, if this image contains sparse extents. */
    PVMDKGTCACHE pGTCache;
    /** Pointer to the descriptor (NULL if no separate descriptor file). */
    char *pDescData;
    /** Allocation size of the descriptor file. */
    size_t cbDescAlloc;
    /** Parsed descriptor file content. */
    VMDKDESCRIPTOR Descriptor;
} VMDKIMAGE;
468
469
/** State for the input callout of the inflate reader. */
typedef struct VMDKINFLATESTATE
{
    /** Image this operation relates to. */
    PVMDKIMAGE pImage;
    /** File where the data is stored. */
    PVMDKFILE pFile;
    /** Total size of the data to read (remaining compressed bytes). */
    size_t cbSize;
    /** Offset in the file to read. */
    uint64_t uFileOffset;
    /** Current read position. Starts at -1; the first helper call injects the
     * RTZIPTYPE_ZLIB type byte expected by RTZipDecomp and resets it to 0. */
    ssize_t iOffset;
} VMDKINFLATESTATE;
484
/** State for the output callout of the deflate writer. */
typedef struct VMDKDEFLATESTATE
{
    /** Image this operation relates to. */
    PVMDKIMAGE pImage;
    /** File where the data is to be stored. */
    PVMDKFILE pFile;
    /** Offset in the file to write at. */
    uint64_t uFileOffset;
    /** Current write position. Starts at -1 so the first helper call can skip
     * the zlib type byte emitted by RTZipComp; afterwards counts the payload
     * bytes actually written (used for the marker size). */
    ssize_t iOffset;
} VMDKDEFLATESTATE;
497
/** Tracks async grain allocation. */
typedef struct VMDKGRAINALLOCASYNC
{
    /** Old size of the extent. Used for rollback after an error. */
    uint64_t cbExtentOld;
    /** Flag whether the allocation failed. */
    bool fIoErr;
    /** Current number of transfers pending.
     * If reached 0 and there is an error the old state is restored. */
    unsigned cIoXfersPending;
    /** Sector number of the grain being allocated. */
    uint64_t uSector;
    /** Flag whether the grain table needs to be updated. */
    bool fGTUpdateNeeded;
    /** Extent the allocation happens in. */
    PVMDKEXTENT pExtent;
    /** New size of the extent, required for the grain table update. */
    uint64_t cbExtentSize;
    /** Grain table sector. */
    uint64_t uGTSector;
    /** Backup (redundant) grain table sector. */
    uint64_t uRGTSector;
} VMDKGRAINALLOCASYNC, *PVMDKGRAINALLOCASYNC;
521
522/*******************************************************************************
523* Static Variables *
524*******************************************************************************/
525
/** NULL-terminated array of supported file extensions (without the dot);
 * the final NULL entry terminates the list. */
static const char *const s_apszVmdkFileExtensions[] =
{
    "vmdk",
    NULL
};
532
533/*******************************************************************************
534* Internal Functions *
535*******************************************************************************/
536
537static void vmdkFreeGrainDirectory(PVMDKEXTENT pExtent);
538
539static void vmdkFreeExtentData(PVMDKIMAGE pImage, PVMDKEXTENT pExtent,
540 bool fDelete);
541
542static int vmdkCreateExtents(PVMDKIMAGE pImage, unsigned cExtents);
543static int vmdkFlushImage(PVMDKIMAGE pImage);
544static int vmdkSetImageComment(PVMDKIMAGE pImage, const char *pszComment);
545static int vmdkFreeImage(PVMDKIMAGE pImage, bool fDelete);
546
547static int vmdkAllocGrainAsyncComplete(void *pBackendData, PVDIOCTX pIoCtx, void *pvUser, int rcReq);
548
549/**
550 * Internal: signal an error to the frontend.
551 */
552DECLINLINE(int) vmdkError(PVMDKIMAGE pImage, int rc, RT_SRC_POS_DECL,
553 const char *pszFormat, ...)
554{
555 va_list va;
556 va_start(va, pszFormat);
557 if (pImage->pInterfaceError && pImage->pInterfaceErrorCallbacks)
558 pImage->pInterfaceErrorCallbacks->pfnError(pImage->pInterfaceError->pvUser, rc, RT_SRC_POS_ARGS,
559 pszFormat, va);
560 va_end(va);
561 return rc;
562}
563
564/**
565 * Internal: signal an informational message to the frontend.
566 */
567DECLINLINE(int) vmdkMessage(PVMDKIMAGE pImage, const char *pszFormat, ...)
568{
569 int rc = VINF_SUCCESS;
570 va_list va;
571 va_start(va, pszFormat);
572 if (pImage->pInterfaceError && pImage->pInterfaceErrorCallbacks)
573 rc = pImage->pInterfaceErrorCallbacks->pfnMessage(pImage->pInterfaceError->pvUser,
574 pszFormat, va);
575 va_end(va);
576 return rc;
577}
578
579/**
580 * Internal: open a file (using a file descriptor cache to ensure each file
581 * is only opened once - anything else can cause locking problems).
582 */
583static int vmdkFileOpen(PVMDKIMAGE pImage, PVMDKFILE *ppVmdkFile,
584 const char *pszFilename, uint32_t fOpen, bool fAsyncIO)
585{
586 int rc = VINF_SUCCESS;
587 PVMDKFILE pVmdkFile;
588
589 for (pVmdkFile = pImage->pFiles;
590 pVmdkFile != NULL;
591 pVmdkFile = pVmdkFile->pNext)
592 {
593 if (!strcmp(pszFilename, pVmdkFile->pszFilename))
594 {
595 Assert(fOpen == pVmdkFile->fOpen);
596 pVmdkFile->uReferences++;
597
598 *ppVmdkFile = pVmdkFile;
599
600 return rc;
601 }
602 }
603
604 /* If we get here, there's no matching entry in the cache. */
605 pVmdkFile = (PVMDKFILE)RTMemAllocZ(sizeof(VMDKFILE));
606 if (!VALID_PTR(pVmdkFile))
607 {
608 *ppVmdkFile = NULL;
609 return VERR_NO_MEMORY;
610 }
611
612 pVmdkFile->pszFilename = RTStrDup(pszFilename);
613 if (!VALID_PTR(pVmdkFile->pszFilename))
614 {
615 RTMemFree(pVmdkFile);
616 *ppVmdkFile = NULL;
617 return VERR_NO_MEMORY;
618 }
619 pVmdkFile->fOpen = fOpen;
620 pVmdkFile->fAsyncIO = fAsyncIO;
621
622 rc = pImage->pInterfaceIOCallbacks->pfnOpen(pImage->pInterfaceIO->pvUser,
623 pszFilename, fOpen,
624 &pVmdkFile->pStorage);
625 if (RT_SUCCESS(rc))
626 {
627 pVmdkFile->uReferences = 1;
628 pVmdkFile->pImage = pImage;
629 pVmdkFile->pNext = pImage->pFiles;
630 if (pImage->pFiles)
631 pImage->pFiles->pPrev = pVmdkFile;
632 pImage->pFiles = pVmdkFile;
633 *ppVmdkFile = pVmdkFile;
634 }
635 else
636 {
637 RTStrFree((char *)(void *)pVmdkFile->pszFilename);
638 RTMemFree(pVmdkFile);
639 *ppVmdkFile = NULL;
640 }
641
642 return rc;
643}
644
645/**
646 * Internal: close a file, updating the file descriptor cache.
647 */
648static int vmdkFileClose(PVMDKIMAGE pImage, PVMDKFILE *ppVmdkFile, bool fDelete)
649{
650 int rc = VINF_SUCCESS;
651 PVMDKFILE pVmdkFile = *ppVmdkFile;
652
653 AssertPtr(pVmdkFile);
654
655 pVmdkFile->fDelete |= fDelete;
656 Assert(pVmdkFile->uReferences);
657 pVmdkFile->uReferences--;
658 if (pVmdkFile->uReferences == 0)
659 {
660 PVMDKFILE pPrev;
661 PVMDKFILE pNext;
662
663 /* Unchain the element from the list. */
664 pPrev = pVmdkFile->pPrev;
665 pNext = pVmdkFile->pNext;
666
667 if (pNext)
668 pNext->pPrev = pPrev;
669 if (pPrev)
670 pPrev->pNext = pNext;
671 else
672 pImage->pFiles = pNext;
673
674 rc = pImage->pInterfaceIOCallbacks->pfnClose(pImage->pInterfaceIO->pvUser,
675 pVmdkFile->pStorage);
676 if (RT_SUCCESS(rc) && pVmdkFile->fDelete)
677 rc = pImage->pInterfaceIOCallbacks->pfnDelete(pImage->pInterfaceIO->pvUser,
678 pVmdkFile->pszFilename);
679 RTStrFree((char *)(void *)pVmdkFile->pszFilename);
680 RTMemFree(pVmdkFile);
681 }
682
683 *ppVmdkFile = NULL;
684 return rc;
685}
686
687/**
688 * Internal: rename a file (sync)
689 */
690DECLINLINE(int) vmdkFileMove(PVMDKIMAGE pImage, const char *pszSrc,
691 const char *pszDst, unsigned fMove)
692{
693 return pImage->pInterfaceIOCallbacks->pfnMove(pImage->pInterfaceIO->pvUser,
694 pszSrc, pszDst, fMove);
695}
696
697/**
698 * Internal: get the size of a file (sync/async)
699 */
700DECLINLINE(int) vmdkFileGetSize(PVMDKIMAGE pImage, PVMDKFILE pVmdkFile,
701 uint64_t *pcbSize)
702{
703 return pImage->pInterfaceIOCallbacks->pfnGetSize(pImage->pInterfaceIO->pvUser,
704 pVmdkFile->pStorage,
705 pcbSize);
706}
707
708/**
709 * Internal: set the size of a file (sync/async)
710 */
711DECLINLINE(int) vmdkFileSetSize(PVMDKIMAGE pImage, PVMDKFILE pVmdkFile,
712 uint64_t cbSize)
713{
714 return pImage->pInterfaceIOCallbacks->pfnSetSize(pImage->pInterfaceIO->pvUser,
715 pVmdkFile->pStorage,
716 cbSize);
717}
718
719/**
720 * Internal: read from a file (sync)
721 */
722DECLINLINE(int) vmdkFileReadSync(PVMDKIMAGE pImage, PVMDKFILE pVmdkFile,
723 uint64_t uOffset, void *pvBuf,
724 size_t cbToRead, size_t *pcbRead)
725{
726 return pImage->pInterfaceIOCallbacks->pfnReadSync(pImage->pInterfaceIO->pvUser,
727 pVmdkFile->pStorage, uOffset,
728 pvBuf, cbToRead, pcbRead);
729}
730
731/**
732 * Internal: write to a file (sync)
733 */
734DECLINLINE(int) vmdkFileWriteSync(PVMDKIMAGE pImage, PVMDKFILE pVmdkFile,
735 uint64_t uOffset, const void *pvBuf,
736 size_t cbToWrite, size_t *pcbWritten)
737{
738 return pImage->pInterfaceIOCallbacks->pfnWriteSync(pImage->pInterfaceIO->pvUser,
739 pVmdkFile->pStorage, uOffset,
740 pvBuf, cbToWrite, pcbWritten);
741}
742
743/**
744 * Internal: flush a file (sync)
745 */
746DECLINLINE(int) vmdkFileFlush(PVMDKIMAGE pImage, PVMDKFILE pVmdkFile)
747{
748 return pImage->pInterfaceIOCallbacks->pfnFlushSync(pImage->pInterfaceIO->pvUser,
749 pVmdkFile->pStorage);
750}
751
752/**
753 * Internal: read user data (async)
754 */
755DECLINLINE(int) vmdkFileReadUserAsync(PVMDKIMAGE pImage, PVMDKFILE pVmdkFile,
756 uint64_t uOffset, PVDIOCTX pIoCtx,
757 size_t cbRead)
758{
759 return pImage->pInterfaceIOCallbacks->pfnReadUserAsync(pImage->pInterfaceIO->pvUser,
760 pVmdkFile->pStorage,
761 uOffset, pIoCtx,
762 cbRead);
763}
764
765/**
766 * Internal: write user data (async)
767 */
768DECLINLINE(int) vmdkFileWriteUserAsync(PVMDKIMAGE pImage, PVMDKFILE pVmdkFile,
769 uint64_t uOffset, PVDIOCTX pIoCtx,
770 size_t cbWrite,
771 PFNVDXFERCOMPLETED pfnComplete,
772 void *pvCompleteUser)
773{
774 return pImage->pInterfaceIOCallbacks->pfnWriteUserAsync(pImage->pInterfaceIO->pvUser,
775 pVmdkFile->pStorage,
776 uOffset, pIoCtx,
777 cbWrite,
778 pfnComplete,
779 pvCompleteUser);
780}
781
782/**
783 * Internal: read metadata (async)
784 */
785DECLINLINE(int) vmdkFileReadMetaAsync(PVMDKIMAGE pImage, PVMDKFILE pVmdkFile,
786 uint64_t uOffset, void *pvBuffer,
787 size_t cbBuffer, PVDIOCTX pIoCtx,
788 PPVDMETAXFER ppMetaXfer,
789 PFNVDXFERCOMPLETED pfnComplete,
790 void *pvCompleteUser)
791{
792 return pImage->pInterfaceIOCallbacks->pfnReadMetaAsync(pImage->pInterfaceIO->pvUser,
793 pVmdkFile->pStorage,
794 uOffset, pvBuffer,
795 cbBuffer, pIoCtx,
796 ppMetaXfer,
797 pfnComplete,
798 pvCompleteUser);
799}
800
801/**
802 * Internal: write metadata (async)
803 */
804DECLINLINE(int) vmdkFileWriteMetaAsync(PVMDKIMAGE pImage, PVMDKFILE pVmdkFile,
805 uint64_t uOffset, void *pvBuffer,
806 size_t cbBuffer, PVDIOCTX pIoCtx,
807 PFNVDXFERCOMPLETED pfnComplete,
808 void *pvCompleteUser)
809{
810 return pImage->pInterfaceIOCallbacks->pfnWriteMetaAsync(pImage->pInterfaceIO->pvUser,
811 pVmdkFile->pStorage,
812 uOffset, pvBuffer,
813 cbBuffer, pIoCtx,
814 pfnComplete,
815 pvCompleteUser);
816}
817
818/**
819 * Internal: releases a metadata transfer handle (async)
820 */
821DECLINLINE(void) vmdkFileMetaXferRelease(PVMDKIMAGE pImage, PVDMETAXFER pMetaXfer)
822{
823 pImage->pInterfaceIOCallbacks->pfnMetaXferRelease(pImage->pInterfaceIO->pvUser,
824 pMetaXfer);
825}
826
827/**
828 * Internal: flush a file (async)
829 */
830DECLINLINE(int) vmdkFileFlushAsync(PVMDKIMAGE pImage, PVMDKFILE pVmdkFile,
831 PVDIOCTX pIoCtx)
832{
833 return pImage->pInterfaceIOCallbacks->pfnFlushAsync(pImage->pInterfaceIO->pvUser,
834 pVmdkFile->pStorage, pIoCtx,
835 NULL, NULL);
836}
837
838/**
839 * Internal: sets the buffer to a specific byte (async)
840 */
841DECLINLINE(int) vmdkFileIoCtxSet(PVMDKIMAGE pImage, PVDIOCTX pIoCtx,
842 int ch, size_t cbSet)
843{
844 return pImage->pInterfaceIOCallbacks->pfnIoCtxSet(pImage->pInterfaceIO->pvUser,
845 pIoCtx, ch, cbSet);
846}
847
848
849static DECLCALLBACK(int) vmdkFileInflateHelper(void *pvUser, void *pvBuf, size_t cbBuf, size_t *pcbBuf)
850{
851 VMDKINFLATESTATE *pInflateState = (VMDKINFLATESTATE *)pvUser;
852
853 Assert(cbBuf);
854 if (pInflateState->iOffset < 0)
855 {
856 *(uint8_t *)pvBuf = RTZIPTYPE_ZLIB;
857 if (pcbBuf)
858 *pcbBuf = 1;
859 pInflateState->iOffset = 0;
860 return VINF_SUCCESS;
861 }
862 cbBuf = RT_MIN(cbBuf, pInflateState->cbSize);
863 int rc = vmdkFileReadSync(pInflateState->pImage, pInflateState->pFile,
864 pInflateState->uFileOffset, pvBuf, cbBuf, NULL);
865 if (RT_FAILURE(rc))
866 return rc;
867 pInflateState->uFileOffset += cbBuf;
868 pInflateState->iOffset += cbBuf;
869 pInflateState->cbSize -= cbBuf;
870 Assert(pcbBuf);
871 *pcbBuf = cbBuf;
872 return VINF_SUCCESS;
873}
874
/**
 * Internal: read from a file and inflate the compressed data,
 * distinguishing between async and normal operation
 *
 * @param uOffset       Byte offset of the marker in the file.
 * @param pvBuf         Where to store the decompressed data (cbToRead bytes).
 * @param cbToRead      Expected size of the decompressed data.
 * @param uMarker       Expected marker type (VMDK_MARKER_*), or
 *                      VMDK_MARKER_IGNORE to accept a compressed grain marker
 *                      without checking the type field.
 * @param puLBA         Where to return the grain LBA (optional, grain markers
 *                      only).
 * @param pcbMarkerData Where to return the total on-disk size of marker plus
 *                      compressed payload (optional).
 */
DECLINLINE(int) vmdkFileInflateSync(PVMDKIMAGE pImage, PVMDKFILE pVmdkFile,
                                    uint64_t uOffset, void *pvBuf,
                                    size_t cbToRead, unsigned uMarker,
                                    uint64_t *puLBA, uint32_t *pcbMarkerData)
{
    if (pVmdkFile->fAsyncIO)
    {
        AssertMsgFailed(("TODO\n"));
        return VERR_NOT_SUPPORTED;
    }
    else
    {
        int rc;
        PRTZIPDECOMP pZip = NULL;
        VMDKMARKER Marker;
        uint64_t uCompOffset, cbComp;
        VMDKINFLATESTATE InflateState;
        size_t cbActuallyRead;
        size_t cbMarker = sizeof(Marker);

        /* For compressed grain markers the uType field overlaps the start of
         * the compressed data, so don't read it in that case. */
        if (uMarker == VMDK_MARKER_IGNORE)
            cbMarker -= sizeof(Marker.uType);
        rc = vmdkFileReadSync(pImage, pVmdkFile, uOffset, &Marker, cbMarker, NULL);
        if (RT_FAILURE(rc))
            return rc;
        /* On-disk format is little-endian. */
        Marker.uSector = RT_LE2H_U64(Marker.uSector);
        Marker.cbSize = RT_LE2H_U32(Marker.cbSize);
        /* When a specific metadata marker is expected, it must match and
         * cbSize must be 0 (non-zero means compressed grain). */
        if (    uMarker != VMDK_MARKER_IGNORE
            &&  (   RT_LE2H_U32(Marker.uType) != uMarker
                 || Marker.cbSize != 0))
            return VERR_VD_VMDK_INVALID_FORMAT;
        if (Marker.cbSize != 0)
        {
            /* Compressed grain marker. Data follows immediately, after the
             * 12 bytes of uSector + cbSize. */
            uCompOffset = uOffset + 12;
            cbComp = Marker.cbSize;
            if (puLBA)
                *puLBA = Marker.uSector;
            if (pcbMarkerData)
                *pcbMarkerData = cbComp + 12;
        }
        else
        {
            Marker.uType = RT_LE2H_U32(Marker.uType);
            if (Marker.uType == VMDK_MARKER_EOS)
            {
                /* End-of-stream must never be hit when a payload is expected. */
                Assert(uMarker != VMDK_MARKER_EOS);
                return VERR_VD_VMDK_INVALID_FORMAT;
            }
            else if (   Marker.uType == VMDK_MARKER_GT
                     || Marker.uType == VMDK_MARKER_GD
                     || Marker.uType == VMDK_MARKER_FOOTER)
            {
                /* Metadata markers: payload starts one sector after the
                 * marker, its size (in sectors) is in uSector. */
                uCompOffset = uOffset + 512;
                cbComp = VMDK_SECTOR2BYTE(Marker.uSector);
                if (pcbMarkerData)
                    *pcbMarkerData = cbComp + 512;
            }
            else
            {
                AssertMsgFailed(("VMDK: unknown marker type %u\n", Marker.uType));
                return VERR_VD_VMDK_INVALID_FORMAT;
            }
        }
        InflateState.pImage = pImage;
        InflateState.pFile = pVmdkFile;
        InflateState.cbSize = cbComp;
        InflateState.uFileOffset = uCompOffset;
        InflateState.iOffset = -1;
        /* Sanity check - the expansion ratio should be much less than 2. */
        Assert(cbComp < 2 * cbToRead);
        if (cbComp >= 2 * cbToRead)
            return VERR_VD_VMDK_INVALID_FORMAT;

        rc = RTZipDecompCreate(&pZip, &InflateState, vmdkFileInflateHelper);
        if (RT_FAILURE(rc))
            return rc;
        rc = RTZipDecompress(pZip, pvBuf, cbToRead, &cbActuallyRead);
        RTZipDecompDestroy(pZip);
        if (RT_FAILURE(rc))
            return rc;
        /* Anything but an exact-size result means the image is corrupt. */
        if (cbActuallyRead != cbToRead)
            rc = VERR_VD_VMDK_INVALID_FORMAT;
        return rc;
    }
}
965
966static DECLCALLBACK(int) vmdkFileDeflateHelper(void *pvUser, const void *pvBuf, size_t cbBuf)
967{
968 VMDKDEFLATESTATE *pDeflateState = (VMDKDEFLATESTATE *)pvUser;
969
970 Assert(cbBuf);
971 if (pDeflateState->iOffset < 0)
972 {
973 pvBuf = (const uint8_t *)pvBuf + 1;
974 cbBuf--;
975 pDeflateState->iOffset = 0;
976 }
977 if (!cbBuf)
978 return VINF_SUCCESS;
979 int rc = vmdkFileWriteSync(pDeflateState->pImage, pDeflateState->pFile,
980 pDeflateState->uFileOffset, pvBuf, cbBuf, NULL);
981 if (RT_FAILURE(rc))
982 return rc;
983 pDeflateState->uFileOffset += cbBuf;
984 pDeflateState->iOffset += cbBuf;
985 return VINF_SUCCESS;
986}
987
988/**
989 * Internal: deflate the uncompressed data and write to a file,
990 * distinguishing between async and normal operation
991 */
992DECLINLINE(int) vmdkFileDeflateSync(PVMDKIMAGE pImage, PVMDKFILE pVmdkFile,
993 uint64_t uOffset, const void *pvBuf,
994 size_t cbToWrite, unsigned uMarker,
995 uint64_t uLBA, uint32_t *pcbMarkerData)
996{
997 if (pVmdkFile->fAsyncIO)
998 {
999 AssertMsgFailed(("TODO\n"));
1000 return VERR_NOT_SUPPORTED;
1001 }
1002 else
1003 {
1004 int rc;
1005 PRTZIPCOMP pZip = NULL;
1006 VMDKMARKER Marker;
1007 uint64_t uCompOffset, cbDecomp;
1008 VMDKDEFLATESTATE DeflateState;
1009
1010 Marker.uSector = RT_H2LE_U64(uLBA);
1011 Marker.cbSize = RT_H2LE_U32((uint32_t)cbToWrite);
1012 if (uMarker == VMDK_MARKER_IGNORE)
1013 {
1014 /* Compressed grain marker. Data follows immediately. */
1015 uCompOffset = uOffset + 12;
1016 cbDecomp = cbToWrite;
1017 }
1018 else
1019 {
1020 /** @todo implement creating the other marker types */
1021 return VERR_NOT_IMPLEMENTED;
1022 }
1023 DeflateState.pImage = pImage;
1024 DeflateState.pFile = pVmdkFile;
1025 DeflateState.uFileOffset = uCompOffset;
1026 DeflateState.iOffset = -1;
1027
1028 rc = RTZipCompCreate(&pZip, &DeflateState, vmdkFileDeflateHelper, RTZIPTYPE_ZLIB, RTZIPLEVEL_DEFAULT);
1029 if (RT_FAILURE(rc))
1030 return rc;
1031 rc = RTZipCompress(pZip, pvBuf, cbDecomp);
1032 if (RT_SUCCESS(rc))
1033 rc = RTZipCompFinish(pZip);
1034 RTZipCompDestroy(pZip);
1035 if (RT_SUCCESS(rc))
1036 {
1037 if (pcbMarkerData)
1038 *pcbMarkerData = 12 + DeflateState.iOffset;
1039 /* Set the file size to remove old garbage in case the block is
1040 * rewritten. Cannot cause data loss as the code calling this
1041 * guarantees that data gets only appended. */
1042 Assert(DeflateState.uFileOffset > uCompOffset);
1043
1044 /*
1045 * Change the file size only if the size really changed,
1046 * because this is very expensive on some filesystems
1047 * like XFS.
1048 */
1049 uint64_t cbOld;
1050 rc = vmdkFileGetSize(pImage, pVmdkFile, &cbOld);
1051 if (RT_FAILURE(rc))
1052 return rc;
1053
1054 if (cbOld != DeflateState.uFileOffset)
1055 rc = vmdkFileSetSize(pImage, pVmdkFile, DeflateState.uFileOffset);
1056
1057 if (uMarker == VMDK_MARKER_IGNORE)
1058 {
1059 /* Compressed grain marker. */
1060 Marker.cbSize = RT_H2LE_U32(DeflateState.iOffset);
1061 rc = vmdkFileWriteSync(pImage, pVmdkFile, uOffset, &Marker, 12, NULL);
1062 if (RT_FAILURE(rc))
1063 return rc;
1064 }
1065 else
1066 {
1067 /** @todo implement creating the other marker types */
1068 return VERR_NOT_IMPLEMENTED;
1069 }
1070 }
1071 return rc;
1072 }
1073}
1074
1075/**
1076 * Internal: check if all files are closed, prevent leaking resources.
1077 */
1078static int vmdkFileCheckAllClose(PVMDKIMAGE pImage)
1079{
1080 int rc = VINF_SUCCESS, rc2;
1081 PVMDKFILE pVmdkFile;
1082
1083 Assert(pImage->pFiles == NULL);
1084 for (pVmdkFile = pImage->pFiles;
1085 pVmdkFile != NULL;
1086 pVmdkFile = pVmdkFile->pNext)
1087 {
1088 LogRel(("VMDK: leaking reference to file \"%s\"\n",
1089 pVmdkFile->pszFilename));
1090 pImage->pFiles = pVmdkFile->pNext;
1091
1092 rc2 = vmdkFileClose(pImage, &pVmdkFile, pVmdkFile->fDelete);
1093
1094 if (RT_SUCCESS(rc))
1095 rc = rc2;
1096 }
1097 return rc;
1098}
1099
1100/**
1101 * Internal: truncate a string (at a UTF8 code point boundary) and encode the
1102 * critical non-ASCII characters.
1103 */
1104static char *vmdkEncodeString(const char *psz)
1105{
1106 char szEnc[VMDK_ENCODED_COMMENT_MAX + 3];
1107 char *pszDst = szEnc;
1108
1109 AssertPtr(psz);
1110
1111 for (; *psz; psz = RTStrNextCp(psz))
1112 {
1113 char *pszDstPrev = pszDst;
1114 RTUNICP Cp = RTStrGetCp(psz);
1115 if (Cp == '\\')
1116 {
1117 pszDst = RTStrPutCp(pszDst, Cp);
1118 pszDst = RTStrPutCp(pszDst, Cp);
1119 }
1120 else if (Cp == '\n')
1121 {
1122 pszDst = RTStrPutCp(pszDst, '\\');
1123 pszDst = RTStrPutCp(pszDst, 'n');
1124 }
1125 else if (Cp == '\r')
1126 {
1127 pszDst = RTStrPutCp(pszDst, '\\');
1128 pszDst = RTStrPutCp(pszDst, 'r');
1129 }
1130 else
1131 pszDst = RTStrPutCp(pszDst, Cp);
1132 if (pszDst - szEnc >= VMDK_ENCODED_COMMENT_MAX - 1)
1133 {
1134 pszDst = pszDstPrev;
1135 break;
1136 }
1137 }
1138 *pszDst = '\0';
1139 return RTStrDup(szEnc);
1140}
1141
1142/**
1143 * Internal: decode a string and store it into the specified string.
1144 */
1145static int vmdkDecodeString(const char *pszEncoded, char *psz, size_t cb)
1146{
1147 int rc = VINF_SUCCESS;
1148 char szBuf[4];
1149
1150 if (!cb)
1151 return VERR_BUFFER_OVERFLOW;
1152
1153 AssertPtr(psz);
1154
1155 for (; *pszEncoded; pszEncoded = RTStrNextCp(pszEncoded))
1156 {
1157 char *pszDst = szBuf;
1158 RTUNICP Cp = RTStrGetCp(pszEncoded);
1159 if (Cp == '\\')
1160 {
1161 pszEncoded = RTStrNextCp(pszEncoded);
1162 RTUNICP CpQ = RTStrGetCp(pszEncoded);
1163 if (CpQ == 'n')
1164 RTStrPutCp(pszDst, '\n');
1165 else if (CpQ == 'r')
1166 RTStrPutCp(pszDst, '\r');
1167 else if (CpQ == '\0')
1168 {
1169 rc = VERR_VD_VMDK_INVALID_HEADER;
1170 break;
1171 }
1172 else
1173 RTStrPutCp(pszDst, CpQ);
1174 }
1175 else
1176 pszDst = RTStrPutCp(pszDst, Cp);
1177
1178 /* Need to leave space for terminating NUL. */
1179 if ((size_t)(pszDst - szBuf) + 1 >= cb)
1180 {
1181 rc = VERR_BUFFER_OVERFLOW;
1182 break;
1183 }
1184 memcpy(psz, szBuf, pszDst - szBuf);
1185 psz += pszDst - szBuf;
1186 }
1187 *psz = '\0';
1188 return rc;
1189}
1190
1191static int vmdkReadGrainDirectory(PVMDKIMAGE pImage, PVMDKEXTENT pExtent)
1192{
1193 int rc = VINF_SUCCESS;
1194 unsigned i;
1195 uint32_t *pGD = NULL, *pRGD = NULL, *pGDTmp, *pRGDTmp;
1196 size_t cbGD = pExtent->cGDEntries * sizeof(uint32_t);
1197
1198 if (pExtent->enmType != VMDKETYPE_HOSTED_SPARSE)
1199 goto out;
1200
1201 pGD = (uint32_t *)RTMemAllocZ(cbGD);
1202 if (!pGD)
1203 {
1204 rc = VERR_NO_MEMORY;
1205 goto out;
1206 }
1207 pExtent->pGD = pGD;
1208 /* The VMDK 1.1 spec talks about compressed grain directories, but real
1209 * life files don't have them. The spec is wrong in creative ways. */
1210 rc = vmdkFileReadSync(pImage, pExtent->pFile,
1211 VMDK_SECTOR2BYTE(pExtent->uSectorGD),
1212 pGD, cbGD, NULL);
1213 AssertRC(rc);
1214 if (RT_FAILURE(rc))
1215 {
1216 rc = vmdkError(pImage, rc, RT_SRC_POS, N_("VMDK: could not read grain directory in '%s': %Rrc"), pExtent->pszFullname);
1217 goto out;
1218 }
1219 for (i = 0, pGDTmp = pGD; i < pExtent->cGDEntries; i++, pGDTmp++)
1220 *pGDTmp = RT_LE2H_U32(*pGDTmp);
1221
1222 if (pExtent->uSectorRGD)
1223 {
1224 pRGD = (uint32_t *)RTMemAllocZ(cbGD);
1225 if (!pRGD)
1226 {
1227 rc = VERR_NO_MEMORY;
1228 goto out;
1229 }
1230 pExtent->pRGD = pRGD;
1231 /* The VMDK 1.1 spec talks about compressed grain directories, but real
1232 * life files don't have them. The spec is wrong in creative ways. */
1233 rc = vmdkFileReadSync(pImage, pExtent->pFile,
1234 VMDK_SECTOR2BYTE(pExtent->uSectorRGD),
1235 pRGD, cbGD, NULL);
1236 AssertRC(rc);
1237 if (RT_FAILURE(rc))
1238 {
1239 rc = vmdkError(pImage, rc, RT_SRC_POS, N_("VMDK: could not read redundant grain directory in '%s'"), pExtent->pszFullname);
1240 goto out;
1241 }
1242 for (i = 0, pRGDTmp = pRGD; i < pExtent->cGDEntries; i++, pRGDTmp++)
1243 *pRGDTmp = RT_LE2H_U32(*pRGDTmp);
1244
1245 /* Check grain table and redundant grain table for consistency. */
1246 size_t cbGT = pExtent->cGTEntries * sizeof(uint32_t);
1247 uint32_t *pTmpGT1 = (uint32_t *)RTMemTmpAlloc(cbGT);
1248 if (!pTmpGT1)
1249 {
1250 rc = VERR_NO_MEMORY;
1251 goto out;
1252 }
1253 uint32_t *pTmpGT2 = (uint32_t *)RTMemTmpAlloc(cbGT);
1254 if (!pTmpGT2)
1255 {
1256 RTMemTmpFree(pTmpGT1);
1257 rc = VERR_NO_MEMORY;
1258 goto out;
1259 }
1260
1261 for (i = 0, pGDTmp = pGD, pRGDTmp = pRGD;
1262 i < pExtent->cGDEntries;
1263 i++, pGDTmp++, pRGDTmp++)
1264 {
1265 /* If no grain table is allocated skip the entry. */
1266 if (*pGDTmp == 0 && *pRGDTmp == 0)
1267 continue;
1268
1269 if (*pGDTmp == 0 || *pRGDTmp == 0 || *pGDTmp == *pRGDTmp)
1270 {
1271 /* Just one grain directory entry refers to a not yet allocated
1272 * grain table or both grain directory copies refer to the same
1273 * grain table. Not allowed. */
1274 RTMemTmpFree(pTmpGT1);
1275 RTMemTmpFree(pTmpGT2);
1276 rc = vmdkError(pImage, VERR_VD_VMDK_INVALID_HEADER, RT_SRC_POS, N_("VMDK: inconsistent references to grain directory in '%s'"), pExtent->pszFullname);
1277 goto out;
1278 }
1279 /* The VMDK 1.1 spec talks about compressed grain tables, but real
1280 * life files don't have them. The spec is wrong in creative ways. */
1281 rc = vmdkFileReadSync(pImage, pExtent->pFile,
1282 VMDK_SECTOR2BYTE(*pGDTmp),
1283 pTmpGT1, cbGT, NULL);
1284 if (RT_FAILURE(rc))
1285 {
1286 rc = vmdkError(pImage, rc, RT_SRC_POS, N_("VMDK: error reading grain table in '%s'"), pExtent->pszFullname);
1287 RTMemTmpFree(pTmpGT1);
1288 RTMemTmpFree(pTmpGT2);
1289 goto out;
1290 }
1291 /* The VMDK 1.1 spec talks about compressed grain tables, but real
1292 * life files don't have them. The spec is wrong in creative ways. */
1293 rc = vmdkFileReadSync(pImage, pExtent->pFile,
1294 VMDK_SECTOR2BYTE(*pRGDTmp),
1295 pTmpGT2, cbGT, NULL);
1296 if (RT_FAILURE(rc))
1297 {
1298 rc = vmdkError(pImage, rc, RT_SRC_POS, N_("VMDK: error reading backup grain table in '%s'"), pExtent->pszFullname);
1299 RTMemTmpFree(pTmpGT1);
1300 RTMemTmpFree(pTmpGT2);
1301 goto out;
1302 }
1303 if (memcmp(pTmpGT1, pTmpGT2, cbGT))
1304 {
1305 RTMemTmpFree(pTmpGT1);
1306 RTMemTmpFree(pTmpGT2);
1307 rc = vmdkError(pImage, VERR_VD_VMDK_INVALID_HEADER, RT_SRC_POS, N_("VMDK: inconsistency between grain table and backup grain table in '%s'"), pExtent->pszFullname);
1308 goto out;
1309 }
1310 }
1311
1312 /** @todo figure out what to do for unclean VMDKs. */
1313 RTMemTmpFree(pTmpGT1);
1314 RTMemTmpFree(pTmpGT2);
1315 }
1316
1317 if (pImage->uImageFlags & VD_VMDK_IMAGE_FLAGS_STREAM_OPTIMIZED)
1318 {
1319 uint32_t uLastGrainWritten = 0;
1320 uint32_t uLastGrainSector = 0;
1321 size_t cbGT = pExtent->cGTEntries * sizeof(uint32_t);
1322 uint32_t *pTmpGT = (uint32_t *)RTMemTmpAlloc(cbGT);
1323 if (!pTmpGT)
1324 {
1325 rc = VERR_NO_MEMORY;
1326 goto out;
1327 }
1328 for (i = 0, pGDTmp = pGD; i < pExtent->cGDEntries; i++, pGDTmp++)
1329 {
1330 /* If no grain table is allocated skip the entry. */
1331 if (*pGDTmp == 0)
1332 continue;
1333
1334 /* The VMDK 1.1 spec talks about compressed grain tables, but real
1335 * life files don't have them. The spec is wrong in creative ways. */
1336 rc = vmdkFileReadSync(pImage, pExtent->pFile,
1337 VMDK_SECTOR2BYTE(*pGDTmp),
1338 pTmpGT, cbGT, NULL);
1339 if (RT_FAILURE(rc))
1340 {
1341 rc = vmdkError(pImage, rc, RT_SRC_POS, N_("VMDK: error reading grain table in '%s'"), pExtent->pszFullname);
1342 RTMemTmpFree(pTmpGT);
1343 goto out;
1344 }
1345 uint32_t j;
1346 uint32_t *pGTTmp;
1347 for (j = 0, pGTTmp = pTmpGT; j < pExtent->cGTEntries; j++, pGTTmp++)
1348 {
1349 uint32_t uGTTmp = RT_LE2H_U32(*pGTTmp);
1350
1351 /* If no grain is allocated skip the entry. */
1352 if (uGTTmp == 0)
1353 continue;
1354
1355 if (uLastGrainSector && uLastGrainSector >= uGTTmp)
1356 {
1357 rc = vmdkError(pImage, rc, RT_SRC_POS, N_("VMDK: grain table in '%s' contains a violation of the ordering assumptions"), pExtent->pszFullname);
1358 RTMemTmpFree(pTmpGT);
1359 goto out;
1360 }
1361 uLastGrainSector = uGTTmp;
1362 uLastGrainWritten = i * pExtent->cGTEntries + j;
1363 }
1364 }
1365 RTMemTmpFree(pTmpGT);
1366
1367 /* streamOptimized extents need a grain decompress buffer. */
1368 pExtent->pvGrain = RTMemAlloc(VMDK_SECTOR2BYTE(pExtent->cSectorsPerGrain));
1369 if (!pExtent->pvGrain)
1370 {
1371 rc = VERR_NO_MEMORY;
1372 goto out;
1373 }
1374
1375 if (uLastGrainSector)
1376 {
1377 uint64_t uLBA = 0;
1378 uint32_t cbMarker = 0;
1379 rc = vmdkFileInflateSync(pImage, pExtent->pFile, VMDK_SECTOR2BYTE(uLastGrainSector),
1380 pExtent->pvGrain, VMDK_SECTOR2BYTE(pExtent->cSectorsPerGrain), VMDK_MARKER_IGNORE, &uLBA, &cbMarker);
1381 if (RT_FAILURE(rc))
1382 goto out;
1383
1384 Assert(uLBA == uLastGrainWritten * pExtent->cSectorsPerGrain);
1385 pExtent->uGrainSector = uLastGrainSector;
1386 pExtent->cbLastGrainWritten = RT_ALIGN(cbMarker, 512);
1387 }
1388 pExtent->uLastGrainWritten = uLastGrainWritten;
1389 pExtent->uLastGrainSector = uLastGrainSector;
1390 }
1391
1392out:
1393 if (RT_FAILURE(rc))
1394 vmdkFreeGrainDirectory(pExtent);
1395 return rc;
1396}
1397
/**
 * Internal: allocate and initialize the grain directory (and the redundant
 * copy) for a newly created extent, optionally pre-allocating all grain
 * tables and writing both directories to the file.
 *
 * @returns VBox status code. On failure the allocated directories are freed.
 * @param   pImage        Image instance data.
 * @param   pExtent       Extent to create the grain directory for.
 * @param   uStartSector  Sector where the overhead data starts, or
 *                        VMDK_GD_AT_END to place the directory at the end.
 * @param   fPreAlloc     Whether to pre-allocate the grain tables and write
 *                        the directory entries to disk.
 */
static int vmdkCreateGrainDirectory(PVMDKIMAGE pImage, PVMDKEXTENT pExtent,
                                    uint64_t uStartSector, bool fPreAlloc)
{
    int rc = VINF_SUCCESS;
    unsigned i;
    uint32_t *pGD = NULL, *pRGD = NULL;
    size_t cbGD = pExtent->cGDEntries * sizeof(uint32_t);
    size_t cbGDRounded = RT_ALIGN_64(pExtent->cGDEntries * sizeof(uint32_t), 512);
    size_t cbGTRounded;
    uint64_t cbOverhead;

    /* Only pre-allocation reserves room for all grain tables up front. */
    if (fPreAlloc)
        cbGTRounded = RT_ALIGN_64(pExtent->cGDEntries * pExtent->cGTEntries * sizeof(uint32_t), 512);
    else
        cbGTRounded = 0;

    pGD = (uint32_t *)RTMemAllocZ(cbGD);
    if (!pGD)
    {
        rc = VERR_NO_MEMORY;
        goto out;
    }
    pExtent->pGD = pGD;
    pRGD = (uint32_t *)RTMemAllocZ(cbGD);
    if (!pRGD)
    {
        rc = VERR_NO_MEMORY;
        goto out;
    }
    pExtent->pRGD = pRGD;

    if (uStartSector != VMDK_GD_AT_END)
    {
        /* Overhead covers both directory copies (and pre-allocated tables),
         * rounded up to a full grain. */
        cbOverhead = RT_ALIGN_64(VMDK_SECTOR2BYTE(uStartSector) + 2 * (cbGDRounded + cbGTRounded), VMDK_SECTOR2BYTE(pExtent->cSectorsPerGrain));
        /* For streamOptimized extents put the end-of-stream marker at the end. */
        if (pImage->uImageFlags & VD_VMDK_IMAGE_FLAGS_STREAM_OPTIMIZED)
            rc = vmdkFileSetSize(pImage, pExtent->pFile, cbOverhead + 512);
        else
            rc = vmdkFileSetSize(pImage, pExtent->pFile, cbOverhead);
        if (RT_FAILURE(rc))
            goto out;
        /* Redundant directory first, then the primary one. */
        pExtent->uSectorRGD = uStartSector;
        pExtent->uSectorGD = uStartSector + VMDK_BYTE2SECTOR(cbGDRounded + cbGTRounded);
    }
    else
    {
        cbOverhead = VMDK_SECTOR2BYTE(uStartSector);
        pExtent->uSectorRGD = uStartSector;
        pExtent->uSectorGD = uStartSector;
    }

    if (fPreAlloc)
    {
        uint32_t uGTSectorLE;
        uint64_t uOffsetSectors;

        /* NOTE(review): uGTSectorLE is 32 bits but RT_H2LE_U64 is used below;
         * on a big-endian host the truncation would keep the wrong half of
         * the swapped value. Harmless on little-endian hosts, where the
         * low 32 bits are the correct LE representation - confirm intent. */
        uOffsetSectors = pExtent->uSectorRGD + VMDK_BYTE2SECTOR(cbGDRounded);
        for (i = 0; i < pExtent->cGDEntries; i++)
        {
            pRGD[i] = uOffsetSectors;
            uGTSectorLE = RT_H2LE_U64(uOffsetSectors);
            /* Write the redundant grain directory entry to disk. */
            rc = vmdkFileWriteSync(pImage, pExtent->pFile,
                                   VMDK_SECTOR2BYTE(pExtent->uSectorRGD) + i * sizeof(uGTSectorLE),
                                   &uGTSectorLE, sizeof(uGTSectorLE), NULL);
            if (RT_FAILURE(rc))
            {
                rc = vmdkError(pImage, rc, RT_SRC_POS, N_("VMDK: cannot write new redundant grain directory entry in '%s'"), pExtent->pszFullname);
                goto out;
            }
            uOffsetSectors += VMDK_BYTE2SECTOR(pExtent->cGTEntries * sizeof(uint32_t));
        }

        uOffsetSectors = pExtent->uSectorGD + VMDK_BYTE2SECTOR(cbGDRounded);
        for (i = 0; i < pExtent->cGDEntries; i++)
        {
            pGD[i] = uOffsetSectors;
            uGTSectorLE = RT_H2LE_U64(uOffsetSectors);
            /* Write the grain directory entry to disk. */
            rc = vmdkFileWriteSync(pImage, pExtent->pFile,
                                   VMDK_SECTOR2BYTE(pExtent->uSectorGD) + i * sizeof(uGTSectorLE),
                                   &uGTSectorLE, sizeof(uGTSectorLE), NULL);
            if (RT_FAILURE(rc))
            {
                rc = vmdkError(pImage, rc, RT_SRC_POS, N_("VMDK: cannot write new grain directory entry in '%s'"), pExtent->pszFullname);
                goto out;
            }
            uOffsetSectors += VMDK_BYTE2SECTOR(pExtent->cGTEntries * sizeof(uint32_t));
        }
    }
    pExtent->cOverheadSectors = VMDK_BYTE2SECTOR(cbOverhead);

    /* streamOptimized extents need a grain decompress buffer. */
    if (pImage->uImageFlags & VD_VMDK_IMAGE_FLAGS_STREAM_OPTIMIZED)
    {
        pExtent->pvGrain = RTMemAlloc(VMDK_SECTOR2BYTE(pExtent->cSectorsPerGrain));
        if (!pExtent->pvGrain)
        {
            rc = VERR_NO_MEMORY;
            goto out;
        }
    }

out:
    if (RT_FAILURE(rc))
        vmdkFreeGrainDirectory(pExtent);
    return rc;
}
1506
1507static void vmdkFreeGrainDirectory(PVMDKEXTENT pExtent)
1508{
1509 if (pExtent->pGD)
1510 {
1511 RTMemFree(pExtent->pGD);
1512 pExtent->pGD = NULL;
1513 }
1514 if (pExtent->pRGD)
1515 {
1516 RTMemFree(pExtent->pRGD);
1517 pExtent->pRGD = NULL;
1518 }
1519}
1520
1521static int vmdkStringUnquote(PVMDKIMAGE pImage, const char *pszStr,
1522 char **ppszUnquoted, char **ppszNext)
1523{
1524 char *pszQ;
1525 char *pszUnquoted;
1526
1527 /* Skip over whitespace. */
1528 while (*pszStr == ' ' || *pszStr == '\t')
1529 pszStr++;
1530
1531 if (*pszStr != '"')
1532 {
1533 pszQ = (char *)pszStr;
1534 while (*pszQ && *pszQ != ' ' && *pszQ != '\t')
1535 pszQ++;
1536 }
1537 else
1538 {
1539 pszStr++;
1540 pszQ = (char *)strchr(pszStr, '"');
1541 if (pszQ == NULL)
1542 return vmdkError(pImage, VERR_VD_VMDK_INVALID_HEADER, RT_SRC_POS, N_("VMDK: incorrectly quoted value in descriptor in '%s'"), pImage->pszFilename);
1543 }
1544
1545 pszUnquoted = (char *)RTMemTmpAlloc(pszQ - pszStr + 1);
1546 if (!pszUnquoted)
1547 return VERR_NO_MEMORY;
1548 memcpy(pszUnquoted, pszStr, pszQ - pszStr);
1549 pszUnquoted[pszQ - pszStr] = '\0';
1550 *ppszUnquoted = pszUnquoted;
1551 if (ppszNext)
1552 *ppszNext = pszQ + 1;
1553 return VINF_SUCCESS;
1554}
1555
1556static int vmdkDescInitStr(PVMDKIMAGE pImage, PVMDKDESCRIPTOR pDescriptor,
1557 const char *pszLine)
1558{
1559 char *pEnd = pDescriptor->aLines[pDescriptor->cLines];
1560 ssize_t cbDiff = strlen(pszLine) + 1;
1561
1562 if ( pDescriptor->cLines >= VMDK_DESCRIPTOR_LINES_MAX - 1
1563 && pEnd - pDescriptor->aLines[0] > (ptrdiff_t)pDescriptor->cbDescAlloc - cbDiff)
1564 return vmdkError(pImage, VERR_BUFFER_OVERFLOW, RT_SRC_POS, N_("VMDK: descriptor too big in '%s'"), pImage->pszFilename);
1565
1566 memcpy(pEnd, pszLine, cbDiff);
1567 pDescriptor->cLines++;
1568 pDescriptor->aLines[pDescriptor->cLines] = pEnd + cbDiff;
1569 pDescriptor->fDirty = true;
1570
1571 return VINF_SUCCESS;
1572}
1573
1574static bool vmdkDescGetStr(PVMDKDESCRIPTOR pDescriptor, unsigned uStart,
1575 const char *pszKey, const char **ppszValue)
1576{
1577 size_t cbKey = strlen(pszKey);
1578 const char *pszValue;
1579
1580 while (uStart != 0)
1581 {
1582 if (!strncmp(pDescriptor->aLines[uStart], pszKey, cbKey))
1583 {
1584 /* Key matches, check for a '=' (preceded by whitespace). */
1585 pszValue = pDescriptor->aLines[uStart] + cbKey;
1586 while (*pszValue == ' ' || *pszValue == '\t')
1587 pszValue++;
1588 if (*pszValue == '=')
1589 {
1590 *ppszValue = pszValue + 1;
1591 break;
1592 }
1593 }
1594 uStart = pDescriptor->aNextLines[uStart];
1595 }
1596 return !!uStart;
1597}
1598
/**
 * Internal: set, replace or delete (pszValue == NULL) a "key=value" entry in
 * the descriptor section whose line chain starts at uStart.
 *
 * The descriptor lines live in one contiguous buffer; replacing or removing
 * a value therefore shifts all following lines and patches aLines[] /
 * aNextLines[] plus the section start indices accordingly.
 *
 * @returns VBox status code. VERR_BUFFER_OVERFLOW when the descriptor
 *          buffer or line table would be exceeded.
 */
static int vmdkDescSetStr(PVMDKIMAGE pImage, PVMDKDESCRIPTOR pDescriptor,
                          unsigned uStart,
                          const char *pszKey, const char *pszValue)
{
    char *pszTmp;
    size_t cbKey = strlen(pszKey);
    unsigned uLast = 0;

    /* Search the chain; on a hit pszTmp is left pointing at the value. */
    while (uStart != 0)
    {
        if (!strncmp(pDescriptor->aLines[uStart], pszKey, cbKey))
        {
            /* Key matches, check for a '=' (preceded by whitespace). */
            pszTmp = pDescriptor->aLines[uStart] + cbKey;
            while (*pszTmp == ' ' || *pszTmp == '\t')
                pszTmp++;
            if (*pszTmp == '=')
            {
                pszTmp++;
                while (*pszTmp == ' ' || *pszTmp == '\t')
                    pszTmp++;
                break;
            }
        }
        if (!pDescriptor->aNextLines[uStart])
            uLast = uStart;
        uStart = pDescriptor->aNextLines[uStart];
    }
    if (uStart)
    {
        if (pszValue)
        {
            /* Key already exists, replace existing value. */
            size_t cbOldVal = strlen(pszTmp);
            size_t cbNewVal = strlen(pszValue);
            ssize_t cbDiff = cbNewVal - cbOldVal;
            /* Check for buffer overflow. */
            if (    pDescriptor->aLines[pDescriptor->cLines]
                -   pDescriptor->aLines[0] > (ptrdiff_t)pDescriptor->cbDescAlloc - cbDiff)
                return vmdkError(pImage, VERR_BUFFER_OVERFLOW, RT_SRC_POS, N_("VMDK: descriptor too big in '%s'"), pImage->pszFilename);

            /* Shift the tail of the buffer and splice the new value in. */
            memmove(pszTmp + cbNewVal, pszTmp + cbOldVal,
                    pDescriptor->aLines[pDescriptor->cLines] - pszTmp - cbOldVal);
            memcpy(pszTmp, pszValue, cbNewVal + 1);
            for (unsigned i = uStart + 1; i <= pDescriptor->cLines; i++)
                pDescriptor->aLines[i] += cbDiff;
        }
        else
        {
            /* Key exists and shall be removed: delete the whole line. */
            memmove(pDescriptor->aLines[uStart], pDescriptor->aLines[uStart+1],
                    pDescriptor->aLines[pDescriptor->cLines] - pDescriptor->aLines[uStart+1] + 1);
            for (unsigned i = uStart + 1; i <= pDescriptor->cLines; i++)
            {
                pDescriptor->aLines[i-1] = pDescriptor->aLines[i];
                if (pDescriptor->aNextLines[i])
                    pDescriptor->aNextLines[i-1] = pDescriptor->aNextLines[i] - 1;
                else
                    pDescriptor->aNextLines[i-1] = 0;
            }
            pDescriptor->cLines--;
            /* Adjust starting line numbers of following descriptor sections. */
            if (uStart < pDescriptor->uFirstExtent)
                pDescriptor->uFirstExtent--;
            if (uStart < pDescriptor->uFirstDDB)
                pDescriptor->uFirstDDB--;
        }
    }
    else
    {
        /* Key doesn't exist, append after the last entry in this category. */
        if (!pszValue)
        {
            /* Key doesn't exist, and it should be removed. Simply a no-op. */
            return VINF_SUCCESS;
        }
        cbKey = strlen(pszKey);
        size_t cbValue = strlen(pszValue);
        ssize_t cbDiff = cbKey + 1 + cbValue + 1;
        /* Check for buffer overflow. */
        if (   (pDescriptor->cLines >= VMDK_DESCRIPTOR_LINES_MAX - 1)
            || (   pDescriptor->aLines[pDescriptor->cLines]
                -  pDescriptor->aLines[0] > (ptrdiff_t)pDescriptor->cbDescAlloc - cbDiff))
            return vmdkError(pImage, VERR_BUFFER_OVERFLOW, RT_SRC_POS, N_("VMDK: descriptor too big in '%s'"), pImage->pszFilename);
        /* Open a gap in the line tables after uLast. */
        for (unsigned i = pDescriptor->cLines + 1; i > uLast + 1; i--)
        {
            pDescriptor->aLines[i] = pDescriptor->aLines[i - 1];
            if (pDescriptor->aNextLines[i - 1])
                pDescriptor->aNextLines[i] = pDescriptor->aNextLines[i - 1] + 1;
            else
                pDescriptor->aNextLines[i] = 0;
        }
        uStart = uLast + 1;
        pDescriptor->aNextLines[uLast] = uStart;
        pDescriptor->aNextLines[uStart] = 0;
        pDescriptor->cLines++;
        /* Open a gap in the text buffer and write "key=value\0" into it. */
        pszTmp = pDescriptor->aLines[uStart];
        memmove(pszTmp + cbDiff, pszTmp,
                pDescriptor->aLines[pDescriptor->cLines] - pszTmp);
        memcpy(pDescriptor->aLines[uStart], pszKey, cbKey);
        pDescriptor->aLines[uStart][cbKey] = '=';
        memcpy(pDescriptor->aLines[uStart] + cbKey + 1, pszValue, cbValue + 1);
        for (unsigned i = uStart + 1; i <= pDescriptor->cLines; i++)
            pDescriptor->aLines[i] += cbDiff;

        /* Adjust starting line numbers of following descriptor sections. */
        if (uStart <= pDescriptor->uFirstExtent)
            pDescriptor->uFirstExtent++;
        if (uStart <= pDescriptor->uFirstDDB)
            pDescriptor->uFirstDDB++;
    }
    pDescriptor->fDirty = true;
    return VINF_SUCCESS;
}
1712
1713static int vmdkDescBaseGetU32(PVMDKDESCRIPTOR pDescriptor, const char *pszKey,
1714 uint32_t *puValue)
1715{
1716 const char *pszValue;
1717
1718 if (!vmdkDescGetStr(pDescriptor, pDescriptor->uFirstDesc, pszKey,
1719 &pszValue))
1720 return VERR_VD_VMDK_VALUE_NOT_FOUND;
1721 return RTStrToUInt32Ex(pszValue, NULL, 10, puValue);
1722}
1723
1724static int vmdkDescBaseGetStr(PVMDKIMAGE pImage, PVMDKDESCRIPTOR pDescriptor,
1725 const char *pszKey, const char **ppszValue)
1726{
1727 const char *pszValue;
1728 char *pszValueUnquoted;
1729
1730 if (!vmdkDescGetStr(pDescriptor, pDescriptor->uFirstDesc, pszKey,
1731 &pszValue))
1732 return VERR_VD_VMDK_VALUE_NOT_FOUND;
1733 int rc = vmdkStringUnquote(pImage, pszValue, &pszValueUnquoted, NULL);
1734 if (RT_FAILURE(rc))
1735 return rc;
1736 *ppszValue = pszValueUnquoted;
1737 return rc;
1738}
1739
1740static int vmdkDescBaseSetStr(PVMDKIMAGE pImage, PVMDKDESCRIPTOR pDescriptor,
1741 const char *pszKey, const char *pszValue)
1742{
1743 char *pszValueQuoted;
1744
1745 int rc = RTStrAPrintf(&pszValueQuoted, "\"%s\"", pszValue);
1746 if (RT_FAILURE(rc))
1747 return rc;
1748 rc = vmdkDescSetStr(pImage, pDescriptor, pDescriptor->uFirstDesc, pszKey,
1749 pszValueQuoted);
1750 RTStrFree(pszValueQuoted);
1751 return rc;
1752}
1753
/**
 * Internal: remove the first line of the extent description section from the
 * descriptor and compact the line tables. No-op when there is no extent
 * section. (Used to drop a dummy extent entry - confirm with the callers.)
 */
static void vmdkDescExtRemoveDummy(PVMDKIMAGE pImage,
                                   PVMDKDESCRIPTOR pDescriptor)
{
    unsigned uEntry = pDescriptor->uFirstExtent;
    ssize_t cbDiff;

    /* Nothing to do without an extent description section. */
    if (!uEntry)
        return;

    cbDiff = strlen(pDescriptor->aLines[uEntry]) + 1;
    /* Move everything including \0 in the entry marking the end of buffer. */
    memmove(pDescriptor->aLines[uEntry], pDescriptor->aLines[uEntry + 1],
            pDescriptor->aLines[pDescriptor->cLines] - pDescriptor->aLines[uEntry + 1] + 1);
    /* Shift the line pointers down and renumber the chain links. */
    for (unsigned i = uEntry + 1; i <= pDescriptor->cLines; i++)
    {
        pDescriptor->aLines[i - 1] = pDescriptor->aLines[i] - cbDiff;
        if (pDescriptor->aNextLines[i])
            pDescriptor->aNextLines[i - 1] = pDescriptor->aNextLines[i] - 1;
        else
            pDescriptor->aNextLines[i - 1] = 0;
    }
    pDescriptor->cLines--;
    /* The DDB section (if any) moved up by one line as well. */
    if (pDescriptor->uFirstDDB)
        pDescriptor->uFirstDDB--;

    return;
}
1781
/**
 * Internal: append an extent description line to the extent section of the
 * descriptor.
 *
 * The line has the form "ACCESS SECTORS TYPE [\"basename\" [offset]]":
 * ZERO extents carry no file name, FLAT extents carry the file name plus
 * the start sector offset, all other types carry just the file name.
 *
 * @returns VBox status code. VERR_BUFFER_OVERFLOW when either the maximum
 *          line count or the descriptor buffer capacity would be exceeded.
 * @param   pImage           Image instance data (for error reporting).
 * @param   pDescriptor      Descriptor to modify.
 * @param   enmAccess        Access mode of the extent.
 * @param   cNominalSectors  Nominal size of the extent in sectors.
 * @param   enmType          Extent type.
 * @param   pszBasename      Extent file name (ignored for ZERO extents).
 * @param   uSectorOffset    Start offset in sectors (FLAT extents only).
 */
static int vmdkDescExtInsert(PVMDKIMAGE pImage, PVMDKDESCRIPTOR pDescriptor,
                             VMDKACCESS enmAccess, uint64_t cNominalSectors,
                             VMDKETYPE enmType, const char *pszBasename,
                             uint64_t uSectorOffset)
{
    static const char *apszAccess[] = { "NOACCESS", "RDONLY", "RW" };
    static const char *apszType[] = { "", "SPARSE", "FLAT", "ZERO", "VMFS" };
    char *pszTmp;
    unsigned uStart = pDescriptor->uFirstExtent, uLast = 0;
    char szExt[1024];
    ssize_t cbDiff;

    Assert((unsigned)enmAccess < RT_ELEMENTS(apszAccess));
    Assert((unsigned)enmType < RT_ELEMENTS(apszType));

    /* Find last entry in extent description. */
    while (uStart)
    {
        if (!pDescriptor->aNextLines[uStart])
            uLast = uStart;
        uStart = pDescriptor->aNextLines[uStart];
    }

    /* Format the new extent line according to the extent type. */
    if (enmType == VMDKETYPE_ZERO)
    {
        RTStrPrintf(szExt, sizeof(szExt), "%s %llu %s ", apszAccess[enmAccess],
                    cNominalSectors, apszType[enmType]);
    }
    else if (enmType == VMDKETYPE_FLAT)
    {
        RTStrPrintf(szExt, sizeof(szExt), "%s %llu %s \"%s\" %llu",
                    apszAccess[enmAccess], cNominalSectors,
                    apszType[enmType], pszBasename, uSectorOffset);
    }
    else
    {
        RTStrPrintf(szExt, sizeof(szExt), "%s %llu %s \"%s\"",
                    apszAccess[enmAccess], cNominalSectors,
                    apszType[enmType], pszBasename);
    }
    cbDiff = strlen(szExt) + 1;

    /* Check for buffer overflow. */
    if (   (pDescriptor->cLines >= VMDK_DESCRIPTOR_LINES_MAX - 1)
        || (   pDescriptor->aLines[pDescriptor->cLines]
            -  pDescriptor->aLines[0] > (ptrdiff_t)pDescriptor->cbDescAlloc - cbDiff))
        return vmdkError(pImage, VERR_BUFFER_OVERFLOW, RT_SRC_POS, N_("VMDK: descriptor too big in '%s'"), pImage->pszFilename);

    /* Open a gap in the line tables after the last extent line. */
    for (unsigned i = pDescriptor->cLines + 1; i > uLast + 1; i--)
    {
        pDescriptor->aLines[i] = pDescriptor->aLines[i - 1];
        if (pDescriptor->aNextLines[i - 1])
            pDescriptor->aNextLines[i] = pDescriptor->aNextLines[i - 1] + 1;
        else
            pDescriptor->aNextLines[i] = 0;
    }
    uStart = uLast + 1;
    pDescriptor->aNextLines[uLast] = uStart;
    pDescriptor->aNextLines[uStart] = 0;
    pDescriptor->cLines++;
    /* Open a gap in the text buffer and copy the new line into it. */
    pszTmp = pDescriptor->aLines[uStart];
    memmove(pszTmp + cbDiff, pszTmp,
            pDescriptor->aLines[pDescriptor->cLines] - pszTmp);
    memcpy(pDescriptor->aLines[uStart], szExt, cbDiff);
    for (unsigned i = uStart + 1; i <= pDescriptor->cLines; i++)
        pDescriptor->aLines[i] += cbDiff;

    /* Adjust starting line numbers of following descriptor sections. */
    if (uStart <= pDescriptor->uFirstDDB)
        pDescriptor->uFirstDDB++;

    pDescriptor->fDirty = true;
    return VINF_SUCCESS;
}
1856
1857static int vmdkDescDDBGetStr(PVMDKIMAGE pImage, PVMDKDESCRIPTOR pDescriptor,
1858 const char *pszKey, const char **ppszValue)
1859{
1860 const char *pszValue;
1861 char *pszValueUnquoted;
1862
1863 if (!vmdkDescGetStr(pDescriptor, pDescriptor->uFirstDDB, pszKey,
1864 &pszValue))
1865 return VERR_VD_VMDK_VALUE_NOT_FOUND;
1866 int rc = vmdkStringUnquote(pImage, pszValue, &pszValueUnquoted, NULL);
1867 if (RT_FAILURE(rc))
1868 return rc;
1869 *ppszValue = pszValueUnquoted;
1870 return rc;
1871}
1872
1873static int vmdkDescDDBGetU32(PVMDKIMAGE pImage, PVMDKDESCRIPTOR pDescriptor,
1874 const char *pszKey, uint32_t *puValue)
1875{
1876 const char *pszValue;
1877 char *pszValueUnquoted;
1878
1879 if (!vmdkDescGetStr(pDescriptor, pDescriptor->uFirstDDB, pszKey,
1880 &pszValue))
1881 return VERR_VD_VMDK_VALUE_NOT_FOUND;
1882 int rc = vmdkStringUnquote(pImage, pszValue, &pszValueUnquoted, NULL);
1883 if (RT_FAILURE(rc))
1884 return rc;
1885 rc = RTStrToUInt32Ex(pszValueUnquoted, NULL, 10, puValue);
1886 RTMemTmpFree(pszValueUnquoted);
1887 return rc;
1888}
1889
1890static int vmdkDescDDBGetUuid(PVMDKIMAGE pImage, PVMDKDESCRIPTOR pDescriptor,
1891 const char *pszKey, PRTUUID pUuid)
1892{
1893 const char *pszValue;
1894 char *pszValueUnquoted;
1895
1896 if (!vmdkDescGetStr(pDescriptor, pDescriptor->uFirstDDB, pszKey,
1897 &pszValue))
1898 return VERR_VD_VMDK_VALUE_NOT_FOUND;
1899 int rc = vmdkStringUnquote(pImage, pszValue, &pszValueUnquoted, NULL);
1900 if (RT_FAILURE(rc))
1901 return rc;
1902 rc = RTUuidFromStr(pUuid, pszValueUnquoted);
1903 RTMemTmpFree(pszValueUnquoted);
1904 return rc;
1905}
1906
1907static int vmdkDescDDBSetStr(PVMDKIMAGE pImage, PVMDKDESCRIPTOR pDescriptor,
1908 const char *pszKey, const char *pszVal)
1909{
1910 int rc;
1911 char *pszValQuoted;
1912
1913 if (pszVal)
1914 {
1915 rc = RTStrAPrintf(&pszValQuoted, "\"%s\"", pszVal);
1916 if (RT_FAILURE(rc))
1917 return rc;
1918 }
1919 else
1920 pszValQuoted = NULL;
1921 rc = vmdkDescSetStr(pImage, pDescriptor, pDescriptor->uFirstDDB, pszKey,
1922 pszValQuoted);
1923 if (pszValQuoted)
1924 RTStrFree(pszValQuoted);
1925 return rc;
1926}
1927
1928static int vmdkDescDDBSetUuid(PVMDKIMAGE pImage, PVMDKDESCRIPTOR pDescriptor,
1929 const char *pszKey, PCRTUUID pUuid)
1930{
1931 char *pszUuid;
1932
1933 int rc = RTStrAPrintf(&pszUuid, "\"%RTuuid\"", pUuid);
1934 if (RT_FAILURE(rc))
1935 return rc;
1936 rc = vmdkDescSetStr(pImage, pDescriptor, pDescriptor->uFirstDDB, pszKey,
1937 pszUuid);
1938 RTStrFree(pszUuid);
1939 return rc;
1940}
1941
1942static int vmdkDescDDBSetU32(PVMDKIMAGE pImage, PVMDKDESCRIPTOR pDescriptor,
1943 const char *pszKey, uint32_t uValue)
1944{
1945 char *pszValue;
1946
1947 int rc = RTStrAPrintf(&pszValue, "\"%d\"", uValue);
1948 if (RT_FAILURE(rc))
1949 return rc;
1950 rc = vmdkDescSetStr(pImage, pDescriptor, pDescriptor->uFirstDDB, pszKey,
1951 pszValue);
1952 RTStrFree(pszValue);
1953 return rc;
1954}
1955
/**
 * Internal: split the raw descriptor text into an array of line pointers and
 * classify the lines into the three sections (descriptor header, extent list,
 * disk data base), verifying their relative ordering.
 *
 * Modifies pDescData in place: every CR and LF is overwritten with '\0', so
 * each aLines[] entry becomes a NUL terminated string. aNextLines[] links
 * each non-empty line to the next non-empty line of the same section.
 */
static int vmdkPreprocessDescriptor(PVMDKIMAGE pImage, char *pDescData,
                                    size_t cbDescData,
                                    PVMDKDESCRIPTOR pDescriptor)
{
    int rc = VINF_SUCCESS;
    unsigned cLine = 0, uLastNonEmptyLine = 0;
    char *pTmp = pDescData;

    pDescriptor->cbDescAlloc = cbDescData;
    /* First pass: record the start of each line and strip line endings. */
    while (*pTmp != '\0')
    {
        pDescriptor->aLines[cLine++] = pTmp;
        if (cLine >= VMDK_DESCRIPTOR_LINES_MAX)
        {
            rc = vmdkError(pImage, VERR_VD_VMDK_INVALID_HEADER, RT_SRC_POS, N_("VMDK: descriptor too big in '%s'"), pImage->pszFilename);
            goto out;
        }

        while (*pTmp != '\0' && *pTmp != '\n')
        {
            if (*pTmp == '\r')
            {
                /* Only CRLF is accepted; a lone CR is rejected. */
                if (*(pTmp + 1) != '\n')
                {
                    rc = vmdkError(pImage, VERR_VD_VMDK_INVALID_HEADER, RT_SRC_POS, N_("VMDK: unsupported end of line in descriptor in '%s'"), pImage->pszFilename);
                    goto out;
                }
                else
                {
                    /* Get rid of CR character. */
                    *pTmp = '\0';
                }
            }
            pTmp++;
        }
        /* Get rid of LF character. */
        if (*pTmp == '\n')
        {
            *pTmp = '\0';
            pTmp++;
        }
    }
    pDescriptor->cLines = cLine;
    /* Pointer right after the end of the used part of the buffer. */
    pDescriptor->aLines[cLine] = pTmp;

    if (    strcmp(pDescriptor->aLines[0], "# Disk DescriptorFile")
        &&  strcmp(pDescriptor->aLines[0], "# Disk Descriptor File"))
    {
        rc = vmdkError(pImage, VERR_VD_VMDK_INVALID_HEADER, RT_SRC_POS, N_("VMDK: descriptor does not start as expected in '%s'"), pImage->pszFilename);
        goto out;
    }

    /* Initialize those, because we need to be able to reopen an image. */
    pDescriptor->uFirstDesc = 0;
    pDescriptor->uFirstExtent = 0;
    pDescriptor->uFirstDDB = 0;
    /* Second pass: classify each non-empty, non-comment line into one of the
     * three sections and enforce the order header -> extents -> DDB. */
    for (unsigned i = 0; i < cLine; i++)
    {
        if (*pDescriptor->aLines[i] != '#' && *pDescriptor->aLines[i] != '\0')
        {
            if (    !strncmp(pDescriptor->aLines[i], "RW", 2)
                ||  !strncmp(pDescriptor->aLines[i], "RDONLY", 6)
                ||  !strncmp(pDescriptor->aLines[i], "NOACCESS", 8) )
            {
                /* An extent descriptor. */
                if (!pDescriptor->uFirstDesc || pDescriptor->uFirstDDB)
                {
                    /* Incorrect ordering of entries. */
                    rc = vmdkError(pImage, VERR_VD_VMDK_INVALID_HEADER, RT_SRC_POS, N_("VMDK: incorrect ordering of entries in descriptor in '%s'"), pImage->pszFilename);
                    goto out;
                }
                if (!pDescriptor->uFirstExtent)
                {
                    pDescriptor->uFirstExtent = i;
                    /* Start a fresh aNextLines chain for this section. */
                    uLastNonEmptyLine = 0;
                }
            }
            else if (!strncmp(pDescriptor->aLines[i], "ddb.", 4))
            {
                /* A disk database entry. */
                if (!pDescriptor->uFirstDesc || !pDescriptor->uFirstExtent)
                {
                    /* Incorrect ordering of entries. */
                    rc = vmdkError(pImage, VERR_VD_VMDK_INVALID_HEADER, RT_SRC_POS, N_("VMDK: incorrect ordering of entries in descriptor in '%s'"), pImage->pszFilename);
                    goto out;
                }
                if (!pDescriptor->uFirstDDB)
                {
                    pDescriptor->uFirstDDB = i;
                    uLastNonEmptyLine = 0;
                }
            }
            else
            {
                /* A normal entry. */
                if (pDescriptor->uFirstExtent || pDescriptor->uFirstDDB)
                {
                    /* Incorrect ordering of entries. */
                    rc = vmdkError(pImage, VERR_VD_VMDK_INVALID_HEADER, RT_SRC_POS, N_("VMDK: incorrect ordering of entries in descriptor in '%s'"), pImage->pszFilename);
                    goto out;
                }
                if (!pDescriptor->uFirstDesc)
                {
                    pDescriptor->uFirstDesc = i;
                    uLastNonEmptyLine = 0;
                }
            }
            /* Link the previous non-empty line of this section to this one. */
            if (uLastNonEmptyLine)
                pDescriptor->aNextLines[uLastNonEmptyLine] = i;
            uLastNonEmptyLine = i;
        }
    }

out:
    return rc;
}
2073
2074static int vmdkDescSetPCHSGeometry(PVMDKIMAGE pImage,
2075 PCVDGEOMETRY pPCHSGeometry)
2076{
2077 int rc = vmdkDescDDBSetU32(pImage, &pImage->Descriptor,
2078 VMDK_DDB_GEO_PCHS_CYLINDERS,
2079 pPCHSGeometry->cCylinders);
2080 if (RT_FAILURE(rc))
2081 return rc;
2082 rc = vmdkDescDDBSetU32(pImage, &pImage->Descriptor,
2083 VMDK_DDB_GEO_PCHS_HEADS,
2084 pPCHSGeometry->cHeads);
2085 if (RT_FAILURE(rc))
2086 return rc;
2087 rc = vmdkDescDDBSetU32(pImage, &pImage->Descriptor,
2088 VMDK_DDB_GEO_PCHS_SECTORS,
2089 pPCHSGeometry->cSectors);
2090 return rc;
2091}
2092
2093static int vmdkDescSetLCHSGeometry(PVMDKIMAGE pImage,
2094 PCVDGEOMETRY pLCHSGeometry)
2095{
2096 int rc = vmdkDescDDBSetU32(pImage, &pImage->Descriptor,
2097 VMDK_DDB_GEO_LCHS_CYLINDERS,
2098 pLCHSGeometry->cCylinders);
2099 if (RT_FAILURE(rc))
2100 return rc;
2101 rc = vmdkDescDDBSetU32(pImage, &pImage->Descriptor,
2102 VMDK_DDB_GEO_LCHS_HEADS,
2103
2104 pLCHSGeometry->cHeads);
2105 if (RT_FAILURE(rc))
2106 return rc;
2107 rc = vmdkDescDDBSetU32(pImage, &pImage->Descriptor,
2108 VMDK_DDB_GEO_LCHS_SECTORS,
2109 pLCHSGeometry->cSectors);
2110 return rc;
2111}
2112
2113static int vmdkCreateDescriptor(PVMDKIMAGE pImage, char *pDescData,
2114 size_t cbDescData, PVMDKDESCRIPTOR pDescriptor)
2115{
2116 int rc;
2117
2118 pDescriptor->uFirstDesc = 0;
2119 pDescriptor->uFirstExtent = 0;
2120 pDescriptor->uFirstDDB = 0;
2121 pDescriptor->cLines = 0;
2122 pDescriptor->cbDescAlloc = cbDescData;
2123 pDescriptor->fDirty = false;
2124 pDescriptor->aLines[pDescriptor->cLines] = pDescData;
2125 memset(pDescriptor->aNextLines, '\0', sizeof(pDescriptor->aNextLines));
2126
2127 rc = vmdkDescInitStr(pImage, pDescriptor, "# Disk DescriptorFile");
2128 if (RT_FAILURE(rc))
2129 goto out;
2130 rc = vmdkDescInitStr(pImage, pDescriptor, "version=1");
2131 if (RT_FAILURE(rc))
2132 goto out;
2133 pDescriptor->uFirstDesc = pDescriptor->cLines - 1;
2134 rc = vmdkDescInitStr(pImage, pDescriptor, "");
2135 if (RT_FAILURE(rc))
2136 goto out;
2137 rc = vmdkDescInitStr(pImage, pDescriptor, "# Extent description");
2138 if (RT_FAILURE(rc))
2139 goto out;
2140 rc = vmdkDescInitStr(pImage, pDescriptor, "NOACCESS 0 ZERO ");
2141 if (RT_FAILURE(rc))
2142 goto out;
2143 pDescriptor->uFirstExtent = pDescriptor->cLines - 1;
2144 rc = vmdkDescInitStr(pImage, pDescriptor, "");
2145 if (RT_FAILURE(rc))
2146 goto out;
2147 /* The trailing space is created by VMware, too. */
2148 rc = vmdkDescInitStr(pImage, pDescriptor, "# The disk Data Base ");
2149 if (RT_FAILURE(rc))
2150 goto out;
2151 rc = vmdkDescInitStr(pImage, pDescriptor, "#DDB");
2152 if (RT_FAILURE(rc))
2153 goto out;
2154 rc = vmdkDescInitStr(pImage, pDescriptor, "");
2155 if (RT_FAILURE(rc))
2156 goto out;
2157 rc = vmdkDescInitStr(pImage, pDescriptor, "ddb.virtualHWVersion = \"4\"");
2158 if (RT_FAILURE(rc))
2159 goto out;
2160 pDescriptor->uFirstDDB = pDescriptor->cLines - 1;
2161
2162 /* Now that the framework is in place, use the normal functions to insert
2163 * the remaining keys. */
2164 char szBuf[9];
2165 RTStrPrintf(szBuf, sizeof(szBuf), "%08x", RTRandU32());
2166 rc = vmdkDescSetStr(pImage, pDescriptor, pDescriptor->uFirstDesc,
2167 "CID", szBuf);
2168 if (RT_FAILURE(rc))
2169 goto out;
2170 rc = vmdkDescSetStr(pImage, pDescriptor, pDescriptor->uFirstDesc,
2171 "parentCID", "ffffffff");
2172 if (RT_FAILURE(rc))
2173 goto out;
2174
2175 rc = vmdkDescDDBSetStr(pImage, pDescriptor, "ddb.adapterType", "ide");
2176 if (RT_FAILURE(rc))
2177 goto out;
2178
2179out:
2180 return rc;
2181}
2182
/**
 * Internal: parse a preprocessed descriptor: verify the version, derive the
 * image flags from the create type, parse all extent lines, read the PCHS
 * and LCHS geometry and the various UUIDs. For images opened read/write,
 * missing UUIDs are generated and stored back into the descriptor.
 */
static int vmdkParseDescriptor(PVMDKIMAGE pImage, char *pDescData,
                               size_t cbDescData)
{
    int rc;
    unsigned cExtents;
    unsigned uLine;
    unsigned i;

    rc = vmdkPreprocessDescriptor(pImage, pDescData, cbDescData,
                                  &pImage->Descriptor);
    if (RT_FAILURE(rc))
        return rc;

    /* Check version, must be 1. */
    uint32_t uVersion;
    rc = vmdkDescBaseGetU32(&pImage->Descriptor, "version", &uVersion);
    if (RT_FAILURE(rc))
        return vmdkError(pImage, rc, RT_SRC_POS, N_("VMDK: error finding key 'version' in descriptor in '%s'"), pImage->pszFilename);
    if (uVersion != 1)
        return vmdkError(pImage, VERR_VD_VMDK_UNSUPPORTED_VERSION, RT_SRC_POS, N_("VMDK: unsupported format version in descriptor in '%s'"), pImage->pszFilename);

    /* Get image creation type and determine image flags. */
    const char *pszCreateType = NULL; /* initialized to make gcc shut up */
    rc = vmdkDescBaseGetStr(pImage, &pImage->Descriptor, "createType",
                            &pszCreateType);
    if (RT_FAILURE(rc))
        return vmdkError(pImage, rc, RT_SRC_POS, N_("VMDK: cannot get image type from descriptor in '%s'"), pImage->pszFilename);
    if (    !strcmp(pszCreateType, "twoGbMaxExtentSparse")
        ||  !strcmp(pszCreateType, "twoGbMaxExtentFlat"))
        pImage->uImageFlags |= VD_VMDK_IMAGE_FLAGS_SPLIT_2G;
    else if (   !strcmp(pszCreateType, "partitionedDevice")
             || !strcmp(pszCreateType, "fullDevice"))
        pImage->uImageFlags |= VD_VMDK_IMAGE_FLAGS_RAWDISK;
    else if (!strcmp(pszCreateType, "streamOptimized"))
        pImage->uImageFlags |= VD_VMDK_IMAGE_FLAGS_STREAM_OPTIMIZED;
    else if (!strcmp(pszCreateType, "vmfs"))
        pImage->uImageFlags |= VD_IMAGE_FLAGS_FIXED | VD_VMDK_IMAGE_FLAGS_ESX;
    /* Cast away const for freeing - the string was allocated by the getter. */
    RTStrFree((char *)(void *)pszCreateType);

    /* Count the number of extent config entries. */
    for (uLine = pImage->Descriptor.uFirstExtent, cExtents = 0;
         uLine != 0;
         uLine = pImage->Descriptor.aNextLines[uLine], cExtents++)
        /* nothing */;

    if (!pImage->pDescData && cExtents != 1)
    {
        /* Monolithic image, must have only one extent (already opened). */
        return vmdkError(pImage, VERR_VD_VMDK_INVALID_HEADER, RT_SRC_POS, N_("VMDK: monolithic image may only have one extent in '%s'"), pImage->pszFilename);
    }

    if (pImage->pDescData)
    {
        /* Non-monolithic image, extents need to be allocated. */
        rc = vmdkCreateExtents(pImage, cExtents);
        if (RT_FAILURE(rc))
            return rc;
    }

    /* Parse each extent line: access type, nominal size, extent type and
     * (for non-ZERO extents) basename plus optional sector offset. */
    for (i = 0, uLine = pImage->Descriptor.uFirstExtent;
         i < cExtents; i++, uLine = pImage->Descriptor.aNextLines[uLine])
    {
        char *pszLine = pImage->Descriptor.aLines[uLine];

        /* Access type of the extent. */
        if (!strncmp(pszLine, "RW", 2))
        {
            pImage->pExtents[i].enmAccess = VMDKACCESS_READWRITE;
            pszLine += 2;
        }
        else if (!strncmp(pszLine, "RDONLY", 6))
        {
            pImage->pExtents[i].enmAccess = VMDKACCESS_READONLY;
            pszLine += 6;
        }
        else if (!strncmp(pszLine, "NOACCESS", 8))
        {
            pImage->pExtents[i].enmAccess = VMDKACCESS_NOACCESS;
            pszLine += 8;
        }
        else
            return vmdkError(pImage, VERR_VD_VMDK_INVALID_HEADER, RT_SRC_POS, N_("VMDK: parse error in extent description in '%s'"), pImage->pszFilename);
        if (*pszLine++ != ' ')
            return vmdkError(pImage, VERR_VD_VMDK_INVALID_HEADER, RT_SRC_POS, N_("VMDK: parse error in extent description in '%s'"), pImage->pszFilename);

        /* Nominal size of the extent. */
        rc = RTStrToUInt64Ex(pszLine, &pszLine, 10,
                             &pImage->pExtents[i].cNominalSectors);
        if (RT_FAILURE(rc))
            return vmdkError(pImage, VERR_VD_VMDK_INVALID_HEADER, RT_SRC_POS, N_("VMDK: parse error in extent description in '%s'"), pImage->pszFilename);
        if (*pszLine++ != ' ')
            return vmdkError(pImage, VERR_VD_VMDK_INVALID_HEADER, RT_SRC_POS, N_("VMDK: parse error in extent description in '%s'"), pImage->pszFilename);

        /* Type of the extent. */
#ifdef VBOX_WITH_VMDK_ESX
        /** @todo Add the ESX extent types. Not necessary for now because
         * the ESX extent types are only used inside an ESX server. They are
         * automatically converted if the VMDK is exported. */
#endif /* VBOX_WITH_VMDK_ESX */
        if (!strncmp(pszLine, "SPARSE", 6))
        {
            pImage->pExtents[i].enmType = VMDKETYPE_HOSTED_SPARSE;
            pszLine += 6;
        }
        else if (!strncmp(pszLine, "FLAT", 4))
        {
            pImage->pExtents[i].enmType = VMDKETYPE_FLAT;
            pszLine += 4;
        }
        else if (!strncmp(pszLine, "ZERO", 4))
        {
            pImage->pExtents[i].enmType = VMDKETYPE_ZERO;
            pszLine += 4;
        }
        else if (!strncmp(pszLine, "VMFS", 4))
        {
            pImage->pExtents[i].enmType = VMDKETYPE_VMFS;
            pszLine += 4;
        }
        else
            return vmdkError(pImage, VERR_VD_VMDK_INVALID_HEADER, RT_SRC_POS, N_("VMDK: parse error in extent description in '%s'"), pImage->pszFilename);

        if (pImage->pExtents[i].enmType == VMDKETYPE_ZERO)
        {
            /* This one has no basename or offset. */
            if (*pszLine == ' ')
                pszLine++;
            if (*pszLine != '\0')
                return vmdkError(pImage, VERR_VD_VMDK_INVALID_HEADER, RT_SRC_POS, N_("VMDK: parse error in extent description in '%s'"), pImage->pszFilename);
            pImage->pExtents[i].pszBasename = NULL;
        }
        else
        {
            /* All other extent types have basename and optional offset. */
            if (*pszLine++ != ' ')
                return vmdkError(pImage, VERR_VD_VMDK_INVALID_HEADER, RT_SRC_POS, N_("VMDK: parse error in extent description in '%s'"), pImage->pszFilename);

            /* Basename of the image. Surrounded by quotes. */
            char *pszBasename;
            rc = vmdkStringUnquote(pImage, pszLine, &pszBasename, &pszLine);
            if (RT_FAILURE(rc))
                return rc;
            pImage->pExtents[i].pszBasename = pszBasename;
            if (*pszLine == ' ')
            {
                pszLine++;
                if (*pszLine != '\0')
                {
                    /* Optional offset in extent specified. */
                    rc = RTStrToUInt64Ex(pszLine, &pszLine, 10,
                                         &pImage->pExtents[i].uSectorOffset);
                    if (RT_FAILURE(rc))
                        return vmdkError(pImage, VERR_VD_VMDK_INVALID_HEADER, RT_SRC_POS, N_("VMDK: parse error in extent description in '%s'"), pImage->pszFilename);
                }
            }

            if (*pszLine != '\0')
                return vmdkError(pImage, VERR_VD_VMDK_INVALID_HEADER, RT_SRC_POS, N_("VMDK: parse error in extent description in '%s'"), pImage->pszFilename);
        }
    }

    /* Determine PCHS geometry (autogenerate if necessary). */
    rc = vmdkDescDDBGetU32(pImage, &pImage->Descriptor,
                           VMDK_DDB_GEO_PCHS_CYLINDERS,
                           &pImage->PCHSGeometry.cCylinders);
    if (rc == VERR_VD_VMDK_VALUE_NOT_FOUND)
        pImage->PCHSGeometry.cCylinders = 0;
    else if (RT_FAILURE(rc))
        return vmdkError(pImage, rc, RT_SRC_POS, N_("VMDK: error getting PCHS geometry from extent description in '%s'"), pImage->pszFilename);
    rc = vmdkDescDDBGetU32(pImage, &pImage->Descriptor,
                           VMDK_DDB_GEO_PCHS_HEADS,
                           &pImage->PCHSGeometry.cHeads);
    if (rc == VERR_VD_VMDK_VALUE_NOT_FOUND)
        pImage->PCHSGeometry.cHeads = 0;
    else if (RT_FAILURE(rc))
        return vmdkError(pImage, rc, RT_SRC_POS, N_("VMDK: error getting PCHS geometry from extent description in '%s'"), pImage->pszFilename);
    rc = vmdkDescDDBGetU32(pImage, &pImage->Descriptor,
                           VMDK_DDB_GEO_PCHS_SECTORS,
                           &pImage->PCHSGeometry.cSectors);
    if (rc == VERR_VD_VMDK_VALUE_NOT_FOUND)
        pImage->PCHSGeometry.cSectors = 0;
    else if (RT_FAILURE(rc))
        return vmdkError(pImage, rc, RT_SRC_POS, N_("VMDK: error getting PCHS geometry from extent description in '%s'"), pImage->pszFilename);
    /* Sanity limits: at most 16 heads and 63 sectors per track (classic
     * BIOS/ATA physical geometry limits). */
    if (    pImage->PCHSGeometry.cCylinders == 0
        ||  pImage->PCHSGeometry.cHeads == 0
        ||  pImage->PCHSGeometry.cHeads > 16
        ||  pImage->PCHSGeometry.cSectors == 0
        ||  pImage->PCHSGeometry.cSectors > 63)
    {
        /* Mark PCHS geometry as not yet valid (can't do the calculation here
         * as the total image size isn't known yet). */
        pImage->PCHSGeometry.cCylinders = 0;
        pImage->PCHSGeometry.cHeads = 16;
        pImage->PCHSGeometry.cSectors = 63;
    }

    /* Determine LCHS geometry (set to 0 if not specified). */
    rc = vmdkDescDDBGetU32(pImage, &pImage->Descriptor,
                           VMDK_DDB_GEO_LCHS_CYLINDERS,
                           &pImage->LCHSGeometry.cCylinders);
    if (rc == VERR_VD_VMDK_VALUE_NOT_FOUND)
        pImage->LCHSGeometry.cCylinders = 0;
    else if (RT_FAILURE(rc))
        return vmdkError(pImage, rc, RT_SRC_POS, N_("VMDK: error getting LCHS geometry from extent description in '%s'"), pImage->pszFilename);
    rc = vmdkDescDDBGetU32(pImage, &pImage->Descriptor,
                           VMDK_DDB_GEO_LCHS_HEADS,
                           &pImage->LCHSGeometry.cHeads);
    if (rc == VERR_VD_VMDK_VALUE_NOT_FOUND)
        pImage->LCHSGeometry.cHeads = 0;
    else if (RT_FAILURE(rc))
        return vmdkError(pImage, rc, RT_SRC_POS, N_("VMDK: error getting LCHS geometry from extent description in '%s'"), pImage->pszFilename);
    rc = vmdkDescDDBGetU32(pImage, &pImage->Descriptor,
                           VMDK_DDB_GEO_LCHS_SECTORS,
                           &pImage->LCHSGeometry.cSectors);
    if (rc == VERR_VD_VMDK_VALUE_NOT_FOUND)
        pImage->LCHSGeometry.cSectors = 0;
    else if (RT_FAILURE(rc))
        return vmdkError(pImage, rc, RT_SRC_POS, N_("VMDK: error getting LCHS geometry from extent description in '%s'"), pImage->pszFilename);
    /* LCHS geometry is all-or-nothing: any missing component invalidates it. */
    if (    pImage->LCHSGeometry.cCylinders == 0
        ||  pImage->LCHSGeometry.cHeads == 0
        ||  pImage->LCHSGeometry.cSectors == 0)
    {
        pImage->LCHSGeometry.cCylinders = 0;
        pImage->LCHSGeometry.cHeads = 0;
        pImage->LCHSGeometry.cSectors = 0;
    }

    /* Get image UUID. */
    rc = vmdkDescDDBGetUuid(pImage, &pImage->Descriptor, VMDK_DDB_IMAGE_UUID,
                            &pImage->ImageUuid);
    if (rc == VERR_VD_VMDK_VALUE_NOT_FOUND)
    {
        /* Image without UUID. Probably created by VMware and not yet used
         * by VirtualBox. Can only be added for images opened in read/write
         * mode, so don't bother producing a sensible UUID otherwise. */
        if (pImage->uOpenFlags & VD_OPEN_FLAGS_READONLY)
            RTUuidClear(&pImage->ImageUuid);
        else
        {
            rc = RTUuidCreate(&pImage->ImageUuid);
            if (RT_FAILURE(rc))
                return rc;
            rc = vmdkDescDDBSetUuid(pImage, &pImage->Descriptor,
                                    VMDK_DDB_IMAGE_UUID, &pImage->ImageUuid);
            if (RT_FAILURE(rc))
                return vmdkError(pImage, rc, RT_SRC_POS, N_("VMDK: error storing image UUID in descriptor in '%s'"), pImage->pszFilename);
        }
    }
    else if (RT_FAILURE(rc))
        return rc;

    /* Get image modification UUID. */
    rc = vmdkDescDDBGetUuid(pImage, &pImage->Descriptor,
                            VMDK_DDB_MODIFICATION_UUID,
                            &pImage->ModificationUuid);
    if (rc == VERR_VD_VMDK_VALUE_NOT_FOUND)
    {
        /* Image without UUID. Probably created by VMware and not yet used
         * by VirtualBox. Can only be added for images opened in read/write
         * mode, so don't bother producing a sensible UUID otherwise. */
        if (pImage->uOpenFlags & VD_OPEN_FLAGS_READONLY)
            RTUuidClear(&pImage->ModificationUuid);
        else
        {
            rc = RTUuidCreate(&pImage->ModificationUuid);
            if (RT_FAILURE(rc))
                return rc;
            rc = vmdkDescDDBSetUuid(pImage, &pImage->Descriptor,
                                    VMDK_DDB_MODIFICATION_UUID,
                                    &pImage->ModificationUuid);
            if (RT_FAILURE(rc))
                return vmdkError(pImage, rc, RT_SRC_POS, N_("VMDK: error storing image modification UUID in descriptor in '%s'"), pImage->pszFilename);
        }
    }
    else if (RT_FAILURE(rc))
        return rc;

    /* Get UUID of parent image. */
    rc = vmdkDescDDBGetUuid(pImage, &pImage->Descriptor, VMDK_DDB_PARENT_UUID,
                            &pImage->ParentUuid);
    if (rc == VERR_VD_VMDK_VALUE_NOT_FOUND)
    {
        /* Image without UUID. Probably created by VMware and not yet used
         * by VirtualBox. Can only be added for images opened in read/write
         * mode, so don't bother producing a sensible UUID otherwise. */
        if (pImage->uOpenFlags & VD_OPEN_FLAGS_READONLY)
            RTUuidClear(&pImage->ParentUuid);
        else
        {
            /* NOTE(review): intentionally RTUuidClear, not RTUuidCreate -
             * a missing parent UUID is stored as the nil UUID, presumably
             * meaning "no parent"; confirm against the other UUID getters. */
            rc = RTUuidClear(&pImage->ParentUuid);
            if (RT_FAILURE(rc))
                return rc;
            rc = vmdkDescDDBSetUuid(pImage, &pImage->Descriptor,
                                    VMDK_DDB_PARENT_UUID, &pImage->ParentUuid);
            if (RT_FAILURE(rc))
                return vmdkError(pImage, rc, RT_SRC_POS, N_("VMDK: error storing parent UUID in descriptor in '%s'"), pImage->pszFilename);
        }
    }
    else if (RT_FAILURE(rc))
        return rc;

    /* Get parent image modification UUID. */
    rc = vmdkDescDDBGetUuid(pImage, &pImage->Descriptor,
                            VMDK_DDB_PARENT_MODIFICATION_UUID,
                            &pImage->ParentModificationUuid);
    if (rc == VERR_VD_VMDK_VALUE_NOT_FOUND)
    {
        /* Image without UUID. Probably created by VMware and not yet used
         * by VirtualBox. Can only be added for images opened in read/write
         * mode, so don't bother producing a sensible UUID otherwise. */
        if (pImage->uOpenFlags & VD_OPEN_FLAGS_READONLY)
            RTUuidClear(&pImage->ParentModificationUuid);
        else
        {
            rc = RTUuidCreate(&pImage->ParentModificationUuid);
            if (RT_FAILURE(rc))
                return rc;
            rc = vmdkDescDDBSetUuid(pImage, &pImage->Descriptor,
                                    VMDK_DDB_PARENT_MODIFICATION_UUID,
                                    &pImage->ParentModificationUuid);
            if (RT_FAILURE(rc))
                return vmdkError(pImage, rc, RT_SRC_POS, N_("VMDK: error storing parent modification UUID in descriptor in '%s'"), pImage->pszFilename);
        }
    }
    else if (RT_FAILURE(rc))
        return rc;

    return VINF_SUCCESS;
}
2512
2513/**
2514 * Internal: write/update the descriptor part of the image.
2515 */
2516static int vmdkWriteDescriptor(PVMDKIMAGE pImage)
2517{
2518 int rc = VINF_SUCCESS;
2519 uint64_t cbLimit;
2520 uint64_t uOffset;
2521 PVMDKFILE pDescFile;
2522
2523 if (pImage->pDescData)
2524 {
2525 /* Separate descriptor file. */
2526 uOffset = 0;
2527 cbLimit = 0;
2528 pDescFile = pImage->pFile;
2529 }
2530 else
2531 {
2532 /* Embedded descriptor file. */
2533 uOffset = VMDK_SECTOR2BYTE(pImage->pExtents[0].uDescriptorSector);
2534 cbLimit = VMDK_SECTOR2BYTE(pImage->pExtents[0].cDescriptorSectors);
2535 pDescFile = pImage->pExtents[0].pFile;
2536 }
2537 /* Bail out if there is no file to write to. */
2538 if (pDescFile == NULL)
2539 return VERR_INVALID_PARAMETER;
2540
2541 /*
2542 * Allocate temporary descriptor buffer.
2543 * In case there is no limit allocate a default
2544 * and increase if required.
2545 */
2546 size_t cbDescriptor = cbLimit ? cbLimit : 4 * _1K;
2547 char *pszDescriptor = (char *)RTMemAllocZ(cbDescriptor);
2548 unsigned offDescriptor = 0;
2549
2550 if (!pszDescriptor)
2551 return VERR_NO_MEMORY;
2552
2553 for (unsigned i = 0; i < pImage->Descriptor.cLines; i++)
2554 {
2555 const char *psz = pImage->Descriptor.aLines[i];
2556 size_t cb = strlen(psz);
2557
2558 /*
2559 * Increase the descriptor if there is no limit and
2560 * there is not enough room left for this line.
2561 */
2562 if (offDescriptor + cb + 1 > cbDescriptor)
2563 {
2564 if (cbLimit)
2565 {
2566 rc = vmdkError(pImage, VERR_BUFFER_OVERFLOW, RT_SRC_POS, N_("VMDK: descriptor too long in '%s'"), pImage->pszFilename);
2567 break;
2568 }
2569 else
2570 {
2571 char *pszDescriptorNew = NULL;
2572 LogFlow(("Increasing descriptor cache\n"));
2573
2574 pszDescriptorNew = (char *)RTMemRealloc(pszDescriptor, cbDescriptor + cb + 4 * _1K);
2575 if (!pszDescriptorNew)
2576 {
2577 rc = VERR_NO_MEMORY;
2578 break;
2579 }
2580 pszDescriptorNew = pszDescriptor;
2581 cbDescriptor += cb + 4 * _1K;
2582 }
2583 }
2584
2585 if (cb > 0)
2586 {
2587 memcpy(pszDescriptor + offDescriptor, psz, cb);
2588 offDescriptor += cb;
2589 }
2590
2591 memcpy(pszDescriptor + offDescriptor, "\n", 1);
2592 offDescriptor++;
2593 }
2594
2595 if (RT_SUCCESS(rc))
2596 {
2597 rc = vmdkFileWriteSync(pImage, pDescFile, uOffset, pszDescriptor, cbLimit ? cbLimit : offDescriptor, NULL);
2598 if (RT_FAILURE(rc))
2599 rc = vmdkError(pImage, rc, RT_SRC_POS, N_("VMDK: error writing descriptor in '%s'"), pImage->pszFilename);
2600 }
2601
2602 if (RT_SUCCESS(rc) && !cbLimit)
2603 {
2604 rc = vmdkFileSetSize(pImage, pDescFile, offDescriptor);
2605 if (RT_FAILURE(rc))
2606 rc = vmdkError(pImage, rc, RT_SRC_POS, N_("VMDK: error truncating descriptor in '%s'"), pImage->pszFilename);
2607 }
2608
2609 if (RT_SUCCESS(rc))
2610 pImage->Descriptor.fDirty = false;
2611
2612 RTMemFree(pszDescriptor);
2613 return rc;
2614}
2615
2616/**
2617 * Internal: write/update the descriptor part of the image - async version.
2618 */
2619static int vmdkWriteDescriptorAsync(PVMDKIMAGE pImage, PVDIOCTX pIoCtx)
2620{
2621 int rc = VINF_SUCCESS;
2622 uint64_t cbLimit;
2623 uint64_t uOffset;
2624 PVMDKFILE pDescFile;
2625
2626 if (pImage->pDescData)
2627 {
2628 /* Separate descriptor file. */
2629 uOffset = 0;
2630 cbLimit = 0;
2631 pDescFile = pImage->pFile;
2632 }
2633 else
2634 {
2635 /* Embedded descriptor file. */
2636 uOffset = VMDK_SECTOR2BYTE(pImage->pExtents[0].uDescriptorSector);
2637 cbLimit = VMDK_SECTOR2BYTE(pImage->pExtents[0].cDescriptorSectors);
2638 pDescFile = pImage->pExtents[0].pFile;
2639 }
2640 /* Bail out if there is no file to write to. */
2641 if (pDescFile == NULL)
2642 return VERR_INVALID_PARAMETER;
2643
2644 /*
2645 * Allocate temporary descriptor buffer.
2646 * In case there is no limit allocate a default
2647 * and increase if required.
2648 */
2649 size_t cbDescriptor = cbLimit ? cbLimit : 4 * _1K;
2650 char *pszDescriptor = (char *)RTMemAllocZ(cbDescriptor);
2651 unsigned offDescriptor = 0;
2652
2653 if (!pszDescriptor)
2654 return VERR_NO_MEMORY;
2655
2656 for (unsigned i = 0; i < pImage->Descriptor.cLines; i++)
2657 {
2658 const char *psz = pImage->Descriptor.aLines[i];
2659 size_t cb = strlen(psz);
2660
2661 /*
2662 * Increase the descriptor if there is no limit and
2663 * there is not enough room left for this line.
2664 */
2665 if (offDescriptor + cb + 1 > cbDescriptor)
2666 {
2667 if (cbLimit)
2668 {
2669 rc = vmdkError(pImage, VERR_BUFFER_OVERFLOW, RT_SRC_POS, N_("VMDK: descriptor too long in '%s'"), pImage->pszFilename);
2670 break;
2671 }
2672 else
2673 {
2674 char *pszDescriptorNew = NULL;
2675 LogFlow(("Increasing descriptor cache\n"));
2676
2677 pszDescriptorNew = (char *)RTMemRealloc(pszDescriptor, cbDescriptor + cb + 4 * _1K);
2678 if (!pszDescriptorNew)
2679 {
2680 rc = VERR_NO_MEMORY;
2681 break;
2682 }
2683 pszDescriptorNew = pszDescriptor;
2684 cbDescriptor += cb + 4 * _1K;
2685 }
2686 }
2687
2688 if (cb > 0)
2689 {
2690 memcpy(pszDescriptor + offDescriptor, psz, cb);
2691 offDescriptor += cb;
2692 }
2693
2694 memcpy(pszDescriptor + offDescriptor, "\n", 1);
2695 offDescriptor++;
2696 }
2697
2698 if (RT_SUCCESS(rc))
2699 {
2700 rc = vmdkFileWriteSync(pImage, pDescFile, uOffset, pszDescriptor, cbLimit ? cbLimit : offDescriptor, NULL);
2701 if (RT_FAILURE(rc))
2702 rc = vmdkError(pImage, rc, RT_SRC_POS, N_("VMDK: error writing descriptor in '%s'"), pImage->pszFilename);
2703 }
2704
2705 if (RT_SUCCESS(rc) && !cbLimit)
2706 {
2707 rc = vmdkFileSetSize(pImage, pDescFile, offDescriptor);
2708 if (RT_FAILURE(rc))
2709 rc = vmdkError(pImage, rc, RT_SRC_POS, N_("VMDK: error truncating descriptor in '%s'"), pImage->pszFilename);
2710 }
2711
2712 if (RT_SUCCESS(rc))
2713 pImage->Descriptor.fDirty = false;
2714
2715 RTMemFree(pszDescriptor);
2716 return rc;
2717
2718}
2719
2720/**
2721 * Internal: validate the consistency check values in a binary header.
2722 */
2723static int vmdkValidateHeader(PVMDKIMAGE pImage, PVMDKEXTENT pExtent, const SparseExtentHeader *pHeader)
2724{
2725 int rc = VINF_SUCCESS;
2726 if (RT_LE2H_U32(pHeader->magicNumber) != VMDK_SPARSE_MAGICNUMBER)
2727 {
2728 rc = vmdkError(pImage, VERR_VD_VMDK_INVALID_HEADER, RT_SRC_POS, N_("VMDK: incorrect magic in sparse extent header in '%s'"), pExtent->pszFullname);
2729 return rc;
2730 }
2731 if (RT_LE2H_U32(pHeader->version) != 1 && RT_LE2H_U32(pHeader->version) != 3)
2732 {
2733 rc = vmdkError(pImage, VERR_VD_VMDK_UNSUPPORTED_VERSION, RT_SRC_POS, N_("VMDK: incorrect version in sparse extent header in '%s', not a VMDK 1.0/1.1 conforming file"), pExtent->pszFullname);
2734 return rc;
2735 }
2736 if ( (RT_LE2H_U32(pHeader->flags) & 1)
2737 && ( pHeader->singleEndLineChar != '\n'
2738 || pHeader->nonEndLineChar != ' '
2739 || pHeader->doubleEndLineChar1 != '\r'
2740 || pHeader->doubleEndLineChar2 != '\n') )
2741 {
2742 rc = vmdkError(pImage, VERR_VD_VMDK_INVALID_HEADER, RT_SRC_POS, N_("VMDK: corrupted by CR/LF translation in '%s'"), pExtent->pszFullname);
2743 return rc;
2744 }
2745 return rc;
2746}
2747
2748/**
2749 * Internal: read metadata belonging to an extent with binary header, i.e.
2750 * as found in monolithic files.
2751 */
2752static int vmdkReadBinaryMetaExtent(PVMDKIMAGE pImage, PVMDKEXTENT pExtent)
2753{
2754 SparseExtentHeader Header;
2755 uint64_t cSectorsPerGDE;
2756
2757 int rc = vmdkFileReadSync(pImage, pExtent->pFile, 0, &Header, sizeof(Header), NULL);
2758 AssertRC(rc);
2759 if (RT_FAILURE(rc))
2760 {
2761 rc = vmdkError(pImage, rc, RT_SRC_POS, N_("VMDK: error reading extent header in '%s'"), pExtent->pszFullname);
2762 goto out;
2763 }
2764 rc = vmdkValidateHeader(pImage, pExtent, &Header);
2765 if (RT_FAILURE(rc))
2766 goto out;
2767 if ( RT_LE2H_U32(Header.flags & RT_BIT(17))
2768 && RT_LE2H_U64(Header.gdOffset) == VMDK_GD_AT_END)
2769 {
2770 /* Read the footer, which isn't compressed and comes before the
2771 * end-of-stream marker. This is bending the VMDK 1.1 spec, but that's
2772 * VMware reality. Theory and practice have very little in common. */
2773 uint64_t cbSize;
2774 rc = vmdkFileGetSize(pImage, pExtent->pFile, &cbSize);
2775 AssertRC(rc);
2776 if (RT_FAILURE(rc))
2777 {
2778 rc = vmdkError(pImage, rc, RT_SRC_POS, N_("VMDK: cannot get size of '%s'"), pExtent->pszFullname);
2779 goto out;
2780 }
2781 cbSize = RT_ALIGN_64(cbSize, 512);
2782 rc = vmdkFileReadSync(pImage, pExtent->pFile, cbSize - 2*512, &Header, sizeof(Header), NULL);
2783 AssertRC(rc);
2784 if (RT_FAILURE(rc))
2785 {
2786 rc = vmdkError(pImage, rc, RT_SRC_POS, N_("VMDK: error reading extent footer in '%s'"), pExtent->pszFullname);
2787 goto out;
2788 }
2789 rc = vmdkValidateHeader(pImage, pExtent, &Header);
2790 if (RT_FAILURE(rc))
2791 goto out;
2792 pExtent->fFooter = true;
2793 }
2794 pExtent->uVersion = RT_LE2H_U32(Header.version);
2795 pExtent->enmType = VMDKETYPE_HOSTED_SPARSE; /* Just dummy value, changed later. */
2796 pExtent->cSectors = RT_LE2H_U64(Header.capacity);
2797 pExtent->cSectorsPerGrain = RT_LE2H_U64(Header.grainSize);
2798 pExtent->uDescriptorSector = RT_LE2H_U64(Header.descriptorOffset);
2799 pExtent->cDescriptorSectors = RT_LE2H_U64(Header.descriptorSize);
2800 if (pExtent->uDescriptorSector && !pExtent->cDescriptorSectors)
2801 {
2802 rc = vmdkError(pImage, VERR_VD_VMDK_INVALID_HEADER, RT_SRC_POS, N_("VMDK: inconsistent embedded descriptor config in '%s'"), pExtent->pszFullname);
2803 goto out;
2804 }
2805 pExtent->cGTEntries = RT_LE2H_U32(Header.numGTEsPerGT);
2806 if (RT_LE2H_U32(Header.flags) & RT_BIT(1))
2807 {
2808 pExtent->uSectorRGD = RT_LE2H_U64(Header.rgdOffset);
2809 pExtent->uSectorGD = RT_LE2H_U64(Header.gdOffset);
2810 }
2811 else
2812 {
2813 pExtent->uSectorGD = RT_LE2H_U64(Header.gdOffset);
2814 pExtent->uSectorRGD = 0;
2815 }
2816 if (pExtent->uSectorGD == VMDK_GD_AT_END || pExtent->uSectorRGD == VMDK_GD_AT_END)
2817 {
2818 rc = vmdkError(pImage, VERR_VD_VMDK_INVALID_HEADER, RT_SRC_POS, N_("VMDK: cannot resolve grain directory offset in '%s'"), pExtent->pszFullname);
2819 goto out;
2820 }
2821 pExtent->cOverheadSectors = RT_LE2H_U64(Header.overHead);
2822 pExtent->fUncleanShutdown = !!Header.uncleanShutdown;
2823 pExtent->uCompression = RT_LE2H_U16(Header.compressAlgorithm);
2824 cSectorsPerGDE = pExtent->cGTEntries * pExtent->cSectorsPerGrain;
2825 if (!cSectorsPerGDE || cSectorsPerGDE > UINT32_MAX)
2826 {
2827 rc = vmdkError(pImage, VERR_VD_VMDK_INVALID_HEADER, RT_SRC_POS, N_("VMDK: incorrect grain directory size in '%s'"), pExtent->pszFullname);
2828 goto out;
2829 }
2830 pExtent->cSectorsPerGDE = cSectorsPerGDE;
2831 pExtent->cGDEntries = (pExtent->cSectors + cSectorsPerGDE - 1) / cSectorsPerGDE;
2832
2833 /* Fix up the number of descriptor sectors, as some flat images have
2834 * really just one, and this causes failures when inserting the UUID
2835 * values and other extra information. */
2836 if (pExtent->cDescriptorSectors != 0 && pExtent->cDescriptorSectors < 4)
2837 {
2838 /* Do it the easy way - just fix it for flat images which have no
2839 * other complicated metadata which needs space too. */
2840 if ( pExtent->uDescriptorSector + 4 < pExtent->cOverheadSectors
2841 && pExtent->cGTEntries * pExtent->cGDEntries == 0)
2842 pExtent->cDescriptorSectors = 4;
2843 }
2844
2845out:
2846 if (RT_FAILURE(rc))
2847 vmdkFreeExtentData(pImage, pExtent, false);
2848
2849 return rc;
2850}
2851
/**
 * Internal: read additional metadata belonging to an extent. For those
 * extents which have no additional metadata just verify the information.
 *
 * For hosted sparse extents this validates the grain size and grain table
 * geometry and reads the grain directory; for all other extent types only
 * the (optionally compiled-in) file size check is performed. On failure
 * the extent data is freed again.
 */
static int vmdkReadMetaExtent(PVMDKIMAGE pImage, PVMDKEXTENT pExtent)
{
    int rc = VINF_SUCCESS;
    uint64_t cbExtentSize;

    /* The image must be a multiple of a sector in size and contain the data
     * area (flat images only). If not, it means the image is at least
     * truncated, or even seriously garbled. */
    rc = vmdkFileGetSize(pImage, pExtent->pFile, &cbExtentSize);
    if (RT_FAILURE(rc))
    {
        rc = vmdkError(pImage, rc, RT_SRC_POS, N_("VMDK: error getting size in '%s'"), pExtent->pszFullname);
        goto out;
    }
/* disabled the size check again as there are too many too short vmdks out there */
#ifdef VBOX_WITH_VMDK_STRICT_SIZE_CHECK
    if (    cbExtentSize != RT_ALIGN_64(cbExtentSize, 512)
        &&  (pExtent->enmType != VMDKETYPE_FLAT || pExtent->cNominalSectors + pExtent->uSectorOffset > VMDK_BYTE2SECTOR(cbExtentSize)))
    {
        rc = vmdkError(pImage, VERR_VD_VMDK_INVALID_HEADER, RT_SRC_POS, N_("VMDK: file size is not a multiple of 512 in '%s', file is truncated or otherwise garbled"), pExtent->pszFullname);
        goto out;
    }
#endif /* VBOX_WITH_VMDK_STRICT_SIZE_CHECK */
    /* Only sparse extents carry additional metadata (grain directory). */
    if (pExtent->enmType != VMDKETYPE_HOSTED_SPARSE)
        goto out;

    /* The spec says that this must be a power of two and greater than 8,
     * but probably they meant not less than 8. */
    if (    (pExtent->cSectorsPerGrain & (pExtent->cSectorsPerGrain - 1))
        ||  pExtent->cSectorsPerGrain < 8)
    {
        rc = vmdkError(pImage, VERR_VD_VMDK_INVALID_HEADER, RT_SRC_POS, N_("VMDK: invalid extent grain size %u in '%s'"), pExtent->cSectorsPerGrain, pExtent->pszFullname);
        goto out;
    }

    /* This code requires that a grain table must hold a power of two multiple
     * of the number of entries per GT cache entry. */
    if (    (pExtent->cGTEntries & (pExtent->cGTEntries - 1))
        ||  pExtent->cGTEntries < VMDK_GT_CACHELINE_SIZE)
    {
        rc = vmdkError(pImage, VERR_VD_VMDK_INVALID_HEADER, RT_SRC_POS, N_("VMDK: grain table cache size problem in '%s'"), pExtent->pszFullname);
        goto out;
    }

    rc = vmdkReadGrainDirectory(pImage, pExtent);

out:
    if (RT_FAILURE(rc))
        vmdkFreeExtentData(pImage, pExtent, false);

    return rc;
}
2908
2909/**
2910 * Internal: write/update the metadata for a sparse extent.
2911 */
2912static int vmdkWriteMetaSparseExtent(PVMDKIMAGE pImage, PVMDKEXTENT pExtent,
2913 uint64_t uOffset)
2914{
2915 SparseExtentHeader Header;
2916
2917 memset(&Header, '\0', sizeof(Header));
2918 Header.magicNumber = RT_H2LE_U32(VMDK_SPARSE_MAGICNUMBER);
2919 Header.version = RT_H2LE_U32(pExtent->uVersion);
2920 Header.flags = RT_H2LE_U32(RT_BIT(0));
2921 if (pExtent->pRGD)
2922 Header.flags |= RT_H2LE_U32(RT_BIT(1));
2923 if (pImage->uImageFlags & VD_VMDK_IMAGE_FLAGS_STREAM_OPTIMIZED)
2924 Header.flags |= RT_H2LE_U32(RT_BIT(16) | RT_BIT(17));
2925 Header.capacity = RT_H2LE_U64(pExtent->cSectors);
2926 Header.grainSize = RT_H2LE_U64(pExtent->cSectorsPerGrain);
2927 Header.descriptorOffset = RT_H2LE_U64(pExtent->uDescriptorSector);
2928 Header.descriptorSize = RT_H2LE_U64(pExtent->cDescriptorSectors);
2929 Header.numGTEsPerGT = RT_H2LE_U32(pExtent->cGTEntries);
2930 if (pExtent->fFooter && uOffset == 0)
2931 {
2932 if (pExtent->pRGD)
2933 {
2934 Assert(pExtent->uSectorRGD);
2935 Header.rgdOffset = RT_H2LE_U64(VMDK_GD_AT_END);
2936 Header.gdOffset = RT_H2LE_U64(VMDK_GD_AT_END);
2937 }
2938 else
2939 {
2940 Header.gdOffset = RT_H2LE_U64(VMDK_GD_AT_END);
2941 }
2942 }
2943 else
2944 {
2945 if (pExtent->pRGD)
2946 {
2947 Assert(pExtent->uSectorRGD);
2948 Header.rgdOffset = RT_H2LE_U64(pExtent->uSectorRGD);
2949 Header.gdOffset = RT_H2LE_U64(pExtent->uSectorGD);
2950 }
2951 else
2952 {
2953 Header.gdOffset = RT_H2LE_U64(pExtent->uSectorGD);
2954 }
2955 }
2956 Header.overHead = RT_H2LE_U64(pExtent->cOverheadSectors);
2957 Header.uncleanShutdown = pExtent->fUncleanShutdown;
2958 Header.singleEndLineChar = '\n';
2959 Header.nonEndLineChar = ' ';
2960 Header.doubleEndLineChar1 = '\r';
2961 Header.doubleEndLineChar2 = '\n';
2962 Header.compressAlgorithm = RT_H2LE_U16(pExtent->uCompression);
2963
2964 int rc = vmdkFileWriteSync(pImage, pExtent->pFile, uOffset, &Header, sizeof(Header), NULL);
2965 AssertRC(rc);
2966 if (RT_FAILURE(rc))
2967 rc = vmdkError(pImage, rc, RT_SRC_POS, N_("VMDK: error writing extent header in '%s'"), pExtent->pszFullname);
2968 return rc;
2969}
2970
2971/**
2972 * Internal: write/update the metadata for a sparse extent - async version.
2973 */
2974static int vmdkWriteMetaSparseExtentAsync(PVMDKIMAGE pImage, PVMDKEXTENT pExtent,
2975 uint64_t uOffset, PVDIOCTX pIoCtx)
2976{
2977 SparseExtentHeader Header;
2978
2979 memset(&Header, '\0', sizeof(Header));
2980 Header.magicNumber = RT_H2LE_U32(VMDK_SPARSE_MAGICNUMBER);
2981 Header.version = RT_H2LE_U32(pExtent->uVersion);
2982 Header.flags = RT_H2LE_U32(RT_BIT(0));
2983 if (pExtent->pRGD)
2984 Header.flags |= RT_H2LE_U32(RT_BIT(1));
2985 if (pImage->uImageFlags & VD_VMDK_IMAGE_FLAGS_STREAM_OPTIMIZED)
2986 Header.flags |= RT_H2LE_U32(RT_BIT(16) | RT_BIT(17));
2987 Header.capacity = RT_H2LE_U64(pExtent->cSectors);
2988 Header.grainSize = RT_H2LE_U64(pExtent->cSectorsPerGrain);
2989 Header.descriptorOffset = RT_H2LE_U64(pExtent->uDescriptorSector);
2990 Header.descriptorSize = RT_H2LE_U64(pExtent->cDescriptorSectors);
2991 Header.numGTEsPerGT = RT_H2LE_U32(pExtent->cGTEntries);
2992 if (pExtent->fFooter && uOffset == 0)
2993 {
2994 if (pExtent->pRGD)
2995 {
2996 Assert(pExtent->uSectorRGD);
2997 Header.rgdOffset = RT_H2LE_U64(VMDK_GD_AT_END);
2998 Header.gdOffset = RT_H2LE_U64(VMDK_GD_AT_END);
2999 }
3000 else
3001 {
3002 Header.gdOffset = RT_H2LE_U64(VMDK_GD_AT_END);
3003 }
3004 }
3005 else
3006 {
3007 if (pExtent->pRGD)
3008 {
3009 Assert(pExtent->uSectorRGD);
3010 Header.rgdOffset = RT_H2LE_U64(pExtent->uSectorRGD);
3011 Header.gdOffset = RT_H2LE_U64(pExtent->uSectorGD);
3012 }
3013 else
3014 {
3015 Header.gdOffset = RT_H2LE_U64(pExtent->uSectorGD);
3016 }
3017 }
3018 Header.overHead = RT_H2LE_U64(pExtent->cOverheadSectors);
3019 Header.uncleanShutdown = pExtent->fUncleanShutdown;
3020 Header.singleEndLineChar = '\n';
3021 Header.nonEndLineChar = ' ';
3022 Header.doubleEndLineChar1 = '\r';
3023 Header.doubleEndLineChar2 = '\n';
3024 Header.compressAlgorithm = RT_H2LE_U16(pExtent->uCompression);
3025
3026 int rc = vmdkFileWriteMetaAsync(pImage, pExtent->pFile,
3027 uOffset, &Header, sizeof(Header),
3028 pIoCtx, NULL, NULL);
3029 if (RT_FAILURE(rc) && (rc != VERR_VD_ASYNC_IO_IN_PROGRESS))
3030 rc = vmdkError(pImage, rc, RT_SRC_POS, N_("VMDK: error writing extent header in '%s'"), pExtent->pszFullname);
3031 return rc;
3032}
3033
#ifdef VBOX_WITH_VMDK_ESX
/**
 * Internal: unused code to read the metadata of a sparse ESX extent.
 *
 * Such extents never leave ESX server, so this isn't ever used.
 *
 * @returns VBox status code.
 * @param   pImage   Image instance data, needed for file I/O, grain
 *                   directory reading and cleanup. (Fix: this parameter was
 *                   missing even though the body references pImage, which
 *                   broke compilation with VBOX_WITH_VMDK_ESX defined.)
 * @param   pExtent  The extent to read the metadata for.
 *
 * @note On failure the extent data is freed (without deleting the file).
 */
static int vmdkReadMetaESXSparseExtent(PVMDKIMAGE pImage, PVMDKEXTENT pExtent)
{
    COWDisk_Header Header;
    uint64_t cSectorsPerGDE;

    int rc = vmdkFileReadSync(pImage, pExtent->pFile, 0, &Header, sizeof(Header), NULL);
    AssertRC(rc);
    if (RT_FAILURE(rc))
        goto out;
    /* Verify the fixed header fields. */
    if (    RT_LE2H_U32(Header.magicNumber) != VMDK_ESX_SPARSE_MAGICNUMBER
        ||  RT_LE2H_U32(Header.version) != 1
        ||  RT_LE2H_U32(Header.flags) != 3)
    {
        rc = VERR_VD_VMDK_INVALID_HEADER;
        goto out;
    }
    pExtent->enmType = VMDKETYPE_ESX_SPARSE;
    pExtent->cSectors = RT_LE2H_U32(Header.numSectors);
    pExtent->cSectorsPerGrain = RT_LE2H_U32(Header.grainSize);
    /* The spec says that this must be between 1 sector and 1MB. This code
     * assumes it's a power of two, so check that requirement, too. */
    if (    (pExtent->cSectorsPerGrain & (pExtent->cSectorsPerGrain - 1))
        ||  pExtent->cSectorsPerGrain == 0
        ||  pExtent->cSectorsPerGrain > 2048)
    {
        rc = VERR_VD_VMDK_INVALID_HEADER;
        goto out;
    }
    /* ESX sparse extents have no embedded descriptor and no overhead area. */
    pExtent->uDescriptorSector = 0;
    pExtent->cDescriptorSectors = 0;
    pExtent->uSectorGD = RT_LE2H_U32(Header.gdOffset);
    pExtent->uSectorRGD = 0;
    pExtent->cOverheadSectors = 0;
    pExtent->cGTEntries = 4096;
    cSectorsPerGDE = pExtent->cGTEntries * pExtent->cSectorsPerGrain;
    if (!cSectorsPerGDE || cSectorsPerGDE > UINT32_MAX)
    {
        rc = VERR_VD_VMDK_INVALID_HEADER;
        goto out;
    }
    pExtent->cSectorsPerGDE = cSectorsPerGDE;
    pExtent->cGDEntries = (pExtent->cSectors + cSectorsPerGDE - 1) / cSectorsPerGDE;
    if (pExtent->cGDEntries != RT_LE2H_U32(Header.numGDEntries))
    {
        /* Inconsistency detected. Computed number of GD entries doesn't match
         * stored value. Better be safe than sorry. */
        rc = VERR_VD_VMDK_INVALID_HEADER;
        goto out;
    }
    pExtent->uFreeSector = RT_LE2H_U32(Header.freeSector);
    pExtent->fUncleanShutdown = !!Header.uncleanShutdown;

    rc = vmdkReadGrainDirectory(pImage, pExtent);

out:
    if (RT_FAILURE(rc))
        vmdkFreeExtentData(pImage, pExtent, false);

    return rc;
}
#endif /* VBOX_WITH_VMDK_ESX */
3101
3102/**
3103 * Internal: free the memory used by the extent data structure, optionally
3104 * deleting the referenced files.
3105 */
3106static void vmdkFreeExtentData(PVMDKIMAGE pImage, PVMDKEXTENT pExtent,
3107 bool fDelete)
3108{
3109 vmdkFreeGrainDirectory(pExtent);
3110 if (pExtent->pDescData)
3111 {
3112 RTMemFree(pExtent->pDescData);
3113 pExtent->pDescData = NULL;
3114 }
3115 if (pExtent->pFile != NULL)
3116 {
3117 /* Do not delete raw extents, these have full and base names equal. */
3118 vmdkFileClose(pImage, &pExtent->pFile,
3119 fDelete
3120 && pExtent->pszFullname
3121 && strcmp(pExtent->pszFullname, pExtent->pszBasename));
3122 }
3123 if (pExtent->pszBasename)
3124 {
3125 RTMemTmpFree((void *)pExtent->pszBasename);
3126 pExtent->pszBasename = NULL;
3127 }
3128 if (pExtent->pszFullname)
3129 {
3130 RTStrFree((char *)(void *)pExtent->pszFullname);
3131 pExtent->pszFullname = NULL;
3132 }
3133 if (pExtent->pvGrain)
3134 {
3135 RTMemFree(pExtent->pvGrain);
3136 pExtent->pvGrain = NULL;
3137 }
3138}
3139
3140/**
3141 * Internal: allocate grain table cache if necessary for this image.
3142 */
3143static int vmdkAllocateGrainTableCache(PVMDKIMAGE pImage)
3144{
3145 PVMDKEXTENT pExtent;
3146
3147 /* Allocate grain table cache if any sparse extent is present. */
3148 for (unsigned i = 0; i < pImage->cExtents; i++)
3149 {
3150 pExtent = &pImage->pExtents[i];
3151 if ( pExtent->enmType == VMDKETYPE_HOSTED_SPARSE
3152#ifdef VBOX_WITH_VMDK_ESX
3153 || pExtent->enmType == VMDKETYPE_ESX_SPARSE
3154#endif /* VBOX_WITH_VMDK_ESX */
3155 )
3156 {
3157 /* Allocate grain table cache. */
3158 pImage->pGTCache = (PVMDKGTCACHE)RTMemAllocZ(sizeof(VMDKGTCACHE));
3159 if (!pImage->pGTCache)
3160 return VERR_NO_MEMORY;
3161 for (unsigned j = 0; j < VMDK_GT_CACHE_SIZE; j++)
3162 {
3163 PVMDKGTCACHEENTRY pGCE = &pImage->pGTCache->aGTCache[j];
3164 pGCE->uExtent = UINT32_MAX;
3165 }
3166 pImage->pGTCache->cEntries = VMDK_GT_CACHE_SIZE;
3167 break;
3168 }
3169 }
3170
3171 return VINF_SUCCESS;
3172}
3173
3174/**
3175 * Internal: allocate the given number of extents.
3176 */
3177static int vmdkCreateExtents(PVMDKIMAGE pImage, unsigned cExtents)
3178{
3179 int rc = VINF_SUCCESS;
3180 PVMDKEXTENT pExtents = (PVMDKEXTENT)RTMemAllocZ(cExtents * sizeof(VMDKEXTENT));
3181 if (pImage)
3182 {
3183 for (unsigned i = 0; i < cExtents; i++)
3184 {
3185 pExtents[i].pFile = NULL;
3186 pExtents[i].pszBasename = NULL;
3187 pExtents[i].pszFullname = NULL;
3188 pExtents[i].pGD = NULL;
3189 pExtents[i].pRGD = NULL;
3190 pExtents[i].pDescData = NULL;
3191 pExtents[i].uVersion = 1;
3192 pExtents[i].uCompression = VMDK_COMPRESSION_NONE;
3193 pExtents[i].uExtent = i;
3194 pExtents[i].pImage = pImage;
3195 }
3196 pImage->pExtents = pExtents;
3197 pImage->cExtents = cExtents;
3198 }
3199 else
3200 rc = VERR_NO_MEMORY;
3201
3202 return rc;
3203}
3204
3205/**
3206 * Internal: Open an image, constructing all necessary data structures.
3207 */
3208static int vmdkOpenImage(PVMDKIMAGE pImage, unsigned uOpenFlags)
3209{
3210 int rc;
3211 uint32_t u32Magic;
3212 PVMDKFILE pFile;
3213 PVMDKEXTENT pExtent;
3214
3215 pImage->uOpenFlags = uOpenFlags;
3216
3217 /* Try to get error interface. */
3218 pImage->pInterfaceError = VDInterfaceGet(pImage->pVDIfsDisk, VDINTERFACETYPE_ERROR);
3219 if (pImage->pInterfaceError)
3220 pImage->pInterfaceErrorCallbacks = VDGetInterfaceError(pImage->pInterfaceError);
3221
3222 /* Get I/O interface. */
3223 pImage->pInterfaceIO = VDInterfaceGet(pImage->pVDIfsImage, VDINTERFACETYPE_IO);
3224 AssertPtrReturn(pImage->pInterfaceIO, VERR_INVALID_PARAMETER);
3225 pImage->pInterfaceIOCallbacks = VDGetInterfaceIO(pImage->pInterfaceIO);
3226 AssertPtrReturn(pImage->pInterfaceIOCallbacks, VERR_INVALID_PARAMETER);
3227
3228 /*
3229 * Open the image.
3230 * We don't have to check for asynchronous access because
3231 * we only support raw access and the opened file is a description
3232 * file were no data is stored.
3233 */
3234
3235 rc = vmdkFileOpen(pImage, &pFile, pImage->pszFilename,
3236 VDOpenFlagsToFileOpenFlags(uOpenFlags, false /* fCreate */),
3237 false /* fAsyncIO */);
3238 if (RT_FAILURE(rc))
3239 {
3240 /* Do NOT signal an appropriate error here, as the VD layer has the
3241 * choice of retrying the open if it failed. */
3242 goto out;
3243 }
3244 pImage->pFile = pFile;
3245
3246 /* Read magic (if present). */
3247 rc = vmdkFileReadSync(pImage, pFile, 0, &u32Magic, sizeof(u32Magic), NULL);
3248 if (RT_FAILURE(rc))
3249 {
3250 rc = vmdkError(pImage, rc, RT_SRC_POS, N_("VMDK: error reading the magic number in '%s'"), pImage->pszFilename);
3251 goto out;
3252 }
3253
3254 /* Handle the file according to its magic number. */
3255 if (RT_LE2H_U32(u32Magic) == VMDK_SPARSE_MAGICNUMBER)
3256 {
3257 /* It's a hosted single-extent image. */
3258 rc = vmdkCreateExtents(pImage, 1);
3259 if (RT_FAILURE(rc))
3260 goto out;
3261 /* The opened file is passed to the extent. No separate descriptor
3262 * file, so no need to keep anything open for the image. */
3263 pExtent = &pImage->pExtents[0];
3264 pExtent->pFile = pFile;
3265 pImage->pFile = NULL;
3266 pExtent->pszFullname = RTPathAbsDup(pImage->pszFilename);
3267 if (!pExtent->pszFullname)
3268 {
3269 rc = VERR_NO_MEMORY;
3270 goto out;
3271 }
3272 rc = vmdkReadBinaryMetaExtent(pImage, pExtent);
3273 if (RT_FAILURE(rc))
3274 goto out;
3275
3276 /* As we're dealing with a monolithic image here, there must
3277 * be a descriptor embedded in the image file. */
3278 if (!pExtent->uDescriptorSector || !pExtent->cDescriptorSectors)
3279 {
3280 rc = vmdkError(pImage, VERR_VD_VMDK_INVALID_HEADER, RT_SRC_POS, N_("VMDK: monolithic image without descriptor in '%s'"), pImage->pszFilename);
3281 goto out;
3282 }
3283 /* HACK: extend the descriptor if it is unusually small and it fits in
3284 * the unused space after the image header. Allows opening VMDK files
3285 * with extremely small descriptor in read/write mode. */
3286 if ( !(pImage->uOpenFlags & VD_OPEN_FLAGS_READONLY)
3287 && pExtent->cDescriptorSectors < 3
3288 && (int64_t)pExtent->uSectorGD - pExtent->uDescriptorSector >= 4
3289 && (!pExtent->uSectorRGD || (int64_t)pExtent->uSectorRGD - pExtent->uDescriptorSector >= 4))
3290 {
3291 pExtent->cDescriptorSectors = 4;
3292 pExtent->fMetaDirty = true;
3293 }
3294 /* Read the descriptor from the extent. */
3295 pExtent->pDescData = (char *)RTMemAllocZ(VMDK_SECTOR2BYTE(pExtent->cDescriptorSectors));
3296 if (!pExtent->pDescData)
3297 {
3298 rc = VERR_NO_MEMORY;
3299 goto out;
3300 }
3301 rc = vmdkFileReadSync(pImage, pExtent->pFile,
3302 VMDK_SECTOR2BYTE(pExtent->uDescriptorSector),
3303 pExtent->pDescData,
3304 VMDK_SECTOR2BYTE(pExtent->cDescriptorSectors), NULL);
3305 AssertRC(rc);
3306 if (RT_FAILURE(rc))
3307 {
3308 rc = vmdkError(pImage, rc, RT_SRC_POS, N_("VMDK: read error for descriptor in '%s'"), pExtent->pszFullname);
3309 goto out;
3310 }
3311
3312 rc = vmdkParseDescriptor(pImage, pExtent->pDescData,
3313 VMDK_SECTOR2BYTE(pExtent->cDescriptorSectors));
3314 if (RT_FAILURE(rc))
3315 goto out;
3316
3317 rc = vmdkReadMetaExtent(pImage, pExtent);
3318 if (RT_FAILURE(rc))
3319 goto out;
3320
3321 /* Mark the extent as unclean if opened in read-write mode. */
3322 if (!(uOpenFlags & VD_OPEN_FLAGS_READONLY))
3323 {
3324 pExtent->fUncleanShutdown = true;
3325 pExtent->fMetaDirty = true;
3326 }
3327 }
3328 else
3329 {
3330 /* Allocate at least 10K, and make sure that there is 5K free space
3331 * in case new entries need to be added to the descriptor. Never
3332 * alocate more than 128K, because that's no valid descriptor file
3333 * and will result in the correct "truncated read" error handling. */
3334 uint64_t cbFileSize;
3335 rc = vmdkFileGetSize(pImage, pFile, &cbFileSize);
3336 if (RT_FAILURE(rc))
3337 goto out;
3338
3339 uint64_t cbSize = cbFileSize;
3340 if (cbSize % VMDK_SECTOR2BYTE(10))
3341 cbSize += VMDK_SECTOR2BYTE(20) - cbSize % VMDK_SECTOR2BYTE(10);
3342 else
3343 cbSize += VMDK_SECTOR2BYTE(10);
3344 cbSize = RT_MIN(cbSize, _128K);
3345 pImage->cbDescAlloc = RT_MAX(VMDK_SECTOR2BYTE(20), cbSize);
3346 pImage->pDescData = (char *)RTMemAllocZ(pImage->cbDescAlloc);
3347 if (!pImage->pDescData)
3348 {
3349 rc = VERR_NO_MEMORY;
3350 goto out;
3351 }
3352
3353 size_t cbRead;
3354 rc = vmdkFileReadSync(pImage, pImage->pFile, 0, pImage->pDescData,
3355 RT_MIN(pImage->cbDescAlloc, cbFileSize),
3356 &cbRead);
3357 if (RT_FAILURE(rc))
3358 {
3359 rc = vmdkError(pImage, rc, RT_SRC_POS, N_("VMDK: read error for descriptor in '%s'"), pImage->pszFilename);
3360 goto out;
3361 }
3362 if (cbRead == pImage->cbDescAlloc)
3363 {
3364 /* Likely the read is truncated. Better fail a bit too early
3365 * (normally the descriptor is much smaller than our buffer). */
3366 rc = vmdkError(pImage, VERR_VD_VMDK_INVALID_HEADER, RT_SRC_POS, N_("VMDK: cannot read descriptor in '%s'"), pImage->pszFilename);
3367 goto out;
3368 }
3369
3370 rc = vmdkParseDescriptor(pImage, pImage->pDescData,
3371 pImage->cbDescAlloc);
3372 if (RT_FAILURE(rc))
3373 goto out;
3374
3375 /*
3376 * We have to check for the asynchronous open flag. The
3377 * extents are parsed and the type of all are known now.
3378 * Check if every extent is either FLAT or ZERO.
3379 */
3380 if (uOpenFlags & VD_OPEN_FLAGS_ASYNC_IO)
3381 {
3382 unsigned cFlatExtents = 0;
3383
3384 for (unsigned i = 0; i < pImage->cExtents; i++)
3385 {
3386 pExtent = &pImage->pExtents[i];
3387
3388 if (( pExtent->enmType != VMDKETYPE_FLAT
3389 && pExtent->enmType != VMDKETYPE_ZERO
3390 && pExtent->enmType != VMDKETYPE_VMFS)
3391 || ((pImage->pExtents[i].enmType == VMDKETYPE_FLAT) && (cFlatExtents > 0)))
3392 {
3393 /*
3394 * Opened image contains at least one none flat or zero extent.
3395 * Return error but don't set error message as the caller
3396 * has the chance to open in non async I/O mode.
3397 */
3398 rc = VERR_NOT_SUPPORTED;
3399 goto out;
3400 }
3401 if (pExtent->enmType == VMDKETYPE_FLAT)
3402 cFlatExtents++;
3403 }
3404 }
3405
3406 for (unsigned i = 0; i < pImage->cExtents; i++)
3407 {
3408 pExtent = &pImage->pExtents[i];
3409
3410 if (pExtent->pszBasename)
3411 {
3412 /* Hack to figure out whether the specified name in the
3413 * extent descriptor is absolute. Doesn't always work, but
3414 * should be good enough for now. */
3415 char *pszFullname;
3416 /** @todo implement proper path absolute check. */
3417 if (pExtent->pszBasename[0] == RTPATH_SLASH)
3418 {
3419 pszFullname = RTStrDup(pExtent->pszBasename);
3420 if (!pszFullname)
3421 {
3422 rc = VERR_NO_MEMORY;
3423 goto out;
3424 }
3425 }
3426 else
3427 {
3428 size_t cbDirname;
3429 char *pszDirname = RTStrDup(pImage->pszFilename);
3430 if (!pszDirname)
3431 {
3432 rc = VERR_NO_MEMORY;
3433 goto out;
3434 }
3435 RTPathStripFilename(pszDirname);
3436 cbDirname = strlen(pszDirname);
3437 rc = RTStrAPrintf(&pszFullname, "%s%c%s", pszDirname,
3438 RTPATH_SLASH, pExtent->pszBasename);
3439 RTStrFree(pszDirname);
3440 if (RT_FAILURE(rc))
3441 goto out;
3442 }
3443 pExtent->pszFullname = pszFullname;
3444 }
3445 else
3446 pExtent->pszFullname = NULL;
3447
3448 switch (pExtent->enmType)
3449 {
3450 case VMDKETYPE_HOSTED_SPARSE:
3451 rc = vmdkFileOpen(pImage, &pExtent->pFile, pExtent->pszFullname,
3452 VDOpenFlagsToFileOpenFlags(uOpenFlags,
3453 false /* fCreate */),
3454 false /* fAsyncIO */);
3455 if (RT_FAILURE(rc))
3456 {
3457 /* Do NOT signal an appropriate error here, as the VD
3458 * layer has the choice of retrying the open if it
3459 * failed. */
3460 goto out;
3461 }
3462 rc = vmdkReadBinaryMetaExtent(pImage, pExtent);
3463 if (RT_FAILURE(rc))
3464 goto out;
3465 rc = vmdkReadMetaExtent(pImage, pExtent);
3466 if (RT_FAILURE(rc))
3467 goto out;
3468
3469 /* Mark extent as unclean if opened in read-write mode. */
3470 if (!(uOpenFlags & VD_OPEN_FLAGS_READONLY))
3471 {
3472 pExtent->fUncleanShutdown = true;
3473 pExtent->fMetaDirty = true;
3474 }
3475 break;
3476 case VMDKETYPE_VMFS:
3477 case VMDKETYPE_FLAT:
3478 rc = vmdkFileOpen(pImage, &pExtent->pFile, pExtent->pszFullname,
3479 VDOpenFlagsToFileOpenFlags(uOpenFlags,
3480 false /* fCreate */),
3481 true /* fAsyncIO */);
3482 if (RT_FAILURE(rc))
3483 {
3484 /* Do NOT signal an appropriate error here, as the VD
3485 * layer has the choice of retrying the open if it
3486 * failed. */
3487 goto out;
3488 }
3489 break;
3490 case VMDKETYPE_ZERO:
3491 /* Nothing to do. */
3492 break;
3493 default:
3494 AssertMsgFailed(("unknown vmdk extent type %d\n", pExtent->enmType));
3495 }
3496 }
3497 }
3498
3499 /* Make sure this is not reached accidentally with an error status. */
3500 AssertRC(rc);
3501
3502 /* Determine PCHS geometry if not set. */
3503 if (pImage->PCHSGeometry.cCylinders == 0)
3504 {
3505 uint64_t cCylinders = VMDK_BYTE2SECTOR(pImage->cbSize)
3506 / pImage->PCHSGeometry.cHeads
3507 / pImage->PCHSGeometry.cSectors;
3508 pImage->PCHSGeometry.cCylinders = (unsigned)RT_MIN(cCylinders, 16383);
3509 if (!(pImage->uOpenFlags & VD_OPEN_FLAGS_READONLY))
3510 {
3511 rc = vmdkDescSetPCHSGeometry(pImage, &pImage->PCHSGeometry);
3512 AssertRC(rc);
3513 }
3514 }
3515
3516 /* Update the image metadata now in case has changed. */
3517 rc = vmdkFlushImage(pImage);
3518 if (RT_FAILURE(rc))
3519 goto out;
3520
3521 /* Figure out a few per-image constants from the extents. */
3522 pImage->cbSize = 0;
3523 for (unsigned i = 0; i < pImage->cExtents; i++)
3524 {
3525 pExtent = &pImage->pExtents[i];
3526 if ( pExtent->enmType == VMDKETYPE_HOSTED_SPARSE
3527#ifdef VBOX_WITH_VMDK_ESX
3528 || pExtent->enmType == VMDKETYPE_ESX_SPARSE
3529#endif /* VBOX_WITH_VMDK_ESX */
3530 )
3531 {
3532 /* Here used to be a check whether the nominal size of an extent
3533 * is a multiple of the grain size. The spec says that this is
3534 * always the case, but unfortunately some files out there in the
3535 * wild violate the spec (e.g. ReactOS 0.3.1). */
3536 }
3537 pImage->cbSize += VMDK_SECTOR2BYTE(pExtent->cNominalSectors);
3538 }
3539
3540 for (unsigned i = 0; i < pImage->cExtents; i++)
3541 {
3542 pExtent = &pImage->pExtents[i];
3543 if ( pImage->pExtents[i].enmType == VMDKETYPE_FLAT
3544 || pImage->pExtents[i].enmType == VMDKETYPE_ZERO)
3545 {
3546 pImage->uImageFlags |= VD_IMAGE_FLAGS_FIXED;
3547 break;
3548 }
3549 }
3550
3551 rc = vmdkAllocateGrainTableCache(pImage);
3552 if (RT_FAILURE(rc))
3553 goto out;
3554
3555out:
3556 if (RT_FAILURE(rc))
3557 vmdkFreeImage(pImage, false);
3558 return rc;
3559}
3560
3561/**
3562 * Internal: create VMDK images for raw disk/partition access.
3563 */
3564static int vmdkCreateRawImage(PVMDKIMAGE pImage, const PVBOXHDDRAW pRaw,
3565 uint64_t cbSize)
3566{
3567 int rc = VINF_SUCCESS;
3568 PVMDKEXTENT pExtent;
3569
3570 if (pRaw->fRawDisk)
3571 {
3572 /* Full raw disk access. This requires setting up a descriptor
3573 * file and open the (flat) raw disk. */
3574 rc = vmdkCreateExtents(pImage, 1);
3575 if (RT_FAILURE(rc))
3576 return vmdkError(pImage, rc, RT_SRC_POS, N_("VMDK: could not create new extent list in '%s'"), pImage->pszFilename);
3577 pExtent = &pImage->pExtents[0];
3578 /* Create raw disk descriptor file. */
3579 rc = vmdkFileOpen(pImage, &pImage->pFile, pImage->pszFilename,
3580 VDOpenFlagsToFileOpenFlags(pImage->uOpenFlags,
3581 true /* fCreate */),
3582 false /* fAsyncIO */);
3583 if (RT_FAILURE(rc))
3584 return vmdkError(pImage, rc, RT_SRC_POS, N_("VMDK: could not create new file '%s'"), pImage->pszFilename);
3585
3586 /* Set up basename for extent description. Cannot use StrDup. */
3587 size_t cbBasename = strlen(pRaw->pszRawDisk) + 1;
3588 char *pszBasename = (char *)RTMemTmpAlloc(cbBasename);
3589 if (!pszBasename)
3590 return VERR_NO_MEMORY;
3591 memcpy(pszBasename, pRaw->pszRawDisk, cbBasename);
3592 pExtent->pszBasename = pszBasename;
3593 /* For raw disks the full name is identical to the base name. */
3594 pExtent->pszFullname = RTStrDup(pszBasename);
3595 if (!pExtent->pszFullname)
3596 return VERR_NO_MEMORY;
3597 pExtent->enmType = VMDKETYPE_FLAT;
3598 pExtent->cNominalSectors = VMDK_BYTE2SECTOR(cbSize);
3599 pExtent->uSectorOffset = 0;
3600 pExtent->enmAccess = VMDKACCESS_READWRITE;
3601 pExtent->fMetaDirty = false;
3602
3603 /* Open flat image, the raw disk. */
3604 rc = vmdkFileOpen(pImage, &pExtent->pFile, pExtent->pszFullname,
3605 VDOpenFlagsToFileOpenFlags(pImage->uOpenFlags & ~VD_OPEN_FLAGS_READONLY,
3606 false /* fCreate */),
3607 false /* fAsyncIO */);
3608 if (RT_FAILURE(rc))
3609 return vmdkError(pImage, rc, RT_SRC_POS, N_("VMDK: could not open raw disk file '%s'"), pExtent->pszFullname);
3610 }
3611 else
3612 {
3613 /* Raw partition access. This requires setting up a descriptor
3614 * file, write the partition information to a flat extent and
3615 * open all the (flat) raw disk partitions. */
3616
3617 /* First pass over the partition data areas to determine how many
3618 * extents we need. One data area can require up to 2 extents, as
3619 * it might be necessary to skip over unpartitioned space. */
3620 unsigned cExtents = 0;
3621 uint64_t uStart = 0;
3622 for (unsigned i = 0; i < pRaw->cPartDescs; i++)
3623 {
3624 PVBOXHDDRAWPARTDESC pPart = &pRaw->pPartDescs[i];
3625 if (uStart > pPart->uStart)
3626 return vmdkError(pImage, VERR_INVALID_PARAMETER, RT_SRC_POS, N_("VMDK: incorrect partition data area ordering set up by the caller in '%s'"), pImage->pszFilename);
3627
3628 if (uStart < pPart->uStart)
3629 cExtents++;
3630 uStart = pPart->uStart + pPart->cbData;
3631 cExtents++;
3632 }
3633 /* Another extent for filling up the rest of the image. */
3634 if (uStart != cbSize)
3635 cExtents++;
3636
3637 rc = vmdkCreateExtents(pImage, cExtents);
3638 if (RT_FAILURE(rc))
3639 return vmdkError(pImage, rc, RT_SRC_POS, N_("VMDK: could not create new extent list in '%s'"), pImage->pszFilename);
3640
3641 /* Create raw partition descriptor file. */
3642 rc = vmdkFileOpen(pImage, &pImage->pFile, pImage->pszFilename,
3643 VDOpenFlagsToFileOpenFlags(pImage->uOpenFlags,
3644 true /* fCreate */),
3645 false /* fAsyncIO */);
3646 if (RT_FAILURE(rc))
3647 return vmdkError(pImage, rc, RT_SRC_POS, N_("VMDK: could not create new file '%s'"), pImage->pszFilename);
3648
3649 /* Create base filename for the partition table extent. */
3650 /** @todo remove fixed buffer without creating memory leaks. */
3651 char pszPartition[1024];
3652 const char *pszBase = RTPathFilename(pImage->pszFilename);
3653 const char *pszExt = RTPathExt(pszBase);
3654 if (pszExt == NULL)
3655 return vmdkError(pImage, rc, RT_SRC_POS, N_("VMDK: invalid filename '%s'"), pImage->pszFilename);
3656 char *pszBaseBase = RTStrDup(pszBase);
3657 if (!pszBaseBase)
3658 return VERR_NO_MEMORY;
3659 RTPathStripExt(pszBaseBase);
3660 RTStrPrintf(pszPartition, sizeof(pszPartition), "%s-pt%s",
3661 pszBaseBase, pszExt);
3662 RTStrFree(pszBaseBase);
3663
3664 /* Second pass over the partitions, now define all extents. */
3665 uint64_t uPartOffset = 0;
3666 cExtents = 0;
3667 uStart = 0;
3668 for (unsigned i = 0; i < pRaw->cPartDescs; i++)
3669 {
3670 PVBOXHDDRAWPARTDESC pPart = &pRaw->pPartDescs[i];
3671 pExtent = &pImage->pExtents[cExtents++];
3672
3673 if (uStart < pPart->uStart)
3674 {
3675 pExtent->pszBasename = NULL;
3676 pExtent->pszFullname = NULL;
3677 pExtent->enmType = VMDKETYPE_ZERO;
3678 pExtent->cNominalSectors = VMDK_BYTE2SECTOR(pPart->uStart - uStart);
3679 pExtent->uSectorOffset = 0;
3680 pExtent->enmAccess = VMDKACCESS_READWRITE;
3681 pExtent->fMetaDirty = false;
3682 /* go to next extent */
3683 pExtent = &pImage->pExtents[cExtents++];
3684 }
3685 uStart = pPart->uStart + pPart->cbData;
3686
3687 if (pPart->pvPartitionData)
3688 {
3689 /* Set up basename for extent description. Can't use StrDup. */
3690 size_t cbBasename = strlen(pszPartition) + 1;
3691 char *pszBasename = (char *)RTMemTmpAlloc(cbBasename);
3692 if (!pszBasename)
3693 return VERR_NO_MEMORY;
3694 memcpy(pszBasename, pszPartition, cbBasename);
3695 pExtent->pszBasename = pszBasename;
3696
3697 /* Set up full name for partition extent. */
3698 size_t cbDirname;
3699 char *pszDirname = RTStrDup(pImage->pszFilename);
3700 if (!pszDirname)
3701 return VERR_NO_MEMORY;
3702 RTPathStripFilename(pszDirname);
3703 cbDirname = strlen(pszDirname);
3704 char *pszFullname;
3705 rc = RTStrAPrintf(&pszFullname, "%s%c%s", pszDirname,
3706 RTPATH_SLASH, pExtent->pszBasename);
3707 RTStrFree(pszDirname);
3708 if (RT_FAILURE(rc))
3709 return rc;
3710 pExtent->pszFullname = pszFullname;
3711 pExtent->enmType = VMDKETYPE_FLAT;
3712 pExtent->cNominalSectors = VMDK_BYTE2SECTOR(pPart->cbData);
3713 pExtent->uSectorOffset = uPartOffset;
3714 pExtent->enmAccess = VMDKACCESS_READWRITE;
3715 pExtent->fMetaDirty = false;
3716
3717 /* Create partition table flat image. */
3718 rc = vmdkFileOpen(pImage, &pExtent->pFile, pExtent->pszFullname,
3719 VDOpenFlagsToFileOpenFlags(pImage->uOpenFlags,
3720 true /* fCreate */),
3721 false /* fAsyncIO */);
3722 if (RT_FAILURE(rc))
3723 return vmdkError(pImage, rc, RT_SRC_POS, N_("VMDK: could not create new partition data file '%s'"), pExtent->pszFullname);
3724 rc = vmdkFileWriteSync(pImage, pExtent->pFile,
3725 VMDK_SECTOR2BYTE(uPartOffset),
3726 pPart->pvPartitionData,
3727 pPart->cbData, NULL);
3728 if (RT_FAILURE(rc))
3729 return vmdkError(pImage, rc, RT_SRC_POS, N_("VMDK: could not write partition data to '%s'"), pExtent->pszFullname);
3730 uPartOffset += VMDK_BYTE2SECTOR(pPart->cbData);
3731 }
3732 else
3733 {
3734 if (pPart->pszRawDevice)
3735 {
3736 /* Set up basename for extent descr. Can't use StrDup. */
3737 size_t cbBasename = strlen(pPart->pszRawDevice) + 1;
3738 char *pszBasename = (char *)RTMemTmpAlloc(cbBasename);
3739 if (!pszBasename)
3740 return VERR_NO_MEMORY;
3741 memcpy(pszBasename, pPart->pszRawDevice, cbBasename);
3742 pExtent->pszBasename = pszBasename;
3743 /* For raw disks full name is identical to base name. */
3744 pExtent->pszFullname = RTStrDup(pszBasename);
3745 if (!pExtent->pszFullname)
3746 return VERR_NO_MEMORY;
3747 pExtent->enmType = VMDKETYPE_FLAT;
3748 pExtent->cNominalSectors = VMDK_BYTE2SECTOR(pPart->cbData);
3749 pExtent->uSectorOffset = VMDK_BYTE2SECTOR(pPart->uStartOffset);
3750 pExtent->enmAccess = VMDKACCESS_READWRITE;
3751 pExtent->fMetaDirty = false;
3752
3753 /* Open flat image, the raw partition. */
3754 rc = vmdkFileOpen(pImage, &pExtent->pFile, pExtent->pszFullname,
3755 VDOpenFlagsToFileOpenFlags(pImage->uOpenFlags & ~VD_OPEN_FLAGS_READONLY,
3756 false /* fCreate */),
3757 false /* fAsyncIO */);
3758 if (RT_FAILURE(rc))
3759 return vmdkError(pImage, rc, RT_SRC_POS, N_("VMDK: could not open raw partition file '%s'"), pExtent->pszFullname);
3760 }
3761 else
3762 {
3763 pExtent->pszBasename = NULL;
3764 pExtent->pszFullname = NULL;
3765 pExtent->enmType = VMDKETYPE_ZERO;
3766 pExtent->cNominalSectors = VMDK_BYTE2SECTOR(pPart->cbData);
3767 pExtent->uSectorOffset = 0;
3768 pExtent->enmAccess = VMDKACCESS_READWRITE;
3769 pExtent->fMetaDirty = false;
3770 }
3771 }
3772 }
3773 /* Another extent for filling up the rest of the image. */
3774 if (uStart != cbSize)
3775 {
3776 pExtent = &pImage->pExtents[cExtents++];
3777 pExtent->pszBasename = NULL;
3778 pExtent->pszFullname = NULL;
3779 pExtent->enmType = VMDKETYPE_ZERO;
3780 pExtent->cNominalSectors = VMDK_BYTE2SECTOR(cbSize - uStart);
3781 pExtent->uSectorOffset = 0;
3782 pExtent->enmAccess = VMDKACCESS_READWRITE;
3783 pExtent->fMetaDirty = false;
3784 }
3785 }
3786
3787 rc = vmdkDescBaseSetStr(pImage, &pImage->Descriptor, "createType",
3788 pRaw->fRawDisk ?
3789 "fullDevice" : "partitionedDevice");
3790 if (RT_FAILURE(rc))
3791 return vmdkError(pImage, rc, RT_SRC_POS, N_("VMDK: could not set the image type in '%s'"), pImage->pszFilename);
3792 return rc;
3793}
3794
3795/**
3796 * Internal: create a regular (i.e. file-backed) VMDK image.
3797 */
3798static int vmdkCreateRegularImage(PVMDKIMAGE pImage, uint64_t cbSize,
3799 unsigned uImageFlags,
3800 PFNVDPROGRESS pfnProgress, void *pvUser,
3801 unsigned uPercentStart, unsigned uPercentSpan)
3802{
3803 int rc = VINF_SUCCESS;
3804 unsigned cExtents = 1;
3805 uint64_t cbOffset = 0;
3806 uint64_t cbRemaining = cbSize;
3807
3808 if (uImageFlags & VD_VMDK_IMAGE_FLAGS_SPLIT_2G)
3809 {
3810 cExtents = cbSize / VMDK_2G_SPLIT_SIZE;
3811 /* Do proper extent computation: need one smaller extent if the total
3812 * size isn't evenly divisible by the split size. */
3813 if (cbSize % VMDK_2G_SPLIT_SIZE)
3814 cExtents++;
3815 }
3816 rc = vmdkCreateExtents(pImage, cExtents);
3817 if (RT_FAILURE(rc))
3818 return vmdkError(pImage, rc, RT_SRC_POS, N_("VMDK: could not create new extent list in '%s'"), pImage->pszFilename);
3819
3820 /* Basename strings needed for constructing the extent names. */
3821 char *pszBasenameSubstr = RTPathFilename(pImage->pszFilename);
3822 AssertPtr(pszBasenameSubstr);
3823 size_t cbBasenameSubstr = strlen(pszBasenameSubstr) + 1;
3824
3825 /* Create searate descriptor file if necessary. */
3826 if (cExtents != 1 || (uImageFlags & VD_IMAGE_FLAGS_FIXED))
3827 {
3828 rc = vmdkFileOpen(pImage, &pImage->pFile, pImage->pszFilename,
3829 VDOpenFlagsToFileOpenFlags(pImage->uOpenFlags,
3830 true /* fCreate */),
3831 false /* fAsyncIO */);
3832 if (RT_FAILURE(rc))
3833 return vmdkError(pImage, rc, RT_SRC_POS, N_("VMDK: could not create new sparse descriptor file '%s'"), pImage->pszFilename);
3834 }
3835 else
3836 pImage->pFile = NULL;
3837
3838 /* Set up all extents. */
3839 for (unsigned i = 0; i < cExtents; i++)
3840 {
3841 PVMDKEXTENT pExtent = &pImage->pExtents[i];
3842 uint64_t cbExtent = cbRemaining;
3843
3844 /* Set up fullname/basename for extent description. Cannot use StrDup
3845 * for basename, as it is not guaranteed that the memory can be freed
3846 * with RTMemTmpFree, which must be used as in other code paths
3847 * StrDup is not usable. */
3848 if (cExtents == 1 && !(uImageFlags & VD_IMAGE_FLAGS_FIXED))
3849 {
3850 char *pszBasename = (char *)RTMemTmpAlloc(cbBasenameSubstr);
3851 if (!pszBasename)
3852 return VERR_NO_MEMORY;
3853 memcpy(pszBasename, pszBasenameSubstr, cbBasenameSubstr);
3854 pExtent->pszBasename = pszBasename;
3855 }
3856 else
3857 {
3858 char *pszBasenameExt = RTPathExt(pszBasenameSubstr);
3859 char *pszBasenameBase = RTStrDup(pszBasenameSubstr);
3860 RTPathStripExt(pszBasenameBase);
3861 char *pszTmp;
3862 size_t cbTmp;
3863 if (uImageFlags & VD_IMAGE_FLAGS_FIXED)
3864 {
3865 if (cExtents == 1)
3866 rc = RTStrAPrintf(&pszTmp, "%s-flat%s", pszBasenameBase,
3867 pszBasenameExt);
3868 else
3869 rc = RTStrAPrintf(&pszTmp, "%s-f%03d%s", pszBasenameBase,
3870 i+1, pszBasenameExt);
3871 }
3872 else
3873 rc = RTStrAPrintf(&pszTmp, "%s-s%03d%s", pszBasenameBase, i+1,
3874 pszBasenameExt);
3875 RTStrFree(pszBasenameBase);
3876 if (RT_FAILURE(rc))
3877 return rc;
3878 cbTmp = strlen(pszTmp) + 1;
3879 char *pszBasename = (char *)RTMemTmpAlloc(cbTmp);
3880 if (!pszBasename)
3881 return VERR_NO_MEMORY;
3882 memcpy(pszBasename, pszTmp, cbTmp);
3883 RTStrFree(pszTmp);
3884 pExtent->pszBasename = pszBasename;
3885 if (uImageFlags & VD_VMDK_IMAGE_FLAGS_SPLIT_2G)
3886 cbExtent = RT_MIN(cbRemaining, VMDK_2G_SPLIT_SIZE);
3887 }
3888 char *pszBasedirectory = RTStrDup(pImage->pszFilename);
3889 RTPathStripFilename(pszBasedirectory);
3890 char *pszFullname;
3891 rc = RTStrAPrintf(&pszFullname, "%s%c%s", pszBasedirectory,
3892 RTPATH_SLASH, pExtent->pszBasename);
3893 RTStrFree(pszBasedirectory);
3894 if (RT_FAILURE(rc))
3895 return rc;
3896 pExtent->pszFullname = pszFullname;
3897
3898 /* Create file for extent. */
3899 rc = vmdkFileOpen(pImage, &pExtent->pFile, pExtent->pszFullname,
3900 VDOpenFlagsToFileOpenFlags(pImage->uOpenFlags,
3901 true /* fCreate */),
3902 false /* fAsyncIO */);
3903 if (RT_FAILURE(rc))
3904 return vmdkError(pImage, rc, RT_SRC_POS, N_("VMDK: could not create new file '%s'"), pExtent->pszFullname);
3905 if (uImageFlags & VD_IMAGE_FLAGS_FIXED)
3906 {
3907 rc = vmdkFileSetSize(pImage, pExtent->pFile, cbExtent);
3908 if (RT_FAILURE(rc))
3909 return vmdkError(pImage, rc, RT_SRC_POS, N_("VMDK: could not set size of new file '%s'"), pExtent->pszFullname);
3910
3911 /* Fill image with zeroes. We do this for every fixed-size image since on some systems
3912 * (for example Windows Vista), it takes ages to write a block near the end of a sparse
3913 * file and the guest could complain about an ATA timeout. */
3914
3915 /** @todo Starting with Linux 2.6.23, there is an fallocate() system call.
3916 * Currently supported file systems are ext4 and ocfs2. */
3917
3918 /* Allocate a temporary zero-filled buffer. Use a bigger block size to optimize writing */
3919 const size_t cbBuf = 128 * _1K;
3920 void *pvBuf = RTMemTmpAllocZ(cbBuf);
3921 if (!pvBuf)
3922 return VERR_NO_MEMORY;
3923
3924 uint64_t uOff = 0;
3925 /* Write data to all image blocks. */
3926 while (uOff < cbExtent)
3927 {
3928 unsigned cbChunk = (unsigned)RT_MIN(cbExtent, cbBuf);
3929
3930 rc = vmdkFileWriteSync(pImage, pExtent->pFile, uOff, pvBuf, cbChunk, NULL);
3931 if (RT_FAILURE(rc))
3932 {
3933 RTMemFree(pvBuf);
3934 return vmdkError(pImage, rc, RT_SRC_POS, N_("VMDK: writing block failed for '%s'"), pImage->pszFilename);
3935 }
3936
3937 uOff += cbChunk;
3938
3939 if (pfnProgress)
3940 {
3941 rc = pfnProgress(pvUser,
3942 uPercentStart + uOff * uPercentSpan / cbExtent);
3943 if (RT_FAILURE(rc))
3944 {
3945 RTMemFree(pvBuf);
3946 return rc;
3947 }
3948 }
3949 }
3950 RTMemTmpFree(pvBuf);
3951 }
3952
3953 /* Place descriptor file information (where integrated). */
3954 if (cExtents == 1 && !(uImageFlags & VD_IMAGE_FLAGS_FIXED))
3955 {
3956 pExtent->uDescriptorSector = 1;
3957 pExtent->cDescriptorSectors = VMDK_BYTE2SECTOR(pImage->cbDescAlloc);
3958 /* The descriptor is part of the (only) extent. */
3959 pExtent->pDescData = pImage->pDescData;
3960 pImage->pDescData = NULL;
3961 }
3962
3963 if (!(uImageFlags & VD_IMAGE_FLAGS_FIXED))
3964 {
3965 uint64_t cSectorsPerGDE, cSectorsPerGD;
3966 pExtent->enmType = VMDKETYPE_HOSTED_SPARSE;
3967 pExtent->cSectors = VMDK_BYTE2SECTOR(RT_ALIGN_64(cbExtent, _64K));
3968 pExtent->cSectorsPerGrain = VMDK_BYTE2SECTOR(_64K);
3969 pExtent->cGTEntries = 512;
3970 cSectorsPerGDE = pExtent->cGTEntries * pExtent->cSectorsPerGrain;
3971 pExtent->cSectorsPerGDE = cSectorsPerGDE;
3972 pExtent->cGDEntries = (pExtent->cSectors + cSectorsPerGDE - 1) / cSectorsPerGDE;
3973 cSectorsPerGD = (pExtent->cGDEntries + (512 / sizeof(uint32_t) - 1)) / (512 / sizeof(uint32_t));
3974 if (pImage->uImageFlags & VD_VMDK_IMAGE_FLAGS_STREAM_OPTIMIZED)
3975 {
3976 /* The spec says version is 1 for all VMDKs, but the vast
3977 * majority of streamOptimized VMDKs actually contain
3978 * version 3 - so go with the majority. Both are acepted. */
3979 pExtent->uVersion = 3;
3980 pExtent->uCompression = VMDK_COMPRESSION_DEFLATE;
3981 }
3982 }
3983 else
3984 {
3985 if (uImageFlags & VD_VMDK_IMAGE_FLAGS_ESX)
3986 pExtent->enmType = VMDKETYPE_VMFS;
3987 else
3988 pExtent->enmType = VMDKETYPE_FLAT;
3989 }
3990
3991 pExtent->enmAccess = VMDKACCESS_READWRITE;
3992 pExtent->fUncleanShutdown = true;
3993 pExtent->cNominalSectors = VMDK_BYTE2SECTOR(cbExtent);
3994 pExtent->uSectorOffset = 0;
3995 pExtent->fMetaDirty = true;
3996
3997 if (!(uImageFlags & VD_IMAGE_FLAGS_FIXED))
3998 {
3999 /* fPreAlloc should never be false because VMware can't use such images. */
4000 rc = vmdkCreateGrainDirectory(pImage, pExtent,
4001 RT_MAX( pExtent->uDescriptorSector
4002 + pExtent->cDescriptorSectors,
4003 1),
4004 true /* fPreAlloc */);
4005 if (RT_FAILURE(rc))
4006 return vmdkError(pImage, rc, RT_SRC_POS, N_("VMDK: could not create new grain directory in '%s'"), pExtent->pszFullname);
4007 }
4008
4009 if (RT_SUCCESS(rc) && pfnProgress)
4010 pfnProgress(pvUser, uPercentStart + i * uPercentSpan / cExtents);
4011
4012 cbRemaining -= cbExtent;
4013 cbOffset += cbExtent;
4014 }
4015
4016 if (pImage->uImageFlags & VD_VMDK_IMAGE_FLAGS_ESX)
4017 {
4018 /* VirtualBox doesn't care, but VMWare ESX freaks out if the wrong
4019 * controller type is set in an image. */
4020 rc = vmdkDescDDBSetStr(pImage, &pImage->Descriptor, "ddb.adapterType", "lsilogic");
4021 if (RT_FAILURE(rc))
4022 return vmdkError(pImage, rc, RT_SRC_POS, N_("VMDK: could not set controller type to lsilogic in '%s'"), pImage->pszFilename);
4023 }
4024
4025 const char *pszDescType = NULL;
4026 if (uImageFlags & VD_IMAGE_FLAGS_FIXED)
4027 {
4028 if (pImage->uImageFlags & VD_VMDK_IMAGE_FLAGS_ESX)
4029 pszDescType = "vmfs";
4030 else
4031 pszDescType = (cExtents == 1)
4032 ? "monolithicFlat" : "twoGbMaxExtentFlat";
4033 }
4034 else
4035 {
4036 if (pImage->uImageFlags & VD_VMDK_IMAGE_FLAGS_STREAM_OPTIMIZED)
4037 pszDescType = "streamOptimized";
4038 else
4039 {
4040 pszDescType = (cExtents == 1)
4041 ? "monolithicSparse" : "twoGbMaxExtentSparse";
4042 }
4043 }
4044 rc = vmdkDescBaseSetStr(pImage, &pImage->Descriptor, "createType",
4045 pszDescType);
4046 if (RT_FAILURE(rc))
4047 return vmdkError(pImage, rc, RT_SRC_POS, N_("VMDK: could not set the image type in '%s'"), pImage->pszFilename);
4048 return rc;
4049}
4050
/**
 * Internal: The actual code for creating any VMDK variant currently in
 * existence on hosted environments.
 *
 * Sets up the error and I/O interfaces, creates the descriptor, then builds
 * the extents either as a raw disk image or as a regular (fixed/sparse)
 * image. Afterwards the extent list, geometry, UUIDs and comment are written
 * into the descriptor and the whole image is flushed. On failure the
 * half-created image is freed via vmdkFreeImage (files are deleted unless
 * the failure was VERR_ALREADY_EXISTS).
 *
 * @returns VBox status code.
 * @param   pImage          The image instance to initialize.
 * @param   cbSize          Logical image size in bytes.
 * @param   uImageFlags     VD_IMAGE_FLAGS_* / VD_VMDK_IMAGE_FLAGS_* flags.
 * @param   pszComment      Image comment; for raw disk images this pointer
 *                          actually carries a PVBOXHDDRAW (see below).
 * @param   pPCHSGeometry   Physical CHS geometry; ignored if any part is 0.
 * @param   pLCHSGeometry   Logical CHS geometry; ignored if any part is 0.
 * @param   pUuid           Image UUID to store in the descriptor.
 * @param   pfnProgress     Optional progress callback, may be NULL.
 * @param   pvUser          Opaque user argument for pfnProgress.
 * @param   uPercentStart   Start of the progress range to report.
 * @param   uPercentSpan    Span of the progress range to report.
 */
static int vmdkCreateImage(PVMDKIMAGE pImage, uint64_t cbSize,
                           unsigned uImageFlags, const char *pszComment,
                           PCVDGEOMETRY pPCHSGeometry,
                           PCVDGEOMETRY pLCHSGeometry, PCRTUUID pUuid,
                           PFNVDPROGRESS pfnProgress, void *pvUser,
                           unsigned uPercentStart, unsigned uPercentSpan)
{
    int rc;

    pImage->uImageFlags = uImageFlags;

    /* Try to get error interface. */
    pImage->pInterfaceError = VDInterfaceGet(pImage->pVDIfsDisk, VDINTERFACETYPE_ERROR);
    if (pImage->pInterfaceError)
        pImage->pInterfaceErrorCallbacks = VDGetInterfaceError(pImage->pInterfaceError);

    /* Get I/O interface. Mandatory - without it no file can be accessed. */
    pImage->pInterfaceIO = VDInterfaceGet(pImage->pVDIfsImage, VDINTERFACETYPE_IO);
    AssertPtrReturn(pImage->pInterfaceIO, VERR_INVALID_PARAMETER);
    pImage->pInterfaceIOCallbacks = VDGetInterfaceIO(pImage->pInterfaceIO);
    AssertPtrReturn(pImage->pInterfaceIOCallbacks, VERR_INVALID_PARAMETER);

    rc = vmdkCreateDescriptor(pImage, pImage->pDescData, pImage->cbDescAlloc,
                              &pImage->Descriptor);
    if (RT_FAILURE(rc))
    {
        rc = vmdkError(pImage, rc, RT_SRC_POS, N_("VMDK: could not create new descriptor in '%s'"), pImage->pszFilename);
        goto out;
    }

    if (    (uImageFlags & VD_IMAGE_FLAGS_FIXED)
        &&  (uImageFlags & VD_VMDK_IMAGE_FLAGS_RAWDISK))
    {
        /* Raw disk image (includes raw partition). */
        const PVBOXHDDRAW pRaw = (const PVBOXHDDRAW)pszComment;
        /* As the comment is misused, zap it so that no garbage comment
         * is set below. */
        pszComment = NULL;
        rc = vmdkCreateRawImage(pImage, pRaw, cbSize);
    }
    else
    {
        /* Regular fixed or sparse image (monolithic or split).
         * Reserve the last 5% of the progress span for the descriptor
         * and flush work done below. */
        rc = vmdkCreateRegularImage(pImage, cbSize, uImageFlags,
                                    pfnProgress, pvUser, uPercentStart,
                                    uPercentSpan * 95 / 100);
    }

    if (RT_FAILURE(rc))
        goto out;

    if (RT_SUCCESS(rc) && pfnProgress)
        pfnProgress(pvUser, uPercentStart + uPercentSpan * 98 / 100);

    pImage->cbSize = cbSize;

    /* Mirror the in-memory extent list into the descriptor. */
    for (unsigned i = 0; i < pImage->cExtents; i++)
    {
        PVMDKEXTENT pExtent = &pImage->pExtents[i];

        rc = vmdkDescExtInsert(pImage, &pImage->Descriptor, pExtent->enmAccess,
                               pExtent->cNominalSectors, pExtent->enmType,
                               pExtent->pszBasename, pExtent->uSectorOffset);
        if (RT_FAILURE(rc))
        {
            rc = vmdkError(pImage, rc, RT_SRC_POS, N_("VMDK: could not insert the extent list into descriptor in '%s'"), pImage->pszFilename);
            goto out;
        }
    }
    vmdkDescExtRemoveDummy(pImage, &pImage->Descriptor);

    /* Geometry is only stored when fully specified; zero components mean
     * "not set". */
    if (    pPCHSGeometry->cCylinders != 0
        &&  pPCHSGeometry->cHeads != 0
        &&  pPCHSGeometry->cSectors != 0)
    {
        rc = vmdkDescSetPCHSGeometry(pImage, pPCHSGeometry);
        if (RT_FAILURE(rc))
            goto out;
    }
    if (    pLCHSGeometry->cCylinders != 0
        &&  pLCHSGeometry->cHeads != 0
        &&  pLCHSGeometry->cSectors != 0)
    {
        rc = vmdkDescSetLCHSGeometry(pImage, pLCHSGeometry);
        if (RT_FAILURE(rc))
            goto out;
    }

    pImage->LCHSGeometry = *pLCHSGeometry;
    pImage->PCHSGeometry = *pPCHSGeometry;

    /* Store the image UUID and clear the parent/modification UUIDs - a
     * freshly created image has no parent and no modifications yet. */
    pImage->ImageUuid = *pUuid;
    rc = vmdkDescDDBSetUuid(pImage, &pImage->Descriptor,
                            VMDK_DDB_IMAGE_UUID, &pImage->ImageUuid);
    if (RT_FAILURE(rc))
    {
        rc = vmdkError(pImage, rc, RT_SRC_POS, N_("VMDK: error storing image UUID in new descriptor in '%s'"), pImage->pszFilename);
        goto out;
    }
    RTUuidClear(&pImage->ParentUuid);
    rc = vmdkDescDDBSetUuid(pImage, &pImage->Descriptor,
                            VMDK_DDB_PARENT_UUID, &pImage->ParentUuid);
    if (RT_FAILURE(rc))
    {
        rc = vmdkError(pImage, rc, RT_SRC_POS, N_("VMDK: error storing parent image UUID in new descriptor in '%s'"), pImage->pszFilename);
        goto out;
    }
    RTUuidClear(&pImage->ModificationUuid);
    rc = vmdkDescDDBSetUuid(pImage, &pImage->Descriptor,
                            VMDK_DDB_MODIFICATION_UUID,
                            &pImage->ModificationUuid);
    if (RT_FAILURE(rc))
    {
        rc = vmdkError(pImage, rc, RT_SRC_POS, N_("VMDK: error storing modification UUID in new descriptor in '%s'"), pImage->pszFilename);
        goto out;
    }
    RTUuidClear(&pImage->ParentModificationUuid);
    rc = vmdkDescDDBSetUuid(pImage, &pImage->Descriptor,
                            VMDK_DDB_PARENT_MODIFICATION_UUID,
                            &pImage->ParentModificationUuid);
    if (RT_FAILURE(rc))
    {
        rc = vmdkError(pImage, rc, RT_SRC_POS, N_("VMDK: error storing parent modification UUID in new descriptor in '%s'"), pImage->pszFilename);
        goto out;
    }

    rc = vmdkAllocateGrainTableCache(pImage);
    if (RT_FAILURE(rc))
        goto out;

    rc = vmdkSetImageComment(pImage, pszComment);
    if (RT_FAILURE(rc))
    {
        rc = vmdkError(pImage, rc, RT_SRC_POS, N_("VMDK: cannot set image comment in '%s'"), pImage->pszFilename);
        goto out;
    }

    if (RT_SUCCESS(rc) && pfnProgress)
        pfnProgress(pvUser, uPercentStart + uPercentSpan * 99 / 100);

    rc = vmdkFlushImage(pImage);

out:
    if (RT_SUCCESS(rc) && pfnProgress)
        pfnProgress(pvUser, uPercentStart + uPercentSpan);

    /* On failure free (and delete) the half-created image; keep the files
     * only if creation failed because they already existed. */
    if (RT_FAILURE(rc))
        vmdkFreeImage(pImage, rc != VERR_ALREADY_EXISTS);
    return rc;
}
4205
4206/**
4207 * Internal: Update image comment.
4208 */
4209static int vmdkSetImageComment(PVMDKIMAGE pImage, const char *pszComment)
4210{
4211 char *pszCommentEncoded;
4212 if (pszComment)
4213 {
4214 pszCommentEncoded = vmdkEncodeString(pszComment);
4215 if (!pszCommentEncoded)
4216 return VERR_NO_MEMORY;
4217 }
4218 else
4219 pszCommentEncoded = NULL;
4220 int rc = vmdkDescDDBSetStr(pImage, &pImage->Descriptor,
4221 "ddb.comment", pszCommentEncoded);
4222 if (pszComment)
4223 RTStrFree(pszCommentEncoded);
4224 if (RT_FAILURE(rc))
4225 return vmdkError(pImage, rc, RT_SRC_POS, N_("VMDK: error storing image comment in descriptor in '%s'"), pImage->pszFilename);
4226 return VINF_SUCCESS;
4227}
4228
4229/**
4230 * Internal. Free all allocated space for representing an image, and optionally
4231 * delete the image from disk.
4232 */
4233static int vmdkFreeImage(PVMDKIMAGE pImage, bool fDelete)
4234{
4235 int rc = VINF_SUCCESS;
4236
4237 /* Freeing a never allocated image (e.g. because the open failed) is
4238 * not signalled as an error. After all nothing bad happens. */
4239 if (pImage)
4240 {
4241 if (!(pImage->uOpenFlags & VD_OPEN_FLAGS_READONLY))
4242 {
4243 /* Mark all extents as clean. */
4244 for (unsigned i = 0; i < pImage->cExtents; i++)
4245 {
4246 if ( ( pImage->pExtents[i].enmType == VMDKETYPE_HOSTED_SPARSE
4247#ifdef VBOX_WITH_VMDK_ESX
4248 || pImage->pExtents[i].enmType == VMDKETYPE_ESX_SPARSE
4249#endif /* VBOX_WITH_VMDK_ESX */
4250 )
4251 && pImage->pExtents[i].fUncleanShutdown)
4252 {
4253 pImage->pExtents[i].fUncleanShutdown = false;
4254 pImage->pExtents[i].fMetaDirty = true;
4255 }
4256 }
4257 }
4258 vmdkFlushImage(pImage);
4259
4260 if (pImage->pExtents != NULL)
4261 {
4262 for (unsigned i = 0 ; i < pImage->cExtents; i++)
4263 vmdkFreeExtentData(pImage, &pImage->pExtents[i], fDelete);
4264 RTMemFree(pImage->pExtents);
4265 pImage->pExtents = NULL;
4266 }
4267 pImage->cExtents = 0;
4268 if (pImage->pFile != NULL)
4269 vmdkFileClose(pImage, &pImage->pFile, fDelete);
4270 vmdkFileCheckAllClose(pImage);
4271
4272 if (pImage->pGTCache)
4273 {
4274 RTMemFree(pImage->pGTCache);
4275 pImage->pGTCache = NULL;
4276 }
4277 if (pImage->pDescData)
4278 {
4279 RTMemFree(pImage->pDescData);
4280 pImage->pDescData = NULL;
4281 }
4282 }
4283
4284 LogFlowFunc(("returns %Rrc\n", rc));
4285 return rc;
4286}
4287
4288/**
4289 * Internal. Flush image data (and metadata) to disk.
4290 */
4291static int vmdkFlushImage(PVMDKIMAGE pImage)
4292{
4293 PVMDKEXTENT pExtent;
4294 int rc = VINF_SUCCESS;
4295
4296 /* Update descriptor if changed. */
4297 if (pImage->Descriptor.fDirty)
4298 {
4299 rc = vmdkWriteDescriptor(pImage);
4300 if (RT_FAILURE(rc))
4301 goto out;
4302 }
4303
4304 for (unsigned i = 0; i < pImage->cExtents; i++)
4305 {
4306 pExtent = &pImage->pExtents[i];
4307 if (pExtent->pFile != NULL && pExtent->fMetaDirty)
4308 {
4309 switch (pExtent->enmType)
4310 {
4311 case VMDKETYPE_HOSTED_SPARSE:
4312 rc = vmdkWriteMetaSparseExtent(pImage, pExtent, 0);
4313 if (RT_FAILURE(rc))
4314 goto out;
4315 if (pExtent->fFooter)
4316 {
4317 uint64_t cbSize;
4318 rc = vmdkFileGetSize(pImage, pExtent->pFile, &cbSize);
4319 if (RT_FAILURE(rc))
4320 goto out;
4321 cbSize = RT_ALIGN_64(cbSize, 512);
4322 rc = vmdkWriteMetaSparseExtent(pImage, pExtent, cbSize - 2*512);
4323 if (RT_FAILURE(rc))
4324 goto out;
4325 }
4326 break;
4327#ifdef VBOX_WITH_VMDK_ESX
4328 case VMDKETYPE_ESX_SPARSE:
4329 /** @todo update the header. */
4330 break;
4331#endif /* VBOX_WITH_VMDK_ESX */
4332 case VMDKETYPE_VMFS:
4333 case VMDKETYPE_FLAT:
4334 /* Nothing to do. */
4335 break;
4336 case VMDKETYPE_ZERO:
4337 default:
4338 AssertMsgFailed(("extent with type %d marked as dirty\n",
4339 pExtent->enmType));
4340 break;
4341 }
4342 }
4343 switch (pExtent->enmType)
4344 {
4345 case VMDKETYPE_HOSTED_SPARSE:
4346#ifdef VBOX_WITH_VMDK_ESX
4347 case VMDKETYPE_ESX_SPARSE:
4348#endif /* VBOX_WITH_VMDK_ESX */
4349 case VMDKETYPE_VMFS:
4350 case VMDKETYPE_FLAT:
4351 /** @todo implement proper path absolute check. */
4352 if ( pExtent->pFile != NULL
4353 && !(pImage->uOpenFlags & VD_OPEN_FLAGS_READONLY)
4354 && !(pExtent->pszBasename[0] == RTPATH_SLASH))
4355 rc = vmdkFileFlush(pImage, pExtent->pFile);
4356 break;
4357 case VMDKETYPE_ZERO:
4358 /* No need to do anything for this extent. */
4359 break;
4360 default:
4361 AssertMsgFailed(("unknown extent type %d\n", pExtent->enmType));
4362 break;
4363 }
4364 }
4365
4366out:
4367 return rc;
4368}
4369
4370/**
4371 * Internal. Flush image data (and metadata) to disk - async version.
4372 */
4373static int vmdkFlushImageAsync(PVMDKIMAGE pImage, PVDIOCTX pIoCtx)
4374{
4375 PVMDKEXTENT pExtent;
4376 int rc = VINF_SUCCESS;
4377
4378 /* Update descriptor if changed. */
4379 if (pImage->Descriptor.fDirty)
4380 {
4381 rc = vmdkWriteDescriptor(pImage);
4382 if (RT_FAILURE(rc))
4383 goto out;
4384 }
4385
4386 for (unsigned i = 0; i < pImage->cExtents; i++)
4387 {
4388 pExtent = &pImage->pExtents[i];
4389 if (pExtent->pFile != NULL && pExtent->fMetaDirty)
4390 {
4391 switch (pExtent->enmType)
4392 {
4393 case VMDKETYPE_HOSTED_SPARSE:
4394 AssertMsgFailed(("Async I/O not supported for sparse images\n"));
4395 break;
4396#ifdef VBOX_WITH_VMDK_ESX
4397 case VMDKETYPE_ESX_SPARSE:
4398 /** @todo update the header. */
4399 break;
4400#endif /* VBOX_WITH_VMDK_ESX */
4401 case VMDKETYPE_VMFS:
4402 case VMDKETYPE_FLAT:
4403 /* Nothing to do. */
4404 break;
4405 case VMDKETYPE_ZERO:
4406 default:
4407 AssertMsgFailed(("extent with type %d marked as dirty\n",
4408 pExtent->enmType));
4409 break;
4410 }
4411 }
4412 switch (pExtent->enmType)
4413 {
4414 case VMDKETYPE_HOSTED_SPARSE:
4415#ifdef VBOX_WITH_VMDK_ESX
4416 case VMDKETYPE_ESX_SPARSE:
4417#endif /* VBOX_WITH_VMDK_ESX */
4418 case VMDKETYPE_VMFS:
4419 case VMDKETYPE_FLAT:
4420 /** @todo implement proper path absolute check. */
4421 if ( pExtent->pFile != NULL
4422 && !(pImage->uOpenFlags & VD_OPEN_FLAGS_READONLY)
4423 && !(pExtent->pszBasename[0] == RTPATH_SLASH))
4424 rc = vmdkFileFlushAsync(pImage, pExtent->pFile, pIoCtx);
4425 break;
4426 case VMDKETYPE_ZERO:
4427 /* No need to do anything for this extent. */
4428 break;
4429 default:
4430 AssertMsgFailed(("unknown extent type %d\n", pExtent->enmType));
4431 break;
4432 }
4433 }
4434
4435out:
4436 return rc;
4437}
4438
4439/**
4440 * Internal. Find extent corresponding to the sector number in the disk.
4441 */
4442static int vmdkFindExtent(PVMDKIMAGE pImage, uint64_t offSector,
4443 PVMDKEXTENT *ppExtent, uint64_t *puSectorInExtent)
4444{
4445 PVMDKEXTENT pExtent = NULL;
4446 int rc = VINF_SUCCESS;
4447
4448 for (unsigned i = 0; i < pImage->cExtents; i++)
4449 {
4450 if (offSector < pImage->pExtents[i].cNominalSectors)
4451 {
4452 pExtent = &pImage->pExtents[i];
4453 *puSectorInExtent = offSector + pImage->pExtents[i].uSectorOffset;
4454 break;
4455 }
4456 offSector -= pImage->pExtents[i].cNominalSectors;
4457 }
4458
4459 if (pExtent)
4460 *ppExtent = pExtent;
4461 else
4462 rc = VERR_IO_SECTOR_NOT_FOUND;
4463
4464 return rc;
4465}
4466
4467/**
4468 * Internal. Hash function for placing the grain table hash entries.
4469 */
4470static uint32_t vmdkGTCacheHash(PVMDKGTCACHE pCache, uint64_t uSector,
4471 unsigned uExtent)
4472{
4473 /** @todo this hash function is quite simple, maybe use a better one which
4474 * scrambles the bits better. */
4475 return (uSector + uExtent) % pCache->cEntries;
4476}
4477
/**
 * Internal. Get sector number in the extent file from the relative sector
 * number in the extent.
 *
 * Resolves a sector within a sparse extent to the absolute sector in the
 * extent file by looking it up via the grain directory and a cached grain
 * table block. An unallocated grain yields *puExtentSector = 0.
 *
 * @returns VBox status code.
 * @param   pImage          The image instance (provides the GT cache).
 * @param   pExtent         The sparse extent to resolve in.
 * @param   uSector         Sector number relative to the extent start.
 * @param   puExtentSector  Where to store the sector number in the extent
 *                          file; 0 if the grain is not allocated.
 */
static int vmdkGetSector(PVMDKIMAGE pImage, PVMDKEXTENT pExtent,
                         uint64_t uSector, uint64_t *puExtentSector)
{
    PVMDKGTCACHE pCache = pImage->pGTCache;
    uint64_t uGDIndex, uGTSector, uGTBlock;
    uint32_t uGTHash, uGTBlockIndex;
    PVMDKGTCACHEENTRY pGTCacheEntry;
    uint32_t aGTDataTmp[VMDK_GT_CACHELINE_SIZE];
    int rc;

    /* Locate the grain directory entry covering this sector. */
    uGDIndex = uSector / pExtent->cSectorsPerGDE;
    if (uGDIndex >= pExtent->cGDEntries)
        return VERR_OUT_OF_RANGE;
    uGTSector = pExtent->pGD[uGDIndex];
    if (!uGTSector)
    {
        /* There is no grain table referenced by this grain directory
         * entry. So there is absolutely no data in this area. */
        *puExtentSector = 0;
        return VINF_SUCCESS;
    }

    /* One cache entry covers VMDK_GT_CACHELINE_SIZE consecutive grain
     * table entries ("GT block"). */
    uGTBlock = uSector / (pExtent->cSectorsPerGrain * VMDK_GT_CACHELINE_SIZE);
    uGTHash = vmdkGTCacheHash(pCache, uGTBlock, pExtent->uExtent);
    pGTCacheEntry = &pCache->aGTCache[uGTHash];
    if (   pGTCacheEntry->uExtent != pExtent->uExtent
        || pGTCacheEntry->uGTBlock != uGTBlock)
    {
        /* Cache miss, fetch data from disk. */
        rc = vmdkFileReadSync(pImage, pExtent->pFile,
                              VMDK_SECTOR2BYTE(uGTSector) + (uGTBlock % (pExtent->cGTEntries / VMDK_GT_CACHELINE_SIZE)) * sizeof(aGTDataTmp),
                              aGTDataTmp, sizeof(aGTDataTmp), NULL);
        if (RT_FAILURE(rc))
            return vmdkError(pImage, rc, RT_SRC_POS, N_("VMDK: cannot read grain table entry in '%s'"), pExtent->pszFullname);
        pGTCacheEntry->uExtent = pExtent->uExtent;
        pGTCacheEntry->uGTBlock = uGTBlock;
        /* Grain table entries are little endian on disk. */
        for (unsigned i = 0; i < VMDK_GT_CACHELINE_SIZE; i++)
            pGTCacheEntry->aGTData[i] = RT_LE2H_U32(aGTDataTmp[i]);
    }
    /* Index of this grain's entry within the cached GT block. */
    uGTBlockIndex = (uSector / pExtent->cSectorsPerGrain) % VMDK_GT_CACHELINE_SIZE;
    uint32_t uGrainSector = pGTCacheEntry->aGTData[uGTBlockIndex];
    if (uGrainSector)
        *puExtentSector = uGrainSector + uSector % pExtent->cSectorsPerGrain;
    else
        *puExtentSector = 0; /* grain not allocated */
    return VINF_SUCCESS;
}
4529
/**
 * Internal. Get sector number in the extent file from the relative sector
 * number in the extent - version for async access.
 *
 * Identical lookup logic to vmdkGetSector, but a grain table cache miss is
 * served through the async metadata read path of the given I/O context.
 *
 * @returns VBox status code (including async-in-progress codes from the
 *          metadata read path).
 * @param   pImage          The image instance (provides the GT cache).
 * @param   pIoCtx          The I/O context to use for metadata reads.
 * @param   pExtent         The sparse extent to resolve in.
 * @param   uSector         Sector number relative to the extent start.
 * @param   puExtentSector  Where to store the sector number in the extent
 *                          file; 0 if the grain is not allocated.
 */
static int vmdkGetSectorAsync(PVMDKIMAGE pImage, PVDIOCTX pIoCtx,
                              PVMDKEXTENT pExtent, uint64_t uSector,
                              uint64_t *puExtentSector)
{
    PVMDKGTCACHE pCache = pImage->pGTCache;
    uint64_t uGDIndex, uGTSector, uGTBlock;
    uint32_t uGTHash, uGTBlockIndex;
    PVMDKGTCACHEENTRY pGTCacheEntry;
    uint32_t aGTDataTmp[VMDK_GT_CACHELINE_SIZE];
    int rc;

    /* Locate the grain directory entry covering this sector. */
    uGDIndex = uSector / pExtent->cSectorsPerGDE;
    if (uGDIndex >= pExtent->cGDEntries)
        return VERR_OUT_OF_RANGE;
    uGTSector = pExtent->pGD[uGDIndex];
    if (!uGTSector)
    {
        /* There is no grain table referenced by this grain directory
         * entry. So there is absolutely no data in this area. */
        *puExtentSector = 0;
        return VINF_SUCCESS;
    }

    LogFlowFunc(("uGTSector=%llu\n", uGTSector));

    /* One cache entry covers VMDK_GT_CACHELINE_SIZE consecutive grain
     * table entries ("GT block"). */
    uGTBlock = uSector / (pExtent->cSectorsPerGrain * VMDK_GT_CACHELINE_SIZE);
    uGTHash = vmdkGTCacheHash(pCache, uGTBlock, pExtent->uExtent);
    pGTCacheEntry = &pCache->aGTCache[uGTHash];
    if (   pGTCacheEntry->uExtent != pExtent->uExtent
        || pGTCacheEntry->uGTBlock != uGTBlock)
    {
        /* Cache miss, fetch data from disk. */
        PVDMETAXFER pMetaXfer;
        rc = vmdkFileReadMetaAsync(pImage, pExtent->pFile,
                                   VMDK_SECTOR2BYTE(uGTSector) + (uGTBlock % (pExtent->cGTEntries / VMDK_GT_CACHELINE_SIZE)) * sizeof(aGTDataTmp),
                                   aGTDataTmp, sizeof(aGTDataTmp), pIoCtx, &pMetaXfer, NULL, NULL);
        if (RT_FAILURE(rc))
            return rc;
        /* We can release the metadata transfer immediately. */
        vmdkFileMetaXferRelease(pImage, pMetaXfer);
        pGTCacheEntry->uExtent = pExtent->uExtent;
        pGTCacheEntry->uGTBlock = uGTBlock;
        /* Grain table entries are little endian on disk. */
        for (unsigned i = 0; i < VMDK_GT_CACHELINE_SIZE; i++)
            pGTCacheEntry->aGTData[i] = RT_LE2H_U32(aGTDataTmp[i]);
    }
    /* Index of this grain's entry within the cached GT block. */
    uGTBlockIndex = (uSector / pExtent->cSectorsPerGrain) % VMDK_GT_CACHELINE_SIZE;
    uint32_t uGrainSector = pGTCacheEntry->aGTData[uGTBlockIndex];
    if (uGrainSector)
        *puExtentSector = uGrainSector + uSector % pExtent->cSectorsPerGrain;
    else
        *puExtentSector = 0; /* grain not allocated */
    return VINF_SUCCESS;
}
4587
4588/**
4589 * Internal. Allocates a new grain table (if necessary), writes the grain
4590 * and updates the grain table. The cache is also updated by this operation.
4591 * This is separate from vmdkGetSector, because that should be as fast as
4592 * possible. Most code from vmdkGetSector also appears here.
4593 */
4594static int vmdkAllocGrain(PVMDKIMAGE pImage, PVMDKEXTENT pExtent,
4595 uint64_t uSector, const void *pvBuf,
4596 uint64_t cbWrite)
4597{
4598 PVMDKGTCACHE pCache = pImage->pGTCache;
4599 uint64_t uGDIndex, uGTSector, uRGTSector, uGTBlock;
4600 uint64_t cbExtentSize;
4601 uint32_t uGTHash, uGTBlockIndex;
4602 PVMDKGTCACHEENTRY pGTCacheEntry;
4603 uint32_t aGTDataTmp[VMDK_GT_CACHELINE_SIZE];
4604 int rc;
4605
4606 uGDIndex = uSector / pExtent->cSectorsPerGDE;
4607 if (uGDIndex >= pExtent->cGDEntries)
4608 return VERR_OUT_OF_RANGE;
4609 uGTSector = pExtent->pGD[uGDIndex];
4610 if (pExtent->pRGD)
4611 uRGTSector = pExtent->pRGD[uGDIndex];
4612 else
4613 uRGTSector = 0; /**< avoid compiler warning */
4614 if (!uGTSector)
4615 {
4616 /* There is no grain table referenced by this grain directory
4617 * entry. So there is absolutely no data in this area. Allocate
4618 * a new grain table and put the reference to it in the GDs. */
4619 rc = vmdkFileGetSize(pImage, pExtent->pFile, &cbExtentSize);
4620 if (RT_FAILURE(rc))
4621 return vmdkError(pImage, rc, RT_SRC_POS, N_("VMDK: error getting size in '%s'"), pExtent->pszFullname);
4622 Assert(!(cbExtentSize % 512));
4623 cbExtentSize = RT_ALIGN_64(cbExtentSize, 512);
4624 uGTSector = VMDK_BYTE2SECTOR(cbExtentSize);
4625 /* For writable streamOptimized extents the final sector is the
4626 * end-of-stream marker. Will be re-added after the grain table.
4627 * If the file has a footer it also will be re-added before EOS. */
4628 if (pImage->uImageFlags & VD_VMDK_IMAGE_FLAGS_STREAM_OPTIMIZED)
4629 {
4630 uint64_t uEOSOff = 0;
4631 uGTSector--;
4632 if (pExtent->fFooter)
4633 {
4634 uGTSector--;
4635 uEOSOff = 512;
4636 rc = vmdkWriteMetaSparseExtent(pImage, pExtent, VMDK_SECTOR2BYTE(uGTSector) + pExtent->cGTEntries * sizeof(uint32_t));
4637 if (RT_FAILURE(rc))
4638 return vmdkError(pImage, rc, RT_SRC_POS, N_("VMDK: cannot write footer after grain table in '%s'"), pExtent->pszFullname);
4639 }
4640 pExtent->uLastGrainSector = 0;
4641 uint8_t aEOS[512];
4642 memset(aEOS, '\0', sizeof(aEOS));
4643 rc = vmdkFileWriteSync(pImage, pExtent->pFile,
4644 VMDK_SECTOR2BYTE(uGTSector) + pExtent->cGTEntries * sizeof(uint32_t) + uEOSOff,
4645 aEOS, sizeof(aEOS), NULL);
4646 if (RT_FAILURE(rc))
4647 return vmdkError(pImage, rc, RT_SRC_POS, N_("VMDK: cannot write end-of stream marker after grain table in '%s'"), pExtent->pszFullname);
4648 }
4649 /* Normally the grain table is preallocated for hosted sparse extents
4650 * that support more than 32 bit sector numbers. So this shouldn't
4651 * ever happen on a valid extent. */
4652 if (uGTSector > UINT32_MAX)
4653 return VERR_VD_VMDK_INVALID_HEADER;
4654 /* Write grain table by writing the required number of grain table
4655 * cache chunks. Avoids dynamic memory allocation, but is a bit
4656 * slower. But as this is a pretty infrequently occurring case it
4657 * should be acceptable. */
4658 memset(aGTDataTmp, '\0', sizeof(aGTDataTmp));
4659 for (unsigned i = 0;
4660 i < pExtent->cGTEntries / VMDK_GT_CACHELINE_SIZE;
4661 i++)
4662 {
4663 rc = vmdkFileWriteSync(pImage, pExtent->pFile,
4664 VMDK_SECTOR2BYTE(uGTSector) + i * sizeof(aGTDataTmp),
4665 aGTDataTmp, sizeof(aGTDataTmp), NULL);
4666 if (RT_FAILURE(rc))
4667 return vmdkError(pImage, rc, RT_SRC_POS, N_("VMDK: cannot write grain table allocation in '%s'"), pExtent->pszFullname);
4668 }
4669 if (pExtent->pRGD)
4670 {
4671 AssertReturn(!uRGTSector, VERR_VD_VMDK_INVALID_HEADER);
4672 rc = vmdkFileGetSize(pImage, pExtent->pFile, &cbExtentSize);
4673 if (RT_FAILURE(rc))
4674 return vmdkError(pImage, rc, RT_SRC_POS, N_("VMDK: error getting size in '%s'"), pExtent->pszFullname);
4675 Assert(!(cbExtentSize % 512));
4676 uRGTSector = VMDK_BYTE2SECTOR(cbExtentSize);
4677 /* For writable streamOptimized extents the final sector is the
4678 * end-of-stream marker. Will be re-added after the grain table.
4679 * If the file has a footer it also will be re-added before EOS. */
4680 if (pImage->uImageFlags & VD_VMDK_IMAGE_FLAGS_STREAM_OPTIMIZED)
4681 {
4682 uint64_t uEOSOff = 0;
4683 uRGTSector--;
4684 if (pExtent->fFooter)
4685 {
4686 uRGTSector--;
4687 uEOSOff = 512;
4688 rc = vmdkWriteMetaSparseExtent(pImage, pExtent, VMDK_SECTOR2BYTE(uRGTSector) + pExtent->cGTEntries * sizeof(uint32_t));
4689 if (RT_FAILURE(rc))
4690 return vmdkError(pImage, rc, RT_SRC_POS, N_("VMDK: cannot write footer after redundant grain table in '%s'"), pExtent->pszFullname);
4691 }
4692 pExtent->uLastGrainSector = 0;
4693 uint8_t aEOS[512];
4694 memset(aEOS, '\0', sizeof(aEOS));
4695 rc = vmdkFileWriteSync(pImage, pExtent->pFile,
4696 VMDK_SECTOR2BYTE(uRGTSector) + pExtent->cGTEntries * sizeof(uint32_t) + uEOSOff,
4697 aEOS, sizeof(aEOS), NULL);
4698 if (RT_FAILURE(rc))
4699 return vmdkError(pImage, rc, RT_SRC_POS, N_("VMDK: cannot write end-of stream marker after redundant grain table in '%s'"), pExtent->pszFullname);
4700 }
4701 /* Normally the redundant grain table is preallocated for hosted
4702 * sparse extents that support more than 32 bit sector numbers. So
4703 * this shouldn't ever happen on a valid extent. */
4704 if (uRGTSector > UINT32_MAX)
4705 return VERR_VD_VMDK_INVALID_HEADER;
4706 /* Write backup grain table by writing the required number of grain
4707 * table cache chunks. Avoids dynamic memory allocation, but is a
4708 * bit slower. But as this is a pretty infrequently occurring case
4709 * it should be acceptable. */
4710 for (unsigned i = 0;
4711 i < pExtent->cGTEntries / VMDK_GT_CACHELINE_SIZE;
4712 i++)
4713 {
4714 rc = vmdkFileWriteSync(pImage, pExtent->pFile,
4715 VMDK_SECTOR2BYTE(uRGTSector) + i * sizeof(aGTDataTmp),
4716 aGTDataTmp, sizeof(aGTDataTmp), NULL);
4717 if (RT_FAILURE(rc))
4718 return vmdkError(pImage, rc, RT_SRC_POS, N_("VMDK: cannot write backup grain table allocation in '%s'"), pExtent->pszFullname);
4719 }
4720 }
4721
4722 /* Update the grain directory on disk (doing it before writing the
4723 * grain table will result in a garbled extent if the operation is
4724 * aborted for some reason. Otherwise the worst that can happen is
4725 * some unused sectors in the extent. */
4726 uint32_t uGTSectorLE = RT_H2LE_U64(uGTSector);
4727 rc = vmdkFileWriteSync(pImage, pExtent->pFile,
4728 VMDK_SECTOR2BYTE(pExtent->uSectorGD) + uGDIndex * sizeof(uGTSectorLE),
4729 &uGTSectorLE, sizeof(uGTSectorLE), NULL);
4730 if (RT_FAILURE(rc))
4731 return vmdkError(pImage, rc, RT_SRC_POS, N_("VMDK: cannot write grain directory entry in '%s'"), pExtent->pszFullname);
4732 if (pExtent->pRGD)
4733 {
4734 uint32_t uRGTSectorLE = RT_H2LE_U64(uRGTSector);
4735 rc = vmdkFileWriteSync(pImage, pExtent->pFile,
4736 VMDK_SECTOR2BYTE(pExtent->uSectorRGD) + uGDIndex * sizeof(uRGTSectorLE),
4737 &uRGTSectorLE, sizeof(uRGTSectorLE), NULL);
4738 if (RT_FAILURE(rc))
4739 return vmdkError(pImage, rc, RT_SRC_POS, N_("VMDK: cannot write backup grain directory entry in '%s'"), pExtent->pszFullname);
4740 }
4741
4742 /* As the final step update the in-memory copy of the GDs. */
4743 pExtent->pGD[uGDIndex] = uGTSector;
4744 if (pExtent->pRGD)
4745 pExtent->pRGD[uGDIndex] = uRGTSector;
4746 }
4747
4748 rc = vmdkFileGetSize(pImage, pExtent->pFile, &cbExtentSize);
4749 if (RT_FAILURE(rc))
4750 return vmdkError(pImage, rc, RT_SRC_POS, N_("VMDK: error getting size in '%s'"), pExtent->pszFullname);
4751 Assert(!(cbExtentSize % 512));
4752
4753 /* Write the data. Always a full grain, or we're in big trouble. */
4754 if (pImage->uImageFlags & VD_VMDK_IMAGE_FLAGS_STREAM_OPTIMIZED)
4755 {
4756 /* For streamOptimized extents this is a little more difficult, as the
4757 * cached data also needs to be updated, to handle updating the last
4758 * written block properly. Also we're trying to avoid unnecessary gaps.
4759 * Additionally the end-of-stream marker needs to be written. */
4760 if (!pExtent->uLastGrainSector)
4761 {
4762 cbExtentSize -= 512;
4763 if (pExtent->fFooter)
4764 cbExtentSize -= 512;
4765 }
4766 else
4767 cbExtentSize = VMDK_SECTOR2BYTE(pExtent->uLastGrainSector) + pExtent->cbLastGrainWritten;
4768 Assert(cbWrite == VMDK_SECTOR2BYTE(pExtent->cSectorsPerGrain));
4769 uint32_t cbGrain = 0;
4770 rc = vmdkFileDeflateSync(pImage, pExtent->pFile, cbExtentSize,
4771 pvBuf, cbWrite, VMDK_MARKER_IGNORE, uSector, &cbGrain);
4772 if (RT_FAILURE(rc))
4773 {
4774 pExtent->uGrainSector = 0;
4775 pExtent->uLastGrainSector = 0;
4776 AssertRC(rc);
4777 return vmdkError(pImage, rc, RT_SRC_POS, N_("VMDK: cannot write allocated compressed data block in '%s'"), pExtent->pszFullname);
4778 }
4779 cbGrain = RT_ALIGN(cbGrain, 512);
4780 pExtent->uLastGrainSector = VMDK_BYTE2SECTOR(cbExtentSize);
4781 pExtent->uLastGrainWritten = uSector / pExtent->cSectorsPerGrain;
4782 pExtent->cbLastGrainWritten = cbGrain;
4783 memcpy(pExtent->pvGrain, pvBuf, cbWrite);
4784 pExtent->uGrainSector = uSector;
4785
4786 uint64_t uEOSOff = 0;
4787 if (pExtent->fFooter)
4788 {
4789 uEOSOff = 512;
4790 rc = vmdkWriteMetaSparseExtent(pImage, pExtent, cbExtentSize + RT_ALIGN(cbGrain, 512));
4791 if (RT_FAILURE(rc))
4792 return vmdkError(pImage, rc, RT_SRC_POS, N_("VMDK: cannot write footer after allocated data block in '%s'"), pExtent->pszFullname);
4793 }
4794 uint8_t aEOS[512];
4795 memset(aEOS, '\0', sizeof(aEOS));
4796 rc = vmdkFileWriteSync(pImage, pExtent->pFile,
4797 cbExtentSize + RT_ALIGN(cbGrain, 512) + uEOSOff,
4798 aEOS, sizeof(aEOS), NULL);
4799 if (RT_FAILURE(rc))
4800 return vmdkError(pImage, rc, RT_SRC_POS, N_("VMDK: cannot write end-of stream marker after allocated data block in '%s'"), pExtent->pszFullname);
4801 }
4802 else
4803 {
4804 rc = vmdkFileWriteSync(pImage, pExtent->pFile, cbExtentSize, pvBuf, cbWrite, NULL);
4805 if (RT_FAILURE(rc))
4806 return vmdkError(pImage, rc, RT_SRC_POS, N_("VMDK: cannot write allocated data block in '%s'"), pExtent->pszFullname);
4807 }
4808
4809 /* Update the grain table (and the cache). */
4810 uGTBlock = uSector / (pExtent->cSectorsPerGrain * VMDK_GT_CACHELINE_SIZE);
4811 uGTHash = vmdkGTCacheHash(pCache, uGTBlock, pExtent->uExtent);
4812 pGTCacheEntry = &pCache->aGTCache[uGTHash];
4813 if ( pGTCacheEntry->uExtent != pExtent->uExtent
4814 || pGTCacheEntry->uGTBlock != uGTBlock)
4815 {
4816 /* Cache miss, fetch data from disk. */
4817 rc = vmdkFileReadSync(pImage, pExtent->pFile,
4818 VMDK_SECTOR2BYTE(uGTSector) + (uGTBlock % (pExtent->cGTEntries / VMDK_GT_CACHELINE_SIZE)) * sizeof(aGTDataTmp),
4819 aGTDataTmp, sizeof(aGTDataTmp), NULL);
4820 if (RT_FAILURE(rc))
4821 return vmdkError(pImage, rc, RT_SRC_POS, N_("VMDK: cannot read allocated grain table entry in '%s'"), pExtent->pszFullname);
4822 pGTCacheEntry->uExtent = pExtent->uExtent;
4823 pGTCacheEntry->uGTBlock = uGTBlock;
4824 for (unsigned i = 0; i < VMDK_GT_CACHELINE_SIZE; i++)
4825 pGTCacheEntry->aGTData[i] = RT_LE2H_U32(aGTDataTmp[i]);
4826 }
4827 else
4828 {
4829 /* Cache hit. Convert grain table block back to disk format, otherwise
4830 * the code below will write garbage for all but the updated entry. */
4831 for (unsigned i = 0; i < VMDK_GT_CACHELINE_SIZE; i++)
4832 aGTDataTmp[i] = RT_H2LE_U32(pGTCacheEntry->aGTData[i]);
4833 }
4834 uGTBlockIndex = (uSector / pExtent->cSectorsPerGrain) % VMDK_GT_CACHELINE_SIZE;
4835 aGTDataTmp[uGTBlockIndex] = RT_H2LE_U32(VMDK_BYTE2SECTOR(cbExtentSize));
4836 pGTCacheEntry->aGTData[uGTBlockIndex] = VMDK_BYTE2SECTOR(cbExtentSize);
4837 /* Update grain table on disk. */
4838 rc = vmdkFileWriteSync(pImage, pExtent->pFile,
4839 VMDK_SECTOR2BYTE(uGTSector) + (uGTBlock % (pExtent->cGTEntries / VMDK_GT_CACHELINE_SIZE)) * sizeof(aGTDataTmp),
4840 aGTDataTmp, sizeof(aGTDataTmp), NULL);
4841 if (RT_FAILURE(rc))
4842 return vmdkError(pImage, rc, RT_SRC_POS, N_("VMDK: cannot write updated grain table in '%s'"), pExtent->pszFullname);
4843 if (pExtent->pRGD)
4844 {
4845 /* Update backup grain table on disk. */
4846 rc = vmdkFileWriteSync(pImage, pExtent->pFile,
4847 VMDK_SECTOR2BYTE(uRGTSector) + (uGTBlock % (pExtent->cGTEntries / VMDK_GT_CACHELINE_SIZE)) * sizeof(aGTDataTmp),
4848 aGTDataTmp, sizeof(aGTDataTmp), NULL);
4849 if (RT_FAILURE(rc))
4850 return vmdkError(pImage, rc, RT_SRC_POS, N_("VMDK: cannot write updated backup grain table in '%s'"), pExtent->pszFullname);
4851 }
4852#ifdef VBOX_WITH_VMDK_ESX
4853 if (RT_SUCCESS(rc) && pExtent->enmType == VMDKETYPE_ESX_SPARSE)
4854 {
4855 pExtent->uFreeSector = uGTSector + VMDK_BYTE2SECTOR(cbWrite);
4856 pExtent->fMetaDirty = true;
4857 }
4858#endif /* VBOX_WITH_VMDK_ESX */
4859 return rc;
4860}
4861
4862/**
4863 * Internal: Updates the grain table during a async grain allocation.
4864 */
4865static int vmdkAllocGrainAsyncGTUpdate(PVMDKIMAGE pImage, PVMDKEXTENT pExtent,
4866 PVDIOCTX pIoCtx,
4867 PVMDKGRAINALLOCASYNC pGrainAlloc)
4868{
4869 int rc = VINF_SUCCESS;
4870 PVMDKGTCACHE pCache = pImage->pGTCache;
4871 uint32_t aGTDataTmp[VMDK_GT_CACHELINE_SIZE];
4872 uint32_t uGTHash, uGTBlockIndex;
4873 uint64_t uGTSector, uRGTSector, uGTBlock;
4874 uint64_t uSector = pGrainAlloc->uSector;
4875 PVMDKGTCACHEENTRY pGTCacheEntry;
4876
4877 LogFlowFunc(("pImage=%#p pExtent=%#p pCache=%#p pIoCtx=%#p pGrainAlloc=%#p\n",
4878 pImage, pExtent, pCache, pIoCtx, pGrainAlloc));
4879
4880 uGTSector = pGrainAlloc->uGTSector;
4881 uRGTSector = pGrainAlloc->uRGTSector;
4882 LogFlow(("uGTSector=%llu uRGTSector=%llu\n", uGTSector, uRGTSector));
4883
4884 /* Update the grain table (and the cache). */
4885 uGTBlock = uSector / (pExtent->cSectorsPerGrain * VMDK_GT_CACHELINE_SIZE);
4886 uGTHash = vmdkGTCacheHash(pCache, uGTBlock, pExtent->uExtent);
4887 pGTCacheEntry = &pCache->aGTCache[uGTHash];
4888 if ( pGTCacheEntry->uExtent != pExtent->uExtent
4889 || pGTCacheEntry->uGTBlock != uGTBlock)
4890 {
4891 /* Cache miss, fetch data from disk. */
4892 LogFlow(("Cache miss, fetch data from disk\n"));
4893 PVDMETAXFER pMetaXfer = NULL;
4894 rc = vmdkFileReadMetaAsync(pImage, pExtent->pFile,
4895 VMDK_SECTOR2BYTE(uGTSector) + (uGTBlock % (pExtent->cGTEntries / VMDK_GT_CACHELINE_SIZE)) * sizeof(aGTDataTmp),
4896 aGTDataTmp, sizeof(aGTDataTmp), pIoCtx,
4897 &pMetaXfer, vmdkAllocGrainAsyncComplete, pGrainAlloc);
4898 if (rc == VERR_VD_ASYNC_IO_IN_PROGRESS)
4899 {
4900 pGrainAlloc->cIoXfersPending++;
4901 pGrainAlloc->fGTUpdateNeeded = true;
4902 /* Leave early, we will be called again after the read completed. */
4903 LogFlowFunc(("Metadata read in progress, leaving\n"));
4904 return rc;
4905 }
4906 else if (RT_FAILURE(rc))
4907 return vmdkError(pImage, rc, RT_SRC_POS, N_("VMDK: cannot read allocated grain table entry in '%s'"), pExtent->pszFullname);
4908 vmdkFileMetaXferRelease(pImage, pMetaXfer);
4909 pGTCacheEntry->uExtent = pExtent->uExtent;
4910 pGTCacheEntry->uGTBlock = uGTBlock;
4911 for (unsigned i = 0; i < VMDK_GT_CACHELINE_SIZE; i++)
4912 pGTCacheEntry->aGTData[i] = RT_LE2H_U32(aGTDataTmp[i]);
4913 }
4914 else
4915 {
4916 /* Cache hit. Convert grain table block back to disk format, otherwise
4917 * the code below will write garbage for all but the updated entry. */
4918 for (unsigned i = 0; i < VMDK_GT_CACHELINE_SIZE; i++)
4919 aGTDataTmp[i] = RT_H2LE_U32(pGTCacheEntry->aGTData[i]);
4920 }
4921 pGrainAlloc->fGTUpdateNeeded = false;
4922 uGTBlockIndex = (uSector / pExtent->cSectorsPerGrain) % VMDK_GT_CACHELINE_SIZE;
4923 aGTDataTmp[uGTBlockIndex] = RT_H2LE_U32(VMDK_BYTE2SECTOR(pGrainAlloc->cbExtentSize));
4924 pGTCacheEntry->aGTData[uGTBlockIndex] = VMDK_BYTE2SECTOR(pGrainAlloc->cbExtentSize);
4925 /* Update grain table on disk. */
4926 rc = vmdkFileWriteMetaAsync(pImage, pExtent->pFile,
4927 VMDK_SECTOR2BYTE(uGTSector) + (uGTBlock % (pExtent->cGTEntries / VMDK_GT_CACHELINE_SIZE)) * sizeof(aGTDataTmp),
4928 aGTDataTmp, sizeof(aGTDataTmp), pIoCtx,
4929 vmdkAllocGrainAsyncComplete, pGrainAlloc);
4930 if (rc == VERR_VD_ASYNC_IO_IN_PROGRESS)
4931 pGrainAlloc->cIoXfersPending++;
4932 else if (RT_FAILURE(rc))
4933 return vmdkError(pImage, rc, RT_SRC_POS, N_("VMDK: cannot write updated grain table in '%s'"), pExtent->pszFullname);
4934 if (pExtent->pRGD)
4935 {
4936 /* Update backup grain table on disk. */
4937 rc = vmdkFileWriteMetaAsync(pImage, pExtent->pFile,
4938 VMDK_SECTOR2BYTE(uRGTSector) + (uGTBlock % (pExtent->cGTEntries / VMDK_GT_CACHELINE_SIZE)) * sizeof(aGTDataTmp),
4939 aGTDataTmp, sizeof(aGTDataTmp), pIoCtx,
4940 vmdkAllocGrainAsyncComplete, pGrainAlloc);
4941 if (rc == VERR_VD_ASYNC_IO_IN_PROGRESS)
4942 pGrainAlloc->cIoXfersPending++;
4943 else if (RT_FAILURE(rc))
4944 return vmdkError(pImage, rc, RT_SRC_POS, N_("VMDK: cannot write updated backup grain table in '%s'"), pExtent->pszFullname);
4945 }
4946#ifdef VBOX_WITH_VMDK_ESX
4947 if (RT_SUCCESS(rc) && pExtent->enmType == VMDKETYPE_ESX_SPARSE)
4948 {
4949 pExtent->uFreeSector = uGTSector + VMDK_BYTE2SECTOR(cbWrite);
4950 pExtent->fMetaDirty = true;
4951 }
4952#endif /* VBOX_WITH_VMDK_ESX */
4953
4954 LogFlowFunc(("leaving rc=%Rrc\n", rc));
4955
4956 return rc;
4957}
4958
4959/**
4960 * Internal - complete the grain allocation by updating disk grain table if required.
4961 */
4962static int vmdkAllocGrainAsyncComplete(void *pBackendData, PVDIOCTX pIoCtx, void *pvUser, int rcReq)
4963{
4964 int rc = VINF_SUCCESS;
4965 PVMDKIMAGE pImage = (PVMDKIMAGE)pBackendData;
4966 PVMDKGRAINALLOCASYNC pGrainAlloc = (PVMDKGRAINALLOCASYNC)pvUser;
4967 PVMDKEXTENT pExtent = pGrainAlloc->pExtent;
4968
4969 LogFlowFunc(("pBackendData=%#p pIoCtx=%#p pvUser=%#p rcReq=%Rrc\n",
4970 pBackendData, pIoCtx, pvUser, rcReq));
4971
4972 pGrainAlloc->cIoXfersPending--;
4973 if (!pGrainAlloc->cIoXfersPending && pGrainAlloc->fGTUpdateNeeded)
4974 rc = vmdkAllocGrainAsyncGTUpdate(pImage, pGrainAlloc->pExtent,
4975 pIoCtx, pGrainAlloc);
4976
4977 if (!pGrainAlloc->cIoXfersPending)
4978 {
4979 /* Grain allocation completed. */
4980 RTMemFree(pGrainAlloc);
4981 }
4982
4983 LogFlowFunc(("Leaving rc=%Rrc\n", rc));
4984 return rc;
4985}
4986
4987/**
4988 * Internal. Allocates a new grain table (if necessary) - async version.
4989 */
4990static int vmdkAllocGrainAsync(PVMDKIMAGE pImage, PVMDKEXTENT pExtent,
4991 PVDIOCTX pIoCtx, uint64_t uSector,
4992 uint64_t cbWrite)
4993{
4994 PVMDKGTCACHE pCache = pImage->pGTCache;
4995 uint64_t uGDIndex, uGTSector, uRGTSector;
4996 uint64_t cbExtentSize;
4997 PVMDKGRAINALLOCASYNC pGrainAlloc = NULL;
4998 int rc;
4999
5000 LogFlowFunc(("pCache=%#p pExtent=%#p pIoCtx=%#p uSector=%llu cbWrite=%llu\n",
5001 pCache, pExtent, pIoCtx, uSector, cbWrite));
5002
5003 AssertReturn(!(pImage->uImageFlags & VD_VMDK_IMAGE_FLAGS_STREAM_OPTIMIZED), VERR_NOT_SUPPORTED);
5004
5005 pGrainAlloc = (PVMDKGRAINALLOCASYNC)RTMemAllocZ(sizeof(VMDKGRAINALLOCASYNC));
5006 if (!pGrainAlloc)
5007 return VERR_NO_MEMORY;
5008
5009 pGrainAlloc->pExtent = pExtent;
5010 pGrainAlloc->uSector = uSector;
5011
5012 uGDIndex = uSector / pExtent->cSectorsPerGDE;
5013 if (uGDIndex >= pExtent->cGDEntries)
5014 return VERR_OUT_OF_RANGE;
5015 uGTSector = pExtent->pGD[uGDIndex];
5016 if (pExtent->pRGD)
5017 uRGTSector = pExtent->pRGD[uGDIndex];
5018 else
5019 uRGTSector = 0; /**< avoid compiler warning */
5020 if (!uGTSector)
5021 {
5022 LogFlow(("Allocating new grain table\n"));
5023
5024 /* There is no grain table referenced by this grain directory
5025 * entry. So there is absolutely no data in this area. Allocate
5026 * a new grain table and put the reference to it in the GDs. */
5027 rc = vmdkFileGetSize(pImage, pExtent->pFile, &cbExtentSize);
5028 if (RT_FAILURE(rc))
5029 return vmdkError(pImage, rc, RT_SRC_POS, N_("VMDK: error getting size in '%s'"), pExtent->pszFullname);
5030 Assert(!(cbExtentSize % 512));
5031
5032 pGrainAlloc->cbExtentOld = cbExtentSize;
5033
5034 cbExtentSize = RT_ALIGN_64(cbExtentSize, 512);
5035 uGTSector = VMDK_BYTE2SECTOR(cbExtentSize);
5036
5037 /* Normally the grain table is preallocated for hosted sparse extents
5038 * that support more than 32 bit sector numbers. So this shouldn't
5039 * ever happen on a valid extent. */
5040 if (uGTSector > UINT32_MAX)
5041 return VERR_VD_VMDK_INVALID_HEADER;
5042
5043 /* Write grain table by writing the required number of grain table
5044 * cache chunks. Allocate memory dynamically here or we flood the
5045 * metadata cache with very small entries.
5046 */
5047 size_t cbGTDataTmp = (pExtent->cGTEntries / VMDK_GT_CACHELINE_SIZE) * VMDK_GT_CACHELINE_SIZE * sizeof(uint32_t);
5048 uint32_t *paGTDataTmp = (uint32_t *)RTMemTmpAllocZ(cbGTDataTmp);
5049
5050 if (!paGTDataTmp)
5051 return VERR_NO_MEMORY;
5052
5053 memset(paGTDataTmp, '\0', cbGTDataTmp);
5054 rc = vmdkFileWriteMetaAsync(pImage, pExtent->pFile,
5055 VMDK_SECTOR2BYTE(uGTSector),
5056 paGTDataTmp, cbGTDataTmp, pIoCtx,
5057 vmdkAllocGrainAsyncComplete, pGrainAlloc);
5058 if (rc == VERR_VD_ASYNC_IO_IN_PROGRESS)
5059 pGrainAlloc->cIoXfersPending++;
5060 else if (RT_FAILURE(rc))
5061 {
5062 RTMemTmpFree(paGTDataTmp);
5063 return vmdkError(pImage, rc, RT_SRC_POS, N_("VMDK: cannot write grain table allocation in '%s'"), pExtent->pszFullname);
5064 }
5065
5066 if (pExtent->pRGD)
5067 {
5068 AssertReturn(!uRGTSector, VERR_VD_VMDK_INVALID_HEADER);
5069 rc = vmdkFileGetSize(pImage, pExtent->pFile, &cbExtentSize);
5070 if (RT_FAILURE(rc))
5071 return vmdkError(pImage, rc, RT_SRC_POS, N_("VMDK: error getting size in '%s'"), pExtent->pszFullname);
5072 Assert(!(cbExtentSize % 512));
5073 uRGTSector = VMDK_BYTE2SECTOR(cbExtentSize);
5074
5075 /* Normally the redundant grain table is preallocated for hosted
5076 * sparse extents that support more than 32 bit sector numbers. So
5077 * this shouldn't ever happen on a valid extent. */
5078 if (uRGTSector > UINT32_MAX)
5079 {
5080 RTMemTmpFree(paGTDataTmp);
5081 return VERR_VD_VMDK_INVALID_HEADER;
5082 }
5083 /* Write backup grain table by writing the required number of grain
5084 * table cache chunks. */
5085 rc = vmdkFileWriteMetaAsync(pImage, pExtent->pFile,
5086 VMDK_SECTOR2BYTE(uRGTSector),
5087 paGTDataTmp, cbGTDataTmp, pIoCtx,
5088 vmdkAllocGrainAsyncComplete, pGrainAlloc);
5089 if (rc == VERR_VD_ASYNC_IO_IN_PROGRESS)
5090 pGrainAlloc->cIoXfersPending++;
5091 else if (RT_FAILURE(rc))
5092 {
5093 RTMemTmpFree(paGTDataTmp);
5094 return vmdkError(pImage, rc, RT_SRC_POS, N_("VMDK: cannot write backup grain table allocation in '%s'"), pExtent->pszFullname);
5095 }
5096 }
5097
5098 RTMemTmpFree(paGTDataTmp);
5099
5100 /* Update the grain directory on disk (doing it before writing the
5101 * grain table will result in a garbled extent if the operation is
5102 * aborted for some reason. Otherwise the worst that can happen is
5103 * some unused sectors in the extent. */
5104 uint32_t uGTSectorLE = RT_H2LE_U64(uGTSector);
5105 rc = vmdkFileWriteMetaAsync(pImage, pExtent->pFile,
5106 VMDK_SECTOR2BYTE(pExtent->uSectorGD) + uGDIndex * sizeof(uGTSectorLE),
5107 &uGTSectorLE, sizeof(uGTSectorLE), pIoCtx,
5108 vmdkAllocGrainAsyncComplete, pGrainAlloc);
5109 if (rc == VERR_VD_ASYNC_IO_IN_PROGRESS)
5110 pGrainAlloc->cIoXfersPending++;
5111 else if (RT_FAILURE(rc))
5112 return vmdkError(pImage, rc, RT_SRC_POS, N_("VMDK: cannot write grain directory entry in '%s'"), pExtent->pszFullname);
5113 if (pExtent->pRGD)
5114 {
5115 uint32_t uRGTSectorLE = RT_H2LE_U64(uRGTSector);
5116 rc = vmdkFileWriteMetaAsync(pImage, pExtent->pFile,
5117 VMDK_SECTOR2BYTE(pExtent->uSectorRGD) + uGDIndex * sizeof(uGTSectorLE),
5118 &uRGTSectorLE, sizeof(uRGTSectorLE), pIoCtx,
5119 vmdkAllocGrainAsyncComplete, pGrainAlloc);
5120 if (rc == VERR_VD_ASYNC_IO_IN_PROGRESS)
5121 pGrainAlloc->cIoXfersPending++;
5122 else if (RT_FAILURE(rc))
5123 return vmdkError(pImage, rc, RT_SRC_POS, N_("VMDK: cannot write backup grain directory entry in '%s'"), pExtent->pszFullname);
5124 }
5125
5126 /* As the final step update the in-memory copy of the GDs. */
5127 pExtent->pGD[uGDIndex] = uGTSector;
5128 if (pExtent->pRGD)
5129 pExtent->pRGD[uGDIndex] = uRGTSector;
5130 }
5131
5132 LogFlow(("uGTSector=%llu uRGTSector=%llu\n", uGTSector, uRGTSector));
5133 pGrainAlloc->uGTSector = uGTSector;
5134 pGrainAlloc->uRGTSector = uRGTSector;
5135
5136 rc = vmdkFileGetSize(pImage, pExtent->pFile, &cbExtentSize);
5137 if (RT_FAILURE(rc))
5138 return vmdkError(pImage, rc, RT_SRC_POS, N_("VMDK: error getting size in '%s'"), pExtent->pszFullname);
5139 Assert(!(cbExtentSize % 512));
5140
5141 if (!pGrainAlloc->cbExtentOld)
5142 pGrainAlloc->cbExtentOld = cbExtentSize;
5143
5144 pGrainAlloc->cbExtentSize = cbExtentSize;
5145
5146 /* Write the data. Always a full grain, or we're in big trouble. */
5147 rc = vmdkFileWriteUserAsync(pImage, pExtent->pFile,
5148 cbExtentSize, pIoCtx, cbWrite,
5149 vmdkAllocGrainAsyncComplete, pGrainAlloc);
5150 if (rc == VERR_VD_ASYNC_IO_IN_PROGRESS)
5151 pGrainAlloc->cIoXfersPending++;
5152 else if (RT_FAILURE(rc))
5153 return vmdkError(pImage, rc, RT_SRC_POS, N_("VMDK: cannot write allocated data block in '%s'"), pExtent->pszFullname);
5154
5155 rc = vmdkAllocGrainAsyncGTUpdate(pImage, pExtent, pIoCtx, pGrainAlloc);
5156
5157 if (!pGrainAlloc->cIoXfersPending)
5158 {
5159 /* Grain allocation completed. */
5160 RTMemFree(pGrainAlloc);
5161 }
5162
5163 LogFlowFunc(("leaving rc=%Rrc\n", rc));
5164
5165 return rc;
5166}
5167
5168/**
5169 * Replaces a fragment of a string with the specified string.
5170 *
5171 * @returns Pointer to the allocated UTF-8 string.
5172 * @param pszWhere UTF-8 string to search in.
5173 * @param pszWhat UTF-8 string to search for.
5174 * @param pszByWhat UTF-8 string to replace the found string with.
5175 */
5176static char *vmdkStrReplace(const char *pszWhere, const char *pszWhat,
5177 const char *pszByWhat)
5178{
5179 AssertPtr(pszWhere);
5180 AssertPtr(pszWhat);
5181 AssertPtr(pszByWhat);
5182 const char *pszFoundStr = strstr(pszWhere, pszWhat);
5183 if (!pszFoundStr)
5184 return NULL;
5185 size_t cFinal = strlen(pszWhere) + 1 + strlen(pszByWhat) - strlen(pszWhat);
5186 char *pszNewStr = (char *)RTMemAlloc(cFinal);
5187 if (pszNewStr)
5188 {
5189 char *pszTmp = pszNewStr;
5190 memcpy(pszTmp, pszWhere, pszFoundStr - pszWhere);
5191 pszTmp += pszFoundStr - pszWhere;
5192 memcpy(pszTmp, pszByWhat, strlen(pszByWhat));
5193 pszTmp += strlen(pszByWhat);
5194 strcpy(pszTmp, pszFoundStr + strlen(pszWhat));
5195 }
5196 return pszNewStr;
5197}
5198
5199
5200/** @copydoc VBOXHDDBACKEND::pfnCheckIfValid */
5201static int vmdkCheckIfValid(const char *pszFilename, PVDINTERFACE pVDIfsDisk,
5202 PVDINTERFACE pVDIfsImage)
5203{
5204 LogFlowFunc(("pszFilename=\"%s\"\n", pszFilename));
5205 int rc = VINF_SUCCESS;
5206 PVMDKIMAGE pImage;
5207
5208 if ( !pszFilename
5209 || !*pszFilename
5210 || strchr(pszFilename, '"'))
5211 {
5212 rc = VERR_INVALID_PARAMETER;
5213 goto out;
5214 }
5215
5216 pImage = (PVMDKIMAGE)RTMemAllocZ(sizeof(VMDKIMAGE));
5217 if (!pImage)
5218 {
5219 rc = VERR_NO_MEMORY;
5220 goto out;
5221 }
5222 pImage->pszFilename = pszFilename;
5223 pImage->pFile = NULL;
5224 pImage->pExtents = NULL;
5225 pImage->pFiles = NULL;
5226 pImage->pGTCache = NULL;
5227 pImage->pDescData = NULL;
5228 pImage->pVDIfsDisk = pVDIfsDisk;
5229 pImage->pVDIfsImage = pVDIfsImage;
5230 /** @todo speed up this test open (VD_OPEN_FLAGS_INFO) by skipping as
5231 * much as possible in vmdkOpenImage. */
5232 rc = vmdkOpenImage(pImage, VD_OPEN_FLAGS_INFO | VD_OPEN_FLAGS_READONLY);
5233 vmdkFreeImage(pImage, false);
5234 RTMemFree(pImage);
5235
5236out:
5237 LogFlowFunc(("returns %Rrc\n", rc));
5238 return rc;
5239}
5240
5241/** @copydoc VBOXHDDBACKEND::pfnOpen */
5242static int vmdkOpen(const char *pszFilename, unsigned uOpenFlags,
5243 PVDINTERFACE pVDIfsDisk, PVDINTERFACE pVDIfsImage,
5244 void **ppBackendData)
5245{
5246 LogFlowFunc(("pszFilename=\"%s\" uOpenFlags=%#x pVDIfsDisk=%#p pVDIfsImage=%#p ppBackendData=%#p\n", pszFilename, uOpenFlags, pVDIfsDisk, pVDIfsImage, ppBackendData));
5247 int rc;
5248 PVMDKIMAGE pImage;
5249
5250 /* Check open flags. All valid flags are supported. */
5251 if (uOpenFlags & ~VD_OPEN_FLAGS_MASK)
5252 {
5253 rc = VERR_INVALID_PARAMETER;
5254 goto out;
5255 }
5256
5257 /* Check remaining arguments. */
5258 if ( !VALID_PTR(pszFilename)
5259 || !*pszFilename
5260 || strchr(pszFilename, '"'))
5261 {
5262 rc = VERR_INVALID_PARAMETER;
5263 goto out;
5264 }
5265
5266
5267 pImage = (PVMDKIMAGE)RTMemAllocZ(sizeof(VMDKIMAGE));
5268 if (!pImage)
5269 {
5270 rc = VERR_NO_MEMORY;
5271 goto out;
5272 }
5273 pImage->pszFilename = pszFilename;
5274 pImage->pFile = NULL;
5275 pImage->pExtents = NULL;
5276 pImage->pFiles = NULL;
5277 pImage->pGTCache = NULL;
5278 pImage->pDescData = NULL;
5279 pImage->pVDIfsDisk = pVDIfsDisk;
5280 pImage->pVDIfsImage = pVDIfsImage;
5281
5282 rc = vmdkOpenImage(pImage, uOpenFlags);
5283 if (RT_SUCCESS(rc))
5284 *ppBackendData = pImage;
5285
5286out:
5287 LogFlowFunc(("returns %Rrc (pBackendData=%#p)\n", rc, *ppBackendData));
5288 return rc;
5289}
5290
5291/** @copydoc VBOXHDDBACKEND::pfnCreate */
5292static int vmdkCreate(const char *pszFilename, uint64_t cbSize,
5293 unsigned uImageFlags, const char *pszComment,
5294 PCVDGEOMETRY pPCHSGeometry, PCVDGEOMETRY pLCHSGeometry,
5295 PCRTUUID pUuid, unsigned uOpenFlags,
5296 unsigned uPercentStart, unsigned uPercentSpan,
5297 PVDINTERFACE pVDIfsDisk, PVDINTERFACE pVDIfsImage,
5298 PVDINTERFACE pVDIfsOperation, void **ppBackendData)
5299{
5300 LogFlowFunc(("pszFilename=\"%s\" cbSize=%llu uImageFlags=%#x pszComment=\"%s\" pPCHSGeometry=%#p pLCHSGeometry=%#p Uuid=%RTuuid uOpenFlags=%#x uPercentStart=%u uPercentSpan=%u pVDIfsDisk=%#p pVDIfsImage=%#p pVDIfsOperation=%#p ppBackendData=%#p", pszFilename, cbSize, uImageFlags, pszComment, pPCHSGeometry, pLCHSGeometry, pUuid, uOpenFlags, uPercentStart, uPercentSpan, pVDIfsDisk, pVDIfsImage, pVDIfsOperation, ppBackendData));
5301 int rc;
5302 PVMDKIMAGE pImage;
5303
5304 PFNVDPROGRESS pfnProgress = NULL;
5305 void *pvUser = NULL;
5306 PVDINTERFACE pIfProgress = VDInterfaceGet(pVDIfsOperation,
5307 VDINTERFACETYPE_PROGRESS);
5308 PVDINTERFACEPROGRESS pCbProgress = NULL;
5309 if (pIfProgress)
5310 {
5311 pCbProgress = VDGetInterfaceProgress(pIfProgress);
5312 pfnProgress = pCbProgress->pfnProgress;
5313 pvUser = pIfProgress->pvUser;
5314 }
5315
5316 /* Check open flags. All valid flags are supported. */
5317 if (uOpenFlags & ~VD_OPEN_FLAGS_MASK)
5318 {
5319 rc = VERR_INVALID_PARAMETER;
5320 goto out;
5321 }
5322
5323 /* Check size. Maximum 2TB-64K for sparse images, otherwise unlimited. */
5324 if ( !cbSize
5325 || (!(uImageFlags & VD_IMAGE_FLAGS_FIXED) && cbSize >= _1T * 2 - _64K))
5326 {
5327 rc = VERR_VD_INVALID_SIZE;
5328 goto out;
5329 }
5330
5331 /* Check remaining arguments. */
5332 if ( !VALID_PTR(pszFilename)
5333 || !*pszFilename
5334 || strchr(pszFilename, '"')
5335 || !VALID_PTR(pPCHSGeometry)
5336 || !VALID_PTR(pLCHSGeometry)
5337#ifndef VBOX_WITH_VMDK_ESX
5338 || ( uImageFlags & VD_VMDK_IMAGE_FLAGS_ESX
5339 && !(uImageFlags & VD_IMAGE_FLAGS_FIXED))
5340#endif
5341 || ( (uImageFlags & VD_VMDK_IMAGE_FLAGS_STREAM_OPTIMIZED)
5342 && (uImageFlags & ~(VD_VMDK_IMAGE_FLAGS_STREAM_OPTIMIZED | VD_IMAGE_FLAGS_DIFF))))
5343 {
5344 rc = VERR_INVALID_PARAMETER;
5345 goto out;
5346 }
5347
5348 pImage = (PVMDKIMAGE)RTMemAllocZ(sizeof(VMDKIMAGE));
5349 if (!pImage)
5350 {
5351 rc = VERR_NO_MEMORY;
5352 goto out;
5353 }
5354 pImage->pszFilename = pszFilename;
5355 pImage->pFile = NULL;
5356 pImage->pExtents = NULL;
5357 pImage->pFiles = NULL;
5358 pImage->pGTCache = NULL;
5359 pImage->pDescData = NULL;
5360 pImage->pVDIfsDisk = pVDIfsDisk;
5361 pImage->pVDIfsImage = pVDIfsImage;
5362 /* Descriptors for split images can be pretty large, especially if the
5363 * filename is long. So prepare for the worst, and allocate quite some
5364 * memory for the descriptor in this case. */
5365 if (uImageFlags & VD_VMDK_IMAGE_FLAGS_SPLIT_2G)
5366 pImage->cbDescAlloc = VMDK_SECTOR2BYTE(200);
5367 else
5368 pImage->cbDescAlloc = VMDK_SECTOR2BYTE(20);
5369 pImage->pDescData = (char *)RTMemAllocZ(pImage->cbDescAlloc);
5370 if (!pImage->pDescData)
5371 {
5372 rc = VERR_NO_MEMORY;
5373 goto out;
5374 }
5375
5376 rc = vmdkCreateImage(pImage, cbSize, uImageFlags, pszComment,
5377 pPCHSGeometry, pLCHSGeometry, pUuid,
5378 pfnProgress, pvUser, uPercentStart, uPercentSpan);
5379 if (RT_SUCCESS(rc))
5380 {
5381 /* So far the image is opened in read/write mode. Make sure the
5382 * image is opened in read-only mode if the caller requested that. */
5383 if (uOpenFlags & VD_OPEN_FLAGS_READONLY)
5384 {
5385 vmdkFreeImage(pImage, false);
5386 rc = vmdkOpenImage(pImage, uOpenFlags);
5387 if (RT_FAILURE(rc))
5388 goto out;
5389 }
5390 *ppBackendData = pImage;
5391 }
5392 else
5393 {
5394 RTMemFree(pImage->pDescData);
5395 RTMemFree(pImage);
5396 }
5397
5398out:
5399 LogFlowFunc(("returns %Rrc (pBackendData=%#p)\n", rc, *ppBackendData));
5400 return rc;
5401}
5402
5403/** @copydoc VBOXHDDBACKEND::pfnRename */
5404static int vmdkRename(void *pBackendData, const char *pszFilename)
5405{
5406 LogFlowFunc(("pBackendData=%#p pszFilename=%#p\n", pBackendData, pszFilename));
5407
5408 PVMDKIMAGE pImage = (PVMDKIMAGE)pBackendData;
5409 int rc = VINF_SUCCESS;
5410 char **apszOldName = NULL;
5411 char **apszNewName = NULL;
5412 char **apszNewLines = NULL;
5413 char *pszOldDescName = NULL;
5414 bool fImageFreed = false;
5415 bool fEmbeddedDesc = false;
5416 unsigned cExtents = pImage->cExtents;
5417 char *pszNewBaseName = NULL;
5418 char *pszOldBaseName = NULL;
5419 char *pszNewFullName = NULL;
5420 char *pszOldFullName = NULL;
5421 const char *pszOldImageName;
5422 unsigned i, line;
5423 VMDKDESCRIPTOR DescriptorCopy;
5424 VMDKEXTENT ExtentCopy;
5425
5426 memset(&DescriptorCopy, 0, sizeof(DescriptorCopy));
5427
5428 /* Check arguments. */
5429 if ( !pImage
5430 || (pImage->uImageFlags & VD_VMDK_IMAGE_FLAGS_RAWDISK)
5431 || !VALID_PTR(pszFilename)
5432 || !*pszFilename)
5433 {
5434 rc = VERR_INVALID_PARAMETER;
5435 goto out;
5436 }
5437
5438 /*
5439 * Allocate an array to store both old and new names of renamed files
5440 * in case we have to roll back the changes. Arrays are initialized
5441 * with zeros. We actually save stuff when and if we change it.
5442 */
5443 apszOldName = (char **)RTMemTmpAllocZ((cExtents + 1) * sizeof(char*));
5444 apszNewName = (char **)RTMemTmpAllocZ((cExtents + 1) * sizeof(char*));
5445 apszNewLines = (char **)RTMemTmpAllocZ((cExtents) * sizeof(char*));
5446 if (!apszOldName || !apszNewName || !apszNewLines)
5447 {
5448 rc = VERR_NO_MEMORY;
5449 goto out;
5450 }
5451
5452 /* Save the descriptor size and position. */
5453 if (pImage->pDescData)
5454 {
5455 /* Separate descriptor file. */
5456 fEmbeddedDesc = false;
5457 }
5458 else
5459 {
5460 /* Embedded descriptor file. */
5461 ExtentCopy = pImage->pExtents[0];
5462 fEmbeddedDesc = true;
5463 }
5464 /* Save the descriptor content. */
5465 DescriptorCopy.cLines = pImage->Descriptor.cLines;
5466 for (i = 0; i < DescriptorCopy.cLines; i++)
5467 {
5468 DescriptorCopy.aLines[i] = RTStrDup(pImage->Descriptor.aLines[i]);
5469 if (!DescriptorCopy.aLines[i])
5470 {
5471 rc = VERR_NO_MEMORY;
5472 goto out;
5473 }
5474 }
5475
5476 /* Prepare both old and new base names used for string replacement. */
5477 pszNewBaseName = RTStrDup(RTPathFilename(pszFilename));
5478 RTPathStripExt(pszNewBaseName);
5479 pszOldBaseName = RTStrDup(RTPathFilename(pImage->pszFilename));
5480 RTPathStripExt(pszOldBaseName);
5481 /* Prepare both old and new full names used for string replacement. */
5482 pszNewFullName = RTStrDup(pszFilename);
5483 RTPathStripExt(pszNewFullName);
5484 pszOldFullName = RTStrDup(pImage->pszFilename);
5485 RTPathStripExt(pszOldFullName);
5486
5487 /* --- Up to this point we have not done any damage yet. --- */
5488
5489 /* Save the old name for easy access to the old descriptor file. */
5490 pszOldDescName = RTStrDup(pImage->pszFilename);
5491 /* Save old image name. */
5492 pszOldImageName = pImage->pszFilename;
5493
5494 /* Update the descriptor with modified extent names. */
5495 for (i = 0, line = pImage->Descriptor.uFirstExtent;
5496 i < cExtents;
5497 i++, line = pImage->Descriptor.aNextLines[line])
5498 {
5499 /* Assume that vmdkStrReplace will fail. */
5500 rc = VERR_NO_MEMORY;
5501 /* Update the descriptor. */
5502 apszNewLines[i] = vmdkStrReplace(pImage->Descriptor.aLines[line],
5503 pszOldBaseName, pszNewBaseName);
5504 if (!apszNewLines[i])
5505 goto rollback;
5506 pImage->Descriptor.aLines[line] = apszNewLines[i];
5507 }
5508 /* Make sure the descriptor gets written back. */
5509 pImage->Descriptor.fDirty = true;
5510 /* Flush the descriptor now, in case it is embedded. */
5511 vmdkFlushImage(pImage);
5512
5513 /* Close and rename/move extents. */
5514 for (i = 0; i < cExtents; i++)
5515 {
5516 PVMDKEXTENT pExtent = &pImage->pExtents[i];
5517 /* Compose new name for the extent. */
5518 apszNewName[i] = vmdkStrReplace(pExtent->pszFullname,
5519 pszOldFullName, pszNewFullName);
5520 if (!apszNewName[i])
5521 goto rollback;
5522 /* Close the extent file. */
5523 vmdkFileClose(pImage, &pExtent->pFile, false);
5524 /* Rename the extent file. */
5525 rc = vmdkFileMove(pImage, pExtent->pszFullname, apszNewName[i], 0);
5526 if (RT_FAILURE(rc))
5527 goto rollback;
5528 /* Remember the old name. */
5529 apszOldName[i] = RTStrDup(pExtent->pszFullname);
5530 }
5531 /* Release all old stuff. */
5532 vmdkFreeImage(pImage, false);
5533
5534 fImageFreed = true;
5535
5536 /* Last elements of new/old name arrays are intended for
5537 * storing descriptor's names.
5538 */
5539 apszNewName[cExtents] = RTStrDup(pszFilename);
5540 /* Rename the descriptor file if it's separate. */
5541 if (!fEmbeddedDesc)
5542 {
5543 rc = vmdkFileMove(pImage, pImage->pszFilename, apszNewName[cExtents], 0);
5544 if (RT_FAILURE(rc))
5545 goto rollback;
5546 /* Save old name only if we may need to change it back. */
5547 apszOldName[cExtents] = RTStrDup(pszFilename);
5548 }
5549
5550 /* Update pImage with the new information. */
5551 pImage->pszFilename = pszFilename;
5552
5553 /* Open the new image. */
5554 rc = vmdkOpenImage(pImage, pImage->uOpenFlags);
5555 if (RT_SUCCESS(rc))
5556 goto out;
5557
5558rollback:
5559 /* Roll back all changes in case of failure. */
5560 if (RT_FAILURE(rc))
5561 {
5562 int rrc;
5563 if (!fImageFreed)
5564 {
5565 /*
5566 * Some extents may have been closed, close the rest. We will
5567 * re-open the whole thing later.
5568 */
5569 vmdkFreeImage(pImage, false);
5570 }
5571 /* Rename files back. */
5572 for (i = 0; i <= cExtents; i++)
5573 {
5574 if (apszOldName[i])
5575 {
5576 rrc = vmdkFileMove(pImage, apszNewName[i], apszOldName[i], 0);
5577 AssertRC(rrc);
5578 }
5579 }
5580 /* Restore the old descriptor. */
5581 PVMDKFILE pFile;
5582 rrc = vmdkFileOpen(pImage, &pFile, pszOldDescName,
5583 VDOpenFlagsToFileOpenFlags(VD_OPEN_FLAGS_NORMAL,
5584 false /* fCreate */),
5585 false /* fAsyncIO */);
5586 AssertRC(rrc);
5587 if (fEmbeddedDesc)
5588 {
5589 ExtentCopy.pFile = pFile;
5590 pImage->pExtents = &ExtentCopy;
5591 }
5592 else
5593 {
5594 /* Shouldn't be null for separate descriptor.
5595 * There will be no access to the actual content.
5596 */
5597 pImage->pDescData = pszOldDescName;
5598 pImage->pFile = pFile;
5599 }
5600 pImage->Descriptor = DescriptorCopy;
5601 vmdkWriteDescriptor(pImage);
5602 vmdkFileClose(pImage, &pFile, false);
5603 /* Get rid of the stuff we implanted. */
5604 pImage->pExtents = NULL;
5605 pImage->pFile = NULL;
5606 pImage->pDescData = NULL;
5607 /* Re-open the image back. */
5608 pImage->pszFilename = pszOldImageName;
5609 rrc = vmdkOpenImage(pImage, pImage->uOpenFlags);
5610 AssertRC(rrc);
5611 }
5612
5613out:
5614 for (i = 0; i < DescriptorCopy.cLines; i++)
5615 if (DescriptorCopy.aLines[i])
5616 RTStrFree(DescriptorCopy.aLines[i]);
5617 if (apszOldName)
5618 {
5619 for (i = 0; i <= cExtents; i++)
5620 if (apszOldName[i])
5621 RTStrFree(apszOldName[i]);
5622 RTMemTmpFree(apszOldName);
5623 }
5624 if (apszNewName)
5625 {
5626 for (i = 0; i <= cExtents; i++)
5627 if (apszNewName[i])
5628 RTStrFree(apszNewName[i]);
5629 RTMemTmpFree(apszNewName);
5630 }
5631 if (apszNewLines)
5632 {
5633 for (i = 0; i < cExtents; i++)
5634 if (apszNewLines[i])
5635 RTStrFree(apszNewLines[i]);
5636 RTMemTmpFree(apszNewLines);
5637 }
5638 if (pszOldDescName)
5639 RTStrFree(pszOldDescName);
5640 if (pszOldBaseName)
5641 RTStrFree(pszOldBaseName);
5642 if (pszNewBaseName)
5643 RTStrFree(pszNewBaseName);
5644 if (pszOldFullName)
5645 RTStrFree(pszOldFullName);
5646 if (pszNewFullName)
5647 RTStrFree(pszNewFullName);
5648 LogFlowFunc(("returns %Rrc\n", rc));
5649 return rc;
5650}
5651
5652/** @copydoc VBOXHDDBACKEND::pfnClose */
5653static int vmdkClose(void *pBackendData, bool fDelete)
5654{
5655 LogFlowFunc(("pBackendData=%#p fDelete=%d\n", pBackendData, fDelete));
5656 PVMDKIMAGE pImage = (PVMDKIMAGE)pBackendData;
5657 int rc;
5658
5659 rc = vmdkFreeImage(pImage, fDelete);
5660 RTMemFree(pImage);
5661
5662 LogFlowFunc(("returns %Rrc\n", rc));
5663 return rc;
5664}
5665
/** @copydoc VBOXHDDBACKEND::pfnRead
 *
 * Synchronous read. Locates the extent covering uOffset, clips the request
 * to the extent (and, for sparse extents, to the current grain), and reads
 * from the backing file. Returns VERR_VD_BLOCK_FREE for unallocated grains
 * so the caller can fall back to a parent image.
 */
static int vmdkRead(void *pBackendData, uint64_t uOffset, void *pvBuf,
                    size_t cbToRead, size_t *pcbActuallyRead)
{
    LogFlowFunc(("pBackendData=%#p uOffset=%llu pvBuf=%#p cbToRead=%zu pcbActuallyRead=%#p\n", pBackendData, uOffset, pvBuf, cbToRead, pcbActuallyRead));
    PVMDKIMAGE pImage = (PVMDKIMAGE)pBackendData;
    PVMDKEXTENT pExtent;
    uint64_t uSectorExtentRel;
    uint64_t uSectorExtentAbs;
    int rc;

    AssertPtr(pImage);
    /* The VD layer guarantees sector-aligned requests. */
    Assert(uOffset % 512 == 0);
    Assert(cbToRead % 512 == 0);

    if (   uOffset + cbToRead > pImage->cbSize
        || cbToRead == 0)
    {
        rc = VERR_INVALID_PARAMETER;
        goto out;
    }

    /* Map the absolute sector to an extent and an extent-relative sector. */
    rc = vmdkFindExtent(pImage, VMDK_BYTE2SECTOR(uOffset),
                        &pExtent, &uSectorExtentRel);
    if (RT_FAILURE(rc))
        goto out;

    /* Check access permissions as defined in the extent descriptor. */
    if (pExtent->enmAccess == VMDKACCESS_NOACCESS)
    {
        rc = VERR_VD_VMDK_INVALID_STATE;
        goto out;
    }


    /* Clip read range to remain in this extent. */
    cbToRead = RT_MIN(cbToRead, VMDK_SECTOR2BYTE(pExtent->uSectorOffset + pExtent->cNominalSectors - uSectorExtentRel));

    /* Handle the read according to the current extent type. */
    switch (pExtent->enmType)
    {
        case VMDKETYPE_HOSTED_SPARSE:
#ifdef VBOX_WITH_VMDK_ESX
        case VMDKETYPE_ESX_SPARSE:
#endif /* VBOX_WITH_VMDK_ESX */
            /* Translate the extent-relative sector to the absolute file
             * position via the grain tables. 0 means "not allocated". */
            rc = vmdkGetSector(pImage, pExtent, uSectorExtentRel,
                               &uSectorExtentAbs);
            if (RT_FAILURE(rc))
                goto out;
            /* Clip read range to at most the rest of the grain. */
            cbToRead = RT_MIN(cbToRead, VMDK_SECTOR2BYTE(pExtent->cSectorsPerGrain - uSectorExtentRel % pExtent->cSectorsPerGrain));
            Assert(!(cbToRead % 512));
            if (uSectorExtentAbs == 0)
                rc = VERR_VD_BLOCK_FREE;
            else
            {
                if (pImage->uImageFlags & VD_VMDK_IMAGE_FLAGS_STREAM_OPTIMIZED)
                {
                    /* Stream optimized: grains are compressed. Inflate the
                     * whole grain into the per-extent cache (pvGrain) unless
                     * it is already the cached one, then copy the piece. */
                    uint32_t uSectorInGrain = uSectorExtentRel % pExtent->cSectorsPerGrain;
                    uSectorExtentAbs -= uSectorInGrain;
                    uint64_t uLBA;
                    if (pExtent->uGrainSector != uSectorExtentAbs)
                    {
                        rc = vmdkFileInflateSync(pImage, pExtent->pFile, VMDK_SECTOR2BYTE(uSectorExtentAbs),
                                                 pExtent->pvGrain, VMDK_SECTOR2BYTE(pExtent->cSectorsPerGrain), VMDK_MARKER_IGNORE, &uLBA, NULL);
                        if (RT_FAILURE(rc))
                        {
                            /* Invalidate the grain cache on error. */
                            pExtent->uGrainSector = 0;
                            AssertRC(rc);
                            goto out;
                        }
                        pExtent->uGrainSector = uSectorExtentAbs;
                        Assert(uLBA == uSectorExtentRel);
                    }
                    memcpy(pvBuf, (uint8_t *)pExtent->pvGrain + VMDK_SECTOR2BYTE(uSectorInGrain), cbToRead);
                }
                else
                {
                    /* Plain sparse extent: read the data directly. */
                    rc = vmdkFileReadSync(pImage, pExtent->pFile,
                                          VMDK_SECTOR2BYTE(uSectorExtentAbs),
                                          pvBuf, cbToRead, NULL);
                }
            }
            break;
        case VMDKETYPE_VMFS:
        case VMDKETYPE_FLAT:
            /* Flat extents map 1:1, no grain translation needed. */
            rc = vmdkFileReadSync(pImage, pExtent->pFile,
                                  VMDK_SECTOR2BYTE(uSectorExtentRel),
                                  pvBuf, cbToRead, NULL);
            break;
        case VMDKETYPE_ZERO:
            /* Zero extents have no backing storage at all. */
            memset(pvBuf, '\0', cbToRead);
            break;
    }
    if (pcbActuallyRead)
        *pcbActuallyRead = cbToRead;

out:
    LogFlowFunc(("returns %Rrc\n", rc));
    return rc;
}
5767
/** @copydoc VBOXHDDBACKEND::pfnWrite
 *
 * Synchronous write. Finds the extent covering uOffset and dispatches on the
 * extent type. For sparse extents an unallocated full-grain write allocates
 * a new grain; a partial write to an unallocated grain returns
 * VERR_VD_BLOCK_FREE with *pcbPreRead/*pcbPostRead set so the caller can do
 * a read-modify-write. Stream optimized images only allow strictly
 * ascending grain writes.
 */
static int vmdkWrite(void *pBackendData, uint64_t uOffset, const void *pvBuf,
                     size_t cbToWrite, size_t *pcbWriteProcess,
                     size_t *pcbPreRead, size_t *pcbPostRead, unsigned fWrite)
{
    LogFlowFunc(("pBackendData=%#p uOffset=%llu pvBuf=%#p cbToWrite=%zu pcbWriteProcess=%#p pcbPreRead=%#p pcbPostRead=%#p\n", pBackendData, uOffset, pvBuf, cbToWrite, pcbWriteProcess, pcbPreRead, pcbPostRead));
    PVMDKIMAGE pImage = (PVMDKIMAGE)pBackendData;
    PVMDKEXTENT pExtent;
    uint64_t uSectorExtentRel;
    uint64_t uSectorExtentAbs;
    int rc;

    AssertPtr(pImage);
    /* The VD layer guarantees sector-aligned requests. */
    Assert(uOffset % 512 == 0);
    Assert(cbToWrite % 512 == 0);

    if (pImage->uOpenFlags & VD_OPEN_FLAGS_READONLY)
    {
        rc = VERR_VD_IMAGE_READ_ONLY;
        goto out;
    }

    if (cbToWrite == 0)
    {
        rc = VERR_INVALID_PARAMETER;
        goto out;
    }

    /* No size check here, will do that later when the extent is located.
     * There are sparse images out there which according to the spec are
     * invalid, because the total size is not a multiple of the grain size.
     * Also for sparse images which are stitched together in odd ways (not at
     * grain boundaries, and with the nominal size not being a multiple of the
     * grain size), this would prevent writing to the last grain. */

    rc = vmdkFindExtent(pImage, VMDK_BYTE2SECTOR(uOffset),
                        &pExtent, &uSectorExtentRel);
    if (RT_FAILURE(rc))
        goto out;

    /* Check access permissions as defined in the extent descriptor. */
    if (pExtent->enmAccess != VMDKACCESS_READWRITE)
    {
        rc = VERR_VD_VMDK_INVALID_STATE;
        goto out;
    }

    /* Handle the write according to the current extent type. */
    switch (pExtent->enmType)
    {
        case VMDKETYPE_HOSTED_SPARSE:
#ifdef VBOX_WITH_VMDK_ESX
        case VMDKETYPE_ESX_SPARSE:
#endif /* VBOX_WITH_VMDK_ESX */
            rc = vmdkGetSector(pImage, pExtent, uSectorExtentRel,
                               &uSectorExtentAbs);
            if (RT_FAILURE(rc))
                goto out;
            /* Clip write range to at most the rest of the grain. */
            cbToWrite = RT_MIN(cbToWrite, VMDK_SECTOR2BYTE(pExtent->cSectorsPerGrain - uSectorExtentRel % pExtent->cSectorsPerGrain));
            /* Stream optimized images are append-only: refuse writes behind
             * the last grain already written. */
            if (    pImage->uImageFlags & VD_VMDK_IMAGE_FLAGS_STREAM_OPTIMIZED
                &&  uSectorExtentRel < (uint64_t)pExtent->uLastGrainWritten * pExtent->cSectorsPerGrain)
            {
                rc = VERR_VD_VMDK_INVALID_WRITE;
                goto out;
            }
            if (uSectorExtentAbs == 0)
            {
                if (cbToWrite == VMDK_SECTOR2BYTE(pExtent->cSectorsPerGrain))
                {
                    /* Full block write to a previously unallocated block.
                     * Check if the caller wants to avoid the automatic alloc. */
                    if (!(fWrite & VD_WRITE_NO_ALLOC))
                    {
                        /* Allocate GT and find out where to store the grain. */
                        rc = vmdkAllocGrain(pImage, pExtent, uSectorExtentRel,
                                            pvBuf, cbToWrite);
                    }
                    else
                        rc = VERR_VD_BLOCK_FREE;
                    *pcbPreRead = 0;
                    *pcbPostRead = 0;
                }
                else
                {
                    /* Partial write to a free grain: tell the caller how much
                     * surrounding data to read for a read-modify-write. */
                    /* Clip write range to remain in this extent. */
                    cbToWrite = RT_MIN(cbToWrite, VMDK_SECTOR2BYTE(pExtent->uSectorOffset + pExtent->cNominalSectors - uSectorExtentRel));
                    *pcbPreRead = VMDK_SECTOR2BYTE(uSectorExtentRel % pExtent->cSectorsPerGrain);
                    *pcbPostRead = VMDK_SECTOR2BYTE(pExtent->cSectorsPerGrain) - cbToWrite - *pcbPreRead;
                    rc = VERR_VD_BLOCK_FREE;
                }
            }
            else
            {
                if (pImage->uImageFlags & VD_VMDK_IMAGE_FLAGS_STREAM_OPTIMIZED)
                {
                    /* Rewriting the last written grain of a stream optimized
                     * image: inflate it into the cache (if not cached),
                     * merge, re-deflate, and rewrite the trailing metadata. */
                    uint32_t uSectorInGrain = uSectorExtentRel % pExtent->cSectorsPerGrain;
                    uSectorExtentAbs -= uSectorInGrain;
                    uint64_t uLBA = uSectorExtentRel;
                    if (    pExtent->uGrainSector != uSectorExtentAbs
                        ||  pExtent->uGrainSector != pExtent->uLastGrainSector)
                    {
                        rc = vmdkFileInflateSync(pImage, pExtent->pFile, VMDK_SECTOR2BYTE(uSectorExtentAbs),
                                                 pExtent->pvGrain, VMDK_SECTOR2BYTE(pExtent->cSectorsPerGrain), VMDK_MARKER_IGNORE, &uLBA, NULL);
                        if (RT_FAILURE(rc))
                        {
                            pExtent->uGrainSector = 0;
                            pExtent->uLastGrainSector = 0;
                            AssertRC(rc);
                            goto out;
                        }
                        pExtent->uGrainSector = uSectorExtentAbs;
                        pExtent->uLastGrainSector = uSectorExtentAbs;
                        Assert(uLBA == uSectorExtentRel);
                    }
                    memcpy((uint8_t *)pExtent->pvGrain + VMDK_SECTOR2BYTE(uSectorInGrain), pvBuf, cbToWrite);
                    uint32_t cbGrain = 0;
                    rc = vmdkFileDeflateSync(pImage, pExtent->pFile,
                                             VMDK_SECTOR2BYTE(uSectorExtentAbs),
                                             pExtent->pvGrain, VMDK_SECTOR2BYTE(pExtent->cSectorsPerGrain),
                                             VMDK_MARKER_IGNORE, uLBA, &cbGrain);
                    if (RT_FAILURE(rc))
                    {
                        pExtent->uGrainSector = 0;
                        pExtent->uLastGrainSector = 0;
                        AssertRC(rc);
                        return vmdkError(pImage, rc, RT_SRC_POS, N_("VMDK: cannot write compressed data block in '%s'"), pExtent->pszFullname);
                    }
                    /* Track the compressed size (sector aligned) of the last
                     * grain so the next append lands right behind it. */
                    cbGrain = RT_ALIGN(cbGrain, 512);
                    pExtent->uLastGrainSector = uSectorExtentAbs;
                    pExtent->uLastGrainWritten = uSectorExtentRel / pExtent->cSectorsPerGrain;
                    pExtent->cbLastGrainWritten = cbGrain;

                    uint64_t uEOSOff = 0;
                    if (pExtent->fFooter)
                    {
                        /* Footer-style images keep the footer right behind the
                         * last grain; rewrite it after every grain append. */
                        uEOSOff = 512;
                        rc = vmdkWriteMetaSparseExtent(pImage, pExtent, VMDK_SECTOR2BYTE(uSectorExtentAbs) + RT_ALIGN(cbGrain, 512));
                        if (RT_FAILURE(rc))
                            return vmdkError(pImage, rc, RT_SRC_POS, N_("VMDK: cannot write footer after data block in '%s'"), pExtent->pszFullname);
                    }
                    /* Append a zeroed end-of-stream marker sector. */
                    uint8_t aEOS[512];
                    memset(aEOS, '\0', sizeof(aEOS));
                    rc = vmdkFileWriteSync(pImage, pExtent->pFile,
                                           VMDK_SECTOR2BYTE(uSectorExtentAbs) + RT_ALIGN(cbGrain, 512) + uEOSOff,
                                           aEOS, sizeof(aEOS), NULL);
                    if (RT_FAILURE(rc))
                        return vmdkError(pImage, rc, RT_SRC_POS, N_("VMDK: cannot write end-of stream marker after data block in '%s'"), pExtent->pszFullname);
                }
                else
                {
                    /* Ordinary sparse extent: write in place. */
                    rc = vmdkFileWriteSync(pImage, pExtent->pFile,
                                           VMDK_SECTOR2BYTE(uSectorExtentAbs),
                                           pvBuf, cbToWrite, NULL);
                }
            }
            break;
        case VMDKETYPE_VMFS:
        case VMDKETYPE_FLAT:
            /* Clip write range to remain in this extent. */
            cbToWrite = RT_MIN(cbToWrite, VMDK_SECTOR2BYTE(pExtent->uSectorOffset + pExtent->cNominalSectors - uSectorExtentRel));
            rc = vmdkFileWriteSync(pImage, pExtent->pFile,
                                   VMDK_SECTOR2BYTE(uSectorExtentRel),
                                   pvBuf, cbToWrite, NULL);
            break;
        case VMDKETYPE_ZERO:
            /* Writes to zero extents are discarded (rc keeps its prior
             * success value from vmdkFindExtent and the access check). */
            /* Clip write range to remain in this extent. */
            cbToWrite = RT_MIN(cbToWrite, VMDK_SECTOR2BYTE(pExtent->uSectorOffset + pExtent->cNominalSectors - uSectorExtentRel));
            break;
    }

    if (pcbWriteProcess)
        *pcbWriteProcess = cbToWrite;

out:
    LogFlowFunc(("returns %Rrc\n", rc));
    return rc;
}
5946
5947/** @copydoc VBOXHDDBACKEND::pfnFlush */
5948static int vmdkFlush(void *pBackendData)
5949{
5950 LogFlowFunc(("pBackendData=%#p\n", pBackendData));
5951 PVMDKIMAGE pImage = (PVMDKIMAGE)pBackendData;
5952 int rc;
5953
5954 AssertPtr(pImage);
5955
5956 rc = vmdkFlushImage(pImage);
5957 LogFlowFunc(("returns %Rrc\n", rc));
5958 return rc;
5959}
5960
5961/** @copydoc VBOXHDDBACKEND::pfnGetVersion */
5962static unsigned vmdkGetVersion(void *pBackendData)
5963{
5964 LogFlowFunc(("pBackendData=%#p\n", pBackendData));
5965 PVMDKIMAGE pImage = (PVMDKIMAGE)pBackendData;
5966
5967 AssertPtr(pImage);
5968
5969 if (pImage)
5970 return VMDK_IMAGE_VERSION;
5971 else
5972 return 0;
5973}
5974
5975/** @copydoc VBOXHDDBACKEND::pfnGetSize */
5976static uint64_t vmdkGetSize(void *pBackendData)
5977{
5978 LogFlowFunc(("pBackendData=%#p\n", pBackendData));
5979 PVMDKIMAGE pImage = (PVMDKIMAGE)pBackendData;
5980
5981 AssertPtr(pImage);
5982
5983 if (pImage)
5984 return pImage->cbSize;
5985 else
5986 return 0;
5987}
5988
5989/** @copydoc VBOXHDDBACKEND::pfnGetFileSize */
5990static uint64_t vmdkGetFileSize(void *pBackendData)
5991{
5992 LogFlowFunc(("pBackendData=%#p\n", pBackendData));
5993 PVMDKIMAGE pImage = (PVMDKIMAGE)pBackendData;
5994 uint64_t cb = 0;
5995
5996 AssertPtr(pImage);
5997
5998 if (pImage)
5999 {
6000 uint64_t cbFile;
6001 if (pImage->pFile != NULL)
6002 {
6003 int rc = vmdkFileGetSize(pImage, pImage->pFile, &cbFile);
6004 if (RT_SUCCESS(rc))
6005 cb += cbFile;
6006 }
6007 for (unsigned i = 0; i < pImage->cExtents; i++)
6008 {
6009 if (pImage->pExtents[i].pFile != NULL)
6010 {
6011 int rc = vmdkFileGetSize(pImage, pImage->pExtents[i].pFile, &cbFile);
6012 if (RT_SUCCESS(rc))
6013 cb += cbFile;
6014 }
6015 }
6016 }
6017
6018 LogFlowFunc(("returns %lld\n", cb));
6019 return cb;
6020}
6021
6022/** @copydoc VBOXHDDBACKEND::pfnGetPCHSGeometry */
6023static int vmdkGetPCHSGeometry(void *pBackendData, PVDGEOMETRY pPCHSGeometry)
6024{
6025 LogFlowFunc(("pBackendData=%#p pPCHSGeometry=%#p\n", pBackendData, pPCHSGeometry));
6026 PVMDKIMAGE pImage = (PVMDKIMAGE)pBackendData;
6027 int rc;
6028
6029 AssertPtr(pImage);
6030
6031 if (pImage)
6032 {
6033 if (pImage->PCHSGeometry.cCylinders)
6034 {
6035 *pPCHSGeometry = pImage->PCHSGeometry;
6036 rc = VINF_SUCCESS;
6037 }
6038 else
6039 rc = VERR_VD_GEOMETRY_NOT_SET;
6040 }
6041 else
6042 rc = VERR_VD_NOT_OPENED;
6043
6044 LogFlowFunc(("returns %Rrc (PCHS=%u/%u/%u)\n", rc, pPCHSGeometry->cCylinders, pPCHSGeometry->cHeads, pPCHSGeometry->cSectors));
6045 return rc;
6046}
6047
6048/** @copydoc VBOXHDDBACKEND::pfnSetPCHSGeometry */
6049static int vmdkSetPCHSGeometry(void *pBackendData, PCVDGEOMETRY pPCHSGeometry)
6050{
6051 LogFlowFunc(("pBackendData=%#p pPCHSGeometry=%#p PCHS=%u/%u/%u\n", pBackendData, pPCHSGeometry, pPCHSGeometry->cCylinders, pPCHSGeometry->cHeads, pPCHSGeometry->cSectors));
6052 PVMDKIMAGE pImage = (PVMDKIMAGE)pBackendData;
6053 int rc;
6054
6055 AssertPtr(pImage);
6056
6057 if (pImage)
6058 {
6059 if (pImage->uOpenFlags & VD_OPEN_FLAGS_READONLY)
6060 {
6061 rc = VERR_VD_IMAGE_READ_ONLY;
6062 goto out;
6063 }
6064 rc = vmdkDescSetPCHSGeometry(pImage, pPCHSGeometry);
6065 if (RT_FAILURE(rc))
6066 goto out;
6067
6068 pImage->PCHSGeometry = *pPCHSGeometry;
6069 rc = VINF_SUCCESS;
6070 }
6071 else
6072 rc = VERR_VD_NOT_OPENED;
6073
6074out:
6075 LogFlowFunc(("returns %Rrc\n", rc));
6076 return rc;
6077}
6078
6079/** @copydoc VBOXHDDBACKEND::pfnGetLCHSGeometry */
6080static int vmdkGetLCHSGeometry(void *pBackendData, PVDGEOMETRY pLCHSGeometry)
6081{
6082 LogFlowFunc(("pBackendData=%#p pLCHSGeometry=%#p\n", pBackendData, pLCHSGeometry));
6083 PVMDKIMAGE pImage = (PVMDKIMAGE)pBackendData;
6084 int rc;
6085
6086 AssertPtr(pImage);
6087
6088 if (pImage)
6089 {
6090 if (pImage->LCHSGeometry.cCylinders)
6091 {
6092 *pLCHSGeometry = pImage->LCHSGeometry;
6093 rc = VINF_SUCCESS;
6094 }
6095 else
6096 rc = VERR_VD_GEOMETRY_NOT_SET;
6097 }
6098 else
6099 rc = VERR_VD_NOT_OPENED;
6100
6101 LogFlowFunc(("returns %Rrc (LCHS=%u/%u/%u)\n", rc, pLCHSGeometry->cCylinders, pLCHSGeometry->cHeads, pLCHSGeometry->cSectors));
6102 return rc;
6103}
6104
6105/** @copydoc VBOXHDDBACKEND::pfnSetLCHSGeometry */
6106static int vmdkSetLCHSGeometry(void *pBackendData, PCVDGEOMETRY pLCHSGeometry)
6107{
6108 LogFlowFunc(("pBackendData=%#p pLCHSGeometry=%#p LCHS=%u/%u/%u\n", pBackendData, pLCHSGeometry, pLCHSGeometry->cCylinders, pLCHSGeometry->cHeads, pLCHSGeometry->cSectors));
6109 PVMDKIMAGE pImage = (PVMDKIMAGE)pBackendData;
6110 int rc;
6111
6112 AssertPtr(pImage);
6113
6114 if (pImage)
6115 {
6116 if (pImage->uOpenFlags & VD_OPEN_FLAGS_READONLY)
6117 {
6118 rc = VERR_VD_IMAGE_READ_ONLY;
6119 goto out;
6120 }
6121 rc = vmdkDescSetLCHSGeometry(pImage, pLCHSGeometry);
6122 if (RT_FAILURE(rc))
6123 goto out;
6124
6125 pImage->LCHSGeometry = *pLCHSGeometry;
6126 rc = VINF_SUCCESS;
6127 }
6128 else
6129 rc = VERR_VD_NOT_OPENED;
6130
6131out:
6132 LogFlowFunc(("returns %Rrc\n", rc));
6133 return rc;
6134}
6135
6136/** @copydoc VBOXHDDBACKEND::pfnGetImageFlags */
6137static unsigned vmdkGetImageFlags(void *pBackendData)
6138{
6139 LogFlowFunc(("pBackendData=%#p\n", pBackendData));
6140 PVMDKIMAGE pImage = (PVMDKIMAGE)pBackendData;
6141 unsigned uImageFlags;
6142
6143 AssertPtr(pImage);
6144
6145 if (pImage)
6146 uImageFlags = pImage->uImageFlags;
6147 else
6148 uImageFlags = 0;
6149
6150 LogFlowFunc(("returns %#x\n", uImageFlags));
6151 return uImageFlags;
6152}
6153
6154/** @copydoc VBOXHDDBACKEND::pfnGetOpenFlags */
6155static unsigned vmdkGetOpenFlags(void *pBackendData)
6156{
6157 LogFlowFunc(("pBackendData=%#p\n", pBackendData));
6158 PVMDKIMAGE pImage = (PVMDKIMAGE)pBackendData;
6159 unsigned uOpenFlags;
6160
6161 AssertPtr(pImage);
6162
6163 if (pImage)
6164 uOpenFlags = pImage->uOpenFlags;
6165 else
6166 uOpenFlags = 0;
6167
6168 LogFlowFunc(("returns %#x\n", uOpenFlags));
6169 return uOpenFlags;
6170}
6171
6172/** @copydoc VBOXHDDBACKEND::pfnSetOpenFlags */
6173static int vmdkSetOpenFlags(void *pBackendData, unsigned uOpenFlags)
6174{
6175 LogFlowFunc(("pBackendData=%#p\n uOpenFlags=%#x", pBackendData, uOpenFlags));
6176 PVMDKIMAGE pImage = (PVMDKIMAGE)pBackendData;
6177 int rc;
6178
6179 /* Image must be opened and the new flags must be valid. Just readonly and
6180 * info flags are supported. */
6181 if (!pImage || (uOpenFlags & ~(VD_OPEN_FLAGS_READONLY | VD_OPEN_FLAGS_INFO | VD_OPEN_FLAGS_ASYNC_IO | VD_OPEN_FLAGS_SHAREABLE)))
6182 {
6183 rc = VERR_INVALID_PARAMETER;
6184 goto out;
6185 }
6186
6187 /* Implement this operation via reopening the image. */
6188 vmdkFreeImage(pImage, false);
6189 rc = vmdkOpenImage(pImage, uOpenFlags);
6190
6191out:
6192 LogFlowFunc(("returns %Rrc\n", rc));
6193 return rc;
6194}
6195
6196/** @copydoc VBOXHDDBACKEND::pfnGetComment */
6197static int vmdkGetComment(void *pBackendData, char *pszComment,
6198 size_t cbComment)
6199{
6200 LogFlowFunc(("pBackendData=%#p pszComment=%#p cbComment=%zu\n", pBackendData, pszComment, cbComment));
6201 PVMDKIMAGE pImage = (PVMDKIMAGE)pBackendData;
6202 int rc;
6203
6204 AssertPtr(pImage);
6205
6206 if (pImage)
6207 {
6208 const char *pszCommentEncoded = NULL;
6209 rc = vmdkDescDDBGetStr(pImage, &pImage->Descriptor,
6210 "ddb.comment", &pszCommentEncoded);
6211 if (rc == VERR_VD_VMDK_VALUE_NOT_FOUND)
6212 pszCommentEncoded = NULL;
6213 else if (RT_FAILURE(rc))
6214 goto out;
6215
6216 if (pszComment && pszCommentEncoded)
6217 rc = vmdkDecodeString(pszCommentEncoded, pszComment, cbComment);
6218 else
6219 {
6220 if (pszComment)
6221 *pszComment = '\0';
6222 rc = VINF_SUCCESS;
6223 }
6224 if (pszCommentEncoded)
6225 RTStrFree((char *)(void *)pszCommentEncoded);
6226 }
6227 else
6228 rc = VERR_VD_NOT_OPENED;
6229
6230out:
6231 LogFlowFunc(("returns %Rrc comment='%s'\n", rc, pszComment));
6232 return rc;
6233}
6234
6235/** @copydoc VBOXHDDBACKEND::pfnSetComment */
6236static int vmdkSetComment(void *pBackendData, const char *pszComment)
6237{
6238 LogFlowFunc(("pBackendData=%#p pszComment=\"%s\"\n", pBackendData, pszComment));
6239 PVMDKIMAGE pImage = (PVMDKIMAGE)pBackendData;
6240 int rc;
6241
6242 AssertPtr(pImage);
6243
6244 if (pImage->uOpenFlags & VD_OPEN_FLAGS_READONLY)
6245 {
6246 rc = VERR_VD_IMAGE_READ_ONLY;
6247 goto out;
6248 }
6249
6250 if (pImage)
6251 rc = vmdkSetImageComment(pImage, pszComment);
6252 else
6253 rc = VERR_VD_NOT_OPENED;
6254
6255out:
6256 LogFlowFunc(("returns %Rrc\n", rc));
6257 return rc;
6258}
6259
6260/** @copydoc VBOXHDDBACKEND::pfnGetUuid */
6261static int vmdkGetUuid(void *pBackendData, PRTUUID pUuid)
6262{
6263 LogFlowFunc(("pBackendData=%#p pUuid=%#p\n", pBackendData, pUuid));
6264 PVMDKIMAGE pImage = (PVMDKIMAGE)pBackendData;
6265 int rc;
6266
6267 AssertPtr(pImage);
6268
6269 if (pImage)
6270 {
6271 *pUuid = pImage->ImageUuid;
6272 rc = VINF_SUCCESS;
6273 }
6274 else
6275 rc = VERR_VD_NOT_OPENED;
6276
6277 LogFlowFunc(("returns %Rrc (%RTuuid)\n", rc, pUuid));
6278 return rc;
6279}
6280
6281/** @copydoc VBOXHDDBACKEND::pfnSetUuid */
6282static int vmdkSetUuid(void *pBackendData, PCRTUUID pUuid)
6283{
6284 LogFlowFunc(("pBackendData=%#p Uuid=%RTuuid\n", pBackendData, pUuid));
6285 PVMDKIMAGE pImage = (PVMDKIMAGE)pBackendData;
6286 int rc;
6287
6288 LogFlowFunc(("%RTuuid\n", pUuid));
6289 AssertPtr(pImage);
6290
6291 if (pImage)
6292 {
6293 if (!(pImage->uOpenFlags & VD_OPEN_FLAGS_READONLY))
6294 {
6295 pImage->ImageUuid = *pUuid;
6296 rc = vmdkDescDDBSetUuid(pImage, &pImage->Descriptor,
6297 VMDK_DDB_IMAGE_UUID, pUuid);
6298 if (RT_FAILURE(rc))
6299 return vmdkError(pImage, rc, RT_SRC_POS, N_("VMDK: error storing image UUID in descriptor in '%s'"), pImage->pszFilename);
6300 rc = VINF_SUCCESS;
6301 }
6302 else
6303 rc = VERR_VD_IMAGE_READ_ONLY;
6304 }
6305 else
6306 rc = VERR_VD_NOT_OPENED;
6307
6308 LogFlowFunc(("returns %Rrc\n", rc));
6309 return rc;
6310}
6311
6312/** @copydoc VBOXHDDBACKEND::pfnGetModificationUuid */
6313static int vmdkGetModificationUuid(void *pBackendData, PRTUUID pUuid)
6314{
6315 LogFlowFunc(("pBackendData=%#p pUuid=%#p\n", pBackendData, pUuid));
6316 PVMDKIMAGE pImage = (PVMDKIMAGE)pBackendData;
6317 int rc;
6318
6319 AssertPtr(pImage);
6320
6321 if (pImage)
6322 {
6323 *pUuid = pImage->ModificationUuid;
6324 rc = VINF_SUCCESS;
6325 }
6326 else
6327 rc = VERR_VD_NOT_OPENED;
6328
6329 LogFlowFunc(("returns %Rrc (%RTuuid)\n", rc, pUuid));
6330 return rc;
6331}
6332
6333/** @copydoc VBOXHDDBACKEND::pfnSetModificationUuid */
6334static int vmdkSetModificationUuid(void *pBackendData, PCRTUUID pUuid)
6335{
6336 LogFlowFunc(("pBackendData=%#p Uuid=%RTuuid\n", pBackendData, pUuid));
6337 PVMDKIMAGE pImage = (PVMDKIMAGE)pBackendData;
6338 int rc;
6339
6340 AssertPtr(pImage);
6341
6342 if (pImage)
6343 {
6344 if (!(pImage->uOpenFlags & VD_OPEN_FLAGS_READONLY))
6345 {
6346 /*
6347 * Only change the modification uuid if it changed.
6348 * Avoids a lot of unneccessary 1-byte writes during
6349 * vmdkFlush.
6350 */
6351 if (RTUuidCompare(&pImage->ModificationUuid, pUuid))
6352 {
6353 pImage->ModificationUuid = *pUuid;
6354 rc = vmdkDescDDBSetUuid(pImage, &pImage->Descriptor,
6355 VMDK_DDB_MODIFICATION_UUID, pUuid);
6356 if (RT_FAILURE(rc))
6357 return vmdkError(pImage, rc, RT_SRC_POS, N_("VMDK: error storing modification UUID in descriptor in '%s'"), pImage->pszFilename);
6358 }
6359 rc = VINF_SUCCESS;
6360 }
6361 else
6362 rc = VERR_VD_IMAGE_READ_ONLY;
6363 }
6364 else
6365 rc = VERR_VD_NOT_OPENED;
6366
6367 LogFlowFunc(("returns %Rrc\n", rc));
6368 return rc;
6369}
6370
6371/** @copydoc VBOXHDDBACKEND::pfnGetParentUuid */
6372static int vmdkGetParentUuid(void *pBackendData, PRTUUID pUuid)
6373{
6374 LogFlowFunc(("pBackendData=%#p pUuid=%#p\n", pBackendData, pUuid));
6375 PVMDKIMAGE pImage = (PVMDKIMAGE)pBackendData;
6376 int rc;
6377
6378 AssertPtr(pImage);
6379
6380 if (pImage)
6381 {
6382 *pUuid = pImage->ParentUuid;
6383 rc = VINF_SUCCESS;
6384 }
6385 else
6386 rc = VERR_VD_NOT_OPENED;
6387
6388 LogFlowFunc(("returns %Rrc (%RTuuid)\n", rc, pUuid));
6389 return rc;
6390}
6391
6392/** @copydoc VBOXHDDBACKEND::pfnSetParentUuid */
6393static int vmdkSetParentUuid(void *pBackendData, PCRTUUID pUuid)
6394{
6395 LogFlowFunc(("pBackendData=%#p Uuid=%RTuuid\n", pBackendData, pUuid));
6396 PVMDKIMAGE pImage = (PVMDKIMAGE)pBackendData;
6397 int rc;
6398
6399 AssertPtr(pImage);
6400
6401 if (pImage)
6402 {
6403 if (!(pImage->uOpenFlags & VD_OPEN_FLAGS_READONLY))
6404 {
6405 pImage->ParentUuid = *pUuid;
6406 rc = vmdkDescDDBSetUuid(pImage, &pImage->Descriptor,
6407 VMDK_DDB_PARENT_UUID, pUuid);
6408 if (RT_FAILURE(rc))
6409 return vmdkError(pImage, rc, RT_SRC_POS, N_("VMDK: error storing parent image UUID in descriptor in '%s'"), pImage->pszFilename);
6410 rc = VINF_SUCCESS;
6411 }
6412 else
6413 rc = VERR_VD_IMAGE_READ_ONLY;
6414 }
6415 else
6416 rc = VERR_VD_NOT_OPENED;
6417
6418 LogFlowFunc(("returns %Rrc\n", rc));
6419 return rc;
6420}
6421
6422/** @copydoc VBOXHDDBACKEND::pfnGetParentModificationUuid */
6423static int vmdkGetParentModificationUuid(void *pBackendData, PRTUUID pUuid)
6424{
6425 LogFlowFunc(("pBackendData=%#p pUuid=%#p\n", pBackendData, pUuid));
6426 PVMDKIMAGE pImage = (PVMDKIMAGE)pBackendData;
6427 int rc;
6428
6429 AssertPtr(pImage);
6430
6431 if (pImage)
6432 {
6433 *pUuid = pImage->ParentModificationUuid;
6434 rc = VINF_SUCCESS;
6435 }
6436 else
6437 rc = VERR_VD_NOT_OPENED;
6438
6439 LogFlowFunc(("returns %Rrc (%RTuuid)\n", rc, pUuid));
6440 return rc;
6441}
6442
6443/** @copydoc VBOXHDDBACKEND::pfnSetParentModificationUuid */
6444static int vmdkSetParentModificationUuid(void *pBackendData, PCRTUUID pUuid)
6445{
6446 LogFlowFunc(("pBackendData=%#p Uuid=%RTuuid\n", pBackendData, pUuid));
6447 PVMDKIMAGE pImage = (PVMDKIMAGE)pBackendData;
6448 int rc;
6449
6450 AssertPtr(pImage);
6451
6452 if (pImage)
6453 {
6454 if (!(pImage->uOpenFlags & VD_OPEN_FLAGS_READONLY))
6455 {
6456 pImage->ParentModificationUuid = *pUuid;
6457 rc = vmdkDescDDBSetUuid(pImage, &pImage->Descriptor,
6458 VMDK_DDB_PARENT_MODIFICATION_UUID, pUuid);
6459 if (RT_FAILURE(rc))
6460 return vmdkError(pImage, rc, RT_SRC_POS, N_("VMDK: error storing parent image UUID in descriptor in '%s'"), pImage->pszFilename);
6461 rc = VINF_SUCCESS;
6462 }
6463 else
6464 rc = VERR_VD_IMAGE_READ_ONLY;
6465 }
6466 else
6467 rc = VERR_VD_NOT_OPENED;
6468
6469 LogFlowFunc(("returns %Rrc\n", rc));
6470 return rc;
6471}
6472
6473/** @copydoc VBOXHDDBACKEND::pfnDump */
6474static void vmdkDump(void *pBackendData)
6475{
6476 PVMDKIMAGE pImage = (PVMDKIMAGE)pBackendData;
6477
6478 AssertPtr(pImage);
6479 if (pImage)
6480 {
6481 vmdkMessage(pImage, "Header: Geometry PCHS=%u/%u/%u LCHS=%u/%u/%u cbSector=%llu\n",
6482 pImage->PCHSGeometry.cCylinders, pImage->PCHSGeometry.cHeads, pImage->PCHSGeometry.cSectors,
6483 pImage->LCHSGeometry.cCylinders, pImage->LCHSGeometry.cHeads, pImage->LCHSGeometry.cSectors,
6484 VMDK_BYTE2SECTOR(pImage->cbSize));
6485 vmdkMessage(pImage, "Header: uuidCreation={%RTuuid}\n", &pImage->ImageUuid);
6486 vmdkMessage(pImage, "Header: uuidModification={%RTuuid}\n", &pImage->ModificationUuid);
6487 vmdkMessage(pImage, "Header: uuidParent={%RTuuid}\n", &pImage->ParentUuid);
6488 vmdkMessage(pImage, "Header: uuidParentModification={%RTuuid}\n", &pImage->ParentModificationUuid);
6489 }
6490}
6491
6492/** @copydoc VBOXHDDBACKEND::pfnIsAsyncIOSupported */
6493static bool vmdkIsAsyncIOSupported(void *pBackendData)
6494{
6495 PVMDKIMAGE pImage = (PVMDKIMAGE)pBackendData;
6496
6497 /* We do not support async I/O for stream optimized VMDK images. */
6498 return (pImage->uImageFlags & VD_VMDK_IMAGE_FLAGS_STREAM_OPTIMIZED) == 0;
6499}
6500
/** @copydoc VBOXHDDBACKEND::pfnAsyncRead
 *
 * Asynchronous read. Mirrors vmdkRead but routes data through the I/O
 * context instead of a flat buffer. Never called for stream optimized
 * images (see vmdkIsAsyncIOSupported), hence no grain decompression path.
 */
static int vmdkAsyncRead(void *pBackendData, uint64_t uOffset, size_t cbRead,
                         PVDIOCTX pIoCtx, size_t *pcbActuallyRead)
{
    LogFlowFunc(("pBackendData=%#p uOffset=%llu pIoCtx=%#p cbToRead=%zu pcbActuallyRead=%#p\n",
                 pBackendData, uOffset, pIoCtx, cbRead, pcbActuallyRead));
    PVMDKIMAGE pImage = (PVMDKIMAGE)pBackendData;
    PVMDKEXTENT pExtent;
    uint64_t uSectorExtentRel;
    uint64_t uSectorExtentAbs;
    int rc;

    AssertPtr(pImage);
    /* The VD layer guarantees sector-aligned requests. */
    Assert(uOffset % 512 == 0);
    Assert(cbRead % 512 == 0);

    if (   uOffset + cbRead > pImage->cbSize
        || cbRead == 0)
    {
        rc = VERR_INVALID_PARAMETER;
        goto out;
    }

    /* Map the absolute sector to an extent and an extent-relative sector. */
    rc = vmdkFindExtent(pImage, VMDK_BYTE2SECTOR(uOffset),
                        &pExtent, &uSectorExtentRel);
    if (RT_FAILURE(rc))
        goto out;

    /* Check access permissions as defined in the extent descriptor. */
    if (pExtent->enmAccess == VMDKACCESS_NOACCESS)
    {
        rc = VERR_VD_VMDK_INVALID_STATE;
        goto out;
    }

    /* Clip read range to remain in this extent. */
    cbRead = RT_MIN(cbRead, VMDK_SECTOR2BYTE(pExtent->uSectorOffset + pExtent->cNominalSectors - uSectorExtentRel));

    /* Handle the read according to the current extent type. */
    switch (pExtent->enmType)
    {
        case VMDKETYPE_HOSTED_SPARSE:
#ifdef VBOX_WITH_VMDK_ESX
        case VMDKETYPE_ESX_SPARSE:
#endif /* VBOX_WITH_VMDK_ESX */
            /* Async grain table lookup; 0 means "grain not allocated". */
            rc = vmdkGetSectorAsync(pImage, pIoCtx, pExtent,
                                    uSectorExtentRel, &uSectorExtentAbs);
            if (RT_FAILURE(rc))
                goto out;
            /* Clip read range to at most the rest of the grain. */
            cbRead = RT_MIN(cbRead, VMDK_SECTOR2BYTE(pExtent->cSectorsPerGrain - uSectorExtentRel % pExtent->cSectorsPerGrain));
            Assert(!(cbRead % 512));
            if (uSectorExtentAbs == 0)
                rc = VERR_VD_BLOCK_FREE;
            else
            {
                AssertMsg(!(pImage->uImageFlags & VD_VMDK_IMAGE_FLAGS_STREAM_OPTIMIZED), ("Async I/O is not supported for stream optimized VMDK's\n"));
                rc = vmdkFileReadUserAsync(pImage, pExtent->pFile,
                                           VMDK_SECTOR2BYTE(uSectorExtentAbs),
                                           pIoCtx, cbRead);
            }
            break;
        case VMDKETYPE_VMFS:
        case VMDKETYPE_FLAT:
            /* Flat extents map 1:1, no grain translation needed. */
            rc = vmdkFileReadUserAsync(pImage, pExtent->pFile,
                                       VMDK_SECTOR2BYTE(uSectorExtentRel),
                                       pIoCtx, cbRead);
            break;
        case VMDKETYPE_ZERO:
            /* Zero extents have no backing storage: fill the I/O context. */
            size_t cbSet;

            cbSet = vmdkFileIoCtxSet(pImage, pIoCtx, 0, cbRead);
            Assert(cbSet == cbRead);

            rc = VINF_SUCCESS;
            break;
    }
    if (pcbActuallyRead)
        *pcbActuallyRead = cbRead;

out:
    LogFlowFunc(("returns %Rrc\n", rc));
    return rc;
}
6585
/** @copydoc VBOXHDDBACKEND::pfnAsyncWrite
 *
 * Writes cbWrite bytes at byte offset uOffset from the I/O context pIoCtx
 * into the image.  For sparse extents the request is clipped so it never
 * crosses a grain boundary; the byte count actually accepted is returned in
 * *pcbWriteProcess.  A partial write into a not-yet-allocated grain returns
 * VERR_VD_BLOCK_FREE together with *pcbPreRead / *pcbPostRead so the caller
 * can perform a read-modify-write cycle.
 */
static int vmdkAsyncWrite(void *pBackendData, uint64_t uOffset, size_t cbWrite,
                          PVDIOCTX pIoCtx,
                          size_t *pcbWriteProcess, size_t *pcbPreRead,
                          size_t *pcbPostRead, unsigned fWrite)
{
    LogFlowFunc(("pBackendData=%#p uOffset=%llu pIoCtx=%#p cbToWrite=%zu pcbWriteProcess=%#p pcbPreRead=%#p pcbPostRead=%#p\n",
                 pBackendData, uOffset, pIoCtx, cbWrite, pcbWriteProcess, pcbPreRead, pcbPostRead));
    PVMDKIMAGE pImage = (PVMDKIMAGE)pBackendData;
    PVMDKEXTENT pExtent;
    uint64_t uSectorExtentRel;
    uint64_t uSectorExtentAbs;
    int rc;

    AssertPtr(pImage);
    /* Requests must be sector aligned (512 byte sectors). */
    Assert(uOffset % 512 == 0);
    Assert(cbWrite % 512 == 0);

    if (pImage->uOpenFlags & VD_OPEN_FLAGS_READONLY)
    {
        rc = VERR_VD_IMAGE_READ_ONLY;
        goto out;
    }

    if (cbWrite == 0)
    {
        rc = VERR_INVALID_PARAMETER;
        goto out;
    }

    /* No size check here, will do that later when the extent is located.
     * There are sparse images out there which according to the spec are
     * invalid, because the total size is not a multiple of the grain size.
     * Also for sparse images which are stitched together in odd ways (not at
     * grain boundaries, and with the nominal size not being a multiple of the
     * grain size), this would prevent writing to the last grain. */

    rc = vmdkFindExtent(pImage, VMDK_BYTE2SECTOR(uOffset),
                        &pExtent, &uSectorExtentRel);
    if (RT_FAILURE(rc))
        goto out;

    /* Check access permissions as defined in the extent descriptor. */
    if (pExtent->enmAccess != VMDKACCESS_READWRITE)
    {
        rc = VERR_VD_VMDK_INVALID_STATE;
        goto out;
    }

    /* Handle the write according to the current extent type. */
    switch (pExtent->enmType)
    {
        case VMDKETYPE_HOSTED_SPARSE:
#ifdef VBOX_WITH_VMDK_ESX
        case VMDKETYPE_ESX_SPARSE:
#endif /* VBOX_WITH_VMDK_ESX */
            /* Translate the extent-relative sector to the absolute file
             * sector via the grain table (0 means grain not allocated). */
            rc = vmdkGetSectorAsync(pImage, pIoCtx, pExtent, uSectorExtentRel,
                                    &uSectorExtentAbs);
            if (RT_FAILURE(rc))
                goto out;
            /* Clip write range to at most the rest of the grain. */
            cbWrite = RT_MIN(cbWrite, VMDK_SECTOR2BYTE(pExtent->cSectorsPerGrain - uSectorExtentRel % pExtent->cSectorsPerGrain));
            /* Stream optimized images are written strictly sequentially:
             * going back before the last written grain is rejected. */
            if (   pImage->uImageFlags & VD_VMDK_IMAGE_FLAGS_STREAM_OPTIMIZED
                && uSectorExtentRel < (uint64_t)pExtent->uLastGrainWritten * pExtent->cSectorsPerGrain)
            {
                rc = VERR_VD_VMDK_INVALID_WRITE;
                goto out;
            }
            if (uSectorExtentAbs == 0)
            {
                if (cbWrite == VMDK_SECTOR2BYTE(pExtent->cSectorsPerGrain))
                {
                    /* Full block write to a previously unallocated block.
                     * Check if the caller wants to avoid the automatic alloc. */
                    if (!(fWrite & VD_WRITE_NO_ALLOC))
                    {
                        /* Allocate GT and find out where to store the grain. */
                        rc = vmdkAllocGrainAsync(pImage, pExtent, pIoCtx,
                                                 uSectorExtentRel, cbWrite);
                    }
                    else
                        rc = VERR_VD_BLOCK_FREE;
                    /* Full grain covered: no surrounding data to merge. */
                    *pcbPreRead = 0;
                    *pcbPostRead = 0;
                }
                else
                {
                    /* Clip write range to remain in this extent. */
                    cbWrite = RT_MIN(cbWrite, VMDK_SECTOR2BYTE(pExtent->uSectorOffset + pExtent->cNominalSectors - uSectorExtentRel));
                    /* Partial write to a free grain: report the byte counts
                     * before/after the write so the caller can do a
                     * read-modify-write of the whole grain. */
                    *pcbPreRead = VMDK_SECTOR2BYTE(uSectorExtentRel % pExtent->cSectorsPerGrain);
                    *pcbPostRead = VMDK_SECTOR2BYTE(pExtent->cSectorsPerGrain) - cbWrite - *pcbPreRead;
                    rc = VERR_VD_BLOCK_FREE;
                }
            }
            else
            {
                /* Grain already allocated, write in place (not possible for
                 * stream optimized images, which never rewrite grains). */
                Assert(!(pImage->uImageFlags & VD_VMDK_IMAGE_FLAGS_STREAM_OPTIMIZED));
                rc = vmdkFileWriteUserAsync(pImage, pExtent->pFile,
                                            VMDK_SECTOR2BYTE(uSectorExtentAbs),
                                            pIoCtx, cbWrite, NULL, NULL);
            }
            break;
        case VMDKETYPE_VMFS:
        case VMDKETYPE_FLAT:
            /* Clip write range to remain in this extent. */
            cbWrite = RT_MIN(cbWrite, VMDK_SECTOR2BYTE(pExtent->uSectorOffset + pExtent->cNominalSectors - uSectorExtentRel));
            rc = vmdkFileWriteUserAsync(pImage, pExtent->pFile,
                                        VMDK_SECTOR2BYTE(uSectorExtentRel),
                                        pIoCtx, cbWrite, NULL, NULL);
            break;
        case VMDKETYPE_ZERO:
            /* Clip write range to remain in this extent.  Data is dropped;
             * rc still holds the success status from locating the extent. */
            cbWrite = RT_MIN(cbWrite, VMDK_SECTOR2BYTE(pExtent->uSectorOffset + pExtent->cNominalSectors - uSectorExtentRel));
            break;
    }

    if (pcbWriteProcess)
        *pcbWriteProcess = cbWrite;

out:
    LogFlowFunc(("returns %Rrc\n", rc));
    return rc;
}
6709
6710/** @copydoc VBOXHDDBACKEND::pfnAsyncFlush */
6711static int vmdkAsyncFlush(void *pBackendData, PVDIOCTX pIoCtx)
6712{
6713 PVMDKIMAGE pImage = (PVMDKIMAGE)pBackendData;
6714 PVMDKEXTENT pExtent;
6715 int rc = VINF_SUCCESS;
6716
6717 for (unsigned i = 0; i < pImage->cExtents; i++)
6718 {
6719 pExtent = &pImage->pExtents[i];
6720 if (pExtent->pFile != NULL && pExtent->fMetaDirty)
6721 {
6722 switch (pExtent->enmType)
6723 {
6724 case VMDKETYPE_HOSTED_SPARSE:
6725#ifdef VBOX_WITH_VMDK_ESX
6726 case VMDKETYPE_ESX_SPARSE:
6727#endif /* VBOX_WITH_VMDK_ESX */
6728 rc = vmdkWriteMetaSparseExtentAsync(pImage, pExtent, 0, pIoCtx);
6729 if (RT_FAILURE(rc) && (rc != VERR_VD_ASYNC_IO_IN_PROGRESS))
6730 goto out;
6731 if (pExtent->fFooter)
6732 {
6733 uint64_t cbSize;
6734 rc = vmdkFileGetSize(pImage, pExtent->pFile, &cbSize);
6735 if (RT_FAILURE(rc))
6736 goto out;
6737 cbSize = RT_ALIGN_64(cbSize, 512);
6738 rc = vmdkWriteMetaSparseExtent(pImage, pExtent, cbSize - 2*512);
6739 if (RT_FAILURE(rc) && (rc != VERR_VD_ASYNC_IO_IN_PROGRESS))
6740 goto out;
6741 }
6742 break;
6743 case VMDKETYPE_VMFS:
6744 case VMDKETYPE_FLAT:
6745 /* Nothing to do. */
6746 break;
6747 case VMDKETYPE_ZERO:
6748 default:
6749 AssertMsgFailed(("extent with type %d marked as dirty\n",
6750 pExtent->enmType));
6751 break;
6752 }
6753 }
6754 switch (pExtent->enmType)
6755 {
6756 case VMDKETYPE_HOSTED_SPARSE:
6757#ifdef VBOX_WITH_VMDK_ESX
6758 case VMDKETYPE_ESX_SPARSE:
6759#endif /* VBOX_WITH_VMDK_ESX */
6760 case VMDKETYPE_VMFS:
6761 case VMDKETYPE_FLAT:
6762 /** @todo implement proper path absolute check. */
6763 if ( pExtent->pFile != NULL
6764 && !(pImage->uOpenFlags & VD_OPEN_FLAGS_READONLY)
6765 && !(pExtent->pszBasename[0] == RTPATH_SLASH))
6766 rc = vmdkFileFlushAsync(pImage, pExtent->pFile, pIoCtx);
6767 break;
6768 case VMDKETYPE_ZERO:
6769 /* No need to do anything for this extent. */
6770 break;
6771 default:
6772 AssertMsgFailed(("unknown extent type %d\n", pExtent->enmType));
6773 break;
6774 }
6775 }
6776
6777out:
6778 return rc;
6779}
6780
6781
/**
 * VMDK backend descriptor.
 *
 * Positional initializer of the VBOXHDDBACKEND function table; each entry is
 * labelled with the member it initializes.  NULL entries are optional
 * callbacks this backend does not implement (timestamps, parent filename,
 * compact, resize).
 */
VBOXHDDBACKEND g_VmdkBackend =
{
    /* pszBackendName */
    "VMDK",
    /* cbSize */
    sizeof(VBOXHDDBACKEND),
    /* uBackendCaps */
    VD_CAP_UUID | VD_CAP_CREATE_FIXED | VD_CAP_CREATE_DYNAMIC
    | VD_CAP_CREATE_SPLIT_2G | VD_CAP_DIFF | VD_CAP_FILE | VD_CAP_ASYNC
    | VD_CAP_VFS,
    /* papszFileExtensions */
    s_apszVmdkFileExtensions,
    /* paConfigInfo */
    NULL,
    /* hPlugin */
    NIL_RTLDRMOD,
    /* pfnCheckIfValid */
    vmdkCheckIfValid,
    /* pfnOpen */
    vmdkOpen,
    /* pfnCreate */
    vmdkCreate,
    /* pfnRename */
    vmdkRename,
    /* pfnClose */
    vmdkClose,
    /* pfnRead */
    vmdkRead,
    /* pfnWrite */
    vmdkWrite,
    /* pfnFlush */
    vmdkFlush,
    /* pfnGetVersion */
    vmdkGetVersion,
    /* pfnGetSize */
    vmdkGetSize,
    /* pfnGetFileSize */
    vmdkGetFileSize,
    /* pfnGetPCHSGeometry */
    vmdkGetPCHSGeometry,
    /* pfnSetPCHSGeometry */
    vmdkSetPCHSGeometry,
    /* pfnGetLCHSGeometry */
    vmdkGetLCHSGeometry,
    /* pfnSetLCHSGeometry */
    vmdkSetLCHSGeometry,
    /* pfnGetImageFlags */
    vmdkGetImageFlags,
    /* pfnGetOpenFlags */
    vmdkGetOpenFlags,
    /* pfnSetOpenFlags */
    vmdkSetOpenFlags,
    /* pfnGetComment */
    vmdkGetComment,
    /* pfnSetComment */
    vmdkSetComment,
    /* pfnGetUuid */
    vmdkGetUuid,
    /* pfnSetUuid */
    vmdkSetUuid,
    /* pfnGetModificationUuid */
    vmdkGetModificationUuid,
    /* pfnSetModificationUuid */
    vmdkSetModificationUuid,
    /* pfnGetParentUuid */
    vmdkGetParentUuid,
    /* pfnSetParentUuid */
    vmdkSetParentUuid,
    /* pfnGetParentModificationUuid */
    vmdkGetParentModificationUuid,
    /* pfnSetParentModificationUuid */
    vmdkSetParentModificationUuid,
    /* pfnDump */
    vmdkDump,
    /* pfnGetTimeStamp */
    NULL,
    /* pfnGetParentTimeStamp */
    NULL,
    /* pfnSetParentTimeStamp */
    NULL,
    /* pfnGetParentFilename */
    NULL,
    /* pfnSetParentFilename */
    NULL,
    /* pfnIsAsyncIOSupported */
    vmdkIsAsyncIOSupported,
    /* pfnAsyncRead */
    vmdkAsyncRead,
    /* pfnAsyncWrite */
    vmdkAsyncWrite,
    /* pfnAsyncFlush */
    vmdkAsyncFlush,
    /* pfnComposeLocation */
    genericFileComposeLocation,
    /* pfnComposeName */
    genericFileComposeName,
    /* pfnCompact */
    NULL,
    /* pfnResize */
    NULL
};
Note: See TracBrowser for help on using the repository browser.

© 2024 Oracle Support Privacy / Do Not Sell My Info Terms of Use Trademark Policy Automated Access Etiquette