VirtualBox

source: vbox/trunk/src/VBox/Devices/Storage/VmdkHDDCore.cpp@ 32610

Last change on this file since 32610 was 32610, checked in by vboxsync, 14 years ago

Storage/VMDK: some fixes to get the special streamOptimized header 100% right

  • Property svn:eol-style set to native
  • Property svn:keywords set to Author Date Id Revision
File size: 284.8 KB
Line 
1/* $Id: VmdkHDDCore.cpp 32610 2010-09-17 14:58:23Z vboxsync $ */
2/** @file
3 * VMDK disk image, core code.
4 */
5
6/*
7 * Copyright (C) 2006-2010 Oracle Corporation
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 */
17
18/*******************************************************************************
19* Header Files *
20*******************************************************************************/
21#define LOG_GROUP LOG_GROUP_VD_VMDK
22#include <VBox/VBoxHDD-Plugin.h>
23#include <VBox/err.h>
24
25#include <VBox/log.h>
26#include <iprt/assert.h>
27#include <iprt/alloc.h>
28#include <iprt/uuid.h>
29#include <iprt/path.h>
30#include <iprt/string.h>
31#include <iprt/rand.h>
32#include <iprt/zip.h>
33
34
35/*******************************************************************************
36* Constants And Macros, Structures and Typedefs *
37*******************************************************************************/
38
/** Maximum encoded string size (including NUL) we allow for VMDK images.
 * Deliberately not set high to avoid running out of descriptor space. */
#define VMDK_ENCODED_COMMENT_MAX 1024

/** VMDK descriptor DDB entry for PCHS cylinders. */
#define VMDK_DDB_GEO_PCHS_CYLINDERS "ddb.geometry.cylinders"

/** VMDK descriptor DDB entry for PCHS heads. */
#define VMDK_DDB_GEO_PCHS_HEADS "ddb.geometry.heads"

/** VMDK descriptor DDB entry for PCHS sectors. */
#define VMDK_DDB_GEO_PCHS_SECTORS "ddb.geometry.sectors"

/** VMDK descriptor DDB entry for LCHS cylinders. */
#define VMDK_DDB_GEO_LCHS_CYLINDERS "ddb.geometry.biosCylinders"

/** VMDK descriptor DDB entry for LCHS heads. */
#define VMDK_DDB_GEO_LCHS_HEADS "ddb.geometry.biosHeads"

/** VMDK descriptor DDB entry for LCHS sectors. */
#define VMDK_DDB_GEO_LCHS_SECTORS "ddb.geometry.biosSectors"

/** VMDK descriptor DDB entry for image UUID. */
#define VMDK_DDB_IMAGE_UUID "ddb.uuid.image"

/** VMDK descriptor DDB entry for image modification UUID. */
#define VMDK_DDB_MODIFICATION_UUID "ddb.uuid.modification"

/** VMDK descriptor DDB entry for parent image UUID. */
#define VMDK_DDB_PARENT_UUID "ddb.uuid.parent"

/** VMDK descriptor DDB entry for parent image modification UUID. */
#define VMDK_DDB_PARENT_MODIFICATION_UUID "ddb.uuid.parentmodification"

/** No compression for streamOptimized files. */
#define VMDK_COMPRESSION_NONE 0

/** Deflate compression for streamOptimized files. */
#define VMDK_COMPRESSION_DEFLATE 1

/** Marker that the actual GD value is stored in the footer
 * (all-ones sentinel in the header's gdOffset field). */
#define VMDK_GD_AT_END 0xffffffffffffffffULL

/** Marker for end-of-stream in streamOptimized images. */
#define VMDK_MARKER_EOS 0

/** Marker for grain table block in streamOptimized images. */
#define VMDK_MARKER_GT 1

/** Marker for grain directory block in streamOptimized images. */
#define VMDK_MARKER_GD 2

/** Marker for footer in streamOptimized images. */
#define VMDK_MARKER_FOOTER 3

/** Dummy marker value meaning "don't check the marker type field". */
#define VMDK_MARKER_IGNORE 0xffffffffU

/**
 * Magic number for hosted images created by VMware Workstation 4, VMware
 * Workstation 5, VMware Server or VMware Player. Not necessarily sparse.
 */
#define VMDK_SPARSE_MAGICNUMBER 0x564d444b /* 'V' 'M' 'D' 'K' */
/**
 * VMDK hosted binary extent header. The "Sparse" is a total misnomer, as
 * this header is also used for monolithic flat images.
 *
 * On-disk layout: packed, little endian, exactly 512 bytes (one sector)
 * including the trailing padding. Do not reorder or resize fields.
 */
#pragma pack(1)
typedef struct SparseExtentHeader
{
    uint32_t magicNumber;       /**< VMDK_SPARSE_MAGICNUMBER. */
    uint32_t version;           /**< Header format version. */
    uint32_t flags;             /**< Capability/validity flags. */
    uint64_t capacity;          /**< Extent capacity in sectors. */
    uint64_t grainSize;         /**< Grain size in sectors. */
    uint64_t descriptorOffset;  /**< Descriptor start sector (if embedded). */
    uint64_t descriptorSize;    /**< Descriptor size in sectors. */
    uint32_t numGTEsPerGT;      /**< Number of entries per grain table. */
    uint64_t rgdOffset;         /**< Redundant grain directory start sector. */
    uint64_t gdOffset;          /**< Grain directory start sector (may be VMDK_GD_AT_END). */
    uint64_t overHead;          /**< Metadata overhead in sectors. */
    bool uncleanShutdown;       /**< Set while the extent is open for writing. */
    char singleEndLineChar;     /**< Line-ending detection characters ... */
    char nonEndLineChar;        /**< ... used to spot transfer corruption ... */
    char doubleEndLineChar1;    /**< ... of the text descriptor. */
    char doubleEndLineChar2;
    uint16_t compressAlgorithm; /**< VMDK_COMPRESSION_NONE/DEFLATE. */
    uint8_t pad[433];           /**< Pads the header to exactly 512 bytes. */
} SparseExtentHeader;
#pragma pack()
130
/** VMDK capacity for a single chunk when 2G splitting is turned on. Should be
 * divisible by the default grain size (64K). Note: 2047 * 1024 * 1024 still
 * fits in a signed 32-bit int, so the constant expression does not overflow. */
#define VMDK_2G_SPLIT_SIZE (2047 * 1024 * 1024)
134
/** VMDK streamOptimized file format marker. The type field may or may not
 * be actually valid, but there's always data to read there.
 *
 * Packed on-disk layout; for compressed grains only the first 12 bytes
 * (uSector + cbSize) form the marker and the payload starts at uType. */
#pragma pack(1)
typedef struct VMDKMARKER
{
    uint64_t uSector;   /**< Grain LBA, or payload size in sectors for GT/GD/footer markers. */
    uint32_t cbSize;    /**< Compressed payload size in bytes; 0 for metadata markers. */
    uint32_t uType;     /**< VMDK_MARKER_* type, only meaningful when cbSize is 0. */
} VMDKMARKER, *PVMDKMARKER;
#pragma pack()
145
146
#ifdef VBOX_WITH_VMDK_ESX

/** @todo the ESX code is not tested, not used, and lacks error messages. */

/**
 * Magic number for images created by VMware GSX Server 3 or ESX Server 3.
 */
#define VMDK_ESX_SPARSE_MAGICNUMBER 0x44574f43 /* 'C' 'O' 'W' 'D' */

/**
 * ESX COWD sparse extent header. Packed on-disk layout, 2048 bytes
 * (4 sectors) in total including the padding. Do not reorder fields.
 */
#pragma pack(1)
typedef struct COWDisk_Header
{
    uint32_t magicNumber;       /**< VMDK_ESX_SPARSE_MAGICNUMBER. */
    uint32_t version;
    uint32_t flags;
    uint32_t numSectors;        /**< Extent capacity in sectors. */
    uint32_t grainSize;         /**< Grain size in sectors. */
    uint32_t gdOffset;          /**< Grain directory start sector. */
    uint32_t numGDEntries;
    uint32_t freeSector;
    /* The spec incompletely documents quite a few further fields, but states
     * that they are unused by the current format. Replace them by padding. */
    char reserved1[1604];
    uint32_t savedGeneration;
    char reserved2[8];
    uint32_t uncleanShutdown;
    char padding[396];          /**< Pads the header to exactly 2048 bytes. */
} COWDisk_Header;
#pragma pack()
#endif /* VBOX_WITH_VMDK_ESX */
177
178
/** Convert sector number/size to byte offset/size (512-byte sectors).
 * Widens to uint64_t first so large sector numbers don't overflow. */
#define VMDK_SECTOR2BYTE(u) ((uint64_t)(u) << 9)

/** Convert byte offset/size to sector number/size (truncating division by 512). */
#define VMDK_BYTE2SECTOR(u) ((u) >> 9)
184
/**
 * VMDK extent type.
 */
typedef enum VMDKETYPE
{
    /** Hosted sparse extent. */
    VMDKETYPE_HOSTED_SPARSE = 1,
    /** Flat extent. */
    VMDKETYPE_FLAT,
    /** Zero extent. */
    VMDKETYPE_ZERO,
    /** VMFS extent, used by ESX. */
    VMDKETYPE_VMFS
#ifdef VBOX_WITH_VMDK_ESX
    ,
    /** ESX sparse extent. */
    VMDKETYPE_ESX_SPARSE
#endif /* VBOX_WITH_VMDK_ESX */
} VMDKETYPE, *PVMDKETYPE;
204
/**
 * VMDK access type for an extent.
 */
typedef enum VMDKACCESS
{
    /** No access allowed. */
    VMDKACCESS_NOACCESS = 0,
    /** Read-only access. */
    VMDKACCESS_READONLY,
    /** Read-write access. */
    VMDKACCESS_READWRITE
} VMDKACCESS, *PVMDKACCESS;
217
/** Forward declaration for PVMDKIMAGE. */
typedef struct VMDKIMAGE *PVMDKIMAGE;

/**
 * Extent file entry. Used for opening a particular file only once; entries
 * are reference counted and kept in a doubly linked list per image
 * (see vmdkFileOpen/vmdkFileClose).
 */
typedef struct VMDKFILE
{
    /** Pointer to filename. Local copy (RTStrDup'ed, freed on last close). */
    const char *pszFilename;
    /** File open flags for consistency checking. */
    unsigned fOpen;
    /** Flag whether this file has been opened for async I/O. */
    bool fAsyncIO;
    /** Handle for sync/async file abstraction.*/
    PVDIOSTORAGE pStorage;
    /** Reference counter. */
    unsigned uReferences;
    /** Flag whether the file should be deleted on last close. */
    bool fDelete;
    /** Pointer to the image we belong to (for debugging purposes). */
    PVMDKIMAGE pImage;
    /** Pointer to next file descriptor. */
    struct VMDKFILE *pNext;
    /** Pointer to the previous file descriptor. */
    struct VMDKFILE *pPrev;
} VMDKFILE, *PVMDKFILE;
245
/**
 * VMDK extent data structure. One per extent listed in the descriptor;
 * all offsets/sizes are in 512-byte sectors unless stated otherwise.
 */
typedef struct VMDKEXTENT
{
    /** File handle. */
    PVMDKFILE pFile;
    /** Base name of the image extent. */
    const char *pszBasename;
    /** Full name of the image extent. */
    const char *pszFullname;
    /** Number of sectors in this extent. */
    uint64_t cSectors;
    /** Number of sectors per block (grain in VMDK speak). */
    uint64_t cSectorsPerGrain;
    /** Starting sector number of descriptor. */
    uint64_t uDescriptorSector;
    /** Size of descriptor in sectors. */
    uint64_t cDescriptorSectors;
    /** Starting sector number of grain directory. */
    uint64_t uSectorGD;
    /** Starting sector number of redundant grain directory. */
    uint64_t uSectorRGD;
    /** Total number of metadata sectors. */
    uint64_t cOverheadSectors;
    /** Nominal size (i.e. as described by the descriptor) of this extent. */
    uint64_t cNominalSectors;
    /** Sector offset (i.e. as described by the descriptor) of this extent. */
    uint64_t uSectorOffset;
    /** Number of entries in a grain table. */
    uint32_t cGTEntries;
    /** Number of sectors reachable via a grain directory entry. */
    uint32_t cSectorsPerGDE;
    /** Number of entries in the grain directory. */
    uint32_t cGDEntries;
    /** Pointer to the next free sector. Legacy information. Do not use. */
    uint32_t uFreeSector;
    /** Number of this extent in the list of images. */
    uint32_t uExtent;
    /** Pointer to the descriptor (NULL if no descriptor in this extent). */
    char *pDescData;
    /** Pointer to the grain directory. */
    uint32_t *pGD;
    /** Pointer to the redundant grain directory. */
    uint32_t *pRGD;
    /** VMDK version of this extent. 1=1.0/1.1 */
    uint32_t uVersion;
    /** Type of this extent. */
    VMDKETYPE enmType;
    /** Access to this extent. */
    VMDKACCESS enmAccess;
    /** Flag whether this extent is marked as unclean. */
    bool fUncleanShutdown;
    /** Flag whether the metadata in the extent header needs to be updated. */
    bool fMetaDirty;
    /** Flag whether there is a footer in this extent. */
    bool fFooter;
    /** Compression type for this extent (VMDK_COMPRESSION_*). */
    uint16_t uCompression;
    /** Last grain which has been written to. Only for streamOptimized extents. */
    uint32_t uLastGrainWritten;
    /** Sector number of last grain which has been written to. Only for
     * streamOptimized extents. */
    uint32_t uLastGrainSector;
    /** Data size of last grain which has been written to. Only for
     * streamOptimized extents. */
    uint32_t cbLastGrainWritten;
    /** Starting sector of the decompressed grain buffer. */
    uint32_t uGrainSector;
    /** Decompressed grain buffer for streamOptimized extents. */
    void *pvGrain;
    /** Reference to the image in which this extent is used. Do not use this
     * on a regular basis to avoid passing pImage references to functions
     * explicitly. */
    struct VMDKIMAGE *pImage;
} VMDKEXTENT, *PVMDKEXTENT;
322
/**
 * Grain table cache size. Allocated per image.
 */
#define VMDK_GT_CACHE_SIZE 256

/**
 * Grain table block size. Smaller than an actual grain table block to allow
 * more grain table blocks to be cached without having to allocate excessive
 * amounts of memory for the cache.
 */
#define VMDK_GT_CACHELINE_SIZE 128


/**
 * Maximum number of lines in a descriptor file. Not worth the effort of
 * making it variable. Descriptor files are generally very short (~20 lines),
 * with the exception of sparse files split in 2G chunks, which need for the
 * maximum size (almost 2T) exactly 1025 lines for the disk database.
 */
#define VMDK_DESCRIPTOR_LINES_MAX 1100U
343
/**
 * Parsed descriptor information. Allows easy access and update of the
 * descriptor (whether separate file or not). Free form text files suck.
 */
typedef struct VMDKDESCRIPTOR
{
    /** Line number of first entry of the disk descriptor. */
    unsigned uFirstDesc;
    /** Line number of first entry in the extent description. */
    unsigned uFirstExtent;
    /** Line number of first disk database entry. */
    unsigned uFirstDDB;
    /** Total number of lines. */
    unsigned cLines;
    /** Total amount of memory available for the descriptor. */
    size_t cbDescAlloc;
    /** Set if descriptor has been changed and not yet written to disk. */
    bool fDirty;
    /** Array of pointers to the data in the descriptor (one per line). */
    char *aLines[VMDK_DESCRIPTOR_LINES_MAX];
    /** Array of line indices pointing to the next non-comment line. */
    unsigned aNextLines[VMDK_DESCRIPTOR_LINES_MAX];
} VMDKDESCRIPTOR, *PVMDKDESCRIPTOR;
367
368
/**
 * Cache entry for translating extent/sector to a sector number in that
 * extent.
 */
typedef struct VMDKGTCACHEENTRY
{
    /** Extent number for which this entry is valid. */
    uint32_t uExtent;
    /** GT data block number. */
    uint64_t uGTBlock;
    /** Data part of the cache entry (VMDK_GT_CACHELINE_SIZE grain table entries). */
    uint32_t aGTData[VMDK_GT_CACHELINE_SIZE];
} VMDKGTCACHEENTRY, *PVMDKGTCACHEENTRY;
382
/**
 * Cache data structure for blocks of grain table entries. For now this is a
 * fixed size direct mapping cache, but this should be adapted to the size of
 * the sparse image and maybe converted to a set-associative cache. The
 * implementation below implements a write-through cache with write allocate.
 */
typedef struct VMDKGTCACHE
{
    /** Cache entries. */
    VMDKGTCACHEENTRY aGTCache[VMDK_GT_CACHE_SIZE];
    /** Number of cache entries (currently unused). */
    unsigned cEntries;
} VMDKGTCACHE, *PVMDKGTCACHE;
396
/**
 * Complete VMDK image data structure. Mainly a collection of extents and a few
 * extra global data fields.
 */
typedef struct VMDKIMAGE
{
    /** Image name. */
    const char *pszFilename;
    /** Descriptor file if applicable. */
    PVMDKFILE pFile;
    /** I/O interface. */
    PVDINTERFACE pInterfaceIO;
    /** I/O interface callbacks. */
    PVDINTERFACEIOINT pInterfaceIOCallbacks;

    /** Pointer to the per-disk VD interface list. */
    PVDINTERFACE pVDIfsDisk;
    /** Pointer to the per-image VD interface list. */
    PVDINTERFACE pVDIfsImage;

    /** Error interface. */
    PVDINTERFACE pInterfaceError;
    /** Error interface callbacks. */
    PVDINTERFACEERROR pInterfaceErrorCallbacks;

    /** Pointer to the image extents (array of cExtents entries). */
    PVMDKEXTENT pExtents;
    /** Number of image extents. */
    unsigned cExtents;
    /** Pointer to the files list, for opening a file referenced multiple
     * times only once (happens mainly with raw partition access). */
    PVMDKFILE pFiles;

    /**
     * Pointer to an array of segment entries for async I/O.
     * This is an optimization because the task number to submit is not known
     * and allocating/freeing an array in the read/write functions every time
     * is too expensive.
     */
    PPDMDATASEG paSegments;
    /** Entries available in the segments array. */
    unsigned cSegments;

    /** Open flags passed by VBoxHD layer. */
    unsigned uOpenFlags;
    /** Image flags defined during creation or determined during open. */
    unsigned uImageFlags;
    /** Total size of the image. */
    uint64_t cbSize;
    /** Physical geometry of this image. */
    VDGEOMETRY PCHSGeometry;
    /** Logical geometry of this image. */
    VDGEOMETRY LCHSGeometry;
    /** Image UUID. */
    RTUUID ImageUuid;
    /** Image modification UUID. */
    RTUUID ModificationUuid;
    /** Parent image UUID. */
    RTUUID ParentUuid;
    /** Parent image modification UUID. */
    RTUUID ParentModificationUuid;

    /** Pointer to grain table cache, if this image contains sparse extents. */
    PVMDKGTCACHE pGTCache;
    /** Pointer to the descriptor (NULL if no separate descriptor file). */
    char *pDescData;
    /** Allocation size of the descriptor file. */
    size_t cbDescAlloc;
    /** Parsed descriptor file content. */
    VMDKDESCRIPTOR Descriptor;
} VMDKIMAGE;
468
469
/** State for the input callout of the inflate reader
 * (see vmdkFileInflateHelper). */
typedef struct VMDKINFLATESTATE
{
    /* Image this operation relates to. */
    PVMDKIMAGE pImage;
    /* File where the data is stored. */
    PVMDKFILE pFile;
    /* Total size of the compressed data left to read. */
    size_t cbSize;
    /* Offset in the file to read next. */
    uint64_t uFileOffset;
    /* Current read position; -1 means "zlib type byte not yet delivered". */
    ssize_t iOffset;
} VMDKINFLATESTATE;
484
/** State for the output callout of the deflate writer
 * (see vmdkFileDeflateHelper). */
typedef struct VMDKDEFLATESTATE
{
    /* Image this operation relates to. */
    PVMDKIMAGE pImage;
    /* File where the data is to be stored. */
    PVMDKFILE pFile;
    /* Offset in the file to write at next. */
    uint64_t uFileOffset;
    /* Current write position; -1 means "zlib type byte not yet stripped". */
    ssize_t iOffset;
} VMDKDEFLATESTATE;
497
/** Tracks an async grain allocation across its component transfers. */
typedef struct VMDKGRAINALLOCASYNC
{
    /** Old size of the extent. Used for rollback after an error. */
    uint64_t cbExtentOld;
    /** Flag whether the allocation failed. */
    bool fIoErr;
    /** Current number of transfers pending.
     * If reached 0 and there is an error the old state is restored. */
    unsigned cIoXfersPending;
    /** Sector number */
    uint64_t uSector;
    /** Flag whether the grain table needs to be updated. */
    bool fGTUpdateNeeded;
    /** Extent the allocation happens in. */
    PVMDKEXTENT pExtent;
    /** New size of the extent, required for the grain table update. */
    uint64_t cbExtentSize;
    /** Grain table sector. */
    uint64_t uGTSector;
    /** Backup (redundant) grain table sector. */
    uint64_t uRGTSector;
} VMDKGRAINALLOCASYNC, *PVMDKGRAINALLOCASYNC;
521
522/*******************************************************************************
523* Static Variables *
524*******************************************************************************/
525
/** NULL-terminated array of supported file extensions (no leading dot). */
static const char *const s_apszVmdkFileExtensions[] =
{
    "vmdk",
    NULL
};
532
533/*******************************************************************************
534* Internal Functions *
535*******************************************************************************/
536
/* Forward declarations for functions defined further down in this file. */
static void vmdkFreeGrainDirectory(PVMDKEXTENT pExtent);

static void vmdkFreeExtentData(PVMDKIMAGE pImage, PVMDKEXTENT pExtent,
                               bool fDelete);

static int vmdkCreateExtents(PVMDKIMAGE pImage, unsigned cExtents);
static int vmdkFlushImage(PVMDKIMAGE pImage);
static int vmdkSetImageComment(PVMDKIMAGE pImage, const char *pszComment);
static int vmdkFreeImage(PVMDKIMAGE pImage, bool fDelete);

static int vmdkAllocGrainAsyncComplete(void *pBackendData, PVDIOCTX pIoCtx, void *pvUser, int rcReq);
548
/**
 * Internal: signal an error to the frontend.
 *
 * Forwards @a rc plus the formatted message to the frontend's error
 * interface (if one is installed) and always returns @a rc unchanged,
 * so callers can write "return vmdkError(pImage, rc, ...);".
 */
DECLINLINE(int) vmdkError(PVMDKIMAGE pImage, int rc, RT_SRC_POS_DECL,
                          const char *pszFormat, ...)
{
    va_list va;
    va_start(va, pszFormat);
    if (pImage->pInterfaceError && pImage->pInterfaceErrorCallbacks)
        pImage->pInterfaceErrorCallbacks->pfnError(pImage->pInterfaceError->pvUser, rc, RT_SRC_POS_ARGS,
                                                   pszFormat, va);
    va_end(va);
    return rc;
}
563
/**
 * Internal: signal an informational message to the frontend.
 *
 * Returns VINF_SUCCESS when no message callback is installed, otherwise
 * whatever the callback returns.
 */
DECLINLINE(int) vmdkMessage(PVMDKIMAGE pImage, const char *pszFormat, ...)
{
    int rc = VINF_SUCCESS;
    va_list va;
    va_start(va, pszFormat);
    if (pImage->pInterfaceError && pImage->pInterfaceErrorCallbacks)
        rc = pImage->pInterfaceErrorCallbacks->pfnMessage(pImage->pInterfaceError->pvUser,
                                                          pszFormat, va);
    va_end(va);
    return rc;
}
578
579/**
580 * Internal: open a file (using a file descriptor cache to ensure each file
581 * is only opened once - anything else can cause locking problems).
582 */
583static int vmdkFileOpen(PVMDKIMAGE pImage, PVMDKFILE *ppVmdkFile,
584 const char *pszFilename, uint32_t fOpen, bool fAsyncIO)
585{
586 int rc = VINF_SUCCESS;
587 PVMDKFILE pVmdkFile;
588
589 for (pVmdkFile = pImage->pFiles;
590 pVmdkFile != NULL;
591 pVmdkFile = pVmdkFile->pNext)
592 {
593 if (!strcmp(pszFilename, pVmdkFile->pszFilename))
594 {
595 Assert(fOpen == pVmdkFile->fOpen);
596 pVmdkFile->uReferences++;
597
598 *ppVmdkFile = pVmdkFile;
599
600 return rc;
601 }
602 }
603
604 /* If we get here, there's no matching entry in the cache. */
605 pVmdkFile = (PVMDKFILE)RTMemAllocZ(sizeof(VMDKFILE));
606 if (!VALID_PTR(pVmdkFile))
607 {
608 *ppVmdkFile = NULL;
609 return VERR_NO_MEMORY;
610 }
611
612 pVmdkFile->pszFilename = RTStrDup(pszFilename);
613 if (!VALID_PTR(pVmdkFile->pszFilename))
614 {
615 RTMemFree(pVmdkFile);
616 *ppVmdkFile = NULL;
617 return VERR_NO_MEMORY;
618 }
619 pVmdkFile->fOpen = fOpen;
620 pVmdkFile->fAsyncIO = fAsyncIO;
621
622 rc = pImage->pInterfaceIOCallbacks->pfnOpen(pImage->pInterfaceIO->pvUser,
623 pszFilename, fOpen,
624 &pVmdkFile->pStorage);
625 if (RT_SUCCESS(rc))
626 {
627 pVmdkFile->uReferences = 1;
628 pVmdkFile->pImage = pImage;
629 pVmdkFile->pNext = pImage->pFiles;
630 if (pImage->pFiles)
631 pImage->pFiles->pPrev = pVmdkFile;
632 pImage->pFiles = pVmdkFile;
633 *ppVmdkFile = pVmdkFile;
634 }
635 else
636 {
637 RTStrFree((char *)(void *)pVmdkFile->pszFilename);
638 RTMemFree(pVmdkFile);
639 *ppVmdkFile = NULL;
640 }
641
642 return rc;
643}
644
645/**
646 * Internal: close a file, updating the file descriptor cache.
647 */
648static int vmdkFileClose(PVMDKIMAGE pImage, PVMDKFILE *ppVmdkFile, bool fDelete)
649{
650 int rc = VINF_SUCCESS;
651 PVMDKFILE pVmdkFile = *ppVmdkFile;
652
653 AssertPtr(pVmdkFile);
654
655 pVmdkFile->fDelete |= fDelete;
656 Assert(pVmdkFile->uReferences);
657 pVmdkFile->uReferences--;
658 if (pVmdkFile->uReferences == 0)
659 {
660 PVMDKFILE pPrev;
661 PVMDKFILE pNext;
662
663 /* Unchain the element from the list. */
664 pPrev = pVmdkFile->pPrev;
665 pNext = pVmdkFile->pNext;
666
667 if (pNext)
668 pNext->pPrev = pPrev;
669 if (pPrev)
670 pPrev->pNext = pNext;
671 else
672 pImage->pFiles = pNext;
673
674 rc = pImage->pInterfaceIOCallbacks->pfnClose(pImage->pInterfaceIO->pvUser,
675 pVmdkFile->pStorage);
676 if (RT_SUCCESS(rc) && pVmdkFile->fDelete)
677 rc = pImage->pInterfaceIOCallbacks->pfnDelete(pImage->pInterfaceIO->pvUser,
678 pVmdkFile->pszFilename);
679 RTStrFree((char *)(void *)pVmdkFile->pszFilename);
680 RTMemFree(pVmdkFile);
681 }
682
683 *ppVmdkFile = NULL;
684 return rc;
685}
686
687/**
688 * Internal: rename a file (sync)
689 */
690DECLINLINE(int) vmdkFileMove(PVMDKIMAGE pImage, const char *pszSrc,
691 const char *pszDst, unsigned fMove)
692{
693 return pImage->pInterfaceIOCallbacks->pfnMove(pImage->pInterfaceIO->pvUser,
694 pszSrc, pszDst, fMove);
695}
696
697/**
698 * Internal: get the size of a file (sync/async)
699 */
700DECLINLINE(int) vmdkFileGetSize(PVMDKIMAGE pImage, PVMDKFILE pVmdkFile,
701 uint64_t *pcbSize)
702{
703 return pImage->pInterfaceIOCallbacks->pfnGetSize(pImage->pInterfaceIO->pvUser,
704 pVmdkFile->pStorage,
705 pcbSize);
706}
707
708/**
709 * Internal: set the size of a file (sync/async)
710 */
711DECLINLINE(int) vmdkFileSetSize(PVMDKIMAGE pImage, PVMDKFILE pVmdkFile,
712 uint64_t cbSize)
713{
714 return pImage->pInterfaceIOCallbacks->pfnSetSize(pImage->pInterfaceIO->pvUser,
715 pVmdkFile->pStorage,
716 cbSize);
717}
718
719/**
720 * Internal: read from a file (sync)
721 */
722DECLINLINE(int) vmdkFileReadSync(PVMDKIMAGE pImage, PVMDKFILE pVmdkFile,
723 uint64_t uOffset, void *pvBuf,
724 size_t cbToRead, size_t *pcbRead)
725{
726 return pImage->pInterfaceIOCallbacks->pfnReadSync(pImage->pInterfaceIO->pvUser,
727 pVmdkFile->pStorage, uOffset,
728 pvBuf, cbToRead, pcbRead);
729}
730
731/**
732 * Internal: write to a file (sync)
733 */
734DECLINLINE(int) vmdkFileWriteSync(PVMDKIMAGE pImage, PVMDKFILE pVmdkFile,
735 uint64_t uOffset, const void *pvBuf,
736 size_t cbToWrite, size_t *pcbWritten)
737{
738 return pImage->pInterfaceIOCallbacks->pfnWriteSync(pImage->pInterfaceIO->pvUser,
739 pVmdkFile->pStorage, uOffset,
740 pvBuf, cbToWrite, pcbWritten);
741}
742
743/**
744 * Internal: flush a file (sync)
745 */
746DECLINLINE(int) vmdkFileFlush(PVMDKIMAGE pImage, PVMDKFILE pVmdkFile)
747{
748 return pImage->pInterfaceIOCallbacks->pfnFlushSync(pImage->pInterfaceIO->pvUser,
749 pVmdkFile->pStorage);
750}
751
752/**
753 * Internal: read user data (async)
754 */
755DECLINLINE(int) vmdkFileReadUserAsync(PVMDKIMAGE pImage, PVMDKFILE pVmdkFile,
756 uint64_t uOffset, PVDIOCTX pIoCtx,
757 size_t cbRead)
758{
759 return pImage->pInterfaceIOCallbacks->pfnReadUserAsync(pImage->pInterfaceIO->pvUser,
760 pVmdkFile->pStorage,
761 uOffset, pIoCtx,
762 cbRead);
763}
764
765/**
766 * Internal: write user data (async)
767 */
768DECLINLINE(int) vmdkFileWriteUserAsync(PVMDKIMAGE pImage, PVMDKFILE pVmdkFile,
769 uint64_t uOffset, PVDIOCTX pIoCtx,
770 size_t cbWrite,
771 PFNVDXFERCOMPLETED pfnComplete,
772 void *pvCompleteUser)
773{
774 return pImage->pInterfaceIOCallbacks->pfnWriteUserAsync(pImage->pInterfaceIO->pvUser,
775 pVmdkFile->pStorage,
776 uOffset, pIoCtx,
777 cbWrite,
778 pfnComplete,
779 pvCompleteUser);
780}
781
782/**
783 * Internal: read metadata (async)
784 */
785DECLINLINE(int) vmdkFileReadMetaAsync(PVMDKIMAGE pImage, PVMDKFILE pVmdkFile,
786 uint64_t uOffset, void *pvBuffer,
787 size_t cbBuffer, PVDIOCTX pIoCtx,
788 PPVDMETAXFER ppMetaXfer,
789 PFNVDXFERCOMPLETED pfnComplete,
790 void *pvCompleteUser)
791{
792 return pImage->pInterfaceIOCallbacks->pfnReadMetaAsync(pImage->pInterfaceIO->pvUser,
793 pVmdkFile->pStorage,
794 uOffset, pvBuffer,
795 cbBuffer, pIoCtx,
796 ppMetaXfer,
797 pfnComplete,
798 pvCompleteUser);
799}
800
801/**
802 * Internal: write metadata (async)
803 */
804DECLINLINE(int) vmdkFileWriteMetaAsync(PVMDKIMAGE pImage, PVMDKFILE pVmdkFile,
805 uint64_t uOffset, void *pvBuffer,
806 size_t cbBuffer, PVDIOCTX pIoCtx,
807 PFNVDXFERCOMPLETED pfnComplete,
808 void *pvCompleteUser)
809{
810 return pImage->pInterfaceIOCallbacks->pfnWriteMetaAsync(pImage->pInterfaceIO->pvUser,
811 pVmdkFile->pStorage,
812 uOffset, pvBuffer,
813 cbBuffer, pIoCtx,
814 pfnComplete,
815 pvCompleteUser);
816}
817
818/**
819 * Internal: releases a metadata transfer handle (async)
820 */
821DECLINLINE(void) vmdkFileMetaXferRelease(PVMDKIMAGE pImage, PVDMETAXFER pMetaXfer)
822{
823 pImage->pInterfaceIOCallbacks->pfnMetaXferRelease(pImage->pInterfaceIO->pvUser,
824 pMetaXfer);
825}
826
827/**
828 * Internal: flush a file (async)
829 */
830DECLINLINE(int) vmdkFileFlushAsync(PVMDKIMAGE pImage, PVMDKFILE pVmdkFile,
831 PVDIOCTX pIoCtx)
832{
833 return pImage->pInterfaceIOCallbacks->pfnFlushAsync(pImage->pInterfaceIO->pvUser,
834 pVmdkFile->pStorage, pIoCtx,
835 NULL, NULL);
836}
837
838/**
839 * Internal: sets the buffer to a specific byte (async)
840 */
841DECLINLINE(int) vmdkFileIoCtxSet(PVMDKIMAGE pImage, PVDIOCTX pIoCtx,
842 int ch, size_t cbSet)
843{
844 return pImage->pInterfaceIOCallbacks->pfnIoCtxSet(pImage->pInterfaceIO->pvUser,
845 pIoCtx, ch, cbSet);
846}
847
848
/**
 * Internal: input callout for the RTZip decompressor.
 *
 * The on-disk compressed stream has no RTZIPTYPE prefix byte (the matching
 * deflate helper strips it on write), so the very first call (iOffset < 0)
 * synthesizes a single RTZIPTYPE_ZLIB byte before any file data is
 * delivered. Later calls read from the file, bounded by the number of
 * compressed bytes remaining (cbSize).
 */
static DECLCALLBACK(int) vmdkFileInflateHelper(void *pvUser, void *pvBuf, size_t cbBuf, size_t *pcbBuf)
{
    VMDKINFLATESTATE *pInflateState = (VMDKINFLATESTATE *)pvUser;

    Assert(cbBuf);
    if (pInflateState->iOffset < 0)
    {
        /* First call: hand back only the synthesized zlib type byte. */
        *(uint8_t *)pvBuf = RTZIPTYPE_ZLIB;
        if (pcbBuf)
            *pcbBuf = 1;
        pInflateState->iOffset = 0;
        return VINF_SUCCESS;
    }
    /* Never read past the end of the compressed data. */
    cbBuf = RT_MIN(cbBuf, pInflateState->cbSize);
    int rc = vmdkFileReadSync(pInflateState->pImage, pInflateState->pFile,
                              pInflateState->uFileOffset, pvBuf, cbBuf, NULL);
    if (RT_FAILURE(rc))
        return rc;
    pInflateState->uFileOffset += cbBuf;
    pInflateState->iOffset += cbBuf;
    pInflateState->cbSize -= cbBuf;
    Assert(pcbBuf);
    *pcbBuf = cbBuf;
    return VINF_SUCCESS;
}
874
/**
 * Internal: read a marker plus its compressed payload from a streamOptimized
 * file and inflate it, distinguishing between async and normal operation.
 *
 * @param   uMarker         Expected marker type, or VMDK_MARKER_IGNORE to
 *                          accept a compressed grain without checking uType.
 * @param   puLBA           Where to return the grain LBA (optional; only set
 *                          for compressed grain markers).
 * @param   pcbMarkerData   Where to return the total on-disk size of marker
 *                          header plus compressed payload (optional).
 */
DECLINLINE(int) vmdkFileInflateSync(PVMDKIMAGE pImage, PVMDKFILE pVmdkFile,
                                    uint64_t uOffset, void *pvBuf,
                                    size_t cbToRead, unsigned uMarker,
                                    uint64_t *puLBA, uint32_t *pcbMarkerData)
{
    if (pVmdkFile->fAsyncIO)
    {
        AssertMsgFailed(("TODO\n"));
        return VERR_NOT_SUPPORTED;
    }
    else
    {
        int rc;
        PRTZIPDECOMP pZip = NULL;
        VMDKMARKER Marker;
        uint64_t uCompOffset, cbComp;
        VMDKINFLATESTATE InflateState;
        size_t cbActuallyRead;
        size_t cbMarker = sizeof(Marker);

        /* For a compressed grain the payload starts right after cbSize, so
         * read only uSector + cbSize (12 bytes) and not the uType field. */
        if (uMarker == VMDK_MARKER_IGNORE)
            cbMarker -= sizeof(Marker.uType);
        rc = vmdkFileReadSync(pImage, pVmdkFile, uOffset, &Marker, cbMarker, NULL);
        if (RT_FAILURE(rc))
            return rc;
        Marker.uSector = RT_LE2H_U64(Marker.uSector);
        Marker.cbSize = RT_LE2H_U32(Marker.cbSize);
        /* A specific marker type implies a metadata marker, i.e. cbSize 0. */
        if (   uMarker != VMDK_MARKER_IGNORE
            && (   RT_LE2H_U32(Marker.uType) != uMarker
                || Marker.cbSize != 0))
            return VERR_VD_VMDK_INVALID_FORMAT;
        if (Marker.cbSize != 0)
        {
            /* Compressed grain marker. Data follows immediately. */
            uCompOffset = uOffset + 12;
            cbComp = Marker.cbSize;
            if (puLBA)
                *puLBA = Marker.uSector;
            if (pcbMarkerData)
                *pcbMarkerData = cbComp + 12;
        }
        else
        {
            Marker.uType = RT_LE2H_U32(Marker.uType);
            if (Marker.uType == VMDK_MARKER_EOS)
            {
                /* End-of-stream was not asked for - treat as corruption. */
                Assert(uMarker != VMDK_MARKER_EOS);
                return VERR_VD_VMDK_INVALID_FORMAT;
            }
            else if (   Marker.uType == VMDK_MARKER_GT
                     || Marker.uType == VMDK_MARKER_GD
                     || Marker.uType == VMDK_MARKER_FOOTER)
            {
                /* Metadata marker: payload starts at the next sector and
                 * uSector holds the payload size in sectors. */
                uCompOffset = uOffset + 512;
                cbComp = VMDK_SECTOR2BYTE(Marker.uSector);
                if (pcbMarkerData)
                    *pcbMarkerData = cbComp + 512;
            }
            else
            {
                AssertMsgFailed(("VMDK: unknown marker type %u\n", Marker.uType));
                return VERR_VD_VMDK_INVALID_FORMAT;
            }
        }
        InflateState.pImage = pImage;
        InflateState.pFile = pVmdkFile;
        InflateState.cbSize = cbComp;
        InflateState.uFileOffset = uCompOffset;
        InflateState.iOffset = -1;
        /* Sanity check - the expansion ratio should be much less than 2. */
        Assert(cbComp < 2 * cbToRead);
        if (cbComp >= 2 * cbToRead)
            return VERR_VD_VMDK_INVALID_FORMAT;

        rc = RTZipDecompCreate(&pZip, &InflateState, vmdkFileInflateHelper);
        if (RT_FAILURE(rc))
            return rc;
        rc = RTZipDecompress(pZip, pvBuf, cbToRead, &cbActuallyRead);
        RTZipDecompDestroy(pZip);
        if (RT_FAILURE(rc))
            return rc;
        /* A short result means the on-disk data was truncated/corrupt. */
        if (cbActuallyRead != cbToRead)
            rc = VERR_VD_VMDK_INVALID_FORMAT;
        return rc;
    }
}
965
/**
 * Internal: output callout for the RTZip compressor.
 *
 * The first byte the compressor emits is the RTZIPTYPE prefix, which is not
 * part of the on-disk format (the inflate helper re-synthesizes it on read),
 * so the very first call (iOffset < 0) skips that byte before writing the
 * remainder to the file.
 */
static DECLCALLBACK(int) vmdkFileDeflateHelper(void *pvUser, const void *pvBuf, size_t cbBuf)
{
    VMDKDEFLATESTATE *pDeflateState = (VMDKDEFLATESTATE *)pvUser;

    Assert(cbBuf);
    if (pDeflateState->iOffset < 0)
    {
        /* First call: drop the RTZIPTYPE prefix byte. */
        pvBuf = (const uint8_t *)pvBuf + 1;
        cbBuf--;
        pDeflateState->iOffset = 0;
    }
    if (!cbBuf)
        return VINF_SUCCESS;
    int rc = vmdkFileWriteSync(pDeflateState->pImage, pDeflateState->pFile,
                               pDeflateState->uFileOffset, pvBuf, cbBuf, NULL);
    if (RT_FAILURE(rc))
        return rc;
    pDeflateState->uFileOffset += cbBuf;
    pDeflateState->iOffset += cbBuf;
    return VINF_SUCCESS;
}
987
/**
 * Internal: deflate the uncompressed data and write it to a streamOptimized
 * file behind a compressed grain marker, distinguishing between async and
 * normal operation.
 *
 * Only uMarker == VMDK_MARKER_IGNORE (compressed grain) is implemented;
 * other marker types return VERR_NOT_IMPLEMENTED.
 *
 * @param   uLBA            Grain LBA stored in the marker's uSector field.
 * @param   pcbMarkerData   Where to return the total bytes written (12-byte
 *                          marker header plus compressed payload; optional).
 */
DECLINLINE(int) vmdkFileDeflateSync(PVMDKIMAGE pImage, PVMDKFILE pVmdkFile,
                                    uint64_t uOffset, const void *pvBuf,
                                    size_t cbToWrite, unsigned uMarker,
                                    uint64_t uLBA, uint32_t *pcbMarkerData)
{
    if (pVmdkFile->fAsyncIO)
    {
        AssertMsgFailed(("TODO\n"));
        return VERR_NOT_SUPPORTED;
    }
    else
    {
        int rc;
        PRTZIPCOMP pZip = NULL;
        VMDKMARKER Marker;
        uint64_t uCompOffset, cbDecomp;
        VMDKDEFLATESTATE DeflateState;

        Marker.uSector = RT_H2LE_U64(uLBA);
        Marker.cbSize = RT_H2LE_U32((uint32_t)cbToWrite);
        if (uMarker == VMDK_MARKER_IGNORE)
        {
            /* Compressed grain marker. Data follows immediately. */
            uCompOffset = uOffset + 12;
            cbDecomp = cbToWrite;
        }
        else
        {
            /** @todo implement creating the other marker types */
            return VERR_NOT_IMPLEMENTED;
        }
        DeflateState.pImage = pImage;
        DeflateState.pFile = pVmdkFile;
        DeflateState.uFileOffset = uCompOffset;
        DeflateState.iOffset = -1;

        rc = RTZipCompCreate(&pZip, &DeflateState, vmdkFileDeflateHelper, RTZIPTYPE_ZLIB, RTZIPLEVEL_DEFAULT);
        if (RT_FAILURE(rc))
            return rc;
        rc = RTZipCompress(pZip, pvBuf, cbDecomp);
        if (RT_SUCCESS(rc))
            rc = RTZipCompFinish(pZip);
        RTZipCompDestroy(pZip);
        if (RT_SUCCESS(rc))
        {
            /* DeflateState.iOffset now holds the compressed payload size. */
            if (pcbMarkerData)
                *pcbMarkerData = 12 + DeflateState.iOffset;
            /* Set the file size to remove old garbage in case the block is
             * rewritten. Cannot cause data loss as the code calling this
             * guarantees that data gets only appended. */
            Assert(DeflateState.uFileOffset > uCompOffset);

            /*
             * Change the file size only if the size really changed,
             * because this is very expensive on some filesystems
             * like XFS.
             */
            uint64_t cbOld;
            rc = vmdkFileGetSize(pImage, pVmdkFile, &cbOld);
            if (RT_FAILURE(rc))
                return rc;

            if (cbOld != DeflateState.uFileOffset)
                rc = vmdkFileSetSize(pImage, pVmdkFile, DeflateState.uFileOffset);

            if (uMarker == VMDK_MARKER_IGNORE)
            {
                /* Compressed grain marker: rewrite the 12-byte header now
                 * that the actual compressed size is known. */
                Marker.cbSize = RT_H2LE_U32(DeflateState.iOffset);
                rc = vmdkFileWriteSync(pImage, pVmdkFile, uOffset, &Marker, 12, NULL);
                if (RT_FAILURE(rc))
                    return rc;
            }
            else
            {
                /* Unreachable: uMarker was already validated above; kept as
                 * a defensive guard. */
                /** @todo implement creating the other marker types */
                return VERR_NOT_IMPLEMENTED;
            }
        }
        return rc;
    }
}
1074
1075/**
1076 * Internal: check if all files are closed, prevent leaking resources.
1077 */
1078static int vmdkFileCheckAllClose(PVMDKIMAGE pImage)
1079{
1080 int rc = VINF_SUCCESS, rc2;
1081 PVMDKFILE pVmdkFile;
1082
1083 Assert(pImage->pFiles == NULL);
1084 for (pVmdkFile = pImage->pFiles;
1085 pVmdkFile != NULL;
1086 pVmdkFile = pVmdkFile->pNext)
1087 {
1088 LogRel(("VMDK: leaking reference to file \"%s\"\n",
1089 pVmdkFile->pszFilename));
1090 pImage->pFiles = pVmdkFile->pNext;
1091
1092 rc2 = vmdkFileClose(pImage, &pVmdkFile, pVmdkFile->fDelete);
1093
1094 if (RT_SUCCESS(rc))
1095 rc = rc2;
1096 }
1097 return rc;
1098}
1099
1100/**
1101 * Internal: truncate a string (at a UTF8 code point boundary) and encode the
1102 * critical non-ASCII characters.
1103 */
1104static char *vmdkEncodeString(const char *psz)
1105{
1106 char szEnc[VMDK_ENCODED_COMMENT_MAX + 3];
1107 char *pszDst = szEnc;
1108
1109 AssertPtr(psz);
1110
1111 for (; *psz; psz = RTStrNextCp(psz))
1112 {
1113 char *pszDstPrev = pszDst;
1114 RTUNICP Cp = RTStrGetCp(psz);
1115 if (Cp == '\\')
1116 {
1117 pszDst = RTStrPutCp(pszDst, Cp);
1118 pszDst = RTStrPutCp(pszDst, Cp);
1119 }
1120 else if (Cp == '\n')
1121 {
1122 pszDst = RTStrPutCp(pszDst, '\\');
1123 pszDst = RTStrPutCp(pszDst, 'n');
1124 }
1125 else if (Cp == '\r')
1126 {
1127 pszDst = RTStrPutCp(pszDst, '\\');
1128 pszDst = RTStrPutCp(pszDst, 'r');
1129 }
1130 else
1131 pszDst = RTStrPutCp(pszDst, Cp);
1132 if (pszDst - szEnc >= VMDK_ENCODED_COMMENT_MAX - 1)
1133 {
1134 pszDst = pszDstPrev;
1135 break;
1136 }
1137 }
1138 *pszDst = '\0';
1139 return RTStrDup(szEnc);
1140}
1141
1142/**
1143 * Internal: decode a string and store it into the specified string.
1144 */
1145static int vmdkDecodeString(const char *pszEncoded, char *psz, size_t cb)
1146{
1147 int rc = VINF_SUCCESS;
1148 char szBuf[4];
1149
1150 if (!cb)
1151 return VERR_BUFFER_OVERFLOW;
1152
1153 AssertPtr(psz);
1154
1155 for (; *pszEncoded; pszEncoded = RTStrNextCp(pszEncoded))
1156 {
1157 char *pszDst = szBuf;
1158 RTUNICP Cp = RTStrGetCp(pszEncoded);
1159 if (Cp == '\\')
1160 {
1161 pszEncoded = RTStrNextCp(pszEncoded);
1162 RTUNICP CpQ = RTStrGetCp(pszEncoded);
1163 if (CpQ == 'n')
1164 RTStrPutCp(pszDst, '\n');
1165 else if (CpQ == 'r')
1166 RTStrPutCp(pszDst, '\r');
1167 else if (CpQ == '\0')
1168 {
1169 rc = VERR_VD_VMDK_INVALID_HEADER;
1170 break;
1171 }
1172 else
1173 RTStrPutCp(pszDst, CpQ);
1174 }
1175 else
1176 pszDst = RTStrPutCp(pszDst, Cp);
1177
1178 /* Need to leave space for terminating NUL. */
1179 if ((size_t)(pszDst - szBuf) + 1 >= cb)
1180 {
1181 rc = VERR_BUFFER_OVERFLOW;
1182 break;
1183 }
1184 memcpy(psz, szBuf, pszDst - szBuf);
1185 psz += pszDst - szBuf;
1186 }
1187 *psz = '\0';
1188 return rc;
1189}
1190
1191static int vmdkReadGrainDirectory(PVMDKIMAGE pImage, PVMDKEXTENT pExtent)
1192{
1193 int rc = VINF_SUCCESS;
1194 unsigned i;
1195 uint32_t *pGD = NULL, *pRGD = NULL, *pGDTmp, *pRGDTmp;
1196 size_t cbGD = pExtent->cGDEntries * sizeof(uint32_t);
1197
1198 if (pExtent->enmType != VMDKETYPE_HOSTED_SPARSE)
1199 goto out;
1200
1201 pGD = (uint32_t *)RTMemAllocZ(cbGD);
1202 if (!pGD)
1203 {
1204 rc = VERR_NO_MEMORY;
1205 goto out;
1206 }
1207 pExtent->pGD = pGD;
1208 /* The VMDK 1.1 spec seems to talk about compressed grain directories,
1209 * but in reality they are not compressed. */
1210 rc = vmdkFileReadSync(pImage, pExtent->pFile,
1211 VMDK_SECTOR2BYTE(pExtent->uSectorGD),
1212 pGD, cbGD, NULL);
1213 AssertRC(rc);
1214 if (RT_FAILURE(rc))
1215 {
1216 rc = vmdkError(pImage, rc, RT_SRC_POS, N_("VMDK: could not read grain directory in '%s': %Rrc"), pExtent->pszFullname);
1217 goto out;
1218 }
1219 for (i = 0, pGDTmp = pGD; i < pExtent->cGDEntries; i++, pGDTmp++)
1220 *pGDTmp = RT_LE2H_U32(*pGDTmp);
1221
1222 if (pExtent->uSectorRGD)
1223 {
1224 pRGD = (uint32_t *)RTMemAllocZ(cbGD);
1225 if (!pRGD)
1226 {
1227 rc = VERR_NO_MEMORY;
1228 goto out;
1229 }
1230 pExtent->pRGD = pRGD;
1231 /* The VMDK 1.1 spec seems to talk about compressed grain directories,
1232 * but in reality they are not compressed. */
1233 rc = vmdkFileReadSync(pImage, pExtent->pFile,
1234 VMDK_SECTOR2BYTE(pExtent->uSectorRGD),
1235 pRGD, cbGD, NULL);
1236 AssertRC(rc);
1237 if (RT_FAILURE(rc))
1238 {
1239 rc = vmdkError(pImage, rc, RT_SRC_POS, N_("VMDK: could not read redundant grain directory in '%s'"), pExtent->pszFullname);
1240 goto out;
1241 }
1242 for (i = 0, pRGDTmp = pRGD; i < pExtent->cGDEntries; i++, pRGDTmp++)
1243 *pRGDTmp = RT_LE2H_U32(*pRGDTmp);
1244
1245 /* Check grain table and redundant grain table for consistency. */
1246 size_t cbGT = pExtent->cGTEntries * sizeof(uint32_t);
1247 uint32_t *pTmpGT1 = (uint32_t *)RTMemTmpAlloc(cbGT);
1248 if (!pTmpGT1)
1249 {
1250 rc = VERR_NO_MEMORY;
1251 goto out;
1252 }
1253 uint32_t *pTmpGT2 = (uint32_t *)RTMemTmpAlloc(cbGT);
1254 if (!pTmpGT2)
1255 {
1256 RTMemTmpFree(pTmpGT1);
1257 rc = VERR_NO_MEMORY;
1258 goto out;
1259 }
1260
1261 for (i = 0, pGDTmp = pGD, pRGDTmp = pRGD;
1262 i < pExtent->cGDEntries;
1263 i++, pGDTmp++, pRGDTmp++)
1264 {
1265 /* If no grain table is allocated skip the entry. */
1266 if (*pGDTmp == 0 && *pRGDTmp == 0)
1267 continue;
1268
1269 if (*pGDTmp == 0 || *pRGDTmp == 0 || *pGDTmp == *pRGDTmp)
1270 {
1271 /* Just one grain directory entry refers to a not yet allocated
1272 * grain table or both grain directory copies refer to the same
1273 * grain table. Not allowed. */
1274 RTMemTmpFree(pTmpGT1);
1275 RTMemTmpFree(pTmpGT2);
1276 rc = vmdkError(pImage, VERR_VD_VMDK_INVALID_HEADER, RT_SRC_POS, N_("VMDK: inconsistent references to grain directory in '%s'"), pExtent->pszFullname);
1277 goto out;
1278 }
1279 /* The VMDK 1.1 spec seems to talk about compressed grain tables,
1280 * but in reality they are not compressed. */
1281 rc = vmdkFileReadSync(pImage, pExtent->pFile,
1282 VMDK_SECTOR2BYTE(*pGDTmp),
1283 pTmpGT1, cbGT, NULL);
1284 if (RT_FAILURE(rc))
1285 {
1286 rc = vmdkError(pImage, rc, RT_SRC_POS, N_("VMDK: error reading grain table in '%s'"), pExtent->pszFullname);
1287 RTMemTmpFree(pTmpGT1);
1288 RTMemTmpFree(pTmpGT2);
1289 goto out;
1290 }
1291 /* The VMDK 1.1 spec seems to talk about compressed grain tables,
1292 * but in reality they are not compressed. */
1293 rc = vmdkFileReadSync(pImage, pExtent->pFile,
1294 VMDK_SECTOR2BYTE(*pRGDTmp),
1295 pTmpGT2, cbGT, NULL);
1296 if (RT_FAILURE(rc))
1297 {
1298 rc = vmdkError(pImage, rc, RT_SRC_POS, N_("VMDK: error reading backup grain table in '%s'"), pExtent->pszFullname);
1299 RTMemTmpFree(pTmpGT1);
1300 RTMemTmpFree(pTmpGT2);
1301 goto out;
1302 }
1303 if (memcmp(pTmpGT1, pTmpGT2, cbGT))
1304 {
1305 RTMemTmpFree(pTmpGT1);
1306 RTMemTmpFree(pTmpGT2);
1307 rc = vmdkError(pImage, VERR_VD_VMDK_INVALID_HEADER, RT_SRC_POS, N_("VMDK: inconsistency between grain table and backup grain table in '%s'"), pExtent->pszFullname);
1308 goto out;
1309 }
1310 }
1311
1312 /** @todo figure out what to do for unclean VMDKs. */
1313 RTMemTmpFree(pTmpGT1);
1314 RTMemTmpFree(pTmpGT2);
1315 }
1316
1317 if (pImage->uImageFlags & VD_VMDK_IMAGE_FLAGS_STREAM_OPTIMIZED)
1318 {
1319 uint32_t uLastGrainWritten = 0;
1320 uint32_t uLastGrainSector = 0;
1321 size_t cbGT = pExtent->cGTEntries * sizeof(uint32_t);
1322 uint32_t *pTmpGT = (uint32_t *)RTMemTmpAlloc(cbGT);
1323 if (!pTmpGT)
1324 {
1325 rc = VERR_NO_MEMORY;
1326 goto out;
1327 }
1328 for (i = 0, pGDTmp = pGD; i < pExtent->cGDEntries; i++, pGDTmp++)
1329 {
1330 /* If no grain table is allocated skip the entry. */
1331 if (*pGDTmp == 0)
1332 continue;
1333
1334 /* The VMDK 1.1 spec seems to talk about compressed grain tables,
1335 * but in reality they are not compressed. */
1336 rc = vmdkFileReadSync(pImage, pExtent->pFile,
1337 VMDK_SECTOR2BYTE(*pGDTmp),
1338 pTmpGT, cbGT, NULL);
1339 if (RT_FAILURE(rc))
1340 {
1341 rc = vmdkError(pImage, rc, RT_SRC_POS, N_("VMDK: error reading grain table in '%s'"), pExtent->pszFullname);
1342 RTMemTmpFree(pTmpGT);
1343 goto out;
1344 }
1345 uint32_t j;
1346 uint32_t *pGTTmp;
1347 for (j = 0, pGTTmp = pTmpGT; j < pExtent->cGTEntries; j++, pGTTmp++)
1348 {
1349 uint32_t uGTTmp = RT_LE2H_U32(*pGTTmp);
1350
1351 /* If no grain is allocated skip the entry. */
1352 if (uGTTmp == 0)
1353 continue;
1354
1355 if (uLastGrainSector && uLastGrainSector >= uGTTmp)
1356 {
1357 rc = vmdkError(pImage, rc, RT_SRC_POS, N_("VMDK: grain table in '%s' contains a violation of the ordering assumptions"), pExtent->pszFullname);
1358 RTMemTmpFree(pTmpGT);
1359 goto out;
1360 }
1361 uLastGrainSector = uGTTmp;
1362 uLastGrainWritten = i * pExtent->cGTEntries + j;
1363 }
1364 }
1365 RTMemTmpFree(pTmpGT);
1366
1367 /* streamOptimized extents need a grain decompress buffer. */
1368 pExtent->pvGrain = RTMemAlloc(VMDK_SECTOR2BYTE(pExtent->cSectorsPerGrain));
1369 if (!pExtent->pvGrain)
1370 {
1371 rc = VERR_NO_MEMORY;
1372 goto out;
1373 }
1374
1375 if (uLastGrainSector)
1376 {
1377 uint64_t uLBA = 0;
1378 uint32_t cbMarker = 0;
1379 rc = vmdkFileInflateSync(pImage, pExtent->pFile, VMDK_SECTOR2BYTE(uLastGrainSector),
1380 pExtent->pvGrain, VMDK_SECTOR2BYTE(pExtent->cSectorsPerGrain), VMDK_MARKER_IGNORE, &uLBA, &cbMarker);
1381 if (RT_FAILURE(rc))
1382 goto out;
1383
1384 Assert(uLBA == uLastGrainWritten * pExtent->cSectorsPerGrain);
1385 pExtent->uGrainSector = uLastGrainSector;
1386 pExtent->cbLastGrainWritten = RT_ALIGN(cbMarker, 512);
1387 }
1388 pExtent->uLastGrainWritten = uLastGrainWritten;
1389 pExtent->uLastGrainSector = uLastGrainSector;
1390 }
1391
1392out:
1393 if (RT_FAILURE(rc))
1394 vmdkFreeGrainDirectory(pExtent);
1395 return rc;
1396}
1397
1398static int vmdkCreateGrainDirectory(PVMDKIMAGE pImage, PVMDKEXTENT pExtent,
1399 uint64_t uStartSector, bool fPreAlloc)
1400{
1401 int rc = VINF_SUCCESS;
1402 unsigned i;
1403 uint32_t *pGD = NULL, *pRGD = NULL;
1404 size_t cbGD = pExtent->cGDEntries * sizeof(uint32_t);
1405 size_t cbGDRounded = RT_ALIGN_64(pExtent->cGDEntries * sizeof(uint32_t), 512);
1406 size_t cbGTRounded;
1407 uint64_t cbOverhead;
1408
1409 if (fPreAlloc)
1410 cbGTRounded = RT_ALIGN_64(pExtent->cGDEntries * pExtent->cGTEntries * sizeof(uint32_t), 512);
1411 else
1412 cbGTRounded = 0;
1413
1414 pGD = (uint32_t *)RTMemAllocZ(cbGD);
1415 if (!pGD)
1416 {
1417 rc = VERR_NO_MEMORY;
1418 goto out;
1419 }
1420 pExtent->pGD = pGD;
1421 if (!(pImage->uImageFlags & VD_VMDK_IMAGE_FLAGS_STREAM_OPTIMIZED))
1422 {
1423 pRGD = (uint32_t *)RTMemAllocZ(cbGD);
1424 if (!pRGD)
1425 {
1426 rc = VERR_NO_MEMORY;
1427 goto out;
1428 }
1429 pExtent->pRGD = pRGD;
1430 }
1431 else
1432 pExtent->pRGD = NULL;
1433
1434 if (uStartSector != VMDK_GD_AT_END)
1435 {
1436 cbOverhead = VMDK_SECTOR2BYTE(uStartSector) + cbGDRounded + cbGTRounded;
1437 /* For streamOptimized extents there is only one grain directory,
1438 * and for all others take redundant grain directory into account. */
1439 if (pImage->uImageFlags & VD_VMDK_IMAGE_FLAGS_STREAM_OPTIMIZED)
1440 {
1441 cbOverhead = RT_ALIGN_64(cbOverhead,
1442 VMDK_SECTOR2BYTE(pExtent->cSectorsPerGrain));
1443 if (pExtent->fFooter)
1444 rc = vmdkFileSetSize(pImage, pExtent->pFile, cbOverhead);
1445 else
1446 rc = vmdkFileSetSize(pImage, pExtent->pFile, cbOverhead + 512);
1447 }
1448 else
1449 {
1450 cbOverhead += cbGDRounded + cbGTRounded;
1451 cbOverhead = RT_ALIGN_64(cbOverhead,
1452 VMDK_SECTOR2BYTE(pExtent->cSectorsPerGrain));
1453 rc = vmdkFileSetSize(pImage, pExtent->pFile, cbOverhead);
1454 }
1455 if (RT_FAILURE(rc))
1456 goto out;
1457 pExtent->uSectorRGD = uStartSector;
1458 pExtent->uSectorGD = uStartSector + VMDK_BYTE2SECTOR(cbGDRounded + cbGTRounded);
1459 }
1460 else
1461 {
1462 cbOverhead = 512 + pImage->cbDescAlloc;
1463 pExtent->uSectorGD = uStartSector;
1464 }
1465
1466 if (fPreAlloc)
1467 {
1468 uint32_t uGTSectorLE;
1469 uint64_t uOffsetSectors;
1470
1471 uOffsetSectors = pExtent->uSectorRGD + VMDK_BYTE2SECTOR(cbGDRounded);
1472 for (i = 0; i < pExtent->cGDEntries; i++)
1473 {
1474 pRGD[i] = uOffsetSectors;
1475 uGTSectorLE = RT_H2LE_U64(uOffsetSectors);
1476 /* Write the redundant grain directory entry to disk. */
1477 rc = vmdkFileWriteSync(pImage, pExtent->pFile,
1478 VMDK_SECTOR2BYTE(pExtent->uSectorRGD) + i * sizeof(uGTSectorLE),
1479 &uGTSectorLE, sizeof(uGTSectorLE), NULL);
1480 if (RT_FAILURE(rc))
1481 {
1482 rc = vmdkError(pImage, rc, RT_SRC_POS, N_("VMDK: cannot write new redundant grain directory entry in '%s'"), pExtent->pszFullname);
1483 goto out;
1484 }
1485 uOffsetSectors += VMDK_BYTE2SECTOR(pExtent->cGTEntries * sizeof(uint32_t));
1486 }
1487
1488 uOffsetSectors = pExtent->uSectorGD + VMDK_BYTE2SECTOR(cbGDRounded);
1489 for (i = 0; i < pExtent->cGDEntries; i++)
1490 {
1491 pGD[i] = uOffsetSectors;
1492 uGTSectorLE = RT_H2LE_U64(uOffsetSectors);
1493 /* Write the grain directory entry to disk. */
1494 rc = vmdkFileWriteSync(pImage, pExtent->pFile,
1495 VMDK_SECTOR2BYTE(pExtent->uSectorGD) + i * sizeof(uGTSectorLE),
1496 &uGTSectorLE, sizeof(uGTSectorLE), NULL);
1497 if (RT_FAILURE(rc))
1498 {
1499 rc = vmdkError(pImage, rc, RT_SRC_POS, N_("VMDK: cannot write new grain directory entry in '%s'"), pExtent->pszFullname);
1500 goto out;
1501 }
1502 uOffsetSectors += VMDK_BYTE2SECTOR(pExtent->cGTEntries * sizeof(uint32_t));
1503 }
1504 }
1505 pExtent->cOverheadSectors = VMDK_BYTE2SECTOR(cbOverhead);
1506
1507 /* streamOptimized extents need a grain decompress buffer. */
1508 if (pImage->uImageFlags & VD_VMDK_IMAGE_FLAGS_STREAM_OPTIMIZED)
1509 {
1510 pExtent->pvGrain = RTMemAlloc(VMDK_SECTOR2BYTE(pExtent->cSectorsPerGrain));
1511 if (!pExtent->pvGrain)
1512 {
1513 rc = VERR_NO_MEMORY;
1514 goto out;
1515 }
1516 }
1517
1518out:
1519 if (RT_FAILURE(rc))
1520 vmdkFreeGrainDirectory(pExtent);
1521 return rc;
1522}
1523
1524static void vmdkFreeGrainDirectory(PVMDKEXTENT pExtent)
1525{
1526 if (pExtent->pGD)
1527 {
1528 RTMemFree(pExtent->pGD);
1529 pExtent->pGD = NULL;
1530 }
1531 if (pExtent->pRGD)
1532 {
1533 RTMemFree(pExtent->pRGD);
1534 pExtent->pRGD = NULL;
1535 }
1536}
1537
1538static int vmdkStringUnquote(PVMDKIMAGE pImage, const char *pszStr,
1539 char **ppszUnquoted, char **ppszNext)
1540{
1541 char *pszQ;
1542 char *pszUnquoted;
1543
1544 /* Skip over whitespace. */
1545 while (*pszStr == ' ' || *pszStr == '\t')
1546 pszStr++;
1547
1548 if (*pszStr != '"')
1549 {
1550 pszQ = (char *)pszStr;
1551 while (*pszQ && *pszQ != ' ' && *pszQ != '\t')
1552 pszQ++;
1553 }
1554 else
1555 {
1556 pszStr++;
1557 pszQ = (char *)strchr(pszStr, '"');
1558 if (pszQ == NULL)
1559 return vmdkError(pImage, VERR_VD_VMDK_INVALID_HEADER, RT_SRC_POS, N_("VMDK: incorrectly quoted value in descriptor in '%s'"), pImage->pszFilename);
1560 }
1561
1562 pszUnquoted = (char *)RTMemTmpAlloc(pszQ - pszStr + 1);
1563 if (!pszUnquoted)
1564 return VERR_NO_MEMORY;
1565 memcpy(pszUnquoted, pszStr, pszQ - pszStr);
1566 pszUnquoted[pszQ - pszStr] = '\0';
1567 *ppszUnquoted = pszUnquoted;
1568 if (ppszNext)
1569 *ppszNext = pszQ + 1;
1570 return VINF_SUCCESS;
1571}
1572
1573static int vmdkDescInitStr(PVMDKIMAGE pImage, PVMDKDESCRIPTOR pDescriptor,
1574 const char *pszLine)
1575{
1576 char *pEnd = pDescriptor->aLines[pDescriptor->cLines];
1577 ssize_t cbDiff = strlen(pszLine) + 1;
1578
1579 if ( pDescriptor->cLines >= VMDK_DESCRIPTOR_LINES_MAX - 1
1580 && pEnd - pDescriptor->aLines[0] > (ptrdiff_t)pDescriptor->cbDescAlloc - cbDiff)
1581 return vmdkError(pImage, VERR_BUFFER_OVERFLOW, RT_SRC_POS, N_("VMDK: descriptor too big in '%s'"), pImage->pszFilename);
1582
1583 memcpy(pEnd, pszLine, cbDiff);
1584 pDescriptor->cLines++;
1585 pDescriptor->aLines[pDescriptor->cLines] = pEnd + cbDiff;
1586 pDescriptor->fDirty = true;
1587
1588 return VINF_SUCCESS;
1589}
1590
1591static bool vmdkDescGetStr(PVMDKDESCRIPTOR pDescriptor, unsigned uStart,
1592 const char *pszKey, const char **ppszValue)
1593{
1594 size_t cbKey = strlen(pszKey);
1595 const char *pszValue;
1596
1597 while (uStart != 0)
1598 {
1599 if (!strncmp(pDescriptor->aLines[uStart], pszKey, cbKey))
1600 {
1601 /* Key matches, check for a '=' (preceded by whitespace). */
1602 pszValue = pDescriptor->aLines[uStart] + cbKey;
1603 while (*pszValue == ' ' || *pszValue == '\t')
1604 pszValue++;
1605 if (*pszValue == '=')
1606 {
1607 *ppszValue = pszValue + 1;
1608 break;
1609 }
1610 }
1611 uStart = pDescriptor->aNextLines[uStart];
1612 }
1613 return !!uStart;
1614}
1615
1616static int vmdkDescSetStr(PVMDKIMAGE pImage, PVMDKDESCRIPTOR pDescriptor,
1617 unsigned uStart,
1618 const char *pszKey, const char *pszValue)
1619{
1620 char *pszTmp;
1621 size_t cbKey = strlen(pszKey);
1622 unsigned uLast = 0;
1623
1624 while (uStart != 0)
1625 {
1626 if (!strncmp(pDescriptor->aLines[uStart], pszKey, cbKey))
1627 {
1628 /* Key matches, check for a '=' (preceded by whitespace). */
1629 pszTmp = pDescriptor->aLines[uStart] + cbKey;
1630 while (*pszTmp == ' ' || *pszTmp == '\t')
1631 pszTmp++;
1632 if (*pszTmp == '=')
1633 {
1634 pszTmp++;
1635 while (*pszTmp == ' ' || *pszTmp == '\t')
1636 pszTmp++;
1637 break;
1638 }
1639 }
1640 if (!pDescriptor->aNextLines[uStart])
1641 uLast = uStart;
1642 uStart = pDescriptor->aNextLines[uStart];
1643 }
1644 if (uStart)
1645 {
1646 if (pszValue)
1647 {
1648 /* Key already exists, replace existing value. */
1649 size_t cbOldVal = strlen(pszTmp);
1650 size_t cbNewVal = strlen(pszValue);
1651 ssize_t cbDiff = cbNewVal - cbOldVal;
1652 /* Check for buffer overflow. */
1653 if ( pDescriptor->aLines[pDescriptor->cLines]
1654 - pDescriptor->aLines[0] > (ptrdiff_t)pDescriptor->cbDescAlloc - cbDiff)
1655 return vmdkError(pImage, VERR_BUFFER_OVERFLOW, RT_SRC_POS, N_("VMDK: descriptor too big in '%s'"), pImage->pszFilename);
1656
1657 memmove(pszTmp + cbNewVal, pszTmp + cbOldVal,
1658 pDescriptor->aLines[pDescriptor->cLines] - pszTmp - cbOldVal);
1659 memcpy(pszTmp, pszValue, cbNewVal + 1);
1660 for (unsigned i = uStart + 1; i <= pDescriptor->cLines; i++)
1661 pDescriptor->aLines[i] += cbDiff;
1662 }
1663 else
1664 {
1665 memmove(pDescriptor->aLines[uStart], pDescriptor->aLines[uStart+1],
1666 pDescriptor->aLines[pDescriptor->cLines] - pDescriptor->aLines[uStart+1] + 1);
1667 for (unsigned i = uStart + 1; i <= pDescriptor->cLines; i++)
1668 {
1669 pDescriptor->aLines[i-1] = pDescriptor->aLines[i];
1670 if (pDescriptor->aNextLines[i])
1671 pDescriptor->aNextLines[i-1] = pDescriptor->aNextLines[i] - 1;
1672 else
1673 pDescriptor->aNextLines[i-1] = 0;
1674 }
1675 pDescriptor->cLines--;
1676 /* Adjust starting line numbers of following descriptor sections. */
1677 if (uStart < pDescriptor->uFirstExtent)
1678 pDescriptor->uFirstExtent--;
1679 if (uStart < pDescriptor->uFirstDDB)
1680 pDescriptor->uFirstDDB--;
1681 }
1682 }
1683 else
1684 {
1685 /* Key doesn't exist, append after the last entry in this category. */
1686 if (!pszValue)
1687 {
1688 /* Key doesn't exist, and it should be removed. Simply a no-op. */
1689 return VINF_SUCCESS;
1690 }
1691 cbKey = strlen(pszKey);
1692 size_t cbValue = strlen(pszValue);
1693 ssize_t cbDiff = cbKey + 1 + cbValue + 1;
1694 /* Check for buffer overflow. */
1695 if ( (pDescriptor->cLines >= VMDK_DESCRIPTOR_LINES_MAX - 1)
1696 || ( pDescriptor->aLines[pDescriptor->cLines]
1697 - pDescriptor->aLines[0] > (ptrdiff_t)pDescriptor->cbDescAlloc - cbDiff))
1698 return vmdkError(pImage, VERR_BUFFER_OVERFLOW, RT_SRC_POS, N_("VMDK: descriptor too big in '%s'"), pImage->pszFilename);
1699 for (unsigned i = pDescriptor->cLines + 1; i > uLast + 1; i--)
1700 {
1701 pDescriptor->aLines[i] = pDescriptor->aLines[i - 1];
1702 if (pDescriptor->aNextLines[i - 1])
1703 pDescriptor->aNextLines[i] = pDescriptor->aNextLines[i - 1] + 1;
1704 else
1705 pDescriptor->aNextLines[i] = 0;
1706 }
1707 uStart = uLast + 1;
1708 pDescriptor->aNextLines[uLast] = uStart;
1709 pDescriptor->aNextLines[uStart] = 0;
1710 pDescriptor->cLines++;
1711 pszTmp = pDescriptor->aLines[uStart];
1712 memmove(pszTmp + cbDiff, pszTmp,
1713 pDescriptor->aLines[pDescriptor->cLines] - pszTmp);
1714 memcpy(pDescriptor->aLines[uStart], pszKey, cbKey);
1715 pDescriptor->aLines[uStart][cbKey] = '=';
1716 memcpy(pDescriptor->aLines[uStart] + cbKey + 1, pszValue, cbValue + 1);
1717 for (unsigned i = uStart + 1; i <= pDescriptor->cLines; i++)
1718 pDescriptor->aLines[i] += cbDiff;
1719
1720 /* Adjust starting line numbers of following descriptor sections. */
1721 if (uStart <= pDescriptor->uFirstExtent)
1722 pDescriptor->uFirstExtent++;
1723 if (uStart <= pDescriptor->uFirstDDB)
1724 pDescriptor->uFirstDDB++;
1725 }
1726 pDescriptor->fDirty = true;
1727 return VINF_SUCCESS;
1728}
1729
1730static int vmdkDescBaseGetU32(PVMDKDESCRIPTOR pDescriptor, const char *pszKey,
1731 uint32_t *puValue)
1732{
1733 const char *pszValue;
1734
1735 if (!vmdkDescGetStr(pDescriptor, pDescriptor->uFirstDesc, pszKey,
1736 &pszValue))
1737 return VERR_VD_VMDK_VALUE_NOT_FOUND;
1738 return RTStrToUInt32Ex(pszValue, NULL, 10, puValue);
1739}
1740
1741static int vmdkDescBaseGetStr(PVMDKIMAGE pImage, PVMDKDESCRIPTOR pDescriptor,
1742 const char *pszKey, const char **ppszValue)
1743{
1744 const char *pszValue;
1745 char *pszValueUnquoted;
1746
1747 if (!vmdkDescGetStr(pDescriptor, pDescriptor->uFirstDesc, pszKey,
1748 &pszValue))
1749 return VERR_VD_VMDK_VALUE_NOT_FOUND;
1750 int rc = vmdkStringUnquote(pImage, pszValue, &pszValueUnquoted, NULL);
1751 if (RT_FAILURE(rc))
1752 return rc;
1753 *ppszValue = pszValueUnquoted;
1754 return rc;
1755}
1756
1757static int vmdkDescBaseSetStr(PVMDKIMAGE pImage, PVMDKDESCRIPTOR pDescriptor,
1758 const char *pszKey, const char *pszValue)
1759{
1760 char *pszValueQuoted;
1761
1762 int rc = RTStrAPrintf(&pszValueQuoted, "\"%s\"", pszValue);
1763 if (RT_FAILURE(rc))
1764 return rc;
1765 rc = vmdkDescSetStr(pImage, pDescriptor, pDescriptor->uFirstDesc, pszKey,
1766 pszValueQuoted);
1767 RTStrFree(pszValueQuoted);
1768 return rc;
1769}
1770
/**
 * Internal: remove the first line of the extent description section (the
 * placeholder extent entry), moving the rest of the descriptor text down
 * and fixing up the line pointer/chain tables accordingly.
 *
 * @param   pImage       Image instance data (currently unused).
 * @param   pDescriptor  Descriptor to modify in place.
 */
static void vmdkDescExtRemoveDummy(PVMDKIMAGE pImage,
                                   PVMDKDESCRIPTOR pDescriptor)
{
    unsigned uEntry = pDescriptor->uFirstExtent;
    ssize_t cbDiff;

    /* Nothing to do if there is no extent section at all. */
    if (!uEntry)
        return;

    /* Length of the removed line including its terminator. */
    cbDiff = strlen(pDescriptor->aLines[uEntry]) + 1;
    /* Move everything including \0 in the entry marking the end of buffer. */
    memmove(pDescriptor->aLines[uEntry], pDescriptor->aLines[uEntry + 1],
            pDescriptor->aLines[pDescriptor->cLines] - pDescriptor->aLines[uEntry + 1] + 1);
    for (unsigned i = uEntry + 1; i <= pDescriptor->cLines; i++)
    {
        /* Text moved down by cbDiff bytes; line indices shift up by one. */
        pDescriptor->aLines[i - 1] = pDescriptor->aLines[i] - cbDiff;
        if (pDescriptor->aNextLines[i])
            pDescriptor->aNextLines[i - 1] = pDescriptor->aNextLines[i] - 1;
        else
            pDescriptor->aNextLines[i - 1] = 0;
    }
    pDescriptor->cLines--;
    /* The DDB section follows the extent section; its first line moved up. */
    if (pDescriptor->uFirstDDB)
        pDescriptor->uFirstDDB--;

    /* NOTE(review): fDirty is not set here, unlike the other descriptor
     * mutators - confirm callers mark the descriptor dirty themselves. */
    return;
}
1798
/**
 * Internal: append an extent description line ("ACCESS SIZE TYPE
 * [\"file\" [offset]]") at the end of the extent section.
 *
 * @returns VBox status code; VERR_BUFFER_OVERFLOW if the descriptor cannot
 *          hold another line.
 * @param   pImage           Image instance data (for error reporting).
 * @param   pDescriptor      Descriptor to extend.
 * @param   enmAccess        Access mode of the extent.
 * @param   cNominalSectors  Nominal size of the extent in sectors.
 * @param   enmType          Extent type.
 * @param   pszBasename      Extent file name (ignored for ZERO extents).
 * @param   uSectorOffset    Start offset in the file (FLAT extents only).
 */
static int vmdkDescExtInsert(PVMDKIMAGE pImage, PVMDKDESCRIPTOR pDescriptor,
                             VMDKACCESS enmAccess, uint64_t cNominalSectors,
                             VMDKETYPE enmType, const char *pszBasename,
                             uint64_t uSectorOffset)
{
    /* Textual forms, indexed by the VMDKACCESS / VMDKETYPE enum values. */
    static const char *apszAccess[] = { "NOACCESS", "RDONLY", "RW" };
    static const char *apszType[] = { "", "SPARSE", "FLAT", "ZERO", "VMFS" };
    char *pszTmp;
    unsigned uStart = pDescriptor->uFirstExtent, uLast = 0;
    char szExt[1024];
    ssize_t cbDiff;

    Assert((unsigned)enmAccess < RT_ELEMENTS(apszAccess));
    Assert((unsigned)enmType < RT_ELEMENTS(apszType));

    /* Find last entry in extent description. */
    while (uStart)
    {
        if (!pDescriptor->aNextLines[uStart])
            uLast = uStart;
        uStart = pDescriptor->aNextLines[uStart];
    }

    if (enmType == VMDKETYPE_ZERO)
    {
        /* ZERO extents have no backing file and no offset. */
        RTStrPrintf(szExt, sizeof(szExt), "%s %llu %s ", apszAccess[enmAccess],
                    cNominalSectors, apszType[enmType]);
    }
    else if (enmType == VMDKETYPE_FLAT)
    {
        /* FLAT extents additionally carry the sector offset into the file. */
        RTStrPrintf(szExt, sizeof(szExt), "%s %llu %s \"%s\" %llu",
                    apszAccess[enmAccess], cNominalSectors,
                    apszType[enmType], pszBasename, uSectorOffset);
    }
    else
    {
        RTStrPrintf(szExt, sizeof(szExt), "%s %llu %s \"%s\"",
                    apszAccess[enmAccess], cNominalSectors,
                    apszType[enmType], pszBasename);
    }
    cbDiff = strlen(szExt) + 1;

    /* Check for buffer overflow. */
    if (   (pDescriptor->cLines >= VMDK_DESCRIPTOR_LINES_MAX - 1)
        || (   pDescriptor->aLines[pDescriptor->cLines]
            -  pDescriptor->aLines[0] > (ptrdiff_t)pDescriptor->cbDescAlloc - cbDiff))
        return vmdkError(pImage, VERR_BUFFER_OVERFLOW, RT_SRC_POS, N_("VMDK: descriptor too big in '%s'"), pImage->pszFilename);

    /* Shift the line bookkeeping up by one to free the slot at uLast + 1. */
    for (unsigned i = pDescriptor->cLines + 1; i > uLast + 1; i--)
    {
        pDescriptor->aLines[i] = pDescriptor->aLines[i - 1];
        if (pDescriptor->aNextLines[i - 1])
            pDescriptor->aNextLines[i] = pDescriptor->aNextLines[i - 1] + 1;
        else
            pDescriptor->aNextLines[i] = 0;
    }
    uStart = uLast + 1;
    pDescriptor->aNextLines[uLast] = uStart;
    pDescriptor->aNextLines[uStart] = 0;
    pDescriptor->cLines++;
    /* Make room in the text buffer and copy in the new extent line. */
    pszTmp = pDescriptor->aLines[uStart];
    memmove(pszTmp + cbDiff, pszTmp,
            pDescriptor->aLines[pDescriptor->cLines] - pszTmp);
    memcpy(pDescriptor->aLines[uStart], szExt, cbDiff);
    /* All following line pointers moved up by the inserted amount. */
    for (unsigned i = uStart + 1; i <= pDescriptor->cLines; i++)
        pDescriptor->aLines[i] += cbDiff;

    /* Adjust starting line numbers of following descriptor sections. */
    if (uStart <= pDescriptor->uFirstDDB)
        pDescriptor->uFirstDDB++;

    pDescriptor->fDirty = true;
    return VINF_SUCCESS;
}
1873
1874static int vmdkDescDDBGetStr(PVMDKIMAGE pImage, PVMDKDESCRIPTOR pDescriptor,
1875 const char *pszKey, const char **ppszValue)
1876{
1877 const char *pszValue;
1878 char *pszValueUnquoted;
1879
1880 if (!vmdkDescGetStr(pDescriptor, pDescriptor->uFirstDDB, pszKey,
1881 &pszValue))
1882 return VERR_VD_VMDK_VALUE_NOT_FOUND;
1883 int rc = vmdkStringUnquote(pImage, pszValue, &pszValueUnquoted, NULL);
1884 if (RT_FAILURE(rc))
1885 return rc;
1886 *ppszValue = pszValueUnquoted;
1887 return rc;
1888}
1889
1890static int vmdkDescDDBGetU32(PVMDKIMAGE pImage, PVMDKDESCRIPTOR pDescriptor,
1891 const char *pszKey, uint32_t *puValue)
1892{
1893 const char *pszValue;
1894 char *pszValueUnquoted;
1895
1896 if (!vmdkDescGetStr(pDescriptor, pDescriptor->uFirstDDB, pszKey,
1897 &pszValue))
1898 return VERR_VD_VMDK_VALUE_NOT_FOUND;
1899 int rc = vmdkStringUnquote(pImage, pszValue, &pszValueUnquoted, NULL);
1900 if (RT_FAILURE(rc))
1901 return rc;
1902 rc = RTStrToUInt32Ex(pszValueUnquoted, NULL, 10, puValue);
1903 RTMemTmpFree(pszValueUnquoted);
1904 return rc;
1905}
1906
1907static int vmdkDescDDBGetUuid(PVMDKIMAGE pImage, PVMDKDESCRIPTOR pDescriptor,
1908 const char *pszKey, PRTUUID pUuid)
1909{
1910 const char *pszValue;
1911 char *pszValueUnquoted;
1912
1913 if (!vmdkDescGetStr(pDescriptor, pDescriptor->uFirstDDB, pszKey,
1914 &pszValue))
1915 return VERR_VD_VMDK_VALUE_NOT_FOUND;
1916 int rc = vmdkStringUnquote(pImage, pszValue, &pszValueUnquoted, NULL);
1917 if (RT_FAILURE(rc))
1918 return rc;
1919 rc = RTUuidFromStr(pUuid, pszValueUnquoted);
1920 RTMemTmpFree(pszValueUnquoted);
1921 return rc;
1922}
1923
1924static int vmdkDescDDBSetStr(PVMDKIMAGE pImage, PVMDKDESCRIPTOR pDescriptor,
1925 const char *pszKey, const char *pszVal)
1926{
1927 int rc;
1928 char *pszValQuoted;
1929
1930 if (pszVal)
1931 {
1932 rc = RTStrAPrintf(&pszValQuoted, "\"%s\"", pszVal);
1933 if (RT_FAILURE(rc))
1934 return rc;
1935 }
1936 else
1937 pszValQuoted = NULL;
1938 rc = vmdkDescSetStr(pImage, pDescriptor, pDescriptor->uFirstDDB, pszKey,
1939 pszValQuoted);
1940 if (pszValQuoted)
1941 RTStrFree(pszValQuoted);
1942 return rc;
1943}
1944
1945static int vmdkDescDDBSetUuid(PVMDKIMAGE pImage, PVMDKDESCRIPTOR pDescriptor,
1946 const char *pszKey, PCRTUUID pUuid)
1947{
1948 char *pszUuid;
1949
1950 int rc = RTStrAPrintf(&pszUuid, "\"%RTuuid\"", pUuid);
1951 if (RT_FAILURE(rc))
1952 return rc;
1953 rc = vmdkDescSetStr(pImage, pDescriptor, pDescriptor->uFirstDDB, pszKey,
1954 pszUuid);
1955 RTStrFree(pszUuid);
1956 return rc;
1957}
1958
1959static int vmdkDescDDBSetU32(PVMDKIMAGE pImage, PVMDKDESCRIPTOR pDescriptor,
1960 const char *pszKey, uint32_t uValue)
1961{
1962 char *pszValue;
1963
1964 int rc = RTStrAPrintf(&pszValue, "\"%d\"", uValue);
1965 if (RT_FAILURE(rc))
1966 return rc;
1967 rc = vmdkDescSetStr(pImage, pDescriptor, pDescriptor->uFirstDDB, pszKey,
1968 pszValue);
1969 RTStrFree(pszValue);
1970 return rc;
1971}
1972
/**
 * Internal: split the raw descriptor text into NUL terminated lines,
 * validate the signature line and locate the three descriptor sections
 * (header keys, extent lines, DDB entries).
 *
 * The buffer is modified in place: CRLF/LF line endings are overwritten
 * with NUL, aLines[] receives a pointer to the start of each line and
 * aNextLines[] chains the non-comment lines of each section together.
 *
 * @returns VBox status code.
 * @param   pImage       Image instance data (for error reporting).
 * @param   pDescData    Raw descriptor text; modified in place.
 * @param   cbDescData   Size of the descriptor buffer in bytes.
 * @param   pDescriptor  Descriptor state to initialize.
 */
static int vmdkPreprocessDescriptor(PVMDKIMAGE pImage, char *pDescData,
                                    size_t cbDescData,
                                    PVMDKDESCRIPTOR pDescriptor)
{
    int rc = VINF_SUCCESS;
    unsigned cLine = 0, uLastNonEmptyLine = 0;
    char *pTmp = pDescData;

    pDescriptor->cbDescAlloc = cbDescData;
    /* First pass: split the buffer into NUL terminated lines. */
    while (*pTmp != '\0')
    {
        pDescriptor->aLines[cLine++] = pTmp;
        if (cLine >= VMDK_DESCRIPTOR_LINES_MAX)
        {
            rc = vmdkError(pImage, VERR_VD_VMDK_INVALID_HEADER, RT_SRC_POS, N_("VMDK: descriptor too big in '%s'"), pImage->pszFilename);
            goto out;
        }

        while (*pTmp != '\0' && *pTmp != '\n')
        {
            if (*pTmp == '\r')
            {
                /* Only CRLF endings are accepted; a lone CR is an error. */
                if (*(pTmp + 1) != '\n')
                {
                    rc = vmdkError(pImage, VERR_VD_VMDK_INVALID_HEADER, RT_SRC_POS, N_("VMDK: unsupported end of line in descriptor in '%s'"), pImage->pszFilename);
                    goto out;
                }
                else
                {
                    /* Get rid of CR character. */
                    *pTmp = '\0';
                }
            }
            pTmp++;
        }
        /* Get rid of LF character. */
        if (*pTmp == '\n')
        {
            *pTmp = '\0';
            pTmp++;
        }
    }
    pDescriptor->cLines = cLine;
    /* Pointer right after the end of the used part of the buffer. */
    pDescriptor->aLines[cLine] = pTmp;

    /* The first line must be one of the two known signature variants. */
    if (    strcmp(pDescriptor->aLines[0], "# Disk DescriptorFile")
        &&  strcmp(pDescriptor->aLines[0], "# Disk Descriptor File"))
    {
        rc = vmdkError(pImage, VERR_VD_VMDK_INVALID_HEADER, RT_SRC_POS, N_("VMDK: descriptor does not start as expected in '%s'"), pImage->pszFilename);
        goto out;
    }

    /* Initialize those, because we need to be able to reopen an image. */
    pDescriptor->uFirstDesc = 0;
    pDescriptor->uFirstExtent = 0;
    pDescriptor->uFirstDDB = 0;
    /* Second pass: classify the lines and build the per-section chains.
     * The sections must appear in the order: header, extents, DDB. */
    for (unsigned i = 0; i < cLine; i++)
    {
        /* Comment and empty lines are skipped (and break no chain). */
        if (*pDescriptor->aLines[i] != '#' && *pDescriptor->aLines[i] != '\0')
        {
            if (    !strncmp(pDescriptor->aLines[i], "RW", 2)
                ||  !strncmp(pDescriptor->aLines[i], "RDONLY", 6)
                ||  !strncmp(pDescriptor->aLines[i], "NOACCESS", 8) )
            {
                /* An extent descriptor. */
                if (!pDescriptor->uFirstDesc || pDescriptor->uFirstDDB)
                {
                    /* Incorrect ordering of entries. */
                    rc = vmdkError(pImage, VERR_VD_VMDK_INVALID_HEADER, RT_SRC_POS, N_("VMDK: incorrect ordering of entries in descriptor in '%s'"), pImage->pszFilename);
                    goto out;
                }
                if (!pDescriptor->uFirstExtent)
                {
                    pDescriptor->uFirstExtent = i;
                    /* Start a fresh chain for the new section. */
                    uLastNonEmptyLine = 0;
                }
            }
            else if (!strncmp(pDescriptor->aLines[i], "ddb.", 4))
            {
                /* A disk database entry. */
                if (!pDescriptor->uFirstDesc || !pDescriptor->uFirstExtent)
                {
                    /* Incorrect ordering of entries. */
                    rc = vmdkError(pImage, VERR_VD_VMDK_INVALID_HEADER, RT_SRC_POS, N_("VMDK: incorrect ordering of entries in descriptor in '%s'"), pImage->pszFilename);
                    goto out;
                }
                if (!pDescriptor->uFirstDDB)
                {
                    pDescriptor->uFirstDDB = i;
                    /* Start a fresh chain for the new section. */
                    uLastNonEmptyLine = 0;
                }
            }
            else
            {
                /* A normal entry. */
                if (pDescriptor->uFirstExtent || pDescriptor->uFirstDDB)
                {
                    /* Incorrect ordering of entries. */
                    rc = vmdkError(pImage, VERR_VD_VMDK_INVALID_HEADER, RT_SRC_POS, N_("VMDK: incorrect ordering of entries in descriptor in '%s'"), pImage->pszFilename);
                    goto out;
                }
                if (!pDescriptor->uFirstDesc)
                {
                    pDescriptor->uFirstDesc = i;
                    /* Start a fresh chain for the new section. */
                    uLastNonEmptyLine = 0;
                }
            }
            /* Chain this line to the previous one of the same section. */
            if (uLastNonEmptyLine)
                pDescriptor->aNextLines[uLastNonEmptyLine] = i;
            uLastNonEmptyLine = i;
        }
    }

out:
    return rc;
}
2090
2091static int vmdkDescSetPCHSGeometry(PVMDKIMAGE pImage,
2092 PCVDGEOMETRY pPCHSGeometry)
2093{
2094 int rc = vmdkDescDDBSetU32(pImage, &pImage->Descriptor,
2095 VMDK_DDB_GEO_PCHS_CYLINDERS,
2096 pPCHSGeometry->cCylinders);
2097 if (RT_FAILURE(rc))
2098 return rc;
2099 rc = vmdkDescDDBSetU32(pImage, &pImage->Descriptor,
2100 VMDK_DDB_GEO_PCHS_HEADS,
2101 pPCHSGeometry->cHeads);
2102 if (RT_FAILURE(rc))
2103 return rc;
2104 rc = vmdkDescDDBSetU32(pImage, &pImage->Descriptor,
2105 VMDK_DDB_GEO_PCHS_SECTORS,
2106 pPCHSGeometry->cSectors);
2107 return rc;
2108}
2109
2110static int vmdkDescSetLCHSGeometry(PVMDKIMAGE pImage,
2111 PCVDGEOMETRY pLCHSGeometry)
2112{
2113 int rc = vmdkDescDDBSetU32(pImage, &pImage->Descriptor,
2114 VMDK_DDB_GEO_LCHS_CYLINDERS,
2115 pLCHSGeometry->cCylinders);
2116 if (RT_FAILURE(rc))
2117 return rc;
2118 rc = vmdkDescDDBSetU32(pImage, &pImage->Descriptor,
2119 VMDK_DDB_GEO_LCHS_HEADS,
2120
2121 pLCHSGeometry->cHeads);
2122 if (RT_FAILURE(rc))
2123 return rc;
2124 rc = vmdkDescDDBSetU32(pImage, &pImage->Descriptor,
2125 VMDK_DDB_GEO_LCHS_SECTORS,
2126 pLCHSGeometry->cSectors);
2127 return rc;
2128}
2129
2130static int vmdkCreateDescriptor(PVMDKIMAGE pImage, char *pDescData,
2131 size_t cbDescData, PVMDKDESCRIPTOR pDescriptor)
2132{
2133 int rc;
2134
2135 pDescriptor->uFirstDesc = 0;
2136 pDescriptor->uFirstExtent = 0;
2137 pDescriptor->uFirstDDB = 0;
2138 pDescriptor->cLines = 0;
2139 pDescriptor->cbDescAlloc = cbDescData;
2140 pDescriptor->fDirty = false;
2141 pDescriptor->aLines[pDescriptor->cLines] = pDescData;
2142 memset(pDescriptor->aNextLines, '\0', sizeof(pDescriptor->aNextLines));
2143
2144 rc = vmdkDescInitStr(pImage, pDescriptor, "# Disk DescriptorFile");
2145 if (RT_FAILURE(rc))
2146 goto out;
2147 rc = vmdkDescInitStr(pImage, pDescriptor, "version=1");
2148 if (RT_FAILURE(rc))
2149 goto out;
2150 pDescriptor->uFirstDesc = pDescriptor->cLines - 1;
2151 rc = vmdkDescInitStr(pImage, pDescriptor, "");
2152 if (RT_FAILURE(rc))
2153 goto out;
2154 rc = vmdkDescInitStr(pImage, pDescriptor, "# Extent description");
2155 if (RT_FAILURE(rc))
2156 goto out;
2157 rc = vmdkDescInitStr(pImage, pDescriptor, "NOACCESS 0 ZERO ");
2158 if (RT_FAILURE(rc))
2159 goto out;
2160 pDescriptor->uFirstExtent = pDescriptor->cLines - 1;
2161 rc = vmdkDescInitStr(pImage, pDescriptor, "");
2162 if (RT_FAILURE(rc))
2163 goto out;
2164 /* The trailing space is created by VMware, too. */
2165 rc = vmdkDescInitStr(pImage, pDescriptor, "# The disk Data Base ");
2166 if (RT_FAILURE(rc))
2167 goto out;
2168 rc = vmdkDescInitStr(pImage, pDescriptor, "#DDB");
2169 if (RT_FAILURE(rc))
2170 goto out;
2171 rc = vmdkDescInitStr(pImage, pDescriptor, "");
2172 if (RT_FAILURE(rc))
2173 goto out;
2174 rc = vmdkDescInitStr(pImage, pDescriptor, "ddb.virtualHWVersion = \"4\"");
2175 if (RT_FAILURE(rc))
2176 goto out;
2177 pDescriptor->uFirstDDB = pDescriptor->cLines - 1;
2178
2179 /* Now that the framework is in place, use the normal functions to insert
2180 * the remaining keys. */
2181 char szBuf[9];
2182 RTStrPrintf(szBuf, sizeof(szBuf), "%08x", RTRandU32());
2183 rc = vmdkDescSetStr(pImage, pDescriptor, pDescriptor->uFirstDesc,
2184 "CID", szBuf);
2185 if (RT_FAILURE(rc))
2186 goto out;
2187 rc = vmdkDescSetStr(pImage, pDescriptor, pDescriptor->uFirstDesc,
2188 "parentCID", "ffffffff");
2189 if (RT_FAILURE(rc))
2190 goto out;
2191
2192 rc = vmdkDescDDBSetStr(pImage, pDescriptor, "ddb.adapterType", "ide");
2193 if (RT_FAILURE(rc))
2194 goto out;
2195
2196out:
2197 return rc;
2198}
2199
/**
 * Internal: parse a preprocessed descriptor and fill in the image state.
 *
 * Validates the descriptor version, derives the image flags from the
 * createType key, parses every extent line (access, size, type, basename,
 * optional offset), reads the PCHS/LCHS geometry from the disk data base
 * and loads (creating them if missing and the image is writable) the
 * image/modification/parent UUIDs.
 *
 * @returns VBox status code.
 * @param   pImage      Image instance data.
 * @param   pDescData   Raw descriptor text (split in place by the
 *                      preprocessor).
 * @param   cbDescData  Size of the buffer pDescData points to.
 */
static int vmdkParseDescriptor(PVMDKIMAGE pImage, char *pDescData,
                               size_t cbDescData)
{
    int rc;
    unsigned cExtents;
    unsigned uLine;
    unsigned i;

    rc = vmdkPreprocessDescriptor(pImage, pDescData, cbDescData,
                                  &pImage->Descriptor);
    if (RT_FAILURE(rc))
        return rc;

    /* Check version, must be 1. */
    uint32_t uVersion;
    rc = vmdkDescBaseGetU32(&pImage->Descriptor, "version", &uVersion);
    if (RT_FAILURE(rc))
        return vmdkError(pImage, rc, RT_SRC_POS, N_("VMDK: error finding key 'version' in descriptor in '%s'"), pImage->pszFilename);
    if (uVersion != 1)
        return vmdkError(pImage, VERR_VD_VMDK_UNSUPPORTED_VERSION, RT_SRC_POS, N_("VMDK: unsupported format version in descriptor in '%s'"), pImage->pszFilename);

    /* Get image creation type and determine image flags. */
    const char *pszCreateType = NULL; /* initialized to make gcc shut up */
    rc = vmdkDescBaseGetStr(pImage, &pImage->Descriptor, "createType",
                            &pszCreateType);
    if (RT_FAILURE(rc))
        return vmdkError(pImage, rc, RT_SRC_POS, N_("VMDK: cannot get image type from descriptor in '%s'"), pImage->pszFilename);
    if (    !strcmp(pszCreateType, "twoGbMaxExtentSparse")
        ||  !strcmp(pszCreateType, "twoGbMaxExtentFlat"))
        pImage->uImageFlags |= VD_VMDK_IMAGE_FLAGS_SPLIT_2G;
    else if (   !strcmp(pszCreateType, "partitionedDevice")
             || !strcmp(pszCreateType, "fullDevice"))
        pImage->uImageFlags |= VD_VMDK_IMAGE_FLAGS_RAWDISK;
    else if (!strcmp(pszCreateType, "streamOptimized"))
        pImage->uImageFlags |= VD_VMDK_IMAGE_FLAGS_STREAM_OPTIMIZED;
    else if (!strcmp(pszCreateType, "vmfs"))
        pImage->uImageFlags |= VD_IMAGE_FLAGS_FIXED | VD_VMDK_IMAGE_FLAGS_ESX;
    /* The string was allocated by the getter; drop the const to free it. */
    RTStrFree((char *)(void *)pszCreateType);

    /* Count the number of extent config entries. */
    for (uLine = pImage->Descriptor.uFirstExtent, cExtents = 0;
         uLine != 0;
         uLine = pImage->Descriptor.aNextLines[uLine], cExtents++)
        /* nothing */;

    if (!pImage->pDescData && cExtents != 1)
    {
        /* Monolithic image, must have only one extent (already opened). */
        return vmdkError(pImage, VERR_VD_VMDK_INVALID_HEADER, RT_SRC_POS, N_("VMDK: monolithic image may only have one extent in '%s'"), pImage->pszFilename);
    }

    if (pImage->pDescData)
    {
        /* Non-monolithic image, extents need to be allocated. */
        rc = vmdkCreateExtents(pImage, cExtents);
        if (RT_FAILURE(rc))
            return rc;
    }

    /* Parse each extent line. Format:
     *   <access> <size in sectors> <type> ["<basename>" [<offset>]] */
    for (i = 0, uLine = pImage->Descriptor.uFirstExtent;
         i < cExtents; i++, uLine = pImage->Descriptor.aNextLines[uLine])
    {
        char *pszLine = pImage->Descriptor.aLines[uLine];

        /* Access type of the extent. */
        if (!strncmp(pszLine, "RW", 2))
        {
            pImage->pExtents[i].enmAccess = VMDKACCESS_READWRITE;
            pszLine += 2;
        }
        else if (!strncmp(pszLine, "RDONLY", 6))
        {
            pImage->pExtents[i].enmAccess = VMDKACCESS_READONLY;
            pszLine += 6;
        }
        else if (!strncmp(pszLine, "NOACCESS", 8))
        {
            pImage->pExtents[i].enmAccess = VMDKACCESS_NOACCESS;
            pszLine += 8;
        }
        else
            return vmdkError(pImage, VERR_VD_VMDK_INVALID_HEADER, RT_SRC_POS, N_("VMDK: parse error in extent description in '%s'"), pImage->pszFilename);
        if (*pszLine++ != ' ')
            return vmdkError(pImage, VERR_VD_VMDK_INVALID_HEADER, RT_SRC_POS, N_("VMDK: parse error in extent description in '%s'"), pImage->pszFilename);

        /* Nominal size of the extent. */
        rc = RTStrToUInt64Ex(pszLine, &pszLine, 10,
                             &pImage->pExtents[i].cNominalSectors);
        if (RT_FAILURE(rc))
            return vmdkError(pImage, VERR_VD_VMDK_INVALID_HEADER, RT_SRC_POS, N_("VMDK: parse error in extent description in '%s'"), pImage->pszFilename);
        if (*pszLine++ != ' ')
            return vmdkError(pImage, VERR_VD_VMDK_INVALID_HEADER, RT_SRC_POS, N_("VMDK: parse error in extent description in '%s'"), pImage->pszFilename);

        /* Type of the extent. */
#ifdef VBOX_WITH_VMDK_ESX
        /** @todo Add the ESX extent types. Not necessary for now because
         * the ESX extent types are only used inside an ESX server. They are
         * automatically converted if the VMDK is exported. */
#endif /* VBOX_WITH_VMDK_ESX */
        if (!strncmp(pszLine, "SPARSE", 6))
        {
            pImage->pExtents[i].enmType = VMDKETYPE_HOSTED_SPARSE;
            pszLine += 6;
        }
        else if (!strncmp(pszLine, "FLAT", 4))
        {
            pImage->pExtents[i].enmType = VMDKETYPE_FLAT;
            pszLine += 4;
        }
        else if (!strncmp(pszLine, "ZERO", 4))
        {
            pImage->pExtents[i].enmType = VMDKETYPE_ZERO;
            pszLine += 4;
        }
        else if (!strncmp(pszLine, "VMFS", 4))
        {
            pImage->pExtents[i].enmType = VMDKETYPE_VMFS;
            pszLine += 4;
        }
        else
            return vmdkError(pImage, VERR_VD_VMDK_INVALID_HEADER, RT_SRC_POS, N_("VMDK: parse error in extent description in '%s'"), pImage->pszFilename);

        if (pImage->pExtents[i].enmType == VMDKETYPE_ZERO)
        {
            /* This one has no basename or offset. */
            if (*pszLine == ' ')
                pszLine++;
            if (*pszLine != '\0')
                return vmdkError(pImage, VERR_VD_VMDK_INVALID_HEADER, RT_SRC_POS, N_("VMDK: parse error in extent description in '%s'"), pImage->pszFilename);
            pImage->pExtents[i].pszBasename = NULL;
        }
        else
        {
            /* All other extent types have basename and optional offset. */
            if (*pszLine++ != ' ')
                return vmdkError(pImage, VERR_VD_VMDK_INVALID_HEADER, RT_SRC_POS, N_("VMDK: parse error in extent description in '%s'"), pImage->pszFilename);

            /* Basename of the image. Surrounded by quotes. */
            char *pszBasename;
            rc = vmdkStringUnquote(pImage, pszLine, &pszBasename, &pszLine);
            if (RT_FAILURE(rc))
                return rc;
            pImage->pExtents[i].pszBasename = pszBasename;
            if (*pszLine == ' ')
            {
                pszLine++;
                if (*pszLine != '\0')
                {
                    /* Optional offset in extent specified. */
                    rc = RTStrToUInt64Ex(pszLine, &pszLine, 10,
                                         &pImage->pExtents[i].uSectorOffset);
                    if (RT_FAILURE(rc))
                        return vmdkError(pImage, VERR_VD_VMDK_INVALID_HEADER, RT_SRC_POS, N_("VMDK: parse error in extent description in '%s'"), pImage->pszFilename);
                }
            }

            if (*pszLine != '\0')
                return vmdkError(pImage, VERR_VD_VMDK_INVALID_HEADER, RT_SRC_POS, N_("VMDK: parse error in extent description in '%s'"), pImage->pszFilename);
        }
    }

    /* Determine PCHS geometry (autogenerate if necessary). */
    rc = vmdkDescDDBGetU32(pImage, &pImage->Descriptor,
                           VMDK_DDB_GEO_PCHS_CYLINDERS,
                           &pImage->PCHSGeometry.cCylinders);
    if (rc == VERR_VD_VMDK_VALUE_NOT_FOUND)
        pImage->PCHSGeometry.cCylinders = 0;
    else if (RT_FAILURE(rc))
        return vmdkError(pImage, rc, RT_SRC_POS, N_("VMDK: error getting PCHS geometry from extent description in '%s'"), pImage->pszFilename);
    rc = vmdkDescDDBGetU32(pImage, &pImage->Descriptor,
                           VMDK_DDB_GEO_PCHS_HEADS,
                           &pImage->PCHSGeometry.cHeads);
    if (rc == VERR_VD_VMDK_VALUE_NOT_FOUND)
        pImage->PCHSGeometry.cHeads = 0;
    else if (RT_FAILURE(rc))
        return vmdkError(pImage, rc, RT_SRC_POS, N_("VMDK: error getting PCHS geometry from extent description in '%s'"), pImage->pszFilename);
    rc = vmdkDescDDBGetU32(pImage, &pImage->Descriptor,
                           VMDK_DDB_GEO_PCHS_SECTORS,
                           &pImage->PCHSGeometry.cSectors);
    if (rc == VERR_VD_VMDK_VALUE_NOT_FOUND)
        pImage->PCHSGeometry.cSectors = 0;
    else if (RT_FAILURE(rc))
        return vmdkError(pImage, rc, RT_SRC_POS, N_("VMDK: error getting PCHS geometry from extent description in '%s'"), pImage->pszFilename);
    /* Sanity limits: at most 16 heads and 63 sectors per track (BIOS CHS). */
    if (    pImage->PCHSGeometry.cCylinders == 0
        ||  pImage->PCHSGeometry.cHeads == 0
        ||  pImage->PCHSGeometry.cHeads > 16
        ||  pImage->PCHSGeometry.cSectors == 0
        ||  pImage->PCHSGeometry.cSectors > 63)
    {
        /* Mark PCHS geometry as not yet valid (can't do the calculation here
         * as the total image size isn't known yet). */
        pImage->PCHSGeometry.cCylinders = 0;
        pImage->PCHSGeometry.cHeads = 16;
        pImage->PCHSGeometry.cSectors = 63;
    }

    /* Determine LCHS geometry (set to 0 if not specified). */
    rc = vmdkDescDDBGetU32(pImage, &pImage->Descriptor,
                           VMDK_DDB_GEO_LCHS_CYLINDERS,
                           &pImage->LCHSGeometry.cCylinders);
    if (rc == VERR_VD_VMDK_VALUE_NOT_FOUND)
        pImage->LCHSGeometry.cCylinders = 0;
    else if (RT_FAILURE(rc))
        return vmdkError(pImage, rc, RT_SRC_POS, N_("VMDK: error getting LCHS geometry from extent description in '%s'"), pImage->pszFilename);
    rc = vmdkDescDDBGetU32(pImage, &pImage->Descriptor,
                           VMDK_DDB_GEO_LCHS_HEADS,
                           &pImage->LCHSGeometry.cHeads);
    if (rc == VERR_VD_VMDK_VALUE_NOT_FOUND)
        pImage->LCHSGeometry.cHeads = 0;
    else if (RT_FAILURE(rc))
        return vmdkError(pImage, rc, RT_SRC_POS, N_("VMDK: error getting LCHS geometry from extent description in '%s'"), pImage->pszFilename);
    rc = vmdkDescDDBGetU32(pImage, &pImage->Descriptor,
                           VMDK_DDB_GEO_LCHS_SECTORS,
                           &pImage->LCHSGeometry.cSectors);
    if (rc == VERR_VD_VMDK_VALUE_NOT_FOUND)
        pImage->LCHSGeometry.cSectors = 0;
    else if (RT_FAILURE(rc))
        return vmdkError(pImage, rc, RT_SRC_POS, N_("VMDK: error getting LCHS geometry from extent description in '%s'"), pImage->pszFilename);
    /* LCHS is all-or-nothing: an incomplete specification counts as absent. */
    if (    pImage->LCHSGeometry.cCylinders == 0
        ||  pImage->LCHSGeometry.cHeads == 0
        ||  pImage->LCHSGeometry.cSectors == 0)
    {
        pImage->LCHSGeometry.cCylinders = 0;
        pImage->LCHSGeometry.cHeads = 0;
        pImage->LCHSGeometry.cSectors = 0;
    }

    /* Get image UUID. */
    rc = vmdkDescDDBGetUuid(pImage, &pImage->Descriptor, VMDK_DDB_IMAGE_UUID,
                            &pImage->ImageUuid);
    if (rc == VERR_VD_VMDK_VALUE_NOT_FOUND)
    {
        /* Image without UUID. Probably created by VMware and not yet used
         * by VirtualBox. Can only be added for images opened in read/write
         * mode, so don't bother producing a sensible UUID otherwise. */
        if (pImage->uOpenFlags & VD_OPEN_FLAGS_READONLY)
            RTUuidClear(&pImage->ImageUuid);
        else
        {
            rc = RTUuidCreate(&pImage->ImageUuid);
            if (RT_FAILURE(rc))
                return rc;
            rc = vmdkDescDDBSetUuid(pImage, &pImage->Descriptor,
                                    VMDK_DDB_IMAGE_UUID, &pImage->ImageUuid);
            if (RT_FAILURE(rc))
                return vmdkError(pImage, rc, RT_SRC_POS, N_("VMDK: error storing image UUID in descriptor in '%s'"), pImage->pszFilename);
        }
    }
    else if (RT_FAILURE(rc))
        return rc;

    /* Get image modification UUID. */
    rc = vmdkDescDDBGetUuid(pImage, &pImage->Descriptor,
                            VMDK_DDB_MODIFICATION_UUID,
                            &pImage->ModificationUuid);
    if (rc == VERR_VD_VMDK_VALUE_NOT_FOUND)
    {
        /* Image without UUID. Probably created by VMware and not yet used
         * by VirtualBox. Can only be added for images opened in read/write
         * mode, so don't bother producing a sensible UUID otherwise. */
        if (pImage->uOpenFlags & VD_OPEN_FLAGS_READONLY)
            RTUuidClear(&pImage->ModificationUuid);
        else
        {
            rc = RTUuidCreate(&pImage->ModificationUuid);
            if (RT_FAILURE(rc))
                return rc;
            rc = vmdkDescDDBSetUuid(pImage, &pImage->Descriptor,
                                    VMDK_DDB_MODIFICATION_UUID,
                                    &pImage->ModificationUuid);
            if (RT_FAILURE(rc))
                return vmdkError(pImage, rc, RT_SRC_POS, N_("VMDK: error storing image modification UUID in descriptor in '%s'"), pImage->pszFilename);
        }
    }
    else if (RT_FAILURE(rc))
        return rc;

    /* Get UUID of parent image. */
    rc = vmdkDescDDBGetUuid(pImage, &pImage->Descriptor, VMDK_DDB_PARENT_UUID,
                            &pImage->ParentUuid);
    if (rc == VERR_VD_VMDK_VALUE_NOT_FOUND)
    {
        /* Image without UUID. Probably created by VMware and not yet used
         * by VirtualBox. Can only be added for images opened in read/write
         * mode, so don't bother producing a sensible UUID otherwise. */
        if (pImage->uOpenFlags & VD_OPEN_FLAGS_READONLY)
            RTUuidClear(&pImage->ParentUuid);
        else
        {
            /* Deliberately a clear (nil) UUID here - the parent is unknown,
             * unlike the image/modification UUIDs which get freshly created. */
            rc = RTUuidClear(&pImage->ParentUuid);
            if (RT_FAILURE(rc))
                return rc;
            rc = vmdkDescDDBSetUuid(pImage, &pImage->Descriptor,
                                    VMDK_DDB_PARENT_UUID, &pImage->ParentUuid);
            if (RT_FAILURE(rc))
                return vmdkError(pImage, rc, RT_SRC_POS, N_("VMDK: error storing parent UUID in descriptor in '%s'"), pImage->pszFilename);
        }
    }
    else if (RT_FAILURE(rc))
        return rc;

    /* Get parent image modification UUID. */
    rc = vmdkDescDDBGetUuid(pImage, &pImage->Descriptor,
                            VMDK_DDB_PARENT_MODIFICATION_UUID,
                            &pImage->ParentModificationUuid);
    if (rc == VERR_VD_VMDK_VALUE_NOT_FOUND)
    {
        /* Image without UUID. Probably created by VMware and not yet used
         * by VirtualBox. Can only be added for images opened in read/write
         * mode, so don't bother producing a sensible UUID otherwise. */
        if (pImage->uOpenFlags & VD_OPEN_FLAGS_READONLY)
            RTUuidClear(&pImage->ParentModificationUuid);
        else
        {
            rc = RTUuidCreate(&pImage->ParentModificationUuid);
            if (RT_FAILURE(rc))
                return rc;
            rc = vmdkDescDDBSetUuid(pImage, &pImage->Descriptor,
                                    VMDK_DDB_PARENT_MODIFICATION_UUID,
                                    &pImage->ParentModificationUuid);
            if (RT_FAILURE(rc))
                return vmdkError(pImage, rc, RT_SRC_POS, N_("VMDK: error storing parent modification UUID in descriptor in '%s'"), pImage->pszFilename);
        }
    }
    else if (RT_FAILURE(rc))
        return rc;

    return VINF_SUCCESS;
}
2529
2530/**
2531 * Internal: write/update the descriptor part of the image.
2532 */
2533static int vmdkWriteDescriptor(PVMDKIMAGE pImage)
2534{
2535 int rc = VINF_SUCCESS;
2536 uint64_t cbLimit;
2537 uint64_t uOffset;
2538 PVMDKFILE pDescFile;
2539
2540 if (pImage->pDescData)
2541 {
2542 /* Separate descriptor file. */
2543 uOffset = 0;
2544 cbLimit = 0;
2545 pDescFile = pImage->pFile;
2546 }
2547 else
2548 {
2549 /* Embedded descriptor file. */
2550 uOffset = VMDK_SECTOR2BYTE(pImage->pExtents[0].uDescriptorSector);
2551 cbLimit = VMDK_SECTOR2BYTE(pImage->pExtents[0].cDescriptorSectors);
2552 pDescFile = pImage->pExtents[0].pFile;
2553 }
2554 /* Bail out if there is no file to write to. */
2555 if (pDescFile == NULL)
2556 return VERR_INVALID_PARAMETER;
2557
2558 /*
2559 * Allocate temporary descriptor buffer.
2560 * In case there is no limit allocate a default
2561 * and increase if required.
2562 */
2563 size_t cbDescriptor = cbLimit ? cbLimit : 4 * _1K;
2564 char *pszDescriptor = (char *)RTMemAllocZ(cbDescriptor);
2565 unsigned offDescriptor = 0;
2566
2567 if (!pszDescriptor)
2568 return VERR_NO_MEMORY;
2569
2570 for (unsigned i = 0; i < pImage->Descriptor.cLines; i++)
2571 {
2572 const char *psz = pImage->Descriptor.aLines[i];
2573 size_t cb = strlen(psz);
2574
2575 /*
2576 * Increase the descriptor if there is no limit and
2577 * there is not enough room left for this line.
2578 */
2579 if (offDescriptor + cb + 1 > cbDescriptor)
2580 {
2581 if (cbLimit)
2582 {
2583 rc = vmdkError(pImage, VERR_BUFFER_OVERFLOW, RT_SRC_POS, N_("VMDK: descriptor too long in '%s'"), pImage->pszFilename);
2584 break;
2585 }
2586 else
2587 {
2588 char *pszDescriptorNew = NULL;
2589 LogFlow(("Increasing descriptor cache\n"));
2590
2591 pszDescriptorNew = (char *)RTMemRealloc(pszDescriptor, cbDescriptor + cb + 4 * _1K);
2592 if (!pszDescriptorNew)
2593 {
2594 rc = VERR_NO_MEMORY;
2595 break;
2596 }
2597 pszDescriptorNew = pszDescriptor;
2598 cbDescriptor += cb + 4 * _1K;
2599 }
2600 }
2601
2602 if (cb > 0)
2603 {
2604 memcpy(pszDescriptor + offDescriptor, psz, cb);
2605 offDescriptor += cb;
2606 }
2607
2608 memcpy(pszDescriptor + offDescriptor, "\n", 1);
2609 offDescriptor++;
2610 }
2611
2612 if (RT_SUCCESS(rc))
2613 {
2614 rc = vmdkFileWriteSync(pImage, pDescFile, uOffset, pszDescriptor, cbLimit ? cbLimit : offDescriptor, NULL);
2615 if (RT_FAILURE(rc))
2616 rc = vmdkError(pImage, rc, RT_SRC_POS, N_("VMDK: error writing descriptor in '%s'"), pImage->pszFilename);
2617 }
2618
2619 if (RT_SUCCESS(rc) && !cbLimit)
2620 {
2621 rc = vmdkFileSetSize(pImage, pDescFile, offDescriptor);
2622 if (RT_FAILURE(rc))
2623 rc = vmdkError(pImage, rc, RT_SRC_POS, N_("VMDK: error truncating descriptor in '%s'"), pImage->pszFilename);
2624 }
2625
2626 if (RT_SUCCESS(rc))
2627 pImage->Descriptor.fDirty = false;
2628
2629 RTMemFree(pszDescriptor);
2630 return rc;
2631}
2632
2633/**
2634 * Internal: write/update the descriptor part of the image - async version.
2635 */
2636static int vmdkWriteDescriptorAsync(PVMDKIMAGE pImage, PVDIOCTX pIoCtx)
2637{
2638 int rc = VINF_SUCCESS;
2639 uint64_t cbLimit;
2640 uint64_t uOffset;
2641 PVMDKFILE pDescFile;
2642
2643 if (pImage->pDescData)
2644 {
2645 /* Separate descriptor file. */
2646 uOffset = 0;
2647 cbLimit = 0;
2648 pDescFile = pImage->pFile;
2649 }
2650 else
2651 {
2652 /* Embedded descriptor file. */
2653 uOffset = VMDK_SECTOR2BYTE(pImage->pExtents[0].uDescriptorSector);
2654 cbLimit = VMDK_SECTOR2BYTE(pImage->pExtents[0].cDescriptorSectors);
2655 pDescFile = pImage->pExtents[0].pFile;
2656 }
2657 /* Bail out if there is no file to write to. */
2658 if (pDescFile == NULL)
2659 return VERR_INVALID_PARAMETER;
2660
2661 /*
2662 * Allocate temporary descriptor buffer.
2663 * In case there is no limit allocate a default
2664 * and increase if required.
2665 */
2666 size_t cbDescriptor = cbLimit ? cbLimit : 4 * _1K;
2667 char *pszDescriptor = (char *)RTMemAllocZ(cbDescriptor);
2668 unsigned offDescriptor = 0;
2669
2670 if (!pszDescriptor)
2671 return VERR_NO_MEMORY;
2672
2673 for (unsigned i = 0; i < pImage->Descriptor.cLines; i++)
2674 {
2675 const char *psz = pImage->Descriptor.aLines[i];
2676 size_t cb = strlen(psz);
2677
2678 /*
2679 * Increase the descriptor if there is no limit and
2680 * there is not enough room left for this line.
2681 */
2682 if (offDescriptor + cb + 1 > cbDescriptor)
2683 {
2684 if (cbLimit)
2685 {
2686 rc = vmdkError(pImage, VERR_BUFFER_OVERFLOW, RT_SRC_POS, N_("VMDK: descriptor too long in '%s'"), pImage->pszFilename);
2687 break;
2688 }
2689 else
2690 {
2691 char *pszDescriptorNew = NULL;
2692 LogFlow(("Increasing descriptor cache\n"));
2693
2694 pszDescriptorNew = (char *)RTMemRealloc(pszDescriptor, cbDescriptor + cb + 4 * _1K);
2695 if (!pszDescriptorNew)
2696 {
2697 rc = VERR_NO_MEMORY;
2698 break;
2699 }
2700 pszDescriptorNew = pszDescriptor;
2701 cbDescriptor += cb + 4 * _1K;
2702 }
2703 }
2704
2705 if (cb > 0)
2706 {
2707 memcpy(pszDescriptor + offDescriptor, psz, cb);
2708 offDescriptor += cb;
2709 }
2710
2711 memcpy(pszDescriptor + offDescriptor, "\n", 1);
2712 offDescriptor++;
2713 }
2714
2715 if (RT_SUCCESS(rc))
2716 {
2717 rc = vmdkFileWriteSync(pImage, pDescFile, uOffset, pszDescriptor, cbLimit ? cbLimit : offDescriptor, NULL);
2718 if (RT_FAILURE(rc))
2719 rc = vmdkError(pImage, rc, RT_SRC_POS, N_("VMDK: error writing descriptor in '%s'"), pImage->pszFilename);
2720 }
2721
2722 if (RT_SUCCESS(rc) && !cbLimit)
2723 {
2724 rc = vmdkFileSetSize(pImage, pDescFile, offDescriptor);
2725 if (RT_FAILURE(rc))
2726 rc = vmdkError(pImage, rc, RT_SRC_POS, N_("VMDK: error truncating descriptor in '%s'"), pImage->pszFilename);
2727 }
2728
2729 if (RT_SUCCESS(rc))
2730 pImage->Descriptor.fDirty = false;
2731
2732 RTMemFree(pszDescriptor);
2733 return rc;
2734
2735}
2736
2737/**
2738 * Internal: validate the consistency check values in a binary header.
2739 */
2740static int vmdkValidateHeader(PVMDKIMAGE pImage, PVMDKEXTENT pExtent, const SparseExtentHeader *pHeader)
2741{
2742 int rc = VINF_SUCCESS;
2743 if (RT_LE2H_U32(pHeader->magicNumber) != VMDK_SPARSE_MAGICNUMBER)
2744 {
2745 rc = vmdkError(pImage, VERR_VD_VMDK_INVALID_HEADER, RT_SRC_POS, N_("VMDK: incorrect magic in sparse extent header in '%s'"), pExtent->pszFullname);
2746 return rc;
2747 }
2748 if (RT_LE2H_U32(pHeader->version) != 1 && RT_LE2H_U32(pHeader->version) != 3)
2749 {
2750 rc = vmdkError(pImage, VERR_VD_VMDK_UNSUPPORTED_VERSION, RT_SRC_POS, N_("VMDK: incorrect version in sparse extent header in '%s', not a VMDK 1.0/1.1 conforming file"), pExtent->pszFullname);
2751 return rc;
2752 }
2753 if ( (RT_LE2H_U32(pHeader->flags) & 1)
2754 && ( pHeader->singleEndLineChar != '\n'
2755 || pHeader->nonEndLineChar != ' '
2756 || pHeader->doubleEndLineChar1 != '\r'
2757 || pHeader->doubleEndLineChar2 != '\n') )
2758 {
2759 rc = vmdkError(pImage, VERR_VD_VMDK_INVALID_HEADER, RT_SRC_POS, N_("VMDK: corrupted by CR/LF translation in '%s'"), pExtent->pszFullname);
2760 return rc;
2761 }
2762 return rc;
2763}
2764
2765/**
2766 * Internal: read metadata belonging to an extent with binary header, i.e.
2767 * as found in monolithic files.
2768 */
2769static int vmdkReadBinaryMetaExtent(PVMDKIMAGE pImage, PVMDKEXTENT pExtent)
2770{
2771 SparseExtentHeader Header;
2772 uint64_t cSectorsPerGDE;
2773
2774 int rc = vmdkFileReadSync(pImage, pExtent->pFile, 0, &Header, sizeof(Header), NULL);
2775 AssertRC(rc);
2776 if (RT_FAILURE(rc))
2777 {
2778 rc = vmdkError(pImage, rc, RT_SRC_POS, N_("VMDK: error reading extent header in '%s'"), pExtent->pszFullname);
2779 goto out;
2780 }
2781 rc = vmdkValidateHeader(pImage, pExtent, &Header);
2782 if (RT_FAILURE(rc))
2783 goto out;
2784 if ( RT_LE2H_U32(Header.flags & RT_BIT(17))
2785 && RT_LE2H_U64(Header.gdOffset) == VMDK_GD_AT_END)
2786 {
2787 /* Read the footer, which isn't compressed and comes before the
2788 * end-of-stream marker. This is bending the VMDK 1.1 spec, but that's
2789 * VMware reality. Theory and practice have very little in common. */
2790 uint64_t cbSize;
2791 rc = vmdkFileGetSize(pImage, pExtent->pFile, &cbSize);
2792 AssertRC(rc);
2793 if (RT_FAILURE(rc))
2794 {
2795 rc = vmdkError(pImage, rc, RT_SRC_POS, N_("VMDK: cannot get size of '%s'"), pExtent->pszFullname);
2796 goto out;
2797 }
2798 cbSize = RT_ALIGN_64(cbSize, 512);
2799 rc = vmdkFileReadSync(pImage, pExtent->pFile, cbSize - 2*512, &Header, sizeof(Header), NULL);
2800 AssertRC(rc);
2801 if (RT_FAILURE(rc))
2802 {
2803 rc = vmdkError(pImage, rc, RT_SRC_POS, N_("VMDK: error reading extent footer in '%s'"), pExtent->pszFullname);
2804 goto out;
2805 }
2806 rc = vmdkValidateHeader(pImage, pExtent, &Header);
2807 if (RT_FAILURE(rc))
2808 goto out;
2809 pExtent->fFooter = true;
2810 }
2811 pExtent->uVersion = RT_LE2H_U32(Header.version);
2812 pExtent->enmType = VMDKETYPE_HOSTED_SPARSE; /* Just dummy value, changed later. */
2813 pExtent->cSectors = RT_LE2H_U64(Header.capacity);
2814 pExtent->cSectorsPerGrain = RT_LE2H_U64(Header.grainSize);
2815 pExtent->uDescriptorSector = RT_LE2H_U64(Header.descriptorOffset);
2816 pExtent->cDescriptorSectors = RT_LE2H_U64(Header.descriptorSize);
2817 if (pExtent->uDescriptorSector && !pExtent->cDescriptorSectors)
2818 {
2819 rc = vmdkError(pImage, VERR_VD_VMDK_INVALID_HEADER, RT_SRC_POS, N_("VMDK: inconsistent embedded descriptor config in '%s'"), pExtent->pszFullname);
2820 goto out;
2821 }
2822 pExtent->cGTEntries = RT_LE2H_U32(Header.numGTEsPerGT);
2823 if (RT_LE2H_U32(Header.flags) & RT_BIT(1))
2824 {
2825 pExtent->uSectorRGD = RT_LE2H_U64(Header.rgdOffset);
2826 pExtent->uSectorGD = RT_LE2H_U64(Header.gdOffset);
2827 }
2828 else
2829 {
2830 pExtent->uSectorGD = RT_LE2H_U64(Header.gdOffset);
2831 pExtent->uSectorRGD = 0;
2832 }
2833 if (pExtent->uSectorGD == VMDK_GD_AT_END || pExtent->uSectorRGD == VMDK_GD_AT_END)
2834 {
2835 rc = vmdkError(pImage, VERR_VD_VMDK_INVALID_HEADER, RT_SRC_POS, N_("VMDK: cannot resolve grain directory offset in '%s'"), pExtent->pszFullname);
2836 goto out;
2837 }
2838 pExtent->cOverheadSectors = RT_LE2H_U64(Header.overHead);
2839 pExtent->fUncleanShutdown = !!Header.uncleanShutdown;
2840 pExtent->uCompression = RT_LE2H_U16(Header.compressAlgorithm);
2841 cSectorsPerGDE = pExtent->cGTEntries * pExtent->cSectorsPerGrain;
2842 if (!cSectorsPerGDE || cSectorsPerGDE > UINT32_MAX)
2843 {
2844 rc = vmdkError(pImage, VERR_VD_VMDK_INVALID_HEADER, RT_SRC_POS, N_("VMDK: incorrect grain directory size in '%s'"), pExtent->pszFullname);
2845 goto out;
2846 }
2847 pExtent->cSectorsPerGDE = cSectorsPerGDE;
2848 pExtent->cGDEntries = (pExtent->cSectors + cSectorsPerGDE - 1) / cSectorsPerGDE;
2849
2850 /* Fix up the number of descriptor sectors, as some flat images have
2851 * really just one, and this causes failures when inserting the UUID
2852 * values and other extra information. */
2853 if (pExtent->cDescriptorSectors != 0 && pExtent->cDescriptorSectors < 4)
2854 {
2855 /* Do it the easy way - just fix it for flat images which have no
2856 * other complicated metadata which needs space too. */
2857 if ( pExtent->uDescriptorSector + 4 < pExtent->cOverheadSectors
2858 && pExtent->cGTEntries * pExtent->cGDEntries == 0)
2859 pExtent->cDescriptorSectors = 4;
2860 }
2861
2862out:
2863 if (RT_FAILURE(rc))
2864 vmdkFreeExtentData(pImage, pExtent, false);
2865
2866 return rc;
2867}
2868
2869/**
2870 * Internal: read additional metadata belonging to an extent. For those
2871 * extents which have no additional metadata just verify the information.
2872 */
2873static int vmdkReadMetaExtent(PVMDKIMAGE pImage, PVMDKEXTENT pExtent)
2874{
2875 int rc = VINF_SUCCESS;
2876 uint64_t cbExtentSize;
2877
2878 /* The image must be a multiple of a sector in size and contain the data
2879 * area (flat images only). If not, it means the image is at least
2880 * truncated, or even seriously garbled. */
2881 rc = vmdkFileGetSize(pImage, pExtent->pFile, &cbExtentSize);
2882 if (RT_FAILURE(rc))
2883 {
2884 rc = vmdkError(pImage, rc, RT_SRC_POS, N_("VMDK: error getting size in '%s'"), pExtent->pszFullname);
2885 goto out;
2886 }
2887/* disabled the check as there are too many truncated vmdk images out there */
2888#ifdef VBOX_WITH_VMDK_STRICT_SIZE_CHECK
2889 if ( cbExtentSize != RT_ALIGN_64(cbExtentSize, 512)
2890 && (pExtent->enmType != VMDKETYPE_FLAT || pExtent->cNominalSectors + pExtent->uSectorOffset > VMDK_BYTE2SECTOR(cbExtentSize)))
2891 {
2892 rc = vmdkError(pImage, VERR_VD_VMDK_INVALID_HEADER, RT_SRC_POS, N_("VMDK: file size is not a multiple of 512 in '%s', file is truncated or otherwise garbled"), pExtent->pszFullname);
2893 goto out;
2894 }
2895#endif /* VBOX_WITH_VMDK_STRICT_SIZE_CHECK */
2896 if (pExtent->enmType != VMDKETYPE_HOSTED_SPARSE)
2897 goto out;
2898
2899 /* The spec says that this must be a power of two and greater than 8,
2900 * but probably they meant not less than 8. */
2901 if ( (pExtent->cSectorsPerGrain & (pExtent->cSectorsPerGrain - 1))
2902 || pExtent->cSectorsPerGrain < 8)
2903 {
2904 rc = vmdkError(pImage, VERR_VD_VMDK_INVALID_HEADER, RT_SRC_POS, N_("VMDK: invalid extent grain size %u in '%s'"), pExtent->cSectorsPerGrain, pExtent->pszFullname);
2905 goto out;
2906 }
2907
2908 /* This code requires that a grain table must hold a power of two multiple
2909 * of the number of entries per GT cache entry. */
2910 if ( (pExtent->cGTEntries & (pExtent->cGTEntries - 1))
2911 || pExtent->cGTEntries < VMDK_GT_CACHELINE_SIZE)
2912 {
2913 rc = vmdkError(pImage, VERR_VD_VMDK_INVALID_HEADER, RT_SRC_POS, N_("VMDK: grain table cache size problem in '%s'"), pExtent->pszFullname);
2914 goto out;
2915 }
2916
2917 rc = vmdkReadGrainDirectory(pImage, pExtent);
2918
2919out:
2920 if (RT_FAILURE(rc))
2921 vmdkFreeExtentData(pImage, pExtent, false);
2922
2923 return rc;
2924}
2925
2926/**
2927 * Internal: write/update the metadata for a sparse extent.
2928 */
2929static int vmdkWriteMetaSparseExtent(PVMDKIMAGE pImage, PVMDKEXTENT pExtent,
2930 uint64_t uOffset)
2931{
2932 SparseExtentHeader Header;
2933
2934 memset(&Header, '\0', sizeof(Header));
2935 Header.magicNumber = RT_H2LE_U32(VMDK_SPARSE_MAGICNUMBER);
2936 Header.version = RT_H2LE_U32(pExtent->uVersion);
2937 Header.flags = RT_H2LE_U32(RT_BIT(0));
2938 if (pExtent->pRGD)
2939 Header.flags |= RT_H2LE_U32(RT_BIT(1));
2940 if (pImage->uImageFlags & VD_VMDK_IMAGE_FLAGS_STREAM_OPTIMIZED)
2941 Header.flags |= RT_H2LE_U32(RT_BIT(16) | RT_BIT(17));
2942 Header.capacity = RT_H2LE_U64(pExtent->cSectors);
2943 Header.grainSize = RT_H2LE_U64(pExtent->cSectorsPerGrain);
2944 Header.descriptorOffset = RT_H2LE_U64(pExtent->uDescriptorSector);
2945 Header.descriptorSize = RT_H2LE_U64(pExtent->cDescriptorSectors);
2946 Header.numGTEsPerGT = RT_H2LE_U32(pExtent->cGTEntries);
2947 if (pExtent->fFooter && uOffset == 0)
2948 {
2949 if (pExtent->pRGD)
2950 {
2951 Assert(pExtent->uSectorRGD);
2952 Header.rgdOffset = RT_H2LE_U64(VMDK_GD_AT_END);
2953 Header.gdOffset = RT_H2LE_U64(VMDK_GD_AT_END);
2954 }
2955 else
2956 {
2957 Header.gdOffset = RT_H2LE_U64(VMDK_GD_AT_END);
2958 }
2959 }
2960 else
2961 {
2962 if (pExtent->pRGD)
2963 {
2964 Assert(pExtent->uSectorRGD);
2965 Header.rgdOffset = RT_H2LE_U64(pExtent->uSectorRGD);
2966 Header.gdOffset = RT_H2LE_U64(pExtent->uSectorGD);
2967 }
2968 else
2969 {
2970 Header.gdOffset = RT_H2LE_U64(pExtent->uSectorGD);
2971 }
2972 }
2973 Header.overHead = RT_H2LE_U64(pExtent->cOverheadSectors);
2974 Header.uncleanShutdown = pExtent->fUncleanShutdown;
2975 Header.singleEndLineChar = '\n';
2976 Header.nonEndLineChar = ' ';
2977 Header.doubleEndLineChar1 = '\r';
2978 Header.doubleEndLineChar2 = '\n';
2979 Header.compressAlgorithm = RT_H2LE_U16(pExtent->uCompression);
2980
2981 int rc = vmdkFileWriteSync(pImage, pExtent->pFile, uOffset, &Header, sizeof(Header), NULL);
2982 AssertRC(rc);
2983 if (RT_FAILURE(rc))
2984 rc = vmdkError(pImage, rc, RT_SRC_POS, N_("VMDK: error writing extent header in '%s'"), pExtent->pszFullname);
2985 return rc;
2986}
2987
2988/**
2989 * Internal: write/update the metadata for a sparse extent - async version.
2990 */
2991static int vmdkWriteMetaSparseExtentAsync(PVMDKIMAGE pImage, PVMDKEXTENT pExtent,
2992 uint64_t uOffset, PVDIOCTX pIoCtx)
2993{
2994 SparseExtentHeader Header;
2995
2996 memset(&Header, '\0', sizeof(Header));
2997 Header.magicNumber = RT_H2LE_U32(VMDK_SPARSE_MAGICNUMBER);
2998 Header.version = RT_H2LE_U32(pExtent->uVersion);
2999 Header.flags = RT_H2LE_U32(RT_BIT(0));
3000 if (pExtent->pRGD)
3001 Header.flags |= RT_H2LE_U32(RT_BIT(1));
3002 if (pImage->uImageFlags & VD_VMDK_IMAGE_FLAGS_STREAM_OPTIMIZED)
3003 Header.flags |= RT_H2LE_U32(RT_BIT(16) | RT_BIT(17));
3004 Header.capacity = RT_H2LE_U64(pExtent->cSectors);
3005 Header.grainSize = RT_H2LE_U64(pExtent->cSectorsPerGrain);
3006 Header.descriptorOffset = RT_H2LE_U64(pExtent->uDescriptorSector);
3007 Header.descriptorSize = RT_H2LE_U64(pExtent->cDescriptorSectors);
3008 Header.numGTEsPerGT = RT_H2LE_U32(pExtent->cGTEntries);
3009 if (pExtent->fFooter && uOffset == 0)
3010 {
3011 if (pExtent->pRGD)
3012 {
3013 Assert(pExtent->uSectorRGD);
3014 Header.rgdOffset = RT_H2LE_U64(VMDK_GD_AT_END);
3015 Header.gdOffset = RT_H2LE_U64(VMDK_GD_AT_END);
3016 }
3017 else
3018 {
3019 Header.gdOffset = RT_H2LE_U64(VMDK_GD_AT_END);
3020 }
3021 }
3022 else
3023 {
3024 if (pExtent->pRGD)
3025 {
3026 Assert(pExtent->uSectorRGD);
3027 Header.rgdOffset = RT_H2LE_U64(pExtent->uSectorRGD);
3028 Header.gdOffset = RT_H2LE_U64(pExtent->uSectorGD);
3029 }
3030 else
3031 {
3032 Header.gdOffset = RT_H2LE_U64(pExtent->uSectorGD);
3033 }
3034 }
3035 Header.overHead = RT_H2LE_U64(pExtent->cOverheadSectors);
3036 Header.uncleanShutdown = pExtent->fUncleanShutdown;
3037 Header.singleEndLineChar = '\n';
3038 Header.nonEndLineChar = ' ';
3039 Header.doubleEndLineChar1 = '\r';
3040 Header.doubleEndLineChar2 = '\n';
3041 Header.compressAlgorithm = RT_H2LE_U16(pExtent->uCompression);
3042
3043 int rc = vmdkFileWriteMetaAsync(pImage, pExtent->pFile,
3044 uOffset, &Header, sizeof(Header),
3045 pIoCtx, NULL, NULL);
3046 if (RT_FAILURE(rc) && (rc != VERR_VD_ASYNC_IO_IN_PROGRESS))
3047 rc = vmdkError(pImage, rc, RT_SRC_POS, N_("VMDK: error writing extent header in '%s'"), pExtent->pszFullname);
3048 return rc;
3049}
3050
#ifdef VBOX_WITH_VMDK_ESX
/**
 * Internal: unused code to read the metadata of a sparse ESX extent.
 *
 * Such extents never leave ESX server, so this isn't ever used.
 *
 * @note The image pointer was added to the signature: the body always used
 *       pImage for I/O and cleanup, but the previous signature never
 *       declared it, so this function could not even compile when
 *       VBOX_WITH_VMDK_ESX was defined.
 */
static int vmdkReadMetaESXSparseExtent(PVMDKIMAGE pImage, PVMDKEXTENT pExtent)
{
    COWDisk_Header Header;
    uint64_t cSectorsPerGDE;

    int rc = vmdkFileReadSync(pImage, pExtent->pFile, 0, &Header, sizeof(Header), NULL);
    AssertRC(rc);
    if (RT_FAILURE(rc))
        goto out;
    /* Only the single known header layout (version 1, flags 3) is accepted. */
    if (    RT_LE2H_U32(Header.magicNumber) != VMDK_ESX_SPARSE_MAGICNUMBER
        ||  RT_LE2H_U32(Header.version) != 1
        ||  RT_LE2H_U32(Header.flags) != 3)
    {
        rc = VERR_VD_VMDK_INVALID_HEADER;
        goto out;
    }
    pExtent->enmType = VMDKETYPE_ESX_SPARSE;
    pExtent->cSectors = RT_LE2H_U32(Header.numSectors);
    pExtent->cSectorsPerGrain = RT_LE2H_U32(Header.grainSize);
    /* The spec says that this must be between 1 sector and 1MB. This code
     * assumes it's a power of two, so check that requirement, too. */
    if (    (pExtent->cSectorsPerGrain & (pExtent->cSectorsPerGrain - 1))
        ||  pExtent->cSectorsPerGrain == 0
        ||  pExtent->cSectorsPerGrain > 2048)
    {
        rc = VERR_VD_VMDK_INVALID_HEADER;
        goto out;
    }
    /* ESX sparse extents have no embedded descriptor. */
    pExtent->uDescriptorSector = 0;
    pExtent->cDescriptorSectors = 0;
    pExtent->uSectorGD = RT_LE2H_U32(Header.gdOffset);
    pExtent->uSectorRGD = 0;
    pExtent->cOverheadSectors = 0;
    pExtent->cGTEntries = 4096;
    cSectorsPerGDE = pExtent->cGTEntries * pExtent->cSectorsPerGrain;
    if (!cSectorsPerGDE || cSectorsPerGDE > UINT32_MAX)
    {
        rc = VERR_VD_VMDK_INVALID_HEADER;
        goto out;
    }
    pExtent->cSectorsPerGDE = cSectorsPerGDE;
    pExtent->cGDEntries = (pExtent->cSectors + cSectorsPerGDE - 1) / cSectorsPerGDE;
    if (pExtent->cGDEntries != RT_LE2H_U32(Header.numGDEntries))
    {
        /* Inconsistency detected. Computed number of GD entries doesn't match
         * stored value. Better be safe than sorry. */
        rc = VERR_VD_VMDK_INVALID_HEADER;
        goto out;
    }
    pExtent->uFreeSector = RT_LE2H_U32(Header.freeSector);
    pExtent->fUncleanShutdown = !!Header.uncleanShutdown;

    rc = vmdkReadGrainDirectory(pImage, pExtent);

out:
    if (RT_FAILURE(rc))
        vmdkFreeExtentData(pImage, pExtent, false);

    return rc;
}
#endif /* VBOX_WITH_VMDK_ESX */
3118
3119/**
3120 * Internal: free the memory used by the extent data structure, optionally
3121 * deleting the referenced files.
3122 */
3123static void vmdkFreeExtentData(PVMDKIMAGE pImage, PVMDKEXTENT pExtent,
3124 bool fDelete)
3125{
3126 vmdkFreeGrainDirectory(pExtent);
3127 if (pExtent->pDescData)
3128 {
3129 RTMemFree(pExtent->pDescData);
3130 pExtent->pDescData = NULL;
3131 }
3132 if (pExtent->pFile != NULL)
3133 {
3134 /* Do not delete raw extents, these have full and base names equal. */
3135 vmdkFileClose(pImage, &pExtent->pFile,
3136 fDelete
3137 && pExtent->pszFullname
3138 && strcmp(pExtent->pszFullname, pExtent->pszBasename));
3139 }
3140 if (pExtent->pszBasename)
3141 {
3142 RTMemTmpFree((void *)pExtent->pszBasename);
3143 pExtent->pszBasename = NULL;
3144 }
3145 if (pExtent->pszFullname)
3146 {
3147 RTStrFree((char *)(void *)pExtent->pszFullname);
3148 pExtent->pszFullname = NULL;
3149 }
3150 if (pExtent->pvGrain)
3151 {
3152 RTMemFree(pExtent->pvGrain);
3153 pExtent->pvGrain = NULL;
3154 }
3155}
3156
3157/**
3158 * Internal: allocate grain table cache if necessary for this image.
3159 */
3160static int vmdkAllocateGrainTableCache(PVMDKIMAGE pImage)
3161{
3162 PVMDKEXTENT pExtent;
3163
3164 /* Allocate grain table cache if any sparse extent is present. */
3165 for (unsigned i = 0; i < pImage->cExtents; i++)
3166 {
3167 pExtent = &pImage->pExtents[i];
3168 if ( pExtent->enmType == VMDKETYPE_HOSTED_SPARSE
3169#ifdef VBOX_WITH_VMDK_ESX
3170 || pExtent->enmType == VMDKETYPE_ESX_SPARSE
3171#endif /* VBOX_WITH_VMDK_ESX */
3172 )
3173 {
3174 /* Allocate grain table cache. */
3175 pImage->pGTCache = (PVMDKGTCACHE)RTMemAllocZ(sizeof(VMDKGTCACHE));
3176 if (!pImage->pGTCache)
3177 return VERR_NO_MEMORY;
3178 for (unsigned j = 0; j < VMDK_GT_CACHE_SIZE; j++)
3179 {
3180 PVMDKGTCACHEENTRY pGCE = &pImage->pGTCache->aGTCache[j];
3181 pGCE->uExtent = UINT32_MAX;
3182 }
3183 pImage->pGTCache->cEntries = VMDK_GT_CACHE_SIZE;
3184 break;
3185 }
3186 }
3187
3188 return VINF_SUCCESS;
3189}
3190
3191/**
3192 * Internal: allocate the given number of extents.
3193 */
3194static int vmdkCreateExtents(PVMDKIMAGE pImage, unsigned cExtents)
3195{
3196 int rc = VINF_SUCCESS;
3197 PVMDKEXTENT pExtents = (PVMDKEXTENT)RTMemAllocZ(cExtents * sizeof(VMDKEXTENT));
3198 if (pImage)
3199 {
3200 for (unsigned i = 0; i < cExtents; i++)
3201 {
3202 pExtents[i].pFile = NULL;
3203 pExtents[i].pszBasename = NULL;
3204 pExtents[i].pszFullname = NULL;
3205 pExtents[i].pGD = NULL;
3206 pExtents[i].pRGD = NULL;
3207 pExtents[i].pDescData = NULL;
3208 pExtents[i].uVersion = 1;
3209 pExtents[i].uCompression = VMDK_COMPRESSION_NONE;
3210 pExtents[i].uExtent = i;
3211 pExtents[i].pImage = pImage;
3212 }
3213 pImage->pExtents = pExtents;
3214 pImage->cExtents = cExtents;
3215 }
3216 else
3217 rc = VERR_NO_MEMORY;
3218
3219 return rc;
3220}
3221
3222/**
3223 * Internal: Open an image, constructing all necessary data structures.
3224 */
3225static int vmdkOpenImage(PVMDKIMAGE pImage, unsigned uOpenFlags)
3226{
3227 int rc;
3228 uint32_t u32Magic;
3229 PVMDKFILE pFile;
3230 PVMDKEXTENT pExtent;
3231
3232 pImage->uOpenFlags = uOpenFlags;
3233
3234 /* Try to get error interface. */
3235 pImage->pInterfaceError = VDInterfaceGet(pImage->pVDIfsDisk, VDINTERFACETYPE_ERROR);
3236 if (pImage->pInterfaceError)
3237 pImage->pInterfaceErrorCallbacks = VDGetInterfaceError(pImage->pInterfaceError);
3238
3239 /* Get I/O interface. */
3240 pImage->pInterfaceIO = VDInterfaceGet(pImage->pVDIfsImage, VDINTERFACETYPE_IOINT);
3241 AssertPtrReturn(pImage->pInterfaceIO, VERR_INVALID_PARAMETER);
3242 pImage->pInterfaceIOCallbacks = VDGetInterfaceIOInt(pImage->pInterfaceIO);
3243 AssertPtrReturn(pImage->pInterfaceIOCallbacks, VERR_INVALID_PARAMETER);
3244
3245 /*
3246 * Open the image.
3247 * We don't have to check for asynchronous access because
3248 * we only support raw access and the opened file is a description
3249 * file were no data is stored.
3250 */
3251
3252 rc = vmdkFileOpen(pImage, &pFile, pImage->pszFilename,
3253 VDOpenFlagsToFileOpenFlags(uOpenFlags, false /* fCreate */),
3254 false /* fAsyncIO */);
3255 if (RT_FAILURE(rc))
3256 {
3257 /* Do NOT signal an appropriate error here, as the VD layer has the
3258 * choice of retrying the open if it failed. */
3259 goto out;
3260 }
3261 pImage->pFile = pFile;
3262
3263 /* Read magic (if present). */
3264 rc = vmdkFileReadSync(pImage, pFile, 0, &u32Magic, sizeof(u32Magic), NULL);
3265 if (RT_FAILURE(rc))
3266 {
3267 rc = vmdkError(pImage, rc, RT_SRC_POS, N_("VMDK: error reading the magic number in '%s'"), pImage->pszFilename);
3268 goto out;
3269 }
3270
3271 /* Handle the file according to its magic number. */
3272 if (RT_LE2H_U32(u32Magic) == VMDK_SPARSE_MAGICNUMBER)
3273 {
3274 /* It's a hosted single-extent image. */
3275 rc = vmdkCreateExtents(pImage, 1);
3276 if (RT_FAILURE(rc))
3277 goto out;
3278 /* The opened file is passed to the extent. No separate descriptor
3279 * file, so no need to keep anything open for the image. */
3280 pExtent = &pImage->pExtents[0];
3281 pExtent->pFile = pFile;
3282 pImage->pFile = NULL;
3283 pExtent->pszFullname = RTPathAbsDup(pImage->pszFilename);
3284 if (!pExtent->pszFullname)
3285 {
3286 rc = VERR_NO_MEMORY;
3287 goto out;
3288 }
3289 rc = vmdkReadBinaryMetaExtent(pImage, pExtent);
3290 if (RT_FAILURE(rc))
3291 goto out;
3292
3293 /* As we're dealing with a monolithic image here, there must
3294 * be a descriptor embedded in the image file. */
3295 if (!pExtent->uDescriptorSector || !pExtent->cDescriptorSectors)
3296 {
3297 rc = vmdkError(pImage, VERR_VD_VMDK_INVALID_HEADER, RT_SRC_POS, N_("VMDK: monolithic image without descriptor in '%s'"), pImage->pszFilename);
3298 goto out;
3299 }
3300 /* HACK: extend the descriptor if it is unusually small and it fits in
3301 * the unused space after the image header. Allows opening VMDK files
3302 * with extremely small descriptor in read/write mode. */
3303 if ( !(pImage->uOpenFlags & VD_OPEN_FLAGS_READONLY)
3304 && pExtent->cDescriptorSectors < 3
3305 && (int64_t)pExtent->uSectorGD - pExtent->uDescriptorSector >= 4
3306 && (!pExtent->uSectorRGD || (int64_t)pExtent->uSectorRGD - pExtent->uDescriptorSector >= 4))
3307 {
3308 pExtent->cDescriptorSectors = 4;
3309 pExtent->fMetaDirty = true;
3310 }
3311 /* Read the descriptor from the extent. */
3312 pExtent->pDescData = (char *)RTMemAllocZ(VMDK_SECTOR2BYTE(pExtent->cDescriptorSectors));
3313 if (!pExtent->pDescData)
3314 {
3315 rc = VERR_NO_MEMORY;
3316 goto out;
3317 }
3318 rc = vmdkFileReadSync(pImage, pExtent->pFile,
3319 VMDK_SECTOR2BYTE(pExtent->uDescriptorSector),
3320 pExtent->pDescData,
3321 VMDK_SECTOR2BYTE(pExtent->cDescriptorSectors), NULL);
3322 AssertRC(rc);
3323 if (RT_FAILURE(rc))
3324 {
3325 rc = vmdkError(pImage, rc, RT_SRC_POS, N_("VMDK: read error for descriptor in '%s'"), pExtent->pszFullname);
3326 goto out;
3327 }
3328
3329 rc = vmdkParseDescriptor(pImage, pExtent->pDescData,
3330 VMDK_SECTOR2BYTE(pExtent->cDescriptorSectors));
3331 if (RT_FAILURE(rc))
3332 goto out;
3333
3334 rc = vmdkReadMetaExtent(pImage, pExtent);
3335 if (RT_FAILURE(rc))
3336 goto out;
3337
3338 /* Mark the extent as unclean if opened in read-write mode. */
3339 if (!(uOpenFlags & VD_OPEN_FLAGS_READONLY))
3340 {
3341 pExtent->fUncleanShutdown = true;
3342 pExtent->fMetaDirty = true;
3343 }
3344 }
3345 else
3346 {
3347 /* Allocate at least 10K, and make sure that there is 5K free space
3348 * in case new entries need to be added to the descriptor. Never
3349 * alocate more than 128K, because that's no valid descriptor file
3350 * and will result in the correct "truncated read" error handling. */
3351 uint64_t cbFileSize;
3352 rc = vmdkFileGetSize(pImage, pFile, &cbFileSize);
3353 if (RT_FAILURE(rc))
3354 goto out;
3355
3356 uint64_t cbSize = cbFileSize;
3357 if (cbSize % VMDK_SECTOR2BYTE(10))
3358 cbSize += VMDK_SECTOR2BYTE(20) - cbSize % VMDK_SECTOR2BYTE(10);
3359 else
3360 cbSize += VMDK_SECTOR2BYTE(10);
3361 cbSize = RT_MIN(cbSize, _128K);
3362 pImage->cbDescAlloc = RT_MAX(VMDK_SECTOR2BYTE(20), cbSize);
3363 pImage->pDescData = (char *)RTMemAllocZ(pImage->cbDescAlloc);
3364 if (!pImage->pDescData)
3365 {
3366 rc = VERR_NO_MEMORY;
3367 goto out;
3368 }
3369
3370 size_t cbRead;
3371 rc = vmdkFileReadSync(pImage, pImage->pFile, 0, pImage->pDescData,
3372 RT_MIN(pImage->cbDescAlloc, cbFileSize),
3373 &cbRead);
3374 if (RT_FAILURE(rc))
3375 {
3376 rc = vmdkError(pImage, rc, RT_SRC_POS, N_("VMDK: read error for descriptor in '%s'"), pImage->pszFilename);
3377 goto out;
3378 }
3379 if (cbRead == pImage->cbDescAlloc)
3380 {
3381 /* Likely the read is truncated. Better fail a bit too early
3382 * (normally the descriptor is much smaller than our buffer). */
3383 rc = vmdkError(pImage, VERR_VD_VMDK_INVALID_HEADER, RT_SRC_POS, N_("VMDK: cannot read descriptor in '%s'"), pImage->pszFilename);
3384 goto out;
3385 }
3386
3387 rc = vmdkParseDescriptor(pImage, pImage->pDescData,
3388 pImage->cbDescAlloc);
3389 if (RT_FAILURE(rc))
3390 goto out;
3391
3392 /*
3393 * We have to check for the asynchronous open flag. The
3394 * extents are parsed and the type of all are known now.
3395 * Check if every extent is either FLAT or ZERO.
3396 */
3397 if (uOpenFlags & VD_OPEN_FLAGS_ASYNC_IO)
3398 {
3399 unsigned cFlatExtents = 0;
3400
3401 for (unsigned i = 0; i < pImage->cExtents; i++)
3402 {
3403 pExtent = &pImage->pExtents[i];
3404
3405 if (( pExtent->enmType != VMDKETYPE_FLAT
3406 && pExtent->enmType != VMDKETYPE_ZERO
3407 && pExtent->enmType != VMDKETYPE_VMFS)
3408 || ((pImage->pExtents[i].enmType == VMDKETYPE_FLAT) && (cFlatExtents > 0)))
3409 {
3410 /*
3411 * Opened image contains at least one none flat or zero extent.
3412 * Return error but don't set error message as the caller
3413 * has the chance to open in non async I/O mode.
3414 */
3415 rc = VERR_NOT_SUPPORTED;
3416 goto out;
3417 }
3418 if (pExtent->enmType == VMDKETYPE_FLAT)
3419 cFlatExtents++;
3420 }
3421 }
3422
3423 for (unsigned i = 0; i < pImage->cExtents; i++)
3424 {
3425 pExtent = &pImage->pExtents[i];
3426
3427 if (pExtent->pszBasename)
3428 {
3429 /* Hack to figure out whether the specified name in the
3430 * extent descriptor is absolute. Doesn't always work, but
3431 * should be good enough for now. */
3432 char *pszFullname;
3433 /** @todo implement proper path absolute check. */
3434 if (pExtent->pszBasename[0] == RTPATH_SLASH)
3435 {
3436 pszFullname = RTStrDup(pExtent->pszBasename);
3437 if (!pszFullname)
3438 {
3439 rc = VERR_NO_MEMORY;
3440 goto out;
3441 }
3442 }
3443 else
3444 {
3445 size_t cbDirname;
3446 char *pszDirname = RTStrDup(pImage->pszFilename);
3447 if (!pszDirname)
3448 {
3449 rc = VERR_NO_MEMORY;
3450 goto out;
3451 }
3452 RTPathStripFilename(pszDirname);
3453 cbDirname = strlen(pszDirname);
3454 rc = RTStrAPrintf(&pszFullname, "%s%c%s", pszDirname,
3455 RTPATH_SLASH, pExtent->pszBasename);
3456 RTStrFree(pszDirname);
3457 if (RT_FAILURE(rc))
3458 goto out;
3459 }
3460 pExtent->pszFullname = pszFullname;
3461 }
3462 else
3463 pExtent->pszFullname = NULL;
3464
3465 switch (pExtent->enmType)
3466 {
3467 case VMDKETYPE_HOSTED_SPARSE:
3468 rc = vmdkFileOpen(pImage, &pExtent->pFile, pExtent->pszFullname,
3469 VDOpenFlagsToFileOpenFlags(uOpenFlags,
3470 false /* fCreate */),
3471 false /* fAsyncIO */);
3472 if (RT_FAILURE(rc))
3473 {
3474 /* Do NOT signal an appropriate error here, as the VD
3475 * layer has the choice of retrying the open if it
3476 * failed. */
3477 goto out;
3478 }
3479 rc = vmdkReadBinaryMetaExtent(pImage, pExtent);
3480 if (RT_FAILURE(rc))
3481 goto out;
3482 rc = vmdkReadMetaExtent(pImage, pExtent);
3483 if (RT_FAILURE(rc))
3484 goto out;
3485
3486 /* Mark extent as unclean if opened in read-write mode. */
3487 if (!(uOpenFlags & VD_OPEN_FLAGS_READONLY))
3488 {
3489 pExtent->fUncleanShutdown = true;
3490 pExtent->fMetaDirty = true;
3491 }
3492 break;
3493 case VMDKETYPE_VMFS:
3494 case VMDKETYPE_FLAT:
3495 rc = vmdkFileOpen(pImage, &pExtent->pFile, pExtent->pszFullname,
3496 VDOpenFlagsToFileOpenFlags(uOpenFlags,
3497 false /* fCreate */),
3498 true /* fAsyncIO */);
3499 if (RT_FAILURE(rc))
3500 {
3501 /* Do NOT signal an appropriate error here, as the VD
3502 * layer has the choice of retrying the open if it
3503 * failed. */
3504 goto out;
3505 }
3506 break;
3507 case VMDKETYPE_ZERO:
3508 /* Nothing to do. */
3509 break;
3510 default:
3511 AssertMsgFailed(("unknown vmdk extent type %d\n", pExtent->enmType));
3512 }
3513 }
3514 }
3515
3516 /* Make sure this is not reached accidentally with an error status. */
3517 AssertRC(rc);
3518
3519 /* Determine PCHS geometry if not set. */
3520 if (pImage->PCHSGeometry.cCylinders == 0)
3521 {
3522 uint64_t cCylinders = VMDK_BYTE2SECTOR(pImage->cbSize)
3523 / pImage->PCHSGeometry.cHeads
3524 / pImage->PCHSGeometry.cSectors;
3525 pImage->PCHSGeometry.cCylinders = (unsigned)RT_MIN(cCylinders, 16383);
3526 if (!(pImage->uOpenFlags & VD_OPEN_FLAGS_READONLY))
3527 {
3528 rc = vmdkDescSetPCHSGeometry(pImage, &pImage->PCHSGeometry);
3529 AssertRC(rc);
3530 }
3531 }
3532
3533 /* Update the image metadata now in case has changed. */
3534 rc = vmdkFlushImage(pImage);
3535 if (RT_FAILURE(rc))
3536 goto out;
3537
3538 /* Figure out a few per-image constants from the extents. */
3539 pImage->cbSize = 0;
3540 for (unsigned i = 0; i < pImage->cExtents; i++)
3541 {
3542 pExtent = &pImage->pExtents[i];
3543 if ( pExtent->enmType == VMDKETYPE_HOSTED_SPARSE
3544#ifdef VBOX_WITH_VMDK_ESX
3545 || pExtent->enmType == VMDKETYPE_ESX_SPARSE
3546#endif /* VBOX_WITH_VMDK_ESX */
3547 )
3548 {
3549 /* Here used to be a check whether the nominal size of an extent
3550 * is a multiple of the grain size. The spec says that this is
3551 * always the case, but unfortunately some files out there in the
3552 * wild violate the spec (e.g. ReactOS 0.3.1). */
3553 }
3554 pImage->cbSize += VMDK_SECTOR2BYTE(pExtent->cNominalSectors);
3555 }
3556
3557 for (unsigned i = 0; i < pImage->cExtents; i++)
3558 {
3559 pExtent = &pImage->pExtents[i];
3560 if ( pImage->pExtents[i].enmType == VMDKETYPE_FLAT
3561 || pImage->pExtents[i].enmType == VMDKETYPE_ZERO)
3562 {
3563 pImage->uImageFlags |= VD_IMAGE_FLAGS_FIXED;
3564 break;
3565 }
3566 }
3567
3568 rc = vmdkAllocateGrainTableCache(pImage);
3569 if (RT_FAILURE(rc))
3570 goto out;
3571
3572out:
3573 if (RT_FAILURE(rc))
3574 vmdkFreeImage(pImage, false);
3575 return rc;
3576}
3577
3578/**
3579 * Internal: create VMDK images for raw disk/partition access.
3580 */
3581static int vmdkCreateRawImage(PVMDKIMAGE pImage, const PVBOXHDDRAW pRaw,
3582 uint64_t cbSize)
3583{
3584 int rc = VINF_SUCCESS;
3585 PVMDKEXTENT pExtent;
3586
3587 if (pRaw->fRawDisk)
3588 {
3589 /* Full raw disk access. This requires setting up a descriptor
3590 * file and open the (flat) raw disk. */
3591 rc = vmdkCreateExtents(pImage, 1);
3592 if (RT_FAILURE(rc))
3593 return vmdkError(pImage, rc, RT_SRC_POS, N_("VMDK: could not create new extent list in '%s'"), pImage->pszFilename);
3594 pExtent = &pImage->pExtents[0];
3595 /* Create raw disk descriptor file. */
3596 rc = vmdkFileOpen(pImage, &pImage->pFile, pImage->pszFilename,
3597 VDOpenFlagsToFileOpenFlags(pImage->uOpenFlags,
3598 true /* fCreate */),
3599 false /* fAsyncIO */);
3600 if (RT_FAILURE(rc))
3601 return vmdkError(pImage, rc, RT_SRC_POS, N_("VMDK: could not create new file '%s'"), pImage->pszFilename);
3602
3603 /* Set up basename for extent description. Cannot use StrDup. */
3604 size_t cbBasename = strlen(pRaw->pszRawDisk) + 1;
3605 char *pszBasename = (char *)RTMemTmpAlloc(cbBasename);
3606 if (!pszBasename)
3607 return VERR_NO_MEMORY;
3608 memcpy(pszBasename, pRaw->pszRawDisk, cbBasename);
3609 pExtent->pszBasename = pszBasename;
3610 /* For raw disks the full name is identical to the base name. */
3611 pExtent->pszFullname = RTStrDup(pszBasename);
3612 if (!pExtent->pszFullname)
3613 return VERR_NO_MEMORY;
3614 pExtent->enmType = VMDKETYPE_FLAT;
3615 pExtent->cNominalSectors = VMDK_BYTE2SECTOR(cbSize);
3616 pExtent->uSectorOffset = 0;
3617 pExtent->enmAccess = VMDKACCESS_READWRITE;
3618 pExtent->fMetaDirty = false;
3619
3620 /* Open flat image, the raw disk. */
3621 rc = vmdkFileOpen(pImage, &pExtent->pFile, pExtent->pszFullname,
3622 VDOpenFlagsToFileOpenFlags(pImage->uOpenFlags & ~VD_OPEN_FLAGS_READONLY,
3623 false /* fCreate */),
3624 false /* fAsyncIO */);
3625 if (RT_FAILURE(rc))
3626 return vmdkError(pImage, rc, RT_SRC_POS, N_("VMDK: could not open raw disk file '%s'"), pExtent->pszFullname);
3627 }
3628 else
3629 {
3630 /* Raw partition access. This requires setting up a descriptor
3631 * file, write the partition information to a flat extent and
3632 * open all the (flat) raw disk partitions. */
3633
3634 /* First pass over the partition data areas to determine how many
3635 * extents we need. One data area can require up to 2 extents, as
3636 * it might be necessary to skip over unpartitioned space. */
3637 unsigned cExtents = 0;
3638 uint64_t uStart = 0;
3639 for (unsigned i = 0; i < pRaw->cPartDescs; i++)
3640 {
3641 PVBOXHDDRAWPARTDESC pPart = &pRaw->pPartDescs[i];
3642 if (uStart > pPart->uStart)
3643 return vmdkError(pImage, VERR_INVALID_PARAMETER, RT_SRC_POS, N_("VMDK: incorrect partition data area ordering set up by the caller in '%s'"), pImage->pszFilename);
3644
3645 if (uStart < pPart->uStart)
3646 cExtents++;
3647 uStart = pPart->uStart + pPart->cbData;
3648 cExtents++;
3649 }
3650 /* Another extent for filling up the rest of the image. */
3651 if (uStart != cbSize)
3652 cExtents++;
3653
3654 rc = vmdkCreateExtents(pImage, cExtents);
3655 if (RT_FAILURE(rc))
3656 return vmdkError(pImage, rc, RT_SRC_POS, N_("VMDK: could not create new extent list in '%s'"), pImage->pszFilename);
3657
3658 /* Create raw partition descriptor file. */
3659 rc = vmdkFileOpen(pImage, &pImage->pFile, pImage->pszFilename,
3660 VDOpenFlagsToFileOpenFlags(pImage->uOpenFlags,
3661 true /* fCreate */),
3662 false /* fAsyncIO */);
3663 if (RT_FAILURE(rc))
3664 return vmdkError(pImage, rc, RT_SRC_POS, N_("VMDK: could not create new file '%s'"), pImage->pszFilename);
3665
3666 /* Create base filename for the partition table extent. */
3667 /** @todo remove fixed buffer without creating memory leaks. */
3668 char pszPartition[1024];
3669 const char *pszBase = RTPathFilename(pImage->pszFilename);
3670 const char *pszExt = RTPathExt(pszBase);
3671 if (pszExt == NULL)
3672 return vmdkError(pImage, rc, RT_SRC_POS, N_("VMDK: invalid filename '%s'"), pImage->pszFilename);
3673 char *pszBaseBase = RTStrDup(pszBase);
3674 if (!pszBaseBase)
3675 return VERR_NO_MEMORY;
3676 RTPathStripExt(pszBaseBase);
3677 RTStrPrintf(pszPartition, sizeof(pszPartition), "%s-pt%s",
3678 pszBaseBase, pszExt);
3679 RTStrFree(pszBaseBase);
3680
3681 /* Second pass over the partitions, now define all extents. */
3682 uint64_t uPartOffset = 0;
3683 cExtents = 0;
3684 uStart = 0;
3685 for (unsigned i = 0; i < pRaw->cPartDescs; i++)
3686 {
3687 PVBOXHDDRAWPARTDESC pPart = &pRaw->pPartDescs[i];
3688 pExtent = &pImage->pExtents[cExtents++];
3689
3690 if (uStart < pPart->uStart)
3691 {
3692 pExtent->pszBasename = NULL;
3693 pExtent->pszFullname = NULL;
3694 pExtent->enmType = VMDKETYPE_ZERO;
3695 pExtent->cNominalSectors = VMDK_BYTE2SECTOR(pPart->uStart - uStart);
3696 pExtent->uSectorOffset = 0;
3697 pExtent->enmAccess = VMDKACCESS_READWRITE;
3698 pExtent->fMetaDirty = false;
3699 /* go to next extent */
3700 pExtent = &pImage->pExtents[cExtents++];
3701 }
3702 uStart = pPart->uStart + pPart->cbData;
3703
3704 if (pPart->pvPartitionData)
3705 {
3706 /* Set up basename for extent description. Can't use StrDup. */
3707 size_t cbBasename = strlen(pszPartition) + 1;
3708 char *pszBasename = (char *)RTMemTmpAlloc(cbBasename);
3709 if (!pszBasename)
3710 return VERR_NO_MEMORY;
3711 memcpy(pszBasename, pszPartition, cbBasename);
3712 pExtent->pszBasename = pszBasename;
3713
3714 /* Set up full name for partition extent. */
3715 size_t cbDirname;
3716 char *pszDirname = RTStrDup(pImage->pszFilename);
3717 if (!pszDirname)
3718 return VERR_NO_MEMORY;
3719 RTPathStripFilename(pszDirname);
3720 cbDirname = strlen(pszDirname);
3721 char *pszFullname;
3722 rc = RTStrAPrintf(&pszFullname, "%s%c%s", pszDirname,
3723 RTPATH_SLASH, pExtent->pszBasename);
3724 RTStrFree(pszDirname);
3725 if (RT_FAILURE(rc))
3726 return rc;
3727 pExtent->pszFullname = pszFullname;
3728 pExtent->enmType = VMDKETYPE_FLAT;
3729 pExtent->cNominalSectors = VMDK_BYTE2SECTOR(pPart->cbData);
3730 pExtent->uSectorOffset = uPartOffset;
3731 pExtent->enmAccess = VMDKACCESS_READWRITE;
3732 pExtent->fMetaDirty = false;
3733
3734 /* Create partition table flat image. */
3735 rc = vmdkFileOpen(pImage, &pExtent->pFile, pExtent->pszFullname,
3736 VDOpenFlagsToFileOpenFlags(pImage->uOpenFlags,
3737 true /* fCreate */),
3738 false /* fAsyncIO */);
3739 if (RT_FAILURE(rc))
3740 return vmdkError(pImage, rc, RT_SRC_POS, N_("VMDK: could not create new partition data file '%s'"), pExtent->pszFullname);
3741 rc = vmdkFileWriteSync(pImage, pExtent->pFile,
3742 VMDK_SECTOR2BYTE(uPartOffset),
3743 pPart->pvPartitionData,
3744 pPart->cbData, NULL);
3745 if (RT_FAILURE(rc))
3746 return vmdkError(pImage, rc, RT_SRC_POS, N_("VMDK: could not write partition data to '%s'"), pExtent->pszFullname);
3747 uPartOffset += VMDK_BYTE2SECTOR(pPart->cbData);
3748 }
3749 else
3750 {
3751 if (pPart->pszRawDevice)
3752 {
3753 /* Set up basename for extent descr. Can't use StrDup. */
3754 size_t cbBasename = strlen(pPart->pszRawDevice) + 1;
3755 char *pszBasename = (char *)RTMemTmpAlloc(cbBasename);
3756 if (!pszBasename)
3757 return VERR_NO_MEMORY;
3758 memcpy(pszBasename, pPart->pszRawDevice, cbBasename);
3759 pExtent->pszBasename = pszBasename;
3760 /* For raw disks full name is identical to base name. */
3761 pExtent->pszFullname = RTStrDup(pszBasename);
3762 if (!pExtent->pszFullname)
3763 return VERR_NO_MEMORY;
3764 pExtent->enmType = VMDKETYPE_FLAT;
3765 pExtent->cNominalSectors = VMDK_BYTE2SECTOR(pPart->cbData);
3766 pExtent->uSectorOffset = VMDK_BYTE2SECTOR(pPart->uStartOffset);
3767 pExtent->enmAccess = VMDKACCESS_READWRITE;
3768 pExtent->fMetaDirty = false;
3769
3770 /* Open flat image, the raw partition. */
3771 rc = vmdkFileOpen(pImage, &pExtent->pFile, pExtent->pszFullname,
3772 VDOpenFlagsToFileOpenFlags(pImage->uOpenFlags & ~VD_OPEN_FLAGS_READONLY,
3773 false /* fCreate */),
3774 false /* fAsyncIO */);
3775 if (RT_FAILURE(rc))
3776 return vmdkError(pImage, rc, RT_SRC_POS, N_("VMDK: could not open raw partition file '%s'"), pExtent->pszFullname);
3777 }
3778 else
3779 {
3780 pExtent->pszBasename = NULL;
3781 pExtent->pszFullname = NULL;
3782 pExtent->enmType = VMDKETYPE_ZERO;
3783 pExtent->cNominalSectors = VMDK_BYTE2SECTOR(pPart->cbData);
3784 pExtent->uSectorOffset = 0;
3785 pExtent->enmAccess = VMDKACCESS_READWRITE;
3786 pExtent->fMetaDirty = false;
3787 }
3788 }
3789 }
3790 /* Another extent for filling up the rest of the image. */
3791 if (uStart != cbSize)
3792 {
3793 pExtent = &pImage->pExtents[cExtents++];
3794 pExtent->pszBasename = NULL;
3795 pExtent->pszFullname = NULL;
3796 pExtent->enmType = VMDKETYPE_ZERO;
3797 pExtent->cNominalSectors = VMDK_BYTE2SECTOR(cbSize - uStart);
3798 pExtent->uSectorOffset = 0;
3799 pExtent->enmAccess = VMDKACCESS_READWRITE;
3800 pExtent->fMetaDirty = false;
3801 }
3802 }
3803
3804 rc = vmdkDescBaseSetStr(pImage, &pImage->Descriptor, "createType",
3805 pRaw->fRawDisk ?
3806 "fullDevice" : "partitionedDevice");
3807 if (RT_FAILURE(rc))
3808 return vmdkError(pImage, rc, RT_SRC_POS, N_("VMDK: could not set the image type in '%s'"), pImage->pszFilename);
3809 return rc;
3810}
3811
3812/**
3813 * Internal: create a regular (i.e. file-backed) VMDK image.
3814 */
3815static int vmdkCreateRegularImage(PVMDKIMAGE pImage, uint64_t cbSize,
3816 unsigned uImageFlags,
3817 PFNVDPROGRESS pfnProgress, void *pvUser,
3818 unsigned uPercentStart, unsigned uPercentSpan)
3819{
3820 int rc = VINF_SUCCESS;
3821 unsigned cExtents = 1;
3822 uint64_t cbOffset = 0;
3823 uint64_t cbRemaining = cbSize;
3824
3825 if (uImageFlags & VD_VMDK_IMAGE_FLAGS_SPLIT_2G)
3826 {
3827 cExtents = cbSize / VMDK_2G_SPLIT_SIZE;
3828 /* Do proper extent computation: need one smaller extent if the total
3829 * size isn't evenly divisible by the split size. */
3830 if (cbSize % VMDK_2G_SPLIT_SIZE)
3831 cExtents++;
3832 }
3833 rc = vmdkCreateExtents(pImage, cExtents);
3834 if (RT_FAILURE(rc))
3835 return vmdkError(pImage, rc, RT_SRC_POS, N_("VMDK: could not create new extent list in '%s'"), pImage->pszFilename);
3836
3837 /* Basename strings needed for constructing the extent names. */
3838 char *pszBasenameSubstr = RTPathFilename(pImage->pszFilename);
3839 AssertPtr(pszBasenameSubstr);
3840 size_t cbBasenameSubstr = strlen(pszBasenameSubstr) + 1;
3841
3842 /* Create searate descriptor file if necessary. */
3843 if (cExtents != 1 || (uImageFlags & VD_IMAGE_FLAGS_FIXED))
3844 {
3845 rc = vmdkFileOpen(pImage, &pImage->pFile, pImage->pszFilename,
3846 VDOpenFlagsToFileOpenFlags(pImage->uOpenFlags,
3847 true /* fCreate */),
3848 false /* fAsyncIO */);
3849 if (RT_FAILURE(rc))
3850 return vmdkError(pImage, rc, RT_SRC_POS, N_("VMDK: could not create new sparse descriptor file '%s'"), pImage->pszFilename);
3851 }
3852 else
3853 pImage->pFile = NULL;
3854
3855 /* Set up all extents. */
3856 for (unsigned i = 0; i < cExtents; i++)
3857 {
3858 PVMDKEXTENT pExtent = &pImage->pExtents[i];
3859 uint64_t cbExtent = cbRemaining;
3860
3861 /* Set up fullname/basename for extent description. Cannot use StrDup
3862 * for basename, as it is not guaranteed that the memory can be freed
3863 * with RTMemTmpFree, which must be used as in other code paths
3864 * StrDup is not usable. */
3865 if (cExtents == 1 && !(uImageFlags & VD_IMAGE_FLAGS_FIXED))
3866 {
3867 char *pszBasename = (char *)RTMemTmpAlloc(cbBasenameSubstr);
3868 if (!pszBasename)
3869 return VERR_NO_MEMORY;
3870 memcpy(pszBasename, pszBasenameSubstr, cbBasenameSubstr);
3871 pExtent->pszBasename = pszBasename;
3872 }
3873 else
3874 {
3875 char *pszBasenameExt = RTPathExt(pszBasenameSubstr);
3876 char *pszBasenameBase = RTStrDup(pszBasenameSubstr);
3877 RTPathStripExt(pszBasenameBase);
3878 char *pszTmp;
3879 size_t cbTmp;
3880 if (uImageFlags & VD_IMAGE_FLAGS_FIXED)
3881 {
3882 if (cExtents == 1)
3883 rc = RTStrAPrintf(&pszTmp, "%s-flat%s", pszBasenameBase,
3884 pszBasenameExt);
3885 else
3886 rc = RTStrAPrintf(&pszTmp, "%s-f%03d%s", pszBasenameBase,
3887 i+1, pszBasenameExt);
3888 }
3889 else
3890 rc = RTStrAPrintf(&pszTmp, "%s-s%03d%s", pszBasenameBase, i+1,
3891 pszBasenameExt);
3892 RTStrFree(pszBasenameBase);
3893 if (RT_FAILURE(rc))
3894 return rc;
3895 cbTmp = strlen(pszTmp) + 1;
3896 char *pszBasename = (char *)RTMemTmpAlloc(cbTmp);
3897 if (!pszBasename)
3898 return VERR_NO_MEMORY;
3899 memcpy(pszBasename, pszTmp, cbTmp);
3900 RTStrFree(pszTmp);
3901 pExtent->pszBasename = pszBasename;
3902 if (uImageFlags & VD_VMDK_IMAGE_FLAGS_SPLIT_2G)
3903 cbExtent = RT_MIN(cbRemaining, VMDK_2G_SPLIT_SIZE);
3904 }
3905 char *pszBasedirectory = RTStrDup(pImage->pszFilename);
3906 RTPathStripFilename(pszBasedirectory);
3907 char *pszFullname;
3908 rc = RTStrAPrintf(&pszFullname, "%s%c%s", pszBasedirectory,
3909 RTPATH_SLASH, pExtent->pszBasename);
3910 RTStrFree(pszBasedirectory);
3911 if (RT_FAILURE(rc))
3912 return rc;
3913 pExtent->pszFullname = pszFullname;
3914
3915 /* Create file for extent. */
3916 rc = vmdkFileOpen(pImage, &pExtent->pFile, pExtent->pszFullname,
3917 VDOpenFlagsToFileOpenFlags(pImage->uOpenFlags,
3918 true /* fCreate */),
3919 false /* fAsyncIO */);
3920 if (RT_FAILURE(rc))
3921 return vmdkError(pImage, rc, RT_SRC_POS, N_("VMDK: could not create new file '%s'"), pExtent->pszFullname);
3922 if (uImageFlags & VD_IMAGE_FLAGS_FIXED)
3923 {
3924 rc = vmdkFileSetSize(pImage, pExtent->pFile, cbExtent);
3925 if (RT_FAILURE(rc))
3926 return vmdkError(pImage, rc, RT_SRC_POS, N_("VMDK: could not set size of new file '%s'"), pExtent->pszFullname);
3927
3928 /* Fill image with zeroes. We do this for every fixed-size image since on some systems
3929 * (for example Windows Vista), it takes ages to write a block near the end of a sparse
3930 * file and the guest could complain about an ATA timeout. */
3931
3932 /** @todo Starting with Linux 2.6.23, there is an fallocate() system call.
3933 * Currently supported file systems are ext4 and ocfs2. */
3934
3935 /* Allocate a temporary zero-filled buffer. Use a bigger block size to optimize writing */
3936 const size_t cbBuf = 128 * _1K;
3937 void *pvBuf = RTMemTmpAllocZ(cbBuf);
3938 if (!pvBuf)
3939 return VERR_NO_MEMORY;
3940
3941 uint64_t uOff = 0;
3942 /* Write data to all image blocks. */
3943 while (uOff < cbExtent)
3944 {
3945 unsigned cbChunk = (unsigned)RT_MIN(cbExtent, cbBuf);
3946
3947 rc = vmdkFileWriteSync(pImage, pExtent->pFile, uOff, pvBuf, cbChunk, NULL);
3948 if (RT_FAILURE(rc))
3949 {
3950 RTMemFree(pvBuf);
3951 return vmdkError(pImage, rc, RT_SRC_POS, N_("VMDK: writing block failed for '%s'"), pImage->pszFilename);
3952 }
3953
3954 uOff += cbChunk;
3955
3956 if (pfnProgress)
3957 {
3958 rc = pfnProgress(pvUser,
3959 uPercentStart + uOff * uPercentSpan / cbExtent);
3960 if (RT_FAILURE(rc))
3961 {
3962 RTMemFree(pvBuf);
3963 return rc;
3964 }
3965 }
3966 }
3967 RTMemTmpFree(pvBuf);
3968 }
3969
3970 /* Place descriptor file information (where integrated). */
3971 if (cExtents == 1 && !(uImageFlags & VD_IMAGE_FLAGS_FIXED))
3972 {
3973 pExtent->uDescriptorSector = 1;
3974 pExtent->cDescriptorSectors = VMDK_BYTE2SECTOR(pImage->cbDescAlloc);
3975 /* The descriptor is part of the (only) extent. */
3976 pExtent->pDescData = pImage->pDescData;
3977 pImage->pDescData = NULL;
3978 }
3979
3980 if (!(uImageFlags & VD_IMAGE_FLAGS_FIXED))
3981 {
3982 uint64_t cSectorsPerGDE, cSectorsPerGD;
3983 pExtent->enmType = VMDKETYPE_HOSTED_SPARSE;
3984 pExtent->cSectors = VMDK_BYTE2SECTOR(RT_ALIGN_64(cbExtent, _64K));
3985 pExtent->cSectorsPerGrain = VMDK_BYTE2SECTOR(_64K);
3986 pExtent->cGTEntries = 512;
3987 cSectorsPerGDE = pExtent->cGTEntries * pExtent->cSectorsPerGrain;
3988 pExtent->cSectorsPerGDE = cSectorsPerGDE;
3989 pExtent->cGDEntries = (pExtent->cSectors + cSectorsPerGDE - 1) / cSectorsPerGDE;
3990 cSectorsPerGD = (pExtent->cGDEntries + (512 / sizeof(uint32_t) - 1)) / (512 / sizeof(uint32_t));
3991 if (pImage->uImageFlags & VD_VMDK_IMAGE_FLAGS_STREAM_OPTIMIZED)
3992 {
3993 /* The spec says version is 1 for all VMDKs, but the vast
3994 * majority of streamOptimized VMDKs actually contain
3995 * version 3 - so go with the majority. Both are acepted. */
3996 pExtent->uVersion = 3;
3997 pExtent->uCompression = VMDK_COMPRESSION_DEFLATE;
3998 }
3999 }
4000 else
4001 {
4002 if (uImageFlags & VD_VMDK_IMAGE_FLAGS_ESX)
4003 pExtent->enmType = VMDKETYPE_VMFS;
4004 else
4005 pExtent->enmType = VMDKETYPE_FLAT;
4006 }
4007
4008 pExtent->enmAccess = VMDKACCESS_READWRITE;
4009 pExtent->fUncleanShutdown = true;
4010 pExtent->cNominalSectors = VMDK_BYTE2SECTOR(cbExtent);
4011 pExtent->uSectorOffset = 0;
4012 pExtent->fMetaDirty = true;
4013
4014 if (!(uImageFlags & VD_IMAGE_FLAGS_FIXED))
4015 {
4016 /* fPreAlloc should never be false because VMware can't use such images. */
4017 rc = vmdkCreateGrainDirectory(pImage, pExtent,
4018 RT_MAX( pExtent->uDescriptorSector
4019 + pExtent->cDescriptorSectors,
4020 1),
4021 true /* fPreAlloc */);
4022 if (RT_FAILURE(rc))
4023 return vmdkError(pImage, rc, RT_SRC_POS, N_("VMDK: could not create new grain directory in '%s'"), pExtent->pszFullname);
4024 }
4025
4026 if (RT_SUCCESS(rc) && pfnProgress)
4027 pfnProgress(pvUser, uPercentStart + i * uPercentSpan / cExtents);
4028
4029 cbRemaining -= cbExtent;
4030 cbOffset += cbExtent;
4031 }
4032
4033 if (pImage->uImageFlags & VD_VMDK_IMAGE_FLAGS_ESX)
4034 {
4035 /* VirtualBox doesn't care, but VMWare ESX freaks out if the wrong
4036 * controller type is set in an image. */
4037 rc = vmdkDescDDBSetStr(pImage, &pImage->Descriptor, "ddb.adapterType", "lsilogic");
4038 if (RT_FAILURE(rc))
4039 return vmdkError(pImage, rc, RT_SRC_POS, N_("VMDK: could not set controller type to lsilogic in '%s'"), pImage->pszFilename);
4040 }
4041
4042 const char *pszDescType = NULL;
4043 if (uImageFlags & VD_IMAGE_FLAGS_FIXED)
4044 {
4045 if (pImage->uImageFlags & VD_VMDK_IMAGE_FLAGS_ESX)
4046 pszDescType = "vmfs";
4047 else
4048 pszDescType = (cExtents == 1)
4049 ? "monolithicFlat" : "twoGbMaxExtentFlat";
4050 }
4051 else
4052 {
4053 if (pImage->uImageFlags & VD_VMDK_IMAGE_FLAGS_STREAM_OPTIMIZED)
4054 pszDescType = "streamOptimized";
4055 else
4056 {
4057 pszDescType = (cExtents == 1)
4058 ? "monolithicSparse" : "twoGbMaxExtentSparse";
4059 }
4060 }
4061 rc = vmdkDescBaseSetStr(pImage, &pImage->Descriptor, "createType",
4062 pszDescType);
4063 if (RT_FAILURE(rc))
4064 return vmdkError(pImage, rc, RT_SRC_POS, N_("VMDK: could not set the image type in '%s'"), pImage->pszFilename);
4065 return rc;
4066}
4067
4068/**
4069 * Internal. Clear the grain table buffer for real stream optimized writing.
4070 */
4071static void vmdksClearGT(PVMDKIMAGE pImage, PVMDKEXTENT pExtent)
4072{
4073 uint32_t cCacheLines = RT_ALIGN(pExtent->cGTEntries, VMDK_GT_CACHELINE_SIZE) / VMDK_GT_CACHELINE_SIZE;
4074 for (uint32_t i = 0; i < cCacheLines; i++)
4075 memset(&pImage->pGTCache->aGTCache[i].aGTData[0], '\0',
4076 VMDK_GT_CACHELINE_SIZE * sizeof(uint32_t));
4077}
4078
4079/**
4080 * Internal. Flush the grain table buffer for real stream optimized writing.
4081 */
4082static int vmdksFlushGT(PVMDKIMAGE pImage, PVMDKEXTENT pExtent,
4083 uint32_t uGDEntry)
4084{
4085 int rc = VINF_SUCCESS;
4086 uint32_t cCacheLines = RT_ALIGN(pExtent->cGTEntries, VMDK_GT_CACHELINE_SIZE) / VMDK_GT_CACHELINE_SIZE;
4087
4088 uint64_t uFileOffset;
4089 rc = vmdkFileGetSize(pImage, pExtent->pFile, &uFileOffset);
4090 AssertRC(rc);
4091 /* Align to sector, as the previous write could have been any size. */
4092 uFileOffset = RT_ALIGN_64(uFileOffset, 512);
4093
4094 /* Grain table marker. */
4095 /** @todo check me! */
4096 uint8_t aMarker[512];
4097 memset(aMarker, '\0', sizeof(aMarker));
4098 PVMDKMARKER pMarker = (PVMDKMARKER)&aMarker[0];
4099 pMarker->cbSize = RT_H2LE_U32(pExtent->cGTEntries * sizeof(uint32_t));
4100 pMarker->uType = RT_H2LE_U32(VMDK_MARKER_GT);
4101 rc = vmdkFileWriteSync(pImage, pExtent->pFile, uFileOffset,
4102 aMarker, sizeof(aMarker), NULL);
4103 AssertRC(rc);
4104 uFileOffset += 512;
4105
4106 if (!pExtent->pGD || pExtent->pGD[uGDEntry])
4107 return VERR_INTERNAL_ERROR;
4108
4109 pExtent->pGD[uGDEntry] = VMDK_BYTE2SECTOR(uFileOffset);
4110
4111 for (uint32_t i = 0; i < cCacheLines; i++)
4112 {
4113 /* Convert the grain table to little endian in place, as it will not
4114 * be used at all after this function has been called. */
4115 uint32_t *pGTTmp = &pImage->pGTCache->aGTCache[i].aGTData[0];
4116 for (uint32_t j = 0; j < VMDK_GT_CACHELINE_SIZE; j++, pGTTmp++)
4117 *pGTTmp = RT_H2LE_U32(*pGTTmp);
4118
4119 rc = vmdkFileWriteSync(pImage, pExtent->pFile, uFileOffset,
4120 &pImage->pGTCache->aGTCache[i].aGTData[0],
4121 VMDK_GT_CACHELINE_SIZE * sizeof(uint32_t),
4122 NULL);
4123 }
4124 return rc;
4125}
4126
4127/**
4128 * Internal. Free all allocated space for representing a real stream optimized
4129 * image, and optionally delete the image from disk.
4130 */
static int vmdksFreeImage(PVMDKIMAGE pImage, bool fDelete)
{
    int rc = VINF_SUCCESS;

    /* Freeing a never allocated image (e.g. because the open failed) is
     * not signalled as an error. After all nothing bad happens. */
    if (pImage)
    {
        if (!(pImage->uOpenFlags & VD_OPEN_FLAGS_READONLY))
        {
            /* Check if all extents are clean. */
            for (unsigned i = 0; i < pImage->cExtents; i++)
            {
                Assert(!pImage->pExtents[i].fUncleanShutdown);
            }
        }

        /* No need to write any pending data if the file will be deleted. */
        if (!fDelete && pImage->pExtents)
        {
            /* Finalize the stream: flush remaining grain tables, then append
             * the GD marker + grain directory, the end-of-stream marker and
             * finally the footer copy of the header. Only extent 0 is used
             * (stream optimized images have a single extent). */
            PVMDKEXTENT pExtent = &pImage->pExtents[0];
            uint32_t uLastGDEntry = pExtent->uLastGrainWritten / pExtent->cGTEntries;
            if (uLastGDEntry != pExtent->cGDEntries)
            {
                /* Flush the (possibly partially filled) GT covering the last
                 * written grain, then write zeroed GTs for all remaining GD
                 * entries so the full grain directory is allocated. */
                rc = vmdksFlushGT(pImage, pExtent, uLastGDEntry);
                AssertRC(rc);
                vmdksClearGT(pImage, pExtent);
                for (uint32_t i = uLastGDEntry + 1; i < pExtent->cGDEntries; i++)
                {
                    rc = vmdksFlushGT(pImage, pExtent, i);
                    AssertRC(rc);
                }
            }

            uint64_t uFileOffset;
            rc = vmdkFileGetSize(pImage, pExtent->pFile, &uFileOffset);
            AssertRC(rc);
            /* Markers must start on a sector boundary. */
            uFileOffset = RT_ALIGN_64(uFileOffset, 512);

            /* Grain directory marker. */
            /** @todo check me! */
            uint8_t aMarker[512];
            memset(aMarker, '\0', sizeof(aMarker));
            PVMDKMARKER pMarker = (PVMDKMARKER)&aMarker[0];
            pMarker->cbSize = RT_H2LE_U32(pExtent->cGDEntries * sizeof(uint32_t));
            pMarker->uType = RT_H2LE_U32(VMDK_MARKER_GD);
            rc = vmdkFileWriteSync(pImage, pExtent->pFile, uFileOffset,
                                   aMarker, sizeof(aMarker), NULL);
            AssertRC(rc);
            uFileOffset += 512;

            /* Write grain directory in little endian style. The array will
             * not be used after this, so convert in place. */
            uint32_t *pGDTmp = pExtent->pGD;
            for (uint32_t i = 0; i < pExtent->cGDEntries; i++, pGDTmp++)
                *pGDTmp = RT_H2LE_U32(*pGDTmp);
            rc = vmdkFileWriteSync(pImage, pExtent->pFile, uFileOffset,
                                   pExtent->pGD,
                                   pExtent->cGDEntries * sizeof(uint32_t),
                                   NULL);
            AssertRC(rc);

            /* Both GD fields refer to the single copy just written. */
            pExtent->uSectorGD = VMDK_BYTE2SECTOR(uFileOffset);
            pExtent->uSectorRGD = VMDK_BYTE2SECTOR(uFileOffset);
            uFileOffset = RT_ALIGN_64(uFileOffset + pExtent->cGDEntries * sizeof(uint32_t), 512);

            /* End of stream marker (an all-zero sector). */
            memset(aMarker, '\0', sizeof(aMarker));
            /** @todo check me! */
            rc = vmdkFileWriteSync(pImage, pExtent->pFile, uFileOffset,
                                   aMarker, sizeof(aMarker), NULL);
            AssertRC(rc);

            /* Footer (header copy) goes into the sector after the marker. */
            uFileOffset += 512;
            rc = vmdkWriteMetaSparseExtent(pImage, pExtent, uFileOffset);
            AssertRC(rc);
        }

        /* Release extent data (and optionally delete the backing files). */
        if (pImage->pExtents != NULL)
        {
            for (unsigned i = 0 ; i < pImage->cExtents; i++)
                vmdkFreeExtentData(pImage, &pImage->pExtents[i], fDelete);
            RTMemFree(pImage->pExtents);
            pImage->pExtents = NULL;
        }
        pImage->cExtents = 0;
        if (pImage->pFile != NULL)
            vmdkFileClose(pImage, &pImage->pFile, fDelete);
        vmdkFileCheckAllClose(pImage);

        if (pImage->pGTCache)
        {
            RTMemFree(pImage->pGTCache);
            pImage->pGTCache = NULL;
        }
        if (pImage->pDescData)
        {
            RTMemFree(pImage->pDescData);
            pImage->pDescData = NULL;
        }
    }

    LogFlowFunc(("returns %Rrc\n", rc));
    return rc;
}
4236
4237/**
4238 * Internal: Create a real stream optimized VMDK using only linear writes.
4239 */
static int vmdksCreateImage(PVMDKIMAGE pImage, uint64_t cbSize,
                            unsigned uImageFlags, const char *pszComment,
                            PCVDGEOMETRY pPCHSGeometry,
                            PCVDGEOMETRY pLCHSGeometry, PCRTUUID pUuid,
                            PFNVDPROGRESS pfnProgress, void *pvUser,
                            unsigned uPercentStart, unsigned uPercentSpan)
{
    int rc;

    pImage->uImageFlags = uImageFlags;

    /* Try to get error interface. */
    pImage->pInterfaceError = VDInterfaceGet(pImage->pVDIfsDisk, VDINTERFACETYPE_ERROR);
    if (pImage->pInterfaceError)
        pImage->pInterfaceErrorCallbacks = VDGetInterfaceError(pImage->pInterfaceError);

    /* Get I/O interface. */
    pImage->pInterfaceIO = VDInterfaceGet(pImage->pVDIfsImage, VDINTERFACETYPE_IOINT);
    AssertPtrReturn(pImage->pInterfaceIO, VERR_INVALID_PARAMETER);
    pImage->pInterfaceIOCallbacks = VDGetInterfaceIOInt(pImage->pInterfaceIO);
    AssertPtrReturn(pImage->pInterfaceIOCallbacks, VERR_INVALID_PARAMETER);

    PVMDKEXTENT pExtent;
    char *pszBasenameSubstr, *pszBasedirectory, *pszBasename;
    size_t cbBasenameSubstr;

    rc = vmdkCreateDescriptor(pImage, pImage->pDescData, pImage->cbDescAlloc,
                              &pImage->Descriptor);
    if (RT_FAILURE(rc))
    {
        rc = vmdkError(pImage, rc, RT_SRC_POS, N_("VMDK: could not create new descriptor in '%s'"), pImage->pszFilename);
        goto out;
    }

    /* Stream optimized images always consist of exactly one extent. */
    rc = vmdkCreateExtents(pImage, 1);
    if (RT_FAILURE(rc))
    {
        rc = vmdkError(pImage, rc, RT_SRC_POS, N_("VMDK: could not create new extent list in '%s'"), pImage->pszFilename);
        goto out;
    }

    /* Basename strings needed for constructing the extent names. */
    pszBasenameSubstr = RTPathFilename(pImage->pszFilename);
    AssertPtr(pszBasenameSubstr);
    cbBasenameSubstr = strlen(pszBasenameSubstr) + 1;

    /* No separate descriptor file. */
    pImage->pFile = NULL;

    /* Set up all extents. */
    pExtent = &pImage->pExtents[0];

    /* Set up fullname/basename for extent description. Cannot use StrDup
     * for basename, as it is not guaranteed that the memory can be freed
     * with RTMemTmpFree, which must be used as in other code paths
     * StrDup is not usable. */
    pszBasename = (char *)RTMemTmpAlloc(cbBasenameSubstr);
    if (!pszBasename)
    {
        rc = VERR_NO_MEMORY;
        goto out;
    }
    memcpy(pszBasename, pszBasenameSubstr, cbBasenameSubstr);
    pExtent->pszBasename = pszBasename;

    /* Full extent path = image directory + basename. */
    pszBasedirectory = RTStrDup(pImage->pszFilename);
    RTPathStripFilename(pszBasedirectory);
    char *pszFullname;
    rc = RTStrAPrintf(&pszFullname, "%s%c%s", pszBasedirectory,
                      RTPATH_SLASH, pExtent->pszBasename);
    RTStrFree(pszBasedirectory);
    if (RT_FAILURE(rc))
        goto out;
    pExtent->pszFullname = pszFullname;

    /* Create file for extent. */
    rc = vmdkFileOpen(pImage, &pExtent->pFile, pExtent->pszFullname,
                      VDOpenFlagsToFileOpenFlags(pImage->uOpenFlags,
                                                 true /* fCreate */),
                      false /* fAsyncIO */);
    if (RT_FAILURE(rc))
    {
        rc = vmdkError(pImage, rc, RT_SRC_POS, N_("VMDK: could not create new file '%s'"), pExtent->pszFullname);
        goto out;
    }

    /* Place descriptor file information. */
    pExtent->uDescriptorSector = 1;
    pExtent->cDescriptorSectors = VMDK_BYTE2SECTOR(pImage->cbDescAlloc);
    /* The descriptor is part of the (only) extent. */
    pExtent->pDescData = pImage->pDescData;
    pImage->pDescData = NULL;

    /* Sparse extent layout: 64K grains, 512 GT entries per table. */
    uint64_t cSectorsPerGDE, cSectorsPerGD;
    pExtent->enmType = VMDKETYPE_HOSTED_SPARSE;
    pExtent->cSectors = VMDK_BYTE2SECTOR(RT_ALIGN_64(cbSize, _64K));
    pExtent->cSectorsPerGrain = VMDK_BYTE2SECTOR(_64K);
    pExtent->cGTEntries = 512;
    cSectorsPerGDE = pExtent->cGTEntries * pExtent->cSectorsPerGrain;
    pExtent->cSectorsPerGDE = cSectorsPerGDE;
    pExtent->cGDEntries = (pExtent->cSectors + cSectorsPerGDE - 1) / cSectorsPerGDE;
    cSectorsPerGD = (pExtent->cGDEntries + (512 / sizeof(uint32_t) - 1)) / (512 / sizeof(uint32_t));

    /* The spec says version is 1 for all VMDKs, but the vast
     * majority of streamOptimized VMDKs actually contain
     * version 3 - so go with the majority. Both are acepted. */
    pExtent->uVersion = 3;
    pExtent->uCompression = VMDK_COMPRESSION_DEFLATE;
    pExtent->fFooter = true;

    /* The descriptor advertises the extent as read-only. */
    pExtent->enmAccess = VMDKACCESS_READONLY;
    pExtent->fUncleanShutdown = false;
    pExtent->cNominalSectors = VMDK_BYTE2SECTOR(cbSize);
    pExtent->uSectorOffset = 0;
    pExtent->fMetaDirty = true;

    /* Create grain directory, without preallocating it straight away. It will
     * be constructed on the fly when writing out the data and written when
     * closing the image. The end effect is that the full grain directory is
     * allocated, which is a requirement of the VMDK specs. */
    rc = vmdkCreateGrainDirectory(pImage, pExtent,
                                  RT_MAX( pExtent->uDescriptorSector
                                        + pExtent->cDescriptorSectors,
                                        1),
                                  false /* fPreAlloc */);
    if (RT_FAILURE(rc))
    {
        rc = vmdkError(pImage, rc, RT_SRC_POS, N_("VMDK: could not create new grain directory in '%s'"), pExtent->pszFullname);
        goto out;
    }

    rc = vmdkDescBaseSetStr(pImage, &pImage->Descriptor, "createType",
                            "streamOptimized");
    if (RT_FAILURE(rc))
    {
        rc = vmdkError(pImage, rc, RT_SRC_POS, N_("VMDK: could not set the image type in '%s'"), pImage->pszFilename);
        goto out;
    }

    if (pfnProgress)
        pfnProgress(pvUser, uPercentStart + uPercentSpan * 20 / 100);

    pImage->cbSize = cbSize;

    Assert(pImage->cExtents == 1);

    /* Put the single extent into the descriptor's extent list. */
    rc = vmdkDescExtInsert(pImage, &pImage->Descriptor, pExtent->enmAccess,
                           pExtent->cNominalSectors, pExtent->enmType,
                           pExtent->pszBasename, pExtent->uSectorOffset);
    if (RT_FAILURE(rc))
    {
        rc = vmdkError(pImage, rc, RT_SRC_POS, N_("VMDK: could not insert the extent list into descriptor in '%s'"), pImage->pszFilename);
        goto out;
    }
    vmdkDescExtRemoveDummy(pImage, &pImage->Descriptor);

    /* Store geometry only if the caller supplied a complete one. */
    if (    pPCHSGeometry->cCylinders != 0
        &&  pPCHSGeometry->cHeads != 0
        &&  pPCHSGeometry->cSectors != 0)
    {
        rc = vmdkDescSetPCHSGeometry(pImage, pPCHSGeometry);
        if (RT_FAILURE(rc))
            goto out;
    }
    if (    pLCHSGeometry->cCylinders != 0
        &&  pLCHSGeometry->cHeads != 0
        &&  pLCHSGeometry->cSectors != 0)
    {
        rc = vmdkDescSetLCHSGeometry(pImage, pLCHSGeometry);
        if (RT_FAILURE(rc))
            goto out;
    }

    pImage->LCHSGeometry = *pLCHSGeometry;
    pImage->PCHSGeometry = *pPCHSGeometry;

    /* Store the image UUID; parent/modification UUIDs start out cleared. */
    pImage->ImageUuid = *pUuid;
    rc = vmdkDescDDBSetUuid(pImage, &pImage->Descriptor,
                            VMDK_DDB_IMAGE_UUID, &pImage->ImageUuid);
    if (RT_FAILURE(rc))
    {
        rc = vmdkError(pImage, rc, RT_SRC_POS, N_("VMDK: error storing image UUID in new descriptor in '%s'"), pImage->pszFilename);
        goto out;
    }
    RTUuidClear(&pImage->ParentUuid);
    rc = vmdkDescDDBSetUuid(pImage, &pImage->Descriptor,
                            VMDK_DDB_PARENT_UUID, &pImage->ParentUuid);
    if (RT_FAILURE(rc))
    {
        rc = vmdkError(pImage, rc, RT_SRC_POS, N_("VMDK: error storing parent image UUID in new descriptor in '%s'"), pImage->pszFilename);
        goto out;
    }
    RTUuidClear(&pImage->ModificationUuid);
    rc = vmdkDescDDBSetUuid(pImage, &pImage->Descriptor,
                            VMDK_DDB_MODIFICATION_UUID,
                            &pImage->ModificationUuid);
    if (RT_FAILURE(rc))
    {
        rc = vmdkError(pImage, rc, RT_SRC_POS, N_("VMDK: error storing modification UUID in new descriptor in '%s'"), pImage->pszFilename);
        goto out;
    }
    RTUuidClear(&pImage->ParentModificationUuid);
    rc = vmdkDescDDBSetUuid(pImage, &pImage->Descriptor,
                            VMDK_DDB_PARENT_MODIFICATION_UUID,
                            &pImage->ParentModificationUuid);
    if (RT_FAILURE(rc))
    {
        rc = vmdkError(pImage, rc, RT_SRC_POS, N_("VMDK: error storing parent modification UUID in new descriptor in '%s'"), pImage->pszFilename);
        goto out;
    }

    rc = vmdkAllocateGrainTableCache(pImage);
    if (RT_FAILURE(rc))
        goto out;

    rc = vmdkSetImageComment(pImage, pszComment);
    if (RT_FAILURE(rc))
    {
        rc = vmdkError(pImage, rc, RT_SRC_POS, N_("VMDK: cannot set image comment in '%s'"), pImage->pszFilename);
        goto out;
    }

    if (pfnProgress)
        pfnProgress(pvUser, uPercentStart + uPercentSpan * 50 / 100);

    /* Now that all descriptor entries are complete, shrink it to the minimum
     * size. It will never be changed afterwards anyway. */
    pExtent->cDescriptorSectors = VMDK_BYTE2SECTOR(RT_ALIGN_64(  pImage->Descriptor.aLines[pImage->Descriptor.cLines]
                                                               - pImage->Descriptor.aLines[0], 512));
    rc = vmdkWriteMetaSparseExtent(pImage, &pImage->pExtents[0], 0);
    if (RT_FAILURE(rc))
    {
        rc = vmdkError(pImage, rc, RT_SRC_POS, N_("VMDK: cannot write VMDK header in '%s'"), pImage->pszFilename);
        goto out;
    }

    if (pfnProgress)
        pfnProgress(pvUser, uPercentStart + uPercentSpan * 70 / 100);

    rc = vmdkWriteDescriptor(pImage);
    if (RT_FAILURE(rc))
    {
        rc = vmdkError(pImage, rc, RT_SRC_POS, N_("VMDK: cannot write VMDK descriptor in '%s'"), pImage->pszFilename);
        goto out;
    }

out:
    if (RT_SUCCESS(rc) && pfnProgress)
        pfnProgress(pvUser, uPercentStart + uPercentSpan);

    /* On failure free everything; keep the files only if they pre-existed. */
    if (RT_FAILURE(rc))
        vmdksFreeImage(pImage, rc != VERR_ALREADY_EXISTS);
    return rc;
}
4494
4495/**
4496 * Internal: The actual code for creating any VMDK variant currently in
4497 * existence on hosted environments.
4498 */
static int vmdkCreateImage(PVMDKIMAGE pImage, uint64_t cbSize,
                           unsigned uImageFlags, const char *pszComment,
                           PCVDGEOMETRY pPCHSGeometry,
                           PCVDGEOMETRY pLCHSGeometry, PCRTUUID pUuid,
                           PFNVDPROGRESS pfnProgress, void *pvUser,
                           unsigned uPercentStart, unsigned uPercentSpan)
{
    int rc;

    pImage->uImageFlags = uImageFlags;

    /* Try to get error interface. */
    pImage->pInterfaceError = VDInterfaceGet(pImage->pVDIfsDisk, VDINTERFACETYPE_ERROR);
    if (pImage->pInterfaceError)
        pImage->pInterfaceErrorCallbacks = VDGetInterfaceError(pImage->pInterfaceError);

    /* Get I/O interface. */
    pImage->pInterfaceIO = VDInterfaceGet(pImage->pVDIfsImage, VDINTERFACETYPE_IOINT);
    AssertPtrReturn(pImage->pInterfaceIO, VERR_INVALID_PARAMETER);
    pImage->pInterfaceIOCallbacks = VDGetInterfaceIOInt(pImage->pInterfaceIO);
    AssertPtrReturn(pImage->pInterfaceIOCallbacks, VERR_INVALID_PARAMETER);

    rc = vmdkCreateDescriptor(pImage, pImage->pDescData, pImage->cbDescAlloc,
                              &pImage->Descriptor);
    if (RT_FAILURE(rc))
    {
        rc = vmdkError(pImage, rc, RT_SRC_POS, N_("VMDK: could not create new descriptor in '%s'"), pImage->pszFilename);
        goto out;
    }

    if (    (uImageFlags & VD_IMAGE_FLAGS_FIXED)
        &&  (uImageFlags & VD_VMDK_IMAGE_FLAGS_RAWDISK))
    {
        /* Raw disk image (includes raw partition). */
        const PVBOXHDDRAW pRaw = (const PVBOXHDDRAW)pszComment;
        /* As the comment is misused, zap it so that no garbage comment
         * is set below. */
        pszComment = NULL;
        rc = vmdkCreateRawImage(pImage, pRaw, cbSize);
    }
    else
    {
        /* Regular fixed or sparse image (monolithic or split). */
        rc = vmdkCreateRegularImage(pImage, cbSize, uImageFlags,
                                    pfnProgress, pvUser, uPercentStart,
                                    uPercentSpan * 95 / 100);
    }

    if (RT_FAILURE(rc))
        goto out;

    if (RT_SUCCESS(rc) && pfnProgress)
        pfnProgress(pvUser, uPercentStart + uPercentSpan * 98 / 100);

    pImage->cbSize = cbSize;

    /* Put every created extent into the descriptor's extent list. */
    for (unsigned i = 0; i < pImage->cExtents; i++)
    {
        PVMDKEXTENT pExtent = &pImage->pExtents[i];

        rc = vmdkDescExtInsert(pImage, &pImage->Descriptor, pExtent->enmAccess,
                               pExtent->cNominalSectors, pExtent->enmType,
                               pExtent->pszBasename, pExtent->uSectorOffset);
        if (RT_FAILURE(rc))
        {
            rc = vmdkError(pImage, rc, RT_SRC_POS, N_("VMDK: could not insert the extent list into descriptor in '%s'"), pImage->pszFilename);
            goto out;
        }
    }
    vmdkDescExtRemoveDummy(pImage, &pImage->Descriptor);

    /* Store geometry only if the caller supplied a complete one. */
    if (    pPCHSGeometry->cCylinders != 0
        &&  pPCHSGeometry->cHeads != 0
        &&  pPCHSGeometry->cSectors != 0)
    {
        rc = vmdkDescSetPCHSGeometry(pImage, pPCHSGeometry);
        if (RT_FAILURE(rc))
            goto out;
    }
    if (    pLCHSGeometry->cCylinders != 0
        &&  pLCHSGeometry->cHeads != 0
        &&  pLCHSGeometry->cSectors != 0)
    {
        rc = vmdkDescSetLCHSGeometry(pImage, pLCHSGeometry);
        if (RT_FAILURE(rc))
            goto out;
    }

    pImage->LCHSGeometry = *pLCHSGeometry;
    pImage->PCHSGeometry = *pPCHSGeometry;

    /* Store the image UUID; parent/modification UUIDs start out cleared. */
    pImage->ImageUuid = *pUuid;
    rc = vmdkDescDDBSetUuid(pImage, &pImage->Descriptor,
                            VMDK_DDB_IMAGE_UUID, &pImage->ImageUuid);
    if (RT_FAILURE(rc))
    {
        rc = vmdkError(pImage, rc, RT_SRC_POS, N_("VMDK: error storing image UUID in new descriptor in '%s'"), pImage->pszFilename);
        goto out;
    }
    RTUuidClear(&pImage->ParentUuid);
    rc = vmdkDescDDBSetUuid(pImage, &pImage->Descriptor,
                            VMDK_DDB_PARENT_UUID, &pImage->ParentUuid);
    if (RT_FAILURE(rc))
    {
        rc = vmdkError(pImage, rc, RT_SRC_POS, N_("VMDK: error storing parent image UUID in new descriptor in '%s'"), pImage->pszFilename);
        goto out;
    }
    RTUuidClear(&pImage->ModificationUuid);
    rc = vmdkDescDDBSetUuid(pImage, &pImage->Descriptor,
                            VMDK_DDB_MODIFICATION_UUID,
                            &pImage->ModificationUuid);
    if (RT_FAILURE(rc))
    {
        rc = vmdkError(pImage, rc, RT_SRC_POS, N_("VMDK: error storing modification UUID in new descriptor in '%s'"), pImage->pszFilename);
        goto out;
    }
    RTUuidClear(&pImage->ParentModificationUuid);
    rc = vmdkDescDDBSetUuid(pImage, &pImage->Descriptor,
                            VMDK_DDB_PARENT_MODIFICATION_UUID,
                            &pImage->ParentModificationUuid);
    if (RT_FAILURE(rc))
    {
        rc = vmdkError(pImage, rc, RT_SRC_POS, N_("VMDK: error storing parent modification UUID in new descriptor in '%s'"), pImage->pszFilename);
        goto out;
    }

    rc = vmdkAllocateGrainTableCache(pImage);
    if (RT_FAILURE(rc))
        goto out;

    rc = vmdkSetImageComment(pImage, pszComment);
    if (RT_FAILURE(rc))
    {
        rc = vmdkError(pImage, rc, RT_SRC_POS, N_("VMDK: cannot set image comment in '%s'"), pImage->pszFilename);
        goto out;
    }

    if (RT_SUCCESS(rc) && pfnProgress)
        pfnProgress(pvUser, uPercentStart + uPercentSpan * 99 / 100);

    /* Push descriptor and all dirty extent metadata to disk. */
    rc = vmdkFlushImage(pImage);

out:
    if (RT_SUCCESS(rc) && pfnProgress)
        pfnProgress(pvUser, uPercentStart + uPercentSpan);

    /* On failure free everything; keep the files only if they pre-existed. */
    if (RT_FAILURE(rc))
        vmdkFreeImage(pImage, rc != VERR_ALREADY_EXISTS);
    return rc;
}
4649
4650/**
4651 * Internal: Update image comment.
4652 */
4653static int vmdkSetImageComment(PVMDKIMAGE pImage, const char *pszComment)
4654{
4655 char *pszCommentEncoded;
4656 if (pszComment)
4657 {
4658 pszCommentEncoded = vmdkEncodeString(pszComment);
4659 if (!pszCommentEncoded)
4660 return VERR_NO_MEMORY;
4661 }
4662 else
4663 pszCommentEncoded = NULL;
4664 int rc = vmdkDescDDBSetStr(pImage, &pImage->Descriptor,
4665 "ddb.comment", pszCommentEncoded);
4666 if (pszComment)
4667 RTStrFree(pszCommentEncoded);
4668 if (RT_FAILURE(rc))
4669 return vmdkError(pImage, rc, RT_SRC_POS, N_("VMDK: error storing image comment in descriptor in '%s'"), pImage->pszFilename);
4670 return VINF_SUCCESS;
4671}
4672
4673/**
4674 * Internal. Free all allocated space for representing an image, and optionally
4675 * delete the image from disk.
4676 */
static int vmdkFreeImage(PVMDKIMAGE pImage, bool fDelete)
{
    int rc = VINF_SUCCESS;

    /* Freeing a never allocated image (e.g. because the open failed) is
     * not signalled as an error. After all nothing bad happens. */
    if (pImage)
    {
        if (!(pImage->uOpenFlags & VD_OPEN_FLAGS_READONLY))
        {
            /* Mark all extents as clean. */
            for (unsigned i = 0; i < pImage->cExtents; i++)
            {
                /* Clearing the unclean-shutdown flag makes the extent's
                 * metadata dirty, so the flush below writes it back. */
                if (    (   pImage->pExtents[i].enmType == VMDKETYPE_HOSTED_SPARSE
#ifdef VBOX_WITH_VMDK_ESX
                         || pImage->pExtents[i].enmType == VMDKETYPE_ESX_SPARSE
#endif /* VBOX_WITH_VMDK_ESX */
                        )
                    &&  pImage->pExtents[i].fUncleanShutdown)
                {
                    pImage->pExtents[i].fUncleanShutdown = false;
                    pImage->pExtents[i].fMetaDirty = true;
                }
            }
        }
        /* Write pending descriptor/metadata before releasing anything. */
        vmdkFlushImage(pImage);

        /* Release extent data (and optionally delete the backing files). */
        if (pImage->pExtents != NULL)
        {
            for (unsigned i = 0 ; i < pImage->cExtents; i++)
                vmdkFreeExtentData(pImage, &pImage->pExtents[i], fDelete);
            RTMemFree(pImage->pExtents);
            pImage->pExtents = NULL;
        }
        pImage->cExtents = 0;
        if (pImage->pFile != NULL)
            vmdkFileClose(pImage, &pImage->pFile, fDelete);
        vmdkFileCheckAllClose(pImage);

        if (pImage->pGTCache)
        {
            RTMemFree(pImage->pGTCache);
            pImage->pGTCache = NULL;
        }
        if (pImage->pDescData)
        {
            RTMemFree(pImage->pDescData);
            pImage->pDescData = NULL;
        }
    }

    LogFlowFunc(("returns %Rrc\n", rc));
    return rc;
}
4731
4732/**
4733 * Internal. Flush image data (and metadata) to disk.
4734 */
static int vmdkFlushImage(PVMDKIMAGE pImage)
{
    PVMDKEXTENT pExtent;
    int rc = VINF_SUCCESS;

    /* Update descriptor if changed. */
    if (pImage->Descriptor.fDirty)
    {
        rc = vmdkWriteDescriptor(pImage);
        if (RT_FAILURE(rc))
            goto out;
    }

    for (unsigned i = 0; i < pImage->cExtents; i++)
    {
        pExtent = &pImage->pExtents[i];
        if (pExtent->pFile != NULL && pExtent->fMetaDirty)
        {
            switch (pExtent->enmType)
            {
                case VMDKETYPE_HOSTED_SPARSE:
                    /* Write the header at the start of the extent ... */
                    rc = vmdkWriteMetaSparseExtent(pImage, pExtent, 0);
                    if (RT_FAILURE(rc))
                        goto out;
                    if (pExtent->fFooter)
                    {
                        /* ... and, if the extent has a footer, write the
                         * header copy two sectors before the (sector-aligned)
                         * end of the file. */
                        uint64_t cbSize;
                        rc = vmdkFileGetSize(pImage, pExtent->pFile, &cbSize);
                        if (RT_FAILURE(rc))
                            goto out;
                        cbSize = RT_ALIGN_64(cbSize, 512);
                        rc = vmdkWriteMetaSparseExtent(pImage, pExtent, cbSize - 2*512);
                        if (RT_FAILURE(rc))
                            goto out;
                    }
                    break;
#ifdef VBOX_WITH_VMDK_ESX
                case VMDKETYPE_ESX_SPARSE:
                    /** @todo update the header. */
                    break;
#endif /* VBOX_WITH_VMDK_ESX */
                case VMDKETYPE_VMFS:
                case VMDKETYPE_FLAT:
                    /* Nothing to do. */
                    break;
                case VMDKETYPE_ZERO:
                default:
                    /* ZERO extents have no backing file and can never carry
                     * dirty metadata. */
                    AssertMsgFailed(("extent with type %d marked as dirty\n",
                                     pExtent->enmType));
                    break;
            }
        }
        /* Second pass: flush the file buffers of writable file-backed
         * extents. */
        switch (pExtent->enmType)
        {
            case VMDKETYPE_HOSTED_SPARSE:
#ifdef VBOX_WITH_VMDK_ESX
            case VMDKETYPE_ESX_SPARSE:
#endif /* VBOX_WITH_VMDK_ESX */
            case VMDKETYPE_VMFS:
            case VMDKETYPE_FLAT:
                /** @todo implement proper path absolute check. */
                if (   pExtent->pFile != NULL
                    && !(pImage->uOpenFlags & VD_OPEN_FLAGS_READONLY)
                    && !(pExtent->pszBasename[0] == RTPATH_SLASH))
                    rc = vmdkFileFlush(pImage, pExtent->pFile);
                break;
            case VMDKETYPE_ZERO:
                /* No need to do anything for this extent. */
                break;
            default:
                AssertMsgFailed(("unknown extent type %d\n", pExtent->enmType));
                break;
        }
    }

out:
    return rc;
}
4813
4814/**
4815 * Internal. Flush image data (and metadata) to disk - async version.
4816 */
4817static int vmdkFlushImageAsync(PVMDKIMAGE pImage, PVDIOCTX pIoCtx)
4818{
4819 PVMDKEXTENT pExtent;
4820 int rc = VINF_SUCCESS;
4821
4822 /* Update descriptor if changed. */
4823 if (pImage->Descriptor.fDirty)
4824 {
4825 rc = vmdkWriteDescriptor(pImage);
4826 if (RT_FAILURE(rc))
4827 goto out;
4828 }
4829
4830 for (unsigned i = 0; i < pImage->cExtents; i++)
4831 {
4832 pExtent = &pImage->pExtents[i];
4833 if (pExtent->pFile != NULL && pExtent->fMetaDirty)
4834 {
4835 switch (pExtent->enmType)
4836 {
4837 case VMDKETYPE_HOSTED_SPARSE:
4838 AssertMsgFailed(("Async I/O not supported for sparse images\n"));
4839 break;
4840#ifdef VBOX_WITH_VMDK_ESX
4841 case VMDKETYPE_ESX_SPARSE:
4842 /** @todo update the header. */
4843 break;
4844#endif /* VBOX_WITH_VMDK_ESX */
4845 case VMDKETYPE_VMFS:
4846 case VMDKETYPE_FLAT:
4847 /* Nothing to do. */
4848 break;
4849 case VMDKETYPE_ZERO:
4850 default:
4851 AssertMsgFailed(("extent with type %d marked as dirty\n",
4852 pExtent->enmType));
4853 break;
4854 }
4855 }
4856 switch (pExtent->enmType)
4857 {
4858 case VMDKETYPE_HOSTED_SPARSE:
4859#ifdef VBOX_WITH_VMDK_ESX
4860 case VMDKETYPE_ESX_SPARSE:
4861#endif /* VBOX_WITH_VMDK_ESX */
4862 case VMDKETYPE_VMFS:
4863 case VMDKETYPE_FLAT:
4864 /** @todo implement proper path absolute check. */
4865 if ( pExtent->pFile != NULL
4866 && !(pImage->uOpenFlags & VD_OPEN_FLAGS_READONLY)
4867 && !(pExtent->pszBasename[0] == RTPATH_SLASH))
4868 rc = vmdkFileFlushAsync(pImage, pExtent->pFile, pIoCtx);
4869 break;
4870 case VMDKETYPE_ZERO:
4871 /* No need to do anything for this extent. */
4872 break;
4873 default:
4874 AssertMsgFailed(("unknown extent type %d\n", pExtent->enmType));
4875 break;
4876 }
4877 }
4878
4879out:
4880 return rc;
4881}
4882
4883/**
4884 * Internal. Find extent corresponding to the sector number in the disk.
4885 */
4886static int vmdkFindExtent(PVMDKIMAGE pImage, uint64_t offSector,
4887 PVMDKEXTENT *ppExtent, uint64_t *puSectorInExtent)
4888{
4889 PVMDKEXTENT pExtent = NULL;
4890 int rc = VINF_SUCCESS;
4891
4892 for (unsigned i = 0; i < pImage->cExtents; i++)
4893 {
4894 if (offSector < pImage->pExtents[i].cNominalSectors)
4895 {
4896 pExtent = &pImage->pExtents[i];
4897 *puSectorInExtent = offSector + pImage->pExtents[i].uSectorOffset;
4898 break;
4899 }
4900 offSector -= pImage->pExtents[i].cNominalSectors;
4901 }
4902
4903 if (pExtent)
4904 *ppExtent = pExtent;
4905 else
4906 rc = VERR_IO_SECTOR_NOT_FOUND;
4907
4908 return rc;
4909}
4910
4911/**
4912 * Internal. Hash function for placing the grain table hash entries.
4913 */
4914static uint32_t vmdkGTCacheHash(PVMDKGTCACHE pCache, uint64_t uSector,
4915 unsigned uExtent)
4916{
4917 /** @todo this hash function is quite simple, maybe use a better one which
4918 * scrambles the bits better. */
4919 return (uSector + uExtent) % pCache->cEntries;
4920}
4921
/**
 * Internal. Get sector number in the extent file from the relative sector
 * number in the extent.
 *
 * Translates through the grain directory and grain table, using the shared
 * grain table cache. *puExtentSector is set to 0 when the area is not
 * allocated (sparse hole).
 */
static int vmdkGetSector(PVMDKIMAGE pImage, PVMDKEXTENT pExtent,
                         uint64_t uSector, uint64_t *puExtentSector)
{
    PVMDKGTCACHE pCache = pImage->pGTCache;
    uint64_t uGDIndex, uGTSector, uGTBlock;
    uint32_t uGTHash, uGTBlockIndex;
    PVMDKGTCACHEENTRY pGTCacheEntry;
    uint32_t aGTDataTmp[VMDK_GT_CACHELINE_SIZE];
    int rc;

    /* Locate the grain directory entry covering this sector. */
    uGDIndex = uSector / pExtent->cSectorsPerGDE;
    if (uGDIndex >= pExtent->cGDEntries)
        return VERR_OUT_OF_RANGE;
    uGTSector = pExtent->pGD[uGDIndex];
    if (!uGTSector)
    {
        /* There is no grain table referenced by this grain directory
         * entry. So there is absolutely no data in this area. */
        *puExtentSector = 0;
        return VINF_SUCCESS;
    }

    /* Look up the cache line holding the grain table entries for this block. */
    uGTBlock = uSector / (pExtent->cSectorsPerGrain * VMDK_GT_CACHELINE_SIZE);
    uGTHash = vmdkGTCacheHash(pCache, uGTBlock, pExtent->uExtent);
    pGTCacheEntry = &pCache->aGTCache[uGTHash];
    if (   pGTCacheEntry->uExtent != pExtent->uExtent
        || pGTCacheEntry->uGTBlock != uGTBlock)
    {
        /* Cache miss, fetch data from disk. */
        rc = vmdkFileReadSync(pImage, pExtent->pFile,
            VMDK_SECTOR2BYTE(uGTSector) + (uGTBlock % (pExtent->cGTEntries / VMDK_GT_CACHELINE_SIZE)) * sizeof(aGTDataTmp),
            aGTDataTmp, sizeof(aGTDataTmp), NULL);
        if (RT_FAILURE(rc))
            return vmdkError(pImage, rc, RT_SRC_POS, N_("VMDK: cannot read grain table entry in '%s'"), pExtent->pszFullname);
        pGTCacheEntry->uExtent = pExtent->uExtent;
        pGTCacheEntry->uGTBlock = uGTBlock;
        /* Grain table entries are stored little endian on disk; the cache
         * keeps them in host byte order. */
        for (unsigned i = 0; i < VMDK_GT_CACHELINE_SIZE; i++)
            pGTCacheEntry->aGTData[i] = RT_LE2H_U32(aGTDataTmp[i]);
    }
    /* Pick the entry for this grain and add the offset within the grain. */
    uGTBlockIndex = (uSector / pExtent->cSectorsPerGrain) % VMDK_GT_CACHELINE_SIZE;
    uint32_t uGrainSector = pGTCacheEntry->aGTData[uGTBlockIndex];
    if (uGrainSector)
        *puExtentSector = uGrainSector + uSector % pExtent->cSectorsPerGrain;
    else
        *puExtentSector = 0; /* Grain not allocated. */
    return VINF_SUCCESS;
}
4973
/**
 * Internal. Get sector number in the extent file from the relative sector
 * number in the extent - version for async access.
 *
 * Same translation as vmdkGetSector, but reads grain table data through the
 * async metadata I/O path on a cache miss. *puExtentSector is set to 0 when
 * the area is not allocated.
 */
static int vmdkGetSectorAsync(PVMDKIMAGE pImage, PVDIOCTX pIoCtx,
                              PVMDKEXTENT pExtent, uint64_t uSector,
                              uint64_t *puExtentSector)
{
    PVMDKGTCACHE pCache = pImage->pGTCache;
    uint64_t uGDIndex, uGTSector, uGTBlock;
    uint32_t uGTHash, uGTBlockIndex;
    PVMDKGTCACHEENTRY pGTCacheEntry;
    uint32_t aGTDataTmp[VMDK_GT_CACHELINE_SIZE];
    int rc;

    /* Locate the grain directory entry covering this sector. */
    uGDIndex = uSector / pExtent->cSectorsPerGDE;
    if (uGDIndex >= pExtent->cGDEntries)
        return VERR_OUT_OF_RANGE;
    uGTSector = pExtent->pGD[uGDIndex];
    if (!uGTSector)
    {
        /* There is no grain table referenced by this grain directory
         * entry. So there is absolutely no data in this area. */
        *puExtentSector = 0;
        return VINF_SUCCESS;
    }

    LogFlowFunc(("uGTSector=%llu\n", uGTSector));

    /* Look up the cache line holding the grain table entries for this block. */
    uGTBlock = uSector / (pExtent->cSectorsPerGrain * VMDK_GT_CACHELINE_SIZE);
    uGTHash = vmdkGTCacheHash(pCache, uGTBlock, pExtent->uExtent);
    pGTCacheEntry = &pCache->aGTCache[uGTHash];
    if (   pGTCacheEntry->uExtent != pExtent->uExtent
        || pGTCacheEntry->uGTBlock != uGTBlock)
    {
        /* Cache miss, fetch data from disk. */
        PVDMETAXFER pMetaXfer;
        rc = vmdkFileReadMetaAsync(pImage, pExtent->pFile,
                                   VMDK_SECTOR2BYTE(uGTSector) + (uGTBlock % (pExtent->cGTEntries / VMDK_GT_CACHELINE_SIZE)) * sizeof(aGTDataTmp),
                                   aGTDataTmp, sizeof(aGTDataTmp), pIoCtx, &pMetaXfer, NULL, NULL);
        if (RT_FAILURE(rc))
            return rc;
        /* We can release the metadata transfer immediately. */
        vmdkFileMetaXferRelease(pImage, pMetaXfer);
        pGTCacheEntry->uExtent = pExtent->uExtent;
        pGTCacheEntry->uGTBlock = uGTBlock;
        /* Grain table entries are stored little endian on disk; the cache
         * keeps them in host byte order. */
        for (unsigned i = 0; i < VMDK_GT_CACHELINE_SIZE; i++)
            pGTCacheEntry->aGTData[i] = RT_LE2H_U32(aGTDataTmp[i]);
    }
    /* Pick the entry for this grain and add the offset within the grain. */
    uGTBlockIndex = (uSector / pExtent->cSectorsPerGrain) % VMDK_GT_CACHELINE_SIZE;
    uint32_t uGrainSector = pGTCacheEntry->aGTData[uGTBlockIndex];
    if (uGrainSector)
        *puExtentSector = uGrainSector + uSector % pExtent->cSectorsPerGrain;
    else
        *puExtentSector = 0; /* Grain not allocated. */
    return VINF_SUCCESS;
}
5031
5032/**
5033 * Internal. Allocates a new grain table (if necessary), writes the grain
5034 * and updates the grain table. The cache is also updated by this operation.
5035 * This is separate from vmdkGetSector, because that should be as fast as
5036 * possible. Most code from vmdkGetSector also appears here.
5037 */
5038static int vmdkAllocGrain(PVMDKIMAGE pImage, PVMDKEXTENT pExtent,
5039 uint64_t uSector, const void *pvBuf,
5040 uint64_t cbWrite)
5041{
5042 PVMDKGTCACHE pCache = pImage->pGTCache;
5043 uint64_t uGDIndex, uGTSector, uRGTSector, uGTBlock;
5044 uint64_t cbExtentSize;
5045 uint32_t uGTHash, uGTBlockIndex;
5046 PVMDKGTCACHEENTRY pGTCacheEntry;
5047 uint32_t aGTDataTmp[VMDK_GT_CACHELINE_SIZE];
5048 int rc;
5049
5050 uGDIndex = uSector / pExtent->cSectorsPerGDE;
5051 if (uGDIndex >= pExtent->cGDEntries)
5052 return VERR_OUT_OF_RANGE;
5053 uGTSector = pExtent->pGD[uGDIndex];
5054 if (pExtent->pRGD)
5055 uRGTSector = pExtent->pRGD[uGDIndex];
5056 else
5057 uRGTSector = 0; /**< avoid compiler warning */
5058 if (!uGTSector)
5059 {
5060 /* There is no grain table referenced by this grain directory
5061 * entry. So there is absolutely no data in this area. Allocate
5062 * a new grain table and put the reference to it in the GDs. */
5063 rc = vmdkFileGetSize(pImage, pExtent->pFile, &cbExtentSize);
5064 if (RT_FAILURE(rc))
5065 return vmdkError(pImage, rc, RT_SRC_POS, N_("VMDK: error getting size in '%s'"), pExtent->pszFullname);
5066 Assert(!(cbExtentSize % 512));
5067 cbExtentSize = RT_ALIGN_64(cbExtentSize, 512);
5068 uGTSector = VMDK_BYTE2SECTOR(cbExtentSize);
5069 /* For writable streamOptimized extents the final sector is the
5070 * end-of-stream marker. Will be re-added after the grain table.
5071 * If the file has a footer it also will be re-added before EOS. */
5072 if (pImage->uImageFlags & VD_VMDK_IMAGE_FLAGS_STREAM_OPTIMIZED)
5073 {
5074 uint64_t uEOSOff = 0;
5075 uGTSector--;
5076 if (pExtent->fFooter)
5077 {
5078 uGTSector--;
5079 uEOSOff = 512;
5080 rc = vmdkWriteMetaSparseExtent(pImage, pExtent, VMDK_SECTOR2BYTE(uGTSector) + pExtent->cGTEntries * sizeof(uint32_t));
5081 if (RT_FAILURE(rc))
5082 return vmdkError(pImage, rc, RT_SRC_POS, N_("VMDK: cannot write footer after grain table in '%s'"), pExtent->pszFullname);
5083 }
5084 pExtent->uLastGrainSector = 0;
5085 uint8_t aEOS[512];
5086 memset(aEOS, '\0', sizeof(aEOS));
5087 rc = vmdkFileWriteSync(pImage, pExtent->pFile,
5088 VMDK_SECTOR2BYTE(uGTSector) + pExtent->cGTEntries * sizeof(uint32_t) + uEOSOff,
5089 aEOS, sizeof(aEOS), NULL);
5090 if (RT_FAILURE(rc))
5091 return vmdkError(pImage, rc, RT_SRC_POS, N_("VMDK: cannot write end-of stream marker after grain table in '%s'"), pExtent->pszFullname);
5092 }
5093 /* Normally the grain table is preallocated for hosted sparse extents
5094 * that support more than 32 bit sector numbers. So this shouldn't
5095 * ever happen on a valid extent. */
5096 if (uGTSector > UINT32_MAX)
5097 return VERR_VD_VMDK_INVALID_HEADER;
5098 /* Write grain table by writing the required number of grain table
5099 * cache chunks. Avoids dynamic memory allocation, but is a bit
5100 * slower. But as this is a pretty infrequently occurring case it
5101 * should be acceptable. */
5102 memset(aGTDataTmp, '\0', sizeof(aGTDataTmp));
5103 for (unsigned i = 0;
5104 i < pExtent->cGTEntries / VMDK_GT_CACHELINE_SIZE;
5105 i++)
5106 {
5107 rc = vmdkFileWriteSync(pImage, pExtent->pFile,
5108 VMDK_SECTOR2BYTE(uGTSector) + i * sizeof(aGTDataTmp),
5109 aGTDataTmp, sizeof(aGTDataTmp), NULL);
5110 if (RT_FAILURE(rc))
5111 return vmdkError(pImage, rc, RT_SRC_POS, N_("VMDK: cannot write grain table allocation in '%s'"), pExtent->pszFullname);
5112 }
5113 if (pExtent->pRGD)
5114 {
5115 AssertReturn(!uRGTSector, VERR_VD_VMDK_INVALID_HEADER);
5116 rc = vmdkFileGetSize(pImage, pExtent->pFile, &cbExtentSize);
5117 if (RT_FAILURE(rc))
5118 return vmdkError(pImage, rc, RT_SRC_POS, N_("VMDK: error getting size in '%s'"), pExtent->pszFullname);
5119 Assert(!(cbExtentSize % 512));
5120 uRGTSector = VMDK_BYTE2SECTOR(cbExtentSize);
5121 /* For writable streamOptimized extents the final sector is the
5122 * end-of-stream marker. Will be re-added after the grain table.
5123 * If the file has a footer it also will be re-added before EOS. */
5124 if (pImage->uImageFlags & VD_VMDK_IMAGE_FLAGS_STREAM_OPTIMIZED)
5125 {
5126 uint64_t uEOSOff = 0;
5127 uRGTSector--;
5128 if (pExtent->fFooter)
5129 {
5130 uRGTSector--;
5131 uEOSOff = 512;
5132 rc = vmdkWriteMetaSparseExtent(pImage, pExtent, VMDK_SECTOR2BYTE(uRGTSector) + pExtent->cGTEntries * sizeof(uint32_t));
5133 if (RT_FAILURE(rc))
5134 return vmdkError(pImage, rc, RT_SRC_POS, N_("VMDK: cannot write footer after redundant grain table in '%s'"), pExtent->pszFullname);
5135 }
5136 pExtent->uLastGrainSector = 0;
5137 uint8_t aEOS[512];
5138 memset(aEOS, '\0', sizeof(aEOS));
5139 rc = vmdkFileWriteSync(pImage, pExtent->pFile,
5140 VMDK_SECTOR2BYTE(uRGTSector) + pExtent->cGTEntries * sizeof(uint32_t) + uEOSOff,
5141 aEOS, sizeof(aEOS), NULL);
5142 if (RT_FAILURE(rc))
5143 return vmdkError(pImage, rc, RT_SRC_POS, N_("VMDK: cannot write end-of stream marker after redundant grain table in '%s'"), pExtent->pszFullname);
5144 }
5145 /* Normally the redundant grain table is preallocated for hosted
5146 * sparse extents that support more than 32 bit sector numbers. So
5147 * this shouldn't ever happen on a valid extent. */
5148 if (uRGTSector > UINT32_MAX)
5149 return VERR_VD_VMDK_INVALID_HEADER;
5150 /* Write backup grain table by writing the required number of grain
5151 * table cache chunks. Avoids dynamic memory allocation, but is a
5152 * bit slower. But as this is a pretty infrequently occurring case
5153 * it should be acceptable. */
5154 for (unsigned i = 0;
5155 i < pExtent->cGTEntries / VMDK_GT_CACHELINE_SIZE;
5156 i++)
5157 {
5158 rc = vmdkFileWriteSync(pImage, pExtent->pFile,
5159 VMDK_SECTOR2BYTE(uRGTSector) + i * sizeof(aGTDataTmp),
5160 aGTDataTmp, sizeof(aGTDataTmp), NULL);
5161 if (RT_FAILURE(rc))
5162 return vmdkError(pImage, rc, RT_SRC_POS, N_("VMDK: cannot write backup grain table allocation in '%s'"), pExtent->pszFullname);
5163 }
5164 }
5165
5166 /* Update the grain directory on disk (doing it before writing the
5167 * grain table will result in a garbled extent if the operation is
5168 * aborted for some reason. Otherwise the worst that can happen is
5169 * some unused sectors in the extent. */
5170 uint32_t uGTSectorLE = RT_H2LE_U64(uGTSector);
5171 rc = vmdkFileWriteSync(pImage, pExtent->pFile,
5172 VMDK_SECTOR2BYTE(pExtent->uSectorGD) + uGDIndex * sizeof(uGTSectorLE),
5173 &uGTSectorLE, sizeof(uGTSectorLE), NULL);
5174 if (RT_FAILURE(rc))
5175 return vmdkError(pImage, rc, RT_SRC_POS, N_("VMDK: cannot write grain directory entry in '%s'"), pExtent->pszFullname);
5176 if (pExtent->pRGD)
5177 {
5178 uint32_t uRGTSectorLE = RT_H2LE_U64(uRGTSector);
5179 rc = vmdkFileWriteSync(pImage, pExtent->pFile,
5180 VMDK_SECTOR2BYTE(pExtent->uSectorRGD) + uGDIndex * sizeof(uRGTSectorLE),
5181 &uRGTSectorLE, sizeof(uRGTSectorLE), NULL);
5182 if (RT_FAILURE(rc))
5183 return vmdkError(pImage, rc, RT_SRC_POS, N_("VMDK: cannot write backup grain directory entry in '%s'"), pExtent->pszFullname);
5184 }
5185
5186 /* As the final step update the in-memory copy of the GDs. */
5187 pExtent->pGD[uGDIndex] = uGTSector;
5188 if (pExtent->pRGD)
5189 pExtent->pRGD[uGDIndex] = uRGTSector;
5190 }
5191
5192 rc = vmdkFileGetSize(pImage, pExtent->pFile, &cbExtentSize);
5193 if (RT_FAILURE(rc))
5194 return vmdkError(pImage, rc, RT_SRC_POS, N_("VMDK: error getting size in '%s'"), pExtent->pszFullname);
5195 Assert(!(cbExtentSize % 512));
5196
5197 /* Write the data. Always a full grain, or we're in big trouble. */
5198 if (pImage->uImageFlags & VD_VMDK_IMAGE_FLAGS_STREAM_OPTIMIZED)
5199 {
5200 /* For streamOptimized extents this is a little more difficult, as the
5201 * cached data also needs to be updated, to handle updating the last
5202 * written block properly. Also we're trying to avoid unnecessary gaps.
5203 * Additionally the end-of-stream marker needs to be written. */
5204 if (!pExtent->uLastGrainSector)
5205 {
5206 cbExtentSize -= 512;
5207 if (pExtent->fFooter)
5208 cbExtentSize -= 512;
5209 }
5210 else
5211 cbExtentSize = VMDK_SECTOR2BYTE(pExtent->uLastGrainSector) + pExtent->cbLastGrainWritten;
5212 Assert(cbWrite == VMDK_SECTOR2BYTE(pExtent->cSectorsPerGrain));
5213 uint32_t cbGrain = 0;
5214 rc = vmdkFileDeflateSync(pImage, pExtent->pFile, cbExtentSize,
5215 pvBuf, cbWrite, VMDK_MARKER_IGNORE, uSector, &cbGrain);
5216 if (RT_FAILURE(rc))
5217 {
5218 pExtent->uGrainSector = 0;
5219 pExtent->uLastGrainSector = 0;
5220 AssertRC(rc);
5221 return vmdkError(pImage, rc, RT_SRC_POS, N_("VMDK: cannot write allocated compressed data block in '%s'"), pExtent->pszFullname);
5222 }
5223 cbGrain = RT_ALIGN(cbGrain, 512);
5224 pExtent->uLastGrainSector = VMDK_BYTE2SECTOR(cbExtentSize);
5225 pExtent->uLastGrainWritten = uSector / pExtent->cSectorsPerGrain;
5226 pExtent->cbLastGrainWritten = cbGrain;
5227 memcpy(pExtent->pvGrain, pvBuf, cbWrite);
5228 pExtent->uGrainSector = uSector;
5229
5230 uint64_t uEOSOff = 0;
5231 if (pExtent->fFooter)
5232 {
5233 uEOSOff = 512;
5234 rc = vmdkWriteMetaSparseExtent(pImage, pExtent, cbExtentSize + RT_ALIGN(cbGrain, 512));
5235 if (RT_FAILURE(rc))
5236 return vmdkError(pImage, rc, RT_SRC_POS, N_("VMDK: cannot write footer after allocated data block in '%s'"), pExtent->pszFullname);
5237 }
5238 uint8_t aEOS[512];
5239 memset(aEOS, '\0', sizeof(aEOS));
5240 rc = vmdkFileWriteSync(pImage, pExtent->pFile,
5241 cbExtentSize + RT_ALIGN(cbGrain, 512) + uEOSOff,
5242 aEOS, sizeof(aEOS), NULL);
5243 if (RT_FAILURE(rc))
5244 return vmdkError(pImage, rc, RT_SRC_POS, N_("VMDK: cannot write end-of stream marker after allocated data block in '%s'"), pExtent->pszFullname);
5245 }
5246 else
5247 {
5248 rc = vmdkFileWriteSync(pImage, pExtent->pFile, cbExtentSize, pvBuf, cbWrite, NULL);
5249 if (RT_FAILURE(rc))
5250 return vmdkError(pImage, rc, RT_SRC_POS, N_("VMDK: cannot write allocated data block in '%s'"), pExtent->pszFullname);
5251 }
5252
5253 /* Update the grain table (and the cache). */
5254 uGTBlock = uSector / (pExtent->cSectorsPerGrain * VMDK_GT_CACHELINE_SIZE);
5255 uGTHash = vmdkGTCacheHash(pCache, uGTBlock, pExtent->uExtent);
5256 pGTCacheEntry = &pCache->aGTCache[uGTHash];
5257 if ( pGTCacheEntry->uExtent != pExtent->uExtent
5258 || pGTCacheEntry->uGTBlock != uGTBlock)
5259 {
5260 /* Cache miss, fetch data from disk. */
5261 rc = vmdkFileReadSync(pImage, pExtent->pFile,
5262 VMDK_SECTOR2BYTE(uGTSector) + (uGTBlock % (pExtent->cGTEntries / VMDK_GT_CACHELINE_SIZE)) * sizeof(aGTDataTmp),
5263 aGTDataTmp, sizeof(aGTDataTmp), NULL);
5264 if (RT_FAILURE(rc))
5265 return vmdkError(pImage, rc, RT_SRC_POS, N_("VMDK: cannot read allocated grain table entry in '%s'"), pExtent->pszFullname);
5266 pGTCacheEntry->uExtent = pExtent->uExtent;
5267 pGTCacheEntry->uGTBlock = uGTBlock;
5268 for (unsigned i = 0; i < VMDK_GT_CACHELINE_SIZE; i++)
5269 pGTCacheEntry->aGTData[i] = RT_LE2H_U32(aGTDataTmp[i]);
5270 }
5271 else
5272 {
5273 /* Cache hit. Convert grain table block back to disk format, otherwise
5274 * the code below will write garbage for all but the updated entry. */
5275 for (unsigned i = 0; i < VMDK_GT_CACHELINE_SIZE; i++)
5276 aGTDataTmp[i] = RT_H2LE_U32(pGTCacheEntry->aGTData[i]);
5277 }
5278 uGTBlockIndex = (uSector / pExtent->cSectorsPerGrain) % VMDK_GT_CACHELINE_SIZE;
5279 aGTDataTmp[uGTBlockIndex] = RT_H2LE_U32(VMDK_BYTE2SECTOR(cbExtentSize));
5280 pGTCacheEntry->aGTData[uGTBlockIndex] = VMDK_BYTE2SECTOR(cbExtentSize);
5281 /* Update grain table on disk. */
5282 rc = vmdkFileWriteSync(pImage, pExtent->pFile,
5283 VMDK_SECTOR2BYTE(uGTSector) + (uGTBlock % (pExtent->cGTEntries / VMDK_GT_CACHELINE_SIZE)) * sizeof(aGTDataTmp),
5284 aGTDataTmp, sizeof(aGTDataTmp), NULL);
5285 if (RT_FAILURE(rc))
5286 return vmdkError(pImage, rc, RT_SRC_POS, N_("VMDK: cannot write updated grain table in '%s'"), pExtent->pszFullname);
5287 if (pExtent->pRGD)
5288 {
5289 /* Update backup grain table on disk. */
5290 rc = vmdkFileWriteSync(pImage, pExtent->pFile,
5291 VMDK_SECTOR2BYTE(uRGTSector) + (uGTBlock % (pExtent->cGTEntries / VMDK_GT_CACHELINE_SIZE)) * sizeof(aGTDataTmp),
5292 aGTDataTmp, sizeof(aGTDataTmp), NULL);
5293 if (RT_FAILURE(rc))
5294 return vmdkError(pImage, rc, RT_SRC_POS, N_("VMDK: cannot write updated backup grain table in '%s'"), pExtent->pszFullname);
5295 }
5296#ifdef VBOX_WITH_VMDK_ESX
5297 if (RT_SUCCESS(rc) && pExtent->enmType == VMDKETYPE_ESX_SPARSE)
5298 {
5299 pExtent->uFreeSector = uGTSector + VMDK_BYTE2SECTOR(cbWrite);
5300 pExtent->fMetaDirty = true;
5301 }
5302#endif /* VBOX_WITH_VMDK_ESX */
5303 return rc;
5304}
5305
/**
 * Internal: Updates the grain table during a async grain allocation.
 *
 * May be entered twice for the same allocation: if the grain table read
 * misses the cache and completes asynchronously, the function returns
 * VERR_VD_ASYNC_IO_IN_PROGRESS with fGTUpdateNeeded set, and is re-invoked
 * from vmdkAllocGrainAsyncComplete once the read finished.
 */
static int vmdkAllocGrainAsyncGTUpdate(PVMDKIMAGE pImage, PVMDKEXTENT pExtent,
                                       PVDIOCTX pIoCtx,
                                       PVMDKGRAINALLOCASYNC pGrainAlloc)
{
    int rc = VINF_SUCCESS;
    PVMDKGTCACHE pCache = pImage->pGTCache;
    uint32_t aGTDataTmp[VMDK_GT_CACHELINE_SIZE];
    uint32_t uGTHash, uGTBlockIndex;
    uint64_t uGTSector, uRGTSector, uGTBlock;
    uint64_t uSector = pGrainAlloc->uSector;
    PVMDKGTCACHEENTRY pGTCacheEntry;

    LogFlowFunc(("pImage=%#p pExtent=%#p pCache=%#p pIoCtx=%#p pGrainAlloc=%#p\n",
                 pImage, pExtent, pCache, pIoCtx, pGrainAlloc));

    /* Grain table locations were determined by vmdkAllocGrainAsync. */
    uGTSector = pGrainAlloc->uGTSector;
    uRGTSector = pGrainAlloc->uRGTSector;
    LogFlow(("uGTSector=%llu uRGTSector=%llu\n", uGTSector, uRGTSector));

    /* Update the grain table (and the cache). */
    uGTBlock = uSector / (pExtent->cSectorsPerGrain * VMDK_GT_CACHELINE_SIZE);
    uGTHash = vmdkGTCacheHash(pCache, uGTBlock, pExtent->uExtent);
    pGTCacheEntry = &pCache->aGTCache[uGTHash];
    if (   pGTCacheEntry->uExtent != pExtent->uExtent
        || pGTCacheEntry->uGTBlock != uGTBlock)
    {
        /* Cache miss, fetch data from disk. */
        LogFlow(("Cache miss, fetch data from disk\n"));
        PVDMETAXFER pMetaXfer = NULL;
        rc = vmdkFileReadMetaAsync(pImage, pExtent->pFile,
                                   VMDK_SECTOR2BYTE(uGTSector) + (uGTBlock % (pExtent->cGTEntries / VMDK_GT_CACHELINE_SIZE)) * sizeof(aGTDataTmp),
                                   aGTDataTmp, sizeof(aGTDataTmp), pIoCtx,
                                   &pMetaXfer, vmdkAllocGrainAsyncComplete, pGrainAlloc);
        if (rc == VERR_VD_ASYNC_IO_IN_PROGRESS)
        {
            pGrainAlloc->cIoXfersPending++;
            pGrainAlloc->fGTUpdateNeeded = true;
            /* Leave early, we will be called again after the read completed. */
            LogFlowFunc(("Metadata read in progress, leaving\n"));
            return rc;
        }
        else if (RT_FAILURE(rc))
            return vmdkError(pImage, rc, RT_SRC_POS, N_("VMDK: cannot read allocated grain table entry in '%s'"), pExtent->pszFullname);
        vmdkFileMetaXferRelease(pImage, pMetaXfer);
        pGTCacheEntry->uExtent = pExtent->uExtent;
        pGTCacheEntry->uGTBlock = uGTBlock;
        /* Entries are little endian on disk, host order in the cache. */
        for (unsigned i = 0; i < VMDK_GT_CACHELINE_SIZE; i++)
            pGTCacheEntry->aGTData[i] = RT_LE2H_U32(aGTDataTmp[i]);
    }
    else
    {
        /* Cache hit. Convert grain table block back to disk format, otherwise
         * the code below will write garbage for all but the updated entry. */
        for (unsigned i = 0; i < VMDK_GT_CACHELINE_SIZE; i++)
            aGTDataTmp[i] = RT_H2LE_U32(pGTCacheEntry->aGTData[i]);
    }
    /* The (possibly re-entered) update is happening now. */
    pGrainAlloc->fGTUpdateNeeded = false;
    uGTBlockIndex = (uSector / pExtent->cSectorsPerGrain) % VMDK_GT_CACHELINE_SIZE;
    aGTDataTmp[uGTBlockIndex] = RT_H2LE_U32(VMDK_BYTE2SECTOR(pGrainAlloc->cbExtentSize));
    pGTCacheEntry->aGTData[uGTBlockIndex] = VMDK_BYTE2SECTOR(pGrainAlloc->cbExtentSize);
    /* Update grain table on disk. */
    rc = vmdkFileWriteMetaAsync(pImage, pExtent->pFile,
                                VMDK_SECTOR2BYTE(uGTSector) + (uGTBlock % (pExtent->cGTEntries / VMDK_GT_CACHELINE_SIZE)) * sizeof(aGTDataTmp),
                                aGTDataTmp, sizeof(aGTDataTmp), pIoCtx,
                                vmdkAllocGrainAsyncComplete, pGrainAlloc);
    if (rc == VERR_VD_ASYNC_IO_IN_PROGRESS)
        pGrainAlloc->cIoXfersPending++;
    else if (RT_FAILURE(rc))
        return vmdkError(pImage, rc, RT_SRC_POS, N_("VMDK: cannot write updated grain table in '%s'"), pExtent->pszFullname);
    if (pExtent->pRGD)
    {
        /* Update backup grain table on disk. */
        rc = vmdkFileWriteMetaAsync(pImage, pExtent->pFile,
                                    VMDK_SECTOR2BYTE(uRGTSector) + (uGTBlock % (pExtent->cGTEntries / VMDK_GT_CACHELINE_SIZE)) * sizeof(aGTDataTmp),
                                    aGTDataTmp, sizeof(aGTDataTmp), pIoCtx,
                                    vmdkAllocGrainAsyncComplete, pGrainAlloc);
        if (rc == VERR_VD_ASYNC_IO_IN_PROGRESS)
            pGrainAlloc->cIoXfersPending++;
        else if (RT_FAILURE(rc))
            return vmdkError(pImage, rc, RT_SRC_POS, N_("VMDK: cannot write updated backup grain table in '%s'"), pExtent->pszFullname);
    }
#ifdef VBOX_WITH_VMDK_ESX
    if (RT_SUCCESS(rc) && pExtent->enmType == VMDKETYPE_ESX_SPARSE)
    {
        /* NOTE(review): 'cbWrite' is not declared in this function (unlike in
         * the synchronous vmdkAllocGrain, where it is a parameter). This
         * branch cannot compile when VBOX_WITH_VMDK_ESX is defined -- needs
         * fixing if ESX sparse support is ever enabled on the async path. */
        pExtent->uFreeSector = uGTSector + VMDK_BYTE2SECTOR(cbWrite);
        pExtent->fMetaDirty = true;
    }
#endif /* VBOX_WITH_VMDK_ESX */

    LogFlowFunc(("leaving rc=%Rrc\n", rc));

    return rc;
}
5402
5403/**
5404 * Internal - complete the grain allocation by updating disk grain table if required.
5405 */
5406static int vmdkAllocGrainAsyncComplete(void *pBackendData, PVDIOCTX pIoCtx, void *pvUser, int rcReq)
5407{
5408 int rc = VINF_SUCCESS;
5409 PVMDKIMAGE pImage = (PVMDKIMAGE)pBackendData;
5410 PVMDKGRAINALLOCASYNC pGrainAlloc = (PVMDKGRAINALLOCASYNC)pvUser;
5411 PVMDKEXTENT pExtent = pGrainAlloc->pExtent;
5412
5413 LogFlowFunc(("pBackendData=%#p pIoCtx=%#p pvUser=%#p rcReq=%Rrc\n",
5414 pBackendData, pIoCtx, pvUser, rcReq));
5415
5416 pGrainAlloc->cIoXfersPending--;
5417 if (!pGrainAlloc->cIoXfersPending && pGrainAlloc->fGTUpdateNeeded)
5418 rc = vmdkAllocGrainAsyncGTUpdate(pImage, pGrainAlloc->pExtent,
5419 pIoCtx, pGrainAlloc);
5420
5421 if (!pGrainAlloc->cIoXfersPending)
5422 {
5423 /* Grain allocation completed. */
5424 RTMemFree(pGrainAlloc);
5425 }
5426
5427 LogFlowFunc(("Leaving rc=%Rrc\n", rc));
5428 return rc;
5429}
5430
5431/**
5432 * Internal. Allocates a new grain table (if necessary) - async version.
5433 */
5434static int vmdkAllocGrainAsync(PVMDKIMAGE pImage, PVMDKEXTENT pExtent,
5435 PVDIOCTX pIoCtx, uint64_t uSector,
5436 uint64_t cbWrite)
5437{
5438 PVMDKGTCACHE pCache = pImage->pGTCache;
5439 uint64_t uGDIndex, uGTSector, uRGTSector;
5440 uint64_t cbExtentSize;
5441 PVMDKGRAINALLOCASYNC pGrainAlloc = NULL;
5442 int rc;
5443
5444 LogFlowFunc(("pCache=%#p pExtent=%#p pIoCtx=%#p uSector=%llu cbWrite=%llu\n",
5445 pCache, pExtent, pIoCtx, uSector, cbWrite));
5446
5447 AssertReturn(!(pImage->uImageFlags & VD_VMDK_IMAGE_FLAGS_STREAM_OPTIMIZED), VERR_NOT_SUPPORTED);
5448
5449 pGrainAlloc = (PVMDKGRAINALLOCASYNC)RTMemAllocZ(sizeof(VMDKGRAINALLOCASYNC));
5450 if (!pGrainAlloc)
5451 return VERR_NO_MEMORY;
5452
5453 pGrainAlloc->pExtent = pExtent;
5454 pGrainAlloc->uSector = uSector;
5455
5456 uGDIndex = uSector / pExtent->cSectorsPerGDE;
5457 if (uGDIndex >= pExtent->cGDEntries)
5458 return VERR_OUT_OF_RANGE;
5459 uGTSector = pExtent->pGD[uGDIndex];
5460 if (pExtent->pRGD)
5461 uRGTSector = pExtent->pRGD[uGDIndex];
5462 else
5463 uRGTSector = 0; /**< avoid compiler warning */
5464 if (!uGTSector)
5465 {
5466 LogFlow(("Allocating new grain table\n"));
5467
5468 /* There is no grain table referenced by this grain directory
5469 * entry. So there is absolutely no data in this area. Allocate
5470 * a new grain table and put the reference to it in the GDs. */
5471 rc = vmdkFileGetSize(pImage, pExtent->pFile, &cbExtentSize);
5472 if (RT_FAILURE(rc))
5473 return vmdkError(pImage, rc, RT_SRC_POS, N_("VMDK: error getting size in '%s'"), pExtent->pszFullname);
5474 Assert(!(cbExtentSize % 512));
5475
5476 pGrainAlloc->cbExtentOld = cbExtentSize;
5477
5478 cbExtentSize = RT_ALIGN_64(cbExtentSize, 512);
5479 uGTSector = VMDK_BYTE2SECTOR(cbExtentSize);
5480
5481 /* Normally the grain table is preallocated for hosted sparse extents
5482 * that support more than 32 bit sector numbers. So this shouldn't
5483 * ever happen on a valid extent. */
5484 if (uGTSector > UINT32_MAX)
5485 return VERR_VD_VMDK_INVALID_HEADER;
5486
5487 /* Write grain table by writing the required number of grain table
5488 * cache chunks. Allocate memory dynamically here or we flood the
5489 * metadata cache with very small entries.
5490 */
5491 size_t cbGTDataTmp = (pExtent->cGTEntries / VMDK_GT_CACHELINE_SIZE) * VMDK_GT_CACHELINE_SIZE * sizeof(uint32_t);
5492 uint32_t *paGTDataTmp = (uint32_t *)RTMemTmpAllocZ(cbGTDataTmp);
5493
5494 if (!paGTDataTmp)
5495 return VERR_NO_MEMORY;
5496
5497 memset(paGTDataTmp, '\0', cbGTDataTmp);
5498 rc = vmdkFileWriteMetaAsync(pImage, pExtent->pFile,
5499 VMDK_SECTOR2BYTE(uGTSector),
5500 paGTDataTmp, cbGTDataTmp, pIoCtx,
5501 vmdkAllocGrainAsyncComplete, pGrainAlloc);
5502 if (rc == VERR_VD_ASYNC_IO_IN_PROGRESS)
5503 pGrainAlloc->cIoXfersPending++;
5504 else if (RT_FAILURE(rc))
5505 {
5506 RTMemTmpFree(paGTDataTmp);
5507 return vmdkError(pImage, rc, RT_SRC_POS, N_("VMDK: cannot write grain table allocation in '%s'"), pExtent->pszFullname);
5508 }
5509
5510 if (pExtent->pRGD)
5511 {
5512 AssertReturn(!uRGTSector, VERR_VD_VMDK_INVALID_HEADER);
5513 rc = vmdkFileGetSize(pImage, pExtent->pFile, &cbExtentSize);
5514 if (RT_FAILURE(rc))
5515 return vmdkError(pImage, rc, RT_SRC_POS, N_("VMDK: error getting size in '%s'"), pExtent->pszFullname);
5516 Assert(!(cbExtentSize % 512));
5517 uRGTSector = VMDK_BYTE2SECTOR(cbExtentSize);
5518
5519 /* Normally the redundant grain table is preallocated for hosted
5520 * sparse extents that support more than 32 bit sector numbers. So
5521 * this shouldn't ever happen on a valid extent. */
5522 if (uRGTSector > UINT32_MAX)
5523 {
5524 RTMemTmpFree(paGTDataTmp);
5525 return VERR_VD_VMDK_INVALID_HEADER;
5526 }
5527 /* Write backup grain table by writing the required number of grain
5528 * table cache chunks. */
5529 rc = vmdkFileWriteMetaAsync(pImage, pExtent->pFile,
5530 VMDK_SECTOR2BYTE(uRGTSector),
5531 paGTDataTmp, cbGTDataTmp, pIoCtx,
5532 vmdkAllocGrainAsyncComplete, pGrainAlloc);
5533 if (rc == VERR_VD_ASYNC_IO_IN_PROGRESS)
5534 pGrainAlloc->cIoXfersPending++;
5535 else if (RT_FAILURE(rc))
5536 {
5537 RTMemTmpFree(paGTDataTmp);
5538 return vmdkError(pImage, rc, RT_SRC_POS, N_("VMDK: cannot write backup grain table allocation in '%s'"), pExtent->pszFullname);
5539 }
5540 }
5541
5542 RTMemTmpFree(paGTDataTmp);
5543
5544 /* Update the grain directory on disk (doing it before writing the
5545 * grain table will result in a garbled extent if the operation is
5546 * aborted for some reason. Otherwise the worst that can happen is
5547 * some unused sectors in the extent. */
5548 uint32_t uGTSectorLE = RT_H2LE_U64(uGTSector);
5549 rc = vmdkFileWriteMetaAsync(pImage, pExtent->pFile,
5550 VMDK_SECTOR2BYTE(pExtent->uSectorGD) + uGDIndex * sizeof(uGTSectorLE),
5551 &uGTSectorLE, sizeof(uGTSectorLE), pIoCtx,
5552 vmdkAllocGrainAsyncComplete, pGrainAlloc);
5553 if (rc == VERR_VD_ASYNC_IO_IN_PROGRESS)
5554 pGrainAlloc->cIoXfersPending++;
5555 else if (RT_FAILURE(rc))
5556 return vmdkError(pImage, rc, RT_SRC_POS, N_("VMDK: cannot write grain directory entry in '%s'"), pExtent->pszFullname);
5557 if (pExtent->pRGD)
5558 {
5559 uint32_t uRGTSectorLE = RT_H2LE_U64(uRGTSector);
5560 rc = vmdkFileWriteMetaAsync(pImage, pExtent->pFile,
5561 VMDK_SECTOR2BYTE(pExtent->uSectorRGD) + uGDIndex * sizeof(uGTSectorLE),
5562 &uRGTSectorLE, sizeof(uRGTSectorLE), pIoCtx,
5563 vmdkAllocGrainAsyncComplete, pGrainAlloc);
5564 if (rc == VERR_VD_ASYNC_IO_IN_PROGRESS)
5565 pGrainAlloc->cIoXfersPending++;
5566 else if (RT_FAILURE(rc))
5567 return vmdkError(pImage, rc, RT_SRC_POS, N_("VMDK: cannot write backup grain directory entry in '%s'"), pExtent->pszFullname);
5568 }
5569
5570 /* As the final step update the in-memory copy of the GDs. */
5571 pExtent->pGD[uGDIndex] = uGTSector;
5572 if (pExtent->pRGD)
5573 pExtent->pRGD[uGDIndex] = uRGTSector;
5574 }
5575
5576 LogFlow(("uGTSector=%llu uRGTSector=%llu\n", uGTSector, uRGTSector));
5577 pGrainAlloc->uGTSector = uGTSector;
5578 pGrainAlloc->uRGTSector = uRGTSector;
5579
5580 rc = vmdkFileGetSize(pImage, pExtent->pFile, &cbExtentSize);
5581 if (RT_FAILURE(rc))
5582 return vmdkError(pImage, rc, RT_SRC_POS, N_("VMDK: error getting size in '%s'"), pExtent->pszFullname);
5583 Assert(!(cbExtentSize % 512));
5584
5585 if (!pGrainAlloc->cbExtentOld)
5586 pGrainAlloc->cbExtentOld = cbExtentSize;
5587
5588 pGrainAlloc->cbExtentSize = cbExtentSize;
5589
5590 /* Write the data. Always a full grain, or we're in big trouble. */
5591 rc = vmdkFileWriteUserAsync(pImage, pExtent->pFile,
5592 cbExtentSize, pIoCtx, cbWrite,
5593 vmdkAllocGrainAsyncComplete, pGrainAlloc);
5594 if (rc == VERR_VD_ASYNC_IO_IN_PROGRESS)
5595 pGrainAlloc->cIoXfersPending++;
5596 else if (RT_FAILURE(rc))
5597 return vmdkError(pImage, rc, RT_SRC_POS, N_("VMDK: cannot write allocated data block in '%s'"), pExtent->pszFullname);
5598
5599 rc = vmdkAllocGrainAsyncGTUpdate(pImage, pExtent, pIoCtx, pGrainAlloc);
5600
5601 if (!pGrainAlloc->cIoXfersPending)
5602 {
5603 /* Grain allocation completed. */
5604 RTMemFree(pGrainAlloc);
5605 }
5606
5607 LogFlowFunc(("leaving rc=%Rrc\n", rc));
5608
5609 return rc;
5610}
5611
5612/**
5613 * Replaces a fragment of a string with the specified string.
5614 *
5615 * @returns Pointer to the allocated UTF-8 string.
5616 * @param pszWhere UTF-8 string to search in.
5617 * @param pszWhat UTF-8 string to search for.
5618 * @param pszByWhat UTF-8 string to replace the found string with.
5619 */
5620static char *vmdkStrReplace(const char *pszWhere, const char *pszWhat,
5621 const char *pszByWhat)
5622{
5623 AssertPtr(pszWhere);
5624 AssertPtr(pszWhat);
5625 AssertPtr(pszByWhat);
5626 const char *pszFoundStr = strstr(pszWhere, pszWhat);
5627 if (!pszFoundStr)
5628 return NULL;
5629 size_t cFinal = strlen(pszWhere) + 1 + strlen(pszByWhat) - strlen(pszWhat);
5630 char *pszNewStr = (char *)RTMemAlloc(cFinal);
5631 if (pszNewStr)
5632 {
5633 char *pszTmp = pszNewStr;
5634 memcpy(pszTmp, pszWhere, pszFoundStr - pszWhere);
5635 pszTmp += pszFoundStr - pszWhere;
5636 memcpy(pszTmp, pszByWhat, strlen(pszByWhat));
5637 pszTmp += strlen(pszByWhat);
5638 strcpy(pszTmp, pszFoundStr + strlen(pszWhat));
5639 }
5640 return pszNewStr;
5641}
5642
5643
5644/** @copydoc VBOXHDDBACKEND::pfnCheckIfValid */
5645static int vmdksCheckIfValid(const char *pszFilename, PVDINTERFACE pVDIfsDisk,
5646 PVDINTERFACE pVDIfsImage)
5647{
5648 LogFlowFunc(("pszFilename=\"%s\"\n", pszFilename));
5649 int rc = VINF_SUCCESS;
5650
5651 if ( !VALID_PTR(pszFilename)
5652 || !*pszFilename
5653 || strchr(pszFilename, '"'))
5654 {
5655 rc = VERR_INVALID_PARAMETER;
5656 goto out;
5657 }
5658
5659 /* Always return failure, to avoid opening other VMDK files via this
5660 * special VMDK streamOptimized format backend. */
5661 rc = VERR_NOT_SUPPORTED;
5662
5663out:
5664 LogFlowFunc(("returns %Rrc\n", rc));
5665 return rc;
5666}
5667
5668
5669/** @copydoc VBOXHDDBACKEND::pfnOpen */
5670static int vmdksOpen(const char *pszFilename, unsigned uOpenFlags,
5671 PVDINTERFACE pVDIfsDisk, PVDINTERFACE pVDIfsImage,
5672 void **ppBackendData)
5673{
5674 LogFlowFunc(("pszFilename=\"%s\" uOpenFlags=%#x pVDIfsDisk=%#p pVDIfsImage=%#p ppBackendData=%#p\n", pszFilename, uOpenFlags, pVDIfsDisk, pVDIfsImage, ppBackendData));
5675 int rc;
5676
5677 rc = VERR_NOT_SUPPORTED;
5678 LogFlowFunc(("returns %Rrc (pBackendData=%#p)\n", rc, *ppBackendData));
5679 return rc;
5680}
5681
5682/** @copydoc VBOXHDDBACKEND::pfnCreate */
5683static int vmdksCreate(const char *pszFilename, uint64_t cbSize,
5684 unsigned uImageFlags, const char *pszComment,
5685 PCVDGEOMETRY pPCHSGeometry, PCVDGEOMETRY pLCHSGeometry,
5686 PCRTUUID pUuid, unsigned uOpenFlags,
5687 unsigned uPercentStart, unsigned uPercentSpan,
5688 PVDINTERFACE pVDIfsDisk, PVDINTERFACE pVDIfsImage,
5689 PVDINTERFACE pVDIfsOperation, void **ppBackendData)
5690{
5691 LogFlowFunc(("pszFilename=\"%s\" cbSize=%llu uImageFlags=%#x pszComment=\"%s\" pPCHSGeometry=%#p pLCHSGeometry=%#p Uuid=%RTuuid uOpenFlags=%#x uPercentStart=%u uPercentSpan=%u pVDIfsDisk=%#p pVDIfsImage=%#p pVDIfsOperation=%#p ppBackendData=%#p", pszFilename, cbSize, uImageFlags, pszComment, pPCHSGeometry, pLCHSGeometry, pUuid, uOpenFlags, uPercentStart, uPercentSpan, pVDIfsDisk, pVDIfsImage, pVDIfsOperation, ppBackendData));
5692 int rc;
5693 PVMDKIMAGE pImage;
5694
5695 PFNVDPROGRESS pfnProgress = NULL;
5696 void *pvUser = NULL;
5697 PVDINTERFACE pIfProgress = VDInterfaceGet(pVDIfsOperation,
5698 VDINTERFACETYPE_PROGRESS);
5699 PVDINTERFACEPROGRESS pCbProgress = NULL;
5700 if (pIfProgress)
5701 {
5702 pCbProgress = VDGetInterfaceProgress(pIfProgress);
5703 pfnProgress = pCbProgress->pfnProgress;
5704 pvUser = pIfProgress->pvUser;
5705 }
5706
5707 /* Check open flags. No flags are supported. */
5708 if (uOpenFlags != VD_OPEN_FLAGS_NORMAL)
5709 {
5710 rc = VERR_INVALID_PARAMETER;
5711 goto out;
5712 }
5713
5714 /* Check image flags. No flags are supported. */
5715 if (uImageFlags != VD_VMDK_IMAGE_FLAGS_STREAM_OPTIMIZED)
5716 {
5717 rc = VERR_INVALID_PARAMETER;
5718 goto out;
5719 }
5720
5721 /* Check size. Maximum 2TB-64K. */
5722 if ( !cbSize
5723 || cbSize >= _1T * 2 - _64K)
5724 {
5725 rc = VERR_VD_INVALID_SIZE;
5726 goto out;
5727 }
5728
5729 /* Check remaining arguments. */
5730 if ( !VALID_PTR(pszFilename)
5731 || !*pszFilename
5732 || strchr(pszFilename, '"')
5733 || !VALID_PTR(pPCHSGeometry)
5734 || !VALID_PTR(pLCHSGeometry))
5735 {
5736 rc = VERR_INVALID_PARAMETER;
5737 goto out;
5738 }
5739
5740 pImage = (PVMDKIMAGE)RTMemAllocZ(sizeof(VMDKIMAGE));
5741 if (!pImage)
5742 {
5743 rc = VERR_NO_MEMORY;
5744 goto out;
5745 }
5746 pImage->pszFilename = pszFilename;
5747 pImage->pFile = NULL;
5748 pImage->pExtents = NULL;
5749 pImage->pFiles = NULL;
5750 pImage->pGTCache = NULL;
5751 pImage->pDescData = NULL;
5752 pImage->pVDIfsDisk = pVDIfsDisk;
5753 pImage->pVDIfsImage = pVDIfsImage;
5754 /* Descriptors for stream optimized images are small, so don't waste
5755 * space in the resulting image and allocate a small buffer. */
5756 pImage->cbDescAlloc = VMDK_SECTOR2BYTE(4);
5757 pImage->pDescData = (char *)RTMemAllocZ(pImage->cbDescAlloc);
5758 if (!pImage->pDescData)
5759 {
5760 rc = VERR_NO_MEMORY;
5761 goto out;
5762 }
5763
5764 rc = vmdksCreateImage(pImage, cbSize, uImageFlags, pszComment,
5765 pPCHSGeometry, pLCHSGeometry, pUuid,
5766 pfnProgress, pvUser, uPercentStart, uPercentSpan);
5767 if (RT_SUCCESS(rc))
5768 {
5769 /* Image is always writable. */
5770 *ppBackendData = pImage;
5771 }
5772 else
5773 {
5774 RTMemFree(pImage->pDescData);
5775 RTMemFree(pImage);
5776 }
5777
5778out:
5779 LogFlowFunc(("returns %Rrc (pBackendData=%#p)\n", rc, *ppBackendData));
5780 return rc;
5781}
5782
5783/** @copydoc VBOXHDDBACKEND::pfnClose */
5784static int vmdksClose(void *pBackendData, bool fDelete)
5785{
5786 LogFlowFunc(("pBackendData=%#p fDelete=%d\n", pBackendData, fDelete));
5787 PVMDKIMAGE pImage = (PVMDKIMAGE)pBackendData;
5788 int rc;
5789
5790 rc = vmdksFreeImage(pImage, fDelete);
5791 RTMemFree(pImage);
5792
5793 LogFlowFunc(("returns %Rrc\n", rc));
5794 return rc;
5795}
5796
5797/** @copydoc VBOXHDDBACKEND::pfnRead */
5798static int vmdksRead(void *pBackendData, uint64_t uOffset, void *pvBuf,
5799 size_t cbToRead, size_t *pcbActuallyRead)
5800{
5801 LogFlowFunc(("pBackendData=%#p uOffset=%llu pvBuf=%#p cbToRead=%zu pcbActuallyRead=%#p\n", pBackendData, uOffset, pvBuf, cbToRead, pcbActuallyRead));
5802 PVMDKIMAGE pImage = (PVMDKIMAGE)pBackendData;
5803 int rc;
5804
5805 rc = VERR_NOT_SUPPORTED;
5806 LogFlowFunc(("returns %Rrc\n", rc));
5807 return rc;
5808}
5809
/** @copydoc VBOXHDDBACKEND::pfnWrite
 *
 * Writes one grain (or the final partial grain) to a streamOptimized image.
 * Writes must be strictly sequential and grain-aligned; the grain is
 * compressed and appended to the extent file, and the grain table cache is
 * updated in memory (tables are flushed when a grain directory boundary is
 * crossed, and finally on close).
 */
static int vmdksWrite(void *pBackendData, uint64_t uOffset, const void *pvBuf,
                      size_t cbToWrite, size_t *pcbWriteProcess,
                      size_t *pcbPreRead, size_t *pcbPostRead, unsigned fWrite)
{
    LogFlowFunc(("pBackendData=%#p uOffset=%llu pvBuf=%#p cbToWrite=%zu pcbWriteProcess=%#p pcbPreRead=%#p pcbPostRead=%#p\n", pBackendData, uOffset, pvBuf, cbToWrite, pcbWriteProcess, pcbPreRead, pcbPostRead));
    PVMDKIMAGE pImage = (PVMDKIMAGE)pBackendData;
    PVMDKEXTENT pExtent;
    uint64_t uSector;                 /* first sector of this write */
    uint32_t uGrain;                  /* grain number being written */
    uint32_t uGDEntry, uLastGDEntry;  /* GD entry of this / previously written grain */
    uint32_t cbGrain = 0;             /* size of the compressed grain on disk */
    uint32_t uCacheLine, uCacheEntry; /* location of the GT entry in the GT cache */
    const void *pData = pvBuf;        /* data actually deflated (may point to pad buffer) */
    int rc;

    AssertPtr(pImage);
    Assert(uOffset % 512 == 0);
    Assert(cbToWrite % 512 == 0);

    if (pImage->uOpenFlags & VD_OPEN_FLAGS_READONLY)
    {
        rc = VERR_VD_IMAGE_READ_ONLY;
        goto out;
    }

    /* Images created by this backend have exactly one extent. */
    pExtent = &pImage->pExtents[0];
    uSector = VMDK_BYTE2SECTOR(uOffset);

    /* Very strict requirements: always write at least one full grain, with
     * proper alignment. Everything else would require reading of already
     * written data, which we don't support for obvious reasons. The only
     * exception is the last grain, and only if the image size specifies
     * that only some portion holds data. In any case the write must be
     * within the image limits, no "overshoot" allowed. */
    if (   cbToWrite == 0
        || (   cbToWrite < VMDK_SECTOR2BYTE(pExtent->cSectorsPerGrain)
            && pImage->cbSize - uOffset >= VMDK_SECTOR2BYTE(pExtent->cSectorsPerGrain))
        || uOffset % VMDK_SECTOR2BYTE(pExtent->cSectorsPerGrain)
        || uOffset + cbToWrite > pImage->cbSize)
    {
        rc = VERR_INVALID_PARAMETER;
        goto out;
    }

    /* Clip write range to at most the rest of the grain. */
    cbToWrite = RT_MIN(cbToWrite, VMDK_SECTOR2BYTE(pExtent->cSectorsPerGrain - uSector % pExtent->cSectorsPerGrain));

    /* Do not allow to go back. */
    uGrain = VMDK_BYTE2SECTOR(uOffset) / pExtent->cSectorsPerGrain;
    uCacheLine = uGrain / VMDK_GT_CACHELINE_SIZE % VMDK_GT_CACHE_SIZE;
    uCacheEntry = uGrain % VMDK_GT_CACHELINE_SIZE;
    uGDEntry = uGrain / pExtent->cGTEntries;
    uLastGDEntry = pExtent->uLastGrainWritten / pExtent->cGTEntries;
    if (uGrain < pExtent->uLastGrainWritten)
    {
        rc = VERR_VD_VMDK_INVALID_WRITE;
        goto out;
    }

    /* Crossed a grain-directory boundary: flush the completed grain table,
     * reset the in-memory table, and emit (empty) tables for any GD entries
     * that were skipped entirely. */
    if (uGDEntry != uLastGDEntry)
    {
        rc = vmdksFlushGT(pImage, pExtent, uLastGDEntry);
        if (RT_FAILURE(rc))
            goto out;
        vmdksClearGT(pImage, pExtent);
        for (uint32_t i = uLastGDEntry + 1; i < uGDEntry; i++)
        {
            rc = vmdksFlushGT(pImage, pExtent, i);
            if (RT_FAILURE(rc))
                goto out;
        }
    }

    /* Check access permissions as defined in the extent descriptor.
     * May sound a bit paradoxical, but we created the image with a
     * readonly extent since the resulting image is kind of "write once". */
    if (pExtent->enmAccess != VMDKACCESS_READONLY)
    {
        rc = VERR_VD_VMDK_INVALID_STATE;
        goto out;
    }

    /* New grains are always appended at the current end of the extent file. */
    uint64_t uFileOffset;
    rc = vmdkFileGetSize(pImage, pExtent->pFile, &uFileOffset);
    if (RT_FAILURE(rc))
        goto out;
    /* Align to sector, as the previous write could have been any size. */
    uFileOffset = RT_ALIGN_64(uFileOffset, 512);

    /* Paranoia check: extent type, grain table buffer presence and
     * grain table buffer space. Also grain table entry must be clear. */
    if (   pExtent->enmType != VMDKETYPE_HOSTED_SPARSE
        || !pImage->pGTCache
        || pExtent->cGTEntries > VMDK_GT_CACHE_SIZE * VMDK_GT_CACHELINE_SIZE
        || pImage->pGTCache->aGTCache[uCacheLine].aGTData[uCacheEntry])
    {
        rc = VERR_INTERNAL_ERROR;
        goto out;
    }

    /* Update grain table entry. */
    pImage->pGTCache->aGTCache[uCacheLine].aGTData[uCacheEntry] = VMDK_BYTE2SECTOR(uFileOffset);

    /* Partial final grain: pad the remainder of the grain buffer with zeros
     * and compress the padded buffer instead of the caller's data. */
    if (cbToWrite != VMDK_SECTOR2BYTE(pExtent->cSectorsPerGrain))
    {
        memcpy(pExtent->pvGrain, pvBuf, cbToWrite);
        memset((char *)pExtent->pvGrain + cbToWrite, '\0',
               VMDK_SECTOR2BYTE(pExtent->cSectorsPerGrain) - cbToWrite);
        pData = pExtent->pvGrain;
    }
    rc = vmdkFileDeflateSync(pImage, pExtent->pFile, uFileOffset,
                             pData, VMDK_SECTOR2BYTE(pExtent->cSectorsPerGrain),
                             VMDK_MARKER_IGNORE, uSector, &cbGrain);
    if (RT_FAILURE(rc))
    {
        /* Invalidate the cached grain state; note this path returns
         * directly and does not go through the "out" label. */
        pExtent->uGrainSector = 0;
        pExtent->uLastGrainSector = 0;
        AssertRC(rc);
        return vmdkError(pImage, rc, RT_SRC_POS, N_("VMDK: cannot write compressed data block in '%s'"), pExtent->pszFullname);
    }
    /* Remember where (and how big) the last grain was, for sequencing checks. */
    cbGrain = RT_ALIGN(cbGrain, 512);
    pExtent->uLastGrainSector = VMDK_BYTE2SECTOR(uFileOffset);
    pExtent->uLastGrainWritten = uGrain;
    pExtent->cbLastGrainWritten = cbGrain;

    if (pcbWriteProcess)
        *pcbWriteProcess = cbToWrite;

out:
    LogFlowFunc(("returns %Rrc\n", rc));
    return rc;
}
5943
5944/** @copydoc VBOXHDDBACKEND::pfnFlush */
5945static int vmdksFlush(void *pBackendData)
5946{
5947 LogFlowFunc(("pBackendData=%#p\n", pBackendData));
5948 PVMDKIMAGE pImage = (PVMDKIMAGE)pBackendData;
5949 int rc;
5950
5951 AssertPtr(pImage);
5952
5953 /* Pure dummy operation, closing takes care of everything. */
5954 rc = VINF_SUCCESS;
5955 LogFlowFunc(("returns %Rrc\n", rc));
5956 return rc;
5957}
5958
5959/** @copydoc VBOXHDDBACKEND::pfnSetPCHSGeometry */
5960static int vmdksSetPCHSGeometry(void *pBackendData, PCVDGEOMETRY pPCHSGeometry)
5961{
5962 LogFlowFunc(("pBackendData=%#p pPCHSGeometry=%#p PCHS=%u/%u/%u\n", pBackendData, pPCHSGeometry, pPCHSGeometry->cCylinders, pPCHSGeometry->cHeads, pPCHSGeometry->cSectors));
5963 PVMDKIMAGE pImage = (PVMDKIMAGE)pBackendData;
5964 int rc;
5965
5966 AssertPtr(pImage);
5967
5968 if (pImage)
5969 {
5970 if (pImage->uOpenFlags & VD_OPEN_FLAGS_READONLY)
5971 rc = VERR_VD_IMAGE_READ_ONLY;
5972 else
5973 rc = VERR_NOT_SUPPORTED;
5974 }
5975 else
5976 rc = VERR_VD_NOT_OPENED;
5977
5978 LogFlowFunc(("returns %Rrc\n", rc));
5979 return rc;
5980}
5981
5982/** @copydoc VBOXHDDBACKEND::pfnSetLCHSGeometry */
5983static int vmdksSetLCHSGeometry(void *pBackendData, PCVDGEOMETRY pLCHSGeometry)
5984{
5985 LogFlowFunc(("pBackendData=%#p pLCHSGeometry=%#p LCHS=%u/%u/%u\n", pBackendData, pLCHSGeometry, pLCHSGeometry->cCylinders, pLCHSGeometry->cHeads, pLCHSGeometry->cSectors));
5986 PVMDKIMAGE pImage = (PVMDKIMAGE)pBackendData;
5987 int rc;
5988
5989 AssertPtr(pImage);
5990
5991 if (pImage)
5992 {
5993 if (pImage->uOpenFlags & VD_OPEN_FLAGS_READONLY)
5994 rc = VERR_VD_IMAGE_READ_ONLY;
5995 else
5996 rc = VERR_NOT_SUPPORTED;
5997 }
5998 else
5999 rc = VERR_VD_NOT_OPENED;
6000
6001 LogFlowFunc(("returns %Rrc\n", rc));
6002 return rc;
6003}
6004
6005/** @copydoc VBOXHDDBACKEND::pfnSetOpenFlags */
6006static int vmdksSetOpenFlags(void *pBackendData, unsigned uOpenFlags)
6007{
6008 LogFlowFunc(("pBackendData=%#p\n uOpenFlags=%#x", pBackendData, uOpenFlags));
6009 PVMDKIMAGE pImage = (PVMDKIMAGE)pBackendData;
6010 int rc = VINF_SUCCESS;
6011
6012 /* Image must be opened and the new flags must be the same as before. */
6013 if (!pImage || pImage->uOpenFlags != uOpenFlags)
6014 {
6015 rc = VERR_INVALID_PARAMETER;
6016 goto out;
6017 }
6018
6019out:
6020 LogFlowFunc(("returns %Rrc\n", rc));
6021 return rc;
6022}
6023
6024/** @copydoc VBOXHDDBACKEND::pfnSetComment */
6025static int vmdksSetComment(void *pBackendData, const char *pszComment)
6026{
6027 LogFlowFunc(("pBackendData=%#p pszComment=\"%s\"\n", pBackendData, pszComment));
6028 PVMDKIMAGE pImage = (PVMDKIMAGE)pBackendData;
6029 int rc;
6030
6031 AssertPtr(pImage);
6032
6033 if (pImage)
6034 {
6035 if (pImage->uOpenFlags & VD_OPEN_FLAGS_READONLY)
6036 rc = VERR_VD_IMAGE_READ_ONLY;
6037 else
6038 rc = VERR_NOT_SUPPORTED;
6039 }
6040 else
6041 rc = VERR_VD_NOT_OPENED;
6042
6043 LogFlowFunc(("returns %Rrc\n", rc));
6044 return rc;
6045}
6046
6047/** @copydoc VBOXHDDBACKEND::pfnSetUuid */
6048static int vmdksSetUuid(void *pBackendData, PCRTUUID pUuid)
6049{
6050 LogFlowFunc(("pBackendData=%#p Uuid=%RTuuid\n", pBackendData, pUuid));
6051 PVMDKIMAGE pImage = (PVMDKIMAGE)pBackendData;
6052 int rc;
6053
6054 LogFlowFunc(("%RTuuid\n", pUuid));
6055 AssertPtr(pImage);
6056
6057 if (pImage)
6058 {
6059 if (pImage->uOpenFlags & VD_OPEN_FLAGS_READONLY)
6060 rc = VERR_VD_IMAGE_READ_ONLY;
6061 else
6062 rc = VERR_NOT_SUPPORTED;
6063 }
6064 else
6065 rc = VERR_VD_NOT_OPENED;
6066
6067 LogFlowFunc(("returns %Rrc\n", rc));
6068 return rc;
6069}
6070
6071/** @copydoc VBOXHDDBACKEND::pfnSetModificationUuid */
6072static int vmdksSetModificationUuid(void *pBackendData, PCRTUUID pUuid)
6073{
6074 LogFlowFunc(("pBackendData=%#p Uuid=%RTuuid\n", pBackendData, pUuid));
6075 PVMDKIMAGE pImage = (PVMDKIMAGE)pBackendData;
6076 int rc;
6077
6078 AssertPtr(pImage);
6079
6080 if (pImage)
6081 {
6082 if (pImage->uOpenFlags & VD_OPEN_FLAGS_READONLY)
6083 rc = VERR_VD_IMAGE_READ_ONLY;
6084 else
6085 rc = VERR_NOT_SUPPORTED;
6086 }
6087 else
6088 rc = VERR_VD_NOT_OPENED;
6089
6090 LogFlowFunc(("returns %Rrc\n", rc));
6091 return rc;
6092}
6093
6094/** @copydoc VBOXHDDBACKEND::pfnSetParentUuid */
6095static int vmdksSetParentUuid(void *pBackendData, PCRTUUID pUuid)
6096{
6097 LogFlowFunc(("pBackendData=%#p Uuid=%RTuuid\n", pBackendData, pUuid));
6098 PVMDKIMAGE pImage = (PVMDKIMAGE)pBackendData;
6099 int rc;
6100
6101 AssertPtr(pImage);
6102
6103 if (pImage)
6104 {
6105 if (pImage->uOpenFlags & VD_OPEN_FLAGS_READONLY)
6106 rc = VERR_VD_IMAGE_READ_ONLY;
6107 else
6108 rc = VERR_NOT_SUPPORTED;
6109 }
6110 else
6111 rc = VERR_VD_NOT_OPENED;
6112
6113 LogFlowFunc(("returns %Rrc\n", rc));
6114 return rc;
6115}
6116
6117/** @copydoc VBOXHDDBACKEND::pfnSetParentModificationUuid */
6118static int vmdksSetParentModificationUuid(void *pBackendData, PCRTUUID pUuid)
6119{
6120 LogFlowFunc(("pBackendData=%#p Uuid=%RTuuid\n", pBackendData, pUuid));
6121 PVMDKIMAGE pImage = (PVMDKIMAGE)pBackendData;
6122 int rc;
6123
6124 AssertPtr(pImage);
6125
6126 if (pImage)
6127 {
6128 if (pImage->uOpenFlags & VD_OPEN_FLAGS_READONLY)
6129 rc = VERR_VD_IMAGE_READ_ONLY;
6130 else
6131 rc = VERR_NOT_SUPPORTED;
6132 }
6133 else
6134 rc = VERR_VD_NOT_OPENED;
6135
6136 LogFlowFunc(("returns %Rrc\n", rc));
6137 return rc;
6138}
6139
6140
6141/** @copydoc VBOXHDDBACKEND::pfnCheckIfValid */
6142static int vmdkCheckIfValid(const char *pszFilename, PVDINTERFACE pVDIfsDisk,
6143 PVDINTERFACE pVDIfsImage)
6144{
6145 LogFlowFunc(("pszFilename=\"%s\"\n", pszFilename));
6146 int rc = VINF_SUCCESS;
6147 PVMDKIMAGE pImage;
6148
6149 if ( !pszFilename
6150 || !*pszFilename
6151 || strchr(pszFilename, '"'))
6152 {
6153 rc = VERR_INVALID_PARAMETER;
6154 goto out;
6155 }
6156
6157 pImage = (PVMDKIMAGE)RTMemAllocZ(sizeof(VMDKIMAGE));
6158 if (!pImage)
6159 {
6160 rc = VERR_NO_MEMORY;
6161 goto out;
6162 }
6163 pImage->pszFilename = pszFilename;
6164 pImage->pFile = NULL;
6165 pImage->pExtents = NULL;
6166 pImage->pFiles = NULL;
6167 pImage->pGTCache = NULL;
6168 pImage->pDescData = NULL;
6169 pImage->pVDIfsDisk = pVDIfsDisk;
6170 pImage->pVDIfsImage = pVDIfsImage;
6171 /** @todo speed up this test open (VD_OPEN_FLAGS_INFO) by skipping as
6172 * much as possible in vmdkOpenImage. */
6173 rc = vmdkOpenImage(pImage, VD_OPEN_FLAGS_INFO | VD_OPEN_FLAGS_READONLY);
6174 vmdkFreeImage(pImage, false);
6175 RTMemFree(pImage);
6176
6177out:
6178 LogFlowFunc(("returns %Rrc\n", rc));
6179 return rc;
6180}
6181
6182/** @copydoc VBOXHDDBACKEND::pfnOpen */
6183static int vmdkOpen(const char *pszFilename, unsigned uOpenFlags,
6184 PVDINTERFACE pVDIfsDisk, PVDINTERFACE pVDIfsImage,
6185 void **ppBackendData)
6186{
6187 LogFlowFunc(("pszFilename=\"%s\" uOpenFlags=%#x pVDIfsDisk=%#p pVDIfsImage=%#p ppBackendData=%#p\n", pszFilename, uOpenFlags, pVDIfsDisk, pVDIfsImage, ppBackendData));
6188 int rc;
6189 PVMDKIMAGE pImage;
6190
6191 /* Check open flags. All valid flags are supported. */
6192 if (uOpenFlags & ~VD_OPEN_FLAGS_MASK)
6193 {
6194 rc = VERR_INVALID_PARAMETER;
6195 goto out;
6196 }
6197
6198 /* Check remaining arguments. */
6199 if ( !VALID_PTR(pszFilename)
6200 || !*pszFilename
6201 || strchr(pszFilename, '"'))
6202 {
6203 rc = VERR_INVALID_PARAMETER;
6204 goto out;
6205 }
6206
6207
6208 pImage = (PVMDKIMAGE)RTMemAllocZ(sizeof(VMDKIMAGE));
6209 if (!pImage)
6210 {
6211 rc = VERR_NO_MEMORY;
6212 goto out;
6213 }
6214 pImage->pszFilename = pszFilename;
6215 pImage->pFile = NULL;
6216 pImage->pExtents = NULL;
6217 pImage->pFiles = NULL;
6218 pImage->pGTCache = NULL;
6219 pImage->pDescData = NULL;
6220 pImage->pVDIfsDisk = pVDIfsDisk;
6221 pImage->pVDIfsImage = pVDIfsImage;
6222
6223 rc = vmdkOpenImage(pImage, uOpenFlags);
6224 if (RT_SUCCESS(rc))
6225 *ppBackendData = pImage;
6226
6227out:
6228 LogFlowFunc(("returns %Rrc (pBackendData=%#p)\n", rc, *ppBackendData));
6229 return rc;
6230}
6231
6232/** @copydoc VBOXHDDBACKEND::pfnCreate */
6233static int vmdkCreate(const char *pszFilename, uint64_t cbSize,
6234 unsigned uImageFlags, const char *pszComment,
6235 PCVDGEOMETRY pPCHSGeometry, PCVDGEOMETRY pLCHSGeometry,
6236 PCRTUUID pUuid, unsigned uOpenFlags,
6237 unsigned uPercentStart, unsigned uPercentSpan,
6238 PVDINTERFACE pVDIfsDisk, PVDINTERFACE pVDIfsImage,
6239 PVDINTERFACE pVDIfsOperation, void **ppBackendData)
6240{
6241 LogFlowFunc(("pszFilename=\"%s\" cbSize=%llu uImageFlags=%#x pszComment=\"%s\" pPCHSGeometry=%#p pLCHSGeometry=%#p Uuid=%RTuuid uOpenFlags=%#x uPercentStart=%u uPercentSpan=%u pVDIfsDisk=%#p pVDIfsImage=%#p pVDIfsOperation=%#p ppBackendData=%#p", pszFilename, cbSize, uImageFlags, pszComment, pPCHSGeometry, pLCHSGeometry, pUuid, uOpenFlags, uPercentStart, uPercentSpan, pVDIfsDisk, pVDIfsImage, pVDIfsOperation, ppBackendData));
6242 int rc;
6243 PVMDKIMAGE pImage;
6244
6245 PFNVDPROGRESS pfnProgress = NULL;
6246 void *pvUser = NULL;
6247 PVDINTERFACE pIfProgress = VDInterfaceGet(pVDIfsOperation,
6248 VDINTERFACETYPE_PROGRESS);
6249 PVDINTERFACEPROGRESS pCbProgress = NULL;
6250 if (pIfProgress)
6251 {
6252 pCbProgress = VDGetInterfaceProgress(pIfProgress);
6253 pfnProgress = pCbProgress->pfnProgress;
6254 pvUser = pIfProgress->pvUser;
6255 }
6256
6257 /* Check open flags. All valid flags are supported. */
6258 if (uOpenFlags & ~VD_OPEN_FLAGS_MASK)
6259 {
6260 rc = VERR_INVALID_PARAMETER;
6261 goto out;
6262 }
6263
6264 /* Check size. Maximum 2TB-64K for sparse images, otherwise unlimited. */
6265 if ( !cbSize
6266 || (!(uImageFlags & VD_IMAGE_FLAGS_FIXED) && cbSize >= _1T * 2 - _64K))
6267 {
6268 rc = VERR_VD_INVALID_SIZE;
6269 goto out;
6270 }
6271
6272 /* Check remaining arguments. */
6273 if ( !VALID_PTR(pszFilename)
6274 || !*pszFilename
6275 || strchr(pszFilename, '"')
6276 || !VALID_PTR(pPCHSGeometry)
6277 || !VALID_PTR(pLCHSGeometry)
6278#ifndef VBOX_WITH_VMDK_ESX
6279 || ( uImageFlags & VD_VMDK_IMAGE_FLAGS_ESX
6280 && !(uImageFlags & VD_IMAGE_FLAGS_FIXED))
6281#endif
6282 || ( (uImageFlags & VD_VMDK_IMAGE_FLAGS_STREAM_OPTIMIZED)
6283 && (uImageFlags & ~(VD_VMDK_IMAGE_FLAGS_STREAM_OPTIMIZED | VD_IMAGE_FLAGS_DIFF))))
6284 {
6285 rc = VERR_INVALID_PARAMETER;
6286 goto out;
6287 }
6288
6289 pImage = (PVMDKIMAGE)RTMemAllocZ(sizeof(VMDKIMAGE));
6290 if (!pImage)
6291 {
6292 rc = VERR_NO_MEMORY;
6293 goto out;
6294 }
6295 pImage->pszFilename = pszFilename;
6296 pImage->pFile = NULL;
6297 pImage->pExtents = NULL;
6298 pImage->pFiles = NULL;
6299 pImage->pGTCache = NULL;
6300 pImage->pDescData = NULL;
6301 pImage->pVDIfsDisk = pVDIfsDisk;
6302 pImage->pVDIfsImage = pVDIfsImage;
6303 /* Descriptors for split images can be pretty large, especially if the
6304 * filename is long. So prepare for the worst, and allocate quite some
6305 * memory for the descriptor in this case. */
6306 if (uImageFlags & VD_VMDK_IMAGE_FLAGS_SPLIT_2G)
6307 pImage->cbDescAlloc = VMDK_SECTOR2BYTE(200);
6308 else
6309 pImage->cbDescAlloc = VMDK_SECTOR2BYTE(20);
6310 pImage->pDescData = (char *)RTMemAllocZ(pImage->cbDescAlloc);
6311 if (!pImage->pDescData)
6312 {
6313 rc = VERR_NO_MEMORY;
6314 goto out;
6315 }
6316
6317 rc = vmdkCreateImage(pImage, cbSize, uImageFlags, pszComment,
6318 pPCHSGeometry, pLCHSGeometry, pUuid,
6319 pfnProgress, pvUser, uPercentStart, uPercentSpan);
6320 if (RT_SUCCESS(rc))
6321 {
6322 /* So far the image is opened in read/write mode. Make sure the
6323 * image is opened in read-only mode if the caller requested that. */
6324 if (uOpenFlags & VD_OPEN_FLAGS_READONLY)
6325 {
6326 vmdkFreeImage(pImage, false);
6327 rc = vmdkOpenImage(pImage, uOpenFlags);
6328 if (RT_FAILURE(rc))
6329 goto out;
6330 }
6331 *ppBackendData = pImage;
6332 }
6333 else
6334 {
6335 RTMemFree(pImage->pDescData);
6336 RTMemFree(pImage);
6337 }
6338
6339out:
6340 LogFlowFunc(("returns %Rrc (pBackendData=%#p)\n", rc, *ppBackendData));
6341 return rc;
6342}
6343
/** @copydoc VBOXHDDBACKEND::pfnRename
 *
 * Renames a (possibly multi-extent) VMDK image: rewrites the extent lines
 * in the descriptor, renames every extent file and the descriptor file,
 * and re-opens the image under the new name. If anything fails the
 * function attempts a best-effort rollback: files are renamed back, the
 * saved descriptor is written out again and the image is re-opened under
 * its old name.
 */
static int vmdkRename(void *pBackendData, const char *pszFilename)
{
    LogFlowFunc(("pBackendData=%#p pszFilename=%#p\n", pBackendData, pszFilename));

    PVMDKIMAGE pImage = (PVMDKIMAGE)pBackendData;
    int rc = VINF_SUCCESS;
    char **apszOldName = NULL;      /* old file names, kept for rollback */
    char **apszNewName = NULL;      /* new file names */
    char **apszNewLines = NULL;     /* rewritten descriptor extent lines */
    char *pszOldDescName = NULL;    /* path of the old descriptor file */
    bool fImageFreed = false;       /* set once vmdkFreeImage() has run */
    bool fEmbeddedDesc = false;     /* descriptor embedded in extent 0? */
    unsigned cExtents = pImage->cExtents;
    char *pszNewBaseName = NULL;
    char *pszOldBaseName = NULL;
    char *pszNewFullName = NULL;
    char *pszOldFullName = NULL;
    const char *pszOldImageName;
    unsigned i, line;
    VMDKDESCRIPTOR DescriptorCopy;  /* backup of the descriptor content */
    VMDKEXTENT ExtentCopy;          /* backup of extent 0 (embedded descriptor case) */

    memset(&DescriptorCopy, 0, sizeof(DescriptorCopy));

    /* Check arguments. Renaming raw disk images is not supported. */
    if (   !pImage
        || (pImage->uImageFlags & VD_VMDK_IMAGE_FLAGS_RAWDISK)
        || !VALID_PTR(pszFilename)
        || !*pszFilename)
    {
        rc = VERR_INVALID_PARAMETER;
        goto out;
    }

    /*
     * Allocate an array to store both old and new names of renamed files
     * in case we have to roll back the changes. Arrays are initialized
     * with zeros. We actually save stuff when and if we change it.
     */
    apszOldName = (char **)RTMemTmpAllocZ((cExtents + 1) * sizeof(char*));
    apszNewName = (char **)RTMemTmpAllocZ((cExtents + 1) * sizeof(char*));
    apszNewLines = (char **)RTMemTmpAllocZ((cExtents) * sizeof(char*));
    if (!apszOldName || !apszNewName || !apszNewLines)
    {
        rc = VERR_NO_MEMORY;
        goto out;
    }

    /* Save the descriptor size and position. */
    if (pImage->pDescData)
    {
        /* Separate descriptor file. */
        fEmbeddedDesc = false;
    }
    else
    {
        /* Embedded descriptor file. */
        ExtentCopy = pImage->pExtents[0];
        fEmbeddedDesc = true;
    }
    /* Save the descriptor content. */
    DescriptorCopy.cLines = pImage->Descriptor.cLines;
    for (i = 0; i < DescriptorCopy.cLines; i++)
    {
        DescriptorCopy.aLines[i] = RTStrDup(pImage->Descriptor.aLines[i]);
        if (!DescriptorCopy.aLines[i])
        {
            rc = VERR_NO_MEMORY;
            goto out;
        }
    }

    /* Prepare both old and new base names used for string replacement. */
    pszNewBaseName = RTStrDup(RTPathFilename(pszFilename));
    RTPathStripExt(pszNewBaseName);
    pszOldBaseName = RTStrDup(RTPathFilename(pImage->pszFilename));
    RTPathStripExt(pszOldBaseName);
    /* Prepare both old and new full names used for string replacement. */
    pszNewFullName = RTStrDup(pszFilename);
    RTPathStripExt(pszNewFullName);
    pszOldFullName = RTStrDup(pImage->pszFilename);
    RTPathStripExt(pszOldFullName);

    /* --- Up to this point we have not done any damage yet. --- */

    /* Save the old name for easy access to the old descriptor file. */
    pszOldDescName = RTStrDup(pImage->pszFilename);
    /* Save old image name. */
    pszOldImageName = pImage->pszFilename;

    /* Update the descriptor with modified extent names. */
    for (i = 0, line = pImage->Descriptor.uFirstExtent;
        i < cExtents;
        i++, line = pImage->Descriptor.aNextLines[line])
    {
        /* Assume that vmdkStrReplace will fail. */
        rc = VERR_NO_MEMORY;
        /* Update the descriptor. */
        apszNewLines[i] = vmdkStrReplace(pImage->Descriptor.aLines[line],
            pszOldBaseName, pszNewBaseName);
        if (!apszNewLines[i])
            goto rollback;
        pImage->Descriptor.aLines[line] = apszNewLines[i];
    }
    /* Make sure the descriptor gets written back. */
    pImage->Descriptor.fDirty = true;
    /* Flush the descriptor now, in case it is embedded. */
    vmdkFlushImage(pImage);

    /* Close and rename/move extents. */
    for (i = 0; i < cExtents; i++)
    {
        PVMDKEXTENT pExtent = &pImage->pExtents[i];
        /* Compose new name for the extent. */
        apszNewName[i] = vmdkStrReplace(pExtent->pszFullname,
            pszOldFullName, pszNewFullName);
        if (!apszNewName[i])
            goto rollback;
        /* Close the extent file. */
        vmdkFileClose(pImage, &pExtent->pFile, false);
        /* Rename the extent file. */
        rc = vmdkFileMove(pImage, pExtent->pszFullname, apszNewName[i], 0);
        if (RT_FAILURE(rc))
            goto rollback;
        /* Remember the old name. */
        apszOldName[i] = RTStrDup(pExtent->pszFullname);
    }
    /* Release all old stuff. */
    vmdkFreeImage(pImage, false);

    fImageFreed = true;

    /* Last elements of new/old name arrays are intended for
     * storing descriptor's names.
     */
    apszNewName[cExtents] = RTStrDup(pszFilename);
    /* Rename the descriptor file if it's separate. */
    if (!fEmbeddedDesc)
    {
        rc = vmdkFileMove(pImage, pImage->pszFilename, apszNewName[cExtents], 0);
        if (RT_FAILURE(rc))
            goto rollback;
        /* Save old name only if we may need to change it back. */
        /* NOTE(review): this saves the NEW name (pszFilename) as the "old"
         * name, so the rollback below would do vmdkFileMove(new, new) -- a
         * no-op -- instead of restoring the descriptor to its old path.
         * Presumably this should be RTStrDup(pImage->pszFilename), which
         * still holds the old path at this point -- TODO confirm. */
        apszOldName[cExtents] = RTStrDup(pszFilename);
    }

    /* Update pImage with the new information. */
    pImage->pszFilename = pszFilename;

    /* Open the new image. */
    rc = vmdkOpenImage(pImage, pImage->uOpenFlags);
    if (RT_SUCCESS(rc))
        goto out;

rollback:
    /* Roll back all changes in case of failure. */
    if (RT_FAILURE(rc))
    {
        int rrc;
        if (!fImageFreed)
        {
            /*
             * Some extents may have been closed, close the rest. We will
             * re-open the whole thing later.
             */
            vmdkFreeImage(pImage, false);
        }
        /* Rename files back. */
        for (i = 0; i <= cExtents; i++)
        {
            if (apszOldName[i])
            {
                rrc = vmdkFileMove(pImage, apszNewName[i], apszOldName[i], 0);
                AssertRC(rrc);
            }
        }
        /* Restore the old descriptor. */
        PVMDKFILE pFile;
        rrc = vmdkFileOpen(pImage, &pFile, pszOldDescName,
                           VDOpenFlagsToFileOpenFlags(VD_OPEN_FLAGS_NORMAL,
                                                      false /* fCreate */),
                           false /* fAsyncIO */);
        AssertRC(rrc);
        if (fEmbeddedDesc)
        {
            ExtentCopy.pFile = pFile;
            pImage->pExtents = &ExtentCopy;
        }
        else
        {
            /* Shouldn't be null for separate descriptor.
             * There will be no access to the actual content.
             */
            pImage->pDescData = pszOldDescName;
            pImage->pFile = pFile;
        }
        /* NOTE(review): this makes pImage->Descriptor alias the strings in
         * DescriptorCopy, which are freed at "out" below; the subsequent
         * vmdkOpenImage presumably replaces the descriptor before the
         * strings are used again -- verify against vmdkOpenImage. */
        pImage->Descriptor = DescriptorCopy;
        vmdkWriteDescriptor(pImage);
        vmdkFileClose(pImage, &pFile, false);
        /* Get rid of the stuff we implanted. */
        pImage->pExtents = NULL;
        pImage->pFile = NULL;
        pImage->pDescData = NULL;
        /* Re-open the image back. */
        pImage->pszFilename = pszOldImageName;
        rrc = vmdkOpenImage(pImage, pImage->uOpenFlags);
        AssertRC(rrc);
    }

out:
    /* Common cleanup: free the descriptor backup and all name buffers. */
    for (i = 0; i < DescriptorCopy.cLines; i++)
        if (DescriptorCopy.aLines[i])
            RTStrFree(DescriptorCopy.aLines[i]);
    if (apszOldName)
    {
        for (i = 0; i <= cExtents; i++)
            if (apszOldName[i])
                RTStrFree(apszOldName[i]);
        RTMemTmpFree(apszOldName);
    }
    if (apszNewName)
    {
        for (i = 0; i <= cExtents; i++)
            if (apszNewName[i])
                RTStrFree(apszNewName[i]);
        RTMemTmpFree(apszNewName);
    }
    if (apszNewLines)
    {
        for (i = 0; i < cExtents; i++)
            if (apszNewLines[i])
                RTStrFree(apszNewLines[i]);
        RTMemTmpFree(apszNewLines);
    }
    if (pszOldDescName)
        RTStrFree(pszOldDescName);
    if (pszOldBaseName)
        RTStrFree(pszOldBaseName);
    if (pszNewBaseName)
        RTStrFree(pszNewBaseName);
    if (pszOldFullName)
        RTStrFree(pszOldFullName);
    if (pszNewFullName)
        RTStrFree(pszNewFullName);
    LogFlowFunc(("returns %Rrc\n", rc));
    return rc;
}
6592
6593/** @copydoc VBOXHDDBACKEND::pfnClose */
6594static int vmdkClose(void *pBackendData, bool fDelete)
6595{
6596 LogFlowFunc(("pBackendData=%#p fDelete=%d\n", pBackendData, fDelete));
6597 PVMDKIMAGE pImage = (PVMDKIMAGE)pBackendData;
6598 int rc;
6599
6600 rc = vmdkFreeImage(pImage, fDelete);
6601 RTMemFree(pImage);
6602
6603 LogFlowFunc(("returns %Rrc\n", rc));
6604 return rc;
6605}
6606
/** @copydoc VBOXHDDBACKEND::pfnRead */
static int vmdkRead(void *pBackendData, uint64_t uOffset, void *pvBuf,
                    size_t cbToRead, size_t *pcbActuallyRead)
{
    LogFlowFunc(("pBackendData=%#p uOffset=%llu pvBuf=%#p cbToRead=%zu pcbActuallyRead=%#p\n", pBackendData, uOffset, pvBuf, cbToRead, pcbActuallyRead));
    PVMDKIMAGE pImage = (PVMDKIMAGE)pBackendData;
    PVMDKEXTENT pExtent;
    uint64_t uSectorExtentRel;  /* Sector offset relative to the extent start. */
    uint64_t uSectorExtentAbs;  /* Absolute sector offset within the extent file. */
    int rc;

    AssertPtr(pImage);
    /* Requests must be sector (512 byte) aligned. */
    Assert(uOffset % 512 == 0);
    Assert(cbToRead % 512 == 0);

    /* Reject reads beyond the nominal disk size and zero-length reads. */
    if (   uOffset + cbToRead > pImage->cbSize
        || cbToRead == 0)
    {
        rc = VERR_INVALID_PARAMETER;
        goto out;
    }

    /* Locate the extent covering uOffset and the offset inside it. */
    rc = vmdkFindExtent(pImage, VMDK_BYTE2SECTOR(uOffset),
                        &pExtent, &uSectorExtentRel);
    if (RT_FAILURE(rc))
        goto out;

    /* Check access permissions as defined in the extent descriptor. */
    if (pExtent->enmAccess == VMDKACCESS_NOACCESS)
    {
        rc = VERR_VD_VMDK_INVALID_STATE;
        goto out;
    }


    /* Clip read range to remain in this extent. */
    cbToRead = RT_MIN(cbToRead, VMDK_SECTOR2BYTE(pExtent->uSectorOffset + pExtent->cNominalSectors - uSectorExtentRel));

    /* Handle the read according to the current extent type. */
    switch (pExtent->enmType)
    {
        case VMDKETYPE_HOSTED_SPARSE:
#ifdef VBOX_WITH_VMDK_ESX
        case VMDKETYPE_ESX_SPARSE:
#endif /* VBOX_WITH_VMDK_ESX */
            /* Translate the extent-relative sector to the absolute sector
             * in the extent file via the grain tables. */
            rc = vmdkGetSector(pImage, pExtent, uSectorExtentRel,
                               &uSectorExtentAbs);
            if (RT_FAILURE(rc))
                goto out;
            /* Clip read range to at most the rest of the grain. */
            cbToRead = RT_MIN(cbToRead, VMDK_SECTOR2BYTE(pExtent->cSectorsPerGrain - uSectorExtentRel % pExtent->cSectorsPerGrain));
            Assert(!(cbToRead % 512));
            if (uSectorExtentAbs == 0)
                /* Grain not allocated in this image; signal it to the caller. */
                rc = VERR_VD_BLOCK_FREE;
            else
            {
                if (pImage->uImageFlags & VD_VMDK_IMAGE_FLAGS_STREAM_OPTIMIZED)
                {
                    /* Compressed grain: inflate the whole grain into the
                     * per-extent buffer (cached, keyed by uGrainSector) and
                     * copy the requested piece out of it. */
                    uint32_t uSectorInGrain = uSectorExtentRel % pExtent->cSectorsPerGrain;
                    uSectorExtentAbs -= uSectorInGrain;
                    uint64_t uLBA;
                    if (pExtent->uGrainSector != uSectorExtentAbs)
                    {
                        rc = vmdkFileInflateSync(pImage, pExtent->pFile, VMDK_SECTOR2BYTE(uSectorExtentAbs),
                                                 pExtent->pvGrain, VMDK_SECTOR2BYTE(pExtent->cSectorsPerGrain), VMDK_MARKER_IGNORE, &uLBA, NULL);
                        if (RT_FAILURE(rc))
                        {
                            /* Invalidate the grain cache on decompression error. */
                            pExtent->uGrainSector = 0;
                            AssertRC(rc);
                            goto out;
                        }
                        pExtent->uGrainSector = uSectorExtentAbs;
                        Assert(uLBA == uSectorExtentRel);
                    }
                    memcpy(pvBuf, (uint8_t *)pExtent->pvGrain + VMDK_SECTOR2BYTE(uSectorInGrain), cbToRead);
                }
                else
                {
                    /* Uncompressed sparse extent: read straight from the file. */
                    rc = vmdkFileReadSync(pImage, pExtent->pFile,
                                          VMDK_SECTOR2BYTE(uSectorExtentAbs),
                                          pvBuf, cbToRead, NULL);
                }
            }
            break;
        case VMDKETYPE_VMFS:
        case VMDKETYPE_FLAT:
            /* Flat extents map 1:1 onto the file contents. */
            rc = vmdkFileReadSync(pImage, pExtent->pFile,
                                  VMDK_SECTOR2BYTE(uSectorExtentRel),
                                  pvBuf, cbToRead, NULL);
            break;
        case VMDKETYPE_ZERO:
            /* Zero extents have no backing storage at all. */
            memset(pvBuf, '\0', cbToRead);
            break;
    }
    if (pcbActuallyRead)
        *pcbActuallyRead = cbToRead;

out:
    LogFlowFunc(("returns %Rrc\n", rc));
    return rc;
}
6708
/** @copydoc VBOXHDDBACKEND::pfnWrite */
static int vmdkWrite(void *pBackendData, uint64_t uOffset, const void *pvBuf,
                     size_t cbToWrite, size_t *pcbWriteProcess,
                     size_t *pcbPreRead, size_t *pcbPostRead, unsigned fWrite)
{
    LogFlowFunc(("pBackendData=%#p uOffset=%llu pvBuf=%#p cbToWrite=%zu pcbWriteProcess=%#p pcbPreRead=%#p pcbPostRead=%#p\n", pBackendData, uOffset, pvBuf, cbToWrite, pcbWriteProcess, pcbPreRead, pcbPostRead));
    PVMDKIMAGE pImage = (PVMDKIMAGE)pBackendData;
    PVMDKEXTENT pExtent;
    uint64_t uSectorExtentRel;  /* Sector offset relative to the extent start. */
    uint64_t uSectorExtentAbs;  /* Absolute sector offset within the extent file. */
    int rc;

    AssertPtr(pImage);
    /* Requests must be sector (512 byte) aligned. */
    Assert(uOffset % 512 == 0);
    Assert(cbToWrite % 512 == 0);

    if (pImage->uOpenFlags & VD_OPEN_FLAGS_READONLY)
    {
        rc = VERR_VD_IMAGE_READ_ONLY;
        goto out;
    }

    if (cbToWrite == 0)
    {
        rc = VERR_INVALID_PARAMETER;
        goto out;
    }

    /* No size check here, will do that later when the extent is located.
     * There are sparse images out there which according to the spec are
     * invalid, because the total size is not a multiple of the grain size.
     * Also for sparse images which are stitched together in odd ways (not at
     * grain boundaries, and with the nominal size not being a multiple of the
     * grain size), this would prevent writing to the last grain. */

    rc = vmdkFindExtent(pImage, VMDK_BYTE2SECTOR(uOffset),
                        &pExtent, &uSectorExtentRel);
    if (RT_FAILURE(rc))
        goto out;

    /* Check access permissions as defined in the extent descriptor. */
    if (pExtent->enmAccess != VMDKACCESS_READWRITE)
    {
        rc = VERR_VD_VMDK_INVALID_STATE;
        goto out;
    }

    /* Handle the write according to the current extent type. */
    switch (pExtent->enmType)
    {
        case VMDKETYPE_HOSTED_SPARSE:
#ifdef VBOX_WITH_VMDK_ESX
        case VMDKETYPE_ESX_SPARSE:
#endif /* VBOX_WITH_VMDK_ESX */
            rc = vmdkGetSector(pImage, pExtent, uSectorExtentRel,
                               &uSectorExtentAbs);
            if (RT_FAILURE(rc))
                goto out;
            /* Clip write range to at most the rest of the grain. */
            cbToWrite = RT_MIN(cbToWrite, VMDK_SECTOR2BYTE(pExtent->cSectorsPerGrain - uSectorExtentRel % pExtent->cSectorsPerGrain));
            /* Stream optimized images: reject writes located before the
             * last grain already written (data is laid out sequentially). */
            if (   pImage->uImageFlags & VD_VMDK_IMAGE_FLAGS_STREAM_OPTIMIZED
                && uSectorExtentRel < (uint64_t)pExtent->uLastGrainWritten * pExtent->cSectorsPerGrain)
            {
                rc = VERR_VD_VMDK_INVALID_WRITE;
                goto out;
            }
            if (uSectorExtentAbs == 0)
            {
                if (cbToWrite == VMDK_SECTOR2BYTE(pExtent->cSectorsPerGrain))
                {
                    /* Full block write to a previously unallocated block.
                     * Check if the caller wants to avoid the automatic alloc. */
                    if (!(fWrite & VD_WRITE_NO_ALLOC))
                    {
                        /* Allocate GT and find out where to store the grain. */
                        rc = vmdkAllocGrain(pImage, pExtent, uSectorExtentRel,
                                            pvBuf, cbToWrite);
                    }
                    else
                        rc = VERR_VD_BLOCK_FREE;
                    *pcbPreRead = 0;
                    *pcbPostRead = 0;
                }
                else
                {
                    /* Partial write to an unallocated grain: report the
                     * pre/post read span so the caller can perform a
                     * read-modify-write of the full grain. */
                    /* Clip write range to remain in this extent. */
                    cbToWrite = RT_MIN(cbToWrite, VMDK_SECTOR2BYTE(pExtent->uSectorOffset + pExtent->cNominalSectors - uSectorExtentRel));
                    *pcbPreRead = VMDK_SECTOR2BYTE(uSectorExtentRel % pExtent->cSectorsPerGrain);
                    *pcbPostRead = VMDK_SECTOR2BYTE(pExtent->cSectorsPerGrain) - cbToWrite - *pcbPreRead;
                    rc = VERR_VD_BLOCK_FREE;
                }
            }
            else
            {
                if (pImage->uImageFlags & VD_VMDK_IMAGE_FLAGS_STREAM_OPTIMIZED)
                {
                    /* Rewrite of an already-allocated grain in a compressed
                     * image: inflate it, patch it in memory, re-deflate it. */
                    uint32_t uSectorInGrain = uSectorExtentRel % pExtent->cSectorsPerGrain;
                    uSectorExtentAbs -= uSectorInGrain;
                    uint64_t uLBA = uSectorExtentRel;
                    /* NOTE(review): the second operand compares uGrainSector
                     * against uLastGrainSector; it looks like it may have been
                     * meant to compare uSectorExtentAbs against
                     * uLastGrainSector — confirm before changing. */
                    if (   pExtent->uGrainSector != uSectorExtentAbs
                        || pExtent->uGrainSector != pExtent->uLastGrainSector)
                    {
                        rc = vmdkFileInflateSync(pImage, pExtent->pFile, VMDK_SECTOR2BYTE(uSectorExtentAbs),
                                                 pExtent->pvGrain, VMDK_SECTOR2BYTE(pExtent->cSectorsPerGrain), VMDK_MARKER_IGNORE, &uLBA, NULL);
                        if (RT_FAILURE(rc))
                        {
                            /* Invalidate the grain cache on error. */
                            pExtent->uGrainSector = 0;
                            pExtent->uLastGrainSector = 0;
                            AssertRC(rc);
                            goto out;
                        }
                        pExtent->uGrainSector = uSectorExtentAbs;
                        pExtent->uLastGrainSector = uSectorExtentAbs;
                        Assert(uLBA == uSectorExtentRel);
                    }
                    memcpy((uint8_t *)pExtent->pvGrain + VMDK_SECTOR2BYTE(uSectorInGrain), pvBuf, cbToWrite);
                    uint32_t cbGrain = 0;
                    /* Write the whole grain back in compressed form. */
                    rc = vmdkFileDeflateSync(pImage, pExtent->pFile,
                                             VMDK_SECTOR2BYTE(uSectorExtentAbs),
                                             pExtent->pvGrain, VMDK_SECTOR2BYTE(pExtent->cSectorsPerGrain),
                                             VMDK_MARKER_IGNORE, uLBA, &cbGrain);
                    if (RT_FAILURE(rc))
                    {
                        pExtent->uGrainSector = 0;
                        pExtent->uLastGrainSector = 0;
                        AssertRC(rc);
                        return vmdkError(pImage, rc, RT_SRC_POS, N_("VMDK: cannot write compressed data block in '%s'"), pExtent->pszFullname);
                    }
                    /* Remember where/how much was written for the append-only check. */
                    cbGrain = RT_ALIGN(cbGrain, 512);
                    pExtent->uLastGrainSector = uSectorExtentAbs;
                    pExtent->uLastGrainWritten = uSectorExtentRel / pExtent->cSectorsPerGrain;
                    pExtent->cbLastGrainWritten = cbGrain;

                    uint64_t uEOSOff = 0;
                    if (pExtent->fFooter)
                    {
                        /* Footer-style images rewrite the metadata (footer)
                         * right after the data block; the end-of-stream
                         * marker then goes 512 bytes further out. */
                        uEOSOff = 512;
                        rc = vmdkWriteMetaSparseExtent(pImage, pExtent, VMDK_SECTOR2BYTE(uSectorExtentAbs) + RT_ALIGN(cbGrain, 512));
                        if (RT_FAILURE(rc))
                            return vmdkError(pImage, rc, RT_SRC_POS, N_("VMDK: cannot write footer after data block in '%s'"), pExtent->pszFullname);
                    }
                    /* Append a zeroed 512-byte end-of-stream marker. */
                    uint8_t aEOS[512];
                    memset(aEOS, '\0', sizeof(aEOS));
                    rc = vmdkFileWriteSync(pImage, pExtent->pFile,
                                           VMDK_SECTOR2BYTE(uSectorExtentAbs) + RT_ALIGN(cbGrain, 512) + uEOSOff,
                                           aEOS, sizeof(aEOS), NULL);
                    if (RT_FAILURE(rc))
                        return vmdkError(pImage, rc, RT_SRC_POS, N_("VMDK: cannot write end-of stream marker after data block in '%s'"), pExtent->pszFullname);
                }
                else
                {
                    /* Uncompressed sparse extent: plain in-place write. */
                    rc = vmdkFileWriteSync(pImage, pExtent->pFile,
                                           VMDK_SECTOR2BYTE(uSectorExtentAbs),
                                           pvBuf, cbToWrite, NULL);
                }
            }
            break;
        case VMDKETYPE_VMFS:
        case VMDKETYPE_FLAT:
            /* Clip write range to remain in this extent. */
            cbToWrite = RT_MIN(cbToWrite, VMDK_SECTOR2BYTE(pExtent->uSectorOffset + pExtent->cNominalSectors - uSectorExtentRel));
            rc = vmdkFileWriteSync(pImage, pExtent->pFile,
                                   VMDK_SECTOR2BYTE(uSectorExtentRel),
                                   pvBuf, cbToWrite, NULL);
            break;
        case VMDKETYPE_ZERO:
            /* Writes to zero extents are silently discarded. */
            /* Clip write range to remain in this extent. */
            cbToWrite = RT_MIN(cbToWrite, VMDK_SECTOR2BYTE(pExtent->uSectorOffset + pExtent->cNominalSectors - uSectorExtentRel));
            break;
    }

    if (pcbWriteProcess)
        *pcbWriteProcess = cbToWrite;

out:
    LogFlowFunc(("returns %Rrc\n", rc));
    return rc;
}
6887
6888/** @copydoc VBOXHDDBACKEND::pfnFlush */
6889static int vmdkFlush(void *pBackendData)
6890{
6891 LogFlowFunc(("pBackendData=%#p\n", pBackendData));
6892 PVMDKIMAGE pImage = (PVMDKIMAGE)pBackendData;
6893 int rc;
6894
6895 AssertPtr(pImage);
6896
6897 rc = vmdkFlushImage(pImage);
6898 LogFlowFunc(("returns %Rrc\n", rc));
6899 return rc;
6900}
6901
6902/** @copydoc VBOXHDDBACKEND::pfnGetVersion */
6903static unsigned vmdkGetVersion(void *pBackendData)
6904{
6905 LogFlowFunc(("pBackendData=%#p\n", pBackendData));
6906 PVMDKIMAGE pImage = (PVMDKIMAGE)pBackendData;
6907
6908 AssertPtr(pImage);
6909
6910 if (pImage)
6911 return VMDK_IMAGE_VERSION;
6912 else
6913 return 0;
6914}
6915
6916/** @copydoc VBOXHDDBACKEND::pfnGetSize */
6917static uint64_t vmdkGetSize(void *pBackendData)
6918{
6919 LogFlowFunc(("pBackendData=%#p\n", pBackendData));
6920 PVMDKIMAGE pImage = (PVMDKIMAGE)pBackendData;
6921
6922 AssertPtr(pImage);
6923
6924 if (pImage)
6925 return pImage->cbSize;
6926 else
6927 return 0;
6928}
6929
6930/** @copydoc VBOXHDDBACKEND::pfnGetFileSize */
6931static uint64_t vmdkGetFileSize(void *pBackendData)
6932{
6933 LogFlowFunc(("pBackendData=%#p\n", pBackendData));
6934 PVMDKIMAGE pImage = (PVMDKIMAGE)pBackendData;
6935 uint64_t cb = 0;
6936
6937 AssertPtr(pImage);
6938
6939 if (pImage)
6940 {
6941 uint64_t cbFile;
6942 if (pImage->pFile != NULL)
6943 {
6944 int rc = vmdkFileGetSize(pImage, pImage->pFile, &cbFile);
6945 if (RT_SUCCESS(rc))
6946 cb += cbFile;
6947 }
6948 for (unsigned i = 0; i < pImage->cExtents; i++)
6949 {
6950 if (pImage->pExtents[i].pFile != NULL)
6951 {
6952 int rc = vmdkFileGetSize(pImage, pImage->pExtents[i].pFile, &cbFile);
6953 if (RT_SUCCESS(rc))
6954 cb += cbFile;
6955 }
6956 }
6957 }
6958
6959 LogFlowFunc(("returns %lld\n", cb));
6960 return cb;
6961}
6962
6963/** @copydoc VBOXHDDBACKEND::pfnGetPCHSGeometry */
6964static int vmdkGetPCHSGeometry(void *pBackendData, PVDGEOMETRY pPCHSGeometry)
6965{
6966 LogFlowFunc(("pBackendData=%#p pPCHSGeometry=%#p\n", pBackendData, pPCHSGeometry));
6967 PVMDKIMAGE pImage = (PVMDKIMAGE)pBackendData;
6968 int rc;
6969
6970 AssertPtr(pImage);
6971
6972 if (pImage)
6973 {
6974 if (pImage->PCHSGeometry.cCylinders)
6975 {
6976 *pPCHSGeometry = pImage->PCHSGeometry;
6977 rc = VINF_SUCCESS;
6978 }
6979 else
6980 rc = VERR_VD_GEOMETRY_NOT_SET;
6981 }
6982 else
6983 rc = VERR_VD_NOT_OPENED;
6984
6985 LogFlowFunc(("returns %Rrc (PCHS=%u/%u/%u)\n", rc, pPCHSGeometry->cCylinders, pPCHSGeometry->cHeads, pPCHSGeometry->cSectors));
6986 return rc;
6987}
6988
6989/** @copydoc VBOXHDDBACKEND::pfnSetPCHSGeometry */
6990static int vmdkSetPCHSGeometry(void *pBackendData, PCVDGEOMETRY pPCHSGeometry)
6991{
6992 LogFlowFunc(("pBackendData=%#p pPCHSGeometry=%#p PCHS=%u/%u/%u\n", pBackendData, pPCHSGeometry, pPCHSGeometry->cCylinders, pPCHSGeometry->cHeads, pPCHSGeometry->cSectors));
6993 PVMDKIMAGE pImage = (PVMDKIMAGE)pBackendData;
6994 int rc;
6995
6996 AssertPtr(pImage);
6997
6998 if (pImage)
6999 {
7000 if (pImage->uOpenFlags & VD_OPEN_FLAGS_READONLY)
7001 {
7002 rc = VERR_VD_IMAGE_READ_ONLY;
7003 goto out;
7004 }
7005 rc = vmdkDescSetPCHSGeometry(pImage, pPCHSGeometry);
7006 if (RT_FAILURE(rc))
7007 goto out;
7008
7009 pImage->PCHSGeometry = *pPCHSGeometry;
7010 rc = VINF_SUCCESS;
7011 }
7012 else
7013 rc = VERR_VD_NOT_OPENED;
7014
7015out:
7016 LogFlowFunc(("returns %Rrc\n", rc));
7017 return rc;
7018}
7019
7020/** @copydoc VBOXHDDBACKEND::pfnGetLCHSGeometry */
7021static int vmdkGetLCHSGeometry(void *pBackendData, PVDGEOMETRY pLCHSGeometry)
7022{
7023 LogFlowFunc(("pBackendData=%#p pLCHSGeometry=%#p\n", pBackendData, pLCHSGeometry));
7024 PVMDKIMAGE pImage = (PVMDKIMAGE)pBackendData;
7025 int rc;
7026
7027 AssertPtr(pImage);
7028
7029 if (pImage)
7030 {
7031 if (pImage->LCHSGeometry.cCylinders)
7032 {
7033 *pLCHSGeometry = pImage->LCHSGeometry;
7034 rc = VINF_SUCCESS;
7035 }
7036 else
7037 rc = VERR_VD_GEOMETRY_NOT_SET;
7038 }
7039 else
7040 rc = VERR_VD_NOT_OPENED;
7041
7042 LogFlowFunc(("returns %Rrc (LCHS=%u/%u/%u)\n", rc, pLCHSGeometry->cCylinders, pLCHSGeometry->cHeads, pLCHSGeometry->cSectors));
7043 return rc;
7044}
7045
7046/** @copydoc VBOXHDDBACKEND::pfnSetLCHSGeometry */
7047static int vmdkSetLCHSGeometry(void *pBackendData, PCVDGEOMETRY pLCHSGeometry)
7048{
7049 LogFlowFunc(("pBackendData=%#p pLCHSGeometry=%#p LCHS=%u/%u/%u\n", pBackendData, pLCHSGeometry, pLCHSGeometry->cCylinders, pLCHSGeometry->cHeads, pLCHSGeometry->cSectors));
7050 PVMDKIMAGE pImage = (PVMDKIMAGE)pBackendData;
7051 int rc;
7052
7053 AssertPtr(pImage);
7054
7055 if (pImage)
7056 {
7057 if (pImage->uOpenFlags & VD_OPEN_FLAGS_READONLY)
7058 {
7059 rc = VERR_VD_IMAGE_READ_ONLY;
7060 goto out;
7061 }
7062 rc = vmdkDescSetLCHSGeometry(pImage, pLCHSGeometry);
7063 if (RT_FAILURE(rc))
7064 goto out;
7065
7066 pImage->LCHSGeometry = *pLCHSGeometry;
7067 rc = VINF_SUCCESS;
7068 }
7069 else
7070 rc = VERR_VD_NOT_OPENED;
7071
7072out:
7073 LogFlowFunc(("returns %Rrc\n", rc));
7074 return rc;
7075}
7076
7077/** @copydoc VBOXHDDBACKEND::pfnGetImageFlags */
7078static unsigned vmdkGetImageFlags(void *pBackendData)
7079{
7080 LogFlowFunc(("pBackendData=%#p\n", pBackendData));
7081 PVMDKIMAGE pImage = (PVMDKIMAGE)pBackendData;
7082 unsigned uImageFlags;
7083
7084 AssertPtr(pImage);
7085
7086 if (pImage)
7087 uImageFlags = pImage->uImageFlags;
7088 else
7089 uImageFlags = 0;
7090
7091 LogFlowFunc(("returns %#x\n", uImageFlags));
7092 return uImageFlags;
7093}
7094
7095/** @copydoc VBOXHDDBACKEND::pfnGetOpenFlags */
7096static unsigned vmdkGetOpenFlags(void *pBackendData)
7097{
7098 LogFlowFunc(("pBackendData=%#p\n", pBackendData));
7099 PVMDKIMAGE pImage = (PVMDKIMAGE)pBackendData;
7100 unsigned uOpenFlags;
7101
7102 AssertPtr(pImage);
7103
7104 if (pImage)
7105 uOpenFlags = pImage->uOpenFlags;
7106 else
7107 uOpenFlags = 0;
7108
7109 LogFlowFunc(("returns %#x\n", uOpenFlags));
7110 return uOpenFlags;
7111}
7112
7113/** @copydoc VBOXHDDBACKEND::pfnSetOpenFlags */
7114static int vmdkSetOpenFlags(void *pBackendData, unsigned uOpenFlags)
7115{
7116 LogFlowFunc(("pBackendData=%#p\n uOpenFlags=%#x", pBackendData, uOpenFlags));
7117 PVMDKIMAGE pImage = (PVMDKIMAGE)pBackendData;
7118 int rc;
7119
7120 /* Image must be opened and the new flags must be valid. Just readonly and
7121 * info flags are supported. */
7122 if (!pImage || (uOpenFlags & ~(VD_OPEN_FLAGS_READONLY | VD_OPEN_FLAGS_INFO | VD_OPEN_FLAGS_ASYNC_IO | VD_OPEN_FLAGS_SHAREABLE)))
7123 {
7124 rc = VERR_INVALID_PARAMETER;
7125 goto out;
7126 }
7127
7128 /* Implement this operation via reopening the image. */
7129 vmdkFreeImage(pImage, false);
7130 rc = vmdkOpenImage(pImage, uOpenFlags);
7131
7132out:
7133 LogFlowFunc(("returns %Rrc\n", rc));
7134 return rc;
7135}
7136
7137/** @copydoc VBOXHDDBACKEND::pfnGetComment */
7138static int vmdkGetComment(void *pBackendData, char *pszComment,
7139 size_t cbComment)
7140{
7141 LogFlowFunc(("pBackendData=%#p pszComment=%#p cbComment=%zu\n", pBackendData, pszComment, cbComment));
7142 PVMDKIMAGE pImage = (PVMDKIMAGE)pBackendData;
7143 int rc;
7144
7145 AssertPtr(pImage);
7146
7147 if (pImage)
7148 {
7149 const char *pszCommentEncoded = NULL;
7150 rc = vmdkDescDDBGetStr(pImage, &pImage->Descriptor,
7151 "ddb.comment", &pszCommentEncoded);
7152 if (rc == VERR_VD_VMDK_VALUE_NOT_FOUND)
7153 pszCommentEncoded = NULL;
7154 else if (RT_FAILURE(rc))
7155 goto out;
7156
7157 if (pszComment && pszCommentEncoded)
7158 rc = vmdkDecodeString(pszCommentEncoded, pszComment, cbComment);
7159 else
7160 {
7161 if (pszComment)
7162 *pszComment = '\0';
7163 rc = VINF_SUCCESS;
7164 }
7165 if (pszCommentEncoded)
7166 RTStrFree((char *)(void *)pszCommentEncoded);
7167 }
7168 else
7169 rc = VERR_VD_NOT_OPENED;
7170
7171out:
7172 LogFlowFunc(("returns %Rrc comment='%s'\n", rc, pszComment));
7173 return rc;
7174}
7175
7176/** @copydoc VBOXHDDBACKEND::pfnSetComment */
7177static int vmdkSetComment(void *pBackendData, const char *pszComment)
7178{
7179 LogFlowFunc(("pBackendData=%#p pszComment=\"%s\"\n", pBackendData, pszComment));
7180 PVMDKIMAGE pImage = (PVMDKIMAGE)pBackendData;
7181 int rc;
7182
7183 AssertPtr(pImage);
7184
7185 if (pImage->uOpenFlags & VD_OPEN_FLAGS_READONLY)
7186 {
7187 rc = VERR_VD_IMAGE_READ_ONLY;
7188 goto out;
7189 }
7190
7191 if (pImage)
7192 rc = vmdkSetImageComment(pImage, pszComment);
7193 else
7194 rc = VERR_VD_NOT_OPENED;
7195
7196out:
7197 LogFlowFunc(("returns %Rrc\n", rc));
7198 return rc;
7199}
7200
7201/** @copydoc VBOXHDDBACKEND::pfnGetUuid */
7202static int vmdkGetUuid(void *pBackendData, PRTUUID pUuid)
7203{
7204 LogFlowFunc(("pBackendData=%#p pUuid=%#p\n", pBackendData, pUuid));
7205 PVMDKIMAGE pImage = (PVMDKIMAGE)pBackendData;
7206 int rc;
7207
7208 AssertPtr(pImage);
7209
7210 if (pImage)
7211 {
7212 *pUuid = pImage->ImageUuid;
7213 rc = VINF_SUCCESS;
7214 }
7215 else
7216 rc = VERR_VD_NOT_OPENED;
7217
7218 LogFlowFunc(("returns %Rrc (%RTuuid)\n", rc, pUuid));
7219 return rc;
7220}
7221
7222/** @copydoc VBOXHDDBACKEND::pfnSetUuid */
7223static int vmdkSetUuid(void *pBackendData, PCRTUUID pUuid)
7224{
7225 LogFlowFunc(("pBackendData=%#p Uuid=%RTuuid\n", pBackendData, pUuid));
7226 PVMDKIMAGE pImage = (PVMDKIMAGE)pBackendData;
7227 int rc;
7228
7229 LogFlowFunc(("%RTuuid\n", pUuid));
7230 AssertPtr(pImage);
7231
7232 if (pImage)
7233 {
7234 if (!(pImage->uOpenFlags & VD_OPEN_FLAGS_READONLY))
7235 {
7236 pImage->ImageUuid = *pUuid;
7237 rc = vmdkDescDDBSetUuid(pImage, &pImage->Descriptor,
7238 VMDK_DDB_IMAGE_UUID, pUuid);
7239 if (RT_FAILURE(rc))
7240 return vmdkError(pImage, rc, RT_SRC_POS, N_("VMDK: error storing image UUID in descriptor in '%s'"), pImage->pszFilename);
7241 rc = VINF_SUCCESS;
7242 }
7243 else
7244 rc = VERR_VD_IMAGE_READ_ONLY;
7245 }
7246 else
7247 rc = VERR_VD_NOT_OPENED;
7248
7249 LogFlowFunc(("returns %Rrc\n", rc));
7250 return rc;
7251}
7252
7253/** @copydoc VBOXHDDBACKEND::pfnGetModificationUuid */
7254static int vmdkGetModificationUuid(void *pBackendData, PRTUUID pUuid)
7255{
7256 LogFlowFunc(("pBackendData=%#p pUuid=%#p\n", pBackendData, pUuid));
7257 PVMDKIMAGE pImage = (PVMDKIMAGE)pBackendData;
7258 int rc;
7259
7260 AssertPtr(pImage);
7261
7262 if (pImage)
7263 {
7264 *pUuid = pImage->ModificationUuid;
7265 rc = VINF_SUCCESS;
7266 }
7267 else
7268 rc = VERR_VD_NOT_OPENED;
7269
7270 LogFlowFunc(("returns %Rrc (%RTuuid)\n", rc, pUuid));
7271 return rc;
7272}
7273
7274/** @copydoc VBOXHDDBACKEND::pfnSetModificationUuid */
7275static int vmdkSetModificationUuid(void *pBackendData, PCRTUUID pUuid)
7276{
7277 LogFlowFunc(("pBackendData=%#p Uuid=%RTuuid\n", pBackendData, pUuid));
7278 PVMDKIMAGE pImage = (PVMDKIMAGE)pBackendData;
7279 int rc;
7280
7281 AssertPtr(pImage);
7282
7283 if (pImage)
7284 {
7285 if (!(pImage->uOpenFlags & VD_OPEN_FLAGS_READONLY))
7286 {
7287 /*
7288 * Only change the modification uuid if it changed.
7289 * Avoids a lot of unneccessary 1-byte writes during
7290 * vmdkFlush.
7291 */
7292 if (RTUuidCompare(&pImage->ModificationUuid, pUuid))
7293 {
7294 pImage->ModificationUuid = *pUuid;
7295 rc = vmdkDescDDBSetUuid(pImage, &pImage->Descriptor,
7296 VMDK_DDB_MODIFICATION_UUID, pUuid);
7297 if (RT_FAILURE(rc))
7298 return vmdkError(pImage, rc, RT_SRC_POS, N_("VMDK: error storing modification UUID in descriptor in '%s'"), pImage->pszFilename);
7299 }
7300 rc = VINF_SUCCESS;
7301 }
7302 else
7303 rc = VERR_VD_IMAGE_READ_ONLY;
7304 }
7305 else
7306 rc = VERR_VD_NOT_OPENED;
7307
7308 LogFlowFunc(("returns %Rrc\n", rc));
7309 return rc;
7310}
7311
7312/** @copydoc VBOXHDDBACKEND::pfnGetParentUuid */
7313static int vmdkGetParentUuid(void *pBackendData, PRTUUID pUuid)
7314{
7315 LogFlowFunc(("pBackendData=%#p pUuid=%#p\n", pBackendData, pUuid));
7316 PVMDKIMAGE pImage = (PVMDKIMAGE)pBackendData;
7317 int rc;
7318
7319 AssertPtr(pImage);
7320
7321 if (pImage)
7322 {
7323 *pUuid = pImage->ParentUuid;
7324 rc = VINF_SUCCESS;
7325 }
7326 else
7327 rc = VERR_VD_NOT_OPENED;
7328
7329 LogFlowFunc(("returns %Rrc (%RTuuid)\n", rc, pUuid));
7330 return rc;
7331}
7332
7333/** @copydoc VBOXHDDBACKEND::pfnSetParentUuid */
7334static int vmdkSetParentUuid(void *pBackendData, PCRTUUID pUuid)
7335{
7336 LogFlowFunc(("pBackendData=%#p Uuid=%RTuuid\n", pBackendData, pUuid));
7337 PVMDKIMAGE pImage = (PVMDKIMAGE)pBackendData;
7338 int rc;
7339
7340 AssertPtr(pImage);
7341
7342 if (pImage)
7343 {
7344 if (!(pImage->uOpenFlags & VD_OPEN_FLAGS_READONLY))
7345 {
7346 pImage->ParentUuid = *pUuid;
7347 rc = vmdkDescDDBSetUuid(pImage, &pImage->Descriptor,
7348 VMDK_DDB_PARENT_UUID, pUuid);
7349 if (RT_FAILURE(rc))
7350 return vmdkError(pImage, rc, RT_SRC_POS, N_("VMDK: error storing parent image UUID in descriptor in '%s'"), pImage->pszFilename);
7351 rc = VINF_SUCCESS;
7352 }
7353 else
7354 rc = VERR_VD_IMAGE_READ_ONLY;
7355 }
7356 else
7357 rc = VERR_VD_NOT_OPENED;
7358
7359 LogFlowFunc(("returns %Rrc\n", rc));
7360 return rc;
7361}
7362
7363/** @copydoc VBOXHDDBACKEND::pfnGetParentModificationUuid */
7364static int vmdkGetParentModificationUuid(void *pBackendData, PRTUUID pUuid)
7365{
7366 LogFlowFunc(("pBackendData=%#p pUuid=%#p\n", pBackendData, pUuid));
7367 PVMDKIMAGE pImage = (PVMDKIMAGE)pBackendData;
7368 int rc;
7369
7370 AssertPtr(pImage);
7371
7372 if (pImage)
7373 {
7374 *pUuid = pImage->ParentModificationUuid;
7375 rc = VINF_SUCCESS;
7376 }
7377 else
7378 rc = VERR_VD_NOT_OPENED;
7379
7380 LogFlowFunc(("returns %Rrc (%RTuuid)\n", rc, pUuid));
7381 return rc;
7382}
7383
7384/** @copydoc VBOXHDDBACKEND::pfnSetParentModificationUuid */
7385static int vmdkSetParentModificationUuid(void *pBackendData, PCRTUUID pUuid)
7386{
7387 LogFlowFunc(("pBackendData=%#p Uuid=%RTuuid\n", pBackendData, pUuid));
7388 PVMDKIMAGE pImage = (PVMDKIMAGE)pBackendData;
7389 int rc;
7390
7391 AssertPtr(pImage);
7392
7393 if (pImage)
7394 {
7395 if (!(pImage->uOpenFlags & VD_OPEN_FLAGS_READONLY))
7396 {
7397 pImage->ParentModificationUuid = *pUuid;
7398 rc = vmdkDescDDBSetUuid(pImage, &pImage->Descriptor,
7399 VMDK_DDB_PARENT_MODIFICATION_UUID, pUuid);
7400 if (RT_FAILURE(rc))
7401 return vmdkError(pImage, rc, RT_SRC_POS, N_("VMDK: error storing parent image UUID in descriptor in '%s'"), pImage->pszFilename);
7402 rc = VINF_SUCCESS;
7403 }
7404 else
7405 rc = VERR_VD_IMAGE_READ_ONLY;
7406 }
7407 else
7408 rc = VERR_VD_NOT_OPENED;
7409
7410 LogFlowFunc(("returns %Rrc\n", rc));
7411 return rc;
7412}
7413
7414/** @copydoc VBOXHDDBACKEND::pfnDump */
7415static void vmdkDump(void *pBackendData)
7416{
7417 PVMDKIMAGE pImage = (PVMDKIMAGE)pBackendData;
7418
7419 AssertPtr(pImage);
7420 if (pImage)
7421 {
7422 vmdkMessage(pImage, "Header: Geometry PCHS=%u/%u/%u LCHS=%u/%u/%u cbSector=%llu\n",
7423 pImage->PCHSGeometry.cCylinders, pImage->PCHSGeometry.cHeads, pImage->PCHSGeometry.cSectors,
7424 pImage->LCHSGeometry.cCylinders, pImage->LCHSGeometry.cHeads, pImage->LCHSGeometry.cSectors,
7425 VMDK_BYTE2SECTOR(pImage->cbSize));
7426 vmdkMessage(pImage, "Header: uuidCreation={%RTuuid}\n", &pImage->ImageUuid);
7427 vmdkMessage(pImage, "Header: uuidModification={%RTuuid}\n", &pImage->ModificationUuid);
7428 vmdkMessage(pImage, "Header: uuidParent={%RTuuid}\n", &pImage->ParentUuid);
7429 vmdkMessage(pImage, "Header: uuidParentModification={%RTuuid}\n", &pImage->ParentModificationUuid);
7430 }
7431}
7432
7433/** @copydoc VBOXHDDBACKEND::pfnIsAsyncIOSupported */
7434static bool vmdkIsAsyncIOSupported(void *pBackendData)
7435{
7436 PVMDKIMAGE pImage = (PVMDKIMAGE)pBackendData;
7437
7438 /* We do not support async I/O for stream optimized VMDK images. */
7439 return (pImage->uImageFlags & VD_VMDK_IMAGE_FLAGS_STREAM_OPTIMIZED) == 0;
7440}
7441
/** @copydoc VBOXHDDBACKEND::pfnAsyncRead */
static int vmdkAsyncRead(void *pBackendData, uint64_t uOffset, size_t cbRead,
                         PVDIOCTX pIoCtx, size_t *pcbActuallyRead)
{
    LogFlowFunc(("pBackendData=%#p uOffset=%llu pIoCtx=%#p cbToRead=%zu pcbActuallyRead=%#p\n",
                 pBackendData, uOffset, pIoCtx, cbRead, pcbActuallyRead));
    PVMDKIMAGE pImage = (PVMDKIMAGE)pBackendData;
    PVMDKEXTENT pExtent;
    uint64_t uSectorExtentRel;  /* Sector offset relative to the extent start. */
    uint64_t uSectorExtentAbs;  /* Absolute sector offset within the extent file. */
    int rc;

    AssertPtr(pImage);
    /* Requests must be sector (512 byte) aligned. */
    Assert(uOffset % 512 == 0);
    Assert(cbRead % 512 == 0);

    /* Reject reads beyond the nominal disk size and zero-length reads. */
    if (   uOffset + cbRead > pImage->cbSize
        || cbRead == 0)
    {
        rc = VERR_INVALID_PARAMETER;
        goto out;
    }

    /* Locate the extent covering uOffset and the offset inside it. */
    rc = vmdkFindExtent(pImage, VMDK_BYTE2SECTOR(uOffset),
                        &pExtent, &uSectorExtentRel);
    if (RT_FAILURE(rc))
        goto out;

    /* Check access permissions as defined in the extent descriptor. */
    if (pExtent->enmAccess == VMDKACCESS_NOACCESS)
    {
        rc = VERR_VD_VMDK_INVALID_STATE;
        goto out;
    }

    /* Clip read range to remain in this extent. */
    cbRead = RT_MIN(cbRead, VMDK_SECTOR2BYTE(pExtent->uSectorOffset + pExtent->cNominalSectors - uSectorExtentRel));

    /* Handle the read according to the current extent type. */
    switch (pExtent->enmType)
    {
        case VMDKETYPE_HOSTED_SPARSE:
#ifdef VBOX_WITH_VMDK_ESX
        case VMDKETYPE_ESX_SPARSE:
#endif /* VBOX_WITH_VMDK_ESX */
            /* Async variant of the grain table lookup. */
            rc = vmdkGetSectorAsync(pImage, pIoCtx, pExtent,
                                    uSectorExtentRel, &uSectorExtentAbs);
            if (RT_FAILURE(rc))
                goto out;
            /* Clip read range to at most the rest of the grain. */
            cbRead = RT_MIN(cbRead, VMDK_SECTOR2BYTE(pExtent->cSectorsPerGrain - uSectorExtentRel % pExtent->cSectorsPerGrain));
            Assert(!(cbRead % 512));
            if (uSectorExtentAbs == 0)
                /* Grain not allocated in this image; signal it to the caller. */
                rc = VERR_VD_BLOCK_FREE;
            else
            {
                AssertMsg(!(pImage->uImageFlags & VD_VMDK_IMAGE_FLAGS_STREAM_OPTIMIZED), ("Async I/O is not supported for stream optimized VMDK's\n"));
                rc = vmdkFileReadUserAsync(pImage, pExtent->pFile,
                                           VMDK_SECTOR2BYTE(uSectorExtentAbs),
                                           pIoCtx, cbRead);
            }
            break;
        case VMDKETYPE_VMFS:
        case VMDKETYPE_FLAT:
            /* Flat extents map 1:1 onto the file contents. */
            rc = vmdkFileReadUserAsync(pImage, pExtent->pFile,
                                       VMDK_SECTOR2BYTE(uSectorExtentRel),
                                       pIoCtx, cbRead);
            break;
        case VMDKETYPE_ZERO:
            size_t cbSet;

            /* Zero extents have no backing storage: fill the I/O context
             * buffer with zeros directly. */
            cbSet = vmdkFileIoCtxSet(pImage, pIoCtx, 0, cbRead);
            Assert(cbSet == cbRead);

            rc = VINF_SUCCESS;
            break;
    }
    if (pcbActuallyRead)
        *pcbActuallyRead = cbRead;

out:
    LogFlowFunc(("returns %Rrc\n", rc));
    return rc;
}
7526
7527/** @copydoc VBOXHDDBACKEND::pfnAsyncWrite */
7528static int vmdkAsyncWrite(void *pBackendData, uint64_t uOffset, size_t cbWrite,
7529 PVDIOCTX pIoCtx,
7530 size_t *pcbWriteProcess, size_t *pcbPreRead,
7531 size_t *pcbPostRead, unsigned fWrite)
7532{
7533 LogFlowFunc(("pBackendData=%#p uOffset=%llu pIoCtx=%#p cbToWrite=%zu pcbWriteProcess=%#p pcbPreRead=%#p pcbPostRead=%#p\n",
7534 pBackendData, uOffset, pIoCtx, cbWrite, pcbWriteProcess, pcbPreRead, pcbPostRead));
7535 PVMDKIMAGE pImage = (PVMDKIMAGE)pBackendData;
7536 PVMDKEXTENT pExtent;
7537 uint64_t uSectorExtentRel;
7538 uint64_t uSectorExtentAbs;
7539 int rc;
7540
7541 AssertPtr(pImage);
7542 Assert(uOffset % 512 == 0);
7543 Assert(cbWrite % 512 == 0);
7544
7545 if (pImage->uOpenFlags & VD_OPEN_FLAGS_READONLY)
7546 {
7547 rc = VERR_VD_IMAGE_READ_ONLY;
7548 goto out;
7549 }
7550
7551 if (cbWrite == 0)
7552 {
7553 rc = VERR_INVALID_PARAMETER;
7554 goto out;
7555 }
7556
7557 /* No size check here, will do that later when the extent is located.
7558 * There are sparse images out there which according to the spec are
7559 * invalid, because the total size is not a multiple of the grain size.
7560 * Also for sparse images which are stitched together in odd ways (not at
7561 * grain boundaries, and with the nominal size not being a multiple of the
7562 * grain size), this would prevent writing to the last grain. */
7563
7564 rc = vmdkFindExtent(pImage, VMDK_BYTE2SECTOR(uOffset),
7565 &pExtent, &uSectorExtentRel);
7566 if (RT_FAILURE(rc))
7567 goto out;
7568
7569 /* Check access permissions as defined in the extent descriptor. */
7570 if (pExtent->enmAccess != VMDKACCESS_READWRITE)
7571 {
7572 rc = VERR_VD_VMDK_INVALID_STATE;
7573 goto out;
7574 }
7575
7576 /* Handle the write according to the current extent type. */
7577 switch (pExtent->enmType)
7578 {
7579 case VMDKETYPE_HOSTED_SPARSE:
7580#ifdef VBOX_WITH_VMDK_ESX
7581 case VMDKETYPE_ESX_SPARSE:
7582#endif /* VBOX_WITH_VMDK_ESX */
7583 rc = vmdkGetSectorAsync(pImage, pIoCtx, pExtent, uSectorExtentRel,
7584 &uSectorExtentAbs);
7585 if (RT_FAILURE(rc))
7586 goto out;
7587 /* Clip write range to at most the rest of the grain. */
7588 cbWrite = RT_MIN(cbWrite, VMDK_SECTOR2BYTE(pExtent->cSectorsPerGrain - uSectorExtentRel % pExtent->cSectorsPerGrain));
7589 if ( pImage->uImageFlags & VD_VMDK_IMAGE_FLAGS_STREAM_OPTIMIZED
7590 && uSectorExtentRel < (uint64_t)pExtent->uLastGrainWritten * pExtent->cSectorsPerGrain)
7591 {
7592 rc = VERR_VD_VMDK_INVALID_WRITE;
7593 goto out;
7594 }
7595 if (uSectorExtentAbs == 0)
7596 {
7597 if (cbWrite == VMDK_SECTOR2BYTE(pExtent->cSectorsPerGrain))
7598 {
7599 /* Full block write to a previously unallocated block.
7600 * Check if the caller wants to avoid the automatic alloc. */
7601 if (!(fWrite & VD_WRITE_NO_ALLOC))
7602 {
7603 /* Allocate GT and find out where to store the grain. */
7604 rc = vmdkAllocGrainAsync(pImage, pExtent, pIoCtx,
7605 uSectorExtentRel, cbWrite);
7606 }
7607 else
7608 rc = VERR_VD_BLOCK_FREE;
7609 *pcbPreRead = 0;
7610 *pcbPostRead = 0;
7611 }
7612 else
7613 {
7614 /* Clip write range to remain in this extent. */
7615 cbWrite = RT_MIN(cbWrite, VMDK_SECTOR2BYTE(pExtent->uSectorOffset + pExtent->cNominalSectors - uSectorExtentRel));
7616 *pcbPreRead = VMDK_SECTOR2BYTE(uSectorExtentRel % pExtent->cSectorsPerGrain);
7617 *pcbPostRead = VMDK_SECTOR2BYTE(pExtent->cSectorsPerGrain) - cbWrite - *pcbPreRead;
7618 rc = VERR_VD_BLOCK_FREE;
7619 }
7620 }
7621 else
7622 {
7623 Assert(!(pImage->uImageFlags & VD_VMDK_IMAGE_FLAGS_STREAM_OPTIMIZED));
7624 rc = vmdkFileWriteUserAsync(pImage, pExtent->pFile,
7625 VMDK_SECTOR2BYTE(uSectorExtentAbs),
7626 pIoCtx, cbWrite, NULL, NULL);
7627 }
7628 break;
7629 case VMDKETYPE_VMFS:
7630 case VMDKETYPE_FLAT:
7631 /* Clip write range to remain in this extent. */
7632 cbWrite = RT_MIN(cbWrite, VMDK_SECTOR2BYTE(pExtent->uSectorOffset + pExtent->cNominalSectors - uSectorExtentRel));
7633 rc = vmdkFileWriteUserAsync(pImage, pExtent->pFile,
7634 VMDK_SECTOR2BYTE(uSectorExtentRel),
7635 pIoCtx, cbWrite, NULL, NULL);
7636 break;
7637 case VMDKETYPE_ZERO:
7638 /* Clip write range to remain in this extent. */
7639 cbWrite = RT_MIN(cbWrite, VMDK_SECTOR2BYTE(pExtent->uSectorOffset + pExtent->cNominalSectors - uSectorExtentRel));
7640 break;
7641 }
7642
7643 if (pcbWriteProcess)
7644 *pcbWriteProcess = cbWrite;
7645
7646out:
7647 LogFlowFunc(("returns %Rrc\n", rc));
7648 return rc;
7649}
7650
/** @copydoc VBOXHDDBACKEND::pfnAsyncFlush */
static int vmdkAsyncFlush(void *pBackendData, PVDIOCTX pIoCtx)
{
    PVMDKIMAGE pImage = (PVMDKIMAGE)pBackendData;
    PVMDKEXTENT pExtent;
    int rc = VINF_SUCCESS;

    /* For every extent: first push out dirty sparse-extent metadata (header,
     * and footer copy for footer-style images), then flush the underlying
     * file if the image is writable. */
    for (unsigned i = 0; i < pImage->cExtents; i++)
    {
        pExtent = &pImage->pExtents[i];
        if (pExtent->pFile != NULL && pExtent->fMetaDirty)
        {
            switch (pExtent->enmType)
            {
                case VMDKETYPE_HOSTED_SPARSE:
#ifdef VBOX_WITH_VMDK_ESX
                case VMDKETYPE_ESX_SPARSE:
#endif /* VBOX_WITH_VMDK_ESX */
                    /* Write the up-to-date header at the start of the extent.
                     * VERR_VD_ASYNC_IO_IN_PROGRESS is the expected "request
                     * queued" status, not a failure. */
                    rc = vmdkWriteMetaSparseExtentAsync(pImage, pExtent, 0, pIoCtx);
                    if (RT_FAILURE(rc) && (rc != VERR_VD_ASYNC_IO_IN_PROGRESS))
                        goto out;
                    if (pExtent->fFooter)
                    {
                        uint64_t cbSize;
                        rc = vmdkFileGetSize(pImage, pExtent->pFile, &cbSize);
                        if (RT_FAILURE(rc))
                            goto out;
                        cbSize = RT_ALIGN_64(cbSize, 512);
                        /* The footer copy of the header lives in the
                         * second-to-last sector of the (sector-aligned) file.
                         * NOTE(review): this uses the synchronous
                         * vmdkWriteMetaSparseExtent inside the async flush
                         * path — confirm an async variant is not required. */
                        rc = vmdkWriteMetaSparseExtent(pImage, pExtent, cbSize - 2*512);
                        if (RT_FAILURE(rc) && (rc != VERR_VD_ASYNC_IO_IN_PROGRESS))
                            goto out;
                    }
                    break;
                case VMDKETYPE_VMFS:
                case VMDKETYPE_FLAT:
                    /* Nothing to do. */
                    break;
                case VMDKETYPE_ZERO:
                default:
                    /* Zero extents carry no metadata; anything else marked
                     * dirty indicates an internal logic error. */
                    AssertMsgFailed(("extent with type %d marked as dirty\n",
                                     pExtent->enmType));
                    break;
            }
        }
        /* Second step: flush file contents for extents backed by a file. */
        switch (pExtent->enmType)
        {
            case VMDKETYPE_HOSTED_SPARSE:
#ifdef VBOX_WITH_VMDK_ESX
            case VMDKETYPE_ESX_SPARSE:
#endif /* VBOX_WITH_VMDK_ESX */
            case VMDKETYPE_VMFS:
            case VMDKETYPE_FLAT:
                /** @todo implement proper path absolute check. */
                /* Skip extents referenced via an absolute path (heuristic:
                 * basename starting with a path separator) — presumably raw
                 * disk/partition backends that must not be flushed here. */
                if (   pExtent->pFile != NULL
                    && !(pImage->uOpenFlags & VD_OPEN_FLAGS_READONLY)
                    && !(pExtent->pszBasename[0] == RTPATH_SLASH))
                    rc = vmdkFileFlushAsync(pImage, pExtent->pFile, pIoCtx);
                break;
            case VMDKETYPE_ZERO:
                /* No need to do anything for this extent. */
                break;
            default:
                AssertMsgFailed(("unknown extent type %d\n", pExtent->enmType));
                break;
        }
    }

out:
    return rc;
}
7721
7722
/**
 * Backend descriptor for the "VMDKstream" variant, restricted to
 * streamOptimized (compressed, append-only) VMDK images.
 *
 * Uses the vmdks* entry points; operations that make no sense for an
 * append-only stream (rename, async I/O, timestamps, parent filename,
 * compact, resize) are NULL.  Field order is fixed by VBOXHDDBACKEND —
 * do not reorder entries.
 */
VBOXHDDBACKEND g_VmdkStreamBackend =
{
    /* pszBackendName */
    "VMDKstream",
    /* cbSize */
    sizeof(VBOXHDDBACKEND),
    /* uBackendCaps */
    VD_CAP_UUID | VD_CAP_CREATE_DYNAMIC | VD_CAP_FILE | VD_CAP_VFS,
    /* papszFileExtensions */
    s_apszVmdkFileExtensions,
    /* paConfigInfo */
    NULL,
    /* hPlugin */
    NIL_RTLDRMOD,
    /* pfnCheckIfValid */
    vmdksCheckIfValid,
    /* pfnOpen */
    vmdksOpen,
    /* pfnCreate */
    vmdksCreate,
    /* pfnRename */
    NULL,
    /* pfnClose */
    vmdksClose,
    /* pfnRead */
    vmdksRead,
    /* pfnWrite */
    vmdksWrite,
    /* pfnFlush */
    vmdksFlush,
    /* pfnGetVersion */
    vmdkGetVersion,
    /* pfnGetSize */
    vmdkGetSize,
    /* pfnGetFileSize */
    vmdkGetFileSize,
    /* pfnGetPCHSGeometry */
    vmdkGetPCHSGeometry,
    /* pfnSetPCHSGeometry */
    vmdksSetPCHSGeometry,
    /* pfnGetLCHSGeometry */
    vmdkGetLCHSGeometry,
    /* pfnSetLCHSGeometry */
    vmdksSetLCHSGeometry,
    /* pfnGetImageFlags */
    vmdkGetImageFlags,
    /* pfnGetOpenFlags */
    vmdkGetOpenFlags,
    /* pfnSetOpenFlags */
    vmdksSetOpenFlags,
    /* pfnGetComment */
    vmdkGetComment,
    /* pfnSetComment */
    vmdksSetComment,
    /* pfnGetUuid */
    vmdkGetUuid,
    /* pfnSetUuid */
    vmdksSetUuid,
    /* pfnGetModificationUuid */
    vmdkGetModificationUuid,
    /* pfnSetModificationUuid */
    vmdksSetModificationUuid,
    /* pfnGetParentUuid */
    vmdkGetParentUuid,
    /* pfnSetParentUuid */
    vmdksSetParentUuid,
    /* pfnGetParentModificationUuid */
    vmdkGetParentModificationUuid,
    /* pfnSetParentModificationUuid */
    vmdksSetParentModificationUuid,
    /* pfnDump */
    vmdkDump,
    /* pfnGetTimeStamp */
    NULL,
    /* pfnGetParentTimeStamp */
    NULL,
    /* pfnSetParentTimeStamp */
    NULL,
    /* pfnGetParentFilename */
    NULL,
    /* pfnSetParentFilename */
    NULL,
    /* pfnIsAsyncIOSupported */
    NULL,
    /* pfnAsyncRead */
    NULL,
    /* pfnAsyncWrite */
    NULL,
    /* pfnAsyncFlush */
    NULL,
    /* pfnComposeLocation */
    genericFileComposeLocation,
    /* pfnComposeName */
    genericFileComposeName,
    /* pfnCompact */
    NULL,
    /* pfnResize */
    NULL
};
7822
7823
/**
 * Backend descriptor for the general-purpose "VMDK" backend, handling all
 * regular (non-stream) VMDK variants: sparse, flat, split-2G, VMFS, diff.
 *
 * Advertises async I/O (VD_CAP_ASYNC) and wires up the vmdk* entry points;
 * timestamp/parent-filename/compact/resize operations are not implemented
 * (NULL).  Field order is fixed by VBOXHDDBACKEND — do not reorder entries.
 */
VBOXHDDBACKEND g_VmdkBackend =
{
    /* pszBackendName */
    "VMDK",
    /* cbSize */
    sizeof(VBOXHDDBACKEND),
    /* uBackendCaps */
    VD_CAP_UUID | VD_CAP_CREATE_FIXED | VD_CAP_CREATE_DYNAMIC
    | VD_CAP_CREATE_SPLIT_2G | VD_CAP_DIFF | VD_CAP_FILE | VD_CAP_ASYNC
    | VD_CAP_VFS,
    /* papszFileExtensions */
    s_apszVmdkFileExtensions,
    /* paConfigInfo */
    NULL,
    /* hPlugin */
    NIL_RTLDRMOD,
    /* pfnCheckIfValid */
    vmdkCheckIfValid,
    /* pfnOpen */
    vmdkOpen,
    /* pfnCreate */
    vmdkCreate,
    /* pfnRename */
    vmdkRename,
    /* pfnClose */
    vmdkClose,
    /* pfnRead */
    vmdkRead,
    /* pfnWrite */
    vmdkWrite,
    /* pfnFlush */
    vmdkFlush,
    /* pfnGetVersion */
    vmdkGetVersion,
    /* pfnGetSize */
    vmdkGetSize,
    /* pfnGetFileSize */
    vmdkGetFileSize,
    /* pfnGetPCHSGeometry */
    vmdkGetPCHSGeometry,
    /* pfnSetPCHSGeometry */
    vmdkSetPCHSGeometry,
    /* pfnGetLCHSGeometry */
    vmdkGetLCHSGeometry,
    /* pfnSetLCHSGeometry */
    vmdkSetLCHSGeometry,
    /* pfnGetImageFlags */
    vmdkGetImageFlags,
    /* pfnGetOpenFlags */
    vmdkGetOpenFlags,
    /* pfnSetOpenFlags */
    vmdkSetOpenFlags,
    /* pfnGetComment */
    vmdkGetComment,
    /* pfnSetComment */
    vmdkSetComment,
    /* pfnGetUuid */
    vmdkGetUuid,
    /* pfnSetUuid */
    vmdkSetUuid,
    /* pfnGetModificationUuid */
    vmdkGetModificationUuid,
    /* pfnSetModificationUuid */
    vmdkSetModificationUuid,
    /* pfnGetParentUuid */
    vmdkGetParentUuid,
    /* pfnSetParentUuid */
    vmdkSetParentUuid,
    /* pfnGetParentModificationUuid */
    vmdkGetParentModificationUuid,
    /* pfnSetParentModificationUuid */
    vmdkSetParentModificationUuid,
    /* pfnDump */
    vmdkDump,
    /* pfnGetTimeStamp */
    NULL,
    /* pfnGetParentTimeStamp */
    NULL,
    /* pfnSetParentTimeStamp */
    NULL,
    /* pfnGetParentFilename */
    NULL,
    /* pfnSetParentFilename */
    NULL,
    /* pfnIsAsyncIOSupported */
    vmdkIsAsyncIOSupported,
    /* pfnAsyncRead */
    vmdkAsyncRead,
    /* pfnAsyncWrite */
    vmdkAsyncWrite,
    /* pfnAsyncFlush */
    vmdkAsyncFlush,
    /* pfnComposeLocation */
    genericFileComposeLocation,
    /* pfnComposeName */
    genericFileComposeName,
    /* pfnCompact */
    NULL,
    /* pfnResize */
    NULL
};
Note: See TracBrowser for help on using the repository browser.

© 2024 Oracle Support Privacy / Do Not Sell My Info Terms of Use Trademark Policy Automated Access Etiquette