VirtualBox

source: vbox/trunk/src/VBox/Devices/Storage/VmdkHDDCore.cpp@ 31713

Last change on this file since 31713 was 31380, checked in by vboxsync, 14 years ago

VmdkHDDCore, VBoxHDD: warnings

  • Property svn:eol-style set to native
  • Property svn:keywords set to Author Date Id Revision
File size: 251.8 KB
Line 
1/* $Id: VmdkHDDCore.cpp 31380 2010-08-05 07:33:32Z vboxsync $ */
2/** @file
3 * VMDK Disk image, Core Code.
4 */
5
6/*
7 * Copyright (C) 2006-2010 Oracle Corporation
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 */
17
18/*******************************************************************************
19* Header Files *
20*******************************************************************************/
21#define LOG_GROUP LOG_GROUP_VD_VMDK
22#include <VBox/VBoxHDD-Plugin.h>
23#include <VBox/err.h>
24
25#include <VBox/log.h>
26#include <iprt/assert.h>
27#include <iprt/alloc.h>
28#include <iprt/uuid.h>
29#include <iprt/file.h>
30#include <iprt/path.h>
31#include <iprt/string.h>
32#include <iprt/rand.h>
33#include <iprt/zip.h>
34
35
36/*******************************************************************************
37* Constants And Macros, Structures and Typedefs *
38*******************************************************************************/
39
40/** Maximum encoded string size (including NUL) we allow for VMDK images.
41 * Deliberately not set high to avoid running out of descriptor space. */
42#define VMDK_ENCODED_COMMENT_MAX 1024
43
44/** VMDK descriptor DDB entry for PCHS cylinders. */
45#define VMDK_DDB_GEO_PCHS_CYLINDERS "ddb.geometry.cylinders"
46
47/** VMDK descriptor DDB entry for PCHS heads. */
48#define VMDK_DDB_GEO_PCHS_HEADS "ddb.geometry.heads"
49
50/** VMDK descriptor DDB entry for PCHS sectors. */
51#define VMDK_DDB_GEO_PCHS_SECTORS "ddb.geometry.sectors"
52
53/** VMDK descriptor DDB entry for LCHS cylinders. */
54#define VMDK_DDB_GEO_LCHS_CYLINDERS "ddb.geometry.biosCylinders"
55
56/** VMDK descriptor DDB entry for LCHS heads. */
57#define VMDK_DDB_GEO_LCHS_HEADS "ddb.geometry.biosHeads"
58
59/** VMDK descriptor DDB entry for LCHS sectors. */
60#define VMDK_DDB_GEO_LCHS_SECTORS "ddb.geometry.biosSectors"
61
62/** VMDK descriptor DDB entry for image UUID. */
63#define VMDK_DDB_IMAGE_UUID "ddb.uuid.image"
64
65/** VMDK descriptor DDB entry for image modification UUID. */
66#define VMDK_DDB_MODIFICATION_UUID "ddb.uuid.modification"
67
68/** VMDK descriptor DDB entry for parent image UUID. */
69#define VMDK_DDB_PARENT_UUID "ddb.uuid.parent"
70
71/** VMDK descriptor DDB entry for parent image modification UUID. */
72#define VMDK_DDB_PARENT_MODIFICATION_UUID "ddb.uuid.parentmodification"
73
74/** No compression for streamOptimized files. */
75#define VMDK_COMPRESSION_NONE 0
76
77/** Deflate compression for streamOptimized files. */
78#define VMDK_COMPRESSION_DEFLATE 1
79
80/** Marker that the actual GD value is stored in the footer. */
81#define VMDK_GD_AT_END 0xffffffffffffffffULL
82
83/** Marker for end-of-stream in streamOptimized images. */
84#define VMDK_MARKER_EOS 0
85
86/** Marker for grain table block in streamOptimized images. */
87#define VMDK_MARKER_GT 1
88
89/** Marker for grain directory block in streamOptimized images. */
90#define VMDK_MARKER_GD 2
91
92/** Marker for footer in streamOptimized images. */
93#define VMDK_MARKER_FOOTER 3
94
95/** Dummy marker for "don't check the marker value". */
96#define VMDK_MARKER_IGNORE 0xffffffffU
97
98/**
99 * Magic number for hosted images created by VMware Workstation 4, VMware
100 * Workstation 5, VMware Server or VMware Player. Not necessarily sparse.
101 */
102#define VMDK_SPARSE_MAGICNUMBER 0x564d444b /* 'V' 'M' 'D' 'K' */
103
/**
 * VMDK hosted binary extent header. The "Sparse" is a total misnomer, as
 * this header is also used for monolithic flat images.
 *
 * Byte-packed on-disk layout; all multi-byte fields are little endian on
 * disk (see the VMware Virtual Disk Format specification). Sector-valued
 * fields (capacity, grainSize, *Offset, overHead) are in units of 512-byte
 * sectors.
 */
#pragma pack(1)
typedef struct SparseExtentHeader
{
    uint32_t magicNumber;       /**< VMDK_SPARSE_MAGICNUMBER ('VMDK'). */
    uint32_t version;           /**< Header format version. */
    uint32_t flags;             /**< Validity/capability flags. */
    uint64_t capacity;          /**< Extent capacity in sectors. */
    uint64_t grainSize;         /**< Grain (block) size in sectors. */
    uint64_t descriptorOffset;  /**< Embedded descriptor offset in sectors, 0 if none. */
    uint64_t descriptorSize;    /**< Embedded descriptor size in sectors. */
    uint32_t numGTEsPerGT;      /**< Number of entries in one grain table. */
    uint64_t rgdOffset;         /**< Redundant grain directory offset in sectors. */
    uint64_t gdOffset;          /**< Grain directory offset in sectors; VMDK_GD_AT_END means "look in the footer". */
    uint64_t overHead;          /**< Total metadata overhead in sectors. */
    bool uncleanShutdown;       /**< Set while the extent is open for writing; used for consistency checking. */
    char singleEndLineChar;     /**< Line-ending detection chars per the VMDK spec ('\n', ' ', '\r', '\n'). */
    char nonEndLineChar;
    char doubleEndLineChar1;
    char doubleEndLineChar2;
    uint16_t compressAlgorithm; /**< Compression algorithm, one of VMDK_COMPRESSION_*. */
    uint8_t pad[433];           /**< Padding to make the header exactly one 512-byte sector. */
} SparseExtentHeader;
#pragma pack()
131
132/** VMDK capacity for a single chunk when 2G splitting is turned on. Should be
133 * divisible by the default grain size (64K) */
134#define VMDK_2G_SPLIT_SIZE (2047 * 1024 * 1024)
135
/** VMDK streamOptimized file format marker. The type field may or may not
 * be actually valid, but there's always data to read there.
 *
 * Byte-packed on-disk structure. For a compressed grain marker cbSize is
 * non-zero (compressed payload size in bytes) and uType is overlaid by
 * payload data; for metadata markers cbSize is 0 and uType is one of the
 * VMDK_MARKER_* values. */
#pragma pack(1)
typedef struct VMDKMARKER
{
    uint64_t uSector; /**< Grain LBA for data markers, payload size in sectors for metadata markers. */
    uint32_t cbSize;  /**< Compressed data size in bytes; 0 indicates a metadata marker. */
    uint32_t uType;   /**< Marker type (VMDK_MARKER_*); only meaningful when cbSize is 0. */
} VMDKMARKER;
#pragma pack()
146
147
148#ifdef VBOX_WITH_VMDK_ESX
149
150/** @todo the ESX code is not tested, not used, and lacks error messages. */
151
152/**
153 * Magic number for images created by VMware GSX Server 3 or ESX Server 3.
154 */
155#define VMDK_ESX_SPARSE_MAGICNUMBER 0x44574f43 /* 'C' 'O' 'W' 'D' */
156
#pragma pack(1)
/** On-disk header of ESX/GSX "COWD" sparse extents. Byte-packed; only
 * compiled in with VBOX_WITH_VMDK_ESX. All sector values are 512-byte
 * sectors. */
typedef struct COWDisk_Header
{
    uint32_t magicNumber;       /**< VMDK_ESX_SPARSE_MAGICNUMBER ('COWD'). */
    uint32_t version;           /**< Header format version. */
    uint32_t flags;             /**< Capability flags. */
    uint32_t numSectors;        /**< Capacity of the extent in sectors. */
    uint32_t grainSize;         /**< Grain size in sectors. */
    uint32_t gdOffset;          /**< Grain directory offset in sectors. */
    uint32_t numGDEntries;      /**< Number of grain directory entries. */
    uint32_t freeSector;        /**< Next free sector (legacy allocation hint). */
    /* The spec incompletely documents quite a few further fields, but states
     * that they are unused by the current format. Replace them by padding. */
    char reserved1[1604];
    uint32_t savedGeneration;   /**< Generation counter saved on clean close. */
    char reserved2[8];
    uint32_t uncleanShutdown;   /**< Non-zero while the extent is open for writing. */
    char padding[396];          /**< Pad the header to a multiple of 512 bytes. */
} COWDisk_Header;
#pragma pack()
177#endif /* VBOX_WITH_VMDK_ESX */
178
179
180/** Convert sector number/size to byte offset/size. */
181#define VMDK_SECTOR2BYTE(u) ((uint64_t)(u) << 9)
182
183/** Convert byte offset/size to sector number/size. */
184#define VMDK_BYTE2SECTOR(u) ((u) >> 9)
185
/**
 * VMDK extent type, as parsed from the extent lines of the descriptor.
 */
typedef enum VMDKETYPE
{
    /** Hosted sparse extent. */
    VMDKETYPE_HOSTED_SPARSE = 1,
    /** Flat extent. */
    VMDKETYPE_FLAT,
    /** Zero extent. No backing file, reads return zeros. */
    VMDKETYPE_ZERO,
    /** VMFS extent, used by ESX. */
    VMDKETYPE_VMFS
#ifdef VBOX_WITH_VMDK_ESX
    ,
    /** ESX sparse extent. */
    VMDKETYPE_ESX_SPARSE
#endif /* VBOX_WITH_VMDK_ESX */
} VMDKETYPE, *PVMDKETYPE;
205
/**
 * VMDK access type for an extent, as parsed from the extent lines of the
 * descriptor.
 */
typedef enum VMDKACCESS
{
    /** No access allowed. */
    VMDKACCESS_NOACCESS = 0,
    /** Read-only access. */
    VMDKACCESS_READONLY,
    /** Read-write access. */
    VMDKACCESS_READWRITE
} VMDKACCESS, *PVMDKACCESS;
218
219/** Forward declaration for PVMDKIMAGE. */
220typedef struct VMDKIMAGE *PVMDKIMAGE;
221
/**
 * Extents files entry. Used for opening a particular file only once.
 *
 * Entries are kept in the doubly-linked list anchored at VMDKIMAGE::pFiles
 * and are reference counted; see vmdkFileOpen()/vmdkFileClose().
 */
typedef struct VMDKFILE
{
    /** Pointer to filename. Local copy, freed on last close. */
    const char *pszFilename;
    /** File open flags for consistency checking. */
    unsigned fOpen;
    /** File handle. */
    RTFILE File;
    /** Handle for asynchronous access if requested. */
    PVDIOSTORAGE pStorage;
    /** Flag whether to use File or pStorage. */
    bool fAsyncIO;
    /** Reference counter. */
    unsigned uReferences;
    /** Flag whether the file should be deleted on last close. */
    bool fDelete;
    /** Pointer to the image we belong to. */
    PVMDKIMAGE pImage;
    /** Pointer to next file descriptor. */
    struct VMDKFILE *pNext;
    /** Pointer to the previous file descriptor. */
    struct VMDKFILE *pPrev;
} VMDKFILE, *PVMDKFILE;
248
/**
 * VMDK extent data structure. In-memory representation of one extent of a
 * VMDK image; sector values are 512-byte sectors unless noted otherwise.
 */
typedef struct VMDKEXTENT
{
    /** File handle. */
    PVMDKFILE pFile;
    /** Base name of the image extent. */
    const char *pszBasename;
    /** Full name of the image extent. */
    const char *pszFullname;
    /** Number of sectors in this extent. */
    uint64_t cSectors;
    /** Number of sectors per block (grain in VMDK speak). */
    uint64_t cSectorsPerGrain;
    /** Starting sector number of descriptor. */
    uint64_t uDescriptorSector;
    /** Size of descriptor in sectors. */
    uint64_t cDescriptorSectors;
    /** Starting sector number of grain directory. */
    uint64_t uSectorGD;
    /** Starting sector number of redundant grain directory. */
    uint64_t uSectorRGD;
    /** Total number of metadata sectors. */
    uint64_t cOverheadSectors;
    /** Nominal size (i.e. as described by the descriptor) of this extent. */
    uint64_t cNominalSectors;
    /** Sector offset (i.e. as described by the descriptor) of this extent. */
    uint64_t uSectorOffset;
    /** Number of entries in a grain table. */
    uint32_t cGTEntries;
    /** Number of sectors reachable via a grain directory entry. */
    uint32_t cSectorsPerGDE;
    /** Number of entries in the grain directory. */
    uint32_t cGDEntries;
    /** Pointer to the next free sector. Legacy information. Do not use. */
    uint32_t uFreeSector;
    /** Number of this extent in the list of images. */
    uint32_t uExtent;
    /** Pointer to the descriptor (NULL if no descriptor in this extent). */
    char *pDescData;
    /** Pointer to the grain directory. */
    uint32_t *pGD;
    /** Pointer to the redundant grain directory. */
    uint32_t *pRGD;
    /** VMDK version of this extent. 1=1.0/1.1 */
    uint32_t uVersion;
    /** Type of this extent. */
    VMDKETYPE enmType;
    /** Access to this extent. */
    VMDKACCESS enmAccess;
    /** Flag whether this extent is marked as unclean. */
    bool fUncleanShutdown;
    /** Flag whether the metadata in the extent header needs to be updated. */
    bool fMetaDirty;
    /** Flag whether there is a footer in this extent. */
    bool fFooter;
    /** Compression type for this extent (VMDK_COMPRESSION_*). */
    uint16_t uCompression;
    /** Last grain which has been written to. Only for streamOptimized extents. */
    uint32_t uLastGrainWritten;
    /** Sector number of last grain which has been written to. Only for
     * streamOptimized extents. */
    uint32_t uLastGrainSector;
    /** Data size of last grain which has been written to. Only for
     * streamOptimized extents. */
    uint32_t cbLastGrainWritten;
    /** Starting sector of the decompressed grain buffer. */
    uint32_t uGrainSector;
    /** Decompressed grain buffer for streamOptimized extents. */
    void *pvGrain;
    /** Reference to the image in which this extent is used. Do not use this
     * on a regular basis to avoid passing pImage references to functions
     * explicitly. */
    struct VMDKIMAGE *pImage;
} VMDKEXTENT, *PVMDKEXTENT;
325
326/**
327 * Grain table cache size. Allocated per image.
328 */
329#define VMDK_GT_CACHE_SIZE 256
330
331/**
332 * Grain table block size. Smaller than an actual grain table block to allow
333 * more grain table blocks to be cached without having to allocate excessive
334 * amounts of memory for the cache.
335 */
336#define VMDK_GT_CACHELINE_SIZE 128
337
338
339/**
340 * Maximum number of lines in a descriptor file. Not worth the effort of
341 * making it variable. Descriptor files are generally very short (~20 lines),
342 * with the exception of sparse files split in 2G chunks, which need for the
343 * maximum size (almost 2T) exactly 1025 lines for the disk database.
344 */
345#define VMDK_DESCRIPTOR_LINES_MAX 1100U
346
/**
 * Parsed descriptor information. Allows easy access and update of the
 * descriptor (whether separate file or not). Free form text files suck.
 *
 * Lines are kept as pointers into a single descriptor buffer; aNextLines
 * forms a skip chain over comment lines so the parser can walk only the
 * meaningful entries.
 */
typedef struct VMDKDESCRIPTOR
{
    /** Line number of first entry of the disk descriptor. */
    unsigned uFirstDesc;
    /** Line number of first entry in the extent description. */
    unsigned uFirstExtent;
    /** Line number of first disk database entry. */
    unsigned uFirstDDB;
    /** Total number of lines. */
    unsigned cLines;
    /** Total amount of memory available for the descriptor. */
    size_t cbDescAlloc;
    /** Set if descriptor has been changed and not yet written to disk. */
    bool fDirty;
    /** Array of pointers to the data in the descriptor. */
    char *aLines[VMDK_DESCRIPTOR_LINES_MAX];
    /** Array of line indices pointing to the next non-comment line. */
    unsigned aNextLines[VMDK_DESCRIPTOR_LINES_MAX];
} VMDKDESCRIPTOR, *PVMDKDESCRIPTOR;
370
371
/**
 * Cache entry for translating extent/sector to a sector number in that
 * extent. One entry caches VMDK_GT_CACHELINE_SIZE consecutive grain table
 * entries of one extent.
 */
typedef struct VMDKGTCACHEENTRY
{
    /** Extent number for which this entry is valid. */
    uint32_t uExtent;
    /** GT data block number. */
    uint64_t uGTBlock;
    /** Data part of the cache entry. */
    uint32_t aGTData[VMDK_GT_CACHELINE_SIZE];
} VMDKGTCACHEENTRY, *PVMDKGTCACHEENTRY;
385
/**
 * Cache data structure for blocks of grain table entries. For now this is a
 * fixed size direct mapping cache, but this should be adapted to the size of
 * the sparse image and maybe converted to a set-associative cache. The
 * implementation below implements a write-through cache with write allocate.
 */
typedef struct VMDKGTCACHE
{
    /** Cache entries (direct-mapped, VMDK_GT_CACHE_SIZE slots). */
    VMDKGTCACHEENTRY aGTCache[VMDK_GT_CACHE_SIZE];
    /** Number of cache entries (currently unused). */
    unsigned cEntries;
} VMDKGTCACHE, *PVMDKGTCACHE;
399
/**
 * Complete VMDK image data structure. Mainly a collection of extents and a few
 * extra global data fields.
 */
typedef struct VMDKIMAGE
{
    /** Pointer to the image extents. */
    PVMDKEXTENT pExtents;
    /** Number of image extents. */
    unsigned cExtents;
    /** Pointer to the files list, for opening a file referenced multiple
     * times only once (happens mainly with raw partition access). */
    PVMDKFILE pFiles;

    /** Base image name. */
    const char *pszFilename;
    /** Descriptor file if applicable. */
    PVMDKFILE pFile;

    /** Pointer to the per-disk VD interface list. */
    PVDINTERFACE pVDIfsDisk;
    /** Pointer to the per-image VD interface list. */
    PVDINTERFACE pVDIfsImage;

    /** Error interface. */
    PVDINTERFACE pInterfaceError;
    /** Error interface callbacks. */
    PVDINTERFACEERROR pInterfaceErrorCallbacks;

    /** I/O interface. */
    PVDINTERFACE pInterfaceIO;
    /** I/O interface callbacks. */
    PVDINTERFACEIO pInterfaceIOCallbacks;
    /**
     * Pointer to an array of segment entries for async I/O.
     * This is an optimization because the task number to submit is not known
     * and allocating/freeing an array in the read/write functions every time
     * is too expensive.
     */
    PPDMDATASEG paSegments;
    /** Entries available in the segments array. */
    unsigned cSegments;

    /** Open flags passed by VBoxHDD layer. */
    unsigned uOpenFlags;
    /** Image flags defined during creation or determined during open. */
    unsigned uImageFlags;
    /** Total size of the image. */
    uint64_t cbSize;
    /** Physical geometry of this image. */
    PDMMEDIAGEOMETRY PCHSGeometry;
    /** Logical geometry of this image. */
    PDMMEDIAGEOMETRY LCHSGeometry;
    /** Image UUID. */
    RTUUID ImageUuid;
    /** Image modification UUID. */
    RTUUID ModificationUuid;
    /** Parent image UUID. */
    RTUUID ParentUuid;
    /** Parent image modification UUID. */
    RTUUID ParentModificationUuid;

    /** Pointer to grain table cache, if this image contains sparse extents. */
    PVMDKGTCACHE pGTCache;
    /** Pointer to the descriptor (NULL if no separate descriptor file). */
    char *pDescData;
    /** Allocation size of the descriptor file. */
    size_t cbDescAlloc;
    /** Parsed descriptor file content. */
    VMDKDESCRIPTOR Descriptor;
} VMDKIMAGE;
471
472
/** State for the input callout of the inflate reader
 * (see vmdkFileInflateHelper). */
typedef struct VMDKINFLATESTATE
{
    /** File where the data is stored. */
    PVMDKFILE File;
    /** Total size of the data to read. */
    size_t cbSize;
    /** Offset in the file to read. */
    uint64_t uFileOffset;
    /** Current read position; -1 means "zlib type byte not yet delivered". */
    ssize_t iOffset;
} VMDKINFLATESTATE;
485
/** State for the output callout of the deflate writer
 * (see vmdkFileDeflateHelper). */
typedef struct VMDKDEFLATESTATE
{
    /** File where the data is to be stored. */
    PVMDKFILE File;
    /** Offset in the file to write at. */
    uint64_t uFileOffset;
    /** Current write position; -1 means "zlib type byte not yet skipped". */
    ssize_t iOffset;
} VMDKDEFLATESTATE;
496
/** Tracks async grain allocation. */
typedef struct VMDKGRAINALLOCASYNC
{
    /** Old size of the extent. Used for rollback after an error. */
    uint64_t cbExtentOld;
    /** Flag whether the allocation failed. */
    bool fIoErr;
    /** Current number of transfers pending.
     * If reached 0 and there is an error the old state is restored. */
    unsigned cIoXfersPending;
    /** Sector number */
    uint64_t uSector;
    /** Flag whether the grain table needs to be updated. */
    bool fGTUpdateNeeded;
    /** Extent the allocation happens in. */
    PVMDKEXTENT pExtent;
    /** New size of the extent, required for the grain table update. */
    uint64_t cbExtentSize;
    /** Grain table sector. */
    uint64_t uGTSector;
    /** Backup grain table sector (in the redundant grain directory). */
    uint64_t uRGTSector;
} VMDKGRAINALLOCASYNC, *PVMDKGRAINALLOCASYNC;
520
521/*******************************************************************************
522 * Static Variables *
523 *******************************************************************************/
524
/** NULL-terminated array of supported file extensions.
 * Consumed by the VD layer for file type detection. */
static const char *const s_apszVmdkFileExtensions[] =
{
    "vmdk",
    NULL
};
531
532/*******************************************************************************
533* Internal Functions *
534*******************************************************************************/
535
536static void vmdkFreeGrainDirectory(PVMDKEXTENT pExtent);
537
538static void vmdkFreeExtentData(PVMDKIMAGE pImage, PVMDKEXTENT pExtent,
539 bool fDelete);
540
541static int vmdkCreateExtents(PVMDKIMAGE pImage, unsigned cExtents);
542static int vmdkFlushImage(PVMDKIMAGE pImage);
543static int vmdkSetImageComment(PVMDKIMAGE pImage, const char *pszComment);
544static void vmdkFreeImage(PVMDKIMAGE pImage, bool fDelete);
545
546static int vmdkAllocGrainAsyncComplete(void *pvBackendData, PVDIOCTX pIoCtx, void *pvUser, int rcReq);
547
/**
 * Internal: signal an error to the frontend.
 *
 * Forwards the formatted message to the error interface callback when one is
 * installed; otherwise the error is silently dropped.
 *
 * @returns rc unchanged, so calls can be written as "return vmdkError(...)".
 * @param   pImage      Image this error concerns.
 * @param   rc          Status code to report and return.
 * @param   pszFormat   Error message format string, with IPRT extensions.
 */
DECLINLINE(int) vmdkError(PVMDKIMAGE pImage, int rc, RT_SRC_POS_DECL,
                          const char *pszFormat, ...)
{
    va_list va;
    va_start(va, pszFormat);
    if (pImage->pInterfaceError && pImage->pInterfaceErrorCallbacks)
        pImage->pInterfaceErrorCallbacks->pfnError(pImage->pInterfaceError->pvUser, rc, RT_SRC_POS_ARGS,
                                                   pszFormat, va);
    va_end(va);
    return rc;
}
562
/**
 * Internal: open a file (using a file descriptor cache to ensure each file
 * is only opened once - anything else can cause locking problems).
 *
 * @returns VBox status code.
 * @param   pImage      Image the file belongs to; owns the file cache.
 * @param   ppVmdkFile  Where to store the file entry on success, NULL on
 *                      failure.
 * @param   pszFilename Name of the file to open; duplicated internally.
 * @param   fOpen       RTFILE_O_* open flags.
 * @param   fAsyncIO    Whether to open through the async I/O interface
 *                      (only honoured by the old I/O code path).
 */
static int vmdkFileOpen(PVMDKIMAGE pImage, PVMDKFILE *ppVmdkFile,
                        const char *pszFilename, unsigned fOpen, bool fAsyncIO)
{
    int rc = VINF_SUCCESS;
    PVMDKFILE pVmdkFile;

    /* Cache lookup: if the file is already open just bump the reference
     * count and hand out the existing entry. The assertion documents that
     * all openers are expected to use identical flags. */
    for (pVmdkFile = pImage->pFiles;
         pVmdkFile != NULL;
         pVmdkFile = pVmdkFile->pNext)
    {
        if (!strcmp(pszFilename, pVmdkFile->pszFilename))
        {
            Assert(fOpen == pVmdkFile->fOpen);
            pVmdkFile->uReferences++;

            *ppVmdkFile = pVmdkFile;

            return rc;
        }
    }

    /* If we get here, there's no matching entry in the cache. */
    pVmdkFile = (PVMDKFILE)RTMemAllocZ(sizeof(VMDKFILE));
    if (!VALID_PTR(pVmdkFile))
    {
        *ppVmdkFile = NULL;
        return VERR_NO_MEMORY;
    }

    pVmdkFile->pszFilename = RTStrDup(pszFilename);
    if (!VALID_PTR(pVmdkFile->pszFilename))
    {
        RTMemFree(pVmdkFile);
        *ppVmdkFile = NULL;
        return VERR_NO_MEMORY;
    }
    pVmdkFile->fOpen = fOpen;

#ifndef VBOX_WITH_NEW_IO_CODE
    /* Old I/O code: async access goes through the VD async interface,
     * synchronous access straight through RTFile. */
    if ((pImage->uOpenFlags & VD_OPEN_FLAGS_ASYNC_IO) && (fAsyncIO))
    {
        rc = pImage->pInterfaceIOCallbacks->pfnOpen(pImage->pInterfaceIO->pvUser,
                                                    pszFilename,
                                                    pImage->uOpenFlags & VD_OPEN_FLAGS_READONLY
                                                    ? VD_INTERFACEASYNCIO_OPEN_FLAGS_READONLY
                                                    : 0,
                                                    NULL,
                                                    pImage->pVDIfsDisk,
                                                    &pVmdkFile->pStorage);
        pVmdkFile->fAsyncIO = true;
    }
    else
    {
        rc = RTFileOpen(&pVmdkFile->File, pszFilename, fOpen);
        pVmdkFile->fAsyncIO = false;
    }
#else
    /* New I/O code: everything goes through the I/O interface; translate
     * the RTFILE_O_* flags into VD interface open flags. */
    unsigned uOpenFlags = 0;

    if ((fOpen & RTFILE_O_ACCESS_MASK) == RTFILE_O_READ)
        uOpenFlags |= VD_INTERFACEASYNCIO_OPEN_FLAGS_READONLY;
    if ((fOpen & RTFILE_O_ACTION_MASK) == RTFILE_O_CREATE)
        uOpenFlags |= VD_INTERFACEASYNCIO_OPEN_FLAGS_CREATE;
    if ((fOpen & RTFILE_O_DENY_MASK) == RTFILE_O_DENY_NONE)
        uOpenFlags |= VD_INTERFACEASYNCIO_OPEN_FLAGS_DONT_LOCK;

    rc = pImage->pInterfaceIOCallbacks->pfnOpen(pImage->pInterfaceIO->pvUser,
                                                pszFilename,
                                                uOpenFlags,
                                                &pVmdkFile->pStorage);
#endif
    if (RT_SUCCESS(rc))
    {
        /* Link the new entry at the head of the image's file list. */
        pVmdkFile->uReferences = 1;
        pVmdkFile->pImage = pImage;
        pVmdkFile->pNext = pImage->pFiles;
        if (pImage->pFiles)
            pImage->pFiles->pPrev = pVmdkFile;
        pImage->pFiles = pVmdkFile;
        *ppVmdkFile = pVmdkFile;
    }
    else
    {
        RTStrFree((char *)(void *)pVmdkFile->pszFilename);
        RTMemFree(pVmdkFile);
        *ppVmdkFile = NULL;
    }

    return rc;
}
657
/**
 * Internal: close a file, updating the file descriptor cache.
 *
 * Drops one reference; the file is actually closed (and optionally deleted)
 * only when the last reference goes away. *ppVmdkFile is always set to NULL
 * on return, even when this was not the last reference.
 *
 * @returns VBox status code.
 * @param   pImage      Image owning the file cache.
 * @param   ppVmdkFile  File entry to release; cleared on return.
 * @param   fDelete     Whether the file should be deleted on last close
 *                      (sticky: once requested by any caller it stays set).
 */
static int vmdkFileClose(PVMDKIMAGE pImage, PVMDKFILE *ppVmdkFile, bool fDelete)
{
    int rc = VINF_SUCCESS;
    PVMDKFILE pVmdkFile = *ppVmdkFile;

    AssertPtr(pVmdkFile);

    pVmdkFile->fDelete |= fDelete;
    Assert(pVmdkFile->uReferences);
    pVmdkFile->uReferences--;
    if (pVmdkFile->uReferences == 0)
    {
        PVMDKFILE pPrev;
        PVMDKFILE pNext;

        /* Unchain the element from the list. */
        pPrev = pVmdkFile->pPrev;
        pNext = pVmdkFile->pNext;

        if (pNext)
            pNext->pPrev = pPrev;
        if (pPrev)
            pPrev->pNext = pNext;
        else
            pImage->pFiles = pNext;

#ifndef VBOX_WITH_NEW_IO_CODE
        if (pVmdkFile->fAsyncIO)
        {
            rc = pImage->pInterfaceIOCallbacks->pfnClose(pImage->pInterfaceIO->pvUser,
                                                         pVmdkFile->pStorage);
        }
        else
        {
            rc = RTFileClose(pVmdkFile->File);
        }
#else
        rc = pImage->pInterfaceIOCallbacks->pfnClose(pImage->pInterfaceIO->pvUser,
                                                     pVmdkFile->pStorage);
#endif
        /* Delete only after a successful close; keep the file around for
         * diagnosis otherwise. */
        if (RT_SUCCESS(rc) && pVmdkFile->fDelete)
            rc = RTFileDelete(pVmdkFile->pszFilename);
        RTStrFree((char *)(void *)pVmdkFile->pszFilename);
        RTMemFree(pVmdkFile);
    }

    *ppVmdkFile = NULL;
    return rc;
}
710
/**
 * Internal: read from a file distinguishing between async and normal operation.
 *
 * @returns VBox status code.
 * @param   pVmdkFile   File to read from.
 * @param   uOffset     Byte offset to start reading at.
 * @param   pvBuf       Destination buffer.
 * @param   cbToRead    Number of bytes to read.
 * @param   pcbRead     Where to store the number of bytes read; optional.
 */
DECLINLINE(int) vmdkFileReadAt(PVMDKFILE pVmdkFile,
                               uint64_t uOffset, void *pvBuf,
                               size_t cbToRead, size_t *pcbRead)
{
    PVMDKIMAGE pImage = pVmdkFile->pImage;

#ifndef VBOX_WITH_NEW_IO_CODE
    if (pVmdkFile->fAsyncIO)
        return pImage->pInterfaceIOCallbacks->pfnReadSync(pImage->pInterfaceIO->pvUser,
                                                          pVmdkFile->pStorage, uOffset,
                                                          cbToRead, pvBuf, pcbRead);
    else
        return RTFileReadAt(pVmdkFile->File, uOffset, pvBuf, cbToRead, pcbRead);
#else
    return pImage->pInterfaceIOCallbacks->pfnReadSync(pImage->pInterfaceIO->pvUser,
                                                      pVmdkFile->pStorage, uOffset,
                                                      cbToRead, pvBuf, pcbRead);
#endif
}
733
/**
 * Internal: write to a file distinguishing between async and normal operation.
 *
 * @returns VBox status code.
 * @param   pVmdkFile   File to write to.
 * @param   uOffset     Byte offset to start writing at.
 * @param   pvBuf       Source buffer.
 * @param   cbToWrite   Number of bytes to write.
 * @param   pcbWritten  Where to store the number of bytes written; optional.
 */
DECLINLINE(int) vmdkFileWriteAt(PVMDKFILE pVmdkFile,
                                uint64_t uOffset, const void *pvBuf,
                                size_t cbToWrite, size_t *pcbWritten)
{
    PVMDKIMAGE pImage = pVmdkFile->pImage;

#ifndef VBOX_WITH_NEW_IO_CODE
    if (pVmdkFile->fAsyncIO)
        return pImage->pInterfaceIOCallbacks->pfnWriteSync(pImage->pInterfaceIO->pvUser,
                                                           pVmdkFile->pStorage, uOffset,
                                                           cbToWrite, pvBuf, pcbWritten);
    else
        return RTFileWriteAt(pVmdkFile->File, uOffset, pvBuf, cbToWrite, pcbWritten);
#else
    return pImage->pInterfaceIOCallbacks->pfnWriteSync(pImage->pInterfaceIO->pvUser,
                                                       pVmdkFile->pStorage, uOffset,
                                                       cbToWrite, pvBuf, pcbWritten);
#endif
}
756
/**
 * Internal: get the size of a file distinguishing between async and normal
 * operation.
 *
 * @returns VBox status code.
 * @param   pVmdkFile   File to query.
 * @param   pcbSize     Where to store the file size in bytes.
 */
DECLINLINE(int) vmdkFileGetSize(PVMDKFILE pVmdkFile, uint64_t *pcbSize)
{
    PVMDKIMAGE pImage = pVmdkFile->pImage;

#ifndef VBOX_WITH_NEW_IO_CODE
    if (pVmdkFile->fAsyncIO)
    {
        return pImage->pInterfaceIOCallbacks->pfnGetSize(pImage->pInterfaceIO->pvUser,
                                                         pVmdkFile->pStorage,
                                                         pcbSize);
    }
    else
        return RTFileGetSize(pVmdkFile->File, pcbSize);
#else
    return pImage->pInterfaceIOCallbacks->pfnGetSize(pImage->pInterfaceIO->pvUser,
                                                     pVmdkFile->pStorage,
                                                     pcbSize);
#endif
}
779
/**
 * Internal: set the size of a file distinguishing between async and normal
 * operation.
 *
 * @returns VBox status code.
 * @param   pVmdkFile   File to resize.
 * @param   cbSize      New file size in bytes.
 */
DECLINLINE(int) vmdkFileSetSize(PVMDKFILE pVmdkFile, uint64_t cbSize)
{
    PVMDKIMAGE pImage = pVmdkFile->pImage;

#ifndef VBOX_WITH_NEW_IO_CODE
    if (pVmdkFile->fAsyncIO)
    {
        return pImage->pInterfaceIOCallbacks->pfnSetSize(pImage->pInterfaceIO->pvUser,
                                                         pVmdkFile->pStorage,
                                                         cbSize);
    }
    else
        return RTFileSetSize(pVmdkFile->File, cbSize);
#else
    return pImage->pInterfaceIOCallbacks->pfnSetSize(pImage->pInterfaceIO->pvUser,
                                                     pVmdkFile->pStorage,
                                                     cbSize);
#endif
}
802
/**
 * Internal: flush a file distinguishing between async and normal operation.
 *
 * @returns VBox status code.
 * @param   pVmdkFile   File to flush.
 */
DECLINLINE(int) vmdkFileFlush(PVMDKFILE pVmdkFile)
{
    PVMDKIMAGE pImage = pVmdkFile->pImage;

#ifndef VBOX_WITH_NEW_IO_CODE
    if (pVmdkFile->fAsyncIO)
        return pImage->pInterfaceIOCallbacks->pfnFlushSync(pImage->pInterfaceIO->pvUser,
                                                           pVmdkFile->pStorage);
    else
        return RTFileFlush(pVmdkFile->File);
#else
    return pImage->pInterfaceIOCallbacks->pfnFlushSync(pImage->pInterfaceIO->pvUser,
                                                       pVmdkFile->pStorage);
#endif
}
821
822
/**
 * Internal: start an asynchronous flush of a file through the I/O interface.
 *
 * @returns VBox status code from the flush callback.
 * @param   pVmdkFile   File to flush.
 * @param   pIoCtx      I/O context to associate the flush with.
 */
DECLINLINE(int) vmdkFileFlushAsync(PVMDKFILE pVmdkFile, PVDIOCTX pIoCtx)
{
    PVMDKIMAGE pImage = pVmdkFile->pImage;

    return pImage->pInterfaceIOCallbacks->pfnFlushAsync(pImage->pInterfaceIO->pvUser,
                                                        pVmdkFile->pStorage, pIoCtx,
                                                        NULL, NULL);
}
831
832
833static DECLCALLBACK(int) vmdkFileInflateHelper(void *pvUser, void *pvBuf, size_t cbBuf, size_t *pcbBuf)
834{
835 VMDKINFLATESTATE *pInflateState = (VMDKINFLATESTATE *)pvUser;
836
837 Assert(cbBuf);
838 if (pInflateState->iOffset < 0)
839 {
840 *(uint8_t *)pvBuf = RTZIPTYPE_ZLIB;
841 if (pcbBuf)
842 *pcbBuf = 1;
843 pInflateState->iOffset = 0;
844 return VINF_SUCCESS;
845 }
846 cbBuf = RT_MIN(cbBuf, pInflateState->cbSize);
847 int rc = vmdkFileReadAt(pInflateState->File, pInflateState->uFileOffset, pvBuf, cbBuf, NULL);
848 if (RT_FAILURE(rc))
849 return rc;
850 pInflateState->uFileOffset += cbBuf;
851 pInflateState->iOffset += cbBuf;
852 pInflateState->cbSize -= cbBuf;
853 Assert(pcbBuf);
854 *pcbBuf = cbBuf;
855 return VINF_SUCCESS;
856}
857
/**
 * Internal: read from a file and inflate the compressed data,
 * distinguishing between async and normal operation.
 *
 * @returns VBox status code; VERR_VD_VMDK_INVALID_FORMAT if the marker does
 *          not match expectations or the payload does not inflate to exactly
 *          cbToRead bytes.
 * @param   pVmdkFile       File to read from (async not implemented).
 * @param   uOffset         Byte offset of the marker in the file.
 * @param   pvBuf           Destination for the decompressed data.
 * @param   cbToRead        Expected decompressed size in bytes.
 * @param   uMarker         Expected marker type, or VMDK_MARKER_IGNORE to
 *                          accept any marker.
 * @param   puLBA           Where to return the grain LBA (compressed grain
 *                          markers only); optional.
 * @param   pcbMarkerData   Where to return the total on-disk size of marker
 *                          plus compressed payload; optional.
 */
DECLINLINE(int) vmdkFileInflateAt(PVMDKFILE pVmdkFile,
                                  uint64_t uOffset, void *pvBuf,
                                  size_t cbToRead, unsigned uMarker,
                                  uint64_t *puLBA, uint32_t *pcbMarkerData)
{
    if (pVmdkFile->fAsyncIO)
    {
        AssertMsgFailed(("TODO\n"));
        return VERR_NOT_SUPPORTED;
    }
    else
    {
        int rc;
        PRTZIPDECOMP pZip = NULL;
        VMDKMARKER Marker;
        uint64_t uCompOffset, cbComp;
        VMDKINFLATESTATE InflateState;
        size_t cbActuallyRead;
        size_t cbMarker = sizeof(Marker);

        /* When the marker type is not checked, the uType field may be
         * overlaid by payload data, so don't read it as part of the marker. */
        if (uMarker == VMDK_MARKER_IGNORE)
            cbMarker -= sizeof(Marker.uType);
        rc = vmdkFileReadAt(pVmdkFile, uOffset, &Marker, cbMarker, NULL);
        if (RT_FAILURE(rc))
            return rc;
        Marker.uSector = RT_LE2H_U64(Marker.uSector);
        Marker.cbSize = RT_LE2H_U32(Marker.cbSize);
        /* Metadata markers (GT/GD/footer) must have cbSize == 0; anything
         * else is a compressed grain marker. */
        if (    uMarker != VMDK_MARKER_IGNORE
            &&  (   RT_LE2H_U32(Marker.uType) != uMarker
                 || Marker.cbSize != 0))
            return VERR_VD_VMDK_INVALID_FORMAT;
        if (Marker.cbSize != 0)
        {
            /* Compressed grain marker. Data follows immediately after the
             * 12 byte uSector+cbSize prefix (uType is overlaid by data). */
            uCompOffset = uOffset + 12;
            cbComp = Marker.cbSize;
            if (puLBA)
                *puLBA = Marker.uSector;
            if (pcbMarkerData)
                *pcbMarkerData = cbComp + 12;
        }
        else
        {
            Marker.uType = RT_LE2H_U32(Marker.uType);
            if (Marker.uType == VMDK_MARKER_EOS)
            {
                /* End-of-stream can't satisfy a read request. */
                Assert(uMarker != VMDK_MARKER_EOS);
                return VERR_VD_VMDK_INVALID_FORMAT;
            }
            else if (   Marker.uType == VMDK_MARKER_GT
                     || Marker.uType == VMDK_MARKER_GD
                     || Marker.uType == VMDK_MARKER_FOOTER)
            {
                /* Metadata markers occupy a full 512 byte sector; the
                 * payload (its size given in sectors in uSector) follows. */
                uCompOffset = uOffset + 512;
                cbComp = VMDK_SECTOR2BYTE(Marker.uSector);
                if (pcbMarkerData)
                    *pcbMarkerData = cbComp + 512;
            }
            else
            {
                AssertMsgFailed(("VMDK: unknown marker type %u\n", Marker.uType));
                return VERR_VD_VMDK_INVALID_FORMAT;
            }
        }
        InflateState.File = pVmdkFile;
        InflateState.cbSize = cbComp;
        InflateState.uFileOffset = uCompOffset;
        InflateState.iOffset = -1;
        /* Sanity check - the expansion ratio should be much less than 2. */
        Assert(cbComp < 2 * cbToRead);
        if (cbComp >= 2 * cbToRead)
            return VERR_VD_VMDK_INVALID_FORMAT;

        rc = RTZipDecompCreate(&pZip, &InflateState, vmdkFileInflateHelper);
        if (RT_FAILURE(rc))
            return rc;
        rc = RTZipDecompress(pZip, pvBuf, cbToRead, &cbActuallyRead);
        RTZipDecompDestroy(pZip);
        if (RT_FAILURE(rc))
            return rc;
        /* A short (or long) inflate result means the image is corrupt. */
        if (cbActuallyRead != cbToRead)
            rc = VERR_VD_VMDK_INVALID_FORMAT;
        return rc;
    }
}
947
948static DECLCALLBACK(int) vmdkFileDeflateHelper(void *pvUser, const void *pvBuf, size_t cbBuf)
949{
950 VMDKDEFLATESTATE *pDeflateState = (VMDKDEFLATESTATE *)pvUser;
951
952 Assert(cbBuf);
953 if (pDeflateState->iOffset < 0)
954 {
955 pvBuf = (const uint8_t *)pvBuf + 1;
956 cbBuf--;
957 pDeflateState->iOffset = 0;
958 }
959 if (!cbBuf)
960 return VINF_SUCCESS;
961 int rc = vmdkFileWriteAt(pDeflateState->File, pDeflateState->uFileOffset, pvBuf, cbBuf, NULL);
962 if (RT_FAILURE(rc))
963 return rc;
964 pDeflateState->uFileOffset += cbBuf;
965 pDeflateState->iOffset += cbBuf;
966 return VINF_SUCCESS;
967}
968
969/**
970 * Internal: deflate the uncompressed data and write to a file,
971 * distinguishing between async and normal operation
972 */
973DECLINLINE(int) vmdkFileDeflateAt(PVMDKFILE pVmdkFile,
974 uint64_t uOffset, const void *pvBuf,
975 size_t cbToWrite, unsigned uMarker,
976 uint64_t uLBA, uint32_t *pcbMarkerData)
977{
978 if (pVmdkFile->fAsyncIO)
979 {
980 AssertMsgFailed(("TODO\n"));
981 return VERR_NOT_SUPPORTED;
982 }
983 else
984 {
985 int rc;
986 PRTZIPCOMP pZip = NULL;
987 VMDKMARKER Marker;
988 uint64_t uCompOffset, cbDecomp;
989 VMDKDEFLATESTATE DeflateState;
990
991 Marker.uSector = RT_H2LE_U64(uLBA);
992 Marker.cbSize = RT_H2LE_U32((uint32_t)cbToWrite);
993 if (uMarker == VMDK_MARKER_IGNORE)
994 {
995 /* Compressed grain marker. Data follows immediately. */
996 uCompOffset = uOffset + 12;
997 cbDecomp = cbToWrite;
998 }
999 else
1000 {
1001 /** @todo implement creating the other marker types */
1002 return VERR_NOT_IMPLEMENTED;
1003 }
1004 DeflateState.File = pVmdkFile;
1005 DeflateState.uFileOffset = uCompOffset;
1006 DeflateState.iOffset = -1;
1007
1008 rc = RTZipCompCreate(&pZip, &DeflateState, vmdkFileDeflateHelper, RTZIPTYPE_ZLIB, RTZIPLEVEL_DEFAULT);
1009 if (RT_FAILURE(rc))
1010 return rc;
1011 rc = RTZipCompress(pZip, pvBuf, cbDecomp);
1012 if (RT_SUCCESS(rc))
1013 rc = RTZipCompFinish(pZip);
1014 RTZipCompDestroy(pZip);
1015 if (RT_SUCCESS(rc))
1016 {
1017 if (pcbMarkerData)
1018 *pcbMarkerData = 12 + DeflateState.iOffset;
1019 /* Set the file size to remove old garbage in case the block is
1020 * rewritten. Cannot cause data loss as the code calling this
1021 * guarantees that data gets only appended. */
1022 Assert(DeflateState.uFileOffset > uCompOffset);
1023 rc = vmdkFileSetSize(pVmdkFile, DeflateState.uFileOffset);
1024
1025 if (uMarker == VMDK_MARKER_IGNORE)
1026 {
1027 /* Compressed grain marker. */
1028 Marker.cbSize = RT_H2LE_U32(DeflateState.iOffset);
1029 rc = vmdkFileWriteAt(pVmdkFile, uOffset, &Marker, 12, NULL);
1030 if (RT_FAILURE(rc))
1031 return rc;
1032 }
1033 else
1034 {
1035 /** @todo implement creating the other marker types */
1036 return VERR_NOT_IMPLEMENTED;
1037 }
1038 }
1039 return rc;
1040 }
1041}
1042
1043/**
1044 * Internal: check if all files are closed, prevent leaking resources.
1045 */
1046static int vmdkFileCheckAllClose(PVMDKIMAGE pImage)
1047{
1048 int rc = VINF_SUCCESS, rc2;
1049 PVMDKFILE pVmdkFile;
1050
1051 Assert(pImage->pFiles == NULL);
1052 for (pVmdkFile = pImage->pFiles;
1053 pVmdkFile != NULL;
1054 pVmdkFile = pVmdkFile->pNext)
1055 {
1056 LogRel(("VMDK: leaking reference to file \"%s\"\n",
1057 pVmdkFile->pszFilename));
1058 pImage->pFiles = pVmdkFile->pNext;
1059
1060 if (pImage->uOpenFlags & VD_OPEN_FLAGS_ASYNC_IO)
1061 rc2 = pImage->pInterfaceIOCallbacks->pfnClose(pImage->pInterfaceIO->pvUser,
1062 pVmdkFile->pStorage);
1063 else
1064 rc2 = vmdkFileClose(pImage, &pVmdkFile, pVmdkFile->fDelete);
1065
1066 if (RT_SUCCESS(rc))
1067 rc = rc2;
1068 }
1069 return rc;
1070}
1071
1072/**
1073 * Internal: truncate a string (at a UTF8 code point boundary) and encode the
1074 * critical non-ASCII characters.
1075 */
1076static char *vmdkEncodeString(const char *psz)
1077{
1078 char szEnc[VMDK_ENCODED_COMMENT_MAX + 3];
1079 char *pszDst = szEnc;
1080
1081 AssertPtr(psz);
1082
1083 for (; *psz; psz = RTStrNextCp(psz))
1084 {
1085 char *pszDstPrev = pszDst;
1086 RTUNICP Cp = RTStrGetCp(psz);
1087 if (Cp == '\\')
1088 {
1089 pszDst = RTStrPutCp(pszDst, Cp);
1090 pszDst = RTStrPutCp(pszDst, Cp);
1091 }
1092 else if (Cp == '\n')
1093 {
1094 pszDst = RTStrPutCp(pszDst, '\\');
1095 pszDst = RTStrPutCp(pszDst, 'n');
1096 }
1097 else if (Cp == '\r')
1098 {
1099 pszDst = RTStrPutCp(pszDst, '\\');
1100 pszDst = RTStrPutCp(pszDst, 'r');
1101 }
1102 else
1103 pszDst = RTStrPutCp(pszDst, Cp);
1104 if (pszDst - szEnc >= VMDK_ENCODED_COMMENT_MAX - 1)
1105 {
1106 pszDst = pszDstPrev;
1107 break;
1108 }
1109 }
1110 *pszDst = '\0';
1111 return RTStrDup(szEnc);
1112}
1113
1114/**
1115 * Internal: decode a string and store it into the specified string.
1116 */
1117static int vmdkDecodeString(const char *pszEncoded, char *psz, size_t cb)
1118{
1119 int rc = VINF_SUCCESS;
1120 char szBuf[4];
1121
1122 if (!cb)
1123 return VERR_BUFFER_OVERFLOW;
1124
1125 AssertPtr(psz);
1126
1127 for (; *pszEncoded; pszEncoded = RTStrNextCp(pszEncoded))
1128 {
1129 char *pszDst = szBuf;
1130 RTUNICP Cp = RTStrGetCp(pszEncoded);
1131 if (Cp == '\\')
1132 {
1133 pszEncoded = RTStrNextCp(pszEncoded);
1134 RTUNICP CpQ = RTStrGetCp(pszEncoded);
1135 if (CpQ == 'n')
1136 RTStrPutCp(pszDst, '\n');
1137 else if (CpQ == 'r')
1138 RTStrPutCp(pszDst, '\r');
1139 else if (CpQ == '\0')
1140 {
1141 rc = VERR_VD_VMDK_INVALID_HEADER;
1142 break;
1143 }
1144 else
1145 RTStrPutCp(pszDst, CpQ);
1146 }
1147 else
1148 pszDst = RTStrPutCp(pszDst, Cp);
1149
1150 /* Need to leave space for terminating NUL. */
1151 if ((size_t)(pszDst - szBuf) + 1 >= cb)
1152 {
1153 rc = VERR_BUFFER_OVERFLOW;
1154 break;
1155 }
1156 memcpy(psz, szBuf, pszDst - szBuf);
1157 psz += pszDst - szBuf;
1158 }
1159 *psz = '\0';
1160 return rc;
1161}
1162
1163static int vmdkReadGrainDirectory(PVMDKEXTENT pExtent)
1164{
1165 int rc = VINF_SUCCESS;
1166 unsigned i;
1167 uint32_t *pGD = NULL, *pRGD = NULL, *pGDTmp, *pRGDTmp;
1168 size_t cbGD = pExtent->cGDEntries * sizeof(uint32_t);
1169
1170 if (pExtent->enmType != VMDKETYPE_HOSTED_SPARSE)
1171 goto out;
1172
1173 pGD = (uint32_t *)RTMemAllocZ(cbGD);
1174 if (!pGD)
1175 {
1176 rc = VERR_NO_MEMORY;
1177 goto out;
1178 }
1179 pExtent->pGD = pGD;
1180 /* The VMDK 1.1 spec talks about compressed grain directories, but real
1181 * life files don't have them. The spec is wrong in creative ways. */
1182 rc = vmdkFileReadAt(pExtent->pFile, VMDK_SECTOR2BYTE(pExtent->uSectorGD),
1183 pGD, cbGD, NULL);
1184 AssertRC(rc);
1185 if (RT_FAILURE(rc))
1186 {
1187 rc = vmdkError(pExtent->pImage, rc, RT_SRC_POS, N_("VMDK: could not read grain directory in '%s': %Rrc"), pExtent->pszFullname);
1188 goto out;
1189 }
1190 for (i = 0, pGDTmp = pGD; i < pExtent->cGDEntries; i++, pGDTmp++)
1191 *pGDTmp = RT_LE2H_U32(*pGDTmp);
1192
1193 if (pExtent->uSectorRGD)
1194 {
1195 pRGD = (uint32_t *)RTMemAllocZ(cbGD);
1196 if (!pRGD)
1197 {
1198 rc = VERR_NO_MEMORY;
1199 goto out;
1200 }
1201 pExtent->pRGD = pRGD;
1202 /* The VMDK 1.1 spec talks about compressed grain directories, but real
1203 * life files don't have them. The spec is wrong in creative ways. */
1204 rc = vmdkFileReadAt(pExtent->pFile, VMDK_SECTOR2BYTE(pExtent->uSectorRGD),
1205 pRGD, cbGD, NULL);
1206 AssertRC(rc);
1207 if (RT_FAILURE(rc))
1208 {
1209 rc = vmdkError(pExtent->pImage, rc, RT_SRC_POS, N_("VMDK: could not read redundant grain directory in '%s'"), pExtent->pszFullname);
1210 goto out;
1211 }
1212 for (i = 0, pRGDTmp = pRGD; i < pExtent->cGDEntries; i++, pRGDTmp++)
1213 *pRGDTmp = RT_LE2H_U32(*pRGDTmp);
1214
1215 /* Check grain table and redundant grain table for consistency. */
1216 size_t cbGT = pExtent->cGTEntries * sizeof(uint32_t);
1217 uint32_t *pTmpGT1 = (uint32_t *)RTMemTmpAlloc(cbGT);
1218 if (!pTmpGT1)
1219 {
1220 rc = VERR_NO_MEMORY;
1221 goto out;
1222 }
1223 uint32_t *pTmpGT2 = (uint32_t *)RTMemTmpAlloc(cbGT);
1224 if (!pTmpGT2)
1225 {
1226 RTMemTmpFree(pTmpGT1);
1227 rc = VERR_NO_MEMORY;
1228 goto out;
1229 }
1230
1231 for (i = 0, pGDTmp = pGD, pRGDTmp = pRGD;
1232 i < pExtent->cGDEntries;
1233 i++, pGDTmp++, pRGDTmp++)
1234 {
1235 /* If no grain table is allocated skip the entry. */
1236 if (*pGDTmp == 0 && *pRGDTmp == 0)
1237 continue;
1238
1239 if (*pGDTmp == 0 || *pRGDTmp == 0 || *pGDTmp == *pRGDTmp)
1240 {
1241 /* Just one grain directory entry refers to a not yet allocated
1242 * grain table or both grain directory copies refer to the same
1243 * grain table. Not allowed. */
1244 RTMemTmpFree(pTmpGT1);
1245 RTMemTmpFree(pTmpGT2);
1246 rc = vmdkError(pExtent->pImage, VERR_VD_VMDK_INVALID_HEADER, RT_SRC_POS, N_("VMDK: inconsistent references to grain directory in '%s'"), pExtent->pszFullname);
1247 goto out;
1248 }
1249 /* The VMDK 1.1 spec talks about compressed grain tables, but real
1250 * life files don't have them. The spec is wrong in creative ways. */
1251 rc = vmdkFileReadAt(pExtent->pFile, VMDK_SECTOR2BYTE(*pGDTmp),
1252 pTmpGT1, cbGT, NULL);
1253 if (RT_FAILURE(rc))
1254 {
1255 rc = vmdkError(pExtent->pImage, rc, RT_SRC_POS, N_("VMDK: error reading grain table in '%s'"), pExtent->pszFullname);
1256 RTMemTmpFree(pTmpGT1);
1257 RTMemTmpFree(pTmpGT2);
1258 goto out;
1259 }
1260 /* The VMDK 1.1 spec talks about compressed grain tables, but real
1261 * life files don't have them. The spec is wrong in creative ways. */
1262 rc = vmdkFileReadAt(pExtent->pFile, VMDK_SECTOR2BYTE(*pRGDTmp),
1263 pTmpGT2, cbGT, NULL);
1264 if (RT_FAILURE(rc))
1265 {
1266 rc = vmdkError(pExtent->pImage, rc, RT_SRC_POS, N_("VMDK: error reading backup grain table in '%s'"), pExtent->pszFullname);
1267 RTMemTmpFree(pTmpGT1);
1268 RTMemTmpFree(pTmpGT2);
1269 goto out;
1270 }
1271 if (memcmp(pTmpGT1, pTmpGT2, cbGT))
1272 {
1273 RTMemTmpFree(pTmpGT1);
1274 RTMemTmpFree(pTmpGT2);
1275 rc = vmdkError(pExtent->pImage, VERR_VD_VMDK_INVALID_HEADER, RT_SRC_POS, N_("VMDK: inconsistency between grain table and backup grain table in '%s'"), pExtent->pszFullname);
1276 goto out;
1277 }
1278 }
1279
1280 /** @todo figure out what to do for unclean VMDKs. */
1281 RTMemTmpFree(pTmpGT1);
1282 RTMemTmpFree(pTmpGT2);
1283 }
1284
1285 if (pExtent->pImage->uImageFlags & VD_VMDK_IMAGE_FLAGS_STREAM_OPTIMIZED)
1286 {
1287 uint32_t uLastGrainWritten = 0;
1288 uint32_t uLastGrainSector = 0;
1289 size_t cbGT = pExtent->cGTEntries * sizeof(uint32_t);
1290 uint32_t *pTmpGT = (uint32_t *)RTMemTmpAlloc(cbGT);
1291 if (!pTmpGT)
1292 {
1293 rc = VERR_NO_MEMORY;
1294 goto out;
1295 }
1296 for (i = 0, pGDTmp = pGD; i < pExtent->cGDEntries; i++, pGDTmp++)
1297 {
1298 /* If no grain table is allocated skip the entry. */
1299 if (*pGDTmp == 0)
1300 continue;
1301
1302 /* The VMDK 1.1 spec talks about compressed grain tables, but real
1303 * life files don't have them. The spec is wrong in creative ways. */
1304 rc = vmdkFileReadAt(pExtent->pFile, VMDK_SECTOR2BYTE(*pGDTmp),
1305 pTmpGT, cbGT, NULL);
1306 if (RT_FAILURE(rc))
1307 {
1308 rc = vmdkError(pExtent->pImage, rc, RT_SRC_POS, N_("VMDK: error reading grain table in '%s'"), pExtent->pszFullname);
1309 RTMemTmpFree(pTmpGT);
1310 goto out;
1311 }
1312 uint32_t j;
1313 uint32_t *pGTTmp;
1314 for (j = 0, pGTTmp = pTmpGT; j < pExtent->cGTEntries; j++, pGTTmp++)
1315 {
1316 uint32_t uGTTmp = RT_LE2H_U32(*pGTTmp);
1317
1318 /* If no grain is allocated skip the entry. */
1319 if (uGTTmp == 0)
1320 continue;
1321
1322 if (uLastGrainSector && uLastGrainSector >= uGTTmp)
1323 {
1324 rc = vmdkError(pExtent->pImage, rc, RT_SRC_POS, N_("VMDK: grain table in '%s' contains a violation of the ordering assumptions"), pExtent->pszFullname);
1325 RTMemTmpFree(pTmpGT);
1326 goto out;
1327 }
1328 uLastGrainSector = uGTTmp;
1329 uLastGrainWritten = i * pExtent->cGTEntries + j;
1330 }
1331 }
1332 RTMemTmpFree(pTmpGT);
1333
1334 /* streamOptimized extents need a grain decompress buffer. */
1335 pExtent->pvGrain = RTMemAlloc(VMDK_SECTOR2BYTE(pExtent->cSectorsPerGrain));
1336 if (!pExtent->pvGrain)
1337 {
1338 rc = VERR_NO_MEMORY;
1339 goto out;
1340 }
1341
1342 if (uLastGrainSector)
1343 {
1344 uint64_t uLBA = 0;
1345 uint32_t cbMarker = 0;
1346 rc = vmdkFileInflateAt(pExtent->pFile, VMDK_SECTOR2BYTE(uLastGrainSector),
1347 pExtent->pvGrain, VMDK_SECTOR2BYTE(pExtent->cSectorsPerGrain), VMDK_MARKER_IGNORE, &uLBA, &cbMarker);
1348 if (RT_FAILURE(rc))
1349 goto out;
1350
1351 Assert(uLBA == uLastGrainWritten * pExtent->cSectorsPerGrain);
1352 pExtent->uGrainSector = uLastGrainSector;
1353 pExtent->cbLastGrainWritten = RT_ALIGN(cbMarker, 512);
1354 }
1355 pExtent->uLastGrainWritten = uLastGrainWritten;
1356 pExtent->uLastGrainSector = uLastGrainSector;
1357 }
1358
1359out:
1360 if (RT_FAILURE(rc))
1361 vmdkFreeGrainDirectory(pExtent);
1362 return rc;
1363}
1364
1365static int vmdkCreateGrainDirectory(PVMDKEXTENT pExtent, uint64_t uStartSector,
1366 bool fPreAlloc)
1367{
1368 int rc = VINF_SUCCESS;
1369 unsigned i;
1370 uint32_t *pGD = NULL, *pRGD = NULL;
1371 size_t cbGD = pExtent->cGDEntries * sizeof(uint32_t);
1372 size_t cbGDRounded = RT_ALIGN_64(pExtent->cGDEntries * sizeof(uint32_t), 512);
1373 size_t cbGTRounded;
1374 uint64_t cbOverhead;
1375
1376 if (fPreAlloc)
1377 cbGTRounded = RT_ALIGN_64(pExtent->cGDEntries * pExtent->cGTEntries * sizeof(uint32_t), 512);
1378 else
1379 cbGTRounded = 0;
1380
1381 pGD = (uint32_t *)RTMemAllocZ(cbGD);
1382 if (!pGD)
1383 {
1384 rc = VERR_NO_MEMORY;
1385 goto out;
1386 }
1387 pExtent->pGD = pGD;
1388 pRGD = (uint32_t *)RTMemAllocZ(cbGD);
1389 if (!pRGD)
1390 {
1391 rc = VERR_NO_MEMORY;
1392 goto out;
1393 }
1394 pExtent->pRGD = pRGD;
1395
1396 cbOverhead = RT_ALIGN_64(VMDK_SECTOR2BYTE(uStartSector) + 2 * (cbGDRounded + cbGTRounded), VMDK_SECTOR2BYTE(pExtent->cSectorsPerGrain));
1397 /* For streamOptimized extents put the end-of-stream marker at the end. */
1398 if (pExtent->pImage->uImageFlags & VD_VMDK_IMAGE_FLAGS_STREAM_OPTIMIZED)
1399 rc = vmdkFileSetSize(pExtent->pFile, cbOverhead + 512);
1400 else
1401 rc = vmdkFileSetSize(pExtent->pFile, cbOverhead);
1402 if (RT_FAILURE(rc))
1403 goto out;
1404 pExtent->uSectorRGD = uStartSector;
1405 pExtent->uSectorGD = uStartSector + VMDK_BYTE2SECTOR(cbGDRounded + cbGTRounded);
1406
1407 if (fPreAlloc)
1408 {
1409 uint32_t uGTSectorLE;
1410 uint64_t uOffsetSectors;
1411
1412 uOffsetSectors = pExtent->uSectorRGD + VMDK_BYTE2SECTOR(cbGDRounded);
1413 for (i = 0; i < pExtent->cGDEntries; i++)
1414 {
1415 pRGD[i] = uOffsetSectors;
1416 uGTSectorLE = RT_H2LE_U64(uOffsetSectors);
1417 /* Write the redundant grain directory entry to disk. */
1418 rc = vmdkFileWriteAt(pExtent->pFile,
1419 VMDK_SECTOR2BYTE(pExtent->uSectorRGD) + i * sizeof(uGTSectorLE),
1420 &uGTSectorLE, sizeof(uGTSectorLE), NULL);
1421 if (RT_FAILURE(rc))
1422 {
1423 rc = vmdkError(pExtent->pImage, rc, RT_SRC_POS, N_("VMDK: cannot write new redundant grain directory entry in '%s'"), pExtent->pszFullname);
1424 goto out;
1425 }
1426 uOffsetSectors += VMDK_BYTE2SECTOR(pExtent->cGTEntries * sizeof(uint32_t));
1427 }
1428
1429 uOffsetSectors = pExtent->uSectorGD + VMDK_BYTE2SECTOR(cbGDRounded);
1430 for (i = 0; i < pExtent->cGDEntries; i++)
1431 {
1432 pGD[i] = uOffsetSectors;
1433 uGTSectorLE = RT_H2LE_U64(uOffsetSectors);
1434 /* Write the grain directory entry to disk. */
1435 rc = vmdkFileWriteAt(pExtent->pFile,
1436 VMDK_SECTOR2BYTE(pExtent->uSectorGD) + i * sizeof(uGTSectorLE),
1437 &uGTSectorLE, sizeof(uGTSectorLE), NULL);
1438 if (RT_FAILURE(rc))
1439 {
1440 rc = vmdkError(pExtent->pImage, rc, RT_SRC_POS, N_("VMDK: cannot write new grain directory entry in '%s'"), pExtent->pszFullname);
1441 goto out;
1442 }
1443 uOffsetSectors += VMDK_BYTE2SECTOR(pExtent->cGTEntries * sizeof(uint32_t));
1444 }
1445 }
1446 pExtent->cOverheadSectors = VMDK_BYTE2SECTOR(cbOverhead);
1447
1448 /* streamOptimized extents need a grain decompress buffer. */
1449 if (pExtent->pImage->uImageFlags & VD_VMDK_IMAGE_FLAGS_STREAM_OPTIMIZED)
1450 {
1451 pExtent->pvGrain = RTMemAlloc(VMDK_SECTOR2BYTE(pExtent->cSectorsPerGrain));
1452 if (!pExtent->pvGrain)
1453 {
1454 rc = VERR_NO_MEMORY;
1455 goto out;
1456 }
1457 }
1458
1459out:
1460 if (RT_FAILURE(rc))
1461 vmdkFreeGrainDirectory(pExtent);
1462 return rc;
1463}
1464
1465static void vmdkFreeGrainDirectory(PVMDKEXTENT pExtent)
1466{
1467 if (pExtent->pGD)
1468 {
1469 RTMemFree(pExtent->pGD);
1470 pExtent->pGD = NULL;
1471 }
1472 if (pExtent->pRGD)
1473 {
1474 RTMemFree(pExtent->pRGD);
1475 pExtent->pRGD = NULL;
1476 }
1477}
1478
1479static int vmdkStringUnquote(PVMDKIMAGE pImage, const char *pszStr,
1480 char **ppszUnquoted, char **ppszNext)
1481{
1482 char *pszQ;
1483 char *pszUnquoted;
1484
1485 /* Skip over whitespace. */
1486 while (*pszStr == ' ' || *pszStr == '\t')
1487 pszStr++;
1488
1489 if (*pszStr != '"')
1490 {
1491 pszQ = (char *)pszStr;
1492 while (*pszQ && *pszQ != ' ' && *pszQ != '\t')
1493 pszQ++;
1494 }
1495 else
1496 {
1497 pszStr++;
1498 pszQ = (char *)strchr(pszStr, '"');
1499 if (pszQ == NULL)
1500 return vmdkError(pImage, VERR_VD_VMDK_INVALID_HEADER, RT_SRC_POS, N_("VMDK: incorrectly quoted value in descriptor in '%s'"), pImage->pszFilename);
1501 }
1502
1503 pszUnquoted = (char *)RTMemTmpAlloc(pszQ - pszStr + 1);
1504 if (!pszUnquoted)
1505 return VERR_NO_MEMORY;
1506 memcpy(pszUnquoted, pszStr, pszQ - pszStr);
1507 pszUnquoted[pszQ - pszStr] = '\0';
1508 *ppszUnquoted = pszUnquoted;
1509 if (ppszNext)
1510 *ppszNext = pszQ + 1;
1511 return VINF_SUCCESS;
1512}
1513
1514static int vmdkDescInitStr(PVMDKIMAGE pImage, PVMDKDESCRIPTOR pDescriptor,
1515 const char *pszLine)
1516{
1517 char *pEnd = pDescriptor->aLines[pDescriptor->cLines];
1518 ssize_t cbDiff = strlen(pszLine) + 1;
1519
1520 if ( pDescriptor->cLines >= VMDK_DESCRIPTOR_LINES_MAX - 1
1521 && pEnd - pDescriptor->aLines[0] > (ptrdiff_t)pDescriptor->cbDescAlloc - cbDiff)
1522 return vmdkError(pImage, VERR_BUFFER_OVERFLOW, RT_SRC_POS, N_("VMDK: descriptor too big in '%s'"), pImage->pszFilename);
1523
1524 memcpy(pEnd, pszLine, cbDiff);
1525 pDescriptor->cLines++;
1526 pDescriptor->aLines[pDescriptor->cLines] = pEnd + cbDiff;
1527 pDescriptor->fDirty = true;
1528
1529 return VINF_SUCCESS;
1530}
1531
1532static bool vmdkDescGetStr(PVMDKDESCRIPTOR pDescriptor, unsigned uStart,
1533 const char *pszKey, const char **ppszValue)
1534{
1535 size_t cbKey = strlen(pszKey);
1536 const char *pszValue;
1537
1538 while (uStart != 0)
1539 {
1540 if (!strncmp(pDescriptor->aLines[uStart], pszKey, cbKey))
1541 {
1542 /* Key matches, check for a '=' (preceded by whitespace). */
1543 pszValue = pDescriptor->aLines[uStart] + cbKey;
1544 while (*pszValue == ' ' || *pszValue == '\t')
1545 pszValue++;
1546 if (*pszValue == '=')
1547 {
1548 *ppszValue = pszValue + 1;
1549 break;
1550 }
1551 }
1552 uStart = pDescriptor->aNextLines[uStart];
1553 }
1554 return !!uStart;
1555}
1556
/**
 * Internal: set, replace or delete a key/value pair in the descriptor.
 *
 * Searches the line list starting at uStart for pszKey. If found, the value
 * is replaced in place (shifting the rest of the buffer), or the whole line
 * is removed when pszValue is NULL. If not found and pszValue is non-NULL, a
 * new "key=value" line is inserted after the last line of the category.
 * The descriptor is a single contiguous buffer, so every edit shifts the
 * tail and patches up aLines[]/aNextLines[] and the section start indices.
 */
static int vmdkDescSetStr(PVMDKIMAGE pImage, PVMDKDESCRIPTOR pDescriptor,
                          unsigned uStart,
                          const char *pszKey, const char *pszValue)
{
    char *pszTmp;
    size_t cbKey = strlen(pszKey);
    unsigned uLast = 0;

    /* Find the key; remember the last line of the chain for appending. */
    while (uStart != 0)
    {
        if (!strncmp(pDescriptor->aLines[uStart], pszKey, cbKey))
        {
            /* Key matches, check for a '=' (preceded by whitespace). */
            pszTmp = pDescriptor->aLines[uStart] + cbKey;
            while (*pszTmp == ' ' || *pszTmp == '\t')
                pszTmp++;
            if (*pszTmp == '=')
            {
                pszTmp++;
                while (*pszTmp == ' ' || *pszTmp == '\t')
                    pszTmp++;
                /* pszTmp now points at the start of the old value. */
                break;
            }
        }
        if (!pDescriptor->aNextLines[uStart])
            uLast = uStart;
        uStart = pDescriptor->aNextLines[uStart];
    }
    if (uStart)
    {
        if (pszValue)
        {
            /* Key already exists, replace existing value. */
            size_t cbOldVal = strlen(pszTmp);
            size_t cbNewVal = strlen(pszValue);
            ssize_t cbDiff = cbNewVal - cbOldVal;
            /* Check for buffer overflow. */
            if (    pDescriptor->aLines[pDescriptor->cLines]
                -   pDescriptor->aLines[0] > (ptrdiff_t)pDescriptor->cbDescAlloc - cbDiff)
                return vmdkError(pImage, VERR_BUFFER_OVERFLOW, RT_SRC_POS, N_("VMDK: descriptor too big in '%s'"), pImage->pszFilename);

            /* Shift the tail of the buffer to make room, then copy in the
             * new value (including its NUL terminator). */
            memmove(pszTmp + cbNewVal, pszTmp + cbOldVal,
                    pDescriptor->aLines[pDescriptor->cLines] - pszTmp - cbOldVal);
            memcpy(pszTmp, pszValue, cbNewVal + 1);
            /* All later line pointers moved by cbDiff. */
            for (unsigned i = uStart + 1; i <= pDescriptor->cLines; i++)
                pDescriptor->aLines[i] += cbDiff;
        }
        else
        {
            /* pszValue == NULL: remove the whole line, including its NUL. */
            memmove(pDescriptor->aLines[uStart], pDescriptor->aLines[uStart+1],
                    pDescriptor->aLines[pDescriptor->cLines] - pDescriptor->aLines[uStart+1] + 1);
            for (unsigned i = uStart + 1; i <= pDescriptor->cLines; i++)
            {
                pDescriptor->aLines[i-1] = pDescriptor->aLines[i];
                if (pDescriptor->aNextLines[i])
                    pDescriptor->aNextLines[i-1] = pDescriptor->aNextLines[i] - 1;
                else
                    pDescriptor->aNextLines[i-1] = 0;
            }
            pDescriptor->cLines--;
            /* Adjust starting line numbers of following descriptor sections. */
            if (uStart < pDescriptor->uFirstExtent)
                pDescriptor->uFirstExtent--;
            if (uStart < pDescriptor->uFirstDDB)
                pDescriptor->uFirstDDB--;
        }
    }
    else
    {
        /* Key doesn't exist, append after the last entry in this category. */
        if (!pszValue)
        {
            /* Key doesn't exist, and it should be removed. Simply a no-op. */
            return VINF_SUCCESS;
        }
        cbKey = strlen(pszKey);
        size_t cbValue = strlen(pszValue);
        ssize_t cbDiff = cbKey + 1 + cbValue + 1; /* "key=value\0" */
        /* Check for buffer overflow. */
        if (   (pDescriptor->cLines >= VMDK_DESCRIPTOR_LINES_MAX - 1)
            || (  pDescriptor->aLines[pDescriptor->cLines]
                - pDescriptor->aLines[0] > (ptrdiff_t)pDescriptor->cbDescAlloc - cbDiff))
            return vmdkError(pImage, VERR_BUFFER_OVERFLOW, RT_SRC_POS, N_("VMDK: descriptor too big in '%s'"), pImage->pszFilename);
        /* Open a slot in aLines[]/aNextLines[] after uLast. */
        for (unsigned i = pDescriptor->cLines + 1; i > uLast + 1; i--)
        {
            pDescriptor->aLines[i] = pDescriptor->aLines[i - 1];
            if (pDescriptor->aNextLines[i - 1])
                pDescriptor->aNextLines[i] = pDescriptor->aNextLines[i - 1] + 1;
            else
                pDescriptor->aNextLines[i] = 0;
        }
        uStart = uLast + 1;
        pDescriptor->aNextLines[uLast] = uStart;
        pDescriptor->aNextLines[uStart] = 0;
        pDescriptor->cLines++;
        pszTmp = pDescriptor->aLines[uStart];
        /* Shift the tail and write "key=value" into the gap. */
        memmove(pszTmp + cbDiff, pszTmp,
                pDescriptor->aLines[pDescriptor->cLines] - pszTmp);
        memcpy(pDescriptor->aLines[uStart], pszKey, cbKey);
        pDescriptor->aLines[uStart][cbKey] = '=';
        memcpy(pDescriptor->aLines[uStart] + cbKey + 1, pszValue, cbValue + 1);
        for (unsigned i = uStart + 1; i <= pDescriptor->cLines; i++)
            pDescriptor->aLines[i] += cbDiff;

        /* Adjust starting line numbers of following descriptor sections. */
        if (uStart <= pDescriptor->uFirstExtent)
            pDescriptor->uFirstExtent++;
        if (uStart <= pDescriptor->uFirstDDB)
            pDescriptor->uFirstDDB++;
    }
    pDescriptor->fDirty = true;
    return VINF_SUCCESS;
}
1670
1671static int vmdkDescBaseGetU32(PVMDKDESCRIPTOR pDescriptor, const char *pszKey,
1672 uint32_t *puValue)
1673{
1674 const char *pszValue;
1675
1676 if (!vmdkDescGetStr(pDescriptor, pDescriptor->uFirstDesc, pszKey,
1677 &pszValue))
1678 return VERR_VD_VMDK_VALUE_NOT_FOUND;
1679 return RTStrToUInt32Ex(pszValue, NULL, 10, puValue);
1680}
1681
1682static int vmdkDescBaseGetStr(PVMDKIMAGE pImage, PVMDKDESCRIPTOR pDescriptor,
1683 const char *pszKey, const char **ppszValue)
1684{
1685 const char *pszValue;
1686 char *pszValueUnquoted;
1687
1688 if (!vmdkDescGetStr(pDescriptor, pDescriptor->uFirstDesc, pszKey,
1689 &pszValue))
1690 return VERR_VD_VMDK_VALUE_NOT_FOUND;
1691 int rc = vmdkStringUnquote(pImage, pszValue, &pszValueUnquoted, NULL);
1692 if (RT_FAILURE(rc))
1693 return rc;
1694 *ppszValue = pszValueUnquoted;
1695 return rc;
1696}
1697
1698static int vmdkDescBaseSetStr(PVMDKIMAGE pImage, PVMDKDESCRIPTOR pDescriptor,
1699 const char *pszKey, const char *pszValue)
1700{
1701 char *pszValueQuoted;
1702
1703 int rc = RTStrAPrintf(&pszValueQuoted, "\"%s\"", pszValue);
1704 if (RT_FAILURE(rc))
1705 return rc;
1706 rc = vmdkDescSetStr(pImage, pDescriptor, pDescriptor->uFirstDesc, pszKey,
1707 pszValueQuoted);
1708 RTStrFree(pszValueQuoted);
1709 return rc;
1710}
1711
/**
 * Internal: remove the first extent line from the descriptor.
 *
 * Used to drop a placeholder/dummy extent entry: the line's bytes are cut
 * out of the contiguous descriptor buffer and the aLines[]/aNextLines[]
 * tables plus the DDB section start index are shifted down by one.
 * Does nothing if there is no extent section.
 */
static void vmdkDescExtRemoveDummy(PVMDKIMAGE pImage,
                                   PVMDKDESCRIPTOR pDescriptor)
{
    unsigned uEntry = pDescriptor->uFirstExtent;
    ssize_t cbDiff;

    if (!uEntry)
        return;

    /* Size of the removed line including its NUL terminator. */
    cbDiff = strlen(pDescriptor->aLines[uEntry]) + 1;
    /* Move everything including \0 in the entry marking the end of buffer. */
    memmove(pDescriptor->aLines[uEntry], pDescriptor->aLines[uEntry + 1],
            pDescriptor->aLines[pDescriptor->cLines] - pDescriptor->aLines[uEntry + 1] + 1);
    for (unsigned i = uEntry + 1; i <= pDescriptor->cLines; i++)
    {
        /* Line pointers shift down one slot and back by cbDiff bytes. */
        pDescriptor->aLines[i - 1] = pDescriptor->aLines[i] - cbDiff;
        if (pDescriptor->aNextLines[i])
            pDescriptor->aNextLines[i - 1] = pDescriptor->aNextLines[i] - 1;
        else
            pDescriptor->aNextLines[i - 1] = 0;
    }
    pDescriptor->cLines--;
    /* The DDB section follows the extent section; its start moved up. */
    if (pDescriptor->uFirstDDB)
        pDescriptor->uFirstDDB--;

    return;
}
1739
/**
 * Internal: append an extent description line to the descriptor.
 *
 * Formats the line as "ACCESS sectors TYPE [\"basename\" [offset]]" (the
 * basename is omitted for ZERO extents, the offset only written for FLAT
 * ones), inserts it after the last extent line and fixes up the line tables
 * and the DDB section start index.
 *
 * @returns VBox status code; VERR_BUFFER_OVERFLOW when the descriptor or
 *          its line table is full.
 */
static int vmdkDescExtInsert(PVMDKIMAGE pImage, PVMDKDESCRIPTOR pDescriptor,
                             VMDKACCESS enmAccess, uint64_t cNominalSectors,
                             VMDKETYPE enmType, const char *pszBasename,
                             uint64_t uSectorOffset)
{
    static const char *apszAccess[] = { "NOACCESS", "RDONLY", "RW" };
    static const char *apszType[] = { "", "SPARSE", "FLAT", "ZERO", "VMFS" };
    char *pszTmp;
    unsigned uStart = pDescriptor->uFirstExtent, uLast = 0;
    char szExt[1024];
    ssize_t cbDiff;

    Assert((unsigned)enmAccess < RT_ELEMENTS(apszAccess));
    Assert((unsigned)enmType < RT_ELEMENTS(apszType));

    /* Find last entry in extent description. */
    while (uStart)
    {
        if (!pDescriptor->aNextLines[uStart])
            uLast = uStart;
        uStart = pDescriptor->aNextLines[uStart];
    }

    if (enmType == VMDKETYPE_ZERO)
    {
        /* ZERO extents have no backing file. */
        RTStrPrintf(szExt, sizeof(szExt), "%s %llu %s ", apszAccess[enmAccess],
                    cNominalSectors, apszType[enmType]);
    }
    else if (enmType == VMDKETYPE_FLAT)
    {
        /* FLAT extents additionally carry the start offset in the file. */
        RTStrPrintf(szExt, sizeof(szExt), "%s %llu %s \"%s\" %llu",
                    apszAccess[enmAccess], cNominalSectors,
                    apszType[enmType], pszBasename, uSectorOffset);
    }
    else
    {
        RTStrPrintf(szExt, sizeof(szExt), "%s %llu %s \"%s\"",
                    apszAccess[enmAccess], cNominalSectors,
                    apszType[enmType], pszBasename);
    }
    cbDiff = strlen(szExt) + 1;

    /* Check for buffer overflow. */
    if (   (pDescriptor->cLines >= VMDK_DESCRIPTOR_LINES_MAX - 1)
        || (  pDescriptor->aLines[pDescriptor->cLines]
            - pDescriptor->aLines[0] > (ptrdiff_t)pDescriptor->cbDescAlloc - cbDiff))
        return vmdkError(pImage, VERR_BUFFER_OVERFLOW, RT_SRC_POS, N_("VMDK: descriptor too big in '%s'"), pImage->pszFilename);

    /* Open a slot in aLines[]/aNextLines[] right after the last extent line. */
    for (unsigned i = pDescriptor->cLines + 1; i > uLast + 1; i--)
    {
        pDescriptor->aLines[i] = pDescriptor->aLines[i - 1];
        if (pDescriptor->aNextLines[i - 1])
            pDescriptor->aNextLines[i] = pDescriptor->aNextLines[i - 1] + 1;
        else
            pDescriptor->aNextLines[i] = 0;
    }
    uStart = uLast + 1;
    pDescriptor->aNextLines[uLast] = uStart;
    pDescriptor->aNextLines[uStart] = 0;
    pDescriptor->cLines++;
    pszTmp = pDescriptor->aLines[uStart];
    /* Shift the buffer tail and copy the new line into the gap. */
    memmove(pszTmp + cbDiff, pszTmp,
            pDescriptor->aLines[pDescriptor->cLines] - pszTmp);
    memcpy(pDescriptor->aLines[uStart], szExt, cbDiff);
    for (unsigned i = uStart + 1; i <= pDescriptor->cLines; i++)
        pDescriptor->aLines[i] += cbDiff;

    /* Adjust starting line numbers of following descriptor sections. */
    if (uStart <= pDescriptor->uFirstDDB)
        pDescriptor->uFirstDDB++;

    pDescriptor->fDirty = true;
    return VINF_SUCCESS;
}
1814
1815static int vmdkDescDDBGetStr(PVMDKIMAGE pImage, PVMDKDESCRIPTOR pDescriptor,
1816 const char *pszKey, const char **ppszValue)
1817{
1818 const char *pszValue;
1819 char *pszValueUnquoted;
1820
1821 if (!vmdkDescGetStr(pDescriptor, pDescriptor->uFirstDDB, pszKey,
1822 &pszValue))
1823 return VERR_VD_VMDK_VALUE_NOT_FOUND;
1824 int rc = vmdkStringUnquote(pImage, pszValue, &pszValueUnquoted, NULL);
1825 if (RT_FAILURE(rc))
1826 return rc;
1827 *ppszValue = pszValueUnquoted;
1828 return rc;
1829}
1830
1831static int vmdkDescDDBGetU32(PVMDKIMAGE pImage, PVMDKDESCRIPTOR pDescriptor,
1832 const char *pszKey, uint32_t *puValue)
1833{
1834 const char *pszValue;
1835 char *pszValueUnquoted;
1836
1837 if (!vmdkDescGetStr(pDescriptor, pDescriptor->uFirstDDB, pszKey,
1838 &pszValue))
1839 return VERR_VD_VMDK_VALUE_NOT_FOUND;
1840 int rc = vmdkStringUnquote(pImage, pszValue, &pszValueUnquoted, NULL);
1841 if (RT_FAILURE(rc))
1842 return rc;
1843 rc = RTStrToUInt32Ex(pszValueUnquoted, NULL, 10, puValue);
1844 RTMemTmpFree(pszValueUnquoted);
1845 return rc;
1846}
1847
1848static int vmdkDescDDBGetUuid(PVMDKIMAGE pImage, PVMDKDESCRIPTOR pDescriptor,
1849 const char *pszKey, PRTUUID pUuid)
1850{
1851 const char *pszValue;
1852 char *pszValueUnquoted;
1853
1854 if (!vmdkDescGetStr(pDescriptor, pDescriptor->uFirstDDB, pszKey,
1855 &pszValue))
1856 return VERR_VD_VMDK_VALUE_NOT_FOUND;
1857 int rc = vmdkStringUnquote(pImage, pszValue, &pszValueUnquoted, NULL);
1858 if (RT_FAILURE(rc))
1859 return rc;
1860 rc = RTUuidFromStr(pUuid, pszValueUnquoted);
1861 RTMemTmpFree(pszValueUnquoted);
1862 return rc;
1863}
1864
1865static int vmdkDescDDBSetStr(PVMDKIMAGE pImage, PVMDKDESCRIPTOR pDescriptor,
1866 const char *pszKey, const char *pszVal)
1867{
1868 int rc;
1869 char *pszValQuoted;
1870
1871 if (pszVal)
1872 {
1873 rc = RTStrAPrintf(&pszValQuoted, "\"%s\"", pszVal);
1874 if (RT_FAILURE(rc))
1875 return rc;
1876 }
1877 else
1878 pszValQuoted = NULL;
1879 rc = vmdkDescSetStr(pImage, pDescriptor, pDescriptor->uFirstDDB, pszKey,
1880 pszValQuoted);
1881 if (pszValQuoted)
1882 RTStrFree(pszValQuoted);
1883 return rc;
1884}
1885
1886static int vmdkDescDDBSetUuid(PVMDKIMAGE pImage, PVMDKDESCRIPTOR pDescriptor,
1887 const char *pszKey, PCRTUUID pUuid)
1888{
1889 char *pszUuid;
1890
1891 int rc = RTStrAPrintf(&pszUuid, "\"%RTuuid\"", pUuid);
1892 if (RT_FAILURE(rc))
1893 return rc;
1894 rc = vmdkDescSetStr(pImage, pDescriptor, pDescriptor->uFirstDDB, pszKey,
1895 pszUuid);
1896 RTStrFree(pszUuid);
1897 return rc;
1898}
1899
1900static int vmdkDescDDBSetU32(PVMDKIMAGE pImage, PVMDKDESCRIPTOR pDescriptor,
1901 const char *pszKey, uint32_t uValue)
1902{
1903 char *pszValue;
1904
1905 int rc = RTStrAPrintf(&pszValue, "\"%d\"", uValue);
1906 if (RT_FAILURE(rc))
1907 return rc;
1908 rc = vmdkDescSetStr(pImage, pDescriptor, pDescriptor->uFirstDDB, pszKey,
1909 pszValue);
1910 RTStrFree(pszValue);
1911 return rc;
1912}
1913
/**
 * Internal: split the raw descriptor text into lines and classify them.
 *
 * The buffer is modified in place: every LF (and a preceding CR) is replaced
 * with NUL, and pDescriptor->aLines[] is filled with pointers to the line
 * starts. Lines are then classified into the three descriptor sections
 * (header, extent list, disk database) with aNextLines[] chaining the
 * non-empty lines of each section; uFirstDesc/uFirstExtent/uFirstDDB record
 * each section's first line. The sections must appear in that order.
 *
 * @returns VBox status code; VERR_VD_VMDK_INVALID_HEADER for too many lines,
 *          bare CR line endings, a missing signature line or out-of-order
 *          sections.
 */
static int vmdkPreprocessDescriptor(PVMDKIMAGE pImage, char *pDescData,
                                    size_t cbDescData,
                                    PVMDKDESCRIPTOR pDescriptor)
{
    int rc = VINF_SUCCESS;
    unsigned cLine = 0, uLastNonEmptyLine = 0;
    char *pTmp = pDescData;

    pDescriptor->cbDescAlloc = cbDescData;
    /* Pass 1: split the buffer into NUL terminated lines. */
    while (*pTmp != '\0')
    {
        pDescriptor->aLines[cLine++] = pTmp;
        if (cLine >= VMDK_DESCRIPTOR_LINES_MAX)
        {
            rc = vmdkError(pImage, VERR_VD_VMDK_INVALID_HEADER, RT_SRC_POS, N_("VMDK: descriptor too big in '%s'"), pImage->pszFilename);
            goto out;
        }

        while (*pTmp != '\0' && *pTmp != '\n')
        {
            if (*pTmp == '\r')
            {
                /* Only CRLF is accepted; a lone CR is an error. */
                if (*(pTmp + 1) != '\n')
                {
                    rc = vmdkError(pImage, VERR_VD_VMDK_INVALID_HEADER, RT_SRC_POS, N_("VMDK: unsupported end of line in descriptor in '%s'"), pImage->pszFilename);
                    goto out;
                }
                else
                {
                    /* Get rid of CR character. */
                    *pTmp = '\0';
                }
            }
            pTmp++;
        }
        /* Get rid of LF character. */
        if (*pTmp == '\n')
        {
            *pTmp = '\0';
            pTmp++;
        }
    }
    pDescriptor->cLines = cLine;
    /* Pointer right after the end of the used part of the buffer. */
    pDescriptor->aLines[cLine] = pTmp;

    /* The first line must be the descriptor signature (two spellings occur
     * in the wild). */
    if (    strcmp(pDescriptor->aLines[0], "# Disk DescriptorFile")
        &&  strcmp(pDescriptor->aLines[0], "# Disk Descriptor File"))
    {
        rc = vmdkError(pImage, VERR_VD_VMDK_INVALID_HEADER, RT_SRC_POS, N_("VMDK: descriptor does not start as expected in '%s'"), pImage->pszFilename);
        goto out;
    }

    /* Initialize those, because we need to be able to reopen an image. */
    pDescriptor->uFirstDesc = 0;
    pDescriptor->uFirstExtent = 0;
    pDescriptor->uFirstDDB = 0;
    /* Pass 2: classify lines into sections, skipping comments and blanks. */
    for (unsigned i = 0; i < cLine; i++)
    {
        if (*pDescriptor->aLines[i] != '#' && *pDescriptor->aLines[i] != '\0')
        {
            if (    !strncmp(pDescriptor->aLines[i], "RW", 2)
                ||  !strncmp(pDescriptor->aLines[i], "RDONLY", 6)
                ||  !strncmp(pDescriptor->aLines[i], "NOACCESS", 8) )
            {
                /* An extent descriptor. */
                if (!pDescriptor->uFirstDesc || pDescriptor->uFirstDDB)
                {
                    /* Incorrect ordering of entries. */
                    rc = vmdkError(pImage, VERR_VD_VMDK_INVALID_HEADER, RT_SRC_POS, N_("VMDK: incorrect ordering of entries in descriptor in '%s'"), pImage->pszFilename);
                    goto out;
                }
                if (!pDescriptor->uFirstExtent)
                {
                    pDescriptor->uFirstExtent = i;
                    /* Reset the chain: each section is linked separately. */
                    uLastNonEmptyLine = 0;
                }
            }
            else if (!strncmp(pDescriptor->aLines[i], "ddb.", 4))
            {
                /* A disk database entry. */
                if (!pDescriptor->uFirstDesc || !pDescriptor->uFirstExtent)
                {
                    /* Incorrect ordering of entries. */
                    rc = vmdkError(pImage, VERR_VD_VMDK_INVALID_HEADER, RT_SRC_POS, N_("VMDK: incorrect ordering of entries in descriptor in '%s'"), pImage->pszFilename);
                    goto out;
                }
                if (!pDescriptor->uFirstDDB)
                {
                    pDescriptor->uFirstDDB = i;
                    uLastNonEmptyLine = 0;
                }
            }
            else
            {
                /* A normal entry. */
                if (pDescriptor->uFirstExtent || pDescriptor->uFirstDDB)
                {
                    /* Incorrect ordering of entries. */
                    rc = vmdkError(pImage, VERR_VD_VMDK_INVALID_HEADER, RT_SRC_POS, N_("VMDK: incorrect ordering of entries in descriptor in '%s'"), pImage->pszFilename);
                    goto out;
                }
                if (!pDescriptor->uFirstDesc)
                {
                    pDescriptor->uFirstDesc = i;
                    uLastNonEmptyLine = 0;
                }
            }
            /* Link this line to the previous non-empty line of the section. */
            if (uLastNonEmptyLine)
                pDescriptor->aNextLines[uLastNonEmptyLine] = i;
            uLastNonEmptyLine = i;
        }
    }

out:
    return rc;
}
2031
2032static int vmdkDescSetPCHSGeometry(PVMDKIMAGE pImage,
2033 PCPDMMEDIAGEOMETRY pPCHSGeometry)
2034{
2035 int rc = vmdkDescDDBSetU32(pImage, &pImage->Descriptor,
2036 VMDK_DDB_GEO_PCHS_CYLINDERS,
2037 pPCHSGeometry->cCylinders);
2038 if (RT_FAILURE(rc))
2039 return rc;
2040 rc = vmdkDescDDBSetU32(pImage, &pImage->Descriptor,
2041 VMDK_DDB_GEO_PCHS_HEADS,
2042 pPCHSGeometry->cHeads);
2043 if (RT_FAILURE(rc))
2044 return rc;
2045 rc = vmdkDescDDBSetU32(pImage, &pImage->Descriptor,
2046 VMDK_DDB_GEO_PCHS_SECTORS,
2047 pPCHSGeometry->cSectors);
2048 return rc;
2049}
2050
2051static int vmdkDescSetLCHSGeometry(PVMDKIMAGE pImage,
2052 PCPDMMEDIAGEOMETRY pLCHSGeometry)
2053{
2054 int rc = vmdkDescDDBSetU32(pImage, &pImage->Descriptor,
2055 VMDK_DDB_GEO_LCHS_CYLINDERS,
2056 pLCHSGeometry->cCylinders);
2057 if (RT_FAILURE(rc))
2058 return rc;
2059 rc = vmdkDescDDBSetU32(pImage, &pImage->Descriptor,
2060 VMDK_DDB_GEO_LCHS_HEADS,
2061
2062 pLCHSGeometry->cHeads);
2063 if (RT_FAILURE(rc))
2064 return rc;
2065 rc = vmdkDescDDBSetU32(pImage, &pImage->Descriptor,
2066 VMDK_DDB_GEO_LCHS_SECTORS,
2067 pLCHSGeometry->cSectors);
2068 return rc;
2069}
2070
2071static int vmdkCreateDescriptor(PVMDKIMAGE pImage, char *pDescData,
2072 size_t cbDescData, PVMDKDESCRIPTOR pDescriptor)
2073{
2074 int rc;
2075
2076 pDescriptor->uFirstDesc = 0;
2077 pDescriptor->uFirstExtent = 0;
2078 pDescriptor->uFirstDDB = 0;
2079 pDescriptor->cLines = 0;
2080 pDescriptor->cbDescAlloc = cbDescData;
2081 pDescriptor->fDirty = false;
2082 pDescriptor->aLines[pDescriptor->cLines] = pDescData;
2083 memset(pDescriptor->aNextLines, '\0', sizeof(pDescriptor->aNextLines));
2084
2085 rc = vmdkDescInitStr(pImage, pDescriptor, "# Disk DescriptorFile");
2086 if (RT_FAILURE(rc))
2087 goto out;
2088 rc = vmdkDescInitStr(pImage, pDescriptor, "version=1");
2089 if (RT_FAILURE(rc))
2090 goto out;
2091 pDescriptor->uFirstDesc = pDescriptor->cLines - 1;
2092 rc = vmdkDescInitStr(pImage, pDescriptor, "");
2093 if (RT_FAILURE(rc))
2094 goto out;
2095 rc = vmdkDescInitStr(pImage, pDescriptor, "# Extent description");
2096 if (RT_FAILURE(rc))
2097 goto out;
2098 rc = vmdkDescInitStr(pImage, pDescriptor, "NOACCESS 0 ZERO ");
2099 if (RT_FAILURE(rc))
2100 goto out;
2101 pDescriptor->uFirstExtent = pDescriptor->cLines - 1;
2102 rc = vmdkDescInitStr(pImage, pDescriptor, "");
2103 if (RT_FAILURE(rc))
2104 goto out;
2105 /* The trailing space is created by VMware, too. */
2106 rc = vmdkDescInitStr(pImage, pDescriptor, "# The disk Data Base ");
2107 if (RT_FAILURE(rc))
2108 goto out;
2109 rc = vmdkDescInitStr(pImage, pDescriptor, "#DDB");
2110 if (RT_FAILURE(rc))
2111 goto out;
2112 rc = vmdkDescInitStr(pImage, pDescriptor, "");
2113 if (RT_FAILURE(rc))
2114 goto out;
2115 rc = vmdkDescInitStr(pImage, pDescriptor, "ddb.virtualHWVersion = \"4\"");
2116 if (RT_FAILURE(rc))
2117 goto out;
2118 pDescriptor->uFirstDDB = pDescriptor->cLines - 1;
2119
2120 /* Now that the framework is in place, use the normal functions to insert
2121 * the remaining keys. */
2122 char szBuf[9];
2123 RTStrPrintf(szBuf, sizeof(szBuf), "%08x", RTRandU32());
2124 rc = vmdkDescSetStr(pImage, pDescriptor, pDescriptor->uFirstDesc,
2125 "CID", szBuf);
2126 if (RT_FAILURE(rc))
2127 goto out;
2128 rc = vmdkDescSetStr(pImage, pDescriptor, pDescriptor->uFirstDesc,
2129 "parentCID", "ffffffff");
2130 if (RT_FAILURE(rc))
2131 goto out;
2132
2133 rc = vmdkDescDDBSetStr(pImage, pDescriptor, "ddb.adapterType", "ide");
2134 if (RT_FAILURE(rc))
2135 goto out;
2136
2137out:
2138 return rc;
2139}
2140
/**
 * Internal: parse a complete descriptor: check the version, derive image
 * flags from the create type, parse all extent lines, read the CHS
 * geometry from the disk database and load (or create) the image UUIDs.
 *
 * @returns VBox status code.
 * @param   pImage      The VMDK image instance; receives flags, geometry,
 *                      UUIDs and (for non-monolithic images) the extent array.
 * @param   pDescData   Raw descriptor text (modified in place by the
 *                      preprocessing step).
 * @param   cbDescData  Size of the descriptor buffer in bytes.
 */
static int vmdkParseDescriptor(PVMDKIMAGE pImage, char *pDescData,
                               size_t cbDescData)
{
    int rc;
    unsigned cExtents;
    unsigned uLine;
    unsigned i;

    /* Split the raw text into lines and locate the three sections. */
    rc = vmdkPreprocessDescriptor(pImage, pDescData, cbDescData,
                                  &pImage->Descriptor);
    if (RT_FAILURE(rc))
        return rc;

    /* Check version, must be 1. */
    uint32_t uVersion;
    rc = vmdkDescBaseGetU32(&pImage->Descriptor, "version", &uVersion);
    if (RT_FAILURE(rc))
        return vmdkError(pImage, rc, RT_SRC_POS, N_("VMDK: error finding key 'version' in descriptor in '%s'"), pImage->pszFilename);
    if (uVersion != 1)
        return vmdkError(pImage, VERR_VD_VMDK_UNSUPPORTED_VERSION, RT_SRC_POS, N_("VMDK: unsupported format version in descriptor in '%s'"), pImage->pszFilename);

    /* Get image creation type and determine image flags. */
    const char *pszCreateType = NULL; /* initialized to make gcc shut up */
    rc = vmdkDescBaseGetStr(pImage, &pImage->Descriptor, "createType",
                            &pszCreateType);
    if (RT_FAILURE(rc))
        return vmdkError(pImage, rc, RT_SRC_POS, N_("VMDK: cannot get image type from descriptor in '%s'"), pImage->pszFilename);
    /* Unknown create types fall through with no extra flags set. */
    if (    !strcmp(pszCreateType, "twoGbMaxExtentSparse")
        ||  !strcmp(pszCreateType, "twoGbMaxExtentFlat"))
        pImage->uImageFlags |= VD_VMDK_IMAGE_FLAGS_SPLIT_2G;
    else if (   !strcmp(pszCreateType, "partitionedDevice")
             || !strcmp(pszCreateType, "fullDevice"))
        pImage->uImageFlags |= VD_VMDK_IMAGE_FLAGS_RAWDISK;
    else if (!strcmp(pszCreateType, "streamOptimized"))
        pImage->uImageFlags |= VD_VMDK_IMAGE_FLAGS_STREAM_OPTIMIZED;
    else if (!strcmp(pszCreateType, "vmfs"))
        pImage->uImageFlags |= VD_IMAGE_FLAGS_FIXED | VD_VMDK_IMAGE_FLAGS_ESX;
    RTStrFree((char *)(void *)pszCreateType);

    /* Count the number of extent config entries. */
    for (uLine = pImage->Descriptor.uFirstExtent, cExtents = 0;
         uLine != 0;
         uLine = pImage->Descriptor.aNextLines[uLine], cExtents++)
        /* nothing */;

    /* pDescData is only set for images with a separate descriptor file,
     * i.e. non-monolithic ones (see the callers). */
    if (!pImage->pDescData && cExtents != 1)
    {
        /* Monolithic image, must have only one extent (already opened). */
        return vmdkError(pImage, VERR_VD_VMDK_INVALID_HEADER, RT_SRC_POS, N_("VMDK: monolithic image may only have one extent in '%s'"), pImage->pszFilename);
    }

    if (pImage->pDescData)
    {
        /* Non-monolithic image, extents need to be allocated. */
        rc = vmdkCreateExtents(pImage, cExtents);
        if (RT_FAILURE(rc))
            return rc;
    }

    /* Parse each extent line. Grammar (space separated):
     *   <access> <size in sectors> <type> ["<basename>" [<offset>]] */
    for (i = 0, uLine = pImage->Descriptor.uFirstExtent;
         i < cExtents; i++, uLine = pImage->Descriptor.aNextLines[uLine])
    {
        char *pszLine = pImage->Descriptor.aLines[uLine];

        /* Access type of the extent. */
        if (!strncmp(pszLine, "RW", 2))
        {
            pImage->pExtents[i].enmAccess = VMDKACCESS_READWRITE;
            pszLine += 2;
        }
        else if (!strncmp(pszLine, "RDONLY", 6))
        {
            pImage->pExtents[i].enmAccess = VMDKACCESS_READONLY;
            pszLine += 6;
        }
        else if (!strncmp(pszLine, "NOACCESS", 8))
        {
            pImage->pExtents[i].enmAccess = VMDKACCESS_NOACCESS;
            pszLine += 8;
        }
        else
            return vmdkError(pImage, VERR_VD_VMDK_INVALID_HEADER, RT_SRC_POS, N_("VMDK: parse error in extent description in '%s'"), pImage->pszFilename);
        if (*pszLine++ != ' ')
            return vmdkError(pImage, VERR_VD_VMDK_INVALID_HEADER, RT_SRC_POS, N_("VMDK: parse error in extent description in '%s'"), pImage->pszFilename);

        /* Nominal size of the extent. */
        rc = RTStrToUInt64Ex(pszLine, &pszLine, 10,
                             &pImage->pExtents[i].cNominalSectors);
        if (RT_FAILURE(rc))
            return vmdkError(pImage, VERR_VD_VMDK_INVALID_HEADER, RT_SRC_POS, N_("VMDK: parse error in extent description in '%s'"), pImage->pszFilename);
        if (*pszLine++ != ' ')
            return vmdkError(pImage, VERR_VD_VMDK_INVALID_HEADER, RT_SRC_POS, N_("VMDK: parse error in extent description in '%s'"), pImage->pszFilename);

        /* Type of the extent. */
#ifdef VBOX_WITH_VMDK_ESX
        /** @todo Add the ESX extent types. Not necessary for now because
         * the ESX extent types are only used inside an ESX server. They are
         * automatically converted if the VMDK is exported. */
#endif /* VBOX_WITH_VMDK_ESX */
        if (!strncmp(pszLine, "SPARSE", 6))
        {
            pImage->pExtents[i].enmType = VMDKETYPE_HOSTED_SPARSE;
            pszLine += 6;
        }
        else if (!strncmp(pszLine, "FLAT", 4))
        {
            pImage->pExtents[i].enmType = VMDKETYPE_FLAT;
            pszLine += 4;
        }
        else if (!strncmp(pszLine, "ZERO", 4))
        {
            pImage->pExtents[i].enmType = VMDKETYPE_ZERO;
            pszLine += 4;
        }
        else if (!strncmp(pszLine, "VMFS", 4))
        {
            pImage->pExtents[i].enmType = VMDKETYPE_VMFS;
            pszLine += 4;
        }
        else
            return vmdkError(pImage, VERR_VD_VMDK_INVALID_HEADER, RT_SRC_POS, N_("VMDK: parse error in extent description in '%s'"), pImage->pszFilename);

        if (pImage->pExtents[i].enmType == VMDKETYPE_ZERO)
        {
            /* This one has no basename or offset. */
            if (*pszLine == ' ')
                pszLine++;
            if (*pszLine != '\0')
                return vmdkError(pImage, VERR_VD_VMDK_INVALID_HEADER, RT_SRC_POS, N_("VMDK: parse error in extent description in '%s'"), pImage->pszFilename);
            pImage->pExtents[i].pszBasename = NULL;
        }
        else
        {
            /* All other extent types have basename and optional offset. */
            if (*pszLine++ != ' ')
                return vmdkError(pImage, VERR_VD_VMDK_INVALID_HEADER, RT_SRC_POS, N_("VMDK: parse error in extent description in '%s'"), pImage->pszFilename);

            /* Basename of the image. Surrounded by quotes. */
            char *pszBasename;
            rc = vmdkStringUnquote(pImage, pszLine, &pszBasename, &pszLine);
            if (RT_FAILURE(rc))
                return rc;
            pImage->pExtents[i].pszBasename = pszBasename;
            if (*pszLine == ' ')
            {
                pszLine++;
                if (*pszLine != '\0')
                {
                    /* Optional offset in extent specified. */
                    rc = RTStrToUInt64Ex(pszLine, &pszLine, 10,
                                         &pImage->pExtents[i].uSectorOffset);
                    if (RT_FAILURE(rc))
                        return vmdkError(pImage, VERR_VD_VMDK_INVALID_HEADER, RT_SRC_POS, N_("VMDK: parse error in extent description in '%s'"), pImage->pszFilename);
                }
            }

            if (*pszLine != '\0')
                return vmdkError(pImage, VERR_VD_VMDK_INVALID_HEADER, RT_SRC_POS, N_("VMDK: parse error in extent description in '%s'"), pImage->pszFilename);
        }
    }

    /* Determine PCHS geometry (autogenerate if necessary). */
    rc = vmdkDescDDBGetU32(pImage, &pImage->Descriptor,
                           VMDK_DDB_GEO_PCHS_CYLINDERS,
                           &pImage->PCHSGeometry.cCylinders);
    if (rc == VERR_VD_VMDK_VALUE_NOT_FOUND)
        pImage->PCHSGeometry.cCylinders = 0;
    else if (RT_FAILURE(rc))
        return vmdkError(pImage, rc, RT_SRC_POS, N_("VMDK: error getting PCHS geometry from extent description in '%s'"), pImage->pszFilename);
    rc = vmdkDescDDBGetU32(pImage, &pImage->Descriptor,
                           VMDK_DDB_GEO_PCHS_HEADS,
                           &pImage->PCHSGeometry.cHeads);
    if (rc == VERR_VD_VMDK_VALUE_NOT_FOUND)
        pImage->PCHSGeometry.cHeads = 0;
    else if (RT_FAILURE(rc))
        return vmdkError(pImage, rc, RT_SRC_POS, N_("VMDK: error getting PCHS geometry from extent description in '%s'"), pImage->pszFilename);
    rc = vmdkDescDDBGetU32(pImage, &pImage->Descriptor,
                           VMDK_DDB_GEO_PCHS_SECTORS,
                           &pImage->PCHSGeometry.cSectors);
    if (rc == VERR_VD_VMDK_VALUE_NOT_FOUND)
        pImage->PCHSGeometry.cSectors = 0;
    else if (RT_FAILURE(rc))
        return vmdkError(pImage, rc, RT_SRC_POS, N_("VMDK: error getting PCHS geometry from extent description in '%s'"), pImage->pszFilename);
    /* 16 heads / 63 sectors are the classic ATA translation limits. */
    if (    pImage->PCHSGeometry.cCylinders == 0
        ||  pImage->PCHSGeometry.cHeads == 0
        ||  pImage->PCHSGeometry.cHeads > 16
        ||  pImage->PCHSGeometry.cSectors == 0
        ||  pImage->PCHSGeometry.cSectors > 63)
    {
        /* Mark PCHS geometry as not yet valid (can't do the calculation here
         * as the total image size isn't known yet). */
        pImage->PCHSGeometry.cCylinders = 0;
        pImage->PCHSGeometry.cHeads = 16;
        pImage->PCHSGeometry.cSectors = 63;
    }

    /* Determine LCHS geometry (set to 0 if not specified). */
    rc = vmdkDescDDBGetU32(pImage, &pImage->Descriptor,
                           VMDK_DDB_GEO_LCHS_CYLINDERS,
                           &pImage->LCHSGeometry.cCylinders);
    if (rc == VERR_VD_VMDK_VALUE_NOT_FOUND)
        pImage->LCHSGeometry.cCylinders = 0;
    else if (RT_FAILURE(rc))
        return vmdkError(pImage, rc, RT_SRC_POS, N_("VMDK: error getting LCHS geometry from extent description in '%s'"), pImage->pszFilename);
    rc = vmdkDescDDBGetU32(pImage, &pImage->Descriptor,
                           VMDK_DDB_GEO_LCHS_HEADS,
                           &pImage->LCHSGeometry.cHeads);
    if (rc == VERR_VD_VMDK_VALUE_NOT_FOUND)
        pImage->LCHSGeometry.cHeads = 0;
    else if (RT_FAILURE(rc))
        return vmdkError(pImage, rc, RT_SRC_POS, N_("VMDK: error getting LCHS geometry from extent description in '%s'"), pImage->pszFilename);
    rc = vmdkDescDDBGetU32(pImage, &pImage->Descriptor,
                           VMDK_DDB_GEO_LCHS_SECTORS,
                           &pImage->LCHSGeometry.cSectors);
    if (rc == VERR_VD_VMDK_VALUE_NOT_FOUND)
        pImage->LCHSGeometry.cSectors = 0;
    else if (RT_FAILURE(rc))
        return vmdkError(pImage, rc, RT_SRC_POS, N_("VMDK: error getting LCHS geometry from extent description in '%s'"), pImage->pszFilename);
    /* LCHS geometry is all-or-nothing: a partial specification is cleared. */
    if (    pImage->LCHSGeometry.cCylinders == 0
        ||  pImage->LCHSGeometry.cHeads == 0
        ||  pImage->LCHSGeometry.cSectors == 0)
    {
        pImage->LCHSGeometry.cCylinders = 0;
        pImage->LCHSGeometry.cHeads = 0;
        pImage->LCHSGeometry.cSectors = 0;
    }

    /* Get image UUID. */
    rc = vmdkDescDDBGetUuid(pImage, &pImage->Descriptor, VMDK_DDB_IMAGE_UUID,
                            &pImage->ImageUuid);
    if (rc == VERR_VD_VMDK_VALUE_NOT_FOUND)
    {
        /* Image without UUID. Probably created by VMware and not yet used
         * by VirtualBox. Can only be added for images opened in read/write
         * mode, so don't bother producing a sensible UUID otherwise. */
        if (pImage->uOpenFlags & VD_OPEN_FLAGS_READONLY)
            RTUuidClear(&pImage->ImageUuid);
        else
        {
            rc = RTUuidCreate(&pImage->ImageUuid);
            if (RT_FAILURE(rc))
                return rc;
            rc = vmdkDescDDBSetUuid(pImage, &pImage->Descriptor,
                                    VMDK_DDB_IMAGE_UUID, &pImage->ImageUuid);
            if (RT_FAILURE(rc))
                return vmdkError(pImage, rc, RT_SRC_POS, N_("VMDK: error storing image UUID in descriptor in '%s'"), pImage->pszFilename);
        }
    }
    else if (RT_FAILURE(rc))
        return rc;

    /* Get image modification UUID. */
    rc = vmdkDescDDBGetUuid(pImage, &pImage->Descriptor,
                            VMDK_DDB_MODIFICATION_UUID,
                            &pImage->ModificationUuid);
    if (rc == VERR_VD_VMDK_VALUE_NOT_FOUND)
    {
        /* Image without UUID. Probably created by VMware and not yet used
         * by VirtualBox. Can only be added for images opened in read/write
         * mode, so don't bother producing a sensible UUID otherwise. */
        if (pImage->uOpenFlags & VD_OPEN_FLAGS_READONLY)
            RTUuidClear(&pImage->ModificationUuid);
        else
        {
            rc = RTUuidCreate(&pImage->ModificationUuid);
            if (RT_FAILURE(rc))
                return rc;
            rc = vmdkDescDDBSetUuid(pImage, &pImage->Descriptor,
                                    VMDK_DDB_MODIFICATION_UUID,
                                    &pImage->ModificationUuid);
            if (RT_FAILURE(rc))
                return vmdkError(pImage, rc, RT_SRC_POS, N_("VMDK: error storing image modification UUID in descriptor in '%s'"), pImage->pszFilename);
        }
    }
    else if (RT_FAILURE(rc))
        return rc;

    /* Get UUID of parent image. */
    rc = vmdkDescDDBGetUuid(pImage, &pImage->Descriptor, VMDK_DDB_PARENT_UUID,
                            &pImage->ParentUuid);
    if (rc == VERR_VD_VMDK_VALUE_NOT_FOUND)
    {
        /* Image without UUID. Probably created by VMware and not yet used
         * by VirtualBox. Can only be added for images opened in read/write
         * mode, so don't bother producing a sensible UUID otherwise. */
        if (pImage->uOpenFlags & VD_OPEN_FLAGS_READONLY)
            RTUuidClear(&pImage->ParentUuid);
        else
        {
            /* Note: deliberately Clear, not Create — a missing parent UUID
             * means "no parent", which is represented by the nil UUID. */
            rc = RTUuidClear(&pImage->ParentUuid);
            if (RT_FAILURE(rc))
                return rc;
            rc = vmdkDescDDBSetUuid(pImage, &pImage->Descriptor,
                                    VMDK_DDB_PARENT_UUID, &pImage->ParentUuid);
            if (RT_FAILURE(rc))
                return vmdkError(pImage, rc, RT_SRC_POS, N_("VMDK: error storing parent UUID in descriptor in '%s'"), pImage->pszFilename);
        }
    }
    else if (RT_FAILURE(rc))
        return rc;

    /* Get parent image modification UUID. */
    rc = vmdkDescDDBGetUuid(pImage, &pImage->Descriptor,
                            VMDK_DDB_PARENT_MODIFICATION_UUID,
                            &pImage->ParentModificationUuid);
    if (rc == VERR_VD_VMDK_VALUE_NOT_FOUND)
    {
        /* Image without UUID. Probably created by VMware and not yet used
         * by VirtualBox. Can only be added for images opened in read/write
         * mode, so don't bother producing a sensible UUID otherwise. */
        if (pImage->uOpenFlags & VD_OPEN_FLAGS_READONLY)
            RTUuidClear(&pImage->ParentModificationUuid);
        else
        {
            rc = RTUuidCreate(&pImage->ParentModificationUuid);
            if (RT_FAILURE(rc))
                return rc;
            rc = vmdkDescDDBSetUuid(pImage, &pImage->Descriptor,
                                    VMDK_DDB_PARENT_MODIFICATION_UUID,
                                    &pImage->ParentModificationUuid);
            if (RT_FAILURE(rc))
                return vmdkError(pImage, rc, RT_SRC_POS, N_("VMDK: error storing parent modification UUID in descriptor in '%s'"), pImage->pszFilename);
        }
    }
    else if (RT_FAILURE(rc))
        return rc;

    return VINF_SUCCESS;
}
2470
2471/**
2472 * Internal: write/update the descriptor part of the image.
2473 */
2474static int vmdkWriteDescriptor(PVMDKIMAGE pImage)
2475{
2476 int rc = VINF_SUCCESS;
2477 uint64_t cbLimit;
2478 uint64_t uOffset;
2479 PVMDKFILE pDescFile;
2480
2481 if (pImage->pDescData)
2482 {
2483 /* Separate descriptor file. */
2484 uOffset = 0;
2485 cbLimit = 0;
2486 pDescFile = pImage->pFile;
2487 }
2488 else
2489 {
2490 /* Embedded descriptor file. */
2491 uOffset = VMDK_SECTOR2BYTE(pImage->pExtents[0].uDescriptorSector);
2492 cbLimit = VMDK_SECTOR2BYTE(pImage->pExtents[0].cDescriptorSectors);
2493 pDescFile = pImage->pExtents[0].pFile;
2494 }
2495 /* Bail out if there is no file to write to. */
2496 if (pDescFile == NULL)
2497 return VERR_INVALID_PARAMETER;
2498
2499 /*
2500 * Allocate temporary descriptor buffer.
2501 * In case there is no limit allocate a default
2502 * and increase if required.
2503 */
2504 size_t cbDescriptor = cbLimit ? cbLimit : 4 * _1K;
2505 char *pszDescriptor = (char *)RTMemAllocZ(cbDescriptor);
2506 unsigned offDescriptor = 0;
2507
2508 if (!pszDescriptor)
2509 return VERR_NO_MEMORY;
2510
2511 for (unsigned i = 0; i < pImage->Descriptor.cLines; i++)
2512 {
2513 const char *psz = pImage->Descriptor.aLines[i];
2514 size_t cb = strlen(psz);
2515
2516 /*
2517 * Increase the descriptor if there is no limit and
2518 * there is not enough room left for this line.
2519 */
2520 if (offDescriptor + cb + 1 > cbDescriptor)
2521 {
2522 if (cbLimit)
2523 {
2524 rc = vmdkError(pImage, VERR_BUFFER_OVERFLOW, RT_SRC_POS, N_("VMDK: descriptor too long in '%s'"), pImage->pszFilename);
2525 break;
2526 }
2527 else
2528 {
2529 char *pszDescriptorNew = NULL;
2530 LogFlow(("Increasing descriptor cache\n"));
2531
2532 pszDescriptorNew = (char *)RTMemRealloc(pszDescriptor, cbDescriptor + cb + 4 * _1K);
2533 if (!pszDescriptorNew)
2534 {
2535 rc = VERR_NO_MEMORY;
2536 break;
2537 }
2538 pszDescriptorNew = pszDescriptor;
2539 cbDescriptor += cb + 4 * _1K;
2540 }
2541 }
2542
2543 if (cb > 0)
2544 {
2545 memcpy(pszDescriptor + offDescriptor, psz, cb);
2546 offDescriptor += cb;
2547 }
2548
2549 memcpy(pszDescriptor + offDescriptor, "\n", 1);
2550 offDescriptor++;
2551 }
2552
2553 if (RT_SUCCESS(rc))
2554 {
2555 rc = vmdkFileWriteAt(pDescFile, uOffset, pszDescriptor, cbLimit ? cbLimit : offDescriptor, NULL);
2556 if (RT_FAILURE(rc))
2557 rc = vmdkError(pImage, rc, RT_SRC_POS, N_("VMDK: error writing descriptor in '%s'"), pImage->pszFilename);
2558 }
2559
2560 if (RT_SUCCESS(rc) && !cbLimit)
2561 {
2562 rc = vmdkFileSetSize(pDescFile, offDescriptor);
2563 if (RT_FAILURE(rc))
2564 rc = vmdkError(pImage, rc, RT_SRC_POS, N_("VMDK: error truncating descriptor in '%s'"), pImage->pszFilename);
2565 }
2566
2567 if (RT_SUCCESS(rc))
2568 pImage->Descriptor.fDirty = false;
2569
2570 RTMemFree(pszDescriptor);
2571 return rc;
2572}
2573
2574/**
2575 * Internal: write/update the descriptor part of the image - async version.
2576 */
2577static int vmdkWriteDescriptorAsync(PVMDKIMAGE pImage, PVDIOCTX pIoCtx)
2578{
2579 int rc = VINF_SUCCESS;
2580 uint64_t cbLimit;
2581 uint64_t uOffset;
2582 PVMDKFILE pDescFile;
2583
2584 if (pImage->pDescData)
2585 {
2586 /* Separate descriptor file. */
2587 uOffset = 0;
2588 cbLimit = 0;
2589 pDescFile = pImage->pFile;
2590 }
2591 else
2592 {
2593 /* Embedded descriptor file. */
2594 uOffset = VMDK_SECTOR2BYTE(pImage->pExtents[0].uDescriptorSector);
2595 cbLimit = VMDK_SECTOR2BYTE(pImage->pExtents[0].cDescriptorSectors);
2596 pDescFile = pImage->pExtents[0].pFile;
2597 }
2598 /* Bail out if there is no file to write to. */
2599 if (pDescFile == NULL)
2600 return VERR_INVALID_PARAMETER;
2601
2602 /*
2603 * Allocate temporary descriptor buffer.
2604 * In case there is no limit allocate a default
2605 * and increase if required.
2606 */
2607 size_t cbDescriptor = cbLimit ? cbLimit : 4 * _1K;
2608 char *pszDescriptor = (char *)RTMemAllocZ(cbDescriptor);
2609 unsigned offDescriptor = 0;
2610
2611 if (!pszDescriptor)
2612 return VERR_NO_MEMORY;
2613
2614 for (unsigned i = 0; i < pImage->Descriptor.cLines; i++)
2615 {
2616 const char *psz = pImage->Descriptor.aLines[i];
2617 size_t cb = strlen(psz);
2618
2619 /*
2620 * Increase the descriptor if there is no limit and
2621 * there is not enough room left for this line.
2622 */
2623 if (offDescriptor + cb + 1 > cbDescriptor)
2624 {
2625 if (cbLimit)
2626 {
2627 rc = vmdkError(pImage, VERR_BUFFER_OVERFLOW, RT_SRC_POS, N_("VMDK: descriptor too long in '%s'"), pImage->pszFilename);
2628 break;
2629 }
2630 else
2631 {
2632 char *pszDescriptorNew = NULL;
2633 LogFlow(("Increasing descriptor cache\n"));
2634
2635 pszDescriptorNew = (char *)RTMemRealloc(pszDescriptor, cbDescriptor + cb + 4 * _1K);
2636 if (!pszDescriptorNew)
2637 {
2638 rc = VERR_NO_MEMORY;
2639 break;
2640 }
2641 pszDescriptorNew = pszDescriptor;
2642 cbDescriptor += cb + 4 * _1K;
2643 }
2644 }
2645
2646 if (cb > 0)
2647 {
2648 memcpy(pszDescriptor + offDescriptor, psz, cb);
2649 offDescriptor += cb;
2650 }
2651
2652 memcpy(pszDescriptor + offDescriptor, "\n", 1);
2653 offDescriptor++;
2654 }
2655
2656 if (RT_SUCCESS(rc))
2657 {
2658 rc = vmdkFileWriteAt(pDescFile, uOffset, pszDescriptor, cbLimit ? cbLimit : offDescriptor, NULL);
2659 if (RT_FAILURE(rc))
2660 rc = vmdkError(pImage, rc, RT_SRC_POS, N_("VMDK: error writing descriptor in '%s'"), pImage->pszFilename);
2661 }
2662
2663 if (RT_SUCCESS(rc) && !cbLimit)
2664 {
2665 rc = vmdkFileSetSize(pDescFile, offDescriptor);
2666 if (RT_FAILURE(rc))
2667 rc = vmdkError(pImage, rc, RT_SRC_POS, N_("VMDK: error truncating descriptor in '%s'"), pImage->pszFilename);
2668 }
2669
2670 if (RT_SUCCESS(rc))
2671 pImage->Descriptor.fDirty = false;
2672
2673 RTMemFree(pszDescriptor);
2674 return rc;
2675
2676}
2677
2678/**
2679 * Internal: validate the consistency check values in a binary header.
2680 */
2681static int vmdkValidateHeader(PVMDKIMAGE pImage, PVMDKEXTENT pExtent, const SparseExtentHeader *pHeader)
2682{
2683 int rc = VINF_SUCCESS;
2684 if (RT_LE2H_U32(pHeader->magicNumber) != VMDK_SPARSE_MAGICNUMBER)
2685 {
2686 rc = vmdkError(pImage, VERR_VD_VMDK_INVALID_HEADER, RT_SRC_POS, N_("VMDK: incorrect magic in sparse extent header in '%s'"), pExtent->pszFullname);
2687 return rc;
2688 }
2689 if (RT_LE2H_U32(pHeader->version) != 1 && RT_LE2H_U32(pHeader->version) != 3)
2690 {
2691 rc = vmdkError(pImage, VERR_VD_VMDK_UNSUPPORTED_VERSION, RT_SRC_POS, N_("VMDK: incorrect version in sparse extent header in '%s', not a VMDK 1.0/1.1 conforming file"), pExtent->pszFullname);
2692 return rc;
2693 }
2694 if ( (RT_LE2H_U32(pHeader->flags) & 1)
2695 && ( pHeader->singleEndLineChar != '\n'
2696 || pHeader->nonEndLineChar != ' '
2697 || pHeader->doubleEndLineChar1 != '\r'
2698 || pHeader->doubleEndLineChar2 != '\n') )
2699 {
2700 rc = vmdkError(pImage, VERR_VD_VMDK_INVALID_HEADER, RT_SRC_POS, N_("VMDK: corrupted by CR/LF translation in '%s'"), pExtent->pszFullname);
2701 return rc;
2702 }
2703 return rc;
2704}
2705
2706/**
2707 * Internal: read metadata belonging to an extent with binary header, i.e.
2708 * as found in monolithic files.
2709 */
2710static int vmdkReadBinaryMetaExtent(PVMDKIMAGE pImage, PVMDKEXTENT pExtent)
2711{
2712 SparseExtentHeader Header;
2713 uint64_t cSectorsPerGDE;
2714
2715 int rc = vmdkFileReadAt(pExtent->pFile, 0, &Header, sizeof(Header), NULL);
2716 AssertRC(rc);
2717 if (RT_FAILURE(rc))
2718 {
2719 rc = vmdkError(pImage, rc, RT_SRC_POS, N_("VMDK: error reading extent header in '%s'"), pExtent->pszFullname);
2720 goto out;
2721 }
2722 rc = vmdkValidateHeader(pImage, pExtent, &Header);
2723 if (RT_FAILURE(rc))
2724 goto out;
2725 if ( RT_LE2H_U32(Header.flags & RT_BIT(17))
2726 && RT_LE2H_U64(Header.gdOffset) == VMDK_GD_AT_END)
2727 {
2728 /* Read the footer, which isn't compressed and comes before the
2729 * end-of-stream marker. This is bending the VMDK 1.1 spec, but that's
2730 * VMware reality. Theory and practice have very little in common. */
2731 uint64_t cbSize;
2732 rc = vmdkFileGetSize(pExtent->pFile, &cbSize);
2733 AssertRC(rc);
2734 if (RT_FAILURE(rc))
2735 {
2736 rc = vmdkError(pImage, rc, RT_SRC_POS, N_("VMDK: cannot get size of '%s'"), pExtent->pszFullname);
2737 goto out;
2738 }
2739 cbSize = RT_ALIGN_64(cbSize, 512);
2740 rc = vmdkFileReadAt(pExtent->pFile, cbSize - 2*512, &Header, sizeof(Header), NULL);
2741 AssertRC(rc);
2742 if (RT_FAILURE(rc))
2743 {
2744 rc = vmdkError(pImage, rc, RT_SRC_POS, N_("VMDK: error reading extent footer in '%s'"), pExtent->pszFullname);
2745 goto out;
2746 }
2747 rc = vmdkValidateHeader(pImage, pExtent, &Header);
2748 if (RT_FAILURE(rc))
2749 goto out;
2750 pExtent->fFooter = true;
2751 }
2752 pExtent->uVersion = RT_LE2H_U32(Header.version);
2753 pExtent->enmType = VMDKETYPE_HOSTED_SPARSE; /* Just dummy value, changed later. */
2754 pExtent->cSectors = RT_LE2H_U64(Header.capacity);
2755 pExtent->cSectorsPerGrain = RT_LE2H_U64(Header.grainSize);
2756 pExtent->uDescriptorSector = RT_LE2H_U64(Header.descriptorOffset);
2757 pExtent->cDescriptorSectors = RT_LE2H_U64(Header.descriptorSize);
2758 if (pExtent->uDescriptorSector && !pExtent->cDescriptorSectors)
2759 {
2760 rc = vmdkError(pImage, VERR_VD_VMDK_INVALID_HEADER, RT_SRC_POS, N_("VMDK: inconsistent embedded descriptor config in '%s'"), pExtent->pszFullname);
2761 goto out;
2762 }
2763 pExtent->cGTEntries = RT_LE2H_U32(Header.numGTEsPerGT);
2764 if (RT_LE2H_U32(Header.flags) & RT_BIT(1))
2765 {
2766 pExtent->uSectorRGD = RT_LE2H_U64(Header.rgdOffset);
2767 pExtent->uSectorGD = RT_LE2H_U64(Header.gdOffset);
2768 }
2769 else
2770 {
2771 pExtent->uSectorGD = RT_LE2H_U64(Header.gdOffset);
2772 pExtent->uSectorRGD = 0;
2773 }
2774 if (pExtent->uSectorGD == VMDK_GD_AT_END || pExtent->uSectorRGD == VMDK_GD_AT_END)
2775 {
2776 rc = vmdkError(pImage, VERR_VD_VMDK_INVALID_HEADER, RT_SRC_POS, N_("VMDK: cannot resolve grain directory offset in '%s'"), pExtent->pszFullname);
2777 goto out;
2778 }
2779 pExtent->cOverheadSectors = RT_LE2H_U64(Header.overHead);
2780 pExtent->fUncleanShutdown = !!Header.uncleanShutdown;
2781 pExtent->uCompression = RT_LE2H_U16(Header.compressAlgorithm);
2782 cSectorsPerGDE = pExtent->cGTEntries * pExtent->cSectorsPerGrain;
2783 if (!cSectorsPerGDE || cSectorsPerGDE > UINT32_MAX)
2784 {
2785 rc = vmdkError(pImage, VERR_VD_VMDK_INVALID_HEADER, RT_SRC_POS, N_("VMDK: incorrect grain directory size in '%s'"), pExtent->pszFullname);
2786 goto out;
2787 }
2788 pExtent->cSectorsPerGDE = cSectorsPerGDE;
2789 pExtent->cGDEntries = (pExtent->cSectors + cSectorsPerGDE - 1) / cSectorsPerGDE;
2790
2791 /* Fix up the number of descriptor sectors, as some flat images have
2792 * really just one, and this causes failures when inserting the UUID
2793 * values and other extra information. */
2794 if (pExtent->cDescriptorSectors != 0 && pExtent->cDescriptorSectors < 4)
2795 {
2796 /* Do it the easy way - just fix it for flat images which have no
2797 * other complicated metadata which needs space too. */
2798 if ( pExtent->uDescriptorSector + 4 < pExtent->cOverheadSectors
2799 && pExtent->cGTEntries * pExtent->cGDEntries == 0)
2800 pExtent->cDescriptorSectors = 4;
2801 }
2802
2803out:
2804 if (RT_FAILURE(rc))
2805 vmdkFreeExtentData(pImage, pExtent, false);
2806
2807 return rc;
2808}
2809
2810/**
2811 * Internal: read additional metadata belonging to an extent. For those
2812 * extents which have no additional metadata just verify the information.
2813 */
2814static int vmdkReadMetaExtent(PVMDKIMAGE pImage, PVMDKEXTENT pExtent)
2815{
2816 int rc = VINF_SUCCESS;
2817 uint64_t cbExtentSize;
2818
2819 /* The image must be a multiple of a sector in size and contain the data
2820 * area (flat images only). If not, it means the image is at least
2821 * truncated, or even seriously garbled. */
2822 rc = vmdkFileGetSize(pExtent->pFile, &cbExtentSize);
2823 if (RT_FAILURE(rc))
2824 {
2825 rc = vmdkError(pImage, rc, RT_SRC_POS, N_("VMDK: error getting size in '%s'"), pExtent->pszFullname);
2826 goto out;
2827 }
2828/* disabled the size check again as there are too many too short vmdks out there */
2829#ifdef VBOX_WITH_VMDK_STRICT_SIZE_CHECK
2830 if ( cbExtentSize != RT_ALIGN_64(cbExtentSize, 512)
2831 && (pExtent->enmType != VMDKETYPE_FLAT || pExtent->cNominalSectors + pExtent->uSectorOffset > VMDK_BYTE2SECTOR(cbExtentSize)))
2832 {
2833 rc = vmdkError(pImage, VERR_VD_VMDK_INVALID_HEADER, RT_SRC_POS, N_("VMDK: file size is not a multiple of 512 in '%s', file is truncated or otherwise garbled"), pExtent->pszFullname);
2834 goto out;
2835 }
2836#endif /* VBOX_WITH_VMDK_STRICT_SIZE_CHECK */
2837 if (pExtent->enmType != VMDKETYPE_HOSTED_SPARSE)
2838 goto out;
2839
2840 /* The spec says that this must be a power of two and greater than 8,
2841 * but probably they meant not less than 8. */
2842 if ( (pExtent->cSectorsPerGrain & (pExtent->cSectorsPerGrain - 1))
2843 || pExtent->cSectorsPerGrain < 8)
2844 {
2845 rc = vmdkError(pImage, VERR_VD_VMDK_INVALID_HEADER, RT_SRC_POS, N_("VMDK: invalid extent grain size %u in '%s'"), pExtent->cSectorsPerGrain, pExtent->pszFullname);
2846 goto out;
2847 }
2848
2849 /* This code requires that a grain table must hold a power of two multiple
2850 * of the number of entries per GT cache entry. */
2851 if ( (pExtent->cGTEntries & (pExtent->cGTEntries - 1))
2852 || pExtent->cGTEntries < VMDK_GT_CACHELINE_SIZE)
2853 {
2854 rc = vmdkError(pImage, VERR_VD_VMDK_INVALID_HEADER, RT_SRC_POS, N_("VMDK: grain table cache size problem in '%s'"), pExtent->pszFullname);
2855 goto out;
2856 }
2857
2858 rc = vmdkReadGrainDirectory(pExtent);
2859
2860out:
2861 if (RT_FAILURE(rc))
2862 vmdkFreeExtentData(pImage, pExtent, false);
2863
2864 return rc;
2865}
2866
2867/**
2868 * Internal: write/update the metadata for a sparse extent.
2869 */
2870static int vmdkWriteMetaSparseExtent(PVMDKEXTENT pExtent, uint64_t uOffset)
2871{
2872 SparseExtentHeader Header;
2873
2874 memset(&Header, '\0', sizeof(Header));
2875 Header.magicNumber = RT_H2LE_U32(VMDK_SPARSE_MAGICNUMBER);
2876 Header.version = RT_H2LE_U32(pExtent->uVersion);
2877 Header.flags = RT_H2LE_U32(RT_BIT(0));
2878 if (pExtent->pRGD)
2879 Header.flags |= RT_H2LE_U32(RT_BIT(1));
2880 if (pExtent->pImage->uImageFlags & VD_VMDK_IMAGE_FLAGS_STREAM_OPTIMIZED)
2881 Header.flags |= RT_H2LE_U32(RT_BIT(16) | RT_BIT(17));
2882 Header.capacity = RT_H2LE_U64(pExtent->cSectors);
2883 Header.grainSize = RT_H2LE_U64(pExtent->cSectorsPerGrain);
2884 Header.descriptorOffset = RT_H2LE_U64(pExtent->uDescriptorSector);
2885 Header.descriptorSize = RT_H2LE_U64(pExtent->cDescriptorSectors);
2886 Header.numGTEsPerGT = RT_H2LE_U32(pExtent->cGTEntries);
2887 if (pExtent->fFooter && uOffset == 0)
2888 {
2889 if (pExtent->pRGD)
2890 {
2891 Assert(pExtent->uSectorRGD);
2892 Header.rgdOffset = RT_H2LE_U64(VMDK_GD_AT_END);
2893 Header.gdOffset = RT_H2LE_U64(VMDK_GD_AT_END);
2894 }
2895 else
2896 {
2897 Header.gdOffset = RT_H2LE_U64(VMDK_GD_AT_END);
2898 }
2899 }
2900 else
2901 {
2902 if (pExtent->pRGD)
2903 {
2904 Assert(pExtent->uSectorRGD);
2905 Header.rgdOffset = RT_H2LE_U64(pExtent->uSectorRGD);
2906 Header.gdOffset = RT_H2LE_U64(pExtent->uSectorGD);
2907 }
2908 else
2909 {
2910 Header.gdOffset = RT_H2LE_U64(pExtent->uSectorGD);
2911 }
2912 }
2913 Header.overHead = RT_H2LE_U64(pExtent->cOverheadSectors);
2914 Header.uncleanShutdown = pExtent->fUncleanShutdown;
2915 Header.singleEndLineChar = '\n';
2916 Header.nonEndLineChar = ' ';
2917 Header.doubleEndLineChar1 = '\r';
2918 Header.doubleEndLineChar2 = '\n';
2919 Header.compressAlgorithm = RT_H2LE_U16(pExtent->uCompression);
2920
2921 int rc = vmdkFileWriteAt(pExtent->pFile, uOffset, &Header, sizeof(Header), NULL);
2922 AssertRC(rc);
2923 if (RT_FAILURE(rc))
2924 rc = vmdkError(pExtent->pImage, rc, RT_SRC_POS, N_("VMDK: error writing extent header in '%s'"), pExtent->pszFullname);
2925 return rc;
2926}
2927
2928/**
2929 * Internal: write/update the metadata for a sparse extent - async version.
2930 */
2931static int vmdkWriteMetaSparseExtentAsync(PVMDKIMAGE pImage, PVMDKEXTENT pExtent,
2932 uint64_t uOffset, PVDIOCTX pIoCtx)
2933{
2934 SparseExtentHeader Header;
2935
2936 memset(&Header, '\0', sizeof(Header));
2937 Header.magicNumber = RT_H2LE_U32(VMDK_SPARSE_MAGICNUMBER);
2938 Header.version = RT_H2LE_U32(pExtent->uVersion);
2939 Header.flags = RT_H2LE_U32(RT_BIT(0));
2940 if (pExtent->pRGD)
2941 Header.flags |= RT_H2LE_U32(RT_BIT(1));
2942 if (pExtent->pImage->uImageFlags & VD_VMDK_IMAGE_FLAGS_STREAM_OPTIMIZED)
2943 Header.flags |= RT_H2LE_U32(RT_BIT(16) | RT_BIT(17));
2944 Header.capacity = RT_H2LE_U64(pExtent->cSectors);
2945 Header.grainSize = RT_H2LE_U64(pExtent->cSectorsPerGrain);
2946 Header.descriptorOffset = RT_H2LE_U64(pExtent->uDescriptorSector);
2947 Header.descriptorSize = RT_H2LE_U64(pExtent->cDescriptorSectors);
2948 Header.numGTEsPerGT = RT_H2LE_U32(pExtent->cGTEntries);
2949 if (pExtent->fFooter && uOffset == 0)
2950 {
2951 if (pExtent->pRGD)
2952 {
2953 Assert(pExtent->uSectorRGD);
2954 Header.rgdOffset = RT_H2LE_U64(VMDK_GD_AT_END);
2955 Header.gdOffset = RT_H2LE_U64(VMDK_GD_AT_END);
2956 }
2957 else
2958 {
2959 Header.gdOffset = RT_H2LE_U64(VMDK_GD_AT_END);
2960 }
2961 }
2962 else
2963 {
2964 if (pExtent->pRGD)
2965 {
2966 Assert(pExtent->uSectorRGD);
2967 Header.rgdOffset = RT_H2LE_U64(pExtent->uSectorRGD);
2968 Header.gdOffset = RT_H2LE_U64(pExtent->uSectorGD);
2969 }
2970 else
2971 {
2972 Header.gdOffset = RT_H2LE_U64(pExtent->uSectorGD);
2973 }
2974 }
2975 Header.overHead = RT_H2LE_U64(pExtent->cOverheadSectors);
2976 Header.uncleanShutdown = pExtent->fUncleanShutdown;
2977 Header.singleEndLineChar = '\n';
2978 Header.nonEndLineChar = ' ';
2979 Header.doubleEndLineChar1 = '\r';
2980 Header.doubleEndLineChar2 = '\n';
2981 Header.compressAlgorithm = RT_H2LE_U16(pExtent->uCompression);
2982
2983 int rc = pImage->pInterfaceIOCallbacks->pfnWriteMetaAsync(pImage->pInterfaceIO->pvUser,
2984 pExtent->pFile->pStorage,
2985 uOffset, &Header, sizeof(Header),
2986 pIoCtx, NULL, NULL);
2987 if (RT_FAILURE(rc) && (rc != VERR_VD_ASYNC_IO_IN_PROGRESS))
2988 rc = vmdkError(pExtent->pImage, rc, RT_SRC_POS, N_("VMDK: error writing extent header in '%s'"), pExtent->pszFullname);
2989 return rc;
2990}
2991
#ifdef VBOX_WITH_VMDK_ESX
/**
 * Internal: unused code to read the metadata of a sparse ESX extent.
 *
 * Such extents never leave ESX server, so this isn't ever used.
 *
 * Reads the COWDisk header, validates it and sets up the extent fields,
 * then loads the grain directory. On any failure the extent data is torn
 * down again via vmdkFreeExtentData().
 */
static int vmdkReadMetaESXSparseExtent(PVMDKEXTENT pExtent)
{
    COWDisk_Header Header;
    uint64_t cSectorsPerGDE;

    int rc = vmdkFileReadAt(pExtent->pFile, 0, &Header, sizeof(Header), NULL);
    AssertRC(rc);
    if (RT_FAILURE(rc))
        goto out;
    if (    RT_LE2H_U32(Header.magicNumber) != VMDK_ESX_SPARSE_MAGICNUMBER
        ||  RT_LE2H_U32(Header.version) != 1
        ||  RT_LE2H_U32(Header.flags) != 3)
    {
        rc = VERR_VD_VMDK_INVALID_HEADER;
        goto out;
    }
    pExtent->enmType = VMDKETYPE_ESX_SPARSE;
    pExtent->cSectors = RT_LE2H_U32(Header.numSectors);
    pExtent->cSectorsPerGrain = RT_LE2H_U32(Header.grainSize);
    /* The spec says that this must be between 1 sector and 1MB. This code
     * assumes it's a power of two, so check that requirement, too. */
    if (    (pExtent->cSectorsPerGrain & (pExtent->cSectorsPerGrain - 1))
        ||  pExtent->cSectorsPerGrain == 0
        ||  pExtent->cSectorsPerGrain > 2048)
    {
        rc = VERR_VD_VMDK_INVALID_HEADER;
        goto out;
    }
    pExtent->uDescriptorSector = 0;
    pExtent->cDescriptorSectors = 0;
    pExtent->uSectorGD = RT_LE2H_U32(Header.gdOffset);
    pExtent->uSectorRGD = 0;
    pExtent->cOverheadSectors = 0;
    pExtent->cGTEntries = 4096;
    cSectorsPerGDE = pExtent->cGTEntries * pExtent->cSectorsPerGrain;
    if (!cSectorsPerGDE || cSectorsPerGDE > UINT32_MAX)
    {
        rc = VERR_VD_VMDK_INVALID_HEADER;
        goto out;
    }
    pExtent->cSectorsPerGDE = cSectorsPerGDE;
    pExtent->cGDEntries = (pExtent->cSectors + cSectorsPerGDE - 1) / cSectorsPerGDE;
    if (pExtent->cGDEntries != RT_LE2H_U32(Header.numGDEntries))
    {
        /* Inconsistency detected. Computed number of GD entries doesn't match
         * stored value. Better be safe than sorry. */
        rc = VERR_VD_VMDK_INVALID_HEADER;
        goto out;
    }
    pExtent->uFreeSector = RT_LE2H_U32(Header.freeSector);
    pExtent->fUncleanShutdown = !!Header.uncleanShutdown;

    rc = vmdkReadGrainDirectory(pExtent);

out:
    if (RT_FAILURE(rc))
        vmdkFreeExtentData(pExtent->pImage, pExtent, false); /* Fixed: 'pImage' is not a parameter of this function; use the extent's back pointer. */

    return rc;
}
#endif /* VBOX_WITH_VMDK_ESX */
3059
3060/**
3061 * Internal: free the memory used by the extent data structure, optionally
3062 * deleting the referenced files.
3063 */
3064static void vmdkFreeExtentData(PVMDKIMAGE pImage, PVMDKEXTENT pExtent,
3065 bool fDelete)
3066{
3067 vmdkFreeGrainDirectory(pExtent);
3068 if (pExtent->pDescData)
3069 {
3070 RTMemFree(pExtent->pDescData);
3071 pExtent->pDescData = NULL;
3072 }
3073 if (pExtent->pFile != NULL)
3074 {
3075 /* Do not delete raw extents, these have full and base names equal. */
3076 vmdkFileClose(pImage, &pExtent->pFile,
3077 fDelete
3078 && pExtent->pszFullname
3079 && strcmp(pExtent->pszFullname, pExtent->pszBasename));
3080 }
3081 if (pExtent->pszBasename)
3082 {
3083 RTMemTmpFree((void *)pExtent->pszBasename);
3084 pExtent->pszBasename = NULL;
3085 }
3086 if (pExtent->pszFullname)
3087 {
3088 RTStrFree((char *)(void *)pExtent->pszFullname);
3089 pExtent->pszFullname = NULL;
3090 }
3091 if (pExtent->pvGrain)
3092 {
3093 RTMemFree(pExtent->pvGrain);
3094 pExtent->pvGrain = NULL;
3095 }
3096}
3097
3098/**
3099 * Internal: allocate grain table cache if necessary for this image.
3100 */
3101static int vmdkAllocateGrainTableCache(PVMDKIMAGE pImage)
3102{
3103 PVMDKEXTENT pExtent;
3104
3105 /* Allocate grain table cache if any sparse extent is present. */
3106 for (unsigned i = 0; i < pImage->cExtents; i++)
3107 {
3108 pExtent = &pImage->pExtents[i];
3109 if ( pExtent->enmType == VMDKETYPE_HOSTED_SPARSE
3110#ifdef VBOX_WITH_VMDK_ESX
3111 || pExtent->enmType == VMDKETYPE_ESX_SPARSE
3112#endif /* VBOX_WITH_VMDK_ESX */
3113 )
3114 {
3115 /* Allocate grain table cache. */
3116 pImage->pGTCache = (PVMDKGTCACHE)RTMemAllocZ(sizeof(VMDKGTCACHE));
3117 if (!pImage->pGTCache)
3118 return VERR_NO_MEMORY;
3119 for (unsigned j = 0; j < VMDK_GT_CACHE_SIZE; j++)
3120 {
3121 PVMDKGTCACHEENTRY pGCE = &pImage->pGTCache->aGTCache[j];
3122 pGCE->uExtent = UINT32_MAX;
3123 }
3124 pImage->pGTCache->cEntries = VMDK_GT_CACHE_SIZE;
3125 break;
3126 }
3127 }
3128
3129 return VINF_SUCCESS;
3130}
3131
3132/**
3133 * Internal: allocate the given number of extents.
3134 */
3135static int vmdkCreateExtents(PVMDKIMAGE pImage, unsigned cExtents)
3136{
3137 int rc = VINF_SUCCESS;
3138 PVMDKEXTENT pExtents = (PVMDKEXTENT)RTMemAllocZ(cExtents * sizeof(VMDKEXTENT));
3139 if (pImage)
3140 {
3141 for (unsigned i = 0; i < cExtents; i++)
3142 {
3143 pExtents[i].pFile = NULL;
3144 pExtents[i].pszBasename = NULL;
3145 pExtents[i].pszFullname = NULL;
3146 pExtents[i].pGD = NULL;
3147 pExtents[i].pRGD = NULL;
3148 pExtents[i].pDescData = NULL;
3149 pExtents[i].uVersion = 1;
3150 pExtents[i].uCompression = VMDK_COMPRESSION_NONE;
3151 pExtents[i].uExtent = i;
3152 pExtents[i].pImage = pImage;
3153 }
3154 pImage->pExtents = pExtents;
3155 pImage->cExtents = cExtents;
3156 }
3157 else
3158 rc = VERR_NO_MEMORY;
3159
3160 return rc;
3161}
3162
3163/**
3164 * Internal: Translate the VBoxHDD open flags to RTFile open flags.
3165 */
3166static uint32_t vmdkFileOpenFlags(unsigned uOpenFlags)
3167{
3168 uint32_t fDeny = uOpenFlags & (VD_OPEN_FLAGS_READONLY | VD_OPEN_FLAGS_SHAREABLE)
3169 ? RTFILE_O_DENY_NONE
3170 : RTFILE_O_DENY_WRITE;
3171 uint32_t fOpen = uOpenFlags & VD_OPEN_FLAGS_READONLY
3172 ? RTFILE_O_READ
3173 : RTFILE_O_READWRITE;
3174 fOpen |= RTFILE_O_OPEN | fDeny;
3175 return fOpen;
3176}
3177
/**
 * Internal: Open an image, constructing all necessary data structures.
 *
 * Distinguishes two on-disk layouts by the magic number at offset 0:
 * monolithic sparse images (embedded descriptor) and plain-text descriptor
 * files referencing separate extent files. On any failure the partially
 * constructed state is torn down via vmdkFreeImage().
 *
 * @returns VBox status code.
 * @param   pImage      Image instance to fill in; pszFilename must be set.
 * @param   uOpenFlags  VD_OPEN_FLAGS_* controlling access mode and sharing.
 */
static int vmdkOpenImage(PVMDKIMAGE pImage, unsigned uOpenFlags)
{
    int rc;
    uint32_t u32Magic;
    PVMDKFILE pFile;
    PVMDKEXTENT pExtent;

    pImage->uOpenFlags = uOpenFlags;

    /* Try to get error interface. */
    pImage->pInterfaceError = VDInterfaceGet(pImage->pVDIfsDisk, VDINTERFACETYPE_ERROR);
    if (pImage->pInterfaceError)
        pImage->pInterfaceErrorCallbacks = VDGetInterfaceError(pImage->pInterfaceError);

    /* Try to get async I/O interface. */
    pImage->pInterfaceIO = VDInterfaceGet(pImage->pVDIfsImage, VDINTERFACETYPE_IO);
    if (pImage->pInterfaceIO)
        pImage->pInterfaceIOCallbacks = VDGetInterfaceIO(pImage->pInterfaceIO);

    /*
     * Open the image.
     * We don't have to check for asynchronous access because
     * we only support raw access and the opened file is a description
     * file where no data is stored.
     */

    rc = vmdkFileOpen(pImage, &pFile, pImage->pszFilename,
                      vmdkFileOpenFlags(uOpenFlags), false);
    if (RT_FAILURE(rc))
    {
        /* Do NOT signal an appropriate error here, as the VD layer has the
         * choice of retrying the open if it failed. */
        goto out;
    }
    pImage->pFile = pFile;

    /* Read magic (if present). */
    rc = vmdkFileReadAt(pFile, 0, &u32Magic, sizeof(u32Magic), NULL);
    if (RT_FAILURE(rc))
    {
        rc = vmdkError(pImage, rc, RT_SRC_POS, N_("VMDK: error reading the magic number in '%s'"), pImage->pszFilename);
        goto out;
    }

    /* Handle the file according to its magic number. */
    if (RT_LE2H_U32(u32Magic) == VMDK_SPARSE_MAGICNUMBER)
    {
        /* It's a hosted single-extent image. */
        rc = vmdkCreateExtents(pImage, 1);
        if (RT_FAILURE(rc))
            goto out;
        /* The opened file is passed to the extent. No separate descriptor
         * file, so no need to keep anything open for the image. */
        pExtent = &pImage->pExtents[0];
        pExtent->pFile = pFile;
        pImage->pFile = NULL;
        pExtent->pszFullname = RTPathAbsDup(pImage->pszFilename);
        if (!pExtent->pszFullname)
        {
            rc = VERR_NO_MEMORY;
            goto out;
        }
        /* Read the sparse extent header (version, grain size, GD/GT layout). */
        rc = vmdkReadBinaryMetaExtent(pImage, pExtent);
        if (RT_FAILURE(rc))
            goto out;

        /* As we're dealing with a monolithic image here, there must
         * be a descriptor embedded in the image file. */
        if (!pExtent->uDescriptorSector || !pExtent->cDescriptorSectors)
        {
            rc = vmdkError(pImage, VERR_VD_VMDK_INVALID_HEADER, RT_SRC_POS, N_("VMDK: monolithic image without descriptor in '%s'"), pImage->pszFilename);
            goto out;
        }
        /* HACK: extend the descriptor if it is unusually small and it fits in
         * the unused space after the image header. Allows opening VMDK files
         * with extremely small descriptor in read/write mode. */
        if (    !(pImage->uOpenFlags & VD_OPEN_FLAGS_READONLY)
            &&  pExtent->cDescriptorSectors < 3
            &&  (int64_t)pExtent->uSectorGD - pExtent->uDescriptorSector >= 4
            &&  (!pExtent->uSectorRGD || (int64_t)pExtent->uSectorRGD - pExtent->uDescriptorSector >= 4))
        {
            pExtent->cDescriptorSectors = 4;
            pExtent->fMetaDirty = true;
        }
        /* Read the descriptor from the extent. */
        pExtent->pDescData = (char *)RTMemAllocZ(VMDK_SECTOR2BYTE(pExtent->cDescriptorSectors));
        if (!pExtent->pDescData)
        {
            rc = VERR_NO_MEMORY;
            goto out;
        }
        rc = vmdkFileReadAt(pExtent->pFile,
                            VMDK_SECTOR2BYTE(pExtent->uDescriptorSector),
                            pExtent->pDescData,
                            VMDK_SECTOR2BYTE(pExtent->cDescriptorSectors), NULL);
        AssertRC(rc);
        if (RT_FAILURE(rc))
        {
            rc = vmdkError(pImage, rc, RT_SRC_POS, N_("VMDK: read error for descriptor in '%s'"), pExtent->pszFullname);
            goto out;
        }

        rc = vmdkParseDescriptor(pImage, pExtent->pDescData,
                                 VMDK_SECTOR2BYTE(pExtent->cDescriptorSectors));
        if (RT_FAILURE(rc))
            goto out;

        /* Load grain directory/tables for the sparse extent. */
        rc = vmdkReadMetaExtent(pImage, pExtent);
        if (RT_FAILURE(rc))
            goto out;

        /* Mark the extent as unclean if opened in read-write mode. */
        if (!(uOpenFlags & VD_OPEN_FLAGS_READONLY))
        {
            pExtent->fUncleanShutdown = true;
            pExtent->fMetaDirty = true;
        }
    }
    else
    {
        /* Allocate at least 10K, and make sure that there is 5K free space
         * in case new entries need to be added to the descriptor. Never
         * allocate more than 128K, because that's no valid descriptor file
         * and will result in the correct "truncated read" error handling. */
        uint64_t cbFileSize;
        rc = vmdkFileGetSize(pFile, &cbFileSize);
        if (RT_FAILURE(rc))
            goto out;

        uint64_t cbSize = cbFileSize;
        if (cbSize % VMDK_SECTOR2BYTE(10))
            cbSize += VMDK_SECTOR2BYTE(20) - cbSize % VMDK_SECTOR2BYTE(10);
        else
            cbSize += VMDK_SECTOR2BYTE(10);
        cbSize = RT_MIN(cbSize, _128K);
        pImage->cbDescAlloc = RT_MAX(VMDK_SECTOR2BYTE(20), cbSize);
        pImage->pDescData = (char *)RTMemAllocZ(pImage->cbDescAlloc);
        if (!pImage->pDescData)
        {
            rc = VERR_NO_MEMORY;
            goto out;
        }

        size_t cbRead;
        rc = vmdkFileReadAt(pImage->pFile, 0, pImage->pDescData,
                            RT_MIN(pImage->cbDescAlloc, cbFileSize),
                            &cbRead);
        if (RT_FAILURE(rc))
        {
            rc = vmdkError(pImage, rc, RT_SRC_POS, N_("VMDK: read error for descriptor in '%s'"), pImage->pszFilename);
            goto out;
        }
        if (cbRead == pImage->cbDescAlloc)
        {
            /* Likely the read is truncated. Better fail a bit too early
             * (normally the descriptor is much smaller than our buffer). */
            rc = vmdkError(pImage, VERR_VD_VMDK_INVALID_HEADER, RT_SRC_POS, N_("VMDK: cannot read descriptor in '%s'"), pImage->pszFilename);
            goto out;
        }

        rc = vmdkParseDescriptor(pImage, pImage->pDescData,
                                 pImage->cbDescAlloc);
        if (RT_FAILURE(rc))
            goto out;

        /*
         * We have to check for the asynchronous open flag. The
         * extents are parsed and the type of all are known now.
         * Check if every extent is either FLAT or ZERO.
         */
        if (uOpenFlags & VD_OPEN_FLAGS_ASYNC_IO)
        {
            unsigned cFlatExtents = 0;

            for (unsigned i = 0; i < pImage->cExtents; i++)
            {
                pExtent = &pImage->pExtents[i];

                /* Async I/O also rejects images with more than one FLAT
                 * extent (second condition below). */
                if ((    pExtent->enmType != VMDKETYPE_FLAT
                     &&  pExtent->enmType != VMDKETYPE_ZERO
                     &&  pExtent->enmType != VMDKETYPE_VMFS)
                    || ((pImage->pExtents[i].enmType == VMDKETYPE_FLAT) && (cFlatExtents > 0)))
                {
                    /*
                     * Opened image contains at least one none flat or zero extent.
                     * Return error but don't set error message as the caller
                     * has the chance to open in non async I/O mode.
                     */
                    rc = VERR_NOT_SUPPORTED;
                    goto out;
                }
                if (pExtent->enmType == VMDKETYPE_FLAT)
                    cFlatExtents++;
            }
        }

        for (unsigned i = 0; i < pImage->cExtents; i++)
        {
            pExtent = &pImage->pExtents[i];

            if (pExtent->pszBasename)
            {
                /* Hack to figure out whether the specified name in the
                 * extent descriptor is absolute. Doesn't always work, but
                 * should be good enough for now. */
                char *pszFullname;
                /** @todo implement proper path absolute check. */
                if (pExtent->pszBasename[0] == RTPATH_SLASH)
                {
                    pszFullname = RTStrDup(pExtent->pszBasename);
                    if (!pszFullname)
                    {
                        rc = VERR_NO_MEMORY;
                        goto out;
                    }
                }
                else
                {
                    /* Relative name: prepend the directory of the descriptor file. */
                    size_t cbDirname;
                    char *pszDirname = RTStrDup(pImage->pszFilename);
                    if (!pszDirname)
                    {
                        rc = VERR_NO_MEMORY;
                        goto out;
                    }
                    RTPathStripFilename(pszDirname);
                    cbDirname = strlen(pszDirname);
                    rc = RTStrAPrintf(&pszFullname, "%s%c%s", pszDirname,
                                      RTPATH_SLASH, pExtent->pszBasename);
                    RTStrFree(pszDirname);
                    if (RT_FAILURE(rc))
                        goto out;
                }
                pExtent->pszFullname = pszFullname;
            }
            else
                pExtent->pszFullname = NULL;

            switch (pExtent->enmType)
            {
                case VMDKETYPE_HOSTED_SPARSE:
                    rc = vmdkFileOpen(pImage, &pExtent->pFile, pExtent->pszFullname,
                                      vmdkFileOpenFlags(uOpenFlags), false);
                    if (RT_FAILURE(rc))
                    {
                        /* Do NOT signal an appropriate error here, as the VD
                         * layer has the choice of retrying the open if it
                         * failed. */
                        goto out;
                    }
                    rc = vmdkReadBinaryMetaExtent(pImage, pExtent);
                    if (RT_FAILURE(rc))
                        goto out;
                    rc = vmdkReadMetaExtent(pImage, pExtent);
                    if (RT_FAILURE(rc))
                        goto out;

                    /* Mark extent as unclean if opened in read-write mode. */
                    if (!(uOpenFlags & VD_OPEN_FLAGS_READONLY))
                    {
                        pExtent->fUncleanShutdown = true;
                        pExtent->fMetaDirty = true;
                    }
                    break;
                case VMDKETYPE_VMFS:
                case VMDKETYPE_FLAT:
                    rc = vmdkFileOpen(pImage, &pExtent->pFile, pExtent->pszFullname,
                                      vmdkFileOpenFlags(uOpenFlags), true);
                    if (RT_FAILURE(rc))
                    {
                        /* Do NOT signal an appropriate error here, as the VD
                         * layer has the choice of retrying the open if it
                         * failed. */
                        goto out;
                    }
                    break;
                case VMDKETYPE_ZERO:
                    /* Nothing to do. */
                    break;
                default:
                    AssertMsgFailed(("unknown vmdk extent type %d\n", pExtent->enmType));
            }
        }
    }

    /* Make sure this is not reached accidentally with an error status. */
    AssertRC(rc);

    /* Determine PCHS geometry if not set. */
    if (pImage->PCHSGeometry.cCylinders == 0)
    {
        /* NOTE(review): divides by cHeads and cSectors — assumes the
         * descriptor parsing guarantees these are non-zero whenever
         * cCylinders is zero; verify to rule out division by zero. */
        uint64_t cCylinders = VMDK_BYTE2SECTOR(pImage->cbSize)
                            / pImage->PCHSGeometry.cHeads
                            / pImage->PCHSGeometry.cSectors;
        /* Clamp to the ATA limit of 16383 cylinders. */
        pImage->PCHSGeometry.cCylinders = (unsigned)RT_MIN(cCylinders, 16383);
        if (!(pImage->uOpenFlags & VD_OPEN_FLAGS_READONLY))
        {
            rc = vmdkDescSetPCHSGeometry(pImage, &pImage->PCHSGeometry);
            AssertRC(rc);
        }
    }

    /* Update the image metadata now in case it has changed. */
    rc = vmdkFlushImage(pImage);
    if (RT_FAILURE(rc))
        goto out;

    /* Figure out a few per-image constants from the extents. */
    pImage->cbSize = 0;
    for (unsigned i = 0; i < pImage->cExtents; i++)
    {
        pExtent = &pImage->pExtents[i];
        if (    pExtent->enmType == VMDKETYPE_HOSTED_SPARSE
#ifdef VBOX_WITH_VMDK_ESX
            ||  pExtent->enmType == VMDKETYPE_ESX_SPARSE
#endif /* VBOX_WITH_VMDK_ESX */
           )
        {
            /* Here used to be a check whether the nominal size of an extent
             * is a multiple of the grain size. The spec says that this is
             * always the case, but unfortunately some files out there in the
             * wild violate the spec (e.g. ReactOS 0.3.1). */
        }
        pImage->cbSize += VMDK_SECTOR2BYTE(pExtent->cNominalSectors);
    }

    /* An image with any FLAT or ZERO extent counts as fixed-size. */
    for (unsigned i = 0; i < pImage->cExtents; i++)
    {
        pExtent = &pImage->pExtents[i];
        if (    pImage->pExtents[i].enmType == VMDKETYPE_FLAT
            ||  pImage->pExtents[i].enmType == VMDKETYPE_ZERO)
        {
            pImage->uImageFlags |= VD_IMAGE_FLAGS_FIXED;
            break;
        }
    }

    rc = vmdkAllocateGrainTableCache(pImage);
    if (RT_FAILURE(rc))
        goto out;

out:
    if (RT_FAILURE(rc))
        vmdkFreeImage(pImage, false);
    return rc;
}
3527
3528/**
3529 * Internal: Translate the VBoxHDD open flags to RTFile open/create flags.
3530 */
3531static uint32_t vmdkFileCreateFlags(unsigned uOpenFlags)
3532{
3533 uint32_t fDeny = uOpenFlags & VD_OPEN_FLAGS_SHAREABLE
3534 ? RTFILE_O_DENY_NONE
3535 : RTFILE_O_DENY_WRITE;
3536 uint32_t fOpen = RTFILE_O_READWRITE | RTFILE_O_NOT_CONTENT_INDEXED;
3537 fOpen |= RTFILE_O_CREATE | fDeny;
3538 return fOpen;
3539}
3540
3541/**
3542 * Internal: create VMDK images for raw disk/partition access.
3543 */
3544static int vmdkCreateRawImage(PVMDKIMAGE pImage, const PVBOXHDDRAW pRaw,
3545 uint64_t cbSize)
3546{
3547 int rc = VINF_SUCCESS;
3548 PVMDKEXTENT pExtent;
3549
3550 if (pRaw->fRawDisk)
3551 {
3552 /* Full raw disk access. This requires setting up a descriptor
3553 * file and open the (flat) raw disk. */
3554 rc = vmdkCreateExtents(pImage, 1);
3555 if (RT_FAILURE(rc))
3556 return vmdkError(pImage, rc, RT_SRC_POS, N_("VMDK: could not create new extent list in '%s'"), pImage->pszFilename);
3557 pExtent = &pImage->pExtents[0];
3558 /* Create raw disk descriptor file. */
3559 rc = vmdkFileOpen(pImage, &pImage->pFile, pImage->pszFilename,
3560 vmdkFileCreateFlags(pImage->uOpenFlags), false);
3561 if (RT_FAILURE(rc))
3562 return vmdkError(pImage, rc, RT_SRC_POS, N_("VMDK: could not create new file '%s'"), pImage->pszFilename);
3563
3564 /* Set up basename for extent description. Cannot use StrDup. */
3565 size_t cbBasename = strlen(pRaw->pszRawDisk) + 1;
3566 char *pszBasename = (char *)RTMemTmpAlloc(cbBasename);
3567 if (!pszBasename)
3568 return VERR_NO_MEMORY;
3569 memcpy(pszBasename, pRaw->pszRawDisk, cbBasename);
3570 pExtent->pszBasename = pszBasename;
3571 /* For raw disks the full name is identical to the base name. */
3572 pExtent->pszFullname = RTStrDup(pszBasename);
3573 if (!pExtent->pszFullname)
3574 return VERR_NO_MEMORY;
3575 pExtent->enmType = VMDKETYPE_FLAT;
3576 pExtent->cNominalSectors = VMDK_BYTE2SECTOR(cbSize);
3577 pExtent->uSectorOffset = 0;
3578 pExtent->enmAccess = VMDKACCESS_READWRITE;
3579 pExtent->fMetaDirty = false;
3580
3581 /* Open flat image, the raw disk. */
3582 rc = vmdkFileOpen(pImage, &pExtent->pFile, pExtent->pszFullname,
3583 vmdkFileOpenFlags(pImage->uOpenFlags & ~VD_OPEN_FLAGS_READONLY),
3584 false);
3585 if (RT_FAILURE(rc))
3586 return vmdkError(pImage, rc, RT_SRC_POS, N_("VMDK: could not open raw disk file '%s'"), pExtent->pszFullname);
3587 }
3588 else
3589 {
3590 /* Raw partition access. This requires setting up a descriptor
3591 * file, write the partition information to a flat extent and
3592 * open all the (flat) raw disk partitions. */
3593
3594 /* First pass over the partition data areas to determine how many
3595 * extents we need. One data area can require up to 2 extents, as
3596 * it might be necessary to skip over unpartitioned space. */
3597 unsigned cExtents = 0;
3598 uint64_t uStart = 0;
3599 for (unsigned i = 0; i < pRaw->cPartDescs; i++)
3600 {
3601 PVBOXHDDRAWPARTDESC pPart = &pRaw->pPartDescs[i];
3602 if (uStart > pPart->uStart)
3603 return vmdkError(pImage, VERR_INVALID_PARAMETER, RT_SRC_POS, N_("VMDK: incorrect partition data area ordering set up by the caller in '%s'"), pImage->pszFilename);
3604
3605 if (uStart < pPart->uStart)
3606 cExtents++;
3607 uStart = pPart->uStart + pPart->cbData;
3608 cExtents++;
3609 }
3610 /* Another extent for filling up the rest of the image. */
3611 if (uStart != cbSize)
3612 cExtents++;
3613
3614 rc = vmdkCreateExtents(pImage, cExtents);
3615 if (RT_FAILURE(rc))
3616 return vmdkError(pImage, rc, RT_SRC_POS, N_("VMDK: could not create new extent list in '%s'"), pImage->pszFilename);
3617
3618 /* Create raw partition descriptor file. */
3619 rc = vmdkFileOpen(pImage, &pImage->pFile, pImage->pszFilename,
3620 vmdkFileCreateFlags(pImage->uOpenFlags), false);
3621 if (RT_FAILURE(rc))
3622 return vmdkError(pImage, rc, RT_SRC_POS, N_("VMDK: could not create new file '%s'"), pImage->pszFilename);
3623
3624 /* Create base filename for the partition table extent. */
3625 /** @todo remove fixed buffer without creating memory leaks. */
3626 char pszPartition[1024];
3627 const char *pszBase = RTPathFilename(pImage->pszFilename);
3628 const char *pszExt = RTPathExt(pszBase);
3629 if (pszExt == NULL)
3630 return vmdkError(pImage, rc, RT_SRC_POS, N_("VMDK: invalid filename '%s'"), pImage->pszFilename);
3631 char *pszBaseBase = RTStrDup(pszBase);
3632 if (!pszBaseBase)
3633 return VERR_NO_MEMORY;
3634 RTPathStripExt(pszBaseBase);
3635 RTStrPrintf(pszPartition, sizeof(pszPartition), "%s-pt%s",
3636 pszBaseBase, pszExt);
3637 RTStrFree(pszBaseBase);
3638
3639 /* Second pass over the partitions, now define all extents. */
3640 uint64_t uPartOffset = 0;
3641 cExtents = 0;
3642 uStart = 0;
3643 for (unsigned i = 0; i < pRaw->cPartDescs; i++)
3644 {
3645 PVBOXHDDRAWPARTDESC pPart = &pRaw->pPartDescs[i];
3646 pExtent = &pImage->pExtents[cExtents++];
3647
3648 if (uStart < pPart->uStart)
3649 {
3650 pExtent->pszBasename = NULL;
3651 pExtent->pszFullname = NULL;
3652 pExtent->enmType = VMDKETYPE_ZERO;
3653 pExtent->cNominalSectors = VMDK_BYTE2SECTOR(pPart->uStart - uStart);
3654 pExtent->uSectorOffset = 0;
3655 pExtent->enmAccess = VMDKACCESS_READWRITE;
3656 pExtent->fMetaDirty = false;
3657 /* go to next extent */
3658 pExtent = &pImage->pExtents[cExtents++];
3659 }
3660 uStart = pPart->uStart + pPart->cbData;
3661
3662 if (pPart->pvPartitionData)
3663 {
3664 /* Set up basename for extent description. Can't use StrDup. */
3665 size_t cbBasename = strlen(pszPartition) + 1;
3666 char *pszBasename = (char *)RTMemTmpAlloc(cbBasename);
3667 if (!pszBasename)
3668 return VERR_NO_MEMORY;
3669 memcpy(pszBasename, pszPartition, cbBasename);
3670 pExtent->pszBasename = pszBasename;
3671
3672 /* Set up full name for partition extent. */
3673 size_t cbDirname;
3674 char *pszDirname = RTStrDup(pImage->pszFilename);
3675 if (!pszDirname)
3676 return VERR_NO_MEMORY;
3677 RTPathStripFilename(pszDirname);
3678 cbDirname = strlen(pszDirname);
3679 char *pszFullname;
3680 rc = RTStrAPrintf(&pszFullname, "%s%c%s", pszDirname,
3681 RTPATH_SLASH, pExtent->pszBasename);
3682 RTStrFree(pszDirname);
3683 if (RT_FAILURE(rc))
3684 return rc;
3685 pExtent->pszFullname = pszFullname;
3686 pExtent->enmType = VMDKETYPE_FLAT;
3687 pExtent->cNominalSectors = VMDK_BYTE2SECTOR(pPart->cbData);
3688 pExtent->uSectorOffset = uPartOffset;
3689 pExtent->enmAccess = VMDKACCESS_READWRITE;
3690 pExtent->fMetaDirty = false;
3691
3692 /* Create partition table flat image. */
3693 rc = vmdkFileOpen(pImage, &pExtent->pFile, pExtent->pszFullname,
3694 vmdkFileCreateFlags(pImage->uOpenFlags),
3695 false);
3696 if (RT_FAILURE(rc))
3697 return vmdkError(pImage, rc, RT_SRC_POS, N_("VMDK: could not create new partition data file '%s'"), pExtent->pszFullname);
3698 rc = vmdkFileWriteAt(pExtent->pFile,
3699 VMDK_SECTOR2BYTE(uPartOffset),
3700 pPart->pvPartitionData,
3701 pPart->cbData, NULL);
3702 if (RT_FAILURE(rc))
3703 return vmdkError(pImage, rc, RT_SRC_POS, N_("VMDK: could not write partition data to '%s'"), pExtent->pszFullname);
3704 uPartOffset += VMDK_BYTE2SECTOR(pPart->cbData);
3705 }
3706 else
3707 {
3708 if (pPart->pszRawDevice)
3709 {
3710 /* Set up basename for extent descr. Can't use StrDup. */
3711 size_t cbBasename = strlen(pPart->pszRawDevice) + 1;
3712 char *pszBasename = (char *)RTMemTmpAlloc(cbBasename);
3713 if (!pszBasename)
3714 return VERR_NO_MEMORY;
3715 memcpy(pszBasename, pPart->pszRawDevice, cbBasename);
3716 pExtent->pszBasename = pszBasename;
3717 /* For raw disks full name is identical to base name. */
3718 pExtent->pszFullname = RTStrDup(pszBasename);
3719 if (!pExtent->pszFullname)
3720 return VERR_NO_MEMORY;
3721 pExtent->enmType = VMDKETYPE_FLAT;
3722 pExtent->cNominalSectors = VMDK_BYTE2SECTOR(pPart->cbData);
3723 pExtent->uSectorOffset = VMDK_BYTE2SECTOR(pPart->uStartOffset);
3724 pExtent->enmAccess = VMDKACCESS_READWRITE;
3725 pExtent->fMetaDirty = false;
3726
3727 /* Open flat image, the raw partition. */
3728 rc = vmdkFileOpen(pImage, &pExtent->pFile, pExtent->pszFullname,
3729 vmdkFileOpenFlags(pImage->uOpenFlags & ~VD_OPEN_FLAGS_READONLY),
3730 false);
3731 if (RT_FAILURE(rc))
3732 return vmdkError(pImage, rc, RT_SRC_POS, N_("VMDK: could not open raw partition file '%s'"), pExtent->pszFullname);
3733 }
3734 else
3735 {
3736 pExtent->pszBasename = NULL;
3737 pExtent->pszFullname = NULL;
3738 pExtent->enmType = VMDKETYPE_ZERO;
3739 pExtent->cNominalSectors = VMDK_BYTE2SECTOR(pPart->cbData);
3740 pExtent->uSectorOffset = 0;
3741 pExtent->enmAccess = VMDKACCESS_READWRITE;
3742 pExtent->fMetaDirty = false;
3743 }
3744 }
3745 }
3746 /* Another extent for filling up the rest of the image. */
3747 if (uStart != cbSize)
3748 {
3749 pExtent = &pImage->pExtents[cExtents++];
3750 pExtent->pszBasename = NULL;
3751 pExtent->pszFullname = NULL;
3752 pExtent->enmType = VMDKETYPE_ZERO;
3753 pExtent->cNominalSectors = VMDK_BYTE2SECTOR(cbSize - uStart);
3754 pExtent->uSectorOffset = 0;
3755 pExtent->enmAccess = VMDKACCESS_READWRITE;
3756 pExtent->fMetaDirty = false;
3757 }
3758 }
3759
3760 rc = vmdkDescBaseSetStr(pImage, &pImage->Descriptor, "createType",
3761 pRaw->fRawDisk ?
3762 "fullDevice" : "partitionedDevice");
3763 if (RT_FAILURE(rc))
3764 return vmdkError(pImage, rc, RT_SRC_POS, N_("VMDK: could not set the image type in '%s'"), pImage->pszFilename);
3765 return rc;
3766}
3767
3768/**
3769 * Internal: create a regular (i.e. file-backed) VMDK image.
3770 */
3771static int vmdkCreateRegularImage(PVMDKIMAGE pImage, uint64_t cbSize,
3772 unsigned uImageFlags,
3773 PFNVDPROGRESS pfnProgress, void *pvUser,
3774 unsigned uPercentStart, unsigned uPercentSpan)
3775{
3776 int rc = VINF_SUCCESS;
3777 unsigned cExtents = 1;
3778 uint64_t cbOffset = 0;
3779 uint64_t cbRemaining = cbSize;
3780
3781 if (uImageFlags & VD_VMDK_IMAGE_FLAGS_SPLIT_2G)
3782 {
3783 cExtents = cbSize / VMDK_2G_SPLIT_SIZE;
3784 /* Do proper extent computation: need one smaller extent if the total
3785 * size isn't evenly divisible by the split size. */
3786 if (cbSize % VMDK_2G_SPLIT_SIZE)
3787 cExtents++;
3788 }
3789 rc = vmdkCreateExtents(pImage, cExtents);
3790 if (RT_FAILURE(rc))
3791 return vmdkError(pImage, rc, RT_SRC_POS, N_("VMDK: could not create new extent list in '%s'"), pImage->pszFilename);
3792
3793 /* Basename strings needed for constructing the extent names. */
3794 char *pszBasenameSubstr = RTPathFilename(pImage->pszFilename);
3795 AssertPtr(pszBasenameSubstr);
3796 size_t cbBasenameSubstr = strlen(pszBasenameSubstr) + 1;
3797
3798 /* Create searate descriptor file if necessary. */
3799 if (cExtents != 1 || (uImageFlags & VD_IMAGE_FLAGS_FIXED))
3800 {
3801 rc = vmdkFileOpen(pImage, &pImage->pFile, pImage->pszFilename,
3802 vmdkFileCreateFlags(pImage->uOpenFlags), false);
3803 if (RT_FAILURE(rc))
3804 return vmdkError(pImage, rc, RT_SRC_POS, N_("VMDK: could not create new sparse descriptor file '%s'"), pImage->pszFilename);
3805 }
3806 else
3807 pImage->pFile = NULL;
3808
3809 /* Set up all extents. */
3810 for (unsigned i = 0; i < cExtents; i++)
3811 {
3812 PVMDKEXTENT pExtent = &pImage->pExtents[i];
3813 uint64_t cbExtent = cbRemaining;
3814
3815 /* Set up fullname/basename for extent description. Cannot use StrDup
3816 * for basename, as it is not guaranteed that the memory can be freed
3817 * with RTMemTmpFree, which must be used as in other code paths
3818 * StrDup is not usable. */
3819 if (cExtents == 1 && !(uImageFlags & VD_IMAGE_FLAGS_FIXED))
3820 {
3821 char *pszBasename = (char *)RTMemTmpAlloc(cbBasenameSubstr);
3822 if (!pszBasename)
3823 return VERR_NO_MEMORY;
3824 memcpy(pszBasename, pszBasenameSubstr, cbBasenameSubstr);
3825 pExtent->pszBasename = pszBasename;
3826 }
3827 else
3828 {
3829 char *pszBasenameExt = RTPathExt(pszBasenameSubstr);
3830 char *pszBasenameBase = RTStrDup(pszBasenameSubstr);
3831 RTPathStripExt(pszBasenameBase);
3832 char *pszTmp;
3833 size_t cbTmp;
3834 if (uImageFlags & VD_IMAGE_FLAGS_FIXED)
3835 {
3836 if (cExtents == 1)
3837 rc = RTStrAPrintf(&pszTmp, "%s-flat%s", pszBasenameBase,
3838 pszBasenameExt);
3839 else
3840 rc = RTStrAPrintf(&pszTmp, "%s-f%03d%s", pszBasenameBase,
3841 i+1, pszBasenameExt);
3842 }
3843 else
3844 rc = RTStrAPrintf(&pszTmp, "%s-s%03d%s", pszBasenameBase, i+1,
3845 pszBasenameExt);
3846 RTStrFree(pszBasenameBase);
3847 if (RT_FAILURE(rc))
3848 return rc;
3849 cbTmp = strlen(pszTmp) + 1;
3850 char *pszBasename = (char *)RTMemTmpAlloc(cbTmp);
3851 if (!pszBasename)
3852 return VERR_NO_MEMORY;
3853 memcpy(pszBasename, pszTmp, cbTmp);
3854 RTStrFree(pszTmp);
3855 pExtent->pszBasename = pszBasename;
3856 if (uImageFlags & VD_VMDK_IMAGE_FLAGS_SPLIT_2G)
3857 cbExtent = RT_MIN(cbRemaining, VMDK_2G_SPLIT_SIZE);
3858 }
3859 char *pszBasedirectory = RTStrDup(pImage->pszFilename);
3860 RTPathStripFilename(pszBasedirectory);
3861 char *pszFullname;
3862 rc = RTStrAPrintf(&pszFullname, "%s%c%s", pszBasedirectory,
3863 RTPATH_SLASH, pExtent->pszBasename);
3864 RTStrFree(pszBasedirectory);
3865 if (RT_FAILURE(rc))
3866 return rc;
3867 pExtent->pszFullname = pszFullname;
3868
3869 /* Create file for extent. */
3870 rc = vmdkFileOpen(pImage, &pExtent->pFile, pExtent->pszFullname,
3871 vmdkFileCreateFlags(pImage->uOpenFlags), false);
3872 if (RT_FAILURE(rc))
3873 return vmdkError(pImage, rc, RT_SRC_POS, N_("VMDK: could not create new file '%s'"), pExtent->pszFullname);
3874 if (uImageFlags & VD_IMAGE_FLAGS_FIXED)
3875 {
3876 rc = vmdkFileSetSize(pExtent->pFile, cbExtent);
3877 if (RT_FAILURE(rc))
3878 return vmdkError(pImage, rc, RT_SRC_POS, N_("VMDK: could not set size of new file '%s'"), pExtent->pszFullname);
3879
3880 /* Fill image with zeroes. We do this for every fixed-size image since on some systems
3881 * (for example Windows Vista), it takes ages to write a block near the end of a sparse
3882 * file and the guest could complain about an ATA timeout. */
3883
3884 /** @todo Starting with Linux 2.6.23, there is an fallocate() system call.
3885 * Currently supported file systems are ext4 and ocfs2. */
3886
3887 /* Allocate a temporary zero-filled buffer. Use a bigger block size to optimize writing */
3888 const size_t cbBuf = 128 * _1K;
3889 void *pvBuf = RTMemTmpAllocZ(cbBuf);
3890 if (!pvBuf)
3891 return VERR_NO_MEMORY;
3892
3893 uint64_t uOff = 0;
3894 /* Write data to all image blocks. */
3895 while (uOff < cbExtent)
3896 {
3897 unsigned cbChunk = (unsigned)RT_MIN(cbExtent, cbBuf);
3898
3899 rc = vmdkFileWriteAt(pExtent->pFile, uOff, pvBuf, cbChunk, NULL);
3900 if (RT_FAILURE(rc))
3901 {
3902 RTMemFree(pvBuf);
3903 return vmdkError(pImage, rc, RT_SRC_POS, N_("VMDK: writing block failed for '%s'"), pImage->pszFilename);
3904 }
3905
3906 uOff += cbChunk;
3907
3908 if (pfnProgress)
3909 {
3910 rc = pfnProgress(pvUser,
3911 uPercentStart + uOff * uPercentSpan / cbExtent);
3912 if (RT_FAILURE(rc))
3913 {
3914 RTMemFree(pvBuf);
3915 return rc;
3916 }
3917 }
3918 }
3919 RTMemTmpFree(pvBuf);
3920 }
3921
3922 /* Place descriptor file information (where integrated). */
3923 if (cExtents == 1 && !(uImageFlags & VD_IMAGE_FLAGS_FIXED))
3924 {
3925 pExtent->uDescriptorSector = 1;
3926 pExtent->cDescriptorSectors = VMDK_BYTE2SECTOR(pImage->cbDescAlloc);
3927 /* The descriptor is part of the (only) extent. */
3928 pExtent->pDescData = pImage->pDescData;
3929 pImage->pDescData = NULL;
3930 }
3931
3932 if (!(uImageFlags & VD_IMAGE_FLAGS_FIXED))
3933 {
3934 uint64_t cSectorsPerGDE, cSectorsPerGD;
3935 pExtent->enmType = VMDKETYPE_HOSTED_SPARSE;
3936 pExtent->cSectors = VMDK_BYTE2SECTOR(RT_ALIGN_64(cbExtent, 65536));
3937 pExtent->cSectorsPerGrain = VMDK_BYTE2SECTOR(65536);
3938 pExtent->cGTEntries = 512;
3939 cSectorsPerGDE = pExtent->cGTEntries * pExtent->cSectorsPerGrain;
3940 pExtent->cSectorsPerGDE = cSectorsPerGDE;
3941 pExtent->cGDEntries = (pExtent->cSectors + cSectorsPerGDE - 1) / cSectorsPerGDE;
3942 cSectorsPerGD = (pExtent->cGDEntries + (512 / sizeof(uint32_t) - 1)) / (512 / sizeof(uint32_t));
3943 if (pImage->uImageFlags & VD_VMDK_IMAGE_FLAGS_STREAM_OPTIMIZED)
3944 {
3945 /* The spec says version is 1 for all VMDKs, but the vast
3946 * majority of streamOptimized VMDKs actually contain
3947 * version 3 - so go with the majority. Both are acepted. */
3948 pExtent->uVersion = 3;
3949 pExtent->uCompression = VMDK_COMPRESSION_DEFLATE;
3950 }
3951 }
3952 else
3953 {
3954 if (uImageFlags & VD_VMDK_IMAGE_FLAGS_ESX)
3955 pExtent->enmType = VMDKETYPE_VMFS;
3956 else
3957 pExtent->enmType = VMDKETYPE_FLAT;
3958 }
3959
3960 pExtent->enmAccess = VMDKACCESS_READWRITE;
3961 pExtent->fUncleanShutdown = true;
3962 pExtent->cNominalSectors = VMDK_BYTE2SECTOR(cbExtent);
3963 pExtent->uSectorOffset = 0;
3964 pExtent->fMetaDirty = true;
3965
3966 if (!(uImageFlags & VD_IMAGE_FLAGS_FIXED))
3967 {
3968 /* fPreAlloc should never be false because VMware can't use such images. */
3969 rc = vmdkCreateGrainDirectory(pExtent,
3970 RT_MAX( pExtent->uDescriptorSector
3971 + pExtent->cDescriptorSectors,
3972 1),
3973 true /* fPreAlloc */);
3974 if (RT_FAILURE(rc))
3975 return vmdkError(pImage, rc, RT_SRC_POS, N_("VMDK: could not create new grain directory in '%s'"), pExtent->pszFullname);
3976 }
3977
3978 if (RT_SUCCESS(rc) && pfnProgress)
3979 pfnProgress(pvUser, uPercentStart + i * uPercentSpan / cExtents);
3980
3981 cbRemaining -= cbExtent;
3982 cbOffset += cbExtent;
3983 }
3984
3985 if (pImage->uImageFlags & VD_VMDK_IMAGE_FLAGS_ESX)
3986 {
3987 /* VirtualBox doesn't care, but VMWare ESX freaks out if the wrong
3988 * controller type is set in an image. */
3989 rc = vmdkDescDDBSetStr(pImage, &pImage->Descriptor, "ddb.adapterType", "lsilogic");
3990 if (RT_FAILURE(rc))
3991 return vmdkError(pImage, rc, RT_SRC_POS, N_("VMDK: could not set controller type to lsilogic in '%s'"), pImage->pszFilename);
3992 }
3993
3994 const char *pszDescType = NULL;
3995 if (uImageFlags & VD_IMAGE_FLAGS_FIXED)
3996 {
3997 if (pImage->uImageFlags & VD_VMDK_IMAGE_FLAGS_ESX)
3998 pszDescType = "vmfs";
3999 else
4000 pszDescType = (cExtents == 1)
4001 ? "monolithicFlat" : "twoGbMaxExtentFlat";
4002 }
4003 else
4004 {
4005 if (pImage->uImageFlags & VD_VMDK_IMAGE_FLAGS_STREAM_OPTIMIZED)
4006 pszDescType = "streamOptimized";
4007 else
4008 {
4009 pszDescType = (cExtents == 1)
4010 ? "monolithicSparse" : "twoGbMaxExtentSparse";
4011 }
4012 }
4013 rc = vmdkDescBaseSetStr(pImage, &pImage->Descriptor, "createType",
4014 pszDescType);
4015 if (RT_FAILURE(rc))
4016 return vmdkError(pImage, rc, RT_SRC_POS, N_("VMDK: could not set the image type in '%s'"), pImage->pszFilename);
4017 return rc;
4018}
4019
4020/**
4021 * Internal: The actual code for creating any VMDK variant currently in
4022 * existence on hosted environments.
4023 */
4024static int vmdkCreateImage(PVMDKIMAGE pImage, uint64_t cbSize,
4025 unsigned uImageFlags, const char *pszComment,
4026 PCPDMMEDIAGEOMETRY pPCHSGeometry,
4027 PCPDMMEDIAGEOMETRY pLCHSGeometry, PCRTUUID pUuid,
4028 PFNVDPROGRESS pfnProgress, void *pvUser,
4029 unsigned uPercentStart, unsigned uPercentSpan)
4030{
4031 int rc;
4032
4033 pImage->uImageFlags = uImageFlags;
4034
4035 /* Try to get error interface. */
4036 pImage->pInterfaceError = VDInterfaceGet(pImage->pVDIfsDisk, VDINTERFACETYPE_ERROR);
4037 if (pImage->pInterfaceError)
4038 pImage->pInterfaceErrorCallbacks = VDGetInterfaceError(pImage->pInterfaceError);
4039
4040 /* Try to get async I/O interface. */
4041 pImage->pInterfaceIO = VDInterfaceGet(pImage->pVDIfsImage, VDINTERFACETYPE_IO);
4042 if (pImage->pInterfaceIO)
4043 pImage->pInterfaceIOCallbacks = VDGetInterfaceIO(pImage->pInterfaceIO);
4044
4045 rc = vmdkCreateDescriptor(pImage, pImage->pDescData, pImage->cbDescAlloc,
4046 &pImage->Descriptor);
4047 if (RT_FAILURE(rc))
4048 {
4049 rc = vmdkError(pImage, rc, RT_SRC_POS, N_("VMDK: could not create new descriptor in '%s'"), pImage->pszFilename);
4050 goto out;
4051 }
4052
4053 if ( (uImageFlags & VD_IMAGE_FLAGS_FIXED)
4054 && (uImageFlags & VD_VMDK_IMAGE_FLAGS_RAWDISK))
4055 {
4056 /* Raw disk image (includes raw partition). */
4057 const PVBOXHDDRAW pRaw = (const PVBOXHDDRAW)pszComment;
4058 /* As the comment is misused, zap it so that no garbage comment
4059 * is set below. */
4060 pszComment = NULL;
4061 rc = vmdkCreateRawImage(pImage, pRaw, cbSize);
4062 }
4063 else
4064 {
4065 /* Regular fixed or sparse image (monolithic or split). */
4066 rc = vmdkCreateRegularImage(pImage, cbSize, uImageFlags,
4067 pfnProgress, pvUser, uPercentStart,
4068 uPercentSpan * 95 / 100);
4069 }
4070
4071 if (RT_FAILURE(rc))
4072
4073 goto out;
4074
4075 if (RT_SUCCESS(rc) && pfnProgress)
4076 pfnProgress(pvUser, uPercentStart + uPercentSpan * 98 / 100);
4077
4078 pImage->cbSize = cbSize;
4079
4080 for (unsigned i = 0; i < pImage->cExtents; i++)
4081 {
4082 PVMDKEXTENT pExtent = &pImage->pExtents[i];
4083
4084 rc = vmdkDescExtInsert(pImage, &pImage->Descriptor, pExtent->enmAccess,
4085 pExtent->cNominalSectors, pExtent->enmType,
4086 pExtent->pszBasename, pExtent->uSectorOffset);
4087 if (RT_FAILURE(rc))
4088 {
4089 rc = vmdkError(pImage, rc, RT_SRC_POS, N_("VMDK: could not insert the extent list into descriptor in '%s'"), pImage->pszFilename);
4090 goto out;
4091 }
4092 }
4093 vmdkDescExtRemoveDummy(pImage, &pImage->Descriptor);
4094
4095 if ( pPCHSGeometry->cCylinders != 0
4096 && pPCHSGeometry->cHeads != 0
4097 && pPCHSGeometry->cSectors != 0)
4098 {
4099 rc = vmdkDescSetPCHSGeometry(pImage, pPCHSGeometry);
4100 if (RT_FAILURE(rc))
4101 goto out;
4102 }
4103 if ( pLCHSGeometry->cCylinders != 0
4104 && pLCHSGeometry->cHeads != 0
4105 && pLCHSGeometry->cSectors != 0)
4106 {
4107 rc = vmdkDescSetLCHSGeometry(pImage, pLCHSGeometry);
4108 if (RT_FAILURE(rc))
4109 goto out;
4110 }
4111
4112 pImage->LCHSGeometry = *pLCHSGeometry;
4113 pImage->PCHSGeometry = *pPCHSGeometry;
4114
4115 pImage->ImageUuid = *pUuid;
4116 rc = vmdkDescDDBSetUuid(pImage, &pImage->Descriptor,
4117 VMDK_DDB_IMAGE_UUID, &pImage->ImageUuid);
4118 if (RT_FAILURE(rc))
4119 {
4120 rc = vmdkError(pImage, rc, RT_SRC_POS, N_("VMDK: error storing image UUID in new descriptor in '%s'"), pImage->pszFilename);
4121 goto out;
4122 }
4123 RTUuidClear(&pImage->ParentUuid);
4124 rc = vmdkDescDDBSetUuid(pImage, &pImage->Descriptor,
4125 VMDK_DDB_PARENT_UUID, &pImage->ParentUuid);
4126 if (RT_FAILURE(rc))
4127 {
4128 rc = vmdkError(pImage, rc, RT_SRC_POS, N_("VMDK: error storing parent image UUID in new descriptor in '%s'"), pImage->pszFilename);
4129 goto out;
4130 }
4131 RTUuidClear(&pImage->ModificationUuid);
4132 rc = vmdkDescDDBSetUuid(pImage, &pImage->Descriptor,
4133 VMDK_DDB_MODIFICATION_UUID,
4134 &pImage->ModificationUuid);
4135 if (RT_FAILURE(rc))
4136 {
4137 rc = vmdkError(pImage, rc, RT_SRC_POS, N_("VMDK: error storing modification UUID in new descriptor in '%s'"), pImage->pszFilename);
4138 goto out;
4139 }
4140 RTUuidClear(&pImage->ParentModificationUuid);
4141 rc = vmdkDescDDBSetUuid(pImage, &pImage->Descriptor,
4142 VMDK_DDB_PARENT_MODIFICATION_UUID,
4143 &pImage->ParentModificationUuid);
4144 if (RT_FAILURE(rc))
4145 {
4146 rc = vmdkError(pImage, rc, RT_SRC_POS, N_("VMDK: error storing parent modification UUID in new descriptor in '%s'"), pImage->pszFilename);
4147 goto out;
4148 }
4149
4150 rc = vmdkAllocateGrainTableCache(pImage);
4151 if (RT_FAILURE(rc))
4152 goto out;
4153
4154 rc = vmdkSetImageComment(pImage, pszComment);
4155 if (RT_FAILURE(rc))
4156 {
4157 rc = vmdkError(pImage, rc, RT_SRC_POS, N_("VMDK: cannot set image comment in '%s'"), pImage->pszFilename);
4158 goto out;
4159 }
4160
4161 if (RT_SUCCESS(rc) && pfnProgress)
4162 pfnProgress(pvUser, uPercentStart + uPercentSpan * 99 / 100);
4163
4164 rc = vmdkFlushImage(pImage);
4165
4166out:
4167 if (RT_SUCCESS(rc) && pfnProgress)
4168 pfnProgress(pvUser, uPercentStart + uPercentSpan);
4169
4170 if (RT_FAILURE(rc))
4171 vmdkFreeImage(pImage, rc != VERR_ALREADY_EXISTS);
4172 return rc;
4173}
4174
4175/**
4176 * Internal: Update image comment.
4177 */
4178static int vmdkSetImageComment(PVMDKIMAGE pImage, const char *pszComment)
4179{
4180 char *pszCommentEncoded;
4181 if (pszComment)
4182 {
4183 pszCommentEncoded = vmdkEncodeString(pszComment);
4184 if (!pszCommentEncoded)
4185 return VERR_NO_MEMORY;
4186 }
4187 else
4188 pszCommentEncoded = NULL;
4189 int rc = vmdkDescDDBSetStr(pImage, &pImage->Descriptor,
4190 "ddb.comment", pszCommentEncoded);
4191 if (pszComment)
4192 RTStrFree(pszCommentEncoded);
4193 if (RT_FAILURE(rc))
4194 return vmdkError(pImage, rc, RT_SRC_POS, N_("VMDK: error storing image comment in descriptor in '%s'"), pImage->pszFilename);
4195 return VINF_SUCCESS;
4196}
4197
4198/**
4199 * Internal. Free all allocated space for representing an image, and optionally
4200 * delete the image from disk.
4201 */
4202static void vmdkFreeImage(PVMDKIMAGE pImage, bool fDelete)
4203{
4204 AssertPtr(pImage);
4205
4206 if (!(pImage->uOpenFlags & VD_OPEN_FLAGS_READONLY))
4207 {
4208 /* Mark all extents as clean. */
4209 for (unsigned i = 0; i < pImage->cExtents; i++)
4210 {
4211 if (( pImage->pExtents[i].enmType == VMDKETYPE_HOSTED_SPARSE
4212#ifdef VBOX_WITH_VMDK_ESX
4213 || pImage->pExtents[i].enmType == VMDKETYPE_ESX_SPARSE
4214#endif /* VBOX_WITH_VMDK_ESX */
4215 )
4216 && pImage->pExtents[i].fUncleanShutdown)
4217 {
4218 pImage->pExtents[i].fUncleanShutdown = false;
4219 pImage->pExtents[i].fMetaDirty = true;
4220 }
4221 }
4222 }
4223 (void)vmdkFlushImage(pImage);
4224
4225 if (pImage->pExtents != NULL)
4226 {
4227 for (unsigned i = 0 ; i < pImage->cExtents; i++)
4228 vmdkFreeExtentData(pImage, &pImage->pExtents[i], fDelete);
4229 RTMemFree(pImage->pExtents);
4230 pImage->pExtents = NULL;
4231 }
4232 pImage->cExtents = 0;
4233 if (pImage->pFile != NULL)
4234 vmdkFileClose(pImage, &pImage->pFile, fDelete);
4235 vmdkFileCheckAllClose(pImage);
4236 if (pImage->pGTCache)
4237 {
4238 RTMemFree(pImage->pGTCache);
4239 pImage->pGTCache = NULL;
4240 }
4241 if (pImage->pDescData)
4242 {
4243 RTMemFree(pImage->pDescData);
4244 pImage->pDescData = NULL;
4245 }
4246}
4247
4248/**
4249 * Internal. Flush image data (and metadata) to disk.
4250 */
4251static int vmdkFlushImage(PVMDKIMAGE pImage)
4252{
4253 PVMDKEXTENT pExtent;
4254 int rc = VINF_SUCCESS;
4255
4256 /* Update descriptor if changed. */
4257 if (pImage->Descriptor.fDirty)
4258 {
4259 rc = vmdkWriteDescriptor(pImage);
4260 if (RT_FAILURE(rc))
4261 goto out;
4262 }
4263
4264 for (unsigned i = 0; i < pImage->cExtents; i++)
4265 {
4266 pExtent = &pImage->pExtents[i];
4267 if (pExtent->pFile != NULL && pExtent->fMetaDirty)
4268 {
4269 switch (pExtent->enmType)
4270 {
4271 case VMDKETYPE_HOSTED_SPARSE:
4272 rc = vmdkWriteMetaSparseExtent(pExtent, 0);
4273 if (RT_FAILURE(rc))
4274 goto out;
4275 if (pExtent->fFooter)
4276 {
4277 uint64_t cbSize;
4278 rc = vmdkFileGetSize(pExtent->pFile, &cbSize);
4279 if (RT_FAILURE(rc))
4280 goto out;
4281 cbSize = RT_ALIGN_64(cbSize, 512);
4282 rc = vmdkWriteMetaSparseExtent(pExtent, cbSize - 2*512);
4283 if (RT_FAILURE(rc))
4284 goto out;
4285 }
4286 break;
4287#ifdef VBOX_WITH_VMDK_ESX
4288 case VMDKETYPE_ESX_SPARSE:
4289 /** @todo update the header. */
4290 break;
4291#endif /* VBOX_WITH_VMDK_ESX */
4292 case VMDKETYPE_VMFS:
4293 case VMDKETYPE_FLAT:
4294 /* Nothing to do. */
4295 break;
4296 case VMDKETYPE_ZERO:
4297 default:
4298 AssertMsgFailed(("extent with type %d marked as dirty\n",
4299 pExtent->enmType));
4300 break;
4301 }
4302 }
4303 switch (pExtent->enmType)
4304 {
4305 case VMDKETYPE_HOSTED_SPARSE:
4306#ifdef VBOX_WITH_VMDK_ESX
4307 case VMDKETYPE_ESX_SPARSE:
4308#endif /* VBOX_WITH_VMDK_ESX */
4309 case VMDKETYPE_VMFS:
4310 case VMDKETYPE_FLAT:
4311 /** @todo implement proper path absolute check. */
4312 if ( pExtent->pFile != NULL
4313 && !(pImage->uOpenFlags & VD_OPEN_FLAGS_READONLY)
4314 && !(pExtent->pszBasename[0] == RTPATH_SLASH))
4315 rc = vmdkFileFlush(pExtent->pFile);
4316 break;
4317 case VMDKETYPE_ZERO:
4318 /* No need to do anything for this extent. */
4319 break;
4320 default:
4321 AssertMsgFailed(("unknown extent type %d\n", pExtent->enmType));
4322 break;
4323 }
4324 }
4325
4326out:
4327 return rc;
4328}
4329
4330/**
4331 * Internal. Flush image data (and metadata) to disk - async version.
4332 */
4333static int vmdkFlushImageAsync(PVMDKIMAGE pImage, PVDIOCTX pIoCtx)
4334{
4335 PVMDKEXTENT pExtent;
4336 int rc = VINF_SUCCESS;
4337
4338 /* Update descriptor if changed. */
4339 if (pImage->Descriptor.fDirty)
4340 {
4341 rc = vmdkWriteDescriptor(pImage);
4342 if (RT_FAILURE(rc))
4343 goto out;
4344 }
4345
4346 for (unsigned i = 0; i < pImage->cExtents; i++)
4347 {
4348 pExtent = &pImage->pExtents[i];
4349 if (pExtent->pFile != NULL && pExtent->fMetaDirty)
4350 {
4351 switch (pExtent->enmType)
4352 {
4353 case VMDKETYPE_HOSTED_SPARSE:
4354 AssertMsgFailed(("Async I/O not supported for sparse images\n"));
4355 break;
4356#ifdef VBOX_WITH_VMDK_ESX
4357 case VMDKETYPE_ESX_SPARSE:
4358 /** @todo update the header. */
4359 break;
4360#endif /* VBOX_WITH_VMDK_ESX */
4361 case VMDKETYPE_VMFS:
4362 case VMDKETYPE_FLAT:
4363 /* Nothing to do. */
4364 break;
4365 case VMDKETYPE_ZERO:
4366 default:
4367 AssertMsgFailed(("extent with type %d marked as dirty\n",
4368 pExtent->enmType));
4369 break;
4370 }
4371 }
4372 switch (pExtent->enmType)
4373 {
4374 case VMDKETYPE_HOSTED_SPARSE:
4375#ifdef VBOX_WITH_VMDK_ESX
4376 case VMDKETYPE_ESX_SPARSE:
4377#endif /* VBOX_WITH_VMDK_ESX */
4378 case VMDKETYPE_VMFS:
4379 case VMDKETYPE_FLAT:
4380 /** @todo implement proper path absolute check. */
4381 if ( pExtent->pFile != NULL
4382 && !(pImage->uOpenFlags & VD_OPEN_FLAGS_READONLY)
4383 && !(pExtent->pszBasename[0] == RTPATH_SLASH))
4384 rc = vmdkFileFlushAsync(pExtent->pFile, pIoCtx);
4385 break;
4386 case VMDKETYPE_ZERO:
4387 /* No need to do anything for this extent. */
4388 break;
4389 default:
4390 AssertMsgFailed(("unknown extent type %d\n", pExtent->enmType));
4391 break;
4392 }
4393 }
4394
4395out:
4396 return rc;
4397}
4398
4399/**
4400 * Internal. Find extent corresponding to the sector number in the disk.
4401 */
4402static int vmdkFindExtent(PVMDKIMAGE pImage, uint64_t offSector,
4403 PVMDKEXTENT *ppExtent, uint64_t *puSectorInExtent)
4404{
4405 PVMDKEXTENT pExtent = NULL;
4406 int rc = VINF_SUCCESS;
4407
4408 for (unsigned i = 0; i < pImage->cExtents; i++)
4409 {
4410 if (offSector < pImage->pExtents[i].cNominalSectors)
4411 {
4412 pExtent = &pImage->pExtents[i];
4413 *puSectorInExtent = offSector + pImage->pExtents[i].uSectorOffset;
4414 break;
4415 }
4416 offSector -= pImage->pExtents[i].cNominalSectors;
4417 }
4418
4419 if (pExtent)
4420 *ppExtent = pExtent;
4421 else
4422 rc = VERR_IO_SECTOR_NOT_FOUND;
4423
4424 return rc;
4425}
4426
4427/**
4428 * Internal. Hash function for placing the grain table hash entries.
4429 */
4430static uint32_t vmdkGTCacheHash(PVMDKGTCACHE pCache, uint64_t uSector,
4431 unsigned uExtent)
4432{
4433 /** @todo this hash function is quite simple, maybe use a better one which
4434 * scrambles the bits better. */
4435 return (uSector + uExtent) % pCache->cEntries;
4436}
4437
4438/**
4439 * Internal. Get sector number in the extent file from the relative sector
4440 * number in the extent.
4441 */
4442static int vmdkGetSector(PVMDKGTCACHE pCache, PVMDKEXTENT pExtent,
4443 uint64_t uSector, uint64_t *puExtentSector)
4444{
4445 uint64_t uGDIndex, uGTSector, uGTBlock;
4446 uint32_t uGTHash, uGTBlockIndex;
4447 PVMDKGTCACHEENTRY pGTCacheEntry;
4448 uint32_t aGTDataTmp[VMDK_GT_CACHELINE_SIZE];
4449 int rc;
4450
4451 uGDIndex = uSector / pExtent->cSectorsPerGDE;
4452 if (uGDIndex >= pExtent->cGDEntries)
4453 return VERR_OUT_OF_RANGE;
4454 uGTSector = pExtent->pGD[uGDIndex];
4455 if (!uGTSector)
4456 {
4457 /* There is no grain table referenced by this grain directory
4458 * entry. So there is absolutely no data in this area. */
4459 *puExtentSector = 0;
4460 return VINF_SUCCESS;
4461 }
4462
4463 uGTBlock = uSector / (pExtent->cSectorsPerGrain * VMDK_GT_CACHELINE_SIZE);
4464 uGTHash = vmdkGTCacheHash(pCache, uGTBlock, pExtent->uExtent);
4465 pGTCacheEntry = &pCache->aGTCache[uGTHash];
4466 if ( pGTCacheEntry->uExtent != pExtent->uExtent
4467 || pGTCacheEntry->uGTBlock != uGTBlock)
4468 {
4469 /* Cache miss, fetch data from disk. */
4470 rc = vmdkFileReadAt(pExtent->pFile,
4471 VMDK_SECTOR2BYTE(uGTSector) + (uGTBlock % (pExtent->cGTEntries / VMDK_GT_CACHELINE_SIZE)) * sizeof(aGTDataTmp),
4472 aGTDataTmp, sizeof(aGTDataTmp), NULL);
4473 if (RT_FAILURE(rc))
4474 return vmdkError(pExtent->pImage, rc, RT_SRC_POS, N_("VMDK: cannot read grain table entry in '%s'"), pExtent->pszFullname);
4475 pGTCacheEntry->uExtent = pExtent->uExtent;
4476 pGTCacheEntry->uGTBlock = uGTBlock;
4477 for (unsigned i = 0; i < VMDK_GT_CACHELINE_SIZE; i++)
4478 pGTCacheEntry->aGTData[i] = RT_LE2H_U32(aGTDataTmp[i]);
4479 }
4480 uGTBlockIndex = (uSector / pExtent->cSectorsPerGrain) % VMDK_GT_CACHELINE_SIZE;
4481 uint32_t uGrainSector = pGTCacheEntry->aGTData[uGTBlockIndex];
4482 if (uGrainSector)
4483 *puExtentSector = uGrainSector + uSector % pExtent->cSectorsPerGrain;
4484 else
4485 *puExtentSector = 0;
4486 return VINF_SUCCESS;
4487}
4488
4489/**
4490 * Internal. Get sector number in the extent file from the relative sector
4491 * number in the extent - version for async access.
4492 */
4493static int vmdkGetSectorAsync(PVMDKIMAGE pImage, PVDIOCTX pIoCtx,
4494 PVMDKGTCACHE pCache, PVMDKEXTENT pExtent,
4495 uint64_t uSector, uint64_t *puExtentSector)
4496{
4497 uint64_t uGDIndex, uGTSector, uGTBlock;
4498 uint32_t uGTHash, uGTBlockIndex;
4499 PVMDKGTCACHEENTRY pGTCacheEntry;
4500 uint32_t aGTDataTmp[VMDK_GT_CACHELINE_SIZE];
4501 int rc;
4502
4503 uGDIndex = uSector / pExtent->cSectorsPerGDE;
4504 if (uGDIndex >= pExtent->cGDEntries)
4505 return VERR_OUT_OF_RANGE;
4506 uGTSector = pExtent->pGD[uGDIndex];
4507 if (!uGTSector)
4508 {
4509 /* There is no grain table referenced by this grain directory
4510 * entry. So there is absolutely no data in this area. */
4511 *puExtentSector = 0;
4512 return VINF_SUCCESS;
4513 }
4514
4515 LogFlowFunc(("uGTSector=%llu\n", uGTSector));
4516
4517 uGTBlock = uSector / (pExtent->cSectorsPerGrain * VMDK_GT_CACHELINE_SIZE);
4518 uGTHash = vmdkGTCacheHash(pCache, uGTBlock, pExtent->uExtent);
4519 pGTCacheEntry = &pCache->aGTCache[uGTHash];
4520 if ( pGTCacheEntry->uExtent != pExtent->uExtent
4521 || pGTCacheEntry->uGTBlock != uGTBlock)
4522 {
4523 /* Cache miss, fetch data from disk. */
4524 PVDMETAXFER pMetaXfer;
4525 rc = pImage->pInterfaceIOCallbacks->pfnReadMetaAsync(pImage->pInterfaceIO->pvUser,
4526 pExtent->pFile->pStorage,
4527 VMDK_SECTOR2BYTE(uGTSector) + (uGTBlock % (pExtent->cGTEntries / VMDK_GT_CACHELINE_SIZE)) * sizeof(aGTDataTmp),
4528 aGTDataTmp, sizeof(aGTDataTmp), pIoCtx, &pMetaXfer, NULL, NULL);
4529 if (RT_FAILURE(rc))
4530 return rc;
4531 /* We can release the metadata transfer immediately. */
4532 pImage->pInterfaceIOCallbacks->pfnMetaXferRelease(pImage->pInterfaceIO->pvUser, pMetaXfer);
4533 pGTCacheEntry->uExtent = pExtent->uExtent;
4534 pGTCacheEntry->uGTBlock = uGTBlock;
4535 for (unsigned i = 0; i < VMDK_GT_CACHELINE_SIZE; i++)
4536 pGTCacheEntry->aGTData[i] = RT_LE2H_U32(aGTDataTmp[i]);
4537 }
4538 uGTBlockIndex = (uSector / pExtent->cSectorsPerGrain) % VMDK_GT_CACHELINE_SIZE;
4539 uint32_t uGrainSector = pGTCacheEntry->aGTData[uGTBlockIndex];
4540 if (uGrainSector)
4541 *puExtentSector = uGrainSector + uSector % pExtent->cSectorsPerGrain;
4542 else
4543 *puExtentSector = 0;
4544 return VINF_SUCCESS;
4545}
4546
4547/**
4548 * Internal. Allocates a new grain table (if necessary), writes the grain
4549 * and updates the grain table. The cache is also updated by this operation.
4550 * This is separate from vmdkGetSector, because that should be as fast as
4551 * possible. Most code from vmdkGetSector also appears here.
4552 */
4553static int vmdkAllocGrain(PVMDKGTCACHE pCache, PVMDKEXTENT pExtent,
4554 uint64_t uSector, const void *pvBuf,
4555 uint64_t cbWrite)
4556{
4557 uint64_t uGDIndex, uGTSector, uRGTSector, uGTBlock;
4558 uint64_t cbExtentSize;
4559 uint32_t uGTHash, uGTBlockIndex;
4560 PVMDKGTCACHEENTRY pGTCacheEntry;
4561 uint32_t aGTDataTmp[VMDK_GT_CACHELINE_SIZE];
4562 int rc;
4563
4564 uGDIndex = uSector / pExtent->cSectorsPerGDE;
4565 if (uGDIndex >= pExtent->cGDEntries)
4566 return VERR_OUT_OF_RANGE;
4567 uGTSector = pExtent->pGD[uGDIndex];
4568 if (pExtent->pRGD)
4569 uRGTSector = pExtent->pRGD[uGDIndex];
4570 else
4571 uRGTSector = 0; /**< avoid compiler warning */
4572 if (!uGTSector)
4573 {
4574 /* There is no grain table referenced by this grain directory
4575 * entry. So there is absolutely no data in this area. Allocate
4576 * a new grain table and put the reference to it in the GDs. */
4577 rc = vmdkFileGetSize(pExtent->pFile, &cbExtentSize);
4578 if (RT_FAILURE(rc))
4579 return vmdkError(pExtent->pImage, rc, RT_SRC_POS, N_("VMDK: error getting size in '%s'"), pExtent->pszFullname);
4580 Assert(!(cbExtentSize % 512));
4581 cbExtentSize = RT_ALIGN_64(cbExtentSize, 512);
4582 uGTSector = VMDK_BYTE2SECTOR(cbExtentSize);
4583 /* For writable streamOptimized extents the final sector is the
4584 * end-of-stream marker. Will be re-added after the grain table.
4585 * If the file has a footer it also will be re-added before EOS. */
4586 if (pExtent->pImage->uImageFlags & VD_VMDK_IMAGE_FLAGS_STREAM_OPTIMIZED)
4587 {
4588 uint64_t uEOSOff = 0;
4589 uGTSector--;
4590 if (pExtent->fFooter)
4591 {
4592 uGTSector--;
4593 uEOSOff = 512;
4594 rc = vmdkWriteMetaSparseExtent(pExtent, VMDK_SECTOR2BYTE(uGTSector) + pExtent->cGTEntries * sizeof(uint32_t));
4595 if (RT_FAILURE(rc))
4596 return vmdkError(pExtent->pImage, rc, RT_SRC_POS, N_("VMDK: cannot write footer after grain table in '%s'"), pExtent->pszFullname);
4597 }
4598 pExtent->uLastGrainSector = 0;
4599 uint8_t aEOS[512];
4600 memset(aEOS, '\0', sizeof(aEOS));
4601 rc = vmdkFileWriteAt(pExtent->pFile,
4602 VMDK_SECTOR2BYTE(uGTSector) + pExtent->cGTEntries * sizeof(uint32_t) + uEOSOff,
4603 aEOS, sizeof(aEOS), NULL);
4604 if (RT_FAILURE(rc))
4605 return vmdkError(pExtent->pImage, rc, RT_SRC_POS, N_("VMDK: cannot write end-of stream marker after grain table in '%s'"), pExtent->pszFullname);
4606 }
4607 /* Normally the grain table is preallocated for hosted sparse extents
4608 * that support more than 32 bit sector numbers. So this shouldn't
4609 * ever happen on a valid extent. */
4610 if (uGTSector > UINT32_MAX)
4611 return VERR_VD_VMDK_INVALID_HEADER;
4612 /* Write grain table by writing the required number of grain table
4613 * cache chunks. Avoids dynamic memory allocation, but is a bit
4614 * slower. But as this is a pretty infrequently occurring case it
4615 * should be acceptable. */
4616 memset(aGTDataTmp, '\0', sizeof(aGTDataTmp));
4617 for (unsigned i = 0;
4618 i < pExtent->cGTEntries / VMDK_GT_CACHELINE_SIZE;
4619 i++)
4620 {
4621 rc = vmdkFileWriteAt(pExtent->pFile,
4622 VMDK_SECTOR2BYTE(uGTSector) + i * sizeof(aGTDataTmp),
4623 aGTDataTmp, sizeof(aGTDataTmp), NULL);
4624 if (RT_FAILURE(rc))
4625 return vmdkError(pExtent->pImage, rc, RT_SRC_POS, N_("VMDK: cannot write grain table allocation in '%s'"), pExtent->pszFullname);
4626 }
4627 if (pExtent->pRGD)
4628 {
4629 AssertReturn(!uRGTSector, VERR_VD_VMDK_INVALID_HEADER);
4630 rc = vmdkFileGetSize(pExtent->pFile, &cbExtentSize);
4631 if (RT_FAILURE(rc))
4632 return vmdkError(pExtent->pImage, rc, RT_SRC_POS, N_("VMDK: error getting size in '%s'"), pExtent->pszFullname);
4633 Assert(!(cbExtentSize % 512));
4634 uRGTSector = VMDK_BYTE2SECTOR(cbExtentSize);
4635 /* For writable streamOptimized extents the final sector is the
4636 * end-of-stream marker. Will be re-added after the grain table.
4637 * If the file has a footer it also will be re-added before EOS. */
4638 if (pExtent->pImage->uImageFlags & VD_VMDK_IMAGE_FLAGS_STREAM_OPTIMIZED)
4639 {
4640 uint64_t uEOSOff = 0;
4641 uRGTSector--;
4642 if (pExtent->fFooter)
4643 {
4644 uRGTSector--;
4645 uEOSOff = 512;
4646 rc = vmdkWriteMetaSparseExtent(pExtent, VMDK_SECTOR2BYTE(uRGTSector) + pExtent->cGTEntries * sizeof(uint32_t));
4647 if (RT_FAILURE(rc))
4648 return vmdkError(pExtent->pImage, rc, RT_SRC_POS, N_("VMDK: cannot write footer after redundant grain table in '%s'"), pExtent->pszFullname);
4649 }
4650 pExtent->uLastGrainSector = 0;
4651 uint8_t aEOS[512];
4652 memset(aEOS, '\0', sizeof(aEOS));
4653 rc = vmdkFileWriteAt(pExtent->pFile,
4654 VMDK_SECTOR2BYTE(uRGTSector) + pExtent->cGTEntries * sizeof(uint32_t) + uEOSOff,
4655 aEOS, sizeof(aEOS), NULL);
4656 if (RT_FAILURE(rc))
4657 return vmdkError(pExtent->pImage, rc, RT_SRC_POS, N_("VMDK: cannot write end-of stream marker after redundant grain table in '%s'"), pExtent->pszFullname);
4658 }
4659 /* Normally the redundant grain table is preallocated for hosted
4660 * sparse extents that support more than 32 bit sector numbers. So
4661 * this shouldn't ever happen on a valid extent. */
4662 if (uRGTSector > UINT32_MAX)
4663 return VERR_VD_VMDK_INVALID_HEADER;
4664 /* Write backup grain table by writing the required number of grain
4665 * table cache chunks. Avoids dynamic memory allocation, but is a
4666 * bit slower. But as this is a pretty infrequently occurring case
4667 * it should be acceptable. */
4668 for (unsigned i = 0;
4669 i < pExtent->cGTEntries / VMDK_GT_CACHELINE_SIZE;
4670 i++)
4671 {
4672 rc = vmdkFileWriteAt(pExtent->pFile,
4673 VMDK_SECTOR2BYTE(uRGTSector) + i * sizeof(aGTDataTmp),
4674 aGTDataTmp, sizeof(aGTDataTmp), NULL);
4675 if (RT_FAILURE(rc))
4676 return vmdkError(pExtent->pImage, rc, RT_SRC_POS, N_("VMDK: cannot write backup grain table allocation in '%s'"), pExtent->pszFullname);
4677 }
4678 }
4679
4680 /* Update the grain directory on disk (doing it before writing the
4681 * grain table will result in a garbled extent if the operation is
4682 * aborted for some reason. Otherwise the worst that can happen is
4683 * some unused sectors in the extent. */
4684 uint32_t uGTSectorLE = RT_H2LE_U64(uGTSector);
4685 rc = vmdkFileWriteAt(pExtent->pFile,
4686 VMDK_SECTOR2BYTE(pExtent->uSectorGD) + uGDIndex * sizeof(uGTSectorLE),
4687 &uGTSectorLE, sizeof(uGTSectorLE), NULL);
4688 if (RT_FAILURE(rc))
4689 return vmdkError(pExtent->pImage, rc, RT_SRC_POS, N_("VMDK: cannot write grain directory entry in '%s'"), pExtent->pszFullname);
4690 if (pExtent->pRGD)
4691 {
4692 uint32_t uRGTSectorLE = RT_H2LE_U64(uRGTSector);
4693 rc = vmdkFileWriteAt(pExtent->pFile,
4694 VMDK_SECTOR2BYTE(pExtent->uSectorRGD) + uGDIndex * sizeof(uRGTSectorLE),
4695 &uRGTSectorLE, sizeof(uRGTSectorLE), NULL);
4696 if (RT_FAILURE(rc))
4697 return vmdkError(pExtent->pImage, rc, RT_SRC_POS, N_("VMDK: cannot write backup grain directory entry in '%s'"), pExtent->pszFullname);
4698 }
4699
4700 /* As the final step update the in-memory copy of the GDs. */
4701 pExtent->pGD[uGDIndex] = uGTSector;
4702 if (pExtent->pRGD)
4703 pExtent->pRGD[uGDIndex] = uRGTSector;
4704 }
4705
4706 rc = vmdkFileGetSize(pExtent->pFile, &cbExtentSize);
4707 if (RT_FAILURE(rc))
4708 return vmdkError(pExtent->pImage, rc, RT_SRC_POS, N_("VMDK: error getting size in '%s'"), pExtent->pszFullname);
4709 Assert(!(cbExtentSize % 512));
4710
4711 /* Write the data. Always a full grain, or we're in big trouble. */
4712 if (pExtent->pImage->uImageFlags & VD_VMDK_IMAGE_FLAGS_STREAM_OPTIMIZED)
4713 {
4714 /* For streamOptimized extents this is a little more difficult, as the
4715 * cached data also needs to be updated, to handle updating the last
4716 * written block properly. Also we're trying to avoid unnecessary gaps.
4717 * Additionally the end-of-stream marker needs to be written. */
4718 if (!pExtent->uLastGrainSector)
4719 {
4720 cbExtentSize -= 512;
4721 if (pExtent->fFooter)
4722 cbExtentSize -= 512;
4723 }
4724 else
4725 cbExtentSize = VMDK_SECTOR2BYTE(pExtent->uLastGrainSector) + pExtent->cbLastGrainWritten;
4726 Assert(cbWrite == VMDK_SECTOR2BYTE(pExtent->cSectorsPerGrain));
4727 uint32_t cbGrain = 0;
4728 rc = vmdkFileDeflateAt(pExtent->pFile, cbExtentSize,
4729 pvBuf, cbWrite, VMDK_MARKER_IGNORE, uSector, &cbGrain);
4730 if (RT_FAILURE(rc))
4731 {
4732 pExtent->uGrainSector = 0;
4733 pExtent->uLastGrainSector = 0;
4734 AssertRC(rc);
4735 return vmdkError(pExtent->pImage, rc, RT_SRC_POS, N_("VMDK: cannot write allocated compressed data block in '%s'"), pExtent->pszFullname);
4736 }
4737 cbGrain = RT_ALIGN(cbGrain, 512);
4738 pExtent->uLastGrainSector = VMDK_BYTE2SECTOR(cbExtentSize);
4739 pExtent->uLastGrainWritten = uSector / pExtent->cSectorsPerGrain;
4740 pExtent->cbLastGrainWritten = cbGrain;
4741 memcpy(pExtent->pvGrain, pvBuf, cbWrite);
4742 pExtent->uGrainSector = uSector;
4743
4744 uint64_t uEOSOff = 0;
4745 if (pExtent->fFooter)
4746 {
4747 uEOSOff = 512;
4748 rc = vmdkWriteMetaSparseExtent(pExtent, cbExtentSize + RT_ALIGN(cbGrain, 512));
4749 if (RT_FAILURE(rc))
4750 return vmdkError(pExtent->pImage, rc, RT_SRC_POS, N_("VMDK: cannot write footer after allocated data block in '%s'"), pExtent->pszFullname);
4751 }
4752 uint8_t aEOS[512];
4753 memset(aEOS, '\0', sizeof(aEOS));
4754 rc = vmdkFileWriteAt(pExtent->pFile, cbExtentSize + RT_ALIGN(cbGrain, 512) + uEOSOff,
4755 aEOS, sizeof(aEOS), NULL);
4756 if (RT_FAILURE(rc))
4757 return vmdkError(pExtent->pImage, rc, RT_SRC_POS, N_("VMDK: cannot write end-of stream marker after allocated data block in '%s'"), pExtent->pszFullname);
4758 }
4759 else
4760 {
4761 rc = vmdkFileWriteAt(pExtent->pFile, cbExtentSize, pvBuf, cbWrite, NULL);
4762 if (RT_FAILURE(rc))
4763 return vmdkError(pExtent->pImage, rc, RT_SRC_POS, N_("VMDK: cannot write allocated data block in '%s'"), pExtent->pszFullname);
4764 }
4765
4766 /* Update the grain table (and the cache). */
4767 uGTBlock = uSector / (pExtent->cSectorsPerGrain * VMDK_GT_CACHELINE_SIZE);
4768 uGTHash = vmdkGTCacheHash(pCache, uGTBlock, pExtent->uExtent);
4769 pGTCacheEntry = &pCache->aGTCache[uGTHash];
4770 if ( pGTCacheEntry->uExtent != pExtent->uExtent
4771 || pGTCacheEntry->uGTBlock != uGTBlock)
4772 {
4773 /* Cache miss, fetch data from disk. */
4774 rc = vmdkFileReadAt(pExtent->pFile,
4775 VMDK_SECTOR2BYTE(uGTSector) + (uGTBlock % (pExtent->cGTEntries / VMDK_GT_CACHELINE_SIZE)) * sizeof(aGTDataTmp),
4776 aGTDataTmp, sizeof(aGTDataTmp), NULL);
4777 if (RT_FAILURE(rc))
4778 return vmdkError(pExtent->pImage, rc, RT_SRC_POS, N_("VMDK: cannot read allocated grain table entry in '%s'"), pExtent->pszFullname);
4779 pGTCacheEntry->uExtent = pExtent->uExtent;
4780 pGTCacheEntry->uGTBlock = uGTBlock;
4781 for (unsigned i = 0; i < VMDK_GT_CACHELINE_SIZE; i++)
4782 pGTCacheEntry->aGTData[i] = RT_LE2H_U32(aGTDataTmp[i]);
4783 }
4784 else
4785 {
4786 /* Cache hit. Convert grain table block back to disk format, otherwise
4787 * the code below will write garbage for all but the updated entry. */
4788 for (unsigned i = 0; i < VMDK_GT_CACHELINE_SIZE; i++)
4789 aGTDataTmp[i] = RT_H2LE_U32(pGTCacheEntry->aGTData[i]);
4790 }
4791 uGTBlockIndex = (uSector / pExtent->cSectorsPerGrain) % VMDK_GT_CACHELINE_SIZE;
4792 aGTDataTmp[uGTBlockIndex] = RT_H2LE_U32(VMDK_BYTE2SECTOR(cbExtentSize));
4793 pGTCacheEntry->aGTData[uGTBlockIndex] = VMDK_BYTE2SECTOR(cbExtentSize);
4794 /* Update grain table on disk. */
4795 rc = vmdkFileWriteAt(pExtent->pFile,
4796 VMDK_SECTOR2BYTE(uGTSector) + (uGTBlock % (pExtent->cGTEntries / VMDK_GT_CACHELINE_SIZE)) * sizeof(aGTDataTmp),
4797 aGTDataTmp, sizeof(aGTDataTmp), NULL);
4798 if (RT_FAILURE(rc))
4799 return vmdkError(pExtent->pImage, rc, RT_SRC_POS, N_("VMDK: cannot write updated grain table in '%s'"), pExtent->pszFullname);
4800 if (pExtent->pRGD)
4801 {
4802 /* Update backup grain table on disk. */
4803 rc = vmdkFileWriteAt(pExtent->pFile,
4804 VMDK_SECTOR2BYTE(uRGTSector) + (uGTBlock % (pExtent->cGTEntries / VMDK_GT_CACHELINE_SIZE)) * sizeof(aGTDataTmp),
4805 aGTDataTmp, sizeof(aGTDataTmp), NULL);
4806 if (RT_FAILURE(rc))
4807 return vmdkError(pExtent->pImage, rc, RT_SRC_POS, N_("VMDK: cannot write updated backup grain table in '%s'"), pExtent->pszFullname);
4808 }
4809#ifdef VBOX_WITH_VMDK_ESX
4810 if (RT_SUCCESS(rc) && pExtent->enmType == VMDKETYPE_ESX_SPARSE)
4811 {
4812 pExtent->uFreeSector = uGTSector + VMDK_BYTE2SECTOR(cbWrite);
4813 pExtent->fMetaDirty = true;
4814 }
4815#endif /* VBOX_WITH_VMDK_ESX */
4816 return rc;
4817}
4818
4819/**
4820 * Internal: Updates the grain table during a async grain allocation.
4821 */
4822static int vmdkAllocGrainAsyncGTUpdate(PVMDKIMAGE pImage, PVMDKEXTENT pExtent,
4823 PVMDKGTCACHE pCache, PVDIOCTX pIoCtx,
4824 PVMDKGRAINALLOCASYNC pGrainAlloc)
4825{
4826 int rc = VINF_SUCCESS;
4827 uint32_t aGTDataTmp[VMDK_GT_CACHELINE_SIZE];
4828 uint32_t uGTHash, uGTBlockIndex;
4829 uint64_t uGTSector, uRGTSector, uGTBlock;
4830 uint64_t uSector = pGrainAlloc->uSector;
4831 PVMDKGTCACHEENTRY pGTCacheEntry;
4832
4833 LogFlowFunc(("pImage=%#p pExtent=%#p pCache=%#p pIoCtx=%#p pGrainAlloc=%#p\n",
4834 pImage, pExtent, pCache, pIoCtx, pGrainAlloc));
4835
4836 uGTSector = pGrainAlloc->uGTSector;
4837 uRGTSector = pGrainAlloc->uRGTSector;
4838 LogFlow(("uGTSector=%llu uRGTSector=%llu\n", uGTSector, uRGTSector));
4839
4840 /* Update the grain table (and the cache). */
4841 uGTBlock = uSector / (pExtent->cSectorsPerGrain * VMDK_GT_CACHELINE_SIZE);
4842 uGTHash = vmdkGTCacheHash(pCache, uGTBlock, pExtent->uExtent);
4843 pGTCacheEntry = &pCache->aGTCache[uGTHash];
4844 if ( pGTCacheEntry->uExtent != pExtent->uExtent
4845 || pGTCacheEntry->uGTBlock != uGTBlock)
4846 {
4847 /* Cache miss, fetch data from disk. */
4848 LogFlow(("Cache miss, fetch data from disk\n"));
4849 PVDMETAXFER pMetaXfer = NULL;
4850 rc = pImage->pInterfaceIOCallbacks->pfnReadMetaAsync(pExtent->pImage->pInterfaceIO->pvUser,
4851 pExtent->pFile->pStorage,
4852 VMDK_SECTOR2BYTE(uGTSector) + (uGTBlock % (pExtent->cGTEntries / VMDK_GT_CACHELINE_SIZE)) * sizeof(aGTDataTmp),
4853 aGTDataTmp, sizeof(aGTDataTmp), pIoCtx,
4854 &pMetaXfer,
4855 vmdkAllocGrainAsyncComplete, pGrainAlloc);
4856 if (rc == VERR_VD_ASYNC_IO_IN_PROGRESS)
4857 {
4858 pGrainAlloc->cIoXfersPending++;
4859 pGrainAlloc->fGTUpdateNeeded = true;
4860 /* Leave early, we will be called again after the read completed. */
4861 LogFlowFunc(("Metadata read in progress, leaving\n"));
4862 return rc;
4863 }
4864 else if (RT_FAILURE(rc))
4865 return vmdkError(pExtent->pImage, rc, RT_SRC_POS, N_("VMDK: cannot read allocated grain table entry in '%s'"), pExtent->pszFullname);
4866 pImage->pInterfaceIOCallbacks->pfnMetaXferRelease(pExtent->pImage->pInterfaceIO->pvUser, pMetaXfer);
4867 pGTCacheEntry->uExtent = pExtent->uExtent;
4868 pGTCacheEntry->uGTBlock = uGTBlock;
4869 for (unsigned i = 0; i < VMDK_GT_CACHELINE_SIZE; i++)
4870 pGTCacheEntry->aGTData[i] = RT_LE2H_U32(aGTDataTmp[i]);
4871 }
4872 else
4873 {
4874 /* Cache hit. Convert grain table block back to disk format, otherwise
4875 * the code below will write garbage for all but the updated entry. */
4876 for (unsigned i = 0; i < VMDK_GT_CACHELINE_SIZE; i++)
4877 aGTDataTmp[i] = RT_H2LE_U32(pGTCacheEntry->aGTData[i]);
4878 }
4879 pGrainAlloc->fGTUpdateNeeded = false;
4880 uGTBlockIndex = (uSector / pExtent->cSectorsPerGrain) % VMDK_GT_CACHELINE_SIZE;
4881 aGTDataTmp[uGTBlockIndex] = RT_H2LE_U32(VMDK_BYTE2SECTOR(pGrainAlloc->cbExtentSize));
4882 pGTCacheEntry->aGTData[uGTBlockIndex] = VMDK_BYTE2SECTOR(pGrainAlloc->cbExtentSize);
4883 /* Update grain table on disk. */
4884 rc = pImage->pInterfaceIOCallbacks->pfnWriteMetaAsync(pExtent->pImage->pInterfaceIO->pvUser,
4885 pExtent->pFile->pStorage,
4886 VMDK_SECTOR2BYTE(uGTSector) + (uGTBlock % (pExtent->cGTEntries / VMDK_GT_CACHELINE_SIZE)) * sizeof(aGTDataTmp),
4887 aGTDataTmp, sizeof(aGTDataTmp), pIoCtx,
4888 vmdkAllocGrainAsyncComplete, pGrainAlloc);
4889 if (rc == VERR_VD_ASYNC_IO_IN_PROGRESS)
4890 pGrainAlloc->cIoXfersPending++;
4891 else if (RT_FAILURE(rc))
4892 return vmdkError(pExtent->pImage, rc, RT_SRC_POS, N_("VMDK: cannot write updated grain table in '%s'"), pExtent->pszFullname);
4893 if (pExtent->pRGD)
4894 {
4895 /* Update backup grain table on disk. */
4896 rc = pImage->pInterfaceIOCallbacks->pfnWriteMetaAsync(pExtent->pImage->pInterfaceIO->pvUser,
4897 pExtent->pFile->pStorage,
4898 VMDK_SECTOR2BYTE(uRGTSector) + (uGTBlock % (pExtent->cGTEntries / VMDK_GT_CACHELINE_SIZE)) * sizeof(aGTDataTmp),
4899 aGTDataTmp, sizeof(aGTDataTmp), pIoCtx,
4900 vmdkAllocGrainAsyncComplete, pGrainAlloc);
4901 if (rc == VERR_VD_ASYNC_IO_IN_PROGRESS)
4902 pGrainAlloc->cIoXfersPending++;
4903 else if (RT_FAILURE(rc))
4904 return vmdkError(pExtent->pImage, rc, RT_SRC_POS, N_("VMDK: cannot write updated backup grain table in '%s'"), pExtent->pszFullname);
4905 }
4906#ifdef VBOX_WITH_VMDK_ESX
4907 if (RT_SUCCESS(rc) && pExtent->enmType == VMDKETYPE_ESX_SPARSE)
4908 {
4909 pExtent->uFreeSector = uGTSector + VMDK_BYTE2SECTOR(cbWrite);
4910 pExtent->fMetaDirty = true;
4911 }
4912#endif /* VBOX_WITH_VMDK_ESX */
4913
4914 LogFlowFunc(("leaving rc=%Rrc\n", rc));
4915
4916 return rc;
4917}
4918
4919/**
4920 * Internal - complete the grain allocation by updating disk grain table if required.
4921 */
4922static int vmdkAllocGrainAsyncComplete(void *pvBackendData, PVDIOCTX pIoCtx, void *pvUser, int rcReq)
4923{
4924 int rc = VINF_SUCCESS;
4925 PVMDKIMAGE pImage = (PVMDKIMAGE)pvBackendData;
4926 PVMDKGRAINALLOCASYNC pGrainAlloc = (PVMDKGRAINALLOCASYNC)pvUser;
4927 PVMDKEXTENT pExtent = pGrainAlloc->pExtent;
4928
4929 LogFlowFunc(("pvBackendData=%#p pIoCtx=%#p pvUser=%#p rcReq=%Rrc\n",
4930 pvBackendData, pIoCtx, pvUser, rcReq));
4931
4932 pGrainAlloc->cIoXfersPending--;
4933 if (!pGrainAlloc->cIoXfersPending && pGrainAlloc->fGTUpdateNeeded)
4934 rc = vmdkAllocGrainAsyncGTUpdate(pImage, pGrainAlloc->pExtent, pImage->pGTCache,
4935 pIoCtx, pGrainAlloc);
4936
4937 if (!pGrainAlloc->cIoXfersPending)
4938 {
4939 /* Grain allocation completed. */
4940 RTMemFree(pGrainAlloc);
4941 }
4942
4943 LogFlowFunc(("Leaving rc=%Rrc\n", rc));
4944 return rc;
4945}
4946
4947/**
4948 * Internal. Allocates a new grain table (if necessary) - async version.
4949 */
4950static int vmdkAllocGrainAsync(PVMDKGTCACHE pCache, PVMDKEXTENT pExtent,
4951 PVDIOCTX pIoCtx, uint64_t uSector,
4952 uint64_t cbWrite)
4953{
4954 uint64_t uGDIndex, uGTSector, uRGTSector;
4955 uint64_t cbExtentSize;
4956 PVMDKGRAINALLOCASYNC pGrainAlloc = NULL;
4957 PVMDKIMAGE pImage = pExtent->pImage;
4958 int rc;
4959
4960 LogFlowFunc(("pCache=%#p pExtent=%#p pIoCtx=%#p uSector=%llu cbWrite=%llu\n",
4961 pCache, pExtent, pIoCtx, uSector, cbWrite));
4962
4963 AssertReturn(!(pExtent->pImage->uImageFlags & VD_VMDK_IMAGE_FLAGS_STREAM_OPTIMIZED), VERR_NOT_SUPPORTED);
4964
4965 pGrainAlloc = (PVMDKGRAINALLOCASYNC)RTMemAllocZ(sizeof(VMDKGRAINALLOCASYNC));
4966 if (!pGrainAlloc)
4967 return VERR_NO_MEMORY;
4968
4969 pGrainAlloc->pExtent = pExtent;
4970 pGrainAlloc->uSector = uSector;
4971
4972 uGDIndex = uSector / pExtent->cSectorsPerGDE;
4973 if (uGDIndex >= pExtent->cGDEntries)
4974 return VERR_OUT_OF_RANGE;
4975 uGTSector = pExtent->pGD[uGDIndex];
4976 if (pExtent->pRGD)
4977 uRGTSector = pExtent->pRGD[uGDIndex];
4978 else
4979 uRGTSector = 0; /**< avoid compiler warning */
4980 if (!uGTSector)
4981 {
4982 LogFlow(("Allocating new grain table\n"));
4983
4984 /* There is no grain table referenced by this grain directory
4985 * entry. So there is absolutely no data in this area. Allocate
4986 * a new grain table and put the reference to it in the GDs. */
4987 rc = vmdkFileGetSize(pExtent->pFile, &cbExtentSize);
4988 if (RT_FAILURE(rc))
4989 return vmdkError(pExtent->pImage, rc, RT_SRC_POS, N_("VMDK: error getting size in '%s'"), pExtent->pszFullname);
4990 Assert(!(cbExtentSize % 512));
4991
4992 pGrainAlloc->cbExtentOld = cbExtentSize;
4993
4994 cbExtentSize = RT_ALIGN_64(cbExtentSize, 512);
4995 uGTSector = VMDK_BYTE2SECTOR(cbExtentSize);
4996
4997 /* Normally the grain table is preallocated for hosted sparse extents
4998 * that support more than 32 bit sector numbers. So this shouldn't
4999 * ever happen on a valid extent. */
5000 if (uGTSector > UINT32_MAX)
5001 return VERR_VD_VMDK_INVALID_HEADER;
5002
5003 /* Write grain table by writing the required number of grain table
5004 * cache chunks. Allocate memory dynamically here or we flood the
5005 * metadata cache with very small entries.
5006 */
5007 size_t cbGTDataTmp = (pExtent->cGTEntries / VMDK_GT_CACHELINE_SIZE) * VMDK_GT_CACHELINE_SIZE * sizeof(uint32_t);
5008 uint32_t *paGTDataTmp = (uint32_t *)RTMemTmpAllocZ(cbGTDataTmp);
5009
5010 if (!paGTDataTmp)
5011 return VERR_NO_MEMORY;
5012
5013 memset(paGTDataTmp, '\0', cbGTDataTmp);
5014 rc = pImage->pInterfaceIOCallbacks->pfnWriteMetaAsync(pExtent->pImage->pInterfaceIO->pvUser,
5015 pExtent->pFile->pStorage,
5016 VMDK_SECTOR2BYTE(uGTSector),
5017 paGTDataTmp, cbGTDataTmp, pIoCtx,
5018 vmdkAllocGrainAsyncComplete, pGrainAlloc);
5019 if (rc == VERR_VD_ASYNC_IO_IN_PROGRESS)
5020 pGrainAlloc->cIoXfersPending++;
5021 else if (RT_FAILURE(rc))
5022 {
5023 RTMemTmpFree(paGTDataTmp);
5024 return vmdkError(pExtent->pImage, rc, RT_SRC_POS, N_("VMDK: cannot write grain table allocation in '%s'"), pExtent->pszFullname);
5025 }
5026
5027 if (pExtent->pRGD)
5028 {
5029 AssertReturn(!uRGTSector, VERR_VD_VMDK_INVALID_HEADER);
5030 rc = vmdkFileGetSize(pExtent->pFile, &cbExtentSize);
5031 if (RT_FAILURE(rc))
5032 return vmdkError(pExtent->pImage, rc, RT_SRC_POS, N_("VMDK: error getting size in '%s'"), pExtent->pszFullname);
5033 Assert(!(cbExtentSize % 512));
5034 uRGTSector = VMDK_BYTE2SECTOR(cbExtentSize);
5035
5036 /* Normally the redundant grain table is preallocated for hosted
5037 * sparse extents that support more than 32 bit sector numbers. So
5038 * this shouldn't ever happen on a valid extent. */
5039 if (uRGTSector > UINT32_MAX)
5040 {
5041 RTMemTmpFree(paGTDataTmp);
5042 return VERR_VD_VMDK_INVALID_HEADER;
5043 }
5044 /* Write backup grain table by writing the required number of grain
5045 * table cache chunks. */
5046 rc = pImage->pInterfaceIOCallbacks->pfnWriteMetaAsync(pExtent->pImage->pInterfaceIO->pvUser,
5047 pExtent->pFile->pStorage,
5048 VMDK_SECTOR2BYTE(uRGTSector),
5049 paGTDataTmp, cbGTDataTmp, pIoCtx,
5050 vmdkAllocGrainAsyncComplete, pGrainAlloc);
5051 if (rc == VERR_VD_ASYNC_IO_IN_PROGRESS)
5052 pGrainAlloc->cIoXfersPending++;
5053 else if (RT_FAILURE(rc))
5054 {
5055 RTMemTmpFree(paGTDataTmp);
5056 return vmdkError(pExtent->pImage, rc, RT_SRC_POS, N_("VMDK: cannot write backup grain table allocation in '%s'"), pExtent->pszFullname);
5057 }
5058 }
5059
5060 RTMemTmpFree(paGTDataTmp);
5061
5062 /* Update the grain directory on disk (doing it before writing the
5063 * grain table will result in a garbled extent if the operation is
5064 * aborted for some reason. Otherwise the worst that can happen is
5065 * some unused sectors in the extent. */
5066 uint32_t uGTSectorLE = RT_H2LE_U64(uGTSector);
5067 rc = pImage->pInterfaceIOCallbacks->pfnWriteMetaAsync(pExtent->pImage->pInterfaceIO->pvUser,
5068 pExtent->pFile->pStorage,
5069 VMDK_SECTOR2BYTE(pExtent->uSectorGD) + uGDIndex * sizeof(uGTSectorLE),
5070 &uGTSectorLE, sizeof(uGTSectorLE), pIoCtx,
5071 vmdkAllocGrainAsyncComplete, pGrainAlloc);
5072 if (rc == VERR_VD_ASYNC_IO_IN_PROGRESS)
5073 pGrainAlloc->cIoXfersPending++;
5074 else if (RT_FAILURE(rc))
5075 return vmdkError(pExtent->pImage, rc, RT_SRC_POS, N_("VMDK: cannot write grain directory entry in '%s'"), pExtent->pszFullname);
5076 if (pExtent->pRGD)
5077 {
5078 uint32_t uRGTSectorLE = RT_H2LE_U64(uRGTSector);
5079 rc = pImage->pInterfaceIOCallbacks->pfnWriteMetaAsync(pExtent->pImage->pInterfaceIO->pvUser,
5080 pExtent->pFile->pStorage,
5081 VMDK_SECTOR2BYTE(pExtent->uSectorRGD) + uGDIndex * sizeof(uGTSectorLE),
5082 &uRGTSectorLE, sizeof(uRGTSectorLE), pIoCtx,
5083 vmdkAllocGrainAsyncComplete, pGrainAlloc);
5084 if (rc == VERR_VD_ASYNC_IO_IN_PROGRESS)
5085 pGrainAlloc->cIoXfersPending++;
5086 else if (RT_FAILURE(rc))
5087 return vmdkError(pExtent->pImage, rc, RT_SRC_POS, N_("VMDK: cannot write backup grain directory entry in '%s'"), pExtent->pszFullname);
5088 }
5089
5090 /* As the final step update the in-memory copy of the GDs. */
5091 pExtent->pGD[uGDIndex] = uGTSector;
5092 if (pExtent->pRGD)
5093 pExtent->pRGD[uGDIndex] = uRGTSector;
5094 }
5095
5096 LogFlow(("uGTSector=%llu uRGTSector=%llu\n", uGTSector, uRGTSector));
5097 pGrainAlloc->uGTSector = uGTSector;
5098 pGrainAlloc->uRGTSector = uRGTSector;
5099
5100 rc = vmdkFileGetSize(pExtent->pFile, &cbExtentSize);
5101 if (RT_FAILURE(rc))
5102 return vmdkError(pExtent->pImage, rc, RT_SRC_POS, N_("VMDK: error getting size in '%s'"), pExtent->pszFullname);
5103 Assert(!(cbExtentSize % 512));
5104
5105 if (!pGrainAlloc->cbExtentOld)
5106 pGrainAlloc->cbExtentOld = cbExtentSize;
5107
5108 pGrainAlloc->cbExtentSize = cbExtentSize;
5109
5110 /* Write the data. Always a full grain, or we're in big trouble. */
5111 rc = pImage->pInterfaceIOCallbacks->pfnWriteUserAsync(pImage->pInterfaceIO->pvUser,
5112 pExtent->pFile->pStorage,
5113 cbExtentSize,
5114 pIoCtx, cbWrite,
5115 vmdkAllocGrainAsyncComplete, pGrainAlloc);
5116 if (rc == VERR_VD_ASYNC_IO_IN_PROGRESS)
5117 pGrainAlloc->cIoXfersPending++;
5118 else if (RT_FAILURE(rc))
5119 return vmdkError(pExtent->pImage, rc, RT_SRC_POS, N_("VMDK: cannot write allocated data block in '%s'"), pExtent->pszFullname);
5120
5121 rc = vmdkAllocGrainAsyncGTUpdate(pImage, pExtent, pCache, pIoCtx, pGrainAlloc);
5122
5123 if (!pGrainAlloc->cIoXfersPending)
5124 {
5125 /* Grain allocation completed. */
5126 RTMemFree(pGrainAlloc);
5127 }
5128
5129 LogFlowFunc(("leaving rc=%Rrc\n", rc));
5130
5131 return rc;
5132}
5133
5134
5135/** @copydoc VBOXHDDBACKEND::pfnCheckIfValid */
5136static int vmdkCheckIfValid(const char *pszFilename, PVDINTERFACE pVDIfsDisk)
5137{
5138 LogFlowFunc(("pszFilename=\"%s\"\n", pszFilename));
5139 int rc = VINF_SUCCESS;
5140 PVMDKIMAGE pImage;
5141
5142 if ( !pszFilename
5143 || !*pszFilename
5144 || strchr(pszFilename, '"'))
5145 {
5146 rc = VERR_INVALID_PARAMETER;
5147 goto out;
5148 }
5149
5150 pImage = (PVMDKIMAGE)RTMemAllocZ(sizeof(VMDKIMAGE));
5151 if (!pImage)
5152 {
5153 rc = VERR_NO_MEMORY;
5154 goto out;
5155 }
5156 pImage->pszFilename = pszFilename;
5157 pImage->pFile = NULL;
5158 pImage->pExtents = NULL;
5159 pImage->pFiles = NULL;
5160 pImage->pGTCache = NULL;
5161 pImage->pDescData = NULL;
5162 pImage->pVDIfsDisk = pVDIfsDisk;
5163 pImage->pVDIfsImage = pVDIfsDisk;
5164 /** @todo speed up this test open (VD_OPEN_FLAGS_INFO) by skipping as
5165 * much as possible in vmdkOpenImage. */
5166 rc = vmdkOpenImage(pImage, VD_OPEN_FLAGS_INFO | VD_OPEN_FLAGS_READONLY);
5167 vmdkFreeImage(pImage, false);
5168 RTMemFree(pImage);
5169
5170out:
5171 LogFlowFunc(("returns %Rrc\n", rc));
5172 return rc;
5173}
5174
5175/** @copydoc VBOXHDDBACKEND::pfnOpen */
5176static int vmdkOpen(const char *pszFilename, unsigned uOpenFlags,
5177 PVDINTERFACE pVDIfsDisk, PVDINTERFACE pVDIfsImage,
5178 void **ppBackendData)
5179{
5180 LogFlowFunc(("pszFilename=\"%s\" uOpenFlags=%#x pVDIfsDisk=%#p pVDIfsImage=%#p ppBackendData=%#p\n", pszFilename, uOpenFlags, pVDIfsDisk, pVDIfsImage, ppBackendData));
5181 int rc;
5182 PVMDKIMAGE pImage;
5183
5184 /* Check open flags. All valid flags are supported. */
5185 if (uOpenFlags & ~VD_OPEN_FLAGS_MASK)
5186 {
5187 rc = VERR_INVALID_PARAMETER;
5188 goto out;
5189 }
5190
5191 /* Check remaining arguments. */
5192 if ( !VALID_PTR(pszFilename)
5193 || !*pszFilename
5194 || strchr(pszFilename, '"'))
5195 {
5196 rc = VERR_INVALID_PARAMETER;
5197 goto out;
5198 }
5199
5200
5201 pImage = (PVMDKIMAGE)RTMemAllocZ(sizeof(VMDKIMAGE));
5202 if (!pImage)
5203 {
5204 rc = VERR_NO_MEMORY;
5205 goto out;
5206 }
5207 pImage->pszFilename = pszFilename;
5208 pImage->pFile = NULL;
5209 pImage->pExtents = NULL;
5210 pImage->pFiles = NULL;
5211 pImage->pGTCache = NULL;
5212 pImage->pDescData = NULL;
5213 pImage->pVDIfsDisk = pVDIfsDisk;
5214 pImage->pVDIfsImage = pVDIfsImage;
5215
5216 rc = vmdkOpenImage(pImage, uOpenFlags);
5217 if (RT_SUCCESS(rc))
5218 *ppBackendData = pImage;
5219
5220out:
5221 LogFlowFunc(("returns %Rrc (pBackendData=%#p)\n", rc, *ppBackendData));
5222 return rc;
5223}
5224
5225/** @copydoc VBOXHDDBACKEND::pfnCreate */
5226static int vmdkCreate(const char *pszFilename, uint64_t cbSize,
5227 unsigned uImageFlags, const char *pszComment,
5228 PCPDMMEDIAGEOMETRY pPCHSGeometry,
5229 PCPDMMEDIAGEOMETRY pLCHSGeometry, PCRTUUID pUuid,
5230 unsigned uOpenFlags, unsigned uPercentStart,
5231 unsigned uPercentSpan, PVDINTERFACE pVDIfsDisk,
5232 PVDINTERFACE pVDIfsImage, PVDINTERFACE pVDIfsOperation,
5233 void **ppBackendData)
5234{
5235 LogFlowFunc(("pszFilename=\"%s\" cbSize=%llu uImageFlags=%#x pszComment=\"%s\" pPCHSGeometry=%#p pLCHSGeometry=%#p Uuid=%RTuuid uOpenFlags=%#x uPercentStart=%u uPercentSpan=%u pVDIfsDisk=%#p pVDIfsImage=%#p pVDIfsOperation=%#p ppBackendData=%#p", pszFilename, cbSize, uImageFlags, pszComment, pPCHSGeometry, pLCHSGeometry, pUuid, uOpenFlags, uPercentStart, uPercentSpan, pVDIfsDisk, pVDIfsImage, pVDIfsOperation, ppBackendData));
5236 int rc;
5237 PVMDKIMAGE pImage;
5238
5239 PFNVDPROGRESS pfnProgress = NULL;
5240 void *pvUser = NULL;
5241 PVDINTERFACE pIfProgress = VDInterfaceGet(pVDIfsOperation,
5242 VDINTERFACETYPE_PROGRESS);
5243 PVDINTERFACEPROGRESS pCbProgress = NULL;
5244 if (pIfProgress)
5245 {
5246 pCbProgress = VDGetInterfaceProgress(pIfProgress);
5247 pfnProgress = pCbProgress->pfnProgress;
5248 pvUser = pIfProgress->pvUser;
5249 }
5250
5251 /* Check open flags. All valid flags are supported. */
5252 if (uOpenFlags & ~VD_OPEN_FLAGS_MASK)
5253 {
5254 rc = VERR_INVALID_PARAMETER;
5255 goto out;
5256 }
5257
5258 /* Check size. Maximum 2TB-64K for sparse images, otherwise unlimited. */
5259 if ( !cbSize
5260 || (!(uImageFlags & VD_IMAGE_FLAGS_FIXED) && cbSize >= _1T * 2 - _64K))
5261 {
5262 rc = VERR_VD_INVALID_SIZE;
5263 goto out;
5264 }
5265
5266 /* Check remaining arguments. */
5267 if ( !VALID_PTR(pszFilename)
5268 || !*pszFilename
5269 || strchr(pszFilename, '"')
5270 || !VALID_PTR(pPCHSGeometry)
5271 || !VALID_PTR(pLCHSGeometry)
5272#ifndef VBOX_WITH_VMDK_ESX
5273 || ( uImageFlags & VD_VMDK_IMAGE_FLAGS_ESX
5274 && !(uImageFlags & VD_IMAGE_FLAGS_FIXED))
5275#endif
5276 || ( (uImageFlags & VD_VMDK_IMAGE_FLAGS_STREAM_OPTIMIZED)
5277 && (uImageFlags & ~(VD_VMDK_IMAGE_FLAGS_STREAM_OPTIMIZED | VD_IMAGE_FLAGS_DIFF))))
5278 {
5279 rc = VERR_INVALID_PARAMETER;
5280 goto out;
5281 }
5282
5283 pImage = (PVMDKIMAGE)RTMemAllocZ(sizeof(VMDKIMAGE));
5284 if (!pImage)
5285 {
5286 rc = VERR_NO_MEMORY;
5287 goto out;
5288 }
5289 pImage->pszFilename = pszFilename;
5290 pImage->pFile = NULL;
5291 pImage->pExtents = NULL;
5292 pImage->pFiles = NULL;
5293 pImage->pGTCache = NULL;
5294 pImage->pDescData = NULL;
5295 pImage->pVDIfsDisk = pVDIfsDisk;
5296 pImage->pVDIfsImage = pVDIfsImage;
5297 /* Descriptors for split images can be pretty large, especially if the
5298 * filename is long. So prepare for the worst, and allocate quite some
5299 * memory for the descriptor in this case. */
5300 if (uImageFlags & VD_VMDK_IMAGE_FLAGS_SPLIT_2G)
5301 pImage->cbDescAlloc = VMDK_SECTOR2BYTE(200);
5302 else
5303 pImage->cbDescAlloc = VMDK_SECTOR2BYTE(20);
5304 pImage->pDescData = (char *)RTMemAllocZ(pImage->cbDescAlloc);
5305 if (!pImage->pDescData)
5306 {
5307 rc = VERR_NO_MEMORY;
5308 goto out;
5309 }
5310
5311 rc = vmdkCreateImage(pImage, cbSize, uImageFlags, pszComment,
5312 pPCHSGeometry, pLCHSGeometry, pUuid,
5313 pfnProgress, pvUser, uPercentStart, uPercentSpan);
5314 if (RT_SUCCESS(rc))
5315 {
5316 /* So far the image is opened in read/write mode. Make sure the
5317 * image is opened in read-only mode if the caller requested that. */
5318 if (uOpenFlags & VD_OPEN_FLAGS_READONLY)
5319 {
5320 vmdkFreeImage(pImage, false);
5321 rc = vmdkOpenImage(pImage, uOpenFlags);
5322 if (RT_FAILURE(rc))
5323 goto out;
5324 }
5325 *ppBackendData = pImage;
5326 }
5327 else
5328 {
5329 RTMemFree(pImage->pDescData);
5330 RTMemFree(pImage);
5331 }
5332
5333out:
5334 LogFlowFunc(("returns %Rrc (pBackendData=%#p)\n", rc, *ppBackendData));
5335 return rc;
5336}
5337
5338/**
5339 * Replaces a fragment of a string with the specified string.
5340 *
5341 * @returns Pointer to the allocated UTF-8 string.
5342 * @param pszWhere UTF-8 string to search in.
5343 * @param pszWhat UTF-8 string to search for.
5344 * @param pszByWhat UTF-8 string to replace the found string with.
5345 */
5346static char * vmdkStrReplace(const char *pszWhere, const char *pszWhat, const char *pszByWhat)
5347{
5348 AssertPtr(pszWhere);
5349 AssertPtr(pszWhat);
5350 AssertPtr(pszByWhat);
5351 const char *pszFoundStr = strstr(pszWhere, pszWhat);
5352 if (!pszFoundStr)
5353 return NULL;
5354 size_t cFinal = strlen(pszWhere) + 1 + strlen(pszByWhat) - strlen(pszWhat);
5355 char *pszNewStr = (char *)RTMemAlloc(cFinal);
5356 if (pszNewStr)
5357 {
5358 char *pszTmp = pszNewStr;
5359 memcpy(pszTmp, pszWhere, pszFoundStr - pszWhere);
5360 pszTmp += pszFoundStr - pszWhere;
5361 memcpy(pszTmp, pszByWhat, strlen(pszByWhat));
5362 pszTmp += strlen(pszByWhat);
5363 strcpy(pszTmp, pszFoundStr + strlen(pszWhat));
5364 }
5365 return pszNewStr;
5366}
5367
/** @copydoc VBOXHDDBACKEND::pfnRename
 *
 * Renames/moves the descriptor and every extent file of the image. The
 * function first rewrites the descriptor lines, then closes and moves each
 * extent file, then moves the (separate) descriptor file and finally reopens
 * the image under the new name. All old and new names are recorded so a
 * failure at any point can be rolled back by moving the files back and
 * restoring the saved descriptor. Raw-disk images cannot be renamed.
 */
static int vmdkRename(void *pBackendData, const char *pszFilename)
{
    LogFlowFunc(("pBackendData=%#p pszFilename=%#p\n", pBackendData, pszFilename));

    PVMDKIMAGE pImage = (PVMDKIMAGE)pBackendData;
    int rc = VINF_SUCCESS;
    char **apszOldName = NULL;
    char **apszNewName = NULL;
    char **apszNewLines = NULL;
    char *pszOldDescName = NULL;
    bool fImageFreed = false;
    bool fEmbeddedDesc = false;
    unsigned cExtents = pImage->cExtents;
    char *pszNewBaseName = NULL;
    char *pszOldBaseName = NULL;
    char *pszNewFullName = NULL;
    char *pszOldFullName = NULL;
    const char *pszOldImageName;
    unsigned i, line;
    VMDKDESCRIPTOR DescriptorCopy;
    VMDKEXTENT ExtentCopy;

    memset(&DescriptorCopy, 0, sizeof(DescriptorCopy));

    /* Check arguments. */
    if (   !pImage
        || (pImage->uImageFlags & VD_VMDK_IMAGE_FLAGS_RAWDISK)
        || !VALID_PTR(pszFilename)
        || !*pszFilename)
    {
        rc = VERR_INVALID_PARAMETER;
        goto out;
    }

    /*
     * Allocate an array to store both old and new names of renamed files
     * in case we have to roll back the changes. Arrays are initialized
     * with zeros. We actually save stuff when and if we change it.
     */
    /* Index cExtents (the extra slot) is reserved for the descriptor file. */
    apszOldName = (char **)RTMemTmpAllocZ((cExtents + 1) * sizeof(char*));
    apszNewName = (char **)RTMemTmpAllocZ((cExtents + 1) * sizeof(char*));
    apszNewLines = (char **)RTMemTmpAllocZ((cExtents) * sizeof(char*));
    if (!apszOldName || !apszNewName || !apszNewLines)
    {
        rc = VERR_NO_MEMORY;
        goto out;
    }

    /* Save the descriptor size and position. */
    if (pImage->pDescData)
    {
        /* Separate descriptor file. */
        fEmbeddedDesc = false;
    }
    else
    {
        /* Embedded descriptor file. */
        ExtentCopy = pImage->pExtents[0];
        fEmbeddedDesc = true;
    }
    /* Save the descriptor content. */
    DescriptorCopy.cLines = pImage->Descriptor.cLines;
    for (i = 0; i < DescriptorCopy.cLines; i++)
    {
        DescriptorCopy.aLines[i] = RTStrDup(pImage->Descriptor.aLines[i]);
        if (!DescriptorCopy.aLines[i])
        {
            rc = VERR_NO_MEMORY;
            goto out;
        }
    }

    /* Prepare both old and new base names used for string replacement. */
    pszNewBaseName = RTStrDup(RTPathFilename(pszFilename));
    RTPathStripExt(pszNewBaseName);
    pszOldBaseName = RTStrDup(RTPathFilename(pImage->pszFilename));
    RTPathStripExt(pszOldBaseName);
    /* Prepare both old and new full names used for string replacement. */
    pszNewFullName = RTStrDup(pszFilename);
    RTPathStripExt(pszNewFullName);
    pszOldFullName = RTStrDup(pImage->pszFilename);
    RTPathStripExt(pszOldFullName);

    /* --- Up to this point we have not done any damage yet. --- */

    /* Save the old name for easy access to the old descriptor file. */
    pszOldDescName = RTStrDup(pImage->pszFilename);
    /* Save old image name. */
    pszOldImageName = pImage->pszFilename;

    /* Update the descriptor with modified extent names. */
    for (i = 0, line = pImage->Descriptor.uFirstExtent;
        i < cExtents;
        i++, line = pImage->Descriptor.aNextLines[line])
    {
        /* Assume that vmdkStrReplace will fail. */
        rc = VERR_NO_MEMORY;
        /* Update the descriptor. */
        apszNewLines[i] = vmdkStrReplace(pImage->Descriptor.aLines[line],
            pszOldBaseName, pszNewBaseName);
        if (!apszNewLines[i])
            goto rollback;
        pImage->Descriptor.aLines[line] = apszNewLines[i];
    }
    /* Make sure the descriptor gets written back. */
    pImage->Descriptor.fDirty = true;
    /* Flush the descriptor now, in case it is embedded. */
    (void)vmdkFlushImage(pImage);

    /* Close and rename/move extents. */
    for (i = 0; i < cExtents; i++)
    {
        PVMDKEXTENT pExtent = &pImage->pExtents[i];
        /* Compose new name for the extent. */
        apszNewName[i] = vmdkStrReplace(pExtent->pszFullname,
            pszOldFullName, pszNewFullName);
        if (!apszNewName[i])
            goto rollback;
        /* Close the extent file. */
        vmdkFileClose(pImage, &pExtent->pFile, false);
        /* Rename the extent file. */
        rc = RTFileMove(pExtent->pszFullname, apszNewName[i], 0);
        if (RT_FAILURE(rc))
            goto rollback;
        /* Remember the old name. */
        apszOldName[i] = RTStrDup(pExtent->pszFullname);
    }
    /* Release all old stuff. */
    vmdkFreeImage(pImage, false);

    fImageFreed = true;

    /* Last elements of new/old name arrays are intended for
     * storing descriptor's names.
     */
    apszNewName[cExtents] = RTStrDup(pszFilename);
    /* Rename the descriptor file if it's separate. */
    if (!fEmbeddedDesc)
    {
        rc = RTFileMove(pImage->pszFilename, apszNewName[cExtents], 0);
        if (RT_FAILURE(rc))
            goto rollback;
        /* Save old name only if we may need to change it back. */
        /* NOTE(review): this stores the NEW name (pszFilename) in the "old"
         * slot, so the rollback below would move the descriptor from the new
         * name to the new name instead of back to pImage->pszFilename.
         * Looks like this should be RTStrDup(pszOldImageName) - verify. */
        apszOldName[cExtents] = RTStrDup(pszFilename);
    }

    /* Update pImage with the new information. */
    pImage->pszFilename = pszFilename;

    /* Open the new image. */
    rc = vmdkOpenImage(pImage, pImage->uOpenFlags);
    if (RT_SUCCESS(rc))
        goto out;

rollback:
    /* Roll back all changes in case of failure. */
    if (RT_FAILURE(rc))
    {
        int rrc;
        if (!fImageFreed)
        {
            /*
             * Some extents may have been closed, close the rest. We will
             * re-open the whole thing later.
             */
            vmdkFreeImage(pImage, false);
        }
        /* Rename files back. */
        for (i = 0; i <= cExtents; i++)
        {
            if (apszOldName[i])
            {
                rrc = RTFileMove(apszNewName[i], apszOldName[i], 0);
                AssertRC(rrc);
            }
        }
        /* Restore the old descriptor. */
        PVMDKFILE pFile;
        rrc = vmdkFileOpen(pImage, &pFile, pszOldDescName,
                           vmdkFileOpenFlags(VD_OPEN_FLAGS_NORMAL),
                           false);
        AssertRC(rrc);
        if (fEmbeddedDesc)
        {
            ExtentCopy.pFile = pFile;
            pImage->pExtents = &ExtentCopy;
        }
        else
        {
            /* Shouldn't be null for separate descriptor.
             * There will be no access to the actual content.
             */
            pImage->pDescData = pszOldDescName;
            pImage->pFile = pFile;
        }
        pImage->Descriptor = DescriptorCopy;
        vmdkWriteDescriptor(pImage);
        vmdkFileClose(pImage, &pFile, false);
        /* Get rid of the stuff we implanted. */
        pImage->pExtents = NULL;
        pImage->pFile = NULL;
        pImage->pDescData = NULL;
        /* Re-open the image back. */
        pImage->pszFilename = pszOldImageName;
        rrc = vmdkOpenImage(pImage, pImage->uOpenFlags);
        AssertRC(rrc);
    }

out:
    /* Common cleanup of all temporary name/line copies. */
    for (i = 0; i < DescriptorCopy.cLines; i++)
        if (DescriptorCopy.aLines[i])
            RTStrFree(DescriptorCopy.aLines[i]);
    if (apszOldName)
    {
        for (i = 0; i <= cExtents; i++)
            if (apszOldName[i])
                RTStrFree(apszOldName[i]);
        RTMemTmpFree(apszOldName);
    }
    if (apszNewName)
    {
        for (i = 0; i <= cExtents; i++)
            if (apszNewName[i])
                RTStrFree(apszNewName[i]);
        RTMemTmpFree(apszNewName);
    }
    if (apszNewLines)
    {
        for (i = 0; i < cExtents; i++)
            if (apszNewLines[i])
                RTStrFree(apszNewLines[i]);
        RTMemTmpFree(apszNewLines);
    }
    if (pszOldDescName)
        RTStrFree(pszOldDescName);
    if (pszOldBaseName)
        RTStrFree(pszOldBaseName);
    if (pszNewBaseName)
        RTStrFree(pszNewBaseName);
    if (pszOldFullName)
        RTStrFree(pszOldFullName);
    if (pszNewFullName)
        RTStrFree(pszNewFullName);
    LogFlowFunc(("returns %Rrc\n", rc));
    return rc;
}
5615
5616/** @copydoc VBOXHDDBACKEND::pfnClose */
5617static int vmdkClose(void *pBackendData, bool fDelete)
5618{
5619 LogFlowFunc(("pBackendData=%#p fDelete=%d\n", pBackendData, fDelete));
5620 PVMDKIMAGE pImage = (PVMDKIMAGE)pBackendData;
5621 int rc = VINF_SUCCESS;
5622
5623 /* Freeing a never allocated image (e.g. because the open failed) is
5624 * not signalled as an error. After all nothing bad happens. */
5625 if (pImage)
5626 {
5627 vmdkFreeImage(pImage, fDelete);
5628 RTMemFree(pImage);
5629 }
5630
5631 LogFlowFunc(("returns %Rrc\n", rc));
5632 return rc;
5633}
5634
/** @copydoc VBOXHDDBACKEND::pfnRead */
static int vmdkRead(void *pBackendData, uint64_t uOffset, void *pvBuf,
                    size_t cbToRead, size_t *pcbActuallyRead)
{
    LogFlowFunc(("pBackendData=%#p uOffset=%llu pvBuf=%#p cbToRead=%zu pcbActuallyRead=%#p\n", pBackendData, uOffset, pvBuf, cbToRead, pcbActuallyRead));
    PVMDKIMAGE pImage = (PVMDKIMAGE)pBackendData;
    PVMDKEXTENT pExtent;
    uint64_t uSectorExtentRel;
    uint64_t uSectorExtentAbs;
    int rc;

    AssertPtr(pImage);
    /* Offset and size must be sector (512 byte) aligned. */
    Assert(uOffset % 512 == 0);
    Assert(cbToRead % 512 == 0);

    /* Reject reads beyond the nominal disk size and zero-length reads. */
    if (   uOffset + cbToRead > pImage->cbSize
        || cbToRead == 0)
    {
        rc = VERR_INVALID_PARAMETER;
        goto out;
    }

    /* Locate the extent backing uOffset and the sector offset inside it. */
    rc = vmdkFindExtent(pImage, VMDK_BYTE2SECTOR(uOffset),
                        &pExtent, &uSectorExtentRel);
    if (RT_FAILURE(rc))
        goto out;

    /* Check access permissions as defined in the extent descriptor. */
    if (pExtent->enmAccess == VMDKACCESS_NOACCESS)
    {
        rc = VERR_VD_VMDK_INVALID_STATE;
        goto out;
    }


    /* Clip read range to remain in this extent. */
    cbToRead = RT_MIN(cbToRead, VMDK_SECTOR2BYTE(pExtent->uSectorOffset + pExtent->cNominalSectors - uSectorExtentRel));

    /* Handle the read according to the current extent type. */
    switch (pExtent->enmType)
    {
        case VMDKETYPE_HOSTED_SPARSE:
#ifdef VBOX_WITH_VMDK_ESX
        case VMDKETYPE_ESX_SPARSE:
#endif /* VBOX_WITH_VMDK_ESX */
            /* Translate the extent-relative sector to the absolute file
             * position via the grain table. */
            rc = vmdkGetSector(pImage->pGTCache, pExtent, uSectorExtentRel,
                               &uSectorExtentAbs);
            if (RT_FAILURE(rc))
                goto out;
            /* Clip read range to at most the rest of the grain. */
            cbToRead = RT_MIN(cbToRead, VMDK_SECTOR2BYTE(pExtent->cSectorsPerGrain - uSectorExtentRel % pExtent->cSectorsPerGrain));
            Assert(!(cbToRead % 512));
            if (uSectorExtentAbs == 0)
                /* Grain not allocated; the caller must read from the parent. */
                rc = VERR_VD_BLOCK_FREE;
            else
            {
                if (pImage->uImageFlags & VD_VMDK_IMAGE_FLAGS_STREAM_OPTIMIZED)
                {
                    /* Stream optimized images store compressed grains, so
                     * the whole grain is inflated into pvGrain (which acts
                     * as a one-grain cache) before copying out the piece. */
                    uint32_t uSectorInGrain = uSectorExtentRel % pExtent->cSectorsPerGrain;
                    uSectorExtentAbs -= uSectorInGrain;
                    uint64_t uLBA;
                    if (pExtent->uGrainSector != uSectorExtentAbs)
                    {
                        rc = vmdkFileInflateAt(pExtent->pFile, VMDK_SECTOR2BYTE(uSectorExtentAbs),
                                               pExtent->pvGrain, VMDK_SECTOR2BYTE(pExtent->cSectorsPerGrain), VMDK_MARKER_IGNORE, &uLBA, NULL);
                        if (RT_FAILURE(rc))
                        {
                            /* Invalidate the grain cache on error. */
                            pExtent->uGrainSector = 0;
                            AssertRC(rc);
                            goto out;
                        }
                        pExtent->uGrainSector = uSectorExtentAbs;
                        Assert(uLBA == uSectorExtentRel);
                    }
                    memcpy(pvBuf, (uint8_t *)pExtent->pvGrain + VMDK_SECTOR2BYTE(uSectorInGrain), cbToRead);
                }
                else
                {
                    /* Plain sparse extent: read straight from the file. */
                    rc = vmdkFileReadAt(pExtent->pFile,
                                        VMDK_SECTOR2BYTE(uSectorExtentAbs),
                                        pvBuf, cbToRead, NULL);
                }
            }
            break;
        case VMDKETYPE_VMFS:
        case VMDKETYPE_FLAT:
            /* Flat extents map 1:1 to the file. */
            rc = vmdkFileReadAt(pExtent->pFile,
                                VMDK_SECTOR2BYTE(uSectorExtentRel),
                                pvBuf, cbToRead, NULL);
            break;
        case VMDKETYPE_ZERO:
            /* Zero extents have no backing storage; reads return zeros. */
            memset(pvBuf, '\0', cbToRead);
            break;
    }
    if (pcbActuallyRead)
        *pcbActuallyRead = cbToRead;

out:
    LogFlowFunc(("returns %Rrc\n", rc));
    return rc;
}
5736
/** @copydoc VBOXHDDBACKEND::pfnWrite */
static int vmdkWrite(void *pBackendData, uint64_t uOffset, const void *pvBuf,
                     size_t cbToWrite, size_t *pcbWriteProcess,
                     size_t *pcbPreRead, size_t *pcbPostRead, unsigned fWrite)
{
    LogFlowFunc(("pBackendData=%#p uOffset=%llu pvBuf=%#p cbToWrite=%zu pcbWriteProcess=%#p pcbPreRead=%#p pcbPostRead=%#p\n", pBackendData, uOffset, pvBuf, cbToWrite, pcbWriteProcess, pcbPreRead, pcbPostRead));
    PVMDKIMAGE pImage = (PVMDKIMAGE)pBackendData;
    PVMDKEXTENT pExtent;
    uint64_t uSectorExtentRel;
    uint64_t uSectorExtentAbs;
    int rc;

    AssertPtr(pImage);
    /* Offset and size must be sector (512 byte) aligned. */
    Assert(uOffset % 512 == 0);
    Assert(cbToWrite % 512 == 0);

    if (pImage->uOpenFlags & VD_OPEN_FLAGS_READONLY)
    {
        rc = VERR_VD_IMAGE_READ_ONLY;
        goto out;
    }

    if (cbToWrite == 0)
    {
        rc = VERR_INVALID_PARAMETER;
        goto out;
    }

    /* No size check here, will do that later when the extent is located.
     * There are sparse images out there which according to the spec are
     * invalid, because the total size is not a multiple of the grain size.
     * Also for sparse images which are stitched together in odd ways (not at
     * grain boundaries, and with the nominal size not being a multiple of the
     * grain size), this would prevent writing to the last grain. */

    rc = vmdkFindExtent(pImage, VMDK_BYTE2SECTOR(uOffset),
                        &pExtent, &uSectorExtentRel);
    if (RT_FAILURE(rc))
        goto out;

    /* Check access permissions as defined in the extent descriptor. */
    if (pExtent->enmAccess != VMDKACCESS_READWRITE)
    {
        rc = VERR_VD_VMDK_INVALID_STATE;
        goto out;
    }

    /* Handle the write according to the current extent type. */
    switch (pExtent->enmType)
    {
        case VMDKETYPE_HOSTED_SPARSE:
#ifdef VBOX_WITH_VMDK_ESX
        case VMDKETYPE_ESX_SPARSE:
#endif /* VBOX_WITH_VMDK_ESX */
            /* Translate the extent-relative sector to the absolute file
             * position via the grain table. */
            rc = vmdkGetSector(pImage->pGTCache, pExtent, uSectorExtentRel,
                               &uSectorExtentAbs);
            if (RT_FAILURE(rc))
                goto out;
            /* Clip write range to at most the rest of the grain. */
            cbToWrite = RT_MIN(cbToWrite, VMDK_SECTOR2BYTE(pExtent->cSectorsPerGrain - uSectorExtentRel % pExtent->cSectorsPerGrain));
            /* Stream optimized images are written strictly sequentially;
             * going back before the last written grain is not allowed. */
            if (   pImage->uImageFlags & VD_VMDK_IMAGE_FLAGS_STREAM_OPTIMIZED
                && uSectorExtentRel < (uint64_t)pExtent->uLastGrainWritten * pExtent->cSectorsPerGrain)
            {
                rc = VERR_VD_VMDK_INVALID_WRITE;
                goto out;
            }
            if (uSectorExtentAbs == 0)
            {
                if (cbToWrite == VMDK_SECTOR2BYTE(pExtent->cSectorsPerGrain))
                {
                    /* Full block write to a previously unallocated block.
                     * Check if the caller wants to avoid the automatic alloc. */
                    if (!(fWrite & VD_WRITE_NO_ALLOC))
                    {
                        /* Allocate GT and find out where to store the grain. */
                        rc = vmdkAllocGrain(pImage->pGTCache, pExtent,
                                            uSectorExtentRel, pvBuf, cbToWrite);
                    }
                    else
                        rc = VERR_VD_BLOCK_FREE;
                    *pcbPreRead = 0;
                    *pcbPostRead = 0;
                }
                else
                {
                    /* Clip write range to remain in this extent. */
                    cbToWrite = RT_MIN(cbToWrite, VMDK_SECTOR2BYTE(pExtent->uSectorOffset + pExtent->cNominalSectors - uSectorExtentRel));
                    /* Partial grain write: tell the caller how much data to
                     * read around the write so a full block is assembled. */
                    *pcbPreRead = VMDK_SECTOR2BYTE(uSectorExtentRel % pExtent->cSectorsPerGrain);
                    *pcbPostRead = VMDK_SECTOR2BYTE(pExtent->cSectorsPerGrain) - cbToWrite - *pcbPreRead;
                    rc = VERR_VD_BLOCK_FREE;
                }
            }
            else
            {
                if (pImage->uImageFlags & VD_VMDK_IMAGE_FLAGS_STREAM_OPTIMIZED)
                {
                    /* Rewrite of the most recently written compressed grain:
                     * inflate it (unless cached in pvGrain), merge the new
                     * data in, then deflate and write the grain back. */
                    uint32_t uSectorInGrain = uSectorExtentRel % pExtent->cSectorsPerGrain;
                    uSectorExtentAbs -= uSectorInGrain;
                    uint64_t uLBA = uSectorExtentRel;
                    if (   pExtent->uGrainSector != uSectorExtentAbs
                        || pExtent->uGrainSector != pExtent->uLastGrainSector)
                    {
                        rc = vmdkFileInflateAt(pExtent->pFile, VMDK_SECTOR2BYTE(uSectorExtentAbs),
                                               pExtent->pvGrain, VMDK_SECTOR2BYTE(pExtent->cSectorsPerGrain), VMDK_MARKER_IGNORE, &uLBA, NULL);
                        if (RT_FAILURE(rc))
                        {
                            /* Invalidate the grain cache on error. */
                            pExtent->uGrainSector = 0;
                            pExtent->uLastGrainSector = 0;
                            AssertRC(rc);
                            goto out;
                        }
                        pExtent->uGrainSector = uSectorExtentAbs;
                        pExtent->uLastGrainSector = uSectorExtentAbs;
                        Assert(uLBA == uSectorExtentRel);
                    }
                    memcpy((uint8_t *)pExtent->pvGrain + VMDK_SECTOR2BYTE(uSectorInGrain), pvBuf, cbToWrite);
                    uint32_t cbGrain = 0;
                    rc = vmdkFileDeflateAt(pExtent->pFile,
                                           VMDK_SECTOR2BYTE(uSectorExtentAbs),
                                           pExtent->pvGrain, VMDK_SECTOR2BYTE(pExtent->cSectorsPerGrain),
                                           VMDK_MARKER_IGNORE, uLBA, &cbGrain);
                    if (RT_FAILURE(rc))
                    {
                        pExtent->uGrainSector = 0;
                        pExtent->uLastGrainSector = 0;
                        AssertRC(rc);
                        return vmdkError(pExtent->pImage, rc, RT_SRC_POS, N_("VMDK: cannot write compressed data block in '%s'"), pExtent->pszFullname);
                    }
                    /* Compressed grains are stored 512 byte aligned. */
                    cbGrain = RT_ALIGN(cbGrain, 512);
                    pExtent->uLastGrainSector = uSectorExtentAbs;
                    pExtent->uLastGrainWritten = uSectorExtentRel / pExtent->cSectorsPerGrain;
                    pExtent->cbLastGrainWritten = cbGrain;

                    /* If the extent has a footer it must follow the data;
                     * rewrite it directly after the freshly written grain. */
                    uint64_t uEOSOff = 0;
                    if (pExtent->fFooter)
                    {
                        uEOSOff = 512;
                        rc = vmdkWriteMetaSparseExtent(pExtent, VMDK_SECTOR2BYTE(uSectorExtentAbs) + RT_ALIGN(cbGrain, 512));
                        if (RT_FAILURE(rc))
                            return vmdkError(pExtent->pImage, rc, RT_SRC_POS, N_("VMDK: cannot write footer after data block in '%s'"), pExtent->pszFullname);
                    }
                    /* Append an all-zero sector as end-of-stream marker. */
                    uint8_t aEOS[512];
                    memset(aEOS, '\0', sizeof(aEOS));
                    rc = vmdkFileWriteAt(pExtent->pFile,
                                         VMDK_SECTOR2BYTE(uSectorExtentAbs) + RT_ALIGN(cbGrain, 512) + uEOSOff,
                                         aEOS, sizeof(aEOS), NULL);
                    if (RT_FAILURE(rc))
                        return vmdkError(pExtent->pImage, rc, RT_SRC_POS, N_("VMDK: cannot write end-of stream marker after data block in '%s'"), pExtent->pszFullname);
                }
                else
                {
                    /* Uncompressed grain: write the data in place. */
                    rc = vmdkFileWriteAt(pExtent->pFile,
                                         VMDK_SECTOR2BYTE(uSectorExtentAbs),
                                         pvBuf, cbToWrite, NULL);
                }
            }
            break;
        case VMDKETYPE_VMFS:
        case VMDKETYPE_FLAT:
            /* Clip write range to remain in this extent. */
            cbToWrite = RT_MIN(cbToWrite, VMDK_SECTOR2BYTE(pExtent->uSectorOffset + pExtent->cNominalSectors - uSectorExtentRel));
            rc = vmdkFileWriteAt(pExtent->pFile,
                                 VMDK_SECTOR2BYTE(uSectorExtentRel),
                                 pvBuf, cbToWrite, NULL);
            break;
        case VMDKETYPE_ZERO:
            /* Clip write range to remain in this extent. */
            cbToWrite = RT_MIN(cbToWrite, VMDK_SECTOR2BYTE(pExtent->uSectorOffset + pExtent->cNominalSectors - uSectorExtentRel));
            break;
    }
    if (pcbWriteProcess)
        *pcbWriteProcess = cbToWrite;

out:
    LogFlowFunc(("returns %Rrc\n", rc));
    return rc;
}
5914
5915/** @copydoc VBOXHDDBACKEND::pfnFlush */
5916static int vmdkFlush(void *pBackendData)
5917{
5918 LogFlowFunc(("pBackendData=%#p\n", pBackendData));
5919 PVMDKIMAGE pImage = (PVMDKIMAGE)pBackendData;
5920 int rc;
5921
5922 AssertPtr(pImage);
5923
5924 rc = vmdkFlushImage(pImage);
5925 LogFlowFunc(("returns %Rrc\n", rc));
5926 return rc;
5927}
5928
5929/** @copydoc VBOXHDDBACKEND::pfnGetVersion */
5930static unsigned vmdkGetVersion(void *pBackendData)
5931{
5932 LogFlowFunc(("pBackendData=%#p\n", pBackendData));
5933 PVMDKIMAGE pImage = (PVMDKIMAGE)pBackendData;
5934
5935 AssertPtr(pImage);
5936
5937 if (pImage)
5938 return VMDK_IMAGE_VERSION;
5939 else
5940 return 0;
5941}
5942
5943/** @copydoc VBOXHDDBACKEND::pfnGetSize */
5944static uint64_t vmdkGetSize(void *pBackendData)
5945{
5946 LogFlowFunc(("pBackendData=%#p\n", pBackendData));
5947 PVMDKIMAGE pImage = (PVMDKIMAGE)pBackendData;
5948
5949 AssertPtr(pImage);
5950
5951 if (pImage)
5952 return pImage->cbSize;
5953 else
5954 return 0;
5955}
5956
5957/** @copydoc VBOXHDDBACKEND::pfnGetFileSize */
5958static uint64_t vmdkGetFileSize(void *pBackendData)
5959{
5960 LogFlowFunc(("pBackendData=%#p\n", pBackendData));
5961 PVMDKIMAGE pImage = (PVMDKIMAGE)pBackendData;
5962 uint64_t cb = 0;
5963
5964 AssertPtr(pImage);
5965
5966 if (pImage)
5967 {
5968 uint64_t cbFile;
5969 if (pImage->pFile != NULL)
5970 {
5971 int rc = vmdkFileGetSize(pImage->pFile, &cbFile);
5972 if (RT_SUCCESS(rc))
5973 cb += cbFile;
5974 }
5975 for (unsigned i = 0; i < pImage->cExtents; i++)
5976 {
5977 if (pImage->pExtents[i].pFile != NULL)
5978 {
5979 int rc = vmdkFileGetSize(pImage->pExtents[i].pFile, &cbFile);
5980 if (RT_SUCCESS(rc))
5981 cb += cbFile;
5982 }
5983 }
5984 }
5985
5986 LogFlowFunc(("returns %lld\n", cb));
5987 return cb;
5988}
5989
5990/** @copydoc VBOXHDDBACKEND::pfnGetPCHSGeometry */
5991static int vmdkGetPCHSGeometry(void *pBackendData,
5992 PPDMMEDIAGEOMETRY pPCHSGeometry)
5993{
5994 LogFlowFunc(("pBackendData=%#p pPCHSGeometry=%#p\n", pBackendData, pPCHSGeometry));
5995 PVMDKIMAGE pImage = (PVMDKIMAGE)pBackendData;
5996 int rc;
5997
5998 AssertPtr(pImage);
5999
6000 if (pImage)
6001 {
6002 if (pImage->PCHSGeometry.cCylinders)
6003 {
6004 *pPCHSGeometry = pImage->PCHSGeometry;
6005 rc = VINF_SUCCESS;
6006 }
6007 else
6008 rc = VERR_VD_GEOMETRY_NOT_SET;
6009 }
6010 else
6011 rc = VERR_VD_NOT_OPENED;
6012
6013 LogFlowFunc(("returns %Rrc (PCHS=%u/%u/%u)\n", rc, pPCHSGeometry->cCylinders, pPCHSGeometry->cHeads, pPCHSGeometry->cSectors));
6014 return rc;
6015}
6016
6017/** @copydoc VBOXHDDBACKEND::pfnSetPCHSGeometry */
6018static int vmdkSetPCHSGeometry(void *pBackendData,
6019 PCPDMMEDIAGEOMETRY pPCHSGeometry)
6020{
6021 LogFlowFunc(("pBackendData=%#p pPCHSGeometry=%#p PCHS=%u/%u/%u\n", pBackendData, pPCHSGeometry, pPCHSGeometry->cCylinders, pPCHSGeometry->cHeads, pPCHSGeometry->cSectors));
6022 PVMDKIMAGE pImage = (PVMDKIMAGE)pBackendData;
6023 int rc;
6024
6025 AssertPtr(pImage);
6026
6027 if (pImage)
6028 {
6029 if (pImage->uOpenFlags & VD_OPEN_FLAGS_READONLY)
6030 {
6031 rc = VERR_VD_IMAGE_READ_ONLY;
6032 goto out;
6033 }
6034 rc = vmdkDescSetPCHSGeometry(pImage, pPCHSGeometry);
6035 if (RT_FAILURE(rc))
6036 goto out;
6037
6038 pImage->PCHSGeometry = *pPCHSGeometry;
6039 rc = VINF_SUCCESS;
6040 }
6041 else
6042 rc = VERR_VD_NOT_OPENED;
6043
6044out:
6045 LogFlowFunc(("returns %Rrc\n", rc));
6046 return rc;
6047}
6048
6049/** @copydoc VBOXHDDBACKEND::pfnGetLCHSGeometry */
6050static int vmdkGetLCHSGeometry(void *pBackendData,
6051 PPDMMEDIAGEOMETRY pLCHSGeometry)
6052{
6053 LogFlowFunc(("pBackendData=%#p pLCHSGeometry=%#p\n", pBackendData, pLCHSGeometry));
6054 PVMDKIMAGE pImage = (PVMDKIMAGE)pBackendData;
6055 int rc;
6056
6057 AssertPtr(pImage);
6058
6059 if (pImage)
6060 {
6061 if (pImage->LCHSGeometry.cCylinders)
6062 {
6063 *pLCHSGeometry = pImage->LCHSGeometry;
6064 rc = VINF_SUCCESS;
6065 }
6066 else
6067 rc = VERR_VD_GEOMETRY_NOT_SET;
6068 }
6069 else
6070 rc = VERR_VD_NOT_OPENED;
6071
6072 LogFlowFunc(("returns %Rrc (LCHS=%u/%u/%u)\n", rc, pLCHSGeometry->cCylinders, pLCHSGeometry->cHeads, pLCHSGeometry->cSectors));
6073 return rc;
6074}
6075
6076/** @copydoc VBOXHDDBACKEND::pfnSetLCHSGeometry */
6077static int vmdkSetLCHSGeometry(void *pBackendData,
6078 PCPDMMEDIAGEOMETRY pLCHSGeometry)
6079{
6080 LogFlowFunc(("pBackendData=%#p pLCHSGeometry=%#p LCHS=%u/%u/%u\n", pBackendData, pLCHSGeometry, pLCHSGeometry->cCylinders, pLCHSGeometry->cHeads, pLCHSGeometry->cSectors));
6081 PVMDKIMAGE pImage = (PVMDKIMAGE)pBackendData;
6082 int rc;
6083
6084 AssertPtr(pImage);
6085
6086 if (pImage)
6087 {
6088 if (pImage->uOpenFlags & VD_OPEN_FLAGS_READONLY)
6089 {
6090 rc = VERR_VD_IMAGE_READ_ONLY;
6091 goto out;
6092 }
6093 rc = vmdkDescSetLCHSGeometry(pImage, pLCHSGeometry);
6094 if (RT_FAILURE(rc))
6095 goto out;
6096
6097 pImage->LCHSGeometry = *pLCHSGeometry;
6098 rc = VINF_SUCCESS;
6099 }
6100 else
6101 rc = VERR_VD_NOT_OPENED;
6102
6103out:
6104 LogFlowFunc(("returns %Rrc\n", rc));
6105 return rc;
6106}
6107
6108/** @copydoc VBOXHDDBACKEND::pfnGetImageFlags */
6109static unsigned vmdkGetImageFlags(void *pBackendData)
6110{
6111 LogFlowFunc(("pBackendData=%#p\n", pBackendData));
6112 PVMDKIMAGE pImage = (PVMDKIMAGE)pBackendData;
6113 unsigned uImageFlags;
6114
6115 AssertPtr(pImage);
6116
6117 if (pImage)
6118 uImageFlags = pImage->uImageFlags;
6119 else
6120 uImageFlags = 0;
6121
6122 LogFlowFunc(("returns %#x\n", uImageFlags));
6123 return uImageFlags;
6124}
6125
6126/** @copydoc VBOXHDDBACKEND::pfnGetOpenFlags */
6127static unsigned vmdkGetOpenFlags(void *pBackendData)
6128{
6129 LogFlowFunc(("pBackendData=%#p\n", pBackendData));
6130 PVMDKIMAGE pImage = (PVMDKIMAGE)pBackendData;
6131 unsigned uOpenFlags;
6132
6133 AssertPtr(pImage);
6134
6135 if (pImage)
6136 uOpenFlags = pImage->uOpenFlags;
6137 else
6138 uOpenFlags = 0;
6139
6140 LogFlowFunc(("returns %#x\n", uOpenFlags));
6141 return uOpenFlags;
6142}
6143
6144/** @copydoc VBOXHDDBACKEND::pfnSetOpenFlags */
6145static int vmdkSetOpenFlags(void *pBackendData, unsigned uOpenFlags)
6146{
6147 LogFlowFunc(("pBackendData=%#p\n uOpenFlags=%#x", pBackendData, uOpenFlags));
6148 PVMDKIMAGE pImage = (PVMDKIMAGE)pBackendData;
6149 int rc;
6150
6151 /* Image must be opened and the new flags must be valid. Just readonly and
6152 * info flags are supported. */
6153 if (!pImage || (uOpenFlags & ~(VD_OPEN_FLAGS_READONLY | VD_OPEN_FLAGS_INFO | VD_OPEN_FLAGS_ASYNC_IO | VD_OPEN_FLAGS_SHAREABLE)))
6154 {
6155 rc = VERR_INVALID_PARAMETER;
6156 goto out;
6157 }
6158
6159 /* Implement this operation via reopening the image. */
6160 vmdkFreeImage(pImage, false);
6161 rc = vmdkOpenImage(pImage, uOpenFlags);
6162
6163out:
6164 LogFlowFunc(("returns %Rrc\n", rc));
6165 return rc;
6166}
6167
6168/** @copydoc VBOXHDDBACKEND::pfnGetComment */
6169static int vmdkGetComment(void *pBackendData, char *pszComment,
6170 size_t cbComment)
6171{
6172 LogFlowFunc(("pBackendData=%#p pszComment=%#p cbComment=%zu\n", pBackendData, pszComment, cbComment));
6173 PVMDKIMAGE pImage = (PVMDKIMAGE)pBackendData;
6174 int rc;
6175
6176 AssertPtr(pImage);
6177
6178 if (pImage)
6179 {
6180 const char *pszCommentEncoded = NULL;
6181 rc = vmdkDescDDBGetStr(pImage, &pImage->Descriptor,
6182 "ddb.comment", &pszCommentEncoded);
6183 if (rc == VERR_VD_VMDK_VALUE_NOT_FOUND)
6184 pszCommentEncoded = NULL;
6185 else if (RT_FAILURE(rc))
6186 goto out;
6187
6188 if (pszComment && pszCommentEncoded)
6189 rc = vmdkDecodeString(pszCommentEncoded, pszComment, cbComment);
6190 else
6191 {
6192 if (pszComment)
6193 *pszComment = '\0';
6194 rc = VINF_SUCCESS;
6195 }
6196 if (pszCommentEncoded)
6197 RTStrFree((char *)(void *)pszCommentEncoded);
6198 }
6199 else
6200 rc = VERR_VD_NOT_OPENED;
6201
6202out:
6203 LogFlowFunc(("returns %Rrc comment='%s'\n", rc, pszComment));
6204 return rc;
6205}
6206
6207/** @copydoc VBOXHDDBACKEND::pfnSetComment */
6208static int vmdkSetComment(void *pBackendData, const char *pszComment)
6209{
6210 LogFlowFunc(("pBackendData=%#p pszComment=\"%s\"\n", pBackendData, pszComment));
6211 PVMDKIMAGE pImage = (PVMDKIMAGE)pBackendData;
6212 int rc;
6213
6214 AssertPtr(pImage);
6215
6216 if (pImage->uOpenFlags & VD_OPEN_FLAGS_READONLY)
6217 {
6218 rc = VERR_VD_IMAGE_READ_ONLY;
6219 goto out;
6220 }
6221
6222 if (pImage)
6223 rc = vmdkSetImageComment(pImage, pszComment);
6224 else
6225 rc = VERR_VD_NOT_OPENED;
6226
6227out:
6228 LogFlowFunc(("returns %Rrc\n", rc));
6229 return rc;
6230}
6231
6232/** @copydoc VBOXHDDBACKEND::pfnGetUuid */
6233static int vmdkGetUuid(void *pBackendData, PRTUUID pUuid)
6234{
6235 LogFlowFunc(("pBackendData=%#p pUuid=%#p\n", pBackendData, pUuid));
6236 PVMDKIMAGE pImage = (PVMDKIMAGE)pBackendData;
6237 int rc;
6238
6239 AssertPtr(pImage);
6240
6241 if (pImage)
6242 {
6243 *pUuid = pImage->ImageUuid;
6244 rc = VINF_SUCCESS;
6245 }
6246 else
6247 rc = VERR_VD_NOT_OPENED;
6248
6249 LogFlowFunc(("returns %Rrc (%RTuuid)\n", rc, pUuid));
6250 return rc;
6251}
6252
6253/** @copydoc VBOXHDDBACKEND::pfnSetUuid */
6254static int vmdkSetUuid(void *pBackendData, PCRTUUID pUuid)
6255{
6256 LogFlowFunc(("pBackendData=%#p Uuid=%RTuuid\n", pBackendData, pUuid));
6257 PVMDKIMAGE pImage = (PVMDKIMAGE)pBackendData;
6258 int rc;
6259
6260 LogFlowFunc(("%RTuuid\n", pUuid));
6261 AssertPtr(pImage);
6262
6263 if (pImage)
6264 {
6265 if (!(pImage->uOpenFlags & VD_OPEN_FLAGS_READONLY))
6266 {
6267 pImage->ImageUuid = *pUuid;
6268 rc = vmdkDescDDBSetUuid(pImage, &pImage->Descriptor,
6269 VMDK_DDB_IMAGE_UUID, pUuid);
6270 if (RT_FAILURE(rc))
6271 return vmdkError(pImage, rc, RT_SRC_POS, N_("VMDK: error storing image UUID in descriptor in '%s'"), pImage->pszFilename);
6272 rc = VINF_SUCCESS;
6273 }
6274 else
6275 rc = VERR_VD_IMAGE_READ_ONLY;
6276 }
6277 else
6278 rc = VERR_VD_NOT_OPENED;
6279
6280 LogFlowFunc(("returns %Rrc\n", rc));
6281 return rc;
6282}
6283
6284/** @copydoc VBOXHDDBACKEND::pfnGetModificationUuid */
6285static int vmdkGetModificationUuid(void *pBackendData, PRTUUID pUuid)
6286{
6287 LogFlowFunc(("pBackendData=%#p pUuid=%#p\n", pBackendData, pUuid));
6288 PVMDKIMAGE pImage = (PVMDKIMAGE)pBackendData;
6289 int rc;
6290
6291 AssertPtr(pImage);
6292
6293 if (pImage)
6294 {
6295 *pUuid = pImage->ModificationUuid;
6296 rc = VINF_SUCCESS;
6297 }
6298 else
6299 rc = VERR_VD_NOT_OPENED;
6300
6301 LogFlowFunc(("returns %Rrc (%RTuuid)\n", rc, pUuid));
6302 return rc;
6303}
6304
6305/** @copydoc VBOXHDDBACKEND::pfnSetModificationUuid */
6306static int vmdkSetModificationUuid(void *pBackendData, PCRTUUID pUuid)
6307{
6308 LogFlowFunc(("pBackendData=%#p Uuid=%RTuuid\n", pBackendData, pUuid));
6309 PVMDKIMAGE pImage = (PVMDKIMAGE)pBackendData;
6310 int rc;
6311
6312 AssertPtr(pImage);
6313
6314 if (pImage)
6315 {
6316 if (!(pImage->uOpenFlags & VD_OPEN_FLAGS_READONLY))
6317 {
6318 /*
6319 * Only change the modification uuid if it changed.
6320 * Avoids a lot of unneccessary 1-byte writes during
6321 * vmdkFlush.
6322 */
6323 if (RTUuidCompare(&pImage->ModificationUuid, pUuid))
6324 {
6325 pImage->ModificationUuid = *pUuid;
6326 rc = vmdkDescDDBSetUuid(pImage, &pImage->Descriptor,
6327 VMDK_DDB_MODIFICATION_UUID, pUuid);
6328 if (RT_FAILURE(rc))
6329 return vmdkError(pImage, rc, RT_SRC_POS, N_("VMDK: error storing modification UUID in descriptor in '%s'"), pImage->pszFilename);
6330 }
6331 rc = VINF_SUCCESS;
6332 }
6333 else
6334 rc = VERR_VD_IMAGE_READ_ONLY;
6335 }
6336 else
6337 rc = VERR_VD_NOT_OPENED;
6338
6339 LogFlowFunc(("returns %Rrc\n", rc));
6340 return rc;
6341}
6342
6343/** @copydoc VBOXHDDBACKEND::pfnGetParentUuid */
6344static int vmdkGetParentUuid(void *pBackendData, PRTUUID pUuid)
6345{
6346 LogFlowFunc(("pBackendData=%#p pUuid=%#p\n", pBackendData, pUuid));
6347 PVMDKIMAGE pImage = (PVMDKIMAGE)pBackendData;
6348 int rc;
6349
6350 AssertPtr(pImage);
6351
6352 if (pImage)
6353 {
6354 *pUuid = pImage->ParentUuid;
6355 rc = VINF_SUCCESS;
6356 }
6357 else
6358 rc = VERR_VD_NOT_OPENED;
6359
6360 LogFlowFunc(("returns %Rrc (%RTuuid)\n", rc, pUuid));
6361 return rc;
6362}
6363
6364/** @copydoc VBOXHDDBACKEND::pfnSetParentUuid */
6365static int vmdkSetParentUuid(void *pBackendData, PCRTUUID pUuid)
6366{
6367 LogFlowFunc(("pBackendData=%#p Uuid=%RTuuid\n", pBackendData, pUuid));
6368 PVMDKIMAGE pImage = (PVMDKIMAGE)pBackendData;
6369 int rc;
6370
6371 AssertPtr(pImage);
6372
6373 if (pImage)
6374 {
6375 if (!(pImage->uOpenFlags & VD_OPEN_FLAGS_READONLY))
6376 {
6377 pImage->ParentUuid = *pUuid;
6378 rc = vmdkDescDDBSetUuid(pImage, &pImage->Descriptor,
6379 VMDK_DDB_PARENT_UUID, pUuid);
6380 if (RT_FAILURE(rc))
6381 return vmdkError(pImage, rc, RT_SRC_POS, N_("VMDK: error storing parent image UUID in descriptor in '%s'"), pImage->pszFilename);
6382 rc = VINF_SUCCESS;
6383 }
6384 else
6385 rc = VERR_VD_IMAGE_READ_ONLY;
6386 }
6387 else
6388 rc = VERR_VD_NOT_OPENED;
6389
6390 LogFlowFunc(("returns %Rrc\n", rc));
6391 return rc;
6392}
6393
6394/** @copydoc VBOXHDDBACKEND::pfnGetParentModificationUuid */
6395static int vmdkGetParentModificationUuid(void *pBackendData, PRTUUID pUuid)
6396{
6397 LogFlowFunc(("pBackendData=%#p pUuid=%#p\n", pBackendData, pUuid));
6398 PVMDKIMAGE pImage = (PVMDKIMAGE)pBackendData;
6399 int rc;
6400
6401 AssertPtr(pImage);
6402
6403 if (pImage)
6404 {
6405 *pUuid = pImage->ParentModificationUuid;
6406 rc = VINF_SUCCESS;
6407 }
6408 else
6409 rc = VERR_VD_NOT_OPENED;
6410
6411 LogFlowFunc(("returns %Rrc (%RTuuid)\n", rc, pUuid));
6412 return rc;
6413}
6414
6415/** @copydoc VBOXHDDBACKEND::pfnSetParentModificationUuid */
6416static int vmdkSetParentModificationUuid(void *pBackendData, PCRTUUID pUuid)
6417{
6418 LogFlowFunc(("pBackendData=%#p Uuid=%RTuuid\n", pBackendData, pUuid));
6419 PVMDKIMAGE pImage = (PVMDKIMAGE)pBackendData;
6420 int rc;
6421
6422 AssertPtr(pImage);
6423
6424 if (pImage)
6425 {
6426 if (!(pImage->uOpenFlags & VD_OPEN_FLAGS_READONLY))
6427 {
6428 pImage->ParentModificationUuid = *pUuid;
6429 rc = vmdkDescDDBSetUuid(pImage, &pImage->Descriptor,
6430 VMDK_DDB_PARENT_MODIFICATION_UUID, pUuid);
6431 if (RT_FAILURE(rc))
6432 return vmdkError(pImage, rc, RT_SRC_POS, N_("VMDK: error storing parent image UUID in descriptor in '%s'"), pImage->pszFilename);
6433 rc = VINF_SUCCESS;
6434 }
6435 else
6436 rc = VERR_VD_IMAGE_READ_ONLY;
6437 }
6438 else
6439 rc = VERR_VD_NOT_OPENED;
6440
6441 LogFlowFunc(("returns %Rrc\n", rc));
6442 return rc;
6443}
6444
/** @copydoc VBOXHDDBACKEND::pfnDump */
static void vmdkDump(void *pBackendData)
{
    PVMDKIMAGE pImage = (PVMDKIMAGE)pBackendData;

    AssertPtr(pImage);
    if (pImage)
    {
        /* Dump the key header fields (geometry, size and the four UUIDs)
         * through the error interface's message callback. */
        pImage->pInterfaceErrorCallbacks->pfnMessage(pImage->pInterfaceError->pvUser, "Header: Geometry PCHS=%u/%u/%u LCHS=%u/%u/%u cbSector=%llu\n",
                     pImage->PCHSGeometry.cCylinders, pImage->PCHSGeometry.cHeads, pImage->PCHSGeometry.cSectors,
                     pImage->LCHSGeometry.cCylinders, pImage->LCHSGeometry.cHeads, pImage->LCHSGeometry.cSectors,
                     VMDK_BYTE2SECTOR(pImage->cbSize));
        pImage->pInterfaceErrorCallbacks->pfnMessage(pImage->pInterfaceError->pvUser, "Header: uuidCreation={%RTuuid}\n", &pImage->ImageUuid);
        pImage->pInterfaceErrorCallbacks->pfnMessage(pImage->pInterfaceError->pvUser, "Header: uuidModification={%RTuuid}\n", &pImage->ModificationUuid);
        pImage->pInterfaceErrorCallbacks->pfnMessage(pImage->pInterfaceError->pvUser, "Header: uuidParent={%RTuuid}\n", &pImage->ParentUuid);
        pImage->pInterfaceErrorCallbacks->pfnMessage(pImage->pInterfaceError->pvUser, "Header: uuidParentModification={%RTuuid}\n", &pImage->ParentModificationUuid);
    }
}
6463
6464
6465static int vmdkGetTimeStamp(void *pvBackendData, PRTTIMESPEC pTimeStamp)
6466{
6467 int rc = VERR_NOT_IMPLEMENTED;
6468 LogFlow(("%s: returned %Rrc\n", __FUNCTION__, rc));
6469 return rc;
6470}
6471
6472static int vmdkGetParentTimeStamp(void *pvBackendData, PRTTIMESPEC pTimeStamp)
6473{
6474 int rc = VERR_NOT_IMPLEMENTED;
6475 LogFlow(("%s: returned %Rrc\n", __FUNCTION__, rc));
6476 return rc;
6477}
6478
6479static int vmdkSetParentTimeStamp(void *pvBackendData, PCRTTIMESPEC pTimeStamp)
6480{
6481 int rc = VERR_NOT_IMPLEMENTED;
6482 LogFlow(("%s: returned %Rrc\n", __FUNCTION__, rc));
6483 return rc;
6484}
6485
6486static int vmdkGetParentFilename(void *pvBackendData, char **ppszParentFilename)
6487{
6488 int rc = VERR_NOT_IMPLEMENTED;
6489 LogFlow(("%s: returned %Rrc\n", __FUNCTION__, rc));
6490 return rc;
6491}
6492
6493static int vmdkSetParentFilename(void *pvBackendData, const char *pszParentFilename)
6494{
6495 int rc = VERR_NOT_IMPLEMENTED;
6496 LogFlow(("%s: returned %Rrc\n", __FUNCTION__, rc));
6497 return rc;
6498}
6499
6500static bool vmdkIsAsyncIOSupported(void *pvBackendData)
6501{
6502 PVMDKIMAGE pImage = (PVMDKIMAGE)pvBackendData;
6503
6504 /* We do not support async I/O for stream optimized VMDK images. */
6505 return (pImage->uImageFlags & VD_VMDK_IMAGE_FLAGS_STREAM_OPTIMIZED) == 0;
6506}
6507
/** @copydoc VBOXHDDBACKEND::pfnAsyncRead */
static int vmdkAsyncRead(void *pvBackendData, uint64_t uOffset, size_t cbRead,
                         PVDIOCTX pIoCtx, size_t *pcbActuallyRead)
{
    LogFlowFunc(("pvBackendData=%#p uOffset=%llu pIoCtx=%#p cbToRead=%zu pcbActuallyRead=%#p\n",
                 pvBackendData, uOffset, pIoCtx, cbRead, pcbActuallyRead));
    PVMDKIMAGE pImage = (PVMDKIMAGE)pvBackendData;
    PVMDKEXTENT pExtent;
    uint64_t uSectorExtentRel;
    uint64_t uSectorExtentAbs;
    int rc;

    AssertPtr(pImage);
    /* Offset and size must be sector (512 byte) aligned. */
    Assert(uOffset % 512 == 0);
    Assert(cbRead % 512 == 0);

    /* Reject reads beyond the nominal disk size and zero-length reads. */
    if (   uOffset + cbRead > pImage->cbSize
        || cbRead == 0)
    {
        rc = VERR_INVALID_PARAMETER;
        goto out;
    }

    /* Locate the extent backing uOffset and the sector offset inside it. */
    rc = vmdkFindExtent(pImage, VMDK_BYTE2SECTOR(uOffset),
                        &pExtent, &uSectorExtentRel);
    if (RT_FAILURE(rc))
        goto out;

    /* Check access permissions as defined in the extent descriptor. */
    if (pExtent->enmAccess == VMDKACCESS_NOACCESS)
    {
        rc = VERR_VD_VMDK_INVALID_STATE;
        goto out;
    }

    /* Clip read range to remain in this extent. */
    cbRead = RT_MIN(cbRead, VMDK_SECTOR2BYTE(pExtent->uSectorOffset + pExtent->cNominalSectors - uSectorExtentRel));

    /* Handle the read according to the current extent type. */
    switch (pExtent->enmType)
    {
        case VMDKETYPE_HOSTED_SPARSE:
#ifdef VBOX_WITH_VMDK_ESX
        case VMDKETYPE_ESX_SPARSE:
#endif /* VBOX_WITH_VMDK_ESX */
            /* Translate the extent-relative sector to the absolute file
             * position via the grain table. */
            rc = vmdkGetSectorAsync(pImage, pIoCtx, pImage->pGTCache, pExtent,
                                    uSectorExtentRel, &uSectorExtentAbs);
            if (RT_FAILURE(rc))
                goto out;
            /* Clip read range to at most the rest of the grain. */
            cbRead = RT_MIN(cbRead, VMDK_SECTOR2BYTE(pExtent->cSectorsPerGrain - uSectorExtentRel % pExtent->cSectorsPerGrain));
            Assert(!(cbRead % 512));
            if (uSectorExtentAbs == 0)
                /* Grain not allocated; the caller must read from the parent. */
                rc = VERR_VD_BLOCK_FREE;
            else
            {
                AssertMsg(!(pImage->uImageFlags & VD_VMDK_IMAGE_FLAGS_STREAM_OPTIMIZED), ("Async I/O is not supported for stream optimized VMDK's\n"));
                rc = pImage->pInterfaceIOCallbacks->pfnReadUserAsync(pImage->pInterfaceIO->pvUser,
                                                                     pExtent->pFile->pStorage,
                                                                     VMDK_SECTOR2BYTE(uSectorExtentAbs),
                                                                     pIoCtx, cbRead);
            }
            break;
        case VMDKETYPE_VMFS:
        case VMDKETYPE_FLAT:
            /* Flat extents map 1:1 to the file. */
            rc = pImage->pInterfaceIOCallbacks->pfnReadUserAsync(pImage->pInterfaceIO->pvUser,
                                                                 pExtent->pFile->pStorage,
                                                                 VMDK_SECTOR2BYTE(uSectorExtentRel),
                                                                 pIoCtx, cbRead);
            break;
        case VMDKETYPE_ZERO:
            /* Zero extents have no backing storage; fill the I/O context
             * with zeros instead of issuing real I/O. */
            size_t cbSet;

            cbSet = pImage->pInterfaceIOCallbacks->pfnIoCtxSet(pImage->pInterfaceIO->pvUser,
                                                               pIoCtx, 0, cbRead);
            Assert(cbSet == cbRead);

            rc = VINF_SUCCESS;
            break;
    }
    if (pcbActuallyRead)
        *pcbActuallyRead = cbRead;

out:
    LogFlowFunc(("returns %Rrc\n", rc));
    return rc;
}
6594
6595static int vmdkAsyncWrite(void *pvBackendData, uint64_t uOffset, size_t cbWrite,
6596 PVDIOCTX pIoCtx,
6597 size_t *pcbWriteProcess, size_t *pcbPreRead,
6598 size_t *pcbPostRead, unsigned fWrite)
6599{
6600 LogFlowFunc(("pvBackendData=%#p uOffset=%llu pIoCtx=%#p cbToWrite=%zu pcbWriteProcess=%#p pcbPreRead=%#p pcbPostRead=%#p\n",
6601 pvBackendData, uOffset, pIoCtx, cbWrite, pcbWriteProcess, pcbPreRead, pcbPostRead));
6602 PVMDKIMAGE pImage = (PVMDKIMAGE)pvBackendData;
6603 PVMDKEXTENT pExtent;
6604 uint64_t uSectorExtentRel;
6605 uint64_t uSectorExtentAbs;
6606 int rc;
6607
6608 AssertPtr(pImage);
6609 Assert(uOffset % 512 == 0);
6610 Assert(cbWrite % 512 == 0);
6611
6612 if (pImage->uOpenFlags & VD_OPEN_FLAGS_READONLY)
6613 {
6614 rc = VERR_VD_IMAGE_READ_ONLY;
6615 goto out;
6616 }
6617
6618 if (cbWrite == 0)
6619 {
6620 rc = VERR_INVALID_PARAMETER;
6621 goto out;
6622 }
6623
6624 /* No size check here, will do that later when the extent is located.
6625 * There are sparse images out there which according to the spec are
6626 * invalid, because the total size is not a multiple of the grain size.
6627 * Also for sparse images which are stitched together in odd ways (not at
6628 * grain boundaries, and with the nominal size not being a multiple of the
6629 * grain size), this would prevent writing to the last grain. */
6630
6631 rc = vmdkFindExtent(pImage, VMDK_BYTE2SECTOR(uOffset),
6632 &pExtent, &uSectorExtentRel);
6633 if (RT_FAILURE(rc))
6634 goto out;
6635
6636 /* Check access permissions as defined in the extent descriptor. */
6637 if (pExtent->enmAccess != VMDKACCESS_READWRITE)
6638 {
6639 rc = VERR_VD_VMDK_INVALID_STATE;
6640 goto out;
6641 }
6642
6643 /* Handle the write according to the current extent type. */
6644 switch (pExtent->enmType)
6645 {
6646 case VMDKETYPE_HOSTED_SPARSE:
6647#ifdef VBOX_WITH_VMDK_ESX
6648 case VMDKETYPE_ESX_SPARSE:
6649#endif /* VBOX_WITH_VMDK_ESX */
6650 rc = vmdkGetSectorAsync(pImage, pIoCtx, pImage->pGTCache, pExtent, uSectorExtentRel,
6651 &uSectorExtentAbs);
6652 if (RT_FAILURE(rc))
6653 goto out;
6654 /* Clip write range to at most the rest of the grain. */
6655 cbWrite = RT_MIN(cbWrite, VMDK_SECTOR2BYTE(pExtent->cSectorsPerGrain - uSectorExtentRel % pExtent->cSectorsPerGrain));
6656 if ( pImage->uImageFlags & VD_VMDK_IMAGE_FLAGS_STREAM_OPTIMIZED
6657 && uSectorExtentRel < (uint64_t)pExtent->uLastGrainWritten * pExtent->cSectorsPerGrain)
6658 {
6659 rc = VERR_VD_VMDK_INVALID_WRITE;
6660 goto out;
6661 }
6662 if (uSectorExtentAbs == 0)
6663 {
6664 if (cbWrite == VMDK_SECTOR2BYTE(pExtent->cSectorsPerGrain))
6665 {
6666 /* Full block write to a previously unallocated block.
6667 * Check if the caller wants to avoid the automatic alloc. */
6668 if (!(fWrite & VD_WRITE_NO_ALLOC))
6669 {
6670 /* Allocate GT and find out where to store the grain. */
6671 rc = vmdkAllocGrainAsync(pImage->pGTCache, pExtent, pIoCtx,
6672 uSectorExtentRel, cbWrite);
6673 }
6674 else
6675 rc = VERR_VD_BLOCK_FREE;
6676 *pcbPreRead = 0;
6677 *pcbPostRead = 0;
6678 }
6679 else
6680 {
6681 /* Clip write range to remain in this extent. */
6682 cbWrite = RT_MIN(cbWrite, VMDK_SECTOR2BYTE(pExtent->uSectorOffset + pExtent->cNominalSectors - uSectorExtentRel));
6683 *pcbPreRead = VMDK_SECTOR2BYTE(uSectorExtentRel % pExtent->cSectorsPerGrain);
6684 *pcbPostRead = VMDK_SECTOR2BYTE(pExtent->cSectorsPerGrain) - cbWrite - *pcbPreRead;
6685 rc = VERR_VD_BLOCK_FREE;
6686 }
6687 }
6688 else
6689 {
6690 Assert(!(pImage->uImageFlags & VD_VMDK_IMAGE_FLAGS_STREAM_OPTIMIZED));
6691 rc = pImage->pInterfaceIOCallbacks->pfnWriteUserAsync(pImage->pInterfaceIO->pvUser,
6692 pExtent->pFile->pStorage,
6693 VMDK_SECTOR2BYTE(uSectorExtentAbs),
6694 pIoCtx, cbWrite,
6695 NULL, NULL);
6696 }
6697 break;
6698 case VMDKETYPE_VMFS:
6699 case VMDKETYPE_FLAT:
6700 /* Clip write range to remain in this extent. */
6701 cbWrite = RT_MIN(cbWrite, VMDK_SECTOR2BYTE(pExtent->uSectorOffset + pExtent->cNominalSectors - uSectorExtentRel));
6702 rc = pImage->pInterfaceIOCallbacks->pfnWriteUserAsync(pImage->pInterfaceIO->pvUser,
6703 pExtent->pFile->pStorage,
6704 VMDK_SECTOR2BYTE(uSectorExtentRel),
6705 pIoCtx, cbWrite, NULL, NULL);
6706 break;
6707 case VMDKETYPE_ZERO:
6708 /* Clip write range to remain in this extent. */
6709 cbWrite = RT_MIN(cbWrite, VMDK_SECTOR2BYTE(pExtent->uSectorOffset + pExtent->cNominalSectors - uSectorExtentRel));
6710 break;
6711 }
6712 if (pcbWriteProcess)
6713 *pcbWriteProcess = cbWrite;
6714
6715out:
6716 LogFlowFunc(("returns %Rrc\n", rc));
6717 return rc;
6718}
6719
6720static int vmdkAsyncFlush(void *pvBackendData, PVDIOCTX pIoCtx)
6721{
6722 PVMDKIMAGE pImage = (PVMDKIMAGE)pvBackendData;
6723 PVMDKEXTENT pExtent;
6724 int rc = VINF_SUCCESS;
6725
6726 for (unsigned i = 0; i < pImage->cExtents; i++)
6727 {
6728 pExtent = &pImage->pExtents[i];
6729 if (pExtent->pFile != NULL && pExtent->fMetaDirty)
6730 {
6731 switch (pExtent->enmType)
6732 {
6733 case VMDKETYPE_HOSTED_SPARSE:
6734#ifdef VBOX_WITH_VMDK_ESX
6735 case VMDKETYPE_ESX_SPARSE:
6736#endif /* VBOX_WITH_VMDK_ESX */
6737 rc = vmdkWriteMetaSparseExtentAsync(pImage, pExtent, 0, pIoCtx);
6738 if (RT_FAILURE(rc) && (rc != VERR_VD_ASYNC_IO_IN_PROGRESS))
6739 goto out;
6740 if (pExtent->fFooter)
6741 {
6742 uint64_t cbSize;
6743 rc = vmdkFileGetSize(pExtent->pFile, &cbSize);
6744 if (RT_FAILURE(rc))
6745 goto out;
6746 cbSize = RT_ALIGN_64(cbSize, 512);
6747 rc = vmdkWriteMetaSparseExtent(pExtent, cbSize - 2*512);
6748 if (RT_FAILURE(rc) && (rc != VERR_VD_ASYNC_IO_IN_PROGRESS))
6749 goto out;
6750 }
6751 break;
6752 case VMDKETYPE_VMFS:
6753 case VMDKETYPE_FLAT:
6754 /* Nothing to do. */
6755 break;
6756 case VMDKETYPE_ZERO:
6757 default:
6758 AssertMsgFailed(("extent with type %d marked as dirty\n",
6759 pExtent->enmType));
6760 break;
6761 }
6762 }
6763 switch (pExtent->enmType)
6764 {
6765 case VMDKETYPE_HOSTED_SPARSE:
6766#ifdef VBOX_WITH_VMDK_ESX
6767 case VMDKETYPE_ESX_SPARSE:
6768#endif /* VBOX_WITH_VMDK_ESX */
6769 case VMDKETYPE_VMFS:
6770 case VMDKETYPE_FLAT:
6771 /** @todo implement proper path absolute check. */
6772 if ( pExtent->pFile != NULL
6773 && !(pImage->uOpenFlags & VD_OPEN_FLAGS_READONLY)
6774 && !(pExtent->pszBasename[0] == RTPATH_SLASH))
6775 rc = vmdkFileFlushAsync(pExtent->pFile, pIoCtx);
6776 break;
6777 case VMDKETYPE_ZERO:
6778 /* No need to do anything for this extent. */
6779 break;
6780 default:
6781 AssertMsgFailed(("unknown extent type %d\n", pExtent->enmType));
6782 break;
6783 }
6784 }
6785
6786out:
6787 return rc;
6788}
6789
6790
/**
 * VMDK backend descriptor.
 *
 * Positional initializer of the VBOXHDDBACKEND callback table exported to the
 * VD layer; each slot is annotated with the member it fills. Capabilities
 * advertised: UUID support, fixed/dynamic/2G-split creation, diff images,
 * file-based storage and async I/O.
 */
VBOXHDDBACKEND g_VmdkBackend =
{
    /* pszBackendName */
    "VMDK",
    /* cbSize */
    sizeof(VBOXHDDBACKEND),
    /* uBackendCaps */
    VD_CAP_UUID | VD_CAP_CREATE_FIXED | VD_CAP_CREATE_DYNAMIC
    | VD_CAP_CREATE_SPLIT_2G | VD_CAP_DIFF | VD_CAP_FILE |VD_CAP_ASYNC,
    /* papszFileExtensions */
    s_apszVmdkFileExtensions,
    /* paConfigInfo */
    NULL,
    /* hPlugin */
    NIL_RTLDRMOD,
    /* pfnCheckIfValid */
    vmdkCheckIfValid,
    /* pfnOpen */
    vmdkOpen,
    /* pfnCreate */
    vmdkCreate,
    /* pfnRename */
    vmdkRename,
    /* pfnClose */
    vmdkClose,
    /* pfnRead */
    vmdkRead,
    /* pfnWrite */
    vmdkWrite,
    /* pfnFlush */
    vmdkFlush,
    /* pfnGetVersion */
    vmdkGetVersion,
    /* pfnGetSize */
    vmdkGetSize,
    /* pfnGetFileSize */
    vmdkGetFileSize,
    /* pfnGetPCHSGeometry */
    vmdkGetPCHSGeometry,
    /* pfnSetPCHSGeometry */
    vmdkSetPCHSGeometry,
    /* pfnGetLCHSGeometry */
    vmdkGetLCHSGeometry,
    /* pfnSetLCHSGeometry */
    vmdkSetLCHSGeometry,
    /* pfnGetImageFlags */
    vmdkGetImageFlags,
    /* pfnGetOpenFlags */
    vmdkGetOpenFlags,
    /* pfnSetOpenFlags */
    vmdkSetOpenFlags,
    /* pfnGetComment */
    vmdkGetComment,
    /* pfnSetComment */
    vmdkSetComment,
    /* pfnGetUuid */
    vmdkGetUuid,
    /* pfnSetUuid */
    vmdkSetUuid,
    /* pfnGetModificationUuid */
    vmdkGetModificationUuid,
    /* pfnSetModificationUuid */
    vmdkSetModificationUuid,
    /* pfnGetParentUuid */
    vmdkGetParentUuid,
    /* pfnSetParentUuid */
    vmdkSetParentUuid,
    /* pfnGetParentModificationUuid */
    vmdkGetParentModificationUuid,
    /* pfnSetParentModificationUuid */
    vmdkSetParentModificationUuid,
    /* pfnDump */
    vmdkDump,
    /* pfnGetTimeStamp */
    vmdkGetTimeStamp,
    /* pfnGetParentTimeStamp */
    vmdkGetParentTimeStamp,
    /* pfnSetParentTimeStamp */
    vmdkSetParentTimeStamp,
    /* pfnGetParentFilename */
    vmdkGetParentFilename,
    /* pfnSetParentFilename */
    vmdkSetParentFilename,
    /* pfnIsAsyncIOSupported */
    vmdkIsAsyncIOSupported,
    /* pfnAsyncRead */
    vmdkAsyncRead,
    /* pfnAsyncWrite */
    vmdkAsyncWrite,
    /* pfnAsyncFlush */
    vmdkAsyncFlush,
    /* pfnComposeLocation */
    genericFileComposeLocation,
    /* pfnComposeName */
    genericFileComposeName
};
Note: See TracBrowser for help on using the repository browser.

© 2024 Oracle Support Privacy / Do Not Sell My Info Terms of Use Trademark Policy Automated Access Etiquette