VirtualBox

source: vbox/trunk/src/VBox/Devices/Storage/VmdkHDDCore.cpp@ 30900

Last change on this file since 30900 was 30900, checked in by vboxsync, 14 years ago

VMDK: Activate new async I/O by default

  • Property svn:eol-style set to native
  • Property svn:keywords set to Author Date Id Revision
File size: 251.7 KB
Line 
1/* $Id: VmdkHDDCore.cpp 30900 2010-07-19 08:31:55Z vboxsync $ */
2/** @file
3 * VMDK Disk image, Core Code.
4 */
5
6/*
7 * Copyright (C) 2006-2010 Oracle Corporation
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 */
17
18/*******************************************************************************
19* Header Files *
20*******************************************************************************/
21#define LOG_GROUP LOG_GROUP_VD_VMDK
22#include <VBox/VBoxHDD-Plugin.h>
23#include <VBox/err.h>
24
25#include <VBox/log.h>
26#include <iprt/assert.h>
27#include <iprt/alloc.h>
28#include <iprt/uuid.h>
29#include <iprt/file.h>
30#include <iprt/path.h>
31#include <iprt/string.h>
32#include <iprt/rand.h>
33#include <iprt/zip.h>
34
35
36/*******************************************************************************
37* Constants And Macros, Structures and Typedefs *
38*******************************************************************************/
39
40/** Maximum encoded string size (including NUL) we allow for VMDK images.
41 * Deliberately not set high to avoid running out of descriptor space. */
42#define VMDK_ENCODED_COMMENT_MAX 1024
43
44/** VMDK descriptor DDB entry for PCHS cylinders. */
45#define VMDK_DDB_GEO_PCHS_CYLINDERS "ddb.geometry.cylinders"
46
47/** VMDK descriptor DDB entry for PCHS heads. */
48#define VMDK_DDB_GEO_PCHS_HEADS "ddb.geometry.heads"
49
50/** VMDK descriptor DDB entry for PCHS sectors. */
51#define VMDK_DDB_GEO_PCHS_SECTORS "ddb.geometry.sectors"
52
53/** VMDK descriptor DDB entry for LCHS cylinders. */
54#define VMDK_DDB_GEO_LCHS_CYLINDERS "ddb.geometry.biosCylinders"
55
56/** VMDK descriptor DDB entry for LCHS heads. */
57#define VMDK_DDB_GEO_LCHS_HEADS "ddb.geometry.biosHeads"
58
59/** VMDK descriptor DDB entry for LCHS sectors. */
60#define VMDK_DDB_GEO_LCHS_SECTORS "ddb.geometry.biosSectors"
61
62/** VMDK descriptor DDB entry for image UUID. */
63#define VMDK_DDB_IMAGE_UUID "ddb.uuid.image"
64
65/** VMDK descriptor DDB entry for image modification UUID. */
66#define VMDK_DDB_MODIFICATION_UUID "ddb.uuid.modification"
67
68/** VMDK descriptor DDB entry for parent image UUID. */
69#define VMDK_DDB_PARENT_UUID "ddb.uuid.parent"
70
71/** VMDK descriptor DDB entry for parent image modification UUID. */
72#define VMDK_DDB_PARENT_MODIFICATION_UUID "ddb.uuid.parentmodification"
73
74/** No compression for streamOptimized files. */
75#define VMDK_COMPRESSION_NONE 0
76
77/** Deflate compression for streamOptimized files. */
78#define VMDK_COMPRESSION_DEFLATE 1
79
80/** Marker that the actual GD value is stored in the footer. */
81#define VMDK_GD_AT_END 0xffffffffffffffffULL
82
83/** Marker for end-of-stream in streamOptimized images. */
84#define VMDK_MARKER_EOS 0
85
86/** Marker for grain table block in streamOptimized images. */
87#define VMDK_MARKER_GT 1
88
89/** Marker for grain directory block in streamOptimized images. */
90#define VMDK_MARKER_GD 2
91
92/** Marker for footer in streamOptimized images. */
93#define VMDK_MARKER_FOOTER 3
94
95/** Dummy marker for "don't check the marker value". */
96#define VMDK_MARKER_IGNORE 0xffffffffU
97
98/**
99 * Magic number for hosted images created by VMware Workstation 4, VMware
100 * Workstation 5, VMware Server or VMware Player. Not necessarily sparse.
101 */
102#define VMDK_SPARSE_MAGICNUMBER 0x564d444b /* 'V' 'M' 'D' 'K' */
103
/**
 * VMDK hosted binary extent header. The "Sparse" is a total misnomer, as
 * this header is also used for monolithic flat images.
 *
 * On-disk layout per the VMware Virtual Disk Format specification; the
 * structure is exactly 512 bytes and stored little endian. All offset and
 * size fields are expressed in 512 byte sectors.
 */
#pragma pack(1)
typedef struct SparseExtentHeader
{
    uint32_t magicNumber;       /**< VMDK_SPARSE_MAGICNUMBER ("KDMV" on disk). */
    uint32_t version;           /**< Header format version (see uVersion in VMDKEXTENT). */
    uint32_t flags;             /**< Flag bits (validity of redundant GD, markers, ...). */
    uint64_t capacity;          /**< Capacity of the extent, in sectors. */
    uint64_t grainSize;         /**< Size of a grain, in sectors. */
    uint64_t descriptorOffset;  /**< Offset of the embedded descriptor, in sectors (0 if none). */
    uint64_t descriptorSize;    /**< Size of the embedded descriptor, in sectors. */
    uint32_t numGTEsPerGT;      /**< Number of entries in a grain table. */
    uint64_t rgdOffset;         /**< Offset of the redundant grain directory, in sectors. */
    uint64_t gdOffset;          /**< Offset of the grain directory, in sectors.
                                 * VMDK_GD_AT_END means the value lives in the footer. */
    uint64_t overHead;          /**< Number of sectors occupied by metadata. */
    bool     uncleanShutdown;   /**< Unclean shutdown indicator (consistency check flag). */
    /* The following four chars detect line-ending corruption from text-mode
     * file transfers; expected values per the spec: '\n', ' ', '\r', '\n'. */
    char     singleEndLineChar;
    char     nonEndLineChar;
    char     doubleEndLineChar1;
    char     doubleEndLineChar2;
    uint16_t compressAlgorithm; /**< VMDK_COMPRESSION_NONE or VMDK_COMPRESSION_DEFLATE. */
    uint8_t  pad[433];          /**< Padding to a full 512 byte sector. */
} SparseExtentHeader;
#pragma pack()
131
132/** VMDK capacity for a single chunk when 2G splitting is turned on. Should be
133 * divisible by the default grain size (64K) */
134#define VMDK_2G_SPLIT_SIZE (2047 * 1024 * 1024)
135
/** VMDK streamOptimized file format marker. The type field may or may not
 * be actually valid, but there's always data to read there.
 *
 * For a compressed grain only uSector/cbSize form the marker and the
 * compressed data overlaps uType; for metadata markers cbSize is 0 and
 * uType selects the block kind (see vmdkFileInflateAt). */
#pragma pack(1)
typedef struct VMDKMARKER
{
    uint64_t uSector;   /**< Grain LBA, or payload size in sectors for GT/GD/footer markers. */
    uint32_t cbSize;    /**< Compressed grain size in bytes; 0 for metadata markers. */
    uint32_t uType;     /**< VMDK_MARKER_EOS/GT/GD/FOOTER; only meaningful when cbSize is 0. */
} VMDKMARKER;
#pragma pack()
146
147
148#ifdef VBOX_WITH_VMDK_ESX
149
150/** @todo the ESX code is not tested, not used, and lacks error messages. */
151
152/**
153 * Magic number for images created by VMware GSX Server 3 or ESX Server 3.
154 */
155#define VMDK_ESX_SPARSE_MAGICNUMBER 0x44574f43 /* 'C' 'O' 'W' 'D' */
156
157#pragma pack(1)
158typedef struct COWDisk_Header
159{
160 uint32_t magicNumber;
161 uint32_t version;
162 uint32_t flags;
163 uint32_t numSectors;
164 uint32_t grainSize;
165 uint32_t gdOffset;
166 uint32_t numGDEntries;
167 uint32_t freeSector;
168 /* The spec incompletely documents quite a few further fields, but states
169 * that they are unused by the current format. Replace them by padding. */
170 char reserved1[1604];
171 uint32_t savedGeneration;
172 char reserved2[8];
173 uint32_t uncleanShutdown;
174 char padding[396];
175} COWDisk_Header;
176#pragma pack()
177#endif /* VBOX_WITH_VMDK_ESX */
178
179
180/** Convert sector number/size to byte offset/size. */
181#define VMDK_SECTOR2BYTE(u) ((uint64_t)(u) << 9)
182
183/** Convert byte offset/size to sector number/size. */
184#define VMDK_BYTE2SECTOR(u) ((u) >> 9)
185
186/**
187 * VMDK extent type.
188 */
189typedef enum VMDKETYPE
190{
191 /** Hosted sparse extent. */
192 VMDKETYPE_HOSTED_SPARSE = 1,
193 /** Flat extent. */
194 VMDKETYPE_FLAT,
195 /** Zero extent. */
196 VMDKETYPE_ZERO,
197 /** VMFS extent, used by ESX. */
198 VMDKETYPE_VMFS
199#ifdef VBOX_WITH_VMDK_ESX
200 ,
201 /** ESX sparse extent. */
202 VMDKETYPE_ESX_SPARSE
203#endif /* VBOX_WITH_VMDK_ESX */
204} VMDKETYPE, *PVMDKETYPE;
205
206/**
207 * VMDK access type for a extent.
208 */
209typedef enum VMDKACCESS
210{
211 /** No access allowed. */
212 VMDKACCESS_NOACCESS = 0,
213 /** Read-only access. */
214 VMDKACCESS_READONLY,
215 /** Read-write access. */
216 VMDKACCESS_READWRITE
217} VMDKACCESS, *PVMDKACCESS;
218
219/** Forward declaration for PVMDKIMAGE. */
220typedef struct VMDKIMAGE *PVMDKIMAGE;
221
/**
 * Extent file entry. Used for opening a particular file only once
 * (entries are reference counted and cached on VMDKIMAGE::pFiles).
 */
typedef struct VMDKFILE
{
    /** Pointer to filename. Local copy (freed on last close). */
    const char      *pszFilename;
    /** File open flags for consistency checking. */
    unsigned         fOpen;
    /** File handle. */
    RTFILE           File;
    /** Handle for asynchronous access if requested.*/
    PVDIOSTORAGE     pStorage;
    /** Flag whether to use File or pStorage. */
    bool             fAsyncIO;
    /** Reference counter. */
    unsigned         uReferences;
    /** Flag whether the file should be deleted on last close.
     * Sticky: once requested by any closer it stays set. */
    bool             fDelete;
    /** Pointer to the image we belong to. */
    PVMDKIMAGE       pImage;
    /** Pointer to next file descriptor. */
    struct VMDKFILE *pNext;
    /** Pointer to the previous file descriptor. */
    struct VMDKFILE *pPrev;
} VMDKFILE, *PVMDKFILE;
248
249/**
250 * VMDK extent data structure.
251 */
252typedef struct VMDKEXTENT
253{
254 /** File handle. */
255 PVMDKFILE pFile;
256 /** Base name of the image extent. */
257 const char *pszBasename;
258 /** Full name of the image extent. */
259 const char *pszFullname;
260 /** Number of sectors in this extent. */
261 uint64_t cSectors;
262 /** Number of sectors per block (grain in VMDK speak). */
263 uint64_t cSectorsPerGrain;
264 /** Starting sector number of descriptor. */
265 uint64_t uDescriptorSector;
266 /** Size of descriptor in sectors. */
267 uint64_t cDescriptorSectors;
268 /** Starting sector number of grain directory. */
269 uint64_t uSectorGD;
270 /** Starting sector number of redundant grain directory. */
271 uint64_t uSectorRGD;
272 /** Total number of metadata sectors. */
273 uint64_t cOverheadSectors;
274 /** Nominal size (i.e. as described by the descriptor) of this extent. */
275 uint64_t cNominalSectors;
276 /** Sector offset (i.e. as described by the descriptor) of this extent. */
277 uint64_t uSectorOffset;
278 /** Number of entries in a grain table. */
279 uint32_t cGTEntries;
280 /** Number of sectors reachable via a grain directory entry. */
281 uint32_t cSectorsPerGDE;
282 /** Number of entries in the grain directory. */
283 uint32_t cGDEntries;
284 /** Pointer to the next free sector. Legacy information. Do not use. */
285 uint32_t uFreeSector;
286 /** Number of this extent in the list of images. */
287 uint32_t uExtent;
288 /** Pointer to the descriptor (NULL if no descriptor in this extent). */
289 char *pDescData;
290 /** Pointer to the grain directory. */
291 uint32_t *pGD;
292 /** Pointer to the redundant grain directory. */
293 uint32_t *pRGD;
294 /** VMDK version of this extent. 1=1.0/1.1 */
295 uint32_t uVersion;
296 /** Type of this extent. */
297 VMDKETYPE enmType;
298 /** Access to this extent. */
299 VMDKACCESS enmAccess;
300 /** Flag whether this extent is marked as unclean. */
301 bool fUncleanShutdown;
302 /** Flag whether the metadata in the extent header needs to be updated. */
303 bool fMetaDirty;
304 /** Flag whether there is a footer in this extent. */
305 bool fFooter;
306 /** Compression type for this extent. */
307 uint16_t uCompression;
308 /** Last grain which has been written to. Only for streamOptimized extents. */
309 uint32_t uLastGrainWritten;
310 /** Sector number of last grain which has been written to. Only for
311 * streamOptimized extents. */
312 uint32_t uLastGrainSector;
313 /** Data size of last grain which has been written to. Only for
314 * streamOptimized extents. */
315 uint32_t cbLastGrainWritten;
316 /** Starting sector of the decompressed grain buffer. */
317 uint32_t uGrainSector;
318 /** Decompressed grain buffer for streamOptimized extents. */
319 void *pvGrain;
320 /** Reference to the image in which this extent is used. Do not use this
321 * on a regular basis to avoid passing pImage references to functions
322 * explicitly. */
323 struct VMDKIMAGE *pImage;
324} VMDKEXTENT, *PVMDKEXTENT;
325
326/**
327 * Grain table cache size. Allocated per image.
328 */
329#define VMDK_GT_CACHE_SIZE 256
330
331/**
332 * Grain table block size. Smaller than an actual grain table block to allow
333 * more grain table blocks to be cached without having to allocate excessive
334 * amounts of memory for the cache.
335 */
336#define VMDK_GT_CACHELINE_SIZE 128
337
338
339/**
340 * Maximum number of lines in a descriptor file. Not worth the effort of
341 * making it variable. Descriptor files are generally very short (~20 lines),
342 * with the exception of sparse files split in 2G chunks, which need for the
343 * maximum size (almost 2T) exactly 1025 lines for the disk database.
344 */
345#define VMDK_DESCRIPTOR_LINES_MAX 1100U
346
347/**
348 * Parsed descriptor information. Allows easy access and update of the
349 * descriptor (whether separate file or not). Free form text files suck.
350 */
351typedef struct VMDKDESCRIPTOR
352{
353 /** Line number of first entry of the disk descriptor. */
354 unsigned uFirstDesc;
355 /** Line number of first entry in the extent description. */
356 unsigned uFirstExtent;
357 /** Line number of first disk database entry. */
358 unsigned uFirstDDB;
359 /** Total number of lines. */
360 unsigned cLines;
361 /** Total amount of memory available for the descriptor. */
362 size_t cbDescAlloc;
363 /** Set if descriptor has been changed and not yet written to disk. */
364 bool fDirty;
365 /** Array of pointers to the data in the descriptor. */
366 char *aLines[VMDK_DESCRIPTOR_LINES_MAX];
367 /** Array of line indices pointing to the next non-comment line. */
368 unsigned aNextLines[VMDK_DESCRIPTOR_LINES_MAX];
369} VMDKDESCRIPTOR, *PVMDKDESCRIPTOR;
370
371
372/**
373 * Cache entry for translating extent/sector to a sector number in that
374 * extent.
375 */
376typedef struct VMDKGTCACHEENTRY
377{
378 /** Extent number for which this entry is valid. */
379 uint32_t uExtent;
380 /** GT data block number. */
381 uint64_t uGTBlock;
382 /** Data part of the cache entry. */
383 uint32_t aGTData[VMDK_GT_CACHELINE_SIZE];
384} VMDKGTCACHEENTRY, *PVMDKGTCACHEENTRY;
385
386/**
387 * Cache data structure for blocks of grain table entries. For now this is a
388 * fixed size direct mapping cache, but this should be adapted to the size of
389 * the sparse image and maybe converted to a set-associative cache. The
390 * implementation below implements a write-through cache with write allocate.
391 */
392typedef struct VMDKGTCACHE
393{
394 /** Cache entries. */
395 VMDKGTCACHEENTRY aGTCache[VMDK_GT_CACHE_SIZE];
396 /** Number of cache entries (currently unused). */
397 unsigned cEntries;
398} VMDKGTCACHE, *PVMDKGTCACHE;
399
400/**
401 * Complete VMDK image data structure. Mainly a collection of extents and a few
402 * extra global data fields.
403 */
404typedef struct VMDKIMAGE
405{
406 /** Pointer to the image extents. */
407 PVMDKEXTENT pExtents;
408 /** Number of image extents. */
409 unsigned cExtents;
410 /** Pointer to the files list, for opening a file referenced multiple
411 * times only once (happens mainly with raw partition access). */
412 PVMDKFILE pFiles;
413
414 /** Base image name. */
415 const char *pszFilename;
416 /** Descriptor file if applicable. */
417 PVMDKFILE pFile;
418
419 /** Pointer to the per-disk VD interface list. */
420 PVDINTERFACE pVDIfsDisk;
421 /** Pointer to the per-image VD interface list. */
422 PVDINTERFACE pVDIfsImage;
423
424 /** Error interface. */
425 PVDINTERFACE pInterfaceError;
426 /** Error interface callbacks. */
427 PVDINTERFACEERROR pInterfaceErrorCallbacks;
428
429 /** I/O interface. */
430 PVDINTERFACE pInterfaceIO;
431 /** I/O interface callbacks. */
432 PVDINTERFACEIO pInterfaceIOCallbacks;
433 /**
434 * Pointer to an array of segment entries for async I/O.
435 * This is an optimization because the task number to submit is not known
436 * and allocating/freeing an array in the read/write functions every time
437 * is too expensive.
438 */
439 PPDMDATASEG paSegments;
440 /** Entries available in the segments array. */
441 unsigned cSegments;
442
443 /** Open flags passed by VBoxHD layer. */
444 unsigned uOpenFlags;
445 /** Image flags defined during creation or determined during open. */
446 unsigned uImageFlags;
447 /** Total size of the image. */
448 uint64_t cbSize;
449 /** Physical geometry of this image. */
450 PDMMEDIAGEOMETRY PCHSGeometry;
451 /** Logical geometry of this image. */
452 PDMMEDIAGEOMETRY LCHSGeometry;
453 /** Image UUID. */
454 RTUUID ImageUuid;
455 /** Image modification UUID. */
456 RTUUID ModificationUuid;
457 /** Parent image UUID. */
458 RTUUID ParentUuid;
459 /** Parent image modification UUID. */
460 RTUUID ParentModificationUuid;
461
462 /** Pointer to grain table cache, if this image contains sparse extents. */
463 PVMDKGTCACHE pGTCache;
464 /** Pointer to the descriptor (NULL if no separate descriptor file). */
465 char *pDescData;
466 /** Allocation size of the descriptor file. */
467 size_t cbDescAlloc;
468 /** Parsed descriptor file content. */
469 VMDKDESCRIPTOR Descriptor;
470} VMDKIMAGE;
471
472
/** State for the input callout of the inflate reader. */
typedef struct VMDKINFLATESTATE
{
    /** File where the compressed data is stored. */
    PVMDKFILE File;
    /** Remaining size of the compressed data to read. */
    size_t cbSize;
    /** Offset in the file to read at next. */
    uint64_t uFileOffset;
    /** Current read position; -1 means the synthetic zip type byte has not
     * been delivered to the decompressor yet (see vmdkFileInflateHelper). */
    ssize_t iOffset;
} VMDKINFLATESTATE;
485
/** State for the output callout of the deflate writer. */
typedef struct VMDKDEFLATESTATE
{
    /** File where the compressed data is to be stored. */
    PVMDKFILE File;
    /** Offset in the file to write at next. */
    uint64_t uFileOffset;
    /** Current write position (bytes of compressed data emitted); -1 means
     * the leading zip type byte has not been skipped yet
     * (see vmdkFileDeflateHelper). */
    ssize_t iOffset;
} VMDKDEFLATESTATE;
496
/** Tracks an asynchronous grain allocation across its component transfers. */
typedef struct VMDKGRAINALLOCASYNC
{
    /** Old size of the extent. Used for rollback after an error. */
    uint64_t    cbExtentOld;
    /** Flag whether any of the allocation transfers failed. */
    bool        fIoErr;
    /** Current number of transfers pending.
     * If it reaches 0 and there was an error the old state is restored. */
    unsigned    cIoXfersPending;
    /** Sector number the allocation is for. */
    uint64_t    uSector;
    /** Flag whether the grain table needs to be updated. */
    bool        fGTUpdateNeeded;
    /** Extent the allocation happens in. */
    PVMDKEXTENT pExtent;
    /** New size of the extent, required for the grain table update. */
    uint64_t    cbExtentSize;
    /** Grain table sector. */
    uint64_t    uGTSector;
    /** Backup (redundant) grain table sector. */
    uint64_t    uRGTSector;
} VMDKGRAINALLOCASYNC, *PVMDKGRAINALLOCASYNC;
520
521/*******************************************************************************
522 * Static Variables *
523 *******************************************************************************/
524
525/** NULL-terminated array of supported file extensions. */
526static const char *const s_apszVmdkFileExtensions[] =
527{
528 "vmdk",
529 NULL
530};
531
532/*******************************************************************************
533* Internal Functions *
534*******************************************************************************/
535
536static void vmdkFreeGrainDirectory(PVMDKEXTENT pExtent);
537
538static void vmdkFreeExtentData(PVMDKIMAGE pImage, PVMDKEXTENT pExtent,
539 bool fDelete);
540
541static int vmdkCreateExtents(PVMDKIMAGE pImage, unsigned cExtents);
542static int vmdkFlushImage(PVMDKIMAGE pImage);
543static int vmdkSetImageComment(PVMDKIMAGE pImage, const char *pszComment);
544static void vmdkFreeImage(PVMDKIMAGE pImage, bool fDelete);
545
546static int vmdkAllocGrainAsyncComplete(void *pvBackendData, PVDIOCTX pIoCtx, void *pvUser, int rcReq);
547
548/**
549 * Internal: signal an error to the frontend.
550 */
551DECLINLINE(int) vmdkError(PVMDKIMAGE pImage, int rc, RT_SRC_POS_DECL,
552 const char *pszFormat, ...)
553{
554 va_list va;
555 va_start(va, pszFormat);
556 if (pImage->pInterfaceError && pImage->pInterfaceErrorCallbacks)
557 pImage->pInterfaceErrorCallbacks->pfnError(pImage->pInterfaceError->pvUser, rc, RT_SRC_POS_ARGS,
558 pszFormat, va);
559 va_end(va);
560 return rc;
561}
562
/**
 * Internal: open a file (using a file descriptor cache to ensure each file
 * is only opened once - anything else can cause locking problems).
 *
 * @returns VBox status code.
 * @param   pImage      Image the file belongs to; pImage->pFiles is the cache.
 * @param   ppVmdkFile  Where to store the entry on success; NULL on failure.
 * @param   pszFilename Name of the file to open (a local copy is made).
 * @param   fOpen       RTFILE_O_XXX flags; must match the cached entry's flags.
 * @param   fAsyncIO    Whether to open via the async I/O interface (only
 *                      relevant for the old I/O code path).
 */
static int vmdkFileOpen(PVMDKIMAGE pImage, PVMDKFILE *ppVmdkFile,
                        const char *pszFilename, unsigned fOpen, bool fAsyncIO)
{
    int rc = VINF_SUCCESS;
    PVMDKFILE pVmdkFile;

    /* First check the cache: the same file may be referenced several times,
     * e.g. with raw partition access. Reuse bumps the reference count. */
    for (pVmdkFile = pImage->pFiles;
         pVmdkFile != NULL;
         pVmdkFile = pVmdkFile->pNext)
    {
        if (!strcmp(pszFilename, pVmdkFile->pszFilename))
        {
            /* All openers are expected to use identical flags. */
            Assert(fOpen == pVmdkFile->fOpen);
            pVmdkFile->uReferences++;

            *ppVmdkFile = pVmdkFile;

            return rc;
        }
    }

    /* If we get here, there's no matching entry in the cache. */
    pVmdkFile = (PVMDKFILE)RTMemAllocZ(sizeof(VMDKFILE));
    if (!VALID_PTR(pVmdkFile))
    {
        *ppVmdkFile = NULL;
        return VERR_NO_MEMORY;
    }

    pVmdkFile->pszFilename = RTStrDup(pszFilename);
    if (!VALID_PTR(pVmdkFile->pszFilename))
    {
        RTMemFree(pVmdkFile);
        *ppVmdkFile = NULL;
        return VERR_NO_MEMORY;
    }
    pVmdkFile->fOpen = fOpen;

#ifndef VBOX_WITH_NEW_IO_CODE
    /* Old I/O code: async access goes through the VD async interface, plain
     * access through RTFile; fAsyncIO records which handle field is valid. */
    if ((pImage->uOpenFlags & VD_OPEN_FLAGS_ASYNC_IO) && (fAsyncIO))
    {
        rc = pImage->pInterfaceIOCallbacks->pfnOpen(pImage->pInterfaceIO->pvUser,
                                                    pszFilename,
                                                    pImage->uOpenFlags & VD_OPEN_FLAGS_READONLY
                                                     ? VD_INTERFACEASYNCIO_OPEN_FLAGS_READONLY
                                                     : 0,
                                                    NULL,
                                                    pImage->pVDIfsDisk,
                                                    &pVmdkFile->pStorage);
        pVmdkFile->fAsyncIO = true;
    }
    else
    {
        rc = RTFileOpen(&pVmdkFile->File, pszFilename, fOpen);
        pVmdkFile->fAsyncIO = false;
    }
#else
    /* New I/O code: everything goes through the VD I/O interface; translate
     * the RTFILE_O_XXX flags into the interface's open flags. */
    unsigned uOpenFlags = 0;

    if ((fOpen & RTFILE_O_ACCESS_MASK) == RTFILE_O_READ)
        uOpenFlags |= VD_INTERFACEASYNCIO_OPEN_FLAGS_READONLY;
    if ((fOpen & RTFILE_O_ACTION_MASK) == RTFILE_O_CREATE)
        uOpenFlags |= VD_INTERFACEASYNCIO_OPEN_FLAGS_CREATE;

    rc = pImage->pInterfaceIOCallbacks->pfnOpen(pImage->pInterfaceIO->pvUser,
                                                pszFilename,
                                                uOpenFlags,
                                                &pVmdkFile->pStorage);
#endif
    if (RT_SUCCESS(rc))
    {
        /* Link the new entry at the head of the cache list. */
        pVmdkFile->uReferences = 1;
        pVmdkFile->pImage = pImage;
        pVmdkFile->pNext = pImage->pFiles;
        if (pImage->pFiles)
            pImage->pFiles->pPrev = pVmdkFile;
        pImage->pFiles = pVmdkFile;
        *ppVmdkFile = pVmdkFile;
    }
    else
    {
        /* Open failed: undo the allocations done above. */
        RTStrFree((char *)(void *)pVmdkFile->pszFilename);
        RTMemFree(pVmdkFile);
        *ppVmdkFile = NULL;
    }

    return rc;
}
655
/**
 * Internal: close a file, updating the file descriptor cache.
 *
 * The entry is reference counted; only when the last reference is dropped is
 * the underlying handle closed, the entry unchained from pImage->pFiles and
 * its memory freed. *ppVmdkFile is always cleared for the caller.
 *
 * @returns VBox status code.
 * @param   pImage      Image the file belongs to.
 * @param   ppVmdkFile  The entry to release; set to NULL on return.
 * @param   fDelete     Whether to delete the file from disk on last close
 *                      (sticky once requested by any closer).
 */
static int vmdkFileClose(PVMDKIMAGE pImage, PVMDKFILE *ppVmdkFile, bool fDelete)
{
    int rc = VINF_SUCCESS;
    PVMDKFILE pVmdkFile = *ppVmdkFile;

    AssertPtr(pVmdkFile);

    /* Remember a delete request until the very last reference is gone. */
    pVmdkFile->fDelete |= fDelete;
    Assert(pVmdkFile->uReferences);
    pVmdkFile->uReferences--;
    if (pVmdkFile->uReferences == 0)
    {
        PVMDKFILE pPrev;
        PVMDKFILE pNext;

        /* Unchain the element from the list. */
        pPrev = pVmdkFile->pPrev;
        pNext = pVmdkFile->pNext;

        if (pNext)
            pNext->pPrev = pPrev;
        if (pPrev)
            pPrev->pNext = pNext;
        else
            pImage->pFiles = pNext;

#ifndef VBOX_WITH_NEW_IO_CODE
        /* Close via whichever mechanism the file was opened with. */
        if (pVmdkFile->fAsyncIO)
        {
            rc = pImage->pInterfaceIOCallbacks->pfnClose(pImage->pInterfaceIO->pvUser,
                                                         pVmdkFile->pStorage);
        }
        else
        {
            rc = RTFileClose(pVmdkFile->File);
        }
#else
        rc = pImage->pInterfaceIOCallbacks->pfnClose(pImage->pInterfaceIO->pvUser,
                                                     pVmdkFile->pStorage);
#endif
        /* Delete only after a successful close; then free the entry. */
        if (RT_SUCCESS(rc) && pVmdkFile->fDelete)
            rc = RTFileDelete(pVmdkFile->pszFilename);
        RTStrFree((char *)(void *)pVmdkFile->pszFilename);
        RTMemFree(pVmdkFile);
    }

    *ppVmdkFile = NULL;
    return rc;
}
708
709/**
710 * Internal: read from a file distinguishing between async and normal operation
711 */
712DECLINLINE(int) vmdkFileReadAt(PVMDKFILE pVmdkFile,
713 uint64_t uOffset, void *pvBuf,
714 size_t cbToRead, size_t *pcbRead)
715{
716 PVMDKIMAGE pImage = pVmdkFile->pImage;
717
718#ifndef VBOX_WITH_NEW_IO_CODE
719 if (pVmdkFile->fAsyncIO)
720 return pImage->pInterfaceIOCallbacks->pfnReadSync(pImage->pInterfaceIO->pvUser,
721 pVmdkFile->pStorage, uOffset,
722 cbToRead, pvBuf, pcbRead);
723 else
724 return RTFileReadAt(pVmdkFile->File, uOffset, pvBuf, cbToRead, pcbRead);
725#else
726 return pImage->pInterfaceIOCallbacks->pfnReadSync(pImage->pInterfaceIO->pvUser,
727 pVmdkFile->pStorage, uOffset,
728 cbToRead, pvBuf, pcbRead);
729#endif
730}
731
732/**
733 * Internal: write to a file distinguishing between async and normal operation
734 */
735DECLINLINE(int) vmdkFileWriteAt(PVMDKFILE pVmdkFile,
736 uint64_t uOffset, const void *pvBuf,
737 size_t cbToWrite, size_t *pcbWritten)
738{
739 PVMDKIMAGE pImage = pVmdkFile->pImage;
740
741#ifndef VBOX_WITH_NEW_IO_CODE
742 if (pVmdkFile->fAsyncIO)
743 return pImage->pInterfaceIOCallbacks->pfnWriteSync(pImage->pInterfaceIO->pvUser,
744 pVmdkFile->pStorage, uOffset,
745 cbToWrite, pvBuf, pcbWritten);
746 else
747 return RTFileWriteAt(pVmdkFile->File, uOffset, pvBuf, cbToWrite, pcbWritten);
748#else
749 return pImage->pInterfaceIOCallbacks->pfnWriteSync(pImage->pInterfaceIO->pvUser,
750 pVmdkFile->pStorage, uOffset,
751 cbToWrite, pvBuf, pcbWritten);
752#endif
753}
754
755/**
756 * Internal: get the size of a file distinguishing beween async and normal operation
757 */
758DECLINLINE(int) vmdkFileGetSize(PVMDKFILE pVmdkFile, uint64_t *pcbSize)
759{
760 PVMDKIMAGE pImage = pVmdkFile->pImage;
761
762#ifndef VBOX_WITH_NEW_IO_CODE
763 if (pVmdkFile->fAsyncIO)
764 {
765 return pImage->pInterfaceIOCallbacks->pfnGetSize(pImage->pInterfaceIO->pvUser,
766 pVmdkFile->pStorage,
767 pcbSize);
768 }
769 else
770 return RTFileGetSize(pVmdkFile->File, pcbSize);
771#else
772 return pImage->pInterfaceIOCallbacks->pfnGetSize(pImage->pInterfaceIO->pvUser,
773 pVmdkFile->pStorage,
774 pcbSize);
775#endif
776}
777
778/**
779 * Internal: set the size of a file distinguishing beween async and normal operation
780 */
781DECLINLINE(int) vmdkFileSetSize(PVMDKFILE pVmdkFile, uint64_t cbSize)
782{
783 PVMDKIMAGE pImage = pVmdkFile->pImage;
784
785#ifndef VBOX_WITH_NEW_IO_CODE
786 if (pVmdkFile->fAsyncIO)
787 {
788 return pImage->pInterfaceIOCallbacks->pfnSetSize(pImage->pInterfaceIO->pvUser,
789 pVmdkFile->pStorage,
790 cbSize);
791 }
792 else
793 return RTFileSetSize(pVmdkFile->File, cbSize);
794#else
795 return pImage->pInterfaceIOCallbacks->pfnSetSize(pImage->pInterfaceIO->pvUser,
796 pVmdkFile->pStorage,
797 cbSize);
798#endif
799}
800
801/**
802 * Internal: flush a file distinguishing between async and normal operation
803 */
804DECLINLINE(int) vmdkFileFlush(PVMDKFILE pVmdkFile)
805{
806 PVMDKIMAGE pImage = pVmdkFile->pImage;
807
808#ifndef VBOX_WITH_NEW_IO_CODE
809 if (pVmdkFile->fAsyncIO)
810 return pImage->pInterfaceIOCallbacks->pfnFlushSync(pImage->pInterfaceIO->pvUser,
811 pVmdkFile->pStorage);
812 else
813 return RTFileFlush(pVmdkFile->File);
814#else
815 return pImage->pInterfaceIOCallbacks->pfnFlushSync(pImage->pInterfaceIO->pvUser,
816 pVmdkFile->pStorage);
817#endif
818}
819
820
821DECLINLINE(int) vmdkFileFlushAsync(PVMDKFILE pVmdkFile, PVDIOCTX pIoCtx)
822{
823 PVMDKIMAGE pImage = pVmdkFile->pImage;
824
825 return pImage->pInterfaceIOCallbacks->pfnFlushAsync(pImage->pInterfaceIO->pvUser,
826 pVmdkFile->pStorage, pIoCtx,
827 NULL, NULL);
828}
829
830
831static DECLCALLBACK(int) vmdkFileInflateHelper(void *pvUser, void *pvBuf, size_t cbBuf, size_t *pcbBuf)
832{
833 VMDKINFLATESTATE *pInflateState = (VMDKINFLATESTATE *)pvUser;
834
835 Assert(cbBuf);
836 if (pInflateState->iOffset < 0)
837 {
838 *(uint8_t *)pvBuf = RTZIPTYPE_ZLIB;
839 if (pcbBuf)
840 *pcbBuf = 1;
841 pInflateState->iOffset = 0;
842 return VINF_SUCCESS;
843 }
844 cbBuf = RT_MIN(cbBuf, pInflateState->cbSize);
845 int rc = vmdkFileReadAt(pInflateState->File, pInflateState->uFileOffset, pvBuf, cbBuf, NULL);
846 if (RT_FAILURE(rc))
847 return rc;
848 pInflateState->uFileOffset += cbBuf;
849 pInflateState->iOffset += cbBuf;
850 pInflateState->cbSize -= cbBuf;
851 Assert(pcbBuf);
852 *pcbBuf = cbBuf;
853 return VINF_SUCCESS;
854}
855
/**
 * Internal: read from a file and inflate the compressed data,
 * distinguishing between async and normal operation
 *
 * Reads the streamOptimized block whose VMDKMARKER starts at @a uOffset,
 * locates the compressed payload and inflates exactly @a cbToRead bytes of
 * decompressed data into @a pvBuf.
 *
 * @returns VBox status code; VERR_VD_VMDK_INVALID_FORMAT on marker/size
 *          inconsistencies, VERR_NOT_SUPPORTED for async files.
 * @param   pVmdkFile     File to read from.
 * @param   uOffset       Byte offset of the marker in the file.
 * @param   pvBuf         Where to store the decompressed data.
 * @param   cbToRead      Expected size of the decompressed data.
 * @param   uMarker       Expected marker type, or VMDK_MARKER_IGNORE for a
 *                        compressed grain (uType is not read then).
 * @param   puLBA         Where to return the grain LBA (optional).
 * @param   pcbMarkerData Where to return the total on-disk size of marker
 *                        plus compressed payload (optional).
 */
DECLINLINE(int) vmdkFileInflateAt(PVMDKFILE pVmdkFile,
                                  uint64_t uOffset, void *pvBuf,
                                  size_t cbToRead, unsigned uMarker,
                                  uint64_t *puLBA, uint32_t *pcbMarkerData)
{
    if (pVmdkFile->fAsyncIO)
    {
        AssertMsgFailed(("TODO\n"));
        return VERR_NOT_SUPPORTED;
    }
    else
    {
        int rc;
        PRTZIPDECOMP pZip = NULL;
        VMDKMARKER Marker;
        uint64_t uCompOffset, cbComp;
        VMDKINFLATESTATE InflateState;
        size_t cbActuallyRead;
        size_t cbMarker = sizeof(Marker);

        /* For compressed grains only uSector + cbSize (12 bytes) belong to
         * the marker; the compressed data overlaps the uType field. */
        if (uMarker == VMDK_MARKER_IGNORE)
            cbMarker -= sizeof(Marker.uType);
        rc = vmdkFileReadAt(pVmdkFile, uOffset, &Marker, cbMarker, NULL);
        if (RT_FAILURE(rc))
            return rc;
        Marker.uSector = RT_LE2H_U64(Marker.uSector);
        Marker.cbSize = RT_LE2H_U32(Marker.cbSize);
        /* A specific metadata marker was expected: its type must match and
         * its cbSize must be 0 (non-zero means compressed grain). */
        if (    uMarker != VMDK_MARKER_IGNORE
            &&  (   RT_LE2H_U32(Marker.uType) != uMarker
                 || Marker.cbSize != 0))
            return VERR_VD_VMDK_INVALID_FORMAT;
        if (Marker.cbSize != 0)
        {
            /* Compressed grain marker. Data follows immediately. */
            uCompOffset = uOffset + 12;
            cbComp = Marker.cbSize;
            if (puLBA)
                *puLBA = Marker.uSector;
            if (pcbMarkerData)
                *pcbMarkerData = cbComp + 12; /* 12 byte marker head + payload */
        }
        else
        {
            /* Metadata marker: uSector holds the payload size in sectors and
             * the payload starts at the next 512 byte sector boundary. */
            Marker.uType = RT_LE2H_U32(Marker.uType);
            if (Marker.uType == VMDK_MARKER_EOS)
            {
                Assert(uMarker != VMDK_MARKER_EOS);
                return VERR_VD_VMDK_INVALID_FORMAT;
            }
            else if (   Marker.uType == VMDK_MARKER_GT
                     || Marker.uType == VMDK_MARKER_GD
                     || Marker.uType == VMDK_MARKER_FOOTER)
            {
                uCompOffset = uOffset + 512;
                cbComp = VMDK_SECTOR2BYTE(Marker.uSector);
                if (pcbMarkerData)
                    *pcbMarkerData = cbComp + 512;
            }
            else
            {
                AssertMsgFailed(("VMDK: unknown marker type %u\n", Marker.uType));
                return VERR_VD_VMDK_INVALID_FORMAT;
            }
        }
        InflateState.File = pVmdkFile;
        InflateState.cbSize = cbComp;
        InflateState.uFileOffset = uCompOffset;
        InflateState.iOffset = -1; /* -1 = zip type byte not yet delivered */
        /* Sanity check - the expansion ratio should be much less than 2. */
        Assert(cbComp < 2 * cbToRead);
        if (cbComp >= 2 * cbToRead)
            return VERR_VD_VMDK_INVALID_FORMAT;

        rc = RTZipDecompCreate(&pZip, &InflateState, vmdkFileInflateHelper);
        if (RT_FAILURE(rc))
            return rc;
        rc = RTZipDecompress(pZip, pvBuf, cbToRead, &cbActuallyRead);
        RTZipDecompDestroy(pZip);
        if (RT_FAILURE(rc))
            return rc;
        /* Short decompression output means the block is corrupt. */
        if (cbActuallyRead != cbToRead)
            rc = VERR_VD_VMDK_INVALID_FORMAT;
        return rc;
    }
}
945
946static DECLCALLBACK(int) vmdkFileDeflateHelper(void *pvUser, const void *pvBuf, size_t cbBuf)
947{
948 VMDKDEFLATESTATE *pDeflateState = (VMDKDEFLATESTATE *)pvUser;
949
950 Assert(cbBuf);
951 if (pDeflateState->iOffset < 0)
952 {
953 pvBuf = (const uint8_t *)pvBuf + 1;
954 cbBuf--;
955 pDeflateState->iOffset = 0;
956 }
957 if (!cbBuf)
958 return VINF_SUCCESS;
959 int rc = vmdkFileWriteAt(pDeflateState->File, pDeflateState->uFileOffset, pvBuf, cbBuf, NULL);
960 if (RT_FAILURE(rc))
961 return rc;
962 pDeflateState->uFileOffset += cbBuf;
963 pDeflateState->iOffset += cbBuf;
964 return VINF_SUCCESS;
965}
966
967/**
968 * Internal: deflate the uncompressed data and write to a file,
969 * distinguishing between async and normal operation
970 */
971DECLINLINE(int) vmdkFileDeflateAt(PVMDKFILE pVmdkFile,
972 uint64_t uOffset, const void *pvBuf,
973 size_t cbToWrite, unsigned uMarker,
974 uint64_t uLBA, uint32_t *pcbMarkerData)
975{
976 if (pVmdkFile->fAsyncIO)
977 {
978 AssertMsgFailed(("TODO\n"));
979 return VERR_NOT_SUPPORTED;
980 }
981 else
982 {
983 int rc;
984 PRTZIPCOMP pZip = NULL;
985 VMDKMARKER Marker;
986 uint64_t uCompOffset, cbDecomp;
987 VMDKDEFLATESTATE DeflateState;
988
989 Marker.uSector = RT_H2LE_U64(uLBA);
990 Marker.cbSize = RT_H2LE_U32((uint32_t)cbToWrite);
991 if (uMarker == VMDK_MARKER_IGNORE)
992 {
993 /* Compressed grain marker. Data follows immediately. */
994 uCompOffset = uOffset + 12;
995 cbDecomp = cbToWrite;
996 }
997 else
998 {
999 /** @todo implement creating the other marker types */
1000 return VERR_NOT_IMPLEMENTED;
1001 }
1002 DeflateState.File = pVmdkFile;
1003 DeflateState.uFileOffset = uCompOffset;
1004 DeflateState.iOffset = -1;
1005
1006 rc = RTZipCompCreate(&pZip, &DeflateState, vmdkFileDeflateHelper, RTZIPTYPE_ZLIB, RTZIPLEVEL_DEFAULT);
1007 if (RT_FAILURE(rc))
1008 return rc;
1009 rc = RTZipCompress(pZip, pvBuf, cbDecomp);
1010 if (RT_SUCCESS(rc))
1011 rc = RTZipCompFinish(pZip);
1012 RTZipCompDestroy(pZip);
1013 if (RT_SUCCESS(rc))
1014 {
1015 if (pcbMarkerData)
1016 *pcbMarkerData = 12 + DeflateState.iOffset;
1017 /* Set the file size to remove old garbage in case the block is
1018 * rewritten. Cannot cause data loss as the code calling this
1019 * guarantees that data gets only appended. */
1020 Assert(DeflateState.uFileOffset > uCompOffset);
1021 rc = vmdkFileSetSize(pVmdkFile, DeflateState.uFileOffset);
1022
1023 if (uMarker == VMDK_MARKER_IGNORE)
1024 {
1025 /* Compressed grain marker. */
1026 Marker.cbSize = RT_H2LE_U32(DeflateState.iOffset);
1027 rc = vmdkFileWriteAt(pVmdkFile, uOffset, &Marker, 12, NULL);
1028 if (RT_FAILURE(rc))
1029 return rc;
1030 }
1031 else
1032 {
1033 /** @todo implement creating the other marker types */
1034 return VERR_NOT_IMPLEMENTED;
1035 }
1036 }
1037 return rc;
1038 }
1039}
1040
1041/**
1042 * Internal: check if all files are closed, prevent leaking resources.
1043 */
1044static int vmdkFileCheckAllClose(PVMDKIMAGE pImage)
1045{
1046 int rc = VINF_SUCCESS, rc2;
1047 PVMDKFILE pVmdkFile;
1048
1049 Assert(pImage->pFiles == NULL);
1050 for (pVmdkFile = pImage->pFiles;
1051 pVmdkFile != NULL;
1052 pVmdkFile = pVmdkFile->pNext)
1053 {
1054 LogRel(("VMDK: leaking reference to file \"%s\"\n",
1055 pVmdkFile->pszFilename));
1056 pImage->pFiles = pVmdkFile->pNext;
1057
1058 if (pImage->uOpenFlags & VD_OPEN_FLAGS_ASYNC_IO)
1059 rc2 = pImage->pInterfaceIOCallbacks->pfnClose(pImage->pInterfaceIO->pvUser,
1060 pVmdkFile->pStorage);
1061 else
1062 rc2 = vmdkFileClose(pImage, &pVmdkFile, pVmdkFile->fDelete);
1063
1064 if (RT_SUCCESS(rc))
1065 rc = rc2;
1066 }
1067 return rc;
1068}
1069
1070/**
1071 * Internal: truncate a string (at a UTF8 code point boundary) and encode the
1072 * critical non-ASCII characters.
1073 */
1074static char *vmdkEncodeString(const char *psz)
1075{
1076 char szEnc[VMDK_ENCODED_COMMENT_MAX + 3];
1077 char *pszDst = szEnc;
1078
1079 AssertPtr(psz);
1080
1081 for (; *psz; psz = RTStrNextCp(psz))
1082 {
1083 char *pszDstPrev = pszDst;
1084 RTUNICP Cp = RTStrGetCp(psz);
1085 if (Cp == '\\')
1086 {
1087 pszDst = RTStrPutCp(pszDst, Cp);
1088 pszDst = RTStrPutCp(pszDst, Cp);
1089 }
1090 else if (Cp == '\n')
1091 {
1092 pszDst = RTStrPutCp(pszDst, '\\');
1093 pszDst = RTStrPutCp(pszDst, 'n');
1094 }
1095 else if (Cp == '\r')
1096 {
1097 pszDst = RTStrPutCp(pszDst, '\\');
1098 pszDst = RTStrPutCp(pszDst, 'r');
1099 }
1100 else
1101 pszDst = RTStrPutCp(pszDst, Cp);
1102 if (pszDst - szEnc >= VMDK_ENCODED_COMMENT_MAX - 1)
1103 {
1104 pszDst = pszDstPrev;
1105 break;
1106 }
1107 }
1108 *pszDst = '\0';
1109 return RTStrDup(szEnc);
1110}
1111
1112/**
1113 * Internal: decode a string and store it into the specified string.
1114 */
1115static int vmdkDecodeString(const char *pszEncoded, char *psz, size_t cb)
1116{
1117 int rc = VINF_SUCCESS;
1118 char szBuf[4];
1119
1120 if (!cb)
1121 return VERR_BUFFER_OVERFLOW;
1122
1123 AssertPtr(psz);
1124
1125 for (; *pszEncoded; pszEncoded = RTStrNextCp(pszEncoded))
1126 {
1127 char *pszDst = szBuf;
1128 RTUNICP Cp = RTStrGetCp(pszEncoded);
1129 if (Cp == '\\')
1130 {
1131 pszEncoded = RTStrNextCp(pszEncoded);
1132 RTUNICP CpQ = RTStrGetCp(pszEncoded);
1133 if (CpQ == 'n')
1134 RTStrPutCp(pszDst, '\n');
1135 else if (CpQ == 'r')
1136 RTStrPutCp(pszDst, '\r');
1137 else if (CpQ == '\0')
1138 {
1139 rc = VERR_VD_VMDK_INVALID_HEADER;
1140 break;
1141 }
1142 else
1143 RTStrPutCp(pszDst, CpQ);
1144 }
1145 else
1146 pszDst = RTStrPutCp(pszDst, Cp);
1147
1148 /* Need to leave space for terminating NUL. */
1149 if ((size_t)(pszDst - szBuf) + 1 >= cb)
1150 {
1151 rc = VERR_BUFFER_OVERFLOW;
1152 break;
1153 }
1154 memcpy(psz, szBuf, pszDst - szBuf);
1155 psz += pszDst - szBuf;
1156 }
1157 *psz = '\0';
1158 return rc;
1159}
1160
1161static int vmdkReadGrainDirectory(PVMDKEXTENT pExtent)
1162{
1163 int rc = VINF_SUCCESS;
1164 unsigned i;
1165 uint32_t *pGD = NULL, *pRGD = NULL, *pGDTmp, *pRGDTmp;
1166 size_t cbGD = pExtent->cGDEntries * sizeof(uint32_t);
1167
1168 if (pExtent->enmType != VMDKETYPE_HOSTED_SPARSE)
1169 goto out;
1170
1171 pGD = (uint32_t *)RTMemAllocZ(cbGD);
1172 if (!pGD)
1173 {
1174 rc = VERR_NO_MEMORY;
1175 goto out;
1176 }
1177 pExtent->pGD = pGD;
1178 /* The VMDK 1.1 spec talks about compressed grain directories, but real
1179 * life files don't have them. The spec is wrong in creative ways. */
1180 rc = vmdkFileReadAt(pExtent->pFile, VMDK_SECTOR2BYTE(pExtent->uSectorGD),
1181 pGD, cbGD, NULL);
1182 AssertRC(rc);
1183 if (RT_FAILURE(rc))
1184 {
1185 rc = vmdkError(pExtent->pImage, rc, RT_SRC_POS, N_("VMDK: could not read grain directory in '%s': %Rrc"), pExtent->pszFullname);
1186 goto out;
1187 }
1188 for (i = 0, pGDTmp = pGD; i < pExtent->cGDEntries; i++, pGDTmp++)
1189 *pGDTmp = RT_LE2H_U32(*pGDTmp);
1190
1191 if (pExtent->uSectorRGD)
1192 {
1193 pRGD = (uint32_t *)RTMemAllocZ(cbGD);
1194 if (!pRGD)
1195 {
1196 rc = VERR_NO_MEMORY;
1197 goto out;
1198 }
1199 pExtent->pRGD = pRGD;
1200 /* The VMDK 1.1 spec talks about compressed grain directories, but real
1201 * life files don't have them. The spec is wrong in creative ways. */
1202 rc = vmdkFileReadAt(pExtent->pFile, VMDK_SECTOR2BYTE(pExtent->uSectorRGD),
1203 pRGD, cbGD, NULL);
1204 AssertRC(rc);
1205 if (RT_FAILURE(rc))
1206 {
1207 rc = vmdkError(pExtent->pImage, rc, RT_SRC_POS, N_("VMDK: could not read redundant grain directory in '%s'"), pExtent->pszFullname);
1208 goto out;
1209 }
1210 for (i = 0, pRGDTmp = pRGD; i < pExtent->cGDEntries; i++, pRGDTmp++)
1211 *pRGDTmp = RT_LE2H_U32(*pRGDTmp);
1212
1213 /* Check grain table and redundant grain table for consistency. */
1214 size_t cbGT = pExtent->cGTEntries * sizeof(uint32_t);
1215 uint32_t *pTmpGT1 = (uint32_t *)RTMemTmpAlloc(cbGT);
1216 if (!pTmpGT1)
1217 {
1218 rc = VERR_NO_MEMORY;
1219 goto out;
1220 }
1221 uint32_t *pTmpGT2 = (uint32_t *)RTMemTmpAlloc(cbGT);
1222 if (!pTmpGT2)
1223 {
1224 RTMemTmpFree(pTmpGT1);
1225 rc = VERR_NO_MEMORY;
1226 goto out;
1227 }
1228
1229 for (i = 0, pGDTmp = pGD, pRGDTmp = pRGD;
1230 i < pExtent->cGDEntries;
1231 i++, pGDTmp++, pRGDTmp++)
1232 {
1233 /* If no grain table is allocated skip the entry. */
1234 if (*pGDTmp == 0 && *pRGDTmp == 0)
1235 continue;
1236
1237 if (*pGDTmp == 0 || *pRGDTmp == 0 || *pGDTmp == *pRGDTmp)
1238 {
1239 /* Just one grain directory entry refers to a not yet allocated
1240 * grain table or both grain directory copies refer to the same
1241 * grain table. Not allowed. */
1242 RTMemTmpFree(pTmpGT1);
1243 RTMemTmpFree(pTmpGT2);
1244 rc = vmdkError(pExtent->pImage, VERR_VD_VMDK_INVALID_HEADER, RT_SRC_POS, N_("VMDK: inconsistent references to grain directory in '%s'"), pExtent->pszFullname);
1245 goto out;
1246 }
1247 /* The VMDK 1.1 spec talks about compressed grain tables, but real
1248 * life files don't have them. The spec is wrong in creative ways. */
1249 rc = vmdkFileReadAt(pExtent->pFile, VMDK_SECTOR2BYTE(*pGDTmp),
1250 pTmpGT1, cbGT, NULL);
1251 if (RT_FAILURE(rc))
1252 {
1253 rc = vmdkError(pExtent->pImage, rc, RT_SRC_POS, N_("VMDK: error reading grain table in '%s'"), pExtent->pszFullname);
1254 RTMemTmpFree(pTmpGT1);
1255 RTMemTmpFree(pTmpGT2);
1256 goto out;
1257 }
1258 /* The VMDK 1.1 spec talks about compressed grain tables, but real
1259 * life files don't have them. The spec is wrong in creative ways. */
1260 rc = vmdkFileReadAt(pExtent->pFile, VMDK_SECTOR2BYTE(*pRGDTmp),
1261 pTmpGT2, cbGT, NULL);
1262 if (RT_FAILURE(rc))
1263 {
1264 rc = vmdkError(pExtent->pImage, rc, RT_SRC_POS, N_("VMDK: error reading backup grain table in '%s'"), pExtent->pszFullname);
1265 RTMemTmpFree(pTmpGT1);
1266 RTMemTmpFree(pTmpGT2);
1267 goto out;
1268 }
1269 if (memcmp(pTmpGT1, pTmpGT2, cbGT))
1270 {
1271 RTMemTmpFree(pTmpGT1);
1272 RTMemTmpFree(pTmpGT2);
1273 rc = vmdkError(pExtent->pImage, VERR_VD_VMDK_INVALID_HEADER, RT_SRC_POS, N_("VMDK: inconsistency between grain table and backup grain table in '%s'"), pExtent->pszFullname);
1274 goto out;
1275 }
1276 }
1277
1278 /** @todo figure out what to do for unclean VMDKs. */
1279 RTMemTmpFree(pTmpGT1);
1280 RTMemTmpFree(pTmpGT2);
1281 }
1282
1283 if (pExtent->pImage->uImageFlags & VD_VMDK_IMAGE_FLAGS_STREAM_OPTIMIZED)
1284 {
1285 uint32_t uLastGrainWritten = 0;
1286 uint32_t uLastGrainSector = 0;
1287 size_t cbGT = pExtent->cGTEntries * sizeof(uint32_t);
1288 uint32_t *pTmpGT = (uint32_t *)RTMemTmpAlloc(cbGT);
1289 if (!pTmpGT)
1290 {
1291 rc = VERR_NO_MEMORY;
1292 goto out;
1293 }
1294 for (i = 0, pGDTmp = pGD; i < pExtent->cGDEntries; i++, pGDTmp++)
1295 {
1296 /* If no grain table is allocated skip the entry. */
1297 if (*pGDTmp == 0)
1298 continue;
1299
1300 /* The VMDK 1.1 spec talks about compressed grain tables, but real
1301 * life files don't have them. The spec is wrong in creative ways. */
1302 rc = vmdkFileReadAt(pExtent->pFile, VMDK_SECTOR2BYTE(*pGDTmp),
1303 pTmpGT, cbGT, NULL);
1304 if (RT_FAILURE(rc))
1305 {
1306 rc = vmdkError(pExtent->pImage, rc, RT_SRC_POS, N_("VMDK: error reading grain table in '%s'"), pExtent->pszFullname);
1307 RTMemTmpFree(pTmpGT);
1308 goto out;
1309 }
1310 uint32_t j;
1311 uint32_t *pGTTmp;
1312 for (j = 0, pGTTmp = pTmpGT; j < pExtent->cGTEntries; j++, pGTTmp++)
1313 {
1314 uint32_t uGTTmp = RT_LE2H_U32(*pGTTmp);
1315
1316 /* If no grain is allocated skip the entry. */
1317 if (uGTTmp == 0)
1318 continue;
1319
1320 if (uLastGrainSector && uLastGrainSector >= uGTTmp)
1321 {
1322 rc = vmdkError(pExtent->pImage, rc, RT_SRC_POS, N_("VMDK: grain table in '%s' contains a violation of the ordering assumptions"), pExtent->pszFullname);
1323 RTMemTmpFree(pTmpGT);
1324 goto out;
1325 }
1326 uLastGrainSector = uGTTmp;
1327 uLastGrainWritten = i * pExtent->cGTEntries + j;
1328 }
1329 }
1330 RTMemTmpFree(pTmpGT);
1331
1332 /* streamOptimized extents need a grain decompress buffer. */
1333 pExtent->pvGrain = RTMemAlloc(VMDK_SECTOR2BYTE(pExtent->cSectorsPerGrain));
1334 if (!pExtent->pvGrain)
1335 {
1336 rc = VERR_NO_MEMORY;
1337 goto out;
1338 }
1339
1340 if (uLastGrainSector)
1341 {
1342 uint64_t uLBA = 0;
1343 uint32_t cbMarker = 0;
1344 rc = vmdkFileInflateAt(pExtent->pFile, VMDK_SECTOR2BYTE(uLastGrainSector),
1345 pExtent->pvGrain, VMDK_SECTOR2BYTE(pExtent->cSectorsPerGrain), VMDK_MARKER_IGNORE, &uLBA, &cbMarker);
1346 if (RT_FAILURE(rc))
1347 goto out;
1348
1349 Assert(uLBA == uLastGrainWritten * pExtent->cSectorsPerGrain);
1350 pExtent->uGrainSector = uLastGrainSector;
1351 pExtent->cbLastGrainWritten = RT_ALIGN(cbMarker, 512);
1352 }
1353 pExtent->uLastGrainWritten = uLastGrainWritten;
1354 pExtent->uLastGrainSector = uLastGrainSector;
1355 }
1356
1357out:
1358 if (RT_FAILURE(rc))
1359 vmdkFreeGrainDirectory(pExtent);
1360 return rc;
1361}
1362
1363static int vmdkCreateGrainDirectory(PVMDKEXTENT pExtent, uint64_t uStartSector,
1364 bool fPreAlloc)
1365{
1366 int rc = VINF_SUCCESS;
1367 unsigned i;
1368 uint32_t *pGD = NULL, *pRGD = NULL;
1369 size_t cbGD = pExtent->cGDEntries * sizeof(uint32_t);
1370 size_t cbGDRounded = RT_ALIGN_64(pExtent->cGDEntries * sizeof(uint32_t), 512);
1371 size_t cbGTRounded;
1372 uint64_t cbOverhead;
1373
1374 if (fPreAlloc)
1375 cbGTRounded = RT_ALIGN_64(pExtent->cGDEntries * pExtent->cGTEntries * sizeof(uint32_t), 512);
1376 else
1377 cbGTRounded = 0;
1378
1379 pGD = (uint32_t *)RTMemAllocZ(cbGD);
1380 if (!pGD)
1381 {
1382 rc = VERR_NO_MEMORY;
1383 goto out;
1384 }
1385 pExtent->pGD = pGD;
1386 pRGD = (uint32_t *)RTMemAllocZ(cbGD);
1387 if (!pRGD)
1388 {
1389 rc = VERR_NO_MEMORY;
1390 goto out;
1391 }
1392 pExtent->pRGD = pRGD;
1393
1394 cbOverhead = RT_ALIGN_64(VMDK_SECTOR2BYTE(uStartSector) + 2 * (cbGDRounded + cbGTRounded), VMDK_SECTOR2BYTE(pExtent->cSectorsPerGrain));
1395 /* For streamOptimized extents put the end-of-stream marker at the end. */
1396 if (pExtent->pImage->uImageFlags & VD_VMDK_IMAGE_FLAGS_STREAM_OPTIMIZED)
1397 rc = vmdkFileSetSize(pExtent->pFile, cbOverhead + 512);
1398 else
1399 rc = vmdkFileSetSize(pExtent->pFile, cbOverhead);
1400 if (RT_FAILURE(rc))
1401 goto out;
1402 pExtent->uSectorRGD = uStartSector;
1403 pExtent->uSectorGD = uStartSector + VMDK_BYTE2SECTOR(cbGDRounded + cbGTRounded);
1404
1405 if (fPreAlloc)
1406 {
1407 uint32_t uGTSectorLE;
1408 uint64_t uOffsetSectors;
1409
1410 uOffsetSectors = pExtent->uSectorRGD + VMDK_BYTE2SECTOR(cbGDRounded);
1411 for (i = 0; i < pExtent->cGDEntries; i++)
1412 {
1413 pRGD[i] = uOffsetSectors;
1414 uGTSectorLE = RT_H2LE_U64(uOffsetSectors);
1415 /* Write the redundant grain directory entry to disk. */
1416 rc = vmdkFileWriteAt(pExtent->pFile,
1417 VMDK_SECTOR2BYTE(pExtent->uSectorRGD) + i * sizeof(uGTSectorLE),
1418 &uGTSectorLE, sizeof(uGTSectorLE), NULL);
1419 if (RT_FAILURE(rc))
1420 {
1421 rc = vmdkError(pExtent->pImage, rc, RT_SRC_POS, N_("VMDK: cannot write new redundant grain directory entry in '%s'"), pExtent->pszFullname);
1422 goto out;
1423 }
1424 uOffsetSectors += VMDK_BYTE2SECTOR(pExtent->cGTEntries * sizeof(uint32_t));
1425 }
1426
1427 uOffsetSectors = pExtent->uSectorGD + VMDK_BYTE2SECTOR(cbGDRounded);
1428 for (i = 0; i < pExtent->cGDEntries; i++)
1429 {
1430 pGD[i] = uOffsetSectors;
1431 uGTSectorLE = RT_H2LE_U64(uOffsetSectors);
1432 /* Write the grain directory entry to disk. */
1433 rc = vmdkFileWriteAt(pExtent->pFile,
1434 VMDK_SECTOR2BYTE(pExtent->uSectorGD) + i * sizeof(uGTSectorLE),
1435 &uGTSectorLE, sizeof(uGTSectorLE), NULL);
1436 if (RT_FAILURE(rc))
1437 {
1438 rc = vmdkError(pExtent->pImage, rc, RT_SRC_POS, N_("VMDK: cannot write new grain directory entry in '%s'"), pExtent->pszFullname);
1439 goto out;
1440 }
1441 uOffsetSectors += VMDK_BYTE2SECTOR(pExtent->cGTEntries * sizeof(uint32_t));
1442 }
1443 }
1444 pExtent->cOverheadSectors = VMDK_BYTE2SECTOR(cbOverhead);
1445
1446 /* streamOptimized extents need a grain decompress buffer. */
1447 if (pExtent->pImage->uImageFlags & VD_VMDK_IMAGE_FLAGS_STREAM_OPTIMIZED)
1448 {
1449 pExtent->pvGrain = RTMemAlloc(VMDK_SECTOR2BYTE(pExtent->cSectorsPerGrain));
1450 if (!pExtent->pvGrain)
1451 {
1452 rc = VERR_NO_MEMORY;
1453 goto out;
1454 }
1455 }
1456
1457out:
1458 if (RT_FAILURE(rc))
1459 vmdkFreeGrainDirectory(pExtent);
1460 return rc;
1461}
1462
1463static void vmdkFreeGrainDirectory(PVMDKEXTENT pExtent)
1464{
1465 if (pExtent->pGD)
1466 {
1467 RTMemFree(pExtent->pGD);
1468 pExtent->pGD = NULL;
1469 }
1470 if (pExtent->pRGD)
1471 {
1472 RTMemFree(pExtent->pRGD);
1473 pExtent->pRGD = NULL;
1474 }
1475}
1476
1477static int vmdkStringUnquote(PVMDKIMAGE pImage, const char *pszStr,
1478 char **ppszUnquoted, char **ppszNext)
1479{
1480 char *pszQ;
1481 char *pszUnquoted;
1482
1483 /* Skip over whitespace. */
1484 while (*pszStr == ' ' || *pszStr == '\t')
1485 pszStr++;
1486
1487 if (*pszStr != '"')
1488 {
1489 pszQ = (char *)pszStr;
1490 while (*pszQ && *pszQ != ' ' && *pszQ != '\t')
1491 pszQ++;
1492 }
1493 else
1494 {
1495 pszStr++;
1496 pszQ = (char *)strchr(pszStr, '"');
1497 if (pszQ == NULL)
1498 return vmdkError(pImage, VERR_VD_VMDK_INVALID_HEADER, RT_SRC_POS, N_("VMDK: incorrectly quoted value in descriptor in '%s'"), pImage->pszFilename);
1499 }
1500
1501 pszUnquoted = (char *)RTMemTmpAlloc(pszQ - pszStr + 1);
1502 if (!pszUnquoted)
1503 return VERR_NO_MEMORY;
1504 memcpy(pszUnquoted, pszStr, pszQ - pszStr);
1505 pszUnquoted[pszQ - pszStr] = '\0';
1506 *ppszUnquoted = pszUnquoted;
1507 if (ppszNext)
1508 *ppszNext = pszQ + 1;
1509 return VINF_SUCCESS;
1510}
1511
1512static int vmdkDescInitStr(PVMDKIMAGE pImage, PVMDKDESCRIPTOR pDescriptor,
1513 const char *pszLine)
1514{
1515 char *pEnd = pDescriptor->aLines[pDescriptor->cLines];
1516 ssize_t cbDiff = strlen(pszLine) + 1;
1517
1518 if ( pDescriptor->cLines >= VMDK_DESCRIPTOR_LINES_MAX - 1
1519 && pEnd - pDescriptor->aLines[0] > (ptrdiff_t)pDescriptor->cbDescAlloc - cbDiff)
1520 return vmdkError(pImage, VERR_BUFFER_OVERFLOW, RT_SRC_POS, N_("VMDK: descriptor too big in '%s'"), pImage->pszFilename);
1521
1522 memcpy(pEnd, pszLine, cbDiff);
1523 pDescriptor->cLines++;
1524 pDescriptor->aLines[pDescriptor->cLines] = pEnd + cbDiff;
1525 pDescriptor->fDirty = true;
1526
1527 return VINF_SUCCESS;
1528}
1529
1530static bool vmdkDescGetStr(PVMDKDESCRIPTOR pDescriptor, unsigned uStart,
1531 const char *pszKey, const char **ppszValue)
1532{
1533 size_t cbKey = strlen(pszKey);
1534 const char *pszValue;
1535
1536 while (uStart != 0)
1537 {
1538 if (!strncmp(pDescriptor->aLines[uStart], pszKey, cbKey))
1539 {
1540 /* Key matches, check for a '=' (preceded by whitespace). */
1541 pszValue = pDescriptor->aLines[uStart] + cbKey;
1542 while (*pszValue == ' ' || *pszValue == '\t')
1543 pszValue++;
1544 if (*pszValue == '=')
1545 {
1546 *ppszValue = pszValue + 1;
1547 break;
1548 }
1549 }
1550 uStart = pDescriptor->aNextLines[uStart];
1551 }
1552 return !!uStart;
1553}
1554
/**
 * Internal: set (add or replace) or delete a "key=value" pair in the line
 * chain of the descriptor starting at uStart.
 *
 * The descriptor text is a single contiguous buffer; insertion and removal
 * shift the tail of the buffer and rebase all following line pointers.
 *
 * @returns VBox status code. VERR_BUFFER_OVERFLOW if the descriptor buffer
 *          or line table is exhausted.
 * @param   pImage      Image instance (for error reporting only).
 * @param   pDescriptor Descriptor to modify.
 * @param   uStart      First line of the section to search (desc/extent/DDB).
 * @param   pszKey      Key name to set or remove.
 * @param   pszValue    New value, or NULL to remove the key.
 */
static int vmdkDescSetStr(PVMDKIMAGE pImage, PVMDKDESCRIPTOR pDescriptor,
                          unsigned uStart,
                          const char *pszKey, const char *pszValue)
{
    char *pszTmp;
    size_t cbKey = strlen(pszKey);
    unsigned uLast = 0;

    /* Walk the line chain looking for the key; remember the last line so a
     * new key can be appended right after it. */
    while (uStart != 0)
    {
        if (!strncmp(pDescriptor->aLines[uStart], pszKey, cbKey))
        {
            /* Key matches, check for a '=' (preceded by whitespace). */
            pszTmp = pDescriptor->aLines[uStart] + cbKey;
            while (*pszTmp == ' ' || *pszTmp == '\t')
                pszTmp++;
            if (*pszTmp == '=')
            {
                /* Leave pszTmp pointing at the start of the old value. */
                pszTmp++;
                while (*pszTmp == ' ' || *pszTmp == '\t')
                    pszTmp++;
                break;
            }
        }
        if (!pDescriptor->aNextLines[uStart])
            uLast = uStart;
        uStart = pDescriptor->aNextLines[uStart];
    }
    if (uStart)
    {
        if (pszValue)
        {
            /* Key already exists, replace existing value. */
            size_t cbOldVal = strlen(pszTmp);
            size_t cbNewVal = strlen(pszValue);
            ssize_t cbDiff = cbNewVal - cbOldVal;
            /* Check for buffer overflow. */
            if (    pDescriptor->aLines[pDescriptor->cLines]
                -   pDescriptor->aLines[0] > (ptrdiff_t)pDescriptor->cbDescAlloc - cbDiff)
                return vmdkError(pImage, VERR_BUFFER_OVERFLOW, RT_SRC_POS, N_("VMDK: descriptor too big in '%s'"), pImage->pszFilename);

            /* Shift the buffer tail to fit the new value, splice it in and
             * rebase all following line pointers by the size delta. */
            memmove(pszTmp + cbNewVal, pszTmp + cbOldVal,
                    pDescriptor->aLines[pDescriptor->cLines] - pszTmp - cbOldVal);
            memcpy(pszTmp, pszValue, cbNewVal + 1);
            for (unsigned i = uStart + 1; i <= pDescriptor->cLines; i++)
                pDescriptor->aLines[i] += cbDiff;
        }
        else
        {
            /* Key exists and shall be removed: close the gap in the text
             * buffer and shift line/next-line tables down by one entry. */
            memmove(pDescriptor->aLines[uStart], pDescriptor->aLines[uStart+1],
                    pDescriptor->aLines[pDescriptor->cLines] - pDescriptor->aLines[uStart+1] + 1);
            for (unsigned i = uStart + 1; i <= pDescriptor->cLines; i++)
            {
                pDescriptor->aLines[i-1] = pDescriptor->aLines[i];
                if (pDescriptor->aNextLines[i])
                    pDescriptor->aNextLines[i-1] = pDescriptor->aNextLines[i] - 1;
                else
                    pDescriptor->aNextLines[i-1] = 0;
            }
            pDescriptor->cLines--;
            /* Adjust starting line numbers of following descriptor sections. */
            if (uStart < pDescriptor->uFirstExtent)
                pDescriptor->uFirstExtent--;
            if (uStart < pDescriptor->uFirstDDB)
                pDescriptor->uFirstDDB--;
        }
    }
    else
    {
        /* Key doesn't exist, append after the last entry in this category. */
        if (!pszValue)
        {
            /* Key doesn't exist, and it should be removed. Simply a no-op. */
            return VINF_SUCCESS;
        }
        cbKey = strlen(pszKey);
        size_t cbValue = strlen(pszValue);
        ssize_t cbDiff = cbKey + 1 + cbValue + 1;
        /* Check for buffer overflow. */
        if (    (pDescriptor->cLines >= VMDK_DESCRIPTOR_LINES_MAX - 1)
            ||  (   pDescriptor->aLines[pDescriptor->cLines]
                -   pDescriptor->aLines[0] > (ptrdiff_t)pDescriptor->cbDescAlloc - cbDiff))
            return vmdkError(pImage, VERR_BUFFER_OVERFLOW, RT_SRC_POS, N_("VMDK: descriptor too big in '%s'"), pImage->pszFilename);
        /* Shift the line/next-line tables up by one entry to open a slot
         * after the last line of this category. */
        for (unsigned i = pDescriptor->cLines + 1; i > uLast + 1; i--)
        {
            pDescriptor->aLines[i] = pDescriptor->aLines[i - 1];
            if (pDescriptor->aNextLines[i - 1])
                pDescriptor->aNextLines[i] = pDescriptor->aNextLines[i - 1] + 1;
            else
                pDescriptor->aNextLines[i] = 0;
        }
        uStart = uLast + 1;
        pDescriptor->aNextLines[uLast] = uStart;
        pDescriptor->aNextLines[uStart] = 0;
        pDescriptor->cLines++;
        /* Open a gap in the text buffer and write "key=value" into it. */
        pszTmp = pDescriptor->aLines[uStart];
        memmove(pszTmp + cbDiff, pszTmp,
                pDescriptor->aLines[pDescriptor->cLines] - pszTmp);
        memcpy(pDescriptor->aLines[uStart], pszKey, cbKey);
        pDescriptor->aLines[uStart][cbKey] = '=';
        memcpy(pDescriptor->aLines[uStart] + cbKey + 1, pszValue, cbValue + 1);
        for (unsigned i = uStart + 1; i <= pDescriptor->cLines; i++)
            pDescriptor->aLines[i] += cbDiff;

        /* Adjust starting line numbers of following descriptor sections. */
        if (uStart <= pDescriptor->uFirstExtent)
            pDescriptor->uFirstExtent++;
        if (uStart <= pDescriptor->uFirstDDB)
            pDescriptor->uFirstDDB++;
    }
    pDescriptor->fDirty = true;
    return VINF_SUCCESS;
}
1668
1669static int vmdkDescBaseGetU32(PVMDKDESCRIPTOR pDescriptor, const char *pszKey,
1670 uint32_t *puValue)
1671{
1672 const char *pszValue;
1673
1674 if (!vmdkDescGetStr(pDescriptor, pDescriptor->uFirstDesc, pszKey,
1675 &pszValue))
1676 return VERR_VD_VMDK_VALUE_NOT_FOUND;
1677 return RTStrToUInt32Ex(pszValue, NULL, 10, puValue);
1678}
1679
1680static int vmdkDescBaseGetStr(PVMDKIMAGE pImage, PVMDKDESCRIPTOR pDescriptor,
1681 const char *pszKey, const char **ppszValue)
1682{
1683 const char *pszValue;
1684 char *pszValueUnquoted;
1685
1686 if (!vmdkDescGetStr(pDescriptor, pDescriptor->uFirstDesc, pszKey,
1687 &pszValue))
1688 return VERR_VD_VMDK_VALUE_NOT_FOUND;
1689 int rc = vmdkStringUnquote(pImage, pszValue, &pszValueUnquoted, NULL);
1690 if (RT_FAILURE(rc))
1691 return rc;
1692 *ppszValue = pszValueUnquoted;
1693 return rc;
1694}
1695
1696static int vmdkDescBaseSetStr(PVMDKIMAGE pImage, PVMDKDESCRIPTOR pDescriptor,
1697 const char *pszKey, const char *pszValue)
1698{
1699 char *pszValueQuoted;
1700
1701 int rc = RTStrAPrintf(&pszValueQuoted, "\"%s\"", pszValue);
1702 if (RT_FAILURE(rc))
1703 return rc;
1704 rc = vmdkDescSetStr(pImage, pDescriptor, pDescriptor->uFirstDesc, pszKey,
1705 pszValueQuoted);
1706 RTStrFree(pszValueQuoted);
1707 return rc;
1708}
1709
/**
 * Internal: remove the first line of the extent section from the descriptor.
 *
 * Closes the gap in the contiguous text buffer and shifts the line and
 * next-line tables down by one entry. Does nothing when no extent section
 * exists. Note: pImage is currently unused.
 */
static void vmdkDescExtRemoveDummy(PVMDKIMAGE pImage,
                                   PVMDKDESCRIPTOR pDescriptor)
{
    unsigned uEntry = pDescriptor->uFirstExtent;
    ssize_t cbDiff;

    /* No extent section -> nothing to remove. */
    if (!uEntry)
        return;

    cbDiff = strlen(pDescriptor->aLines[uEntry]) + 1;
    /* Move everything including \0 in the entry marking the end of buffer. */
    memmove(pDescriptor->aLines[uEntry], pDescriptor->aLines[uEntry + 1],
            pDescriptor->aLines[pDescriptor->cLines] - pDescriptor->aLines[uEntry + 1] + 1);
    /* Shift line pointers down by one slot and rebase them by the removed
     * length; next-line links move down by one index as well. */
    for (unsigned i = uEntry + 1; i <= pDescriptor->cLines; i++)
    {
        pDescriptor->aLines[i - 1] = pDescriptor->aLines[i] - cbDiff;
        if (pDescriptor->aNextLines[i])
            pDescriptor->aNextLines[i - 1] = pDescriptor->aNextLines[i] - 1;
        else
            pDescriptor->aNextLines[i - 1] = 0;
    }
    pDescriptor->cLines--;
    /* The DDB section (if any) starts one line earlier now. */
    if (pDescriptor->uFirstDDB)
        pDescriptor->uFirstDDB--;

    return;
}
1737
/**
 * Internal: append a new extent line ("ACCESS SIZE TYPE [\"file\" [offset]]")
 * at the end of the extent section of the descriptor.
 *
 * @returns VBox status code. VERR_BUFFER_OVERFLOW if the descriptor buffer
 *          or line table is exhausted.
 * @param   pImage          Image instance (for error reporting only).
 * @param   pDescriptor     Descriptor to modify.
 * @param   enmAccess       Access mode of the extent.
 * @param   cNominalSectors Nominal size of the extent in sectors.
 * @param   enmType         Extent type (SPARSE/FLAT/ZERO/VMFS).
 * @param   pszBasename     Extent file name (not used for ZERO extents).
 * @param   uSectorOffset   Start offset within the file (FLAT extents only).
 */
static int vmdkDescExtInsert(PVMDKIMAGE pImage, PVMDKDESCRIPTOR pDescriptor,
                             VMDKACCESS enmAccess, uint64_t cNominalSectors,
                             VMDKETYPE enmType, const char *pszBasename,
                             uint64_t uSectorOffset)
{
    static const char *apszAccess[] = { "NOACCESS", "RDONLY", "RW" };
    static const char *apszType[] = { "", "SPARSE", "FLAT", "ZERO", "VMFS" };
    char *pszTmp;
    unsigned uStart = pDescriptor->uFirstExtent, uLast = 0;
    char szExt[1024];
    ssize_t cbDiff;

    Assert((unsigned)enmAccess < RT_ELEMENTS(apszAccess));
    Assert((unsigned)enmType < RT_ELEMENTS(apszType));

    /* Find last entry in extent description. */
    while (uStart)
    {
        if (!pDescriptor->aNextLines[uStart])
            uLast = uStart;
        uStart = pDescriptor->aNextLines[uStart];
    }

    /* Format the new line: ZERO extents carry no file name, FLAT extents
     * additionally carry the offset into the file. */
    if (enmType == VMDKETYPE_ZERO)
    {
        RTStrPrintf(szExt, sizeof(szExt), "%s %llu %s ", apszAccess[enmAccess],
                    cNominalSectors, apszType[enmType]);
    }
    else if (enmType == VMDKETYPE_FLAT)
    {
        RTStrPrintf(szExt, sizeof(szExt), "%s %llu %s \"%s\" %llu",
                    apszAccess[enmAccess], cNominalSectors,
                    apszType[enmType], pszBasename, uSectorOffset);
    }
    else
    {
        RTStrPrintf(szExt, sizeof(szExt), "%s %llu %s \"%s\"",
                    apszAccess[enmAccess], cNominalSectors,
                    apszType[enmType], pszBasename);
    }
    cbDiff = strlen(szExt) + 1;

    /* Check for buffer overflow. */
    if (    (pDescriptor->cLines >= VMDK_DESCRIPTOR_LINES_MAX - 1)
        ||  (   pDescriptor->aLines[pDescriptor->cLines]
            -   pDescriptor->aLines[0] > (ptrdiff_t)pDescriptor->cbDescAlloc - cbDiff))
        return vmdkError(pImage, VERR_BUFFER_OVERFLOW, RT_SRC_POS, N_("VMDK: descriptor too big in '%s'"), pImage->pszFilename);

    /* Shift the line/next-line tables up by one to open a slot right after
     * the last extent line. */
    for (unsigned i = pDescriptor->cLines + 1; i > uLast + 1; i--)
    {
        pDescriptor->aLines[i] = pDescriptor->aLines[i - 1];
        if (pDescriptor->aNextLines[i - 1])
            pDescriptor->aNextLines[i] = pDescriptor->aNextLines[i - 1] + 1;
        else
            pDescriptor->aNextLines[i] = 0;
    }
    uStart = uLast + 1;
    pDescriptor->aNextLines[uLast] = uStart;
    pDescriptor->aNextLines[uStart] = 0;
    pDescriptor->cLines++;
    /* Open a gap in the text buffer, copy the new line in and rebase all
     * following line pointers by its length. */
    pszTmp = pDescriptor->aLines[uStart];
    memmove(pszTmp + cbDiff, pszTmp,
            pDescriptor->aLines[pDescriptor->cLines] - pszTmp);
    memcpy(pDescriptor->aLines[uStart], szExt, cbDiff);
    for (unsigned i = uStart + 1; i <= pDescriptor->cLines; i++)
        pDescriptor->aLines[i] += cbDiff;

    /* Adjust starting line numbers of following descriptor sections. */
    if (uStart <= pDescriptor->uFirstDDB)
        pDescriptor->uFirstDDB++;

    pDescriptor->fDirty = true;
    return VINF_SUCCESS;
}
1812
1813static int vmdkDescDDBGetStr(PVMDKIMAGE pImage, PVMDKDESCRIPTOR pDescriptor,
1814 const char *pszKey, const char **ppszValue)
1815{
1816 const char *pszValue;
1817 char *pszValueUnquoted;
1818
1819 if (!vmdkDescGetStr(pDescriptor, pDescriptor->uFirstDDB, pszKey,
1820 &pszValue))
1821 return VERR_VD_VMDK_VALUE_NOT_FOUND;
1822 int rc = vmdkStringUnquote(pImage, pszValue, &pszValueUnquoted, NULL);
1823 if (RT_FAILURE(rc))
1824 return rc;
1825 *ppszValue = pszValueUnquoted;
1826 return rc;
1827}
1828
1829static int vmdkDescDDBGetU32(PVMDKIMAGE pImage, PVMDKDESCRIPTOR pDescriptor,
1830 const char *pszKey, uint32_t *puValue)
1831{
1832 const char *pszValue;
1833 char *pszValueUnquoted;
1834
1835 if (!vmdkDescGetStr(pDescriptor, pDescriptor->uFirstDDB, pszKey,
1836 &pszValue))
1837 return VERR_VD_VMDK_VALUE_NOT_FOUND;
1838 int rc = vmdkStringUnquote(pImage, pszValue, &pszValueUnquoted, NULL);
1839 if (RT_FAILURE(rc))
1840 return rc;
1841 rc = RTStrToUInt32Ex(pszValueUnquoted, NULL, 10, puValue);
1842 RTMemTmpFree(pszValueUnquoted);
1843 return rc;
1844}
1845
1846static int vmdkDescDDBGetUuid(PVMDKIMAGE pImage, PVMDKDESCRIPTOR pDescriptor,
1847 const char *pszKey, PRTUUID pUuid)
1848{
1849 const char *pszValue;
1850 char *pszValueUnquoted;
1851
1852 if (!vmdkDescGetStr(pDescriptor, pDescriptor->uFirstDDB, pszKey,
1853 &pszValue))
1854 return VERR_VD_VMDK_VALUE_NOT_FOUND;
1855 int rc = vmdkStringUnquote(pImage, pszValue, &pszValueUnquoted, NULL);
1856 if (RT_FAILURE(rc))
1857 return rc;
1858 rc = RTUuidFromStr(pUuid, pszValueUnquoted);
1859 RTMemTmpFree(pszValueUnquoted);
1860 return rc;
1861}
1862
1863static int vmdkDescDDBSetStr(PVMDKIMAGE pImage, PVMDKDESCRIPTOR pDescriptor,
1864 const char *pszKey, const char *pszVal)
1865{
1866 int rc;
1867 char *pszValQuoted;
1868
1869 if (pszVal)
1870 {
1871 rc = RTStrAPrintf(&pszValQuoted, "\"%s\"", pszVal);
1872 if (RT_FAILURE(rc))
1873 return rc;
1874 }
1875 else
1876 pszValQuoted = NULL;
1877 rc = vmdkDescSetStr(pImage, pDescriptor, pDescriptor->uFirstDDB, pszKey,
1878 pszValQuoted);
1879 if (pszValQuoted)
1880 RTStrFree(pszValQuoted);
1881 return rc;
1882}
1883
1884static int vmdkDescDDBSetUuid(PVMDKIMAGE pImage, PVMDKDESCRIPTOR pDescriptor,
1885 const char *pszKey, PCRTUUID pUuid)
1886{
1887 char *pszUuid;
1888
1889 int rc = RTStrAPrintf(&pszUuid, "\"%RTuuid\"", pUuid);
1890 if (RT_FAILURE(rc))
1891 return rc;
1892 rc = vmdkDescSetStr(pImage, pDescriptor, pDescriptor->uFirstDDB, pszKey,
1893 pszUuid);
1894 RTStrFree(pszUuid);
1895 return rc;
1896}
1897
1898static int vmdkDescDDBSetU32(PVMDKIMAGE pImage, PVMDKDESCRIPTOR pDescriptor,
1899 const char *pszKey, uint32_t uValue)
1900{
1901 char *pszValue;
1902
1903 int rc = RTStrAPrintf(&pszValue, "\"%d\"", uValue);
1904 if (RT_FAILURE(rc))
1905 return rc;
1906 rc = vmdkDescSetStr(pImage, pDescriptor, pDescriptor->uFirstDDB, pszKey,
1907 pszValue);
1908 RTStrFree(pszValue);
1909 return rc;
1910}
1911
/**
 * Internal: split the raw descriptor text into NUL terminated lines and build
 * the line table (aLines), the per-section line chains (aNextLines) and the
 * start indices of the three descriptor sections (header, extents, DDB).
 *
 * The buffer pDescData is modified in place (CR/LF terminators are replaced
 * by NUL) and must stay valid for the lifetime of the descriptor.
 *
 * @returns VBox status code.
 * @param   pImage      Image instance (for error reporting only).
 * @param   pDescData   Raw descriptor text; modified in place.
 * @param   cbDescData  Size of the descriptor buffer.
 * @param   pDescriptor Descriptor structure to fill in.
 */
static int vmdkPreprocessDescriptor(PVMDKIMAGE pImage, char *pDescData,
                                    size_t cbDescData,
                                    PVMDKDESCRIPTOR pDescriptor)
{
    int rc = VINF_SUCCESS;
    unsigned cLine = 0, uLastNonEmptyLine = 0;
    char *pTmp = pDescData;

    pDescriptor->cbDescAlloc = cbDescData;
    /* First pass: split the buffer into lines by replacing "\r\n" or "\n"
     * with NUL and recording each line start. */
    while (*pTmp != '\0')
    {
        pDescriptor->aLines[cLine++] = pTmp;
        if (cLine >= VMDK_DESCRIPTOR_LINES_MAX)
        {
            rc = vmdkError(pImage, VERR_VD_VMDK_INVALID_HEADER, RT_SRC_POS, N_("VMDK: descriptor too big in '%s'"), pImage->pszFilename);
            goto out;
        }

        while (*pTmp != '\0' && *pTmp != '\n')
        {
            if (*pTmp == '\r')
            {
                /* A bare CR (not followed by LF) is rejected. */
                if (*(pTmp + 1) != '\n')
                {
                    rc = vmdkError(pImage, VERR_VD_VMDK_INVALID_HEADER, RT_SRC_POS, N_("VMDK: unsupported end of line in descriptor in '%s'"), pImage->pszFilename);
                    goto out;
                }
                else
                {
                    /* Get rid of CR character. */
                    *pTmp = '\0';
                }
            }
            pTmp++;
        }
        /* Get rid of LF character. */
        if (*pTmp == '\n')
        {
            *pTmp = '\0';
            pTmp++;
        }
    }
    pDescriptor->cLines = cLine;
    /* Pointer right after the end of the used part of the buffer. */
    pDescriptor->aLines[cLine] = pTmp;

    /* The first line must be one of the two known header magics. */
    if (    strcmp(pDescriptor->aLines[0], "# Disk DescriptorFile")
        &&  strcmp(pDescriptor->aLines[0], "# Disk Descriptor File"))
    {
        rc = vmdkError(pImage, VERR_VD_VMDK_INVALID_HEADER, RT_SRC_POS, N_("VMDK: descriptor does not start as expected in '%s'"), pImage->pszFilename);
        goto out;
    }

    /* Initialize those, because we need to be able to reopen an image. */
    pDescriptor->uFirstDesc = 0;
    pDescriptor->uFirstExtent = 0;
    pDescriptor->uFirstDDB = 0;
    /* Second pass: classify each non-empty, non-comment line into one of the
     * three sections (which must appear in order: header, extents, DDB) and
     * chain the lines of each section via aNextLines. */
    for (unsigned i = 0; i < cLine; i++)
    {
        if (*pDescriptor->aLines[i] != '#' && *pDescriptor->aLines[i] != '\0')
        {
            if (    !strncmp(pDescriptor->aLines[i], "RW", 2)
                ||  !strncmp(pDescriptor->aLines[i], "RDONLY", 6)
                ||  !strncmp(pDescriptor->aLines[i], "NOACCESS", 8) )
            {
                /* An extent descriptor. */
                if (!pDescriptor->uFirstDesc || pDescriptor->uFirstDDB)
                {
                    /* Incorrect ordering of entries. */
                    rc = vmdkError(pImage, VERR_VD_VMDK_INVALID_HEADER, RT_SRC_POS, N_("VMDK: incorrect ordering of entries in descriptor in '%s'"), pImage->pszFilename);
                    goto out;
                }
                if (!pDescriptor->uFirstExtent)
                {
                    /* First extent line: start a new chain. */
                    pDescriptor->uFirstExtent = i;
                    uLastNonEmptyLine = 0;
                }
            }
            else if (!strncmp(pDescriptor->aLines[i], "ddb.", 4))
            {
                /* A disk database entry. */
                if (!pDescriptor->uFirstDesc || !pDescriptor->uFirstExtent)
                {
                    /* Incorrect ordering of entries. */
                    rc = vmdkError(pImage, VERR_VD_VMDK_INVALID_HEADER, RT_SRC_POS, N_("VMDK: incorrect ordering of entries in descriptor in '%s'"), pImage->pszFilename);
                    goto out;
                }
                if (!pDescriptor->uFirstDDB)
                {
                    /* First DDB line: start a new chain. */
                    pDescriptor->uFirstDDB = i;
                    uLastNonEmptyLine = 0;
                }
            }
            else
            {
                /* A normal entry. */
                if (pDescriptor->uFirstExtent || pDescriptor->uFirstDDB)
                {
                    /* Incorrect ordering of entries. */
                    rc = vmdkError(pImage, VERR_VD_VMDK_INVALID_HEADER, RT_SRC_POS, N_("VMDK: incorrect ordering of entries in descriptor in '%s'"), pImage->pszFilename);
                    goto out;
                }
                if (!pDescriptor->uFirstDesc)
                {
                    pDescriptor->uFirstDesc = i;
                    uLastNonEmptyLine = 0;
                }
            }
            /* Link this line into the chain of the current section. */
            if (uLastNonEmptyLine)
                pDescriptor->aNextLines[uLastNonEmptyLine] = i;
            uLastNonEmptyLine = i;
        }
    }

out:
    return rc;
}
2029
2030static int vmdkDescSetPCHSGeometry(PVMDKIMAGE pImage,
2031 PCPDMMEDIAGEOMETRY pPCHSGeometry)
2032{
2033 int rc = vmdkDescDDBSetU32(pImage, &pImage->Descriptor,
2034 VMDK_DDB_GEO_PCHS_CYLINDERS,
2035 pPCHSGeometry->cCylinders);
2036 if (RT_FAILURE(rc))
2037 return rc;
2038 rc = vmdkDescDDBSetU32(pImage, &pImage->Descriptor,
2039 VMDK_DDB_GEO_PCHS_HEADS,
2040 pPCHSGeometry->cHeads);
2041 if (RT_FAILURE(rc))
2042 return rc;
2043 rc = vmdkDescDDBSetU32(pImage, &pImage->Descriptor,
2044 VMDK_DDB_GEO_PCHS_SECTORS,
2045 pPCHSGeometry->cSectors);
2046 return rc;
2047}
2048
2049static int vmdkDescSetLCHSGeometry(PVMDKIMAGE pImage,
2050 PCPDMMEDIAGEOMETRY pLCHSGeometry)
2051{
2052 int rc = vmdkDescDDBSetU32(pImage, &pImage->Descriptor,
2053 VMDK_DDB_GEO_LCHS_CYLINDERS,
2054 pLCHSGeometry->cCylinders);
2055 if (RT_FAILURE(rc))
2056 return rc;
2057 rc = vmdkDescDDBSetU32(pImage, &pImage->Descriptor,
2058 VMDK_DDB_GEO_LCHS_HEADS,
2059 pLCHSGeometry->cHeads);
2060 if (RT_FAILURE(rc))
2061 return rc;
2062 rc = vmdkDescDDBSetU32(pImage, &pImage->Descriptor,
2063 VMDK_DDB_GEO_LCHS_SECTORS,
2064 pLCHSGeometry->cSectors);
2065 return rc;
2066}
2067
2068static int vmdkCreateDescriptor(PVMDKIMAGE pImage, char *pDescData,
2069 size_t cbDescData, PVMDKDESCRIPTOR pDescriptor)
2070{
2071 int rc;
2072
2073 pDescriptor->uFirstDesc = 0;
2074 pDescriptor->uFirstExtent = 0;
2075 pDescriptor->uFirstDDB = 0;
2076 pDescriptor->cLines = 0;
2077 pDescriptor->cbDescAlloc = cbDescData;
2078 pDescriptor->fDirty = false;
2079 pDescriptor->aLines[pDescriptor->cLines] = pDescData;
2080 memset(pDescriptor->aNextLines, '\0', sizeof(pDescriptor->aNextLines));
2081
2082 rc = vmdkDescInitStr(pImage, pDescriptor, "# Disk DescriptorFile");
2083 if (RT_FAILURE(rc))
2084 goto out;
2085 rc = vmdkDescInitStr(pImage, pDescriptor, "version=1");
2086 if (RT_FAILURE(rc))
2087 goto out;
2088 pDescriptor->uFirstDesc = pDescriptor->cLines - 1;
2089 rc = vmdkDescInitStr(pImage, pDescriptor, "");
2090 if (RT_FAILURE(rc))
2091 goto out;
2092 rc = vmdkDescInitStr(pImage, pDescriptor, "# Extent description");
2093 if (RT_FAILURE(rc))
2094 goto out;
2095 rc = vmdkDescInitStr(pImage, pDescriptor, "NOACCESS 0 ZERO ");
2096 if (RT_FAILURE(rc))
2097 goto out;
2098 pDescriptor->uFirstExtent = pDescriptor->cLines - 1;
2099 rc = vmdkDescInitStr(pImage, pDescriptor, "");
2100 if (RT_FAILURE(rc))
2101 goto out;
2102 /* The trailing space is created by VMware, too. */
2103 rc = vmdkDescInitStr(pImage, pDescriptor, "# The disk Data Base ");
2104 if (RT_FAILURE(rc))
2105 goto out;
2106 rc = vmdkDescInitStr(pImage, pDescriptor, "#DDB");
2107 if (RT_FAILURE(rc))
2108 goto out;
2109 rc = vmdkDescInitStr(pImage, pDescriptor, "");
2110 if (RT_FAILURE(rc))
2111 goto out;
2112 rc = vmdkDescInitStr(pImage, pDescriptor, "ddb.virtualHWVersion = \"4\"");
2113 if (RT_FAILURE(rc))
2114 goto out;
2115 pDescriptor->uFirstDDB = pDescriptor->cLines - 1;
2116
2117 /* Now that the framework is in place, use the normal functions to insert
2118 * the remaining keys. */
2119 char szBuf[9];
2120 RTStrPrintf(szBuf, sizeof(szBuf), "%08x", RTRandU32());
2121 rc = vmdkDescSetStr(pImage, pDescriptor, pDescriptor->uFirstDesc,
2122 "CID", szBuf);
2123 if (RT_FAILURE(rc))
2124 goto out;
2125 rc = vmdkDescSetStr(pImage, pDescriptor, pDescriptor->uFirstDesc,
2126 "parentCID", "ffffffff");
2127 if (RT_FAILURE(rc))
2128 goto out;
2129
2130 rc = vmdkDescDDBSetStr(pImage, pDescriptor, "ddb.adapterType", "ide");
2131 if (RT_FAILURE(rc))
2132 goto out;
2133
2134out:
2135 return rc;
2136}
2137
/**
 * Internal: parse a complete VMDK descriptor and fill in the image state.
 *
 * Validates the format version, derives the image flags from createType,
 * parses every extent line (access, nominal size, type, basename, optional
 * sector offset), reads the PCHS/LCHS geometry from the DDB and fetches the
 * four UUIDs. UUIDs missing from the descriptor (images created by VMware)
 * are generated and written back, but only for images opened read/write.
 *
 * @returns VBox status code; VERR_VD_VMDK_* on malformed descriptors.
 * @param   pImage      The image instance to fill in.
 * @param   pDescData   Raw descriptor text (modified in place by the
 *                      preprocessor, which splits it into lines).
 * @param   cbDescData  Size of the raw descriptor buffer.
 */
static int vmdkParseDescriptor(PVMDKIMAGE pImage, char *pDescData,
                               size_t cbDescData)
{
    int rc;
    unsigned cExtents;
    unsigned uLine;
    unsigned i;

    /* Split the text into lines and locate the three sections. */
    rc = vmdkPreprocessDescriptor(pImage, pDescData, cbDescData,
                                  &pImage->Descriptor);
    if (RT_FAILURE(rc))
        return rc;

    /* Check version, must be 1. */
    uint32_t uVersion;
    rc = vmdkDescBaseGetU32(&pImage->Descriptor, "version", &uVersion);
    if (RT_FAILURE(rc))
        return vmdkError(pImage, rc, RT_SRC_POS, N_("VMDK: error finding key 'version' in descriptor in '%s'"), pImage->pszFilename);
    if (uVersion != 1)
        return vmdkError(pImage, VERR_VD_VMDK_UNSUPPORTED_VERSION, RT_SRC_POS, N_("VMDK: unsupported format version in descriptor in '%s'"), pImage->pszFilename);

    /* Get image creation type and determine image flags. */
    const char *pszCreateType = NULL; /* initialized to make gcc shut up */
    rc = vmdkDescBaseGetStr(pImage, &pImage->Descriptor, "createType",
                            &pszCreateType);
    if (RT_FAILURE(rc))
        return vmdkError(pImage, rc, RT_SRC_POS, N_("VMDK: cannot get image type from descriptor in '%s'"), pImage->pszFilename);
    if (    !strcmp(pszCreateType, "twoGbMaxExtentSparse")
        ||  !strcmp(pszCreateType, "twoGbMaxExtentFlat"))
        pImage->uImageFlags |= VD_VMDK_IMAGE_FLAGS_SPLIT_2G;
    else if (   !strcmp(pszCreateType, "partitionedDevice")
             || !strcmp(pszCreateType, "fullDevice"))
        pImage->uImageFlags |= VD_VMDK_IMAGE_FLAGS_RAWDISK;
    else if (!strcmp(pszCreateType, "streamOptimized"))
        pImage->uImageFlags |= VD_VMDK_IMAGE_FLAGS_STREAM_OPTIMIZED;
    else if (!strcmp(pszCreateType, "vmfs"))
        pImage->uImageFlags |= VD_IMAGE_FLAGS_FIXED | VD_VMDK_IMAGE_FLAGS_ESX;
    /* The string was duplicated by the getter; cast away const to free it. */
    RTStrFree((char *)(void *)pszCreateType);

    /* Count the number of extent config entries. */
    for (uLine = pImage->Descriptor.uFirstExtent, cExtents = 0;
         uLine != 0;
         uLine = pImage->Descriptor.aNextLines[uLine], cExtents++)
        /* nothing */;

    if (!pImage->pDescData && cExtents != 1)
    {
        /* Monolithic image, must have only one extent (already opened). */
        return vmdkError(pImage, VERR_VD_VMDK_INVALID_HEADER, RT_SRC_POS, N_("VMDK: monolithic image may only have one extent in '%s'"), pImage->pszFilename);
    }

    if (pImage->pDescData)
    {
        /* Non-monolithic image, extents need to be allocated. */
        rc = vmdkCreateExtents(pImage, cExtents);
        if (RT_FAILURE(rc))
            return rc;
    }

    /* Walk the extent section line by line. Each line has the form
     * "<access> <size> <type> [\"<basename>\" [<offset>]]". */
    for (i = 0, uLine = pImage->Descriptor.uFirstExtent;
         i < cExtents; i++, uLine = pImage->Descriptor.aNextLines[uLine])
    {
        char *pszLine = pImage->Descriptor.aLines[uLine];

        /* Access type of the extent. */
        if (!strncmp(pszLine, "RW", 2))
        {
            pImage->pExtents[i].enmAccess = VMDKACCESS_READWRITE;
            pszLine += 2;
        }
        else if (!strncmp(pszLine, "RDONLY", 6))
        {
            pImage->pExtents[i].enmAccess = VMDKACCESS_READONLY;
            pszLine += 6;
        }
        else if (!strncmp(pszLine, "NOACCESS", 8))
        {
            pImage->pExtents[i].enmAccess = VMDKACCESS_NOACCESS;
            pszLine += 8;
        }
        else
            return vmdkError(pImage, VERR_VD_VMDK_INVALID_HEADER, RT_SRC_POS, N_("VMDK: parse error in extent description in '%s'"), pImage->pszFilename);
        if (*pszLine++ != ' ')
            return vmdkError(pImage, VERR_VD_VMDK_INVALID_HEADER, RT_SRC_POS, N_("VMDK: parse error in extent description in '%s'"), pImage->pszFilename);

        /* Nominal size of the extent. */
        rc = RTStrToUInt64Ex(pszLine, &pszLine, 10,
                             &pImage->pExtents[i].cNominalSectors);
        if (RT_FAILURE(rc))
            return vmdkError(pImage, VERR_VD_VMDK_INVALID_HEADER, RT_SRC_POS, N_("VMDK: parse error in extent description in '%s'"), pImage->pszFilename);
        if (*pszLine++ != ' ')
            return vmdkError(pImage, VERR_VD_VMDK_INVALID_HEADER, RT_SRC_POS, N_("VMDK: parse error in extent description in '%s'"), pImage->pszFilename);

        /* Type of the extent. */
#ifdef VBOX_WITH_VMDK_ESX
        /** @todo Add the ESX extent types. Not necessary for now because
         * the ESX extent types are only used inside an ESX server. They are
         * automatically converted if the VMDK is exported. */
#endif /* VBOX_WITH_VMDK_ESX */
        if (!strncmp(pszLine, "SPARSE", 6))
        {
            pImage->pExtents[i].enmType = VMDKETYPE_HOSTED_SPARSE;
            pszLine += 6;
        }
        else if (!strncmp(pszLine, "FLAT", 4))
        {
            pImage->pExtents[i].enmType = VMDKETYPE_FLAT;
            pszLine += 4;
        }
        else if (!strncmp(pszLine, "ZERO", 4))
        {
            pImage->pExtents[i].enmType = VMDKETYPE_ZERO;
            pszLine += 4;
        }
        else if (!strncmp(pszLine, "VMFS", 4))
        {
            pImage->pExtents[i].enmType = VMDKETYPE_VMFS;
            pszLine += 4;
        }
        else
            return vmdkError(pImage, VERR_VD_VMDK_INVALID_HEADER, RT_SRC_POS, N_("VMDK: parse error in extent description in '%s'"), pImage->pszFilename);
        if (pImage->pExtents[i].enmType == VMDKETYPE_ZERO)
        {
            /* This one has no basename or offset. */
            if (*pszLine == ' ')
                pszLine++;
            if (*pszLine != '\0')
                return vmdkError(pImage, VERR_VD_VMDK_INVALID_HEADER, RT_SRC_POS, N_("VMDK: parse error in extent description in '%s'"), pImage->pszFilename);
            pImage->pExtents[i].pszBasename = NULL;
        }
        else
        {
            /* All other extent types have basename and optional offset. */
            if (*pszLine++ != ' ')
                return vmdkError(pImage, VERR_VD_VMDK_INVALID_HEADER, RT_SRC_POS, N_("VMDK: parse error in extent description in '%s'"), pImage->pszFilename);

            /* Basename of the image. Surrounded by quotes. */
            char *pszBasename;
            rc = vmdkStringUnquote(pImage, pszLine, &pszBasename, &pszLine);
            if (RT_FAILURE(rc))
                return rc;
            /* Ownership of the unquoted string passes to the extent;
             * it is freed together with the extent data. */
            pImage->pExtents[i].pszBasename = pszBasename;
            if (*pszLine == ' ')
            {
                pszLine++;
                if (*pszLine != '\0')
                {
                    /* Optional offset in extent specified. */
                    rc = RTStrToUInt64Ex(pszLine, &pszLine, 10,
                                         &pImage->pExtents[i].uSectorOffset);
                    if (RT_FAILURE(rc))
                        return vmdkError(pImage, VERR_VD_VMDK_INVALID_HEADER, RT_SRC_POS, N_("VMDK: parse error in extent description in '%s'"), pImage->pszFilename);
                }
            }

            if (*pszLine != '\0')
                return vmdkError(pImage, VERR_VD_VMDK_INVALID_HEADER, RT_SRC_POS, N_("VMDK: parse error in extent description in '%s'"), pImage->pszFilename);
        }
    }

    /* Determine PCHS geometry (autogenerate if necessary). */
    rc = vmdkDescDDBGetU32(pImage, &pImage->Descriptor,
                           VMDK_DDB_GEO_PCHS_CYLINDERS,
                           &pImage->PCHSGeometry.cCylinders);
    if (rc == VERR_VD_VMDK_VALUE_NOT_FOUND)
        pImage->PCHSGeometry.cCylinders = 0;
    else if (RT_FAILURE(rc))
        return vmdkError(pImage, rc, RT_SRC_POS, N_("VMDK: error getting PCHS geometry from extent description in '%s'"), pImage->pszFilename);
    rc = vmdkDescDDBGetU32(pImage, &pImage->Descriptor,
                           VMDK_DDB_GEO_PCHS_HEADS,
                           &pImage->PCHSGeometry.cHeads);
    if (rc == VERR_VD_VMDK_VALUE_NOT_FOUND)
        pImage->PCHSGeometry.cHeads = 0;
    else if (RT_FAILURE(rc))
        return vmdkError(pImage, rc, RT_SRC_POS, N_("VMDK: error getting PCHS geometry from extent description in '%s'"), pImage->pszFilename);
    rc = vmdkDescDDBGetU32(pImage, &pImage->Descriptor,
                           VMDK_DDB_GEO_PCHS_SECTORS,
                           &pImage->PCHSGeometry.cSectors);
    if (rc == VERR_VD_VMDK_VALUE_NOT_FOUND)
        pImage->PCHSGeometry.cSectors = 0;
    else if (RT_FAILURE(rc))
        return vmdkError(pImage, rc, RT_SRC_POS, N_("VMDK: error getting PCHS geometry from extent description in '%s'"), pImage->pszFilename);
    /* Reject geometries outside the classic ATA limits (16 heads, 63
     * sectors) and fall back to the autogeneration defaults. */
    if (    pImage->PCHSGeometry.cCylinders == 0
        ||  pImage->PCHSGeometry.cHeads == 0
        ||  pImage->PCHSGeometry.cHeads > 16
        ||  pImage->PCHSGeometry.cSectors == 0
        ||  pImage->PCHSGeometry.cSectors > 63)
    {
        /* Mark PCHS geometry as not yet valid (can't do the calculation here
         * as the total image size isn't known yet). */
        pImage->PCHSGeometry.cCylinders = 0;
        pImage->PCHSGeometry.cHeads = 16;
        pImage->PCHSGeometry.cSectors = 63;
    }

    /* Determine LCHS geometry (set to 0 if not specified). */
    rc = vmdkDescDDBGetU32(pImage, &pImage->Descriptor,
                           VMDK_DDB_GEO_LCHS_CYLINDERS,
                           &pImage->LCHSGeometry.cCylinders);
    if (rc == VERR_VD_VMDK_VALUE_NOT_FOUND)
        pImage->LCHSGeometry.cCylinders = 0;
    else if (RT_FAILURE(rc))
        return vmdkError(pImage, rc, RT_SRC_POS, N_("VMDK: error getting LCHS geometry from extent description in '%s'"), pImage->pszFilename);
    rc = vmdkDescDDBGetU32(pImage, &pImage->Descriptor,
                           VMDK_DDB_GEO_LCHS_HEADS,
                           &pImage->LCHSGeometry.cHeads);
    if (rc == VERR_VD_VMDK_VALUE_NOT_FOUND)
        pImage->LCHSGeometry.cHeads = 0;
    else if (RT_FAILURE(rc))
        return vmdkError(pImage, rc, RT_SRC_POS, N_("VMDK: error getting LCHS geometry from extent description in '%s'"), pImage->pszFilename);
    rc = vmdkDescDDBGetU32(pImage, &pImage->Descriptor,
                           VMDK_DDB_GEO_LCHS_SECTORS,
                           &pImage->LCHSGeometry.cSectors);
    if (rc == VERR_VD_VMDK_VALUE_NOT_FOUND)
        pImage->LCHSGeometry.cSectors = 0;
    else if (RT_FAILURE(rc))
        return vmdkError(pImage, rc, RT_SRC_POS, N_("VMDK: error getting LCHS geometry from extent description in '%s'"), pImage->pszFilename);
    /* LCHS geometry is all-or-nothing: clear everything if incomplete. */
    if (    pImage->LCHSGeometry.cCylinders == 0
        ||  pImage->LCHSGeometry.cHeads == 0
        ||  pImage->LCHSGeometry.cSectors == 0)
    {
        pImage->LCHSGeometry.cCylinders = 0;
        pImage->LCHSGeometry.cHeads = 0;
        pImage->LCHSGeometry.cSectors = 0;
    }

    /* Get image UUID. */
    rc = vmdkDescDDBGetUuid(pImage, &pImage->Descriptor, VMDK_DDB_IMAGE_UUID,
                            &pImage->ImageUuid);
    if (rc == VERR_VD_VMDK_VALUE_NOT_FOUND)
    {
        /* Image without UUID. Probably created by VMware and not yet used
         * by VirtualBox. Can only be added for images opened in read/write
         * mode, so don't bother producing a sensible UUID otherwise. */
        if (pImage->uOpenFlags & VD_OPEN_FLAGS_READONLY)
            RTUuidClear(&pImage->ImageUuid);
        else
        {
            rc = RTUuidCreate(&pImage->ImageUuid);
            if (RT_FAILURE(rc))
                return rc;
            rc = vmdkDescDDBSetUuid(pImage, &pImage->Descriptor,
                                    VMDK_DDB_IMAGE_UUID, &pImage->ImageUuid);
            if (RT_FAILURE(rc))
                return vmdkError(pImage, rc, RT_SRC_POS, N_("VMDK: error storing image UUID in descriptor in '%s'"), pImage->pszFilename);
        }
    }
    else if (RT_FAILURE(rc))
        return rc;

    /* Get image modification UUID. */
    rc = vmdkDescDDBGetUuid(pImage, &pImage->Descriptor,
                            VMDK_DDB_MODIFICATION_UUID,
                            &pImage->ModificationUuid);
    if (rc == VERR_VD_VMDK_VALUE_NOT_FOUND)
    {
        /* Image without UUID. Probably created by VMware and not yet used
         * by VirtualBox. Can only be added for images opened in read/write
         * mode, so don't bother producing a sensible UUID otherwise. */
        if (pImage->uOpenFlags & VD_OPEN_FLAGS_READONLY)
            RTUuidClear(&pImage->ModificationUuid);
        else
        {
            rc = RTUuidCreate(&pImage->ModificationUuid);
            if (RT_FAILURE(rc))
                return rc;
            rc = vmdkDescDDBSetUuid(pImage, &pImage->Descriptor,
                                    VMDK_DDB_MODIFICATION_UUID,
                                    &pImage->ModificationUuid);
            if (RT_FAILURE(rc))
                return vmdkError(pImage, rc, RT_SRC_POS, N_("VMDK: error storing image modification UUID in descriptor in '%s'"), pImage->pszFilename);
        }
    }
    else if (RT_FAILURE(rc))
        return rc;

    /* Get UUID of parent image. */
    rc = vmdkDescDDBGetUuid(pImage, &pImage->Descriptor, VMDK_DDB_PARENT_UUID,
                            &pImage->ParentUuid);
    if (rc == VERR_VD_VMDK_VALUE_NOT_FOUND)
    {
        /* Image without UUID. Probably created by VMware and not yet used
         * by VirtualBox. Can only be added for images opened in read/write
         * mode, so don't bother producing a sensible UUID otherwise. */
        if (pImage->uOpenFlags & VD_OPEN_FLAGS_READONLY)
            RTUuidClear(&pImage->ParentUuid);
        else
        {
            /* Deliberately a nil UUID here, not a random one: no parent
             * was recorded, so the image is treated as having none. */
            rc = RTUuidClear(&pImage->ParentUuid);
            if (RT_FAILURE(rc))
                return rc;
            rc = vmdkDescDDBSetUuid(pImage, &pImage->Descriptor,
                                    VMDK_DDB_PARENT_UUID, &pImage->ParentUuid);
            if (RT_FAILURE(rc))
                return vmdkError(pImage, rc, RT_SRC_POS, N_("VMDK: error storing parent UUID in descriptor in '%s'"), pImage->pszFilename);
        }
    }
    else if (RT_FAILURE(rc))
        return rc;

    /* Get parent image modification UUID. */
    rc = vmdkDescDDBGetUuid(pImage, &pImage->Descriptor,
                            VMDK_DDB_PARENT_MODIFICATION_UUID,
                            &pImage->ParentModificationUuid);
    if (rc == VERR_VD_VMDK_VALUE_NOT_FOUND)
    {
        /* Image without UUID. Probably created by VMware and not yet used
         * by VirtualBox. Can only be added for images opened in read/write
         * mode, so don't bother producing a sensible UUID otherwise. */
        if (pImage->uOpenFlags & VD_OPEN_FLAGS_READONLY)
            RTUuidClear(&pImage->ParentModificationUuid);
        else
        {
            rc = RTUuidCreate(&pImage->ParentModificationUuid);
            if (RT_FAILURE(rc))
                return rc;
            rc = vmdkDescDDBSetUuid(pImage, &pImage->Descriptor,
                                    VMDK_DDB_PARENT_MODIFICATION_UUID,
                                    &pImage->ParentModificationUuid);
            if (RT_FAILURE(rc))
                return vmdkError(pImage, rc, RT_SRC_POS, N_("VMDK: error storing parent modification UUID in descriptor in '%s'"), pImage->pszFilename);
        }
    }
    else if (RT_FAILURE(rc))
        return rc;

    return VINF_SUCCESS;
}
2466
2467/**
2468 * Internal: write/update the descriptor part of the image.
2469 */
2470static int vmdkWriteDescriptor(PVMDKIMAGE pImage)
2471{
2472 int rc = VINF_SUCCESS;
2473 uint64_t cbLimit;
2474 uint64_t uOffset;
2475 PVMDKFILE pDescFile;
2476
2477 if (pImage->pDescData)
2478 {
2479 /* Separate descriptor file. */
2480 uOffset = 0;
2481 cbLimit = 0;
2482 pDescFile = pImage->pFile;
2483 }
2484 else
2485 {
2486 /* Embedded descriptor file. */
2487 uOffset = VMDK_SECTOR2BYTE(pImage->pExtents[0].uDescriptorSector);
2488 cbLimit = VMDK_SECTOR2BYTE(pImage->pExtents[0].cDescriptorSectors);
2489 pDescFile = pImage->pExtents[0].pFile;
2490 }
2491 /* Bail out if there is no file to write to. */
2492 if (pDescFile == NULL)
2493 return VERR_INVALID_PARAMETER;
2494
2495 /*
2496 * Allocate temporary descriptor buffer.
2497 * In case there is no limit allocate a default
2498 * and increase if required.
2499 */
2500 size_t cbDescriptor = cbLimit ? cbLimit : 4 * _1K;
2501 char *pszDescriptor = (char *)RTMemAllocZ(cbDescriptor);
2502 unsigned offDescriptor = 0;
2503
2504 if (!pszDescriptor)
2505 return VERR_NO_MEMORY;
2506
2507 for (unsigned i = 0; i < pImage->Descriptor.cLines; i++)
2508 {
2509 const char *psz = pImage->Descriptor.aLines[i];
2510 size_t cb = strlen(psz);
2511
2512 /*
2513 * Increase the descriptor if there is no limit and
2514 * there is not enough room left for this line.
2515 */
2516 if (offDescriptor + cb + 1 > cbDescriptor)
2517 {
2518 if (cbLimit)
2519 {
2520 rc = vmdkError(pImage, VERR_BUFFER_OVERFLOW, RT_SRC_POS, N_("VMDK: descriptor too long in '%s'"), pImage->pszFilename);
2521 break;
2522 }
2523 else
2524 {
2525 char *pszDescriptorNew = NULL;
2526 LogFlow(("Increasing descriptor cache\n"));
2527
2528 pszDescriptorNew = (char *)RTMemRealloc(pszDescriptor, cbDescriptor + cb + 4 * _1K);
2529 if (!pszDescriptorNew)
2530 {
2531 rc = VERR_NO_MEMORY;
2532 break;
2533 }
2534 pszDescriptorNew = pszDescriptor;
2535 cbDescriptor += cb + 4 * _1K;
2536 }
2537 }
2538
2539 if (cb > 0)
2540 {
2541 memcpy(pszDescriptor + offDescriptor, psz, cb);
2542 offDescriptor += cb;
2543 }
2544
2545 memcpy(pszDescriptor + offDescriptor, "\n", 1);
2546 offDescriptor++;
2547 }
2548
2549 if (RT_SUCCESS(rc))
2550 {
2551 rc = vmdkFileWriteAt(pDescFile, uOffset, pszDescriptor, cbLimit ? cbLimit : offDescriptor, NULL);
2552 if (RT_FAILURE(rc))
2553 rc = vmdkError(pImage, rc, RT_SRC_POS, N_("VMDK: error writing descriptor in '%s'"), pImage->pszFilename);
2554 }
2555
2556 if (RT_SUCCESS(rc) && !cbLimit)
2557 {
2558 rc = vmdkFileSetSize(pDescFile, offDescriptor);
2559 if (RT_FAILURE(rc))
2560 rc = vmdkError(pImage, rc, RT_SRC_POS, N_("VMDK: error truncating descriptor in '%s'"), pImage->pszFilename);
2561 }
2562
2563 if (RT_SUCCESS(rc))
2564 pImage->Descriptor.fDirty = false;
2565
2566 RTMemFree(pszDescriptor);
2567 return rc;
2568}
2569
2570/**
2571 * Internal: write/update the descriptor part of the image - async version.
2572 */
2573static int vmdkWriteDescriptorAsync(PVMDKIMAGE pImage, PVDIOCTX pIoCtx)
2574{
2575 int rc = VINF_SUCCESS;
2576 uint64_t cbLimit;
2577 uint64_t uOffset;
2578 PVMDKFILE pDescFile;
2579
2580 if (pImage->pDescData)
2581 {
2582 /* Separate descriptor file. */
2583 uOffset = 0;
2584 cbLimit = 0;
2585 pDescFile = pImage->pFile;
2586 }
2587 else
2588 {
2589 /* Embedded descriptor file. */
2590 uOffset = VMDK_SECTOR2BYTE(pImage->pExtents[0].uDescriptorSector);
2591 cbLimit = VMDK_SECTOR2BYTE(pImage->pExtents[0].cDescriptorSectors);
2592 pDescFile = pImage->pExtents[0].pFile;
2593 }
2594 /* Bail out if there is no file to write to. */
2595 if (pDescFile == NULL)
2596 return VERR_INVALID_PARAMETER;
2597
2598 /*
2599 * Allocate temporary descriptor buffer.
2600 * In case there is no limit allocate a default
2601 * and increase if required.
2602 */
2603 size_t cbDescriptor = cbLimit ? cbLimit : 4 * _1K;
2604 char *pszDescriptor = (char *)RTMemAllocZ(cbDescriptor);
2605 unsigned offDescriptor = 0;
2606
2607 if (!pszDescriptor)
2608 return VERR_NO_MEMORY;
2609
2610 for (unsigned i = 0; i < pImage->Descriptor.cLines; i++)
2611 {
2612 const char *psz = pImage->Descriptor.aLines[i];
2613 size_t cb = strlen(psz);
2614
2615 /*
2616 * Increase the descriptor if there is no limit and
2617 * there is not enough room left for this line.
2618 */
2619 if (offDescriptor + cb + 1 > cbDescriptor)
2620 {
2621 if (cbLimit)
2622 {
2623 rc = vmdkError(pImage, VERR_BUFFER_OVERFLOW, RT_SRC_POS, N_("VMDK: descriptor too long in '%s'"), pImage->pszFilename);
2624 break;
2625 }
2626 else
2627 {
2628 char *pszDescriptorNew = NULL;
2629 LogFlow(("Increasing descriptor cache\n"));
2630
2631 pszDescriptorNew = (char *)RTMemRealloc(pszDescriptor, cbDescriptor + cb + 4 * _1K);
2632 if (!pszDescriptorNew)
2633 {
2634 rc = VERR_NO_MEMORY;
2635 break;
2636 }
2637 pszDescriptorNew = pszDescriptor;
2638 cbDescriptor += cb + 4 * _1K;
2639 }
2640 }
2641
2642 if (cb > 0)
2643 {
2644 memcpy(pszDescriptor + offDescriptor, psz, cb);
2645 offDescriptor += cb;
2646 }
2647
2648 memcpy(pszDescriptor + offDescriptor, "\n", 1);
2649 offDescriptor++;
2650 }
2651
2652 if (RT_SUCCESS(rc))
2653 {
2654 rc = vmdkFileWriteAt(pDescFile, uOffset, pszDescriptor, cbLimit ? cbLimit : offDescriptor, NULL);
2655 if (RT_FAILURE(rc))
2656 rc = vmdkError(pImage, rc, RT_SRC_POS, N_("VMDK: error writing descriptor in '%s'"), pImage->pszFilename);
2657 }
2658
2659 if (RT_SUCCESS(rc) && !cbLimit)
2660 {
2661 rc = vmdkFileSetSize(pDescFile, offDescriptor);
2662 if (RT_FAILURE(rc))
2663 rc = vmdkError(pImage, rc, RT_SRC_POS, N_("VMDK: error truncating descriptor in '%s'"), pImage->pszFilename);
2664 }
2665
2666 if (RT_SUCCESS(rc))
2667 pImage->Descriptor.fDirty = false;
2668
2669 RTMemFree(pszDescriptor);
2670 return rc;
2671}
2672
2673/**
2674 * Internal: validate the consistency check values in a binary header.
2675 */
2676static int vmdkValidateHeader(PVMDKIMAGE pImage, PVMDKEXTENT pExtent, const SparseExtentHeader *pHeader)
2677{
2678 int rc = VINF_SUCCESS;
2679 if (RT_LE2H_U32(pHeader->magicNumber) != VMDK_SPARSE_MAGICNUMBER)
2680 {
2681 rc = vmdkError(pImage, VERR_VD_VMDK_INVALID_HEADER, RT_SRC_POS, N_("VMDK: incorrect magic in sparse extent header in '%s'"), pExtent->pszFullname);
2682 return rc;
2683 }
2684 if (RT_LE2H_U32(pHeader->version) != 1 && RT_LE2H_U32(pHeader->version) != 3)
2685 {
2686 rc = vmdkError(pImage, VERR_VD_VMDK_UNSUPPORTED_VERSION, RT_SRC_POS, N_("VMDK: incorrect version in sparse extent header in '%s', not a VMDK 1.0/1.1 conforming file"), pExtent->pszFullname);
2687 return rc;
2688 }
2689 if ( (RT_LE2H_U32(pHeader->flags) & 1)
2690 && ( pHeader->singleEndLineChar != '\n'
2691 || pHeader->nonEndLineChar != ' '
2692 || pHeader->doubleEndLineChar1 != '\r'
2693 || pHeader->doubleEndLineChar2 != '\n') )
2694 {
2695 rc = vmdkError(pImage, VERR_VD_VMDK_INVALID_HEADER, RT_SRC_POS, N_("VMDK: corrupted by CR/LF translation in '%s'"), pExtent->pszFullname);
2696 return rc;
2697 }
2698 return rc;
2699}
2700
2701/**
2702 * Internal: read metadata belonging to an extent with binary header, i.e.
2703 * as found in monolithic files.
2704 */
2705static int vmdkReadBinaryMetaExtent(PVMDKIMAGE pImage, PVMDKEXTENT pExtent)
2706{
2707 SparseExtentHeader Header;
2708 uint64_t cSectorsPerGDE;
2709
2710 int rc = vmdkFileReadAt(pExtent->pFile, 0, &Header, sizeof(Header), NULL);
2711 AssertRC(rc);
2712 if (RT_FAILURE(rc))
2713 {
2714 rc = vmdkError(pImage, rc, RT_SRC_POS, N_("VMDK: error reading extent header in '%s'"), pExtent->pszFullname);
2715 goto out;
2716 }
2717 rc = vmdkValidateHeader(pImage, pExtent, &Header);
2718 if (RT_FAILURE(rc))
2719 goto out;
2720 if ( RT_LE2H_U32(Header.flags & RT_BIT(17))
2721 && RT_LE2H_U64(Header.gdOffset) == VMDK_GD_AT_END)
2722 {
2723 /* Read the footer, which isn't compressed and comes before the
2724 * end-of-stream marker. This is bending the VMDK 1.1 spec, but that's
2725 * VMware reality. Theory and practice have very little in common. */
2726 uint64_t cbSize;
2727 rc = vmdkFileGetSize(pExtent->pFile, &cbSize);
2728 AssertRC(rc);
2729 if (RT_FAILURE(rc))
2730 {
2731 rc = vmdkError(pImage, rc, RT_SRC_POS, N_("VMDK: cannot get size of '%s'"), pExtent->pszFullname);
2732 goto out;
2733 }
2734 cbSize = RT_ALIGN_64(cbSize, 512);
2735 rc = vmdkFileReadAt(pExtent->pFile, cbSize - 2*512, &Header, sizeof(Header), NULL);
2736 AssertRC(rc);
2737 if (RT_FAILURE(rc))
2738 {
2739 rc = vmdkError(pImage, rc, RT_SRC_POS, N_("VMDK: error reading extent footer in '%s'"), pExtent->pszFullname);
2740 goto out;
2741 }
2742 rc = vmdkValidateHeader(pImage, pExtent, &Header);
2743 if (RT_FAILURE(rc))
2744 goto out;
2745 pExtent->fFooter = true;
2746 }
2747 pExtent->uVersion = RT_LE2H_U32(Header.version);
2748 pExtent->enmType = VMDKETYPE_HOSTED_SPARSE; /* Just dummy value, changed later. */
2749 pExtent->cSectors = RT_LE2H_U64(Header.capacity);
2750 pExtent->cSectorsPerGrain = RT_LE2H_U64(Header.grainSize);
2751 pExtent->uDescriptorSector = RT_LE2H_U64(Header.descriptorOffset);
2752 pExtent->cDescriptorSectors = RT_LE2H_U64(Header.descriptorSize);
2753 if (pExtent->uDescriptorSector && !pExtent->cDescriptorSectors)
2754 {
2755 rc = vmdkError(pImage, VERR_VD_VMDK_INVALID_HEADER, RT_SRC_POS, N_("VMDK: inconsistent embedded descriptor config in '%s'"), pExtent->pszFullname);
2756 goto out;
2757 }
2758 pExtent->cGTEntries = RT_LE2H_U32(Header.numGTEsPerGT);
2759 if (RT_LE2H_U32(Header.flags) & RT_BIT(1))
2760 {
2761 pExtent->uSectorRGD = RT_LE2H_U64(Header.rgdOffset);
2762 pExtent->uSectorGD = RT_LE2H_U64(Header.gdOffset);
2763 }
2764 else
2765 {
2766 pExtent->uSectorGD = RT_LE2H_U64(Header.gdOffset);
2767 pExtent->uSectorRGD = 0;
2768 }
2769 if (pExtent->uSectorGD == VMDK_GD_AT_END || pExtent->uSectorRGD == VMDK_GD_AT_END)
2770 {
2771 rc = vmdkError(pImage, VERR_VD_VMDK_INVALID_HEADER, RT_SRC_POS, N_("VMDK: cannot resolve grain directory offset in '%s'"), pExtent->pszFullname);
2772 goto out;
2773 }
2774 pExtent->cOverheadSectors = RT_LE2H_U64(Header.overHead);
2775 pExtent->fUncleanShutdown = !!Header.uncleanShutdown;
2776 pExtent->uCompression = RT_LE2H_U16(Header.compressAlgorithm);
2777 cSectorsPerGDE = pExtent->cGTEntries * pExtent->cSectorsPerGrain;
2778 if (!cSectorsPerGDE || cSectorsPerGDE > UINT32_MAX)
2779 {
2780 rc = vmdkError(pImage, VERR_VD_VMDK_INVALID_HEADER, RT_SRC_POS, N_("VMDK: incorrect grain directory size in '%s'"), pExtent->pszFullname);
2781 goto out;
2782 }
2783 pExtent->cSectorsPerGDE = cSectorsPerGDE;
2784 pExtent->cGDEntries = (pExtent->cSectors + cSectorsPerGDE - 1) / cSectorsPerGDE;
2785
2786 /* Fix up the number of descriptor sectors, as some flat images have
2787 * really just one, and this causes failures when inserting the UUID
2788 * values and other extra information. */
2789 if (pExtent->cDescriptorSectors != 0 && pExtent->cDescriptorSectors < 4)
2790 {
2791 /* Do it the easy way - just fix it for flat images which have no
2792 * other complicated metadata which needs space too. */
2793 if ( pExtent->uDescriptorSector + 4 < pExtent->cOverheadSectors
2794 && pExtent->cGTEntries * pExtent->cGDEntries == 0)
2795 pExtent->cDescriptorSectors = 4;
2796 }
2797
2798out:
2799 if (RT_FAILURE(rc))
2800 vmdkFreeExtentData(pImage, pExtent, false);
2801
2802 return rc;
2803}
2804
2805/**
2806 * Internal: read additional metadata belonging to an extent. For those
2807 * extents which have no additional metadata just verify the information.
2808 */
2809static int vmdkReadMetaExtent(PVMDKIMAGE pImage, PVMDKEXTENT pExtent)
2810{
2811 int rc = VINF_SUCCESS;
2812 uint64_t cbExtentSize;
2813
2814 /* The image must be a multiple of a sector in size and contain the data
2815 * area (flat images only). If not, it means the image is at least
2816 * truncated, or even seriously garbled. */
2817 rc = vmdkFileGetSize(pExtent->pFile, &cbExtentSize);
2818 if (RT_FAILURE(rc))
2819 {
2820 rc = vmdkError(pImage, rc, RT_SRC_POS, N_("VMDK: error getting size in '%s'"), pExtent->pszFullname);
2821 goto out;
2822 }
2823/* disabled the size check again as there are too many too short vmdks out there */
2824#ifdef VBOX_WITH_VMDK_STRICT_SIZE_CHECK
2825 if ( cbExtentSize != RT_ALIGN_64(cbExtentSize, 512)
2826 && (pExtent->enmType != VMDKETYPE_FLAT || pExtent->cNominalSectors + pExtent->uSectorOffset > VMDK_BYTE2SECTOR(cbExtentSize)))
2827 {
2828 rc = vmdkError(pImage, VERR_VD_VMDK_INVALID_HEADER, RT_SRC_POS, N_("VMDK: file size is not a multiple of 512 in '%s', file is truncated or otherwise garbled"), pExtent->pszFullname);
2829 goto out;
2830 }
2831#endif /* VBOX_WITH_VMDK_STRICT_SIZE_CHECK */
2832 if (pExtent->enmType != VMDKETYPE_HOSTED_SPARSE)
2833 goto out;
2834
2835 /* The spec says that this must be a power of two and greater than 8,
2836 * but probably they meant not less than 8. */
2837 if ( (pExtent->cSectorsPerGrain & (pExtent->cSectorsPerGrain - 1))
2838 || pExtent->cSectorsPerGrain < 8)
2839 {
2840 rc = vmdkError(pImage, VERR_VD_VMDK_INVALID_HEADER, RT_SRC_POS, N_("VMDK: invalid extent grain size %u in '%s'"), pExtent->cSectorsPerGrain, pExtent->pszFullname);
2841 goto out;
2842 }
2843
2844 /* This code requires that a grain table must hold a power of two multiple
2845 * of the number of entries per GT cache entry. */
2846 if ( (pExtent->cGTEntries & (pExtent->cGTEntries - 1))
2847 || pExtent->cGTEntries < VMDK_GT_CACHELINE_SIZE)
2848 {
2849 rc = vmdkError(pImage, VERR_VD_VMDK_INVALID_HEADER, RT_SRC_POS, N_("VMDK: grain table cache size problem in '%s'"), pExtent->pszFullname);
2850 goto out;
2851 }
2852
2853 rc = vmdkReadGrainDirectory(pExtent);
2854
2855out:
2856 if (RT_FAILURE(rc))
2857 vmdkFreeExtentData(pImage, pExtent, false);
2858
2859 return rc;
2860}
2861
2862/**
2863 * Internal: write/update the metadata for a sparse extent.
2864 */
2865static int vmdkWriteMetaSparseExtent(PVMDKEXTENT pExtent, uint64_t uOffset)
2866{
2867 SparseExtentHeader Header;
2868
2869 memset(&Header, '\0', sizeof(Header));
2870 Header.magicNumber = RT_H2LE_U32(VMDK_SPARSE_MAGICNUMBER);
2871 Header.version = RT_H2LE_U32(pExtent->uVersion);
2872 Header.flags = RT_H2LE_U32(RT_BIT(0));
2873 if (pExtent->pRGD)
2874 Header.flags |= RT_H2LE_U32(RT_BIT(1));
2875 if (pExtent->pImage->uImageFlags & VD_VMDK_IMAGE_FLAGS_STREAM_OPTIMIZED)
2876 Header.flags |= RT_H2LE_U32(RT_BIT(16) | RT_BIT(17));
2877 Header.capacity = RT_H2LE_U64(pExtent->cSectors);
2878 Header.grainSize = RT_H2LE_U64(pExtent->cSectorsPerGrain);
2879 Header.descriptorOffset = RT_H2LE_U64(pExtent->uDescriptorSector);
2880 Header.descriptorSize = RT_H2LE_U64(pExtent->cDescriptorSectors);
2881 Header.numGTEsPerGT = RT_H2LE_U32(pExtent->cGTEntries);
2882 if (pExtent->fFooter && uOffset == 0)
2883 {
2884 if (pExtent->pRGD)
2885 {
2886 Assert(pExtent->uSectorRGD);
2887 Header.rgdOffset = RT_H2LE_U64(VMDK_GD_AT_END);
2888 Header.gdOffset = RT_H2LE_U64(VMDK_GD_AT_END);
2889 }
2890 else
2891 {
2892 Header.gdOffset = RT_H2LE_U64(VMDK_GD_AT_END);
2893 }
2894 }
2895 else
2896 {
2897 if (pExtent->pRGD)
2898 {
2899 Assert(pExtent->uSectorRGD);
2900 Header.rgdOffset = RT_H2LE_U64(pExtent->uSectorRGD);
2901 Header.gdOffset = RT_H2LE_U64(pExtent->uSectorGD);
2902 }
2903 else
2904 {
2905 Header.gdOffset = RT_H2LE_U64(pExtent->uSectorGD);
2906 }
2907 }
2908 Header.overHead = RT_H2LE_U64(pExtent->cOverheadSectors);
2909 Header.uncleanShutdown = pExtent->fUncleanShutdown;
2910 Header.singleEndLineChar = '\n';
2911 Header.nonEndLineChar = ' ';
2912 Header.doubleEndLineChar1 = '\r';
2913 Header.doubleEndLineChar2 = '\n';
2914 Header.compressAlgorithm = RT_H2LE_U16(pExtent->uCompression);
2915
2916 int rc = vmdkFileWriteAt(pExtent->pFile, uOffset, &Header, sizeof(Header), NULL);
2917 AssertRC(rc);
2918 if (RT_FAILURE(rc))
2919 rc = vmdkError(pExtent->pImage, rc, RT_SRC_POS, N_("VMDK: error writing extent header in '%s'"), pExtent->pszFullname);
2920 return rc;
2921}
2922
2923/**
2924 * Internal: write/update the metadata for a sparse extent - async version.
2925 */
2926static int vmdkWriteMetaSparseExtentAsync(PVMDKIMAGE pImage, PVMDKEXTENT pExtent,
2927 uint64_t uOffset, PVDIOCTX pIoCtx)
2928{
2929 SparseExtentHeader Header;
2930
2931 memset(&Header, '\0', sizeof(Header));
2932 Header.magicNumber = RT_H2LE_U32(VMDK_SPARSE_MAGICNUMBER);
2933 Header.version = RT_H2LE_U32(pExtent->uVersion);
2934 Header.flags = RT_H2LE_U32(RT_BIT(0));
2935 if (pExtent->pRGD)
2936 Header.flags |= RT_H2LE_U32(RT_BIT(1));
2937 if (pExtent->pImage->uImageFlags & VD_VMDK_IMAGE_FLAGS_STREAM_OPTIMIZED)
2938 Header.flags |= RT_H2LE_U32(RT_BIT(16) | RT_BIT(17));
2939 Header.capacity = RT_H2LE_U64(pExtent->cSectors);
2940 Header.grainSize = RT_H2LE_U64(pExtent->cSectorsPerGrain);
2941 Header.descriptorOffset = RT_H2LE_U64(pExtent->uDescriptorSector);
2942 Header.descriptorSize = RT_H2LE_U64(pExtent->cDescriptorSectors);
2943 Header.numGTEsPerGT = RT_H2LE_U32(pExtent->cGTEntries);
2944 if (pExtent->fFooter && uOffset == 0)
2945 {
2946 if (pExtent->pRGD)
2947 {
2948 Assert(pExtent->uSectorRGD);
2949 Header.rgdOffset = RT_H2LE_U64(VMDK_GD_AT_END);
2950 Header.gdOffset = RT_H2LE_U64(VMDK_GD_AT_END);
2951 }
2952 else
2953 {
2954 Header.gdOffset = RT_H2LE_U64(VMDK_GD_AT_END);
2955 }
2956 }
2957 else
2958 {
2959 if (pExtent->pRGD)
2960 {
2961 Assert(pExtent->uSectorRGD);
2962 Header.rgdOffset = RT_H2LE_U64(pExtent->uSectorRGD);
2963 Header.gdOffset = RT_H2LE_U64(pExtent->uSectorGD);
2964 }
2965 else
2966 {
2967 Header.gdOffset = RT_H2LE_U64(pExtent->uSectorGD);
2968 }
2969 }
2970 Header.overHead = RT_H2LE_U64(pExtent->cOverheadSectors);
2971 Header.uncleanShutdown = pExtent->fUncleanShutdown;
2972 Header.singleEndLineChar = '\n';
2973 Header.nonEndLineChar = ' ';
2974 Header.doubleEndLineChar1 = '\r';
2975 Header.doubleEndLineChar2 = '\n';
2976 Header.compressAlgorithm = RT_H2LE_U16(pExtent->uCompression);
2977
2978 int rc = pImage->pInterfaceIOCallbacks->pfnWriteMetaAsync(pImage->pInterfaceIO->pvUser,
2979 pExtent->pFile->pStorage,
2980 uOffset, &Header, sizeof(Header),
2981 pIoCtx, NULL, NULL);
2982 if (RT_FAILURE(rc) && (rc != VERR_VD_ASYNC_IO_IN_PROGRESS))
2983 rc = vmdkError(pExtent->pImage, rc, RT_SRC_POS, N_("VMDK: error writing extent header in '%s'"), pExtent->pszFullname);
2984 return rc;
2985}
2986
2987#ifdef VBOX_WITH_VMDK_ESX
2988/**
2989 * Internal: unused code to read the metadata of a sparse ESX extent.
2990 *
2991 * Such extents never leave ESX server, so this isn't ever used.
2992 */
2993static int vmdkReadMetaESXSparseExtent(PVMDKEXTENT pExtent)
2994{
2995 COWDisk_Header Header;
2996 uint64_t cSectorsPerGDE;
2997
2998 int rc = vmdkFileReadAt(pExtent->pFile, 0, &Header, sizeof(Header), NULL);
2999 AssertRC(rc);
3000 if (RT_FAILURE(rc))
3001 goto out;
3002 if ( RT_LE2H_U32(Header.magicNumber) != VMDK_ESX_SPARSE_MAGICNUMBER
3003 || RT_LE2H_U32(Header.version) != 1
3004 || RT_LE2H_U32(Header.flags) != 3)
3005 {
3006 rc = VERR_VD_VMDK_INVALID_HEADER;
3007 goto out;
3008 }
3009 pExtent->enmType = VMDKETYPE_ESX_SPARSE;
3010 pExtent->cSectors = RT_LE2H_U32(Header.numSectors);
3011 pExtent->cSectorsPerGrain = RT_LE2H_U32(Header.grainSize);
3012 /* The spec says that this must be between 1 sector and 1MB. This code
3013 * assumes it's a power of two, so check that requirement, too. */
3014 if ( (pExtent->cSectorsPerGrain & (pExtent->cSectorsPerGrain - 1))
3015 || pExtent->cSectorsPerGrain == 0
3016 || pExtent->cSectorsPerGrain > 2048)
3017 {
3018 rc = VERR_VD_VMDK_INVALID_HEADER;
3019 goto out;
3020 }
3021 pExtent->uDescriptorSector = 0;
3022 pExtent->cDescriptorSectors = 0;
3023 pExtent->uSectorGD = RT_LE2H_U32(Header.gdOffset);
3024 pExtent->uSectorRGD = 0;
3025 pExtent->cOverheadSectors = 0;
3026 pExtent->cGTEntries = 4096;
3027 cSectorsPerGDE = pExtent->cGTEntries * pExtent->cSectorsPerGrain;
3028 if (!cSectorsPerGDE || cSectorsPerGDE > UINT32_MAX)
3029 {
3030 rc = VERR_VD_VMDK_INVALID_HEADER;
3031 goto out;
3032 }
3033 pExtent->cSectorsPerGDE = cSectorsPerGDE;
3034 pExtent->cGDEntries = (pExtent->cSectors + cSectorsPerGDE - 1) / cSectorsPerGDE;
3035 if (pExtent->cGDEntries != RT_LE2H_U32(Header.numGDEntries))
3036 {
3037 /* Inconsistency detected. Computed number of GD entries doesn't match
3038 * stored value. Better be safe than sorry. */
3039 rc = VERR_VD_VMDK_INVALID_HEADER;
3040 goto out;
3041 }
3042 pExtent->uFreeSector = RT_LE2H_U32(Header.freeSector);
3043 pExtent->fUncleanShutdown = !!Header.uncleanShutdown;
3044
3045 rc = vmdkReadGrainDirectory(pExtent);
3046
3047out:
3048 if (RT_FAILURE(rc))
3049 vmdkFreeExtentData(pImage, pExtent, false);
3050
3051 return rc;
3052}
3053#endif /* VBOX_WITH_VMDK_ESX */
3054
3055/**
3056 * Internal: free the memory used by the extent data structure, optionally
3057 * deleting the referenced files.
3058 */
3059static void vmdkFreeExtentData(PVMDKIMAGE pImage, PVMDKEXTENT pExtent,
3060 bool fDelete)
3061{
3062 vmdkFreeGrainDirectory(pExtent);
3063 if (pExtent->pDescData)
3064 {
3065 RTMemFree(pExtent->pDescData);
3066 pExtent->pDescData = NULL;
3067 }
3068 if (pExtent->pFile != NULL)
3069 {
3070 /* Do not delete raw extents, these have full and base names equal. */
3071 vmdkFileClose(pImage, &pExtent->pFile,
3072 fDelete
3073 && pExtent->pszFullname
3074 && strcmp(pExtent->pszFullname, pExtent->pszBasename));
3075 }
3076 if (pExtent->pszBasename)
3077 {
3078 RTMemTmpFree((void *)pExtent->pszBasename);
3079 pExtent->pszBasename = NULL;
3080 }
3081 if (pExtent->pszFullname)
3082 {
3083 RTStrFree((char *)(void *)pExtent->pszFullname);
3084 pExtent->pszFullname = NULL;
3085 }
3086 if (pExtent->pvGrain)
3087 {
3088 RTMemFree(pExtent->pvGrain);
3089 pExtent->pvGrain = NULL;
3090 }
3091}
3092
3093/**
3094 * Internal: allocate grain table cache if necessary for this image.
3095 */
3096static int vmdkAllocateGrainTableCache(PVMDKIMAGE pImage)
3097{
3098 PVMDKEXTENT pExtent;
3099
3100 /* Allocate grain table cache if any sparse extent is present. */
3101 for (unsigned i = 0; i < pImage->cExtents; i++)
3102 {
3103 pExtent = &pImage->pExtents[i];
3104 if ( pExtent->enmType == VMDKETYPE_HOSTED_SPARSE
3105#ifdef VBOX_WITH_VMDK_ESX
3106 || pExtent->enmType == VMDKETYPE_ESX_SPARSE
3107#endif /* VBOX_WITH_VMDK_ESX */
3108 )
3109 {
3110 /* Allocate grain table cache. */
3111 pImage->pGTCache = (PVMDKGTCACHE)RTMemAllocZ(sizeof(VMDKGTCACHE));
3112 if (!pImage->pGTCache)
3113 return VERR_NO_MEMORY;
3114 for (unsigned j = 0; j < VMDK_GT_CACHE_SIZE; j++)
3115 {
3116 PVMDKGTCACHEENTRY pGCE = &pImage->pGTCache->aGTCache[j];
3117 pGCE->uExtent = UINT32_MAX;
3118 }
3119 pImage->pGTCache->cEntries = VMDK_GT_CACHE_SIZE;
3120 break;
3121 }
3122 }
3123
3124 return VINF_SUCCESS;
3125}
3126
3127/**
3128 * Internal: allocate the given number of extents.
3129 */
3130static int vmdkCreateExtents(PVMDKIMAGE pImage, unsigned cExtents)
3131{
3132 int rc = VINF_SUCCESS;
3133 PVMDKEXTENT pExtents = (PVMDKEXTENT)RTMemAllocZ(cExtents * sizeof(VMDKEXTENT));
3134 if (pImage)
3135 {
3136 for (unsigned i = 0; i < cExtents; i++)
3137 {
3138 pExtents[i].pFile = NULL;
3139 pExtents[i].pszBasename = NULL;
3140 pExtents[i].pszFullname = NULL;
3141 pExtents[i].pGD = NULL;
3142 pExtents[i].pRGD = NULL;
3143 pExtents[i].pDescData = NULL;
3144 pExtents[i].uVersion = 1;
3145 pExtents[i].uCompression = VMDK_COMPRESSION_NONE;
3146 pExtents[i].uExtent = i;
3147 pExtents[i].pImage = pImage;
3148 }
3149 pImage->pExtents = pExtents;
3150 pImage->cExtents = cExtents;
3151 }
3152 else
3153 rc = VERR_NO_MEMORY;
3154
3155 return rc;
3156}
3157
3158/**
3159 * Internal: Open an image, constructing all necessary data structures.
3160 */
3161static int vmdkOpenImage(PVMDKIMAGE pImage, unsigned uOpenFlags)
3162{
3163 int rc;
3164 uint32_t u32Magic;
3165 PVMDKFILE pFile;
3166 PVMDKEXTENT pExtent;
3167
3168 pImage->uOpenFlags = uOpenFlags;
3169
3170 /* Try to get error interface. */
3171 pImage->pInterfaceError = VDInterfaceGet(pImage->pVDIfsDisk, VDINTERFACETYPE_ERROR);
3172 if (pImage->pInterfaceError)
3173 pImage->pInterfaceErrorCallbacks = VDGetInterfaceError(pImage->pInterfaceError);
3174
3175 /* Try to get async I/O interface. */
3176 pImage->pInterfaceIO = VDInterfaceGet(pImage->pVDIfsImage, VDINTERFACETYPE_IO);
3177 if (pImage->pInterfaceIO)
3178 pImage->pInterfaceIOCallbacks = VDGetInterfaceIO(pImage->pInterfaceIO);
3179
3180 /*
3181 * Open the image.
3182 * We don't have to check for asynchronous access because
3183 * we only support raw access and the opened file is a description
3184 * file were no data is stored.
3185 */
3186 rc = vmdkFileOpen(pImage, &pFile, pImage->pszFilename,
3187 uOpenFlags & VD_OPEN_FLAGS_READONLY
3188 ? RTFILE_O_READ | RTFILE_O_OPEN | RTFILE_O_DENY_NONE
3189 : RTFILE_O_READWRITE | RTFILE_O_OPEN | RTFILE_O_DENY_WRITE, false);
3190 if (RT_FAILURE(rc))
3191 {
3192 /* Do NOT signal an appropriate error here, as the VD layer has the
3193 * choice of retrying the open if it failed. */
3194 goto out;
3195 }
3196 pImage->pFile = pFile;
3197
3198 /* Read magic (if present). */
3199 rc = vmdkFileReadAt(pFile, 0, &u32Magic, sizeof(u32Magic), NULL);
3200 if (RT_FAILURE(rc))
3201 {
3202 rc = vmdkError(pImage, rc, RT_SRC_POS, N_("VMDK: error reading the magic number in '%s'"), pImage->pszFilename);
3203 goto out;
3204 }
3205
3206 /* Handle the file according to its magic number. */
3207 if (RT_LE2H_U32(u32Magic) == VMDK_SPARSE_MAGICNUMBER)
3208 {
3209 /* It's a hosted single-extent image. */
3210 rc = vmdkCreateExtents(pImage, 1);
3211 if (RT_FAILURE(rc))
3212 goto out;
3213 /* The opened file is passed to the extent. No separate descriptor
3214 * file, so no need to keep anything open for the image. */
3215 pExtent = &pImage->pExtents[0];
3216 pExtent->pFile = pFile;
3217 pImage->pFile = NULL;
3218 pExtent->pszFullname = RTPathAbsDup(pImage->pszFilename);
3219 if (!pExtent->pszFullname)
3220 {
3221 rc = VERR_NO_MEMORY;
3222 goto out;
3223 }
3224 rc = vmdkReadBinaryMetaExtent(pImage, pExtent);
3225 if (RT_FAILURE(rc))
3226 goto out;
3227
3228 /* As we're dealing with a monolithic image here, there must
3229 * be a descriptor embedded in the image file. */
3230 if (!pExtent->uDescriptorSector || !pExtent->cDescriptorSectors)
3231 {
3232 rc = vmdkError(pImage, VERR_VD_VMDK_INVALID_HEADER, RT_SRC_POS, N_("VMDK: monolithic image without descriptor in '%s'"), pImage->pszFilename);
3233 goto out;
3234 }
3235 /* HACK: extend the descriptor if it is unusually small and it fits in
3236 * the unused space after the image header. Allows opening VMDK files
3237 * with extremely small descriptor in read/write mode. */
3238 if ( !(pImage->uOpenFlags & VD_OPEN_FLAGS_READONLY)
3239 && pExtent->cDescriptorSectors < 3
3240 && (int64_t)pExtent->uSectorGD - pExtent->uDescriptorSector >= 4
3241 && (!pExtent->uSectorRGD || (int64_t)pExtent->uSectorRGD - pExtent->uDescriptorSector >= 4))
3242 {
3243 pExtent->cDescriptorSectors = 4;
3244 pExtent->fMetaDirty = true;
3245 }
3246 /* Read the descriptor from the extent. */
3247 pExtent->pDescData = (char *)RTMemAllocZ(VMDK_SECTOR2BYTE(pExtent->cDescriptorSectors));
3248 if (!pExtent->pDescData)
3249 {
3250 rc = VERR_NO_MEMORY;
3251 goto out;
3252 }
3253 rc = vmdkFileReadAt(pExtent->pFile,
3254 VMDK_SECTOR2BYTE(pExtent->uDescriptorSector),
3255 pExtent->pDescData,
3256 VMDK_SECTOR2BYTE(pExtent->cDescriptorSectors), NULL);
3257 AssertRC(rc);
3258 if (RT_FAILURE(rc))
3259 {
3260 rc = vmdkError(pImage, rc, RT_SRC_POS, N_("VMDK: read error for descriptor in '%s'"), pExtent->pszFullname);
3261 goto out;
3262 }
3263
3264 rc = vmdkParseDescriptor(pImage, pExtent->pDescData,
3265 VMDK_SECTOR2BYTE(pExtent->cDescriptorSectors));
3266 if (RT_FAILURE(rc))
3267 goto out;
3268
3269 rc = vmdkReadMetaExtent(pImage, pExtent);
3270 if (RT_FAILURE(rc))
3271 goto out;
3272
3273 /* Mark the extent as unclean if opened in read-write mode. */
3274 if (!(uOpenFlags & VD_OPEN_FLAGS_READONLY))
3275 {
3276 pExtent->fUncleanShutdown = true;
3277 pExtent->fMetaDirty = true;
3278 }
3279 }
3280 else
3281 {
3282 /* Allocate at least 10K, and make sure that there is 5K free space
3283 * in case new entries need to be added to the descriptor. Never
3284 * alocate more than 128K, because that's no valid descriptor file
3285 * and will result in the correct "truncated read" error handling. */
3286 uint64_t cbFileSize;
3287 rc = vmdkFileGetSize(pFile, &cbFileSize);
3288 if (RT_FAILURE(rc))
3289 goto out;
3290
3291 uint64_t cbSize = cbFileSize;
3292 if (cbSize % VMDK_SECTOR2BYTE(10))
3293 cbSize += VMDK_SECTOR2BYTE(20) - cbSize % VMDK_SECTOR2BYTE(10);
3294 else
3295 cbSize += VMDK_SECTOR2BYTE(10);
3296 cbSize = RT_MIN(cbSize, _128K);
3297 pImage->cbDescAlloc = RT_MAX(VMDK_SECTOR2BYTE(20), cbSize);
3298 pImage->pDescData = (char *)RTMemAllocZ(pImage->cbDescAlloc);
3299 if (!pImage->pDescData)
3300 {
3301 rc = VERR_NO_MEMORY;
3302 goto out;
3303 }
3304
3305 size_t cbRead;
3306 rc = vmdkFileReadAt(pImage->pFile, 0, pImage->pDescData,
3307 RT_MIN(pImage->cbDescAlloc, cbFileSize),
3308 &cbRead);
3309 if (RT_FAILURE(rc))
3310 {
3311 rc = vmdkError(pImage, rc, RT_SRC_POS, N_("VMDK: read error for descriptor in '%s'"), pImage->pszFilename);
3312 goto out;
3313 }
3314 if (cbRead == pImage->cbDescAlloc)
3315 {
3316 /* Likely the read is truncated. Better fail a bit too early
3317 * (normally the descriptor is much smaller than our buffer). */
3318 rc = vmdkError(pImage, VERR_VD_VMDK_INVALID_HEADER, RT_SRC_POS, N_("VMDK: cannot read descriptor in '%s'"), pImage->pszFilename);
3319 goto out;
3320 }
3321
3322 rc = vmdkParseDescriptor(pImage, pImage->pDescData,
3323 pImage->cbDescAlloc);
3324 if (RT_FAILURE(rc))
3325 goto out;
3326
3327 /*
3328 * We have to check for the asynchronous open flag. The
3329 * extents are parsed and the type of all are known now.
3330 * Check if every extent is either FLAT or ZERO.
3331 */
3332 if (uOpenFlags & VD_OPEN_FLAGS_ASYNC_IO)
3333 {
3334 unsigned cFlatExtents = 0;
3335
3336 for (unsigned i = 0; i < pImage->cExtents; i++)
3337 {
3338 pExtent = &pImage->pExtents[i];
3339
3340 if (( pExtent->enmType != VMDKETYPE_FLAT
3341 && pExtent->enmType != VMDKETYPE_ZERO
3342 && pExtent->enmType != VMDKETYPE_VMFS)
3343 || ((pImage->pExtents[i].enmType == VMDKETYPE_FLAT) && (cFlatExtents > 0)))
3344 {
3345 /*
3346 * Opened image contains at least one none flat or zero extent.
3347 * Return error but don't set error message as the caller
3348 * has the chance to open in non async I/O mode.
3349 */
3350 rc = VERR_NOT_SUPPORTED;
3351 goto out;
3352 }
3353 if (pExtent->enmType == VMDKETYPE_FLAT)
3354 cFlatExtents++;
3355 }
3356 }
3357
3358 for (unsigned i = 0; i < pImage->cExtents; i++)
3359 {
3360 pExtent = &pImage->pExtents[i];
3361
3362 if (pExtent->pszBasename)
3363 {
3364 /* Hack to figure out whether the specified name in the
3365 * extent descriptor is absolute. Doesn't always work, but
3366 * should be good enough for now. */
3367 char *pszFullname;
3368 /** @todo implement proper path absolute check. */
3369 if (pExtent->pszBasename[0] == RTPATH_SLASH)
3370 {
3371 pszFullname = RTStrDup(pExtent->pszBasename);
3372 if (!pszFullname)
3373 {
3374 rc = VERR_NO_MEMORY;
3375 goto out;
3376 }
3377 }
3378 else
3379 {
3380 size_t cbDirname;
3381 char *pszDirname = RTStrDup(pImage->pszFilename);
3382 if (!pszDirname)
3383 {
3384 rc = VERR_NO_MEMORY;
3385 goto out;
3386 }
3387 RTPathStripFilename(pszDirname);
3388 cbDirname = strlen(pszDirname);
3389 rc = RTStrAPrintf(&pszFullname, "%s%c%s", pszDirname,
3390 RTPATH_SLASH, pExtent->pszBasename);
3391 RTStrFree(pszDirname);
3392 if (RT_FAILURE(rc))
3393 goto out;
3394 }
3395 pExtent->pszFullname = pszFullname;
3396 }
3397 else
3398 pExtent->pszFullname = NULL;
3399
3400 switch (pExtent->enmType)
3401 {
3402 case VMDKETYPE_HOSTED_SPARSE:
3403 rc = vmdkFileOpen(pImage, &pExtent->pFile, pExtent->pszFullname,
3404 uOpenFlags & VD_OPEN_FLAGS_READONLY
3405 ? RTFILE_O_READ | RTFILE_O_OPEN | RTFILE_O_DENY_NONE
3406 : RTFILE_O_READWRITE | RTFILE_O_OPEN | RTFILE_O_DENY_WRITE, false);
3407 if (RT_FAILURE(rc))
3408 {
3409 /* Do NOT signal an appropriate error here, as the VD
3410 * layer has the choice of retrying the open if it
3411 * failed. */
3412 goto out;
3413 }
3414 rc = vmdkReadBinaryMetaExtent(pImage, pExtent);
3415 if (RT_FAILURE(rc))
3416 goto out;
3417 rc = vmdkReadMetaExtent(pImage, pExtent);
3418 if (RT_FAILURE(rc))
3419 goto out;
3420
3421 /* Mark extent as unclean if opened in read-write mode. */
3422 if (!(uOpenFlags & VD_OPEN_FLAGS_READONLY))
3423 {
3424 pExtent->fUncleanShutdown = true;
3425 pExtent->fMetaDirty = true;
3426 }
3427 break;
3428 case VMDKETYPE_VMFS:
3429 case VMDKETYPE_FLAT:
3430 rc = vmdkFileOpen(pImage, &pExtent->pFile, pExtent->pszFullname,
3431 uOpenFlags & VD_OPEN_FLAGS_READONLY
3432 ? RTFILE_O_READ | RTFILE_O_OPEN | RTFILE_O_DENY_NONE
3433 : RTFILE_O_READWRITE | RTFILE_O_OPEN | RTFILE_O_DENY_WRITE, true);
3434 if (RT_FAILURE(rc))
3435 {
3436 /* Do NOT signal an appropriate error here, as the VD
3437 * layer has the choice of retrying the open if it
3438 * failed. */
3439 goto out;
3440 }
3441 break;
3442 case VMDKETYPE_ZERO:
3443 /* Nothing to do. */
3444 break;
3445 default:
3446 AssertMsgFailed(("unknown vmdk extent type %d\n", pExtent->enmType));
3447 }
3448 }
3449 }
3450
3451 /* Make sure this is not reached accidentally with an error status. */
3452 AssertRC(rc);
3453
3454 /* Determine PCHS geometry if not set. */
3455 if (pImage->PCHSGeometry.cCylinders == 0)
3456 {
3457 uint64_t cCylinders = VMDK_BYTE2SECTOR(pImage->cbSize)
3458 / pImage->PCHSGeometry.cHeads
3459 / pImage->PCHSGeometry.cSectors;
3460 pImage->PCHSGeometry.cCylinders = (unsigned)RT_MIN(cCylinders, 16383);
3461 if (!(pImage->uOpenFlags & VD_OPEN_FLAGS_READONLY))
3462 {
3463 rc = vmdkDescSetPCHSGeometry(pImage, &pImage->PCHSGeometry);
3464 AssertRC(rc);
3465 }
3466 }
3467
3468 /* Update the image metadata now in case has changed. */
3469 rc = vmdkFlushImage(pImage);
3470 if (RT_FAILURE(rc))
3471 goto out;
3472
3473 /* Figure out a few per-image constants from the extents. */
3474 pImage->cbSize = 0;
3475 for (unsigned i = 0; i < pImage->cExtents; i++)
3476 {
3477 pExtent = &pImage->pExtents[i];
3478 if ( pExtent->enmType == VMDKETYPE_HOSTED_SPARSE
3479#ifdef VBOX_WITH_VMDK_ESX
3480 || pExtent->enmType == VMDKETYPE_ESX_SPARSE
3481#endif /* VBOX_WITH_VMDK_ESX */
3482 )
3483 {
3484 /* Here used to be a check whether the nominal size of an extent
3485 * is a multiple of the grain size. The spec says that this is
3486 * always the case, but unfortunately some files out there in the
3487 * wild violate the spec (e.g. ReactOS 0.3.1). */
3488 }
3489 pImage->cbSize += VMDK_SECTOR2BYTE(pExtent->cNominalSectors);
3490 }
3491
3492 for (unsigned i = 0; i < pImage->cExtents; i++)
3493 {
3494 pExtent = &pImage->pExtents[i];
3495 if ( pImage->pExtents[i].enmType == VMDKETYPE_FLAT
3496 || pImage->pExtents[i].enmType == VMDKETYPE_ZERO)
3497 {
3498 pImage->uImageFlags |= VD_IMAGE_FLAGS_FIXED;
3499 break;
3500 }
3501 }
3502
3503 rc = vmdkAllocateGrainTableCache(pImage);
3504 if (RT_FAILURE(rc))
3505 goto out;
3506
3507out:
3508 if (RT_FAILURE(rc))
3509 vmdkFreeImage(pImage, false);
3510 return rc;
3511}
3512
3513/**
3514 * Internal: create VMDK images for raw disk/partition access.
3515 */
3516static int vmdkCreateRawImage(PVMDKIMAGE pImage, const PVBOXHDDRAW pRaw,
3517 uint64_t cbSize)
3518{
3519 int rc = VINF_SUCCESS;
3520 PVMDKEXTENT pExtent;
3521
3522 if (pRaw->fRawDisk)
3523 {
3524 /* Full raw disk access. This requires setting up a descriptor
3525 * file and open the (flat) raw disk. */
3526 rc = vmdkCreateExtents(pImage, 1);
3527 if (RT_FAILURE(rc))
3528 return vmdkError(pImage, rc, RT_SRC_POS, N_("VMDK: could not create new extent list in '%s'"), pImage->pszFilename);
3529 pExtent = &pImage->pExtents[0];
3530 /* Create raw disk descriptor file. */
3531 rc = vmdkFileOpen(pImage, &pImage->pFile, pImage->pszFilename,
3532 RTFILE_O_READWRITE | RTFILE_O_CREATE | RTFILE_O_DENY_WRITE | RTFILE_O_NOT_CONTENT_INDEXED,
3533 false);
3534 if (RT_FAILURE(rc))
3535 return vmdkError(pImage, rc, RT_SRC_POS, N_("VMDK: could not create new file '%s'"), pImage->pszFilename);
3536
3537 /* Set up basename for extent description. Cannot use StrDup. */
3538 size_t cbBasename = strlen(pRaw->pszRawDisk) + 1;
3539 char *pszBasename = (char *)RTMemTmpAlloc(cbBasename);
3540 if (!pszBasename)
3541 return VERR_NO_MEMORY;
3542 memcpy(pszBasename, pRaw->pszRawDisk, cbBasename);
3543 pExtent->pszBasename = pszBasename;
3544 /* For raw disks the full name is identical to the base name. */
3545 pExtent->pszFullname = RTStrDup(pszBasename);
3546 if (!pExtent->pszFullname)
3547 return VERR_NO_MEMORY;
3548 pExtent->enmType = VMDKETYPE_FLAT;
3549 pExtent->cNominalSectors = VMDK_BYTE2SECTOR(cbSize);
3550 pExtent->uSectorOffset = 0;
3551 pExtent->enmAccess = VMDKACCESS_READWRITE;
3552 pExtent->fMetaDirty = false;
3553
3554 /* Open flat image, the raw disk. */
3555 rc = vmdkFileOpen(pImage, &pExtent->pFile, pExtent->pszFullname,
3556 RTFILE_O_READWRITE | RTFILE_O_OPEN | RTFILE_O_DENY_WRITE, false);
3557 if (RT_FAILURE(rc))
3558 return vmdkError(pImage, rc, RT_SRC_POS, N_("VMDK: could not open raw disk file '%s'"), pExtent->pszFullname);
3559 }
3560 else
3561 {
3562 /* Raw partition access. This requires setting up a descriptor
3563 * file, write the partition information to a flat extent and
3564 * open all the (flat) raw disk partitions. */
3565
3566 /* First pass over the partition data areas to determine how many
3567 * extents we need. One data area can require up to 2 extents, as
3568 * it might be necessary to skip over unpartitioned space. */
3569 unsigned cExtents = 0;
3570 uint64_t uStart = 0;
3571 for (unsigned i = 0; i < pRaw->cPartDescs; i++)
3572 {
3573 PVBOXHDDRAWPARTDESC pPart = &pRaw->pPartDescs[i];
3574 if (uStart > pPart->uStart)
3575 return vmdkError(pImage, VERR_INVALID_PARAMETER, RT_SRC_POS, N_("VMDK: incorrect partition data area ordering set up by the caller in '%s'"), pImage->pszFilename);
3576
3577 if (uStart < pPart->uStart)
3578 cExtents++;
3579 uStart = pPart->uStart + pPart->cbData;
3580 cExtents++;
3581 }
3582 /* Another extent for filling up the rest of the image. */
3583 if (uStart != cbSize)
3584 cExtents++;
3585
3586 rc = vmdkCreateExtents(pImage, cExtents);
3587 if (RT_FAILURE(rc))
3588 return vmdkError(pImage, rc, RT_SRC_POS, N_("VMDK: could not create new extent list in '%s'"), pImage->pszFilename);
3589
3590 /* Create raw partition descriptor file. */
3591 rc = vmdkFileOpen(pImage, &pImage->pFile, pImage->pszFilename,
3592 RTFILE_O_READWRITE | RTFILE_O_CREATE | RTFILE_O_DENY_WRITE | RTFILE_O_NOT_CONTENT_INDEXED,
3593 false);
3594 if (RT_FAILURE(rc))
3595 return vmdkError(pImage, rc, RT_SRC_POS, N_("VMDK: could not create new file '%s'"), pImage->pszFilename);
3596
3597 /* Create base filename for the partition table extent. */
3598 /** @todo remove fixed buffer without creating memory leaks. */
3599 char pszPartition[1024];
3600 const char *pszBase = RTPathFilename(pImage->pszFilename);
3601 const char *pszExt = RTPathExt(pszBase);
3602 if (pszExt == NULL)
3603 return vmdkError(pImage, rc, RT_SRC_POS, N_("VMDK: invalid filename '%s'"), pImage->pszFilename);
3604 char *pszBaseBase = RTStrDup(pszBase);
3605 if (!pszBaseBase)
3606 return VERR_NO_MEMORY;
3607 RTPathStripExt(pszBaseBase);
3608 RTStrPrintf(pszPartition, sizeof(pszPartition), "%s-pt%s",
3609 pszBaseBase, pszExt);
3610 RTStrFree(pszBaseBase);
3611
3612 /* Second pass over the partitions, now define all extents. */
3613 uint64_t uPartOffset = 0;
3614 cExtents = 0;
3615 uStart = 0;
3616 for (unsigned i = 0; i < pRaw->cPartDescs; i++)
3617 {
3618 PVBOXHDDRAWPARTDESC pPart = &pRaw->pPartDescs[i];
3619 pExtent = &pImage->pExtents[cExtents++];
3620
3621 if (uStart < pPart->uStart)
3622 {
3623 pExtent->pszBasename = NULL;
3624 pExtent->pszFullname = NULL;
3625 pExtent->enmType = VMDKETYPE_ZERO;
3626 pExtent->cNominalSectors = VMDK_BYTE2SECTOR(pPart->uStart - uStart);
3627 pExtent->uSectorOffset = 0;
3628 pExtent->enmAccess = VMDKACCESS_READWRITE;
3629 pExtent->fMetaDirty = false;
3630 /* go to next extent */
3631 pExtent = &pImage->pExtents[cExtents++];
3632 }
3633 uStart = pPart->uStart + pPart->cbData;
3634
3635 if (pPart->pvPartitionData)
3636 {
3637 /* Set up basename for extent description. Can't use StrDup. */
3638 size_t cbBasename = strlen(pszPartition) + 1;
3639 char *pszBasename = (char *)RTMemTmpAlloc(cbBasename);
3640 if (!pszBasename)
3641 return VERR_NO_MEMORY;
3642 memcpy(pszBasename, pszPartition, cbBasename);
3643 pExtent->pszBasename = pszBasename;
3644
3645 /* Set up full name for partition extent. */
3646 size_t cbDirname;
3647 char *pszDirname = RTStrDup(pImage->pszFilename);
3648 if (!pszDirname)
3649 return VERR_NO_MEMORY;
3650 RTPathStripFilename(pszDirname);
3651 cbDirname = strlen(pszDirname);
3652 char *pszFullname;
3653 rc = RTStrAPrintf(&pszFullname, "%s%c%s", pszDirname,
3654 RTPATH_SLASH, pExtent->pszBasename);
3655 RTStrFree(pszDirname);
3656 if (RT_FAILURE(rc))
3657 return rc;
3658 pExtent->pszFullname = pszFullname;
3659 pExtent->enmType = VMDKETYPE_FLAT;
3660 pExtent->cNominalSectors = VMDK_BYTE2SECTOR(pPart->cbData);
3661 pExtent->uSectorOffset = uPartOffset;
3662 pExtent->enmAccess = VMDKACCESS_READWRITE;
3663 pExtent->fMetaDirty = false;
3664
3665 /* Create partition table flat image. */
3666 rc = vmdkFileOpen(pImage, &pExtent->pFile, pExtent->pszFullname,
3667 RTFILE_O_READWRITE | RTFILE_O_CREATE | RTFILE_O_DENY_WRITE | RTFILE_O_NOT_CONTENT_INDEXED,
3668 false);
3669 if (RT_FAILURE(rc))
3670 return vmdkError(pImage, rc, RT_SRC_POS, N_("VMDK: could not create new partition data file '%s'"), pExtent->pszFullname);
3671 rc = vmdkFileWriteAt(pExtent->pFile,
3672 VMDK_SECTOR2BYTE(uPartOffset),
3673 pPart->pvPartitionData,
3674 pPart->cbData, NULL);
3675 if (RT_FAILURE(rc))
3676 return vmdkError(pImage, rc, RT_SRC_POS, N_("VMDK: could not write partition data to '%s'"), pExtent->pszFullname);
3677 uPartOffset += VMDK_BYTE2SECTOR(pPart->cbData);
3678 }
3679 else
3680 {
3681 if (pPart->pszRawDevice)
3682 {
3683 /* Set up basename for extent descr. Can't use StrDup. */
3684 size_t cbBasename = strlen(pPart->pszRawDevice) + 1;
3685 char *pszBasename = (char *)RTMemTmpAlloc(cbBasename);
3686 if (!pszBasename)
3687 return VERR_NO_MEMORY;
3688 memcpy(pszBasename, pPart->pszRawDevice, cbBasename);
3689 pExtent->pszBasename = pszBasename;
3690 /* For raw disks full name is identical to base name. */
3691 pExtent->pszFullname = RTStrDup(pszBasename);
3692 if (!pExtent->pszFullname)
3693 return VERR_NO_MEMORY;
3694 pExtent->enmType = VMDKETYPE_FLAT;
3695 pExtent->cNominalSectors = VMDK_BYTE2SECTOR(pPart->cbData);
3696 pExtent->uSectorOffset = VMDK_BYTE2SECTOR(pPart->uStartOffset);
3697 pExtent->enmAccess = VMDKACCESS_READWRITE;
3698 pExtent->fMetaDirty = false;
3699
3700 /* Open flat image, the raw partition. */
3701 rc = vmdkFileOpen(pImage, &pExtent->pFile, pExtent->pszFullname,
3702 RTFILE_O_READWRITE | RTFILE_O_OPEN | RTFILE_O_DENY_WRITE,
3703 false);
3704 if (RT_FAILURE(rc))
3705 return vmdkError(pImage, rc, RT_SRC_POS, N_("VMDK: could not open raw partition file '%s'"), pExtent->pszFullname);
3706 }
3707 else
3708 {
3709 pExtent->pszBasename = NULL;
3710 pExtent->pszFullname = NULL;
3711 pExtent->enmType = VMDKETYPE_ZERO;
3712 pExtent->cNominalSectors = VMDK_BYTE2SECTOR(pPart->cbData);
3713 pExtent->uSectorOffset = 0;
3714 pExtent->enmAccess = VMDKACCESS_READWRITE;
3715 pExtent->fMetaDirty = false;
3716 }
3717 }
3718 }
3719 /* Another extent for filling up the rest of the image. */
3720 if (uStart != cbSize)
3721 {
3722 pExtent = &pImage->pExtents[cExtents++];
3723 pExtent->pszBasename = NULL;
3724 pExtent->pszFullname = NULL;
3725 pExtent->enmType = VMDKETYPE_ZERO;
3726 pExtent->cNominalSectors = VMDK_BYTE2SECTOR(cbSize - uStart);
3727 pExtent->uSectorOffset = 0;
3728 pExtent->enmAccess = VMDKACCESS_READWRITE;
3729 pExtent->fMetaDirty = false;
3730 }
3731 }
3732
3733 rc = vmdkDescBaseSetStr(pImage, &pImage->Descriptor, "createType",
3734 pRaw->fRawDisk ?
3735 "fullDevice" : "partitionedDevice");
3736 if (RT_FAILURE(rc))
3737 return vmdkError(pImage, rc, RT_SRC_POS, N_("VMDK: could not set the image type in '%s'"), pImage->pszFilename);
3738 return rc;
3739}
3740
3741/**
3742 * Internal: create a regular (i.e. file-backed) VMDK image.
3743 */
3744static int vmdkCreateRegularImage(PVMDKIMAGE pImage, uint64_t cbSize,
3745 unsigned uImageFlags,
3746 PFNVDPROGRESS pfnProgress, void *pvUser,
3747 unsigned uPercentStart, unsigned uPercentSpan)
3748{
3749 int rc = VINF_SUCCESS;
3750 unsigned cExtents = 1;
3751 uint64_t cbOffset = 0;
3752 uint64_t cbRemaining = cbSize;
3753
3754 if (uImageFlags & VD_VMDK_IMAGE_FLAGS_SPLIT_2G)
3755 {
3756 cExtents = cbSize / VMDK_2G_SPLIT_SIZE;
3757 /* Do proper extent computation: need one smaller extent if the total
3758 * size isn't evenly divisible by the split size. */
3759 if (cbSize % VMDK_2G_SPLIT_SIZE)
3760 cExtents++;
3761 }
3762 rc = vmdkCreateExtents(pImage, cExtents);
3763 if (RT_FAILURE(rc))
3764 return vmdkError(pImage, rc, RT_SRC_POS, N_("VMDK: could not create new extent list in '%s'"), pImage->pszFilename);
3765
3766 /* Basename strings needed for constructing the extent names. */
3767 char *pszBasenameSubstr = RTPathFilename(pImage->pszFilename);
3768 AssertPtr(pszBasenameSubstr);
3769 size_t cbBasenameSubstr = strlen(pszBasenameSubstr) + 1;
3770
3771 /* Create searate descriptor file if necessary. */
3772 if (cExtents != 1 || (uImageFlags & VD_IMAGE_FLAGS_FIXED))
3773 {
3774 rc = vmdkFileOpen(pImage, &pImage->pFile, pImage->pszFilename,
3775 RTFILE_O_READWRITE | RTFILE_O_CREATE | RTFILE_O_DENY_WRITE | RTFILE_O_NOT_CONTENT_INDEXED,
3776 false);
3777 if (RT_FAILURE(rc))
3778 return vmdkError(pImage, rc, RT_SRC_POS, N_("VMDK: could not create new sparse descriptor file '%s'"), pImage->pszFilename);
3779 }
3780 else
3781 pImage->pFile = NULL;
3782
3783 /* Set up all extents. */
3784 for (unsigned i = 0; i < cExtents; i++)
3785 {
3786 PVMDKEXTENT pExtent = &pImage->pExtents[i];
3787 uint64_t cbExtent = cbRemaining;
3788
3789 /* Set up fullname/basename for extent description. Cannot use StrDup
3790 * for basename, as it is not guaranteed that the memory can be freed
3791 * with RTMemTmpFree, which must be used as in other code paths
3792 * StrDup is not usable. */
3793 if (cExtents == 1 && !(uImageFlags & VD_IMAGE_FLAGS_FIXED))
3794 {
3795 char *pszBasename = (char *)RTMemTmpAlloc(cbBasenameSubstr);
3796 if (!pszBasename)
3797 return VERR_NO_MEMORY;
3798 memcpy(pszBasename, pszBasenameSubstr, cbBasenameSubstr);
3799 pExtent->pszBasename = pszBasename;
3800 }
3801 else
3802 {
3803 char *pszBasenameExt = RTPathExt(pszBasenameSubstr);
3804 char *pszBasenameBase = RTStrDup(pszBasenameSubstr);
3805 RTPathStripExt(pszBasenameBase);
3806 char *pszTmp;
3807 size_t cbTmp;
3808 if (uImageFlags & VD_IMAGE_FLAGS_FIXED)
3809 {
3810 if (cExtents == 1)
3811 rc = RTStrAPrintf(&pszTmp, "%s-flat%s", pszBasenameBase,
3812 pszBasenameExt);
3813 else
3814 rc = RTStrAPrintf(&pszTmp, "%s-f%03d%s", pszBasenameBase,
3815 i+1, pszBasenameExt);
3816 }
3817 else
3818 rc = RTStrAPrintf(&pszTmp, "%s-s%03d%s", pszBasenameBase, i+1,
3819 pszBasenameExt);
3820 RTStrFree(pszBasenameBase);
3821 if (RT_FAILURE(rc))
3822 return rc;
3823 cbTmp = strlen(pszTmp) + 1;
3824 char *pszBasename = (char *)RTMemTmpAlloc(cbTmp);
3825 if (!pszBasename)
3826 return VERR_NO_MEMORY;
3827 memcpy(pszBasename, pszTmp, cbTmp);
3828 RTStrFree(pszTmp);
3829 pExtent->pszBasename = pszBasename;
3830 if (uImageFlags & VD_VMDK_IMAGE_FLAGS_SPLIT_2G)
3831 cbExtent = RT_MIN(cbRemaining, VMDK_2G_SPLIT_SIZE);
3832 }
3833 char *pszBasedirectory = RTStrDup(pImage->pszFilename);
3834 RTPathStripFilename(pszBasedirectory);
3835 char *pszFullname;
3836 rc = RTStrAPrintf(&pszFullname, "%s%c%s", pszBasedirectory,
3837 RTPATH_SLASH, pExtent->pszBasename);
3838 RTStrFree(pszBasedirectory);
3839 if (RT_FAILURE(rc))
3840 return rc;
3841 pExtent->pszFullname = pszFullname;
3842
3843 /* Create file for extent. */
3844 rc = vmdkFileOpen(pImage, &pExtent->pFile, pExtent->pszFullname,
3845 RTFILE_O_READWRITE | RTFILE_O_CREATE | RTFILE_O_DENY_WRITE | RTFILE_O_NOT_CONTENT_INDEXED,
3846 false);
3847 if (RT_FAILURE(rc))
3848 return vmdkError(pImage, rc, RT_SRC_POS, N_("VMDK: could not create new file '%s'"), pExtent->pszFullname);
3849 if (uImageFlags & VD_IMAGE_FLAGS_FIXED)
3850 {
3851 rc = vmdkFileSetSize(pExtent->pFile, cbExtent);
3852 if (RT_FAILURE(rc))
3853 return vmdkError(pImage, rc, RT_SRC_POS, N_("VMDK: could not set size of new file '%s'"), pExtent->pszFullname);
3854
3855 /* Fill image with zeroes. We do this for every fixed-size image since on some systems
3856 * (for example Windows Vista), it takes ages to write a block near the end of a sparse
3857 * file and the guest could complain about an ATA timeout. */
3858
3859 /** @todo Starting with Linux 2.6.23, there is an fallocate() system call.
3860 * Currently supported file systems are ext4 and ocfs2. */
3861
3862 /* Allocate a temporary zero-filled buffer. Use a bigger block size to optimize writing */
3863 const size_t cbBuf = 128 * _1K;
3864 void *pvBuf = RTMemTmpAllocZ(cbBuf);
3865 if (!pvBuf)
3866 return VERR_NO_MEMORY;
3867
3868 uint64_t uOff = 0;
3869 /* Write data to all image blocks. */
3870 while (uOff < cbExtent)
3871 {
3872 unsigned cbChunk = (unsigned)RT_MIN(cbExtent, cbBuf);
3873
3874 rc = vmdkFileWriteAt(pExtent->pFile, uOff, pvBuf, cbChunk, NULL);
3875 if (RT_FAILURE(rc))
3876 {
3877 RTMemFree(pvBuf);
3878 return vmdkError(pImage, rc, RT_SRC_POS, N_("VMDK: writing block failed for '%s'"), pImage->pszFilename);
3879 }
3880
3881 uOff += cbChunk;
3882
3883 if (pfnProgress)
3884 {
3885 rc = pfnProgress(pvUser,
3886 uPercentStart + uOff * uPercentSpan / cbExtent);
3887 if (RT_FAILURE(rc))
3888 {
3889 RTMemFree(pvBuf);
3890 return rc;
3891 }
3892 }
3893 }
3894 RTMemTmpFree(pvBuf);
3895 }
3896
3897 /* Place descriptor file information (where integrated). */
3898 if (cExtents == 1 && !(uImageFlags & VD_IMAGE_FLAGS_FIXED))
3899 {
3900 pExtent->uDescriptorSector = 1;
3901 pExtent->cDescriptorSectors = VMDK_BYTE2SECTOR(pImage->cbDescAlloc);
3902 /* The descriptor is part of the (only) extent. */
3903 pExtent->pDescData = pImage->pDescData;
3904 pImage->pDescData = NULL;
3905 }
3906
3907 if (!(uImageFlags & VD_IMAGE_FLAGS_FIXED))
3908 {
3909 uint64_t cSectorsPerGDE, cSectorsPerGD;
3910 pExtent->enmType = VMDKETYPE_HOSTED_SPARSE;
3911 pExtent->cSectors = VMDK_BYTE2SECTOR(RT_ALIGN_64(cbExtent, 65536));
3912 pExtent->cSectorsPerGrain = VMDK_BYTE2SECTOR(65536);
3913 pExtent->cGTEntries = 512;
3914 cSectorsPerGDE = pExtent->cGTEntries * pExtent->cSectorsPerGrain;
3915 pExtent->cSectorsPerGDE = cSectorsPerGDE;
3916 pExtent->cGDEntries = (pExtent->cSectors + cSectorsPerGDE - 1) / cSectorsPerGDE;
3917 cSectorsPerGD = (pExtent->cGDEntries + (512 / sizeof(uint32_t) - 1)) / (512 / sizeof(uint32_t));
3918 if (pImage->uImageFlags & VD_VMDK_IMAGE_FLAGS_STREAM_OPTIMIZED)
3919 {
3920 /* The spec says version is 1 for all VMDKs, but the vast
3921 * majority of streamOptimized VMDKs actually contain
3922 * version 3 - so go with the majority. Both are accepted. */
3923 pExtent->uVersion = 3;
3924 pExtent->uCompression = VMDK_COMPRESSION_DEFLATE;
3925 }
3926 }
3927 else
3928 {
3929 if (uImageFlags & VD_VMDK_IMAGE_FLAGS_ESX)
3930 pExtent->enmType = VMDKETYPE_VMFS;
3931 else
3932 pExtent->enmType = VMDKETYPE_FLAT;
3933 }
3934
3935 pExtent->enmAccess = VMDKACCESS_READWRITE;
3936 pExtent->fUncleanShutdown = true;
3937 pExtent->cNominalSectors = VMDK_BYTE2SECTOR(cbExtent);
3938 pExtent->uSectorOffset = 0;
3939 pExtent->fMetaDirty = true;
3940
3941 if (!(uImageFlags & VD_IMAGE_FLAGS_FIXED))
3942 {
3943 /* fPreAlloc should never be false because VMware can't use such images. */
3944 rc = vmdkCreateGrainDirectory(pExtent,
3945 RT_MAX( pExtent->uDescriptorSector
3946 + pExtent->cDescriptorSectors,
3947 1),
3948 true /* fPreAlloc */);
3949 if (RT_FAILURE(rc))
3950 return vmdkError(pImage, rc, RT_SRC_POS, N_("VMDK: could not create new grain directory in '%s'"), pExtent->pszFullname);
3951 }
3952
3953 if (RT_SUCCESS(rc) && pfnProgress)
3954 pfnProgress(pvUser, uPercentStart + i * uPercentSpan / cExtents);
3955
3956 cbRemaining -= cbExtent;
3957 cbOffset += cbExtent;
3958 }
3959
3960 if (pImage->uImageFlags & VD_VMDK_IMAGE_FLAGS_ESX)
3961 {
3962 /* VirtualBox doesn't care, but VMWare ESX freaks out if the wrong
3963 * controller type is set in an image. */
3964 rc = vmdkDescDDBSetStr(pImage, &pImage->Descriptor, "ddb.adapterType", "lsilogic");
3965 if (RT_FAILURE(rc))
3966 return vmdkError(pImage, rc, RT_SRC_POS, N_("VMDK: could not set controller type to lsilogic in '%s'"), pImage->pszFilename);
3967 }
3968
3969 const char *pszDescType = NULL;
3970 if (uImageFlags & VD_IMAGE_FLAGS_FIXED)
3971 {
3972 if (pImage->uImageFlags & VD_VMDK_IMAGE_FLAGS_ESX)
3973 pszDescType = "vmfs";
3974 else
3975 pszDescType = (cExtents == 1)
3976 ? "monolithicFlat" : "twoGbMaxExtentFlat";
3977 }
3978 else
3979 {
3980 if (pImage->uImageFlags & VD_VMDK_IMAGE_FLAGS_STREAM_OPTIMIZED)
3981 pszDescType = "streamOptimized";
3982 else
3983 {
3984 pszDescType = (cExtents == 1)
3985 ? "monolithicSparse" : "twoGbMaxExtentSparse";
3986 }
3987 }
3988 rc = vmdkDescBaseSetStr(pImage, &pImage->Descriptor, "createType",
3989 pszDescType);
3990 if (RT_FAILURE(rc))
3991 return vmdkError(pImage, rc, RT_SRC_POS, N_("VMDK: could not set the image type in '%s'"), pImage->pszFilename);
3992 return rc;
3993}
3994
3995/**
3996 * Internal: The actual code for creating any VMDK variant currently in
3997 * existence on hosted environments.
3998 */
static int vmdkCreateImage(PVMDKIMAGE pImage, uint64_t cbSize,
                           unsigned uImageFlags, const char *pszComment,
                           PCPDMMEDIAGEOMETRY pPCHSGeometry,
                           PCPDMMEDIAGEOMETRY pLCHSGeometry, PCRTUUID pUuid,
                           PFNVDPROGRESS pfnProgress, void *pvUser,
                           unsigned uPercentStart, unsigned uPercentSpan)
{
    int rc;

    pImage->uImageFlags = uImageFlags;

    /* Try to get error interface. */
    pImage->pInterfaceError = VDInterfaceGet(pImage->pVDIfsDisk, VDINTERFACETYPE_ERROR);
    if (pImage->pInterfaceError)
        pImage->pInterfaceErrorCallbacks = VDGetInterfaceError(pImage->pInterfaceError);

    /* Try to get async I/O interface. */
    pImage->pInterfaceIO = VDInterfaceGet(pImage->pVDIfsImage, VDINTERFACETYPE_IO);
    if (pImage->pInterfaceIO)
        pImage->pInterfaceIOCallbacks = VDGetInterfaceIO(pImage->pInterfaceIO);

    /* Initialize the in-memory descriptor from the preallocated buffer. */
    rc = vmdkCreateDescriptor(pImage, pImage->pDescData, pImage->cbDescAlloc,
                              &pImage->Descriptor);
    if (RT_FAILURE(rc))
    {
        rc = vmdkError(pImage, rc, RT_SRC_POS, N_("VMDK: could not create new descriptor in '%s'"), pImage->pszFilename);
        goto out;
    }

    if (    (uImageFlags & VD_IMAGE_FLAGS_FIXED)
        &&  (uImageFlags & VD_VMDK_IMAGE_FLAGS_RAWDISK))
    {
        /* Raw disk image (includes raw partition). */
        /* NOTE: the comment parameter is deliberately (mis)used by the caller
         * to smuggle in the raw-disk configuration structure. */
        const PVBOXHDDRAW pRaw = (const PVBOXHDDRAW)pszComment;
        /* As the comment is misused, zap it so that no garbage comment
         * is set below. */
        pszComment = NULL;
        rc = vmdkCreateRawImage(pImage, pRaw, cbSize);
    }
    else
    {
        /* Regular fixed or sparse image (monolithic or split).
         * Reserve the last 5% of the progress span for the descriptor work below. */
        rc = vmdkCreateRegularImage(pImage, cbSize, uImageFlags,
                                    pfnProgress, pvUser, uPercentStart,
                                    uPercentSpan * 95 / 100);
    }

    if (RT_FAILURE(rc))
        goto out;

    if (RT_SUCCESS(rc) && pfnProgress)
        pfnProgress(pvUser, uPercentStart + uPercentSpan * 98 / 100);

    pImage->cbSize = cbSize;

    /* Record every created extent in the descriptor's extent list. */
    for (unsigned i = 0; i < pImage->cExtents; i++)
    {
        PVMDKEXTENT pExtent = &pImage->pExtents[i];

        rc = vmdkDescExtInsert(pImage, &pImage->Descriptor, pExtent->enmAccess,
                               pExtent->cNominalSectors, pExtent->enmType,
                               pExtent->pszBasename, pExtent->uSectorOffset);
        if (RT_FAILURE(rc))
        {
            rc = vmdkError(pImage, rc, RT_SRC_POS, N_("VMDK: could not insert the extent list into descriptor in '%s'"), pImage->pszFilename);
            goto out;
        }
    }
    vmdkDescExtRemoveDummy(pImage, &pImage->Descriptor);

    /* Geometries are only stored when fully specified (all three fields non-zero). */
    if (    pPCHSGeometry->cCylinders != 0
        &&  pPCHSGeometry->cHeads != 0
        &&  pPCHSGeometry->cSectors != 0)
    {
        rc = vmdkDescSetPCHSGeometry(pImage, pPCHSGeometry);
        if (RT_FAILURE(rc))
            goto out;
    }
    if (    pLCHSGeometry->cCylinders != 0
        &&  pLCHSGeometry->cHeads != 0
        &&  pLCHSGeometry->cSectors != 0)
    {
        rc = vmdkDescSetLCHSGeometry(pImage, pLCHSGeometry);
        if (RT_FAILURE(rc))
            goto out;
    }

    pImage->LCHSGeometry = *pLCHSGeometry;
    pImage->PCHSGeometry = *pPCHSGeometry;

    /* Store the image UUID; parent/modification UUIDs start out cleared. */
    pImage->ImageUuid = *pUuid;
    rc = vmdkDescDDBSetUuid(pImage, &pImage->Descriptor,
                            VMDK_DDB_IMAGE_UUID, &pImage->ImageUuid);
    if (RT_FAILURE(rc))
    {
        rc = vmdkError(pImage, rc, RT_SRC_POS, N_("VMDK: error storing image UUID in new descriptor in '%s'"), pImage->pszFilename);
        goto out;
    }
    RTUuidClear(&pImage->ParentUuid);
    rc = vmdkDescDDBSetUuid(pImage, &pImage->Descriptor,
                            VMDK_DDB_PARENT_UUID, &pImage->ParentUuid);
    if (RT_FAILURE(rc))
    {
        rc = vmdkError(pImage, rc, RT_SRC_POS, N_("VMDK: error storing parent image UUID in new descriptor in '%s'"), pImage->pszFilename);
        goto out;
    }
    RTUuidClear(&pImage->ModificationUuid);
    rc = vmdkDescDDBSetUuid(pImage, &pImage->Descriptor,
                            VMDK_DDB_MODIFICATION_UUID,
                            &pImage->ModificationUuid);
    if (RT_FAILURE(rc))
    {
        rc = vmdkError(pImage, rc, RT_SRC_POS, N_("VMDK: error storing modification UUID in new descriptor in '%s'"), pImage->pszFilename);
        goto out;
    }
    RTUuidClear(&pImage->ParentModificationUuid);
    rc = vmdkDescDDBSetUuid(pImage, &pImage->Descriptor,
                            VMDK_DDB_PARENT_MODIFICATION_UUID,
                            &pImage->ParentModificationUuid);
    if (RT_FAILURE(rc))
    {
        rc = vmdkError(pImage, rc, RT_SRC_POS, N_("VMDK: error storing parent modification UUID in new descriptor in '%s'"), pImage->pszFilename);
        goto out;
    }

    rc = vmdkAllocateGrainTableCache(pImage);
    if (RT_FAILURE(rc))
        goto out;

    rc = vmdkSetImageComment(pImage, pszComment);
    if (RT_FAILURE(rc))
    {
        rc = vmdkError(pImage, rc, RT_SRC_POS, N_("VMDK: cannot set image comment in '%s'"), pImage->pszFilename);
        goto out;
    }

    if (RT_SUCCESS(rc) && pfnProgress)
        pfnProgress(pvUser, uPercentStart + uPercentSpan * 99 / 100);

    /* Push descriptor and metadata to disk. */
    rc = vmdkFlushImage(pImage);

out:
    if (RT_SUCCESS(rc) && pfnProgress)
        pfnProgress(pvUser, uPercentStart + uPercentSpan);

    /* On failure tear everything down; delete the files unless creation failed
     * because they already existed (don't destroy someone else's image). */
    if (RT_FAILURE(rc))
        vmdkFreeImage(pImage, rc != VERR_ALREADY_EXISTS);
    return rc;
}
4148
4149/**
4150 * Internal: Update image comment.
4151 */
4152static int vmdkSetImageComment(PVMDKIMAGE pImage, const char *pszComment)
4153{
4154 char *pszCommentEncoded;
4155 if (pszComment)
4156 {
4157 pszCommentEncoded = vmdkEncodeString(pszComment);
4158 if (!pszCommentEncoded)
4159 return VERR_NO_MEMORY;
4160 }
4161 else
4162 pszCommentEncoded = NULL;
4163 int rc = vmdkDescDDBSetStr(pImage, &pImage->Descriptor,
4164 "ddb.comment", pszCommentEncoded);
4165 if (pszComment)
4166 RTStrFree(pszCommentEncoded);
4167 if (RT_FAILURE(rc))
4168 return vmdkError(pImage, rc, RT_SRC_POS, N_("VMDK: error storing image comment in descriptor in '%s'"), pImage->pszFilename);
4169 return VINF_SUCCESS;
4170}
4171
4172/**
4173 * Internal. Free all allocated space for representing an image, and optionally
4174 * delete the image from disk.
4175 */
4176static void vmdkFreeImage(PVMDKIMAGE pImage, bool fDelete)
4177{
4178 AssertPtr(pImage);
4179
4180 if (!(pImage->uOpenFlags & VD_OPEN_FLAGS_READONLY))
4181 {
4182 /* Mark all extents as clean. */
4183 for (unsigned i = 0; i < pImage->cExtents; i++)
4184 {
4185 if (( pImage->pExtents[i].enmType == VMDKETYPE_HOSTED_SPARSE
4186#ifdef VBOX_WITH_VMDK_ESX
4187 || pImage->pExtents[i].enmType == VMDKETYPE_ESX_SPARSE
4188#endif /* VBOX_WITH_VMDK_ESX */
4189 )
4190 && pImage->pExtents[i].fUncleanShutdown)
4191 {
4192 pImage->pExtents[i].fUncleanShutdown = false;
4193 pImage->pExtents[i].fMetaDirty = true;
4194 }
4195 }
4196 }
4197 (void)vmdkFlushImage(pImage);
4198
4199 if (pImage->pExtents != NULL)
4200 {
4201 for (unsigned i = 0 ; i < pImage->cExtents; i++)
4202 vmdkFreeExtentData(pImage, &pImage->pExtents[i], fDelete);
4203 RTMemFree(pImage->pExtents);
4204 pImage->pExtents = NULL;
4205 }
4206 pImage->cExtents = 0;
4207 if (pImage->pFile != NULL)
4208 vmdkFileClose(pImage, &pImage->pFile, fDelete);
4209 vmdkFileCheckAllClose(pImage);
4210 if (pImage->pGTCache)
4211 {
4212 RTMemFree(pImage->pGTCache);
4213 pImage->pGTCache = NULL;
4214 }
4215 if (pImage->pDescData)
4216 {
4217 RTMemFree(pImage->pDescData);
4218 pImage->pDescData = NULL;
4219 }
4220}
4221
4222/**
4223 * Internal. Flush image data (and metadata) to disk.
4224 */
4225static int vmdkFlushImage(PVMDKIMAGE pImage)
4226{
4227 PVMDKEXTENT pExtent;
4228 int rc = VINF_SUCCESS;
4229
4230 /* Update descriptor if changed. */
4231 if (pImage->Descriptor.fDirty)
4232 {
4233 rc = vmdkWriteDescriptor(pImage);
4234 if (RT_FAILURE(rc))
4235 goto out;
4236 }
4237
4238 for (unsigned i = 0; i < pImage->cExtents; i++)
4239 {
4240 pExtent = &pImage->pExtents[i];
4241 if (pExtent->pFile != NULL && pExtent->fMetaDirty)
4242 {
4243 switch (pExtent->enmType)
4244 {
4245 case VMDKETYPE_HOSTED_SPARSE:
4246 rc = vmdkWriteMetaSparseExtent(pExtent, 0);
4247 if (RT_FAILURE(rc))
4248 goto out;
4249 if (pExtent->fFooter)
4250 {
4251 uint64_t cbSize;
4252 rc = vmdkFileGetSize(pExtent->pFile, &cbSize);
4253 if (RT_FAILURE(rc))
4254 goto out;
4255 cbSize = RT_ALIGN_64(cbSize, 512);
4256 rc = vmdkWriteMetaSparseExtent(pExtent, cbSize - 2*512);
4257 if (RT_FAILURE(rc))
4258 goto out;
4259 }
4260 break;
4261#ifdef VBOX_WITH_VMDK_ESX
4262 case VMDKETYPE_ESX_SPARSE:
4263 /** @todo update the header. */
4264 break;
4265#endif /* VBOX_WITH_VMDK_ESX */
4266 case VMDKETYPE_VMFS:
4267 case VMDKETYPE_FLAT:
4268 /* Nothing to do. */
4269 break;
4270 case VMDKETYPE_ZERO:
4271 default:
4272 AssertMsgFailed(("extent with type %d marked as dirty\n",
4273 pExtent->enmType));
4274 break;
4275 }
4276 }
4277 switch (pExtent->enmType)
4278 {
4279 case VMDKETYPE_HOSTED_SPARSE:
4280#ifdef VBOX_WITH_VMDK_ESX
4281 case VMDKETYPE_ESX_SPARSE:
4282#endif /* VBOX_WITH_VMDK_ESX */
4283 case VMDKETYPE_VMFS:
4284 case VMDKETYPE_FLAT:
4285 /** @todo implement proper path absolute check. */
4286 if ( pExtent->pFile != NULL
4287 && !(pImage->uOpenFlags & VD_OPEN_FLAGS_READONLY)
4288 && !(pExtent->pszBasename[0] == RTPATH_SLASH))
4289 rc = vmdkFileFlush(pExtent->pFile);
4290 break;
4291 case VMDKETYPE_ZERO:
4292 /* No need to do anything for this extent. */
4293 break;
4294 default:
4295 AssertMsgFailed(("unknown extent type %d\n", pExtent->enmType));
4296 break;
4297 }
4298 }
4299
4300out:
4301 return rc;
4302}
4303
4304/**
4305 * Internal. Flush image data (and metadata) to disk - async version.
4306 */
4307static int vmdkFlushImageAsync(PVMDKIMAGE pImage, PVDIOCTX pIoCtx)
4308{
4309 PVMDKEXTENT pExtent;
4310 int rc = VINF_SUCCESS;
4311
4312 /* Update descriptor if changed. */
4313 if (pImage->Descriptor.fDirty)
4314 {
4315 rc = vmdkWriteDescriptor(pImage);
4316 if (RT_FAILURE(rc))
4317 goto out;
4318 }
4319
4320 for (unsigned i = 0; i < pImage->cExtents; i++)
4321 {
4322 pExtent = &pImage->pExtents[i];
4323 if (pExtent->pFile != NULL && pExtent->fMetaDirty)
4324 {
4325 switch (pExtent->enmType)
4326 {
4327 case VMDKETYPE_HOSTED_SPARSE:
4328 AssertMsgFailed(("Async I/O not supported for sparse images\n"));
4329 break;
4330#ifdef VBOX_WITH_VMDK_ESX
4331 case VMDKETYPE_ESX_SPARSE:
4332 /** @todo update the header. */
4333 break;
4334#endif /* VBOX_WITH_VMDK_ESX */
4335 case VMDKETYPE_VMFS:
4336 case VMDKETYPE_FLAT:
4337 /* Nothing to do. */
4338 break;
4339 case VMDKETYPE_ZERO:
4340 default:
4341 AssertMsgFailed(("extent with type %d marked as dirty\n",
4342 pExtent->enmType));
4343 break;
4344 }
4345 }
4346 switch (pExtent->enmType)
4347 {
4348 case VMDKETYPE_HOSTED_SPARSE:
4349#ifdef VBOX_WITH_VMDK_ESX
4350 case VMDKETYPE_ESX_SPARSE:
4351#endif /* VBOX_WITH_VMDK_ESX */
4352 case VMDKETYPE_VMFS:
4353 case VMDKETYPE_FLAT:
4354 /** @todo implement proper path absolute check. */
4355 if ( pExtent->pFile != NULL
4356 && !(pImage->uOpenFlags & VD_OPEN_FLAGS_READONLY)
4357 && !(pExtent->pszBasename[0] == RTPATH_SLASH))
4358 rc = vmdkFileFlushAsync(pExtent->pFile, pIoCtx);
4359 break;
4360 case VMDKETYPE_ZERO:
4361 /* No need to do anything for this extent. */
4362 break;
4363 default:
4364 AssertMsgFailed(("unknown extent type %d\n", pExtent->enmType));
4365 break;
4366 }
4367 }
4368
4369out:
4370 return rc;
4371}
4372
4373/**
4374 * Internal. Find extent corresponding to the sector number in the disk.
4375 */
4376static int vmdkFindExtent(PVMDKIMAGE pImage, uint64_t offSector,
4377 PVMDKEXTENT *ppExtent, uint64_t *puSectorInExtent)
4378{
4379 PVMDKEXTENT pExtent = NULL;
4380 int rc = VINF_SUCCESS;
4381
4382 for (unsigned i = 0; i < pImage->cExtents; i++)
4383 {
4384 if (offSector < pImage->pExtents[i].cNominalSectors)
4385 {
4386 pExtent = &pImage->pExtents[i];
4387 *puSectorInExtent = offSector + pImage->pExtents[i].uSectorOffset;
4388 break;
4389 }
4390 offSector -= pImage->pExtents[i].cNominalSectors;
4391 }
4392
4393 if (pExtent)
4394 *ppExtent = pExtent;
4395 else
4396 rc = VERR_IO_SECTOR_NOT_FOUND;
4397
4398 return rc;
4399}
4400
4401/**
4402 * Internal. Hash function for placing the grain table hash entries.
4403 */
4404static uint32_t vmdkGTCacheHash(PVMDKGTCACHE pCache, uint64_t uSector,
4405 unsigned uExtent)
4406{
4407 /** @todo this hash function is quite simple, maybe use a better one which
4408 * scrambles the bits better. */
4409 return (uSector + uExtent) % pCache->cEntries;
4410}
4411
4412/**
4413 * Internal. Get sector number in the extent file from the relative sector
4414 * number in the extent.
4415 */
static int vmdkGetSector(PVMDKGTCACHE pCache, PVMDKEXTENT pExtent,
                         uint64_t uSector, uint64_t *puExtentSector)
{
    uint64_t uGDIndex, uGTSector, uGTBlock;
    uint32_t uGTHash, uGTBlockIndex;
    PVMDKGTCACHEENTRY pGTCacheEntry;
    uint32_t aGTDataTmp[VMDK_GT_CACHELINE_SIZE];
    int rc;

    /* Locate the grain directory entry covering this sector. */
    uGDIndex = uSector / pExtent->cSectorsPerGDE;
    if (uGDIndex >= pExtent->cGDEntries)
        return VERR_OUT_OF_RANGE;
    uGTSector = pExtent->pGD[uGDIndex];
    if (!uGTSector)
    {
        /* There is no grain table referenced by this grain directory
         * entry. So there is absolutely no data in this area. */
        *puExtentSector = 0;
        return VINF_SUCCESS;
    }

    /* Look the grain table cache line up; on miss, read one cache line worth
     * of GT entries from the extent file. */
    uGTBlock = uSector / (pExtent->cSectorsPerGrain * VMDK_GT_CACHELINE_SIZE);
    uGTHash = vmdkGTCacheHash(pCache, uGTBlock, pExtent->uExtent);
    pGTCacheEntry = &pCache->aGTCache[uGTHash];
    if (   pGTCacheEntry->uExtent != pExtent->uExtent
        || pGTCacheEntry->uGTBlock != uGTBlock)
    {
        /* Cache miss, fetch data from disk. */
        rc = vmdkFileReadAt(pExtent->pFile,
                            VMDK_SECTOR2BYTE(uGTSector) + (uGTBlock % (pExtent->cGTEntries / VMDK_GT_CACHELINE_SIZE)) * sizeof(aGTDataTmp),
                            aGTDataTmp, sizeof(aGTDataTmp), NULL);
        if (RT_FAILURE(rc))
            return vmdkError(pExtent->pImage, rc, RT_SRC_POS, N_("VMDK: cannot read grain table entry in '%s'"), pExtent->pszFullname);
        pGTCacheEntry->uExtent = pExtent->uExtent;
        pGTCacheEntry->uGTBlock = uGTBlock;
        /* On-disk GT entries are little endian; convert while filling the cache. */
        for (unsigned i = 0; i < VMDK_GT_CACHELINE_SIZE; i++)
            pGTCacheEntry->aGTData[i] = RT_LE2H_U32(aGTDataTmp[i]);
    }
    /* Index within the cache line, then the grain's base sector plus the
     * offset of the requested sector inside the grain. Zero means the grain
     * is unallocated. */
    uGTBlockIndex = (uSector / pExtent->cSectorsPerGrain) % VMDK_GT_CACHELINE_SIZE;
    uint32_t uGrainSector = pGTCacheEntry->aGTData[uGTBlockIndex];
    if (uGrainSector)
        *puExtentSector = uGrainSector + uSector % pExtent->cSectorsPerGrain;
    else
        *puExtentSector = 0;
    return VINF_SUCCESS;
}
4462
4463/**
4464 * Internal. Get sector number in the extent file from the relative sector
4465 * number in the extent - version for async access.
4466 */
static int vmdkGetSectorAsync(PVMDKIMAGE pImage, PVDIOCTX pIoCtx,
                              PVMDKGTCACHE pCache, PVMDKEXTENT pExtent,
                              uint64_t uSector, uint64_t *puExtentSector)
{
    uint64_t uGDIndex, uGTSector, uGTBlock;
    uint32_t uGTHash, uGTBlockIndex;
    PVMDKGTCACHEENTRY pGTCacheEntry;
    uint32_t aGTDataTmp[VMDK_GT_CACHELINE_SIZE];
    int rc;

    /* Locate the grain directory entry covering this sector. */
    uGDIndex = uSector / pExtent->cSectorsPerGDE;
    if (uGDIndex >= pExtent->cGDEntries)
        return VERR_OUT_OF_RANGE;
    uGTSector = pExtent->pGD[uGDIndex];
    if (!uGTSector)
    {
        /* There is no grain table referenced by this grain directory
         * entry. So there is absolutely no data in this area. */
        *puExtentSector = 0;
        return VINF_SUCCESS;
    }

    LogFlowFunc(("uGTSector=%llu\n", uGTSector));

    /* Look the grain table cache line up; on miss, read one cache line worth
     * of GT entries through the async metadata interface. */
    uGTBlock = uSector / (pExtent->cSectorsPerGrain * VMDK_GT_CACHELINE_SIZE);
    uGTHash = vmdkGTCacheHash(pCache, uGTBlock, pExtent->uExtent);
    pGTCacheEntry = &pCache->aGTCache[uGTHash];
    if (   pGTCacheEntry->uExtent != pExtent->uExtent
        || pGTCacheEntry->uGTBlock != uGTBlock)
    {
        /* Cache miss, fetch data from disk. */
        PVDMETAXFER pMetaXfer;
        rc = pImage->pInterfaceIOCallbacks->pfnReadMetaAsync(pImage->pInterfaceIO->pvUser,
                                                             pExtent->pFile->pStorage,
                                                             VMDK_SECTOR2BYTE(uGTSector) + (uGTBlock % (pExtent->cGTEntries / VMDK_GT_CACHELINE_SIZE)) * sizeof(aGTDataTmp),
                                                             aGTDataTmp, sizeof(aGTDataTmp), pIoCtx, &pMetaXfer, NULL, NULL);
        if (RT_FAILURE(rc))
            return rc;
        /* We can release the metadata transfer immediately. */
        pImage->pInterfaceIOCallbacks->pfnMetaXferRelease(pImage->pInterfaceIO->pvUser, pMetaXfer);
        pGTCacheEntry->uExtent = pExtent->uExtent;
        pGTCacheEntry->uGTBlock = uGTBlock;
        /* On-disk GT entries are little endian; convert while filling the cache. */
        for (unsigned i = 0; i < VMDK_GT_CACHELINE_SIZE; i++)
            pGTCacheEntry->aGTData[i] = RT_LE2H_U32(aGTDataTmp[i]);
    }
    /* Index within the cache line, then the grain's base sector plus the
     * offset of the requested sector inside the grain. Zero means the grain
     * is unallocated. */
    uGTBlockIndex = (uSector / pExtent->cSectorsPerGrain) % VMDK_GT_CACHELINE_SIZE;
    uint32_t uGrainSector = pGTCacheEntry->aGTData[uGTBlockIndex];
    if (uGrainSector)
        *puExtentSector = uGrainSector + uSector % pExtent->cSectorsPerGrain;
    else
        *puExtentSector = 0;
    return VINF_SUCCESS;
}
4520
4521/**
4522 * Internal. Allocates a new grain table (if necessary), writes the grain
4523 * and updates the grain table. The cache is also updated by this operation.
4524 * This is separate from vmdkGetSector, because that should be as fast as
4525 * possible. Most code from vmdkGetSector also appears here.
4526 */
4527static int vmdkAllocGrain(PVMDKGTCACHE pCache, PVMDKEXTENT pExtent,
4528 uint64_t uSector, const void *pvBuf,
4529 uint64_t cbWrite)
4530{
4531 uint64_t uGDIndex, uGTSector, uRGTSector, uGTBlock;
4532 uint64_t cbExtentSize;
4533 uint32_t uGTHash, uGTBlockIndex;
4534 PVMDKGTCACHEENTRY pGTCacheEntry;
4535 uint32_t aGTDataTmp[VMDK_GT_CACHELINE_SIZE];
4536 int rc;
4537
4538 uGDIndex = uSector / pExtent->cSectorsPerGDE;
4539 if (uGDIndex >= pExtent->cGDEntries)
4540 return VERR_OUT_OF_RANGE;
4541 uGTSector = pExtent->pGD[uGDIndex];
4542 if (pExtent->pRGD)
4543 uRGTSector = pExtent->pRGD[uGDIndex];
4544 else
4545 uRGTSector = 0; /**< avoid compiler warning */
4546 if (!uGTSector)
4547 {
4548 /* There is no grain table referenced by this grain directory
4549 * entry. So there is absolutely no data in this area. Allocate
4550 * a new grain table and put the reference to it in the GDs. */
4551 rc = vmdkFileGetSize(pExtent->pFile, &cbExtentSize);
4552 if (RT_FAILURE(rc))
4553 return vmdkError(pExtent->pImage, rc, RT_SRC_POS, N_("VMDK: error getting size in '%s'"), pExtent->pszFullname);
4554 Assert(!(cbExtentSize % 512));
4555 cbExtentSize = RT_ALIGN_64(cbExtentSize, 512);
4556 uGTSector = VMDK_BYTE2SECTOR(cbExtentSize);
4557 /* For writable streamOptimized extents the final sector is the
4558 * end-of-stream marker. Will be re-added after the grain table.
4559 * If the file has a footer it also will be re-added before EOS. */
4560 if (pExtent->pImage->uImageFlags & VD_VMDK_IMAGE_FLAGS_STREAM_OPTIMIZED)
4561 {
4562 uint64_t uEOSOff = 0;
4563 uGTSector--;
4564 if (pExtent->fFooter)
4565 {
4566 uGTSector--;
4567 uEOSOff = 512;
4568 rc = vmdkWriteMetaSparseExtent(pExtent, VMDK_SECTOR2BYTE(uGTSector) + pExtent->cGTEntries * sizeof(uint32_t));
4569 if (RT_FAILURE(rc))
4570 return vmdkError(pExtent->pImage, rc, RT_SRC_POS, N_("VMDK: cannot write footer after grain table in '%s'"), pExtent->pszFullname);
4571 }
4572 pExtent->uLastGrainSector = 0;
4573 uint8_t aEOS[512];
4574 memset(aEOS, '\0', sizeof(aEOS));
4575 rc = vmdkFileWriteAt(pExtent->pFile,
4576 VMDK_SECTOR2BYTE(uGTSector) + pExtent->cGTEntries * sizeof(uint32_t) + uEOSOff,
4577 aEOS, sizeof(aEOS), NULL);
4578 if (RT_FAILURE(rc))
4579 return vmdkError(pExtent->pImage, rc, RT_SRC_POS, N_("VMDK: cannot write end-of stream marker after grain table in '%s'"), pExtent->pszFullname);
4580 }
4581 /* Normally the grain table is preallocated for hosted sparse extents
4582 * that support more than 32 bit sector numbers. So this shouldn't
4583 * ever happen on a valid extent. */
4584 if (uGTSector > UINT32_MAX)
4585 return VERR_VD_VMDK_INVALID_HEADER;
4586 /* Write grain table by writing the required number of grain table
4587 * cache chunks. Avoids dynamic memory allocation, but is a bit
4588 * slower. But as this is a pretty infrequently occurring case it
4589 * should be acceptable. */
4590 memset(aGTDataTmp, '\0', sizeof(aGTDataTmp));
4591 for (unsigned i = 0;
4592 i < pExtent->cGTEntries / VMDK_GT_CACHELINE_SIZE;
4593 i++)
4594 {
4595 rc = vmdkFileWriteAt(pExtent->pFile,
4596 VMDK_SECTOR2BYTE(uGTSector) + i * sizeof(aGTDataTmp),
4597 aGTDataTmp, sizeof(aGTDataTmp), NULL);
4598 if (RT_FAILURE(rc))
4599 return vmdkError(pExtent->pImage, rc, RT_SRC_POS, N_("VMDK: cannot write grain table allocation in '%s'"), pExtent->pszFullname);
4600 }
4601 if (pExtent->pRGD)
4602 {
4603 AssertReturn(!uRGTSector, VERR_VD_VMDK_INVALID_HEADER);
4604 rc = vmdkFileGetSize(pExtent->pFile, &cbExtentSize);
4605 if (RT_FAILURE(rc))
4606 return vmdkError(pExtent->pImage, rc, RT_SRC_POS, N_("VMDK: error getting size in '%s'"), pExtent->pszFullname);
4607 Assert(!(cbExtentSize % 512));
4608 uRGTSector = VMDK_BYTE2SECTOR(cbExtentSize);
4609 /* For writable streamOptimized extents the final sector is the
4610 * end-of-stream marker. Will be re-added after the grain table.
4611 * If the file has a footer it also will be re-added before EOS. */
4612 if (pExtent->pImage->uImageFlags & VD_VMDK_IMAGE_FLAGS_STREAM_OPTIMIZED)
4613 {
4614 uint64_t uEOSOff = 0;
4615 uRGTSector--;
4616 if (pExtent->fFooter)
4617 {
4618 uRGTSector--;
4619 uEOSOff = 512;
4620 rc = vmdkWriteMetaSparseExtent(pExtent, VMDK_SECTOR2BYTE(uRGTSector) + pExtent->cGTEntries * sizeof(uint32_t));
4621 if (RT_FAILURE(rc))
4622 return vmdkError(pExtent->pImage, rc, RT_SRC_POS, N_("VMDK: cannot write footer after redundant grain table in '%s'"), pExtent->pszFullname);
4623 }
4624 pExtent->uLastGrainSector = 0;
4625 uint8_t aEOS[512];
4626 memset(aEOS, '\0', sizeof(aEOS));
4627 rc = vmdkFileWriteAt(pExtent->pFile,
4628 VMDK_SECTOR2BYTE(uRGTSector) + pExtent->cGTEntries * sizeof(uint32_t) + uEOSOff,
4629 aEOS, sizeof(aEOS), NULL);
4630 if (RT_FAILURE(rc))
4631 return vmdkError(pExtent->pImage, rc, RT_SRC_POS, N_("VMDK: cannot write end-of stream marker after redundant grain table in '%s'"), pExtent->pszFullname);
4632 }
4633 /* Normally the redundant grain table is preallocated for hosted
4634 * sparse extents that support more than 32 bit sector numbers. So
4635 * this shouldn't ever happen on a valid extent. */
4636 if (uRGTSector > UINT32_MAX)
4637 return VERR_VD_VMDK_INVALID_HEADER;
4638 /* Write backup grain table by writing the required number of grain
4639 * table cache chunks. Avoids dynamic memory allocation, but is a
4640 * bit slower. But as this is a pretty infrequently occurring case
4641 * it should be acceptable. */
4642 for (unsigned i = 0;
4643 i < pExtent->cGTEntries / VMDK_GT_CACHELINE_SIZE;
4644 i++)
4645 {
4646 rc = vmdkFileWriteAt(pExtent->pFile,
4647 VMDK_SECTOR2BYTE(uRGTSector) + i * sizeof(aGTDataTmp),
4648 aGTDataTmp, sizeof(aGTDataTmp), NULL);
4649 if (RT_FAILURE(rc))
4650 return vmdkError(pExtent->pImage, rc, RT_SRC_POS, N_("VMDK: cannot write backup grain table allocation in '%s'"), pExtent->pszFullname);
4651 }
4652 }
4653
4654 /* Update the grain directory on disk (doing it before writing the
4655 * grain table will result in a garbled extent if the operation is
4656 * aborted for some reason. Otherwise the worst that can happen is
4657 * some unused sectors in the extent. */
4658 uint32_t uGTSectorLE = RT_H2LE_U64(uGTSector);
4659 rc = vmdkFileWriteAt(pExtent->pFile,
4660 VMDK_SECTOR2BYTE(pExtent->uSectorGD) + uGDIndex * sizeof(uGTSectorLE),
4661 &uGTSectorLE, sizeof(uGTSectorLE), NULL);
4662 if (RT_FAILURE(rc))
4663 return vmdkError(pExtent->pImage, rc, RT_SRC_POS, N_("VMDK: cannot write grain directory entry in '%s'"), pExtent->pszFullname);
4664 if (pExtent->pRGD)
4665 {
4666 uint32_t uRGTSectorLE = RT_H2LE_U64(uRGTSector);
4667 rc = vmdkFileWriteAt(pExtent->pFile,
4668 VMDK_SECTOR2BYTE(pExtent->uSectorRGD) + uGDIndex * sizeof(uRGTSectorLE),
4669 &uRGTSectorLE, sizeof(uRGTSectorLE), NULL);
4670 if (RT_FAILURE(rc))
4671 return vmdkError(pExtent->pImage, rc, RT_SRC_POS, N_("VMDK: cannot write backup grain directory entry in '%s'"), pExtent->pszFullname);
4672 }
4673
4674 /* As the final step update the in-memory copy of the GDs. */
4675 pExtent->pGD[uGDIndex] = uGTSector;
4676 if (pExtent->pRGD)
4677 pExtent->pRGD[uGDIndex] = uRGTSector;
4678 }
4679
4680 rc = vmdkFileGetSize(pExtent->pFile, &cbExtentSize);
4681 if (RT_FAILURE(rc))
4682 return vmdkError(pExtent->pImage, rc, RT_SRC_POS, N_("VMDK: error getting size in '%s'"), pExtent->pszFullname);
4683 Assert(!(cbExtentSize % 512));
4684
4685 /* Write the data. Always a full grain, or we're in big trouble. */
4686 if (pExtent->pImage->uImageFlags & VD_VMDK_IMAGE_FLAGS_STREAM_OPTIMIZED)
4687 {
4688 /* For streamOptimized extents this is a little more difficult, as the
4689 * cached data also needs to be updated, to handle updating the last
4690 * written block properly. Also we're trying to avoid unnecessary gaps.
4691 * Additionally the end-of-stream marker needs to be written. */
4692 if (!pExtent->uLastGrainSector)
4693 {
4694 cbExtentSize -= 512;
4695 if (pExtent->fFooter)
4696 cbExtentSize -= 512;
4697 }
4698 else
4699 cbExtentSize = VMDK_SECTOR2BYTE(pExtent->uLastGrainSector) + pExtent->cbLastGrainWritten;
4700 Assert(cbWrite == VMDK_SECTOR2BYTE(pExtent->cSectorsPerGrain));
4701 uint32_t cbGrain = 0;
4702 rc = vmdkFileDeflateAt(pExtent->pFile, cbExtentSize,
4703 pvBuf, cbWrite, VMDK_MARKER_IGNORE, uSector, &cbGrain);
4704 if (RT_FAILURE(rc))
4705 {
4706 pExtent->uGrainSector = 0;
4707 pExtent->uLastGrainSector = 0;
4708 AssertRC(rc);
4709 return vmdkError(pExtent->pImage, rc, RT_SRC_POS, N_("VMDK: cannot write allocated compressed data block in '%s'"), pExtent->pszFullname);
4710 }
4711 cbGrain = RT_ALIGN(cbGrain, 512);
4712 pExtent->uLastGrainSector = VMDK_BYTE2SECTOR(cbExtentSize);
4713 pExtent->uLastGrainWritten = uSector / pExtent->cSectorsPerGrain;
4714 pExtent->cbLastGrainWritten = cbGrain;
4715 memcpy(pExtent->pvGrain, pvBuf, cbWrite);
4716 pExtent->uGrainSector = uSector;
4717
4718 uint64_t uEOSOff = 0;
4719 if (pExtent->fFooter)
4720 {
4721 uEOSOff = 512;
4722 rc = vmdkWriteMetaSparseExtent(pExtent, cbExtentSize + RT_ALIGN(cbGrain, 512));
4723 if (RT_FAILURE(rc))
4724 return vmdkError(pExtent->pImage, rc, RT_SRC_POS, N_("VMDK: cannot write footer after allocated data block in '%s'"), pExtent->pszFullname);
4725 }
4726 uint8_t aEOS[512];
4727 memset(aEOS, '\0', sizeof(aEOS));
4728 rc = vmdkFileWriteAt(pExtent->pFile, cbExtentSize + RT_ALIGN(cbGrain, 512) + uEOSOff,
4729 aEOS, sizeof(aEOS), NULL);
4730 if (RT_FAILURE(rc))
4731 return vmdkError(pExtent->pImage, rc, RT_SRC_POS, N_("VMDK: cannot write end-of stream marker after allocated data block in '%s'"), pExtent->pszFullname);
4732 }
4733 else
4734 {
4735 rc = vmdkFileWriteAt(pExtent->pFile, cbExtentSize, pvBuf, cbWrite, NULL);
4736 if (RT_FAILURE(rc))
4737 return vmdkError(pExtent->pImage, rc, RT_SRC_POS, N_("VMDK: cannot write allocated data block in '%s'"), pExtent->pszFullname);
4738 }
4739
4740 /* Update the grain table (and the cache). */
4741 uGTBlock = uSector / (pExtent->cSectorsPerGrain * VMDK_GT_CACHELINE_SIZE);
4742 uGTHash = vmdkGTCacheHash(pCache, uGTBlock, pExtent->uExtent);
4743 pGTCacheEntry = &pCache->aGTCache[uGTHash];
4744 if ( pGTCacheEntry->uExtent != pExtent->uExtent
4745 || pGTCacheEntry->uGTBlock != uGTBlock)
4746 {
4747 /* Cache miss, fetch data from disk. */
4748 rc = vmdkFileReadAt(pExtent->pFile,
4749 VMDK_SECTOR2BYTE(uGTSector) + (uGTBlock % (pExtent->cGTEntries / VMDK_GT_CACHELINE_SIZE)) * sizeof(aGTDataTmp),
4750 aGTDataTmp, sizeof(aGTDataTmp), NULL);
4751 if (RT_FAILURE(rc))
4752 return vmdkError(pExtent->pImage, rc, RT_SRC_POS, N_("VMDK: cannot read allocated grain table entry in '%s'"), pExtent->pszFullname);
4753 pGTCacheEntry->uExtent = pExtent->uExtent;
4754 pGTCacheEntry->uGTBlock = uGTBlock;
4755 for (unsigned i = 0; i < VMDK_GT_CACHELINE_SIZE; i++)
4756 pGTCacheEntry->aGTData[i] = RT_LE2H_U32(aGTDataTmp[i]);
4757 }
4758 else
4759 {
4760 /* Cache hit. Convert grain table block back to disk format, otherwise
4761 * the code below will write garbage for all but the updated entry. */
4762 for (unsigned i = 0; i < VMDK_GT_CACHELINE_SIZE; i++)
4763 aGTDataTmp[i] = RT_H2LE_U32(pGTCacheEntry->aGTData[i]);
4764 }
4765 uGTBlockIndex = (uSector / pExtent->cSectorsPerGrain) % VMDK_GT_CACHELINE_SIZE;
4766 aGTDataTmp[uGTBlockIndex] = RT_H2LE_U32(VMDK_BYTE2SECTOR(cbExtentSize));
4767 pGTCacheEntry->aGTData[uGTBlockIndex] = VMDK_BYTE2SECTOR(cbExtentSize);
4768 /* Update grain table on disk. */
4769 rc = vmdkFileWriteAt(pExtent->pFile,
4770 VMDK_SECTOR2BYTE(uGTSector) + (uGTBlock % (pExtent->cGTEntries / VMDK_GT_CACHELINE_SIZE)) * sizeof(aGTDataTmp),
4771 aGTDataTmp, sizeof(aGTDataTmp), NULL);
4772 if (RT_FAILURE(rc))
4773 return vmdkError(pExtent->pImage, rc, RT_SRC_POS, N_("VMDK: cannot write updated grain table in '%s'"), pExtent->pszFullname);
4774 if (pExtent->pRGD)
4775 {
4776 /* Update backup grain table on disk. */
4777 rc = vmdkFileWriteAt(pExtent->pFile,
4778 VMDK_SECTOR2BYTE(uRGTSector) + (uGTBlock % (pExtent->cGTEntries / VMDK_GT_CACHELINE_SIZE)) * sizeof(aGTDataTmp),
4779 aGTDataTmp, sizeof(aGTDataTmp), NULL);
4780 if (RT_FAILURE(rc))
4781 return vmdkError(pExtent->pImage, rc, RT_SRC_POS, N_("VMDK: cannot write updated backup grain table in '%s'"), pExtent->pszFullname);
4782 }
4783#ifdef VBOX_WITH_VMDK_ESX
4784 if (RT_SUCCESS(rc) && pExtent->enmType == VMDKETYPE_ESX_SPARSE)
4785 {
4786 pExtent->uFreeSector = uGTSector + VMDK_BYTE2SECTOR(cbWrite);
4787 pExtent->fMetaDirty = true;
4788 }
4789#endif /* VBOX_WITH_VMDK_ESX */
4790 return rc;
4791}
4792
4793/**
4794 * Internal: Updates the grain table during a async grain allocation.
4795 */
4796static int vmdkAllocGrainAsyncGTUpdate(PVMDKIMAGE pImage, PVMDKEXTENT pExtent,
4797 PVMDKGTCACHE pCache, PVDIOCTX pIoCtx,
4798 PVMDKGRAINALLOCASYNC pGrainAlloc)
4799{
4800 int rc = VINF_SUCCESS;
4801 uint32_t aGTDataTmp[VMDK_GT_CACHELINE_SIZE];
4802 uint32_t uGTHash, uGTBlockIndex;
4803 uint64_t uGDIndex, uGTSector, uRGTSector, uGTBlock;
4804 uint64_t uSector = pGrainAlloc->uSector;
4805 PVMDKGTCACHEENTRY pGTCacheEntry;
4806
4807 LogFlowFunc(("pImage=%#p pExtent=%#p pCache=%#p pIoCtx=%#p pGrainAlloc=%#p\n",
4808 pImage, pExtent, pCache, pIoCtx, pGrainAlloc));
4809
4810 uGTSector = pGrainAlloc->uGTSector;
4811 uRGTSector = pGrainAlloc->uRGTSector;
4812 LogFlow(("uGTSector=%llu uRGTSector=%llu\n", uGTSector, uRGTSector));
4813
4814 /* Update the grain table (and the cache). */
4815 uGTBlock = uSector / (pExtent->cSectorsPerGrain * VMDK_GT_CACHELINE_SIZE);
4816 uGTHash = vmdkGTCacheHash(pCache, uGTBlock, pExtent->uExtent);
4817 pGTCacheEntry = &pCache->aGTCache[uGTHash];
4818 if ( pGTCacheEntry->uExtent != pExtent->uExtent
4819 || pGTCacheEntry->uGTBlock != uGTBlock)
4820 {
4821 /* Cache miss, fetch data from disk. */
4822 LogFlow(("Cache miss, fetch data from disk\n"));
4823 PVDMETAXFER pMetaXfer = NULL;
4824 rc = pImage->pInterfaceIOCallbacks->pfnReadMetaAsync(pExtent->pImage->pInterfaceIO->pvUser,
4825 pExtent->pFile->pStorage,
4826 VMDK_SECTOR2BYTE(uGTSector) + (uGTBlock % (pExtent->cGTEntries / VMDK_GT_CACHELINE_SIZE)) * sizeof(aGTDataTmp),
4827 aGTDataTmp, sizeof(aGTDataTmp), pIoCtx,
4828 &pMetaXfer,
4829 vmdkAllocGrainAsyncComplete, pGrainAlloc);
4830 if (rc == VERR_VD_ASYNC_IO_IN_PROGRESS)
4831 {
4832 pGrainAlloc->cIoXfersPending++;
4833 pGrainAlloc->fGTUpdateNeeded = true;
4834 /* Leave early, we will be called again after the read completed. */
4835 LogFlowFunc(("Metadata read in progress, leaving\n"));
4836 return rc;
4837 }
4838 else if (RT_FAILURE(rc))
4839 return vmdkError(pExtent->pImage, rc, RT_SRC_POS, N_("VMDK: cannot read allocated grain table entry in '%s'"), pExtent->pszFullname);
4840 pImage->pInterfaceIOCallbacks->pfnMetaXferRelease(pExtent->pImage->pInterfaceIO->pvUser, pMetaXfer);
4841 pGTCacheEntry->uExtent = pExtent->uExtent;
4842 pGTCacheEntry->uGTBlock = uGTBlock;
4843 for (unsigned i = 0; i < VMDK_GT_CACHELINE_SIZE; i++)
4844 pGTCacheEntry->aGTData[i] = RT_LE2H_U32(aGTDataTmp[i]);
4845 }
4846 else
4847 {
4848 /* Cache hit. Convert grain table block back to disk format, otherwise
4849 * the code below will write garbage for all but the updated entry. */
4850 for (unsigned i = 0; i < VMDK_GT_CACHELINE_SIZE; i++)
4851 aGTDataTmp[i] = RT_H2LE_U32(pGTCacheEntry->aGTData[i]);
4852 }
4853 pGrainAlloc->fGTUpdateNeeded = false;
4854 uGTBlockIndex = (uSector / pExtent->cSectorsPerGrain) % VMDK_GT_CACHELINE_SIZE;
4855 aGTDataTmp[uGTBlockIndex] = RT_H2LE_U32(VMDK_BYTE2SECTOR(pGrainAlloc->cbExtentSize));
4856 pGTCacheEntry->aGTData[uGTBlockIndex] = VMDK_BYTE2SECTOR(pGrainAlloc->cbExtentSize);
4857 /* Update grain table on disk. */
4858 rc = pImage->pInterfaceIOCallbacks->pfnWriteMetaAsync(pExtent->pImage->pInterfaceIO->pvUser,
4859 pExtent->pFile->pStorage,
4860 VMDK_SECTOR2BYTE(uGTSector) + (uGTBlock % (pExtent->cGTEntries / VMDK_GT_CACHELINE_SIZE)) * sizeof(aGTDataTmp),
4861 aGTDataTmp, sizeof(aGTDataTmp), pIoCtx,
4862 vmdkAllocGrainAsyncComplete, pGrainAlloc);
4863 if (rc == VERR_VD_ASYNC_IO_IN_PROGRESS)
4864 pGrainAlloc->cIoXfersPending++;
4865 else if (RT_FAILURE(rc))
4866 return vmdkError(pExtent->pImage, rc, RT_SRC_POS, N_("VMDK: cannot write updated grain table in '%s'"), pExtent->pszFullname);
4867 if (pExtent->pRGD)
4868 {
4869 /* Update backup grain table on disk. */
4870 rc = pImage->pInterfaceIOCallbacks->pfnWriteMetaAsync(pExtent->pImage->pInterfaceIO->pvUser,
4871 pExtent->pFile->pStorage,
4872 VMDK_SECTOR2BYTE(uRGTSector) + (uGTBlock % (pExtent->cGTEntries / VMDK_GT_CACHELINE_SIZE)) * sizeof(aGTDataTmp),
4873 aGTDataTmp, sizeof(aGTDataTmp), pIoCtx,
4874 vmdkAllocGrainAsyncComplete, pGrainAlloc);
4875 if (rc == VERR_VD_ASYNC_IO_IN_PROGRESS)
4876 pGrainAlloc->cIoXfersPending++;
4877 else if (RT_FAILURE(rc))
4878 return vmdkError(pExtent->pImage, rc, RT_SRC_POS, N_("VMDK: cannot write updated backup grain table in '%s'"), pExtent->pszFullname);
4879 }
4880#ifdef VBOX_WITH_VMDK_ESX
4881 if (RT_SUCCESS(rc) && pExtent->enmType == VMDKETYPE_ESX_SPARSE)
4882 {
4883 pExtent->uFreeSector = uGTSector + VMDK_BYTE2SECTOR(cbWrite);
4884 pExtent->fMetaDirty = true;
4885 }
4886#endif /* VBOX_WITH_VMDK_ESX */
4887
4888 LogFlowFunc(("leaving rc=%Rrc\n", rc));
4889
4890 return rc;
4891}
4892
4893/**
4894 * Internal - complete the grain allocation by updating disk grain table if required.
4895 */
4896static int vmdkAllocGrainAsyncComplete(void *pvBackendData, PVDIOCTX pIoCtx, void *pvUser, int rcReq)
4897{
4898 int rc = VINF_SUCCESS;
4899 PVMDKIMAGE pImage = (PVMDKIMAGE)pvBackendData;
4900 PVMDKGRAINALLOCASYNC pGrainAlloc = (PVMDKGRAINALLOCASYNC)pvUser;
4901 PVMDKEXTENT pExtent = pGrainAlloc->pExtent;
4902
4903 LogFlowFunc(("pvBackendData=%#p pIoCtx=%#p pvUser=%#p rcReq=%Rrc\n",
4904 pvBackendData, pIoCtx, pvUser, rcReq));
4905
4906 pGrainAlloc->cIoXfersPending--;
4907 if (!pGrainAlloc->cIoXfersPending && pGrainAlloc->fGTUpdateNeeded)
4908 rc = vmdkAllocGrainAsyncGTUpdate(pImage, pGrainAlloc->pExtent, pImage->pGTCache,
4909 pIoCtx, pGrainAlloc);
4910
4911 if (!pGrainAlloc->cIoXfersPending)
4912 {
4913 /* Grain allocation completed. */
4914 RTMemFree(pGrainAlloc);
4915 }
4916
4917 LogFlowFunc(("Leaving rc=%Rrc\n", rc));
4918 return rc;
4919}
4920
4921/**
4922 * Internal. Allocates a new grain table (if necessary) - async version.
4923 */
4924static int vmdkAllocGrainAsync(PVMDKGTCACHE pCache, PVMDKEXTENT pExtent,
4925 PVDIOCTX pIoCtx, uint64_t uSector,
4926 uint64_t cbWrite)
4927{
4928 uint64_t uGDIndex, uGTSector, uRGTSector, uGTBlock;
4929 uint64_t cbExtentSize;
4930 uint32_t aGTDataTmp[VMDK_GT_CACHELINE_SIZE];
4931 PVMDKGRAINALLOCASYNC pGrainAlloc = NULL;
4932 PVMDKIMAGE pImage = pExtent->pImage;
4933 int rc;
4934
4935 LogFlowFunc(("pCache=%#p pExtent=%#p pIoCtx=%#p uSector=%llu cbWrite=%llu\n",
4936 pCache, pExtent, pIoCtx, uSector, cbWrite));
4937
4938 AssertReturn(!(pExtent->pImage->uImageFlags & VD_VMDK_IMAGE_FLAGS_STREAM_OPTIMIZED), VERR_NOT_SUPPORTED);
4939
4940 pGrainAlloc = (PVMDKGRAINALLOCASYNC)RTMemAllocZ(sizeof(VMDKGRAINALLOCASYNC));
4941 if (!pGrainAlloc)
4942 return VERR_NO_MEMORY;
4943
4944 pGrainAlloc->pExtent = pExtent;
4945 pGrainAlloc->uSector = uSector;
4946
4947 uGDIndex = uSector / pExtent->cSectorsPerGDE;
4948 if (uGDIndex >= pExtent->cGDEntries)
4949 return VERR_OUT_OF_RANGE;
4950 uGTSector = pExtent->pGD[uGDIndex];
4951 if (pExtent->pRGD)
4952 uRGTSector = pExtent->pRGD[uGDIndex];
4953 else
4954 uRGTSector = 0; /**< avoid compiler warning */
4955 if (!uGTSector)
4956 {
4957 LogFlow(("Allocating new grain table\n"));
4958
4959 /* There is no grain table referenced by this grain directory
4960 * entry. So there is absolutely no data in this area. Allocate
4961 * a new grain table and put the reference to it in the GDs. */
4962 rc = vmdkFileGetSize(pExtent->pFile, &cbExtentSize);
4963 if (RT_FAILURE(rc))
4964 return vmdkError(pExtent->pImage, rc, RT_SRC_POS, N_("VMDK: error getting size in '%s'"), pExtent->pszFullname);
4965 Assert(!(cbExtentSize % 512));
4966
4967 pGrainAlloc->cbExtentOld = cbExtentSize;
4968
4969 cbExtentSize = RT_ALIGN_64(cbExtentSize, 512);
4970 uGTSector = VMDK_BYTE2SECTOR(cbExtentSize);
4971
4972 /* Normally the grain table is preallocated for hosted sparse extents
4973 * that support more than 32 bit sector numbers. So this shouldn't
4974 * ever happen on a valid extent. */
4975 if (uGTSector > UINT32_MAX)
4976 return VERR_VD_VMDK_INVALID_HEADER;
4977
4978 /* Write grain table by writing the required number of grain table
4979 * cache chunks. Allocate memory dynamically here or we flood the
4980 * metadata cache with very small entries.
4981 */
4982 size_t cbGTDataTmp = (pExtent->cGTEntries / VMDK_GT_CACHELINE_SIZE) * VMDK_GT_CACHELINE_SIZE * sizeof(uint32_t);
4983 uint32_t *paGTDataTmp = (uint32_t *)RTMemTmpAllocZ(cbGTDataTmp);
4984
4985 if (!paGTDataTmp)
4986 return VERR_NO_MEMORY;
4987
4988 memset(paGTDataTmp, '\0', cbGTDataTmp);
4989 rc = pImage->pInterfaceIOCallbacks->pfnWriteMetaAsync(pExtent->pImage->pInterfaceIO->pvUser,
4990 pExtent->pFile->pStorage,
4991 VMDK_SECTOR2BYTE(uGTSector),
4992 paGTDataTmp, cbGTDataTmp, pIoCtx,
4993 vmdkAllocGrainAsyncComplete, pGrainAlloc);
4994 if (rc == VERR_VD_ASYNC_IO_IN_PROGRESS)
4995 pGrainAlloc->cIoXfersPending++;
4996 else if (RT_FAILURE(rc))
4997 {
4998 RTMemTmpFree(paGTDataTmp);
4999 return vmdkError(pExtent->pImage, rc, RT_SRC_POS, N_("VMDK: cannot write grain table allocation in '%s'"), pExtent->pszFullname);
5000 }
5001
5002 if (pExtent->pRGD)
5003 {
5004 AssertReturn(!uRGTSector, VERR_VD_VMDK_INVALID_HEADER);
5005 rc = vmdkFileGetSize(pExtent->pFile, &cbExtentSize);
5006 if (RT_FAILURE(rc))
5007 return vmdkError(pExtent->pImage, rc, RT_SRC_POS, N_("VMDK: error getting size in '%s'"), pExtent->pszFullname);
5008 Assert(!(cbExtentSize % 512));
5009 uRGTSector = VMDK_BYTE2SECTOR(cbExtentSize);
5010
5011 /* Normally the redundant grain table is preallocated for hosted
5012 * sparse extents that support more than 32 bit sector numbers. So
5013 * this shouldn't ever happen on a valid extent. */
5014 if (uRGTSector > UINT32_MAX)
5015 {
5016 RTMemTmpFree(paGTDataTmp);
5017 return VERR_VD_VMDK_INVALID_HEADER;
5018 }
5019 /* Write backup grain table by writing the required number of grain
5020 * table cache chunks. */
5021 rc = pImage->pInterfaceIOCallbacks->pfnWriteMetaAsync(pExtent->pImage->pInterfaceIO->pvUser,
5022 pExtent->pFile->pStorage,
5023 VMDK_SECTOR2BYTE(uRGTSector),
5024 paGTDataTmp, cbGTDataTmp, pIoCtx,
5025 vmdkAllocGrainAsyncComplete, pGrainAlloc);
5026 if (rc == VERR_VD_ASYNC_IO_IN_PROGRESS)
5027 pGrainAlloc->cIoXfersPending++;
5028 else if (RT_FAILURE(rc))
5029 {
5030 RTMemTmpFree(paGTDataTmp);
5031 return vmdkError(pExtent->pImage, rc, RT_SRC_POS, N_("VMDK: cannot write backup grain table allocation in '%s'"), pExtent->pszFullname);
5032 }
5033 }
5034
5035 RTMemTmpFree(paGTDataTmp);
5036
5037 /* Update the grain directory on disk (doing it before writing the
5038 * grain table will result in a garbled extent if the operation is
5039 * aborted for some reason. Otherwise the worst that can happen is
5040 * some unused sectors in the extent. */
5041 uint32_t uGTSectorLE = RT_H2LE_U64(uGTSector);
5042 rc = pImage->pInterfaceIOCallbacks->pfnWriteMetaAsync(pExtent->pImage->pInterfaceIO->pvUser,
5043 pExtent->pFile->pStorage,
5044 VMDK_SECTOR2BYTE(pExtent->uSectorGD) + uGDIndex * sizeof(uGTSectorLE),
5045 &uGTSectorLE, sizeof(uGTSectorLE), pIoCtx,
5046 vmdkAllocGrainAsyncComplete, pGrainAlloc);
5047 if (rc == VERR_VD_ASYNC_IO_IN_PROGRESS)
5048 pGrainAlloc->cIoXfersPending++;
5049 else if (RT_FAILURE(rc))
5050 return vmdkError(pExtent->pImage, rc, RT_SRC_POS, N_("VMDK: cannot write grain directory entry in '%s'"), pExtent->pszFullname);
5051 if (pExtent->pRGD)
5052 {
5053 uint32_t uRGTSectorLE = RT_H2LE_U64(uRGTSector);
5054 rc = pImage->pInterfaceIOCallbacks->pfnWriteMetaAsync(pExtent->pImage->pInterfaceIO->pvUser,
5055 pExtent->pFile->pStorage,
5056 VMDK_SECTOR2BYTE(pExtent->uSectorRGD) + uGDIndex * sizeof(uGTSectorLE),
5057 &uRGTSectorLE, sizeof(uRGTSectorLE), pIoCtx,
5058 vmdkAllocGrainAsyncComplete, pGrainAlloc);
5059 if (rc == VERR_VD_ASYNC_IO_IN_PROGRESS)
5060 pGrainAlloc->cIoXfersPending++;
5061 else if (RT_FAILURE(rc))
5062 return vmdkError(pExtent->pImage, rc, RT_SRC_POS, N_("VMDK: cannot write backup grain directory entry in '%s'"), pExtent->pszFullname);
5063 }
5064
5065 /* As the final step update the in-memory copy of the GDs. */
5066 pExtent->pGD[uGDIndex] = uGTSector;
5067 if (pExtent->pRGD)
5068 pExtent->pRGD[uGDIndex] = uRGTSector;
5069 }
5070
5071 LogFlow(("uGTSector=%llu uRGTSector=%llu\n", uGTSector, uRGTSector));
5072 pGrainAlloc->uGTSector = uGTSector;
5073 pGrainAlloc->uRGTSector = uRGTSector;
5074
5075 rc = vmdkFileGetSize(pExtent->pFile, &cbExtentSize);
5076 if (RT_FAILURE(rc))
5077 return vmdkError(pExtent->pImage, rc, RT_SRC_POS, N_("VMDK: error getting size in '%s'"), pExtent->pszFullname);
5078 Assert(!(cbExtentSize % 512));
5079
5080 if (!pGrainAlloc->cbExtentOld)
5081 pGrainAlloc->cbExtentOld = cbExtentSize;
5082
5083 pGrainAlloc->cbExtentSize = cbExtentSize;
5084
5085 /* Write the data. Always a full grain, or we're in big trouble. */
5086 rc = pImage->pInterfaceIOCallbacks->pfnWriteUserAsync(pImage->pInterfaceIO->pvUser,
5087 pExtent->pFile->pStorage,
5088 cbExtentSize,
5089 pIoCtx, cbWrite,
5090 vmdkAllocGrainAsyncComplete, pGrainAlloc);
5091 if (rc == VERR_VD_ASYNC_IO_IN_PROGRESS)
5092 pGrainAlloc->cIoXfersPending++;
5093 else if (RT_FAILURE(rc))
5094 return vmdkError(pExtent->pImage, rc, RT_SRC_POS, N_("VMDK: cannot write allocated data block in '%s'"), pExtent->pszFullname);
5095
5096 rc = vmdkAllocGrainAsyncGTUpdate(pImage, pExtent, pCache, pIoCtx, pGrainAlloc);
5097
5098 if (!pGrainAlloc->cIoXfersPending)
5099 {
5100 /* Grain allocation completed. */
5101 RTMemFree(pGrainAlloc);
5102 }
5103
5104 LogFlowFunc(("leaving rc=%Rrc\n", rc));
5105
5106 return rc;
5107}
5108
5109
5110/** @copydoc VBOXHDDBACKEND::pfnCheckIfValid */
5111static int vmdkCheckIfValid(const char *pszFilename, PVDINTERFACE pVDIfsDisk)
5112{
5113 LogFlowFunc(("pszFilename=\"%s\"\n", pszFilename));
5114 int rc = VINF_SUCCESS;
5115 PVMDKIMAGE pImage;
5116
5117 if ( !pszFilename
5118 || !*pszFilename
5119 || strchr(pszFilename, '"'))
5120 {
5121 rc = VERR_INVALID_PARAMETER;
5122 goto out;
5123 }
5124
5125 pImage = (PVMDKIMAGE)RTMemAllocZ(sizeof(VMDKIMAGE));
5126 if (!pImage)
5127 {
5128 rc = VERR_NO_MEMORY;
5129 goto out;
5130 }
5131 pImage->pszFilename = pszFilename;
5132 pImage->pFile = NULL;
5133 pImage->pExtents = NULL;
5134 pImage->pFiles = NULL;
5135 pImage->pGTCache = NULL;
5136 pImage->pDescData = NULL;
5137 pImage->pVDIfsDisk = pVDIfsDisk;
5138 pImage->pVDIfsImage = pVDIfsDisk;
5139 /** @todo speed up this test open (VD_OPEN_FLAGS_INFO) by skipping as
5140 * much as possible in vmdkOpenImage. */
5141 rc = vmdkOpenImage(pImage, VD_OPEN_FLAGS_INFO | VD_OPEN_FLAGS_READONLY);
5142 vmdkFreeImage(pImage, false);
5143 RTMemFree(pImage);
5144
5145out:
5146 LogFlowFunc(("returns %Rrc\n", rc));
5147 return rc;
5148}
5149
5150/** @copydoc VBOXHDDBACKEND::pfnOpen */
5151static int vmdkOpen(const char *pszFilename, unsigned uOpenFlags,
5152 PVDINTERFACE pVDIfsDisk, PVDINTERFACE pVDIfsImage,
5153 void **ppBackendData)
5154{
5155 LogFlowFunc(("pszFilename=\"%s\" uOpenFlags=%#x pVDIfsDisk=%#p pVDIfsImage=%#p ppBackendData=%#p\n", pszFilename, uOpenFlags, pVDIfsDisk, pVDIfsImage, ppBackendData));
5156 int rc;
5157 PVMDKIMAGE pImage;
5158
5159 /* Check open flags. All valid flags are supported. */
5160 if (uOpenFlags & ~VD_OPEN_FLAGS_MASK)
5161 {
5162 rc = VERR_INVALID_PARAMETER;
5163 goto out;
5164 }
5165
5166 /* Check remaining arguments. */
5167 if ( !VALID_PTR(pszFilename)
5168 || !*pszFilename
5169 || strchr(pszFilename, '"'))
5170 {
5171 rc = VERR_INVALID_PARAMETER;
5172 goto out;
5173 }
5174
5175
5176 pImage = (PVMDKIMAGE)RTMemAllocZ(sizeof(VMDKIMAGE));
5177 if (!pImage)
5178 {
5179 rc = VERR_NO_MEMORY;
5180 goto out;
5181 }
5182 pImage->pszFilename = pszFilename;
5183 pImage->pFile = NULL;
5184 pImage->pExtents = NULL;
5185 pImage->pFiles = NULL;
5186 pImage->pGTCache = NULL;
5187 pImage->pDescData = NULL;
5188 pImage->pVDIfsDisk = pVDIfsDisk;
5189 pImage->pVDIfsImage = pVDIfsImage;
5190
5191 rc = vmdkOpenImage(pImage, uOpenFlags);
5192 if (RT_SUCCESS(rc))
5193 *ppBackendData = pImage;
5194
5195out:
5196 LogFlowFunc(("returns %Rrc (pBackendData=%#p)\n", rc, *ppBackendData));
5197 return rc;
5198}
5199
5200/** @copydoc VBOXHDDBACKEND::pfnCreate */
5201static int vmdkCreate(const char *pszFilename, uint64_t cbSize,
5202 unsigned uImageFlags, const char *pszComment,
5203 PCPDMMEDIAGEOMETRY pPCHSGeometry,
5204 PCPDMMEDIAGEOMETRY pLCHSGeometry, PCRTUUID pUuid,
5205 unsigned uOpenFlags, unsigned uPercentStart,
5206 unsigned uPercentSpan, PVDINTERFACE pVDIfsDisk,
5207 PVDINTERFACE pVDIfsImage, PVDINTERFACE pVDIfsOperation,
5208 void **ppBackendData)
5209{
5210 LogFlowFunc(("pszFilename=\"%s\" cbSize=%llu uImageFlags=%#x pszComment=\"%s\" pPCHSGeometry=%#p pLCHSGeometry=%#p Uuid=%RTuuid uOpenFlags=%#x uPercentStart=%u uPercentSpan=%u pVDIfsDisk=%#p pVDIfsImage=%#p pVDIfsOperation=%#p ppBackendData=%#p", pszFilename, cbSize, uImageFlags, pszComment, pPCHSGeometry, pLCHSGeometry, pUuid, uOpenFlags, uPercentStart, uPercentSpan, pVDIfsDisk, pVDIfsImage, pVDIfsOperation, ppBackendData));
5211 int rc;
5212 PVMDKIMAGE pImage;
5213
5214 PFNVDPROGRESS pfnProgress = NULL;
5215 void *pvUser = NULL;
5216 PVDINTERFACE pIfProgress = VDInterfaceGet(pVDIfsOperation,
5217 VDINTERFACETYPE_PROGRESS);
5218 PVDINTERFACEPROGRESS pCbProgress = NULL;
5219 if (pIfProgress)
5220 {
5221 pCbProgress = VDGetInterfaceProgress(pIfProgress);
5222 pfnProgress = pCbProgress->pfnProgress;
5223 pvUser = pIfProgress->pvUser;
5224 }
5225
5226 /* Check open flags. All valid flags are supported. */
5227 if (uOpenFlags & ~VD_OPEN_FLAGS_MASK)
5228 {
5229 rc = VERR_INVALID_PARAMETER;
5230 goto out;
5231 }
5232
5233 /* Check size. Maximum 2TB-64K for sparse images, otherwise unlimited. */
5234 if ( !cbSize
5235 || (!(uImageFlags & VD_IMAGE_FLAGS_FIXED) && cbSize >= _1T * 2 - _64K))
5236 {
5237 rc = VERR_VD_INVALID_SIZE;
5238 goto out;
5239 }
5240
5241 /* Check remaining arguments. */
5242 if ( !VALID_PTR(pszFilename)
5243 || !*pszFilename
5244 || strchr(pszFilename, '"')
5245 || !VALID_PTR(pPCHSGeometry)
5246 || !VALID_PTR(pLCHSGeometry)
5247#ifndef VBOX_WITH_VMDK_ESX
5248 || ( uImageFlags & VD_VMDK_IMAGE_FLAGS_ESX
5249 && !(uImageFlags & VD_IMAGE_FLAGS_FIXED))
5250#endif
5251 || ( (uImageFlags & VD_VMDK_IMAGE_FLAGS_STREAM_OPTIMIZED)
5252 && (uImageFlags & ~(VD_VMDK_IMAGE_FLAGS_STREAM_OPTIMIZED | VD_IMAGE_FLAGS_DIFF))))
5253 {
5254 rc = VERR_INVALID_PARAMETER;
5255 goto out;
5256 }
5257
5258 pImage = (PVMDKIMAGE)RTMemAllocZ(sizeof(VMDKIMAGE));
5259 if (!pImage)
5260 {
5261 rc = VERR_NO_MEMORY;
5262 goto out;
5263 }
5264 pImage->pszFilename = pszFilename;
5265 pImage->pFile = NULL;
5266 pImage->pExtents = NULL;
5267 pImage->pFiles = NULL;
5268 pImage->pGTCache = NULL;
5269 pImage->pDescData = NULL;
5270 pImage->pVDIfsDisk = pVDIfsDisk;
5271 pImage->pVDIfsImage = pVDIfsImage;
5272 /* Descriptors for split images can be pretty large, especially if the
5273 * filename is long. So prepare for the worst, and allocate quite some
5274 * memory for the descriptor in this case. */
5275 if (uImageFlags & VD_VMDK_IMAGE_FLAGS_SPLIT_2G)
5276 pImage->cbDescAlloc = VMDK_SECTOR2BYTE(200);
5277 else
5278 pImage->cbDescAlloc = VMDK_SECTOR2BYTE(20);
5279 pImage->pDescData = (char *)RTMemAllocZ(pImage->cbDescAlloc);
5280 if (!pImage->pDescData)
5281 {
5282 rc = VERR_NO_MEMORY;
5283 goto out;
5284 }
5285
5286 rc = vmdkCreateImage(pImage, cbSize, uImageFlags, pszComment,
5287 pPCHSGeometry, pLCHSGeometry, pUuid,
5288 pfnProgress, pvUser, uPercentStart, uPercentSpan);
5289 if (RT_SUCCESS(rc))
5290 {
5291 /* So far the image is opened in read/write mode. Make sure the
5292 * image is opened in read-only mode if the caller requested that. */
5293 if (uOpenFlags & VD_OPEN_FLAGS_READONLY)
5294 {
5295 vmdkFreeImage(pImage, false);
5296 rc = vmdkOpenImage(pImage, uOpenFlags);
5297 if (RT_FAILURE(rc))
5298 goto out;
5299 }
5300 *ppBackendData = pImage;
5301 }
5302 else
5303 {
5304 RTMemFree(pImage->pDescData);
5305 RTMemFree(pImage);
5306 }
5307
5308out:
5309 LogFlowFunc(("returns %Rrc (pBackendData=%#p)\n", rc, *ppBackendData));
5310 return rc;
5311}
5312
5313/**
5314 * Replaces a fragment of a string with the specified string.
5315 *
5316 * @returns Pointer to the allocated UTF-8 string.
5317 * @param pszWhere UTF-8 string to search in.
5318 * @param pszWhat UTF-8 string to search for.
5319 * @param pszByWhat UTF-8 string to replace the found string with.
5320 */
5321static char * vmdkStrReplace(const char *pszWhere, const char *pszWhat, const char *pszByWhat)
5322{
5323 AssertPtr(pszWhere);
5324 AssertPtr(pszWhat);
5325 AssertPtr(pszByWhat);
5326 const char *pszFoundStr = strstr(pszWhere, pszWhat);
5327 if (!pszFoundStr)
5328 return NULL;
5329 size_t cFinal = strlen(pszWhere) + 1 + strlen(pszByWhat) - strlen(pszWhat);
5330 char *pszNewStr = (char *)RTMemAlloc(cFinal);
5331 if (pszNewStr)
5332 {
5333 char *pszTmp = pszNewStr;
5334 memcpy(pszTmp, pszWhere, pszFoundStr - pszWhere);
5335 pszTmp += pszFoundStr - pszWhere;
5336 memcpy(pszTmp, pszByWhat, strlen(pszByWhat));
5337 pszTmp += strlen(pszByWhat);
5338 strcpy(pszTmp, pszFoundStr + strlen(pszWhat));
5339 }
5340 return pszNewStr;
5341}
5342
/** @copydoc VBOXHDDBACKEND::pfnRename
 *
 * Renames the image: rewrites the extent lines in the descriptor, moves all
 * extent files and (for split images) the separate descriptor file to names
 * derived from the new base name, then re-opens the image. On any failure
 * every rename is rolled back and the original descriptor restored, so the
 * image on disk stays consistent. Raw-disk images cannot be renamed.
 */
static int vmdkRename(void *pBackendData, const char *pszFilename)
{
    LogFlowFunc(("pBackendData=%#p pszFilename=%#p\n", pBackendData, pszFilename));

    PVMDKIMAGE pImage = (PVMDKIMAGE)pBackendData;
    int rc = VINF_SUCCESS;
    char **apszOldName = NULL;      /* old file names, for rollback (cExtents + 1 slots, last = descriptor) */
    char **apszNewName = NULL;      /* new file names, parallel to apszOldName */
    char **apszNewLines = NULL;     /* replacement descriptor extent lines */
    char *pszOldDescName = NULL;
    bool fImageFreed = false;       /* set once vmdkFreeImage() ran, so rollback skips the second free */
    bool fEmbeddedDesc = false;
    /* NOTE(review): pImage is dereferenced here before the !pImage check
     * below — a NULL backend pointer would crash before being rejected. */
    unsigned cExtents = pImage->cExtents;
    char *pszNewBaseName = NULL;
    char *pszOldBaseName = NULL;
    char *pszNewFullName = NULL;
    char *pszOldFullName = NULL;
    const char *pszOldImageName;
    unsigned i, line;
    VMDKDESCRIPTOR DescriptorCopy;  /* full snapshot of the descriptor lines for rollback */
    VMDKEXTENT ExtentCopy;          /* snapshot of extent 0 when the descriptor is embedded */

    memset(&DescriptorCopy, 0, sizeof(DescriptorCopy));

    /* Check arguments. Renaming raw-disk images is not supported. */
    if (   !pImage
        || (pImage->uImageFlags & VD_VMDK_IMAGE_FLAGS_RAWDISK)
        || !VALID_PTR(pszFilename)
        || !*pszFilename)
    {
        rc = VERR_INVALID_PARAMETER;
        goto out;
    }

    /*
     * Allocate an array to store both old and new names of renamed files
     * in case we have to roll back the changes. Arrays are initialized
     * with zeros. We actually save stuff when and if we change it.
     */
    apszOldName = (char **)RTMemTmpAllocZ((cExtents + 1) * sizeof(char*));
    apszNewName = (char **)RTMemTmpAllocZ((cExtents + 1) * sizeof(char*));
    apszNewLines = (char **)RTMemTmpAllocZ((cExtents) * sizeof(char*));
    if (!apszOldName || !apszNewName || !apszNewLines)
    {
        rc = VERR_NO_MEMORY;
        goto out;
    }

    /* Save the descriptor size and position. */
    if (pImage->pDescData)
    {
        /* Separate descriptor file. */
        fEmbeddedDesc = false;
    }
    else
    {
        /* Embedded descriptor file. */
        ExtentCopy = pImage->pExtents[0];
        fEmbeddedDesc = true;
    }
    /* Save the descriptor content so rollback can restore it verbatim. */
    DescriptorCopy.cLines = pImage->Descriptor.cLines;
    for (i = 0; i < DescriptorCopy.cLines; i++)
    {
        DescriptorCopy.aLines[i] = RTStrDup(pImage->Descriptor.aLines[i]);
        if (!DescriptorCopy.aLines[i])
        {
            rc = VERR_NO_MEMORY;
            goto out;
        }
    }

    /* Prepare both old and new base names used for string replacement.
     * Base names (no directory, no extension) rewrite descriptor lines;
     * full names (with directory) rewrite extent file paths. */
    pszNewBaseName = RTStrDup(RTPathFilename(pszFilename));
    RTPathStripExt(pszNewBaseName);
    pszOldBaseName = RTStrDup(RTPathFilename(pImage->pszFilename));
    RTPathStripExt(pszOldBaseName);
    /* Prepare both old and new full names used for string replacement. */
    pszNewFullName = RTStrDup(pszFilename);
    RTPathStripExt(pszNewFullName);
    pszOldFullName = RTStrDup(pImage->pszFilename);
    RTPathStripExt(pszOldFullName);

    /* --- Up to this point we have not done any damage yet. --- */

    /* Save the old name for easy access to the old descriptor file. */
    pszOldDescName = RTStrDup(pImage->pszFilename);
    /* Save old image name. */
    pszOldImageName = pImage->pszFilename;

    /* Update the descriptor with modified extent names. Walks the linked
     * list of extent lines via aNextLines. */
    for (i = 0, line = pImage->Descriptor.uFirstExtent;
         i < cExtents;
         i++, line = pImage->Descriptor.aNextLines[line])
    {
        /* Assume that vmdkStrReplace will fail. */
        rc = VERR_NO_MEMORY;
        /* Update the descriptor. */
        apszNewLines[i] = vmdkStrReplace(pImage->Descriptor.aLines[line],
                                         pszOldBaseName, pszNewBaseName);
        if (!apszNewLines[i])
            goto rollback;
        pImage->Descriptor.aLines[line] = apszNewLines[i];
    }
    /* Make sure the descriptor gets written back. */
    pImage->Descriptor.fDirty = true;
    /* Flush the descriptor now, in case it is embedded. */
    (void)vmdkFlushImage(pImage);

    /* Close and rename/move extents. apszOldName[i] stays NULL until the
     * move succeeded, so rollback only undoes completed moves. */
    for (i = 0; i < cExtents; i++)
    {
        PVMDKEXTENT pExtent = &pImage->pExtents[i];
        /* Compose new name for the extent. */
        apszNewName[i] = vmdkStrReplace(pExtent->pszFullname,
                                        pszOldFullName, pszNewFullName);
        if (!apszNewName[i])
            goto rollback;
        /* Close the extent file. */
        vmdkFileClose(pImage, &pExtent->pFile, false);
        /* Rename the extent file. */
        rc = RTFileMove(pExtent->pszFullname, apszNewName[i], 0);
        if (RT_FAILURE(rc))
            goto rollback;
        /* Remember the old name. */
        apszOldName[i] = RTStrDup(pExtent->pszFullname);
    }
    /* Release all old stuff. */
    vmdkFreeImage(pImage, false);

    fImageFreed = true;

    /* Last elements of new/old name arrays are intended for
     * storing descriptor's names.
     */
    apszNewName[cExtents] = RTStrDup(pszFilename);
    /* Rename the descriptor file if it's separate. */
    if (!fEmbeddedDesc)
    {
        rc = RTFileMove(pImage->pszFilename, apszNewName[cExtents], 0);
        if (RT_FAILURE(rc))
            goto rollback;
        /* Save old name only if we may need to change it back. */
        apszOldName[cExtents] = RTStrDup(pszFilename);
    }

    /* Update pImage with the new information. Note: stores the caller's
     * pszFilename pointer directly — presumably the caller guarantees its
     * lifetime; same convention as vmdkOpen. */
    pImage->pszFilename = pszFilename;

    /* Open the new image. */
    rc = vmdkOpenImage(pImage, pImage->uOpenFlags);
    if (RT_SUCCESS(rc))
        goto out;

rollback:
    /* Roll back all changes in case of failure. */
    if (RT_FAILURE(rc))
    {
        int rrc;
        if (!fImageFreed)
        {
            /*
             * Some extents may have been closed, close the rest. We will
             * re-open the whole thing later.
             */
            vmdkFreeImage(pImage, false);
        }
        /* Rename files back. Only slots with a saved old name were moved. */
        for (i = 0; i <= cExtents; i++)
        {
            if (apszOldName[i])
            {
                rrc = RTFileMove(apszNewName[i], apszOldName[i], 0);
                AssertRC(rrc);
            }
        }
        /* Restore the old descriptor. */
        PVMDKFILE pFile;
        rrc = vmdkFileOpen(pImage, &pFile, pszOldDescName,
                           RTFILE_O_READWRITE | RTFILE_O_OPEN | RTFILE_O_DENY_WRITE, false);
        AssertRC(rrc);
        if (fEmbeddedDesc)
        {
            /* Temporarily implant the snapshot extent so the descriptor
             * write below targets the right file/offset. */
            ExtentCopy.pFile = pFile;
            pImage->pExtents = &ExtentCopy;
        }
        else
        {
            /* Shouldn't be null for separate descriptor.
             * There will be no access to the actual content.
             */
            pImage->pDescData = pszOldDescName;
            pImage->pFile = pFile;
        }
        pImage->Descriptor = DescriptorCopy;
        vmdkWriteDescriptor(pImage);
        vmdkFileClose(pImage, &pFile, false);
        /* Get rid of the stuff we implanted. */
        pImage->pExtents = NULL;
        pImage->pFile = NULL;
        pImage->pDescData = NULL;
        /* Re-open the image back. */
        pImage->pszFilename = pszOldImageName;
        rrc = vmdkOpenImage(pImage, pImage->uOpenFlags);
        AssertRC(rrc);
    }

out:
    /* Common cleanup of all temporary allocations, on both paths. */
    for (i = 0; i < DescriptorCopy.cLines; i++)
        if (DescriptorCopy.aLines[i])
            RTStrFree(DescriptorCopy.aLines[i]);
    if (apszOldName)
    {
        for (i = 0; i <= cExtents; i++)
            if (apszOldName[i])
                RTStrFree(apszOldName[i]);
        RTMemTmpFree(apszOldName);
    }
    if (apszNewName)
    {
        for (i = 0; i <= cExtents; i++)
            if (apszNewName[i])
                RTStrFree(apszNewName[i]);
        RTMemTmpFree(apszNewName);
    }
    if (apszNewLines)
    {
        for (i = 0; i < cExtents; i++)
            if (apszNewLines[i])
                RTStrFree(apszNewLines[i]);
        RTMemTmpFree(apszNewLines);
    }
    if (pszOldDescName)
        RTStrFree(pszOldDescName);
    if (pszOldBaseName)
        RTStrFree(pszOldBaseName);
    if (pszNewBaseName)
        RTStrFree(pszNewBaseName);
    if (pszOldFullName)
        RTStrFree(pszOldFullName);
    if (pszNewFullName)
        RTStrFree(pszNewFullName);
    LogFlowFunc(("returns %Rrc\n", rc));
    return rc;
}
5589
5590/** @copydoc VBOXHDDBACKEND::pfnClose */
5591static int vmdkClose(void *pBackendData, bool fDelete)
5592{
5593 LogFlowFunc(("pBackendData=%#p fDelete=%d\n", pBackendData, fDelete));
5594 PVMDKIMAGE pImage = (PVMDKIMAGE)pBackendData;
5595 int rc = VINF_SUCCESS;
5596
5597 /* Freeing a never allocated image (e.g. because the open failed) is
5598 * not signalled as an error. After all nothing bad happens. */
5599 if (pImage)
5600 {
5601 vmdkFreeImage(pImage, fDelete);
5602 RTMemFree(pImage);
5603 }
5604
5605 LogFlowFunc(("returns %Rrc\n", rc));
5606 return rc;
5607}
5608
/** @copydoc VBOXHDDBACKEND::pfnRead */
static int vmdkRead(void *pBackendData, uint64_t uOffset, void *pvBuf,
                    size_t cbToRead, size_t *pcbActuallyRead)
{
    LogFlowFunc(("pBackendData=%#p uOffset=%llu pvBuf=%#p cbToRead=%zu pcbActuallyRead=%#p\n", pBackendData, uOffset, pvBuf, cbToRead, pcbActuallyRead));
    PVMDKIMAGE pImage = (PVMDKIMAGE)pBackendData;
    PVMDKEXTENT pExtent;
    uint64_t uSectorExtentRel;  /* Read position in sectors, relative to the extent start. */
    uint64_t uSectorExtentAbs;  /* Data location in sectors within the extent file. */
    int rc;

    AssertPtr(pImage);
    Assert(uOffset % 512 == 0);
    Assert(cbToRead % 512 == 0);

    /* Reject zero-length reads and reads past the nominal disk size. */
    if (   uOffset + cbToRead > pImage->cbSize
        || cbToRead == 0)
    {
        rc = VERR_INVALID_PARAMETER;
        goto out;
    }

    /* Locate the extent covering uOffset and the sector offset inside it. */
    rc = vmdkFindExtent(pImage, VMDK_BYTE2SECTOR(uOffset),
                        &pExtent, &uSectorExtentRel);
    if (RT_FAILURE(rc))
        goto out;

    /* Check access permissions as defined in the extent descriptor. */
    if (pExtent->enmAccess == VMDKACCESS_NOACCESS)
    {
        rc = VERR_VD_VMDK_INVALID_STATE;
        goto out;
    }

    /* Clip read range to remain in this extent. */
    cbToRead = RT_MIN(cbToRead, VMDK_SECTOR2BYTE(pExtent->uSectorOffset + pExtent->cNominalSectors - uSectorExtentRel));

    /* Handle the read according to the current extent type. */
    switch (pExtent->enmType)
    {
        case VMDKETYPE_HOSTED_SPARSE:
#ifdef VBOX_WITH_VMDK_ESX
        case VMDKETYPE_ESX_SPARSE:
#endif /* VBOX_WITH_VMDK_ESX */
            /* Translate the logical sector into its location in the extent
             * file via the (cached) grain table. */
            rc = vmdkGetSector(pImage->pGTCache, pExtent, uSectorExtentRel,
                               &uSectorExtentAbs);
            if (RT_FAILURE(rc))
                goto out;
            /* Clip read range to at most the rest of the grain. */
            cbToRead = RT_MIN(cbToRead, VMDK_SECTOR2BYTE(pExtent->cSectorsPerGrain - uSectorExtentRel % pExtent->cSectorsPerGrain));
            Assert(!(cbToRead % 512));
            if (uSectorExtentAbs == 0)
                /* Grain table entry 0 means the grain is unallocated; report
                 * the block as free so the caller can handle it. */
                rc = VERR_VD_BLOCK_FREE;
            else
            {
                if (pImage->uImageFlags & VD_VMDK_IMAGE_FLAGS_STREAM_OPTIMIZED)
                {
                    /* Stream optimized images store compressed grains. The
                     * whole grain is inflated into pExtent->pvGrain unless it
                     * is already cached there (uGrainSector tracks which). */
                    uint32_t uSectorInGrain = uSectorExtentRel % pExtent->cSectorsPerGrain;
                    uSectorExtentAbs -= uSectorInGrain;
                    uint64_t uLBA;
                    if (pExtent->uGrainSector != uSectorExtentAbs)
                    {
                        rc = vmdkFileInflateAt(pExtent->pFile, VMDK_SECTOR2BYTE(uSectorExtentAbs),
                                               pExtent->pvGrain, VMDK_SECTOR2BYTE(pExtent->cSectorsPerGrain), VMDK_MARKER_IGNORE, &uLBA, NULL);
                        if (RT_FAILURE(rc))
                        {
                            /* Invalidate the grain cache on decompression failure. */
                            pExtent->uGrainSector = 0;
                            AssertRC(rc);
                            goto out;
                        }
                        pExtent->uGrainSector = uSectorExtentAbs;
                        Assert(uLBA == uSectorExtentRel);
                    }
                    /* Copy the requested piece out of the decompressed grain. */
                    memcpy(pvBuf, (uint8_t *)pExtent->pvGrain + VMDK_SECTOR2BYTE(uSectorInGrain), cbToRead);
                }
                else
                {
                    rc = vmdkFileReadAt(pExtent->pFile,
                                        VMDK_SECTOR2BYTE(uSectorExtentAbs),
                                        pvBuf, cbToRead, NULL);
                }
            }
            break;
        case VMDKETYPE_VMFS:
        case VMDKETYPE_FLAT:
            /* Flat extents map 1:1 onto the file; read at the relative offset. */
            rc = vmdkFileReadAt(pExtent->pFile,
                                VMDK_SECTOR2BYTE(uSectorExtentRel),
                                pvBuf, cbToRead, NULL);
            break;
        case VMDKETYPE_ZERO:
            /* Zero extents have no backing storage at all. */
            memset(pvBuf, '\0', cbToRead);
            break;
    }
    if (pcbActuallyRead)
        *pcbActuallyRead = cbToRead;

out:
    LogFlowFunc(("returns %Rrc\n", rc));
    return rc;
}
5709
/** @copydoc VBOXHDDBACKEND::pfnWrite */
static int vmdkWrite(void *pBackendData, uint64_t uOffset, const void *pvBuf,
                     size_t cbToWrite, size_t *pcbWriteProcess,
                     size_t *pcbPreRead, size_t *pcbPostRead, unsigned fWrite)
{
    LogFlowFunc(("pBackendData=%#p uOffset=%llu pvBuf=%#p cbToWrite=%zu pcbWriteProcess=%#p pcbPreRead=%#p pcbPostRead=%#p\n", pBackendData, uOffset, pvBuf, cbToWrite, pcbWriteProcess, pcbPreRead, pcbPostRead));
    PVMDKIMAGE pImage = (PVMDKIMAGE)pBackendData;
    PVMDKEXTENT pExtent;
    uint64_t uSectorExtentRel;  /* Write position in sectors, relative to the extent start. */
    uint64_t uSectorExtentAbs;  /* Data location in sectors within the extent file. */
    int rc;

    AssertPtr(pImage);
    Assert(uOffset % 512 == 0);
    Assert(cbToWrite % 512 == 0);

    if (pImage->uOpenFlags & VD_OPEN_FLAGS_READONLY)
    {
        rc = VERR_VD_IMAGE_READ_ONLY;
        goto out;
    }

    if (cbToWrite == 0)
    {
        rc = VERR_INVALID_PARAMETER;
        goto out;
    }

    /* No size check here, will do that later when the extent is located.
     * There are sparse images out there which according to the spec are
     * invalid, because the total size is not a multiple of the grain size.
     * Also for sparse images which are stitched together in odd ways (not at
     * grain boundaries, and with the nominal size not being a multiple of the
     * grain size), this would prevent writing to the last grain. */

    rc = vmdkFindExtent(pImage, VMDK_BYTE2SECTOR(uOffset),
                        &pExtent, &uSectorExtentRel);
    if (RT_FAILURE(rc))
        goto out;

    /* Check access permissions as defined in the extent descriptor. */
    if (pExtent->enmAccess != VMDKACCESS_READWRITE)
    {
        rc = VERR_VD_VMDK_INVALID_STATE;
        goto out;
    }

    /* Handle the write according to the current extent type. */
    switch (pExtent->enmType)
    {
        case VMDKETYPE_HOSTED_SPARSE:
#ifdef VBOX_WITH_VMDK_ESX
        case VMDKETYPE_ESX_SPARSE:
#endif /* VBOX_WITH_VMDK_ESX */
            /* Translate the logical sector into its location in the extent
             * file via the (cached) grain table. */
            rc = vmdkGetSector(pImage->pGTCache, pExtent, uSectorExtentRel,
                               &uSectorExtentAbs);
            if (RT_FAILURE(rc))
                goto out;
            /* Clip write range to at most the rest of the grain. */
            cbToWrite = RT_MIN(cbToWrite, VMDK_SECTOR2BYTE(pExtent->cSectorsPerGrain - uSectorExtentRel % pExtent->cSectorsPerGrain));
            /* Stream optimized images are append-only: writing before the
             * last grain already emitted is rejected. */
            if (   pImage->uImageFlags & VD_VMDK_IMAGE_FLAGS_STREAM_OPTIMIZED
                && uSectorExtentRel < (uint64_t)pExtent->uLastGrainWritten * pExtent->cSectorsPerGrain)
            {
                rc = VERR_VD_VMDK_INVALID_WRITE;
                goto out;
            }
            if (uSectorExtentAbs == 0)
            {
                /* Grain table entry 0: the grain is not allocated yet. */
                if (cbToWrite == VMDK_SECTOR2BYTE(pExtent->cSectorsPerGrain))
                {
                    /* Full block write to a previously unallocated block.
                     * Check if the caller wants to avoid the automatic alloc. */
                    if (!(fWrite & VD_WRITE_NO_ALLOC))
                    {
                        /* Allocate GT and find out where to store the grain. */
                        rc = vmdkAllocGrain(pImage->pGTCache, pExtent,
                                            uSectorExtentRel, pvBuf, cbToWrite);
                    }
                    else
                        rc = VERR_VD_BLOCK_FREE;
                    *pcbPreRead = 0;
                    *pcbPostRead = 0;
                }
                else
                {
                    /* Partial grain write: report how much the caller must
                     * read before/after this range to assemble a full grain,
                     * and leave the block free for now. */
                    /* Clip write range to remain in this extent. */
                    cbToWrite = RT_MIN(cbToWrite, VMDK_SECTOR2BYTE(pExtent->uSectorOffset + pExtent->cNominalSectors - uSectorExtentRel));
                    *pcbPreRead = VMDK_SECTOR2BYTE(uSectorExtentRel % pExtent->cSectorsPerGrain);
                    *pcbPostRead = VMDK_SECTOR2BYTE(pExtent->cSectorsPerGrain) - cbToWrite - *pcbPreRead;
                    rc = VERR_VD_BLOCK_FREE;
                }
            }
            else
            {
                if (pImage->uImageFlags & VD_VMDK_IMAGE_FLAGS_STREAM_OPTIMIZED)
                {
                    /* Rewrite of the most recently emitted compressed grain:
                     * inflate it (unless cached), patch it in memory, then
                     * deflate it back to the same file location. */
                    uint32_t uSectorInGrain = uSectorExtentRel % pExtent->cSectorsPerGrain;
                    uSectorExtentAbs -= uSectorInGrain;
                    uint64_t uLBA = uSectorExtentRel;
                    if (   pExtent->uGrainSector != uSectorExtentAbs
                        || pExtent->uGrainSector != pExtent->uLastGrainSector)
                    {
                        rc = vmdkFileInflateAt(pExtent->pFile, VMDK_SECTOR2BYTE(uSectorExtentAbs),
                                               pExtent->pvGrain, VMDK_SECTOR2BYTE(pExtent->cSectorsPerGrain), VMDK_MARKER_IGNORE, &uLBA, NULL);
                        if (RT_FAILURE(rc))
                        {
                            /* Invalidate the grain cache on failure. */
                            pExtent->uGrainSector = 0;
                            pExtent->uLastGrainSector = 0;
                            AssertRC(rc);
                            goto out;
                        }
                        pExtent->uGrainSector = uSectorExtentAbs;
                        pExtent->uLastGrainSector = uSectorExtentAbs;
                        Assert(uLBA == uSectorExtentRel);
                    }
                    memcpy((uint8_t *)pExtent->pvGrain + VMDK_SECTOR2BYTE(uSectorInGrain), pvBuf, cbToWrite);
                    uint32_t cbGrain = 0;
                    rc = vmdkFileDeflateAt(pExtent->pFile,
                                           VMDK_SECTOR2BYTE(uSectorExtentAbs),
                                           pExtent->pvGrain, VMDK_SECTOR2BYTE(pExtent->cSectorsPerGrain),
                                           VMDK_MARKER_IGNORE, uLBA, &cbGrain);
                    if (RT_FAILURE(rc))
                    {
                        pExtent->uGrainSector = 0;
                        pExtent->uLastGrainSector = 0;
                        AssertRC(rc);
                        return vmdkError(pExtent->pImage, rc, RT_SRC_POS, N_("VMDK: cannot write compressed data block in '%s'"), pExtent->pszFullname);
                    }
                    /* Track the compressed grain (sector-aligned size) just written. */
                    cbGrain = RT_ALIGN(cbGrain, 512);
                    pExtent->uLastGrainSector = uSectorExtentAbs;
                    pExtent->uLastGrainWritten = uSectorExtentRel / pExtent->cSectorsPerGrain;
                    pExtent->cbLastGrainWritten = cbGrain;

                    /* Re-emit the footer (if the extent has one) and the
                     * end-of-stream marker directly after the new grain. */
                    uint64_t uEOSOff = 0;
                    if (pExtent->fFooter)
                    {
                        uEOSOff = 512;
                        rc = vmdkWriteMetaSparseExtent(pExtent, VMDK_SECTOR2BYTE(uSectorExtentAbs) + RT_ALIGN(cbGrain, 512));
                        if (RT_FAILURE(rc))
                            return vmdkError(pExtent->pImage, rc, RT_SRC_POS, N_("VMDK: cannot write footer after data block in '%s'"), pExtent->pszFullname);
                    }
                    uint8_t aEOS[512];
                    memset(aEOS, '\0', sizeof(aEOS));
                    rc = vmdkFileWriteAt(pExtent->pFile,
                                         VMDK_SECTOR2BYTE(uSectorExtentAbs) + RT_ALIGN(cbGrain, 512) + uEOSOff,
                                         aEOS, sizeof(aEOS), NULL);
                    if (RT_FAILURE(rc))
                        return vmdkError(pExtent->pImage, rc, RT_SRC_POS, N_("VMDK: cannot write end-of stream marker after data block in '%s'"), pExtent->pszFullname);
                }
                else
                {
                    /* Uncompressed sparse grain: write in place. */
                    rc = vmdkFileWriteAt(pExtent->pFile,
                                         VMDK_SECTOR2BYTE(uSectorExtentAbs),
                                         pvBuf, cbToWrite, NULL);
                }
            }
            break;
        case VMDKETYPE_VMFS:
        case VMDKETYPE_FLAT:
            /* Clip write range to remain in this extent. */
            cbToWrite = RT_MIN(cbToWrite, VMDK_SECTOR2BYTE(pExtent->uSectorOffset + pExtent->cNominalSectors - uSectorExtentRel));
            rc = vmdkFileWriteAt(pExtent->pFile,
                                 VMDK_SECTOR2BYTE(uSectorExtentRel),
                                 pvBuf, cbToWrite, NULL);
            break;
        case VMDKETYPE_ZERO:
            /* Writes to zero extents are silently dropped.
             * Clip write range to remain in this extent. */
            cbToWrite = RT_MIN(cbToWrite, VMDK_SECTOR2BYTE(pExtent->uSectorOffset + pExtent->cNominalSectors - uSectorExtentRel));
            break;
    }
    if (pcbWriteProcess)
        *pcbWriteProcess = cbToWrite;

out:
    LogFlowFunc(("returns %Rrc\n", rc));
    return rc;
}
5887
5888/** @copydoc VBOXHDDBACKEND::pfnFlush */
5889static int vmdkFlush(void *pBackendData)
5890{
5891 LogFlowFunc(("pBackendData=%#p\n", pBackendData));
5892 PVMDKIMAGE pImage = (PVMDKIMAGE)pBackendData;
5893 int rc;
5894
5895 AssertPtr(pImage);
5896
5897 rc = vmdkFlushImage(pImage);
5898 LogFlowFunc(("returns %Rrc\n", rc));
5899 return rc;
5900}
5901
5902/** @copydoc VBOXHDDBACKEND::pfnGetVersion */
5903static unsigned vmdkGetVersion(void *pBackendData)
5904{
5905 LogFlowFunc(("pBackendData=%#p\n", pBackendData));
5906 PVMDKIMAGE pImage = (PVMDKIMAGE)pBackendData;
5907
5908 AssertPtr(pImage);
5909
5910 if (pImage)
5911 return VMDK_IMAGE_VERSION;
5912 else
5913 return 0;
5914}
5915
5916/** @copydoc VBOXHDDBACKEND::pfnGetSize */
5917static uint64_t vmdkGetSize(void *pBackendData)
5918{
5919 LogFlowFunc(("pBackendData=%#p\n", pBackendData));
5920 PVMDKIMAGE pImage = (PVMDKIMAGE)pBackendData;
5921
5922 AssertPtr(pImage);
5923
5924 if (pImage)
5925 return pImage->cbSize;
5926 else
5927 return 0;
5928}
5929
5930/** @copydoc VBOXHDDBACKEND::pfnGetFileSize */
5931static uint64_t vmdkGetFileSize(void *pBackendData)
5932{
5933 LogFlowFunc(("pBackendData=%#p\n", pBackendData));
5934 PVMDKIMAGE pImage = (PVMDKIMAGE)pBackendData;
5935 uint64_t cb = 0;
5936
5937 AssertPtr(pImage);
5938
5939 if (pImage)
5940 {
5941 uint64_t cbFile;
5942 if (pImage->pFile != NULL)
5943 {
5944 int rc = vmdkFileGetSize(pImage->pFile, &cbFile);
5945 if (RT_SUCCESS(rc))
5946 cb += cbFile;
5947 }
5948 for (unsigned i = 0; i < pImage->cExtents; i++)
5949 {
5950 if (pImage->pExtents[i].pFile != NULL)
5951 {
5952 int rc = vmdkFileGetSize(pImage->pExtents[i].pFile, &cbFile);
5953 if (RT_SUCCESS(rc))
5954 cb += cbFile;
5955 }
5956 }
5957 }
5958
5959 LogFlowFunc(("returns %lld\n", cb));
5960 return cb;
5961}
5962
5963/** @copydoc VBOXHDDBACKEND::pfnGetPCHSGeometry */
5964static int vmdkGetPCHSGeometry(void *pBackendData,
5965 PPDMMEDIAGEOMETRY pPCHSGeometry)
5966{
5967 LogFlowFunc(("pBackendData=%#p pPCHSGeometry=%#p\n", pBackendData, pPCHSGeometry));
5968 PVMDKIMAGE pImage = (PVMDKIMAGE)pBackendData;
5969 int rc;
5970
5971 AssertPtr(pImage);
5972
5973 if (pImage)
5974 {
5975 if (pImage->PCHSGeometry.cCylinders)
5976 {
5977 *pPCHSGeometry = pImage->PCHSGeometry;
5978 rc = VINF_SUCCESS;
5979 }
5980 else
5981 rc = VERR_VD_GEOMETRY_NOT_SET;
5982 }
5983 else
5984 rc = VERR_VD_NOT_OPENED;
5985
5986 LogFlowFunc(("returns %Rrc (PCHS=%u/%u/%u)\n", rc, pPCHSGeometry->cCylinders, pPCHSGeometry->cHeads, pPCHSGeometry->cSectors));
5987 return rc;
5988}
5989
5990/** @copydoc VBOXHDDBACKEND::pfnSetPCHSGeometry */
5991static int vmdkSetPCHSGeometry(void *pBackendData,
5992 PCPDMMEDIAGEOMETRY pPCHSGeometry)
5993{
5994 LogFlowFunc(("pBackendData=%#p pPCHSGeometry=%#p PCHS=%u/%u/%u\n", pBackendData, pPCHSGeometry, pPCHSGeometry->cCylinders, pPCHSGeometry->cHeads, pPCHSGeometry->cSectors));
5995 PVMDKIMAGE pImage = (PVMDKIMAGE)pBackendData;
5996 int rc;
5997
5998 AssertPtr(pImage);
5999
6000 if (pImage)
6001 {
6002 if (pImage->uOpenFlags & VD_OPEN_FLAGS_READONLY)
6003 {
6004 rc = VERR_VD_IMAGE_READ_ONLY;
6005 goto out;
6006 }
6007 rc = vmdkDescSetPCHSGeometry(pImage, pPCHSGeometry);
6008 if (RT_FAILURE(rc))
6009 goto out;
6010
6011 pImage->PCHSGeometry = *pPCHSGeometry;
6012 rc = VINF_SUCCESS;
6013 }
6014 else
6015 rc = VERR_VD_NOT_OPENED;
6016
6017out:
6018 LogFlowFunc(("returns %Rrc\n", rc));
6019 return rc;
6020}
6021
6022/** @copydoc VBOXHDDBACKEND::pfnGetLCHSGeometry */
6023static int vmdkGetLCHSGeometry(void *pBackendData,
6024 PPDMMEDIAGEOMETRY pLCHSGeometry)
6025{
6026 LogFlowFunc(("pBackendData=%#p pLCHSGeometry=%#p\n", pBackendData, pLCHSGeometry));
6027 PVMDKIMAGE pImage = (PVMDKIMAGE)pBackendData;
6028 int rc;
6029
6030 AssertPtr(pImage);
6031
6032 if (pImage)
6033 {
6034 if (pImage->LCHSGeometry.cCylinders)
6035 {
6036 *pLCHSGeometry = pImage->LCHSGeometry;
6037 rc = VINF_SUCCESS;
6038 }
6039 else
6040 rc = VERR_VD_GEOMETRY_NOT_SET;
6041 }
6042 else
6043 rc = VERR_VD_NOT_OPENED;
6044
6045 LogFlowFunc(("returns %Rrc (LCHS=%u/%u/%u)\n", rc, pLCHSGeometry->cCylinders, pLCHSGeometry->cHeads, pLCHSGeometry->cSectors));
6046 return rc;
6047}
6048
6049/** @copydoc VBOXHDDBACKEND::pfnSetLCHSGeometry */
6050static int vmdkSetLCHSGeometry(void *pBackendData,
6051 PCPDMMEDIAGEOMETRY pLCHSGeometry)
6052{
6053 LogFlowFunc(("pBackendData=%#p pLCHSGeometry=%#p LCHS=%u/%u/%u\n", pBackendData, pLCHSGeometry, pLCHSGeometry->cCylinders, pLCHSGeometry->cHeads, pLCHSGeometry->cSectors));
6054 PVMDKIMAGE pImage = (PVMDKIMAGE)pBackendData;
6055 int rc;
6056
6057 AssertPtr(pImage);
6058
6059 if (pImage)
6060 {
6061 if (pImage->uOpenFlags & VD_OPEN_FLAGS_READONLY)
6062 {
6063 rc = VERR_VD_IMAGE_READ_ONLY;
6064 goto out;
6065 }
6066 rc = vmdkDescSetLCHSGeometry(pImage, pLCHSGeometry);
6067 if (RT_FAILURE(rc))
6068 goto out;
6069
6070 pImage->LCHSGeometry = *pLCHSGeometry;
6071 rc = VINF_SUCCESS;
6072 }
6073 else
6074 rc = VERR_VD_NOT_OPENED;
6075
6076out:
6077 LogFlowFunc(("returns %Rrc\n", rc));
6078 return rc;
6079}
6080
6081/** @copydoc VBOXHDDBACKEND::pfnGetImageFlags */
6082static unsigned vmdkGetImageFlags(void *pBackendData)
6083{
6084 LogFlowFunc(("pBackendData=%#p\n", pBackendData));
6085 PVMDKIMAGE pImage = (PVMDKIMAGE)pBackendData;
6086 unsigned uImageFlags;
6087
6088 AssertPtr(pImage);
6089
6090 if (pImage)
6091 uImageFlags = pImage->uImageFlags;
6092 else
6093 uImageFlags = 0;
6094
6095 LogFlowFunc(("returns %#x\n", uImageFlags));
6096 return uImageFlags;
6097}
6098
6099/** @copydoc VBOXHDDBACKEND::pfnGetOpenFlags */
6100static unsigned vmdkGetOpenFlags(void *pBackendData)
6101{
6102 LogFlowFunc(("pBackendData=%#p\n", pBackendData));
6103 PVMDKIMAGE pImage = (PVMDKIMAGE)pBackendData;
6104 unsigned uOpenFlags;
6105
6106 AssertPtr(pImage);
6107
6108 if (pImage)
6109 uOpenFlags = pImage->uOpenFlags;
6110 else
6111 uOpenFlags = 0;
6112
6113 LogFlowFunc(("returns %#x\n", uOpenFlags));
6114 return uOpenFlags;
6115}
6116
6117/** @copydoc VBOXHDDBACKEND::pfnSetOpenFlags */
6118static int vmdkSetOpenFlags(void *pBackendData, unsigned uOpenFlags)
6119{
6120 LogFlowFunc(("pBackendData=%#p\n uOpenFlags=%#x", pBackendData, uOpenFlags));
6121 PVMDKIMAGE pImage = (PVMDKIMAGE)pBackendData;
6122 int rc;
6123
6124 /* Image must be opened and the new flags must be valid. Just readonly and
6125 * info flags are supported. */
6126 if (!pImage || (uOpenFlags & ~(VD_OPEN_FLAGS_READONLY | VD_OPEN_FLAGS_INFO | VD_OPEN_FLAGS_ASYNC_IO)))
6127 {
6128 rc = VERR_INVALID_PARAMETER;
6129 goto out;
6130 }
6131
6132 /* Implement this operation via reopening the image. */
6133 vmdkFreeImage(pImage, false);
6134 rc = vmdkOpenImage(pImage, uOpenFlags);
6135
6136out:
6137 LogFlowFunc(("returns %Rrc\n", rc));
6138 return rc;
6139}
6140
/** @copydoc VBOXHDDBACKEND::pfnGetComment */
static int vmdkGetComment(void *pBackendData, char *pszComment,
                          size_t cbComment)
{
    LogFlowFunc(("pBackendData=%#p pszComment=%#p cbComment=%zu\n", pBackendData, pszComment, cbComment));
    PVMDKIMAGE pImage = (PVMDKIMAGE)pBackendData;
    int rc;

    AssertPtr(pImage);

    if (pImage)
    {
        const char *pszCommentEncoded = NULL;
        /* Fetch the (encoded) comment from the descriptor's DDB section. */
        rc = vmdkDescDDBGetStr(pImage, &pImage->Descriptor,
                               "ddb.comment", &pszCommentEncoded);
        /* A missing comment entry is not an error - just report "". */
        if (rc == VERR_VD_VMDK_VALUE_NOT_FOUND)
            pszCommentEncoded = NULL;
        else if (RT_FAILURE(rc))
            goto out;

        if (pszComment && pszCommentEncoded)
            /* Decode escapes into the caller's buffer (size-limited). */
            rc = vmdkDecodeString(pszCommentEncoded, pszComment, cbComment);
        else
        {
            if (pszComment)
                *pszComment = '\0';
            rc = VINF_SUCCESS;
        }
        /* The DDB getter handed out a string that is freed here; the double
         * cast drops the const qualifier for RTStrFree. */
        if (pszCommentEncoded)
            RTStrFree((char *)(void *)pszCommentEncoded);
    }
    else
        rc = VERR_VD_NOT_OPENED;

out:
    LogFlowFunc(("returns %Rrc comment='%s'\n", rc, pszComment));
    return rc;
}
6179
6180/** @copydoc VBOXHDDBACKEND::pfnSetComment */
6181static int vmdkSetComment(void *pBackendData, const char *pszComment)
6182{
6183 LogFlowFunc(("pBackendData=%#p pszComment=\"%s\"\n", pBackendData, pszComment));
6184 PVMDKIMAGE pImage = (PVMDKIMAGE)pBackendData;
6185 int rc;
6186
6187 AssertPtr(pImage);
6188
6189 if (pImage->uOpenFlags & VD_OPEN_FLAGS_READONLY)
6190 {
6191 rc = VERR_VD_IMAGE_READ_ONLY;
6192 goto out;
6193 }
6194
6195 if (pImage)
6196 rc = vmdkSetImageComment(pImage, pszComment);
6197 else
6198 rc = VERR_VD_NOT_OPENED;
6199
6200out:
6201 LogFlowFunc(("returns %Rrc\n", rc));
6202 return rc;
6203}
6204
6205/** @copydoc VBOXHDDBACKEND::pfnGetUuid */
6206static int vmdkGetUuid(void *pBackendData, PRTUUID pUuid)
6207{
6208 LogFlowFunc(("pBackendData=%#p pUuid=%#p\n", pBackendData, pUuid));
6209 PVMDKIMAGE pImage = (PVMDKIMAGE)pBackendData;
6210 int rc;
6211
6212 AssertPtr(pImage);
6213
6214 if (pImage)
6215 {
6216 *pUuid = pImage->ImageUuid;
6217 rc = VINF_SUCCESS;
6218 }
6219 else
6220 rc = VERR_VD_NOT_OPENED;
6221
6222 LogFlowFunc(("returns %Rrc (%RTuuid)\n", rc, pUuid));
6223 return rc;
6224}
6225
6226/** @copydoc VBOXHDDBACKEND::pfnSetUuid */
6227static int vmdkSetUuid(void *pBackendData, PCRTUUID pUuid)
6228{
6229 LogFlowFunc(("pBackendData=%#p Uuid=%RTuuid\n", pBackendData, pUuid));
6230 PVMDKIMAGE pImage = (PVMDKIMAGE)pBackendData;
6231 int rc;
6232
6233 LogFlowFunc(("%RTuuid\n", pUuid));
6234 AssertPtr(pImage);
6235
6236 if (pImage)
6237 {
6238 if (!(pImage->uOpenFlags & VD_OPEN_FLAGS_READONLY))
6239 {
6240 pImage->ImageUuid = *pUuid;
6241 rc = vmdkDescDDBSetUuid(pImage, &pImage->Descriptor,
6242 VMDK_DDB_IMAGE_UUID, pUuid);
6243 if (RT_FAILURE(rc))
6244 return vmdkError(pImage, rc, RT_SRC_POS, N_("VMDK: error storing image UUID in descriptor in '%s'"), pImage->pszFilename);
6245 rc = VINF_SUCCESS;
6246 }
6247 else
6248 rc = VERR_VD_IMAGE_READ_ONLY;
6249 }
6250 else
6251 rc = VERR_VD_NOT_OPENED;
6252
6253 LogFlowFunc(("returns %Rrc\n", rc));
6254 return rc;
6255}
6256
6257/** @copydoc VBOXHDDBACKEND::pfnGetModificationUuid */
6258static int vmdkGetModificationUuid(void *pBackendData, PRTUUID pUuid)
6259{
6260 LogFlowFunc(("pBackendData=%#p pUuid=%#p\n", pBackendData, pUuid));
6261 PVMDKIMAGE pImage = (PVMDKIMAGE)pBackendData;
6262 int rc;
6263
6264 AssertPtr(pImage);
6265
6266 if (pImage)
6267 {
6268 *pUuid = pImage->ModificationUuid;
6269 rc = VINF_SUCCESS;
6270 }
6271 else
6272 rc = VERR_VD_NOT_OPENED;
6273
6274 LogFlowFunc(("returns %Rrc (%RTuuid)\n", rc, pUuid));
6275 return rc;
6276}
6277
6278/** @copydoc VBOXHDDBACKEND::pfnSetModificationUuid */
6279static int vmdkSetModificationUuid(void *pBackendData, PCRTUUID pUuid)
6280{
6281 LogFlowFunc(("pBackendData=%#p Uuid=%RTuuid\n", pBackendData, pUuid));
6282 PVMDKIMAGE pImage = (PVMDKIMAGE)pBackendData;
6283 int rc;
6284
6285 AssertPtr(pImage);
6286
6287 if (pImage)
6288 {
6289 if (!(pImage->uOpenFlags & VD_OPEN_FLAGS_READONLY))
6290 {
6291 /*
6292 * Only change the modification uuid if it changed.
6293 * Avoids a lot of unneccessary 1-byte writes during
6294 * vmdkFlush.
6295 */
6296 if (RTUuidCompare(&pImage->ModificationUuid, pUuid))
6297 {
6298 pImage->ModificationUuid = *pUuid;
6299 rc = vmdkDescDDBSetUuid(pImage, &pImage->Descriptor,
6300 VMDK_DDB_MODIFICATION_UUID, pUuid);
6301 if (RT_FAILURE(rc))
6302 return vmdkError(pImage, rc, RT_SRC_POS, N_("VMDK: error storing modification UUID in descriptor in '%s'"), pImage->pszFilename);
6303 }
6304 rc = VINF_SUCCESS;
6305 }
6306 else
6307 rc = VERR_VD_IMAGE_READ_ONLY;
6308 }
6309 else
6310 rc = VERR_VD_NOT_OPENED;
6311
6312 LogFlowFunc(("returns %Rrc\n", rc));
6313 return rc;
6314}
6315
6316/** @copydoc VBOXHDDBACKEND::pfnGetParentUuid */
6317static int vmdkGetParentUuid(void *pBackendData, PRTUUID pUuid)
6318{
6319 LogFlowFunc(("pBackendData=%#p pUuid=%#p\n", pBackendData, pUuid));
6320 PVMDKIMAGE pImage = (PVMDKIMAGE)pBackendData;
6321 int rc;
6322
6323 AssertPtr(pImage);
6324
6325 if (pImage)
6326 {
6327 *pUuid = pImage->ParentUuid;
6328 rc = VINF_SUCCESS;
6329 }
6330 else
6331 rc = VERR_VD_NOT_OPENED;
6332
6333 LogFlowFunc(("returns %Rrc (%RTuuid)\n", rc, pUuid));
6334 return rc;
6335}
6336
6337/** @copydoc VBOXHDDBACKEND::pfnSetParentUuid */
6338static int vmdkSetParentUuid(void *pBackendData, PCRTUUID pUuid)
6339{
6340 LogFlowFunc(("pBackendData=%#p Uuid=%RTuuid\n", pBackendData, pUuid));
6341 PVMDKIMAGE pImage = (PVMDKIMAGE)pBackendData;
6342 int rc;
6343
6344 AssertPtr(pImage);
6345
6346 if (pImage)
6347 {
6348 if (!(pImage->uOpenFlags & VD_OPEN_FLAGS_READONLY))
6349 {
6350 pImage->ParentUuid = *pUuid;
6351 rc = vmdkDescDDBSetUuid(pImage, &pImage->Descriptor,
6352 VMDK_DDB_PARENT_UUID, pUuid);
6353 if (RT_FAILURE(rc))
6354 return vmdkError(pImage, rc, RT_SRC_POS, N_("VMDK: error storing parent image UUID in descriptor in '%s'"), pImage->pszFilename);
6355 rc = VINF_SUCCESS;
6356 }
6357 else
6358 rc = VERR_VD_IMAGE_READ_ONLY;
6359 }
6360 else
6361 rc = VERR_VD_NOT_OPENED;
6362
6363 LogFlowFunc(("returns %Rrc\n", rc));
6364 return rc;
6365}
6366
6367/** @copydoc VBOXHDDBACKEND::pfnGetParentModificationUuid */
6368static int vmdkGetParentModificationUuid(void *pBackendData, PRTUUID pUuid)
6369{
6370 LogFlowFunc(("pBackendData=%#p pUuid=%#p\n", pBackendData, pUuid));
6371 PVMDKIMAGE pImage = (PVMDKIMAGE)pBackendData;
6372 int rc;
6373
6374 AssertPtr(pImage);
6375
6376 if (pImage)
6377 {
6378 *pUuid = pImage->ParentModificationUuid;
6379 rc = VINF_SUCCESS;
6380 }
6381 else
6382 rc = VERR_VD_NOT_OPENED;
6383
6384 LogFlowFunc(("returns %Rrc (%RTuuid)\n", rc, pUuid));
6385 return rc;
6386}
6387
6388/** @copydoc VBOXHDDBACKEND::pfnSetParentModificationUuid */
6389static int vmdkSetParentModificationUuid(void *pBackendData, PCRTUUID pUuid)
6390{
6391 LogFlowFunc(("pBackendData=%#p Uuid=%RTuuid\n", pBackendData, pUuid));
6392 PVMDKIMAGE pImage = (PVMDKIMAGE)pBackendData;
6393 int rc;
6394
6395 AssertPtr(pImage);
6396
6397 if (pImage)
6398 {
6399 if (!(pImage->uOpenFlags & VD_OPEN_FLAGS_READONLY))
6400 {
6401 pImage->ParentModificationUuid = *pUuid;
6402 rc = vmdkDescDDBSetUuid(pImage, &pImage->Descriptor,
6403 VMDK_DDB_PARENT_MODIFICATION_UUID, pUuid);
6404 if (RT_FAILURE(rc))
6405 return vmdkError(pImage, rc, RT_SRC_POS, N_("VMDK: error storing parent image UUID in descriptor in '%s'"), pImage->pszFilename);
6406 rc = VINF_SUCCESS;
6407 }
6408 else
6409 rc = VERR_VD_IMAGE_READ_ONLY;
6410 }
6411 else
6412 rc = VERR_VD_NOT_OPENED;
6413
6414 LogFlowFunc(("returns %Rrc\n", rc));
6415 return rc;
6416}
6417
/** @copydoc VBOXHDDBACKEND::pfnDump */
static void vmdkDump(void *pBackendData)
{
    PVMDKIMAGE pImage = (PVMDKIMAGE)pBackendData;

    AssertPtr(pImage);
    if (pImage)
    {
        /* Dump geometry, size and all cached UUIDs through the error
         * interface's message callback.
         * NOTE(review): assumes pInterfaceErrorCallbacks / pfnMessage are
         * always set for an open image - no NULL check is done here; confirm
         * against the backend setup code. */
        pImage->pInterfaceErrorCallbacks->pfnMessage(pImage->pInterfaceError->pvUser, "Header: Geometry PCHS=%u/%u/%u LCHS=%u/%u/%u cbSector=%llu\n",
                    pImage->PCHSGeometry.cCylinders, pImage->PCHSGeometry.cHeads, pImage->PCHSGeometry.cSectors,
                    pImage->LCHSGeometry.cCylinders, pImage->LCHSGeometry.cHeads, pImage->LCHSGeometry.cSectors,
                    VMDK_BYTE2SECTOR(pImage->cbSize));
        pImage->pInterfaceErrorCallbacks->pfnMessage(pImage->pInterfaceError->pvUser, "Header: uuidCreation={%RTuuid}\n", &pImage->ImageUuid);
        pImage->pInterfaceErrorCallbacks->pfnMessage(pImage->pInterfaceError->pvUser, "Header: uuidModification={%RTuuid}\n", &pImage->ModificationUuid);
        pImage->pInterfaceErrorCallbacks->pfnMessage(pImage->pInterfaceError->pvUser, "Header: uuidParent={%RTuuid}\n", &pImage->ParentUuid);
        pImage->pInterfaceErrorCallbacks->pfnMessage(pImage->pInterfaceError->pvUser, "Header: uuidParentModification={%RTuuid}\n", &pImage->ParentModificationUuid);
    }
}
6436
6437
6438static int vmdkGetTimeStamp(void *pvBackendData, PRTTIMESPEC pTimeStamp)
6439{
6440 int rc = VERR_NOT_IMPLEMENTED;
6441 LogFlow(("%s: returned %Rrc\n", __FUNCTION__, rc));
6442 return rc;
6443}
6444
6445static int vmdkGetParentTimeStamp(void *pvBackendData, PRTTIMESPEC pTimeStamp)
6446{
6447 int rc = VERR_NOT_IMPLEMENTED;
6448 LogFlow(("%s: returned %Rrc\n", __FUNCTION__, rc));
6449 return rc;
6450}
6451
6452static int vmdkSetParentTimeStamp(void *pvBackendData, PCRTTIMESPEC pTimeStamp)
6453{
6454 int rc = VERR_NOT_IMPLEMENTED;
6455 LogFlow(("%s: returned %Rrc\n", __FUNCTION__, rc));
6456 return rc;
6457}
6458
6459static int vmdkGetParentFilename(void *pvBackendData, char **ppszParentFilename)
6460{
6461 int rc = VERR_NOT_IMPLEMENTED;
6462 LogFlow(("%s: returned %Rrc\n", __FUNCTION__, rc));
6463 return rc;
6464}
6465
6466static int vmdkSetParentFilename(void *pvBackendData, const char *pszParentFilename)
6467{
6468 int rc = VERR_NOT_IMPLEMENTED;
6469 LogFlow(("%s: returned %Rrc\n", __FUNCTION__, rc));
6470 return rc;
6471}
6472
6473static bool vmdkIsAsyncIOSupported(void *pvBackendData)
6474{
6475 PVMDKIMAGE pImage = (PVMDKIMAGE)pvBackendData;
6476
6477 /* We do not support async I/O for stream optimized VMDK images. */
6478 return (pImage->uImageFlags & VD_VMDK_IMAGE_FLAGS_STREAM_OPTIMIZED) == 0;
6479}
6480
/**
 * Async read entry point; same mapping logic as vmdkRead but data transfer
 * goes through the async I/O callback interface and the VD I/O context.
 */
static int vmdkAsyncRead(void *pvBackendData, uint64_t uOffset, size_t cbRead,
                         PVDIOCTX pIoCtx, size_t *pcbActuallyRead)
{
    LogFlowFunc(("pvBackendData=%#p uOffset=%llu pIoCtx=%#p cbToRead=%zu pcbActuallyRead=%#p\n",
                 pvBackendData, uOffset, pIoCtx, cbRead, pcbActuallyRead));
    PVMDKIMAGE pImage = (PVMDKIMAGE)pvBackendData;
    PVMDKEXTENT pExtent;
    uint64_t uSectorExtentRel;  /* Read position in sectors, relative to the extent start. */
    uint64_t uSectorExtentAbs;  /* Data location in sectors within the extent file. */
    int rc;

    AssertPtr(pImage);
    Assert(uOffset % 512 == 0);
    Assert(cbRead % 512 == 0);

    /* Reject zero-length reads and reads past the nominal disk size. */
    if (   uOffset + cbRead > pImage->cbSize
        || cbRead == 0)
    {
        rc = VERR_INVALID_PARAMETER;
        goto out;
    }

    /* Locate the extent covering uOffset and the sector offset inside it. */
    rc = vmdkFindExtent(pImage, VMDK_BYTE2SECTOR(uOffset),
                        &pExtent, &uSectorExtentRel);
    if (RT_FAILURE(rc))
        goto out;

    /* Check access permissions as defined in the extent descriptor. */
    if (pExtent->enmAccess == VMDKACCESS_NOACCESS)
    {
        rc = VERR_VD_VMDK_INVALID_STATE;
        goto out;
    }

    /* Clip read range to remain in this extent. */
    cbRead = RT_MIN(cbRead, VMDK_SECTOR2BYTE(pExtent->uSectorOffset + pExtent->cNominalSectors - uSectorExtentRel));

    /* Handle the read according to the current extent type. */
    switch (pExtent->enmType)
    {
        case VMDKETYPE_HOSTED_SPARSE:
#ifdef VBOX_WITH_VMDK_ESX
        case VMDKETYPE_ESX_SPARSE:
#endif /* VBOX_WITH_VMDK_ESX */
            /* Async variant of the grain table lookup. */
            rc = vmdkGetSectorAsync(pImage, pIoCtx, pImage->pGTCache, pExtent,
                                    uSectorExtentRel, &uSectorExtentAbs);
            if (RT_FAILURE(rc))
                goto out;
            /* Clip read range to at most the rest of the grain. */
            cbRead = RT_MIN(cbRead, VMDK_SECTOR2BYTE(pExtent->cSectorsPerGrain - uSectorExtentRel % pExtent->cSectorsPerGrain));
            Assert(!(cbRead % 512));
            if (uSectorExtentAbs == 0)
                /* Unallocated grain - signal a free block to the caller. */
                rc = VERR_VD_BLOCK_FREE;
            else
            {
                AssertMsg(!(pImage->uImageFlags & VD_VMDK_IMAGE_FLAGS_STREAM_OPTIMIZED), ("Async I/O is not supported for stream optimized VMDK's\n"));
                /* Queue the user-buffer read through the async I/O interface. */
                rc = pImage->pInterfaceIOCallbacks->pfnReadUserAsync(pImage->pInterfaceIO->pvUser,
                                                                     pExtent->pFile->pStorage,
                                                                     VMDK_SECTOR2BYTE(uSectorExtentAbs),
                                                                     pIoCtx, cbRead);
            }
            break;
        case VMDKETYPE_VMFS:
        case VMDKETYPE_FLAT:
            /* Flat extents map 1:1; queue the read at the relative offset. */
            rc = pImage->pInterfaceIOCallbacks->pfnReadUserAsync(pImage->pInterfaceIO->pvUser,
                                                                 pExtent->pFile->pStorage,
                                                                 VMDK_SECTOR2BYTE(uSectorExtentRel),
                                                                 pIoCtx, cbRead);
            break;
        case VMDKETYPE_ZERO:
            size_t cbSet;

            /* Fill the I/O context buffers with zeros directly. */
            cbSet = pImage->pInterfaceIOCallbacks->pfnIoCtxSet(pImage->pInterfaceIO->pvUser,
                                                               pIoCtx, 0, cbRead);
            Assert(cbSet == cbRead);

            rc = VINF_SUCCESS;
            break;
    }
    if (pcbActuallyRead)
        *pcbActuallyRead = cbRead;

out:
    LogFlowFunc(("returns %Rrc\n", rc));
    return rc;
}
6567
/**
 * Asynchronous write operation (VBOXHDDBACKEND::pfnAsyncWrite).
 *
 * Locates the extent covering uOffset, clips the request to the containing
 * grain/extent and either queues the data write on the I/O context or tells
 * the generic VD layer to handle block allocation.
 *
 * @returns VBox status code.
 * @retval  VERR_VD_BLOCK_FREE if the target grain is unallocated and the
 *          caller has to perform the allocating write; *pcbPreRead and
 *          *pcbPostRead describe the surrounding grain bytes to be read in
 *          for a partial-grain write.
 * @retval  VERR_VD_ASYNC_IO_IN_PROGRESS if the request was queued on the
 *          I/O context (propagated from the I/O callbacks).
 * @param   pvBackendData    Opaque VMDK image instance (PVMDKIMAGE).
 * @param   uOffset          Byte offset into the virtual disk; asserted to be
 *                           512-byte aligned.
 * @param   cbWrite          Number of bytes to write; asserted to be a
 *                           multiple of 512 and must be non-zero.
 * @param   pIoCtx           I/O context carrying the data to write.
 * @param   pcbWriteProcess  Where to return the number of bytes this call
 *                           actually covered after clipping (optional).
 * @param   pcbPreRead       Where to return the byte count preceding the
 *                           write inside the grain (set on VERR_VD_BLOCK_FREE).
 * @param   pcbPostRead      Where to return the byte count following the
 *                           write inside the grain (set on VERR_VD_BLOCK_FREE).
 * @param   fWrite           VD_WRITE_* flags; VD_WRITE_NO_ALLOC suppresses
 *                           automatic grain allocation.
 */
static int vmdkAsyncWrite(void *pvBackendData, uint64_t uOffset, size_t cbWrite,
                          PVDIOCTX pIoCtx,
                          size_t *pcbWriteProcess, size_t *pcbPreRead,
                          size_t *pcbPostRead, unsigned fWrite)
{
    LogFlowFunc(("pvBackendData=%#p uOffset=%llu pIoCtx=%#p cbToWrite=%zu pcbWriteProcess=%#p pcbPreRead=%#p pcbPostRead=%#p\n",
                 pvBackendData, uOffset, pIoCtx, cbWrite, pcbWriteProcess, pcbPreRead, pcbPostRead));
    PVMDKIMAGE pImage = (PVMDKIMAGE)pvBackendData;
    PVMDKEXTENT pExtent;
    uint64_t uSectorExtentRel;
    uint64_t uSectorExtentAbs;
    int rc;

    AssertPtr(pImage);
    Assert(uOffset % 512 == 0);
    Assert(cbWrite % 512 == 0);

    if (pImage->uOpenFlags & VD_OPEN_FLAGS_READONLY)
    {
        rc = VERR_VD_IMAGE_READ_ONLY;
        goto out;
    }

    if (cbWrite == 0)
    {
        rc = VERR_INVALID_PARAMETER;
        goto out;
    }

    /* No size check here, will do that later when the extent is located.
     * There are sparse images out there which according to the spec are
     * invalid, because the total size is not a multiple of the grain size.
     * Also for sparse images which are stitched together in odd ways (not at
     * grain boundaries, and with the nominal size not being a multiple of the
     * grain size), this would prevent writing to the last grain. */

    rc = vmdkFindExtent(pImage, VMDK_BYTE2SECTOR(uOffset),
                        &pExtent, &uSectorExtentRel);
    if (RT_FAILURE(rc))
        goto out;

    /* Check access permissions as defined in the extent descriptor. */
    if (pExtent->enmAccess != VMDKACCESS_READWRITE)
    {
        rc = VERR_VD_VMDK_INVALID_STATE;
        goto out;
    }

    /* Handle the write according to the current extent type. */
    switch (pExtent->enmType)
    {
        case VMDKETYPE_HOSTED_SPARSE:
#ifdef VBOX_WITH_VMDK_ESX
        case VMDKETYPE_ESX_SPARSE:
#endif /* VBOX_WITH_VMDK_ESX */
            /* Resolve the grain table entry; uSectorExtentAbs == 0 afterwards
             * means the grain is not allocated yet. */
            rc = vmdkGetSectorAsync(pImage, pIoCtx, pImage->pGTCache, pExtent, uSectorExtentRel,
                                    &uSectorExtentAbs);
            if (RT_FAILURE(rc))
                goto out;
            /* Clip write range to at most the rest of the grain. */
            cbWrite = RT_MIN(cbWrite, VMDK_SECTOR2BYTE(pExtent->cSectorsPerGrain - uSectorExtentRel % pExtent->cSectorsPerGrain));
            /* Stream optimized images are append-only: rewriting anything at
             * or before the last written grain is rejected. This check must
             * come after clipping so it sees the final target range. */
            if (    pImage->uImageFlags & VD_VMDK_IMAGE_FLAGS_STREAM_OPTIMIZED
                &&  uSectorExtentRel < (uint64_t)pExtent->uLastGrainWritten * pExtent->cSectorsPerGrain)
            {
                rc = VERR_VD_VMDK_INVALID_WRITE;
                goto out;
            }
            if (uSectorExtentAbs == 0)
            {
                if (cbWrite == VMDK_SECTOR2BYTE(pExtent->cSectorsPerGrain))
                {
                    /* Full block write to a previously unallocated block.
                     * Check if the caller wants to avoid the automatic alloc. */
                    if (!(fWrite & VD_WRITE_NO_ALLOC))
                    {
                        /* Allocate GT and find out where to store the grain. */
                        rc = vmdkAllocGrainAsync(pImage->pGTCache, pExtent, pIoCtx,
                                                 uSectorExtentRel, cbWrite);
                    }
                    else
                        rc = VERR_VD_BLOCK_FREE;
                    /* Full grain: nothing around the write needs reading. */
                    *pcbPreRead = 0;
                    *pcbPostRead = 0;
                }
                else
                {
                    /* Clip write range to remain in this extent. */
                    cbWrite = RT_MIN(cbWrite, VMDK_SECTOR2BYTE(pExtent->uSectorOffset + pExtent->cNominalSectors - uSectorExtentRel));
                    /* Partial grain: tell the generic layer how many bytes of
                     * the grain precede and follow the written range so it can
                     * assemble a full-grain allocating write. */
                    *pcbPreRead = VMDK_SECTOR2BYTE(uSectorExtentRel % pExtent->cSectorsPerGrain);
                    *pcbPostRead = VMDK_SECTOR2BYTE(pExtent->cSectorsPerGrain) - cbWrite - *pcbPreRead;
                    rc = VERR_VD_BLOCK_FREE;
                }
            }
            else
            {
                /* Grain already allocated: plain in-place data write. Stream
                 * optimized images never reach this path asynchronously. */
                Assert(!(pImage->uImageFlags & VD_VMDK_IMAGE_FLAGS_STREAM_OPTIMIZED));
                rc = pImage->pInterfaceIOCallbacks->pfnWriteUserAsync(pImage->pInterfaceIO->pvUser,
                                                                      pExtent->pFile->pStorage,
                                                                      VMDK_SECTOR2BYTE(uSectorExtentAbs),
                                                                      pIoCtx, cbWrite,
                                                                      NULL, NULL);
            }
            break;
        case VMDKETYPE_VMFS:
        case VMDKETYPE_FLAT:
            /* Clip write range to remain in this extent. */
            cbWrite = RT_MIN(cbWrite, VMDK_SECTOR2BYTE(pExtent->uSectorOffset + pExtent->cNominalSectors - uSectorExtentRel));
            /* Flat extents map 1:1 to the file; write at the relative offset. */
            rc = pImage->pInterfaceIOCallbacks->pfnWriteUserAsync(pImage->pInterfaceIO->pvUser,
                                                                  pExtent->pFile->pStorage,
                                                                  VMDK_SECTOR2BYTE(uSectorExtentRel),
                                                                  pIoCtx, cbWrite, NULL, NULL);
            break;
        case VMDKETYPE_ZERO:
            /* Clip write range to remain in this extent. */
            cbWrite = RT_MIN(cbWrite, VMDK_SECTOR2BYTE(pExtent->uSectorOffset + pExtent->cNominalSectors - uSectorExtentRel));
            /* Writes to zero extents are silently dropped; rc still holds the
             * success status returned by vmdkFindExtent() above. */
            break;
    }
    if (pcbWriteProcess)
        *pcbWriteProcess = cbWrite;

out:
    LogFlowFunc(("returns %Rrc\n", rc));
    return rc;
}
6692
6693static int vmdkAsyncFlush(void *pvBackendData, PVDIOCTX pIoCtx)
6694{
6695 PVMDKIMAGE pImage = (PVMDKIMAGE)pvBackendData;
6696 PVMDKEXTENT pExtent;
6697 int rc = VINF_SUCCESS;
6698
6699 for (unsigned i = 0; i < pImage->cExtents; i++)
6700 {
6701 pExtent = &pImage->pExtents[i];
6702 if (pExtent->pFile != NULL && pExtent->fMetaDirty)
6703 {
6704 switch (pExtent->enmType)
6705 {
6706 case VMDKETYPE_HOSTED_SPARSE:
6707#ifdef VBOX_WITH_VMDK_ESX
6708 case VMDKETYPE_ESX_SPARSE:
6709#endif /* VBOX_WITH_VMDK_ESX */
6710 rc = vmdkWriteMetaSparseExtentAsync(pImage, pExtent, 0, pIoCtx);
6711 if (RT_FAILURE(rc) && (rc != VERR_VD_ASYNC_IO_IN_PROGRESS))
6712 goto out;
6713 if (pExtent->fFooter)
6714 {
6715 uint64_t cbSize;
6716 rc = vmdkFileGetSize(pExtent->pFile, &cbSize);
6717 if (RT_FAILURE(rc))
6718 goto out;
6719 cbSize = RT_ALIGN_64(cbSize, 512);
6720 rc = vmdkWriteMetaSparseExtent(pExtent, cbSize - 2*512);
6721 if (RT_FAILURE(rc) && (rc != VERR_VD_ASYNC_IO_IN_PROGRESS))
6722 goto out;
6723 }
6724 break;
6725 case VMDKETYPE_VMFS:
6726 case VMDKETYPE_FLAT:
6727 /* Nothing to do. */
6728 break;
6729 case VMDKETYPE_ZERO:
6730 default:
6731 AssertMsgFailed(("extent with type %d marked as dirty\n",
6732 pExtent->enmType));
6733 break;
6734 }
6735 }
6736 switch (pExtent->enmType)
6737 {
6738 case VMDKETYPE_HOSTED_SPARSE:
6739#ifdef VBOX_WITH_VMDK_ESX
6740 case VMDKETYPE_ESX_SPARSE:
6741#endif /* VBOX_WITH_VMDK_ESX */
6742 case VMDKETYPE_VMFS:
6743 case VMDKETYPE_FLAT:
6744 /** @todo implement proper path absolute check. */
6745 if ( pExtent->pFile != NULL
6746 && !(pImage->uOpenFlags & VD_OPEN_FLAGS_READONLY)
6747 && !(pExtent->pszBasename[0] == RTPATH_SLASH))
6748 rc = vmdkFileFlushAsync(pExtent->pFile, pIoCtx);
6749 break;
6750 case VMDKETYPE_ZERO:
6751 /* No need to do anything for this extent. */
6752 break;
6753 default:
6754 AssertMsgFailed(("unknown extent type %d\n", pExtent->enmType));
6755 break;
6756 }
6757 }
6758
6759out:
6760 return rc;
6761}
6762
6763
/**
 * VMDK backend registration record for the generic VD (VBoxHDD) layer.
 *
 * Maps every VBOXHDDBACKEND callback slot to the corresponding vmdk* function
 * in this file and advertises the backend's capabilities (fixed/dynamic/split
 * image creation, diff images, file-based storage and async I/O).
 */
VBOXHDDBACKEND g_VmdkBackend =
{
    /* pszBackendName */
    "VMDK",
    /* cbSize */
    sizeof(VBOXHDDBACKEND),
    /* uBackendCaps */
    VD_CAP_UUID | VD_CAP_CREATE_FIXED | VD_CAP_CREATE_DYNAMIC
    | VD_CAP_CREATE_SPLIT_2G | VD_CAP_DIFF | VD_CAP_FILE |VD_CAP_ASYNC,
    /* papszFileExtensions */
    s_apszVmdkFileExtensions,
    /* paConfigInfo */
    NULL,
    /* hPlugin */
    NIL_RTLDRMOD,
    /* pfnCheckIfValid */
    vmdkCheckIfValid,
    /* pfnOpen */
    vmdkOpen,
    /* pfnCreate */
    vmdkCreate,
    /* pfnRename */
    vmdkRename,
    /* pfnClose */
    vmdkClose,
    /* pfnRead */
    vmdkRead,
    /* pfnWrite */
    vmdkWrite,
    /* pfnFlush */
    vmdkFlush,
    /* pfnGetVersion */
    vmdkGetVersion,
    /* pfnGetSize */
    vmdkGetSize,
    /* pfnGetFileSize */
    vmdkGetFileSize,
    /* pfnGetPCHSGeometry */
    vmdkGetPCHSGeometry,
    /* pfnSetPCHSGeometry */
    vmdkSetPCHSGeometry,
    /* pfnGetLCHSGeometry */
    vmdkGetLCHSGeometry,
    /* pfnSetLCHSGeometry */
    vmdkSetLCHSGeometry,
    /* pfnGetImageFlags */
    vmdkGetImageFlags,
    /* pfnGetOpenFlags */
    vmdkGetOpenFlags,
    /* pfnSetOpenFlags */
    vmdkSetOpenFlags,
    /* pfnGetComment */
    vmdkGetComment,
    /* pfnSetComment */
    vmdkSetComment,
    /* pfnGetUuid */
    vmdkGetUuid,
    /* pfnSetUuid */
    vmdkSetUuid,
    /* pfnGetModificationUuid */
    vmdkGetModificationUuid,
    /* pfnSetModificationUuid */
    vmdkSetModificationUuid,
    /* pfnGetParentUuid */
    vmdkGetParentUuid,
    /* pfnSetParentUuid */
    vmdkSetParentUuid,
    /* pfnGetParentModificationUuid */
    vmdkGetParentModificationUuid,
    /* pfnSetParentModificationUuid */
    vmdkSetParentModificationUuid,
    /* pfnDump */
    vmdkDump,
    /* pfnGetTimeStamp */
    vmdkGetTimeStamp,
    /* pfnGetParentTimeStamp */
    vmdkGetParentTimeStamp,
    /* pfnSetParentTimeStamp */
    vmdkSetParentTimeStamp,
    /* pfnGetParentFilename */
    vmdkGetParentFilename,
    /* pfnSetParentFilename */
    vmdkSetParentFilename,
    /* pfnIsAsyncIOSupported */
    vmdkIsAsyncIOSupported,
    /* pfnAsyncRead */
    vmdkAsyncRead,
    /* pfnAsyncWrite */
    vmdkAsyncWrite,
    /* pfnAsyncFlush */
    vmdkAsyncFlush,
    /* pfnComposeLocation */
    genericFileComposeLocation,
    /* pfnComposeName */
    genericFileComposeName
};
Note: See TracBrowser for help on using the repository browser.

© 2024 Oracle Support Privacy / Do Not Sell My Info Terms of Use Trademark Policy Automated Access Etiquette