VirtualBox

source: vbox/trunk/src/VBox/Devices/Storage/VmdkHDDCore.cpp@ 25823

Last change on this file since 25823 was 25823, checked in by vboxsync, 15 years ago

Devices: more -Wshadow

  • Property svn:eol-style set to native
  • Property svn:keywords set to Author Date Id Revision
File size: 222.0 KB
1/* $Id: VmdkHDDCore.cpp 25823 2010-01-14 09:10:56Z vboxsync $ */
2/** @file
3 * VMDK Disk image, Core Code.
4 */
5
6/*
7 * Copyright (C) 2006-2007 Sun Microsystems, Inc.
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 *
17 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa
18 * Clara, CA 95054 USA or visit http://www.sun.com if you need
19 * additional information or have any questions.
20 */
21
22/*******************************************************************************
23* Header Files *
24*******************************************************************************/
25#define LOG_GROUP LOG_GROUP_VD_VMDK
26#include <VBox/VBoxHDD-Plugin.h>
27#include <VBox/err.h>
28
29#include <VBox/log.h>
30#include <iprt/assert.h>
31#include <iprt/alloc.h>
32#include <iprt/uuid.h>
33#include <iprt/file.h>
34#include <iprt/path.h>
35#include <iprt/string.h>
36#include <iprt/rand.h>
37#include <iprt/zip.h>
38
39
40/*******************************************************************************
41* Constants And Macros, Structures and Typedefs *
42*******************************************************************************/
43
44/** Maximum encoded string size (including NUL) we allow for VMDK images.
45 * Deliberately not set high to avoid running out of descriptor space. */
46#define VMDK_ENCODED_COMMENT_MAX 1024
47
48/** VMDK descriptor DDB entry for PCHS cylinders. */
49#define VMDK_DDB_GEO_PCHS_CYLINDERS "ddb.geometry.cylinders"
50
51/** VMDK descriptor DDB entry for PCHS heads. */
52#define VMDK_DDB_GEO_PCHS_HEADS "ddb.geometry.heads"
53
54/** VMDK descriptor DDB entry for PCHS sectors. */
55#define VMDK_DDB_GEO_PCHS_SECTORS "ddb.geometry.sectors"
56
57/** VMDK descriptor DDB entry for LCHS cylinders. */
58#define VMDK_DDB_GEO_LCHS_CYLINDERS "ddb.geometry.biosCylinders"
59
60/** VMDK descriptor DDB entry for LCHS heads. */
61#define VMDK_DDB_GEO_LCHS_HEADS "ddb.geometry.biosHeads"
62
63/** VMDK descriptor DDB entry for LCHS sectors. */
64#define VMDK_DDB_GEO_LCHS_SECTORS "ddb.geometry.biosSectors"
65
66/** VMDK descriptor DDB entry for image UUID. */
67#define VMDK_DDB_IMAGE_UUID "ddb.uuid.image"
68
69/** VMDK descriptor DDB entry for image modification UUID. */
70#define VMDK_DDB_MODIFICATION_UUID "ddb.uuid.modification"
71
72/** VMDK descriptor DDB entry for parent image UUID. */
73#define VMDK_DDB_PARENT_UUID "ddb.uuid.parent"
74
75/** VMDK descriptor DDB entry for parent image modification UUID. */
76#define VMDK_DDB_PARENT_MODIFICATION_UUID "ddb.uuid.parentmodification"
77
78/** No compression for streamOptimized files. */
79#define VMDK_COMPRESSION_NONE 0
80
81/** Deflate compression for streamOptimized files. */
82#define VMDK_COMPRESSION_DEFLATE 1
83
84/** Marker that the actual GD value is stored in the footer. */
85#define VMDK_GD_AT_END 0xffffffffffffffffULL
86
87/** Marker for end-of-stream in streamOptimized images. */
88#define VMDK_MARKER_EOS 0
89
90/** Marker for grain table block in streamOptimized images. */
91#define VMDK_MARKER_GT 1
92
93/** Marker for grain directory block in streamOptimized images. */
94#define VMDK_MARKER_GD 2
95
96/** Marker for footer in streamOptimized images. */
97#define VMDK_MARKER_FOOTER 3
98
99/** Dummy marker for "don't check the marker value". */
100#define VMDK_MARKER_IGNORE 0xffffffffU
101
102/**
103 * Magic number for hosted images created by VMware Workstation 4, VMware
104 * Workstation 5, VMware Server or VMware Player. Not necessarily sparse.
105 */
106#define VMDK_SPARSE_MAGICNUMBER 0x564d444b /* 'V' 'M' 'D' 'K' */
107
108/**
109 * VMDK hosted binary extent header. The "Sparse" is a total misnomer, as
110 * this header is also used for monolithic flat images.
111 */
112#pragma pack(1)
113typedef struct SparseExtentHeader
114{
115 uint32_t magicNumber;
116 uint32_t version;
117 uint32_t flags;
118 uint64_t capacity;
119 uint64_t grainSize;
120 uint64_t descriptorOffset;
121 uint64_t descriptorSize;
122 uint32_t numGTEsPerGT;
123 uint64_t rgdOffset;
124 uint64_t gdOffset;
125 uint64_t overHead;
126 bool uncleanShutdown;
127 char singleEndLineChar;
128 char nonEndLineChar;
129 char doubleEndLineChar1;
130 char doubleEndLineChar2;
131 uint16_t compressAlgorithm;
132 uint8_t pad[433];
133} SparseExtentHeader;
134#pragma pack()
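/*
 * Illustrative sketch (not part of the original file): a minimal read and
 * magic-number check for such a header. The real open path performs many
 * more consistency checks; the little-endian conversion mirrors the
 * RT_LE2H_* usage found elsewhere in this file.
 *
 * @code
 *     SparseExtentHeader Header;
 *     int rc = RTFileReadAt(File, 0, &Header, sizeof(Header), NULL);
 *     if (   RT_SUCCESS(rc)
 *         && RT_LE2H_U32(Header.magicNumber) != VMDK_SPARSE_MAGICNUMBER)
 *         rc = VERR_VD_VMDK_INVALID_HEADER;
 * @endcode
 */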
135
136/** VMDK capacity for a single chunk when 2G splitting is turned on. Should be
137 * divisible by the default grain size (64K) */
138#define VMDK_2G_SPLIT_SIZE (2047 * 1024 * 1024)
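/* Worked check (illustrative): 2047 * 1024 * 1024 = 2146435072 bytes, which
 * is 2146435072 / 65536 = 32752 default 64K grains, i.e. the chunk size is a
 * whole multiple of the default grain size as required. */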
139
140/** VMDK streamOptimized file format marker. The type field may or may not
141 * actually be valid, but there's always data to read there. */
142#pragma pack(1)
143typedef struct VMDKMARKER
144{
145 uint64_t uSector;
146 uint32_t cbSize;
147 uint32_t uType;
148} VMDKMARKER;
149#pragma pack()
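/*
 * Illustrative sketch (not part of the original file): how such a marker is
 * read and byte-swapped before use, mirroring vmdkFileInflateAt() further
 * down. A non-zero cbSize denotes a compressed grain with the deflated data
 * following immediately; cbSize == 0 means uType identifies a metadata block
 * (grain table, grain directory, footer or end-of-stream).
 *
 * @code
 *     VMDKMARKER Marker;
 *     int rc = RTFileReadAt(File, uOffset, &Marker, sizeof(Marker), NULL);
 *     if (RT_SUCCESS(rc))
 *     {
 *         Marker.uSector = RT_LE2H_U64(Marker.uSector);
 *         Marker.cbSize  = RT_LE2H_U32(Marker.cbSize);
 *         Marker.uType   = RT_LE2H_U32(Marker.uType);
 *     }
 * @endcode
 */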
150
151
152#ifdef VBOX_WITH_VMDK_ESX
153
154/** @todo the ESX code is not tested, not used, and lacks error messages. */
155
156/**
157 * Magic number for images created by VMware GSX Server 3 or ESX Server 3.
158 */
159#define VMDK_ESX_SPARSE_MAGICNUMBER 0x44574f43 /* 'C' 'O' 'W' 'D' */
160
161#pragma pack(1)
162typedef struct COWDisk_Header
163{
164 uint32_t magicNumber;
165 uint32_t version;
166 uint32_t flags;
167 uint32_t numSectors;
168 uint32_t grainSize;
169 uint32_t gdOffset;
170 uint32_t numGDEntries;
171 uint32_t freeSector;
172 /* The spec incompletely documents quite a few further fields, but states
173 * that they are unused by the current format. Replace them by padding. */
174 char reserved1[1604];
175 uint32_t savedGeneration;
176 char reserved2[8];
177 uint32_t uncleanShutdown;
178 char padding[396];
179} COWDisk_Header;
180#pragma pack()
181#endif /* VBOX_WITH_VMDK_ESX */
182
183
184/** Convert sector number/size to byte offset/size. */
185#define VMDK_SECTOR2BYTE(u) ((uint64_t)(u) << 9)
186
187/** Convert byte offset/size to sector number/size. */
188#define VMDK_BYTE2SECTOR(u) ((u) >> 9)
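/*
 * Usage sketch (illustrative): both macros assume the fixed VMDK sector size
 * of 512 bytes, so for the default 64K grain:
 *
 * @code
 *     VMDK_BYTE2SECTOR(65536) == 128    // bytes -> sectors
 *     VMDK_SECTOR2BYTE(128)   == 65536  // sectors -> bytes
 * @endcode
 */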
189
190/**
191 * VMDK extent type.
192 */
193typedef enum VMDKETYPE
194{
195 /** Hosted sparse extent. */
196 VMDKETYPE_HOSTED_SPARSE = 1,
197 /** Flat extent. */
198 VMDKETYPE_FLAT,
199 /** Zero extent. */
200 VMDKETYPE_ZERO,
201 /** VMFS extent, used by ESX. */
202 VMDKETYPE_VMFS
203#ifdef VBOX_WITH_VMDK_ESX
204 ,
205 /** ESX sparse extent. */
206 VMDKETYPE_ESX_SPARSE
207#endif /* VBOX_WITH_VMDK_ESX */
208} VMDKETYPE, *PVMDKETYPE;
209
210/**
211 * VMDK access type for an extent.
212 */
213typedef enum VMDKACCESS
214{
215 /** No access allowed. */
216 VMDKACCESS_NOACCESS = 0,
217 /** Read-only access. */
218 VMDKACCESS_READONLY,
219 /** Read-write access. */
220 VMDKACCESS_READWRITE
221} VMDKACCESS, *PVMDKACCESS;
222
223/** Forward declaration for PVMDKIMAGE. */
224typedef struct VMDKIMAGE *PVMDKIMAGE;
225
226/**
227 * Extent file entry. Used for opening a particular file only once.
228 */
229typedef struct VMDKFILE
230{
231 /** Pointer to filename. Local copy. */
232 const char *pszFilename;
233 /** File open flags for consistency checking. */
234 unsigned fOpen;
235 /** File handle. */
236 RTFILE File;
238 /** Handle for asynchronous access if requested. */
238 void *pStorage;
239 /** Flag whether to use File or pStorage. */
240 bool fAsyncIO;
241 /** Reference counter. */
242 unsigned uReferences;
243 /** Flag whether the file should be deleted on last close. */
244 bool fDelete;
245 /** Pointer to the image we belong to. */
246 PVMDKIMAGE pImage;
247 /** Pointer to next file descriptor. */
248 struct VMDKFILE *pNext;
249 /** Pointer to the previous file descriptor. */
250 struct VMDKFILE *pPrev;
251} VMDKFILE, *PVMDKFILE;
252
253/**
254 * VMDK extent data structure.
255 */
256typedef struct VMDKEXTENT
257{
258 /** File handle. */
259 PVMDKFILE pFile;
260 /** Base name of the image extent. */
261 const char *pszBasename;
262 /** Full name of the image extent. */
263 const char *pszFullname;
264 /** Number of sectors in this extent. */
265 uint64_t cSectors;
266 /** Number of sectors per block (grain in VMDK speak). */
267 uint64_t cSectorsPerGrain;
268 /** Starting sector number of descriptor. */
269 uint64_t uDescriptorSector;
270 /** Size of descriptor in sectors. */
271 uint64_t cDescriptorSectors;
272 /** Starting sector number of grain directory. */
273 uint64_t uSectorGD;
274 /** Starting sector number of redundant grain directory. */
275 uint64_t uSectorRGD;
276 /** Total number of metadata sectors. */
277 uint64_t cOverheadSectors;
278 /** Nominal size (i.e. as described by the descriptor) of this extent. */
279 uint64_t cNominalSectors;
280 /** Sector offset (i.e. as described by the descriptor) of this extent. */
281 uint64_t uSectorOffset;
282 /** Number of entries in a grain table. */
283 uint32_t cGTEntries;
284 /** Number of sectors reachable via a grain directory entry. */
285 uint32_t cSectorsPerGDE;
286 /** Number of entries in the grain directory. */
287 uint32_t cGDEntries;
288 /** Pointer to the next free sector. Legacy information. Do not use. */
289 uint32_t uFreeSector;
290 /** Number of this extent in the list of images. */
291 uint32_t uExtent;
292 /** Pointer to the descriptor (NULL if no descriptor in this extent). */
293 char *pDescData;
294 /** Pointer to the grain directory. */
295 uint32_t *pGD;
296 /** Pointer to the redundant grain directory. */
297 uint32_t *pRGD;
298 /** VMDK version of this extent. 1=1.0/1.1 */
299 uint32_t uVersion;
300 /** Type of this extent. */
301 VMDKETYPE enmType;
302 /** Access to this extent. */
303 VMDKACCESS enmAccess;
304 /** Flag whether this extent is marked as unclean. */
305 bool fUncleanShutdown;
306 /** Flag whether the metadata in the extent header needs to be updated. */
307 bool fMetaDirty;
308 /** Flag whether there is a footer in this extent. */
309 bool fFooter;
310 /** Compression type for this extent. */
311 uint16_t uCompression;
312 /** Last grain which has been written to. Only for streamOptimized extents. */
313 uint32_t uLastGrainWritten;
314 /** Sector number of last grain which has been written to. Only for
315 * streamOptimized extents. */
316 uint32_t uLastGrainSector;
317 /** Data size of last grain which has been written to. Only for
318 * streamOptimized extents. */
319 uint32_t cbLastGrainWritten;
320 /** Starting sector of the decompressed grain buffer. */
321 uint32_t uGrainSector;
322 /** Decompressed grain buffer for streamOptimized extents. */
323 void *pvGrain;
324 /** Reference to the image in which this extent is used. Do not use this
325 * regularly just to avoid passing pImage references to functions
326 * explicitly. */
327 struct VMDKIMAGE *pImage;
328} VMDKEXTENT, *PVMDKEXTENT;
329
330/**
331 * Grain table cache size. Allocated per image.
332 */
333#define VMDK_GT_CACHE_SIZE 256
334
335/**
336 * Grain table block size. Smaller than an actual grain table block to allow
337 * more grain table blocks to be cached without having to allocate excessive
338 * amounts of memory for the cache.
339 */
340#define VMDK_GT_CACHELINE_SIZE 128
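/*
 * Illustrative sketch (not part of the original file): with the direct
 * mapped cache described for VMDKGTCACHE below, a lookup boils down to
 * hashing the grain table block number into the fixed-size entry array.
 * The index computation shown here is a simplified assumption; the hash
 * actually used by the cache code may differ.
 *
 * @code
 *     unsigned idxCache = (unsigned)(uGTBlock % VMDK_GT_CACHE_SIZE);
 *     PVMDKGTCACHEENTRY pEntry = &pCache->aGTCache[idxCache];
 * @endcode
 */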
341
342
343/**
344 * Maximum number of lines in a descriptor file. Not worth the effort of
345 * making it variable. Descriptor files are generally very short (~20 lines),
346 * with the exception of sparse files split into 2G chunks, which at the
347 * maximum size (almost 2T) need exactly 1025 lines for the disk database.
348 */
349#define VMDK_DESCRIPTOR_LINES_MAX 1100U
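/* Worked example (illustrative): an almost 2T image split into 2G chunks
 * yields roughly 2048 / 2 = 1024 extent lines; together with the header,
 * disk descriptor and disk database sections this stays safely below the
 * 1100 line limit defined above. */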
350
351/**
352 * Parsed descriptor information. Allows easy access and update of the
353 * descriptor (whether separate file or not). Free form text files suck.
354 */
355typedef struct VMDKDESCRIPTOR
356{
357 /** Line number of first entry of the disk descriptor. */
358 unsigned uFirstDesc;
359 /** Line number of first entry in the extent description. */
360 unsigned uFirstExtent;
361 /** Line number of first disk database entry. */
362 unsigned uFirstDDB;
363 /** Total number of lines. */
364 unsigned cLines;
365 /** Total amount of memory available for the descriptor. */
366 size_t cbDescAlloc;
367 /** Set if descriptor has been changed and not yet written to disk. */
368 bool fDirty;
369 /** Array of pointers to the data in the descriptor. */
370 char *aLines[VMDK_DESCRIPTOR_LINES_MAX];
371 /** Array of line indices pointing to the next non-comment line. */
372 unsigned aNextLines[VMDK_DESCRIPTOR_LINES_MAX];
373} VMDKDESCRIPTOR, *PVMDKDESCRIPTOR;
374
375
376/**
377 * Cache entry for translating extent/sector to a sector number in that
378 * extent.
379 */
380typedef struct VMDKGTCACHEENTRY
381{
382 /** Extent number for which this entry is valid. */
383 uint32_t uExtent;
384 /** GT data block number. */
385 uint64_t uGTBlock;
386 /** Data part of the cache entry. */
387 uint32_t aGTData[VMDK_GT_CACHELINE_SIZE];
388} VMDKGTCACHEENTRY, *PVMDKGTCACHEENTRY;
389
390/**
391 * Cache data structure for blocks of grain table entries. For now this is a
392 * fixed size direct mapping cache, but this should be adapted to the size of
393 * the sparse image and maybe converted to a set-associative cache. The
394 * implementation below implements a write-through cache with write allocate.
395 */
396typedef struct VMDKGTCACHE
397{
398 /** Cache entries. */
399 VMDKGTCACHEENTRY aGTCache[VMDK_GT_CACHE_SIZE];
400 /** Number of cache entries (currently unused). */
401 unsigned cEntries;
402} VMDKGTCACHE, *PVMDKGTCACHE;
403
404/**
405 * Complete VMDK image data structure. Mainly a collection of extents and a few
406 * extra global data fields.
407 */
408typedef struct VMDKIMAGE
409{
410 /** Pointer to the image extents. */
411 PVMDKEXTENT pExtents;
412 /** Number of image extents. */
413 unsigned cExtents;
414 /** Pointer to the files list, for opening a file referenced multiple
415 * times only once (happens mainly with raw partition access). */
416 PVMDKFILE pFiles;
417
418 /** Base image name. */
419 const char *pszFilename;
420 /** Descriptor file if applicable. */
421 PVMDKFILE pFile;
422
423 /** Pointer to the per-disk VD interface list. */
424 PVDINTERFACE pVDIfsDisk;
425
426 /** Error interface. */
427 PVDINTERFACE pInterfaceError;
428 /** Error interface callbacks. */
429 PVDINTERFACEERROR pInterfaceErrorCallbacks;
430
431 /** Async I/O interface. */
432 PVDINTERFACE pInterfaceAsyncIO;
433 /** Async I/O interface callbacks. */
434 PVDINTERFACEASYNCIO pInterfaceAsyncIOCallbacks;
435 /**
436 * Pointer to an array of segment entries for async I/O.
437 * This is an optimization because the number of tasks to submit is not known
438 * and allocating/freeing an array in the read/write functions every time
439 * is too expensive.
440 */
441 PPDMDATASEG paSegments;
442 /** Entries available in the segments array. */
443 unsigned cSegments;
444
445 /** Open flags passed by the VBoxHDD layer. */
446 unsigned uOpenFlags;
447 /** Image flags defined during creation or determined during open. */
448 unsigned uImageFlags;
449 /** Total size of the image. */
450 uint64_t cbSize;
451 /** Physical geometry of this image. */
452 PDMMEDIAGEOMETRY PCHSGeometry;
453 /** Logical geometry of this image. */
454 PDMMEDIAGEOMETRY LCHSGeometry;
455 /** Image UUID. */
456 RTUUID ImageUuid;
457 /** Image modification UUID. */
458 RTUUID ModificationUuid;
459 /** Parent image UUID. */
460 RTUUID ParentUuid;
461 /** Parent image modification UUID. */
462 RTUUID ParentModificationUuid;
463
464 /** Pointer to grain table cache, if this image contains sparse extents. */
465 PVMDKGTCACHE pGTCache;
466 /** Pointer to the descriptor (NULL if no separate descriptor file). */
467 char *pDescData;
468 /** Allocation size of the descriptor file. */
469 size_t cbDescAlloc;
470 /** Parsed descriptor file content. */
471 VMDKDESCRIPTOR Descriptor;
472} VMDKIMAGE;
473
474
475/** State for the input callback of the inflate reader. */
476typedef struct VMDKINFLATESTATE
477{
478 /* File where the data is stored. */
479 RTFILE File;
480 /* Total size of the data to read. */
481 size_t cbSize;
482 /* Offset in the file to read. */
483 uint64_t uFileOffset;
484 /* Current read position. */
485 ssize_t iOffset;
486} VMDKINFLATESTATE;
487
488/** State for the output callback of the deflate writer. */
489typedef struct VMDKDEFLATESTATE
490{
491 /* File where the data is to be stored. */
492 RTFILE File;
493 /* Offset in the file to write at. */
494 uint64_t uFileOffset;
495 /* Current write position. */
496 ssize_t iOffset;
497} VMDKDEFLATESTATE;
498
499/*******************************************************************************
500 * Static Variables *
501 *******************************************************************************/
502
503/** NULL-terminated array of supported file extensions. */
504static const char *const s_apszVmdkFileExtensions[] =
505{
506 "vmdk",
507 NULL
508};
509
510/*******************************************************************************
511* Internal Functions *
512*******************************************************************************/
513
514static void vmdkFreeGrainDirectory(PVMDKEXTENT pExtent);
515
516static void vmdkFreeExtentData(PVMDKIMAGE pImage, PVMDKEXTENT pExtent,
517 bool fDelete);
518
519static int vmdkCreateExtents(PVMDKIMAGE pImage, unsigned cExtents);
520static int vmdkFlushImage(PVMDKIMAGE pImage);
521static int vmdkSetImageComment(PVMDKIMAGE pImage, const char *pszComment);
522static void vmdkFreeImage(PVMDKIMAGE pImage, bool fDelete);
523
524
525/**
526 * Internal: signal an error to the frontend.
527 */
528DECLINLINE(int) vmdkError(PVMDKIMAGE pImage, int rc, RT_SRC_POS_DECL,
529 const char *pszFormat, ...)
530{
531 va_list va;
532 va_start(va, pszFormat);
533 if (pImage->pInterfaceError && pImage->pInterfaceErrorCallbacks)
534 pImage->pInterfaceErrorCallbacks->pfnError(pImage->pInterfaceError->pvUser, rc, RT_SRC_POS_ARGS,
535 pszFormat, va);
536 va_end(va);
537 return rc;
538}
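/*
 * Usage sketch (illustrative): this is the pattern used throughout the file
 * to report errors while passing the status code straight through, e.g.
 *
 * @code
 *     rc = vmdkError(pImage, VERR_VD_VMDK_INVALID_HEADER, RT_SRC_POS,
 *                    N_("VMDK: descriptor too big in '%s'"), pImage->pszFilename);
 * @endcode
 */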
539
540/**
541 * Internal: open a file (using a file descriptor cache to ensure each file
542 * is only opened once - anything else can cause locking problems).
543 */
544static int vmdkFileOpen(PVMDKIMAGE pImage, PVMDKFILE *ppVmdkFile,
545 const char *pszFilename, unsigned fOpen, bool fAsyncIO)
546{
547 int rc = VINF_SUCCESS;
548 PVMDKFILE pVmdkFile;
549
550 for (pVmdkFile = pImage->pFiles;
551 pVmdkFile != NULL;
552 pVmdkFile = pVmdkFile->pNext)
553 {
554 if (!strcmp(pszFilename, pVmdkFile->pszFilename))
555 {
556 Assert(fOpen == pVmdkFile->fOpen);
557 pVmdkFile->uReferences++;
558
559 *ppVmdkFile = pVmdkFile;
560
561 return rc;
562 }
563 }
564
565 /* If we get here, there's no matching entry in the cache. */
566 pVmdkFile = (PVMDKFILE)RTMemAllocZ(sizeof(VMDKFILE));
567 if (!VALID_PTR(pVmdkFile))
568 {
569 *ppVmdkFile = NULL;
570 return VERR_NO_MEMORY;
571 }
572
573 pVmdkFile->pszFilename = RTStrDup(pszFilename);
574 if (!VALID_PTR(pVmdkFile->pszFilename))
575 {
576 RTMemFree(pVmdkFile);
577 *ppVmdkFile = NULL;
578 return VERR_NO_MEMORY;
579 }
580 pVmdkFile->fOpen = fOpen;
581 if ((pImage->uOpenFlags & VD_OPEN_FLAGS_ASYNC_IO) && (fAsyncIO))
582 {
583 rc = pImage->pInterfaceAsyncIOCallbacks->pfnOpen(pImage->pInterfaceAsyncIO->pvUser,
584 pszFilename,
585 pImage->uOpenFlags & VD_OPEN_FLAGS_READONLY
586 ? VD_INTERFACEASYNCIO_OPEN_FLAGS_READONLY
587 : 0,
588 NULL,
589 &pVmdkFile->pStorage);
590 pVmdkFile->fAsyncIO = true;
591 }
592 else
593 {
594 rc = RTFileOpen(&pVmdkFile->File, pszFilename, fOpen);
595 pVmdkFile->fAsyncIO = false;
596 }
597 if (RT_SUCCESS(rc))
598 {
599 pVmdkFile->uReferences = 1;
600 pVmdkFile->pImage = pImage;
601 pVmdkFile->pNext = pImage->pFiles;
602 if (pImage->pFiles)
603 pImage->pFiles->pPrev = pVmdkFile;
604 pImage->pFiles = pVmdkFile;
605 *ppVmdkFile = pVmdkFile;
606 }
607 else
608 {
609 RTStrFree((char *)(void *)pVmdkFile->pszFilename);
610 RTMemFree(pVmdkFile);
611 *ppVmdkFile = NULL;
612 }
613
614 return rc;
615}
616
617/**
618 * Internal: close a file, updating the file descriptor cache.
619 */
620static int vmdkFileClose(PVMDKIMAGE pImage, PVMDKFILE *ppVmdkFile, bool fDelete)
621{
622 int rc = VINF_SUCCESS;
623 PVMDKFILE pVmdkFile = *ppVmdkFile;
624
625 AssertPtr(pVmdkFile);
626
627 pVmdkFile->fDelete |= fDelete;
628 Assert(pVmdkFile->uReferences);
629 pVmdkFile->uReferences--;
630 if (pVmdkFile->uReferences == 0)
631 {
632 PVMDKFILE pPrev;
633 PVMDKFILE pNext;
634
635 /* Unchain the element from the list. */
636 pPrev = pVmdkFile->pPrev;
637 pNext = pVmdkFile->pNext;
638
639 if (pNext)
640 pNext->pPrev = pPrev;
641 if (pPrev)
642 pPrev->pNext = pNext;
643 else
644 pImage->pFiles = pNext;
645
646 if (pVmdkFile->fAsyncIO)
647 {
648 rc = pImage->pInterfaceAsyncIOCallbacks->pfnClose(pImage->pInterfaceAsyncIO->pvUser,
649 pVmdkFile->pStorage);
650 }
651 else
652 {
653 rc = RTFileClose(pVmdkFile->File);
654 }
655 if (RT_SUCCESS(rc) && pVmdkFile->fDelete)
656 rc = RTFileDelete(pVmdkFile->pszFilename);
657 RTStrFree((char *)(void *)pVmdkFile->pszFilename);
658 RTMemFree(pVmdkFile);
659 }
660
661 *ppVmdkFile = NULL;
662 return rc;
663}
664
665/**
666 * Internal: read from a file distinguishing between async and normal operation
667 */
668DECLINLINE(int) vmdkFileReadAt(PVMDKFILE pVmdkFile,
669 uint64_t uOffset, void *pvBuf,
670 size_t cbToRead, size_t *pcbRead)
671{
672 PVMDKIMAGE pImage = pVmdkFile->pImage;
673
674 if (pVmdkFile->fAsyncIO)
675 return pImage->pInterfaceAsyncIOCallbacks->pfnReadSync(pImage->pInterfaceAsyncIO->pvUser,
676 pVmdkFile->pStorage, uOffset,
677 cbToRead, pvBuf, pcbRead);
678 else
679 return RTFileReadAt(pVmdkFile->File, uOffset, pvBuf, cbToRead, pcbRead);
680}
681
682/**
683 * Internal: write to a file distinguishing between async and normal operation
684 */
685DECLINLINE(int) vmdkFileWriteAt(PVMDKFILE pVmdkFile,
686 uint64_t uOffset, const void *pvBuf,
687 size_t cbToWrite, size_t *pcbWritten)
688{
689 PVMDKIMAGE pImage = pVmdkFile->pImage;
690
691 if (pVmdkFile->fAsyncIO)
692 return pImage->pInterfaceAsyncIOCallbacks->pfnWriteSync(pImage->pInterfaceAsyncIO->pvUser,
693 pVmdkFile->pStorage, uOffset,
694 cbToWrite, pvBuf, pcbWritten);
695 else
696 return RTFileWriteAt(pVmdkFile->File, uOffset, pvBuf, cbToWrite, pcbWritten);
697}
698
699/**
700 * Internal: get the size of a file distinguishing between async and normal operation
701 */
702DECLINLINE(int) vmdkFileGetSize(PVMDKFILE pVmdkFile, uint64_t *pcbSize)
703{
704 PVMDKIMAGE pImage = pVmdkFile->pImage;
705
706 if (pVmdkFile->fAsyncIO)
707 {
708 return pImage->pInterfaceAsyncIOCallbacks->pfnGetSize(pImage->pInterfaceAsyncIO->pvUser,
709 pVmdkFile->pStorage,
710 pcbSize);
711 }
712 else
713 return RTFileGetSize(pVmdkFile->File, pcbSize);
714}
715
716/**
717 * Internal: set the size of a file distinguishing between async and normal operation
718 */
719DECLINLINE(int) vmdkFileSetSize(PVMDKFILE pVmdkFile, uint64_t cbSize)
720{
721 PVMDKIMAGE pImage = pVmdkFile->pImage;
722
723 if (pVmdkFile->fAsyncIO)
724 {
725 return pImage->pInterfaceAsyncIOCallbacks->pfnSetSize(pImage->pInterfaceAsyncIO->pvUser,
726 pVmdkFile->pStorage,
727 cbSize);
728 }
729 else
730 return RTFileSetSize(pVmdkFile->File, cbSize);
731}
732
733/**
734 * Internal: flush a file distinguishing between async and normal operation
735 */
736DECLINLINE(int) vmdkFileFlush(PVMDKFILE pVmdkFile)
737{
738 PVMDKIMAGE pImage = pVmdkFile->pImage;
739
740 if (pVmdkFile->fAsyncIO)
741 return pImage->pInterfaceAsyncIOCallbacks->pfnFlushSync(pImage->pInterfaceAsyncIO->pvUser,
742 pVmdkFile->pStorage);
743 else
744 return RTFileFlush(pVmdkFile->File);
745}
746
747
748static DECLCALLBACK(int) vmdkFileInflateHelper(void *pvUser, void *pvBuf, size_t cbBuf, size_t *pcbBuf)
749{
750 VMDKINFLATESTATE *pInflateState = (VMDKINFLATESTATE *)pvUser;
751
752 Assert(cbBuf);
753 if (pInflateState->iOffset < 0)
754 {
755 *(uint8_t *)pvBuf = RTZIPTYPE_ZLIB;
756 if (pcbBuf)
757 *pcbBuf = 1;
758 pInflateState->iOffset = 0;
759 return VINF_SUCCESS;
760 }
761 cbBuf = RT_MIN(cbBuf, pInflateState->cbSize);
762 int rc = RTFileReadAt(pInflateState->File, pInflateState->uFileOffset, pvBuf, cbBuf, NULL);
763 if (RT_FAILURE(rc))
764 return rc;
765 pInflateState->uFileOffset += cbBuf;
766 pInflateState->iOffset += cbBuf;
767 pInflateState->cbSize -= cbBuf;
768 Assert(pcbBuf);
769 *pcbBuf = cbBuf;
770 return VINF_SUCCESS;
771}
772
773/**
774 * Internal: read from a file and inflate the compressed data,
775 * distinguishing between async and normal operation
776 */
777DECLINLINE(int) vmdkFileInflateAt(PVMDKFILE pVmdkFile,
778 uint64_t uOffset, void *pvBuf,
779 size_t cbToRead, unsigned uMarker,
780 uint64_t *puLBA, uint32_t *pcbMarkerData)
781{
782 if (pVmdkFile->fAsyncIO)
783 {
784 AssertMsgFailed(("TODO\n"));
785 return VERR_NOT_SUPPORTED;
786 }
787 else
788 {
789 int rc;
790 PRTZIPDECOMP pZip = NULL;
791 VMDKMARKER Marker;
792 uint64_t uCompOffset, cbComp;
793 VMDKINFLATESTATE InflateState;
794 size_t cbActuallyRead;
795 size_t cbMarker = sizeof(Marker);
796
797 if (uMarker == VMDK_MARKER_IGNORE)
798 cbMarker -= sizeof(Marker.uType);
799 rc = RTFileReadAt(pVmdkFile->File, uOffset, &Marker, cbMarker, NULL);
800 if (RT_FAILURE(rc))
801 return rc;
802 Marker.uSector = RT_LE2H_U64(Marker.uSector);
803 Marker.cbSize = RT_LE2H_U32(Marker.cbSize);
804 if ( uMarker != VMDK_MARKER_IGNORE
805 && ( RT_LE2H_U32(Marker.uType) != uMarker
806 || Marker.cbSize != 0))
807 return VERR_VD_VMDK_INVALID_FORMAT;
808 if (Marker.cbSize != 0)
809 {
810 /* Compressed grain marker. Data follows immediately. */
811 uCompOffset = uOffset + 12;
812 cbComp = Marker.cbSize;
813 if (puLBA)
814 *puLBA = Marker.uSector;
815 if (pcbMarkerData)
816 *pcbMarkerData = cbComp + 12;
817 }
818 else
819 {
820 Marker.uType = RT_LE2H_U32(Marker.uType);
821 if (Marker.uType == VMDK_MARKER_EOS)
822 {
823 Assert(uMarker != VMDK_MARKER_EOS);
824 return VERR_VD_VMDK_INVALID_FORMAT;
825 }
826 else if ( Marker.uType == VMDK_MARKER_GT
827 || Marker.uType == VMDK_MARKER_GD
828 || Marker.uType == VMDK_MARKER_FOOTER)
829 {
830 uCompOffset = uOffset + 512;
831 cbComp = VMDK_SECTOR2BYTE(Marker.uSector);
832 if (pcbMarkerData)
833 *pcbMarkerData = cbComp + 512;
834 }
835 else
836 {
837 AssertMsgFailed(("VMDK: unknown marker type %u\n", Marker.uType));
838 return VERR_VD_VMDK_INVALID_FORMAT;
839 }
840 }
841 InflateState.File = pVmdkFile->File;
842 InflateState.cbSize = cbComp;
843 InflateState.uFileOffset = uCompOffset;
844 InflateState.iOffset = -1;
845 /* Sanity check - the expansion ratio should be much less than 2. */
846 Assert(cbComp < 2 * cbToRead);
847 if (cbComp >= 2 * cbToRead)
848 return VERR_VD_VMDK_INVALID_FORMAT;
849
850 rc = RTZipDecompCreate(&pZip, &InflateState, vmdkFileInflateHelper);
851 if (RT_FAILURE(rc))
852 return rc;
853 rc = RTZipDecompress(pZip, pvBuf, cbToRead, &cbActuallyRead);
854 RTZipDecompDestroy(pZip);
855 if (RT_FAILURE(rc))
856 return rc;
857 if (cbActuallyRead != cbToRead)
858 rc = VERR_VD_VMDK_INVALID_FORMAT;
859 return rc;
860 }
861}
862
863static DECLCALLBACK(int) vmdkFileDeflateHelper(void *pvUser, const void *pvBuf, size_t cbBuf)
864{
865 VMDKDEFLATESTATE *pDeflateState = (VMDKDEFLATESTATE *)pvUser;
866
867 Assert(cbBuf);
868 if (pDeflateState->iOffset < 0)
869 {
870 pvBuf = (const uint8_t *)pvBuf + 1;
871 cbBuf--;
872 pDeflateState->iOffset = 0;
873 }
874 if (!cbBuf)
875 return VINF_SUCCESS;
876 int rc = RTFileWriteAt(pDeflateState->File, pDeflateState->uFileOffset, pvBuf, cbBuf, NULL);
877 if (RT_FAILURE(rc))
878 return rc;
879 pDeflateState->uFileOffset += cbBuf;
880 pDeflateState->iOffset += cbBuf;
881 return VINF_SUCCESS;
882}
883
884/**
885 * Internal: deflate the uncompressed data and write to a file,
886 * distinguishing between async and normal operation
887 */
888DECLINLINE(int) vmdkFileDeflateAt(PVMDKFILE pVmdkFile,
889 uint64_t uOffset, const void *pvBuf,
890 size_t cbToWrite, unsigned uMarker,
891 uint64_t uLBA, uint32_t *pcbMarkerData)
892{
893 if (pVmdkFile->fAsyncIO)
894 {
895 AssertMsgFailed(("TODO\n"));
896 return VERR_NOT_SUPPORTED;
897 }
898 else
899 {
900 int rc;
901 PRTZIPCOMP pZip = NULL;
902 VMDKMARKER Marker;
903 uint64_t uCompOffset, cbDecomp;
904 VMDKDEFLATESTATE DeflateState;
905
906 Marker.uSector = RT_H2LE_U64(uLBA);
907 Marker.cbSize = RT_H2LE_U32((uint32_t)cbToWrite);
908 if (uMarker == VMDK_MARKER_IGNORE)
909 {
910 /* Compressed grain marker. Data follows immediately. */
911 uCompOffset = uOffset + 12;
912 cbDecomp = cbToWrite;
913 }
914 else
915 {
916 /** @todo implement creating the other marker types */
917 return VERR_NOT_IMPLEMENTED;
918 }
919 DeflateState.File = pVmdkFile->File;
920 DeflateState.uFileOffset = uCompOffset;
921 DeflateState.iOffset = -1;
922
923 rc = RTZipCompCreate(&pZip, &DeflateState, vmdkFileDeflateHelper, RTZIPTYPE_ZLIB, RTZIPLEVEL_DEFAULT);
924 if (RT_FAILURE(rc))
925 return rc;
926 rc = RTZipCompress(pZip, pvBuf, cbDecomp);
927 if (RT_SUCCESS(rc))
928 rc = RTZipCompFinish(pZip);
929 RTZipCompDestroy(pZip);
930 if (RT_SUCCESS(rc))
931 {
932 if (pcbMarkerData)
933 *pcbMarkerData = 12 + DeflateState.iOffset;
934 /* Set the file size to remove old garbage in case the block is
935 * rewritten. Cannot cause data loss as the code calling this
936 * guarantees that data is only appended. */
937 Assert(DeflateState.uFileOffset > uCompOffset);
938 rc = RTFileSetSize(pVmdkFile->File, DeflateState.uFileOffset);
939
940 if (uMarker == VMDK_MARKER_IGNORE)
941 {
942 /* Compressed grain marker. */
943 Marker.cbSize = RT_H2LE_U32(DeflateState.iOffset);
944 rc = RTFileWriteAt(pVmdkFile->File, uOffset, &Marker, 12, NULL);
945 if (RT_FAILURE(rc))
946 return rc;
947 }
948 else
949 {
950 /** @todo implement creating the other marker types */
951 return VERR_NOT_IMPLEMENTED;
952 }
953 }
954 return rc;
955 }
956}
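/*
 * On-disk layout sketch (illustrative) of a compressed grain as written by
 * vmdkFileDeflateAt() above and read back by vmdkFileInflateAt():
 *
 * @code
 *     uOffset + 0   uint64_t uSector  // grain LBA, little-endian
 *     uOffset + 8   uint32_t cbSize   // size of the deflated data in bytes
 *     uOffset + 12  deflated data     // cbSize bytes (zlib via RTZip)
 * @endcode
 *
 * The size reported through pcbMarkerData is correspondingly 12 + cbSize.
 */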
957
958/**
959 * Internal: check if all files are closed, prevent leaking resources.
960 */
961static int vmdkFileCheckAllClose(PVMDKIMAGE pImage)
962{
963 int rc = VINF_SUCCESS, rc2;
964 PVMDKFILE pVmdkFile;
965
966 Assert(pImage->pFiles == NULL);
967 for (pVmdkFile = pImage->pFiles;
968 pVmdkFile != NULL;
969 pVmdkFile = pVmdkFile->pNext)
970 {
971 LogRel(("VMDK: leaking reference to file \"%s\"\n",
972 pVmdkFile->pszFilename));
973 pImage->pFiles = pVmdkFile->pNext;
974
975 if (pImage->uOpenFlags & VD_OPEN_FLAGS_ASYNC_IO)
976 rc2 = pImage->pInterfaceAsyncIOCallbacks->pfnClose(pImage->pInterfaceAsyncIO->pvUser,
977 pVmdkFile->pStorage);
978 else
979 rc2 = RTFileClose(pVmdkFile->File);
980
981 if (RT_SUCCESS(rc) && pVmdkFile->fDelete)
982 rc2 = RTFileDelete(pVmdkFile->pszFilename);
983 RTStrFree((char *)(void *)pVmdkFile->pszFilename);
984 RTMemFree(pVmdkFile);
985 if (RT_SUCCESS(rc))
986 rc = rc2;
987 }
988 return rc;
989}
990
991/**
992 * Internal: truncate a string (at a UTF-8 code point boundary) and escape the
993 * characters which are problematic in the descriptor (backslash, newline, CR).
994 */
995static char *vmdkEncodeString(const char *psz)
996{
997 char szEnc[VMDK_ENCODED_COMMENT_MAX + 3];
998 char *pszDst = szEnc;
999
1000 AssertPtr(psz);
1001
1002 for (; *psz; psz = RTStrNextCp(psz))
1003 {
1004 char *pszDstPrev = pszDst;
1005 RTUNICP Cp = RTStrGetCp(psz);
1006 if (Cp == '\\')
1007 {
1008 pszDst = RTStrPutCp(pszDst, Cp);
1009 pszDst = RTStrPutCp(pszDst, Cp);
1010 }
1011 else if (Cp == '\n')
1012 {
1013 pszDst = RTStrPutCp(pszDst, '\\');
1014 pszDst = RTStrPutCp(pszDst, 'n');
1015 }
1016 else if (Cp == '\r')
1017 {
1018 pszDst = RTStrPutCp(pszDst, '\\');
1019 pszDst = RTStrPutCp(pszDst, 'r');
1020 }
1021 else
1022 pszDst = RTStrPutCp(pszDst, Cp);
1023 if (pszDst - szEnc >= VMDK_ENCODED_COMMENT_MAX - 1)
1024 {
1025 pszDst = pszDstPrev;
1026 break;
1027 }
1028 }
1029 *pszDst = '\0';
1030 return RTStrDup(szEnc);
1031}
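/*
 * Usage sketch (illustrative): a real newline in the input is stored as the
 * two characters '\' and 'n', a carriage return as '\' and 'r', and a
 * literal backslash is doubled; vmdkDecodeString() below reverses this.
 *
 * @code
 *     char *pszEnc = vmdkEncodeString("line one\nline two");
 *     // pszEnc now holds: line one\nline two   (backslash + 'n', no real newline)
 *     RTStrFree(pszEnc);
 * @endcode
 */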
1032
1033/**
1034 * Internal: decode a string and store it into the specified string.
1035 */
1036static int vmdkDecodeString(const char *pszEncoded, char *psz, size_t cb)
1037{
1038 int rc = VINF_SUCCESS;
1039 char szBuf[4];
1040
1041 if (!cb)
1042 return VERR_BUFFER_OVERFLOW;
1043
1044 AssertPtr(psz);
1045
1046 for (; *pszEncoded; pszEncoded = RTStrNextCp(pszEncoded))
1047 {
1048 char *pszDst = szBuf;
1049 RTUNICP Cp = RTStrGetCp(pszEncoded);
1050 if (Cp == '\\')
1051 {
1052 pszEncoded = RTStrNextCp(pszEncoded);
1053 RTUNICP CpQ = RTStrGetCp(pszEncoded);
1054 if (CpQ == 'n')
1055 RTStrPutCp(pszDst, '\n');
1056 else if (CpQ == 'r')
1057 RTStrPutCp(pszDst, '\r');
1058 else if (CpQ == '\0')
1059 {
1060 rc = VERR_VD_VMDK_INVALID_HEADER;
1061 break;
1062 }
1063 else
1064 RTStrPutCp(pszDst, CpQ);
1065 }
1066 else
1067 pszDst = RTStrPutCp(pszDst, Cp);
1068
1069 /* Need to leave space for terminating NUL. */
1070 if ((size_t)(pszDst - szBuf) + 1 >= cb)
1071 {
1072 rc = VERR_BUFFER_OVERFLOW;
1073 break;
1074 }
1075 memcpy(psz, szBuf, pszDst - szBuf);
1076 psz += pszDst - szBuf;
1077 }
1078 *psz = '\0';
1079 return rc;
1080}
1081
1082static int vmdkReadGrainDirectory(PVMDKEXTENT pExtent)
1083{
1084 int rc = VINF_SUCCESS;
1085 unsigned i;
1086 uint32_t *pGD = NULL, *pRGD = NULL, *pGDTmp, *pRGDTmp;
1087 size_t cbGD = pExtent->cGDEntries * sizeof(uint32_t);
1088
1089 if (pExtent->enmType != VMDKETYPE_HOSTED_SPARSE)
1090 goto out;
1091
1092 pGD = (uint32_t *)RTMemAllocZ(cbGD);
1093 if (!pGD)
1094 {
1095 rc = VERR_NO_MEMORY;
1096 goto out;
1097 }
1098 pExtent->pGD = pGD;
1099 /* The VMDK 1.1 spec talks about compressed grain directories, but real
1100 * life files don't have them. The spec is wrong in creative ways. */
1101 rc = vmdkFileReadAt(pExtent->pFile, VMDK_SECTOR2BYTE(pExtent->uSectorGD),
1102 pGD, cbGD, NULL);
1103 AssertRC(rc);
1104 if (RT_FAILURE(rc))
1105 {
1106 rc = vmdkError(pExtent->pImage, rc, RT_SRC_POS, N_("VMDK: could not read grain directory in '%s': %Rrc"), pExtent->pszFullname);
1107 goto out;
1108 }
1109 for (i = 0, pGDTmp = pGD; i < pExtent->cGDEntries; i++, pGDTmp++)
1110 *pGDTmp = RT_LE2H_U32(*pGDTmp);
1111
1112 if (pExtent->uSectorRGD)
1113 {
1114 pRGD = (uint32_t *)RTMemAllocZ(cbGD);
1115 if (!pRGD)
1116 {
1117 rc = VERR_NO_MEMORY;
1118 goto out;
1119 }
1120 pExtent->pRGD = pRGD;
1121 /* The VMDK 1.1 spec talks about compressed grain directories, but real
1122 * life files don't have them. The spec is wrong in creative ways. */
1123 rc = vmdkFileReadAt(pExtent->pFile, VMDK_SECTOR2BYTE(pExtent->uSectorRGD),
1124 pRGD, cbGD, NULL);
1125 AssertRC(rc);
1126 if (RT_FAILURE(rc))
1127 {
1128 rc = vmdkError(pExtent->pImage, rc, RT_SRC_POS, N_("VMDK: could not read redundant grain directory in '%s'"), pExtent->pszFullname);
1129 goto out;
1130 }
1131 for (i = 0, pRGDTmp = pRGD; i < pExtent->cGDEntries; i++, pRGDTmp++)
1132 *pRGDTmp = RT_LE2H_U32(*pRGDTmp);
1133
1134 /* Check grain table and redundant grain table for consistency. */
1135 size_t cbGT = pExtent->cGTEntries * sizeof(uint32_t);
1136 uint32_t *pTmpGT1 = (uint32_t *)RTMemTmpAlloc(cbGT);
1137 if (!pTmpGT1)
1138 {
1139 rc = VERR_NO_MEMORY;
1140 goto out;
1141 }
1142 uint32_t *pTmpGT2 = (uint32_t *)RTMemTmpAlloc(cbGT);
1143 if (!pTmpGT2)
1144 {
1145 RTMemTmpFree(pTmpGT1);
1146 rc = VERR_NO_MEMORY;
1147 goto out;
1148 }
1149
1150 for (i = 0, pGDTmp = pGD, pRGDTmp = pRGD;
1151 i < pExtent->cGDEntries;
1152 i++, pGDTmp++, pRGDTmp++)
1153 {
1154 /* If no grain table is allocated skip the entry. */
1155 if (*pGDTmp == 0 && *pRGDTmp == 0)
1156 continue;
1157
1158 if (*pGDTmp == 0 || *pRGDTmp == 0 || *pGDTmp == *pRGDTmp)
1159 {
1160 /* Only one of the two grain directory entries refers to a not yet
1161 * allocated grain table, or both copies refer to the same grain
1162 * table. Neither is allowed. */
1163 RTMemTmpFree(pTmpGT1);
1164 RTMemTmpFree(pTmpGT2);
1165 rc = vmdkError(pExtent->pImage, VERR_VD_VMDK_INVALID_HEADER, RT_SRC_POS, N_("VMDK: inconsistent references to grain directory in '%s'"), pExtent->pszFullname);
1166 goto out;
1167 }
1168 /* The VMDK 1.1 spec talks about compressed grain tables, but real
1169 * life files don't have them. The spec is wrong in creative ways. */
1170 rc = vmdkFileReadAt(pExtent->pFile, VMDK_SECTOR2BYTE(*pGDTmp),
1171 pTmpGT1, cbGT, NULL);
1172 if (RT_FAILURE(rc))
1173 {
1174 rc = vmdkError(pExtent->pImage, rc, RT_SRC_POS, N_("VMDK: error reading grain table in '%s'"), pExtent->pszFullname);
1175 RTMemTmpFree(pTmpGT1);
1176 RTMemTmpFree(pTmpGT2);
1177 goto out;
1178 }
1179 /* The VMDK 1.1 spec talks about compressed grain tables, but real
1180 * life files don't have them. The spec is wrong in creative ways. */
1181 rc = vmdkFileReadAt(pExtent->pFile, VMDK_SECTOR2BYTE(*pRGDTmp),
1182 pTmpGT2, cbGT, NULL);
1183 if (RT_FAILURE(rc))
1184 {
1185 rc = vmdkError(pExtent->pImage, rc, RT_SRC_POS, N_("VMDK: error reading backup grain table in '%s'"), pExtent->pszFullname);
1186 RTMemTmpFree(pTmpGT1);
1187 RTMemTmpFree(pTmpGT2);
1188 goto out;
1189 }
1190 if (memcmp(pTmpGT1, pTmpGT2, cbGT))
1191 {
1192 RTMemTmpFree(pTmpGT1);
1193 RTMemTmpFree(pTmpGT2);
1194 rc = vmdkError(pExtent->pImage, VERR_VD_VMDK_INVALID_HEADER, RT_SRC_POS, N_("VMDK: inconsistency between grain table and backup grain table in '%s'"), pExtent->pszFullname);
1195 goto out;
1196 }
1197 }
1198
1199 /** @todo figure out what to do for unclean VMDKs. */
1200 RTMemTmpFree(pTmpGT1);
1201 RTMemTmpFree(pTmpGT2);
1202 }
1203
1204 if (pExtent->pImage->uImageFlags & VD_VMDK_IMAGE_FLAGS_STREAM_OPTIMIZED)
1205 {
1206 uint32_t uLastGrainWritten = 0;
1207 uint32_t uLastGrainSector = 0;
1208 size_t cbGT = pExtent->cGTEntries * sizeof(uint32_t);
1209 uint32_t *pTmpGT = (uint32_t *)RTMemTmpAlloc(cbGT);
1210 if (!pTmpGT)
1211 {
1212 rc = VERR_NO_MEMORY;
1213 goto out;
1214 }
1215 for (i = 0, pGDTmp = pGD; i < pExtent->cGDEntries; i++, pGDTmp++)
1216 {
1217 /* If no grain table is allocated skip the entry. */
1218 if (*pGDTmp == 0)
1219 continue;
1220
1221 /* The VMDK 1.1 spec talks about compressed grain tables, but real
1222 * life files don't have them. The spec is wrong in creative ways. */
1223 rc = vmdkFileReadAt(pExtent->pFile, VMDK_SECTOR2BYTE(*pGDTmp),
1224 pTmpGT, cbGT, NULL);
1225 if (RT_FAILURE(rc))
1226 {
1227 rc = vmdkError(pExtent->pImage, rc, RT_SRC_POS, N_("VMDK: error reading grain table in '%s'"), pExtent->pszFullname);
1228 RTMemTmpFree(pTmpGT);
1229 goto out;
1230 }
1231 uint32_t j;
1232 uint32_t *pGTTmp;
1233 for (j = 0, pGTTmp = pTmpGT; j < pExtent->cGTEntries; j++, pGTTmp++)
1234 {
1235 uint32_t uGTTmp = RT_LE2H_U32(*pGTTmp);
1236
1237 /* If no grain is allocated skip the entry. */
1238 if (uGTTmp == 0)
1239 continue;
1240
1241 if (uLastGrainSector && uLastGrainSector >= uGTTmp)
1242 {
1243 rc = vmdkError(pExtent->pImage, rc, RT_SRC_POS, N_("VMDK: grain table in '%s' contains a violation of the ordering assumptions"), pExtent->pszFullname);
1244 RTMemTmpFree(pTmpGT);
1245 goto out;
1246 }
1247 uLastGrainSector = uGTTmp;
1248 uLastGrainWritten = i * pExtent->cGTEntries + j;
1249 }
1250 }
1251 RTMemTmpFree(pTmpGT);
1252
1253 /* streamOptimized extents need a grain decompress buffer. */
1254 pExtent->pvGrain = RTMemAlloc(VMDK_SECTOR2BYTE(pExtent->cSectorsPerGrain));
1255 if (!pExtent->pvGrain)
1256 {
1257 rc = VERR_NO_MEMORY;
1258 goto out;
1259 }
1260
1261 if (uLastGrainSector)
1262 {
1263 uint64_t uLBA = 0;
1264 uint32_t cbMarker = 0;
1265 rc = vmdkFileInflateAt(pExtent->pFile, VMDK_SECTOR2BYTE(uLastGrainSector),
1266 pExtent->pvGrain, VMDK_SECTOR2BYTE(pExtent->cSectorsPerGrain), VMDK_MARKER_IGNORE, &uLBA, &cbMarker);
1267 if (RT_FAILURE(rc))
1268 goto out;
1269
1270 Assert(uLBA == uLastGrainWritten * pExtent->cSectorsPerGrain);
1271 pExtent->uGrainSector = uLastGrainSector;
1272 pExtent->cbLastGrainWritten = RT_ALIGN(cbMarker, 512);
1273 }
1274 pExtent->uLastGrainWritten = uLastGrainWritten;
1275 pExtent->uLastGrainSector = uLastGrainSector;
1276 }
1277
1278out:
1279 if (RT_FAILURE(rc))
1280 vmdkFreeGrainDirectory(pExtent);
1281 return rc;
1282}
1283
1284static int vmdkCreateGrainDirectory(PVMDKEXTENT pExtent, uint64_t uStartSector,
1285 bool fPreAlloc)
1286{
1287 int rc = VINF_SUCCESS;
1288 unsigned i;
1289 uint32_t *pGD = NULL, *pRGD = NULL;
1290 size_t cbGD = pExtent->cGDEntries * sizeof(uint32_t);
1291 size_t cbGDRounded = RT_ALIGN_64(pExtent->cGDEntries * sizeof(uint32_t), 512);
1292 size_t cbGTRounded;
1293 uint64_t cbOverhead;
1294
1295 if (fPreAlloc)
1296 cbGTRounded = RT_ALIGN_64(pExtent->cGDEntries * pExtent->cGTEntries * sizeof(uint32_t), 512);
1297 else
1298 cbGTRounded = 0;
1299
1300 pGD = (uint32_t *)RTMemAllocZ(cbGD);
1301 if (!pGD)
1302 {
1303 rc = VERR_NO_MEMORY;
1304 goto out;
1305 }
1306 pExtent->pGD = pGD;
1307 pRGD = (uint32_t *)RTMemAllocZ(cbGD);
1308 if (!pRGD)
1309 {
1310 rc = VERR_NO_MEMORY;
1311 goto out;
1312 }
1313 pExtent->pRGD = pRGD;
1314
1315 cbOverhead = RT_ALIGN_64(VMDK_SECTOR2BYTE(uStartSector) + 2 * (cbGDRounded + cbGTRounded), VMDK_SECTOR2BYTE(pExtent->cSectorsPerGrain));
1316 /* For streamOptimized extents put the end-of-stream marker at the end. */
1317 if (pExtent->pImage->uImageFlags & VD_VMDK_IMAGE_FLAGS_STREAM_OPTIMIZED)
1318 rc = vmdkFileSetSize(pExtent->pFile, cbOverhead + 512);
1319 else
1320 rc = vmdkFileSetSize(pExtent->pFile, cbOverhead);
1321 if (RT_FAILURE(rc))
1322 goto out;
1323 pExtent->uSectorRGD = uStartSector;
1324 pExtent->uSectorGD = uStartSector + VMDK_BYTE2SECTOR(cbGDRounded + cbGTRounded);
1325
1326 if (fPreAlloc)
1327 {
1328 uint32_t uGTSectorLE;
1329 uint64_t uOffsetSectors;
1330
1331 uOffsetSectors = pExtent->uSectorRGD + VMDK_BYTE2SECTOR(cbGDRounded);
1332 for (i = 0; i < pExtent->cGDEntries; i++)
1333 {
1334 pRGD[i] = uOffsetSectors;
1335 uGTSectorLE = RT_H2LE_U64(uOffsetSectors);
1336 /* Write the redundant grain directory entry to disk. */
1337 rc = vmdkFileWriteAt(pExtent->pFile,
1338 VMDK_SECTOR2BYTE(pExtent->uSectorRGD) + i * sizeof(uGTSectorLE),
1339 &uGTSectorLE, sizeof(uGTSectorLE), NULL);
1340 if (RT_FAILURE(rc))
1341 {
1342 rc = vmdkError(pExtent->pImage, rc, RT_SRC_POS, N_("VMDK: cannot write new redundant grain directory entry in '%s'"), pExtent->pszFullname);
1343 goto out;
1344 }
1345 uOffsetSectors += VMDK_BYTE2SECTOR(pExtent->cGTEntries * sizeof(uint32_t));
1346 }
1347
1348 uOffsetSectors = pExtent->uSectorGD + VMDK_BYTE2SECTOR(cbGDRounded);
1349 for (i = 0; i < pExtent->cGDEntries; i++)
1350 {
1351 pGD[i] = uOffsetSectors;
1352 uGTSectorLE = RT_H2LE_U64(uOffsetSectors);
1353 /* Write the grain directory entry to disk. */
1354 rc = vmdkFileWriteAt(pExtent->pFile,
1355 VMDK_SECTOR2BYTE(pExtent->uSectorGD) + i * sizeof(uGTSectorLE),
1356 &uGTSectorLE, sizeof(uGTSectorLE), NULL);
1357 if (RT_FAILURE(rc))
1358 {
1359 rc = vmdkError(pExtent->pImage, rc, RT_SRC_POS, N_("VMDK: cannot write new grain directory entry in '%s'"), pExtent->pszFullname);
1360 goto out;
1361 }
1362 uOffsetSectors += VMDK_BYTE2SECTOR(pExtent->cGTEntries * sizeof(uint32_t));
1363 }
1364 }
1365 pExtent->cOverheadSectors = VMDK_BYTE2SECTOR(cbOverhead);
1366
1367 /* streamOptimized extents need a grain decompress buffer. */
1368 if (pExtent->pImage->uImageFlags & VD_VMDK_IMAGE_FLAGS_STREAM_OPTIMIZED)
1369 {
1370 pExtent->pvGrain = RTMemAlloc(VMDK_SECTOR2BYTE(pExtent->cSectorsPerGrain));
1371 if (!pExtent->pvGrain)
1372 {
1373 rc = VERR_NO_MEMORY;
1374 goto out;
1375 }
1376 }
1377
1378out:
1379 if (RT_FAILURE(rc))
1380 vmdkFreeGrainDirectory(pExtent);
1381 return rc;
1382}
1383
1384static void vmdkFreeGrainDirectory(PVMDKEXTENT pExtent)
1385{
1386 if (pExtent->pGD)
1387 {
1388 RTMemFree(pExtent->pGD);
1389 pExtent->pGD = NULL;
1390 }
1391 if (pExtent->pRGD)
1392 {
1393 RTMemFree(pExtent->pRGD);
1394 pExtent->pRGD = NULL;
1395 }
1396}
1397
1398static int vmdkStringUnquote(PVMDKIMAGE pImage, const char *pszStr,
1399 char **ppszUnquoted, char **ppszNext)
1400{
1401 char *pszQ;
1402 char *pszUnquoted;
1403
1404 /* Skip over whitespace. */
1405 while (*pszStr == ' ' || *pszStr == '\t')
1406 pszStr++;
1407
1408 if (*pszStr != '"')
1409 {
1410 pszQ = (char *)pszStr;
1411 while (*pszQ && *pszQ != ' ' && *pszQ != '\t')
1412 pszQ++;
1413 }
1414 else
1415 {
1416 pszStr++;
1417 pszQ = (char *)strchr(pszStr, '"');
1418 if (pszQ == NULL)
1419 return vmdkError(pImage, VERR_VD_VMDK_INVALID_HEADER, RT_SRC_POS, N_("VMDK: incorrectly quoted value in descriptor in '%s'"), pImage->pszFilename);
1420 }
1421
1422 pszUnquoted = (char *)RTMemTmpAlloc(pszQ - pszStr + 1);
1423 if (!pszUnquoted)
1424 return VERR_NO_MEMORY;
1425 memcpy(pszUnquoted, pszStr, pszQ - pszStr);
1426 pszUnquoted[pszQ - pszStr] = '\0';
1427 *ppszUnquoted = pszUnquoted;
1428 if (ppszNext)
1429 *ppszNext = pszQ + 1;
1430 return VINF_SUCCESS;
1431}
1432
1433static int vmdkDescInitStr(PVMDKIMAGE pImage, PVMDKDESCRIPTOR pDescriptor,
1434 const char *pszLine)
1435{
1436 char *pEnd = pDescriptor->aLines[pDescriptor->cLines];
1437 ssize_t cbDiff = strlen(pszLine) + 1;
1438
1439 if ( pDescriptor->cLines >= VMDK_DESCRIPTOR_LINES_MAX - 1
1440 && pEnd - pDescriptor->aLines[0] > (ptrdiff_t)pDescriptor->cbDescAlloc - cbDiff)
1441 return vmdkError(pImage, VERR_BUFFER_OVERFLOW, RT_SRC_POS, N_("VMDK: descriptor too big in '%s'"), pImage->pszFilename);
1442
1443 memcpy(pEnd, pszLine, cbDiff);
1444 pDescriptor->cLines++;
1445 pDescriptor->aLines[pDescriptor->cLines] = pEnd + cbDiff;
1446 pDescriptor->fDirty = true;
1447
1448 return VINF_SUCCESS;
1449}
1450
1451static bool vmdkDescGetStr(PVMDKDESCRIPTOR pDescriptor, unsigned uStart,
1452 const char *pszKey, const char **ppszValue)
1453{
1454 size_t cbKey = strlen(pszKey);
1455 const char *pszValue;
1456
1457 while (uStart != 0)
1458 {
1459 if (!strncmp(pDescriptor->aLines[uStart], pszKey, cbKey))
1460 {
1461 /* Key matches, check for a '=' (preceded by whitespace). */
1462 pszValue = pDescriptor->aLines[uStart] + cbKey;
1463 while (*pszValue == ' ' || *pszValue == '\t')
1464 pszValue++;
1465 if (*pszValue == '=')
1466 {
1467 *ppszValue = pszValue + 1;
1468 break;
1469 }
1470 }
1471 uStart = pDescriptor->aNextLines[uStart];
1472 }
1473 return !!uStart;
1474}
1475
1476static int vmdkDescSetStr(PVMDKIMAGE pImage, PVMDKDESCRIPTOR pDescriptor,
1477 unsigned uStart,
1478 const char *pszKey, const char *pszValue)
1479{
1480 char *pszTmp;
1481 size_t cbKey = strlen(pszKey);
1482 unsigned uLast = 0;
1483
1484 while (uStart != 0)
1485 {
1486 if (!strncmp(pDescriptor->aLines[uStart], pszKey, cbKey))
1487 {
1488 /* Key matches, check for a '=' (preceded by whitespace). */
1489 pszTmp = pDescriptor->aLines[uStart] + cbKey;
1490 while (*pszTmp == ' ' || *pszTmp == '\t')
1491 pszTmp++;
1492 if (*pszTmp == '=')
1493 {
1494 pszTmp++;
1495 while (*pszTmp == ' ' || *pszTmp == '\t')
1496 pszTmp++;
1497 break;
1498 }
1499 }
1500 if (!pDescriptor->aNextLines[uStart])
1501 uLast = uStart;
1502 uStart = pDescriptor->aNextLines[uStart];
1503 }
1504 if (uStart)
1505 {
1506 if (pszValue)
1507 {
1508 /* Key already exists, replace existing value. */
1509 size_t cbOldVal = strlen(pszTmp);
1510 size_t cbNewVal = strlen(pszValue);
1511 ssize_t cbDiff = cbNewVal - cbOldVal;
1512 /* Check for buffer overflow. */
1513 if ( pDescriptor->aLines[pDescriptor->cLines]
1514 - pDescriptor->aLines[0] > (ptrdiff_t)pDescriptor->cbDescAlloc - cbDiff)
1515 return vmdkError(pImage, VERR_BUFFER_OVERFLOW, RT_SRC_POS, N_("VMDK: descriptor too big in '%s'"), pImage->pszFilename);
1516
1517 memmove(pszTmp + cbNewVal, pszTmp + cbOldVal,
1518 pDescriptor->aLines[pDescriptor->cLines] - pszTmp - cbOldVal);
1519 memcpy(pszTmp, pszValue, cbNewVal + 1);
1520 for (unsigned i = uStart + 1; i <= pDescriptor->cLines; i++)
1521 pDescriptor->aLines[i] += cbDiff;
1522 }
1523 else
1524 {
1525 memmove(pDescriptor->aLines[uStart], pDescriptor->aLines[uStart+1],
1526 pDescriptor->aLines[pDescriptor->cLines] - pDescriptor->aLines[uStart+1] + 1);
1527 for (unsigned i = uStart + 1; i <= pDescriptor->cLines; i++)
1528 {
1529 pDescriptor->aLines[i-1] = pDescriptor->aLines[i];
1530 if (pDescriptor->aNextLines[i])
1531 pDescriptor->aNextLines[i-1] = pDescriptor->aNextLines[i] - 1;
1532 else
1533 pDescriptor->aNextLines[i-1] = 0;
1534 }
1535 pDescriptor->cLines--;
1536 /* Adjust starting line numbers of following descriptor sections. */
1537 if (uStart < pDescriptor->uFirstExtent)
1538 pDescriptor->uFirstExtent--;
1539 if (uStart < pDescriptor->uFirstDDB)
1540 pDescriptor->uFirstDDB--;
1541 }
1542 }
1543 else
1544 {
1545 /* Key doesn't exist, append after the last entry in this category. */
1546 if (!pszValue)
1547 {
1548 /* Key doesn't exist, and it should be removed. Simply a no-op. */
1549 return VINF_SUCCESS;
1550 }
1551 cbKey = strlen(pszKey);
1552 size_t cbValue = strlen(pszValue);
1553 ssize_t cbDiff = cbKey + 1 + cbValue + 1;
1554 /* Check for buffer overflow. */
1555 if ( (pDescriptor->cLines >= VMDK_DESCRIPTOR_LINES_MAX - 1)
1556 || ( pDescriptor->aLines[pDescriptor->cLines]
1557 - pDescriptor->aLines[0] > (ptrdiff_t)pDescriptor->cbDescAlloc - cbDiff))
1558 return vmdkError(pImage, VERR_BUFFER_OVERFLOW, RT_SRC_POS, N_("VMDK: descriptor too big in '%s'"), pImage->pszFilename);
1559 for (unsigned i = pDescriptor->cLines + 1; i > uLast + 1; i--)
1560 {
1561 pDescriptor->aLines[i] = pDescriptor->aLines[i - 1];
1562 if (pDescriptor->aNextLines[i - 1])
1563 pDescriptor->aNextLines[i] = pDescriptor->aNextLines[i - 1] + 1;
1564 else
1565 pDescriptor->aNextLines[i] = 0;
1566 }
1567 uStart = uLast + 1;
1568 pDescriptor->aNextLines[uLast] = uStart;
1569 pDescriptor->aNextLines[uStart] = 0;
1570 pDescriptor->cLines++;
1571 pszTmp = pDescriptor->aLines[uStart];
1572 memmove(pszTmp + cbDiff, pszTmp,
1573 pDescriptor->aLines[pDescriptor->cLines] - pszTmp);
1574 memcpy(pDescriptor->aLines[uStart], pszKey, cbKey);
1575 pDescriptor->aLines[uStart][cbKey] = '=';
1576 memcpy(pDescriptor->aLines[uStart] + cbKey + 1, pszValue, cbValue + 1);
1577 for (unsigned i = uStart + 1; i <= pDescriptor->cLines; i++)
1578 pDescriptor->aLines[i] += cbDiff;
1579
1580 /* Adjust starting line numbers of following descriptor sections. */
1581 if (uStart <= pDescriptor->uFirstExtent)
1582 pDescriptor->uFirstExtent++;
1583 if (uStart <= pDescriptor->uFirstDDB)
1584 pDescriptor->uFirstDDB++;
1585 }
1586 pDescriptor->fDirty = true;
1587 return VINF_SUCCESS;
1588}
1589
1590static int vmdkDescBaseGetU32(PVMDKDESCRIPTOR pDescriptor, const char *pszKey,
1591 uint32_t *puValue)
1592{
1593 const char *pszValue;
1594
1595 if (!vmdkDescGetStr(pDescriptor, pDescriptor->uFirstDesc, pszKey,
1596 &pszValue))
1597 return VERR_VD_VMDK_VALUE_NOT_FOUND;
1598 return RTStrToUInt32Ex(pszValue, NULL, 10, puValue);
1599}
1600
1601static int vmdkDescBaseGetStr(PVMDKIMAGE pImage, PVMDKDESCRIPTOR pDescriptor,
1602 const char *pszKey, const char **ppszValue)
1603{
1604 const char *pszValue;
1605 char *pszValueUnquoted;
1606
1607 if (!vmdkDescGetStr(pDescriptor, pDescriptor->uFirstDesc, pszKey,
1608 &pszValue))
1609 return VERR_VD_VMDK_VALUE_NOT_FOUND;
1610 int rc = vmdkStringUnquote(pImage, pszValue, &pszValueUnquoted, NULL);
1611 if (RT_FAILURE(rc))
1612 return rc;
1613 *ppszValue = pszValueUnquoted;
1614 return rc;
1615}
1616
1617static int vmdkDescBaseSetStr(PVMDKIMAGE pImage, PVMDKDESCRIPTOR pDescriptor,
1618 const char *pszKey, const char *pszValue)
1619{
1620 char *pszValueQuoted;
1621
1622 int rc = RTStrAPrintf(&pszValueQuoted, "\"%s\"", pszValue);
1623 if (RT_FAILURE(rc))
1624 return rc;
1625 rc = vmdkDescSetStr(pImage, pDescriptor, pDescriptor->uFirstDesc, pszKey,
1626 pszValueQuoted);
1627 RTStrFree(pszValueQuoted);
1628 return rc;
1629}
1630
1631static void vmdkDescExtRemoveDummy(PVMDKIMAGE pImage,
1632 PVMDKDESCRIPTOR pDescriptor)
1633{
1634 unsigned uEntry = pDescriptor->uFirstExtent;
1635 ssize_t cbDiff;
1636
1637 if (!uEntry)
1638 return;
1639
1640 cbDiff = strlen(pDescriptor->aLines[uEntry]) + 1;
1641 /* Move everything, including the '\0' entry that marks the end of the buffer. */
1642 memmove(pDescriptor->aLines[uEntry], pDescriptor->aLines[uEntry + 1],
1643 pDescriptor->aLines[pDescriptor->cLines] - pDescriptor->aLines[uEntry + 1] + 1);
1644 for (unsigned i = uEntry + 1; i <= pDescriptor->cLines; i++)
1645 {
1646 pDescriptor->aLines[i - 1] = pDescriptor->aLines[i] - cbDiff;
1647 if (pDescriptor->aNextLines[i])
1648 pDescriptor->aNextLines[i - 1] = pDescriptor->aNextLines[i] - 1;
1649 else
1650 pDescriptor->aNextLines[i - 1] = 0;
1651 }
1652 pDescriptor->cLines--;
1653 if (pDescriptor->uFirstDDB)
1654 pDescriptor->uFirstDDB--;
1655
1656 return;
1657}
1658
1659static int vmdkDescExtInsert(PVMDKIMAGE pImage, PVMDKDESCRIPTOR pDescriptor,
1660 VMDKACCESS enmAccess, uint64_t cNominalSectors,
1661 VMDKETYPE enmType, const char *pszBasename,
1662 uint64_t uSectorOffset)
1663{
1664 static const char *apszAccess[] = { "NOACCESS", "RDONLY", "RW" };
1665 static const char *apszType[] = { "", "SPARSE", "FLAT", "ZERO", "VMFS" };
1666 char *pszTmp;
1667 unsigned uStart = pDescriptor->uFirstExtent, uLast = 0;
1668 char szExt[1024];
1669 ssize_t cbDiff;
1670
1671 Assert((unsigned)enmAccess < RT_ELEMENTS(apszAccess));
1672 Assert((unsigned)enmType < RT_ELEMENTS(apszType));
1673
1674 /* Find last entry in extent description. */
1675 while (uStart)
1676 {
1677 if (!pDescriptor->aNextLines[uStart])
1678 uLast = uStart;
1679 uStart = pDescriptor->aNextLines[uStart];
1680 }
1681
1682 if (enmType == VMDKETYPE_ZERO)
1683 {
1684 RTStrPrintf(szExt, sizeof(szExt), "%s %llu %s ", apszAccess[enmAccess],
1685 cNominalSectors, apszType[enmType]);
1686 }
1687 else if (enmType == VMDKETYPE_FLAT)
1688 {
1689 RTStrPrintf(szExt, sizeof(szExt), "%s %llu %s \"%s\" %llu",
1690 apszAccess[enmAccess], cNominalSectors,
1691 apszType[enmType], pszBasename, uSectorOffset);
1692 }
1693 else
1694 {
1695 RTStrPrintf(szExt, sizeof(szExt), "%s %llu %s \"%s\"",
1696 apszAccess[enmAccess], cNominalSectors,
1697 apszType[enmType], pszBasename);
1698 }
1699 cbDiff = strlen(szExt) + 1;
1700
1701 /* Check for buffer overflow. */
1702 if ( (pDescriptor->cLines >= VMDK_DESCRIPTOR_LINES_MAX - 1)
1703 || ( pDescriptor->aLines[pDescriptor->cLines]
1704 - pDescriptor->aLines[0] > (ptrdiff_t)pDescriptor->cbDescAlloc - cbDiff))
1705 return vmdkError(pImage, VERR_BUFFER_OVERFLOW, RT_SRC_POS, N_("VMDK: descriptor too big in '%s'"), pImage->pszFilename);
1706
1707 for (unsigned i = pDescriptor->cLines + 1; i > uLast + 1; i--)
1708 {
1709 pDescriptor->aLines[i] = pDescriptor->aLines[i - 1];
1710 if (pDescriptor->aNextLines[i - 1])
1711 pDescriptor->aNextLines[i] = pDescriptor->aNextLines[i - 1] + 1;
1712 else
1713 pDescriptor->aNextLines[i] = 0;
1714 }
1715 uStart = uLast + 1;
1716 pDescriptor->aNextLines[uLast] = uStart;
1717 pDescriptor->aNextLines[uStart] = 0;
1718 pDescriptor->cLines++;
1719 pszTmp = pDescriptor->aLines[uStart];
1720 memmove(pszTmp + cbDiff, pszTmp,
1721 pDescriptor->aLines[pDescriptor->cLines] - pszTmp);
1722 memcpy(pDescriptor->aLines[uStart], szExt, cbDiff);
1723 for (unsigned i = uStart + 1; i <= pDescriptor->cLines; i++)
1724 pDescriptor->aLines[i] += cbDiff;
1725
1726 /* Adjust starting line numbers of following descriptor sections. */
1727 if (uStart <= pDescriptor->uFirstDDB)
1728 pDescriptor->uFirstDDB++;
1729
1730 pDescriptor->fDirty = true;
1731 return VINF_SUCCESS;
1732}
1733
1734static int vmdkDescDDBGetStr(PVMDKIMAGE pImage, PVMDKDESCRIPTOR pDescriptor,
1735 const char *pszKey, const char **ppszValue)
1736{
1737 const char *pszValue;
1738 char *pszValueUnquoted;
1739
1740 if (!vmdkDescGetStr(pDescriptor, pDescriptor->uFirstDDB, pszKey,
1741 &pszValue))
1742 return VERR_VD_VMDK_VALUE_NOT_FOUND;
1743 int rc = vmdkStringUnquote(pImage, pszValue, &pszValueUnquoted, NULL);
1744 if (RT_FAILURE(rc))
1745 return rc;
1746 *ppszValue = pszValueUnquoted;
1747 return rc;
1748}
1749
1750static int vmdkDescDDBGetU32(PVMDKIMAGE pImage, PVMDKDESCRIPTOR pDescriptor,
1751 const char *pszKey, uint32_t *puValue)
1752{
1753 const char *pszValue;
1754 char *pszValueUnquoted;
1755
1756 if (!vmdkDescGetStr(pDescriptor, pDescriptor->uFirstDDB, pszKey,
1757 &pszValue))
1758 return VERR_VD_VMDK_VALUE_NOT_FOUND;
1759 int rc = vmdkStringUnquote(pImage, pszValue, &pszValueUnquoted, NULL);
1760 if (RT_FAILURE(rc))
1761 return rc;
1762 rc = RTStrToUInt32Ex(pszValueUnquoted, NULL, 10, puValue);
1763 RTMemTmpFree(pszValueUnquoted);
1764 return rc;
1765}
1766
1767static int vmdkDescDDBGetUuid(PVMDKIMAGE pImage, PVMDKDESCRIPTOR pDescriptor,
1768 const char *pszKey, PRTUUID pUuid)
1769{
1770 const char *pszValue;
1771 char *pszValueUnquoted;
1772
1773 if (!vmdkDescGetStr(pDescriptor, pDescriptor->uFirstDDB, pszKey,
1774 &pszValue))
1775 return VERR_VD_VMDK_VALUE_NOT_FOUND;
1776 int rc = vmdkStringUnquote(pImage, pszValue, &pszValueUnquoted, NULL);
1777 if (RT_FAILURE(rc))
1778 return rc;
1779 rc = RTUuidFromStr(pUuid, pszValueUnquoted);
1780 RTMemTmpFree(pszValueUnquoted);
1781 return rc;
1782}
1783
1784static int vmdkDescDDBSetStr(PVMDKIMAGE pImage, PVMDKDESCRIPTOR pDescriptor,
1785 const char *pszKey, const char *pszVal)
1786{
1787 int rc;
1788 char *pszValQuoted;
1789
1790 if (pszVal)
1791 {
1792 rc = RTStrAPrintf(&pszValQuoted, "\"%s\"", pszVal);
1793 if (RT_FAILURE(rc))
1794 return rc;
1795 }
1796 else
1797 pszValQuoted = NULL;
1798 rc = vmdkDescSetStr(pImage, pDescriptor, pDescriptor->uFirstDDB, pszKey,
1799 pszValQuoted);
1800 if (pszValQuoted)
1801 RTStrFree(pszValQuoted);
1802 return rc;
1803}
1804
1805static int vmdkDescDDBSetUuid(PVMDKIMAGE pImage, PVMDKDESCRIPTOR pDescriptor,
1806 const char *pszKey, PCRTUUID pUuid)
1807{
1808 char *pszUuid;
1809
1810 int rc = RTStrAPrintf(&pszUuid, "\"%RTuuid\"", pUuid);
1811 if (RT_FAILURE(rc))
1812 return rc;
1813 rc = vmdkDescSetStr(pImage, pDescriptor, pDescriptor->uFirstDDB, pszKey,
1814 pszUuid);
1815 RTStrFree(pszUuid);
1816 return rc;
1817}
1818
1819static int vmdkDescDDBSetU32(PVMDKIMAGE pImage, PVMDKDESCRIPTOR pDescriptor,
1820 const char *pszKey, uint32_t uValue)
1821{
1822 char *pszValue;
1823
1824 int rc = RTStrAPrintf(&pszValue, "\"%d\"", uValue);
1825 if (RT_FAILURE(rc))
1826 return rc;
1827 rc = vmdkDescSetStr(pImage, pDescriptor, pDescriptor->uFirstDDB, pszKey,
1828 pszValue);
1829 RTStrFree(pszValue);
1830 return rc;
1831}
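/*
 * The vmdkDescDDBGetXxx/vmdkDescDDBSetXxx helpers above all operate on the
 * disk database section starting at uFirstDDB. Values are stored with
 * surrounding double quotes and unquoted again on reading, so e.g. the
 * vmdkDescDDBSetStr(..., "ddb.adapterType", "ide") call further down results
 * in a descriptor line along the lines of ddb.adapterType = "ide" (the exact
 * formatting is up to vmdkDescSetStr).
 */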
1832
1833static int vmdkPreprocessDescriptor(PVMDKIMAGE pImage, char *pDescData,
1834 size_t cbDescData,
1835 PVMDKDESCRIPTOR pDescriptor)
1836{
1837 int rc = VINF_SUCCESS;
1838 unsigned cLine = 0, uLastNonEmptyLine = 0;
1839 char *pTmp = pDescData;
1840
1841 pDescriptor->cbDescAlloc = cbDescData;
1842 while (*pTmp != '\0')
1843 {
1844 pDescriptor->aLines[cLine++] = pTmp;
1845 if (cLine >= VMDK_DESCRIPTOR_LINES_MAX)
1846 {
1847 rc = vmdkError(pImage, VERR_VD_VMDK_INVALID_HEADER, RT_SRC_POS, N_("VMDK: descriptor too big in '%s'"), pImage->pszFilename);
1848 goto out;
1849 }
1850
1851 while (*pTmp != '\0' && *pTmp != '\n')
1852 {
1853 if (*pTmp == '\r')
1854 {
1855 if (*(pTmp + 1) != '\n')
1856 {
1857 rc = vmdkError(pImage, VERR_VD_VMDK_INVALID_HEADER, RT_SRC_POS, N_("VMDK: unsupported end of line in descriptor in '%s'"), pImage->pszFilename);
1858 goto out;
1859 }
1860 else
1861 {
1862 /* Get rid of CR character. */
1863 *pTmp = '\0';
1864 }
1865 }
1866 pTmp++;
1867 }
1868 /* Get rid of LF character. */
1869 if (*pTmp == '\n')
1870 {
1871 *pTmp = '\0';
1872 pTmp++;
1873 }
1874 }
1875 pDescriptor->cLines = cLine;
1876 /* Pointer right after the end of the used part of the buffer. */
1877 pDescriptor->aLines[cLine] = pTmp;
1878
1879 if ( strcmp(pDescriptor->aLines[0], "# Disk DescriptorFile")
1880 && strcmp(pDescriptor->aLines[0], "# Disk Descriptor File"))
1881 {
1882 rc = vmdkError(pImage, VERR_VD_VMDK_INVALID_HEADER, RT_SRC_POS, N_("VMDK: descriptor does not start as expected in '%s'"), pImage->pszFilename);
1883 goto out;
1884 }
1885
1886 /* Initialize those, because we need to be able to reopen an image. */
1887 pDescriptor->uFirstDesc = 0;
1888 pDescriptor->uFirstExtent = 0;
1889 pDescriptor->uFirstDDB = 0;
1890 for (unsigned i = 0; i < cLine; i++)
1891 {
1892 if (*pDescriptor->aLines[i] != '#' && *pDescriptor->aLines[i] != '\0')
1893 {
1894 if ( !strncmp(pDescriptor->aLines[i], "RW", 2)
1895 || !strncmp(pDescriptor->aLines[i], "RDONLY", 6)
1896 || !strncmp(pDescriptor->aLines[i], "NOACCESS", 8) )
1897 {
1898 /* An extent descriptor. */
1899 if (!pDescriptor->uFirstDesc || pDescriptor->uFirstDDB)
1900 {
1901 /* Incorrect ordering of entries. */
1902 rc = vmdkError(pImage, VERR_VD_VMDK_INVALID_HEADER, RT_SRC_POS, N_("VMDK: incorrect ordering of entries in descriptor in '%s'"), pImage->pszFilename);
1903 goto out;
1904 }
1905 if (!pDescriptor->uFirstExtent)
1906 {
1907 pDescriptor->uFirstExtent = i;
1908 uLastNonEmptyLine = 0;
1909 }
1910 }
1911 else if (!strncmp(pDescriptor->aLines[i], "ddb.", 4))
1912 {
1913 /* A disk database entry. */
1914 if (!pDescriptor->uFirstDesc || !pDescriptor->uFirstExtent)
1915 {
1916 /* Incorrect ordering of entries. */
1917 rc = vmdkError(pImage, VERR_VD_VMDK_INVALID_HEADER, RT_SRC_POS, N_("VMDK: incorrect ordering of entries in descriptor in '%s'"), pImage->pszFilename);
1918 goto out;
1919 }
1920 if (!pDescriptor->uFirstDDB)
1921 {
1922 pDescriptor->uFirstDDB = i;
1923 uLastNonEmptyLine = 0;
1924 }
1925 }
1926 else
1927 {
1928 /* A normal entry. */
1929 if (pDescriptor->uFirstExtent || pDescriptor->uFirstDDB)
1930 {
1931 /* Incorrect ordering of entries. */
1932 rc = vmdkError(pImage, VERR_VD_VMDK_INVALID_HEADER, RT_SRC_POS, N_("VMDK: incorrect ordering of entries in descriptor in '%s'"), pImage->pszFilename);
1933 goto out;
1934 }
1935 if (!pDescriptor->uFirstDesc)
1936 {
1937 pDescriptor->uFirstDesc = i;
1938 uLastNonEmptyLine = 0;
1939 }
1940 }
1941 if (uLastNonEmptyLine)
1942 pDescriptor->aNextLines[uLastNonEmptyLine] = i;
1943 uLastNonEmptyLine = i;
1944 }
1945 }
1946
1947out:
1948 return rc;
1949}
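/*
 * Layout enforced by the preprocessing above: after the
 * "# Disk DescriptorFile" header line the descriptor must contain, in this
 * order, the plain header keys (uFirstDesc), the extent lines starting with
 * RW/RDONLY/NOACCESS (uFirstExtent) and the ddb.* disk database lines
 * (uFirstDDB). Comment and empty lines may appear anywhere; within each
 * section the remaining lines are chained through aNextLines.
 */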
1950
1951static int vmdkDescSetPCHSGeometry(PVMDKIMAGE pImage,
1952 PCPDMMEDIAGEOMETRY pPCHSGeometry)
1953{
1954 int rc = vmdkDescDDBSetU32(pImage, &pImage->Descriptor,
1955 VMDK_DDB_GEO_PCHS_CYLINDERS,
1956 pPCHSGeometry->cCylinders);
1957 if (RT_FAILURE(rc))
1958 return rc;
1959 rc = vmdkDescDDBSetU32(pImage, &pImage->Descriptor,
1960 VMDK_DDB_GEO_PCHS_HEADS,
1961 pPCHSGeometry->cHeads);
1962 if (RT_FAILURE(rc))
1963 return rc;
1964 rc = vmdkDescDDBSetU32(pImage, &pImage->Descriptor,
1965 VMDK_DDB_GEO_PCHS_SECTORS,
1966 pPCHSGeometry->cSectors);
1967 return rc;
1968}
1969
1970static int vmdkDescSetLCHSGeometry(PVMDKIMAGE pImage,
1971 PCPDMMEDIAGEOMETRY pLCHSGeometry)
1972{
1973 int rc = vmdkDescDDBSetU32(pImage, &pImage->Descriptor,
1974 VMDK_DDB_GEO_LCHS_CYLINDERS,
1975 pLCHSGeometry->cCylinders);
1976 if (RT_FAILURE(rc))
1977 return rc;
1978 rc = vmdkDescDDBSetU32(pImage, &pImage->Descriptor,
1979 VMDK_DDB_GEO_LCHS_HEADS,
1980 pLCHSGeometry->cHeads);
1981 if (RT_FAILURE(rc))
1982 return rc;
1983 rc = vmdkDescDDBSetU32(pImage, &pImage->Descriptor,
1984 VMDK_DDB_GEO_LCHS_SECTORS,
1985 pLCHSGeometry->cSectors);
1986 return rc;
1987}
1988
1989static int vmdkCreateDescriptor(PVMDKIMAGE pImage, char *pDescData,
1990 size_t cbDescData, PVMDKDESCRIPTOR pDescriptor)
1991{
1992 int rc;
1993
1994 pDescriptor->uFirstDesc = 0;
1995 pDescriptor->uFirstExtent = 0;
1996 pDescriptor->uFirstDDB = 0;
1997 pDescriptor->cLines = 0;
1998 pDescriptor->cbDescAlloc = cbDescData;
1999 pDescriptor->fDirty = false;
2000 pDescriptor->aLines[pDescriptor->cLines] = pDescData;
2001 memset(pDescriptor->aNextLines, '\0', sizeof(pDescriptor->aNextLines));
2002
2003 rc = vmdkDescInitStr(pImage, pDescriptor, "# Disk DescriptorFile");
2004 if (RT_FAILURE(rc))
2005 goto out;
2006 rc = vmdkDescInitStr(pImage, pDescriptor, "version=1");
2007 if (RT_FAILURE(rc))
2008 goto out;
2009 pDescriptor->uFirstDesc = pDescriptor->cLines - 1;
2010 rc = vmdkDescInitStr(pImage, pDescriptor, "");
2011 if (RT_FAILURE(rc))
2012 goto out;
2013 rc = vmdkDescInitStr(pImage, pDescriptor, "# Extent description");
2014 if (RT_FAILURE(rc))
2015 goto out;
2016 rc = vmdkDescInitStr(pImage, pDescriptor, "NOACCESS 0 ZERO ");
2017 if (RT_FAILURE(rc))
2018 goto out;
2019 pDescriptor->uFirstExtent = pDescriptor->cLines - 1;
2020 rc = vmdkDescInitStr(pImage, pDescriptor, "");
2021 if (RT_FAILURE(rc))
2022 goto out;
2023 /* The trailing space is created by VMware, too. */
2024 rc = vmdkDescInitStr(pImage, pDescriptor, "# The disk Data Base ");
2025 if (RT_FAILURE(rc))
2026 goto out;
2027 rc = vmdkDescInitStr(pImage, pDescriptor, "#DDB");
2028 if (RT_FAILURE(rc))
2029 goto out;
2030 rc = vmdkDescInitStr(pImage, pDescriptor, "");
2031 if (RT_FAILURE(rc))
2032 goto out;
2033 rc = vmdkDescInitStr(pImage, pDescriptor, "ddb.virtualHWVersion = \"4\"");
2034 if (RT_FAILURE(rc))
2035 goto out;
2036 pDescriptor->uFirstDDB = pDescriptor->cLines - 1;
2037
2038 /* Now that the framework is in place, use the normal functions to insert
2039 * the remaining keys. */
2040 char szBuf[9];
2041 RTStrPrintf(szBuf, sizeof(szBuf), "%08x", RTRandU32());
2042 rc = vmdkDescSetStr(pImage, pDescriptor, pDescriptor->uFirstDesc,
2043 "CID", szBuf);
2044 if (RT_FAILURE(rc))
2045 goto out;
2046 rc = vmdkDescSetStr(pImage, pDescriptor, pDescriptor->uFirstDesc,
2047 "parentCID", "ffffffff");
2048 if (RT_FAILURE(rc))
2049 goto out;
2050
2051 rc = vmdkDescDDBSetStr(pImage, pDescriptor, "ddb.adapterType", "ide");
2052 if (RT_FAILURE(rc))
2053 goto out;
2054
2055out:
2056 return rc;
2057}
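/*
 * The skeleton created by vmdkCreateDescriptor comes out roughly as below
 * (the CID value is random, and the exact placement of the lines added via
 * vmdkDescSetStr/vmdkDescDDBSetStr may differ slightly):
 *
 *     # Disk DescriptorFile
 *     version=1
 *     CID=12345678
 *     parentCID=ffffffff
 *
 *     # Extent description
 *     NOACCESS 0 ZERO
 *
 *     # The disk Data Base
 *     #DDB
 *
 *     ddb.virtualHWVersion = "4"
 *     ddb.adapterType = "ide"
 */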
2058
2059static int vmdkParseDescriptor(PVMDKIMAGE pImage, char *pDescData,
2060 size_t cbDescData)
2061{
2062 int rc;
2063 unsigned cExtents;
2064 unsigned uLine;
2065 unsigned i;
2066
2067 rc = vmdkPreprocessDescriptor(pImage, pDescData, cbDescData,
2068 &pImage->Descriptor);
2069 if (RT_FAILURE(rc))
2070 return rc;
2071
2072 /* Check version, must be 1. */
2073 uint32_t uVersion;
2074 rc = vmdkDescBaseGetU32(&pImage->Descriptor, "version", &uVersion);
2075 if (RT_FAILURE(rc))
2076 return vmdkError(pImage, rc, RT_SRC_POS, N_("VMDK: error finding key 'version' in descriptor in '%s'"), pImage->pszFilename);
2077 if (uVersion != 1)
2078 return vmdkError(pImage, VERR_VD_VMDK_UNSUPPORTED_VERSION, RT_SRC_POS, N_("VMDK: unsupported format version in descriptor in '%s'"), pImage->pszFilename);
2079
2080 /* Get image creation type and determine image flags. */
2081 const char *pszCreateType = NULL; /* initialized to make gcc shut up */
2082 rc = vmdkDescBaseGetStr(pImage, &pImage->Descriptor, "createType",
2083 &pszCreateType);
2084 if (RT_FAILURE(rc))
2085 return vmdkError(pImage, rc, RT_SRC_POS, N_("VMDK: cannot get image type from descriptor in '%s'"), pImage->pszFilename);
2086 if ( !strcmp(pszCreateType, "twoGbMaxExtentSparse")
2087 || !strcmp(pszCreateType, "twoGbMaxExtentFlat"))
2088 pImage->uImageFlags |= VD_VMDK_IMAGE_FLAGS_SPLIT_2G;
2089 else if ( !strcmp(pszCreateType, "partitionedDevice")
2090 || !strcmp(pszCreateType, "fullDevice"))
2091 pImage->uImageFlags |= VD_VMDK_IMAGE_FLAGS_RAWDISK;
2092 else if (!strcmp(pszCreateType, "streamOptimized"))
2093 pImage->uImageFlags |= VD_VMDK_IMAGE_FLAGS_STREAM_OPTIMIZED;
2094 else if (!strcmp(pszCreateType, "vmfs"))
2095 pImage->uImageFlags |= VD_IMAGE_FLAGS_FIXED | VD_VMDK_IMAGE_FLAGS_ESX;
2096 RTStrFree((char *)(void *)pszCreateType);
2097
2098 /* Count the number of extent config entries. */
2099 for (uLine = pImage->Descriptor.uFirstExtent, cExtents = 0;
2100 uLine != 0;
2101 uLine = pImage->Descriptor.aNextLines[uLine], cExtents++)
2102 /* nothing */;
2103
2104 if (!pImage->pDescData && cExtents != 1)
2105 {
2106 /* Monolithic image, must have only one extent (already opened). */
2107 return vmdkError(pImage, VERR_VD_VMDK_INVALID_HEADER, RT_SRC_POS, N_("VMDK: monolithic image may only have one extent in '%s'"), pImage->pszFilename);
2108 }
2109
2110 if (pImage->pDescData)
2111 {
2112 /* Non-monolithic image, extents need to be allocated. */
2113 rc = vmdkCreateExtents(pImage, cExtents);
2114 if (RT_FAILURE(rc))
2115 return rc;
2116 }
2117
2118 for (i = 0, uLine = pImage->Descriptor.uFirstExtent;
2119 i < cExtents; i++, uLine = pImage->Descriptor.aNextLines[uLine])
2120 {
2121 char *pszLine = pImage->Descriptor.aLines[uLine];
2122
2123 /* Access type of the extent. */
2124 if (!strncmp(pszLine, "RW", 2))
2125 {
2126 pImage->pExtents[i].enmAccess = VMDKACCESS_READWRITE;
2127 pszLine += 2;
2128 }
2129 else if (!strncmp(pszLine, "RDONLY", 6))
2130 {
2131 pImage->pExtents[i].enmAccess = VMDKACCESS_READONLY;
2132 pszLine += 6;
2133 }
2134 else if (!strncmp(pszLine, "NOACCESS", 8))
2135 {
2136 pImage->pExtents[i].enmAccess = VMDKACCESS_NOACCESS;
2137 pszLine += 8;
2138 }
2139 else
2140 return vmdkError(pImage, VERR_VD_VMDK_INVALID_HEADER, RT_SRC_POS, N_("VMDK: parse error in extent description in '%s'"), pImage->pszFilename);
2141 if (*pszLine++ != ' ')
2142 return vmdkError(pImage, VERR_VD_VMDK_INVALID_HEADER, RT_SRC_POS, N_("VMDK: parse error in extent description in '%s'"), pImage->pszFilename);
2143
2144 /* Nominal size of the extent. */
2145 rc = RTStrToUInt64Ex(pszLine, &pszLine, 10,
2146 &pImage->pExtents[i].cNominalSectors);
2147 if (RT_FAILURE(rc))
2148 return vmdkError(pImage, VERR_VD_VMDK_INVALID_HEADER, RT_SRC_POS, N_("VMDK: parse error in extent description in '%s'"), pImage->pszFilename);
2149 if (*pszLine++ != ' ')
2150 return vmdkError(pImage, VERR_VD_VMDK_INVALID_HEADER, RT_SRC_POS, N_("VMDK: parse error in extent description in '%s'"), pImage->pszFilename);
2151
2152 /* Type of the extent. */
2153#ifdef VBOX_WITH_VMDK_ESX
2154 /** @todo Add the ESX extent types. Not necessary for now because
2155 * the ESX extent types are only used inside an ESX server. They are
2156 * automatically converted if the VMDK is exported. */
2157#endif /* VBOX_WITH_VMDK_ESX */
2158 if (!strncmp(pszLine, "SPARSE", 6))
2159 {
2160 pImage->pExtents[i].enmType = VMDKETYPE_HOSTED_SPARSE;
2161 pszLine += 6;
2162 }
2163 else if (!strncmp(pszLine, "FLAT", 4))
2164 {
2165 pImage->pExtents[i].enmType = VMDKETYPE_FLAT;
2166 pszLine += 4;
2167 }
2168 else if (!strncmp(pszLine, "ZERO", 4))
2169 {
2170 pImage->pExtents[i].enmType = VMDKETYPE_ZERO;
2171 pszLine += 4;
2172 }
2173 else if (!strncmp(pszLine, "VMFS", 4))
2174 {
2175 pImage->pExtents[i].enmType = VMDKETYPE_VMFS;
2176 pszLine += 4;
2177 }
2178 else
2179 return vmdkError(pImage, VERR_VD_VMDK_INVALID_HEADER, RT_SRC_POS, N_("VMDK: parse error in extent description in '%s'"), pImage->pszFilename);
2180 if (pImage->pExtents[i].enmType == VMDKETYPE_ZERO)
2181 {
2182 /* This one has no basename or offset. */
2183 if (*pszLine == ' ')
2184 pszLine++;
2185 if (*pszLine != '\0')
2186 return vmdkError(pImage, VERR_VD_VMDK_INVALID_HEADER, RT_SRC_POS, N_("VMDK: parse error in extent description in '%s'"), pImage->pszFilename);
2187 pImage->pExtents[i].pszBasename = NULL;
2188 }
2189 else
2190 {
2191 /* All other extent types have basename and optional offset. */
2192 if (*pszLine++ != ' ')
2193 return vmdkError(pImage, VERR_VD_VMDK_INVALID_HEADER, RT_SRC_POS, N_("VMDK: parse error in extent description in '%s'"), pImage->pszFilename);
2194
2195 /* Basename of the image. Surrounded by quotes. */
2196 char *pszBasename;
2197 rc = vmdkStringUnquote(pImage, pszLine, &pszBasename, &pszLine);
2198 if (RT_FAILURE(rc))
2199 return rc;
2200 pImage->pExtents[i].pszBasename = pszBasename;
2201 if (*pszLine == ' ')
2202 {
2203 pszLine++;
2204 if (*pszLine != '\0')
2205 {
2206 /* Optional offset in extent specified. */
2207 rc = RTStrToUInt64Ex(pszLine, &pszLine, 10,
2208 &pImage->pExtents[i].uSectorOffset);
2209 if (RT_FAILURE(rc))
2210 return vmdkError(pImage, VERR_VD_VMDK_INVALID_HEADER, RT_SRC_POS, N_("VMDK: parse error in extent description in '%s'"), pImage->pszFilename);
2211 }
2212 }
2213
2214 if (*pszLine != '\0')
2215 return vmdkError(pImage, VERR_VD_VMDK_INVALID_HEADER, RT_SRC_POS, N_("VMDK: parse error in extent description in '%s'"), pImage->pszFilename);
2216 }
2217 }
2218
2219 /* Determine PCHS geometry (autogenerate if necessary). */
2220 rc = vmdkDescDDBGetU32(pImage, &pImage->Descriptor,
2221 VMDK_DDB_GEO_PCHS_CYLINDERS,
2222 &pImage->PCHSGeometry.cCylinders);
2223 if (rc == VERR_VD_VMDK_VALUE_NOT_FOUND)
2224 pImage->PCHSGeometry.cCylinders = 0;
2225 else if (RT_FAILURE(rc))
2226 return vmdkError(pImage, rc, RT_SRC_POS, N_("VMDK: error getting PCHS geometry from extent description in '%s'"), pImage->pszFilename);
2227 rc = vmdkDescDDBGetU32(pImage, &pImage->Descriptor,
2228 VMDK_DDB_GEO_PCHS_HEADS,
2229 &pImage->PCHSGeometry.cHeads);
2230 if (rc == VERR_VD_VMDK_VALUE_NOT_FOUND)
2231 pImage->PCHSGeometry.cHeads = 0;
2232 else if (RT_FAILURE(rc))
2233 return vmdkError(pImage, rc, RT_SRC_POS, N_("VMDK: error getting PCHS geometry from extent description in '%s'"), pImage->pszFilename);
2234 rc = vmdkDescDDBGetU32(pImage, &pImage->Descriptor,
2235 VMDK_DDB_GEO_PCHS_SECTORS,
2236 &pImage->PCHSGeometry.cSectors);
2237 if (rc == VERR_VD_VMDK_VALUE_NOT_FOUND)
2238 pImage->PCHSGeometry.cSectors = 0;
2239 else if (RT_FAILURE(rc))
2240 return vmdkError(pImage, rc, RT_SRC_POS, N_("VMDK: error getting PCHS geometry from extent description in '%s'"), pImage->pszFilename);
2241 if ( pImage->PCHSGeometry.cCylinders == 0
2242 || pImage->PCHSGeometry.cHeads == 0
2243 || pImage->PCHSGeometry.cHeads > 16
2244 || pImage->PCHSGeometry.cSectors == 0
2245 || pImage->PCHSGeometry.cSectors > 63)
2246 {
2247 /* Mark PCHS geometry as not yet valid (can't do the calculation here
2248 * as the total image size isn't known yet). */
2249 pImage->PCHSGeometry.cCylinders = 0;
2250 pImage->PCHSGeometry.cHeads = 16;
2251 pImage->PCHSGeometry.cSectors = 63;
2252 }
2253
2254 /* Determine LCHS geometry (set to 0 if not specified). */
2255 rc = vmdkDescDDBGetU32(pImage, &pImage->Descriptor,
2256 VMDK_DDB_GEO_LCHS_CYLINDERS,
2257 &pImage->LCHSGeometry.cCylinders);
2258 if (rc == VERR_VD_VMDK_VALUE_NOT_FOUND)
2259 pImage->LCHSGeometry.cCylinders = 0;
2260 else if (RT_FAILURE(rc))
2261 return vmdkError(pImage, rc, RT_SRC_POS, N_("VMDK: error getting LCHS geometry from extent description in '%s'"), pImage->pszFilename);
2262 rc = vmdkDescDDBGetU32(pImage, &pImage->Descriptor,
2263 VMDK_DDB_GEO_LCHS_HEADS,
2264 &pImage->LCHSGeometry.cHeads);
2265 if (rc == VERR_VD_VMDK_VALUE_NOT_FOUND)
2266 pImage->LCHSGeometry.cHeads = 0;
2267 else if (RT_FAILURE(rc))
2268 return vmdkError(pImage, rc, RT_SRC_POS, N_("VMDK: error getting LCHS geometry from extent description in '%s'"), pImage->pszFilename);
2269 rc = vmdkDescDDBGetU32(pImage, &pImage->Descriptor,
2270 VMDK_DDB_GEO_LCHS_SECTORS,
2271 &pImage->LCHSGeometry.cSectors);
2272 if (rc == VERR_VD_VMDK_VALUE_NOT_FOUND)
2273 pImage->LCHSGeometry.cSectors = 0;
2274 else if (RT_FAILURE(rc))
2275 return vmdkError(pImage, rc, RT_SRC_POS, N_("VMDK: error getting LCHS geometry from extent description in '%s'"), pImage->pszFilename);
2276 if ( pImage->LCHSGeometry.cCylinders == 0
2277 || pImage->LCHSGeometry.cHeads == 0
2278 || pImage->LCHSGeometry.cSectors == 0)
2279 {
2280 pImage->LCHSGeometry.cCylinders = 0;
2281 pImage->LCHSGeometry.cHeads = 0;
2282 pImage->LCHSGeometry.cSectors = 0;
2283 }
2284
2285 /* Get image UUID. */
2286 rc = vmdkDescDDBGetUuid(pImage, &pImage->Descriptor, VMDK_DDB_IMAGE_UUID,
2287 &pImage->ImageUuid);
2288 if (rc == VERR_VD_VMDK_VALUE_NOT_FOUND)
2289 {
2290 /* Image without UUID. Probably created by VMware and not yet used
2291 * by VirtualBox. Can only be added for images opened in read/write
2292 * mode, so don't bother producing a sensible UUID otherwise. */
2293 if (pImage->uOpenFlags & VD_OPEN_FLAGS_READONLY)
2294 RTUuidClear(&pImage->ImageUuid);
2295 else
2296 {
2297 rc = RTUuidCreate(&pImage->ImageUuid);
2298 if (RT_FAILURE(rc))
2299 return rc;
2300 rc = vmdkDescDDBSetUuid(pImage, &pImage->Descriptor,
2301 VMDK_DDB_IMAGE_UUID, &pImage->ImageUuid);
2302 if (RT_FAILURE(rc))
2303 return vmdkError(pImage, rc, RT_SRC_POS, N_("VMDK: error storing image UUID in descriptor in '%s'"), pImage->pszFilename);
2304 }
2305 }
2306 else if (RT_FAILURE(rc))
2307 return rc;
2308
2309 /* Get image modification UUID. */
2310 rc = vmdkDescDDBGetUuid(pImage, &pImage->Descriptor,
2311 VMDK_DDB_MODIFICATION_UUID,
2312 &pImage->ModificationUuid);
2313 if (rc == VERR_VD_VMDK_VALUE_NOT_FOUND)
2314 {
2315 /* Image without UUID. Probably created by VMware and not yet used
2316 * by VirtualBox. Can only be added for images opened in read/write
2317 * mode, so don't bother producing a sensible UUID otherwise. */
2318 if (pImage->uOpenFlags & VD_OPEN_FLAGS_READONLY)
2319 RTUuidClear(&pImage->ModificationUuid);
2320 else
2321 {
2322 rc = RTUuidCreate(&pImage->ModificationUuid);
2323 if (RT_FAILURE(rc))
2324 return rc;
2325 rc = vmdkDescDDBSetUuid(pImage, &pImage->Descriptor,
2326 VMDK_DDB_MODIFICATION_UUID,
2327 &pImage->ModificationUuid);
2328 if (RT_FAILURE(rc))
2329 return vmdkError(pImage, rc, RT_SRC_POS, N_("VMDK: error storing image modification UUID in descriptor in '%s'"), pImage->pszFilename);
2330 }
2331 }
2332 else if (RT_FAILURE(rc))
2333 return rc;
2334
2335 /* Get UUID of parent image. */
2336 rc = vmdkDescDDBGetUuid(pImage, &pImage->Descriptor, VMDK_DDB_PARENT_UUID,
2337 &pImage->ParentUuid);
2338 if (rc == VERR_VD_VMDK_VALUE_NOT_FOUND)
2339 {
2340 /* Image without UUID. Probably created by VMware and not yet used
2341 * by VirtualBox. Can only be added for images opened in read/write
2342 * mode, so don't bother producing a sensible UUID otherwise. */
2343 if (pImage->uOpenFlags & VD_OPEN_FLAGS_READONLY)
2344 RTUuidClear(&pImage->ParentUuid);
2345 else
2346 {
2347 rc = RTUuidClear(&pImage->ParentUuid);
2348 if (RT_FAILURE(rc))
2349 return rc;
2350 rc = vmdkDescDDBSetUuid(pImage, &pImage->Descriptor,
2351 VMDK_DDB_PARENT_UUID, &pImage->ParentUuid);
2352 if (RT_FAILURE(rc))
2353 return vmdkError(pImage, rc, RT_SRC_POS, N_("VMDK: error storing parent UUID in descriptor in '%s'"), pImage->pszFilename);
2354 }
2355 }
2356 else if (RT_FAILURE(rc))
2357 return rc;
2358
2359 /* Get parent image modification UUID. */
2360 rc = vmdkDescDDBGetUuid(pImage, &pImage->Descriptor,
2361 VMDK_DDB_PARENT_MODIFICATION_UUID,
2362 &pImage->ParentModificationUuid);
2363 if (rc == VERR_VD_VMDK_VALUE_NOT_FOUND)
2364 {
2365 /* Image without UUID. Probably created by VMware and not yet used
2366 * by VirtualBox. Can only be added for images opened in read/write
2367 * mode, so don't bother producing a sensible UUID otherwise. */
2368 if (pImage->uOpenFlags & VD_OPEN_FLAGS_READONLY)
2369 RTUuidClear(&pImage->ParentModificationUuid);
2370 else
2371 {
2372 rc = RTUuidCreate(&pImage->ParentModificationUuid);
2373 if (RT_FAILURE(rc))
2374 return rc;
2375 rc = vmdkDescDDBSetUuid(pImage, &pImage->Descriptor,
2376 VMDK_DDB_PARENT_MODIFICATION_UUID,
2377 &pImage->ParentModificationUuid);
2378 if (RT_FAILURE(rc))
2379 return vmdkError(pImage, rc, RT_SRC_POS, N_("VMDK: error storing parent modification UUID in descriptor in '%s'"), pImage->pszFilename);
2380 }
2381 }
2382 else if (RT_FAILURE(rc))
2383 return rc;
2384
2385 return VINF_SUCCESS;
2386}
2387
2388/**
2389 * Internal: write/update the descriptor part of the image.
2390 */
2391static int vmdkWriteDescriptor(PVMDKIMAGE pImage)
2392{
2393 int rc = VINF_SUCCESS;
2394 uint64_t cbLimit;
2395 uint64_t uOffset;
2396 PVMDKFILE pDescFile;
2397
2398 if (pImage->pDescData)
2399 {
2400 /* Separate descriptor file. */
2401 uOffset = 0;
2402 cbLimit = 0;
2403 pDescFile = pImage->pFile;
2404 }
2405 else
2406 {
2407 /* Embedded descriptor file. */
2408 uOffset = VMDK_SECTOR2BYTE(pImage->pExtents[0].uDescriptorSector);
2409 cbLimit = VMDK_SECTOR2BYTE(pImage->pExtents[0].cDescriptorSectors);
2410 cbLimit += uOffset;
2411 pDescFile = pImage->pExtents[0].pFile;
2412 }
2413 /* Bail out if there is no file to write to. */
2414 if (pDescFile == NULL)
2415 return VERR_INVALID_PARAMETER;
2416 for (unsigned i = 0; i < pImage->Descriptor.cLines; i++)
2417 {
2418 const char *psz = pImage->Descriptor.aLines[i];
2419 size_t cb = strlen(psz);
2420
2421 if (cbLimit && uOffset + cb + 1 > cbLimit)
2422 return vmdkError(pImage, VERR_BUFFER_OVERFLOW, RT_SRC_POS, N_("VMDK: descriptor too long in '%s'"), pImage->pszFilename);
2423 rc = vmdkFileWriteAt(pDescFile, uOffset, psz, cb, NULL);
2424 if (RT_FAILURE(rc))
2425 return vmdkError(pImage, rc, RT_SRC_POS, N_("VMDK: error writing descriptor in '%s'"), pImage->pszFilename);
2426 uOffset += cb;
2427 rc = vmdkFileWriteAt(pDescFile, uOffset, "\n", 1, NULL);
2428 if (RT_FAILURE(rc))
2429 return vmdkError(pImage, rc, RT_SRC_POS, N_("VMDK: error writing descriptor in '%s'"), pImage->pszFilename);
2430 uOffset++;
2431 }
2432 if (cbLimit)
2433 {
2434 /* Inefficient, but simple. */
2435 while (uOffset < cbLimit)
2436 {
2437 rc = vmdkFileWriteAt(pDescFile, uOffset, "", 1, NULL);
2438 if (RT_FAILURE(rc))
2439 return vmdkError(pImage, rc, RT_SRC_POS, N_("VMDK: error writing descriptor in '%s'"), pImage->pszFilename);
2440 uOffset++;
2441 }
2442 }
2443 else
2444 {
2445 rc = vmdkFileSetSize(pDescFile, uOffset);
2446 if (RT_FAILURE(rc))
2447 return vmdkError(pImage, rc, RT_SRC_POS, N_("VMDK: error truncating descriptor in '%s'"), pImage->pszFilename);
2448 }
2449 pImage->Descriptor.fDirty = false;
2450 return rc;
2451}
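/*
 * Note the two cases above: an embedded descriptor is padded with NUL bytes
 * up to the reserved cbLimit (each write of "" stores just the terminating
 * NUL), whereas a separate descriptor file is truncated to the bytes written.
 */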
2452
2453/**
2454 * Internal: validate the consistency check values in a binary header.
2455 */
2456static int vmdkValidateHeader(PVMDKIMAGE pImage, PVMDKEXTENT pExtent, const SparseExtentHeader *pHeader)
2457{
2458 int rc = VINF_SUCCESS;
2459 if (RT_LE2H_U32(pHeader->magicNumber) != VMDK_SPARSE_MAGICNUMBER)
2460 {
2461 rc = vmdkError(pImage, VERR_VD_VMDK_INVALID_HEADER, RT_SRC_POS, N_("VMDK: incorrect magic in sparse extent header in '%s'"), pExtent->pszFullname);
2462 return rc;
2463 }
2464 if (RT_LE2H_U32(pHeader->version) != 1 && RT_LE2H_U32(pHeader->version) != 3)
2465 {
2466 rc = vmdkError(pImage, VERR_VD_VMDK_UNSUPPORTED_VERSION, RT_SRC_POS, N_("VMDK: incorrect version in sparse extent header in '%s', not a VMDK 1.0/1.1 conforming file"), pExtent->pszFullname);
2467 return rc;
2468 }
2469 if ( (RT_LE2H_U32(pHeader->flags) & 1)
2470 && ( pHeader->singleEndLineChar != '\n'
2471 || pHeader->nonEndLineChar != ' '
2472 || pHeader->doubleEndLineChar1 != '\r'
2473 || pHeader->doubleEndLineChar2 != '\n') )
2474 {
2475 rc = vmdkError(pImage, VERR_VD_VMDK_INVALID_HEADER, RT_SRC_POS, N_("VMDK: corrupted by CR/LF translation in '%s'"), pExtent->pszFullname);
2476 return rc;
2477 }
2478 return rc;
2479}
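/*
 * Summarized, a header passes validation if it carries the sparse VMDK magic,
 * version 1 or 3, and - in case flag bit 0 declares the newline test
 * characters valid - the expected '\n', ' ', '\r', '\n' byte values, which
 * would have been mangled by any CR/LF text-mode conversion of the file.
 */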
2480
2481/**
2482 * Internal: read metadata belonging to an extent with binary header, i.e.
2483 * as found in monolithic files.
2484 */
2485static int vmdkReadBinaryMetaExtent(PVMDKIMAGE pImage, PVMDKEXTENT pExtent)
2486{
2487 SparseExtentHeader Header;
2488 uint64_t cSectorsPerGDE;
2489
2490 int rc = vmdkFileReadAt(pExtent->pFile, 0, &Header, sizeof(Header), NULL);
2491 AssertRC(rc);
2492 if (RT_FAILURE(rc))
2493 {
2494 rc = vmdkError(pImage, rc, RT_SRC_POS, N_("VMDK: error reading extent header in '%s'"), pExtent->pszFullname);
2495 goto out;
2496 }
2497 rc = vmdkValidateHeader(pImage, pExtent, &Header);
2498 if (RT_FAILURE(rc))
2499 goto out;
2500 if ( (RT_LE2H_U32(Header.flags) & RT_BIT(17))
2501 && RT_LE2H_U64(Header.gdOffset) == VMDK_GD_AT_END)
2502 {
2503 /* Read the footer, which isn't compressed and comes before the
2504 * end-of-stream marker. This is bending the VMDK 1.1 spec, but that's
2505 * VMware reality. Theory and practice have very little in common. */
2506 uint64_t cbSize;
2507 rc = vmdkFileGetSize(pExtent->pFile, &cbSize);
2508 AssertRC(rc);
2509 if (RT_FAILURE(rc))
2510 {
2511 rc = vmdkError(pImage, rc, RT_SRC_POS, N_("VMDK: cannot get size of '%s'"), pExtent->pszFullname);
2512 goto out;
2513 }
2514 cbSize = RT_ALIGN_64(cbSize, 512);
2515 rc = vmdkFileReadAt(pExtent->pFile, cbSize - 2*512, &Header, sizeof(Header), NULL);
2516 AssertRC(rc);
2517 if (RT_FAILURE(rc))
2518 {
2519 rc = vmdkError(pImage, rc, RT_SRC_POS, N_("VMDK: error reading extent footer in '%s'"), pExtent->pszFullname);
2520 goto out;
2521 }
2522 rc = vmdkValidateHeader(pImage, pExtent, &Header);
2523 if (RT_FAILURE(rc))
2524 goto out;
2525 pExtent->fFooter = true;
2526 }
2527 pExtent->uVersion = RT_LE2H_U32(Header.version);
2528 pExtent->enmType = VMDKETYPE_HOSTED_SPARSE; /* Just dummy value, changed later. */
2529 pExtent->cSectors = RT_LE2H_U64(Header.capacity);
2530 pExtent->cSectorsPerGrain = RT_LE2H_U64(Header.grainSize);
2531 pExtent->uDescriptorSector = RT_LE2H_U64(Header.descriptorOffset);
2532 pExtent->cDescriptorSectors = RT_LE2H_U64(Header.descriptorSize);
2533 if (pExtent->uDescriptorSector && !pExtent->cDescriptorSectors)
2534 {
2535 rc = vmdkError(pImage, VERR_VD_VMDK_INVALID_HEADER, RT_SRC_POS, N_("VMDK: inconsistent embedded descriptor config in '%s'"), pExtent->pszFullname);
2536 goto out;
2537 }
2538 pExtent->cGTEntries = RT_LE2H_U32(Header.numGTEsPerGT);
2539 if (RT_LE2H_U32(Header.flags) & RT_BIT(1))
2540 {
2541 pExtent->uSectorRGD = RT_LE2H_U64(Header.rgdOffset);
2542 pExtent->uSectorGD = RT_LE2H_U64(Header.gdOffset);
2543 }
2544 else
2545 {
2546 pExtent->uSectorGD = RT_LE2H_U64(Header.gdOffset);
2547 pExtent->uSectorRGD = 0;
2548 }
2549 if (pExtent->uSectorGD == VMDK_GD_AT_END || pExtent->uSectorRGD == VMDK_GD_AT_END)
2550 {
2551 rc = vmdkError(pImage, VERR_VD_VMDK_INVALID_HEADER, RT_SRC_POS, N_("VMDK: cannot resolve grain directory offset in '%s'"), pExtent->pszFullname);
2552 goto out;
2553 }
2554 pExtent->cOverheadSectors = RT_LE2H_U64(Header.overHead);
2555 pExtent->fUncleanShutdown = !!Header.uncleanShutdown;
2556 pExtent->uCompression = RT_LE2H_U16(Header.compressAlgorithm);
2557 cSectorsPerGDE = pExtent->cGTEntries * pExtent->cSectorsPerGrain;
2558 if (!cSectorsPerGDE || cSectorsPerGDE > UINT32_MAX)
2559 {
2560 rc = vmdkError(pImage, VERR_VD_VMDK_INVALID_HEADER, RT_SRC_POS, N_("VMDK: incorrect grain directory size in '%s'"), pExtent->pszFullname);
2561 goto out;
2562 }
2563 pExtent->cSectorsPerGDE = cSectorsPerGDE;
2564 pExtent->cGDEntries = (pExtent->cSectors + cSectorsPerGDE - 1) / cSectorsPerGDE;
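/* Worked example (values purely illustrative, nothing here mandates them):
 * with a grain size of 128 sectors (64K) and 512 GT entries per grain
 * table, one GDE covers 128 * 512 = 65536 sectors (32MB), so a 2GB extent
 * of 4194304 sectors needs (4194304 + 65535) / 65536 = 64 GD entries. */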
2565
2566 /* Fix up the number of descriptor sectors, as some flat images have
2567 * really just one, and this causes failures when inserting the UUID
2568 * values and other extra information. */
2569 if (pExtent->cDescriptorSectors != 0 && pExtent->cDescriptorSectors < 4)
2570 {
2571 /* Do it the easy way - just fix it for flat images, which have no
2572 * other complicated metadata that needs space too. */
2573 if ( pExtent->uDescriptorSector + 4 < pExtent->cOverheadSectors
2574 && pExtent->cGTEntries * pExtent->cGDEntries == 0)
2575 pExtent->cDescriptorSectors = 4;
2576 }
2577
2578out:
2579 if (RT_FAILURE(rc))
2580 vmdkFreeExtentData(pImage, pExtent, false);
2581
2582 return rc;
2583}
2584
2585/**
2586 * Internal: read additional metadata belonging to an extent. For those
2587 * extents which have no additional metadata just verify the information.
2588 */
2589static int vmdkReadMetaExtent(PVMDKIMAGE pImage, PVMDKEXTENT pExtent)
2590{
2591 int rc = VINF_SUCCESS;
2592 uint64_t cbExtentSize;
2593
2594 /* The image must be a multiple of a sector in size and contain the data
2595 * area (flat images only). If not, it means the image is at least
2596 * truncated, or even seriously garbled. */
2597 rc = vmdkFileGetSize(pExtent->pFile, &cbExtentSize);
2598 if (RT_FAILURE(rc))
2599 {
2600 rc = vmdkError(pImage, rc, RT_SRC_POS, N_("VMDK: error getting size in '%s'"), pExtent->pszFullname);
2601 goto out;
2602 }
2603/* Disabled the size check again as there are too many VMDK files out there that are too short. */
2604#ifdef VBOX_WITH_VMDK_STRICT_SIZE_CHECK
2605 if ( cbExtentSize != RT_ALIGN_64(cbExtentSize, 512)
2606 && (pExtent->enmType != VMDKETYPE_FLAT || pExtent->cNominalSectors + pExtent->uSectorOffset > VMDK_BYTE2SECTOR(cbExtentSize)))
2607 {
2608 rc = vmdkError(pImage, VERR_VD_VMDK_INVALID_HEADER, RT_SRC_POS, N_("VMDK: file size is not a multiple of 512 in '%s', file is truncated or otherwise garbled"), pExtent->pszFullname);
2609 goto out;
2610 }
2611#endif /* VBOX_WITH_VMDK_STRICT_SIZE_CHECK */
2612 if (pExtent->enmType != VMDKETYPE_HOSTED_SPARSE)
2613 goto out;
2614
2615 /* The spec says that this must be a power of two and greater than 8,
2616 * but probably they meant not less than 8. */
2617 if ( (pExtent->cSectorsPerGrain & (pExtent->cSectorsPerGrain - 1))
2618 || pExtent->cSectorsPerGrain < 8)
2619 {
2620 rc = vmdkError(pImage, VERR_VD_VMDK_INVALID_HEADER, RT_SRC_POS, N_("VMDK: invalid extent grain size %u in '%s'"), pExtent->cSectorsPerGrain, pExtent->pszFullname);
2621 goto out;
2622 }
2623
2624 /* This code requires that a grain table must hold a power of two multiple
2625 * of the number of entries per GT cache entry. */
2626 if ( (pExtent->cGTEntries & (pExtent->cGTEntries - 1))
2627 || pExtent->cGTEntries < VMDK_GT_CACHELINE_SIZE)
2628 {
2629 rc = vmdkError(pImage, VERR_VD_VMDK_INVALID_HEADER, RT_SRC_POS, N_("VMDK: grain table cache size problem in '%s'"), pExtent->pszFullname);
2630 goto out;
2631 }
2632
2633 rc = vmdkReadGrainDirectory(pExtent);
2634
2635out:
2636 if (RT_FAILURE(rc))
2637 vmdkFreeExtentData(pImage, pExtent, false);
2638
2639 return rc;
2640}
2641
2642/**
2643 * Internal: write/update the metadata for a sparse extent.
2644 */
2645static int vmdkWriteMetaSparseExtent(PVMDKEXTENT pExtent, uint64_t uOffset)
2646{
2647 SparseExtentHeader Header;
2648
2649 memset(&Header, '\0', sizeof(Header));
2650 Header.magicNumber = RT_H2LE_U32(VMDK_SPARSE_MAGICNUMBER);
2651 Header.version = RT_H2LE_U32(pExtent->uVersion);
2652 Header.flags = RT_H2LE_U32(RT_BIT(0));
2653 if (pExtent->pRGD)
2654 Header.flags |= RT_H2LE_U32(RT_BIT(1));
2655 if (pExtent->pImage->uImageFlags & VD_VMDK_IMAGE_FLAGS_STREAM_OPTIMIZED)
2656 Header.flags |= RT_H2LE_U32(RT_BIT(16) | RT_BIT(17));
2657 Header.capacity = RT_H2LE_U64(pExtent->cSectors);
2658 Header.grainSize = RT_H2LE_U64(pExtent->cSectorsPerGrain);
2659 Header.descriptorOffset = RT_H2LE_U64(pExtent->uDescriptorSector);
2660 Header.descriptorSize = RT_H2LE_U64(pExtent->cDescriptorSectors);
2661 Header.numGTEsPerGT = RT_H2LE_U32(pExtent->cGTEntries);
2662 if (pExtent->fFooter && uOffset == 0)
2663 {
2664 if (pExtent->pRGD)
2665 {
2666 Assert(pExtent->uSectorRGD);
2667 Header.rgdOffset = RT_H2LE_U64(VMDK_GD_AT_END);
2668 Header.gdOffset = RT_H2LE_U64(VMDK_GD_AT_END);
2669 }
2670 else
2671 {
2672 Header.gdOffset = RT_H2LE_U64(VMDK_GD_AT_END);
2673 }
2674 }
2675 else
2676 {
2677 if (pExtent->pRGD)
2678 {
2679 Assert(pExtent->uSectorRGD);
2680 Header.rgdOffset = RT_H2LE_U64(pExtent->uSectorRGD);
2681 Header.gdOffset = RT_H2LE_U64(pExtent->uSectorGD);
2682 }
2683 else
2684 {
2685 Header.gdOffset = RT_H2LE_U64(pExtent->uSectorGD);
2686 }
2687 }
2688 Header.overHead = RT_H2LE_U64(pExtent->cOverheadSectors);
2689 Header.uncleanShutdown = pExtent->fUncleanShutdown;
2690 Header.singleEndLineChar = '\n';
2691 Header.nonEndLineChar = ' ';
2692 Header.doubleEndLineChar1 = '\r';
2693 Header.doubleEndLineChar2 = '\n';
2694 Header.compressAlgorithm = RT_H2LE_U16(pExtent->uCompression);
2695
2696 int rc = vmdkFileWriteAt(pExtent->pFile, uOffset, &Header, sizeof(Header), NULL);
2697 AssertRC(rc);
2698 if (RT_FAILURE(rc))
2699 rc = vmdkError(pExtent->pImage, rc, RT_SRC_POS, N_("VMDK: error writing extent header in '%s'"), pExtent->pszFullname);
2700 return rc;
2701}
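/*
 * Flag bits used above: bit 0 marks the newline test characters as valid,
 * bit 1 announces the presence of a redundant grain directory, and bits
 * 16/17 (compressed grains and stream markers in the VMDK 1.1 format) are
 * only set for streamOptimized images. For an extent with a footer the copy
 * written at offset 0 stores VMDK_GD_AT_END for the grain directory offsets;
 * the real values only appear in the footer copy written near the end.
 */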
2702
2703#ifdef VBOX_WITH_VMDK_ESX
2704/**
2705 * Internal: unused code to read the metadata of a sparse ESX extent.
2706 *
2707 * Such extents never leave an ESX server, so this code is never used.
2708 */
2709static int vmdkReadMetaESXSparseExtent(PVMDKEXTENT pExtent)
2710{
2711 COWDisk_Header Header;
2712 uint64_t cSectorsPerGDE;
2713
2714 int rc = vmdkFileReadAt(pExtent->pFile, 0, &Header, sizeof(Header), NULL);
2715 AssertRC(rc);
2716 if (RT_FAILURE(rc))
2717 goto out;
2718 if ( RT_LE2H_U32(Header.magicNumber) != VMDK_ESX_SPARSE_MAGICNUMBER
2719 || RT_LE2H_U32(Header.version) != 1
2720 || RT_LE2H_U32(Header.flags) != 3)
2721 {
2722 rc = VERR_VD_VMDK_INVALID_HEADER;
2723 goto out;
2724 }
2725 pExtent->enmType = VMDKETYPE_ESX_SPARSE;
2726 pExtent->cSectors = RT_LE2H_U32(Header.numSectors);
2727 pExtent->cSectorsPerGrain = RT_LE2H_U32(Header.grainSize);
2728 /* The spec says that this must be between 1 sector and 1MB. This code
2729 * assumes it's a power of two, so check that requirement, too. */
2730 if ( (pExtent->cSectorsPerGrain & (pExtent->cSectorsPerGrain - 1))
2731 || pExtent->cSectorsPerGrain == 0
2732 || pExtent->cSectorsPerGrain > 2048)
2733 {
2734 rc = VERR_VD_VMDK_INVALID_HEADER;
2735 goto out;
2736 }
2737 pExtent->uDescriptorSector = 0;
2738 pExtent->cDescriptorSectors = 0;
2739 pExtent->uSectorGD = RT_LE2H_U32(Header.gdOffset);
2740 pExtent->uSectorRGD = 0;
2741 pExtent->cOverheadSectors = 0;
2742 pExtent->cGTEntries = 4096;
2743 cSectorsPerGDE = pExtent->cGTEntries * pExtent->cSectorsPerGrain;
2744 if (!cSectorsPerGDE || cSectorsPerGDE > UINT32_MAX)
2745 {
2746 rc = VERR_VD_VMDK_INVALID_HEADER;
2747 goto out;
2748 }
2749 pExtent->cSectorsPerGDE = cSectorsPerGDE;
2750 pExtent->cGDEntries = (pExtent->cSectors + cSectorsPerGDE - 1) / cSectorsPerGDE;
2751 if (pExtent->cGDEntries != RT_LE2H_U32(Header.numGDEntries))
2752 {
2753 /* Inconsistency detected. Computed number of GD entries doesn't match
2754 * stored value. Better be safe than sorry. */
2755 rc = VERR_VD_VMDK_INVALID_HEADER;
2756 goto out;
2757 }
2758 pExtent->uFreeSector = RT_LE2H_U32(Header.freeSector);
2759 pExtent->fUncleanShutdown = !!Header.uncleanShutdown;
2760
2761 rc = vmdkReadGrainDirectory(pExtent);
2762
2763out:
2764 if (RT_FAILURE(rc))
2765 vmdkFreeExtentData(pExtent->pImage, pExtent, false);
2766
2767 return rc;
2768}
2769#endif /* VBOX_WITH_VMDK_ESX */
2770
2771/**
2772 * Internal: free the memory used by the extent data structure, optionally
2773 * deleting the referenced files.
2774 */
2775static void vmdkFreeExtentData(PVMDKIMAGE pImage, PVMDKEXTENT pExtent,
2776 bool fDelete)
2777{
2778 vmdkFreeGrainDirectory(pExtent);
2779 if (pExtent->pDescData)
2780 {
2781 RTMemFree(pExtent->pDescData);
2782 pExtent->pDescData = NULL;
2783 }
2784 if (pExtent->pFile != NULL)
2785 {
2786 /* Do not delete raw extents, these have full and base names equal. */
2787 vmdkFileClose(pImage, &pExtent->pFile,
2788 fDelete
2789 && pExtent->pszFullname
2790 && strcmp(pExtent->pszFullname, pExtent->pszBasename));
2791 }
2792 if (pExtent->pszBasename)
2793 {
2794 RTMemTmpFree((void *)pExtent->pszBasename);
2795 pExtent->pszBasename = NULL;
2796 }
2797 if (pExtent->pszFullname)
2798 {
2799 RTStrFree((char *)(void *)pExtent->pszFullname);
2800 pExtent->pszFullname = NULL;
2801 }
2802 if (pExtent->pvGrain)
2803 {
2804 RTMemFree(pExtent->pvGrain);
2805 pExtent->pvGrain = NULL;
2806 }
2807}
2808
2809/**
2810 * Internal: allocate grain table cache if necessary for this image.
2811 */
2812static int vmdkAllocateGrainTableCache(PVMDKIMAGE pImage)
2813{
2814 PVMDKEXTENT pExtent;
2815
2816 /* Allocate grain table cache if any sparse extent is present. */
2817 for (unsigned i = 0; i < pImage->cExtents; i++)
2818 {
2819 pExtent = &pImage->pExtents[i];
2820 if ( pExtent->enmType == VMDKETYPE_HOSTED_SPARSE
2821#ifdef VBOX_WITH_VMDK_ESX
2822 || pExtent->enmType == VMDKETYPE_ESX_SPARSE
2823#endif /* VBOX_WITH_VMDK_ESX */
2824 )
2825 {
2826 /* Allocate grain table cache. */
2827 pImage->pGTCache = (PVMDKGTCACHE)RTMemAllocZ(sizeof(VMDKGTCACHE));
2828 if (!pImage->pGTCache)
2829 return VERR_NO_MEMORY;
2830 for (unsigned j = 0; j < VMDK_GT_CACHE_SIZE; j++)
2831 {
2832 PVMDKGTCACHEENTRY pGCE = &pImage->pGTCache->aGTCache[j];
2833 pGCE->uExtent = UINT32_MAX;
2834 }
2835 pImage->pGTCache->cEntries = VMDK_GT_CACHE_SIZE;
2836 break;
2837 }
2838 }
2839
2840 return VINF_SUCCESS;
2841}
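/*
 * A single grain table cache is shared by all sparse extents of the image.
 * It holds VMDK_GT_CACHE_SIZE entries, each initially marked invalid by
 * setting uExtent to UINT32_MAX, a value no real extent index reaches.
 */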
2842
2843/**
2844 * Internal: allocate the given number of extents.
2845 */
2846static int vmdkCreateExtents(PVMDKIMAGE pImage, unsigned cExtents)
2847{
2848 int rc = VINF_SUCCESS;
2849 PVMDKEXTENT pExtents = (PVMDKEXTENT)RTMemAllocZ(cExtents * sizeof(VMDKEXTENT));
2850 if (pExtents)
2851 {
2852 for (unsigned i = 0; i < cExtents; i++)
2853 {
2854 pExtents[i].pFile = NULL;
2855 pExtents[i].pszBasename = NULL;
2856 pExtents[i].pszFullname = NULL;
2857 pExtents[i].pGD = NULL;
2858 pExtents[i].pRGD = NULL;
2859 pExtents[i].pDescData = NULL;
2860 pExtents[i].uVersion = 1;
2861 pExtents[i].uCompression = VMDK_COMPRESSION_NONE;
2862 pExtents[i].uExtent = i;
2863 pExtents[i].pImage = pImage;
2864 }
2865 pImage->pExtents = pExtents;
2866 pImage->cExtents = cExtents;
2867 }
2868 else
2869 rc = VERR_NO_MEMORY;
2870
2871 return rc;
2872}
2873
2874/**
2875 * Internal: Open an image, constructing all necessary data structures.
2876 */
2877static int vmdkOpenImage(PVMDKIMAGE pImage, unsigned uOpenFlags)
2878{
2879 int rc;
2880 uint32_t u32Magic;
2881 PVMDKFILE pFile;
2882 PVMDKEXTENT pExtent;
2883
2884 pImage->uOpenFlags = uOpenFlags;
2885
2886 /* Try to get error interface. */
2887 pImage->pInterfaceError = VDInterfaceGet(pImage->pVDIfsDisk, VDINTERFACETYPE_ERROR);
2888 if (pImage->pInterfaceError)
2889 pImage->pInterfaceErrorCallbacks = VDGetInterfaceError(pImage->pInterfaceError);
2890
2891 /* Try to get async I/O interface. */
2892 pImage->pInterfaceAsyncIO = VDInterfaceGet(pImage->pVDIfsDisk, VDINTERFACETYPE_ASYNCIO);
2893 if (pImage->pInterfaceAsyncIO)
2894 pImage->pInterfaceAsyncIOCallbacks = VDGetInterfaceAsyncIO(pImage->pInterfaceAsyncIO);
2895
2896 /*
2897 * Open the image.
2898 * We don't have to check for asynchronous access because
2899 * we only support raw access and the opened file is a descriptor
2900 * file where no data is stored.
2901 */
2902 rc = vmdkFileOpen(pImage, &pFile, pImage->pszFilename,
2903 uOpenFlags & VD_OPEN_FLAGS_READONLY
2904 ? RTFILE_O_READ | RTFILE_O_OPEN | RTFILE_O_DENY_NONE
2905 : RTFILE_O_READWRITE | RTFILE_O_OPEN | RTFILE_O_DENY_WRITE, false);
2906 if (RT_FAILURE(rc))
2907 {
2908 /* Do NOT signal an appropriate error here, as the VD layer has the
2909 * choice of retrying the open if it failed. */
2910 goto out;
2911 }
2912 pImage->pFile = pFile;
2913
2914 /* Read magic (if present). */
2915 rc = vmdkFileReadAt(pFile, 0, &u32Magic, sizeof(u32Magic), NULL);
2916 if (RT_FAILURE(rc))
2917 {
2918 rc = vmdkError(pImage, rc, RT_SRC_POS, N_("VMDK: error reading the magic number in '%s'"), pImage->pszFilename);
2919 goto out;
2920 }
2921
2922 /* Handle the file according to its magic number. */
2923 if (RT_LE2H_U32(u32Magic) == VMDK_SPARSE_MAGICNUMBER)
2924 {
2925 /* Async I/O is not supported with these files yet. So fail if opened in async I/O mode. */
2926 if (uOpenFlags & VD_OPEN_FLAGS_ASYNC_IO)
2927 {
2928 rc = VERR_NOT_SUPPORTED;
2929 goto out;
2930 }
2931
2932 /* It's a hosted single-extent image. */
2933 rc = vmdkCreateExtents(pImage, 1);
2934 if (RT_FAILURE(rc))
2935 goto out;
2936 /* The opened file is passed to the extent. No separate descriptor
2937 * file, so no need to keep anything open for the image. */
2938 pExtent = &pImage->pExtents[0];
2939 pExtent->pFile = pFile;
2940 pImage->pFile = NULL;
2941 pExtent->pszFullname = RTPathAbsDup(pImage->pszFilename);
2942 if (!pExtent->pszFullname)
2943 {
2944 rc = VERR_NO_MEMORY;
2945 goto out;
2946 }
2947 rc = vmdkReadBinaryMetaExtent(pImage, pExtent);
2948 if (RT_FAILURE(rc))
2949 goto out;
2950
2951 /* As we're dealing with a monolithic image here, there must
2952 * be a descriptor embedded in the image file. */
2953 if (!pExtent->uDescriptorSector || !pExtent->cDescriptorSectors)
2954 {
2955 rc = vmdkError(pImage, VERR_VD_VMDK_INVALID_HEADER, RT_SRC_POS, N_("VMDK: monolithic image without descriptor in '%s'"), pImage->pszFilename);
2956 goto out;
2957 }
2958 /* HACK: extend the descriptor if it is unusually small and it fits in
2959 * the unused space after the image header. Allows opening VMDK files
2960 * with an extremely small descriptor in read/write mode. */
2961 if ( !(pImage->uOpenFlags & VD_OPEN_FLAGS_READONLY)
2962 && pExtent->cDescriptorSectors < 3
2963 && (int64_t)pExtent->uSectorGD - pExtent->uDescriptorSector >= 4
2964 && (!pExtent->uSectorRGD || (int64_t)pExtent->uSectorRGD - pExtent->uDescriptorSector >= 4))
2965 {
2966 pExtent->cDescriptorSectors = 4;
2967 pExtent->fMetaDirty = true;
2968 }
2969 /* Read the descriptor from the extent. */
2970 pExtent->pDescData = (char *)RTMemAllocZ(VMDK_SECTOR2BYTE(pExtent->cDescriptorSectors));
2971 if (!pExtent->pDescData)
2972 {
2973 rc = VERR_NO_MEMORY;
2974 goto out;
2975 }
2976 rc = vmdkFileReadAt(pExtent->pFile,
2977 VMDK_SECTOR2BYTE(pExtent->uDescriptorSector),
2978 pExtent->pDescData,
2979 VMDK_SECTOR2BYTE(pExtent->cDescriptorSectors), NULL);
2980 AssertRC(rc);
2981 if (RT_FAILURE(rc))
2982 {
2983 rc = vmdkError(pImage, rc, RT_SRC_POS, N_("VMDK: read error for descriptor in '%s'"), pExtent->pszFullname);
2984 goto out;
2985 }
2986
2987 rc = vmdkParseDescriptor(pImage, pExtent->pDescData,
2988 VMDK_SECTOR2BYTE(pExtent->cDescriptorSectors));
2989 if (RT_FAILURE(rc))
2990 goto out;
2991
2992 rc = vmdkReadMetaExtent(pImage, pExtent);
2993 if (RT_FAILURE(rc))
2994 goto out;
2995
2996 /* Mark the extent as unclean if opened in read-write mode. */
2997 if (!(uOpenFlags & VD_OPEN_FLAGS_READONLY))
2998 {
2999 pExtent->fUncleanShutdown = true;
3000 pExtent->fMetaDirty = true;
3001 }
3002 }
3003 else
3004 {
3005 /* Allocate at least 10K, and make sure that there is 5K free space
3006 * in case new entries need to be added to the descriptor. Never
3007 * allocate more than 128K, because that is not a valid descriptor file
3008 * and will result in the correct "truncated read" error handling. */
3009 uint64_t cbSize;
3010 rc = vmdkFileGetSize(pFile, &cbSize);
3011 if (RT_FAILURE(rc))
3012 goto out;
3013 if (cbSize % VMDK_SECTOR2BYTE(10))
3014 cbSize += VMDK_SECTOR2BYTE(20) - cbSize % VMDK_SECTOR2BYTE(10);
3015 else
3016 cbSize += VMDK_SECTOR2BYTE(10);
3017 cbSize = RT_MIN(cbSize, _128K);
3018 pImage->cbDescAlloc = RT_MAX(VMDK_SECTOR2BYTE(20), cbSize);
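/* Sizing example (illustrative): a 2000 byte descriptor file is not a
 * multiple of 10 sectors (5120 bytes), so cbSize grows by 10240 - 2000
 * to exactly 10240 bytes, i.e. a 10K buffer leaving about 8K of free
 * space for additional descriptor lines. */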
3019 pImage->pDescData = (char *)RTMemAllocZ(pImage->cbDescAlloc);
3020 if (!pImage->pDescData)
3021 {
3022 rc = VERR_NO_MEMORY;
3023 goto out;
3024 }
3025
3026 size_t cbRead;
3027 rc = vmdkFileReadAt(pImage->pFile, 0, pImage->pDescData,
3028 pImage->cbDescAlloc, &cbRead);
3029 if (RT_FAILURE(rc))
3030 {
3031 rc = vmdkError(pImage, rc, RT_SRC_POS, N_("VMDK: read error for descriptor in '%s'"), pImage->pszFilename);
3032 goto out;
3033 }
3034 if (cbRead == pImage->cbDescAlloc)
3035 {
3036 /* Likely the read is truncated. Better fail a bit too early
3037 * (normally the descriptor is much smaller than our buffer). */
3038 rc = vmdkError(pImage, VERR_VD_VMDK_INVALID_HEADER, RT_SRC_POS, N_("VMDK: cannot read descriptor in '%s'"), pImage->pszFilename);
3039 goto out;
3040 }
3041
3042 rc = vmdkParseDescriptor(pImage, pImage->pDescData,
3043 pImage->cbDescAlloc);
3044 if (RT_FAILURE(rc))
3045 goto out;
3046
3047 /*
3048 * We have to check for the asynchronous open flag. The extents
3049 * are parsed and all their types are known now. Check that every
3050 * extent is either FLAT, VMFS or ZERO and that there is at most one FLAT extent.
3051 */
3052 if (uOpenFlags & VD_OPEN_FLAGS_ASYNC_IO)
3053 {
3054 unsigned cFlatExtents = 0;
3055
3056 for (unsigned i = 0; i < pImage->cExtents; i++)
3057 {
3058 PVMDKEXTENT pExtent = &pImage->pExtents[i];
3059
3060 if (( pExtent->enmType != VMDKETYPE_FLAT
3061 && pExtent->enmType != VMDKETYPE_ZERO
3062 && pExtent->enmType != VMDKETYPE_VMFS)
3063 || ((pImage->pExtents[i].enmType == VMDKETYPE_FLAT) && (cFlatExtents > 0)))
3064 {
3065 /*
3066 * The opened image contains at least one extent which is neither
3067 * flat, VMFS nor zero, or more than one flat extent. Return an error
3068 * but don't set an error message, as the caller can retry in non-async I/O mode.
3069 */
3070 rc = VERR_NOT_SUPPORTED;
3071 goto out;
3072 }
3073 if (pExtent->enmType == VMDKETYPE_FLAT)
3074 cFlatExtents++;
3075 }
3076 }
3077
3078 for (unsigned i = 0; i < pImage->cExtents; i++)
3079 {
3080 PVMDKEXTENT pExtent = &pImage->pExtents[i];
3081
3082 if (pExtent->pszBasename)
3083 {
3084 /* Hack to figure out whether the specified name in the
3085 * extent descriptor is absolute. Doesn't always work, but
3086 * should be good enough for now. */
3087 char *pszFullname;
3088 /** @todo implement proper path absolute check. */
3089 if (pExtent->pszBasename[0] == RTPATH_SLASH)
3090 {
3091 pszFullname = RTStrDup(pExtent->pszBasename);
3092 if (!pszFullname)
3093 {
3094 rc = VERR_NO_MEMORY;
3095 goto out;
3096 }
3097 }
3098 else
3099 {
3100 size_t cbDirname;
3101 char *pszDirname = RTStrDup(pImage->pszFilename);
3102 if (!pszDirname)
3103 {
3104 rc = VERR_NO_MEMORY;
3105 goto out;
3106 }
3107 RTPathStripFilename(pszDirname);
3108 cbDirname = strlen(pszDirname);
3109 rc = RTStrAPrintf(&pszFullname, "%s%c%s", pszDirname,
3110 RTPATH_SLASH, pExtent->pszBasename);
3111 RTStrFree(pszDirname);
3112 if (RT_FAILURE(rc))
3113 goto out;
3114 }
3115 pExtent->pszFullname = pszFullname;
3116 }
3117 else
3118 pExtent->pszFullname = NULL;
3119
3120 switch (pExtent->enmType)
3121 {
3122 case VMDKETYPE_HOSTED_SPARSE:
3123 rc = vmdkFileOpen(pImage, &pExtent->pFile, pExtent->pszFullname,
3124 uOpenFlags & VD_OPEN_FLAGS_READONLY
3125 ? RTFILE_O_READ | RTFILE_O_OPEN | RTFILE_O_DENY_NONE
3126 : RTFILE_O_READWRITE | RTFILE_O_OPEN | RTFILE_O_DENY_WRITE, false);
3127 if (RT_FAILURE(rc))
3128 {
3129 /* Do NOT signal an appropriate error here, as the VD
3130 * layer has the choice of retrying the open if it
3131 * failed. */
3132 goto out;
3133 }
3134 rc = vmdkReadBinaryMetaExtent(pImage, pExtent);
3135 if (RT_FAILURE(rc))
3136 goto out;
3137 rc = vmdkReadMetaExtent(pImage, pExtent);
3138 if (RT_FAILURE(rc))
3139 goto out;
3140
3141 /* Mark extent as unclean if opened in read-write mode. */
3142 if (!(uOpenFlags & VD_OPEN_FLAGS_READONLY))
3143 {
3144 pExtent->fUncleanShutdown = true;
3145 pExtent->fMetaDirty = true;
3146 }
3147 break;
3148 case VMDKETYPE_VMFS:
3149 case VMDKETYPE_FLAT:
3150 rc = vmdkFileOpen(pImage, &pExtent->pFile, pExtent->pszFullname,
3151 uOpenFlags & VD_OPEN_FLAGS_READONLY
3152 ? RTFILE_O_READ | RTFILE_O_OPEN | RTFILE_O_DENY_NONE
3153 : RTFILE_O_READWRITE | RTFILE_O_OPEN | RTFILE_O_DENY_WRITE, true);
3154 if (RT_FAILURE(rc))
3155 {
3156 /* Do NOT signal an appropriate error here, as the VD
3157 * layer has the choice of retrying the open if it
3158 * failed. */
3159 goto out;
3160 }
3161 break;
3162 case VMDKETYPE_ZERO:
3163 /* Nothing to do. */
3164 break;
3165 default:
3166 AssertMsgFailed(("unknown vmdk extent type %d\n", pExtent->enmType));
3167 }
3168 }
3169 }
3170
3171 /* Make sure this is not reached accidentally with an error status. */
3172 AssertRC(rc);
3173
3174 /* Determine PCHS geometry if not set. */
3175 if (pImage->PCHSGeometry.cCylinders == 0)
3176 {
3177 uint64_t cCylinders = VMDK_BYTE2SECTOR(pImage->cbSize)
3178 / pImage->PCHSGeometry.cHeads
3179 / pImage->PCHSGeometry.cSectors;
3180 pImage->PCHSGeometry.cCylinders = (unsigned)RT_MIN(cCylinders, 16383);
3181 if (!(pImage->uOpenFlags & VD_OPEN_FLAGS_READONLY))
3182 {
3183 rc = vmdkDescSetPCHSGeometry(pImage, &pImage->PCHSGeometry);
3184 AssertRC(rc);
3185 }
3186 }
3187
3188 /* Update the image metadata now in case it has changed. */
3189 rc = vmdkFlushImage(pImage);
3190 if (RT_FAILURE(rc))
3191 goto out;
3192
3193 /* Figure out a few per-image constants from the extents. */
3194 pImage->cbSize = 0;
3195 for (unsigned i = 0; i < pImage->cExtents; i++)
3196 {
3197 pExtent = &pImage->pExtents[i];
3198 if ( pExtent->enmType == VMDKETYPE_HOSTED_SPARSE
3199#ifdef VBOX_WITH_VMDK_ESX
3200 || pExtent->enmType == VMDKETYPE_ESX_SPARSE
3201#endif /* VBOX_WITH_VMDK_ESX */
3202 )
3203 {
3204 /* Here used to be a check whether the nominal size of an extent
3205 * is a multiple of the grain size. The spec says that this is
3206 * always the case, but unfortunately some files out there in the
3207 * wild violate the spec (e.g. ReactOS 0.3.1). */
3208 }
3209 pImage->cbSize += VMDK_SECTOR2BYTE(pExtent->cNominalSectors);
3210 }
3211
3212 for (unsigned i = 0; i < pImage->cExtents; i++)
3213 {
3214 pExtent = &pImage->pExtents[i];
3215 if ( pImage->pExtents[i].enmType == VMDKETYPE_FLAT
3216 || pImage->pExtents[i].enmType == VMDKETYPE_ZERO)
3217 {
3218 pImage->uImageFlags |= VD_IMAGE_FLAGS_FIXED;
3219 break;
3220 }
3221 }
3222
3223 rc = vmdkAllocateGrainTableCache(pImage);
3224 if (RT_FAILURE(rc))
3225 goto out;
3226
3227out:
3228 if (RT_FAILURE(rc))
3229 vmdkFreeImage(pImage, false);
3230 return rc;
3231}
3232
3233/**
3234 * Internal: create VMDK images for raw disk/partition access.
3235 */
3236static int vmdkCreateRawImage(PVMDKIMAGE pImage, const PVBOXHDDRAW pRaw,
3237 uint64_t cbSize)
3238{
3239 int rc = VINF_SUCCESS;
3240 PVMDKEXTENT pExtent;
3241
3242 if (pRaw->fRawDisk)
3243 {
3244 /* Full raw disk access. This requires setting up a descriptor
3245 * file and opening the (flat) raw disk. */
3246 rc = vmdkCreateExtents(pImage, 1);
3247 if (RT_FAILURE(rc))
3248 return vmdkError(pImage, rc, RT_SRC_POS, N_("VMDK: could not create new extent list in '%s'"), pImage->pszFilename);
3249 pExtent = &pImage->pExtents[0];
3250 /* Create raw disk descriptor file. */
3251 rc = vmdkFileOpen(pImage, &pImage->pFile, pImage->pszFilename,
3252 RTFILE_O_READWRITE | RTFILE_O_CREATE | RTFILE_O_DENY_WRITE | RTFILE_O_NOT_CONTENT_INDEXED,
3253 false);
3254 if (RT_FAILURE(rc))
3255 return vmdkError(pImage, rc, RT_SRC_POS, N_("VMDK: could not create new file '%s'"), pImage->pszFilename);
3256
3257 /* Set up basename for extent description. Cannot use StrDup. */
3258 size_t cbBasename = strlen(pRaw->pszRawDisk) + 1;
3259 char *pszBasename = (char *)RTMemTmpAlloc(cbBasename);
3260 if (!pszBasename)
3261 return VERR_NO_MEMORY;
3262 memcpy(pszBasename, pRaw->pszRawDisk, cbBasename);
3263 pExtent->pszBasename = pszBasename;
3264 /* For raw disks the full name is identical to the base name. */
3265 pExtent->pszFullname = RTStrDup(pszBasename);
3266 if (!pExtent->pszFullname)
3267 return VERR_NO_MEMORY;
3268 pExtent->enmType = VMDKETYPE_FLAT;
3269 pExtent->cNominalSectors = VMDK_BYTE2SECTOR(cbSize);
3270 pExtent->uSectorOffset = 0;
3271 pExtent->enmAccess = VMDKACCESS_READWRITE;
3272 pExtent->fMetaDirty = false;
3273
3274 /* Open flat image, the raw disk. */
3275 rc = vmdkFileOpen(pImage, &pExtent->pFile, pExtent->pszFullname,
3276 RTFILE_O_READWRITE | RTFILE_O_OPEN | RTFILE_O_DENY_WRITE, false);
3277 if (RT_FAILURE(rc))
3278 return vmdkError(pImage, rc, RT_SRC_POS, N_("VMDK: could not open raw disk file '%s'"), pExtent->pszFullname);
3279 }
3280 else
3281 {
3282 /* Raw partition access. This requires setting up a descriptor
3283 * file, writing the partition information to a flat extent and
3284 * opening all the (flat) raw disk partitions. */
3285
3286 /* First pass over the partitions to determine how many
3287 * extents we need. One partition can require up to 4 extents.
3288 * One to skip over unpartitioned space, one for the
3289 * partitioning data, one to skip over unpartitioned space
3290 * and one for the partition data. */
3291 unsigned cExtents = 0;
3292 uint64_t uStart = 0;
3293 for (unsigned i = 0; i < pRaw->cPartitions; i++)
3294 {
3295 PVBOXHDDRAWPART pPart = &pRaw->pPartitions[i];
3296 if (pPart->cbPartitionData)
3297 {
3298 if (uStart > pPart->uPartitionDataStart)
3299 return vmdkError(pImage, VERR_INVALID_PARAMETER, RT_SRC_POS, N_("VMDK: cannot go backwards for partitioning information in '%s'"), pImage->pszFilename);
3300 else if (uStart != pPart->uPartitionDataStart)
3301 cExtents++;
3302 uStart = pPart->uPartitionDataStart + pPart->cbPartitionData;
3303 cExtents++;
3304 }
3305 if (pPart->cbPartition)
3306 {
3307 if (uStart > pPart->uPartitionStart)
3308 return vmdkError(pImage, VERR_INVALID_PARAMETER, RT_SRC_POS, N_("VMDK: cannot go backwards for partition data in '%s'"), pImage->pszFilename);
3309 else if (uStart != pPart->uPartitionStart)
3310 cExtents++;
3311 uStart = pPart->uPartitionStart + pPart->cbPartition;
3312 cExtents++;
3313 }
3314 }
3315 /* Another extent for filling up the rest of the image. */
3316 if (uStart != cbSize)
3317 cExtents++;
3318
3319 rc = vmdkCreateExtents(pImage, cExtents);
3320 if (RT_FAILURE(rc))
3321 return vmdkError(pImage, rc, RT_SRC_POS, N_("VMDK: could not create new extent list in '%s'"), pImage->pszFilename);
3322
3323 /* Create raw partition descriptor file. */
3324 rc = vmdkFileOpen(pImage, &pImage->pFile, pImage->pszFilename,
3325 RTFILE_O_READWRITE | RTFILE_O_CREATE | RTFILE_O_DENY_WRITE | RTFILE_O_NOT_CONTENT_INDEXED,
3326 false);
3327 if (RT_FAILURE(rc))
3328 return vmdkError(pImage, rc, RT_SRC_POS, N_("VMDK: could not create new file '%s'"), pImage->pszFilename);
3329
3330 /* Create base filename for the partition table extent. */
3331 /** @todo remove fixed buffer without creating memory leaks. */
3332 char pszPartition[1024];
3333 const char *pszBase = RTPathFilename(pImage->pszFilename);
3334 const char *pszExt = RTPathExt(pszBase);
3335 if (pszExt == NULL)
3336 return vmdkError(pImage, VERR_INVALID_PARAMETER, RT_SRC_POS, N_("VMDK: invalid filename '%s'"), pImage->pszFilename);
3337 char *pszBaseBase = RTStrDup(pszBase);
3338 if (!pszBaseBase)
3339 return VERR_NO_MEMORY;
3340 RTPathStripExt(pszBaseBase);
3341 RTStrPrintf(pszPartition, sizeof(pszPartition), "%s-pt%s",
3342 pszBaseBase, pszExt);
3343 RTStrFree(pszBaseBase);
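/* E.g. for an image named "raw.vmdk" (name purely illustrative) this yields
 * "raw-pt.vmdk" as the name of the flat extent holding the partition tables. */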
3344
3345 /* Second pass over the partitions, now define all extents. */
3346 uint64_t uPartOffset = 0;
3347 cExtents = 0;
3348 uStart = 0;
3349 for (unsigned i = 0; i < pRaw->cPartitions; i++)
3350 {
3351 PVBOXHDDRAWPART pPart = &pRaw->pPartitions[i];
3352 if (pPart->cbPartitionData)
3353 {
3354 if (uStart != pPart->uPartitionDataStart)
3355 {
3356 pExtent = &pImage->pExtents[cExtents++];
3357 pExtent->pszBasename = NULL;
3358 pExtent->pszFullname = NULL;
3359 pExtent->enmType = VMDKETYPE_ZERO;
3360 pExtent->cNominalSectors = VMDK_BYTE2SECTOR(pPart->uPartitionDataStart - uStart);
3361 pExtent->uSectorOffset = 0;
3362 pExtent->enmAccess = VMDKACCESS_READWRITE;
3363 pExtent->fMetaDirty = false;
3364 }
3365 uStart = pPart->uPartitionDataStart + pPart->cbPartitionData;
3366 pExtent = &pImage->pExtents[cExtents++];
3367 /* Set up basename for extent description. Can't use StrDup. */
3368 size_t cbBasename = strlen(pszPartition) + 1;
3369 char *pszBasename = (char *)RTMemTmpAlloc(cbBasename);
3370 if (!pszBasename)
3371 return VERR_NO_MEMORY;
3372 memcpy(pszBasename, pszPartition, cbBasename);
3373 pExtent->pszBasename = pszBasename;
3374
3375 /* Set up full name for partition extent. */
3376 size_t cbDirname;
3377 char *pszDirname = RTStrDup(pImage->pszFilename);
3378 if (!pszDirname)
3379 return VERR_NO_MEMORY;
3380 RTPathStripFilename(pszDirname);
3381 cbDirname = strlen(pszDirname);
3382 char *pszFullname;
3383 rc = RTStrAPrintf(&pszFullname, "%s%c%s", pszDirname,
3384 RTPATH_SLASH, pExtent->pszBasename);
3385 RTStrFree(pszDirname);
3386 if (RT_FAILURE(rc))
3387 return rc;
3388 pExtent->pszFullname = pszFullname;
3389 pExtent->enmType = VMDKETYPE_FLAT;
3390 pExtent->cNominalSectors = VMDK_BYTE2SECTOR(pPart->cbPartitionData);
3391 pExtent->uSectorOffset = uPartOffset;
3392 pExtent->enmAccess = VMDKACCESS_READWRITE;
3393 pExtent->fMetaDirty = false;
3394
3395 /* Create partition table flat image. */
3396 rc = vmdkFileOpen(pImage, &pExtent->pFile, pExtent->pszFullname,
3397 RTFILE_O_READWRITE | RTFILE_O_CREATE | RTFILE_O_DENY_WRITE | RTFILE_O_NOT_CONTENT_INDEXED,
3398 false);
3399 if (RT_FAILURE(rc))
3400 return vmdkError(pImage, rc, RT_SRC_POS, N_("VMDK: could not create new partition data file '%s'"), pExtent->pszFullname);
3401 rc = vmdkFileWriteAt(pExtent->pFile,
3402 VMDK_SECTOR2BYTE(uPartOffset),
3403 pPart->pvPartitionData,
3404 pPart->cbPartitionData, NULL);
3405 if (RT_FAILURE(rc))
3406 return vmdkError(pImage, rc, RT_SRC_POS, N_("VMDK: could not write partition data to '%s'"), pExtent->pszFullname);
3407 uPartOffset += VMDK_BYTE2SECTOR(pPart->cbPartitionData);
3408 }
3409 if (pPart->cbPartition)
3410 {
3411 if (uStart != pPart->uPartitionStart)
3412 {
3413 pExtent = &pImage->pExtents[cExtents++];
3414 pExtent->pszBasename = NULL;
3415 pExtent->pszFullname = NULL;
3416 pExtent->enmType = VMDKETYPE_ZERO;
3417 pExtent->cNominalSectors = VMDK_BYTE2SECTOR(pPart->uPartitionStart - uStart);
3418 pExtent->uSectorOffset = 0;
3419 pExtent->enmAccess = VMDKACCESS_READWRITE;
3420 pExtent->fMetaDirty = false;
3421 }
3422 uStart = pPart->uPartitionStart + pPart->cbPartition;
3423 pExtent = &pImage->pExtents[cExtents++];
3424 if (pPart->pszRawDevice)
3425 {
3426                 /* Set up basename for extent description. Can't use StrDup. */
3427 size_t cbBasename = strlen(pPart->pszRawDevice) + 1;
3428 char *pszBasename = (char *)RTMemTmpAlloc(cbBasename);
3429 if (!pszBasename)
3430 return VERR_NO_MEMORY;
3431 memcpy(pszBasename, pPart->pszRawDevice, cbBasename);
3432 pExtent->pszBasename = pszBasename;
3433                 /* For raw disks the full name is identical to the base name. */
3434 pExtent->pszFullname = RTStrDup(pszBasename);
3435 if (!pExtent->pszFullname)
3436 return VERR_NO_MEMORY;
3437 pExtent->enmType = VMDKETYPE_FLAT;
3438 pExtent->cNominalSectors = VMDK_BYTE2SECTOR(pPart->cbPartition);
3439 pExtent->uSectorOffset = VMDK_BYTE2SECTOR(pPart->uPartitionStartOffset);
3440 pExtent->enmAccess = VMDKACCESS_READWRITE;
3441 pExtent->fMetaDirty = false;
3442
3443 /* Open flat image, the raw partition. */
3444 rc = vmdkFileOpen(pImage, &pExtent->pFile, pExtent->pszFullname,
3445 RTFILE_O_READWRITE | RTFILE_O_OPEN | RTFILE_O_DENY_WRITE,
3446 false);
3447 if (RT_FAILURE(rc))
3448 return vmdkError(pImage, rc, RT_SRC_POS, N_("VMDK: could not open raw partition file '%s'"), pExtent->pszFullname);
3449 }
3450 else
3451 {
3452 pExtent->pszBasename = NULL;
3453 pExtent->pszFullname = NULL;
3454 pExtent->enmType = VMDKETYPE_ZERO;
3455 pExtent->cNominalSectors = VMDK_BYTE2SECTOR(pPart->cbPartition);
3456 pExtent->uSectorOffset = 0;
3457 pExtent->enmAccess = VMDKACCESS_READWRITE;
3458 pExtent->fMetaDirty = false;
3459 }
3460 }
3461 }
3462 /* Another extent for filling up the rest of the image. */
3463 if (uStart != cbSize)
3464 {
3465 pExtent = &pImage->pExtents[cExtents++];
3466 pExtent->pszBasename = NULL;
3467 pExtent->pszFullname = NULL;
3468 pExtent->enmType = VMDKETYPE_ZERO;
3469 pExtent->cNominalSectors = VMDK_BYTE2SECTOR(cbSize - uStart);
3470 pExtent->uSectorOffset = 0;
3471 pExtent->enmAccess = VMDKACCESS_READWRITE;
3472 pExtent->fMetaDirty = false;
3473 }
3474 }
3475
3476 rc = vmdkDescBaseSetStr(pImage, &pImage->Descriptor, "createType",
3477 pRaw->fRawDisk ?
3478 "fullDevice" : "partitionedDevice");
3479 if (RT_FAILURE(rc))
3480 return vmdkError(pImage, rc, RT_SRC_POS, N_("VMDK: could not set the image type in '%s'"), pImage->pszFilename);
3481 return rc;
3482}
3483
3484/**
3485 * Internal: create a regular (i.e. file-backed) VMDK image.
3486 */
3487static int vmdkCreateRegularImage(PVMDKIMAGE pImage, uint64_t cbSize,
3488 unsigned uImageFlags,
3489 PFNVMPROGRESS pfnProgress, void *pvUser,
3490 unsigned uPercentStart, unsigned uPercentSpan)
3491{
3492 int rc = VINF_SUCCESS;
3493 unsigned cExtents = 1;
3494 uint64_t cbOffset = 0;
3495 uint64_t cbRemaining = cbSize;
3496
3497 if (uImageFlags & VD_VMDK_IMAGE_FLAGS_SPLIT_2G)
3498 {
3499 cExtents = cbSize / VMDK_2G_SPLIT_SIZE;
3500 /* Do proper extent computation: need one smaller extent if the total
3501 * size isn't evenly divisible by the split size. */
3502 if (cbSize % VMDK_2G_SPLIT_SIZE)
3503 cExtents++;
3504 }
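     /* Worked example (sizes assumed for illustration): if VMDK_2G_SPLIT_SIZE
      * is 2 GiB and cbSize is 5 GiB, the division above yields 2 full extents
      * and the remainder check adds a third, smaller extent for the last 1 GiB. */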
3505 rc = vmdkCreateExtents(pImage, cExtents);
3506 if (RT_FAILURE(rc))
3507 return vmdkError(pImage, rc, RT_SRC_POS, N_("VMDK: could not create new extent list in '%s'"), pImage->pszFilename);
3508
3509 /* Basename strings needed for constructing the extent names. */
3510 char *pszBasenameSubstr = RTPathFilename(pImage->pszFilename);
3511 AssertPtr(pszBasenameSubstr);
3512 size_t cbBasenameSubstr = strlen(pszBasenameSubstr) + 1;
3513
3514     /* Create separate descriptor file if necessary. */
3515 if (cExtents != 1 || (uImageFlags & VD_IMAGE_FLAGS_FIXED))
3516 {
3517 rc = vmdkFileOpen(pImage, &pImage->pFile, pImage->pszFilename,
3518 RTFILE_O_READWRITE | RTFILE_O_CREATE | RTFILE_O_DENY_WRITE | RTFILE_O_NOT_CONTENT_INDEXED,
3519 false);
3520 if (RT_FAILURE(rc))
3521 return vmdkError(pImage, rc, RT_SRC_POS, N_("VMDK: could not create new sparse descriptor file '%s'"), pImage->pszFilename);
3522 }
3523 else
3524 pImage->pFile = NULL;
3525
3526 /* Set up all extents. */
3527 for (unsigned i = 0; i < cExtents; i++)
3528 {
3529 PVMDKEXTENT pExtent = &pImage->pExtents[i];
3530 uint64_t cbExtent = cbRemaining;
3531
3532         /* Set up fullname/basename for extent description. Cannot use StrDup
3533          * for the basename, as the memory must be freeable with RTMemTmpFree;
3534          * other code paths cannot use StrDup and therefore allocate the
3535          * basename with RTMemTmpAlloc. */
3536 if (cExtents == 1 && !(uImageFlags & VD_IMAGE_FLAGS_FIXED))
3537 {
3538 char *pszBasename = (char *)RTMemTmpAlloc(cbBasenameSubstr);
3539 if (!pszBasename)
3540 return VERR_NO_MEMORY;
3541 memcpy(pszBasename, pszBasenameSubstr, cbBasenameSubstr);
3542 pExtent->pszBasename = pszBasename;
3543 }
3544 else
3545 {
3546 char *pszBasenameExt = RTPathExt(pszBasenameSubstr);
3547 char *pszBasenameBase = RTStrDup(pszBasenameSubstr);
3548 RTPathStripExt(pszBasenameBase);
3549 char *pszTmp;
3550 size_t cbTmp;
3551 if (uImageFlags & VD_IMAGE_FLAGS_FIXED)
3552 {
3553 if (cExtents == 1)
3554 rc = RTStrAPrintf(&pszTmp, "%s-flat%s", pszBasenameBase,
3555 pszBasenameExt);
3556 else
3557 rc = RTStrAPrintf(&pszTmp, "%s-f%03d%s", pszBasenameBase,
3558 i+1, pszBasenameExt);
3559 }
3560 else
3561 rc = RTStrAPrintf(&pszTmp, "%s-s%03d%s", pszBasenameBase, i+1,
3562 pszBasenameExt);
3563 RTStrFree(pszBasenameBase);
3564 if (RT_FAILURE(rc))
3565 return rc;
3566 cbTmp = strlen(pszTmp) + 1;
3567 char *pszBasename = (char *)RTMemTmpAlloc(cbTmp);
3568 if (!pszBasename)
3569 return VERR_NO_MEMORY;
3570 memcpy(pszBasename, pszTmp, cbTmp);
3571 RTStrFree(pszTmp);
3572 pExtent->pszBasename = pszBasename;
3573 if (uImageFlags & VD_VMDK_IMAGE_FLAGS_SPLIT_2G)
3574 cbExtent = RT_MIN(cbRemaining, VMDK_2G_SPLIT_SIZE);
3575 }
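         /* Naming examples for an image created as "test.vmdk" (derived from
          * the format strings above): a monolithic fixed image stores its data
          * in "test-flat.vmdk", a split fixed image in "test-f001.vmdk",
          * "test-f002.vmdk" and so on, and a split sparse image in
          * "test-s001.vmdk", "test-s002.vmdk" and so on. */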
3576 char *pszBasedirectory = RTStrDup(pImage->pszFilename);
3577 RTPathStripFilename(pszBasedirectory);
3578 char *pszFullname;
3579 rc = RTStrAPrintf(&pszFullname, "%s%c%s", pszBasedirectory,
3580 RTPATH_SLASH, pExtent->pszBasename);
3581 RTStrFree(pszBasedirectory);
3582 if (RT_FAILURE(rc))
3583 return rc;
3584 pExtent->pszFullname = pszFullname;
3585
3586 /* Create file for extent. */
3587 rc = vmdkFileOpen(pImage, &pExtent->pFile, pExtent->pszFullname,
3588 RTFILE_O_READWRITE | RTFILE_O_CREATE | RTFILE_O_DENY_WRITE | RTFILE_O_NOT_CONTENT_INDEXED,
3589 false);
3590 if (RT_FAILURE(rc))
3591 return vmdkError(pImage, rc, RT_SRC_POS, N_("VMDK: could not create new file '%s'"), pExtent->pszFullname);
3592 if (uImageFlags & VD_IMAGE_FLAGS_FIXED)
3593 {
3594 rc = vmdkFileSetSize(pExtent->pFile, cbExtent);
3595 if (RT_FAILURE(rc))
3596 return vmdkError(pImage, rc, RT_SRC_POS, N_("VMDK: could not set size of new file '%s'"), pExtent->pszFullname);
3597
3598 /* Fill image with zeroes. We do this for every fixed-size image since on some systems
3599 * (for example Windows Vista), it takes ages to write a block near the end of a sparse
3600 * file and the guest could complain about an ATA timeout. */
3601
3602         /** @todo Starting with Linux 2.6.23, there is a fallocate() system call.
3603 * Currently supported file systems are ext4 and ocfs2. */
3604
3605             /* Allocate a temporary zero-filled buffer. Use a bigger block size to optimize writing. */
3606 const size_t cbBuf = 128 * _1K;
3607 void *pvBuf = RTMemTmpAllocZ(cbBuf);
3608 if (!pvBuf)
3609 return VERR_NO_MEMORY;
3610
3611 uint64_t uOff = 0;
3612 /* Write data to all image blocks. */
3613 while (uOff < cbExtent)
3614 {
3615                 unsigned cbChunk = (unsigned)RT_MIN(cbExtent - uOff, cbBuf);
3616
3617 rc = vmdkFileWriteAt(pExtent->pFile, uOff, pvBuf, cbChunk, NULL);
3618 if (RT_FAILURE(rc))
3619 {
3620                     RTMemTmpFree(pvBuf);
3621 return vmdkError(pImage, rc, RT_SRC_POS, N_("VMDK: writing block failed for '%s'"), pImage->pszFilename);
3622 }
3623
3624 uOff += cbChunk;
3625
3626 if (pfnProgress)
3627 {
3628 rc = pfnProgress(NULL /* WARNING! pVM=NULL */,
3629 uPercentStart + uOff * uPercentSpan / cbExtent,
3630 pvUser);
3631 if (RT_FAILURE(rc))
3632 {
3633                         RTMemTmpFree(pvBuf);
3634 return rc;
3635 }
3636 }
3637 }
3638 RTMemTmpFree(pvBuf);
3639 }
3640
3641 /* Place descriptor file information (where integrated). */
3642 if (cExtents == 1 && !(uImageFlags & VD_IMAGE_FLAGS_FIXED))
3643 {
3644 pExtent->uDescriptorSector = 1;
3645 pExtent->cDescriptorSectors = VMDK_BYTE2SECTOR(pImage->cbDescAlloc);
3646 /* The descriptor is part of the (only) extent. */
3647 pExtent->pDescData = pImage->pDescData;
3648 pImage->pDescData = NULL;
3649 }
3650
3651 if (!(uImageFlags & VD_IMAGE_FLAGS_FIXED))
3652 {
3653 uint64_t cSectorsPerGDE, cSectorsPerGD;
3654 pExtent->enmType = VMDKETYPE_HOSTED_SPARSE;
3655 pExtent->cSectors = VMDK_BYTE2SECTOR(RT_ALIGN_64(cbExtent, 65536));
3656 pExtent->cSectorsPerGrain = VMDK_BYTE2SECTOR(65536);
3657 pExtent->cGTEntries = 512;
3658 cSectorsPerGDE = pExtent->cGTEntries * pExtent->cSectorsPerGrain;
3659 pExtent->cSectorsPerGDE = cSectorsPerGDE;
3660 pExtent->cGDEntries = (pExtent->cSectors + cSectorsPerGDE - 1) / cSectorsPerGDE;
3661 cSectorsPerGD = (pExtent->cGDEntries + (512 / sizeof(uint32_t) - 1)) / (512 / sizeof(uint32_t));
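             /* Worked example for the values chosen above: a grain is 65536
              * bytes = 128 sectors, so one grain table (512 entries) covers
              * 512 * 128 sectors = 32 MiB of data. A 2 GiB extent therefore
              * needs 64 grain directory entries, and as one 512-byte sector
              * holds 128 32-bit GD entries the whole grain directory fits
              * into a single sector. */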
3662 if (pImage->uImageFlags & VD_VMDK_IMAGE_FLAGS_STREAM_OPTIMIZED)
3663 {
3664 /* The spec says version is 1 for all VMDKs, but the vast
3665 * majority of streamOptimized VMDKs actually contain
3666                  * version 3 - so go with the majority. Both are accepted. */
3667 pExtent->uVersion = 3;
3668 pExtent->uCompression = VMDK_COMPRESSION_DEFLATE;
3669 }
3670 }
3671 else
3672 {
3673 if (uImageFlags & VD_VMDK_IMAGE_FLAGS_ESX)
3674 pExtent->enmType = VMDKETYPE_VMFS;
3675 else
3676 pExtent->enmType = VMDKETYPE_FLAT;
3677 }
3678
3679 pExtent->enmAccess = VMDKACCESS_READWRITE;
3680 pExtent->fUncleanShutdown = true;
3681 pExtent->cNominalSectors = VMDK_BYTE2SECTOR(cbExtent);
3682 pExtent->uSectorOffset = 0;
3683 pExtent->fMetaDirty = true;
3684
3685 if (!(uImageFlags & VD_IMAGE_FLAGS_FIXED))
3686 {
3687 rc = vmdkCreateGrainDirectory(pExtent,
3688 RT_MAX( pExtent->uDescriptorSector
3689 + pExtent->cDescriptorSectors,
3690 1),
3691 true);
3692 if (RT_FAILURE(rc))
3693 return vmdkError(pImage, rc, RT_SRC_POS, N_("VMDK: could not create new grain directory in '%s'"), pExtent->pszFullname);
3694 }
3695
3696 if (RT_SUCCESS(rc) && pfnProgress)
3697 pfnProgress(NULL /* WARNING! pVM=NULL */,
3698 uPercentStart + i * uPercentSpan / cExtents,
3699 pvUser);
3700
3701 cbRemaining -= cbExtent;
3702 cbOffset += cbExtent;
3703 }
3704
3705 if (pImage->uImageFlags & VD_VMDK_IMAGE_FLAGS_ESX)
3706 {
3707         /* VirtualBox doesn't care, but VMware ESX freaks out if the wrong
3708 * controller type is set in an image. */
3709 rc = vmdkDescDDBSetStr(pImage, &pImage->Descriptor, "ddb.adapterType", "lsilogic");
3710 if (RT_FAILURE(rc))
3711 return vmdkError(pImage, rc, RT_SRC_POS, N_("VMDK: could not set controller type to lsilogic in '%s'"), pImage->pszFilename);
3712 }
3713
3714 const char *pszDescType = NULL;
3715 if (uImageFlags & VD_IMAGE_FLAGS_FIXED)
3716 {
3717 if (pImage->uImageFlags & VD_VMDK_IMAGE_FLAGS_ESX)
3718 pszDescType = "vmfs";
3719 else
3720 pszDescType = (cExtents == 1)
3721 ? "monolithicFlat" : "twoGbMaxExtentFlat";
3722 }
3723 else
3724 {
3725 if (pImage->uImageFlags & VD_VMDK_IMAGE_FLAGS_STREAM_OPTIMIZED)
3726 pszDescType = "streamOptimized";
3727 else
3728 {
3729 pszDescType = (cExtents == 1)
3730 ? "monolithicSparse" : "twoGbMaxExtentSparse";
3731 }
3732 }
3733 rc = vmdkDescBaseSetStr(pImage, &pImage->Descriptor, "createType",
3734 pszDescType);
3735 if (RT_FAILURE(rc))
3736 return vmdkError(pImage, rc, RT_SRC_POS, N_("VMDK: could not set the image type in '%s'"), pImage->pszFilename);
3737 return rc;
3738}
3739
3740/**
3741 * Internal: The actual code for creating any VMDK variant currently in
3742 * existence on hosted environments.
3743 */
3744static int vmdkCreateImage(PVMDKIMAGE pImage, uint64_t cbSize,
3745 unsigned uImageFlags, const char *pszComment,
3746 PCPDMMEDIAGEOMETRY pPCHSGeometry,
3747 PCPDMMEDIAGEOMETRY pLCHSGeometry, PCRTUUID pUuid,
3748 PFNVMPROGRESS pfnProgress, void *pvUser,
3749 unsigned uPercentStart, unsigned uPercentSpan)
3750{
3751 int rc;
3752
3753 pImage->uImageFlags = uImageFlags;
3754
3755 /* Try to get error interface. */
3756 pImage->pInterfaceError = VDInterfaceGet(pImage->pVDIfsDisk, VDINTERFACETYPE_ERROR);
3757 if (pImage->pInterfaceError)
3758 pImage->pInterfaceErrorCallbacks = VDGetInterfaceError(pImage->pInterfaceError);
3759
3760 /* Try to get async I/O interface. */
3761 pImage->pInterfaceAsyncIO = VDInterfaceGet(pImage->pVDIfsDisk, VDINTERFACETYPE_ASYNCIO);
3762 if (pImage->pInterfaceAsyncIO)
3763 pImage->pInterfaceAsyncIOCallbacks = VDGetInterfaceAsyncIO(pImage->pInterfaceAsyncIO);
3764
3765 rc = vmdkCreateDescriptor(pImage, pImage->pDescData, pImage->cbDescAlloc,
3766 &pImage->Descriptor);
3767 if (RT_FAILURE(rc))
3768 {
3769 rc = vmdkError(pImage, rc, RT_SRC_POS, N_("VMDK: could not create new descriptor in '%s'"), pImage->pszFilename);
3770 goto out;
3771 }
3772
3773 if ( (uImageFlags & VD_IMAGE_FLAGS_FIXED)
3774 && (uImageFlags & VD_VMDK_IMAGE_FLAGS_RAWDISK))
3775 {
3776 /* Raw disk image (includes raw partition). */
3777 const PVBOXHDDRAW pRaw = (const PVBOXHDDRAW)pszComment;
3778 /* As the comment is misused, zap it so that no garbage comment
3779 * is set below. */
3780 pszComment = NULL;
3781 rc = vmdkCreateRawImage(pImage, pRaw, cbSize);
3782 }
3783 else
3784 {
3785 /* Regular fixed or sparse image (monolithic or split). */
3786 rc = vmdkCreateRegularImage(pImage, cbSize, uImageFlags,
3787 pfnProgress, pvUser, uPercentStart,
3788 uPercentSpan * 95 / 100);
3789 }
3790
3791 if (RT_FAILURE(rc))
3792 goto out;
3793
3794 if (RT_SUCCESS(rc) && pfnProgress)
3795 pfnProgress(NULL /* WARNING! pVM=NULL */,
3796 uPercentStart + uPercentSpan * 98 / 100, pvUser);
3797
3798 pImage->cbSize = cbSize;
3799
3800 for (unsigned i = 0; i < pImage->cExtents; i++)
3801 {
3802 PVMDKEXTENT pExtent = &pImage->pExtents[i];
3803
3804 rc = vmdkDescExtInsert(pImage, &pImage->Descriptor, pExtent->enmAccess,
3805 pExtent->cNominalSectors, pExtent->enmType,
3806 pExtent->pszBasename, pExtent->uSectorOffset);
3807 if (RT_FAILURE(rc))
3808 {
3809 rc = vmdkError(pImage, rc, RT_SRC_POS, N_("VMDK: could not insert the extent list into descriptor in '%s'"), pImage->pszFilename);
3810 goto out;
3811 }
3812 }
3813 vmdkDescExtRemoveDummy(pImage, &pImage->Descriptor);
3814
3815 if ( pPCHSGeometry->cCylinders != 0
3816 && pPCHSGeometry->cHeads != 0
3817 && pPCHSGeometry->cSectors != 0)
3818 {
3819 rc = vmdkDescSetPCHSGeometry(pImage, pPCHSGeometry);
3820 if (RT_FAILURE(rc))
3821 goto out;
3822 }
3823 if ( pLCHSGeometry->cCylinders != 0
3824 && pLCHSGeometry->cHeads != 0
3825 && pLCHSGeometry->cSectors != 0)
3826 {
3827 rc = vmdkDescSetLCHSGeometry(pImage, pLCHSGeometry);
3828 if (RT_FAILURE(rc))
3829 goto out;
3830 }
3831
3832 pImage->LCHSGeometry = *pLCHSGeometry;
3833 pImage->PCHSGeometry = *pPCHSGeometry;
3834
3835 pImage->ImageUuid = *pUuid;
3836 rc = vmdkDescDDBSetUuid(pImage, &pImage->Descriptor,
3837 VMDK_DDB_IMAGE_UUID, &pImage->ImageUuid);
3838 if (RT_FAILURE(rc))
3839 {
3840 rc = vmdkError(pImage, rc, RT_SRC_POS, N_("VMDK: error storing image UUID in new descriptor in '%s'"), pImage->pszFilename);
3841 goto out;
3842 }
3843 RTUuidClear(&pImage->ParentUuid);
3844 rc = vmdkDescDDBSetUuid(pImage, &pImage->Descriptor,
3845 VMDK_DDB_PARENT_UUID, &pImage->ParentUuid);
3846 if (RT_FAILURE(rc))
3847 {
3848 rc = vmdkError(pImage, rc, RT_SRC_POS, N_("VMDK: error storing parent image UUID in new descriptor in '%s'"), pImage->pszFilename);
3849 goto out;
3850 }
3851 RTUuidClear(&pImage->ModificationUuid);
3852 rc = vmdkDescDDBSetUuid(pImage, &pImage->Descriptor,
3853 VMDK_DDB_MODIFICATION_UUID,
3854 &pImage->ModificationUuid);
3855 if (RT_FAILURE(rc))
3856 {
3857 rc = vmdkError(pImage, rc, RT_SRC_POS, N_("VMDK: error storing modification UUID in new descriptor in '%s'"), pImage->pszFilename);
3858 goto out;
3859 }
3860 RTUuidClear(&pImage->ParentModificationUuid);
3861 rc = vmdkDescDDBSetUuid(pImage, &pImage->Descriptor,
3862 VMDK_DDB_PARENT_MODIFICATION_UUID,
3863 &pImage->ParentModificationUuid);
3864 if (RT_FAILURE(rc))
3865 {
3866 rc = vmdkError(pImage, rc, RT_SRC_POS, N_("VMDK: error storing parent modification UUID in new descriptor in '%s'"), pImage->pszFilename);
3867 goto out;
3868 }
3869
3870 rc = vmdkAllocateGrainTableCache(pImage);
3871 if (RT_FAILURE(rc))
3872 goto out;
3873
3874 rc = vmdkSetImageComment(pImage, pszComment);
3875 if (RT_FAILURE(rc))
3876 {
3877 rc = vmdkError(pImage, rc, RT_SRC_POS, N_("VMDK: cannot set image comment in '%s'"), pImage->pszFilename);
3878 goto out;
3879 }
3880
3881 if (RT_SUCCESS(rc) && pfnProgress)
3882 pfnProgress(NULL /* WARNING! pVM=NULL */,
3883 uPercentStart + uPercentSpan * 99 / 100, pvUser);
3884
3885 rc = vmdkFlushImage(pImage);
3886
3887out:
3888 if (RT_SUCCESS(rc) && pfnProgress)
3889 pfnProgress(NULL /* WARNING! pVM=NULL */,
3890 uPercentStart + uPercentSpan, pvUser);
3891
3892 if (RT_FAILURE(rc))
3893 vmdkFreeImage(pImage, rc != VERR_ALREADY_EXISTS);
3894 return rc;
3895}
3896
3897/**
3898 * Internal: Update image comment.
3899 */
3900static int vmdkSetImageComment(PVMDKIMAGE pImage, const char *pszComment)
3901{
3902 char *pszCommentEncoded;
3903 if (pszComment)
3904 {
3905 pszCommentEncoded = vmdkEncodeString(pszComment);
3906 if (!pszCommentEncoded)
3907 return VERR_NO_MEMORY;
3908 }
3909 else
3910 pszCommentEncoded = NULL;
3911 int rc = vmdkDescDDBSetStr(pImage, &pImage->Descriptor,
3912 "ddb.comment", pszCommentEncoded);
3913 if (pszComment)
3914 RTStrFree(pszCommentEncoded);
3915 if (RT_FAILURE(rc))
3916 return vmdkError(pImage, rc, RT_SRC_POS, N_("VMDK: error storing image comment in descriptor in '%s'"), pImage->pszFilename);
3917 return VINF_SUCCESS;
3918}
3919
3920/**
3921 * Internal. Free all allocated space for representing an image, and optionally
3922 * delete the image from disk.
3923 */
3924static void vmdkFreeImage(PVMDKIMAGE pImage, bool fDelete)
3925{
3926 AssertPtr(pImage);
3927
3928 if (!(pImage->uOpenFlags & VD_OPEN_FLAGS_READONLY))
3929 {
3930 /* Mark all extents as clean. */
3931 for (unsigned i = 0; i < pImage->cExtents; i++)
3932 {
3933 if (( pImage->pExtents[i].enmType == VMDKETYPE_HOSTED_SPARSE
3934#ifdef VBOX_WITH_VMDK_ESX
3935 || pImage->pExtents[i].enmType == VMDKETYPE_ESX_SPARSE
3936#endif /* VBOX_WITH_VMDK_ESX */
3937 )
3938 && pImage->pExtents[i].fUncleanShutdown)
3939 {
3940 pImage->pExtents[i].fUncleanShutdown = false;
3941 pImage->pExtents[i].fMetaDirty = true;
3942 }
3943 }
3944 }
3945 (void)vmdkFlushImage(pImage);
3946
3947 if (pImage->pExtents != NULL)
3948 {
3949 for (unsigned i = 0 ; i < pImage->cExtents; i++)
3950 vmdkFreeExtentData(pImage, &pImage->pExtents[i], fDelete);
3951 RTMemFree(pImage->pExtents);
3952 pImage->pExtents = NULL;
3953 }
3954 pImage->cExtents = 0;
3955 if (pImage->pFile != NULL)
3956 vmdkFileClose(pImage, &pImage->pFile, fDelete);
3957 vmdkFileCheckAllClose(pImage);
3958 if (pImage->pGTCache)
3959 {
3960 RTMemFree(pImage->pGTCache);
3961 pImage->pGTCache = NULL;
3962 }
3963 if (pImage->pDescData)
3964 {
3965 RTMemFree(pImage->pDescData);
3966 pImage->pDescData = NULL;
3967 }
3968}
3969
3970/**
3971 * Internal. Flush image data (and metadata) to disk.
3972 */
3973static int vmdkFlushImage(PVMDKIMAGE pImage)
3974{
3975 PVMDKEXTENT pExtent;
3976 int rc = VINF_SUCCESS;
3977
3978 /* Update descriptor if changed. */
3979 if (pImage->Descriptor.fDirty)
3980 {
3981 rc = vmdkWriteDescriptor(pImage);
3982 if (RT_FAILURE(rc))
3983 goto out;
3984 }
3985
3986 for (unsigned i = 0; i < pImage->cExtents; i++)
3987 {
3988 pExtent = &pImage->pExtents[i];
3989 if (pExtent->pFile != NULL && pExtent->fMetaDirty)
3990 {
3991 switch (pExtent->enmType)
3992 {
3993 case VMDKETYPE_HOSTED_SPARSE:
3994 rc = vmdkWriteMetaSparseExtent(pExtent, 0);
3995 if (RT_FAILURE(rc))
3996 goto out;
3997 if (pExtent->fFooter)
3998 {
3999 uint64_t cbSize;
4000 rc = vmdkFileGetSize(pExtent->pFile, &cbSize);
4001 if (RT_FAILURE(rc))
4002 goto out;
4003 cbSize = RT_ALIGN_64(cbSize, 512);
4004 rc = vmdkWriteMetaSparseExtent(pExtent, cbSize - 2*512);
4005 if (RT_FAILURE(rc))
4006 goto out;
4007 }
4008 break;
4009#ifdef VBOX_WITH_VMDK_ESX
4010 case VMDKETYPE_ESX_SPARSE:
4011 /** @todo update the header. */
4012 break;
4013#endif /* VBOX_WITH_VMDK_ESX */
4014 case VMDKETYPE_VMFS:
4015 case VMDKETYPE_FLAT:
4016 /* Nothing to do. */
4017 break;
4018 case VMDKETYPE_ZERO:
4019 default:
4020 AssertMsgFailed(("extent with type %d marked as dirty\n",
4021 pExtent->enmType));
4022 break;
4023 }
4024 }
4025 switch (pExtent->enmType)
4026 {
4027 case VMDKETYPE_HOSTED_SPARSE:
4028#ifdef VBOX_WITH_VMDK_ESX
4029 case VMDKETYPE_ESX_SPARSE:
4030#endif /* VBOX_WITH_VMDK_ESX */
4031 case VMDKETYPE_VMFS:
4032 case VMDKETYPE_FLAT:
4033                 /** @todo implement a proper absolute path check. */
4034 if ( pExtent->pFile != NULL
4035 && !(pImage->uOpenFlags & VD_OPEN_FLAGS_READONLY)
4036 && !(pExtent->pszBasename[0] == RTPATH_SLASH))
4037 rc = vmdkFileFlush(pExtent->pFile);
4038 break;
4039 case VMDKETYPE_ZERO:
4040 /* No need to do anything for this extent. */
4041 break;
4042 default:
4043 AssertMsgFailed(("unknown extent type %d\n", pExtent->enmType));
4044 break;
4045 }
4046 }
4047
4048out:
4049 return rc;
4050}
4051
4052/**
4053 * Internal. Find extent corresponding to the sector number in the disk.
4054 */
4055static int vmdkFindExtent(PVMDKIMAGE pImage, uint64_t offSector,
4056 PVMDKEXTENT *ppExtent, uint64_t *puSectorInExtent)
4057{
4058 PVMDKEXTENT pExtent = NULL;
4059 int rc = VINF_SUCCESS;
4060
4061 for (unsigned i = 0; i < pImage->cExtents; i++)
4062 {
4063 if (offSector < pImage->pExtents[i].cNominalSectors)
4064 {
4065 pExtent = &pImage->pExtents[i];
4066 *puSectorInExtent = offSector + pImage->pExtents[i].uSectorOffset;
4067 break;
4068 }
4069 offSector -= pImage->pExtents[i].cNominalSectors;
4070 }
4071
4072 if (pExtent)
4073 *ppExtent = pExtent;
4074 else
4075 rc = VERR_IO_SECTOR_NOT_FOUND;
4076
4077 return rc;
4078}
4079
4080/**
4081 * Internal. Hash function for placing the grain table hash entries.
4082 */
4083static uint32_t vmdkGTCacheHash(PVMDKGTCACHE pCache, uint64_t uSector,
4084 unsigned uExtent)
4085{
4086     /** @todo this hash function is quite simple, maybe use one which
4087      * scrambles the bits better. */
4088 return (uSector + uExtent) % pCache->cEntries;
4089}
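/* Example of the mapping above (cache size assumed for illustration): with
 * cEntries = 256, grain table block 5 of extent 0 and grain table block 4 of
 * extent 1 both map to slot 5, so entries of different extents can evict
 * each other from the cache. */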
4090
4091/**
4092 * Internal. Get sector number in the extent file from the relative sector
4093 * number in the extent.
4094 */
4095static int vmdkGetSector(PVMDKGTCACHE pCache, PVMDKEXTENT pExtent,
4096 uint64_t uSector, uint64_t *puExtentSector)
4097{
4098 uint64_t uGDIndex, uGTSector, uGTBlock;
4099 uint32_t uGTHash, uGTBlockIndex;
4100 PVMDKGTCACHEENTRY pGTCacheEntry;
4101 uint32_t aGTDataTmp[VMDK_GT_CACHELINE_SIZE];
4102 int rc;
4103
4104 uGDIndex = uSector / pExtent->cSectorsPerGDE;
4105 if (uGDIndex >= pExtent->cGDEntries)
4106 return VERR_OUT_OF_RANGE;
4107 uGTSector = pExtent->pGD[uGDIndex];
4108 if (!uGTSector)
4109 {
4110 /* There is no grain table referenced by this grain directory
4111 * entry. So there is absolutely no data in this area. */
4112 *puExtentSector = 0;
4113 return VINF_SUCCESS;
4114 }
4115
4116 uGTBlock = uSector / (pExtent->cSectorsPerGrain * VMDK_GT_CACHELINE_SIZE);
4117 uGTHash = vmdkGTCacheHash(pCache, uGTBlock, pExtent->uExtent);
4118 pGTCacheEntry = &pCache->aGTCache[uGTHash];
4119 if ( pGTCacheEntry->uExtent != pExtent->uExtent
4120 || pGTCacheEntry->uGTBlock != uGTBlock)
4121 {
4122 /* Cache miss, fetch data from disk. */
4123 rc = vmdkFileReadAt(pExtent->pFile,
4124 VMDK_SECTOR2BYTE(uGTSector) + (uGTBlock % (pExtent->cGTEntries / VMDK_GT_CACHELINE_SIZE)) * sizeof(aGTDataTmp),
4125 aGTDataTmp, sizeof(aGTDataTmp), NULL);
4126 if (RT_FAILURE(rc))
4127 return vmdkError(pExtent->pImage, rc, RT_SRC_POS, N_("VMDK: cannot read grain table entry in '%s'"), pExtent->pszFullname);
4128 pGTCacheEntry->uExtent = pExtent->uExtent;
4129 pGTCacheEntry->uGTBlock = uGTBlock;
4130 for (unsigned i = 0; i < VMDK_GT_CACHELINE_SIZE; i++)
4131 pGTCacheEntry->aGTData[i] = RT_LE2H_U32(aGTDataTmp[i]);
4132 }
4133 uGTBlockIndex = (uSector / pExtent->cSectorsPerGrain) % VMDK_GT_CACHELINE_SIZE;
4134 uint32_t uGrainSector = pGTCacheEntry->aGTData[uGTBlockIndex];
4135 if (uGrainSector)
4136 *puExtentSector = uGrainSector + uSector % pExtent->cSectorsPerGrain;
4137 else
4138 *puExtentSector = 0;
4139 return VINF_SUCCESS;
4140}
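/* Worked lookup example (extent geometry as set up in vmdkCreateRegularImage,
 * i.e. 128 sectors per grain, 512 grain table entries, 65536 sectors per GDE):
 * for uSector = 70000 the grain directory index is 70000 / 65536 = 1, the
 * entry within that grain table is (70000 / 128) % 512 = 34, and if that entry
 * holds grain sector S the returned extent sector is S + 70000 % 128 = S + 112. */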
4141
4142/**
4143 * Internal. Allocates a new grain table (if necessary), writes the grain
4144 * and updates the grain table. The cache is also updated by this operation.
4145 * This is separate from vmdkGetSector, because that should be as fast as
4146 * possible. Most code from vmdkGetSector also appears here.
4147 */
4148static int vmdkAllocGrain(PVMDKGTCACHE pCache, PVMDKEXTENT pExtent,
4149 uint64_t uSector, const void *pvBuf,
4150 uint64_t cbWrite)
4151{
4152 uint64_t uGDIndex, uGTSector, uRGTSector, uGTBlock;
4153 uint64_t cbExtentSize;
4154 uint32_t uGTHash, uGTBlockIndex;
4155 PVMDKGTCACHEENTRY pGTCacheEntry;
4156 uint32_t aGTDataTmp[VMDK_GT_CACHELINE_SIZE];
4157 int rc;
4158
4159 uGDIndex = uSector / pExtent->cSectorsPerGDE;
4160 if (uGDIndex >= pExtent->cGDEntries)
4161 return VERR_OUT_OF_RANGE;
4162 uGTSector = pExtent->pGD[uGDIndex];
4163 if (pExtent->pRGD)
4164 uRGTSector = pExtent->pRGD[uGDIndex];
4165 else
4166         uRGTSector = 0; /* avoid compiler warning */
4167 if (!uGTSector)
4168 {
4169 /* There is no grain table referenced by this grain directory
4170 * entry. So there is absolutely no data in this area. Allocate
4171 * a new grain table and put the reference to it in the GDs. */
4172 rc = vmdkFileGetSize(pExtent->pFile, &cbExtentSize);
4173 if (RT_FAILURE(rc))
4174 return vmdkError(pExtent->pImage, rc, RT_SRC_POS, N_("VMDK: error getting size in '%s'"), pExtent->pszFullname);
4175 Assert(!(cbExtentSize % 512));
4176 cbExtentSize = RT_ALIGN_64(cbExtentSize, 512);
4177 uGTSector = VMDK_BYTE2SECTOR(cbExtentSize);
4178         /* For writable streamOptimized extents the final sector is the
4179          * end-of-stream marker; it will be re-added after the grain table.
4180          * If the file has a footer, it will also be re-added before the EOS marker. */
4181 if (pExtent->pImage->uImageFlags & VD_VMDK_IMAGE_FLAGS_STREAM_OPTIMIZED)
4182 {
4183 uint64_t uEOSOff = 0;
4184 uGTSector--;
4185 if (pExtent->fFooter)
4186 {
4187 uGTSector--;
4188 uEOSOff = 512;
4189 rc = vmdkWriteMetaSparseExtent(pExtent, VMDK_SECTOR2BYTE(uGTSector) + pExtent->cGTEntries * sizeof(uint32_t));
4190 if (RT_FAILURE(rc))
4191 return vmdkError(pExtent->pImage, rc, RT_SRC_POS, N_("VMDK: cannot write footer after grain table in '%s'"), pExtent->pszFullname);
4192 }
4193 pExtent->uLastGrainSector = 0;
4194 uint8_t aEOS[512];
4195 memset(aEOS, '\0', sizeof(aEOS));
4196 rc = vmdkFileWriteAt(pExtent->pFile,
4197 VMDK_SECTOR2BYTE(uGTSector) + pExtent->cGTEntries * sizeof(uint32_t) + uEOSOff,
4198 aEOS, sizeof(aEOS), NULL);
4199 if (RT_FAILURE(rc))
4200                 return vmdkError(pExtent->pImage, rc, RT_SRC_POS, N_("VMDK: cannot write end-of-stream marker after grain table in '%s'"), pExtent->pszFullname);
4201 }
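        /* Resulting layout at the end of a streamOptimized extent after the
         * grain table has been placed (footer case): ...grain data..., grain
         * table, footer (one sector), end-of-stream marker (one sector). If
         * there is no footer the grain table is followed directly by the EOS
         * marker. */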
4202 /* Normally the grain table is preallocated for hosted sparse extents
4203 * that support more than 32 bit sector numbers. So this shouldn't
4204 * ever happen on a valid extent. */
4205 if (uGTSector > UINT32_MAX)
4206 return VERR_VD_VMDK_INVALID_HEADER;
4207         /* Write grain table by writing the required number of grain table
4208          * cache chunks. This avoids dynamic memory allocation but is a bit
4209          * slower. As this is a pretty infrequently occurring case it
4210          * should be acceptable. */
4211 memset(aGTDataTmp, '\0', sizeof(aGTDataTmp));
4212 for (unsigned i = 0;
4213 i < pExtent->cGTEntries / VMDK_GT_CACHELINE_SIZE;
4214 i++)
4215 {
4216 rc = vmdkFileWriteAt(pExtent->pFile,
4217 VMDK_SECTOR2BYTE(uGTSector) + i * sizeof(aGTDataTmp),
4218 aGTDataTmp, sizeof(aGTDataTmp), NULL);
4219 if (RT_FAILURE(rc))
4220 return vmdkError(pExtent->pImage, rc, RT_SRC_POS, N_("VMDK: cannot write grain table allocation in '%s'"), pExtent->pszFullname);
4221 }
4222 if (pExtent->pRGD)
4223 {
4224 AssertReturn(!uRGTSector, VERR_VD_VMDK_INVALID_HEADER);
4225 rc = vmdkFileGetSize(pExtent->pFile, &cbExtentSize);
4226 if (RT_FAILURE(rc))
4227 return vmdkError(pExtent->pImage, rc, RT_SRC_POS, N_("VMDK: error getting size in '%s'"), pExtent->pszFullname);
4228 Assert(!(cbExtentSize % 512));
4229 uRGTSector = VMDK_BYTE2SECTOR(cbExtentSize);
4230             /* For writable streamOptimized extents the final sector is the
4231              * end-of-stream marker; it will be re-added after the grain table.
4232              * If the file has a footer, it will also be re-added before the EOS marker. */
4233 if (pExtent->pImage->uImageFlags & VD_VMDK_IMAGE_FLAGS_STREAM_OPTIMIZED)
4234 {
4235 uint64_t uEOSOff = 0;
4236 uRGTSector--;
4237 if (pExtent->fFooter)
4238 {
4239 uRGTSector--;
4240 uEOSOff = 512;
4241 rc = vmdkWriteMetaSparseExtent(pExtent, VMDK_SECTOR2BYTE(uRGTSector) + pExtent->cGTEntries * sizeof(uint32_t));
4242 if (RT_FAILURE(rc))
4243 return vmdkError(pExtent->pImage, rc, RT_SRC_POS, N_("VMDK: cannot write footer after redundant grain table in '%s'"), pExtent->pszFullname);
4244 }
4245 pExtent->uLastGrainSector = 0;
4246 uint8_t aEOS[512];
4247 memset(aEOS, '\0', sizeof(aEOS));
4248 rc = vmdkFileWriteAt(pExtent->pFile,
4249 VMDK_SECTOR2BYTE(uRGTSector) + pExtent->cGTEntries * sizeof(uint32_t) + uEOSOff,
4250 aEOS, sizeof(aEOS), NULL);
4251 if (RT_FAILURE(rc))
4252                 return vmdkError(pExtent->pImage, rc, RT_SRC_POS, N_("VMDK: cannot write end-of-stream marker after redundant grain table in '%s'"), pExtent->pszFullname);
4253 }
4254 /* Normally the redundant grain table is preallocated for hosted
4255 * sparse extents that support more than 32 bit sector numbers. So
4256 * this shouldn't ever happen on a valid extent. */
4257 if (uRGTSector > UINT32_MAX)
4258 return VERR_VD_VMDK_INVALID_HEADER;
4259             /* Write backup grain table by writing the required number of grain
4260              * table cache chunks. This avoids dynamic memory allocation but is a
4261              * bit slower. As this is a pretty infrequently occurring case
4262              * it should be acceptable. */
4263 for (unsigned i = 0;
4264 i < pExtent->cGTEntries / VMDK_GT_CACHELINE_SIZE;
4265 i++)
4266 {
4267 rc = vmdkFileWriteAt(pExtent->pFile,
4268 VMDK_SECTOR2BYTE(uRGTSector) + i * sizeof(aGTDataTmp),
4269 aGTDataTmp, sizeof(aGTDataTmp), NULL);
4270 if (RT_FAILURE(rc))
4271 return vmdkError(pExtent->pImage, rc, RT_SRC_POS, N_("VMDK: cannot write backup grain table allocation in '%s'"), pExtent->pszFullname);
4272 }
4273 }
4274
4275     /* Update the grain directory on disk (doing it before writing the
4276      * grain table would result in a garbled extent if the operation is
4277      * aborted for some reason). Otherwise the worst that can happen is
4278      * some unused sectors in the extent. */
4279     uint32_t uGTSectorLE = RT_H2LE_U32(uGTSector);
4280 rc = vmdkFileWriteAt(pExtent->pFile,
4281 VMDK_SECTOR2BYTE(pExtent->uSectorGD) + uGDIndex * sizeof(uGTSectorLE),
4282 &uGTSectorLE, sizeof(uGTSectorLE), NULL);
4283 if (RT_FAILURE(rc))
4284 return vmdkError(pExtent->pImage, rc, RT_SRC_POS, N_("VMDK: cannot write grain directory entry in '%s'"), pExtent->pszFullname);
4285 if (pExtent->pRGD)
4286 {
4287         uint32_t uRGTSectorLE = RT_H2LE_U32(uRGTSector);
4288 rc = vmdkFileWriteAt(pExtent->pFile,
4289 VMDK_SECTOR2BYTE(pExtent->uSectorRGD) + uGDIndex * sizeof(uRGTSectorLE),
4290 &uRGTSectorLE, sizeof(uRGTSectorLE), NULL);
4291 if (RT_FAILURE(rc))
4292 return vmdkError(pExtent->pImage, rc, RT_SRC_POS, N_("VMDK: cannot write backup grain directory entry in '%s'"), pExtent->pszFullname);
4293 }
4294
4295 /* As the final step update the in-memory copy of the GDs. */
4296 pExtent->pGD[uGDIndex] = uGTSector;
4297 if (pExtent->pRGD)
4298 pExtent->pRGD[uGDIndex] = uRGTSector;
4299 }
4300
4301 rc = vmdkFileGetSize(pExtent->pFile, &cbExtentSize);
4302 if (RT_FAILURE(rc))
4303 return vmdkError(pExtent->pImage, rc, RT_SRC_POS, N_("VMDK: error getting size in '%s'"), pExtent->pszFullname);
4304 Assert(!(cbExtentSize % 512));
4305
4306 /* Write the data. Always a full grain, or we're in big trouble. */
4307 if (pExtent->pImage->uImageFlags & VD_VMDK_IMAGE_FLAGS_STREAM_OPTIMIZED)
4308 {
4309 /* For streamOptimized extents this is a little more difficult, as the
4310          * cached data also needs to be updated to handle updating the last
4311 * written block properly. Also we're trying to avoid unnecessary gaps.
4312 * Additionally the end-of-stream marker needs to be written. */
4313 if (!pExtent->uLastGrainSector)
4314 {
4315 cbExtentSize -= 512;
4316 if (pExtent->fFooter)
4317 cbExtentSize -= 512;
4318 }
4319 else
4320 cbExtentSize = VMDK_SECTOR2BYTE(pExtent->uLastGrainSector) + pExtent->cbLastGrainWritten;
4321 Assert(cbWrite == VMDK_SECTOR2BYTE(pExtent->cSectorsPerGrain));
4322 uint32_t cbGrain = 0;
4323 rc = vmdkFileDeflateAt(pExtent->pFile, cbExtentSize,
4324 pvBuf, cbWrite, VMDK_MARKER_IGNORE, uSector, &cbGrain);
4325 if (RT_FAILURE(rc))
4326 {
4327 pExtent->uGrainSector = 0;
4328 pExtent->uLastGrainSector = 0;
4329 AssertRC(rc);
4330 return vmdkError(pExtent->pImage, rc, RT_SRC_POS, N_("VMDK: cannot write allocated compressed data block in '%s'"), pExtent->pszFullname);
4331 }
4332 cbGrain = RT_ALIGN(cbGrain, 512);
4333 pExtent->uLastGrainSector = VMDK_BYTE2SECTOR(cbExtentSize);
4334 pExtent->uLastGrainWritten = uSector / pExtent->cSectorsPerGrain;
4335 pExtent->cbLastGrainWritten = cbGrain;
4336 memcpy(pExtent->pvGrain, pvBuf, cbWrite);
4337 pExtent->uGrainSector = uSector;
4338
4339 uint64_t uEOSOff = 0;
4340 if (pExtent->fFooter)
4341 {
4342 uEOSOff = 512;
4343 rc = vmdkWriteMetaSparseExtent(pExtent, cbExtentSize + RT_ALIGN(cbGrain, 512));
4344 if (RT_FAILURE(rc))
4345 return vmdkError(pExtent->pImage, rc, RT_SRC_POS, N_("VMDK: cannot write footer after allocated data block in '%s'"), pExtent->pszFullname);
4346 }
4347 uint8_t aEOS[512];
4348 memset(aEOS, '\0', sizeof(aEOS));
4349 rc = vmdkFileWriteAt(pExtent->pFile, cbExtentSize + RT_ALIGN(cbGrain, 512) + uEOSOff,
4350 aEOS, sizeof(aEOS), NULL);
4351 if (RT_FAILURE(rc))
4352             return vmdkError(pExtent->pImage, rc, RT_SRC_POS, N_("VMDK: cannot write end-of-stream marker after allocated data block in '%s'"), pExtent->pszFullname);
4353 }
4354 else
4355 {
4356 rc = vmdkFileWriteAt(pExtent->pFile, cbExtentSize, pvBuf, cbWrite, NULL);
4357 if (RT_FAILURE(rc))
4358 return vmdkError(pExtent->pImage, rc, RT_SRC_POS, N_("VMDK: cannot write allocated data block in '%s'"), pExtent->pszFullname);
4359 }
4360
4361 /* Update the grain table (and the cache). */
4362 uGTBlock = uSector / (pExtent->cSectorsPerGrain * VMDK_GT_CACHELINE_SIZE);
4363 uGTHash = vmdkGTCacheHash(pCache, uGTBlock, pExtent->uExtent);
4364 pGTCacheEntry = &pCache->aGTCache[uGTHash];
4365 if ( pGTCacheEntry->uExtent != pExtent->uExtent
4366 || pGTCacheEntry->uGTBlock != uGTBlock)
4367 {
4368 /* Cache miss, fetch data from disk. */
4369 rc = vmdkFileReadAt(pExtent->pFile,
4370 VMDK_SECTOR2BYTE(uGTSector) + (uGTBlock % (pExtent->cGTEntries / VMDK_GT_CACHELINE_SIZE)) * sizeof(aGTDataTmp),
4371 aGTDataTmp, sizeof(aGTDataTmp), NULL);
4372 if (RT_FAILURE(rc))
4373 return vmdkError(pExtent->pImage, rc, RT_SRC_POS, N_("VMDK: cannot read allocated grain table entry in '%s'"), pExtent->pszFullname);
4374 pGTCacheEntry->uExtent = pExtent->uExtent;
4375 pGTCacheEntry->uGTBlock = uGTBlock;
4376 for (unsigned i = 0; i < VMDK_GT_CACHELINE_SIZE; i++)
4377 pGTCacheEntry->aGTData[i] = RT_LE2H_U32(aGTDataTmp[i]);
4378 }
4379 else
4380 {
4381 /* Cache hit. Convert grain table block back to disk format, otherwise
4382 * the code below will write garbage for all but the updated entry. */
4383 for (unsigned i = 0; i < VMDK_GT_CACHELINE_SIZE; i++)
4384 aGTDataTmp[i] = RT_H2LE_U32(pGTCacheEntry->aGTData[i]);
4385 }
4386 uGTBlockIndex = (uSector / pExtent->cSectorsPerGrain) % VMDK_GT_CACHELINE_SIZE;
4387 aGTDataTmp[uGTBlockIndex] = RT_H2LE_U32(VMDK_BYTE2SECTOR(cbExtentSize));
4388 pGTCacheEntry->aGTData[uGTBlockIndex] = VMDK_BYTE2SECTOR(cbExtentSize);
4389 /* Update grain table on disk. */
4390 rc = vmdkFileWriteAt(pExtent->pFile,
4391 VMDK_SECTOR2BYTE(uGTSector) + (uGTBlock % (pExtent->cGTEntries / VMDK_GT_CACHELINE_SIZE)) * sizeof(aGTDataTmp),
4392 aGTDataTmp, sizeof(aGTDataTmp), NULL);
4393 if (RT_FAILURE(rc))
4394 return vmdkError(pExtent->pImage, rc, RT_SRC_POS, N_("VMDK: cannot write updated grain table in '%s'"), pExtent->pszFullname);
4395 if (pExtent->pRGD)
4396 {
4397 /* Update backup grain table on disk. */
4398 rc = vmdkFileWriteAt(pExtent->pFile,
4399 VMDK_SECTOR2BYTE(uRGTSector) + (uGTBlock % (pExtent->cGTEntries / VMDK_GT_CACHELINE_SIZE)) * sizeof(aGTDataTmp),
4400 aGTDataTmp, sizeof(aGTDataTmp), NULL);
4401 if (RT_FAILURE(rc))
4402 return vmdkError(pExtent->pImage, rc, RT_SRC_POS, N_("VMDK: cannot write updated backup grain table in '%s'"), pExtent->pszFullname);
4403 }
4404#ifdef VBOX_WITH_VMDK_ESX
4405 if (RT_SUCCESS(rc) && pExtent->enmType == VMDKETYPE_ESX_SPARSE)
4406 {
4407 pExtent->uFreeSector = uGTSector + VMDK_BYTE2SECTOR(cbWrite);
4408 pExtent->fMetaDirty = true;
4409 }
4410#endif /* VBOX_WITH_VMDK_ESX */
4411 return rc;
4412}
4413
4414
4415/** @copydoc VBOXHDDBACKEND::pfnCheckIfValid */
4416static int vmdkCheckIfValid(const char *pszFilename, PVDINTERFACE pVDIfsDisk)
4417{
4418 LogFlowFunc(("pszFilename=\"%s\"\n", pszFilename));
4419 int rc = VINF_SUCCESS;
4420 PVMDKIMAGE pImage;
4421
4422 if ( !pszFilename
4423 || !*pszFilename
4424 || strchr(pszFilename, '"'))
4425 {
4426 rc = VERR_INVALID_PARAMETER;
4427 goto out;
4428 }
4429
4430 pImage = (PVMDKIMAGE)RTMemAllocZ(sizeof(VMDKIMAGE));
4431 if (!pImage)
4432 {
4433 rc = VERR_NO_MEMORY;
4434 goto out;
4435 }
4436 pImage->pszFilename = pszFilename;
4437 pImage->pFile = NULL;
4438 pImage->pExtents = NULL;
4439 pImage->pFiles = NULL;
4440 pImage->pGTCache = NULL;
4441 pImage->pDescData = NULL;
4442 pImage->pVDIfsDisk = pVDIfsDisk;
4443 /** @todo speed up this test open (VD_OPEN_FLAGS_INFO) by skipping as
4444 * much as possible in vmdkOpenImage. */
4445 rc = vmdkOpenImage(pImage, VD_OPEN_FLAGS_INFO | VD_OPEN_FLAGS_READONLY);
4446 vmdkFreeImage(pImage, false);
4447 RTMemFree(pImage);
4448
4449out:
4450 LogFlowFunc(("returns %Rrc\n", rc));
4451 return rc;
4452}
4453
4454/** @copydoc VBOXHDDBACKEND::pfnOpen */
4455static int vmdkOpen(const char *pszFilename, unsigned uOpenFlags,
4456 PVDINTERFACE pVDIfsDisk, PVDINTERFACE pVDIfsImage,
4457 void **ppBackendData)
4458{
4459 LogFlowFunc(("pszFilename=\"%s\" uOpenFlags=%#x pVDIfsDisk=%#p pVDIfsImage=%#p ppBackendData=%#p\n", pszFilename, uOpenFlags, pVDIfsDisk, pVDIfsImage, ppBackendData));
4460 int rc;
4461 PVMDKIMAGE pImage;
4462
4463 /* Check open flags. All valid flags are supported. */
4464 if (uOpenFlags & ~VD_OPEN_FLAGS_MASK)
4465 {
4466 rc = VERR_INVALID_PARAMETER;
4467 goto out;
4468 }
4469
4470 /* Check remaining arguments. */
4471 if ( !VALID_PTR(pszFilename)
4472 || !*pszFilename
4473 || strchr(pszFilename, '"'))
4474 {
4475 rc = VERR_INVALID_PARAMETER;
4476 goto out;
4477 }
4478
4479
4480 pImage = (PVMDKIMAGE)RTMemAllocZ(sizeof(VMDKIMAGE));
4481 if (!pImage)
4482 {
4483 rc = VERR_NO_MEMORY;
4484 goto out;
4485 }
4486 pImage->pszFilename = pszFilename;
4487 pImage->pFile = NULL;
4488 pImage->pExtents = NULL;
4489 pImage->pFiles = NULL;
4490 pImage->pGTCache = NULL;
4491 pImage->pDescData = NULL;
4492 pImage->pVDIfsDisk = pVDIfsDisk;
4493
4494 rc = vmdkOpenImage(pImage, uOpenFlags);
4495 if (RT_SUCCESS(rc))
4496 *ppBackendData = pImage;
4497
4498out:
4499 LogFlowFunc(("returns %Rrc (pBackendData=%#p)\n", rc, *ppBackendData));
4500 return rc;
4501}
4502
4503/** @copydoc VBOXHDDBACKEND::pfnCreate */
4504static int vmdkCreate(const char *pszFilename, uint64_t cbSize,
4505 unsigned uImageFlags, const char *pszComment,
4506 PCPDMMEDIAGEOMETRY pPCHSGeometry,
4507 PCPDMMEDIAGEOMETRY pLCHSGeometry, PCRTUUID pUuid,
4508 unsigned uOpenFlags, unsigned uPercentStart,
4509 unsigned uPercentSpan, PVDINTERFACE pVDIfsDisk,
4510 PVDINTERFACE pVDIfsImage, PVDINTERFACE pVDIfsOperation,
4511 void **ppBackendData)
4512{
4513 LogFlowFunc(("pszFilename=\"%s\" cbSize=%llu uImageFlags=%#x pszComment=\"%s\" pPCHSGeometry=%#p pLCHSGeometry=%#p Uuid=%RTuuid uOpenFlags=%#x uPercentStart=%u uPercentSpan=%u pVDIfsDisk=%#p pVDIfsImage=%#p pVDIfsOperation=%#p ppBackendData=%#p", pszFilename, cbSize, uImageFlags, pszComment, pPCHSGeometry, pLCHSGeometry, pUuid, uOpenFlags, uPercentStart, uPercentSpan, pVDIfsDisk, pVDIfsImage, pVDIfsOperation, ppBackendData));
4514 int rc;
4515 PVMDKIMAGE pImage;
4516
4517 PFNVMPROGRESS pfnProgress = NULL;
4518 void *pvUser = NULL;
4519 PVDINTERFACE pIfProgress = VDInterfaceGet(pVDIfsOperation,
4520 VDINTERFACETYPE_PROGRESS);
4521 PVDINTERFACEPROGRESS pCbProgress = NULL;
4522 if (pIfProgress)
4523 {
4524 pCbProgress = VDGetInterfaceProgress(pIfProgress);
4525 pfnProgress = pCbProgress->pfnProgress;
4526 pvUser = pIfProgress->pvUser;
4527 }
4528
4529 /* Check open flags. All valid flags are supported. */
4530 if (uOpenFlags & ~VD_OPEN_FLAGS_MASK)
4531 {
4532 rc = VERR_INVALID_PARAMETER;
4533 goto out;
4534 }
4535
4536 /* Check size. Maximum 2TB-64K for sparse images, otherwise unlimited. */
4537 if ( !cbSize
4538 || (!(uImageFlags & VD_IMAGE_FLAGS_FIXED) && cbSize >= _1T * 2 - _64K))
4539 {
4540 rc = VERR_VD_INVALID_SIZE;
4541 goto out;
4542 }
4543
4544 /* Check remaining arguments. */
4545 if ( !VALID_PTR(pszFilename)
4546 || !*pszFilename
4547 || strchr(pszFilename, '"')
4548 || !VALID_PTR(pPCHSGeometry)
4549 || !VALID_PTR(pLCHSGeometry)
4550#ifndef VBOX_WITH_VMDK_ESX
4551 || ( uImageFlags & VD_VMDK_IMAGE_FLAGS_ESX
4552 && !(uImageFlags & VD_IMAGE_FLAGS_FIXED))
4553#endif
4554 || ( (uImageFlags & VD_VMDK_IMAGE_FLAGS_STREAM_OPTIMIZED)
4555 && (uImageFlags & ~(VD_VMDK_IMAGE_FLAGS_STREAM_OPTIMIZED | VD_IMAGE_FLAGS_DIFF))))
4556 {
4557 rc = VERR_INVALID_PARAMETER;
4558 goto out;
4559 }
4560
4561 pImage = (PVMDKIMAGE)RTMemAllocZ(sizeof(VMDKIMAGE));
4562 if (!pImage)
4563 {
4564 rc = VERR_NO_MEMORY;
4565 goto out;
4566 }
4567 pImage->pszFilename = pszFilename;
4568 pImage->pFile = NULL;
4569 pImage->pExtents = NULL;
4570 pImage->pFiles = NULL;
4571 pImage->pGTCache = NULL;
4572 pImage->pDescData = NULL;
4573 pImage->pVDIfsDisk = NULL;
4574 /* Descriptors for split images can be pretty large, especially if the
4575 * filename is long. So prepare for the worst, and allocate quite some
4576 * memory for the descriptor in this case. */
4577 if (uImageFlags & VD_VMDK_IMAGE_FLAGS_SPLIT_2G)
4578 pImage->cbDescAlloc = VMDK_SECTOR2BYTE(200);
4579 else
4580 pImage->cbDescAlloc = VMDK_SECTOR2BYTE(20);
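    /* With VMDK's 512-byte sectors this reserves 100 KiB (200 sectors) for the
     * descriptor of a split image and 10 KiB (20 sectors) otherwise. */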
4581 pImage->pDescData = (char *)RTMemAllocZ(pImage->cbDescAlloc);
4582 if (!pImage->pDescData)
4583 {
4584 rc = VERR_NO_MEMORY;
4585 goto out;
4586 }
4587
4588 rc = vmdkCreateImage(pImage, cbSize, uImageFlags, pszComment,
4589 pPCHSGeometry, pLCHSGeometry, pUuid,
4590 pfnProgress, pvUser, uPercentStart, uPercentSpan);
4591 if (RT_SUCCESS(rc))
4592 {
4593 /* So far the image is opened in read/write mode. Make sure the
4594 * image is opened in read-only mode if the caller requested that. */
4595 if (uOpenFlags & VD_OPEN_FLAGS_READONLY)
4596 {
4597 vmdkFreeImage(pImage, false);
4598 rc = vmdkOpenImage(pImage, uOpenFlags);
4599 if (RT_FAILURE(rc))
4600 goto out;
4601 }
4602 *ppBackendData = pImage;
4603 }
4604 else
4605 {
4606 RTMemFree(pImage->pDescData);
4607 RTMemFree(pImage);
4608 }
4609
4610out:
4611 LogFlowFunc(("returns %Rrc (pBackendData=%#p)\n", rc, *ppBackendData));
4612 return rc;
4613}
4614
4615/**
4616  * Replaces the first occurrence of a fragment of a string with the specified string.
4617 *
4618 * @returns Pointer to the allocated UTF-8 string.
4619 * @param pszWhere UTF-8 string to search in.
4620 * @param pszWhat UTF-8 string to search for.
4621 * @param pszByWhat UTF-8 string to replace the found string with.
4622 */
4623static char * vmdkStrReplace(const char *pszWhere, const char *pszWhat, const char *pszByWhat)
4624{
4625 AssertPtr(pszWhere);
4626 AssertPtr(pszWhat);
4627 AssertPtr(pszByWhat);
4628 const char *pszFoundStr = strstr(pszWhere, pszWhat);
4629 if (!pszFoundStr)
4630 return NULL;
4631 size_t cFinal = strlen(pszWhere) + 1 + strlen(pszByWhat) - strlen(pszWhat);
4632 char *pszNewStr = (char *)RTMemAlloc(cFinal);
4633 if (pszNewStr)
4634 {
4635 char *pszTmp = pszNewStr;
4636 memcpy(pszTmp, pszWhere, pszFoundStr - pszWhere);
4637 pszTmp += pszFoundStr - pszWhere;
4638 memcpy(pszTmp, pszByWhat, strlen(pszByWhat));
4639 pszTmp += strlen(pszByWhat);
4640 strcpy(pszTmp, pszFoundStr + strlen(pszWhat));
4641 }
4642 return pszNewStr;
4643}
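/* Usage sketch (made-up strings): replacing the old base name in a descriptor
 * extent line, e.g.
 *   vmdkStrReplace("RW 4192256 SPARSE \"old-s001.vmdk\"", "old", "new")
 * returns a newly allocated "RW 4192256 SPARSE \"new-s001.vmdk\"" which the
 * caller must free. Only the first match is replaced; NULL is returned if
 * pszWhat does not occur in pszWhere or if the allocation fails. */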
4644
4645/** @copydoc VBOXHDDBACKEND::pfnRename */
4646static int vmdkRename(void *pBackendData, const char *pszFilename)
4647{
4648 LogFlowFunc(("pBackendData=%#p pszFilename=%#p\n", pBackendData, pszFilename));
4649
4650 PVMDKIMAGE pImage = (PVMDKIMAGE)pBackendData;
4651 int rc = VINF_SUCCESS;
4652 char **apszOldName = NULL;
4653 char **apszNewName = NULL;
4654 char **apszNewLines = NULL;
4655 char *pszOldDescName = NULL;
4656 bool fImageFreed = false;
4657 bool fEmbeddedDesc = false;
4658 unsigned cExtents = pImage->cExtents;
4659 char *pszNewBaseName = NULL;
4660 char *pszOldBaseName = NULL;
4661 char *pszNewFullName = NULL;
4662 char *pszOldFullName = NULL;
4663 const char *pszOldImageName;
4664 unsigned i, line;
4665 VMDKDESCRIPTOR DescriptorCopy;
4666 VMDKEXTENT ExtentCopy;
4667
4668 memset(&DescriptorCopy, 0, sizeof(DescriptorCopy));
4669
4670 /* Check arguments. */
4671 if ( !pImage
4672 || (pImage->uImageFlags & VD_VMDK_IMAGE_FLAGS_RAWDISK)
4673 || !VALID_PTR(pszFilename)
4674 || !*pszFilename)
4675 {
4676 rc = VERR_INVALID_PARAMETER;
4677 goto out;
4678 }
4679
4680 /*
4681 * Allocate an array to store both old and new names of renamed files
4682 * in case we have to roll back the changes. Arrays are initialized
4683 * with zeros. We actually save stuff when and if we change it.
4684 */
4685 apszOldName = (char **)RTMemTmpAllocZ((cExtents + 1) * sizeof(char*));
4686 apszNewName = (char **)RTMemTmpAllocZ((cExtents + 1) * sizeof(char*));
4687 apszNewLines = (char **)RTMemTmpAllocZ((cExtents) * sizeof(char*));
4688 if (!apszOldName || !apszNewName || !apszNewLines)
4689 {
4690 rc = VERR_NO_MEMORY;
4691 goto out;
4692 }
4693
4694 /* Save the descriptor size and position. */
4695 if (pImage->pDescData)
4696 {
4697 /* Separate descriptor file. */
4698 fEmbeddedDesc = false;
4699 }
4700 else
4701 {
4702 /* Embedded descriptor file. */
4703 ExtentCopy = pImage->pExtents[0];
4704 fEmbeddedDesc = true;
4705 }
4706 /* Save the descriptor content. */
4707 DescriptorCopy.cLines = pImage->Descriptor.cLines;
4708 for (i = 0; i < DescriptorCopy.cLines; i++)
4709 {
4710 DescriptorCopy.aLines[i] = RTStrDup(pImage->Descriptor.aLines[i]);
4711 if (!DescriptorCopy.aLines[i])
4712 {
4713 rc = VERR_NO_MEMORY;
4714 goto out;
4715 }
4716 }
4717
4718 /* Prepare both old and new base names used for string replacement. */
4719 pszNewBaseName = RTStrDup(RTPathFilename(pszFilename));
4720 RTPathStripExt(pszNewBaseName);
4721 pszOldBaseName = RTStrDup(RTPathFilename(pImage->pszFilename));
4722 RTPathStripExt(pszOldBaseName);
4723 /* Prepare both old and new full names used for string replacement. */
4724 pszNewFullName = RTStrDup(pszFilename);
4725 RTPathStripExt(pszNewFullName);
4726 pszOldFullName = RTStrDup(pImage->pszFilename);
4727 RTPathStripExt(pszOldFullName);
4728
4729 /* --- Up to this point we have not done any damage yet. --- */
4730
4731 /* Save the old name for easy access to the old descriptor file. */
4732 pszOldDescName = RTStrDup(pImage->pszFilename);
4733 /* Save old image name. */
4734 pszOldImageName = pImage->pszFilename;
4735
4736 /* Update the descriptor with modified extent names. */
4737 for (i = 0, line = pImage->Descriptor.uFirstExtent;
4738 i < cExtents;
4739 i++, line = pImage->Descriptor.aNextLines[line])
4740 {
4741 /* Assume that vmdkStrReplace will fail. */
4742 rc = VERR_NO_MEMORY;
4743 /* Update the descriptor. */
4744 apszNewLines[i] = vmdkStrReplace(pImage->Descriptor.aLines[line],
4745 pszOldBaseName, pszNewBaseName);
4746 if (!apszNewLines[i])
4747 goto rollback;
4748 pImage->Descriptor.aLines[line] = apszNewLines[i];
4749 }
4750 /* Make sure the descriptor gets written back. */
4751 pImage->Descriptor.fDirty = true;
4752 /* Flush the descriptor now, in case it is embedded. */
4753 (void)vmdkFlushImage(pImage);
4754
4755 /* Close and rename/move extents. */
4756 for (i = 0; i < cExtents; i++)
4757 {
4758 PVMDKEXTENT pExtent = &pImage->pExtents[i];
4759 /* Compose new name for the extent. */
4760 apszNewName[i] = vmdkStrReplace(pExtent->pszFullname,
4761 pszOldFullName, pszNewFullName);
4762 if (!apszNewName[i])
4763 goto rollback;
4764 /* Close the extent file. */
4765 vmdkFileClose(pImage, &pExtent->pFile, false);
4766 /* Rename the extent file. */
4767 rc = RTFileMove(pExtent->pszFullname, apszNewName[i], 0);
4768 if (RT_FAILURE(rc))
4769 goto rollback;
4770 /* Remember the old name. */
4771 apszOldName[i] = RTStrDup(pExtent->pszFullname);
4772 }
4773 /* Release all old stuff. */
4774 vmdkFreeImage(pImage, false);
4775
4776 fImageFreed = true;
4777
4778     /* The last elements of the new/old name arrays are intended for
4779      * storing the descriptor file's names.
4780      */
4781 apszNewName[cExtents] = RTStrDup(pszFilename);
4782 /* Rename the descriptor file if it's separate. */
4783 if (!fEmbeddedDesc)
4784 {
4785 rc = RTFileMove(pImage->pszFilename, apszNewName[cExtents], 0);
4786 if (RT_FAILURE(rc))
4787 goto rollback;
4788 /* Save old name only if we may need to change it back. */
4789         apszOldName[cExtents] = RTStrDup(pszOldImageName);
4790 }
4791
4792 /* Update pImage with the new information. */
4793 pImage->pszFilename = pszFilename;
4794
4795 /* Open the new image. */
4796 rc = vmdkOpenImage(pImage, pImage->uOpenFlags);
4797 if (RT_SUCCESS(rc))
4798 goto out;
4799
4800rollback:
4801 /* Roll back all changes in case of failure. */
4802 if (RT_FAILURE(rc))
4803 {
4804 int rrc;
4805 if (!fImageFreed)
4806 {
4807 /*
4808 * Some extents may have been closed, close the rest. We will
4809 * re-open the whole thing later.
4810 */
4811 vmdkFreeImage(pImage, false);
4812 }
4813 /* Rename files back. */
4814 for (i = 0; i <= cExtents; i++)
4815 {
4816 if (apszOldName[i])
4817 {
4818 rrc = RTFileMove(apszNewName[i], apszOldName[i], 0);
4819 AssertRC(rrc);
4820 }
4821 }
4822 /* Restore the old descriptor. */
4823 PVMDKFILE pFile;
4824 rrc = vmdkFileOpen(pImage, &pFile, pszOldDescName,
4825 RTFILE_O_READWRITE | RTFILE_O_OPEN | RTFILE_O_DENY_WRITE, false);
4826 AssertRC(rrc);
4827 if (fEmbeddedDesc)
4828 {
4829 ExtentCopy.pFile = pFile;
4830 pImage->pExtents = &ExtentCopy;
4831 }
4832 else
4833 {
4834             /* Must not be NULL for a separate descriptor, but the actual
4835              * content is never accessed, so any non-NULL pointer will do.
4836              */
4837 pImage->pDescData = pszOldDescName;
4838 pImage->pFile = pFile;
4839 }
4840 pImage->Descriptor = DescriptorCopy;
4841 vmdkWriteDescriptor(pImage);
4842 vmdkFileClose(pImage, &pFile, false);
4843 /* Get rid of the stuff we implanted. */
4844 pImage->pExtents = NULL;
4845 pImage->pFile = NULL;
4846 pImage->pDescData = NULL;
4847 /* Re-open the image back. */
4848 pImage->pszFilename = pszOldImageName;
4849 rrc = vmdkOpenImage(pImage, pImage->uOpenFlags);
4850 AssertRC(rrc);
4851 }
4852
4853out:
4854 for (i = 0; i < DescriptorCopy.cLines; i++)
4855 if (DescriptorCopy.aLines[i])
4856 RTStrFree(DescriptorCopy.aLines[i]);
4857 if (apszOldName)
4858 {
4859 for (i = 0; i <= cExtents; i++)
4860 if (apszOldName[i])
4861 RTStrFree(apszOldName[i]);
4862 RTMemTmpFree(apszOldName);
4863 }
4864 if (apszNewName)
4865 {
4866 for (i = 0; i <= cExtents; i++)
4867 if (apszNewName[i])
4868 RTStrFree(apszNewName[i]);
4869 RTMemTmpFree(apszNewName);
4870 }
4871 if (apszNewLines)
4872 {
4873 for (i = 0; i < cExtents; i++)
4874 if (apszNewLines[i])
4875 RTStrFree(apszNewLines[i]);
4876 RTMemTmpFree(apszNewLines);
4877 }
4878 if (pszOldDescName)
4879 RTStrFree(pszOldDescName);
4880 if (pszOldBaseName)
4881 RTStrFree(pszOldBaseName);
4882 if (pszNewBaseName)
4883 RTStrFree(pszNewBaseName);
4884 if (pszOldFullName)
4885 RTStrFree(pszOldFullName);
4886 if (pszNewFullName)
4887 RTStrFree(pszNewFullName);
4888 LogFlowFunc(("returns %Rrc\n", rc));
4889 return rc;
4890}
4891
4892/** @copydoc VBOXHDDBACKEND::pfnClose */
4893static int vmdkClose(void *pBackendData, bool fDelete)
4894{
4895 LogFlowFunc(("pBackendData=%#p fDelete=%d\n", pBackendData, fDelete));
4896 PVMDKIMAGE pImage = (PVMDKIMAGE)pBackendData;
4897 int rc = VINF_SUCCESS;
4898
4899 /* Freeing a never allocated image (e.g. because the open failed) is
4900 * not signalled as an error. After all nothing bad happens. */
4901 if (pImage)
4902 {
4903 vmdkFreeImage(pImage, fDelete);
4904 RTMemFree(pImage);
4905 }
4906
4907 LogFlowFunc(("returns %Rrc\n", rc));
4908 return rc;
4909}
4910
4911/** @copydoc VBOXHDDBACKEND::pfnRead */
4912static int vmdkRead(void *pBackendData, uint64_t uOffset, void *pvBuf,
4913 size_t cbToRead, size_t *pcbActuallyRead)
4914{
4915 LogFlowFunc(("pBackendData=%#p uOffset=%llu pvBuf=%#p cbToRead=%zu pcbActuallyRead=%#p\n", pBackendData, uOffset, pvBuf, cbToRead, pcbActuallyRead));
4916 PVMDKIMAGE pImage = (PVMDKIMAGE)pBackendData;
4917 PVMDKEXTENT pExtent;
4918 uint64_t uSectorExtentRel;
4919 uint64_t uSectorExtentAbs;
4920 int rc;
4921
4922 AssertPtr(pImage);
4923 Assert(uOffset % 512 == 0);
4924 Assert(cbToRead % 512 == 0);
4925
4926 if ( uOffset + cbToRead > pImage->cbSize
4927 || cbToRead == 0)
4928 {
4929 rc = VERR_INVALID_PARAMETER;
4930 goto out;
4931 }
4932
4933 rc = vmdkFindExtent(pImage, VMDK_BYTE2SECTOR(uOffset),
4934 &pExtent, &uSectorExtentRel);
4935 if (RT_FAILURE(rc))
4936 goto out;
4937
4938 /* Check access permissions as defined in the extent descriptor. */
4939 if (pExtent->enmAccess == VMDKACCESS_NOACCESS)
4940 {
4941 rc = VERR_VD_VMDK_INVALID_STATE;
4942 goto out;
4943 }
4944
4945 /* Clip read range to remain in this extent. */
4946 cbToRead = RT_MIN(cbToRead, VMDK_SECTOR2BYTE(pExtent->uSectorOffset + pExtent->cNominalSectors - uSectorExtentRel));
4947
4948 /* Handle the read according to the current extent type. */
4949 switch (pExtent->enmType)
4950 {
4951 case VMDKETYPE_HOSTED_SPARSE:
4952#ifdef VBOX_WITH_VMDK_ESX
4953 case VMDKETYPE_ESX_SPARSE:
4954#endif /* VBOX_WITH_VMDK_ESX */
4955 rc = vmdkGetSector(pImage->pGTCache, pExtent, uSectorExtentRel,
4956 &uSectorExtentAbs);
4957 if (RT_FAILURE(rc))
4958 goto out;
4959 /* Clip read range to at most the rest of the grain. */
4960 cbToRead = RT_MIN(cbToRead, VMDK_SECTOR2BYTE(pExtent->cSectorsPerGrain - uSectorExtentRel % pExtent->cSectorsPerGrain));
4961 Assert(!(cbToRead % 512));
4962 if (uSectorExtentAbs == 0)
4963 rc = VERR_VD_BLOCK_FREE;
4964 else
4965 {
4966 if (pImage->uImageFlags & VD_VMDK_IMAGE_FLAGS_STREAM_OPTIMIZED)
4967 {
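 /* Stream optimized images store each grain deflated. Decompress the
 * whole grain into the per-extent cache (pvGrain) unless it is already
 * cached, then copy out just the requested range. */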
4968 uint32_t uSectorInGrain = uSectorExtentRel % pExtent->cSectorsPerGrain;
4969 uSectorExtentAbs -= uSectorInGrain;
4970 uint64_t uLBA;
4971 if (pExtent->uGrainSector != uSectorExtentAbs)
4972 {
4973 rc = vmdkFileInflateAt(pExtent->pFile, VMDK_SECTOR2BYTE(uSectorExtentAbs),
4974 pExtent->pvGrain, VMDK_SECTOR2BYTE(pExtent->cSectorsPerGrain), VMDK_MARKER_IGNORE, &uLBA, NULL);
4975 if (RT_FAILURE(rc))
4976 {
4977 pExtent->uGrainSector = 0;
4978 AssertRC(rc);
4979 goto out;
4980 }
4981 pExtent->uGrainSector = uSectorExtentAbs;
4982 Assert(uLBA == uSectorExtentRel);
4983 }
4984 memcpy(pvBuf, (uint8_t *)pExtent->pvGrain + VMDK_SECTOR2BYTE(uSectorInGrain), cbToRead);
4985 }
4986 else
4987 {
4988 rc = vmdkFileReadAt(pExtent->pFile,
4989 VMDK_SECTOR2BYTE(uSectorExtentAbs),
4990 pvBuf, cbToRead, NULL);
4991 }
4992 }
4993 break;
4994 case VMDKETYPE_VMFS:
4995 case VMDKETYPE_FLAT:
4996 rc = vmdkFileReadAt(pExtent->pFile,
4997 VMDK_SECTOR2BYTE(uSectorExtentRel),
4998 pvBuf, cbToRead, NULL);
4999 break;
5000 case VMDKETYPE_ZERO:
5001 memset(pvBuf, '\0', cbToRead);
5002 break;
5003 }
5004 if (pcbActuallyRead)
5005 *pcbActuallyRead = cbToRead;
5006
5007out:
5008 LogFlowFunc(("returns %Rrc\n", rc));
5009 return rc;
5010}
5011
5012/** @copydoc VBOXHDDBACKEND::pfnWrite */
5013static int vmdkWrite(void *pBackendData, uint64_t uOffset, const void *pvBuf,
5014 size_t cbToWrite, size_t *pcbWriteProcess,
5015 size_t *pcbPreRead, size_t *pcbPostRead, unsigned fWrite)
5016{
5017 LogFlowFunc(("pBackendData=%#p uOffset=%llu pvBuf=%#p cbToWrite=%zu pcbWriteProcess=%#p pcbPreRead=%#p pcbPostRead=%#p\n", pBackendData, uOffset, pvBuf, cbToWrite, pcbWriteProcess, pcbPreRead, pcbPostRead));
5018 PVMDKIMAGE pImage = (PVMDKIMAGE)pBackendData;
5019 PVMDKEXTENT pExtent;
5020 uint64_t uSectorExtentRel;
5021 uint64_t uSectorExtentAbs;
5022 int rc;
5023
5024 AssertPtr(pImage);
5025 Assert(uOffset % 512 == 0);
5026 Assert(cbToWrite % 512 == 0);
5027
5028 if (pImage->uOpenFlags & VD_OPEN_FLAGS_READONLY)
5029 {
5030 rc = VERR_VD_IMAGE_READ_ONLY;
5031 goto out;
5032 }
5033
5034 if (cbToWrite == 0)
5035 {
5036 rc = VERR_INVALID_PARAMETER;
5037 goto out;
5038 }
5039
5040 /* No size check here, will do that later when the extent is located.
5041 * There are sparse images out there which according to the spec are
5042 * invalid, because the total size is not a multiple of the grain size.
5043 * Also for sparse images which are stitched together in odd ways (not at
5044 * grain boundaries, and with the nominal size not being a multiple of the
5045 * grain size), this would prevent writing to the last grain. */
5046
5047 rc = vmdkFindExtent(pImage, VMDK_BYTE2SECTOR(uOffset),
5048 &pExtent, &uSectorExtentRel);
5049 if (RT_FAILURE(rc))
5050 goto out;
5051
5052 /* Check access permissions as defined in the extent descriptor. */
5053 if (pExtent->enmAccess != VMDKACCESS_READWRITE)
5054 {
5055 rc = VERR_VD_VMDK_INVALID_STATE;
5056 goto out;
5057 }
5058
5059 /* Handle the write according to the current extent type. */
5060 switch (pExtent->enmType)
5061 {
5062 case VMDKETYPE_HOSTED_SPARSE:
5063#ifdef VBOX_WITH_VMDK_ESX
5064 case VMDKETYPE_ESX_SPARSE:
5065#endif /* VBOX_WITH_VMDK_ESX */
5066 rc = vmdkGetSector(pImage->pGTCache, pExtent, uSectorExtentRel,
5067 &uSectorExtentAbs);
5068 if (RT_FAILURE(rc))
5069 goto out;
5070 /* Clip write range to at most the rest of the grain. */
5071 cbToWrite = RT_MIN(cbToWrite, VMDK_SECTOR2BYTE(pExtent->cSectorsPerGrain - uSectorExtentRel % pExtent->cSectorsPerGrain));
5072 if ( pImage->uImageFlags & VD_VMDK_IMAGE_FLAGS_STREAM_OPTIMIZED
5073 && uSectorExtentRel < (uint64_t)pExtent->uLastGrainWritten * pExtent->cSectorsPerGrain)
5074 {
5075 rc = VERR_VD_VMDK_INVALID_WRITE;
5076 goto out;
5077 }
5078 if (uSectorExtentAbs == 0)
5079 {
5080 if (cbToWrite == VMDK_SECTOR2BYTE(pExtent->cSectorsPerGrain))
5081 {
5082 /* Full block write to a previously unallocated block.
5083 * Check if the caller wants to avoid the automatic alloc. */
5084 if (!(fWrite & VD_WRITE_NO_ALLOC))
5085 {
5086 /* Allocate GT and find out where to store the grain. */
5087 rc = vmdkAllocGrain(pImage->pGTCache, pExtent,
5088 uSectorExtentRel, pvBuf, cbToWrite);
5089 }
5090 else
5091 rc = VERR_VD_BLOCK_FREE;
5092 *pcbPreRead = 0;
5093 *pcbPostRead = 0;
5094 }
5095 else
5096 {
5097 /* Clip write range to remain in this extent. */
5098 cbToWrite = RT_MIN(cbToWrite, VMDK_SECTOR2BYTE(pExtent->uSectorOffset + pExtent->cNominalSectors - uSectorExtentRel));
5099 *pcbPreRead = VMDK_SECTOR2BYTE(uSectorExtentRel % pExtent->cSectorsPerGrain);
5100 *pcbPostRead = VMDK_SECTOR2BYTE(pExtent->cSectorsPerGrain) - cbToWrite - *pcbPreRead;
5101 rc = VERR_VD_BLOCK_FREE;
5102 }
5103 }
5104 else
5105 {
5106 if (pImage->uImageFlags & VD_VMDK_IMAGE_FLAGS_STREAM_OPTIMIZED)
5107 {
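 /* A partial write to a compressed grain requires recompressing the
 * whole grain: load it into the cache (unless already cached), merge
 * in the new data and deflate it back to the same file offset. */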
5108 uint32_t uSectorInGrain = uSectorExtentRel % pExtent->cSectorsPerGrain;
5109 uSectorExtentAbs -= uSectorInGrain;
5110 uint64_t uLBA = uSectorExtentRel;
5111 if ( pExtent->uGrainSector != uSectorExtentAbs
5112 || pExtent->uGrainSector != pExtent->uLastGrainSector)
5113 {
5114 rc = vmdkFileInflateAt(pExtent->pFile, VMDK_SECTOR2BYTE(uSectorExtentAbs),
5115 pExtent->pvGrain, VMDK_SECTOR2BYTE(pExtent->cSectorsPerGrain), VMDK_MARKER_IGNORE, &uLBA, NULL);
5116 if (RT_FAILURE(rc))
5117 {
5118 pExtent->uGrainSector = 0;
5119 pExtent->uLastGrainSector = 0;
5120 AssertRC(rc);
5121 goto out;
5122 }
5123 pExtent->uGrainSector = uSectorExtentAbs;
5124 pExtent->uLastGrainSector = uSectorExtentAbs;
5125 Assert(uLBA == uSectorExtentRel);
5126 }
5127 memcpy((uint8_t *)pExtent->pvGrain + VMDK_SECTOR2BYTE(uSectorInGrain), pvBuf, cbToWrite);
5128 uint32_t cbGrain = 0;
5129 rc = vmdkFileDeflateAt(pExtent->pFile,
5130 VMDK_SECTOR2BYTE(uSectorExtentAbs),
5131 pExtent->pvGrain, VMDK_SECTOR2BYTE(pExtent->cSectorsPerGrain),
5132 VMDK_MARKER_IGNORE, uLBA, &cbGrain);
5133 if (RT_FAILURE(rc))
5134 {
5135 pExtent->uGrainSector = 0;
5136 pExtent->uLastGrainSector = 0;
5137 AssertRC(rc);
5138 return vmdkError(pExtent->pImage, rc, RT_SRC_POS, N_("VMDK: cannot write compressed data block in '%s'"), pExtent->pszFullname);
5139 }
5140 cbGrain = RT_ALIGN(cbGrain, 512);
5141 pExtent->uLastGrainSector = uSectorExtentAbs;
5142 pExtent->uLastGrainWritten = uSectorExtentRel / pExtent->cSectorsPerGrain;
5143 pExtent->cbLastGrainWritten = cbGrain;
5144
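 /* Keep the stream format valid behind the last grain: rewrite the
 * footer (if this extent uses one) and a zeroed 512-byte
 * end-of-stream marker right after the compressed data. */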
5145 uint64_t uEOSOff = 0;
5146 if (pExtent->fFooter)
5147 {
5148 uEOSOff = 512;
5149 rc = vmdkWriteMetaSparseExtent(pExtent, VMDK_SECTOR2BYTE(uSectorExtentAbs) + RT_ALIGN(cbGrain, 512));
5150 if (RT_FAILURE(rc))
5151 return vmdkError(pExtent->pImage, rc, RT_SRC_POS, N_("VMDK: cannot write footer after data block in '%s'"), pExtent->pszFullname);
5152 }
5153 uint8_t aEOS[512];
5154 memset(aEOS, '\0', sizeof(aEOS));
5155 rc = vmdkFileWriteAt(pExtent->pFile,
5156 VMDK_SECTOR2BYTE(uSectorExtentAbs) + RT_ALIGN(cbGrain, 512) + uEOSOff,
5157 aEOS, sizeof(aEOS), NULL);
5158 if (RT_FAILURE(rc))
5159 return vmdkError(pExtent->pImage, rc, RT_SRC_POS, N_("VMDK: cannot write end-of-stream marker after data block in '%s'"), pExtent->pszFullname);
5160 }
5161 else
5162 {
5163 rc = vmdkFileWriteAt(pExtent->pFile,
5164 VMDK_SECTOR2BYTE(uSectorExtentAbs),
5165 pvBuf, cbToWrite, NULL);
5166 }
5167 }
5168 break;
5169 case VMDKETYPE_VMFS:
5170 case VMDKETYPE_FLAT:
5171 /* Clip write range to remain in this extent. */
5172 cbToWrite = RT_MIN(cbToWrite, VMDK_SECTOR2BYTE(pExtent->uSectorOffset + pExtent->cNominalSectors - uSectorExtentRel));
5173 rc = vmdkFileWriteAt(pExtent->pFile,
5174 VMDK_SECTOR2BYTE(uSectorExtentRel),
5175 pvBuf, cbToWrite, NULL);
5176 break;
5177 case VMDKETYPE_ZERO:
5178 /* Clip write range to remain in this extent. */
5179 cbToWrite = RT_MIN(cbToWrite, VMDK_SECTOR2BYTE(pExtent->uSectorOffset + pExtent->cNominalSectors - uSectorExtentRel));
5180 break;
5181 }
5182 if (pcbWriteProcess)
5183 *pcbWriteProcess = cbToWrite;
5184
5185out:
5186 LogFlowFunc(("returns %Rrc\n", rc));
5187 return rc;
5188}
5189
5190/** @copydoc VBOXHDDBACKEND::pfnFlush */
5191static int vmdkFlush(void *pBackendData)
5192{
5193 LogFlowFunc(("pBackendData=%#p\n", pBackendData));
5194 PVMDKIMAGE pImage = (PVMDKIMAGE)pBackendData;
5195 int rc;
5196
5197 AssertPtr(pImage);
5198
5199 rc = vmdkFlushImage(pImage);
5200 LogFlowFunc(("returns %Rrc\n", rc));
5201 return rc;
5202}
5203
5204/** @copydoc VBOXHDDBACKEND::pfnGetVersion */
5205static unsigned vmdkGetVersion(void *pBackendData)
5206{
5207 LogFlowFunc(("pBackendData=%#p\n", pBackendData));
5208 PVMDKIMAGE pImage = (PVMDKIMAGE)pBackendData;
5209
5210 AssertPtr(pImage);
5211
5212 if (pImage)
5213 return VMDK_IMAGE_VERSION;
5214 else
5215 return 0;
5216}
5217
5218/** @copydoc VBOXHDDBACKEND::pfnGetSize */
5219static uint64_t vmdkGetSize(void *pBackendData)
5220{
5221 LogFlowFunc(("pBackendData=%#p\n", pBackendData));
5222 PVMDKIMAGE pImage = (PVMDKIMAGE)pBackendData;
5223
5224 AssertPtr(pImage);
5225
5226 if (pImage)
5227 return pImage->cbSize;
5228 else
5229 return 0;
5230}
5231
5232/** @copydoc VBOXHDDBACKEND::pfnGetFileSize */
5233static uint64_t vmdkGetFileSize(void *pBackendData)
5234{
5235 LogFlowFunc(("pBackendData=%#p\n", pBackendData));
5236 PVMDKIMAGE pImage = (PVMDKIMAGE)pBackendData;
5237 uint64_t cb = 0;
5238
5239 AssertPtr(pImage);
5240
5241 if (pImage)
5242 {
5243 uint64_t cbFile;
5244 if (pImage->pFile != NULL)
5245 {
5246 int rc = vmdkFileGetSize(pImage->pFile, &cbFile);
5247 if (RT_SUCCESS(rc))
5248 cb += cbFile;
5249 }
5250 for (unsigned i = 0; i < pImage->cExtents; i++)
5251 {
5252 if (pImage->pExtents[i].pFile != NULL)
5253 {
5254 int rc = vmdkFileGetSize(pImage->pExtents[i].pFile, &cbFile);
5255 if (RT_SUCCESS(rc))
5256 cb += cbFile;
5257 }
5258 }
5259 }
5260
5261 LogFlowFunc(("returns %lld\n", cb));
5262 return cb;
5263}
5264
5265/** @copydoc VBOXHDDBACKEND::pfnGetPCHSGeometry */
5266static int vmdkGetPCHSGeometry(void *pBackendData,
5267 PPDMMEDIAGEOMETRY pPCHSGeometry)
5268{
5269 LogFlowFunc(("pBackendData=%#p pPCHSGeometry=%#p\n", pBackendData, pPCHSGeometry));
5270 PVMDKIMAGE pImage = (PVMDKIMAGE)pBackendData;
5271 int rc;
5272
5273 AssertPtr(pImage);
5274
5275 if (pImage)
5276 {
5277 if (pImage->PCHSGeometry.cCylinders)
5278 {
5279 *pPCHSGeometry = pImage->PCHSGeometry;
5280 rc = VINF_SUCCESS;
5281 }
5282 else
5283 rc = VERR_VD_GEOMETRY_NOT_SET;
5284 }
5285 else
5286 rc = VERR_VD_NOT_OPENED;
5287
5288 LogFlowFunc(("returns %Rrc (PCHS=%u/%u/%u)\n", rc, pPCHSGeometry->cCylinders, pPCHSGeometry->cHeads, pPCHSGeometry->cSectors));
5289 return rc;
5290}
5291
5292/** @copydoc VBOXHDDBACKEND::pfnSetPCHSGeometry */
5293static int vmdkSetPCHSGeometry(void *pBackendData,
5294 PCPDMMEDIAGEOMETRY pPCHSGeometry)
5295{
5296 LogFlowFunc(("pBackendData=%#p pPCHSGeometry=%#p PCHS=%u/%u/%u\n", pBackendData, pPCHSGeometry, pPCHSGeometry->cCylinders, pPCHSGeometry->cHeads, pPCHSGeometry->cSectors));
5297 PVMDKIMAGE pImage = (PVMDKIMAGE)pBackendData;
5298 int rc;
5299
5300 AssertPtr(pImage);
5301
5302 if (pImage)
5303 {
5304 if (pImage->uOpenFlags & VD_OPEN_FLAGS_READONLY)
5305 {
5306 rc = VERR_VD_IMAGE_READ_ONLY;
5307 goto out;
5308 }
5309 rc = vmdkDescSetPCHSGeometry(pImage, pPCHSGeometry);
5310 if (RT_FAILURE(rc))
5311 goto out;
5312
5313 pImage->PCHSGeometry = *pPCHSGeometry;
5314 rc = VINF_SUCCESS;
5315 }
5316 else
5317 rc = VERR_VD_NOT_OPENED;
5318
5319out:
5320 LogFlowFunc(("returns %Rrc\n", rc));
5321 return rc;
5322}
5323
5324/** @copydoc VBOXHDDBACKEND::pfnGetLCHSGeometry */
5325static int vmdkGetLCHSGeometry(void *pBackendData,
5326 PPDMMEDIAGEOMETRY pLCHSGeometry)
5327{
5328 LogFlowFunc(("pBackendData=%#p pLCHSGeometry=%#p\n", pBackendData, pLCHSGeometry));
5329 PVMDKIMAGE pImage = (PVMDKIMAGE)pBackendData;
5330 int rc;
5331
5332 AssertPtr(pImage);
5333
5334 if (pImage)
5335 {
5336 if (pImage->LCHSGeometry.cCylinders)
5337 {
5338 *pLCHSGeometry = pImage->LCHSGeometry;
5339 rc = VINF_SUCCESS;
5340 }
5341 else
5342 rc = VERR_VD_GEOMETRY_NOT_SET;
5343 }
5344 else
5345 rc = VERR_VD_NOT_OPENED;
5346
5347 LogFlowFunc(("returns %Rrc (LCHS=%u/%u/%u)\n", rc, pLCHSGeometry->cCylinders, pLCHSGeometry->cHeads, pLCHSGeometry->cSectors));
5348 return rc;
5349}
5350
5351/** @copydoc VBOXHDDBACKEND::pfnSetLCHSGeometry */
5352static int vmdkSetLCHSGeometry(void *pBackendData,
5353 PCPDMMEDIAGEOMETRY pLCHSGeometry)
5354{
5355 LogFlowFunc(("pBackendData=%#p pLCHSGeometry=%#p LCHS=%u/%u/%u\n", pBackendData, pLCHSGeometry, pLCHSGeometry->cCylinders, pLCHSGeometry->cHeads, pLCHSGeometry->cSectors));
5356 PVMDKIMAGE pImage = (PVMDKIMAGE)pBackendData;
5357 int rc;
5358
5359 AssertPtr(pImage);
5360
5361 if (pImage)
5362 {
5363 if (pImage->uOpenFlags & VD_OPEN_FLAGS_READONLY)
5364 {
5365 rc = VERR_VD_IMAGE_READ_ONLY;
5366 goto out;
5367 }
5368 rc = vmdkDescSetLCHSGeometry(pImage, pLCHSGeometry);
5369 if (RT_FAILURE(rc))
5370 goto out;
5371
5372 pImage->LCHSGeometry = *pLCHSGeometry;
5373 rc = VINF_SUCCESS;
5374 }
5375 else
5376 rc = VERR_VD_NOT_OPENED;
5377
5378out:
5379 LogFlowFunc(("returns %Rrc\n", rc));
5380 return rc;
5381}
5382
5383/** @copydoc VBOXHDDBACKEND::pfnGetImageFlags */
5384static unsigned vmdkGetImageFlags(void *pBackendData)
5385{
5386 LogFlowFunc(("pBackendData=%#p\n", pBackendData));
5387 PVMDKIMAGE pImage = (PVMDKIMAGE)pBackendData;
5388 unsigned uImageFlags;
5389
5390 AssertPtr(pImage);
5391
5392 if (pImage)
5393 uImageFlags = pImage->uImageFlags;
5394 else
5395 uImageFlags = 0;
5396
5397 LogFlowFunc(("returns %#x\n", uImageFlags));
5398 return uImageFlags;
5399}
5400
5401/** @copydoc VBOXHDDBACKEND::pfnGetOpenFlags */
5402static unsigned vmdkGetOpenFlags(void *pBackendData)
5403{
5404 LogFlowFunc(("pBackendData=%#p\n", pBackendData));
5405 PVMDKIMAGE pImage = (PVMDKIMAGE)pBackendData;
5406 unsigned uOpenFlags;
5407
5408 AssertPtr(pImage);
5409
5410 if (pImage)
5411 uOpenFlags = pImage->uOpenFlags;
5412 else
5413 uOpenFlags = 0;
5414
5415 LogFlowFunc(("returns %#x\n", uOpenFlags));
5416 return uOpenFlags;
5417}
5418
5419/** @copydoc VBOXHDDBACKEND::pfnSetOpenFlags */
5420static int vmdkSetOpenFlags(void *pBackendData, unsigned uOpenFlags)
5421{
5422 LogFlowFunc(("pBackendData=%#p uOpenFlags=%#x\n", pBackendData, uOpenFlags));
5423 PVMDKIMAGE pImage = (PVMDKIMAGE)pBackendData;
5424 int rc;
5425
5426 /* Image must be opened and the new flags must be valid. Only the read-only,
5427 * info and async I/O flags are supported. */
5428 if (!pImage || (uOpenFlags & ~(VD_OPEN_FLAGS_READONLY | VD_OPEN_FLAGS_INFO | VD_OPEN_FLAGS_ASYNC_IO)))
5429 {
5430 rc = VERR_INVALID_PARAMETER;
5431 goto out;
5432 }
5433
5434 /* Implement this operation via reopening the image. */
5435 vmdkFreeImage(pImage, false);
5436 rc = vmdkOpenImage(pImage, uOpenFlags);
5437
5438out:
5439 LogFlowFunc(("returns %Rrc\n", rc));
5440 return rc;
5441}
5442
5443/** @copydoc VBOXHDDBACKEND::pfnGetComment */
5444static int vmdkGetComment(void *pBackendData, char *pszComment,
5445 size_t cbComment)
5446{
5447 LogFlowFunc(("pBackendData=%#p pszComment=%#p cbComment=%zu\n", pBackendData, pszComment, cbComment));
5448 PVMDKIMAGE pImage = (PVMDKIMAGE)pBackendData;
5449 int rc;
5450
5451 AssertPtr(pImage);
5452
5453 if (pImage)
5454 {
5455 const char *pszCommentEncoded = NULL;
5456 rc = vmdkDescDDBGetStr(pImage, &pImage->Descriptor,
5457 "ddb.comment", &pszCommentEncoded);
5458 if (rc == VERR_VD_VMDK_VALUE_NOT_FOUND)
5459 pszCommentEncoded = NULL;
5460 else if (RT_FAILURE(rc))
5461 goto out;
5462
5463 if (pszComment && pszCommentEncoded)
5464 rc = vmdkDecodeString(pszCommentEncoded, pszComment, cbComment);
5465 else
5466 {
5467 if (pszComment)
5468 *pszComment = '\0';
5469 rc = VINF_SUCCESS;
5470 }
5471 if (pszCommentEncoded)
5472 RTStrFree((char *)(void *)pszCommentEncoded);
5473 }
5474 else
5475 rc = VERR_VD_NOT_OPENED;
5476
5477out:
5478 LogFlowFunc(("returns %Rrc comment='%s'\n", rc, pszComment));
5479 return rc;
5480}
5481
5482/** @copydoc VBOXHDDBACKEND::pfnSetComment */
5483static int vmdkSetComment(void *pBackendData, const char *pszComment)
5484{
5485 LogFlowFunc(("pBackendData=%#p pszComment=\"%s\"\n", pBackendData, pszComment));
5486 PVMDKIMAGE pImage = (PVMDKIMAGE)pBackendData;
5487 int rc;
5488
5489 AssertPtr(pImage);
5490
5491 if (pImage && (pImage->uOpenFlags & VD_OPEN_FLAGS_READONLY))
5492 {
5493 rc = VERR_VD_IMAGE_READ_ONLY;
5494 goto out;
5495 }
5496
5497 if (pImage)
5498 rc = vmdkSetImageComment(pImage, pszComment);
5499 else
5500 rc = VERR_VD_NOT_OPENED;
5501
5502out:
5503 LogFlowFunc(("returns %Rrc\n", rc));
5504 return rc;
5505}
5506
5507/** @copydoc VBOXHDDBACKEND::pfnGetUuid */
5508static int vmdkGetUuid(void *pBackendData, PRTUUID pUuid)
5509{
5510 LogFlowFunc(("pBackendData=%#p pUuid=%#p\n", pBackendData, pUuid));
5511 PVMDKIMAGE pImage = (PVMDKIMAGE)pBackendData;
5512 int rc;
5513
5514 AssertPtr(pImage);
5515
5516 if (pImage)
5517 {
5518 *pUuid = pImage->ImageUuid;
5519 rc = VINF_SUCCESS;
5520 }
5521 else
5522 rc = VERR_VD_NOT_OPENED;
5523
5524 LogFlowFunc(("returns %Rrc (%RTuuid)\n", rc, pUuid));
5525 return rc;
5526}
5527
5528/** @copydoc VBOXHDDBACKEND::pfnSetUuid */
5529static int vmdkSetUuid(void *pBackendData, PCRTUUID pUuid)
5530{
5531 LogFlowFunc(("pBackendData=%#p Uuid=%RTuuid\n", pBackendData, pUuid));
5532 PVMDKIMAGE pImage = (PVMDKIMAGE)pBackendData;
5533 int rc;
5534
5535 LogFlowFunc(("%RTuuid\n", pUuid));
5536 AssertPtr(pImage);
5537
5538 if (pImage)
5539 {
5540 if (!(pImage->uOpenFlags & VD_OPEN_FLAGS_READONLY))
5541 {
5542 pImage->ImageUuid = *pUuid;
5543 rc = vmdkDescDDBSetUuid(pImage, &pImage->Descriptor,
5544 VMDK_DDB_IMAGE_UUID, pUuid);
5545 if (RT_FAILURE(rc))
5546 return vmdkError(pImage, rc, RT_SRC_POS, N_("VMDK: error storing image UUID in descriptor in '%s'"), pImage->pszFilename);
5547 rc = VINF_SUCCESS;
5548 }
5549 else
5550 rc = VERR_VD_IMAGE_READ_ONLY;
5551 }
5552 else
5553 rc = VERR_VD_NOT_OPENED;
5554
5555 LogFlowFunc(("returns %Rrc\n", rc));
5556 return rc;
5557}
5558
5559/** @copydoc VBOXHDDBACKEND::pfnGetModificationUuid */
5560static int vmdkGetModificationUuid(void *pBackendData, PRTUUID pUuid)
5561{
5562 LogFlowFunc(("pBackendData=%#p pUuid=%#p\n", pBackendData, pUuid));
5563 PVMDKIMAGE pImage = (PVMDKIMAGE)pBackendData;
5564 int rc;
5565
5566 AssertPtr(pImage);
5567
5568 if (pImage)
5569 {
5570 *pUuid = pImage->ModificationUuid;
5571 rc = VINF_SUCCESS;
5572 }
5573 else
5574 rc = VERR_VD_NOT_OPENED;
5575
5576 LogFlowFunc(("returns %Rrc (%RTuuid)\n", rc, pUuid));
5577 return rc;
5578}
5579
5580/** @copydoc VBOXHDDBACKEND::pfnSetModificationUuid */
5581static int vmdkSetModificationUuid(void *pBackendData, PCRTUUID pUuid)
5582{
5583 LogFlowFunc(("pBackendData=%#p Uuid=%RTuuid\n", pBackendData, pUuid));
5584 PVMDKIMAGE pImage = (PVMDKIMAGE)pBackendData;
5585 int rc;
5586
5587 AssertPtr(pImage);
5588
5589 if (pImage)
5590 {
5591 if (!(pImage->uOpenFlags & VD_OPEN_FLAGS_READONLY))
5592 {
5593 pImage->ModificationUuid = *pUuid;
5594 rc = vmdkDescDDBSetUuid(pImage, &pImage->Descriptor,
5595 VMDK_DDB_MODIFICATION_UUID, pUuid);
5596 if (RT_FAILURE(rc))
5597 return vmdkError(pImage, rc, RT_SRC_POS, N_("VMDK: error storing modification UUID in descriptor in '%s'"), pImage->pszFilename);
5598 rc = VINF_SUCCESS;
5599 }
5600 else
5601 rc = VERR_VD_IMAGE_READ_ONLY;
5602 }
5603 else
5604 rc = VERR_VD_NOT_OPENED;
5605
5606 LogFlowFunc(("returns %Rrc\n", rc));
5607 return rc;
5608}
5609
5610/** @copydoc VBOXHDDBACKEND::pfnGetParentUuid */
5611static int vmdkGetParentUuid(void *pBackendData, PRTUUID pUuid)
5612{
5613 LogFlowFunc(("pBackendData=%#p pUuid=%#p\n", pBackendData, pUuid));
5614 PVMDKIMAGE pImage = (PVMDKIMAGE)pBackendData;
5615 int rc;
5616
5617 AssertPtr(pImage);
5618
5619 if (pImage)
5620 {
5621 *pUuid = pImage->ParentUuid;
5622 rc = VINF_SUCCESS;
5623 }
5624 else
5625 rc = VERR_VD_NOT_OPENED;
5626
5627 LogFlowFunc(("returns %Rrc (%RTuuid)\n", rc, pUuid));
5628 return rc;
5629}
5630
5631/** @copydoc VBOXHDDBACKEND::pfnSetParentUuid */
5632static int vmdkSetParentUuid(void *pBackendData, PCRTUUID pUuid)
5633{
5634 LogFlowFunc(("pBackendData=%#p Uuid=%RTuuid\n", pBackendData, pUuid));
5635 PVMDKIMAGE pImage = (PVMDKIMAGE)pBackendData;
5636 int rc;
5637
5638 AssertPtr(pImage);
5639
5640 if (pImage)
5641 {
5642 if (!(pImage->uOpenFlags & VD_OPEN_FLAGS_READONLY))
5643 {
5644 pImage->ParentUuid = *pUuid;
5645 rc = vmdkDescDDBSetUuid(pImage, &pImage->Descriptor,
5646 VMDK_DDB_PARENT_UUID, pUuid);
5647 if (RT_FAILURE(rc))
5648 return vmdkError(pImage, rc, RT_SRC_POS, N_("VMDK: error storing parent image UUID in descriptor in '%s'"), pImage->pszFilename);
5649 rc = VINF_SUCCESS;
5650 }
5651 else
5652 rc = VERR_VD_IMAGE_READ_ONLY;
5653 }
5654 else
5655 rc = VERR_VD_NOT_OPENED;
5656
5657 LogFlowFunc(("returns %Rrc\n", rc));
5658 return rc;
5659}
5660
5661/** @copydoc VBOXHDDBACKEND::pfnGetParentModificationUuid */
5662static int vmdkGetParentModificationUuid(void *pBackendData, PRTUUID pUuid)
5663{
5664 LogFlowFunc(("pBackendData=%#p pUuid=%#p\n", pBackendData, pUuid));
5665 PVMDKIMAGE pImage = (PVMDKIMAGE)pBackendData;
5666 int rc;
5667
5668 AssertPtr(pImage);
5669
5670 if (pImage)
5671 {
5672 *pUuid = pImage->ParentModificationUuid;
5673 rc = VINF_SUCCESS;
5674 }
5675 else
5676 rc = VERR_VD_NOT_OPENED;
5677
5678 LogFlowFunc(("returns %Rrc (%RTuuid)\n", rc, pUuid));
5679 return rc;
5680}
5681
5682/** @copydoc VBOXHDDBACKEND::pfnSetParentModificationUuid */
5683static int vmdkSetParentModificationUuid(void *pBackendData, PCRTUUID pUuid)
5684{
5685 LogFlowFunc(("pBackendData=%#p Uuid=%RTuuid\n", pBackendData, pUuid));
5686 PVMDKIMAGE pImage = (PVMDKIMAGE)pBackendData;
5687 int rc;
5688
5689 AssertPtr(pImage);
5690
5691 if (pImage)
5692 {
5693 if (!(pImage->uOpenFlags & VD_OPEN_FLAGS_READONLY))
5694 {
5695 pImage->ParentModificationUuid = *pUuid;
5696 rc = vmdkDescDDBSetUuid(pImage, &pImage->Descriptor,
5697 VMDK_DDB_PARENT_MODIFICATION_UUID, pUuid);
5698 if (RT_FAILURE(rc))
5699 return vmdkError(pImage, rc, RT_SRC_POS, N_("VMDK: error storing parent modification UUID in descriptor in '%s'"), pImage->pszFilename);
5700 rc = VINF_SUCCESS;
5701 }
5702 else
5703 rc = VERR_VD_IMAGE_READ_ONLY;
5704 }
5705 else
5706 rc = VERR_VD_NOT_OPENED;
5707
5708 LogFlowFunc(("returns %Rrc\n", rc));
5709 return rc;
5710}
5711
5712/** @copydoc VBOXHDDBACKEND::pfnDump */
5713static void vmdkDump(void *pBackendData)
5714{
5715 PVMDKIMAGE pImage = (PVMDKIMAGE)pBackendData;
5716
5717 AssertPtr(pImage);
5718 if (pImage)
5719 {
5720 pImage->pInterfaceErrorCallbacks->pfnMessage(pImage->pInterfaceError->pvUser, "Header: Geometry PCHS=%u/%u/%u LCHS=%u/%u/%u cbSector=%llu\n",
5721 pImage->PCHSGeometry.cCylinders, pImage->PCHSGeometry.cHeads, pImage->PCHSGeometry.cSectors,
5722 pImage->LCHSGeometry.cCylinders, pImage->LCHSGeometry.cHeads, pImage->LCHSGeometry.cSectors,
5723 VMDK_BYTE2SECTOR(pImage->cbSize));
5724 pImage->pInterfaceErrorCallbacks->pfnMessage(pImage->pInterfaceError->pvUser, "Header: uuidCreation={%RTuuid}\n", &pImage->ImageUuid);
5725 pImage->pInterfaceErrorCallbacks->pfnMessage(pImage->pInterfaceError->pvUser, "Header: uuidModification={%RTuuid}\n", &pImage->ModificationUuid);
5726 pImage->pInterfaceErrorCallbacks->pfnMessage(pImage->pInterfaceError->pvUser, "Header: uuidParent={%RTuuid}\n", &pImage->ParentUuid);
5727 pImage->pInterfaceErrorCallbacks->pfnMessage(pImage->pInterfaceError->pvUser, "Header: uuidParentModification={%RTuuid}\n", &pImage->ParentModificationUuid);
5728 }
5729}
5730
5731
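/** @copydoc VBOXHDDBACKEND::pfnGetTimeStamp */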
5732static int vmdkGetTimeStamp(void *pvBackendData, PRTTIMESPEC pTimeStamp)
5733{
5734 int rc = VERR_NOT_IMPLEMENTED;
5735 LogFlow(("%s: returned %Rrc\n", __FUNCTION__, rc));
5736 return rc;
5737}
5738
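/** @copydoc VBOXHDDBACKEND::pfnGetParentTimeStamp */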
5739static int vmdkGetParentTimeStamp(void *pvBackendData, PRTTIMESPEC pTimeStamp)
5740{
5741 int rc = VERR_NOT_IMPLEMENTED;
5742 LogFlow(("%s: returned %Rrc\n", __FUNCTION__, rc));
5743 return rc;
5744}
5745
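/** @copydoc VBOXHDDBACKEND::pfnSetParentTimeStamp */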
5746static int vmdkSetParentTimeStamp(void *pvBackendData, PCRTTIMESPEC pTimeStamp)
5747{
5748 int rc = VERR_NOT_IMPLEMENTED;
5749 LogFlow(("%s: returned %Rrc\n", __FUNCTION__, rc));
5750 return rc;
5751}
5752
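/** @copydoc VBOXHDDBACKEND::pfnGetParentFilename */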
5753static int vmdkGetParentFilename(void *pvBackendData, char **ppszParentFilename)
5754{
5755 int rc = VERR_NOT_IMPLEMENTED;
5756 LogFlow(("%s: returned %Rrc\n", __FUNCTION__, rc));
5757 return rc;
5758}
5759
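/** @copydoc VBOXHDDBACKEND::pfnSetParentFilename */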
5760static int vmdkSetParentFilename(void *pvBackendData, const char *pszParentFilename)
5761{
5762 int rc = VERR_NOT_IMPLEMENTED;
5763 LogFlow(("%s: returned %Rrc\n", __FUNCTION__, rc));
5764 return rc;
5765}
5766
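/** @copydoc VBOXHDDBACKEND::pfnIsAsyncIOSupported */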
5767static bool vmdkIsAsyncIOSupported(void *pvBackendData)
5768{
5769 PVMDKIMAGE pImage = (PVMDKIMAGE)pvBackendData;
5770 bool fAsyncIOSupported = false;
5771
5772 if (pImage)
5773 {
5774 unsigned cFlatExtents = 0;
5775
5776 /* Async I/O is only supported if the image consists solely of FLAT, ZERO or VMFS extents.
5777 *
5778 * @todo: At the moment we only support async I/O if there is at most one FLAT extent;
5779 * more than one doesn't work yet with the async I/O interface.
5780 */
5781 fAsyncIOSupported = true;
5782 for (unsigned i = 0; i < pImage->cExtents; i++)
5783 {
5784 if (( pImage->pExtents[i].enmType != VMDKETYPE_FLAT
5785 && pImage->pExtents[i].enmType != VMDKETYPE_ZERO
5786 && pImage->pExtents[i].enmType != VMDKETYPE_VMFS)
5787 || ((pImage->pExtents[i].enmType == VMDKETYPE_FLAT) && (cFlatExtents > 0)))
5788 {
5789 fAsyncIOSupported = false;
5790 break; /* Stop search */
5791 }
5792 if (pImage->pExtents[i].enmType == VMDKETYPE_FLAT)
5793 cFlatExtents++;
5794 }
5795 }
5796
5797 return fAsyncIOSupported;
5798}
5799
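/** @copydoc VBOXHDDBACKEND::pfnAsyncRead */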
5800static int vmdkAsyncRead(void *pvBackendData, uint64_t uOffset, size_t cbRead,
5801 PPDMDATASEG paSeg, unsigned cSeg, void *pvUser)
5802{
5803 PVMDKIMAGE pImage = (PVMDKIMAGE)pvBackendData;
5804 PVMDKEXTENT pExtent = NULL;
5805 int rc = VINF_SUCCESS;
5806 unsigned cSegments = 0;
5807 PPDMDATASEG paSegCurrent = paSeg;
5808 size_t cbLeftInCurrentSegment = paSegCurrent->cbSeg;
5809 size_t uOffsetInCurrentSegment = 0;
5810 size_t cbReadLeft = cbRead;
5811 uint64_t uOffCurr = uOffset;
5812
5813 AssertPtr(pImage);
5814 Assert(uOffset % 512 == 0);
5815 Assert(cbRead % 512 == 0);
5816
5817 if ( uOffset + cbRead > pImage->cbSize
5818 || cbRead == 0)
5819 {
5820 rc = VERR_INVALID_PARAMETER;
5821 goto out;
5822 }
5823
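 /* Walk the extents and the caller's scatter/gather segments in parallel,
 * collecting the pieces that need real file I/O (FLAT/VMFS) in paSegments. */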
5824 while (cbReadLeft && cSeg)
5825 {
5826 size_t cbToRead;
5827 uint64_t uSectorExtentRel;
5828
5829 rc = vmdkFindExtent(pImage, VMDK_BYTE2SECTOR(uOffCurr),
5830 &pExtent, &uSectorExtentRel);
5831 if (RT_FAILURE(rc))
5832 goto out;
5833
5834 /* Check access permissions as defined in the extent descriptor. */
5835 if (pExtent->enmAccess == VMDKACCESS_NOACCESS)
5836 {
5837 rc = VERR_VD_VMDK_INVALID_STATE;
5838 goto out;
5839 }
5840
5841 /* Clip read range to remain in this extent. */
5842 cbToRead = RT_MIN(cbRead, VMDK_SECTOR2BYTE(pExtent->uSectorOffset + pExtent->cNominalSectors - uSectorExtentRel));
5843 /* Clip read range to remain in the current data segment. */
5844 cbToRead = RT_MIN(cbToRead, cbLeftInCurrentSegment);
5845
5846 switch (pExtent->enmType)
5847 {
5848 case VMDKETYPE_VMFS:
5849 case VMDKETYPE_FLAT:
5850 {
5851 /* Check for enough room first. */
5852 if (RT_UNLIKELY(cSegments >= pImage->cSegments))
5853 {
5854 /* We reached maximum, resize array. Try to realloc memory first. */
5855 PPDMDATASEG paSegmentsNew = (PPDMDATASEG)RTMemRealloc(pImage->paSegments, (cSegments + 10)*sizeof(PDMDATASEG));
5856
5857 if (!paSegmentsNew)
5858 {
5859 /* We failed. Allocate completely new. */
5860 paSegmentsNew = (PPDMDATASEG)RTMemAllocZ((cSegments + 10)* sizeof(PDMDATASEG));
5861 if (!paSegmentsNew)
5862 {
5863 /* Damn, we are out of memory. */
5864 rc = VERR_NO_MEMORY;
5865 goto out;
5866 }
5867
5868 /* Copy the existing segments over. */
5869 for (unsigned i = 0; i < cSegments; i++)
5870 paSegmentsNew[i] = pImage->paSegments[i];
5871
5872 /* Free old memory. */
5873 RTMemFree(pImage->paSegments);
5874 }
5875
5876 pImage->cSegments = cSegments + 10;
5877 pImage->paSegments = paSegmentsNew;
5878 }
5879
5880 pImage->paSegments[cSegments].cbSeg = cbToRead;
5881 pImage->paSegments[cSegments].pvSeg = (uint8_t *)paSegCurrent->pvSeg + uOffsetInCurrentSegment;
5882 cSegments++;
5883 break;
5884 }
5885 case VMDKETYPE_ZERO:
5886 /* Nothing left to do. */
5887 break;
5888 default:
5889 AssertMsgFailed(("Unsupported extent type %u\n", pExtent->enmType));
5890 }
5891
5892 cbReadLeft -= cbToRead;
5893 uOffCurr += cbToRead;
5894 cbLeftInCurrentSegment -= cbToRead;
5895 uOffsetInCurrentSegment += cbToRead;
5896 /* Go to next extent if there is no space left in current one. */
5897 if (!cbLeftInCurrentSegment)
5898 {
5899 uOffsetInCurrentSegment = 0;
5900 paSegCurrent++;
5901 cSeg--;
5902 cbLeftInCurrentSegment = paSegCurrent->cbSeg;
5903 }
5904 }
5905
5906 AssertMsg(cbReadLeft == 0, ("No segment left but there is still data to read\n"));
5907
5908 if (cSegments == 0)
5909 {
5910 /* The request was completely in a ZERO extent, nothing to do. */
5911 rc = VINF_VD_ASYNC_IO_FINISHED;
5912 }
5913 else
5914 {
5915 /* Start the read. */
5916 void *pTask;
5917 rc = pImage->pInterfaceAsyncIOCallbacks->pfnReadAsync(pImage->pInterfaceAsyncIO->pvUser,
5918 pExtent->pFile->pStorage, uOffset,
5919 pImage->paSegments, cSegments, cbRead,
5920 pvUser, &pTask);
5921 }
5922
5923out:
5924 LogFlowFunc(("returns %Rrc\n", rc));
5925 return rc;
5926}
5927
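/** @copydoc VBOXHDDBACKEND::pfnAsyncWrite */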
5928static int vmdkAsyncWrite(void *pvBackendData, uint64_t uOffset, size_t cbWrite,
5929 PPDMDATASEG paSeg, unsigned cSeg, void *pvUser)
5930{
5931 PVMDKIMAGE pImage = (PVMDKIMAGE)pvBackendData;
5932 PVMDKEXTENT pExtent = NULL;
5933 int rc = VINF_SUCCESS;
5934 unsigned cSegments = 0;
5935 PPDMDATASEG paSegCurrent = paSeg;
5936 size_t cbLeftInCurrentSegment = paSegCurrent->cbSeg;
5937 size_t uOffsetInCurrentSegment = 0;
5938 size_t cbWriteLeft = cbWrite;
5939 uint64_t uOffCurr = uOffset;
5940
5941 AssertPtr(pImage);
5942 Assert(uOffset % 512 == 0);
5943 Assert(cbWrite % 512 == 0);
5944
5945 if ( uOffset + cbWrite > pImage->cbSize
5946 || cbWrite == 0)
5947 {
5948 rc = VERR_INVALID_PARAMETER;
5949 goto out;
5950 }
5951
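 /* Same splitting as in vmdkAsyncRead: gather the FLAT/VMFS pieces of the
 * request into paSegments; ZERO extents need no I/O. */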
5952 while (cbWriteLeft && cSeg)
5953 {
5954 size_t cbToWrite;
5955 uint64_t uSectorExtentRel;
5956
5957 rc = vmdkFindExtent(pImage, VMDK_BYTE2SECTOR(uOffCurr),
5958 &pExtent, &uSectorExtentRel);
5959 if (RT_FAILURE(rc))
5960 goto out;
5961
5962 /* Check access permissions as defined in the extent descriptor. */
5963 if (pExtent->enmAccess == VMDKACCESS_NOACCESS)
5964 {
5965 rc = VERR_VD_VMDK_INVALID_STATE;
5966 goto out;
5967 }
5968
5969 /* Clip write range to remain in this extent. */
5970 cbToWrite = RT_MIN(cbWrite, VMDK_SECTOR2BYTE(pExtent->uSectorOffset + pExtent->cNominalSectors - uSectorExtentRel));
5971 /* Clip write range to remain in the current data segment. */
5972 cbToWrite = RT_MIN(cbToWrite, cbLeftInCurrentSegment);
5973
5974 switch (pExtent->enmType)
5975 {
5976 case VMDKETYPE_VMFS:
5977 case VMDKETYPE_FLAT:
5978 {
5979 /* Check for enough room first. */
5980 if (RT_UNLIKELY(cSegments >= pImage->cSegments))
5981 {
5982 /* We reached maximum, resize array. Try to realloc memory first. */
5983 PPDMDATASEG paSegmentsNew = (PPDMDATASEG)RTMemRealloc(pImage->paSegments, (cSegments + 10)*sizeof(PDMDATASEG));
5984
5985 if (!paSegmentsNew)
5986 {
5987 /* We failed. Allocate completely new. */
5988 paSegmentsNew = (PPDMDATASEG)RTMemAllocZ((cSegments + 10)* sizeof(PDMDATASEG));
5989 if (!paSegmentsNew)
5990 {
5991 /* Damn, we are out of memory. */
5992 rc = VERR_NO_MEMORY;
5993 goto out;
5994 }
5995
5996 /* Copy the existing segments over. */
5997 for (unsigned i = 0; i < cSegments; i++)
5998 paSegmentsNew[i] = pImage->paSegments[i];
5999
6000 /* Free old memory. */
6001 RTMemFree(pImage->paSegments);
6002 }
6003
6004 pImage->cSegments = cSegments + 10;
6005 pImage->paSegments = paSegmentsNew;
6006 }
6007
6008 pImage->paSegments[cSegments].cbSeg = cbToWrite;
6009 pImage->paSegments[cSegments].pvSeg = (uint8_t *)paSegCurrent->pvSeg + uOffsetInCurrentSegment;
6010 cSegments++;
6011 break;
6012 }
6013 case VMDKETYPE_ZERO:
6014 /* Nothing left to do. */
6015 break;
6016 default:
6017 AssertMsgFailed(("Unsupported extent type %u\n", pExtent->enmType));
6018 }
6019
6020 cbWriteLeft -= cbToWrite;
6021 uOffCurr += cbToWrite;
6022 cbLeftInCurrentSegment -= cbToWrite;
6023 uOffsetInCurrentSegment += cbToWrite;
6024 /* Go to next extent if there is no space left in current one. */
6025 if (!cbLeftInCurrentSegment)
6026 {
6027 uOffsetInCurrentSegment = 0;
6028 paSegCurrent++;
6029 cSeg--;
6030 cbLeftInCurrentSegment = paSegCurrent->cbSeg;
6031 }
6032 }
6033
6034 AssertMsg(cbWriteLeft == 0, ("No segment left but there is still data to write\n"));
6035
6036 if (cSegments == 0)
6037 {
6038 /* The request was completely in a ZERO extent, nothing to do. */
6039 rc = VINF_VD_ASYNC_IO_FINISHED;
6040 }
6041 else
6042 {
6043 /* Start the write */
6044 void *pTask;
6045 rc = pImage->pInterfaceAsyncIOCallbacks->pfnWriteAsync(pImage->pInterfaceAsyncIO->pvUser,
6046 pExtent->pFile->pStorage, uOffset,
6047 pImage->paSegments, cSegments, cbWrite,
6048 pvUser, &pTask);
6049 }
6050
6051out:
6052 LogFlowFunc(("returns %Rrc\n", rc));
6053 return rc;
6054}
6055
6056
6057VBOXHDDBACKEND g_VmdkBackend =
6058{
6059 /* pszBackendName */
6060 "VMDK",
6061 /* cbSize */
6062 sizeof(VBOXHDDBACKEND),
6063 /* uBackendCaps */
6064 VD_CAP_UUID | VD_CAP_CREATE_FIXED | VD_CAP_CREATE_DYNAMIC
6065 | VD_CAP_CREATE_SPLIT_2G | VD_CAP_DIFF | VD_CAP_FILE | VD_CAP_ASYNC,
6066 /* papszFileExtensions */
6067 s_apszVmdkFileExtensions,
6068 /* paConfigInfo */
6069 NULL,
6070 /* hPlugin */
6071 NIL_RTLDRMOD,
6072 /* pfnCheckIfValid */
6073 vmdkCheckIfValid,
6074 /* pfnOpen */
6075 vmdkOpen,
6076 /* pfnCreate */
6077 vmdkCreate,
6078 /* pfnRename */
6079 vmdkRename,
6080 /* pfnClose */
6081 vmdkClose,
6082 /* pfnRead */
6083 vmdkRead,
6084 /* pfnWrite */
6085 vmdkWrite,
6086 /* pfnFlush */
6087 vmdkFlush,
6088 /* pfnGetVersion */
6089 vmdkGetVersion,
6090 /* pfnGetSize */
6091 vmdkGetSize,
6092 /* pfnGetFileSize */
6093 vmdkGetFileSize,
6094 /* pfnGetPCHSGeometry */
6095 vmdkGetPCHSGeometry,
6096 /* pfnSetPCHSGeometry */
6097 vmdkSetPCHSGeometry,
6098 /* pfnGetLCHSGeometry */
6099 vmdkGetLCHSGeometry,
6100 /* pfnSetLCHSGeometry */
6101 vmdkSetLCHSGeometry,
6102 /* pfnGetImageFlags */
6103 vmdkGetImageFlags,
6104 /* pfnGetOpenFlags */
6105 vmdkGetOpenFlags,
6106 /* pfnSetOpenFlags */
6107 vmdkSetOpenFlags,
6108 /* pfnGetComment */
6109 vmdkGetComment,
6110 /* pfnSetComment */
6111 vmdkSetComment,
6112 /* pfnGetUuid */
6113 vmdkGetUuid,
6114 /* pfnSetUuid */
6115 vmdkSetUuid,
6116 /* pfnGetModificationUuid */
6117 vmdkGetModificationUuid,
6118 /* pfnSetModificationUuid */
6119 vmdkSetModificationUuid,
6120 /* pfnGetParentUuid */
6121 vmdkGetParentUuid,
6122 /* pfnSetParentUuid */
6123 vmdkSetParentUuid,
6124 /* pfnGetParentModificationUuid */
6125 vmdkGetParentModificationUuid,
6126 /* pfnSetParentModificationUuid */
6127 vmdkSetParentModificationUuid,
6128 /* pfnDump */
6129 vmdkDump,
6130 /* pfnGetTimeStamp */
6131 vmdkGetTimeStamp,
6132 /* pfnGetParentTimeStamp */
6133 vmdkGetParentTimeStamp,
6134 /* pfnSetParentTimeStamp */
6135 vmdkSetParentTimeStamp,
6136 /* pfnGetParentFilename */
6137 vmdkGetParentFilename,
6138 /* pfnSetParentFilename */
6139 vmdkSetParentFilename,
6140 /* pfnIsAsyncIOSupported */
6141 vmdkIsAsyncIOSupported,
6142 /* pfnAsyncRead */
6143 vmdkAsyncRead,
6144 /* pfnAsyncWrite */
6145 vmdkAsyncWrite,
6146 /* pfnComposeLocation */
6147 genericFileComposeLocation,
6148 /* pfnComposeName */
6149 genericFileComposeName
6150};