VirtualBox

source: vbox/trunk/src/VBox/Devices/Storage/VmdkHDDCore.cpp@28800

Last change on this file since 28800 was 28800, checked in by vboxsync on 2010-04-27

Automated rebranding to Oracle copyright/license strings via filemuncher

  • Property svn:eol-style set to native
  • Property svn:keywords set to Author Date Id Revision
File size: 226.2 KB
1/* $Id: VmdkHDDCore.cpp 28800 2010-04-27 08:22:32Z vboxsync $ */
2/** @file
3 * VMDK Disk image, Core Code.
4 */
5
6/*
7 * Copyright (C) 2006-2010 Oracle Corporation
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 */
17
18/*******************************************************************************
19* Header Files *
20*******************************************************************************/
21#define LOG_GROUP LOG_GROUP_VD_VMDK
22#include <VBox/VBoxHDD-Plugin.h>
23#include <VBox/err.h>
24
25#include <VBox/log.h>
26#include <iprt/assert.h>
27#include <iprt/alloc.h>
28#include <iprt/uuid.h>
29#include <iprt/file.h>
30#include <iprt/path.h>
31#include <iprt/string.h>
32#include <iprt/rand.h>
33#include <iprt/zip.h>
34
35
36/*******************************************************************************
37* Constants And Macros, Structures and Typedefs *
38*******************************************************************************/
39
40/** Maximum encoded string size (including NUL) we allow for VMDK images.
41 * Deliberately not set high to avoid running out of descriptor space. */
42#define VMDK_ENCODED_COMMENT_MAX 1024
43
44/** VMDK descriptor DDB entry for PCHS cylinders. */
45#define VMDK_DDB_GEO_PCHS_CYLINDERS "ddb.geometry.cylinders"
46
47/** VMDK descriptor DDB entry for PCHS heads. */
48#define VMDK_DDB_GEO_PCHS_HEADS "ddb.geometry.heads"
49
50/** VMDK descriptor DDB entry for PCHS sectors. */
51#define VMDK_DDB_GEO_PCHS_SECTORS "ddb.geometry.sectors"
52
53/** VMDK descriptor DDB entry for LCHS cylinders. */
54#define VMDK_DDB_GEO_LCHS_CYLINDERS "ddb.geometry.biosCylinders"
55
56/** VMDK descriptor DDB entry for LCHS heads. */
57#define VMDK_DDB_GEO_LCHS_HEADS "ddb.geometry.biosHeads"
58
59/** VMDK descriptor DDB entry for LCHS sectors. */
60#define VMDK_DDB_GEO_LCHS_SECTORS "ddb.geometry.biosSectors"
61
62/** VMDK descriptor DDB entry for image UUID. */
63#define VMDK_DDB_IMAGE_UUID "ddb.uuid.image"
64
65/** VMDK descriptor DDB entry for image modification UUID. */
66#define VMDK_DDB_MODIFICATION_UUID "ddb.uuid.modification"
67
68/** VMDK descriptor DDB entry for parent image UUID. */
69#define VMDK_DDB_PARENT_UUID "ddb.uuid.parent"
70
71/** VMDK descriptor DDB entry for parent image modification UUID. */
72#define VMDK_DDB_PARENT_MODIFICATION_UUID "ddb.uuid.parentmodification"
73
74/** No compression for streamOptimized files. */
75#define VMDK_COMPRESSION_NONE 0
76
77/** Deflate compression for streamOptimized files. */
78#define VMDK_COMPRESSION_DEFLATE 1
79
80/** Marker that the actual GD value is stored in the footer. */
81#define VMDK_GD_AT_END 0xffffffffffffffffULL
82
83/** Marker for end-of-stream in streamOptimized images. */
84#define VMDK_MARKER_EOS 0
85
86/** Marker for grain table block in streamOptimized images. */
87#define VMDK_MARKER_GT 1
88
89/** Marker for grain directory block in streamOptimized images. */
90#define VMDK_MARKER_GD 2
91
92/** Marker for footer in streamOptimized images. */
93#define VMDK_MARKER_FOOTER 3
94
95/** Dummy marker for "don't check the marker value". */
96#define VMDK_MARKER_IGNORE 0xffffffffU
97
98/**
99 * Magic number for hosted images created by VMware Workstation 4, VMware
100 * Workstation 5, VMware Server or VMware Player. Not necessarily sparse.
101 */
102#define VMDK_SPARSE_MAGICNUMBER 0x564d444b /* 'V' 'M' 'D' 'K' */
103
104/**
105 * VMDK hosted binary extent header. The "Sparse" is a total misnomer, as
106 * this header is also used for monolithic flat images.
107 */
108#pragma pack(1)
109typedef struct SparseExtentHeader
110{
111 uint32_t magicNumber;
112 uint32_t version;
113 uint32_t flags;
114 uint64_t capacity;
115 uint64_t grainSize;
116 uint64_t descriptorOffset;
117 uint64_t descriptorSize;
118 uint32_t numGTEsPerGT;
119 uint64_t rgdOffset;
120 uint64_t gdOffset;
121 uint64_t overHead;
122 bool uncleanShutdown;
123 char singleEndLineChar;
124 char nonEndLineChar;
125 char doubleEndLineChar1;
126 char doubleEndLineChar2;
127 uint16_t compressAlgorithm;
128 uint8_t pad[433];
129} SparseExtentHeader;
130#pragma pack()
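/* Note: with the 1-byte packing above this header occupies exactly 512 bytes
 * (one sector); the 433-byte pad field is what rounds it up to a full sector. */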
131
132/** VMDK capacity for a single chunk when 2G splitting is turned on. Should be
133 * divisible by the default grain size (64K) */
134#define VMDK_2G_SPLIT_SIZE (2047 * 1024 * 1024)
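/* Sanity note: 2047 * 1024 * 1024 bytes is 32752 grains of 64K, so the split
 * size above is indeed a whole multiple of the default grain size. */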
135
136/** VMDK streamOptimized file format marker. The type field may or may not
137 * actually be valid, but there's always data to read there. */
138#pragma pack(1)
139typedef struct VMDKMARKER
140{
141 uint64_t uSector;
142 uint32_t cbSize;
143 uint32_t uType;
144} VMDKMARKER;
145#pragma pack()
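/* Note: sizeof(VMDKMARKER) is 16 bytes, but compressed grains are preceded on
 * disk only by the first 12 bytes (uSector + cbSize); the inflate/deflate code
 * below therefore reads or writes 12 bytes and treats uType as optional. */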
146
147
148#ifdef VBOX_WITH_VMDK_ESX
149
150/** @todo the ESX code is not tested, not used, and lacks error messages. */
151
152/**
153 * Magic number for images created by VMware GSX Server 3 or ESX Server 3.
154 */
155#define VMDK_ESX_SPARSE_MAGICNUMBER 0x44574f43 /* 'C' 'O' 'W' 'D' */
156
157#pragma pack(1)
158typedef struct COWDisk_Header
159{
160 uint32_t magicNumber;
161 uint32_t version;
162 uint32_t flags;
163 uint32_t numSectors;
164 uint32_t grainSize;
165 uint32_t gdOffset;
166 uint32_t numGDEntries;
167 uint32_t freeSector;
168 /* The spec incompletely documents quite a few further fields, but states
169 * that they are unused by the current format. Replace them by padding. */
170 char reserved1[1604];
171 uint32_t savedGeneration;
172 char reserved2[8];
173 uint32_t uncleanShutdown;
174 char padding[396];
175} COWDisk_Header;
176#pragma pack()
177#endif /* VBOX_WITH_VMDK_ESX */
178
179
180/** Convert sector number/size to byte offset/size. */
181#define VMDK_SECTOR2BYTE(u) ((uint64_t)(u) << 9)
182
183/** Convert byte offset/size to sector number/size. */
184#define VMDK_BYTE2SECTOR(u) ((u) >> 9)
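/* Example: VMDK_SECTOR2BYTE(1) == 512 and VMDK_BYTE2SECTOR(1048576) == 2048;
 * both macros assume the fixed 512-byte sector size used throughout VMDK. */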
185
186/**
187 * VMDK extent type.
188 */
189typedef enum VMDKETYPE
190{
191 /** Hosted sparse extent. */
192 VMDKETYPE_HOSTED_SPARSE = 1,
193 /** Flat extent. */
194 VMDKETYPE_FLAT,
195 /** Zero extent. */
196 VMDKETYPE_ZERO,
197 /** VMFS extent, used by ESX. */
198 VMDKETYPE_VMFS
199#ifdef VBOX_WITH_VMDK_ESX
200 ,
201 /** ESX sparse extent. */
202 VMDKETYPE_ESX_SPARSE
203#endif /* VBOX_WITH_VMDK_ESX */
204} VMDKETYPE, *PVMDKETYPE;
205
206/**
207 * VMDK access type for an extent.
208 */
209typedef enum VMDKACCESS
210{
211 /** No access allowed. */
212 VMDKACCESS_NOACCESS = 0,
213 /** Read-only access. */
214 VMDKACCESS_READONLY,
215 /** Read-write access. */
216 VMDKACCESS_READWRITE
217} VMDKACCESS, *PVMDKACCESS;
218
219/** Forward declaration for PVMDKIMAGE. */
220typedef struct VMDKIMAGE *PVMDKIMAGE;
221
222/**
223 * Extent file entry. Used for opening a particular file only once.
224 */
225typedef struct VMDKFILE
226{
227 /** Pointer to filename. Local copy. */
228 const char *pszFilename;
229 /** File open flags for consistency checking. */
230 unsigned fOpen;
231 /** File handle. */
232 RTFILE File;
233 /** Handle for asynchronous access if requested. */
234 PVDIOSTORAGE pStorage;
235 /** Flag whether to use File or pStorage. */
236 bool fAsyncIO;
237 /** Reference counter. */
238 unsigned uReferences;
239 /** Flag whether the file should be deleted on last close. */
240 bool fDelete;
241 /** Pointer to the image we belong to. */
242 PVMDKIMAGE pImage;
243 /** Pointer to next file descriptor. */
244 struct VMDKFILE *pNext;
245 /** Pointer to the previous file descriptor. */
246 struct VMDKFILE *pPrev;
247} VMDKFILE, *PVMDKFILE;
248
249/**
250 * VMDK extent data structure.
251 */
252typedef struct VMDKEXTENT
253{
254 /** File handle. */
255 PVMDKFILE pFile;
256 /** Base name of the image extent. */
257 const char *pszBasename;
258 /** Full name of the image extent. */
259 const char *pszFullname;
260 /** Number of sectors in this extent. */
261 uint64_t cSectors;
262 /** Number of sectors per block (grain in VMDK speak). */
263 uint64_t cSectorsPerGrain;
264 /** Starting sector number of descriptor. */
265 uint64_t uDescriptorSector;
266 /** Size of descriptor in sectors. */
267 uint64_t cDescriptorSectors;
268 /** Starting sector number of grain directory. */
269 uint64_t uSectorGD;
270 /** Starting sector number of redundant grain directory. */
271 uint64_t uSectorRGD;
272 /** Total number of metadata sectors. */
273 uint64_t cOverheadSectors;
274 /** Nominal size (i.e. as described by the descriptor) of this extent. */
275 uint64_t cNominalSectors;
276 /** Sector offset (i.e. as described by the descriptor) of this extent. */
277 uint64_t uSectorOffset;
278 /** Number of entries in a grain table. */
279 uint32_t cGTEntries;
280 /** Number of sectors reachable via a grain directory entry. */
281 uint32_t cSectorsPerGDE;
282 /** Number of entries in the grain directory. */
283 uint32_t cGDEntries;
284 /** Pointer to the next free sector. Legacy information. Do not use. */
285 uint32_t uFreeSector;
286 /** Number of this extent in the list of images. */
287 uint32_t uExtent;
288 /** Pointer to the descriptor (NULL if no descriptor in this extent). */
289 char *pDescData;
290 /** Pointer to the grain directory. */
291 uint32_t *pGD;
292 /** Pointer to the redundant grain directory. */
293 uint32_t *pRGD;
294 /** VMDK version of this extent. 1=1.0/1.1 */
295 uint32_t uVersion;
296 /** Type of this extent. */
297 VMDKETYPE enmType;
298 /** Access to this extent. */
299 VMDKACCESS enmAccess;
300 /** Flag whether this extent is marked as unclean. */
301 bool fUncleanShutdown;
302 /** Flag whether the metadata in the extent header needs to be updated. */
303 bool fMetaDirty;
304 /** Flag whether there is a footer in this extent. */
305 bool fFooter;
306 /** Compression type for this extent. */
307 uint16_t uCompression;
308 /** Last grain which has been written to. Only for streamOptimized extents. */
309 uint32_t uLastGrainWritten;
310 /** Sector number of last grain which has been written to. Only for
311 * streamOptimized extents. */
312 uint32_t uLastGrainSector;
313 /** Data size of last grain which has been written to. Only for
314 * streamOptimized extents. */
315 uint32_t cbLastGrainWritten;
316 /** Starting sector of the decompressed grain buffer. */
317 uint32_t uGrainSector;
318 /** Decompressed grain buffer for streamOptimized extents. */
319 void *pvGrain;
320 /** Reference to the image in which this extent is used. Do not use this
321 * on a regular basis to avoid passing pImage references to functions
322 * explicitly. */
323 struct VMDKIMAGE *pImage;
324} VMDKEXTENT, *PVMDKEXTENT;
325
326/**
327 * Grain table cache size. Allocated per image.
328 */
329#define VMDK_GT_CACHE_SIZE 256
330
331/**
332 * Grain table block size. Smaller than an actual grain table block to allow
333 * more grain table blocks to be cached without having to allocate excessive
334 * amounts of memory for the cache.
335 */
336#define VMDK_GT_CACHELINE_SIZE 128
337
338
339/**
340 * Maximum number of lines in a descriptor file. Not worth the effort of
341 * making it variable. Descriptor files are generally very short (~20 lines),
342 * with the exception of sparse files split in 2G chunks, which at the
343 * maximum size (almost 2T) need exactly 1025 lines for the disk database.
344 */
345#define VMDK_DESCRIPTOR_LINES_MAX 1100U
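/* Rough arithmetic behind the limit above: an almost 2T image split into
 * 2047MB chunks needs about 1025 additional lines (one per chunk), which
 * still leaves some headroom below the 1100 line limit. */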
346
347/**
348 * Parsed descriptor information. Allows easy access and update of the
349 * descriptor (whether separate file or not). Free form text files suck.
350 */
351typedef struct VMDKDESCRIPTOR
352{
353 /** Line number of first entry of the disk descriptor. */
354 unsigned uFirstDesc;
355 /** Line number of first entry in the extent description. */
356 unsigned uFirstExtent;
357 /** Line number of first disk database entry. */
358 unsigned uFirstDDB;
359 /** Total number of lines. */
360 unsigned cLines;
361 /** Total amount of memory available for the descriptor. */
362 size_t cbDescAlloc;
363 /** Set if descriptor has been changed and not yet written to disk. */
364 bool fDirty;
365 /** Array of pointers to the data in the descriptor. */
366 char *aLines[VMDK_DESCRIPTOR_LINES_MAX];
367 /** Array of line indices pointing to the next non-comment line. */
368 unsigned aNextLines[VMDK_DESCRIPTOR_LINES_MAX];
369} VMDKDESCRIPTOR, *PVMDKDESCRIPTOR;
370
371
372/**
373 * Cache entry for translating extent/sector to a sector number in that
374 * extent.
375 */
376typedef struct VMDKGTCACHEENTRY
377{
378 /** Extent number for which this entry is valid. */
379 uint32_t uExtent;
380 /** GT data block number. */
381 uint64_t uGTBlock;
382 /** Data part of the cache entry. */
383 uint32_t aGTData[VMDK_GT_CACHELINE_SIZE];
384} VMDKGTCACHEENTRY, *PVMDKGTCACHEENTRY;
385
386/**
387 * Cache data structure for blocks of grain table entries. For now this is a
388 * fixed-size direct-mapped cache, but this should be adapted to the size of
389 * the sparse image and maybe converted to a set-associative cache. The
390 * implementation below implements a write-through cache with write allocate.
391 */
392typedef struct VMDKGTCACHE
393{
394 /** Cache entries. */
395 VMDKGTCACHEENTRY aGTCache[VMDK_GT_CACHE_SIZE];
396 /** Number of cache entries (currently unused). */
397 unsigned cEntries;
398} VMDKGTCACHE, *PVMDKGTCACHE;
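/* Illustrative lookup (assumption - the real hash is implemented further down
 * in this file): a request for (uExtent, uGTBlock) maps to a single cache slot,
 * e.g. uGTBlock % VMDK_GT_CACHE_SIZE, and the cached data is only used if the
 * stored uExtent/uGTBlock fields match; updates go straight through to disk. */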
399
400/**
401 * Complete VMDK image data structure. Mainly a collection of extents and a few
402 * extra global data fields.
403 */
404typedef struct VMDKIMAGE
405{
406 /** Pointer to the image extents. */
407 PVMDKEXTENT pExtents;
408 /** Number of image extents. */
409 unsigned cExtents;
410 /** Pointer to the files list, for opening a file referenced multiple
411 * times only once (happens mainly with raw partition access). */
412 PVMDKFILE pFiles;
413
414 /** Base image name. */
415 const char *pszFilename;
416 /** Descriptor file if applicable. */
417 PVMDKFILE pFile;
418
419 /** Pointer to the per-disk VD interface list. */
420 PVDINTERFACE pVDIfsDisk;
421 /** Pointer to the per-image VD interface list. */
422 PVDINTERFACE pVDIfsImage;
423
424 /** Error interface. */
425 PVDINTERFACE pInterfaceError;
426 /** Error interface callbacks. */
427 PVDINTERFACEERROR pInterfaceErrorCallbacks;
428
429 /** I/O interface. */
430 PVDINTERFACE pInterfaceIO;
431 /** I/O interface callbacks. */
432 PVDINTERFACEIO pInterfaceIOCallbacks;
433 /**
434 * Pointer to an array of segment entries for async I/O.
435 * This is an optimization because the number of tasks to submit is not known
436 * and allocating/freeing an array in the read/write functions every time
437 * is too expensive.
438 */
439 PPDMDATASEG paSegments;
440 /** Entries available in the segments array. */
441 unsigned cSegments;
442
443 /** Open flags passed by VBoxHD layer. */
444 unsigned uOpenFlags;
445 /** Image flags defined during creation or determined during open. */
446 unsigned uImageFlags;
447 /** Total size of the image. */
448 uint64_t cbSize;
449 /** Physical geometry of this image. */
450 PDMMEDIAGEOMETRY PCHSGeometry;
451 /** Logical geometry of this image. */
452 PDMMEDIAGEOMETRY LCHSGeometry;
453 /** Image UUID. */
454 RTUUID ImageUuid;
455 /** Image modification UUID. */
456 RTUUID ModificationUuid;
457 /** Parent image UUID. */
458 RTUUID ParentUuid;
459 /** Parent image modification UUID. */
460 RTUUID ParentModificationUuid;
461
462 /** Pointer to grain table cache, if this image contains sparse extents. */
463 PVMDKGTCACHE pGTCache;
464 /** Pointer to the descriptor (NULL if no separate descriptor file). */
465 char *pDescData;
466 /** Allocation size of the descriptor file. */
467 size_t cbDescAlloc;
468 /** Parsed descriptor file content. */
469 VMDKDESCRIPTOR Descriptor;
470} VMDKIMAGE;
471
472
473/** State for the input callout of the inflate reader. */
474typedef struct VMDKINFLATESTATE
475{
476 /* File where the data is stored. */
477 PVMDKFILE File;
478 /* Total size of the data to read. */
479 size_t cbSize;
480 /* Offset in the file to read. */
481 uint64_t uFileOffset;
482 /* Current read position. */
483 ssize_t iOffset;
484} VMDKINFLATESTATE;
485
486/** State for the output callout of the deflate writer. */
487typedef struct VMDKDEFLATESTATE
488{
489 /* File where the data is to be stored. */
490 PVMDKFILE File;
491 /* Offset in the file to write at. */
492 uint64_t uFileOffset;
493 /* Current write position. */
494 ssize_t iOffset;
495} VMDKDEFLATESTATE;
496
497/*******************************************************************************
498 * Static Variables *
499 *******************************************************************************/
500
501/** NULL-terminated array of supported file extensions. */
502static const char *const s_apszVmdkFileExtensions[] =
503{
504 "vmdk",
505 NULL
506};
507
508/*******************************************************************************
509* Internal Functions *
510*******************************************************************************/
511
512static void vmdkFreeGrainDirectory(PVMDKEXTENT pExtent);
513
514static void vmdkFreeExtentData(PVMDKIMAGE pImage, PVMDKEXTENT pExtent,
515 bool fDelete);
516
517static int vmdkCreateExtents(PVMDKIMAGE pImage, unsigned cExtents);
518static int vmdkFlushImage(PVMDKIMAGE pImage);
519static int vmdkSetImageComment(PVMDKIMAGE pImage, const char *pszComment);
520static void vmdkFreeImage(PVMDKIMAGE pImage, bool fDelete);
521
522
523/**
524 * Internal: signal an error to the frontend.
525 */
526DECLINLINE(int) vmdkError(PVMDKIMAGE pImage, int rc, RT_SRC_POS_DECL,
527 const char *pszFormat, ...)
528{
529 va_list va;
530 va_start(va, pszFormat);
531 if (pImage->pInterfaceError && pImage->pInterfaceErrorCallbacks)
532 pImage->pInterfaceErrorCallbacks->pfnError(pImage->pInterfaceError->pvUser, rc, RT_SRC_POS_ARGS,
533 pszFormat, va);
534 va_end(va);
535 return rc;
536}
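/* Typical use, as seen throughout this file:
 *   rc = vmdkError(pImage, VERR_VD_VMDK_INVALID_HEADER, RT_SRC_POS,
 *                  N_("VMDK: inconsistent references to grain directory in '%s'"),
 *                  pExtent->pszFullname);
 * The formatted message is forwarded to the error interface (if one is set)
 * and the status code is simply returned to the caller. */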
537
538/**
539 * Internal: open a file (using a file descriptor cache to ensure each file
540 * is only opened once - anything else can cause locking problems).
541 */
542static int vmdkFileOpen(PVMDKIMAGE pImage, PVMDKFILE *ppVmdkFile,
543 const char *pszFilename, unsigned fOpen, bool fAsyncIO)
544{
545 int rc = VINF_SUCCESS;
546 PVMDKFILE pVmdkFile;
547
548 for (pVmdkFile = pImage->pFiles;
549 pVmdkFile != NULL;
550 pVmdkFile = pVmdkFile->pNext)
551 {
552 if (!strcmp(pszFilename, pVmdkFile->pszFilename))
553 {
554 Assert(fOpen == pVmdkFile->fOpen);
555 pVmdkFile->uReferences++;
556
557 *ppVmdkFile = pVmdkFile;
558
559 return rc;
560 }
561 }
562
563 /* If we get here, there's no matching entry in the cache. */
564 pVmdkFile = (PVMDKFILE)RTMemAllocZ(sizeof(VMDKFILE));
565 if (!VALID_PTR(pVmdkFile))
566 {
567 *ppVmdkFile = NULL;
568 return VERR_NO_MEMORY;
569 }
570
571 pVmdkFile->pszFilename = RTStrDup(pszFilename);
572 if (!VALID_PTR(pVmdkFile->pszFilename))
573 {
574 RTMemFree(pVmdkFile);
575 *ppVmdkFile = NULL;
576 return VERR_NO_MEMORY;
577 }
578 pVmdkFile->fOpen = fOpen;
579
580#ifndef VBOX_WITH_NEW_IO_CODE
581 if ((pImage->uOpenFlags & VD_OPEN_FLAGS_ASYNC_IO) && (fAsyncIO))
582 {
583 rc = pImage->pInterfaceIOCallbacks->pfnOpen(pImage->pInterfaceIO->pvUser,
584 pszFilename,
585 pImage->uOpenFlags & VD_OPEN_FLAGS_READONLY
586 ? VD_INTERFACEASYNCIO_OPEN_FLAGS_READONLY
587 : 0,
588 NULL,
589 pImage->pVDIfsDisk,
590 &pVmdkFile->pStorage);
591 pVmdkFile->fAsyncIO = true;
592 }
593 else
594 {
595 rc = RTFileOpen(&pVmdkFile->File, pszFilename, fOpen);
596 pVmdkFile->fAsyncIO = false;
597 }
598#else
599 unsigned uOpenFlags = 0;
600
601 if ((fOpen & RTFILE_O_ACCESS_MASK) == RTFILE_O_READ)
602 uOpenFlags |= VD_INTERFACEASYNCIO_OPEN_FLAGS_READONLY;
603 if ((fOpen & RTFILE_O_ACTION_MASK) == RTFILE_O_CREATE)
604 uOpenFlags |= VD_INTERFACEASYNCIO_OPEN_FLAGS_CREATE;
605
606 rc = pImage->pInterfaceIOCallbacks->pfnOpen(pImage->pInterfaceIO->pvUser,
607 pszFilename,
608 uOpenFlags,
609 &pVmdkFile->pStorage);
610#endif
611 if (RT_SUCCESS(rc))
612 {
613 pVmdkFile->uReferences = 1;
614 pVmdkFile->pImage = pImage;
615 pVmdkFile->pNext = pImage->pFiles;
616 if (pImage->pFiles)
617 pImage->pFiles->pPrev = pVmdkFile;
618 pImage->pFiles = pVmdkFile;
619 *ppVmdkFile = pVmdkFile;
620 }
621 else
622 {
623 RTStrFree((char *)(void *)pVmdkFile->pszFilename);
624 RTMemFree(pVmdkFile);
625 *ppVmdkFile = NULL;
626 }
627
628 return rc;
629}
630
631/**
632 * Internal: close a file, updating the file descriptor cache.
633 */
634static int vmdkFileClose(PVMDKIMAGE pImage, PVMDKFILE *ppVmdkFile, bool fDelete)
635{
636 int rc = VINF_SUCCESS;
637 PVMDKFILE pVmdkFile = *ppVmdkFile;
638
639 AssertPtr(pVmdkFile);
640
641 pVmdkFile->fDelete |= fDelete;
642 Assert(pVmdkFile->uReferences);
643 pVmdkFile->uReferences--;
644 if (pVmdkFile->uReferences == 0)
645 {
646 PVMDKFILE pPrev;
647 PVMDKFILE pNext;
648
649 /* Unchain the element from the list. */
650 pPrev = pVmdkFile->pPrev;
651 pNext = pVmdkFile->pNext;
652
653 if (pNext)
654 pNext->pPrev = pPrev;
655 if (pPrev)
656 pPrev->pNext = pNext;
657 else
658 pImage->pFiles = pNext;
659
660#ifndef VBOX_WITH_NEW_IO_CODE
661 if (pVmdkFile->fAsyncIO)
662 {
663 rc = pImage->pInterfaceIOCallbacks->pfnClose(pImage->pInterfaceIO->pvUser,
664 pVmdkFile->pStorage);
665 }
666 else
667 {
668 rc = RTFileClose(pVmdkFile->File);
669 }
670#else
671 rc = pImage->pInterfaceIOCallbacks->pfnClose(pImage->pInterfaceIO->pvUser,
672 pVmdkFile->pStorage);
673#endif
674 if (RT_SUCCESS(rc) && pVmdkFile->fDelete)
675 rc = RTFileDelete(pVmdkFile->pszFilename);
676 RTStrFree((char *)(void *)pVmdkFile->pszFilename);
677 RTMemFree(pVmdkFile);
678 }
679
680 *ppVmdkFile = NULL;
681 return rc;
682}
683
684/**
685 * Internal: read from a file distinguishing between async and normal operation
686 */
687DECLINLINE(int) vmdkFileReadAt(PVMDKFILE pVmdkFile,
688 uint64_t uOffset, void *pvBuf,
689 size_t cbToRead, size_t *pcbRead)
690{
691 PVMDKIMAGE pImage = pVmdkFile->pImage;
692
693#ifndef VBOX_WITH_NEW_IO_CODE
694 if (pVmdkFile->fAsyncIO)
695 return pImage->pInterfaceIOCallbacks->pfnReadSync(pImage->pInterfaceIO->pvUser,
696 pVmdkFile->pStorage, uOffset,
697 cbToRead, pvBuf, pcbRead);
698 else
699 return RTFileReadAt(pVmdkFile->File, uOffset, pvBuf, cbToRead, pcbRead);
700#else
701 return pImage->pInterfaceIOCallbacks->pfnReadSync(pImage->pInterfaceIO->pvUser,
702 pVmdkFile->pStorage, uOffset,
703 cbToRead, pvBuf, pcbRead);
704#endif
705}
706
707/**
708 * Internal: write to a file distinguishing between async and normal operation
709 */
710DECLINLINE(int) vmdkFileWriteAt(PVMDKFILE pVmdkFile,
711 uint64_t uOffset, const void *pvBuf,
712 size_t cbToWrite, size_t *pcbWritten)
713{
714 PVMDKIMAGE pImage = pVmdkFile->pImage;
715
716#ifndef VBOX_WITH_NEW_IO_CODE
717 if (pVmdkFile->fAsyncIO)
718 return pImage->pInterfaceIOCallbacks->pfnWriteSync(pImage->pInterfaceIO->pvUser,
719 pVmdkFile->pStorage, uOffset,
720 cbToWrite, pvBuf, pcbWritten);
721 else
722 return RTFileWriteAt(pVmdkFile->File, uOffset, pvBuf, cbToWrite, pcbWritten);
723#else
724 return pImage->pInterfaceIOCallbacks->pfnWriteSync(pImage->pInterfaceIO->pvUser,
725 pVmdkFile->pStorage, uOffset,
726 cbToWrite, pvBuf, pcbWritten);
727#endif
728}
729
730/**
731 * Internal: get the size of a file distinguishing between async and normal operation
732 */
733DECLINLINE(int) vmdkFileGetSize(PVMDKFILE pVmdkFile, uint64_t *pcbSize)
734{
735 PVMDKIMAGE pImage = pVmdkFile->pImage;
736
737#ifndef VBOX_WITH_NEW_IO_CODE
738 if (pVmdkFile->fAsyncIO)
739 {
740 return pImage->pInterfaceIOCallbacks->pfnGetSize(pImage->pInterfaceIO->pvUser,
741 pVmdkFile->pStorage,
742 pcbSize);
743 }
744 else
745 return RTFileGetSize(pVmdkFile->File, pcbSize);
746#else
747 return pImage->pInterfaceIOCallbacks->pfnGetSize(pImage->pInterfaceIO->pvUser,
748 pVmdkFile->pStorage,
749 pcbSize);
750#endif
751}
752
753/**
754 * Internal: set the size of a file distinguishing between async and normal operation
755 */
756DECLINLINE(int) vmdkFileSetSize(PVMDKFILE pVmdkFile, uint64_t cbSize)
757{
758 PVMDKIMAGE pImage = pVmdkFile->pImage;
759
760#ifndef VBOX_WITH_NEW_IO_CODE
761 if (pVmdkFile->fAsyncIO)
762 {
763 return pImage->pInterfaceIOCallbacks->pfnSetSize(pImage->pInterfaceIO->pvUser,
764 pVmdkFile->pStorage,
765 cbSize);
766 }
767 else
768 return RTFileSetSize(pVmdkFile->File, cbSize);
769#else
770 return pImage->pInterfaceIOCallbacks->pfnSetSize(pImage->pInterfaceIO->pvUser,
771 pVmdkFile->pStorage,
772 cbSize);
773#endif
774}
775
776/**
777 * Internal: flush a file distinguishing between async and normal operation
778 */
779DECLINLINE(int) vmdkFileFlush(PVMDKFILE pVmdkFile)
780{
781 PVMDKIMAGE pImage = pVmdkFile->pImage;
782
783#ifndef VBOX_WITH_NEW_IO_CODE
784 if (pVmdkFile->fAsyncIO)
785 return pImage->pInterfaceIOCallbacks->pfnFlushSync(pImage->pInterfaceIO->pvUser,
786 pVmdkFile->pStorage);
787 else
788 return RTFileFlush(pVmdkFile->File);
789#else
790 return pImage->pInterfaceIOCallbacks->pfnFlushSync(pImage->pInterfaceIO->pvUser,
791 pVmdkFile->pStorage);
792#endif
793}
794
795
796DECLINLINE(int) vmdkFileFlushAsync(PVMDKFILE pVmdkFile, PVDIOCTX pIoCtx)
797{
798 PVMDKIMAGE pImage = pVmdkFile->pImage;
799
800 return pImage->pInterfaceIOCallbacks->pfnFlushAsync(pImage->pInterfaceIO->pvUser,
801 pVmdkFile->pStorage, pIoCtx);
802}
803
804
805static DECLCALLBACK(int) vmdkFileInflateHelper(void *pvUser, void *pvBuf, size_t cbBuf, size_t *pcbBuf)
806{
807 VMDKINFLATESTATE *pInflateState = (VMDKINFLATESTATE *)pvUser;
808
809 Assert(cbBuf);
810 if (pInflateState->iOffset < 0)
811 {
812 *(uint8_t *)pvBuf = RTZIPTYPE_ZLIB;
813 if (pcbBuf)
814 *pcbBuf = 1;
815 pInflateState->iOffset = 0;
816 return VINF_SUCCESS;
817 }
818 cbBuf = RT_MIN(cbBuf, pInflateState->cbSize);
819 int rc = vmdkFileReadAt(pInflateState->File, pInflateState->uFileOffset, pvBuf, cbBuf, NULL);
820 if (RT_FAILURE(rc))
821 return rc;
822 pInflateState->uFileOffset += cbBuf;
823 pInflateState->iOffset += cbBuf;
824 pInflateState->cbSize -= cbBuf;
825 Assert(pcbBuf);
826 *pcbBuf = cbBuf;
827 return VINF_SUCCESS;
828}
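/* Note on the iOffset < 0 special case above: IPRT zip streams start with a
 * type byte which is not stored on disk for VMDK grains, so the very first
 * read request is answered with an injected RTZIPTYPE_ZLIB byte before any
 * file data is handed out. The deflate helper below does the inverse. */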
829
830/**
831 * Internal: read from a file and inflate the compressed data,
832 * distinguishing between async and normal operation
833 */
834DECLINLINE(int) vmdkFileInflateAt(PVMDKFILE pVmdkFile,
835 uint64_t uOffset, void *pvBuf,
836 size_t cbToRead, unsigned uMarker,
837 uint64_t *puLBA, uint32_t *pcbMarkerData)
838{
839 if (pVmdkFile->fAsyncIO)
840 {
841 AssertMsgFailed(("TODO\n"));
842 return VERR_NOT_SUPPORTED;
843 }
844 else
845 {
846 int rc;
847 PRTZIPDECOMP pZip = NULL;
848 VMDKMARKER Marker;
849 uint64_t uCompOffset, cbComp;
850 VMDKINFLATESTATE InflateState;
851 size_t cbActuallyRead;
852 size_t cbMarker = sizeof(Marker);
853
854 if (uMarker == VMDK_MARKER_IGNORE)
855 cbMarker -= sizeof(Marker.uType);
856 rc = vmdkFileReadAt(pVmdkFile, uOffset, &Marker, cbMarker, NULL);
857 if (RT_FAILURE(rc))
858 return rc;
859 Marker.uSector = RT_LE2H_U64(Marker.uSector);
860 Marker.cbSize = RT_LE2H_U32(Marker.cbSize);
861 if ( uMarker != VMDK_MARKER_IGNORE
862 && ( RT_LE2H_U32(Marker.uType) != uMarker
863 || Marker.cbSize != 0))
864 return VERR_VD_VMDK_INVALID_FORMAT;
865 if (Marker.cbSize != 0)
866 {
867 /* Compressed grain marker. Data follows immediately. */
868 uCompOffset = uOffset + 12;
869 cbComp = Marker.cbSize;
870 if (puLBA)
871 *puLBA = Marker.uSector;
872 if (pcbMarkerData)
873 *pcbMarkerData = cbComp + 12;
874 }
875 else
876 {
877 Marker.uType = RT_LE2H_U32(Marker.uType);
878 if (Marker.uType == VMDK_MARKER_EOS)
879 {
880 Assert(uMarker != VMDK_MARKER_EOS);
881 return VERR_VD_VMDK_INVALID_FORMAT;
882 }
883 else if ( Marker.uType == VMDK_MARKER_GT
884 || Marker.uType == VMDK_MARKER_GD
885 || Marker.uType == VMDK_MARKER_FOOTER)
886 {
887 uCompOffset = uOffset + 512;
888 cbComp = VMDK_SECTOR2BYTE(Marker.uSector);
889 if (pcbMarkerData)
890 *pcbMarkerData = cbComp + 512;
891 }
892 else
893 {
894 AssertMsgFailed(("VMDK: unknown marker type %u\n", Marker.uType));
895 return VERR_VD_VMDK_INVALID_FORMAT;
896 }
897 }
898 InflateState.File = pVmdkFile;
899 InflateState.cbSize = cbComp;
900 InflateState.uFileOffset = uCompOffset;
901 InflateState.iOffset = -1;
902 /* Sanity check - the expansion ratio should be much less than 2. */
903 Assert(cbComp < 2 * cbToRead);
904 if (cbComp >= 2 * cbToRead)
905 return VERR_VD_VMDK_INVALID_FORMAT;
906
907 rc = RTZipDecompCreate(&pZip, &InflateState, vmdkFileInflateHelper);
908 if (RT_FAILURE(rc))
909 return rc;
910 rc = RTZipDecompress(pZip, pvBuf, cbToRead, &cbActuallyRead);
911 RTZipDecompDestroy(pZip);
912 if (RT_FAILURE(rc))
913 return rc;
914 if (cbActuallyRead != cbToRead)
915 rc = VERR_VD_VMDK_INVALID_FORMAT;
916 return rc;
917 }
918}
919
920static DECLCALLBACK(int) vmdkFileDeflateHelper(void *pvUser, const void *pvBuf, size_t cbBuf)
921{
922 VMDKDEFLATESTATE *pDeflateState = (VMDKDEFLATESTATE *)pvUser;
923
924 Assert(cbBuf);
925 if (pDeflateState->iOffset < 0)
926 {
927 pvBuf = (const uint8_t *)pvBuf + 1;
928 cbBuf--;
929 pDeflateState->iOffset = 0;
930 }
931 if (!cbBuf)
932 return VINF_SUCCESS;
933 int rc = vmdkFileWriteAt(pDeflateState->File, pDeflateState->uFileOffset, pvBuf, cbBuf, NULL);
934 if (RT_FAILURE(rc))
935 return rc;
936 pDeflateState->uFileOffset += cbBuf;
937 pDeflateState->iOffset += cbBuf;
938 return VINF_SUCCESS;
939}
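/* Counterpart note: the first write call drops the leading type byte emitted
 * by the compressor, so only the compressed data itself (preceded by the
 * 12-byte grain marker written by the caller) ends up in the image file. */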
940
941/**
942 * Internal: deflate the uncompressed data and write to a file,
943 * distinguishing between async and normal operation
944 */
945DECLINLINE(int) vmdkFileDeflateAt(PVMDKFILE pVmdkFile,
946 uint64_t uOffset, const void *pvBuf,
947 size_t cbToWrite, unsigned uMarker,
948 uint64_t uLBA, uint32_t *pcbMarkerData)
949{
950 if (pVmdkFile->fAsyncIO)
951 {
952 AssertMsgFailed(("TODO\n"));
953 return VERR_NOT_SUPPORTED;
954 }
955 else
956 {
957 int rc;
958 PRTZIPCOMP pZip = NULL;
959 VMDKMARKER Marker;
960 uint64_t uCompOffset, cbDecomp;
961 VMDKDEFLATESTATE DeflateState;
962
963 Marker.uSector = RT_H2LE_U64(uLBA);
964 Marker.cbSize = RT_H2LE_U32((uint32_t)cbToWrite);
965 if (uMarker == VMDK_MARKER_IGNORE)
966 {
967 /* Compressed grain marker. Data follows immediately. */
968 uCompOffset = uOffset + 12;
969 cbDecomp = cbToWrite;
970 }
971 else
972 {
973 /** @todo implement creating the other marker types */
974 return VERR_NOT_IMPLEMENTED;
975 }
976 DeflateState.File = pVmdkFile;
977 DeflateState.uFileOffset = uCompOffset;
978 DeflateState.iOffset = -1;
979
980 rc = RTZipCompCreate(&pZip, &DeflateState, vmdkFileDeflateHelper, RTZIPTYPE_ZLIB, RTZIPLEVEL_DEFAULT);
981 if (RT_FAILURE(rc))
982 return rc;
983 rc = RTZipCompress(pZip, pvBuf, cbDecomp);
984 if (RT_SUCCESS(rc))
985 rc = RTZipCompFinish(pZip);
986 RTZipCompDestroy(pZip);
987 if (RT_SUCCESS(rc))
988 {
989 if (pcbMarkerData)
990 *pcbMarkerData = 12 + DeflateState.iOffset;
991 /* Set the file size to remove old garbage in case the block is
992 * rewritten. Cannot cause data loss as the code calling this
993 * guarantees that data only gets appended. */
994 Assert(DeflateState.uFileOffset > uCompOffset);
995 rc = vmdkFileSetSize(pVmdkFile, DeflateState.uFileOffset);
996
997 if (uMarker == VMDK_MARKER_IGNORE)
998 {
999 /* Compressed grain marker. */
1000 Marker.cbSize = RT_H2LE_U32(DeflateState.iOffset);
1001 rc = vmdkFileWriteAt(pVmdkFile, uOffset, &Marker, 12, NULL);
1002 if (RT_FAILURE(rc))
1003 return rc;
1004 }
1005 else
1006 {
1007 /** @todo implement creating the other marker types */
1008 return VERR_NOT_IMPLEMENTED;
1009 }
1010 }
1011 return rc;
1012 }
1013}
1014
1015/**
1016 * Internal: check if all files are closed, prevent leaking resources.
1017 */
1018static int vmdkFileCheckAllClose(PVMDKIMAGE pImage)
1019{
1020 int rc = VINF_SUCCESS, rc2;
1021 PVMDKFILE pVmdkFile;
1022
1023 Assert(pImage->pFiles == NULL);
1024 for (pVmdkFile = pImage->pFiles;
1025 pVmdkFile != NULL;
1026 pVmdkFile = pVmdkFile->pNext)
1027 {
1028 LogRel(("VMDK: leaking reference to file \"%s\"\n",
1029 pVmdkFile->pszFilename));
1030 pImage->pFiles = pVmdkFile->pNext;
1031
1032 if (pImage->uOpenFlags & VD_OPEN_FLAGS_ASYNC_IO)
1033 rc2 = pImage->pInterfaceIOCallbacks->pfnClose(pImage->pInterfaceIO->pvUser,
1034 pVmdkFile->pStorage);
1035 else
1036 rc2 = vmdkFileClose(pImage, &pVmdkFile, pVmdkFile->fDelete);
1037
1038 if (RT_SUCCESS(rc))
1039 rc = rc2;
1040 }
1041 return rc;
1042}
1043
1044/**
1045 * Internal: truncate a string (at a UTF-8 code point boundary) and encode the
1046 * critical non-ASCII characters.
1047 */
1048static char *vmdkEncodeString(const char *psz)
1049{
1050 char szEnc[VMDK_ENCODED_COMMENT_MAX + 3];
1051 char *pszDst = szEnc;
1052
1053 AssertPtr(psz);
1054
1055 for (; *psz; psz = RTStrNextCp(psz))
1056 {
1057 char *pszDstPrev = pszDst;
1058 RTUNICP Cp = RTStrGetCp(psz);
1059 if (Cp == '\\')
1060 {
1061 pszDst = RTStrPutCp(pszDst, Cp);
1062 pszDst = RTStrPutCp(pszDst, Cp);
1063 }
1064 else if (Cp == '\n')
1065 {
1066 pszDst = RTStrPutCp(pszDst, '\\');
1067 pszDst = RTStrPutCp(pszDst, 'n');
1068 }
1069 else if (Cp == '\r')
1070 {
1071 pszDst = RTStrPutCp(pszDst, '\\');
1072 pszDst = RTStrPutCp(pszDst, 'r');
1073 }
1074 else
1075 pszDst = RTStrPutCp(pszDst, Cp);
1076 if (pszDst - szEnc >= VMDK_ENCODED_COMMENT_MAX - 1)
1077 {
1078 pszDst = pszDstPrev;
1079 break;
1080 }
1081 }
1082 *pszDst = '\0';
1083 return RTStrDup(szEnc);
1084}
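/* Example: a comment containing a newline is stored in the descriptor with the
 * two-character sequence '\' 'n' instead, '\r' becomes '\' 'r', and a literal
 * backslash is doubled; vmdkDecodeString() below reverses this encoding. */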
1085
1086/**
1087 * Internal: decode a string and store it in the specified buffer.
1088 */
1089static int vmdkDecodeString(const char *pszEncoded, char *psz, size_t cb)
1090{
1091 int rc = VINF_SUCCESS;
1092 char szBuf[4];
1093
1094 if (!cb)
1095 return VERR_BUFFER_OVERFLOW;
1096
1097 AssertPtr(psz);
1098
1099 for (; *pszEncoded; pszEncoded = RTStrNextCp(pszEncoded))
1100 {
1101 char *pszDst = szBuf;
1102 RTUNICP Cp = RTStrGetCp(pszEncoded);
1103 if (Cp == '\\')
1104 {
1105 pszEncoded = RTStrNextCp(pszEncoded);
1106 RTUNICP CpQ = RTStrGetCp(pszEncoded);
1107 if (CpQ == 'n')
1108 RTStrPutCp(pszDst, '\n');
1109 else if (CpQ == 'r')
1110 RTStrPutCp(pszDst, '\r');
1111 else if (CpQ == '\0')
1112 {
1113 rc = VERR_VD_VMDK_INVALID_HEADER;
1114 break;
1115 }
1116 else
1117 RTStrPutCp(pszDst, CpQ);
1118 }
1119 else
1120 pszDst = RTStrPutCp(pszDst, Cp);
1121
1122 /* Need to leave space for terminating NUL. */
1123 if ((size_t)(pszDst - szBuf) + 1 >= cb)
1124 {
1125 rc = VERR_BUFFER_OVERFLOW;
1126 break;
1127 }
1128 memcpy(psz, szBuf, pszDst - szBuf);
1129 psz += pszDst - szBuf;
1130 }
1131 *psz = '\0';
1132 return rc;
1133}
1134
1135static int vmdkReadGrainDirectory(PVMDKEXTENT pExtent)
1136{
1137 int rc = VINF_SUCCESS;
1138 unsigned i;
1139 uint32_t *pGD = NULL, *pRGD = NULL, *pGDTmp, *pRGDTmp;
1140 size_t cbGD = pExtent->cGDEntries * sizeof(uint32_t);
1141
1142 if (pExtent->enmType != VMDKETYPE_HOSTED_SPARSE)
1143 goto out;
1144
1145 pGD = (uint32_t *)RTMemAllocZ(cbGD);
1146 if (!pGD)
1147 {
1148 rc = VERR_NO_MEMORY;
1149 goto out;
1150 }
1151 pExtent->pGD = pGD;
1152 /* The VMDK 1.1 spec talks about compressed grain directories, but real
1153 * life files don't have them. The spec is wrong in creative ways. */
1154 rc = vmdkFileReadAt(pExtent->pFile, VMDK_SECTOR2BYTE(pExtent->uSectorGD),
1155 pGD, cbGD, NULL);
1156 AssertRC(rc);
1157 if (RT_FAILURE(rc))
1158 {
1159 rc = vmdkError(pExtent->pImage, rc, RT_SRC_POS, N_("VMDK: could not read grain directory in '%s': %Rrc"), pExtent->pszFullname);
1160 goto out;
1161 }
1162 for (i = 0, pGDTmp = pGD; i < pExtent->cGDEntries; i++, pGDTmp++)
1163 *pGDTmp = RT_LE2H_U32(*pGDTmp);
1164
1165 if (pExtent->uSectorRGD)
1166 {
1167 pRGD = (uint32_t *)RTMemAllocZ(cbGD);
1168 if (!pRGD)
1169 {
1170 rc = VERR_NO_MEMORY;
1171 goto out;
1172 }
1173 pExtent->pRGD = pRGD;
1174 /* The VMDK 1.1 spec talks about compressed grain directories, but real
1175 * life files don't have them. The spec is wrong in creative ways. */
1176 rc = vmdkFileReadAt(pExtent->pFile, VMDK_SECTOR2BYTE(pExtent->uSectorRGD),
1177 pRGD, cbGD, NULL);
1178 AssertRC(rc);
1179 if (RT_FAILURE(rc))
1180 {
1181 rc = vmdkError(pExtent->pImage, rc, RT_SRC_POS, N_("VMDK: could not read redundant grain directory in '%s'"), pExtent->pszFullname);
1182 goto out;
1183 }
1184 for (i = 0, pRGDTmp = pRGD; i < pExtent->cGDEntries; i++, pRGDTmp++)
1185 *pRGDTmp = RT_LE2H_U32(*pRGDTmp);
1186
1187 /* Check grain table and redundant grain table for consistency. */
1188 size_t cbGT = pExtent->cGTEntries * sizeof(uint32_t);
1189 uint32_t *pTmpGT1 = (uint32_t *)RTMemTmpAlloc(cbGT);
1190 if (!pTmpGT1)
1191 {
1192 rc = VERR_NO_MEMORY;
1193 goto out;
1194 }
1195 uint32_t *pTmpGT2 = (uint32_t *)RTMemTmpAlloc(cbGT);
1196 if (!pTmpGT2)
1197 {
1198 RTMemTmpFree(pTmpGT1);
1199 rc = VERR_NO_MEMORY;
1200 goto out;
1201 }
1202
1203 for (i = 0, pGDTmp = pGD, pRGDTmp = pRGD;
1204 i < pExtent->cGDEntries;
1205 i++, pGDTmp++, pRGDTmp++)
1206 {
1207 /* If no grain table is allocated skip the entry. */
1208 if (*pGDTmp == 0 && *pRGDTmp == 0)
1209 continue;
1210
1211 if (*pGDTmp == 0 || *pRGDTmp == 0 || *pGDTmp == *pRGDTmp)
1212 {
1213 /* Just one grain directory entry refers to a not yet allocated
1214 * grain table or both grain directory copies refer to the same
1215 * grain table. Not allowed. */
1216 RTMemTmpFree(pTmpGT1);
1217 RTMemTmpFree(pTmpGT2);
1218 rc = vmdkError(pExtent->pImage, VERR_VD_VMDK_INVALID_HEADER, RT_SRC_POS, N_("VMDK: inconsistent references to grain directory in '%s'"), pExtent->pszFullname);
1219 goto out;
1220 }
1221 /* The VMDK 1.1 spec talks about compressed grain tables, but real
1222 * life files don't have them. The spec is wrong in creative ways. */
1223 rc = vmdkFileReadAt(pExtent->pFile, VMDK_SECTOR2BYTE(*pGDTmp),
1224 pTmpGT1, cbGT, NULL);
1225 if (RT_FAILURE(rc))
1226 {
1227 rc = vmdkError(pExtent->pImage, rc, RT_SRC_POS, N_("VMDK: error reading grain table in '%s'"), pExtent->pszFullname);
1228 RTMemTmpFree(pTmpGT1);
1229 RTMemTmpFree(pTmpGT2);
1230 goto out;
1231 }
1232 /* The VMDK 1.1 spec talks about compressed grain tables, but real
1233 * life files don't have them. The spec is wrong in creative ways. */
1234 rc = vmdkFileReadAt(pExtent->pFile, VMDK_SECTOR2BYTE(*pRGDTmp),
1235 pTmpGT2, cbGT, NULL);
1236 if (RT_FAILURE(rc))
1237 {
1238 rc = vmdkError(pExtent->pImage, rc, RT_SRC_POS, N_("VMDK: error reading backup grain table in '%s'"), pExtent->pszFullname);
1239 RTMemTmpFree(pTmpGT1);
1240 RTMemTmpFree(pTmpGT2);
1241 goto out;
1242 }
1243 if (memcmp(pTmpGT1, pTmpGT2, cbGT))
1244 {
1245 RTMemTmpFree(pTmpGT1);
1246 RTMemTmpFree(pTmpGT2);
1247 rc = vmdkError(pExtent->pImage, VERR_VD_VMDK_INVALID_HEADER, RT_SRC_POS, N_("VMDK: inconsistency between grain table and backup grain table in '%s'"), pExtent->pszFullname);
1248 goto out;
1249 }
1250 }
1251
1252 /** @todo figure out what to do for unclean VMDKs. */
1253 RTMemTmpFree(pTmpGT1);
1254 RTMemTmpFree(pTmpGT2);
1255 }
1256
1257 if (pExtent->pImage->uImageFlags & VD_VMDK_IMAGE_FLAGS_STREAM_OPTIMIZED)
1258 {
1259 uint32_t uLastGrainWritten = 0;
1260 uint32_t uLastGrainSector = 0;
1261 size_t cbGT = pExtent->cGTEntries * sizeof(uint32_t);
1262 uint32_t *pTmpGT = (uint32_t *)RTMemTmpAlloc(cbGT);
1263 if (!pTmpGT)
1264 {
1265 rc = VERR_NO_MEMORY;
1266 goto out;
1267 }
1268 for (i = 0, pGDTmp = pGD; i < pExtent->cGDEntries; i++, pGDTmp++)
1269 {
1270 /* If no grain table is allocated skip the entry. */
1271 if (*pGDTmp == 0)
1272 continue;
1273
1274 /* The VMDK 1.1 spec talks about compressed grain tables, but real
1275 * life files don't have them. The spec is wrong in creative ways. */
1276 rc = vmdkFileReadAt(pExtent->pFile, VMDK_SECTOR2BYTE(*pGDTmp),
1277 pTmpGT, cbGT, NULL);
1278 if (RT_FAILURE(rc))
1279 {
1280 rc = vmdkError(pExtent->pImage, rc, RT_SRC_POS, N_("VMDK: error reading grain table in '%s'"), pExtent->pszFullname);
1281 RTMemTmpFree(pTmpGT);
1282 goto out;
1283 }
1284 uint32_t j;
1285 uint32_t *pGTTmp;
1286 for (j = 0, pGTTmp = pTmpGT; j < pExtent->cGTEntries; j++, pGTTmp++)
1287 {
1288 uint32_t uGTTmp = RT_LE2H_U32(*pGTTmp);
1289
1290 /* If no grain is allocated skip the entry. */
1291 if (uGTTmp == 0)
1292 continue;
1293
1294 if (uLastGrainSector && uLastGrainSector >= uGTTmp)
1295 {
1296 rc = vmdkError(pExtent->pImage, rc, RT_SRC_POS, N_("VMDK: grain table in '%s' contains a violation of the ordering assumptions"), pExtent->pszFullname);
1297 RTMemTmpFree(pTmpGT);
1298 goto out;
1299 }
1300 uLastGrainSector = uGTTmp;
1301 uLastGrainWritten = i * pExtent->cGTEntries + j;
1302 }
1303 }
1304 RTMemTmpFree(pTmpGT);
1305
1306 /* streamOptimized extents need a grain decompress buffer. */
1307 pExtent->pvGrain = RTMemAlloc(VMDK_SECTOR2BYTE(pExtent->cSectorsPerGrain));
1308 if (!pExtent->pvGrain)
1309 {
1310 rc = VERR_NO_MEMORY;
1311 goto out;
1312 }
1313
1314 if (uLastGrainSector)
1315 {
1316 uint64_t uLBA = 0;
1317 uint32_t cbMarker = 0;
1318 rc = vmdkFileInflateAt(pExtent->pFile, VMDK_SECTOR2BYTE(uLastGrainSector),
1319 pExtent->pvGrain, VMDK_SECTOR2BYTE(pExtent->cSectorsPerGrain), VMDK_MARKER_IGNORE, &uLBA, &cbMarker);
1320 if (RT_FAILURE(rc))
1321 goto out;
1322
1323 Assert(uLBA == uLastGrainWritten * pExtent->cSectorsPerGrain);
1324 pExtent->uGrainSector = uLastGrainSector;
1325 pExtent->cbLastGrainWritten = RT_ALIGN(cbMarker, 512);
1326 }
1327 pExtent->uLastGrainWritten = uLastGrainWritten;
1328 pExtent->uLastGrainSector = uLastGrainSector;
1329 }
1330
1331out:
1332 if (RT_FAILURE(rc))
1333 vmdkFreeGrainDirectory(pExtent);
1334 return rc;
1335}
1336
1337static int vmdkCreateGrainDirectory(PVMDKEXTENT pExtent, uint64_t uStartSector,
1338 bool fPreAlloc)
1339{
1340 int rc = VINF_SUCCESS;
1341 unsigned i;
1342 uint32_t *pGD = NULL, *pRGD = NULL;
1343 size_t cbGD = pExtent->cGDEntries * sizeof(uint32_t);
1344 size_t cbGDRounded = RT_ALIGN_64(pExtent->cGDEntries * sizeof(uint32_t), 512);
1345 size_t cbGTRounded;
1346 uint64_t cbOverhead;
1347
1348 if (fPreAlloc)
1349 cbGTRounded = RT_ALIGN_64(pExtent->cGDEntries * pExtent->cGTEntries * sizeof(uint32_t), 512);
1350 else
1351 cbGTRounded = 0;
1352
1353 pGD = (uint32_t *)RTMemAllocZ(cbGD);
1354 if (!pGD)
1355 {
1356 rc = VERR_NO_MEMORY;
1357 goto out;
1358 }
1359 pExtent->pGD = pGD;
1360 pRGD = (uint32_t *)RTMemAllocZ(cbGD);
1361 if (!pRGD)
1362 {
1363 rc = VERR_NO_MEMORY;
1364 goto out;
1365 }
1366 pExtent->pRGD = pRGD;
1367
1368 cbOverhead = RT_ALIGN_64(VMDK_SECTOR2BYTE(uStartSector) + 2 * (cbGDRounded + cbGTRounded), VMDK_SECTOR2BYTE(pExtent->cSectorsPerGrain));
1369 /* For streamOptimized extents put the end-of-stream marker at the end. */
1370 if (pExtent->pImage->uImageFlags & VD_VMDK_IMAGE_FLAGS_STREAM_OPTIMIZED)
1371 rc = vmdkFileSetSize(pExtent->pFile, cbOverhead + 512);
1372 else
1373 rc = vmdkFileSetSize(pExtent->pFile, cbOverhead);
1374 if (RT_FAILURE(rc))
1375 goto out;
1376 pExtent->uSectorRGD = uStartSector;
1377 pExtent->uSectorGD = uStartSector + VMDK_BYTE2SECTOR(cbGDRounded + cbGTRounded);
1378
1379 if (fPreAlloc)
1380 {
1381 uint32_t uGTSectorLE;
1382 uint64_t uOffsetSectors;
1383
1384 uOffsetSectors = pExtent->uSectorRGD + VMDK_BYTE2SECTOR(cbGDRounded);
1385 for (i = 0; i < pExtent->cGDEntries; i++)
1386 {
1387 pRGD[i] = uOffsetSectors;
1388 uGTSectorLE = RT_H2LE_U64(uOffsetSectors);
1389 /* Write the redundant grain directory entry to disk. */
1390 rc = vmdkFileWriteAt(pExtent->pFile,
1391 VMDK_SECTOR2BYTE(pExtent->uSectorRGD) + i * sizeof(uGTSectorLE),
1392 &uGTSectorLE, sizeof(uGTSectorLE), NULL);
1393 if (RT_FAILURE(rc))
1394 {
1395 rc = vmdkError(pExtent->pImage, rc, RT_SRC_POS, N_("VMDK: cannot write new redundant grain directory entry in '%s'"), pExtent->pszFullname);
1396 goto out;
1397 }
1398 uOffsetSectors += VMDK_BYTE2SECTOR(pExtent->cGTEntries * sizeof(uint32_t));
1399 }
1400
1401 uOffsetSectors = pExtent->uSectorGD + VMDK_BYTE2SECTOR(cbGDRounded);
1402 for (i = 0; i < pExtent->cGDEntries; i++)
1403 {
1404 pGD[i] = uOffsetSectors;
1405 uGTSectorLE = RT_H2LE_U64(uOffsetSectors);
1406 /* Write the grain directory entry to disk. */
1407 rc = vmdkFileWriteAt(pExtent->pFile,
1408 VMDK_SECTOR2BYTE(pExtent->uSectorGD) + i * sizeof(uGTSectorLE),
1409 &uGTSectorLE, sizeof(uGTSectorLE), NULL);
1410 if (RT_FAILURE(rc))
1411 {
1412 rc = vmdkError(pExtent->pImage, rc, RT_SRC_POS, N_("VMDK: cannot write new grain directory entry in '%s'"), pExtent->pszFullname);
1413 goto out;
1414 }
1415 uOffsetSectors += VMDK_BYTE2SECTOR(pExtent->cGTEntries * sizeof(uint32_t));
1416 }
1417 }
1418 pExtent->cOverheadSectors = VMDK_BYTE2SECTOR(cbOverhead);
1419
1420 /* streamOptimized extents need a grain decompress buffer. */
1421 if (pExtent->pImage->uImageFlags & VD_VMDK_IMAGE_FLAGS_STREAM_OPTIMIZED)
1422 {
1423 pExtent->pvGrain = RTMemAlloc(VMDK_SECTOR2BYTE(pExtent->cSectorsPerGrain));
1424 if (!pExtent->pvGrain)
1425 {
1426 rc = VERR_NO_MEMORY;
1427 goto out;
1428 }
1429 }
1430
1431out:
1432 if (RT_FAILURE(rc))
1433 vmdkFreeGrainDirectory(pExtent);
1434 return rc;
1435}
1436
1437static void vmdkFreeGrainDirectory(PVMDKEXTENT pExtent)
1438{
1439 if (pExtent->pGD)
1440 {
1441 RTMemFree(pExtent->pGD);
1442 pExtent->pGD = NULL;
1443 }
1444 if (pExtent->pRGD)
1445 {
1446 RTMemFree(pExtent->pRGD);
1447 pExtent->pRGD = NULL;
1448 }
1449}
1450
1451static int vmdkStringUnquote(PVMDKIMAGE pImage, const char *pszStr,
1452 char **ppszUnquoted, char **ppszNext)
1453{
1454 char *pszQ;
1455 char *pszUnquoted;
1456
1457 /* Skip over whitespace. */
1458 while (*pszStr == ' ' || *pszStr == '\t')
1459 pszStr++;
1460
1461 if (*pszStr != '"')
1462 {
1463 pszQ = (char *)pszStr;
1464 while (*pszQ && *pszQ != ' ' && *pszQ != '\t')
1465 pszQ++;
1466 }
1467 else
1468 {
1469 pszStr++;
1470 pszQ = (char *)strchr(pszStr, '"');
1471 if (pszQ == NULL)
1472 return vmdkError(pImage, VERR_VD_VMDK_INVALID_HEADER, RT_SRC_POS, N_("VMDK: incorrectly quoted value in descriptor in '%s'"), pImage->pszFilename);
1473 }
1474
1475 pszUnquoted = (char *)RTMemTmpAlloc(pszQ - pszStr + 1);
1476 if (!pszUnquoted)
1477 return VERR_NO_MEMORY;
1478 memcpy(pszUnquoted, pszStr, pszQ - pszStr);
1479 pszUnquoted[pszQ - pszStr] = '\0';
1480 *ppszUnquoted = pszUnquoted;
1481 if (ppszNext)
1482 *ppszNext = pszQ + 1;
1483 return VINF_SUCCESS;
1484}
1485
1486static int vmdkDescInitStr(PVMDKIMAGE pImage, PVMDKDESCRIPTOR pDescriptor,
1487 const char *pszLine)
1488{
1489 char *pEnd = pDescriptor->aLines[pDescriptor->cLines];
1490 ssize_t cbDiff = strlen(pszLine) + 1;
1491
1492 if ( pDescriptor->cLines >= VMDK_DESCRIPTOR_LINES_MAX - 1
1493 && pEnd - pDescriptor->aLines[0] > (ptrdiff_t)pDescriptor->cbDescAlloc - cbDiff)
1494 return vmdkError(pImage, VERR_BUFFER_OVERFLOW, RT_SRC_POS, N_("VMDK: descriptor too big in '%s'"), pImage->pszFilename);
1495
1496 memcpy(pEnd, pszLine, cbDiff);
1497 pDescriptor->cLines++;
1498 pDescriptor->aLines[pDescriptor->cLines] = pEnd + cbDiff;
1499 pDescriptor->fDirty = true;
1500
1501 return VINF_SUCCESS;
1502}
1503
1504static bool vmdkDescGetStr(PVMDKDESCRIPTOR pDescriptor, unsigned uStart,
1505 const char *pszKey, const char **ppszValue)
1506{
1507 size_t cbKey = strlen(pszKey);
1508 const char *pszValue;
1509
1510 while (uStart != 0)
1511 {
1512 if (!strncmp(pDescriptor->aLines[uStart], pszKey, cbKey))
1513 {
1514 /* Key matches, check for a '=' (preceded by whitespace). */
1515 pszValue = pDescriptor->aLines[uStart] + cbKey;
1516 while (*pszValue == ' ' || *pszValue == '\t')
1517 pszValue++;
1518 if (*pszValue == '=')
1519 {
1520 *ppszValue = pszValue + 1;
1521 break;
1522 }
1523 }
1524 uStart = pDescriptor->aNextLines[uStart];
1525 }
1526 return !!uStart;
1527}
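/* Example of a descriptor line this matches (illustrative):
 *   ddb.geometry.cylinders = "1024"
 * uStart must be the first line of the respective section and the aNextLines
 * chain is followed until the key is found or the section ends. */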
1528
1529static int vmdkDescSetStr(PVMDKIMAGE pImage, PVMDKDESCRIPTOR pDescriptor,
1530 unsigned uStart,
1531 const char *pszKey, const char *pszValue)
1532{
1533 char *pszTmp;
1534 size_t cbKey = strlen(pszKey);
1535 unsigned uLast = 0;
1536
1537 while (uStart != 0)
1538 {
1539 if (!strncmp(pDescriptor->aLines[uStart], pszKey, cbKey))
1540 {
1541 /* Key matches, check for a '=' (preceded by whitespace). */
1542 pszTmp = pDescriptor->aLines[uStart] + cbKey;
1543 while (*pszTmp == ' ' || *pszTmp == '\t')
1544 pszTmp++;
1545 if (*pszTmp == '=')
1546 {
1547 pszTmp++;
1548 while (*pszTmp == ' ' || *pszTmp == '\t')
1549 pszTmp++;
1550 break;
1551 }
1552 }
1553 if (!pDescriptor->aNextLines[uStart])
1554 uLast = uStart;
1555 uStart = pDescriptor->aNextLines[uStart];
1556 }
1557 if (uStart)
1558 {
1559 if (pszValue)
1560 {
1561 /* Key already exists, replace existing value. */
1562 size_t cbOldVal = strlen(pszTmp);
1563 size_t cbNewVal = strlen(pszValue);
1564 ssize_t cbDiff = cbNewVal - cbOldVal;
1565 /* Check for buffer overflow. */
1566 if ( pDescriptor->aLines[pDescriptor->cLines]
1567 - pDescriptor->aLines[0] > (ptrdiff_t)pDescriptor->cbDescAlloc - cbDiff)
1568 return vmdkError(pImage, VERR_BUFFER_OVERFLOW, RT_SRC_POS, N_("VMDK: descriptor too big in '%s'"), pImage->pszFilename);
1569
1570 memmove(pszTmp + cbNewVal, pszTmp + cbOldVal,
1571 pDescriptor->aLines[pDescriptor->cLines] - pszTmp - cbOldVal);
1572 memcpy(pszTmp, pszValue, cbNewVal + 1);
1573 for (unsigned i = uStart + 1; i <= pDescriptor->cLines; i++)
1574 pDescriptor->aLines[i] += cbDiff;
1575 }
1576 else
1577 {
1578 memmove(pDescriptor->aLines[uStart], pDescriptor->aLines[uStart+1],
1579 pDescriptor->aLines[pDescriptor->cLines] - pDescriptor->aLines[uStart+1] + 1);
1580 for (unsigned i = uStart + 1; i <= pDescriptor->cLines; i++)
1581 {
1582 pDescriptor->aLines[i-1] = pDescriptor->aLines[i];
1583 if (pDescriptor->aNextLines[i])
1584 pDescriptor->aNextLines[i-1] = pDescriptor->aNextLines[i] - 1;
1585 else
1586 pDescriptor->aNextLines[i-1] = 0;
1587 }
1588 pDescriptor->cLines--;
1589 /* Adjust starting line numbers of following descriptor sections. */
1590 if (uStart < pDescriptor->uFirstExtent)
1591 pDescriptor->uFirstExtent--;
1592 if (uStart < pDescriptor->uFirstDDB)
1593 pDescriptor->uFirstDDB--;
1594 }
1595 }
1596 else
1597 {
1598 /* Key doesn't exist, append after the last entry in this category. */
1599 if (!pszValue)
1600 {
1601 /* Key doesn't exist, and it should be removed. Simply a no-op. */
1602 return VINF_SUCCESS;
1603 }
1604 cbKey = strlen(pszKey);
1605 size_t cbValue = strlen(pszValue);
1606 ssize_t cbDiff = cbKey + 1 + cbValue + 1;
1607 /* Check for buffer overflow. */
1608 if ( (pDescriptor->cLines >= VMDK_DESCRIPTOR_LINES_MAX - 1)
1609 || ( pDescriptor->aLines[pDescriptor->cLines]
1610 - pDescriptor->aLines[0] > (ptrdiff_t)pDescriptor->cbDescAlloc - cbDiff))
1611 return vmdkError(pImage, VERR_BUFFER_OVERFLOW, RT_SRC_POS, N_("VMDK: descriptor too big in '%s'"), pImage->pszFilename);
1612 for (unsigned i = pDescriptor->cLines + 1; i > uLast + 1; i--)
1613 {
1614 pDescriptor->aLines[i] = pDescriptor->aLines[i - 1];
1615 if (pDescriptor->aNextLines[i - 1])
1616 pDescriptor->aNextLines[i] = pDescriptor->aNextLines[i - 1] + 1;
1617 else
1618 pDescriptor->aNextLines[i] = 0;
1619 }
1620 uStart = uLast + 1;
1621 pDescriptor->aNextLines[uLast] = uStart;
1622 pDescriptor->aNextLines[uStart] = 0;
1623 pDescriptor->cLines++;
1624 pszTmp = pDescriptor->aLines[uStart];
1625 memmove(pszTmp + cbDiff, pszTmp,
1626 pDescriptor->aLines[pDescriptor->cLines] - pszTmp);
1627 memcpy(pDescriptor->aLines[uStart], pszKey, cbKey);
1628 pDescriptor->aLines[uStart][cbKey] = '=';
1629 memcpy(pDescriptor->aLines[uStart] + cbKey + 1, pszValue, cbValue + 1);
1630 for (unsigned i = uStart + 1; i <= pDescriptor->cLines; i++)
1631 pDescriptor->aLines[i] += cbDiff;
1632
1633 /* Adjust starting line numbers of following descriptor sections. */
1634 if (uStart <= pDescriptor->uFirstExtent)
1635 pDescriptor->uFirstExtent++;
1636 if (uStart <= pDescriptor->uFirstDDB)
1637 pDescriptor->uFirstDDB++;
1638 }
1639 pDescriptor->fDirty = true;
1640 return VINF_SUCCESS;
1641}
1642
1643static int vmdkDescBaseGetU32(PVMDKDESCRIPTOR pDescriptor, const char *pszKey,
1644 uint32_t *puValue)
1645{
1646 const char *pszValue;
1647
1648 if (!vmdkDescGetStr(pDescriptor, pDescriptor->uFirstDesc, pszKey,
1649 &pszValue))
1650 return VERR_VD_VMDK_VALUE_NOT_FOUND;
1651 return RTStrToUInt32Ex(pszValue, NULL, 10, puValue);
1652}
1653
1654static int vmdkDescBaseGetStr(PVMDKIMAGE pImage, PVMDKDESCRIPTOR pDescriptor,
1655 const char *pszKey, const char **ppszValue)
1656{
1657 const char *pszValue;
1658 char *pszValueUnquoted;
1659
1660 if (!vmdkDescGetStr(pDescriptor, pDescriptor->uFirstDesc, pszKey,
1661 &pszValue))
1662 return VERR_VD_VMDK_VALUE_NOT_FOUND;
1663 int rc = vmdkStringUnquote(pImage, pszValue, &pszValueUnquoted, NULL);
1664 if (RT_FAILURE(rc))
1665 return rc;
1666 *ppszValue = pszValueUnquoted;
1667 return rc;
1668}
1669
1670static int vmdkDescBaseSetStr(PVMDKIMAGE pImage, PVMDKDESCRIPTOR pDescriptor,
1671 const char *pszKey, const char *pszValue)
1672{
1673 char *pszValueQuoted;
1674
1675 int rc = RTStrAPrintf(&pszValueQuoted, "\"%s\"", pszValue);
1676 if (RT_FAILURE(rc))
1677 return rc;
1678 rc = vmdkDescSetStr(pImage, pDescriptor, pDescriptor->uFirstDesc, pszKey,
1679 pszValueQuoted);
1680 RTStrFree(pszValueQuoted);
1681 return rc;
1682}
1683
1684static void vmdkDescExtRemoveDummy(PVMDKIMAGE pImage,
1685 PVMDKDESCRIPTOR pDescriptor)
1686{
1687 unsigned uEntry = pDescriptor->uFirstExtent;
1688 ssize_t cbDiff;
1689
1690 if (!uEntry)
1691 return;
1692
1693 cbDiff = strlen(pDescriptor->aLines[uEntry]) + 1;
1694 /* Move everything including \0 in the entry marking the end of buffer. */
1695 memmove(pDescriptor->aLines[uEntry], pDescriptor->aLines[uEntry + 1],
1696 pDescriptor->aLines[pDescriptor->cLines] - pDescriptor->aLines[uEntry + 1] + 1);
1697 for (unsigned i = uEntry + 1; i <= pDescriptor->cLines; i++)
1698 {
1699 pDescriptor->aLines[i - 1] = pDescriptor->aLines[i] - cbDiff;
1700 if (pDescriptor->aNextLines[i])
1701 pDescriptor->aNextLines[i - 1] = pDescriptor->aNextLines[i] - 1;
1702 else
1703 pDescriptor->aNextLines[i - 1] = 0;
1704 }
1705 pDescriptor->cLines--;
1706 if (pDescriptor->uFirstDDB)
1707 pDescriptor->uFirstDDB--;
1708
1709 return;
1710}
1711
1712static int vmdkDescExtInsert(PVMDKIMAGE pImage, PVMDKDESCRIPTOR pDescriptor,
1713 VMDKACCESS enmAccess, uint64_t cNominalSectors,
1714 VMDKETYPE enmType, const char *pszBasename,
1715 uint64_t uSectorOffset)
1716{
1717 static const char *apszAccess[] = { "NOACCESS", "RDONLY", "RW" };
1718 static const char *apszType[] = { "", "SPARSE", "FLAT", "ZERO", "VMFS" };
1719 char *pszTmp;
1720 unsigned uStart = pDescriptor->uFirstExtent, uLast = 0;
1721 char szExt[1024];
1722 ssize_t cbDiff;
1723
1724 Assert((unsigned)enmAccess < RT_ELEMENTS(apszAccess));
1725 Assert((unsigned)enmType < RT_ELEMENTS(apszType));
1726
1727 /* Find last entry in extent description. */
1728 while (uStart)
1729 {
1730 if (!pDescriptor->aNextLines[uStart])
1731 uLast = uStart;
1732 uStart = pDescriptor->aNextLines[uStart];
1733 }
1734
1735 if (enmType == VMDKETYPE_ZERO)
1736 {
1737 RTStrPrintf(szExt, sizeof(szExt), "%s %llu %s ", apszAccess[enmAccess],
1738 cNominalSectors, apszType[enmType]);
1739 }
1740 else if (enmType == VMDKETYPE_FLAT)
1741 {
1742 RTStrPrintf(szExt, sizeof(szExt), "%s %llu %s \"%s\" %llu",
1743 apszAccess[enmAccess], cNominalSectors,
1744 apszType[enmType], pszBasename, uSectorOffset);
1745 }
1746 else
1747 {
1748 RTStrPrintf(szExt, sizeof(szExt), "%s %llu %s \"%s\"",
1749 apszAccess[enmAccess], cNominalSectors,
1750 apszType[enmType], pszBasename);
1751 }
1752 cbDiff = strlen(szExt) + 1;
1753
1754 /* Check for buffer overflow. */
1755 if ( (pDescriptor->cLines >= VMDK_DESCRIPTOR_LINES_MAX - 1)
1756 || ( pDescriptor->aLines[pDescriptor->cLines]
1757 - pDescriptor->aLines[0] > (ptrdiff_t)pDescriptor->cbDescAlloc - cbDiff))
1758 return vmdkError(pImage, VERR_BUFFER_OVERFLOW, RT_SRC_POS, N_("VMDK: descriptor too big in '%s'"), pImage->pszFilename);
1759
1760 for (unsigned i = pDescriptor->cLines + 1; i > uLast + 1; i--)
1761 {
1762 pDescriptor->aLines[i] = pDescriptor->aLines[i - 1];
1763 if (pDescriptor->aNextLines[i - 1])
1764 pDescriptor->aNextLines[i] = pDescriptor->aNextLines[i - 1] + 1;
1765 else
1766 pDescriptor->aNextLines[i] = 0;
1767 }
1768 uStart = uLast + 1;
1769 pDescriptor->aNextLines[uLast] = uStart;
1770 pDescriptor->aNextLines[uStart] = 0;
1771 pDescriptor->cLines++;
1772 pszTmp = pDescriptor->aLines[uStart];
1773 memmove(pszTmp + cbDiff, pszTmp,
1774 pDescriptor->aLines[pDescriptor->cLines] - pszTmp);
1775 memcpy(pDescriptor->aLines[uStart], szExt, cbDiff);
1776 for (unsigned i = uStart + 1; i <= pDescriptor->cLines; i++)
1777 pDescriptor->aLines[i] += cbDiff;
1778
1779 /* Adjust starting line numbers of following descriptor sections. */
1780 if (uStart <= pDescriptor->uFirstDDB)
1781 pDescriptor->uFirstDDB++;
1782
1783 pDescriptor->fDirty = true;
1784 return VINF_SUCCESS;
1785}
1786
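/**
 * Internal: the vmdkDescDDBGet* helpers below look up a key in the disk
 * data base ("ddb.") section of the descriptor and return its unquoted
 * value as a string, uint32_t or UUID respectively. The string variant
 * returns an unquoted copy which the caller becomes responsible for.
 */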
1787static int vmdkDescDDBGetStr(PVMDKIMAGE pImage, PVMDKDESCRIPTOR pDescriptor,
1788 const char *pszKey, const char **ppszValue)
1789{
1790 const char *pszValue;
1791 char *pszValueUnquoted;
1792
1793 if (!vmdkDescGetStr(pDescriptor, pDescriptor->uFirstDDB, pszKey,
1794 &pszValue))
1795 return VERR_VD_VMDK_VALUE_NOT_FOUND;
1796 int rc = vmdkStringUnquote(pImage, pszValue, &pszValueUnquoted, NULL);
1797 if (RT_FAILURE(rc))
1798 return rc;
1799 *ppszValue = pszValueUnquoted;
1800 return rc;
1801}
1802
1803static int vmdkDescDDBGetU32(PVMDKIMAGE pImage, PVMDKDESCRIPTOR pDescriptor,
1804 const char *pszKey, uint32_t *puValue)
1805{
1806 const char *pszValue;
1807 char *pszValueUnquoted;
1808
1809 if (!vmdkDescGetStr(pDescriptor, pDescriptor->uFirstDDB, pszKey,
1810 &pszValue))
1811 return VERR_VD_VMDK_VALUE_NOT_FOUND;
1812 int rc = vmdkStringUnquote(pImage, pszValue, &pszValueUnquoted, NULL);
1813 if (RT_FAILURE(rc))
1814 return rc;
1815 rc = RTStrToUInt32Ex(pszValueUnquoted, NULL, 10, puValue);
1816 RTMemTmpFree(pszValueUnquoted);
1817 return rc;
1818}
1819
1820static int vmdkDescDDBGetUuid(PVMDKIMAGE pImage, PVMDKDESCRIPTOR pDescriptor,
1821 const char *pszKey, PRTUUID pUuid)
1822{
1823 const char *pszValue;
1824 char *pszValueUnquoted;
1825
1826 if (!vmdkDescGetStr(pDescriptor, pDescriptor->uFirstDDB, pszKey,
1827 &pszValue))
1828 return VERR_VD_VMDK_VALUE_NOT_FOUND;
1829 int rc = vmdkStringUnquote(pImage, pszValue, &pszValueUnquoted, NULL);
1830 if (RT_FAILURE(rc))
1831 return rc;
1832 rc = RTUuidFromStr(pUuid, pszValueUnquoted);
1833 RTMemTmpFree(pszValueUnquoted);
1834 return rc;
1835}
1836
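/**
 * Internal: the vmdkDescDDBSet* helpers below store a key/value pair in the
 * disk data base section of the descriptor. The value is always written in
 * double quotes, giving lines like ddb.uuid.image = "..." in the descriptor.
 */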
1837static int vmdkDescDDBSetStr(PVMDKIMAGE pImage, PVMDKDESCRIPTOR pDescriptor,
1838 const char *pszKey, const char *pszVal)
1839{
1840 int rc;
1841 char *pszValQuoted;
1842
1843 if (pszVal)
1844 {
1845 rc = RTStrAPrintf(&pszValQuoted, "\"%s\"", pszVal);
1846 if (RT_FAILURE(rc))
1847 return rc;
1848 }
1849 else
1850 pszValQuoted = NULL;
1851 rc = vmdkDescSetStr(pImage, pDescriptor, pDescriptor->uFirstDDB, pszKey,
1852 pszValQuoted);
1853 if (pszValQuoted)
1854 RTStrFree(pszValQuoted);
1855 return rc;
1856}
1857
1858static int vmdkDescDDBSetUuid(PVMDKIMAGE pImage, PVMDKDESCRIPTOR pDescriptor,
1859 const char *pszKey, PCRTUUID pUuid)
1860{
1861 char *pszUuid;
1862
1863 int rc = RTStrAPrintf(&pszUuid, "\"%RTuuid\"", pUuid);
1864 if (RT_FAILURE(rc))
1865 return rc;
1866 rc = vmdkDescSetStr(pImage, pDescriptor, pDescriptor->uFirstDDB, pszKey,
1867 pszUuid);
1868 RTStrFree(pszUuid);
1869 return rc;
1870}
1871
1872static int vmdkDescDDBSetU32(PVMDKIMAGE pImage, PVMDKDESCRIPTOR pDescriptor,
1873 const char *pszKey, uint32_t uValue)
1874{
1875 char *pszValue;
1876
1877 int rc = RTStrAPrintf(&pszValue, "\"%u\"", uValue);
1878 if (RT_FAILURE(rc))
1879 return rc;
1880 rc = vmdkDescSetStr(pImage, pDescriptor, pDescriptor->uFirstDDB, pszKey,
1881 pszValue);
1882 RTStrFree(pszValue);
1883 return rc;
1884}
1885
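/**
 * Internal: split the raw descriptor text into lines and locate the three
 * descriptor sections (header, extent descriptions, disk data base).
 * A typical descriptor looks like this (illustrative values only):
 *   # Disk DescriptorFile
 *   version=1
 *   CID=fffffffe
 *   parentCID=ffffffff
 *   createType="monolithicSparse"
 *
 *   # Extent description
 *   RW 4192256 SPARSE "test.vmdk"
 *
 *   # The Disk Data Base
 *   #DDB
 *   ddb.virtualHWVersion = "4"
 *   ddb.geometry.cylinders = "261"
 * Line terminators (LF or CR/LF) are replaced by \0 in place, aLines[] gets
 * a pointer to the start of each line and aNextLines[] chains the non-empty
 * lines of each section together.
 */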
1886static int vmdkPreprocessDescriptor(PVMDKIMAGE pImage, char *pDescData,
1887 size_t cbDescData,
1888 PVMDKDESCRIPTOR pDescriptor)
1889{
1890 int rc = VINF_SUCCESS;
1891 unsigned cLine = 0, uLastNonEmptyLine = 0;
1892 char *pTmp = pDescData;
1893
1894 pDescriptor->cbDescAlloc = cbDescData;
1895 while (*pTmp != '\0')
1896 {
1897 pDescriptor->aLines[cLine++] = pTmp;
1898 if (cLine >= VMDK_DESCRIPTOR_LINES_MAX)
1899 {
1900 rc = vmdkError(pImage, VERR_VD_VMDK_INVALID_HEADER, RT_SRC_POS, N_("VMDK: descriptor too big in '%s'"), pImage->pszFilename);
1901 goto out;
1902 }
1903
1904 while (*pTmp != '\0' && *pTmp != '\n')
1905 {
1906 if (*pTmp == '\r')
1907 {
1908 if (*(pTmp + 1) != '\n')
1909 {
1910 rc = vmdkError(pImage, VERR_VD_VMDK_INVALID_HEADER, RT_SRC_POS, N_("VMDK: unsupported end of line in descriptor in '%s'"), pImage->pszFilename);
1911 goto out;
1912 }
1913 else
1914 {
1915 /* Get rid of CR character. */
1916 *pTmp = '\0';
1917 }
1918 }
1919 pTmp++;
1920 }
1921 /* Get rid of LF character. */
1922 if (*pTmp == '\n')
1923 {
1924 *pTmp = '\0';
1925 pTmp++;
1926 }
1927 }
1928 pDescriptor->cLines = cLine;
1929 /* Pointer right after the end of the used part of the buffer. */
1930 pDescriptor->aLines[cLine] = pTmp;
1931
1932 if ( strcmp(pDescriptor->aLines[0], "# Disk DescriptorFile")
1933 && strcmp(pDescriptor->aLines[0], "# Disk Descriptor File"))
1934 {
1935 rc = vmdkError(pImage, VERR_VD_VMDK_INVALID_HEADER, RT_SRC_POS, N_("VMDK: descriptor does not start as expected in '%s'"), pImage->pszFilename);
1936 goto out;
1937 }
1938
1939 /* Initialize those, because we need to be able to reopen an image. */
1940 pDescriptor->uFirstDesc = 0;
1941 pDescriptor->uFirstExtent = 0;
1942 pDescriptor->uFirstDDB = 0;
1943 for (unsigned i = 0; i < cLine; i++)
1944 {
1945 if (*pDescriptor->aLines[i] != '#' && *pDescriptor->aLines[i] != '\0')
1946 {
1947 if ( !strncmp(pDescriptor->aLines[i], "RW", 2)
1948 || !strncmp(pDescriptor->aLines[i], "RDONLY", 6)
1949 || !strncmp(pDescriptor->aLines[i], "NOACCESS", 8) )
1950 {
1951 /* An extent descriptor. */
1952 if (!pDescriptor->uFirstDesc || pDescriptor->uFirstDDB)
1953 {
1954 /* Incorrect ordering of entries. */
1955 rc = vmdkError(pImage, VERR_VD_VMDK_INVALID_HEADER, RT_SRC_POS, N_("VMDK: incorrect ordering of entries in descriptor in '%s'"), pImage->pszFilename);
1956 goto out;
1957 }
1958 if (!pDescriptor->uFirstExtent)
1959 {
1960 pDescriptor->uFirstExtent = i;
1961 uLastNonEmptyLine = 0;
1962 }
1963 }
1964 else if (!strncmp(pDescriptor->aLines[i], "ddb.", 4))
1965 {
1966 /* A disk database entry. */
1967 if (!pDescriptor->uFirstDesc || !pDescriptor->uFirstExtent)
1968 {
1969 /* Incorrect ordering of entries. */
1970 rc = vmdkError(pImage, VERR_VD_VMDK_INVALID_HEADER, RT_SRC_POS, N_("VMDK: incorrect ordering of entries in descriptor in '%s'"), pImage->pszFilename);
1971 goto out;
1972 }
1973 if (!pDescriptor->uFirstDDB)
1974 {
1975 pDescriptor->uFirstDDB = i;
1976 uLastNonEmptyLine = 0;
1977 }
1978 }
1979 else
1980 {
1981 /* A normal entry. */
1982 if (pDescriptor->uFirstExtent || pDescriptor->uFirstDDB)
1983 {
1984 /* Incorrect ordering of entries. */
1985 rc = vmdkError(pImage, VERR_VD_VMDK_INVALID_HEADER, RT_SRC_POS, N_("VMDK: incorrect ordering of entries in descriptor in '%s'"), pImage->pszFilename);
1986 goto out;
1987 }
1988 if (!pDescriptor->uFirstDesc)
1989 {
1990 pDescriptor->uFirstDesc = i;
1991 uLastNonEmptyLine = 0;
1992 }
1993 }
1994 if (uLastNonEmptyLine)
1995 pDescriptor->aNextLines[uLastNonEmptyLine] = i;
1996 uLastNonEmptyLine = i;
1997 }
1998 }
1999
2000out:
2001 return rc;
2002}
2003
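/**
 * Internal: store the physical CHS geometry in the disk data base
 * (ddb.geometry.cylinders/heads/sectors). vmdkDescSetLCHSGeometry below
 * does the same for the BIOS (logical) geometry keys.
 */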
2004static int vmdkDescSetPCHSGeometry(PVMDKIMAGE pImage,
2005 PCPDMMEDIAGEOMETRY pPCHSGeometry)
2006{
2007 int rc = vmdkDescDDBSetU32(pImage, &pImage->Descriptor,
2008 VMDK_DDB_GEO_PCHS_CYLINDERS,
2009 pPCHSGeometry->cCylinders);
2010 if (RT_FAILURE(rc))
2011 return rc;
2012 rc = vmdkDescDDBSetU32(pImage, &pImage->Descriptor,
2013 VMDK_DDB_GEO_PCHS_HEADS,
2014 pPCHSGeometry->cHeads);
2015 if (RT_FAILURE(rc))
2016 return rc;
2017 rc = vmdkDescDDBSetU32(pImage, &pImage->Descriptor,
2018 VMDK_DDB_GEO_PCHS_SECTORS,
2019 pPCHSGeometry->cSectors);
2020 return rc;
2021}
2022
2023static int vmdkDescSetLCHSGeometry(PVMDKIMAGE pImage,
2024 PCPDMMEDIAGEOMETRY pLCHSGeometry)
2025{
2026 int rc = vmdkDescDDBSetU32(pImage, &pImage->Descriptor,
2027 VMDK_DDB_GEO_LCHS_CYLINDERS,
2028 pLCHSGeometry->cCylinders);
2029 if (RT_FAILURE(rc))
2030 return rc;
2031 rc = vmdkDescDDBSetU32(pImage, &pImage->Descriptor,
2032 VMDK_DDB_GEO_LCHS_HEADS,
2033 pLCHSGeometry->cHeads);
2034 if (RT_FAILURE(rc))
2035 return rc;
2036 rc = vmdkDescDDBSetU32(pImage, &pImage->Descriptor,
2037 VMDK_DDB_GEO_LCHS_SECTORS,
2038 pLCHSGeometry->cSectors);
2039 return rc;
2040}
2041
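/**
 * Internal: create a minimal descriptor skeleton for a new image: the header
 * section with a freshly generated CID and parentCID=ffffffff, a placeholder
 * extent section (a single "NOACCESS 0 ZERO" line) and a disk data base with
 * ddb.virtualHWVersion and ddb.adapterType. Everything else (createType, real
 * extent lines, geometry, UUIDs) is filled in later through the regular
 * descriptor setters.
 */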
2042static int vmdkCreateDescriptor(PVMDKIMAGE pImage, char *pDescData,
2043 size_t cbDescData, PVMDKDESCRIPTOR pDescriptor)
2044{
2045 int rc;
2046
2047 pDescriptor->uFirstDesc = 0;
2048 pDescriptor->uFirstExtent = 0;
2049 pDescriptor->uFirstDDB = 0;
2050 pDescriptor->cLines = 0;
2051 pDescriptor->cbDescAlloc = cbDescData;
2052 pDescriptor->fDirty = false;
2053 pDescriptor->aLines[pDescriptor->cLines] = pDescData;
2054 memset(pDescriptor->aNextLines, '\0', sizeof(pDescriptor->aNextLines));
2055
2056 rc = vmdkDescInitStr(pImage, pDescriptor, "# Disk DescriptorFile");
2057 if (RT_FAILURE(rc))
2058 goto out;
2059 rc = vmdkDescInitStr(pImage, pDescriptor, "version=1");
2060 if (RT_FAILURE(rc))
2061 goto out;
2062 pDescriptor->uFirstDesc = pDescriptor->cLines - 1;
2063 rc = vmdkDescInitStr(pImage, pDescriptor, "");
2064 if (RT_FAILURE(rc))
2065 goto out;
2066 rc = vmdkDescInitStr(pImage, pDescriptor, "# Extent description");
2067 if (RT_FAILURE(rc))
2068 goto out;
2069 rc = vmdkDescInitStr(pImage, pDescriptor, "NOACCESS 0 ZERO ");
2070 if (RT_FAILURE(rc))
2071 goto out;
2072 pDescriptor->uFirstExtent = pDescriptor->cLines - 1;
2073 rc = vmdkDescInitStr(pImage, pDescriptor, "");
2074 if (RT_FAILURE(rc))
2075 goto out;
2076 /* The trailing space is created by VMware, too. */
2077 rc = vmdkDescInitStr(pImage, pDescriptor, "# The disk Data Base ");
2078 if (RT_FAILURE(rc))
2079 goto out;
2080 rc = vmdkDescInitStr(pImage, pDescriptor, "#DDB");
2081 if (RT_FAILURE(rc))
2082 goto out;
2083 rc = vmdkDescInitStr(pImage, pDescriptor, "");
2084 if (RT_FAILURE(rc))
2085 goto out;
2086 rc = vmdkDescInitStr(pImage, pDescriptor, "ddb.virtualHWVersion = \"4\"");
2087 if (RT_FAILURE(rc))
2088 goto out;
2089 pDescriptor->uFirstDDB = pDescriptor->cLines - 1;
2090
2091 /* Now that the framework is in place, use the normal functions to insert
2092 * the remaining keys. */
2093 char szBuf[9];
2094 RTStrPrintf(szBuf, sizeof(szBuf), "%08x", RTRandU32());
2095 rc = vmdkDescSetStr(pImage, pDescriptor, pDescriptor->uFirstDesc,
2096 "CID", szBuf);
2097 if (RT_FAILURE(rc))
2098 goto out;
2099 rc = vmdkDescSetStr(pImage, pDescriptor, pDescriptor->uFirstDesc,
2100 "parentCID", "ffffffff");
2101 if (RT_FAILURE(rc))
2102 goto out;
2103
2104 rc = vmdkDescDDBSetStr(pImage, pDescriptor, "ddb.adapterType", "ide");
2105 if (RT_FAILURE(rc))
2106 goto out;
2107
2108out:
2109 return rc;
2110}
2111
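/**
 * Internal: parse a preprocessed descriptor: check the format version,
 * derive the image flags from createType, build the extent array from the
 * extent description lines and read geometry and UUID values from the disk
 * data base, creating missing UUIDs for images opened read/write.
 */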
2112static int vmdkParseDescriptor(PVMDKIMAGE pImage, char *pDescData,
2113 size_t cbDescData)
2114{
2115 int rc;
2116 unsigned cExtents;
2117 unsigned uLine;
2118 unsigned i;
2119
2120 rc = vmdkPreprocessDescriptor(pImage, pDescData, cbDescData,
2121 &pImage->Descriptor);
2122 if (RT_FAILURE(rc))
2123 return rc;
2124
2125 /* Check version, must be 1. */
2126 uint32_t uVersion;
2127 rc = vmdkDescBaseGetU32(&pImage->Descriptor, "version", &uVersion);
2128 if (RT_FAILURE(rc))
2129 return vmdkError(pImage, rc, RT_SRC_POS, N_("VMDK: error finding key 'version' in descriptor in '%s'"), pImage->pszFilename);
2130 if (uVersion != 1)
2131 return vmdkError(pImage, VERR_VD_VMDK_UNSUPPORTED_VERSION, RT_SRC_POS, N_("VMDK: unsupported format version in descriptor in '%s'"), pImage->pszFilename);
2132
2133 /* Get image creation type and determine image flags. */
2134 const char *pszCreateType = NULL; /* initialized to make gcc shut up */
2135 rc = vmdkDescBaseGetStr(pImage, &pImage->Descriptor, "createType",
2136 &pszCreateType);
2137 if (RT_FAILURE(rc))
2138 return vmdkError(pImage, rc, RT_SRC_POS, N_("VMDK: cannot get image type from descriptor in '%s'"), pImage->pszFilename);
2139 if ( !strcmp(pszCreateType, "twoGbMaxExtentSparse")
2140 || !strcmp(pszCreateType, "twoGbMaxExtentFlat"))
2141 pImage->uImageFlags |= VD_VMDK_IMAGE_FLAGS_SPLIT_2G;
2142 else if ( !strcmp(pszCreateType, "partitionedDevice")
2143 || !strcmp(pszCreateType, "fullDevice"))
2144 pImage->uImageFlags |= VD_VMDK_IMAGE_FLAGS_RAWDISK;
2145 else if (!strcmp(pszCreateType, "streamOptimized"))
2146 pImage->uImageFlags |= VD_VMDK_IMAGE_FLAGS_STREAM_OPTIMIZED;
2147 else if (!strcmp(pszCreateType, "vmfs"))
2148 pImage->uImageFlags |= VD_IMAGE_FLAGS_FIXED | VD_VMDK_IMAGE_FLAGS_ESX;
2149 RTStrFree((char *)(void *)pszCreateType);
2150
2151 /* Count the number of extent config entries. */
2152 for (uLine = pImage->Descriptor.uFirstExtent, cExtents = 0;
2153 uLine != 0;
2154 uLine = pImage->Descriptor.aNextLines[uLine], cExtents++)
2155 /* nothing */;
2156
2157 if (!pImage->pDescData && cExtents != 1)
2158 {
2159 /* Monolithic image, must have only one extent (already opened). */
2160 return vmdkError(pImage, VERR_VD_VMDK_INVALID_HEADER, RT_SRC_POS, N_("VMDK: monolithic image may only have one extent in '%s'"), pImage->pszFilename);
2161 }
2162
2163 if (pImage->pDescData)
2164 {
2165 /* Non-monolithic image, extents need to be allocated. */
2166 rc = vmdkCreateExtents(pImage, cExtents);
2167 if (RT_FAILURE(rc))
2168 return rc;
2169 }
2170
2171 for (i = 0, uLine = pImage->Descriptor.uFirstExtent;
2172 i < cExtents; i++, uLine = pImage->Descriptor.aNextLines[uLine])
2173 {
2174 char *pszLine = pImage->Descriptor.aLines[uLine];
2175
2176 /* Access type of the extent. */
2177 if (!strncmp(pszLine, "RW", 2))
2178 {
2179 pImage->pExtents[i].enmAccess = VMDKACCESS_READWRITE;
2180 pszLine += 2;
2181 }
2182 else if (!strncmp(pszLine, "RDONLY", 6))
2183 {
2184 pImage->pExtents[i].enmAccess = VMDKACCESS_READONLY;
2185 pszLine += 6;
2186 }
2187 else if (!strncmp(pszLine, "NOACCESS", 8))
2188 {
2189 pImage->pExtents[i].enmAccess = VMDKACCESS_NOACCESS;
2190 pszLine += 8;
2191 }
2192 else
2193 return vmdkError(pImage, VERR_VD_VMDK_INVALID_HEADER, RT_SRC_POS, N_("VMDK: parse error in extent description in '%s'"), pImage->pszFilename);
2194 if (*pszLine++ != ' ')
2195 return vmdkError(pImage, VERR_VD_VMDK_INVALID_HEADER, RT_SRC_POS, N_("VMDK: parse error in extent description in '%s'"), pImage->pszFilename);
2196
2197 /* Nominal size of the extent. */
2198 rc = RTStrToUInt64Ex(pszLine, &pszLine, 10,
2199 &pImage->pExtents[i].cNominalSectors);
2200 if (RT_FAILURE(rc))
2201 return vmdkError(pImage, VERR_VD_VMDK_INVALID_HEADER, RT_SRC_POS, N_("VMDK: parse error in extent description in '%s'"), pImage->pszFilename);
2202 if (*pszLine++ != ' ')
2203 return vmdkError(pImage, VERR_VD_VMDK_INVALID_HEADER, RT_SRC_POS, N_("VMDK: parse error in extent description in '%s'"), pImage->pszFilename);
2204
2205 /* Type of the extent. */
2206#ifdef VBOX_WITH_VMDK_ESX
2207 /** @todo Add the ESX extent types. Not necessary for now because
2208 * the ESX extent types are only used inside an ESX server. They are
2209 * automatically converted if the VMDK is exported. */
2210#endif /* VBOX_WITH_VMDK_ESX */
2211 if (!strncmp(pszLine, "SPARSE", 6))
2212 {
2213 pImage->pExtents[i].enmType = VMDKETYPE_HOSTED_SPARSE;
2214 pszLine += 6;
2215 }
2216 else if (!strncmp(pszLine, "FLAT", 4))
2217 {
2218 pImage->pExtents[i].enmType = VMDKETYPE_FLAT;
2219 pszLine += 4;
2220 }
2221 else if (!strncmp(pszLine, "ZERO", 4))
2222 {
2223 pImage->pExtents[i].enmType = VMDKETYPE_ZERO;
2224 pszLine += 4;
2225 }
2226 else if (!strncmp(pszLine, "VMFS", 4))
2227 {
2228 pImage->pExtents[i].enmType = VMDKETYPE_VMFS;
2229 pszLine += 4;
2230 }
2231 else
2232 return vmdkError(pImage, VERR_VD_VMDK_INVALID_HEADER, RT_SRC_POS, N_("VMDK: parse error in extent description in '%s'"), pImage->pszFilename);
2233 if (pImage->pExtents[i].enmType == VMDKETYPE_ZERO)
2234 {
2235 /* This one has no basename or offset. */
2236 if (*pszLine == ' ')
2237 pszLine++;
2238 if (*pszLine != '\0')
2239 return vmdkError(pImage, VERR_VD_VMDK_INVALID_HEADER, RT_SRC_POS, N_("VMDK: parse error in extent description in '%s'"), pImage->pszFilename);
2240 pImage->pExtents[i].pszBasename = NULL;
2241 }
2242 else
2243 {
2244 /* All other extent types have basename and optional offset. */
2245 if (*pszLine++ != ' ')
2246 return vmdkError(pImage, VERR_VD_VMDK_INVALID_HEADER, RT_SRC_POS, N_("VMDK: parse error in extent description in '%s'"), pImage->pszFilename);
2247
2248 /* Basename of the image. Surrounded by quotes. */
2249 char *pszBasename;
2250 rc = vmdkStringUnquote(pImage, pszLine, &pszBasename, &pszLine);
2251 if (RT_FAILURE(rc))
2252 return rc;
2253 pImage->pExtents[i].pszBasename = pszBasename;
2254 if (*pszLine == ' ')
2255 {
2256 pszLine++;
2257 if (*pszLine != '\0')
2258 {
2259 /* Optional offset in extent specified. */
2260 rc = RTStrToUInt64Ex(pszLine, &pszLine, 10,
2261 &pImage->pExtents[i].uSectorOffset);
2262 if (RT_FAILURE(rc))
2263 return vmdkError(pImage, VERR_VD_VMDK_INVALID_HEADER, RT_SRC_POS, N_("VMDK: parse error in extent description in '%s'"), pImage->pszFilename);
2264 }
2265 }
2266
2267 if (*pszLine != '\0')
2268 return vmdkError(pImage, VERR_VD_VMDK_INVALID_HEADER, RT_SRC_POS, N_("VMDK: parse error in extent description in '%s'"), pImage->pszFilename);
2269 }
2270 }
2271
2272 /* Determine PCHS geometry (autogenerate if necessary). */
2273 rc = vmdkDescDDBGetU32(pImage, &pImage->Descriptor,
2274 VMDK_DDB_GEO_PCHS_CYLINDERS,
2275 &pImage->PCHSGeometry.cCylinders);
2276 if (rc == VERR_VD_VMDK_VALUE_NOT_FOUND)
2277 pImage->PCHSGeometry.cCylinders = 0;
2278 else if (RT_FAILURE(rc))
2279 return vmdkError(pImage, rc, RT_SRC_POS, N_("VMDK: error getting PCHS geometry from extent description in '%s'"), pImage->pszFilename);
2280 rc = vmdkDescDDBGetU32(pImage, &pImage->Descriptor,
2281 VMDK_DDB_GEO_PCHS_HEADS,
2282 &pImage->PCHSGeometry.cHeads);
2283 if (rc == VERR_VD_VMDK_VALUE_NOT_FOUND)
2284 pImage->PCHSGeometry.cHeads = 0;
2285 else if (RT_FAILURE(rc))
2286 return vmdkError(pImage, rc, RT_SRC_POS, N_("VMDK: error getting PCHS geometry from extent description in '%s'"), pImage->pszFilename);
2287 rc = vmdkDescDDBGetU32(pImage, &pImage->Descriptor,
2288 VMDK_DDB_GEO_PCHS_SECTORS,
2289 &pImage->PCHSGeometry.cSectors);
2290 if (rc == VERR_VD_VMDK_VALUE_NOT_FOUND)
2291 pImage->PCHSGeometry.cSectors = 0;
2292 else if (RT_FAILURE(rc))
2293 return vmdkError(pImage, rc, RT_SRC_POS, N_("VMDK: error getting PCHS geometry from extent description in '%s'"), pImage->pszFilename);
2294 if ( pImage->PCHSGeometry.cCylinders == 0
2295 || pImage->PCHSGeometry.cHeads == 0
2296 || pImage->PCHSGeometry.cHeads > 16
2297 || pImage->PCHSGeometry.cSectors == 0
2298 || pImage->PCHSGeometry.cSectors > 63)
2299 {
2300 /* Mark PCHS geometry as not yet valid (can't do the calculation here
2301 * as the total image size isn't known yet). */
2302 pImage->PCHSGeometry.cCylinders = 0;
2303 pImage->PCHSGeometry.cHeads = 16;
2304 pImage->PCHSGeometry.cSectors = 63;
2305 }
2306
2307 /* Determine LCHS geometry (set to 0 if not specified). */
2308 rc = vmdkDescDDBGetU32(pImage, &pImage->Descriptor,
2309 VMDK_DDB_GEO_LCHS_CYLINDERS,
2310 &pImage->LCHSGeometry.cCylinders);
2311 if (rc == VERR_VD_VMDK_VALUE_NOT_FOUND)
2312 pImage->LCHSGeometry.cCylinders = 0;
2313 else if (RT_FAILURE(rc))
2314 return vmdkError(pImage, rc, RT_SRC_POS, N_("VMDK: error getting LCHS geometry from extent description in '%s'"), pImage->pszFilename);
2315 rc = vmdkDescDDBGetU32(pImage, &pImage->Descriptor,
2316 VMDK_DDB_GEO_LCHS_HEADS,
2317 &pImage->LCHSGeometry.cHeads);
2318 if (rc == VERR_VD_VMDK_VALUE_NOT_FOUND)
2319 pImage->LCHSGeometry.cHeads = 0;
2320 else if (RT_FAILURE(rc))
2321 return vmdkError(pImage, rc, RT_SRC_POS, N_("VMDK: error getting LCHS geometry from extent description in '%s'"), pImage->pszFilename);
2322 rc = vmdkDescDDBGetU32(pImage, &pImage->Descriptor,
2323 VMDK_DDB_GEO_LCHS_SECTORS,
2324 &pImage->LCHSGeometry.cSectors);
2325 if (rc == VERR_VD_VMDK_VALUE_NOT_FOUND)
2326 pImage->LCHSGeometry.cSectors = 0;
2327 else if (RT_FAILURE(rc))
2328 return vmdkError(pImage, rc, RT_SRC_POS, N_("VMDK: error getting LCHS geometry from extent description in '%s'"), pImage->pszFilename);
2329 if ( pImage->LCHSGeometry.cCylinders == 0
2330 || pImage->LCHSGeometry.cHeads == 0
2331 || pImage->LCHSGeometry.cSectors == 0)
2332 {
2333 pImage->LCHSGeometry.cCylinders = 0;
2334 pImage->LCHSGeometry.cHeads = 0;
2335 pImage->LCHSGeometry.cSectors = 0;
2336 }
2337
2338 /* Get image UUID. */
2339 rc = vmdkDescDDBGetUuid(pImage, &pImage->Descriptor, VMDK_DDB_IMAGE_UUID,
2340 &pImage->ImageUuid);
2341 if (rc == VERR_VD_VMDK_VALUE_NOT_FOUND)
2342 {
2343 /* Image without UUID. Probably created by VMware and not yet used
2344 * by VirtualBox. Can only be added for images opened in read/write
2345 * mode, so don't bother producing a sensible UUID otherwise. */
2346 if (pImage->uOpenFlags & VD_OPEN_FLAGS_READONLY)
2347 RTUuidClear(&pImage->ImageUuid);
2348 else
2349 {
2350 rc = RTUuidCreate(&pImage->ImageUuid);
2351 if (RT_FAILURE(rc))
2352 return rc;
2353 rc = vmdkDescDDBSetUuid(pImage, &pImage->Descriptor,
2354 VMDK_DDB_IMAGE_UUID, &pImage->ImageUuid);
2355 if (RT_FAILURE(rc))
2356 return vmdkError(pImage, rc, RT_SRC_POS, N_("VMDK: error storing image UUID in descriptor in '%s'"), pImage->pszFilename);
2357 }
2358 }
2359 else if (RT_FAILURE(rc))
2360 return rc;
2361
2362 /* Get image modification UUID. */
2363 rc = vmdkDescDDBGetUuid(pImage, &pImage->Descriptor,
2364 VMDK_DDB_MODIFICATION_UUID,
2365 &pImage->ModificationUuid);
2366 if (rc == VERR_VD_VMDK_VALUE_NOT_FOUND)
2367 {
2368 /* Image without UUID. Probably created by VMware and not yet used
2369 * by VirtualBox. Can only be added for images opened in read/write
2370 * mode, so don't bother producing a sensible UUID otherwise. */
2371 if (pImage->uOpenFlags & VD_OPEN_FLAGS_READONLY)
2372 RTUuidClear(&pImage->ModificationUuid);
2373 else
2374 {
2375 rc = RTUuidCreate(&pImage->ModificationUuid);
2376 if (RT_FAILURE(rc))
2377 return rc;
2378 rc = vmdkDescDDBSetUuid(pImage, &pImage->Descriptor,
2379 VMDK_DDB_MODIFICATION_UUID,
2380 &pImage->ModificationUuid);
2381 if (RT_FAILURE(rc))
2382 return vmdkError(pImage, rc, RT_SRC_POS, N_("VMDK: error storing image modification UUID in descriptor in '%s'"), pImage->pszFilename);
2383 }
2384 }
2385 else if (RT_FAILURE(rc))
2386 return rc;
2387
2388 /* Get UUID of parent image. */
2389 rc = vmdkDescDDBGetUuid(pImage, &pImage->Descriptor, VMDK_DDB_PARENT_UUID,
2390 &pImage->ParentUuid);
2391 if (rc == VERR_VD_VMDK_VALUE_NOT_FOUND)
2392 {
2393 /* Image without UUID. Probably created by VMware and not yet used
2394 * by VirtualBox. Can only be added for images opened in read/write
2395 * mode, so don't bother producing a sensible UUID otherwise. */
2396 if (pImage->uOpenFlags & VD_OPEN_FLAGS_READONLY)
2397 RTUuidClear(&pImage->ParentUuid);
2398 else
2399 {
2400 rc = RTUuidClear(&pImage->ParentUuid);
2401 if (RT_FAILURE(rc))
2402 return rc;
2403 rc = vmdkDescDDBSetUuid(pImage, &pImage->Descriptor,
2404 VMDK_DDB_PARENT_UUID, &pImage->ParentUuid);
2405 if (RT_FAILURE(rc))
2406 return vmdkError(pImage, rc, RT_SRC_POS, N_("VMDK: error storing parent UUID in descriptor in '%s'"), pImage->pszFilename);
2407 }
2408 }
2409 else if (RT_FAILURE(rc))
2410 return rc;
2411
2412 /* Get parent image modification UUID. */
2413 rc = vmdkDescDDBGetUuid(pImage, &pImage->Descriptor,
2414 VMDK_DDB_PARENT_MODIFICATION_UUID,
2415 &pImage->ParentModificationUuid);
2416 if (rc == VERR_VD_VMDK_VALUE_NOT_FOUND)
2417 {
2418 /* Image without UUID. Probably created by VMware and not yet used
2419 * by VirtualBox. Can only be added for images opened in read/write
2420 * mode, so don't bother producing a sensible UUID otherwise. */
2421 if (pImage->uOpenFlags & VD_OPEN_FLAGS_READONLY)
2422 RTUuidClear(&pImage->ParentModificationUuid);
2423 else
2424 {
2425 rc = RTUuidCreate(&pImage->ParentModificationUuid);
2426 if (RT_FAILURE(rc))
2427 return rc;
2428 rc = vmdkDescDDBSetUuid(pImage, &pImage->Descriptor,
2429 VMDK_DDB_PARENT_MODIFICATION_UUID,
2430 &pImage->ParentModificationUuid);
2431 if (RT_FAILURE(rc))
2432 return vmdkError(pImage, rc, RT_SRC_POS, N_("VMDK: error storing parent modification UUID in descriptor in '%s'"), pImage->pszFilename);
2433 }
2434 }
2435 else if (RT_FAILURE(rc))
2436 return rc;
2437
2438 return VINF_SUCCESS;
2439}
2440
2441/**
2442 * Internal: write/update the descriptor part of the image.
2443 */
2444static int vmdkWriteDescriptor(PVMDKIMAGE pImage)
2445{
2446 int rc = VINF_SUCCESS;
2447 uint64_t cbLimit;
2448 uint64_t uOffset;
2449 PVMDKFILE pDescFile;
2450
2451 if (pImage->pDescData)
2452 {
2453 /* Separate descriptor file. */
2454 uOffset = 0;
2455 cbLimit = 0;
2456 pDescFile = pImage->pFile;
2457 }
2458 else
2459 {
2460 /* Embedded descriptor file. */
2461 uOffset = VMDK_SECTOR2BYTE(pImage->pExtents[0].uDescriptorSector);
2462 cbLimit = VMDK_SECTOR2BYTE(pImage->pExtents[0].cDescriptorSectors);
2463 pDescFile = pImage->pExtents[0].pFile;
2464 }
2465 /* Bail out if there is no file to write to. */
2466 if (pDescFile == NULL)
2467 return VERR_INVALID_PARAMETER;
2468
2469 /*
2470 * Allocate temporary descriptor buffer.
2471 * In case there is no limit allocate a default
2472 * and increase if required.
2473 */
2474 size_t cbDescriptor = cbLimit ? cbLimit : 4 * _1K;
2475 char *pszDescriptor = (char *)RTMemAllocZ(cbDescriptor);
2476 unsigned offDescriptor = 0;
2477
2478 if (!pszDescriptor)
2479 return VERR_NO_MEMORY;
2480
2481 for (unsigned i = 0; i < pImage->Descriptor.cLines; i++)
2482 {
2483 const char *psz = pImage->Descriptor.aLines[i];
2484 size_t cb = strlen(psz);
2485
2486 /*
2487 * Increase the descriptor if there is no limit and
2488 * there is not enough room left for this line.
2489 */
2490 if (offDescriptor + cb + 1 > cbDescriptor)
2491 {
2492 if (cbLimit)
2493 {
2494 rc = vmdkError(pImage, VERR_BUFFER_OVERFLOW, RT_SRC_POS, N_("VMDK: descriptor too long in '%s'"), pImage->pszFilename);
2495 break;
2496 }
2497 else
2498 {
2499 char *pszDescriptorNew = NULL;
2500 LogFlow(("Increasing descriptor cache\n"));
2501
2502 pszDescriptorNew = (char *)RTMemRealloc(pszDescriptor, cbDescriptor + cb + 4 * _1K);
2503 if (!pszDescriptorNew)
2504 {
2505 rc = VERR_NO_MEMORY;
2506 break;
2507 }
2508 pszDescriptor = pszDescriptorNew;
2509 cbDescriptor += cb + 4 * _1K;
2510 }
2511 }
2512
2513 if (cb > 0)
2514 {
2515 memcpy(pszDescriptor + offDescriptor, psz, cb);
2516 offDescriptor += cb;
2517 }
2518
2519 memcpy(pszDescriptor + offDescriptor, "\n", 1);
2520 offDescriptor++;
2521 }
2522
2523 if (RT_SUCCESS(rc))
2524 {
2525 rc = vmdkFileWriteAt(pDescFile, uOffset, pszDescriptor, cbLimit ? cbLimit : offDescriptor, NULL);
2526 if (RT_FAILURE(rc))
2527 rc = vmdkError(pImage, rc, RT_SRC_POS, N_("VMDK: error writing descriptor in '%s'"), pImage->pszFilename);
2528 }
2529
2530 if (RT_SUCCESS(rc) && !cbLimit)
2531 {
2532 rc = vmdkFileSetSize(pDescFile, offDescriptor);
2533 if (RT_FAILURE(rc))
2534 rc = vmdkError(pImage, rc, RT_SRC_POS, N_("VMDK: error truncating descriptor in '%s'"), pImage->pszFilename);
2535 }
2536
2537 if (RT_SUCCESS(rc))
2538 pImage->Descriptor.fDirty = false;
2539
2540 RTMemFree(pszDescriptor);
2541 return rc;
2542}
2543
2544/**
2545 * Internal: write/update the descriptor part of the image - async version.
2546 */
2547static int vmdkWriteDescriptorAsync(PVMDKIMAGE pImage, PVDIOCTX pIoCtx)
2548{
2549 int rc = VINF_SUCCESS;
2550 uint64_t cbLimit;
2551 uint64_t uOffset;
2552 PVMDKFILE pDescFile;
2553
2554 if (pImage->pDescData)
2555 {
2556 /* Separate descriptor file. */
2557 uOffset = 0;
2558 cbLimit = 0;
2559 pDescFile = pImage->pFile;
2560 }
2561 else
2562 {
2563 /* Embedded descriptor file. */
2564 uOffset = VMDK_SECTOR2BYTE(pImage->pExtents[0].uDescriptorSector);
2565 cbLimit = VMDK_SECTOR2BYTE(pImage->pExtents[0].cDescriptorSectors);
2566 pDescFile = pImage->pExtents[0].pFile;
2567 }
2568 /* Bail out if there is no file to write to. */
2569 if (pDescFile == NULL)
2570 return VERR_INVALID_PARAMETER;
2571
2572 /*
2573 * Allocate temporary descriptor buffer.
2574 * In case there is no limit allocate a default
2575 * and increase if required.
2576 */
2577 size_t cbDescriptor = cbLimit ? cbLimit : 4 * _1K;
2578 char *pszDescriptor = (char *)RTMemAllocZ(cbDescriptor);
2579 unsigned offDescriptor = 0;
2580
2581 if (!pszDescriptor)
2582 return VERR_NO_MEMORY;
2583
2584 for (unsigned i = 0; i < pImage->Descriptor.cLines; i++)
2585 {
2586 const char *psz = pImage->Descriptor.aLines[i];
2587 size_t cb = strlen(psz);
2588
2589 /*
2590 * Increase the descriptor if there is no limit and
2591 * there is not enough room left for this line.
2592 */
2593 if (offDescriptor + cb + 1 > cbDescriptor)
2594 {
2595 if (cbLimit)
2596 {
2597 rc = vmdkError(pImage, VERR_BUFFER_OVERFLOW, RT_SRC_POS, N_("VMDK: descriptor too long in '%s'"), pImage->pszFilename);
2598 break;
2599 }
2600 else
2601 {
2602 char *pszDescriptorNew = NULL;
2603 LogFlow(("Increasing descriptor cache\n"));
2604
2605 pszDescriptorNew = (char *)RTMemRealloc(pszDescriptor, cbDescriptor + cb + 4 * _1K);
2606 if (!pszDescriptorNew)
2607 {
2608 rc = VERR_NO_MEMORY;
2609 break;
2610 }
2611 pszDescriptor = pszDescriptorNew;
2612 cbDescriptor += cb + 4 * _1K;
2613 }
2614 }
2615
2616 if (cb > 0)
2617 {
2618 memcpy(pszDescriptor + offDescriptor, psz, cb);
2619 offDescriptor += cb;
2620 }
2621
2622 memcpy(pszDescriptor + offDescriptor, "\n", 1);
2623 offDescriptor++;
2624 }
2625
2626 if (RT_SUCCESS(rc))
2627 {
2628 rc = vmdkFileWriteAt(pDescFile, uOffset, pszDescriptor, cbLimit ? cbLimit : offDescriptor, NULL);
2629 if (RT_FAILURE(rc))
2630 rc = vmdkError(pImage, rc, RT_SRC_POS, N_("VMDK: error writing descriptor in '%s'"), pImage->pszFilename);
2631 }
2632
2633 if (RT_SUCCESS(rc) && !cbLimit)
2634 {
2635 rc = vmdkFileSetSize(pDescFile, offDescriptor);
2636 if (RT_FAILURE(rc))
2637 rc = vmdkError(pImage, rc, RT_SRC_POS, N_("VMDK: error truncating descriptor in '%s'"), pImage->pszFilename);
2638 }
2639
2640 if (RT_SUCCESS(rc))
2641 pImage->Descriptor.fDirty = false;
2642
2643 RTMemFree(pszDescriptor);
2644 return rc;
2645}
2646
2647/**
2648 * Internal: validate the consistency check values in a binary header.
2649 */
2650static int vmdkValidateHeader(PVMDKIMAGE pImage, PVMDKEXTENT pExtent, const SparseExtentHeader *pHeader)
2651{
2652 int rc = VINF_SUCCESS;
2653 if (RT_LE2H_U32(pHeader->magicNumber) != VMDK_SPARSE_MAGICNUMBER)
2654 {
2655 rc = vmdkError(pImage, VERR_VD_VMDK_INVALID_HEADER, RT_SRC_POS, N_("VMDK: incorrect magic in sparse extent header in '%s'"), pExtent->pszFullname);
2656 return rc;
2657 }
2658 if (RT_LE2H_U32(pHeader->version) != 1 && RT_LE2H_U32(pHeader->version) != 3)
2659 {
2660 rc = vmdkError(pImage, VERR_VD_VMDK_UNSUPPORTED_VERSION, RT_SRC_POS, N_("VMDK: incorrect version in sparse extent header in '%s', not a VMDK 1.0/1.1 conforming file"), pExtent->pszFullname);
2661 return rc;
2662 }
2663 if ( (RT_LE2H_U32(pHeader->flags) & 1)
2664 && ( pHeader->singleEndLineChar != '\n'
2665 || pHeader->nonEndLineChar != ' '
2666 || pHeader->doubleEndLineChar1 != '\r'
2667 || pHeader->doubleEndLineChar2 != '\n') )
2668 {
2669 rc = vmdkError(pImage, VERR_VD_VMDK_INVALID_HEADER, RT_SRC_POS, N_("VMDK: corrupted by CR/LF translation in '%s'"), pExtent->pszFullname);
2670 return rc;
2671 }
2672 return rc;
2673}
2674
2675/**
2676 * Internal: read metadata belonging to an extent with binary header, i.e.
2677 * as found in monolithic files.
2678 */
2679static int vmdkReadBinaryMetaExtent(PVMDKIMAGE pImage, PVMDKEXTENT pExtent)
2680{
2681 SparseExtentHeader Header;
2682 uint64_t cSectorsPerGDE;
2683
2684 int rc = vmdkFileReadAt(pExtent->pFile, 0, &Header, sizeof(Header), NULL);
2685 AssertRC(rc);
2686 if (RT_FAILURE(rc))
2687 {
2688 rc = vmdkError(pImage, rc, RT_SRC_POS, N_("VMDK: error reading extent header in '%s'"), pExtent->pszFullname);
2689 goto out;
2690 }
2691 rc = vmdkValidateHeader(pImage, pExtent, &Header);
2692 if (RT_FAILURE(rc))
2693 goto out;
2694 if ( (RT_LE2H_U32(Header.flags) & RT_BIT(17))
2695 && RT_LE2H_U64(Header.gdOffset) == VMDK_GD_AT_END)
2696 {
2697 /* Read the footer, which isn't compressed and comes before the
2698 * end-of-stream marker. This is bending the VMDK 1.1 spec, but that's
2699 * VMware reality. Theory and practice have very little in common. */
2700 uint64_t cbSize;
2701 rc = vmdkFileGetSize(pExtent->pFile, &cbSize);
2702 AssertRC(rc);
2703 if (RT_FAILURE(rc))
2704 {
2705 rc = vmdkError(pImage, rc, RT_SRC_POS, N_("VMDK: cannot get size of '%s'"), pExtent->pszFullname);
2706 goto out;
2707 }
2708 cbSize = RT_ALIGN_64(cbSize, 512);
2709 rc = vmdkFileReadAt(pExtent->pFile, cbSize - 2*512, &Header, sizeof(Header), NULL);
2710 AssertRC(rc);
2711 if (RT_FAILURE(rc))
2712 {
2713 rc = vmdkError(pImage, rc, RT_SRC_POS, N_("VMDK: error reading extent footer in '%s'"), pExtent->pszFullname);
2714 goto out;
2715 }
2716 rc = vmdkValidateHeader(pImage, pExtent, &Header);
2717 if (RT_FAILURE(rc))
2718 goto out;
2719 pExtent->fFooter = true;
2720 }
2721 pExtent->uVersion = RT_LE2H_U32(Header.version);
2722 pExtent->enmType = VMDKETYPE_HOSTED_SPARSE; /* Just dummy value, changed later. */
2723 pExtent->cSectors = RT_LE2H_U64(Header.capacity);
2724 pExtent->cSectorsPerGrain = RT_LE2H_U64(Header.grainSize);
2725 pExtent->uDescriptorSector = RT_LE2H_U64(Header.descriptorOffset);
2726 pExtent->cDescriptorSectors = RT_LE2H_U64(Header.descriptorSize);
2727 if (pExtent->uDescriptorSector && !pExtent->cDescriptorSectors)
2728 {
2729 rc = vmdkError(pImage, VERR_VD_VMDK_INVALID_HEADER, RT_SRC_POS, N_("VMDK: inconsistent embedded descriptor config in '%s'"), pExtent->pszFullname);
2730 goto out;
2731 }
2732 pExtent->cGTEntries = RT_LE2H_U32(Header.numGTEsPerGT);
2733 if (RT_LE2H_U32(Header.flags) & RT_BIT(1))
2734 {
2735 pExtent->uSectorRGD = RT_LE2H_U64(Header.rgdOffset);
2736 pExtent->uSectorGD = RT_LE2H_U64(Header.gdOffset);
2737 }
2738 else
2739 {
2740 pExtent->uSectorGD = RT_LE2H_U64(Header.gdOffset);
2741 pExtent->uSectorRGD = 0;
2742 }
2743 if (pExtent->uSectorGD == VMDK_GD_AT_END || pExtent->uSectorRGD == VMDK_GD_AT_END)
2744 {
2745 rc = vmdkError(pImage, VERR_VD_VMDK_INVALID_HEADER, RT_SRC_POS, N_("VMDK: cannot resolve grain directory offset in '%s'"), pExtent->pszFullname);
2746 goto out;
2747 }
2748 pExtent->cOverheadSectors = RT_LE2H_U64(Header.overHead);
2749 pExtent->fUncleanShutdown = !!Header.uncleanShutdown;
2750 pExtent->uCompression = RT_LE2H_U16(Header.compressAlgorithm);
2751 cSectorsPerGDE = pExtent->cGTEntries * pExtent->cSectorsPerGrain;
2752 if (!cSectorsPerGDE || cSectorsPerGDE > UINT32_MAX)
2753 {
2754 rc = vmdkError(pImage, VERR_VD_VMDK_INVALID_HEADER, RT_SRC_POS, N_("VMDK: incorrect grain directory size in '%s'"), pExtent->pszFullname);
2755 goto out;
2756 }
2757 pExtent->cSectorsPerGDE = cSectorsPerGDE;
2758 pExtent->cGDEntries = (pExtent->cSectors + cSectorsPerGDE - 1) / cSectorsPerGDE;
2759
2760 /* Fix up the number of descriptor sectors, as some flat images have
2761 * really just one, and this causes failures when inserting the UUID
2762 * values and other extra information. */
2763 if (pExtent->cDescriptorSectors != 0 && pExtent->cDescriptorSectors < 4)
2764 {
2765 /* Do it the easy way - just fix it for flat images which have no
2766 * other complicated metadata which needs space too. */
2767 if ( pExtent->uDescriptorSector + 4 < pExtent->cOverheadSectors
2768 && pExtent->cGTEntries * pExtent->cGDEntries == 0)
2769 pExtent->cDescriptorSectors = 4;
2770 }
2771
2772out:
2773 if (RT_FAILURE(rc))
2774 vmdkFreeExtentData(pImage, pExtent, false);
2775
2776 return rc;
2777}
2778
2779/**
2780 * Internal: read additional metadata belonging to an extent. For those
2781 * extents which have no additional metadata just verify the information.
2782 */
2783static int vmdkReadMetaExtent(PVMDKIMAGE pImage, PVMDKEXTENT pExtent)
2784{
2785 int rc = VINF_SUCCESS;
2786 uint64_t cbExtentSize;
2787
2788 /* The image must be a multiple of a sector in size and contain the data
2789 * area (flat images only). If not, it means the image is at least
2790 * truncated, or even seriously garbled. */
2791 rc = vmdkFileGetSize(pExtent->pFile, &cbExtentSize);
2792 if (RT_FAILURE(rc))
2793 {
2794 rc = vmdkError(pImage, rc, RT_SRC_POS, N_("VMDK: error getting size in '%s'"), pExtent->pszFullname);
2795 goto out;
2796 }
2797/* disabled the size check again as there are too many too short vmdks out there */
2798#ifdef VBOX_WITH_VMDK_STRICT_SIZE_CHECK
2799 if ( cbExtentSize != RT_ALIGN_64(cbExtentSize, 512)
2800 && (pExtent->enmType != VMDKETYPE_FLAT || pExtent->cNominalSectors + pExtent->uSectorOffset > VMDK_BYTE2SECTOR(cbExtentSize)))
2801 {
2802 rc = vmdkError(pImage, VERR_VD_VMDK_INVALID_HEADER, RT_SRC_POS, N_("VMDK: file size is not a multiple of 512 in '%s', file is truncated or otherwise garbled"), pExtent->pszFullname);
2803 goto out;
2804 }
2805#endif /* VBOX_WITH_VMDK_STRICT_SIZE_CHECK */
2806 if (pExtent->enmType != VMDKETYPE_HOSTED_SPARSE)
2807 goto out;
2808
2809 /* The spec says that this must be a power of two and greater than 8,
2810 * but probably they meant not less than 8. */
2811 if ( (pExtent->cSectorsPerGrain & (pExtent->cSectorsPerGrain - 1))
2812 || pExtent->cSectorsPerGrain < 8)
2813 {
2814 rc = vmdkError(pImage, VERR_VD_VMDK_INVALID_HEADER, RT_SRC_POS, N_("VMDK: invalid extent grain size %u in '%s'"), pExtent->cSectorsPerGrain, pExtent->pszFullname);
2815 goto out;
2816 }
2817
2818 /* This code requires that a grain table hold a power-of-two multiple
2819 * of the number of entries per GT cache entry. */
2820 if ( (pExtent->cGTEntries & (pExtent->cGTEntries - 1))
2821 || pExtent->cGTEntries < VMDK_GT_CACHELINE_SIZE)
2822 {
2823 rc = vmdkError(pImage, VERR_VD_VMDK_INVALID_HEADER, RT_SRC_POS, N_("VMDK: grain table cache size problem in '%s'"), pExtent->pszFullname);
2824 goto out;
2825 }
2826
2827 rc = vmdkReadGrainDirectory(pExtent);
2828
2829out:
2830 if (RT_FAILURE(rc))
2831 vmdkFreeExtentData(pImage, pExtent, false);
2832
2833 return rc;
2834}
2835
2836/**
2837 * Internal: write/update the metadata for a sparse extent.
2838 */
2839static int vmdkWriteMetaSparseExtent(PVMDKEXTENT pExtent, uint64_t uOffset)
2840{
2841 SparseExtentHeader Header;
2842
2843 memset(&Header, '\0', sizeof(Header));
2844 Header.magicNumber = RT_H2LE_U32(VMDK_SPARSE_MAGICNUMBER);
2845 Header.version = RT_H2LE_U32(pExtent->uVersion);
2846 Header.flags = RT_H2LE_U32(RT_BIT(0));
2847 if (pExtent->pRGD)
2848 Header.flags |= RT_H2LE_U32(RT_BIT(1));
2849 if (pExtent->pImage->uImageFlags & VD_VMDK_IMAGE_FLAGS_STREAM_OPTIMIZED)
2850 Header.flags |= RT_H2LE_U32(RT_BIT(16) | RT_BIT(17));
2851 Header.capacity = RT_H2LE_U64(pExtent->cSectors);
2852 Header.grainSize = RT_H2LE_U64(pExtent->cSectorsPerGrain);
2853 Header.descriptorOffset = RT_H2LE_U64(pExtent->uDescriptorSector);
2854 Header.descriptorSize = RT_H2LE_U64(pExtent->cDescriptorSectors);
2855 Header.numGTEsPerGT = RT_H2LE_U32(pExtent->cGTEntries);
2856 if (pExtent->fFooter && uOffset == 0)
2857 {
2858 if (pExtent->pRGD)
2859 {
2860 Assert(pExtent->uSectorRGD);
2861 Header.rgdOffset = RT_H2LE_U64(VMDK_GD_AT_END);
2862 Header.gdOffset = RT_H2LE_U64(VMDK_GD_AT_END);
2863 }
2864 else
2865 {
2866 Header.gdOffset = RT_H2LE_U64(VMDK_GD_AT_END);
2867 }
2868 }
2869 else
2870 {
2871 if (pExtent->pRGD)
2872 {
2873 Assert(pExtent->uSectorRGD);
2874 Header.rgdOffset = RT_H2LE_U64(pExtent->uSectorRGD);
2875 Header.gdOffset = RT_H2LE_U64(pExtent->uSectorGD);
2876 }
2877 else
2878 {
2879 Header.gdOffset = RT_H2LE_U64(pExtent->uSectorGD);
2880 }
2881 }
2882 Header.overHead = RT_H2LE_U64(pExtent->cOverheadSectors);
2883 Header.uncleanShutdown = pExtent->fUncleanShutdown;
2884 Header.singleEndLineChar = '\n';
2885 Header.nonEndLineChar = ' ';
2886 Header.doubleEndLineChar1 = '\r';
2887 Header.doubleEndLineChar2 = '\n';
2888 Header.compressAlgorithm = RT_H2LE_U16(pExtent->uCompression);
2889
2890 int rc = vmdkFileWriteAt(pExtent->pFile, uOffset, &Header, sizeof(Header), NULL);
2891 AssertRC(rc);
2892 if (RT_FAILURE(rc))
2893 rc = vmdkError(pExtent->pImage, rc, RT_SRC_POS, N_("VMDK: error writing extent header in '%s'"), pExtent->pszFullname);
2894 return rc;
2895}
2896
2897#ifdef VBOX_WITH_VMDK_ESX
2898/**
2899 * Internal: unused code to read the metadata of a sparse ESX extent.
2900 *
2901 * Such extents never leave ESX server, so this isn't ever used.
2902 */
2903 static int vmdkReadMetaESXSparseExtent(PVMDKIMAGE pImage, PVMDKEXTENT pExtent)
2904{
2905 COWDisk_Header Header;
2906 uint64_t cSectorsPerGDE;
2907
2908 int rc = vmdkFileReadAt(pExtent->pFile, 0, &Header, sizeof(Header), NULL);
2909 AssertRC(rc);
2910 if (RT_FAILURE(rc))
2911 goto out;
2912 if ( RT_LE2H_U32(Header.magicNumber) != VMDK_ESX_SPARSE_MAGICNUMBER
2913 || RT_LE2H_U32(Header.version) != 1
2914 || RT_LE2H_U32(Header.flags) != 3)
2915 {
2916 rc = VERR_VD_VMDK_INVALID_HEADER;
2917 goto out;
2918 }
2919 pExtent->enmType = VMDKETYPE_ESX_SPARSE;
2920 pExtent->cSectors = RT_LE2H_U32(Header.numSectors);
2921 pExtent->cSectorsPerGrain = RT_LE2H_U32(Header.grainSize);
2922 /* The spec says that this must be between 1 sector and 1MB. This code
2923 * assumes it's a power of two, so check that requirement, too. */
2924 if ( (pExtent->cSectorsPerGrain & (pExtent->cSectorsPerGrain - 1))
2925 || pExtent->cSectorsPerGrain == 0
2926 || pExtent->cSectorsPerGrain > 2048)
2927 {
2928 rc = VERR_VD_VMDK_INVALID_HEADER;
2929 goto out;
2930 }
2931 pExtent->uDescriptorSector = 0;
2932 pExtent->cDescriptorSectors = 0;
2933 pExtent->uSectorGD = RT_LE2H_U32(Header.gdOffset);
2934 pExtent->uSectorRGD = 0;
2935 pExtent->cOverheadSectors = 0;
2936 pExtent->cGTEntries = 4096;
2937 cSectorsPerGDE = pExtent->cGTEntries * pExtent->cSectorsPerGrain;
2938 if (!cSectorsPerGDE || cSectorsPerGDE > UINT32_MAX)
2939 {
2940 rc = VERR_VD_VMDK_INVALID_HEADER;
2941 goto out;
2942 }
2943 pExtent->cSectorsPerGDE = cSectorsPerGDE;
2944 pExtent->cGDEntries = (pExtent->cSectors + cSectorsPerGDE - 1) / cSectorsPerGDE;
2945 if (pExtent->cGDEntries != RT_LE2H_U32(Header.numGDEntries))
2946 {
2947 /* Inconsistency detected. Computed number of GD entries doesn't match
2948 * stored value. Better be safe than sorry. */
2949 rc = VERR_VD_VMDK_INVALID_HEADER;
2950 goto out;
2951 }
2952 pExtent->uFreeSector = RT_LE2H_U32(Header.freeSector);
2953 pExtent->fUncleanShutdown = !!Header.uncleanShutdown;
2954
2955 rc = vmdkReadGrainDirectory(pExtent);
2956
2957out:
2958 if (RT_FAILURE(rc))
2959 vmdkFreeExtentData(pImage, pExtent, false);
2960
2961 return rc;
2962}
2963#endif /* VBOX_WITH_VMDK_ESX */
2964
2965/**
2966 * Internal: free the memory used by the extent data structure, optionally
2967 * deleting the referenced files.
2968 */
2969static void vmdkFreeExtentData(PVMDKIMAGE pImage, PVMDKEXTENT pExtent,
2970 bool fDelete)
2971{
2972 vmdkFreeGrainDirectory(pExtent);
2973 if (pExtent->pDescData)
2974 {
2975 RTMemFree(pExtent->pDescData);
2976 pExtent->pDescData = NULL;
2977 }
2978 if (pExtent->pFile != NULL)
2979 {
2980 /* Do not delete raw extents; their full and base names are equal. */
2981 vmdkFileClose(pImage, &pExtent->pFile,
2982 fDelete
2983 && pExtent->pszFullname
2984 && strcmp(pExtent->pszFullname, pExtent->pszBasename));
2985 }
2986 if (pExtent->pszBasename)
2987 {
2988 RTMemTmpFree((void *)pExtent->pszBasename);
2989 pExtent->pszBasename = NULL;
2990 }
2991 if (pExtent->pszFullname)
2992 {
2993 RTStrFree((char *)(void *)pExtent->pszFullname);
2994 pExtent->pszFullname = NULL;
2995 }
2996 if (pExtent->pvGrain)
2997 {
2998 RTMemFree(pExtent->pvGrain);
2999 pExtent->pvGrain = NULL;
3000 }
3001}
3002
3003/**
3004 * Internal: allocate grain table cache if necessary for this image.
3005 */
3006static int vmdkAllocateGrainTableCache(PVMDKIMAGE pImage)
3007{
3008 PVMDKEXTENT pExtent;
3009
3010 /* Allocate grain table cache if any sparse extent is present. */
3011 for (unsigned i = 0; i < pImage->cExtents; i++)
3012 {
3013 pExtent = &pImage->pExtents[i];
3014 if ( pExtent->enmType == VMDKETYPE_HOSTED_SPARSE
3015#ifdef VBOX_WITH_VMDK_ESX
3016 || pExtent->enmType == VMDKETYPE_ESX_SPARSE
3017#endif /* VBOX_WITH_VMDK_ESX */
3018 )
3019 {
3020 /* Allocate grain table cache. */
3021 pImage->pGTCache = (PVMDKGTCACHE)RTMemAllocZ(sizeof(VMDKGTCACHE));
3022 if (!pImage->pGTCache)
3023 return VERR_NO_MEMORY;
3024 for (unsigned j = 0; j < VMDK_GT_CACHE_SIZE; j++)
3025 {
3026 PVMDKGTCACHEENTRY pGCE = &pImage->pGTCache->aGTCache[j];
3027 pGCE->uExtent = UINT32_MAX;
3028 }
3029 pImage->pGTCache->cEntries = VMDK_GT_CACHE_SIZE;
3030 break;
3031 }
3032 }
3033
3034 return VINF_SUCCESS;
3035}
3036
3037/**
3038 * Internal: allocate the given number of extents.
3039 */
3040static int vmdkCreateExtents(PVMDKIMAGE pImage, unsigned cExtents)
3041{
3042 int rc = VINF_SUCCESS;
3043 PVMDKEXTENT pExtents = (PVMDKEXTENT)RTMemAllocZ(cExtents * sizeof(VMDKEXTENT));
3044 if (pExtents)
3045 {
3046 for (unsigned i = 0; i < cExtents; i++)
3047 {
3048 pExtents[i].pFile = NULL;
3049 pExtents[i].pszBasename = NULL;
3050 pExtents[i].pszFullname = NULL;
3051 pExtents[i].pGD = NULL;
3052 pExtents[i].pRGD = NULL;
3053 pExtents[i].pDescData = NULL;
3054 pExtents[i].uVersion = 1;
3055 pExtents[i].uCompression = VMDK_COMPRESSION_NONE;
3056 pExtents[i].uExtent = i;
3057 pExtents[i].pImage = pImage;
3058 }
3059 pImage->pExtents = pExtents;
3060 pImage->cExtents = cExtents;
3061 }
3062 else
3063 rc = VERR_NO_MEMORY;
3064
3065 return rc;
3066}
3067
3068/**
3069 * Internal: Open an image, constructing all necessary data structures.
3070 */
3071static int vmdkOpenImage(PVMDKIMAGE pImage, unsigned uOpenFlags)
3072{
3073 int rc;
3074 uint32_t u32Magic;
3075 PVMDKFILE pFile;
3076 PVMDKEXTENT pExtent;
3077
3078 pImage->uOpenFlags = uOpenFlags;
3079
3080 /* Try to get error interface. */
3081 pImage->pInterfaceError = VDInterfaceGet(pImage->pVDIfsDisk, VDINTERFACETYPE_ERROR);
3082 if (pImage->pInterfaceError)
3083 pImage->pInterfaceErrorCallbacks = VDGetInterfaceError(pImage->pInterfaceError);
3084
3085 /* Try to get async I/O interface. */
3086 pImage->pInterfaceIO = VDInterfaceGet(pImage->pVDIfsImage, VDINTERFACETYPE_IO);
3087 if (pImage->pInterfaceIO)
3088 pImage->pInterfaceIOCallbacks = VDGetInterfaceIO(pImage->pInterfaceIO);
3089
3090 /*
3091 * Open the image.
3092 * We don't have to check for asynchronous access because
3093 * we only support raw access and the opened file is a description
3094 * file where no data is stored.
3095 */
3096 rc = vmdkFileOpen(pImage, &pFile, pImage->pszFilename,
3097 uOpenFlags & VD_OPEN_FLAGS_READONLY
3098 ? RTFILE_O_READ | RTFILE_O_OPEN | RTFILE_O_DENY_NONE
3099 : RTFILE_O_READWRITE | RTFILE_O_OPEN | RTFILE_O_DENY_WRITE, false);
3100 if (RT_FAILURE(rc))
3101 {
3102 /* Do NOT signal an appropriate error here, as the VD layer has the
3103 * choice of retrying the open if it failed. */
3104 goto out;
3105 }
3106 pImage->pFile = pFile;
3107
3108 /* Read magic (if present). */
3109 rc = vmdkFileReadAt(pFile, 0, &u32Magic, sizeof(u32Magic), NULL);
3110 if (RT_FAILURE(rc))
3111 {
3112 rc = vmdkError(pImage, rc, RT_SRC_POS, N_("VMDK: error reading the magic number in '%s'"), pImage->pszFilename);
3113 goto out;
3114 }
3115
3116 /* Handle the file according to its magic number. */
3117 if (RT_LE2H_U32(u32Magic) == VMDK_SPARSE_MAGICNUMBER)
3118 {
3119 /* Async I/O is not supported with these files yet. So fail if opened in async I/O mode. */
3120 if (uOpenFlags & VD_OPEN_FLAGS_ASYNC_IO)
3121 {
3122 rc = VERR_NOT_SUPPORTED;
3123 goto out;
3124 }
3125
3126 /* It's a hosted single-extent image. */
3127 rc = vmdkCreateExtents(pImage, 1);
3128 if (RT_FAILURE(rc))
3129 goto out;
3130 /* The opened file is passed to the extent. No separate descriptor
3131 * file, so no need to keep anything open for the image. */
3132 pExtent = &pImage->pExtents[0];
3133 pExtent->pFile = pFile;
3134 pImage->pFile = NULL;
3135 pExtent->pszFullname = RTPathAbsDup(pImage->pszFilename);
3136 if (!pExtent->pszFullname)
3137 {
3138 rc = VERR_NO_MEMORY;
3139 goto out;
3140 }
3141 rc = vmdkReadBinaryMetaExtent(pImage, pExtent);
3142 if (RT_FAILURE(rc))
3143 goto out;
3144
3145 /* As we're dealing with a monolithic image here, there must
3146 * be a descriptor embedded in the image file. */
3147 if (!pExtent->uDescriptorSector || !pExtent->cDescriptorSectors)
3148 {
3149 rc = vmdkError(pImage, VERR_VD_VMDK_INVALID_HEADER, RT_SRC_POS, N_("VMDK: monolithic image without descriptor in '%s'"), pImage->pszFilename);
3150 goto out;
3151 }
3152 /* HACK: extend the descriptor if it is unusually small and it fits in
3153 * the unused space after the image header. Allows opening VMDK files
3154 * with extremely small descriptor in read/write mode. */
3155 if ( !(pImage->uOpenFlags & VD_OPEN_FLAGS_READONLY)
3156 && pExtent->cDescriptorSectors < 3
3157 && (int64_t)pExtent->uSectorGD - pExtent->uDescriptorSector >= 4
3158 && (!pExtent->uSectorRGD || (int64_t)pExtent->uSectorRGD - pExtent->uDescriptorSector >= 4))
3159 {
3160 pExtent->cDescriptorSectors = 4;
3161 pExtent->fMetaDirty = true;
3162 }
3163 /* Read the descriptor from the extent. */
3164 pExtent->pDescData = (char *)RTMemAllocZ(VMDK_SECTOR2BYTE(pExtent->cDescriptorSectors));
3165 if (!pExtent->pDescData)
3166 {
3167 rc = VERR_NO_MEMORY;
3168 goto out;
3169 }
3170 rc = vmdkFileReadAt(pExtent->pFile,
3171 VMDK_SECTOR2BYTE(pExtent->uDescriptorSector),
3172 pExtent->pDescData,
3173 VMDK_SECTOR2BYTE(pExtent->cDescriptorSectors), NULL);
3174 AssertRC(rc);
3175 if (RT_FAILURE(rc))
3176 {
3177 rc = vmdkError(pImage, rc, RT_SRC_POS, N_("VMDK: read error for descriptor in '%s'"), pExtent->pszFullname);
3178 goto out;
3179 }
3180
3181 rc = vmdkParseDescriptor(pImage, pExtent->pDescData,
3182 VMDK_SECTOR2BYTE(pExtent->cDescriptorSectors));
3183 if (RT_FAILURE(rc))
3184 goto out;
3185
3186 rc = vmdkReadMetaExtent(pImage, pExtent);
3187 if (RT_FAILURE(rc))
3188 goto out;
3189
3190 /* Mark the extent as unclean if opened in read-write mode. */
3191 if (!(uOpenFlags & VD_OPEN_FLAGS_READONLY))
3192 {
3193 pExtent->fUncleanShutdown = true;
3194 pExtent->fMetaDirty = true;
3195 }
3196 }
3197 else
3198 {
3199 /* Allocate at least 10K, and make sure that there is 5K free space
3200 * in case new entries need to be added to the descriptor. Never
3201 * allocate more than 128K, because that's not a valid descriptor file
3202 * and will result in the correct "truncated read" error handling. */
3203 uint64_t cbFileSize;
3204 rc = vmdkFileGetSize(pFile, &cbFileSize);
3205 if (RT_FAILURE(rc))
3206 goto out;
3207
3208 uint64_t cbSize = cbFileSize;
3209 if (cbSize % VMDK_SECTOR2BYTE(10))
3210 cbSize += VMDK_SECTOR2BYTE(20) - cbSize % VMDK_SECTOR2BYTE(10);
3211 else
3212 cbSize += VMDK_SECTOR2BYTE(10);
3213 cbSize = RT_MIN(cbSize, _128K);
3214 pImage->cbDescAlloc = RT_MAX(VMDK_SECTOR2BYTE(20), cbSize);
3215 pImage->pDescData = (char *)RTMemAllocZ(pImage->cbDescAlloc);
3216 if (!pImage->pDescData)
3217 {
3218 rc = VERR_NO_MEMORY;
3219 goto out;
3220 }
3221
3222 size_t cbRead;
3223 rc = vmdkFileReadAt(pImage->pFile, 0, pImage->pDescData,
3224 RT_MIN(pImage->cbDescAlloc, cbFileSize),
3225 &cbRead);
3226 if (RT_FAILURE(rc))
3227 {
3228 rc = vmdkError(pImage, rc, RT_SRC_POS, N_("VMDK: read error for descriptor in '%s'"), pImage->pszFilename);
3229 goto out;
3230 }
3231 if (cbRead == pImage->cbDescAlloc)
3232 {
3233 /* Likely the read is truncated. Better fail a bit too early
3234 * (normally the descriptor is much smaller than our buffer). */
3235 rc = vmdkError(pImage, VERR_VD_VMDK_INVALID_HEADER, RT_SRC_POS, N_("VMDK: cannot read descriptor in '%s'"), pImage->pszFilename);
3236 goto out;
3237 }
3238
3239 rc = vmdkParseDescriptor(pImage, pImage->pDescData,
3240 pImage->cbDescAlloc);
3241 if (RT_FAILURE(rc))
3242 goto out;
3243
3244 /*
3245 * We have to check for the asynchronous open flag. The
3246 * extents are parsed and the types of all of them are known now.
3247 * Check that every extent is either FLAT, ZERO or VMFS.
3248 */
3249 if (uOpenFlags & VD_OPEN_FLAGS_ASYNC_IO)
3250 {
3251 unsigned cFlatExtents = 0;
3252
3253 for (unsigned i = 0; i < pImage->cExtents; i++)
3254 {
3255 pExtent = &pImage->pExtents[i];
3256
3257 if (( pExtent->enmType != VMDKETYPE_FLAT
3258 && pExtent->enmType != VMDKETYPE_ZERO
3259 && pExtent->enmType != VMDKETYPE_VMFS)
3260 || ((pImage->pExtents[i].enmType == VMDKETYPE_FLAT) && (cFlatExtents > 0)))
3261 {
3262 /*
3263 * Opened image contains at least one extent that is neither flat, zero nor VMFS, or more than one flat extent.
3264 * Return error but don't set error message as the caller
3265 * has the chance to open in non async I/O mode.
3266 */
3267 rc = VERR_NOT_SUPPORTED;
3268 goto out;
3269 }
3270 if (pExtent->enmType == VMDKETYPE_FLAT)
3271 cFlatExtents++;
3272 }
3273 }
3274
3275 for (unsigned i = 0; i < pImage->cExtents; i++)
3276 {
3277 pExtent = &pImage->pExtents[i];
3278
3279 if (pExtent->pszBasename)
3280 {
3281 /* Hack to figure out whether the specified name in the
3282 * extent descriptor is absolute. Doesn't always work, but
3283 * should be good enough for now. */
3284 char *pszFullname;
3285 /** @todo implement proper path absolute check. */
3286 if (pExtent->pszBasename[0] == RTPATH_SLASH)
3287 {
3288 pszFullname = RTStrDup(pExtent->pszBasename);
3289 if (!pszFullname)
3290 {
3291 rc = VERR_NO_MEMORY;
3292 goto out;
3293 }
3294 }
3295 else
3296 {
3297 size_t cbDirname;
3298 char *pszDirname = RTStrDup(pImage->pszFilename);
3299 if (!pszDirname)
3300 {
3301 rc = VERR_NO_MEMORY;
3302 goto out;
3303 }
3304 RTPathStripFilename(pszDirname);
3305 cbDirname = strlen(pszDirname);
3306 rc = RTStrAPrintf(&pszFullname, "%s%c%s", pszDirname,
3307 RTPATH_SLASH, pExtent->pszBasename);
3308 RTStrFree(pszDirname);
3309 if (RT_FAILURE(rc))
3310 goto out;
3311 }
3312 pExtent->pszFullname = pszFullname;
3313 }
3314 else
3315 pExtent->pszFullname = NULL;
3316
3317 switch (pExtent->enmType)
3318 {
3319 case VMDKETYPE_HOSTED_SPARSE:
3320 rc = vmdkFileOpen(pImage, &pExtent->pFile, pExtent->pszFullname,
3321 uOpenFlags & VD_OPEN_FLAGS_READONLY
3322 ? RTFILE_O_READ | RTFILE_O_OPEN | RTFILE_O_DENY_NONE
3323 : RTFILE_O_READWRITE | RTFILE_O_OPEN | RTFILE_O_DENY_WRITE, false);
3324 if (RT_FAILURE(rc))
3325 {
3326 /* Do NOT signal an appropriate error here, as the VD
3327 * layer has the choice of retrying the open if it
3328 * failed. */
3329 goto out;
3330 }
3331 rc = vmdkReadBinaryMetaExtent(pImage, pExtent);
3332 if (RT_FAILURE(rc))
3333 goto out;
3334 rc = vmdkReadMetaExtent(pImage, pExtent);
3335 if (RT_FAILURE(rc))
3336 goto out;
3337
3338 /* Mark extent as unclean if opened in read-write mode. */
3339 if (!(uOpenFlags & VD_OPEN_FLAGS_READONLY))
3340 {
3341 pExtent->fUncleanShutdown = true;
3342 pExtent->fMetaDirty = true;
3343 }
3344 break;
3345 case VMDKETYPE_VMFS:
3346 case VMDKETYPE_FLAT:
3347 rc = vmdkFileOpen(pImage, &pExtent->pFile, pExtent->pszFullname,
3348 uOpenFlags & VD_OPEN_FLAGS_READONLY
3349 ? RTFILE_O_READ | RTFILE_O_OPEN | RTFILE_O_DENY_NONE
3350 : RTFILE_O_READWRITE | RTFILE_O_OPEN | RTFILE_O_DENY_WRITE, true);
3351 if (RT_FAILURE(rc))
3352 {
3353 /* Do NOT signal an appropriate error here, as the VD
3354 * layer has the choice of retrying the open if it
3355 * failed. */
3356 goto out;
3357 }
3358 break;
3359 case VMDKETYPE_ZERO:
3360 /* Nothing to do. */
3361 break;
3362 default:
3363 AssertMsgFailed(("unknown vmdk extent type %d\n", pExtent->enmType));
3364 }
3365 }
3366 }
3367
3368 /* Make sure this is not reached accidentally with an error status. */
3369 AssertRC(rc);
3370
3371 /* Determine PCHS geometry if not set. */
3372 if (pImage->PCHSGeometry.cCylinders == 0)
3373 {
3374 uint64_t cCylinders = VMDK_BYTE2SECTOR(pImage->cbSize)
3375 / pImage->PCHSGeometry.cHeads
3376 / pImage->PCHSGeometry.cSectors;
3377 pImage->PCHSGeometry.cCylinders = (unsigned)RT_MIN(cCylinders, 16383);
3378 if (!(pImage->uOpenFlags & VD_OPEN_FLAGS_READONLY))
3379 {
3380 rc = vmdkDescSetPCHSGeometry(pImage, &pImage->PCHSGeometry);
3381 AssertRC(rc);
3382 }
3383 }
3384
3385 /* Update the image metadata now in case it has changed. */
3386 rc = vmdkFlushImage(pImage);
3387 if (RT_FAILURE(rc))
3388 goto out;
3389
3390 /* Figure out a few per-image constants from the extents. */
3391 pImage->cbSize = 0;
3392 for (unsigned i = 0; i < pImage->cExtents; i++)
3393 {
3394 pExtent = &pImage->pExtents[i];
3395 if ( pExtent->enmType == VMDKETYPE_HOSTED_SPARSE
3396#ifdef VBOX_WITH_VMDK_ESX
3397 || pExtent->enmType == VMDKETYPE_ESX_SPARSE
3398#endif /* VBOX_WITH_VMDK_ESX */
3399 )
3400 {
3401 /* Here used to be a check whether the nominal size of an extent
3402 * is a multiple of the grain size. The spec says that this is
3403 * always the case, but unfortunately some files out there in the
3404 * wild violate the spec (e.g. ReactOS 0.3.1). */
3405 }
3406 pImage->cbSize += VMDK_SECTOR2BYTE(pExtent->cNominalSectors);
3407 }
3408
3409 for (unsigned i = 0; i < pImage->cExtents; i++)
3410 {
3411 pExtent = &pImage->pExtents[i];
3412 if ( pImage->pExtents[i].enmType == VMDKETYPE_FLAT
3413 || pImage->pExtents[i].enmType == VMDKETYPE_ZERO)
3414 {
3415 pImage->uImageFlags |= VD_IMAGE_FLAGS_FIXED;
3416 break;
3417 }
3418 }
3419
3420 rc = vmdkAllocateGrainTableCache(pImage);
3421 if (RT_FAILURE(rc))
3422 goto out;
3423
3424out:
3425 if (RT_FAILURE(rc))
3426 vmdkFreeImage(pImage, false);
3427 return rc;
3428}
3429
3430/**
3431 * Internal: create VMDK images for raw disk/partition access.
3432 */
3433static int vmdkCreateRawImage(PVMDKIMAGE pImage, const PVBOXHDDRAW pRaw,
3434 uint64_t cbSize)
3435{
3436 int rc = VINF_SUCCESS;
3437 PVMDKEXTENT pExtent;
3438
3439 if (pRaw->fRawDisk)
3440 {
3441 /* Full raw disk access. This requires setting up a descriptor
3442 * file and open the (flat) raw disk. */
3443 rc = vmdkCreateExtents(pImage, 1);
3444 if (RT_FAILURE(rc))
3445 return vmdkError(pImage, rc, RT_SRC_POS, N_("VMDK: could not create new extent list in '%s'"), pImage->pszFilename);
3446 pExtent = &pImage->pExtents[0];
3447 /* Create raw disk descriptor file. */
3448 rc = vmdkFileOpen(pImage, &pImage->pFile, pImage->pszFilename,
3449 RTFILE_O_READWRITE | RTFILE_O_CREATE | RTFILE_O_DENY_WRITE | RTFILE_O_NOT_CONTENT_INDEXED,
3450 false);
3451 if (RT_FAILURE(rc))
3452 return vmdkError(pImage, rc, RT_SRC_POS, N_("VMDK: could not create new file '%s'"), pImage->pszFilename);
3453
3454 /* Set up basename for extent description. Cannot use StrDup. */
3455 size_t cbBasename = strlen(pRaw->pszRawDisk) + 1;
3456 char *pszBasename = (char *)RTMemTmpAlloc(cbBasename);
3457 if (!pszBasename)
3458 return VERR_NO_MEMORY;
3459 memcpy(pszBasename, pRaw->pszRawDisk, cbBasename);
3460 pExtent->pszBasename = pszBasename;
3461 /* For raw disks the full name is identical to the base name. */
3462 pExtent->pszFullname = RTStrDup(pszBasename);
3463 if (!pExtent->pszFullname)
3464 return VERR_NO_MEMORY;
3465 pExtent->enmType = VMDKETYPE_FLAT;
3466 pExtent->cNominalSectors = VMDK_BYTE2SECTOR(cbSize);
3467 pExtent->uSectorOffset = 0;
3468 pExtent->enmAccess = VMDKACCESS_READWRITE;
3469 pExtent->fMetaDirty = false;
3470
3471 /* Open flat image, the raw disk. */
3472 rc = vmdkFileOpen(pImage, &pExtent->pFile, pExtent->pszFullname,
3473 RTFILE_O_READWRITE | RTFILE_O_OPEN | RTFILE_O_DENY_WRITE, false);
3474 if (RT_FAILURE(rc))
3475 return vmdkError(pImage, rc, RT_SRC_POS, N_("VMDK: could not open raw disk file '%s'"), pExtent->pszFullname);
3476 }
3477 else
3478 {
3479 /* Raw partition access. This requires setting up a descriptor
3480 * file, write the partition information to a flat extent and
3481 * open all the (flat) raw disk partitions. */
3482
3483 /* First pass over the partitions to determine how many
3484 * extents we need. One partition can require up to 4 extents.
3485 * One to skip over unpartitioned space, one for the
3486 * partitioning data, one to skip over unpartitioned space
3487 * and one for the partition data. */
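        /* For instance, an image with partitioning data covering the first
         * sectors and a single partition starting further in needs a FLAT
         * extent for the copied partitioning data, a ZERO extent for the gap,
         * a FLAT extent mapping the partition itself and, if the partition
         * does not reach the end of the disk, a trailing ZERO extent. */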
3488 unsigned cExtents = 0;
3489 uint64_t uStart = 0;
3490 for (unsigned i = 0; i < pRaw->cPartitions; i++)
3491 {
3492 PVBOXHDDRAWPART pPart = &pRaw->pPartitions[i];
3493 if (pPart->cbPartitionData)
3494 {
3495 if (uStart > pPart->uPartitionDataStart)
3496 return vmdkError(pImage, VERR_INVALID_PARAMETER, RT_SRC_POS, N_("VMDK: cannot go backwards for partitioning information in '%s'"), pImage->pszFilename);
3497 else if (uStart != pPart->uPartitionDataStart)
3498 cExtents++;
3499 uStart = pPart->uPartitionDataStart + pPart->cbPartitionData;
3500 cExtents++;
3501 }
3502 if (pPart->cbPartition)
3503 {
3504 if (uStart > pPart->uPartitionStart)
3505 return vmdkError(pImage, VERR_INVALID_PARAMETER, RT_SRC_POS, N_("VMDK: cannot go backwards for partition data in '%s'"), pImage->pszFilename);
3506 else if (uStart != pPart->uPartitionStart)
3507 cExtents++;
3508 uStart = pPart->uPartitionStart + pPart->cbPartition;
3509 cExtents++;
3510 }
3511 }
3512 /* Another extent for filling up the rest of the image. */
3513 if (uStart != cbSize)
3514 cExtents++;
3515
3516 rc = vmdkCreateExtents(pImage, cExtents);
3517 if (RT_FAILURE(rc))
3518 return vmdkError(pImage, rc, RT_SRC_POS, N_("VMDK: could not create new extent list in '%s'"), pImage->pszFilename);
3519
3520 /* Create raw partition descriptor file. */
3521 rc = vmdkFileOpen(pImage, &pImage->pFile, pImage->pszFilename,
3522 RTFILE_O_READWRITE | RTFILE_O_CREATE | RTFILE_O_DENY_WRITE | RTFILE_O_NOT_CONTENT_INDEXED,
3523 false);
3524 if (RT_FAILURE(rc))
3525 return vmdkError(pImage, rc, RT_SRC_POS, N_("VMDK: could not create new file '%s'"), pImage->pszFilename);
3526
3527 /* Create base filename for the partition table extent. */
3528 /** @todo remove fixed buffer without creating memory leaks. */
3529 char pszPartition[1024];
3530 const char *pszBase = RTPathFilename(pImage->pszFilename);
3531 const char *pszExt = RTPathExt(pszBase);
3532 if (pszExt == NULL)
3533            return vmdkError(pImage, VERR_INVALID_PARAMETER, RT_SRC_POS, N_("VMDK: invalid filename '%s'"), pImage->pszFilename);
3534 char *pszBaseBase = RTStrDup(pszBase);
3535 if (!pszBaseBase)
3536 return VERR_NO_MEMORY;
3537 RTPathStripExt(pszBaseBase);
3538 RTStrPrintf(pszPartition, sizeof(pszPartition), "%s-pt%s",
3539 pszBaseBase, pszExt);
3540 RTStrFree(pszBaseBase);
3541
3542 /* Second pass over the partitions, now define all extents. */
3543 uint64_t uPartOffset = 0;
3544 cExtents = 0;
3545 uStart = 0;
3546 for (unsigned i = 0; i < pRaw->cPartitions; i++)
3547 {
3548 PVBOXHDDRAWPART pPart = &pRaw->pPartitions[i];
3549 if (pPart->cbPartitionData)
3550 {
3551 if (uStart != pPart->uPartitionDataStart)
3552 {
3553 pExtent = &pImage->pExtents[cExtents++];
3554 pExtent->pszBasename = NULL;
3555 pExtent->pszFullname = NULL;
3556 pExtent->enmType = VMDKETYPE_ZERO;
3557 pExtent->cNominalSectors = VMDK_BYTE2SECTOR(pPart->uPartitionDataStart - uStart);
3558 pExtent->uSectorOffset = 0;
3559 pExtent->enmAccess = VMDKACCESS_READWRITE;
3560 pExtent->fMetaDirty = false;
3561 }
3562 uStart = pPart->uPartitionDataStart + pPart->cbPartitionData;
3563 pExtent = &pImage->pExtents[cExtents++];
3564 /* Set up basename for extent description. Can't use StrDup. */
3565 size_t cbBasename = strlen(pszPartition) + 1;
3566 char *pszBasename = (char *)RTMemTmpAlloc(cbBasename);
3567 if (!pszBasename)
3568 return VERR_NO_MEMORY;
3569 memcpy(pszBasename, pszPartition, cbBasename);
3570 pExtent->pszBasename = pszBasename;
3571
3572 /* Set up full name for partition extent. */
3573 size_t cbDirname;
3574 char *pszDirname = RTStrDup(pImage->pszFilename);
3575 if (!pszDirname)
3576 return VERR_NO_MEMORY;
3577 RTPathStripFilename(pszDirname);
3578 cbDirname = strlen(pszDirname);
3579 char *pszFullname;
3580 rc = RTStrAPrintf(&pszFullname, "%s%c%s", pszDirname,
3581 RTPATH_SLASH, pExtent->pszBasename);
3582 RTStrFree(pszDirname);
3583 if (RT_FAILURE(rc))
3584 return rc;
3585 pExtent->pszFullname = pszFullname;
3586 pExtent->enmType = VMDKETYPE_FLAT;
3587 pExtent->cNominalSectors = VMDK_BYTE2SECTOR(pPart->cbPartitionData);
3588 pExtent->uSectorOffset = uPartOffset;
3589 pExtent->enmAccess = VMDKACCESS_READWRITE;
3590 pExtent->fMetaDirty = false;
3591
3592 /* Create partition table flat image. */
3593 rc = vmdkFileOpen(pImage, &pExtent->pFile, pExtent->pszFullname,
3594 RTFILE_O_READWRITE | RTFILE_O_CREATE | RTFILE_O_DENY_WRITE | RTFILE_O_NOT_CONTENT_INDEXED,
3595 false);
3596 if (RT_FAILURE(rc))
3597 return vmdkError(pImage, rc, RT_SRC_POS, N_("VMDK: could not create new partition data file '%s'"), pExtent->pszFullname);
3598 rc = vmdkFileWriteAt(pExtent->pFile,
3599 VMDK_SECTOR2BYTE(uPartOffset),
3600 pPart->pvPartitionData,
3601 pPart->cbPartitionData, NULL);
3602 if (RT_FAILURE(rc))
3603 return vmdkError(pImage, rc, RT_SRC_POS, N_("VMDK: could not write partition data to '%s'"), pExtent->pszFullname);
3604 uPartOffset += VMDK_BYTE2SECTOR(pPart->cbPartitionData);
3605 }
3606 if (pPart->cbPartition)
3607 {
3608 if (uStart != pPart->uPartitionStart)
3609 {
3610 pExtent = &pImage->pExtents[cExtents++];
3611 pExtent->pszBasename = NULL;
3612 pExtent->pszFullname = NULL;
3613 pExtent->enmType = VMDKETYPE_ZERO;
3614 pExtent->cNominalSectors = VMDK_BYTE2SECTOR(pPart->uPartitionStart - uStart);
3615 pExtent->uSectorOffset = 0;
3616 pExtent->enmAccess = VMDKACCESS_READWRITE;
3617 pExtent->fMetaDirty = false;
3618 }
3619 uStart = pPart->uPartitionStart + pPart->cbPartition;
3620 pExtent = &pImage->pExtents[cExtents++];
3621 if (pPart->pszRawDevice)
3622 {
3623 /* Set up basename for extent descr. Can't use StrDup. */
3624 size_t cbBasename = strlen(pPart->pszRawDevice) + 1;
3625 char *pszBasename = (char *)RTMemTmpAlloc(cbBasename);
3626 if (!pszBasename)
3627 return VERR_NO_MEMORY;
3628 memcpy(pszBasename, pPart->pszRawDevice, cbBasename);
3629 pExtent->pszBasename = pszBasename;
3630 /* For raw disks full name is identical to base name. */
3631 pExtent->pszFullname = RTStrDup(pszBasename);
3632 if (!pExtent->pszFullname)
3633 return VERR_NO_MEMORY;
3634 pExtent->enmType = VMDKETYPE_FLAT;
3635 pExtent->cNominalSectors = VMDK_BYTE2SECTOR(pPart->cbPartition);
3636 pExtent->uSectorOffset = VMDK_BYTE2SECTOR(pPart->uPartitionStartOffset);
3637 pExtent->enmAccess = VMDKACCESS_READWRITE;
3638 pExtent->fMetaDirty = false;
3639
3640 /* Open flat image, the raw partition. */
3641 rc = vmdkFileOpen(pImage, &pExtent->pFile, pExtent->pszFullname,
3642 RTFILE_O_READWRITE | RTFILE_O_OPEN | RTFILE_O_DENY_WRITE,
3643 false);
3644 if (RT_FAILURE(rc))
3645 return vmdkError(pImage, rc, RT_SRC_POS, N_("VMDK: could not open raw partition file '%s'"), pExtent->pszFullname);
3646 }
3647 else
3648 {
3649 pExtent->pszBasename = NULL;
3650 pExtent->pszFullname = NULL;
3651 pExtent->enmType = VMDKETYPE_ZERO;
3652 pExtent->cNominalSectors = VMDK_BYTE2SECTOR(pPart->cbPartition);
3653 pExtent->uSectorOffset = 0;
3654 pExtent->enmAccess = VMDKACCESS_READWRITE;
3655 pExtent->fMetaDirty = false;
3656 }
3657 }
3658 }
3659 /* Another extent for filling up the rest of the image. */
3660 if (uStart != cbSize)
3661 {
3662 pExtent = &pImage->pExtents[cExtents++];
3663 pExtent->pszBasename = NULL;
3664 pExtent->pszFullname = NULL;
3665 pExtent->enmType = VMDKETYPE_ZERO;
3666 pExtent->cNominalSectors = VMDK_BYTE2SECTOR(cbSize - uStart);
3667 pExtent->uSectorOffset = 0;
3668 pExtent->enmAccess = VMDKACCESS_READWRITE;
3669 pExtent->fMetaDirty = false;
3670 }
3671 }
3672
3673 rc = vmdkDescBaseSetStr(pImage, &pImage->Descriptor, "createType",
3674 pRaw->fRawDisk ?
3675 "fullDevice" : "partitionedDevice");
3676 if (RT_FAILURE(rc))
3677 return vmdkError(pImage, rc, RT_SRC_POS, N_("VMDK: could not set the image type in '%s'"), pImage->pszFilename);
3678 return rc;
3679}
3680
3681/**
3682 * Internal: create a regular (i.e. file-backed) VMDK image.
3683 */
3684static int vmdkCreateRegularImage(PVMDKIMAGE pImage, uint64_t cbSize,
3685 unsigned uImageFlags,
3686 PFNVDPROGRESS pfnProgress, void *pvUser,
3687 unsigned uPercentStart, unsigned uPercentSpan)
3688{
3689 int rc = VINF_SUCCESS;
3690 unsigned cExtents = 1;
3691 uint64_t cbOffset = 0;
3692 uint64_t cbRemaining = cbSize;
3693
3694 if (uImageFlags & VD_VMDK_IMAGE_FLAGS_SPLIT_2G)
3695 {
3696 cExtents = cbSize / VMDK_2G_SPLIT_SIZE;
3697 /* Do proper extent computation: need one smaller extent if the total
3698 * size isn't evenly divisible by the split size. */
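        /* E.g. cbSize == 2.5 * VMDK_2G_SPLIT_SIZE results in cExtents == 3:
         * two full-size extents plus one smaller extent for the remainder. */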
3699 if (cbSize % VMDK_2G_SPLIT_SIZE)
3700 cExtents++;
3701 }
3702 rc = vmdkCreateExtents(pImage, cExtents);
3703 if (RT_FAILURE(rc))
3704 return vmdkError(pImage, rc, RT_SRC_POS, N_("VMDK: could not create new extent list in '%s'"), pImage->pszFilename);
3705
3706 /* Basename strings needed for constructing the extent names. */
3707 char *pszBasenameSubstr = RTPathFilename(pImage->pszFilename);
3708 AssertPtr(pszBasenameSubstr);
3709 size_t cbBasenameSubstr = strlen(pszBasenameSubstr) + 1;
3710
3711    /* Create separate descriptor file if necessary. */
3712 if (cExtents != 1 || (uImageFlags & VD_IMAGE_FLAGS_FIXED))
3713 {
3714 rc = vmdkFileOpen(pImage, &pImage->pFile, pImage->pszFilename,
3715 RTFILE_O_READWRITE | RTFILE_O_CREATE | RTFILE_O_DENY_WRITE | RTFILE_O_NOT_CONTENT_INDEXED,
3716 false);
3717 if (RT_FAILURE(rc))
3718 return vmdkError(pImage, rc, RT_SRC_POS, N_("VMDK: could not create new sparse descriptor file '%s'"), pImage->pszFilename);
3719 }
3720 else
3721 pImage->pFile = NULL;
3722
3723 /* Set up all extents. */
3724 for (unsigned i = 0; i < cExtents; i++)
3725 {
3726 PVMDKEXTENT pExtent = &pImage->pExtents[i];
3727 uint64_t cbExtent = cbRemaining;
3728
3729        /* Set up fullname/basename for extent description. Cannot use RTStrDup
3730         * for the basename, as it is not guaranteed that the memory can be freed
3731         * with RTMemTmpFree, which must be used here because other code paths
3732         * allocate the basename with RTMemTmpAlloc. */
3733 if (cExtents == 1 && !(uImageFlags & VD_IMAGE_FLAGS_FIXED))
3734 {
3735 char *pszBasename = (char *)RTMemTmpAlloc(cbBasenameSubstr);
3736 if (!pszBasename)
3737 return VERR_NO_MEMORY;
3738 memcpy(pszBasename, pszBasenameSubstr, cbBasenameSubstr);
3739 pExtent->pszBasename = pszBasename;
3740 }
3741 else
3742 {
3743 char *pszBasenameExt = RTPathExt(pszBasenameSubstr);
3744 char *pszBasenameBase = RTStrDup(pszBasenameSubstr);
3745 RTPathStripExt(pszBasenameBase);
3746 char *pszTmp;
3747 size_t cbTmp;
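            /* Derive the extent name from the image name: "-flat"/"-fNNN" for
             * fixed extents, "-sNNN" for split sparse extents, e.g. "disk.vmdk"
             * becomes "disk-s001.vmdk" for the first split sparse extent. */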
3748 if (uImageFlags & VD_IMAGE_FLAGS_FIXED)
3749 {
3750 if (cExtents == 1)
3751 rc = RTStrAPrintf(&pszTmp, "%s-flat%s", pszBasenameBase,
3752 pszBasenameExt);
3753 else
3754 rc = RTStrAPrintf(&pszTmp, "%s-f%03d%s", pszBasenameBase,
3755 i+1, pszBasenameExt);
3756 }
3757 else
3758 rc = RTStrAPrintf(&pszTmp, "%s-s%03d%s", pszBasenameBase, i+1,
3759 pszBasenameExt);
3760 RTStrFree(pszBasenameBase);
3761 if (RT_FAILURE(rc))
3762 return rc;
3763 cbTmp = strlen(pszTmp) + 1;
3764 char *pszBasename = (char *)RTMemTmpAlloc(cbTmp);
3765 if (!pszBasename)
3766 return VERR_NO_MEMORY;
3767 memcpy(pszBasename, pszTmp, cbTmp);
3768 RTStrFree(pszTmp);
3769 pExtent->pszBasename = pszBasename;
3770 if (uImageFlags & VD_VMDK_IMAGE_FLAGS_SPLIT_2G)
3771 cbExtent = RT_MIN(cbRemaining, VMDK_2G_SPLIT_SIZE);
3772 }
3773 char *pszBasedirectory = RTStrDup(pImage->pszFilename);
3774 RTPathStripFilename(pszBasedirectory);
3775 char *pszFullname;
3776 rc = RTStrAPrintf(&pszFullname, "%s%c%s", pszBasedirectory,
3777 RTPATH_SLASH, pExtent->pszBasename);
3778 RTStrFree(pszBasedirectory);
3779 if (RT_FAILURE(rc))
3780 return rc;
3781 pExtent->pszFullname = pszFullname;
3782
3783 /* Create file for extent. */
3784 rc = vmdkFileOpen(pImage, &pExtent->pFile, pExtent->pszFullname,
3785 RTFILE_O_READWRITE | RTFILE_O_CREATE | RTFILE_O_DENY_WRITE | RTFILE_O_NOT_CONTENT_INDEXED,
3786 false);
3787 if (RT_FAILURE(rc))
3788 return vmdkError(pImage, rc, RT_SRC_POS, N_("VMDK: could not create new file '%s'"), pExtent->pszFullname);
3789 if (uImageFlags & VD_IMAGE_FLAGS_FIXED)
3790 {
3791 rc = vmdkFileSetSize(pExtent->pFile, cbExtent);
3792 if (RT_FAILURE(rc))
3793 return vmdkError(pImage, rc, RT_SRC_POS, N_("VMDK: could not set size of new file '%s'"), pExtent->pszFullname);
3794
3795 /* Fill image with zeroes. We do this for every fixed-size image since on some systems
3796 * (for example Windows Vista), it takes ages to write a block near the end of a sparse
3797 * file and the guest could complain about an ATA timeout. */
3798
3799            /** @todo Starting with Linux 2.6.23, there is a fallocate() system call.
3800 * Currently supported file systems are ext4 and ocfs2. */
3801
3802 /* Allocate a temporary zero-filled buffer. Use a bigger block size to optimize writing */
3803 const size_t cbBuf = 128 * _1K;
3804 void *pvBuf = RTMemTmpAllocZ(cbBuf);
3805 if (!pvBuf)
3806 return VERR_NO_MEMORY;
3807
3808 uint64_t uOff = 0;
3809 /* Write data to all image blocks. */
3810 while (uOff < cbExtent)
3811 {
3812                unsigned cbChunk = (unsigned)RT_MIN(cbExtent - uOff, cbBuf); /* don't write past the end of the extent */
3813
3814 rc = vmdkFileWriteAt(pExtent->pFile, uOff, pvBuf, cbChunk, NULL);
3815 if (RT_FAILURE(rc))
3816 {
3817                    RTMemTmpFree(pvBuf);
3818 return vmdkError(pImage, rc, RT_SRC_POS, N_("VMDK: writing block failed for '%s'"), pImage->pszFilename);
3819 }
3820
3821 uOff += cbChunk;
3822
3823 if (pfnProgress)
3824 {
3825 rc = pfnProgress(pvUser,
3826 uPercentStart + uOff * uPercentSpan / cbExtent);
3827 if (RT_FAILURE(rc))
3828 {
3829                        RTMemTmpFree(pvBuf);
3830 return rc;
3831 }
3832 }
3833 }
3834 RTMemTmpFree(pvBuf);
3835 }
3836
3837 /* Place descriptor file information (where integrated). */
3838 if (cExtents == 1 && !(uImageFlags & VD_IMAGE_FLAGS_FIXED))
3839 {
3840 pExtent->uDescriptorSector = 1;
3841 pExtent->cDescriptorSectors = VMDK_BYTE2SECTOR(pImage->cbDescAlloc);
3842 /* The descriptor is part of the (only) extent. */
3843 pExtent->pDescData = pImage->pDescData;
3844 pImage->pDescData = NULL;
3845 }
3846
3847 if (!(uImageFlags & VD_IMAGE_FLAGS_FIXED))
3848 {
3849 uint64_t cSectorsPerGDE, cSectorsPerGD;
3850 pExtent->enmType = VMDKETYPE_HOSTED_SPARSE;
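            /* Default sparse layout: 64K grains (128 sectors per grain) and
             * 512 entries per grain table, so each grain directory entry
             * covers 512 * 128 = 65536 sectors (32MB) of the extent.
             * cGDEntries is the number of grain tables needed to cover the
             * whole extent. */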
3851 pExtent->cSectors = VMDK_BYTE2SECTOR(RT_ALIGN_64(cbExtent, 65536));
3852 pExtent->cSectorsPerGrain = VMDK_BYTE2SECTOR(65536);
3853 pExtent->cGTEntries = 512;
3854 cSectorsPerGDE = pExtent->cGTEntries * pExtent->cSectorsPerGrain;
3855 pExtent->cSectorsPerGDE = cSectorsPerGDE;
3856 pExtent->cGDEntries = (pExtent->cSectors + cSectorsPerGDE - 1) / cSectorsPerGDE;
3857 cSectorsPerGD = (pExtent->cGDEntries + (512 / sizeof(uint32_t) - 1)) / (512 / sizeof(uint32_t));
3858 if (pImage->uImageFlags & VD_VMDK_IMAGE_FLAGS_STREAM_OPTIMIZED)
3859 {
3860 /* The spec says version is 1 for all VMDKs, but the vast
3861 * majority of streamOptimized VMDKs actually contain
3862                 * version 3 - so go with the majority. Both are accepted. */
3863 pExtent->uVersion = 3;
3864 pExtent->uCompression = VMDK_COMPRESSION_DEFLATE;
3865 }
3866 }
3867 else
3868 {
3869 if (uImageFlags & VD_VMDK_IMAGE_FLAGS_ESX)
3870 pExtent->enmType = VMDKETYPE_VMFS;
3871 else
3872 pExtent->enmType = VMDKETYPE_FLAT;
3873 }
3874
3875 pExtent->enmAccess = VMDKACCESS_READWRITE;
3876 pExtent->fUncleanShutdown = true;
3877 pExtent->cNominalSectors = VMDK_BYTE2SECTOR(cbExtent);
3878 pExtent->uSectorOffset = 0;
3879 pExtent->fMetaDirty = true;
3880
3881 if (!(uImageFlags & VD_IMAGE_FLAGS_FIXED))
3882 {
3883 rc = vmdkCreateGrainDirectory(pExtent,
3884 RT_MAX( pExtent->uDescriptorSector
3885 + pExtent->cDescriptorSectors,
3886 1),
3887 true);
3888 if (RT_FAILURE(rc))
3889 return vmdkError(pImage, rc, RT_SRC_POS, N_("VMDK: could not create new grain directory in '%s'"), pExtent->pszFullname);
3890 }
3891
3892 if (RT_SUCCESS(rc) && pfnProgress)
3893 pfnProgress(pvUser, uPercentStart + i * uPercentSpan / cExtents);
3894
3895 cbRemaining -= cbExtent;
3896 cbOffset += cbExtent;
3897 }
3898
3899 if (pImage->uImageFlags & VD_VMDK_IMAGE_FLAGS_ESX)
3900 {
3901        /* VirtualBox doesn't care, but VMware ESX freaks out if the wrong
3902 * controller type is set in an image. */
3903 rc = vmdkDescDDBSetStr(pImage, &pImage->Descriptor, "ddb.adapterType", "lsilogic");
3904 if (RT_FAILURE(rc))
3905 return vmdkError(pImage, rc, RT_SRC_POS, N_("VMDK: could not set controller type to lsilogic in '%s'"), pImage->pszFilename);
3906 }
3907
3908 const char *pszDescType = NULL;
3909 if (uImageFlags & VD_IMAGE_FLAGS_FIXED)
3910 {
3911 if (pImage->uImageFlags & VD_VMDK_IMAGE_FLAGS_ESX)
3912 pszDescType = "vmfs";
3913 else
3914 pszDescType = (cExtents == 1)
3915 ? "monolithicFlat" : "twoGbMaxExtentFlat";
3916 }
3917 else
3918 {
3919 if (pImage->uImageFlags & VD_VMDK_IMAGE_FLAGS_STREAM_OPTIMIZED)
3920 pszDescType = "streamOptimized";
3921 else
3922 {
3923 pszDescType = (cExtents == 1)
3924 ? "monolithicSparse" : "twoGbMaxExtentSparse";
3925 }
3926 }
3927 rc = vmdkDescBaseSetStr(pImage, &pImage->Descriptor, "createType",
3928 pszDescType);
3929 if (RT_FAILURE(rc))
3930 return vmdkError(pImage, rc, RT_SRC_POS, N_("VMDK: could not set the image type in '%s'"), pImage->pszFilename);
3931 return rc;
3932}
3933
3934/**
3935 * Internal: The actual code for creating any VMDK variant currently in
3936 * existence on hosted environments.
3937 */
3938static int vmdkCreateImage(PVMDKIMAGE pImage, uint64_t cbSize,
3939 unsigned uImageFlags, const char *pszComment,
3940 PCPDMMEDIAGEOMETRY pPCHSGeometry,
3941 PCPDMMEDIAGEOMETRY pLCHSGeometry, PCRTUUID pUuid,
3942 PFNVDPROGRESS pfnProgress, void *pvUser,
3943 unsigned uPercentStart, unsigned uPercentSpan)
3944{
3945 int rc;
3946
3947 pImage->uImageFlags = uImageFlags;
3948
3949 /* Try to get error interface. */
3950 pImage->pInterfaceError = VDInterfaceGet(pImage->pVDIfsDisk, VDINTERFACETYPE_ERROR);
3951 if (pImage->pInterfaceError)
3952 pImage->pInterfaceErrorCallbacks = VDGetInterfaceError(pImage->pInterfaceError);
3953
3954 /* Try to get async I/O interface. */
3955 pImage->pInterfaceIO = VDInterfaceGet(pImage->pVDIfsImage, VDINTERFACETYPE_IO);
3956 if (pImage->pInterfaceIO)
3957 pImage->pInterfaceIOCallbacks = VDGetInterfaceIO(pImage->pInterfaceIO);
3958
3959 rc = vmdkCreateDescriptor(pImage, pImage->pDescData, pImage->cbDescAlloc,
3960 &pImage->Descriptor);
3961 if (RT_FAILURE(rc))
3962 {
3963 rc = vmdkError(pImage, rc, RT_SRC_POS, N_("VMDK: could not create new descriptor in '%s'"), pImage->pszFilename);
3964 goto out;
3965 }
3966
3967 if ( (uImageFlags & VD_IMAGE_FLAGS_FIXED)
3968 && (uImageFlags & VD_VMDK_IMAGE_FLAGS_RAWDISK))
3969 {
3970 /* Raw disk image (includes raw partition). */
3971 const PVBOXHDDRAW pRaw = (const PVBOXHDDRAW)pszComment;
3972 /* As the comment is misused, zap it so that no garbage comment
3973 * is set below. */
3974 pszComment = NULL;
3975 rc = vmdkCreateRawImage(pImage, pRaw, cbSize);
3976 }
3977 else
3978 {
3979 /* Regular fixed or sparse image (monolithic or split). */
3980 rc = vmdkCreateRegularImage(pImage, cbSize, uImageFlags,
3981 pfnProgress, pvUser, uPercentStart,
3982 uPercentSpan * 95 / 100);
3983 }
3984
3985 if (RT_FAILURE(rc))
3986 goto out;
3987
3988 if (RT_SUCCESS(rc) && pfnProgress)
3989 pfnProgress(pvUser, uPercentStart + uPercentSpan * 98 / 100);
3990
3991 pImage->cbSize = cbSize;
3992
3993 for (unsigned i = 0; i < pImage->cExtents; i++)
3994 {
3995 PVMDKEXTENT pExtent = &pImage->pExtents[i];
3996
3997 rc = vmdkDescExtInsert(pImage, &pImage->Descriptor, pExtent->enmAccess,
3998 pExtent->cNominalSectors, pExtent->enmType,
3999 pExtent->pszBasename, pExtent->uSectorOffset);
4000 if (RT_FAILURE(rc))
4001 {
4002 rc = vmdkError(pImage, rc, RT_SRC_POS, N_("VMDK: could not insert the extent list into descriptor in '%s'"), pImage->pszFilename);
4003 goto out;
4004 }
4005 }
4006 vmdkDescExtRemoveDummy(pImage, &pImage->Descriptor);
4007
4008 if ( pPCHSGeometry->cCylinders != 0
4009 && pPCHSGeometry->cHeads != 0
4010 && pPCHSGeometry->cSectors != 0)
4011 {
4012 rc = vmdkDescSetPCHSGeometry(pImage, pPCHSGeometry);
4013 if (RT_FAILURE(rc))
4014 goto out;
4015 }
4016 if ( pLCHSGeometry->cCylinders != 0
4017 && pLCHSGeometry->cHeads != 0
4018 && pLCHSGeometry->cSectors != 0)
4019 {
4020 rc = vmdkDescSetLCHSGeometry(pImage, pLCHSGeometry);
4021 if (RT_FAILURE(rc))
4022 goto out;
4023 }
4024
4025 pImage->LCHSGeometry = *pLCHSGeometry;
4026 pImage->PCHSGeometry = *pPCHSGeometry;
4027
4028 pImage->ImageUuid = *pUuid;
4029 rc = vmdkDescDDBSetUuid(pImage, &pImage->Descriptor,
4030 VMDK_DDB_IMAGE_UUID, &pImage->ImageUuid);
4031 if (RT_FAILURE(rc))
4032 {
4033 rc = vmdkError(pImage, rc, RT_SRC_POS, N_("VMDK: error storing image UUID in new descriptor in '%s'"), pImage->pszFilename);
4034 goto out;
4035 }
4036 RTUuidClear(&pImage->ParentUuid);
4037 rc = vmdkDescDDBSetUuid(pImage, &pImage->Descriptor,
4038 VMDK_DDB_PARENT_UUID, &pImage->ParentUuid);
4039 if (RT_FAILURE(rc))
4040 {
4041 rc = vmdkError(pImage, rc, RT_SRC_POS, N_("VMDK: error storing parent image UUID in new descriptor in '%s'"), pImage->pszFilename);
4042 goto out;
4043 }
4044 RTUuidClear(&pImage->ModificationUuid);
4045 rc = vmdkDescDDBSetUuid(pImage, &pImage->Descriptor,
4046 VMDK_DDB_MODIFICATION_UUID,
4047 &pImage->ModificationUuid);
4048 if (RT_FAILURE(rc))
4049 {
4050 rc = vmdkError(pImage, rc, RT_SRC_POS, N_("VMDK: error storing modification UUID in new descriptor in '%s'"), pImage->pszFilename);
4051 goto out;
4052 }
4053 RTUuidClear(&pImage->ParentModificationUuid);
4054 rc = vmdkDescDDBSetUuid(pImage, &pImage->Descriptor,
4055 VMDK_DDB_PARENT_MODIFICATION_UUID,
4056 &pImage->ParentModificationUuid);
4057 if (RT_FAILURE(rc))
4058 {
4059 rc = vmdkError(pImage, rc, RT_SRC_POS, N_("VMDK: error storing parent modification UUID in new descriptor in '%s'"), pImage->pszFilename);
4060 goto out;
4061 }
4062
4063 rc = vmdkAllocateGrainTableCache(pImage);
4064 if (RT_FAILURE(rc))
4065 goto out;
4066
4067 rc = vmdkSetImageComment(pImage, pszComment);
4068 if (RT_FAILURE(rc))
4069 {
4070 rc = vmdkError(pImage, rc, RT_SRC_POS, N_("VMDK: cannot set image comment in '%s'"), pImage->pszFilename);
4071 goto out;
4072 }
4073
4074 if (RT_SUCCESS(rc) && pfnProgress)
4075 pfnProgress(pvUser, uPercentStart + uPercentSpan * 99 / 100);
4076
4077 rc = vmdkFlushImage(pImage);
4078
4079out:
4080 if (RT_SUCCESS(rc) && pfnProgress)
4081 pfnProgress(pvUser, uPercentStart + uPercentSpan);
4082
4083 if (RT_FAILURE(rc))
4084 vmdkFreeImage(pImage, rc != VERR_ALREADY_EXISTS);
4085 return rc;
4086}
4087
4088/**
4089 * Internal: Update image comment.
4090 */
4091static int vmdkSetImageComment(PVMDKIMAGE pImage, const char *pszComment)
4092{
4093 char *pszCommentEncoded;
4094 if (pszComment)
4095 {
4096 pszCommentEncoded = vmdkEncodeString(pszComment);
4097 if (!pszCommentEncoded)
4098 return VERR_NO_MEMORY;
4099 }
4100 else
4101 pszCommentEncoded = NULL;
4102 int rc = vmdkDescDDBSetStr(pImage, &pImage->Descriptor,
4103 "ddb.comment", pszCommentEncoded);
4104 if (pszComment)
4105 RTStrFree(pszCommentEncoded);
4106 if (RT_FAILURE(rc))
4107 return vmdkError(pImage, rc, RT_SRC_POS, N_("VMDK: error storing image comment in descriptor in '%s'"), pImage->pszFilename);
4108 return VINF_SUCCESS;
4109}
4110
4111/**
4112 * Internal. Free all allocated space for representing an image, and optionally
4113 * delete the image from disk.
4114 */
4115static void vmdkFreeImage(PVMDKIMAGE pImage, bool fDelete)
4116{
4117 AssertPtr(pImage);
4118
4119 if (!(pImage->uOpenFlags & VD_OPEN_FLAGS_READONLY))
4120 {
4121 /* Mark all extents as clean. */
4122 for (unsigned i = 0; i < pImage->cExtents; i++)
4123 {
4124 if (( pImage->pExtents[i].enmType == VMDKETYPE_HOSTED_SPARSE
4125#ifdef VBOX_WITH_VMDK_ESX
4126 || pImage->pExtents[i].enmType == VMDKETYPE_ESX_SPARSE
4127#endif /* VBOX_WITH_VMDK_ESX */
4128 )
4129 && pImage->pExtents[i].fUncleanShutdown)
4130 {
4131 pImage->pExtents[i].fUncleanShutdown = false;
4132 pImage->pExtents[i].fMetaDirty = true;
4133 }
4134 }
4135 }
4136 (void)vmdkFlushImage(pImage);
4137
4138 if (pImage->pExtents != NULL)
4139 {
4140 for (unsigned i = 0 ; i < pImage->cExtents; i++)
4141 vmdkFreeExtentData(pImage, &pImage->pExtents[i], fDelete);
4142 RTMemFree(pImage->pExtents);
4143 pImage->pExtents = NULL;
4144 }
4145 pImage->cExtents = 0;
4146 if (pImage->pFile != NULL)
4147 vmdkFileClose(pImage, &pImage->pFile, fDelete);
4148 vmdkFileCheckAllClose(pImage);
4149 if (pImage->pGTCache)
4150 {
4151 RTMemFree(pImage->pGTCache);
4152 pImage->pGTCache = NULL;
4153 }
4154 if (pImage->pDescData)
4155 {
4156 RTMemFree(pImage->pDescData);
4157 pImage->pDescData = NULL;
4158 }
4159}
4160
4161/**
4162 * Internal. Flush image data (and metadata) to disk.
4163 */
4164static int vmdkFlushImage(PVMDKIMAGE pImage)
4165{
4166 PVMDKEXTENT pExtent;
4167 int rc = VINF_SUCCESS;
4168
4169 /* Update descriptor if changed. */
4170 if (pImage->Descriptor.fDirty)
4171 {
4172 rc = vmdkWriteDescriptor(pImage);
4173 if (RT_FAILURE(rc))
4174 goto out;
4175 }
4176
4177 for (unsigned i = 0; i < pImage->cExtents; i++)
4178 {
4179 pExtent = &pImage->pExtents[i];
4180 if (pExtent->pFile != NULL && pExtent->fMetaDirty)
4181 {
4182 switch (pExtent->enmType)
4183 {
4184 case VMDKETYPE_HOSTED_SPARSE:
4185 rc = vmdkWriteMetaSparseExtent(pExtent, 0);
4186 if (RT_FAILURE(rc))
4187 goto out;
4188 if (pExtent->fFooter)
4189 {
4190 uint64_t cbSize;
4191 rc = vmdkFileGetSize(pExtent->pFile, &cbSize);
4192 if (RT_FAILURE(rc))
4193 goto out;
4194 cbSize = RT_ALIGN_64(cbSize, 512);
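                        /* The footer occupies the next-to-last sector of the
                         * file; the very last sector holds the end-of-stream
                         * marker. */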
4195 rc = vmdkWriteMetaSparseExtent(pExtent, cbSize - 2*512);
4196 if (RT_FAILURE(rc))
4197 goto out;
4198 }
4199 break;
4200#ifdef VBOX_WITH_VMDK_ESX
4201 case VMDKETYPE_ESX_SPARSE:
4202 /** @todo update the header. */
4203 break;
4204#endif /* VBOX_WITH_VMDK_ESX */
4205 case VMDKETYPE_VMFS:
4206 case VMDKETYPE_FLAT:
4207 /* Nothing to do. */
4208 break;
4209 case VMDKETYPE_ZERO:
4210 default:
4211 AssertMsgFailed(("extent with type %d marked as dirty\n",
4212 pExtent->enmType));
4213 break;
4214 }
4215 }
4216 switch (pExtent->enmType)
4217 {
4218 case VMDKETYPE_HOSTED_SPARSE:
4219#ifdef VBOX_WITH_VMDK_ESX
4220 case VMDKETYPE_ESX_SPARSE:
4221#endif /* VBOX_WITH_VMDK_ESX */
4222 case VMDKETYPE_VMFS:
4223 case VMDKETYPE_FLAT:
4224 /** @todo implement proper path absolute check. */
4225 if ( pExtent->pFile != NULL
4226 && !(pImage->uOpenFlags & VD_OPEN_FLAGS_READONLY)
4227 && !(pExtent->pszBasename[0] == RTPATH_SLASH))
4228 rc = vmdkFileFlush(pExtent->pFile);
4229 break;
4230 case VMDKETYPE_ZERO:
4231 /* No need to do anything for this extent. */
4232 break;
4233 default:
4234 AssertMsgFailed(("unknown extent type %d\n", pExtent->enmType));
4235 break;
4236 }
4237 }
4238
4239out:
4240 return rc;
4241}
4242
4243/**
4244 * Internal. Flush image data (and metadata) to disk - async version.
4245 */
4246static int vmdkFlushImageAsync(PVMDKIMAGE pImage, PVDIOCTX pIoCtx)
4247{
4248 PVMDKEXTENT pExtent;
4249 int rc = VINF_SUCCESS;
4250
4251 /* Update descriptor if changed. */
4252 if (pImage->Descriptor.fDirty)
4253 {
4254 rc = vmdkWriteDescriptor(pImage);
4255 if (RT_FAILURE(rc))
4256 goto out;
4257 }
4258
4259 for (unsigned i = 0; i < pImage->cExtents; i++)
4260 {
4261 pExtent = &pImage->pExtents[i];
4262 if (pExtent->pFile != NULL && pExtent->fMetaDirty)
4263 {
4264 switch (pExtent->enmType)
4265 {
4266 case VMDKETYPE_HOSTED_SPARSE:
4267 AssertMsgFailed(("Async I/O not supported for sparse images\n"));
4268 break;
4269#ifdef VBOX_WITH_VMDK_ESX
4270 case VMDKETYPE_ESX_SPARSE:
4271 /** @todo update the header. */
4272 break;
4273#endif /* VBOX_WITH_VMDK_ESX */
4274 case VMDKETYPE_VMFS:
4275 case VMDKETYPE_FLAT:
4276 /* Nothing to do. */
4277 break;
4278 case VMDKETYPE_ZERO:
4279 default:
4280 AssertMsgFailed(("extent with type %d marked as dirty\n",
4281 pExtent->enmType));
4282 break;
4283 }
4284 }
4285 switch (pExtent->enmType)
4286 {
4287 case VMDKETYPE_HOSTED_SPARSE:
4288#ifdef VBOX_WITH_VMDK_ESX
4289 case VMDKETYPE_ESX_SPARSE:
4290#endif /* VBOX_WITH_VMDK_ESX */
4291 case VMDKETYPE_VMFS:
4292 case VMDKETYPE_FLAT:
4293 /** @todo implement proper path absolute check. */
4294 if ( pExtent->pFile != NULL
4295 && !(pImage->uOpenFlags & VD_OPEN_FLAGS_READONLY)
4296 && !(pExtent->pszBasename[0] == RTPATH_SLASH))
4297 rc = vmdkFileFlushAsync(pExtent->pFile, pIoCtx);
4298 break;
4299 case VMDKETYPE_ZERO:
4300 /* No need to do anything for this extent. */
4301 break;
4302 default:
4303 AssertMsgFailed(("unknown extent type %d\n", pExtent->enmType));
4304 break;
4305 }
4306 }
4307
4308out:
4309 return rc;
4310}
4311
4312/**
4313 * Internal. Find extent corresponding to the sector number in the disk.
4314 */
4315static int vmdkFindExtent(PVMDKIMAGE pImage, uint64_t offSector,
4316 PVMDKEXTENT *ppExtent, uint64_t *puSectorInExtent)
4317{
4318 PVMDKEXTENT pExtent = NULL;
4319 int rc = VINF_SUCCESS;
4320
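    /* Walk the extents in order, subtracting each extent's nominal size until
     * the sector falls inside one. The returned sector is relative to the
     * extent file and includes the extent's uSectorOffset (relevant for flat
     * extents which start at an offset within their backing file). */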
4321 for (unsigned i = 0; i < pImage->cExtents; i++)
4322 {
4323 if (offSector < pImage->pExtents[i].cNominalSectors)
4324 {
4325 pExtent = &pImage->pExtents[i];
4326 *puSectorInExtent = offSector + pImage->pExtents[i].uSectorOffset;
4327 break;
4328 }
4329 offSector -= pImage->pExtents[i].cNominalSectors;
4330 }
4331
4332 if (pExtent)
4333 *ppExtent = pExtent;
4334 else
4335 rc = VERR_IO_SECTOR_NOT_FOUND;
4336
4337 return rc;
4338}
4339
4340/**
4341 * Internal. Hash function for placing the grain table hash entries.
4342 */
4343static uint32_t vmdkGTCacheHash(PVMDKGTCACHE pCache, uint64_t uSector,
4344 unsigned uExtent)
4345{
4346 /** @todo this hash function is quite simple, maybe use a better one which
4347     * scrambles the bits more thoroughly. */
4348 return (uSector + uExtent) % pCache->cEntries;
4349}
4350
4351/**
4352 * Internal. Get sector number in the extent file from the relative sector
4353 * number in the extent.
4354 */
4355static int vmdkGetSector(PVMDKGTCACHE pCache, PVMDKEXTENT pExtent,
4356 uint64_t uSector, uint64_t *puExtentSector)
4357{
4358 uint64_t uGDIndex, uGTSector, uGTBlock;
4359 uint32_t uGTHash, uGTBlockIndex;
4360 PVMDKGTCACHEENTRY pGTCacheEntry;
4361 uint32_t aGTDataTmp[VMDK_GT_CACHELINE_SIZE];
4362 int rc;
4363
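    /* Two-level lookup: the grain directory entry for this sector points to a
     * grain table, the grain table entry points to the first sector of the
     * grain, and the offset within the grain is added on top. A zero entry at
     * either level means the area has never been written (sparse hole). */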
4364 uGDIndex = uSector / pExtent->cSectorsPerGDE;
4365 if (uGDIndex >= pExtent->cGDEntries)
4366 return VERR_OUT_OF_RANGE;
4367 uGTSector = pExtent->pGD[uGDIndex];
4368 if (!uGTSector)
4369 {
4370 /* There is no grain table referenced by this grain directory
4371 * entry. So there is absolutely no data in this area. */
4372 *puExtentSector = 0;
4373 return VINF_SUCCESS;
4374 }
4375
4376 uGTBlock = uSector / (pExtent->cSectorsPerGrain * VMDK_GT_CACHELINE_SIZE);
4377 uGTHash = vmdkGTCacheHash(pCache, uGTBlock, pExtent->uExtent);
4378 pGTCacheEntry = &pCache->aGTCache[uGTHash];
4379 if ( pGTCacheEntry->uExtent != pExtent->uExtent
4380 || pGTCacheEntry->uGTBlock != uGTBlock)
4381 {
4382 /* Cache miss, fetch data from disk. */
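        /* The grain table is read in cache-line sized chunks;
         * uGTBlock % (cGTEntries / VMDK_GT_CACHELINE_SIZE) is the index of the
         * chunk within this grain table, which gives the byte offset below. */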
4383 rc = vmdkFileReadAt(pExtent->pFile,
4384 VMDK_SECTOR2BYTE(uGTSector) + (uGTBlock % (pExtent->cGTEntries / VMDK_GT_CACHELINE_SIZE)) * sizeof(aGTDataTmp),
4385 aGTDataTmp, sizeof(aGTDataTmp), NULL);
4386 if (RT_FAILURE(rc))
4387 return vmdkError(pExtent->pImage, rc, RT_SRC_POS, N_("VMDK: cannot read grain table entry in '%s'"), pExtent->pszFullname);
4388 pGTCacheEntry->uExtent = pExtent->uExtent;
4389 pGTCacheEntry->uGTBlock = uGTBlock;
4390 for (unsigned i = 0; i < VMDK_GT_CACHELINE_SIZE; i++)
4391 pGTCacheEntry->aGTData[i] = RT_LE2H_U32(aGTDataTmp[i]);
4392 }
4393 uGTBlockIndex = (uSector / pExtent->cSectorsPerGrain) % VMDK_GT_CACHELINE_SIZE;
4394 uint32_t uGrainSector = pGTCacheEntry->aGTData[uGTBlockIndex];
4395 if (uGrainSector)
4396 *puExtentSector = uGrainSector + uSector % pExtent->cSectorsPerGrain;
4397 else
4398 *puExtentSector = 0;
4399 return VINF_SUCCESS;
4400}
4401
4402/**
4403 * Internal. Allocates a new grain table (if necessary), writes the grain
4404 * and updates the grain table. The cache is also updated by this operation.
4405 * This is separate from vmdkGetSector, because that should be as fast as
4406 * possible. Most code from vmdkGetSector also appears here.
4407 */
4408static int vmdkAllocGrain(PVMDKGTCACHE pCache, PVMDKEXTENT pExtent,
4409 uint64_t uSector, const void *pvBuf,
4410 uint64_t cbWrite)
4411{
4412 uint64_t uGDIndex, uGTSector, uRGTSector, uGTBlock;
4413 uint64_t cbExtentSize;
4414 uint32_t uGTHash, uGTBlockIndex;
4415 PVMDKGTCACHEENTRY pGTCacheEntry;
4416 uint32_t aGTDataTmp[VMDK_GT_CACHELINE_SIZE];
4417 int rc;
4418
4419 uGDIndex = uSector / pExtent->cSectorsPerGDE;
4420 if (uGDIndex >= pExtent->cGDEntries)
4421 return VERR_OUT_OF_RANGE;
4422 uGTSector = pExtent->pGD[uGDIndex];
4423 if (pExtent->pRGD)
4424 uRGTSector = pExtent->pRGD[uGDIndex];
4425 else
4426        uRGTSector = 0; /* avoid compiler warning */
4427 if (!uGTSector)
4428 {
4429 /* There is no grain table referenced by this grain directory
4430 * entry. So there is absolutely no data in this area. Allocate
4431 * a new grain table and put the reference to it in the GDs. */
4432 rc = vmdkFileGetSize(pExtent->pFile, &cbExtentSize);
4433 if (RT_FAILURE(rc))
4434 return vmdkError(pExtent->pImage, rc, RT_SRC_POS, N_("VMDK: error getting size in '%s'"), pExtent->pszFullname);
4435 Assert(!(cbExtentSize % 512));
4436 cbExtentSize = RT_ALIGN_64(cbExtentSize, 512);
4437 uGTSector = VMDK_BYTE2SECTOR(cbExtentSize);
4438 /* For writable streamOptimized extents the final sector is the
4439 * end-of-stream marker. Will be re-added after the grain table.
4440 * If the file has a footer it also will be re-added before EOS. */
4441 if (pExtent->pImage->uImageFlags & VD_VMDK_IMAGE_FLAGS_STREAM_OPTIMIZED)
4442 {
4443 uint64_t uEOSOff = 0;
4444 uGTSector--;
4445 if (pExtent->fFooter)
4446 {
4447 uGTSector--;
4448 uEOSOff = 512;
4449 rc = vmdkWriteMetaSparseExtent(pExtent, VMDK_SECTOR2BYTE(uGTSector) + pExtent->cGTEntries * sizeof(uint32_t));
4450 if (RT_FAILURE(rc))
4451 return vmdkError(pExtent->pImage, rc, RT_SRC_POS, N_("VMDK: cannot write footer after grain table in '%s'"), pExtent->pszFullname);
4452 }
4453 pExtent->uLastGrainSector = 0;
4454 uint8_t aEOS[512];
4455 memset(aEOS, '\0', sizeof(aEOS));
4456 rc = vmdkFileWriteAt(pExtent->pFile,
4457 VMDK_SECTOR2BYTE(uGTSector) + pExtent->cGTEntries * sizeof(uint32_t) + uEOSOff,
4458 aEOS, sizeof(aEOS), NULL);
4459 if (RT_FAILURE(rc))
4460            return vmdkError(pExtent->pImage, rc, RT_SRC_POS, N_("VMDK: cannot write end-of-stream marker after grain table in '%s'"), pExtent->pszFullname);
4461 }
4462 /* Normally the grain table is preallocated for hosted sparse extents
4463 * that support more than 32 bit sector numbers. So this shouldn't
4464 * ever happen on a valid extent. */
4465 if (uGTSector > UINT32_MAX)
4466 return VERR_VD_VMDK_INVALID_HEADER;
4467 /* Write grain table by writing the required number of grain table
4468 * cache chunks. Avoids dynamic memory allocation, but is a bit
4469 * slower. But as this is a pretty infrequently occurring case it
4470 * should be acceptable. */
4471 memset(aGTDataTmp, '\0', sizeof(aGTDataTmp));
4472 for (unsigned i = 0;
4473 i < pExtent->cGTEntries / VMDK_GT_CACHELINE_SIZE;
4474 i++)
4475 {
4476 rc = vmdkFileWriteAt(pExtent->pFile,
4477 VMDK_SECTOR2BYTE(uGTSector) + i * sizeof(aGTDataTmp),
4478 aGTDataTmp, sizeof(aGTDataTmp), NULL);
4479 if (RT_FAILURE(rc))
4480 return vmdkError(pExtent->pImage, rc, RT_SRC_POS, N_("VMDK: cannot write grain table allocation in '%s'"), pExtent->pszFullname);
4481 }
4482 if (pExtent->pRGD)
4483 {
4484 AssertReturn(!uRGTSector, VERR_VD_VMDK_INVALID_HEADER);
4485 rc = vmdkFileGetSize(pExtent->pFile, &cbExtentSize);
4486 if (RT_FAILURE(rc))
4487 return vmdkError(pExtent->pImage, rc, RT_SRC_POS, N_("VMDK: error getting size in '%s'"), pExtent->pszFullname);
4488 Assert(!(cbExtentSize % 512));
4489 uRGTSector = VMDK_BYTE2SECTOR(cbExtentSize);
4490 /* For writable streamOptimized extents the final sector is the
4491 * end-of-stream marker. Will be re-added after the grain table.
4492 * If the file has a footer it also will be re-added before EOS. */
4493 if (pExtent->pImage->uImageFlags & VD_VMDK_IMAGE_FLAGS_STREAM_OPTIMIZED)
4494 {
4495 uint64_t uEOSOff = 0;
4496 uRGTSector--;
4497 if (pExtent->fFooter)
4498 {
4499 uRGTSector--;
4500 uEOSOff = 512;
4501 rc = vmdkWriteMetaSparseExtent(pExtent, VMDK_SECTOR2BYTE(uRGTSector) + pExtent->cGTEntries * sizeof(uint32_t));
4502 if (RT_FAILURE(rc))
4503 return vmdkError(pExtent->pImage, rc, RT_SRC_POS, N_("VMDK: cannot write footer after redundant grain table in '%s'"), pExtent->pszFullname);
4504 }
4505 pExtent->uLastGrainSector = 0;
4506 uint8_t aEOS[512];
4507 memset(aEOS, '\0', sizeof(aEOS));
4508 rc = vmdkFileWriteAt(pExtent->pFile,
4509 VMDK_SECTOR2BYTE(uRGTSector) + pExtent->cGTEntries * sizeof(uint32_t) + uEOSOff,
4510 aEOS, sizeof(aEOS), NULL);
4511 if (RT_FAILURE(rc))
4512                return vmdkError(pExtent->pImage, rc, RT_SRC_POS, N_("VMDK: cannot write end-of-stream marker after redundant grain table in '%s'"), pExtent->pszFullname);
4513 }
4514 /* Normally the redundant grain table is preallocated for hosted
4515 * sparse extents that support more than 32 bit sector numbers. So
4516 * this shouldn't ever happen on a valid extent. */
4517 if (uRGTSector > UINT32_MAX)
4518 return VERR_VD_VMDK_INVALID_HEADER;
4519 /* Write backup grain table by writing the required number of grain
4520 * table cache chunks. Avoids dynamic memory allocation, but is a
4521 * bit slower. But as this is a pretty infrequently occurring case
4522 * it should be acceptable. */
4523 for (unsigned i = 0;
4524 i < pExtent->cGTEntries / VMDK_GT_CACHELINE_SIZE;
4525 i++)
4526 {
4527 rc = vmdkFileWriteAt(pExtent->pFile,
4528 VMDK_SECTOR2BYTE(uRGTSector) + i * sizeof(aGTDataTmp),
4529 aGTDataTmp, sizeof(aGTDataTmp), NULL);
4530 if (RT_FAILURE(rc))
4531 return vmdkError(pExtent->pImage, rc, RT_SRC_POS, N_("VMDK: cannot write backup grain table allocation in '%s'"), pExtent->pszFullname);
4532 }
4533 }
4534
4535 /* Update the grain directory on disk (doing it before writing the
4536 * grain table will result in a garbled extent if the operation is
4537     * aborted for some reason). Otherwise the worst that can happen is
4538 * some unused sectors in the extent. */
4539        uint32_t uGTSectorLE = RT_H2LE_U32((uint32_t)uGTSector); /* grain directory entries are 32-bit */
4540 rc = vmdkFileWriteAt(pExtent->pFile,
4541 VMDK_SECTOR2BYTE(pExtent->uSectorGD) + uGDIndex * sizeof(uGTSectorLE),
4542 &uGTSectorLE, sizeof(uGTSectorLE), NULL);
4543 if (RT_FAILURE(rc))
4544 return vmdkError(pExtent->pImage, rc, RT_SRC_POS, N_("VMDK: cannot write grain directory entry in '%s'"), pExtent->pszFullname);
4545 if (pExtent->pRGD)
4546 {
4547            uint32_t uRGTSectorLE = RT_H2LE_U32((uint32_t)uRGTSector); /* grain directory entries are 32-bit */
4548 rc = vmdkFileWriteAt(pExtent->pFile,
4549 VMDK_SECTOR2BYTE(pExtent->uSectorRGD) + uGDIndex * sizeof(uRGTSectorLE),
4550 &uRGTSectorLE, sizeof(uRGTSectorLE), NULL);
4551 if (RT_FAILURE(rc))
4552 return vmdkError(pExtent->pImage, rc, RT_SRC_POS, N_("VMDK: cannot write backup grain directory entry in '%s'"), pExtent->pszFullname);
4553 }
4554
4555 /* As the final step update the in-memory copy of the GDs. */
4556 pExtent->pGD[uGDIndex] = uGTSector;
4557 if (pExtent->pRGD)
4558 pExtent->pRGD[uGDIndex] = uRGTSector;
4559 }
4560
4561 rc = vmdkFileGetSize(pExtent->pFile, &cbExtentSize);
4562 if (RT_FAILURE(rc))
4563 return vmdkError(pExtent->pImage, rc, RT_SRC_POS, N_("VMDK: error getting size in '%s'"), pExtent->pszFullname);
4564 Assert(!(cbExtentSize % 512));
4565
4566 /* Write the data. Always a full grain, or we're in big trouble. */
4567 if (pExtent->pImage->uImageFlags & VD_VMDK_IMAGE_FLAGS_STREAM_OPTIMIZED)
4568 {
4569 /* For streamOptimized extents this is a little more difficult, as the
4570 * cached data also needs to be updated, to handle updating the last
4571 * written block properly. Also we're trying to avoid unnecessary gaps.
4572 * Additionally the end-of-stream marker needs to be written. */
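        /* If no grain was appended since the last metadata update, the file
         * still ends with the end-of-stream marker (preceded by the footer if
         * present); position the new grain so that it overwrites them. */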
4573 if (!pExtent->uLastGrainSector)
4574 {
4575 cbExtentSize -= 512;
4576 if (pExtent->fFooter)
4577 cbExtentSize -= 512;
4578 }
4579 else
4580 cbExtentSize = VMDK_SECTOR2BYTE(pExtent->uLastGrainSector) + pExtent->cbLastGrainWritten;
4581 Assert(cbWrite == VMDK_SECTOR2BYTE(pExtent->cSectorsPerGrain));
4582 uint32_t cbGrain = 0;
4583 rc = vmdkFileDeflateAt(pExtent->pFile, cbExtentSize,
4584 pvBuf, cbWrite, VMDK_MARKER_IGNORE, uSector, &cbGrain);
4585 if (RT_FAILURE(rc))
4586 {
4587 pExtent->uGrainSector = 0;
4588 pExtent->uLastGrainSector = 0;
4589 AssertRC(rc);
4590 return vmdkError(pExtent->pImage, rc, RT_SRC_POS, N_("VMDK: cannot write allocated compressed data block in '%s'"), pExtent->pszFullname);
4591 }
4592 cbGrain = RT_ALIGN(cbGrain, 512);
4593 pExtent->uLastGrainSector = VMDK_BYTE2SECTOR(cbExtentSize);
4594 pExtent->uLastGrainWritten = uSector / pExtent->cSectorsPerGrain;
4595 pExtent->cbLastGrainWritten = cbGrain;
4596 memcpy(pExtent->pvGrain, pvBuf, cbWrite);
4597 pExtent->uGrainSector = uSector;
4598
4599 uint64_t uEOSOff = 0;
4600 if (pExtent->fFooter)
4601 {
4602 uEOSOff = 512;
4603 rc = vmdkWriteMetaSparseExtent(pExtent, cbExtentSize + RT_ALIGN(cbGrain, 512));
4604 if (RT_FAILURE(rc))
4605 return vmdkError(pExtent->pImage, rc, RT_SRC_POS, N_("VMDK: cannot write footer after allocated data block in '%s'"), pExtent->pszFullname);
4606 }
4607 uint8_t aEOS[512];
4608 memset(aEOS, '\0', sizeof(aEOS));
4609 rc = vmdkFileWriteAt(pExtent->pFile, cbExtentSize + RT_ALIGN(cbGrain, 512) + uEOSOff,
4610 aEOS, sizeof(aEOS), NULL);
4611 if (RT_FAILURE(rc))
4612            return vmdkError(pExtent->pImage, rc, RT_SRC_POS, N_("VMDK: cannot write end-of-stream marker after allocated data block in '%s'"), pExtent->pszFullname);
4613 }
4614 else
4615 {
4616 rc = vmdkFileWriteAt(pExtent->pFile, cbExtentSize, pvBuf, cbWrite, NULL);
4617 if (RT_FAILURE(rc))
4618 return vmdkError(pExtent->pImage, rc, RT_SRC_POS, N_("VMDK: cannot write allocated data block in '%s'"), pExtent->pszFullname);
4619 }
4620
4621 /* Update the grain table (and the cache). */
4622 uGTBlock = uSector / (pExtent->cSectorsPerGrain * VMDK_GT_CACHELINE_SIZE);
4623 uGTHash = vmdkGTCacheHash(pCache, uGTBlock, pExtent->uExtent);
4624 pGTCacheEntry = &pCache->aGTCache[uGTHash];
4625 if ( pGTCacheEntry->uExtent != pExtent->uExtent
4626 || pGTCacheEntry->uGTBlock != uGTBlock)
4627 {
4628 /* Cache miss, fetch data from disk. */
4629 rc = vmdkFileReadAt(pExtent->pFile,
4630 VMDK_SECTOR2BYTE(uGTSector) + (uGTBlock % (pExtent->cGTEntries / VMDK_GT_CACHELINE_SIZE)) * sizeof(aGTDataTmp),
4631 aGTDataTmp, sizeof(aGTDataTmp), NULL);
4632 if (RT_FAILURE(rc))
4633 return vmdkError(pExtent->pImage, rc, RT_SRC_POS, N_("VMDK: cannot read allocated grain table entry in '%s'"), pExtent->pszFullname);
4634 pGTCacheEntry->uExtent = pExtent->uExtent;
4635 pGTCacheEntry->uGTBlock = uGTBlock;
4636 for (unsigned i = 0; i < VMDK_GT_CACHELINE_SIZE; i++)
4637 pGTCacheEntry->aGTData[i] = RT_LE2H_U32(aGTDataTmp[i]);
4638 }
4639 else
4640 {
4641 /* Cache hit. Convert grain table block back to disk format, otherwise
4642 * the code below will write garbage for all but the updated entry. */
4643 for (unsigned i = 0; i < VMDK_GT_CACHELINE_SIZE; i++)
4644 aGTDataTmp[i] = RT_H2LE_U32(pGTCacheEntry->aGTData[i]);
4645 }
4646 uGTBlockIndex = (uSector / pExtent->cSectorsPerGrain) % VMDK_GT_CACHELINE_SIZE;
4647 aGTDataTmp[uGTBlockIndex] = RT_H2LE_U32(VMDK_BYTE2SECTOR(cbExtentSize));
4648 pGTCacheEntry->aGTData[uGTBlockIndex] = VMDK_BYTE2SECTOR(cbExtentSize);
4649 /* Update grain table on disk. */
4650 rc = vmdkFileWriteAt(pExtent->pFile,
4651 VMDK_SECTOR2BYTE(uGTSector) + (uGTBlock % (pExtent->cGTEntries / VMDK_GT_CACHELINE_SIZE)) * sizeof(aGTDataTmp),
4652 aGTDataTmp, sizeof(aGTDataTmp), NULL);
4653 if (RT_FAILURE(rc))
4654 return vmdkError(pExtent->pImage, rc, RT_SRC_POS, N_("VMDK: cannot write updated grain table in '%s'"), pExtent->pszFullname);
4655 if (pExtent->pRGD)
4656 {
4657 /* Update backup grain table on disk. */
4658 rc = vmdkFileWriteAt(pExtent->pFile,
4659 VMDK_SECTOR2BYTE(uRGTSector) + (uGTBlock % (pExtent->cGTEntries / VMDK_GT_CACHELINE_SIZE)) * sizeof(aGTDataTmp),
4660 aGTDataTmp, sizeof(aGTDataTmp), NULL);
4661 if (RT_FAILURE(rc))
4662 return vmdkError(pExtent->pImage, rc, RT_SRC_POS, N_("VMDK: cannot write updated backup grain table in '%s'"), pExtent->pszFullname);
4663 }
4664#ifdef VBOX_WITH_VMDK_ESX
4665 if (RT_SUCCESS(rc) && pExtent->enmType == VMDKETYPE_ESX_SPARSE)
4666 {
4667 pExtent->uFreeSector = uGTSector + VMDK_BYTE2SECTOR(cbWrite);
4668 pExtent->fMetaDirty = true;
4669 }
4670#endif /* VBOX_WITH_VMDK_ESX */
4671 return rc;
4672}
4673
4674
4675/** @copydoc VBOXHDDBACKEND::pfnCheckIfValid */
4676static int vmdkCheckIfValid(const char *pszFilename, PVDINTERFACE pVDIfsDisk)
4677{
4678 LogFlowFunc(("pszFilename=\"%s\"\n", pszFilename));
4679 int rc = VINF_SUCCESS;
4680 PVMDKIMAGE pImage;
4681
4682 if ( !pszFilename
4683 || !*pszFilename
4684 || strchr(pszFilename, '"'))
4685 {
4686 rc = VERR_INVALID_PARAMETER;
4687 goto out;
4688 }
4689
4690 pImage = (PVMDKIMAGE)RTMemAllocZ(sizeof(VMDKIMAGE));
4691 if (!pImage)
4692 {
4693 rc = VERR_NO_MEMORY;
4694 goto out;
4695 }
4696 pImage->pszFilename = pszFilename;
4697 pImage->pFile = NULL;
4698 pImage->pExtents = NULL;
4699 pImage->pFiles = NULL;
4700 pImage->pGTCache = NULL;
4701 pImage->pDescData = NULL;
4702 pImage->pVDIfsDisk = pVDIfsDisk;
4703 pImage->pVDIfsImage = pVDIfsDisk;
4704 /** @todo speed up this test open (VD_OPEN_FLAGS_INFO) by skipping as
4705 * much as possible in vmdkOpenImage. */
4706 rc = vmdkOpenImage(pImage, VD_OPEN_FLAGS_INFO | VD_OPEN_FLAGS_READONLY);
4707 vmdkFreeImage(pImage, false);
4708 RTMemFree(pImage);
4709
4710out:
4711 LogFlowFunc(("returns %Rrc\n", rc));
4712 return rc;
4713}
4714
4715/** @copydoc VBOXHDDBACKEND::pfnOpen */
4716static int vmdkOpen(const char *pszFilename, unsigned uOpenFlags,
4717 PVDINTERFACE pVDIfsDisk, PVDINTERFACE pVDIfsImage,
4718 void **ppBackendData)
4719{
4720 LogFlowFunc(("pszFilename=\"%s\" uOpenFlags=%#x pVDIfsDisk=%#p pVDIfsImage=%#p ppBackendData=%#p\n", pszFilename, uOpenFlags, pVDIfsDisk, pVDIfsImage, ppBackendData));
4721 int rc;
4722 PVMDKIMAGE pImage;
4723
4724 /* Check open flags. All valid flags are supported. */
4725 if (uOpenFlags & ~VD_OPEN_FLAGS_MASK)
4726 {
4727 rc = VERR_INVALID_PARAMETER;
4728 goto out;
4729 }
4730
4731 /* Check remaining arguments. */
4732 if ( !VALID_PTR(pszFilename)
4733 || !*pszFilename
4734 || strchr(pszFilename, '"'))
4735 {
4736 rc = VERR_INVALID_PARAMETER;
4737 goto out;
4738 }
4739
4740
4741 pImage = (PVMDKIMAGE)RTMemAllocZ(sizeof(VMDKIMAGE));
4742 if (!pImage)
4743 {
4744 rc = VERR_NO_MEMORY;
4745 goto out;
4746 }
4747 pImage->pszFilename = pszFilename;
4748 pImage->pFile = NULL;
4749 pImage->pExtents = NULL;
4750 pImage->pFiles = NULL;
4751 pImage->pGTCache = NULL;
4752 pImage->pDescData = NULL;
4753 pImage->pVDIfsDisk = pVDIfsDisk;
4754 pImage->pVDIfsImage = pVDIfsImage;
4755
4756 rc = vmdkOpenImage(pImage, uOpenFlags);
4757 if (RT_SUCCESS(rc))
4758 *ppBackendData = pImage;
4759
4760out:
4761 LogFlowFunc(("returns %Rrc (pBackendData=%#p)\n", rc, *ppBackendData));
4762 return rc;
4763}
4764
4765/** @copydoc VBOXHDDBACKEND::pfnCreate */
4766static int vmdkCreate(const char *pszFilename, uint64_t cbSize,
4767 unsigned uImageFlags, const char *pszComment,
4768 PCPDMMEDIAGEOMETRY pPCHSGeometry,
4769 PCPDMMEDIAGEOMETRY pLCHSGeometry, PCRTUUID pUuid,
4770 unsigned uOpenFlags, unsigned uPercentStart,
4771 unsigned uPercentSpan, PVDINTERFACE pVDIfsDisk,
4772 PVDINTERFACE pVDIfsImage, PVDINTERFACE pVDIfsOperation,
4773 void **ppBackendData)
4774{
4775    LogFlowFunc(("pszFilename=\"%s\" cbSize=%llu uImageFlags=%#x pszComment=\"%s\" pPCHSGeometry=%#p pLCHSGeometry=%#p Uuid=%RTuuid uOpenFlags=%#x uPercentStart=%u uPercentSpan=%u pVDIfsDisk=%#p pVDIfsImage=%#p pVDIfsOperation=%#p ppBackendData=%#p\n", pszFilename, cbSize, uImageFlags, pszComment, pPCHSGeometry, pLCHSGeometry, pUuid, uOpenFlags, uPercentStart, uPercentSpan, pVDIfsDisk, pVDIfsImage, pVDIfsOperation, ppBackendData));
4776 int rc;
4777 PVMDKIMAGE pImage;
4778
4779 PFNVDPROGRESS pfnProgress = NULL;
4780 void *pvUser = NULL;
4781 PVDINTERFACE pIfProgress = VDInterfaceGet(pVDIfsOperation,
4782 VDINTERFACETYPE_PROGRESS);
4783 PVDINTERFACEPROGRESS pCbProgress = NULL;
4784 if (pIfProgress)
4785 {
4786 pCbProgress = VDGetInterfaceProgress(pIfProgress);
4787 pfnProgress = pCbProgress->pfnProgress;
4788 pvUser = pIfProgress->pvUser;
4789 }
4790
4791 /* Check open flags. All valid flags are supported. */
4792 if (uOpenFlags & ~VD_OPEN_FLAGS_MASK)
4793 {
4794 rc = VERR_INVALID_PARAMETER;
4795 goto out;
4796 }
4797
4798 /* Check size. Maximum 2TB-64K for sparse images, otherwise unlimited. */
4799 if ( !cbSize
4800 || (!(uImageFlags & VD_IMAGE_FLAGS_FIXED) && cbSize >= _1T * 2 - _64K))
4801 {
4802 rc = VERR_VD_INVALID_SIZE;
4803 goto out;
4804 }
4805
4806 /* Check remaining arguments. */
4807 if ( !VALID_PTR(pszFilename)
4808 || !*pszFilename
4809 || strchr(pszFilename, '"')
4810 || !VALID_PTR(pPCHSGeometry)
4811 || !VALID_PTR(pLCHSGeometry)
4812#ifndef VBOX_WITH_VMDK_ESX
4813 || ( uImageFlags & VD_VMDK_IMAGE_FLAGS_ESX
4814 && !(uImageFlags & VD_IMAGE_FLAGS_FIXED))
4815#endif
4816 || ( (uImageFlags & VD_VMDK_IMAGE_FLAGS_STREAM_OPTIMIZED)
4817 && (uImageFlags & ~(VD_VMDK_IMAGE_FLAGS_STREAM_OPTIMIZED | VD_IMAGE_FLAGS_DIFF))))
4818 {
4819 rc = VERR_INVALID_PARAMETER;
4820 goto out;
4821 }
4822
4823 pImage = (PVMDKIMAGE)RTMemAllocZ(sizeof(VMDKIMAGE));
4824 if (!pImage)
4825 {
4826 rc = VERR_NO_MEMORY;
4827 goto out;
4828 }
4829 pImage->pszFilename = pszFilename;
4830 pImage->pFile = NULL;
4831 pImage->pExtents = NULL;
4832 pImage->pFiles = NULL;
4833 pImage->pGTCache = NULL;
4834 pImage->pDescData = NULL;
4835 pImage->pVDIfsDisk = pVDIfsDisk;
4836 /* Descriptors for split images can be pretty large, especially if the
4837 * filename is long. So prepare for the worst, and allocate quite some
4838 * memory for the descriptor in this case. */
4839 if (uImageFlags & VD_VMDK_IMAGE_FLAGS_SPLIT_2G)
4840 pImage->cbDescAlloc = VMDK_SECTOR2BYTE(200);
4841 else
4842 pImage->cbDescAlloc = VMDK_SECTOR2BYTE(20);
4843 pImage->pDescData = (char *)RTMemAllocZ(pImage->cbDescAlloc);
4844 if (!pImage->pDescData)
4845 {
4846 rc = VERR_NO_MEMORY;
4847 goto out;
4848 }
4849
4850 rc = vmdkCreateImage(pImage, cbSize, uImageFlags, pszComment,
4851 pPCHSGeometry, pLCHSGeometry, pUuid,
4852 pfnProgress, pvUser, uPercentStart, uPercentSpan);
4853 if (RT_SUCCESS(rc))
4854 {
4855 /* So far the image is opened in read/write mode. Make sure the
4856 * image is opened in read-only mode if the caller requested that. */
4857 if (uOpenFlags & VD_OPEN_FLAGS_READONLY)
4858 {
4859 vmdkFreeImage(pImage, false);
4860 rc = vmdkOpenImage(pImage, uOpenFlags);
4861 if (RT_FAILURE(rc))
4862 goto out;
4863 }
4864 *ppBackendData = pImage;
4865 }
4866 else
4867 {
4868 RTMemFree(pImage->pDescData);
4869 RTMemFree(pImage);
4870 }
4871
4872out:
4873 LogFlowFunc(("returns %Rrc (pBackendData=%#p)\n", rc, *ppBackendData));
4874 return rc;
4875}
4876
4877/**
4878 * Replaces a fragment of a string with the specified string.
4879 *
4880 * @returns Pointer to the allocated UTF-8 string, or NULL if the string to search for was not found or memory allocation failed.
4881 * @param pszWhere UTF-8 string to search in.
4882 * @param pszWhat UTF-8 string to search for.
4883 * @param pszByWhat UTF-8 string to replace the found string with.
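 * @note Only the first occurrence of @a pszWhat is replaced.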
4884 */
4885static char * vmdkStrReplace(const char *pszWhere, const char *pszWhat, const char *pszByWhat)
4886{
4887 AssertPtr(pszWhere);
4888 AssertPtr(pszWhat);
4889 AssertPtr(pszByWhat);
4890 const char *pszFoundStr = strstr(pszWhere, pszWhat);
4891 if (!pszFoundStr)
4892 return NULL;
4893 size_t cFinal = strlen(pszWhere) + 1 + strlen(pszByWhat) - strlen(pszWhat);
4894 char *pszNewStr = (char *)RTMemAlloc(cFinal);
4895 if (pszNewStr)
4896 {
4897 char *pszTmp = pszNewStr;
4898 memcpy(pszTmp, pszWhere, pszFoundStr - pszWhere);
4899 pszTmp += pszFoundStr - pszWhere;
4900 memcpy(pszTmp, pszByWhat, strlen(pszByWhat));
4901 pszTmp += strlen(pszByWhat);
4902 strcpy(pszTmp, pszFoundStr + strlen(pszWhat));
4903 }
4904 return pszNewStr;
4905}
4906
4907/** @copydoc VBOXHDDBACKEND::pfnRename */
4908static int vmdkRename(void *pBackendData, const char *pszFilename)
4909{
4910 LogFlowFunc(("pBackendData=%#p pszFilename=%#p\n", pBackendData, pszFilename));
4911
4912 PVMDKIMAGE pImage = (PVMDKIMAGE)pBackendData;
4913 int rc = VINF_SUCCESS;
4914 char **apszOldName = NULL;
4915 char **apszNewName = NULL;
4916 char **apszNewLines = NULL;
4917 char *pszOldDescName = NULL;
4918 bool fImageFreed = false;
4919 bool fEmbeddedDesc = false;
4920 unsigned cExtents = pImage->cExtents;
4921 char *pszNewBaseName = NULL;
4922 char *pszOldBaseName = NULL;
4923 char *pszNewFullName = NULL;
4924 char *pszOldFullName = NULL;
4925 const char *pszOldImageName;
4926 unsigned i, line;
4927 VMDKDESCRIPTOR DescriptorCopy;
4928 VMDKEXTENT ExtentCopy;
4929
4930 memset(&DescriptorCopy, 0, sizeof(DescriptorCopy));
4931
4932 /* Check arguments. */
4933 if ( !pImage
4934 || (pImage->uImageFlags & VD_VMDK_IMAGE_FLAGS_RAWDISK)
4935 || !VALID_PTR(pszFilename)
4936 || !*pszFilename)
4937 {
4938 rc = VERR_INVALID_PARAMETER;
4939 goto out;
4940 }
4941
4942 /*
4943 * Allocate an array to store both old and new names of renamed files
4944 * in case we have to roll back the changes. Arrays are initialized
4945 * with zeros. We actually save stuff when and if we change it.
4946 */
4947 apszOldName = (char **)RTMemTmpAllocZ((cExtents + 1) * sizeof(char*));
4948 apszNewName = (char **)RTMemTmpAllocZ((cExtents + 1) * sizeof(char*));
4949 apszNewLines = (char **)RTMemTmpAllocZ((cExtents) * sizeof(char*));
4950 if (!apszOldName || !apszNewName || !apszNewLines)
4951 {
4952 rc = VERR_NO_MEMORY;
4953 goto out;
4954 }
4955
4956 /* Save the descriptor size and position. */
4957 if (pImage->pDescData)
4958 {
4959 /* Separate descriptor file. */
4960 fEmbeddedDesc = false;
4961 }
4962 else
4963 {
4964 /* Embedded descriptor file. */
4965 ExtentCopy = pImage->pExtents[0];
4966 fEmbeddedDesc = true;
4967 }
4968 /* Save the descriptor content. */
4969 DescriptorCopy.cLines = pImage->Descriptor.cLines;
4970 for (i = 0; i < DescriptorCopy.cLines; i++)
4971 {
4972 DescriptorCopy.aLines[i] = RTStrDup(pImage->Descriptor.aLines[i]);
4973 if (!DescriptorCopy.aLines[i])
4974 {
4975 rc = VERR_NO_MEMORY;
4976 goto out;
4977 }
4978 }
4979
4980 /* Prepare both old and new base names used for string replacement. */
4981 pszNewBaseName = RTStrDup(RTPathFilename(pszFilename));
4982 RTPathStripExt(pszNewBaseName);
4983 pszOldBaseName = RTStrDup(RTPathFilename(pImage->pszFilename));
4984 RTPathStripExt(pszOldBaseName);
4985 /* Prepare both old and new full names used for string replacement. */
4986 pszNewFullName = RTStrDup(pszFilename);
4987 RTPathStripExt(pszNewFullName);
4988 pszOldFullName = RTStrDup(pImage->pszFilename);
4989 RTPathStripExt(pszOldFullName);
4990
4991 /* --- Up to this point we have not done any damage yet. --- */
4992
4993 /* Save the old name for easy access to the old descriptor file. */
4994 pszOldDescName = RTStrDup(pImage->pszFilename);
4995 /* Save old image name. */
4996 pszOldImageName = pImage->pszFilename;
4997
4998 /* Update the descriptor with modified extent names. */
4999 for (i = 0, line = pImage->Descriptor.uFirstExtent;
5000 i < cExtents;
5001 i++, line = pImage->Descriptor.aNextLines[line])
5002 {
5003 /* Assume that vmdkStrReplace will fail. */
5004 rc = VERR_NO_MEMORY;
5005 /* Update the descriptor. */
5006 apszNewLines[i] = vmdkStrReplace(pImage->Descriptor.aLines[line],
5007 pszOldBaseName, pszNewBaseName);
5008 if (!apszNewLines[i])
5009 goto rollback;
5010 pImage->Descriptor.aLines[line] = apszNewLines[i];
5011 }
5012 /* Make sure the descriptor gets written back. */
5013 pImage->Descriptor.fDirty = true;
5014 /* Flush the descriptor now, in case it is embedded. */
5015 (void)vmdkFlushImage(pImage);
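 /* For an embedded descriptor this writes the updated lines into the first
  * extent file before that file is closed and renamed below. */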
5016
5017 /* Close and rename/move extents. */
5018 for (i = 0; i < cExtents; i++)
5019 {
5020 PVMDKEXTENT pExtent = &pImage->pExtents[i];
5021 /* Compose new name for the extent. */
5022 apszNewName[i] = vmdkStrReplace(pExtent->pszFullname,
5023 pszOldFullName, pszNewFullName);
5024 if (!apszNewName[i])
5025 goto rollback;
5026 /* Close the extent file. */
5027 vmdkFileClose(pImage, &pExtent->pFile, false);
5028 /* Rename the extent file. */
5029 rc = RTFileMove(pExtent->pszFullname, apszNewName[i], 0);
5030 if (RT_FAILURE(rc))
5031 goto rollback;
5032 /* Remember the old name. */
5033 apszOldName[i] = RTStrDup(pExtent->pszFullname);
5034 }
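 /* Note: apszOldName[i] is only set after a successful move, so the rollback
  * below moves back exactly those files that were actually renamed. */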
5035 /* Release all old stuff. */
5036 vmdkFreeImage(pImage, false);
5037
5038 fImageFreed = true;
5039
 5040 /* The last element of the new/old name arrays is reserved for
 5041 * the name of the descriptor file.
 5042 */
5043 apszNewName[cExtents] = RTStrDup(pszFilename);
5044 /* Rename the descriptor file if it's separate. */
5045 if (!fEmbeddedDesc)
5046 {
5047 rc = RTFileMove(pImage->pszFilename, apszNewName[cExtents], 0);
5048 if (RT_FAILURE(rc))
5049 goto rollback;
5050 /* Save old name only if we may need to change it back. */
 5051 apszOldName[cExtents] = RTStrDup(pszOldDescName);
5052 }
5053
5054 /* Update pImage with the new information. */
5055 pImage->pszFilename = pszFilename;
5056
5057 /* Open the new image. */
5058 rc = vmdkOpenImage(pImage, pImage->uOpenFlags);
5059 if (RT_SUCCESS(rc))
5060 goto out;
5061
5062rollback:
5063 /* Roll back all changes in case of failure. */
5064 if (RT_FAILURE(rc))
5065 {
5066 int rrc;
5067 if (!fImageFreed)
5068 {
5069 /*
5070 * Some extents may have been closed, close the rest. We will
5071 * re-open the whole thing later.
5072 */
5073 vmdkFreeImage(pImage, false);
5074 }
5075 /* Rename files back. */
5076 for (i = 0; i <= cExtents; i++)
5077 {
5078 if (apszOldName[i])
5079 {
5080 rrc = RTFileMove(apszNewName[i], apszOldName[i], 0);
5081 AssertRC(rrc);
5082 }
5083 }
5084 /* Restore the old descriptor. */
5085 PVMDKFILE pFile;
5086 rrc = vmdkFileOpen(pImage, &pFile, pszOldDescName,
5087 RTFILE_O_READWRITE | RTFILE_O_OPEN | RTFILE_O_DENY_WRITE, false);
5088 AssertRC(rrc);
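 /* Temporarily wire up just enough state (the file handle plus either the
  * extent or the descriptor buffer) for vmdkWriteDescriptor to write the
  * saved descriptor copy back into the old file. */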
5089 if (fEmbeddedDesc)
5090 {
5091 ExtentCopy.pFile = pFile;
5092 pImage->pExtents = &ExtentCopy;
5093 }
5094 else
5095 {
 5096 /* pDescData must not be NULL for a separate descriptor.
 5097 * Its content is never accessed here, so any non-NULL buffer will do.
 5098 */
5099 pImage->pDescData = pszOldDescName;
5100 pImage->pFile = pFile;
5101 }
5102 pImage->Descriptor = DescriptorCopy;
5103 vmdkWriteDescriptor(pImage);
5104 vmdkFileClose(pImage, &pFile, false);
5105 /* Get rid of the stuff we implanted. */
5106 pImage->pExtents = NULL;
5107 pImage->pFile = NULL;
5108 pImage->pDescData = NULL;
5109 /* Re-open the image back. */
5110 pImage->pszFilename = pszOldImageName;
5111 rrc = vmdkOpenImage(pImage, pImage->uOpenFlags);
5112 AssertRC(rrc);
5113 }
5114
5115out:
5116 for (i = 0; i < DescriptorCopy.cLines; i++)
5117 if (DescriptorCopy.aLines[i])
5118 RTStrFree(DescriptorCopy.aLines[i]);
5119 if (apszOldName)
5120 {
5121 for (i = 0; i <= cExtents; i++)
5122 if (apszOldName[i])
5123 RTStrFree(apszOldName[i]);
5124 RTMemTmpFree(apszOldName);
5125 }
5126 if (apszNewName)
5127 {
5128 for (i = 0; i <= cExtents; i++)
5129 if (apszNewName[i])
5130 RTStrFree(apszNewName[i]);
5131 RTMemTmpFree(apszNewName);
5132 }
5133 if (apszNewLines)
5134 {
5135 for (i = 0; i < cExtents; i++)
5136 if (apszNewLines[i])
5137 RTStrFree(apszNewLines[i]);
5138 RTMemTmpFree(apszNewLines);
5139 }
5140 if (pszOldDescName)
5141 RTStrFree(pszOldDescName);
5142 if (pszOldBaseName)
5143 RTStrFree(pszOldBaseName);
5144 if (pszNewBaseName)
5145 RTStrFree(pszNewBaseName);
5146 if (pszOldFullName)
5147 RTStrFree(pszOldFullName);
5148 if (pszNewFullName)
5149 RTStrFree(pszNewFullName);
5150 LogFlowFunc(("returns %Rrc\n", rc));
5151 return rc;
5152}
5153
5154/** @copydoc VBOXHDDBACKEND::pfnClose */
5155static int vmdkClose(void *pBackendData, bool fDelete)
5156{
5157 LogFlowFunc(("pBackendData=%#p fDelete=%d\n", pBackendData, fDelete));
5158 PVMDKIMAGE pImage = (PVMDKIMAGE)pBackendData;
5159 int rc = VINF_SUCCESS;
5160
 5161 /* Freeing a never allocated image (e.g. because the open failed) is
 5162 * not signalled as an error, since there is nothing to clean up. */
5163 if (pImage)
5164 {
5165 vmdkFreeImage(pImage, fDelete);
5166 RTMemFree(pImage);
5167 }
5168
5169 LogFlowFunc(("returns %Rrc\n", rc));
5170 return rc;
5171}
5172
5173/** @copydoc VBOXHDDBACKEND::pfnRead */
5174static int vmdkRead(void *pBackendData, uint64_t uOffset, void *pvBuf,
5175 size_t cbToRead, size_t *pcbActuallyRead)
5176{
5177 LogFlowFunc(("pBackendData=%#p uOffset=%llu pvBuf=%#p cbToRead=%zu pcbActuallyRead=%#p\n", pBackendData, uOffset, pvBuf, cbToRead, pcbActuallyRead));
5178 PVMDKIMAGE pImage = (PVMDKIMAGE)pBackendData;
5179 PVMDKEXTENT pExtent;
5180 uint64_t uSectorExtentRel;
5181 uint64_t uSectorExtentAbs;
5182 int rc;
5183
5184 AssertPtr(pImage);
5185 Assert(uOffset % 512 == 0);
5186 Assert(cbToRead % 512 == 0);
5187
5188 if ( uOffset + cbToRead > pImage->cbSize
5189 || cbToRead == 0)
5190 {
5191 rc = VERR_INVALID_PARAMETER;
5192 goto out;
5193 }
5194
5195 rc = vmdkFindExtent(pImage, VMDK_BYTE2SECTOR(uOffset),
5196 &pExtent, &uSectorExtentRel);
5197 if (RT_FAILURE(rc))
5198 goto out;
5199
5200 /* Check access permissions as defined in the extent descriptor. */
5201 if (pExtent->enmAccess == VMDKACCESS_NOACCESS)
5202 {
5203 rc = VERR_VD_VMDK_INVALID_STATE;
5204 goto out;
5205 }
5206
5207 /* Clip read range to remain in this extent. */
5208 cbToRead = RT_MIN(cbToRead, VMDK_SECTOR2BYTE(pExtent->uSectorOffset + pExtent->cNominalSectors - uSectorExtentRel));
5209
5210 /* Handle the read according to the current extent type. */
5211 switch (pExtent->enmType)
5212 {
5213 case VMDKETYPE_HOSTED_SPARSE:
5214#ifdef VBOX_WITH_VMDK_ESX
5215 case VMDKETYPE_ESX_SPARSE:
5216#endif /* VBOX_WITH_VMDK_ESX */
5217 rc = vmdkGetSector(pImage->pGTCache, pExtent, uSectorExtentRel,
5218 &uSectorExtentAbs);
5219 if (RT_FAILURE(rc))
5220 goto out;
5221 /* Clip read range to at most the rest of the grain. */
5222 cbToRead = RT_MIN(cbToRead, VMDK_SECTOR2BYTE(pExtent->cSectorsPerGrain - uSectorExtentRel % pExtent->cSectorsPerGrain));
5223 Assert(!(cbToRead % 512));
5224 if (uSectorExtentAbs == 0)
5225 rc = VERR_VD_BLOCK_FREE;
5226 else
5227 {
5228 if (pImage->uImageFlags & VD_VMDK_IMAGE_FLAGS_STREAM_OPTIMIZED)
5229 {
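 /* Compressed grains can only be read as a whole: the grain containing
  * the requested sectors is inflated into pExtent->pvGrain (cached via
  * uGrainSector) and the requested part is copied out below. */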
5230 uint32_t uSectorInGrain = uSectorExtentRel % pExtent->cSectorsPerGrain;
5231 uSectorExtentAbs -= uSectorInGrain;
5232 uint64_t uLBA;
5233 if (pExtent->uGrainSector != uSectorExtentAbs)
5234 {
5235 rc = vmdkFileInflateAt(pExtent->pFile, VMDK_SECTOR2BYTE(uSectorExtentAbs),
5236 pExtent->pvGrain, VMDK_SECTOR2BYTE(pExtent->cSectorsPerGrain), VMDK_MARKER_IGNORE, &uLBA, NULL);
5237 if (RT_FAILURE(rc))
5238 {
5239 pExtent->uGrainSector = 0;
5240 AssertRC(rc);
5241 goto out;
5242 }
5243 pExtent->uGrainSector = uSectorExtentAbs;
5244 Assert(uLBA == uSectorExtentRel);
5245 }
5246 memcpy(pvBuf, (uint8_t *)pExtent->pvGrain + VMDK_SECTOR2BYTE(uSectorInGrain), cbToRead);
5247 }
5248 else
5249 {
5250 rc = vmdkFileReadAt(pExtent->pFile,
5251 VMDK_SECTOR2BYTE(uSectorExtentAbs),
5252 pvBuf, cbToRead, NULL);
5253 }
5254 }
5255 break;
5256 case VMDKETYPE_VMFS:
5257 case VMDKETYPE_FLAT:
5258 rc = vmdkFileReadAt(pExtent->pFile,
5259 VMDK_SECTOR2BYTE(uSectorExtentRel),
5260 pvBuf, cbToRead, NULL);
5261 break;
5262 case VMDKETYPE_ZERO:
5263 memset(pvBuf, '\0', cbToRead);
5264 break;
5265 }
5266 if (pcbActuallyRead)
5267 *pcbActuallyRead = cbToRead;
5268
5269out:
5270 LogFlowFunc(("returns %Rrc\n", rc));
5271 return rc;
5272}
5273
5274/** @copydoc VBOXHDDBACKEND::pfnWrite */
5275static int vmdkWrite(void *pBackendData, uint64_t uOffset, const void *pvBuf,
5276 size_t cbToWrite, size_t *pcbWriteProcess,
5277 size_t *pcbPreRead, size_t *pcbPostRead, unsigned fWrite)
5278{
5279 LogFlowFunc(("pBackendData=%#p uOffset=%llu pvBuf=%#p cbToWrite=%zu pcbWriteProcess=%#p pcbPreRead=%#p pcbPostRead=%#p\n", pBackendData, uOffset, pvBuf, cbToWrite, pcbWriteProcess, pcbPreRead, pcbPostRead));
5280 PVMDKIMAGE pImage = (PVMDKIMAGE)pBackendData;
5281 PVMDKEXTENT pExtent;
5282 uint64_t uSectorExtentRel;
5283 uint64_t uSectorExtentAbs;
5284 int rc;
5285
5286 AssertPtr(pImage);
5287 Assert(uOffset % 512 == 0);
5288 Assert(cbToWrite % 512 == 0);
5289
5290 if (pImage->uOpenFlags & VD_OPEN_FLAGS_READONLY)
5291 {
5292 rc = VERR_VD_IMAGE_READ_ONLY;
5293 goto out;
5294 }
5295
5296 if (cbToWrite == 0)
5297 {
5298 rc = VERR_INVALID_PARAMETER;
5299 goto out;
5300 }
5301
5302 /* No size check here, will do that later when the extent is located.
5303 * There are sparse images out there which according to the spec are
5304 * invalid, because the total size is not a multiple of the grain size.
5305 * Also for sparse images which are stitched together in odd ways (not at
5306 * grain boundaries, and with the nominal size not being a multiple of the
5307 * grain size), this would prevent writing to the last grain. */
5308
5309 rc = vmdkFindExtent(pImage, VMDK_BYTE2SECTOR(uOffset),
5310 &pExtent, &uSectorExtentRel);
5311 if (RT_FAILURE(rc))
5312 goto out;
5313
5314 /* Check access permissions as defined in the extent descriptor. */
5315 if (pExtent->enmAccess != VMDKACCESS_READWRITE)
5316 {
5317 rc = VERR_VD_VMDK_INVALID_STATE;
5318 goto out;
5319 }
5320
5321 /* Handle the write according to the current extent type. */
5322 switch (pExtent->enmType)
5323 {
5324 case VMDKETYPE_HOSTED_SPARSE:
5325#ifdef VBOX_WITH_VMDK_ESX
5326 case VMDKETYPE_ESX_SPARSE:
5327#endif /* VBOX_WITH_VMDK_ESX */
5328 rc = vmdkGetSector(pImage->pGTCache, pExtent, uSectorExtentRel,
5329 &uSectorExtentAbs);
5330 if (RT_FAILURE(rc))
5331 goto out;
5332 /* Clip write range to at most the rest of the grain. */
5333 cbToWrite = RT_MIN(cbToWrite, VMDK_SECTOR2BYTE(pExtent->cSectorsPerGrain - uSectorExtentRel % pExtent->cSectorsPerGrain));
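 /* Stream optimized images are written append-only: writes that go back
  * before the last grain written are rejected below. */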
5334 if ( pImage->uImageFlags & VD_VMDK_IMAGE_FLAGS_STREAM_OPTIMIZED
5335 && uSectorExtentRel < (uint64_t)pExtent->uLastGrainWritten * pExtent->cSectorsPerGrain)
5336 {
5337 rc = VERR_VD_VMDK_INVALID_WRITE;
5338 goto out;
5339 }
5340 if (uSectorExtentAbs == 0)
5341 {
5342 if (cbToWrite == VMDK_SECTOR2BYTE(pExtent->cSectorsPerGrain))
5343 {
5344 /* Full block write to a previously unallocated block.
5345 * Check if the caller wants to avoid the automatic alloc. */
5346 if (!(fWrite & VD_WRITE_NO_ALLOC))
5347 {
5348 /* Allocate GT and find out where to store the grain. */
5349 rc = vmdkAllocGrain(pImage->pGTCache, pExtent,
5350 uSectorExtentRel, pvBuf, cbToWrite);
5351 }
5352 else
5353 rc = VERR_VD_BLOCK_FREE;
5354 *pcbPreRead = 0;
5355 *pcbPostRead = 0;
5356 }
5357 else
5358 {
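 /* Partial write to an unallocated grain: report how much has to be
  * read before and after the written range so the caller can assemble
  * a full grain, and signal VERR_VD_BLOCK_FREE. */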
5359 /* Clip write range to remain in this extent. */
5360 cbToWrite = RT_MIN(cbToWrite, VMDK_SECTOR2BYTE(pExtent->uSectorOffset + pExtent->cNominalSectors - uSectorExtentRel));
5361 *pcbPreRead = VMDK_SECTOR2BYTE(uSectorExtentRel % pExtent->cSectorsPerGrain);
5362 *pcbPostRead = VMDK_SECTOR2BYTE(pExtent->cSectorsPerGrain) - cbToWrite - *pcbPreRead;
5363 rc = VERR_VD_BLOCK_FREE;
5364 }
5365 }
5366 else
5367 {
5368 if (pImage->uImageFlags & VD_VMDK_IMAGE_FLAGS_STREAM_OPTIMIZED)
5369 {
5370 uint32_t uSectorInGrain = uSectorExtentRel % pExtent->cSectorsPerGrain;
5371 uSectorExtentAbs -= uSectorInGrain;
5372 uint64_t uLBA = uSectorExtentRel;
5373 if ( pExtent->uGrainSector != uSectorExtentAbs
5374 || pExtent->uGrainSector != pExtent->uLastGrainSector)
5375 {
5376 rc = vmdkFileInflateAt(pExtent->pFile, VMDK_SECTOR2BYTE(uSectorExtentAbs),
5377 pExtent->pvGrain, VMDK_SECTOR2BYTE(pExtent->cSectorsPerGrain), VMDK_MARKER_IGNORE, &uLBA, NULL);
5378 if (RT_FAILURE(rc))
5379 {
5380 pExtent->uGrainSector = 0;
5381 pExtent->uLastGrainSector = 0;
5382 AssertRC(rc);
5383 goto out;
5384 }
5385 pExtent->uGrainSector = uSectorExtentAbs;
5386 pExtent->uLastGrainSector = uSectorExtentAbs;
5387 Assert(uLBA == uSectorExtentRel);
5388 }
5389 memcpy((uint8_t *)pExtent->pvGrain + VMDK_SECTOR2BYTE(uSectorInGrain), pvBuf, cbToWrite);
5390 uint32_t cbGrain = 0;
5391 rc = vmdkFileDeflateAt(pExtent->pFile,
5392 VMDK_SECTOR2BYTE(uSectorExtentAbs),
5393 pExtent->pvGrain, VMDK_SECTOR2BYTE(pExtent->cSectorsPerGrain),
5394 VMDK_MARKER_IGNORE, uLBA, &cbGrain);
5395 if (RT_FAILURE(rc))
5396 {
5397 pExtent->uGrainSector = 0;
5398 pExtent->uLastGrainSector = 0;
5399 AssertRC(rc);
5400 return vmdkError(pExtent->pImage, rc, RT_SRC_POS, N_("VMDK: cannot write compressed data block in '%s'"), pExtent->pszFullname);
5401 }
5402 cbGrain = RT_ALIGN(cbGrain, 512);
5403 pExtent->uLastGrainSector = uSectorExtentAbs;
5404 pExtent->uLastGrainWritten = uSectorExtentRel / pExtent->cSectorsPerGrain;
5405 pExtent->cbLastGrainWritten = cbGrain;
5406
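 /* Layout after the compressed grain (rounded up to a full sector): an
  * optional footer (if the extent uses one) followed by a zeroed sector
  * acting as end-of-stream marker; both are rewritten after each grain
  * appended here. */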
5407 uint64_t uEOSOff = 0;
5408 if (pExtent->fFooter)
5409 {
5410 uEOSOff = 512;
5411 rc = vmdkWriteMetaSparseExtent(pExtent, VMDK_SECTOR2BYTE(uSectorExtentAbs) + RT_ALIGN(cbGrain, 512));
5412 if (RT_FAILURE(rc))
5413 return vmdkError(pExtent->pImage, rc, RT_SRC_POS, N_("VMDK: cannot write footer after data block in '%s'"), pExtent->pszFullname);
5414 }
5415 uint8_t aEOS[512];
5416 memset(aEOS, '\0', sizeof(aEOS));
5417 rc = vmdkFileWriteAt(pExtent->pFile,
5418 VMDK_SECTOR2BYTE(uSectorExtentAbs) + RT_ALIGN(cbGrain, 512) + uEOSOff,
5419 aEOS, sizeof(aEOS), NULL);
5420 if (RT_FAILURE(rc))
 5421 return vmdkError(pExtent->pImage, rc, RT_SRC_POS, N_("VMDK: cannot write end-of-stream marker after data block in '%s'"), pExtent->pszFullname);
5422 }
5423 else
5424 {
5425 rc = vmdkFileWriteAt(pExtent->pFile,
5426 VMDK_SECTOR2BYTE(uSectorExtentAbs),
5427 pvBuf, cbToWrite, NULL);
5428 }
5429 }
5430 break;
5431 case VMDKETYPE_VMFS:
5432 case VMDKETYPE_FLAT:
5433 /* Clip write range to remain in this extent. */
5434 cbToWrite = RT_MIN(cbToWrite, VMDK_SECTOR2BYTE(pExtent->uSectorOffset + pExtent->cNominalSectors - uSectorExtentRel));
5435 rc = vmdkFileWriteAt(pExtent->pFile,
5436 VMDK_SECTOR2BYTE(uSectorExtentRel),
5437 pvBuf, cbToWrite, NULL);
5438 break;
5439 case VMDKETYPE_ZERO:
5440 /* Clip write range to remain in this extent. */
5441 cbToWrite = RT_MIN(cbToWrite, VMDK_SECTOR2BYTE(pExtent->uSectorOffset + pExtent->cNominalSectors - uSectorExtentRel));
5442 break;
5443 }
5444 if (pcbWriteProcess)
5445 *pcbWriteProcess = cbToWrite;
5446
5447out:
5448 LogFlowFunc(("returns %Rrc\n", rc));
5449 return rc;
5450}
5451
5452/** @copydoc VBOXHDDBACKEND::pfnFlush */
5453static int vmdkFlush(void *pBackendData)
5454{
5455 LogFlowFunc(("pBackendData=%#p\n", pBackendData));
5456 PVMDKIMAGE pImage = (PVMDKIMAGE)pBackendData;
5457 int rc;
5458
5459 AssertPtr(pImage);
5460
5461 rc = vmdkFlushImage(pImage);
5462 LogFlowFunc(("returns %Rrc\n", rc));
5463 return rc;
5464}
5465
5466/** @copydoc VBOXHDDBACKEND::pfnGetVersion */
5467static unsigned vmdkGetVersion(void *pBackendData)
5468{
5469 LogFlowFunc(("pBackendData=%#p\n", pBackendData));
5470 PVMDKIMAGE pImage = (PVMDKIMAGE)pBackendData;
5471
5472 AssertPtr(pImage);
5473
5474 if (pImage)
5475 return VMDK_IMAGE_VERSION;
5476 else
5477 return 0;
5478}
5479
5480/** @copydoc VBOXHDDBACKEND::pfnGetSize */
5481static uint64_t vmdkGetSize(void *pBackendData)
5482{
5483 LogFlowFunc(("pBackendData=%#p\n", pBackendData));
5484 PVMDKIMAGE pImage = (PVMDKIMAGE)pBackendData;
5485
5486 AssertPtr(pImage);
5487
5488 if (pImage)
5489 return pImage->cbSize;
5490 else
5491 return 0;
5492}
5493
5494/** @copydoc VBOXHDDBACKEND::pfnGetFileSize */
5495static uint64_t vmdkGetFileSize(void *pBackendData)
5496{
5497 LogFlowFunc(("pBackendData=%#p\n", pBackendData));
5498 PVMDKIMAGE pImage = (PVMDKIMAGE)pBackendData;
5499 uint64_t cb = 0;
5500
5501 AssertPtr(pImage);
5502
5503 if (pImage)
5504 {
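 /* Sum the on-disk sizes of the descriptor file (if separate) and of all
  * extent files. */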
5505 uint64_t cbFile;
5506 if (pImage->pFile != NULL)
5507 {
5508 int rc = vmdkFileGetSize(pImage->pFile, &cbFile);
5509 if (RT_SUCCESS(rc))
5510 cb += cbFile;
5511 }
5512 for (unsigned i = 0; i < pImage->cExtents; i++)
5513 {
5514 if (pImage->pExtents[i].pFile != NULL)
5515 {
5516 int rc = vmdkFileGetSize(pImage->pExtents[i].pFile, &cbFile);
5517 if (RT_SUCCESS(rc))
5518 cb += cbFile;
5519 }
5520 }
5521 }
5522
5523 LogFlowFunc(("returns %lld\n", cb));
5524 return cb;
5525}
5526
5527/** @copydoc VBOXHDDBACKEND::pfnGetPCHSGeometry */
5528static int vmdkGetPCHSGeometry(void *pBackendData,
5529 PPDMMEDIAGEOMETRY pPCHSGeometry)
5530{
5531 LogFlowFunc(("pBackendData=%#p pPCHSGeometry=%#p\n", pBackendData, pPCHSGeometry));
5532 PVMDKIMAGE pImage = (PVMDKIMAGE)pBackendData;
5533 int rc;
5534
5535 AssertPtr(pImage);
5536
5537 if (pImage)
5538 {
5539 if (pImage->PCHSGeometry.cCylinders)
5540 {
5541 *pPCHSGeometry = pImage->PCHSGeometry;
5542 rc = VINF_SUCCESS;
5543 }
5544 else
5545 rc = VERR_VD_GEOMETRY_NOT_SET;
5546 }
5547 else
5548 rc = VERR_VD_NOT_OPENED;
5549
5550 LogFlowFunc(("returns %Rrc (PCHS=%u/%u/%u)\n", rc, pPCHSGeometry->cCylinders, pPCHSGeometry->cHeads, pPCHSGeometry->cSectors));
5551 return rc;
5552}
5553
5554/** @copydoc VBOXHDDBACKEND::pfnSetPCHSGeometry */
5555static int vmdkSetPCHSGeometry(void *pBackendData,
5556 PCPDMMEDIAGEOMETRY pPCHSGeometry)
5557{
5558 LogFlowFunc(("pBackendData=%#p pPCHSGeometry=%#p PCHS=%u/%u/%u\n", pBackendData, pPCHSGeometry, pPCHSGeometry->cCylinders, pPCHSGeometry->cHeads, pPCHSGeometry->cSectors));
5559 PVMDKIMAGE pImage = (PVMDKIMAGE)pBackendData;
5560 int rc;
5561
5562 AssertPtr(pImage);
5563
5564 if (pImage)
5565 {
5566 if (pImage->uOpenFlags & VD_OPEN_FLAGS_READONLY)
5567 {
5568 rc = VERR_VD_IMAGE_READ_ONLY;
5569 goto out;
5570 }
5571 rc = vmdkDescSetPCHSGeometry(pImage, pPCHSGeometry);
5572 if (RT_FAILURE(rc))
5573 goto out;
5574
5575 pImage->PCHSGeometry = *pPCHSGeometry;
5576 rc = VINF_SUCCESS;
5577 }
5578 else
5579 rc = VERR_VD_NOT_OPENED;
5580
5581out:
5582 LogFlowFunc(("returns %Rrc\n", rc));
5583 return rc;
5584}
5585
5586/** @copydoc VBOXHDDBACKEND::pfnGetLCHSGeometry */
5587static int vmdkGetLCHSGeometry(void *pBackendData,
5588 PPDMMEDIAGEOMETRY pLCHSGeometry)
5589{
5590 LogFlowFunc(("pBackendData=%#p pLCHSGeometry=%#p\n", pBackendData, pLCHSGeometry));
5591 PVMDKIMAGE pImage = (PVMDKIMAGE)pBackendData;
5592 int rc;
5593
5594 AssertPtr(pImage);
5595
5596 if (pImage)
5597 {
5598 if (pImage->LCHSGeometry.cCylinders)
5599 {
5600 *pLCHSGeometry = pImage->LCHSGeometry;
5601 rc = VINF_SUCCESS;
5602 }
5603 else
5604 rc = VERR_VD_GEOMETRY_NOT_SET;
5605 }
5606 else
5607 rc = VERR_VD_NOT_OPENED;
5608
5609 LogFlowFunc(("returns %Rrc (LCHS=%u/%u/%u)\n", rc, pLCHSGeometry->cCylinders, pLCHSGeometry->cHeads, pLCHSGeometry->cSectors));
5610 return rc;
5611}
5612
5613/** @copydoc VBOXHDDBACKEND::pfnSetLCHSGeometry */
5614static int vmdkSetLCHSGeometry(void *pBackendData,
5615 PCPDMMEDIAGEOMETRY pLCHSGeometry)
5616{
5617 LogFlowFunc(("pBackendData=%#p pLCHSGeometry=%#p LCHS=%u/%u/%u\n", pBackendData, pLCHSGeometry, pLCHSGeometry->cCylinders, pLCHSGeometry->cHeads, pLCHSGeometry->cSectors));
5618 PVMDKIMAGE pImage = (PVMDKIMAGE)pBackendData;
5619 int rc;
5620
5621 AssertPtr(pImage);
5622
5623 if (pImage)
5624 {
5625 if (pImage->uOpenFlags & VD_OPEN_FLAGS_READONLY)
5626 {
5627 rc = VERR_VD_IMAGE_READ_ONLY;
5628 goto out;
5629 }
5630 rc = vmdkDescSetLCHSGeometry(pImage, pLCHSGeometry);
5631 if (RT_FAILURE(rc))
5632 goto out;
5633
5634 pImage->LCHSGeometry = *pLCHSGeometry;
5635 rc = VINF_SUCCESS;
5636 }
5637 else
5638 rc = VERR_VD_NOT_OPENED;
5639
5640out:
5641 LogFlowFunc(("returns %Rrc\n", rc));
5642 return rc;
5643}
5644
5645/** @copydoc VBOXHDDBACKEND::pfnGetImageFlags */
5646static unsigned vmdkGetImageFlags(void *pBackendData)
5647{
5648 LogFlowFunc(("pBackendData=%#p\n", pBackendData));
5649 PVMDKIMAGE pImage = (PVMDKIMAGE)pBackendData;
5650 unsigned uImageFlags;
5651
5652 AssertPtr(pImage);
5653
5654 if (pImage)
5655 uImageFlags = pImage->uImageFlags;
5656 else
5657 uImageFlags = 0;
5658
5659 LogFlowFunc(("returns %#x\n", uImageFlags));
5660 return uImageFlags;
5661}
5662
5663/** @copydoc VBOXHDDBACKEND::pfnGetOpenFlags */
5664static unsigned vmdkGetOpenFlags(void *pBackendData)
5665{
5666 LogFlowFunc(("pBackendData=%#p\n", pBackendData));
5667 PVMDKIMAGE pImage = (PVMDKIMAGE)pBackendData;
5668 unsigned uOpenFlags;
5669
5670 AssertPtr(pImage);
5671
5672 if (pImage)
5673 uOpenFlags = pImage->uOpenFlags;
5674 else
5675 uOpenFlags = 0;
5676
5677 LogFlowFunc(("returns %#x\n", uOpenFlags));
5678 return uOpenFlags;
5679}
5680
5681/** @copydoc VBOXHDDBACKEND::pfnSetOpenFlags */
5682static int vmdkSetOpenFlags(void *pBackendData, unsigned uOpenFlags)
5683{
 5684 LogFlowFunc(("pBackendData=%#p uOpenFlags=%#x\n", pBackendData, uOpenFlags));
5685 PVMDKIMAGE pImage = (PVMDKIMAGE)pBackendData;
5686 int rc;
5687
5688 /* Image must be opened and the new flags must be valid. Just readonly and
5689 * info flags are supported. */
5690 if (!pImage || (uOpenFlags & ~(VD_OPEN_FLAGS_READONLY | VD_OPEN_FLAGS_INFO | VD_OPEN_FLAGS_ASYNC_IO)))
5691 {
5692 rc = VERR_INVALID_PARAMETER;
5693 goto out;
5694 }
5695
5696 /* Implement this operation via reopening the image. */
5697 vmdkFreeImage(pImage, false);
5698 rc = vmdkOpenImage(pImage, uOpenFlags);
5699
5700out:
5701 LogFlowFunc(("returns %Rrc\n", rc));
5702 return rc;
5703}
5704
5705/** @copydoc VBOXHDDBACKEND::pfnGetComment */
5706static int vmdkGetComment(void *pBackendData, char *pszComment,
5707 size_t cbComment)
5708{
5709 LogFlowFunc(("pBackendData=%#p pszComment=%#p cbComment=%zu\n", pBackendData, pszComment, cbComment));
5710 PVMDKIMAGE pImage = (PVMDKIMAGE)pBackendData;
5711 int rc;
5712
5713 AssertPtr(pImage);
5714
5715 if (pImage)
5716 {
5717 const char *pszCommentEncoded = NULL;
5718 rc = vmdkDescDDBGetStr(pImage, &pImage->Descriptor,
5719 "ddb.comment", &pszCommentEncoded);
5720 if (rc == VERR_VD_VMDK_VALUE_NOT_FOUND)
5721 pszCommentEncoded = NULL;
5722 else if (RT_FAILURE(rc))
5723 goto out;
5724
5725 if (pszComment && pszCommentEncoded)
5726 rc = vmdkDecodeString(pszCommentEncoded, pszComment, cbComment);
5727 else
5728 {
5729 if (pszComment)
5730 *pszComment = '\0';
5731 rc = VINF_SUCCESS;
5732 }
5733 if (pszCommentEncoded)
5734 RTStrFree((char *)(void *)pszCommentEncoded);
5735 }
5736 else
5737 rc = VERR_VD_NOT_OPENED;
5738
5739out:
5740 LogFlowFunc(("returns %Rrc comment='%s'\n", rc, pszComment));
5741 return rc;
5742}
5743
5744/** @copydoc VBOXHDDBACKEND::pfnSetComment */
5745static int vmdkSetComment(void *pBackendData, const char *pszComment)
5746{
5747 LogFlowFunc(("pBackendData=%#p pszComment=\"%s\"\n", pBackendData, pszComment));
5748 PVMDKIMAGE pImage = (PVMDKIMAGE)pBackendData;
5749 int rc;
5750
5751 AssertPtr(pImage);
5752
 5753 if (pImage && (pImage->uOpenFlags & VD_OPEN_FLAGS_READONLY))
5754 {
5755 rc = VERR_VD_IMAGE_READ_ONLY;
5756 goto out;
5757 }
5758
5759 if (pImage)
5760 rc = vmdkSetImageComment(pImage, pszComment);
5761 else
5762 rc = VERR_VD_NOT_OPENED;
5763
5764out:
5765 LogFlowFunc(("returns %Rrc\n", rc));
5766 return rc;
5767}
5768
5769/** @copydoc VBOXHDDBACKEND::pfnGetUuid */
5770static int vmdkGetUuid(void *pBackendData, PRTUUID pUuid)
5771{
5772 LogFlowFunc(("pBackendData=%#p pUuid=%#p\n", pBackendData, pUuid));
5773 PVMDKIMAGE pImage = (PVMDKIMAGE)pBackendData;
5774 int rc;
5775
5776 AssertPtr(pImage);
5777
5778 if (pImage)
5779 {
5780 *pUuid = pImage->ImageUuid;
5781 rc = VINF_SUCCESS;
5782 }
5783 else
5784 rc = VERR_VD_NOT_OPENED;
5785
5786 LogFlowFunc(("returns %Rrc (%RTuuid)\n", rc, pUuid));
5787 return rc;
5788}
5789
5790/** @copydoc VBOXHDDBACKEND::pfnSetUuid */
5791static int vmdkSetUuid(void *pBackendData, PCRTUUID pUuid)
5792{
5793 LogFlowFunc(("pBackendData=%#p Uuid=%RTuuid\n", pBackendData, pUuid));
5794 PVMDKIMAGE pImage = (PVMDKIMAGE)pBackendData;
5795 int rc;
5796
5797 LogFlowFunc(("%RTuuid\n", pUuid));
5798 AssertPtr(pImage);
5799
5800 if (pImage)
5801 {
5802 if (!(pImage->uOpenFlags & VD_OPEN_FLAGS_READONLY))
5803 {
5804 pImage->ImageUuid = *pUuid;
5805 rc = vmdkDescDDBSetUuid(pImage, &pImage->Descriptor,
5806 VMDK_DDB_IMAGE_UUID, pUuid);
5807 if (RT_FAILURE(rc))
5808 return vmdkError(pImage, rc, RT_SRC_POS, N_("VMDK: error storing image UUID in descriptor in '%s'"), pImage->pszFilename);
5809 rc = VINF_SUCCESS;
5810 }
5811 else
5812 rc = VERR_VD_IMAGE_READ_ONLY;
5813 }
5814 else
5815 rc = VERR_VD_NOT_OPENED;
5816
5817 LogFlowFunc(("returns %Rrc\n", rc));
5818 return rc;
5819}
5820
5821/** @copydoc VBOXHDDBACKEND::pfnGetModificationUuid */
5822static int vmdkGetModificationUuid(void *pBackendData, PRTUUID pUuid)
5823{
5824 LogFlowFunc(("pBackendData=%#p pUuid=%#p\n", pBackendData, pUuid));
5825 PVMDKIMAGE pImage = (PVMDKIMAGE)pBackendData;
5826 int rc;
5827
5828 AssertPtr(pImage);
5829
5830 if (pImage)
5831 {
5832 *pUuid = pImage->ModificationUuid;
5833 rc = VINF_SUCCESS;
5834 }
5835 else
5836 rc = VERR_VD_NOT_OPENED;
5837
5838 LogFlowFunc(("returns %Rrc (%RTuuid)\n", rc, pUuid));
5839 return rc;
5840}
5841
5842/** @copydoc VBOXHDDBACKEND::pfnSetModificationUuid */
5843static int vmdkSetModificationUuid(void *pBackendData, PCRTUUID pUuid)
5844{
5845 LogFlowFunc(("pBackendData=%#p Uuid=%RTuuid\n", pBackendData, pUuid));
5846 PVMDKIMAGE pImage = (PVMDKIMAGE)pBackendData;
5847 int rc;
5848
5849 AssertPtr(pImage);
5850
5851 if (pImage)
5852 {
5853 if (!(pImage->uOpenFlags & VD_OPEN_FLAGS_READONLY))
5854 {
5855 /*
5856 * Only change the modification uuid if it changed.
 5857 * Avoids a lot of unnecessary 1-byte writes during
5858 * vmdkFlush.
5859 */
5860 if (RTUuidCompare(&pImage->ModificationUuid, pUuid))
5861 {
5862 pImage->ModificationUuid = *pUuid;
5863 rc = vmdkDescDDBSetUuid(pImage, &pImage->Descriptor,
5864 VMDK_DDB_MODIFICATION_UUID, pUuid);
5865 if (RT_FAILURE(rc))
5866 return vmdkError(pImage, rc, RT_SRC_POS, N_("VMDK: error storing modification UUID in descriptor in '%s'"), pImage->pszFilename);
5867 }
5868 rc = VINF_SUCCESS;
5869 }
5870 else
5871 rc = VERR_VD_IMAGE_READ_ONLY;
5872 }
5873 else
5874 rc = VERR_VD_NOT_OPENED;
5875
5876 LogFlowFunc(("returns %Rrc\n", rc));
5877 return rc;
5878}
5879
5880/** @copydoc VBOXHDDBACKEND::pfnGetParentUuid */
5881static int vmdkGetParentUuid(void *pBackendData, PRTUUID pUuid)
5882{
5883 LogFlowFunc(("pBackendData=%#p pUuid=%#p\n", pBackendData, pUuid));
5884 PVMDKIMAGE pImage = (PVMDKIMAGE)pBackendData;
5885 int rc;
5886
5887 AssertPtr(pImage);
5888
5889 if (pImage)
5890 {
5891 *pUuid = pImage->ParentUuid;
5892 rc = VINF_SUCCESS;
5893 }
5894 else
5895 rc = VERR_VD_NOT_OPENED;
5896
5897 LogFlowFunc(("returns %Rrc (%RTuuid)\n", rc, pUuid));
5898 return rc;
5899}
5900
5901/** @copydoc VBOXHDDBACKEND::pfnSetParentUuid */
5902static int vmdkSetParentUuid(void *pBackendData, PCRTUUID pUuid)
5903{
5904 LogFlowFunc(("pBackendData=%#p Uuid=%RTuuid\n", pBackendData, pUuid));
5905 PVMDKIMAGE pImage = (PVMDKIMAGE)pBackendData;
5906 int rc;
5907
5908 AssertPtr(pImage);
5909
5910 if (pImage)
5911 {
5912 if (!(pImage->uOpenFlags & VD_OPEN_FLAGS_READONLY))
5913 {
5914 pImage->ParentUuid = *pUuid;
5915 rc = vmdkDescDDBSetUuid(pImage, &pImage->Descriptor,
5916 VMDK_DDB_PARENT_UUID, pUuid);
5917 if (RT_FAILURE(rc))
5918 return vmdkError(pImage, rc, RT_SRC_POS, N_("VMDK: error storing parent image UUID in descriptor in '%s'"), pImage->pszFilename);
5919 rc = VINF_SUCCESS;
5920 }
5921 else
5922 rc = VERR_VD_IMAGE_READ_ONLY;
5923 }
5924 else
5925 rc = VERR_VD_NOT_OPENED;
5926
5927 LogFlowFunc(("returns %Rrc\n", rc));
5928 return rc;
5929}
5930
5931/** @copydoc VBOXHDDBACKEND::pfnGetParentModificationUuid */
5932static int vmdkGetParentModificationUuid(void *pBackendData, PRTUUID pUuid)
5933{
5934 LogFlowFunc(("pBackendData=%#p pUuid=%#p\n", pBackendData, pUuid));
5935 PVMDKIMAGE pImage = (PVMDKIMAGE)pBackendData;
5936 int rc;
5937
5938 AssertPtr(pImage);
5939
5940 if (pImage)
5941 {
5942 *pUuid = pImage->ParentModificationUuid;
5943 rc = VINF_SUCCESS;
5944 }
5945 else
5946 rc = VERR_VD_NOT_OPENED;
5947
5948 LogFlowFunc(("returns %Rrc (%RTuuid)\n", rc, pUuid));
5949 return rc;
5950}
5951
5952/** @copydoc VBOXHDDBACKEND::pfnSetParentModificationUuid */
5953static int vmdkSetParentModificationUuid(void *pBackendData, PCRTUUID pUuid)
5954{
5955 LogFlowFunc(("pBackendData=%#p Uuid=%RTuuid\n", pBackendData, pUuid));
5956 PVMDKIMAGE pImage = (PVMDKIMAGE)pBackendData;
5957 int rc;
5958
5959 AssertPtr(pImage);
5960
5961 if (pImage)
5962 {
5963 if (!(pImage->uOpenFlags & VD_OPEN_FLAGS_READONLY))
5964 {
5965 pImage->ParentModificationUuid = *pUuid;
5966 rc = vmdkDescDDBSetUuid(pImage, &pImage->Descriptor,
5967 VMDK_DDB_PARENT_MODIFICATION_UUID, pUuid);
5968 if (RT_FAILURE(rc))
 5969 return vmdkError(pImage, rc, RT_SRC_POS, N_("VMDK: error storing parent modification UUID in descriptor in '%s'"), pImage->pszFilename);
5970 rc = VINF_SUCCESS;
5971 }
5972 else
5973 rc = VERR_VD_IMAGE_READ_ONLY;
5974 }
5975 else
5976 rc = VERR_VD_NOT_OPENED;
5977
5978 LogFlowFunc(("returns %Rrc\n", rc));
5979 return rc;
5980}
5981
5982/** @copydoc VBOXHDDBACKEND::pfnDump */
5983static void vmdkDump(void *pBackendData)
5984{
5985 PVMDKIMAGE pImage = (PVMDKIMAGE)pBackendData;
5986
5987 AssertPtr(pImage);
5988 if (pImage)
5989 {
5990 pImage->pInterfaceErrorCallbacks->pfnMessage(pImage->pInterfaceError->pvUser, "Header: Geometry PCHS=%u/%u/%u LCHS=%u/%u/%u cbSector=%llu\n",
5991 pImage->PCHSGeometry.cCylinders, pImage->PCHSGeometry.cHeads, pImage->PCHSGeometry.cSectors,
5992 pImage->LCHSGeometry.cCylinders, pImage->LCHSGeometry.cHeads, pImage->LCHSGeometry.cSectors,
5993 VMDK_BYTE2SECTOR(pImage->cbSize));
5994 pImage->pInterfaceErrorCallbacks->pfnMessage(pImage->pInterfaceError->pvUser, "Header: uuidCreation={%RTuuid}\n", &pImage->ImageUuid);
5995 pImage->pInterfaceErrorCallbacks->pfnMessage(pImage->pInterfaceError->pvUser, "Header: uuidModification={%RTuuid}\n", &pImage->ModificationUuid);
5996 pImage->pInterfaceErrorCallbacks->pfnMessage(pImage->pInterfaceError->pvUser, "Header: uuidParent={%RTuuid}\n", &pImage->ParentUuid);
5997 pImage->pInterfaceErrorCallbacks->pfnMessage(pImage->pInterfaceError->pvUser, "Header: uuidParentModification={%RTuuid}\n", &pImage->ParentModificationUuid);
5998 }
5999}
6000
6001
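/* The timestamp and parent filename operations below are not supported by
 * this backend and simply return VERR_NOT_IMPLEMENTED. */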
6002static int vmdkGetTimeStamp(void *pvBackendData, PRTTIMESPEC pTimeStamp)
6003{
6004 int rc = VERR_NOT_IMPLEMENTED;
6005 LogFlow(("%s: returned %Rrc\n", __FUNCTION__, rc));
6006 return rc;
6007}
6008
6009static int vmdkGetParentTimeStamp(void *pvBackendData, PRTTIMESPEC pTimeStamp)
6010{
6011 int rc = VERR_NOT_IMPLEMENTED;
6012 LogFlow(("%s: returned %Rrc\n", __FUNCTION__, rc));
6013 return rc;
6014}
6015
6016static int vmdkSetParentTimeStamp(void *pvBackendData, PCRTTIMESPEC pTimeStamp)
6017{
6018 int rc = VERR_NOT_IMPLEMENTED;
6019 LogFlow(("%s: returned %Rrc\n", __FUNCTION__, rc));
6020 return rc;
6021}
6022
6023static int vmdkGetParentFilename(void *pvBackendData, char **ppszParentFilename)
6024{
6025 int rc = VERR_NOT_IMPLEMENTED;
6026 LogFlow(("%s: returned %Rrc\n", __FUNCTION__, rc));
6027 return rc;
6028}
6029
6030static int vmdkSetParentFilename(void *pvBackendData, const char *pszParentFilename)
6031{
6032 int rc = VERR_NOT_IMPLEMENTED;
6033 LogFlow(("%s: returned %Rrc\n", __FUNCTION__, rc));
6034 return rc;
6035}
6036
6037static bool vmdkIsAsyncIOSupported(void *pvBackendData)
6038{
6039 PVMDKIMAGE pImage = (PVMDKIMAGE)pvBackendData;
6040 bool fAsyncIOSupported = false;
6041
6042 if (pImage)
6043 {
6044 fAsyncIOSupported = true;
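 /* Async I/O is only reported as supported if every extent is flat or
  * zero; sparse extents are not handled by the async paths (see
  * vmdkAsyncRead/vmdkAsyncWrite). */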
6045 for (unsigned i = 0; i < pImage->cExtents; i++)
6046 {
6047 if ( pImage->pExtents[i].enmType != VMDKETYPE_FLAT
6048 && pImage->pExtents[i].enmType != VMDKETYPE_ZERO)
6049 {
6050 fAsyncIOSupported = false;
6051 break; /* Stop search */
6052 }
6053 }
6054 }
6055
6056 return fAsyncIOSupported;
6057}
6058
6059static int vmdkAsyncRead(void *pvBackendData, uint64_t uOffset, size_t cbRead,
6060 PVDIOCTX pIoCtx, size_t *pcbActuallyRead)
6061{
6062 LogFlowFunc(("pvBackendData=%#p uOffset=%llu pIoCtx=%#p cbToRead=%zu pcbActuallyRead=%#p\n",
6063 pvBackendData, uOffset, pIoCtx, cbRead, pcbActuallyRead));
6064 PVMDKIMAGE pImage = (PVMDKIMAGE)pvBackendData;
6065 PVMDKEXTENT pExtent;
6066 uint64_t uSectorExtentRel;
6067 uint64_t uSectorExtentAbs;
6068 int rc;
6069
6070 AssertPtr(pImage);
6071 Assert(uOffset % 512 == 0);
6072 Assert(cbRead % 512 == 0);
6073
6074 if ( uOffset + cbRead > pImage->cbSize
6075 || cbRead == 0)
6076 {
6077 rc = VERR_INVALID_PARAMETER;
6078 goto out;
6079 }
6080
6081 rc = vmdkFindExtent(pImage, VMDK_BYTE2SECTOR(uOffset),
6082 &pExtent, &uSectorExtentRel);
6083 if (RT_FAILURE(rc))
6084 goto out;
6085
6086 /* Check access permissions as defined in the extent descriptor. */
6087 if (pExtent->enmAccess == VMDKACCESS_NOACCESS)
6088 {
6089 rc = VERR_VD_VMDK_INVALID_STATE;
6090 goto out;
6091 }
6092
6093 /* Clip read range to remain in this extent. */
6094 cbRead = RT_MIN(cbRead, VMDK_SECTOR2BYTE(pExtent->uSectorOffset + pExtent->cNominalSectors - uSectorExtentRel));
6095
6096 /* Handle the read according to the current extent type. */
6097 switch (pExtent->enmType)
6098 {
6099 case VMDKETYPE_HOSTED_SPARSE:
6100#ifdef VBOX_WITH_VMDK_ESX
6101 case VMDKETYPE_ESX_SPARSE:
6102#endif /* VBOX_WITH_VMDK_ESX */
6103 AssertMsgFailed(("Not supported\n"));
6104 break;
6105 case VMDKETYPE_VMFS:
6106 case VMDKETYPE_FLAT:
6107 rc = pImage->pInterfaceIOCallbacks->pfnReadUserAsync(pImage->pInterfaceIO->pvUser,
6108 pExtent->pFile->pStorage,
6109 VMDK_SECTOR2BYTE(uSectorExtentRel),
6110 pIoCtx, cbRead);
6111 break;
6112 case VMDKETYPE_ZERO:
6113 size_t cbSet;
6114
6115 cbSet = pImage->pInterfaceIOCallbacks->pfnIoCtxSet(pImage->pInterfaceIO->pvUser,
6116 pIoCtx, 0, cbRead);
6117 Assert(cbSet == cbRead);
6118
6119 rc = VINF_SUCCESS;
6120 break;
6121 }
6122 if (pcbActuallyRead)
6123 *pcbActuallyRead = cbRead;
6124
6125out:
6126 LogFlowFunc(("returns %Rrc\n", rc));
6127 return rc;
6128}
6129
6130static int vmdkAsyncWrite(void *pvBackendData, uint64_t uOffset, size_t cbWrite,
6131 PVDIOCTX pIoCtx,
6132 size_t *pcbWriteProcess, size_t *pcbPreRead,
6133 size_t *pcbPostRead, unsigned fWrite)
6134{
6135 LogFlowFunc(("pvBackendData=%#p uOffset=%llu pIoCtx=%#p cbToWrite=%zu pcbWriteProcess=%#p pcbPreRead=%#p pcbPostRead=%#p\n",
6136 pvBackendData, uOffset, pIoCtx, cbWrite, pcbWriteProcess, pcbPreRead, pcbPostRead));
6137 PVMDKIMAGE pImage = (PVMDKIMAGE)pvBackendData;
6138 PVMDKEXTENT pExtent;
6139 uint64_t uSectorExtentRel;
6140 uint64_t uSectorExtentAbs;
6141 int rc;
6142
6143 AssertPtr(pImage);
6144 Assert(uOffset % 512 == 0);
6145 Assert(cbWrite % 512 == 0);
6146
6147 if (pImage->uOpenFlags & VD_OPEN_FLAGS_READONLY)
6148 {
6149 rc = VERR_VD_IMAGE_READ_ONLY;
6150 goto out;
6151 }
6152
6153 if (cbWrite == 0)
6154 {
6155 rc = VERR_INVALID_PARAMETER;
6156 goto out;
6157 }
6158
6159 /* No size check here, will do that later when the extent is located.
6160 * There are sparse images out there which according to the spec are
6161 * invalid, because the total size is not a multiple of the grain size.
6162 * Also for sparse images which are stitched together in odd ways (not at
6163 * grain boundaries, and with the nominal size not being a multiple of the
6164 * grain size), this would prevent writing to the last grain. */
6165
6166 rc = vmdkFindExtent(pImage, VMDK_BYTE2SECTOR(uOffset),
6167 &pExtent, &uSectorExtentRel);
6168 if (RT_FAILURE(rc))
6169 goto out;
6170
6171 /* Check access permissions as defined in the extent descriptor. */
6172 if (pExtent->enmAccess != VMDKACCESS_READWRITE)
6173 {
6174 rc = VERR_VD_VMDK_INVALID_STATE;
6175 goto out;
6176 }
6177
6178 /* Handle the write according to the current extent type. */
6179 switch (pExtent->enmType)
6180 {
6181 case VMDKETYPE_HOSTED_SPARSE:
6182#ifdef VBOX_WITH_VMDK_ESX
6183 case VMDKETYPE_ESX_SPARSE:
6184#endif /* VBOX_WITH_VMDK_ESX */
6185 AssertMsgFailed(("Not supported\n"));
6186 break;
6187 case VMDKETYPE_VMFS:
6188 case VMDKETYPE_FLAT:
6189 /* Clip write range to remain in this extent. */
6190 cbWrite = RT_MIN(cbWrite, VMDK_SECTOR2BYTE(pExtent->uSectorOffset + pExtent->cNominalSectors - uSectorExtentRel));
6191 rc = pImage->pInterfaceIOCallbacks->pfnWriteUserAsync(pImage->pInterfaceIO->pvUser,
6192 pExtent->pFile->pStorage,
6193 VMDK_SECTOR2BYTE(uSectorExtentRel),
6194 pIoCtx, cbWrite);
6195 break;
6196 case VMDKETYPE_ZERO:
6197 /* Clip write range to remain in this extent. */
6198 cbWrite = RT_MIN(cbWrite, VMDK_SECTOR2BYTE(pExtent->uSectorOffset + pExtent->cNominalSectors - uSectorExtentRel));
6199 break;
6200 }
6201 if (pcbWriteProcess)
6202 *pcbWriteProcess = cbWrite;
6203
6204out:
6205 LogFlowFunc(("returns %Rrc\n", rc));
6206 return rc;
6207}
6208
6209static int vmdkAsyncFlush(void *pvBackendData, PVDIOCTX pIoCtx)
6210{
6211 int rc = VERR_NOT_IMPLEMENTED;
6212 LogFlowFunc(("returns %Rrc\n", rc));
6213 return rc;
6214}
6215
6216
6217VBOXHDDBACKEND g_VmdkBackend =
6218{
6219 /* pszBackendName */
6220 "VMDK",
6221 /* cbSize */
6222 sizeof(VBOXHDDBACKEND),
6223 /* uBackendCaps */
6224 VD_CAP_UUID | VD_CAP_CREATE_FIXED | VD_CAP_CREATE_DYNAMIC
 6225 | VD_CAP_CREATE_SPLIT_2G | VD_CAP_DIFF | VD_CAP_FILE | VD_CAP_ASYNC,
6226 /* papszFileExtensions */
6227 s_apszVmdkFileExtensions,
6228 /* paConfigInfo */
6229 NULL,
6230 /* hPlugin */
6231 NIL_RTLDRMOD,
6232 /* pfnCheckIfValid */
6233 vmdkCheckIfValid,
6234 /* pfnOpen */
6235 vmdkOpen,
6236 /* pfnCreate */
6237 vmdkCreate,
6238 /* pfnRename */
6239 vmdkRename,
6240 /* pfnClose */
6241 vmdkClose,
6242 /* pfnRead */
6243 vmdkRead,
6244 /* pfnWrite */
6245 vmdkWrite,
6246 /* pfnFlush */
6247 vmdkFlush,
6248 /* pfnGetVersion */
6249 vmdkGetVersion,
6250 /* pfnGetSize */
6251 vmdkGetSize,
6252 /* pfnGetFileSize */
6253 vmdkGetFileSize,
6254 /* pfnGetPCHSGeometry */
6255 vmdkGetPCHSGeometry,
6256 /* pfnSetPCHSGeometry */
6257 vmdkSetPCHSGeometry,
6258 /* pfnGetLCHSGeometry */
6259 vmdkGetLCHSGeometry,
6260 /* pfnSetLCHSGeometry */
6261 vmdkSetLCHSGeometry,
6262 /* pfnGetImageFlags */
6263 vmdkGetImageFlags,
6264 /* pfnGetOpenFlags */
6265 vmdkGetOpenFlags,
6266 /* pfnSetOpenFlags */
6267 vmdkSetOpenFlags,
6268 /* pfnGetComment */
6269 vmdkGetComment,
6270 /* pfnSetComment */
6271 vmdkSetComment,
6272 /* pfnGetUuid */
6273 vmdkGetUuid,
6274 /* pfnSetUuid */
6275 vmdkSetUuid,
6276 /* pfnGetModificationUuid */
6277 vmdkGetModificationUuid,
6278 /* pfnSetModificationUuid */
6279 vmdkSetModificationUuid,
6280 /* pfnGetParentUuid */
6281 vmdkGetParentUuid,
6282 /* pfnSetParentUuid */
6283 vmdkSetParentUuid,
6284 /* pfnGetParentModificationUuid */
6285 vmdkGetParentModificationUuid,
6286 /* pfnSetParentModificationUuid */
6287 vmdkSetParentModificationUuid,
6288 /* pfnDump */
6289 vmdkDump,
6290 /* pfnGetTimeStamp */
6291 vmdkGetTimeStamp,
6292 /* pfnGetParentTimeStamp */
6293 vmdkGetParentTimeStamp,
6294 /* pfnSetParentTimeStamp */
6295 vmdkSetParentTimeStamp,
6296 /* pfnGetParentFilename */
6297 vmdkGetParentFilename,
6298 /* pfnSetParentFilename */
6299 vmdkSetParentFilename,
6300 /* pfnIsAsyncIOSupported */
6301 vmdkIsAsyncIOSupported,
6302 /* pfnAsyncRead */
6303 vmdkAsyncRead,
6304 /* pfnAsyncWrite */
6305 vmdkAsyncWrite,
6306 /* pfnAsyncFlush */
6307 vmdkAsyncFlush,
6308 /* pfnComposeLocation */
6309 genericFileComposeLocation,
6310 /* pfnComposeName */
6311 genericFileComposeName
6312};