VirtualBox

source: vbox/trunk/src/VBox/Storage/VMDK.cpp@ 86592

Last change on this file since 86592 was 86298, checked in by vboxsync, 4 years ago

Storage/VMDK: vmdkStrReplace must use RTStrAlloc.

  • Property svn:eol-style set to native
  • Property svn:keywords set to Author Date Id Revision
File size: 311.3 KB
Line 
1/* $Id: VMDK.cpp 86298 2020-09-25 21:07:47Z vboxsync $ */
2/** @file
3 * VMDK disk image, core code.
4 */
5
6/*
7 * Copyright (C) 2006-2020 Oracle Corporation
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 */
17
18
19/*********************************************************************************************************************************
20* Header Files *
21*********************************************************************************************************************************/
22#define LOG_GROUP LOG_GROUP_VD_VMDK
23#include <VBox/log.h> /* before VBox/vd-ifs.h */
24#include <VBox/vd-plugin.h>
25#include <VBox/err.h>
26
27#include <iprt/assert.h>
28#include <iprt/alloc.h>
29#include <iprt/base64.h>
30#include <iprt/ctype.h>
31#include <iprt/crc.h>
32#include <iprt/dvm.h>
33#include <iprt/uuid.h>
34#include <iprt/path.h>
35#include <iprt/rand.h>
36#include <iprt/string.h>
37#include <iprt/sort.h>
38#include <iprt/zip.h>
39#include <iprt/asm.h>
40#ifdef RT_OS_WINDOWS
41# include <iprt/utf16.h>
42# include <iprt/uni.h>
43# include <iprt/uni.h>
44# include <iprt/nt/nt-and-windows.h>
45# include <winioctl.h>
46#endif
47
48#include "VDBackends.h"
49
50
51/*********************************************************************************************************************************
52* Constants And Macros, Structures and Typedefs *
53*********************************************************************************************************************************/
54
/** Maximum encoded string size (including NUL) we allow for VMDK images.
 * Deliberately not set high to avoid running out of descriptor space. */
#define VMDK_ENCODED_COMMENT_MAX 1024

/** VMDK descriptor DDB entry for PCHS cylinders. */
#define VMDK_DDB_GEO_PCHS_CYLINDERS "ddb.geometry.cylinders"

/** VMDK descriptor DDB entry for PCHS heads. */
#define VMDK_DDB_GEO_PCHS_HEADS "ddb.geometry.heads"

/** VMDK descriptor DDB entry for PCHS sectors. */
#define VMDK_DDB_GEO_PCHS_SECTORS "ddb.geometry.sectors"

/** VMDK descriptor DDB entry for LCHS cylinders. */
#define VMDK_DDB_GEO_LCHS_CYLINDERS "ddb.geometry.biosCylinders"

/** VMDK descriptor DDB entry for LCHS heads. */
#define VMDK_DDB_GEO_LCHS_HEADS "ddb.geometry.biosHeads"

/** VMDK descriptor DDB entry for LCHS sectors. */
#define VMDK_DDB_GEO_LCHS_SECTORS "ddb.geometry.biosSectors"

/** VMDK descriptor DDB entry for image UUID. */
#define VMDK_DDB_IMAGE_UUID "ddb.uuid.image"

/** VMDK descriptor DDB entry for image modification UUID. */
#define VMDK_DDB_MODIFICATION_UUID "ddb.uuid.modification"

/** VMDK descriptor DDB entry for parent image UUID. */
#define VMDK_DDB_PARENT_UUID "ddb.uuid.parent"

/** VMDK descriptor DDB entry for parent image modification UUID. */
#define VMDK_DDB_PARENT_MODIFICATION_UUID "ddb.uuid.parentmodification"

/** No compression for streamOptimized files. */
#define VMDK_COMPRESSION_NONE 0

/** Deflate compression for streamOptimized files. */
#define VMDK_COMPRESSION_DEFLATE 1

/** Marker that the actual GD value is stored in the footer (UINT64_MAX). */
#define VMDK_GD_AT_END 0xffffffffffffffffULL

/** Marker for end-of-stream in streamOptimized images. */
#define VMDK_MARKER_EOS 0

/** Marker for grain table block in streamOptimized images. */
#define VMDK_MARKER_GT 1

/** Marker for grain directory block in streamOptimized images. */
#define VMDK_MARKER_GD 2

/** Marker for footer in streamOptimized images. */
#define VMDK_MARKER_FOOTER 3

/** Marker for unknown purpose in streamOptimized images.
 * Shows up in very recent images created by vSphere, but only sporadically.
 * They "forgot" to document that one in the VMDK specification. */
#define VMDK_MARKER_UNSPECIFIED 4

/** Dummy marker for "don't check the marker value". */
#define VMDK_MARKER_IGNORE 0xffffffffU

/**
 * Magic number for hosted images created by VMware Workstation 4, VMware
 * Workstation 5, VMware Server or VMware Player. Not necessarily sparse.
 */
#define VMDK_SPARSE_MAGICNUMBER 0x564d444b /* 'V' 'M' 'D' 'K' */
123
/**
 * VMDK hosted binary extent header. The "Sparse" is a total misnomer, as
 * this header is also used for monolithic flat images.
 *
 * Packed to exactly 512 bytes (one sector), hence the explicit padding.
 * Per the VMDK specification, multi-byte fields are little endian on disk
 * and offsets/sizes are in 512-byte sectors.
 */
#pragma pack(1)
typedef struct SparseExtentHeader
{
    /** Magic number, VMDK_SPARSE_MAGICNUMBER. */
    uint32_t magicNumber;
    /** Format version of this extent. */
    uint32_t version;
    /** Feature flags. */
    uint32_t flags;
    /** Capacity of the extent in sectors. */
    uint64_t capacity;
    /** Grain size in sectors. */
    uint64_t grainSize;
    /** Offset of the embedded descriptor in sectors, 0 if none. */
    uint64_t descriptorOffset;
    /** Size of the embedded descriptor in sectors. */
    uint64_t descriptorSize;
    /** Number of grain table entries per grain table. */
    uint32_t numGTEsPerGT;
    /** Offset of the redundant grain directory in sectors. */
    uint64_t rgdOffset;
    /** Offset of the grain directory in sectors (VMDK_GD_AT_END if it is
     * stored in the footer of a streamOptimized image). */
    uint64_t gdOffset;
    /** Number of metadata (overhead) sectors. */
    uint64_t overHead;
    /** Unclean shutdown flag. */
    bool uncleanShutdown;
    /** The following four bytes detect line-ending mangling (e.g. by an
     * ASCII-mode FTP transfer), per the VMDK specification. */
    char singleEndLineChar;
    char nonEndLineChar;
    char doubleEndLineChar1;
    char doubleEndLineChar2;
    /** Compression algorithm, VMDK_COMPRESSION_NONE/DEFLATE. */
    uint16_t compressAlgorithm;
    /** Padding up to a full 512-byte sector. */
    uint8_t pad[433];
} SparseExtentHeader;
#pragma pack()
151
/** The maximum allowed descriptor size in the extent header in sectors. */
#define VMDK_SPARSE_DESCRIPTOR_SIZE_MAX UINT64_C(20480) /* 10MB */

/** VMDK capacity for a single chunk when 2G splitting is turned on. Should be
 * divisible by the default grain size (64K) */
#define VMDK_2G_SPLIT_SIZE (2047 * 1024 * 1024)

/** VMDK streamOptimized file format marker. The type field may or may not
 * be actually valid, but there's always data to read there.
 *
 * For compressed grains only uSector/cbSize are meaningful and the
 * compressed data starts right at the uType offset (see the inflate/deflate
 * helpers below). */
#pragma pack(1)
typedef struct VMDKMARKER
{
    /** Sector number, little endian on disk. */
    uint64_t uSector;
    /** Size of the following data in bytes, little endian on disk. */
    uint32_t cbSize;
    /** Marker type, VMDK_MARKER_XXX. */
    uint32_t uType;
} VMDKMARKER, *PVMDKMARKER;
#pragma pack()


/** Convert sector number/size to byte offset/size (1 sector = 512 bytes). */
#define VMDK_SECTOR2BYTE(u) ((uint64_t)(u) << 9)

/** Convert byte offset/size to sector number/size. */
#define VMDK_BYTE2SECTOR(u) ((u) >> 9)
/**
 * VMDK extent type, as given by the type column of the descriptor's
 * extent lines.
 */
typedef enum VMDKETYPE
{
    /** Hosted sparse extent. */
    VMDKETYPE_HOSTED_SPARSE = 1,
    /** Flat extent. */
    VMDKETYPE_FLAT,
    /** Zero extent. */
    VMDKETYPE_ZERO,
    /** VMFS extent, used by ESX. */
    VMDKETYPE_VMFS
} VMDKETYPE, *PVMDKETYPE;
191
/**
 * VMDK access type for an extent.
 */
typedef enum VMDKACCESS
{
    /** No access allowed. */
    VMDKACCESS_NOACCESS = 0,
    /** Read-only access. */
    VMDKACCESS_READONLY,
    /** Read-write access. */
    VMDKACCESS_READWRITE
} VMDKACCESS, *PVMDKACCESS;
204
/** Forward declaration for PVMDKIMAGE. */
typedef struct VMDKIMAGE *PVMDKIMAGE;

/**
 * Extents files entry. Used for opening a particular file only once.
 * Entries are kept in a doubly linked list headed by VMDKIMAGE::pFiles.
 */
typedef struct VMDKFILE
{
    /** Pointer to file path. Local copy. */
    const char *pszFilename;
    /** Pointer to base name. Local copy. */
    const char *pszBasename;
    /** File open flags for consistency checking. */
    unsigned fOpen;
    /** Handle for sync/async file abstraction.*/
    PVDIOSTORAGE pStorage;
    /** Reference counter: number of vmdkFileOpen() calls not yet matched
     * by vmdkFileClose(). The entry is destroyed when it reaches zero. */
    unsigned uReferences;
    /** Flag whether the file should be deleted on last close. */
    bool fDelete;
    /** Pointer to the image we belong to (for debugging purposes). */
    PVMDKIMAGE pImage;
    /** Pointer to next file descriptor. */
    struct VMDKFILE *pNext;
    /** Pointer to the previous file descriptor. */
    struct VMDKFILE *pPrev;
} VMDKFILE, *PVMDKFILE;
232
/**
 * VMDK extent data structure.
 *
 * Note: fields counted in "sectors" use 512-byte sectors, see
 * VMDK_SECTOR2BYTE/VMDK_BYTE2SECTOR.
 */
typedef struct VMDKEXTENT
{
    /** File handle. */
    PVMDKFILE pFile;
    /** Base name of the image extent. */
    const char *pszBasename;
    /** Full name of the image extent. */
    const char *pszFullname;
    /** Number of sectors in this extent. */
    uint64_t cSectors;
    /** Number of sectors per block (grain in VMDK speak). */
    uint64_t cSectorsPerGrain;
    /** Starting sector number of descriptor. */
    uint64_t uDescriptorSector;
    /** Size of descriptor in sectors. */
    uint64_t cDescriptorSectors;
    /** Starting sector number of grain directory. */
    uint64_t uSectorGD;
    /** Starting sector number of redundant grain directory. */
    uint64_t uSectorRGD;
    /** Total number of metadata sectors. */
    uint64_t cOverheadSectors;
    /** Nominal size (i.e. as described by the descriptor) of this extent. */
    uint64_t cNominalSectors;
    /** Sector offset (i.e. as described by the descriptor) of this extent. */
    uint64_t uSectorOffset;
    /** Number of entries in a grain table. */
    uint32_t cGTEntries;
    /** Number of sectors reachable via a grain directory entry. */
    uint32_t cSectorsPerGDE;
    /** Number of entries in the grain directory. */
    uint32_t cGDEntries;
    /** Pointer to the next free sector. Legacy information. Do not use. */
    uint32_t uFreeSector;
    /** Number of this extent in the list of images. */
    uint32_t uExtent;
    /** Pointer to the descriptor (NULL if no descriptor in this extent). */
    char *pDescData;
    /** Pointer to the grain directory. */
    uint32_t *pGD;
    /** Pointer to the redundant grain directory. */
    uint32_t *pRGD;
    /** VMDK version of this extent. 1=1.0/1.1 */
    uint32_t uVersion;
    /** Type of this extent. */
    VMDKETYPE enmType;
    /** Access to this extent. */
    VMDKACCESS enmAccess;
    /** Flag whether this extent is marked as unclean. */
    bool fUncleanShutdown;
    /** Flag whether the metadata in the extent header needs to be updated. */
    bool fMetaDirty;
    /** Flag whether there is a footer in this extent. */
    bool fFooter;
    /** Compression type for this extent. */
    uint16_t uCompression;
    /** Append position for writing new grain. Only for sparse extents. */
    uint64_t uAppendPosition;
    /** Last grain which was accessed. Only for streamOptimized extents. */
    uint32_t uLastGrainAccess;
    /** Starting sector corresponding to the grain buffer. */
    uint32_t uGrainSectorAbs;
    /** Grain number corresponding to the grain buffer. */
    uint32_t uGrain;
    /** Actual size of the compressed data, only valid for reading. */
    uint32_t cbGrainStreamRead;
    /** Size of compressed grain buffer for streamOptimized extents. */
    size_t cbCompGrain;
    /** Compressed grain buffer for streamOptimized extents, with marker. */
    void *pvCompGrain;
    /** Decompressed grain buffer for streamOptimized extents. */
    void *pvGrain;
    /** Reference to the image in which this extent is used. Do not use this
     * on a regular basis to avoid passing pImage references to functions
     * explicitly. */
    struct VMDKIMAGE *pImage;
} VMDKEXTENT, *PVMDKEXTENT;
313
/**
 * Grain table cache size. Allocated per image.
 */
#define VMDK_GT_CACHE_SIZE 256

/**
 * Grain table block size. Smaller than an actual grain table block to allow
 * more grain table blocks to be cached without having to allocate excessive
 * amounts of memory for the cache.
 */
#define VMDK_GT_CACHELINE_SIZE 128


/**
 * Maximum number of lines in a descriptor file. Not worth the effort of
 * making it variable. Descriptor files are generally very short (~20 lines),
 * with the exception of sparse files split in 2G chunks, which need for the
 * maximum size (almost 2T) exactly 1025 lines for the disk database.
 */
#define VMDK_DESCRIPTOR_LINES_MAX 1100U
334
/**
 * Parsed descriptor information. Allows easy access and update of the
 * descriptor (whether separate file or not). Free form text files suck.
 */
typedef struct VMDKDESCRIPTOR
{
    /** Line number of first entry of the disk descriptor. */
    unsigned uFirstDesc;
    /** Line number of first entry in the extent description. */
    unsigned uFirstExtent;
    /** Line number of first disk database entry. */
    unsigned uFirstDDB;
    /** Total number of lines (bounded by VMDK_DESCRIPTOR_LINES_MAX). */
    unsigned cLines;
    /** Total amount of memory available for the descriptor. */
    size_t cbDescAlloc;
    /** Set if descriptor has been changed and not yet written to disk. */
    bool fDirty;
    /** Array of pointers to the data in the descriptor. */
    char *aLines[VMDK_DESCRIPTOR_LINES_MAX];
    /** Array of line indices pointing to the next non-comment line. */
    unsigned aNextLines[VMDK_DESCRIPTOR_LINES_MAX];
} VMDKDESCRIPTOR, *PVMDKDESCRIPTOR;
358
359
/**
 * Cache entry for translating extent/sector to a sector number in that
 * extent.
 */
typedef struct VMDKGTCACHEENTRY
{
    /** Extent number for which this entry is valid. */
    uint32_t uExtent;
    /** GT data block number. */
    uint64_t uGTBlock;
    /** Data part of the cache entry (VMDK_GT_CACHELINE_SIZE entries). */
    uint32_t aGTData[VMDK_GT_CACHELINE_SIZE];
} VMDKGTCACHEENTRY, *PVMDKGTCACHEENTRY;
373
/**
 * Cache data structure for blocks of grain table entries. For now this is a
 * fixed size direct mapping cache, but this should be adapted to the size of
 * the sparse image and maybe converted to a set-associative cache. The
 * implementation below implements a write-through cache with write allocate.
 */
typedef struct VMDKGTCACHE
{
    /** Cache entries. */
    VMDKGTCACHEENTRY aGTCache[VMDK_GT_CACHE_SIZE];
    /** Number of cache entries (currently unused). */
    unsigned cEntries;
} VMDKGTCACHE, *PVMDKGTCACHE;
387
/**
 * Complete VMDK image data structure. Mainly a collection of extents and a few
 * extra global data fields.
 */
typedef struct VMDKIMAGE
{
    /** Image name. */
    const char *pszFilename;
    /** Descriptor file if applicable. */
    PVMDKFILE pFile;

    /** Pointer to the per-disk VD interface list. */
    PVDINTERFACE pVDIfsDisk;
    /** Pointer to the per-image VD interface list. */
    PVDINTERFACE pVDIfsImage;

    /** Error interface. */
    PVDINTERFACEERROR pIfError;
    /** I/O interface. */
    PVDINTERFACEIOINT pIfIo;


    /** Pointer to the image extents. */
    PVMDKEXTENT pExtents;
    /** Number of image extents. */
    unsigned cExtents;
    /** Pointer to the files list, for opening a file referenced multiple
     * times only once (happens mainly with raw partition access).
     * Maintained by vmdkFileOpen()/vmdkFileClose(). */
    PVMDKFILE pFiles;

    /**
     * Pointer to an array of segment entries for async I/O.
     * This is an optimization because the task number to submit is not known
     * and allocating/freeing an array in the read/write functions every time
     * is too expensive.
     */
    PPDMDATASEG paSegments;
    /** Entries available in the segments array. */
    unsigned cSegments;

    /** Open flags passed by VBoxHD layer. */
    unsigned uOpenFlags;
    /** Image flags defined during creation or determined during open. */
    unsigned uImageFlags;
    /** Total size of the image in bytes. */
    uint64_t cbSize;
    /** Physical geometry of this image. */
    VDGEOMETRY PCHSGeometry;
    /** Logical geometry of this image. */
    VDGEOMETRY LCHSGeometry;
    /** Image UUID. */
    RTUUID ImageUuid;
    /** Image modification UUID. */
    RTUUID ModificationUuid;
    /** Parent image UUID. */
    RTUUID ParentUuid;
    /** Parent image modification UUID. */
    RTUUID ParentModificationUuid;

    /** Pointer to grain table cache, if this image contains sparse extents. */
    PVMDKGTCACHE pGTCache;
    /** Pointer to the descriptor (NULL if no separate descriptor file). */
    char *pDescData;
    /** Allocation size of the descriptor file. */
    size_t cbDescAlloc;
    /** Parsed descriptor file content. */
    VMDKDESCRIPTOR Descriptor;
    /** The static region list. */
    VDREGIONLIST RegionList;
} VMDKIMAGE;
458
459
/** State for the input/output callout of the inflate reader/deflate writer. */
typedef struct VMDKCOMPRESSIO
{
    /** Image this operation relates to. */
    PVMDKIMAGE pImage;
    /** Current read/write position inside pvCompGrain. Starts at -1 as a
     * sentinel until the leading zlib stream-type byte has been handled by
     * the helper callbacks. */
    ssize_t iOffset;
    /** Size of the compressed grain buffer (available data). */
    size_t cbCompGrain;
    /** Pointer to the compressed grain buffer. */
    void *pvCompGrain;
} VMDKCOMPRESSIO;
472
473
/** Tracks async grain allocation. */
typedef struct VMDKGRAINALLOCASYNC
{
    /** Flag whether the allocation failed. */
    bool fIoErr;
    /** Current number of transfers pending.
     * If reached 0 and there is an error the old state is restored. */
    unsigned cIoXfersPending;
    /** Sector number. */
    uint64_t uSector;
    /** Flag whether the grain table needs to be updated. */
    bool fGTUpdateNeeded;
    /** Extent the allocation happens in. */
    PVMDKEXTENT pExtent;
    /** Position of the new grain, required for the grain table update. */
    uint64_t uGrainOffset;
    /** Grain table sector. */
    uint64_t uGTSector;
    /** Backup grain table sector. */
    uint64_t uRGTSector;
} VMDKGRAINALLOCASYNC, *PVMDKGRAINALLOCASYNC;
495
/**
 * State information for vmdkRename() and helpers.
 */
typedef struct VMDKRENAMESTATE
{
    /** Array of old filenames. */
    char **apszOldName;
    /** Array of new filenames. */
    char **apszNewName;
    /** Array of new lines in the extent descriptor. */
    char **apszNewLines;
    /** Name of the old descriptor file if not a sparse image. */
    char *pszOldDescName;
    /** Flag whether we called vmdkFreeImage(). */
    bool fImageFreed;
    /** Flag whether the descriptor is embedded in the image (sparse) or
     * in a separate file. */
    bool fEmbeddedDesc;
    /** Number of extents in the image. */
    unsigned cExtents;
    /** New base filename. */
    char *pszNewBaseName;
    /** The old base filename. */
    char *pszOldBaseName;
    /** New full filename. */
    char *pszNewFullName;
    /** Old full filename. */
    char *pszOldFullName;
    /** The old image name. */
    const char *pszOldImageName;
    /** Copy of the original VMDK descriptor. */
    VMDKDESCRIPTOR DescriptorCopy;
    /** Copy of the extent state for sparse images. */
    VMDKEXTENT ExtentCopy;
} VMDKRENAMESTATE;
/** Pointer to a VMDK rename state. */
typedef VMDKRENAMESTATE *PVMDKRENAMESTATE;
533
534
535/*********************************************************************************************************************************
536* Static Variables *
537*********************************************************************************************************************************/
538
/** NULL-terminated array of supported file extensions. */
static const VDFILEEXTENSION s_aVmdkFileExtensions[] =
{
    {"vmdk", VDTYPE_HDD},
    {NULL, VDTYPE_INVALID}
};

/** NULL-terminated array of configuration options. */
static const VDCONFIGINFO s_aVmdkConfigInfo[] =
{
    /* Options for VMDK raw disks */
    { "RawDrive", NULL, VDCFGVALUETYPE_STRING, 0 },
    { "Partitions", NULL, VDCFGVALUETYPE_STRING, 0 },
    { "BootSector", NULL, VDCFGVALUETYPE_BYTES, 0 },
    { "Relative", NULL, VDCFGVALUETYPE_INTEGER, 0 },

    /* End of options list */
    { NULL, NULL, VDCFGVALUETYPE_INTEGER, 0 }
};
558
559
560/*********************************************************************************************************************************
561* Internal Functions *
562*********************************************************************************************************************************/
563
564static void vmdkFreeStreamBuffers(PVMDKEXTENT pExtent);
565static int vmdkFreeExtentData(PVMDKIMAGE pImage, PVMDKEXTENT pExtent,
566 bool fDelete);
567
568static int vmdkCreateExtents(PVMDKIMAGE pImage, unsigned cExtents);
569static int vmdkFlushImage(PVMDKIMAGE pImage, PVDIOCTX pIoCtx);
570static int vmdkSetImageComment(PVMDKIMAGE pImage, const char *pszComment);
571static int vmdkFreeImage(PVMDKIMAGE pImage, bool fDelete, bool fFlush);
572
573static DECLCALLBACK(int) vmdkAllocGrainComplete(void *pBackendData, PVDIOCTX pIoCtx,
574 void *pvUser, int rcReq);
575
576/**
577 * Internal: open a file (using a file descriptor cache to ensure each file
578 * is only opened once - anything else can cause locking problems).
579 */
580static int vmdkFileOpen(PVMDKIMAGE pImage, PVMDKFILE *ppVmdkFile,
581 const char *pszBasename, const char *pszFilename, uint32_t fOpen)
582{
583 int rc = VINF_SUCCESS;
584 PVMDKFILE pVmdkFile;
585
586 for (pVmdkFile = pImage->pFiles;
587 pVmdkFile != NULL;
588 pVmdkFile = pVmdkFile->pNext)
589 {
590 if (!strcmp(pszFilename, pVmdkFile->pszFilename))
591 {
592 Assert(fOpen == pVmdkFile->fOpen);
593 pVmdkFile->uReferences++;
594
595 *ppVmdkFile = pVmdkFile;
596
597 return rc;
598 }
599 }
600
601 /* If we get here, there's no matching entry in the cache. */
602 pVmdkFile = (PVMDKFILE)RTMemAllocZ(sizeof(VMDKFILE));
603 if (!pVmdkFile)
604 {
605 *ppVmdkFile = NULL;
606 return VERR_NO_MEMORY;
607 }
608
609 pVmdkFile->pszFilename = RTStrDup(pszFilename);
610 if (!pVmdkFile->pszFilename)
611 {
612 RTMemFree(pVmdkFile);
613 *ppVmdkFile = NULL;
614 return VERR_NO_MEMORY;
615 }
616
617 if (pszBasename)
618 {
619 pVmdkFile->pszBasename = RTStrDup(pszBasename);
620 if (!pVmdkFile->pszBasename)
621 {
622 RTStrFree((char *)(void *)pVmdkFile->pszFilename);
623 RTMemFree(pVmdkFile);
624 *ppVmdkFile = NULL;
625 return VERR_NO_MEMORY;
626 }
627 }
628
629 pVmdkFile->fOpen = fOpen;
630
631 rc = vdIfIoIntFileOpen(pImage->pIfIo, pszFilename, fOpen,
632 &pVmdkFile->pStorage);
633 if (RT_SUCCESS(rc))
634 {
635 pVmdkFile->uReferences = 1;
636 pVmdkFile->pImage = pImage;
637 pVmdkFile->pNext = pImage->pFiles;
638 if (pImage->pFiles)
639 pImage->pFiles->pPrev = pVmdkFile;
640 pImage->pFiles = pVmdkFile;
641 *ppVmdkFile = pVmdkFile;
642 }
643 else
644 {
645 RTStrFree((char *)(void *)pVmdkFile->pszFilename);
646 RTMemFree(pVmdkFile);
647 *ppVmdkFile = NULL;
648 }
649
650 return rc;
651}
652
/**
 * Internal: close a file, updating the file descriptor cache.
 *
 * Drops one reference; the underlying storage handle is only closed (and the
 * file possibly deleted) once the last reference is released.
 *
 * @returns VBox status code.
 * @param   pImage      The image the file belongs to.
 * @param   ppVmdkFile  Handle to close; always set to NULL on return.
 * @param   fDelete     Whether to request deletion of the file on last close.
 */
static int vmdkFileClose(PVMDKIMAGE pImage, PVMDKFILE *ppVmdkFile, bool fDelete)
{
    int rc = VINF_SUCCESS;
    PVMDKFILE pVmdkFile = *ppVmdkFile;

    AssertPtr(pVmdkFile);

    /* A deletion request is sticky across references. */
    pVmdkFile->fDelete |= fDelete;
    Assert(pVmdkFile->uReferences);
    pVmdkFile->uReferences--;
    if (pVmdkFile->uReferences == 0)
    {
        PVMDKFILE pPrev;
        PVMDKFILE pNext;

        /* Unchain the element from the list. */
        pPrev = pVmdkFile->pPrev;
        pNext = pVmdkFile->pNext;

        if (pNext)
            pNext->pPrev = pPrev;
        if (pPrev)
            pPrev->pNext = pNext;
        else
            pImage->pFiles = pNext;

        rc = vdIfIoIntFileClose(pImage->pIfIo, pVmdkFile->pStorage);

        /* Safety net: only honor the deletion request for plain basenames
         * with a known image suffix, to avoid removing unrelated files. */
        bool fFileDel = pVmdkFile->fDelete;
        if (   pVmdkFile->pszBasename
            && fFileDel)
        {
            const char *pszSuffix = RTPathSuffix(pVmdkFile->pszBasename);
            if (   RTPathHasPath(pVmdkFile->pszBasename)
                || !pszSuffix
                || (   strcmp(pszSuffix, ".vmdk")
                    && strcmp(pszSuffix, ".bin")
                    && strcmp(pszSuffix, ".img")))
                fFileDel = false;
        }

        if (fFileDel)
        {
            int rc2 = vdIfIoIntFileDelete(pImage->pIfIo, pVmdkFile->pszFilename);
            if (RT_SUCCESS(rc))
                rc = rc2;
        }
        else if (pVmdkFile->fDelete)
            LogRel(("VMDK: Denying deletion of %s\n", pVmdkFile->pszBasename));
        RTStrFree((char *)(void *)pVmdkFile->pszFilename);
        if (pVmdkFile->pszBasename)
            RTStrFree((char *)(void *)pVmdkFile->pszBasename);
        RTMemFree(pVmdkFile);
    }

    *ppVmdkFile = NULL;
    return rc;
}
714
715/*#define VMDK_USE_BLOCK_DECOMP_API - test and enable */
716#ifndef VMDK_USE_BLOCK_DECOMP_API
717static DECLCALLBACK(int) vmdkFileInflateHelper(void *pvUser, void *pvBuf, size_t cbBuf, size_t *pcbBuf)
718{
719 VMDKCOMPRESSIO *pInflateState = (VMDKCOMPRESSIO *)pvUser;
720 size_t cbInjected = 0;
721
722 Assert(cbBuf);
723 if (pInflateState->iOffset < 0)
724 {
725 *(uint8_t *)pvBuf = RTZIPTYPE_ZLIB;
726 pvBuf = (uint8_t *)pvBuf + 1;
727 cbBuf--;
728 cbInjected = 1;
729 pInflateState->iOffset = RT_UOFFSETOF(VMDKMARKER, uType);
730 }
731 if (!cbBuf)
732 {
733 if (pcbBuf)
734 *pcbBuf = cbInjected;
735 return VINF_SUCCESS;
736 }
737 cbBuf = RT_MIN(cbBuf, pInflateState->cbCompGrain - pInflateState->iOffset);
738 memcpy(pvBuf,
739 (uint8_t *)pInflateState->pvCompGrain + pInflateState->iOffset,
740 cbBuf);
741 pInflateState->iOffset += cbBuf;
742 Assert(pcbBuf);
743 *pcbBuf = cbBuf + cbInjected;
744 return VINF_SUCCESS;
745}
746#endif
747
748/**
749 * Internal: read from a file and inflate the compressed data,
750 * distinguishing between async and normal operation
751 */
752DECLINLINE(int) vmdkFileInflateSync(PVMDKIMAGE pImage, PVMDKEXTENT pExtent,
753 uint64_t uOffset, void *pvBuf,
754 size_t cbToRead, const void *pcvMarker,
755 uint64_t *puLBA, uint32_t *pcbMarkerData)
756{
757 int rc;
758#ifndef VMDK_USE_BLOCK_DECOMP_API
759 PRTZIPDECOMP pZip = NULL;
760#endif
761 VMDKMARKER *pMarker = (VMDKMARKER *)pExtent->pvCompGrain;
762 size_t cbCompSize, cbActuallyRead;
763
764 if (!pcvMarker)
765 {
766 rc = vdIfIoIntFileReadSync(pImage->pIfIo, pExtent->pFile->pStorage,
767 uOffset, pMarker, RT_UOFFSETOF(VMDKMARKER, uType));
768 if (RT_FAILURE(rc))
769 return rc;
770 }
771 else
772 {
773 memcpy(pMarker, pcvMarker, RT_UOFFSETOF(VMDKMARKER, uType));
774 /* pcvMarker endianness has already been partially transformed, fix it */
775 pMarker->uSector = RT_H2LE_U64(pMarker->uSector);
776 pMarker->cbSize = RT_H2LE_U32(pMarker->cbSize);
777 }
778
779 cbCompSize = RT_LE2H_U32(pMarker->cbSize);
780 if (cbCompSize == 0)
781 {
782 AssertMsgFailed(("VMDK: corrupted marker\n"));
783 return VERR_VD_VMDK_INVALID_FORMAT;
784 }
785
786 /* Sanity check - the expansion ratio should be much less than 2. */
787 Assert(cbCompSize < 2 * cbToRead);
788 if (cbCompSize >= 2 * cbToRead)
789 return VERR_VD_VMDK_INVALID_FORMAT;
790
791 /* Compressed grain marker. Data follows immediately. */
792 rc = vdIfIoIntFileReadSync(pImage->pIfIo, pExtent->pFile->pStorage,
793 uOffset + RT_UOFFSETOF(VMDKMARKER, uType),
794 (uint8_t *)pExtent->pvCompGrain
795 + RT_UOFFSETOF(VMDKMARKER, uType),
796 RT_ALIGN_Z( cbCompSize
797 + RT_UOFFSETOF(VMDKMARKER, uType),
798 512)
799 - RT_UOFFSETOF(VMDKMARKER, uType));
800
801 if (puLBA)
802 *puLBA = RT_LE2H_U64(pMarker->uSector);
803 if (pcbMarkerData)
804 *pcbMarkerData = RT_ALIGN( cbCompSize
805 + RT_UOFFSETOF(VMDKMARKER, uType),
806 512);
807
808#ifdef VMDK_USE_BLOCK_DECOMP_API
809 rc = RTZipBlockDecompress(RTZIPTYPE_ZLIB, 0 /*fFlags*/,
810 pExtent->pvCompGrain, cbCompSize + RT_UOFFSETOF(VMDKMARKER, uType), NULL,
811 pvBuf, cbToRead, &cbActuallyRead);
812#else
813 VMDKCOMPRESSIO InflateState;
814 InflateState.pImage = pImage;
815 InflateState.iOffset = -1;
816 InflateState.cbCompGrain = cbCompSize + RT_UOFFSETOF(VMDKMARKER, uType);
817 InflateState.pvCompGrain = pExtent->pvCompGrain;
818
819 rc = RTZipDecompCreate(&pZip, &InflateState, vmdkFileInflateHelper);
820 if (RT_FAILURE(rc))
821 return rc;
822 rc = RTZipDecompress(pZip, pvBuf, cbToRead, &cbActuallyRead);
823 RTZipDecompDestroy(pZip);
824#endif /* !VMDK_USE_BLOCK_DECOMP_API */
825 if (RT_FAILURE(rc))
826 {
827 if (rc == VERR_ZIP_CORRUPTED)
828 rc = vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: Compressed image is corrupted '%s'"), pExtent->pszFullname);
829 return rc;
830 }
831 if (cbActuallyRead != cbToRead)
832 rc = VERR_VD_VMDK_INVALID_FORMAT;
833 return rc;
834}
835
836static DECLCALLBACK(int) vmdkFileDeflateHelper(void *pvUser, const void *pvBuf, size_t cbBuf)
837{
838 VMDKCOMPRESSIO *pDeflateState = (VMDKCOMPRESSIO *)pvUser;
839
840 Assert(cbBuf);
841 if (pDeflateState->iOffset < 0)
842 {
843 pvBuf = (const uint8_t *)pvBuf + 1;
844 cbBuf--;
845 pDeflateState->iOffset = RT_UOFFSETOF(VMDKMARKER, uType);
846 }
847 if (!cbBuf)
848 return VINF_SUCCESS;
849 if (pDeflateState->iOffset + cbBuf > pDeflateState->cbCompGrain)
850 return VERR_BUFFER_OVERFLOW;
851 memcpy((uint8_t *)pDeflateState->pvCompGrain + pDeflateState->iOffset,
852 pvBuf, cbBuf);
853 pDeflateState->iOffset += cbBuf;
854 return VINF_SUCCESS;
855}
856
/**
 * Internal: deflate the uncompressed data and write to a file,
 * distinguishing between async and normal operation
 *
 * @returns VBox status code.
 * @param   pImage          The image.
 * @param   pExtent         The streamOptimized extent to write to.
 * @param   uOffset         Byte offset in the file to write the marker at.
 * @param   pvBuf           The uncompressed grain data.
 * @param   cbToWrite       Size of the uncompressed data in bytes.
 * @param   uLBA            Grain LBA to store in the marker.
 * @param   pcbMarkerData   Where to return the sector-aligned number of bytes
 *                          written (marker header + compressed data). Optional.
 */
DECLINLINE(int) vmdkFileDeflateSync(PVMDKIMAGE pImage, PVMDKEXTENT pExtent,
                                    uint64_t uOffset, const void *pvBuf,
                                    size_t cbToWrite, uint64_t uLBA,
                                    uint32_t *pcbMarkerData)
{
    int rc;
    PRTZIPCOMP pZip = NULL;
    VMDKCOMPRESSIO DeflateState;

    DeflateState.pImage = pImage;
    /* -1 makes vmdkFileDeflateHelper strip the leading stream-type byte. */
    DeflateState.iOffset = -1;
    DeflateState.cbCompGrain = pExtent->cbCompGrain;
    DeflateState.pvCompGrain = pExtent->pvCompGrain;

    rc = RTZipCompCreate(&pZip, &DeflateState, vmdkFileDeflateHelper,
                         RTZIPTYPE_ZLIB, RTZIPLEVEL_DEFAULT);
    if (RT_FAILURE(rc))
        return rc;
    rc = RTZipCompress(pZip, pvBuf, cbToWrite);
    if (RT_SUCCESS(rc))
        rc = RTZipCompFinish(pZip);
    RTZipCompDestroy(pZip);
    if (RT_SUCCESS(rc))
    {
        /* The helper advanced iOffset past marker header + compressed data. */
        Assert(   DeflateState.iOffset > 0
               && (size_t)DeflateState.iOffset <= DeflateState.cbCompGrain);

        /* pad with zeroes to get to a full sector size */
        uint32_t uSize = DeflateState.iOffset;
        if (uSize % 512)
        {
            uint32_t uSizeAlign = RT_ALIGN(uSize, 512);
            memset((uint8_t *)pExtent->pvCompGrain + uSize, '\0',
                   uSizeAlign - uSize);
            uSize = uSizeAlign;
        }

        if (pcbMarkerData)
            *pcbMarkerData = uSize;

        /* Compressed grain marker. Data follows immediately. Note that only
         * uSector and cbSize of the marker are written; the compressed data
         * itself occupies the space starting at the uType offset. */
        VMDKMARKER *pMarker = (VMDKMARKER *)pExtent->pvCompGrain;
        pMarker->uSector = RT_H2LE_U64(uLBA);
        pMarker->cbSize = RT_H2LE_U32(  DeflateState.iOffset
                                      - RT_UOFFSETOF(VMDKMARKER, uType));
        rc = vdIfIoIntFileWriteSync(pImage->pIfIo, pExtent->pFile->pStorage,
                                    uOffset, pMarker, uSize);
        if (RT_FAILURE(rc))
            return rc;
    }
    return rc;
}
913
914
915/**
916 * Internal: check if all files are closed, prevent leaking resources.
917 */
918static int vmdkFileCheckAllClose(PVMDKIMAGE pImage)
919{
920 int rc = VINF_SUCCESS, rc2;
921 PVMDKFILE pVmdkFile;
922
923 Assert(pImage->pFiles == NULL);
924 for (pVmdkFile = pImage->pFiles;
925 pVmdkFile != NULL;
926 pVmdkFile = pVmdkFile->pNext)
927 {
928 LogRel(("VMDK: leaking reference to file \"%s\"\n",
929 pVmdkFile->pszFilename));
930 pImage->pFiles = pVmdkFile->pNext;
931
932 rc2 = vmdkFileClose(pImage, &pVmdkFile, pVmdkFile->fDelete);
933
934 if (RT_SUCCESS(rc))
935 rc = rc2;
936 }
937 return rc;
938}
939
940/**
941 * Internal: truncate a string (at a UTF8 code point boundary) and encode the
942 * critical non-ASCII characters.
943 */
944static char *vmdkEncodeString(const char *psz)
945{
946 char szEnc[VMDK_ENCODED_COMMENT_MAX + 3];
947 char *pszDst = szEnc;
948
949 AssertPtr(psz);
950
951 for (; *psz; psz = RTStrNextCp(psz))
952 {
953 char *pszDstPrev = pszDst;
954 RTUNICP Cp = RTStrGetCp(psz);
955 if (Cp == '\\')
956 {
957 pszDst = RTStrPutCp(pszDst, Cp);
958 pszDst = RTStrPutCp(pszDst, Cp);
959 }
960 else if (Cp == '\n')
961 {
962 pszDst = RTStrPutCp(pszDst, '\\');
963 pszDst = RTStrPutCp(pszDst, 'n');
964 }
965 else if (Cp == '\r')
966 {
967 pszDst = RTStrPutCp(pszDst, '\\');
968 pszDst = RTStrPutCp(pszDst, 'r');
969 }
970 else
971 pszDst = RTStrPutCp(pszDst, Cp);
972 if (pszDst - szEnc >= VMDK_ENCODED_COMMENT_MAX - 1)
973 {
974 pszDst = pszDstPrev;
975 break;
976 }
977 }
978 *pszDst = '\0';
979 return RTStrDup(szEnc);
980}
981
982/**
983 * Internal: decode a string and store it into the specified string.
984 */
985static int vmdkDecodeString(const char *pszEncoded, char *psz, size_t cb)
986{
987 int rc = VINF_SUCCESS;
988 char szBuf[4];
989
990 if (!cb)
991 return VERR_BUFFER_OVERFLOW;
992
993 AssertPtr(psz);
994
995 for (; *pszEncoded; pszEncoded = RTStrNextCp(pszEncoded))
996 {
997 char *pszDst = szBuf;
998 RTUNICP Cp = RTStrGetCp(pszEncoded);
999 if (Cp == '\\')
1000 {
1001 pszEncoded = RTStrNextCp(pszEncoded);
1002 RTUNICP CpQ = RTStrGetCp(pszEncoded);
1003 if (CpQ == 'n')
1004 RTStrPutCp(pszDst, '\n');
1005 else if (CpQ == 'r')
1006 RTStrPutCp(pszDst, '\r');
1007 else if (CpQ == '\0')
1008 {
1009 rc = VERR_VD_VMDK_INVALID_HEADER;
1010 break;
1011 }
1012 else
1013 RTStrPutCp(pszDst, CpQ);
1014 }
1015 else
1016 pszDst = RTStrPutCp(pszDst, Cp);
1017
1018 /* Need to leave space for terminating NUL. */
1019 if ((size_t)(pszDst - szBuf) + 1 >= cb)
1020 {
1021 rc = VERR_BUFFER_OVERFLOW;
1022 break;
1023 }
1024 memcpy(psz, szBuf, pszDst - szBuf);
1025 psz += pszDst - szBuf;
1026 }
1027 *psz = '\0';
1028 return rc;
1029}
1030
1031/**
1032 * Internal: free all buffers associated with grain directories.
1033 */
1034static void vmdkFreeGrainDirectory(PVMDKEXTENT pExtent)
1035{
1036 if (pExtent->pGD)
1037 {
1038 RTMemFree(pExtent->pGD);
1039 pExtent->pGD = NULL;
1040 }
1041 if (pExtent->pRGD)
1042 {
1043 RTMemFree(pExtent->pRGD);
1044 pExtent->pRGD = NULL;
1045 }
1046}
1047
1048/**
1049 * Internal: allocate the compressed/uncompressed buffers for streamOptimized
1050 * images.
1051 */
1052static int vmdkAllocStreamBuffers(PVMDKIMAGE pImage, PVMDKEXTENT pExtent)
1053{
1054 int rc = VINF_SUCCESS;
1055
1056 if (pImage->uImageFlags & VD_VMDK_IMAGE_FLAGS_STREAM_OPTIMIZED)
1057 {
1058 /* streamOptimized extents need a compressed grain buffer, which must
1059 * be big enough to hold uncompressible data (which needs ~8 bytes
1060 * more than the uncompressed data), the marker and padding. */
1061 pExtent->cbCompGrain = RT_ALIGN_Z( VMDK_SECTOR2BYTE(pExtent->cSectorsPerGrain)
1062 + 8 + sizeof(VMDKMARKER), 512);
1063 pExtent->pvCompGrain = RTMemAlloc(pExtent->cbCompGrain);
1064 if (RT_LIKELY(pExtent->pvCompGrain))
1065 {
1066 /* streamOptimized extents need a decompressed grain buffer. */
1067 pExtent->pvGrain = RTMemAlloc(VMDK_SECTOR2BYTE(pExtent->cSectorsPerGrain));
1068 if (!pExtent->pvGrain)
1069 rc = VERR_NO_MEMORY;
1070 }
1071 else
1072 rc = VERR_NO_MEMORY;
1073 }
1074
1075 if (RT_FAILURE(rc))
1076 vmdkFreeStreamBuffers(pExtent);
1077 return rc;
1078}
1079
1080/**
1081 * Internal: allocate all buffers associated with grain directories.
1082 */
1083static int vmdkAllocGrainDirectory(PVMDKIMAGE pImage, PVMDKEXTENT pExtent)
1084{
1085 RT_NOREF1(pImage);
1086 int rc = VINF_SUCCESS;
1087 size_t cbGD = pExtent->cGDEntries * sizeof(uint32_t);
1088
1089 pExtent->pGD = (uint32_t *)RTMemAllocZ(cbGD);
1090 if (RT_LIKELY(pExtent->pGD))
1091 {
1092 if (pExtent->uSectorRGD)
1093 {
1094 pExtent->pRGD = (uint32_t *)RTMemAllocZ(cbGD);
1095 if (RT_UNLIKELY(!pExtent->pRGD))
1096 rc = VERR_NO_MEMORY;
1097 }
1098 }
1099 else
1100 rc = VERR_NO_MEMORY;
1101
1102 if (RT_FAILURE(rc))
1103 vmdkFreeGrainDirectory(pExtent);
1104 return rc;
1105}
1106
1107/**
1108 * Converts the grain directory from little to host endianess.
1109 *
1110 * @returns nothing.
1111 * @param pGD The grain directory.
1112 * @param cGDEntries Number of entries in the grain directory to convert.
1113 */
1114DECLINLINE(void) vmdkGrainDirectoryConvToHost(uint32_t *pGD, uint32_t cGDEntries)
1115{
1116 uint32_t *pGDTmp = pGD;
1117
1118 for (uint32_t i = 0; i < cGDEntries; i++, pGDTmp++)
1119 *pGDTmp = RT_LE2H_U32(*pGDTmp);
1120}
1121
1122/**
1123 * Read the grain directory and allocated grain tables verifying them against
1124 * their back up copies if available.
1125 *
1126 * @returns VBox status code.
1127 * @param pImage Image instance data.
1128 * @param pExtent The VMDK extent.
1129 */
1130static int vmdkReadGrainDirectory(PVMDKIMAGE pImage, PVMDKEXTENT pExtent)
1131{
1132 int rc = VINF_SUCCESS;
1133 size_t cbGD = pExtent->cGDEntries * sizeof(uint32_t);
1134
1135 AssertReturn(( pExtent->enmType == VMDKETYPE_HOSTED_SPARSE
1136 && pExtent->uSectorGD != VMDK_GD_AT_END
1137 && pExtent->uSectorRGD != VMDK_GD_AT_END), VERR_INTERNAL_ERROR);
1138
1139 rc = vmdkAllocGrainDirectory(pImage, pExtent);
1140 if (RT_SUCCESS(rc))
1141 {
1142 /* The VMDK 1.1 spec seems to talk about compressed grain directories,
1143 * but in reality they are not compressed. */
1144 rc = vdIfIoIntFileReadSync(pImage->pIfIo, pExtent->pFile->pStorage,
1145 VMDK_SECTOR2BYTE(pExtent->uSectorGD),
1146 pExtent->pGD, cbGD);
1147 if (RT_SUCCESS(rc))
1148 {
1149 vmdkGrainDirectoryConvToHost(pExtent->pGD, pExtent->cGDEntries);
1150
1151 if ( pExtent->uSectorRGD
1152 && !(pImage->uOpenFlags & VD_OPEN_FLAGS_SKIP_CONSISTENCY_CHECKS))
1153 {
1154 /* The VMDK 1.1 spec seems to talk about compressed grain directories,
1155 * but in reality they are not compressed. */
1156 rc = vdIfIoIntFileReadSync(pImage->pIfIo, pExtent->pFile->pStorage,
1157 VMDK_SECTOR2BYTE(pExtent->uSectorRGD),
1158 pExtent->pRGD, cbGD);
1159 if (RT_SUCCESS(rc))
1160 {
1161 vmdkGrainDirectoryConvToHost(pExtent->pRGD, pExtent->cGDEntries);
1162
1163 /* Check grain table and redundant grain table for consistency. */
1164 size_t cbGT = pExtent->cGTEntries * sizeof(uint32_t);
1165 size_t cbGTBuffers = cbGT; /* Start with space for one GT. */
1166 size_t cbGTBuffersMax = _1M;
1167
1168 uint32_t *pTmpGT1 = (uint32_t *)RTMemAlloc(cbGTBuffers);
1169 uint32_t *pTmpGT2 = (uint32_t *)RTMemAlloc(cbGTBuffers);
1170
1171 if ( !pTmpGT1
1172 || !pTmpGT2)
1173 rc = VERR_NO_MEMORY;
1174
1175 size_t i = 0;
1176 uint32_t *pGDTmp = pExtent->pGD;
1177 uint32_t *pRGDTmp = pExtent->pRGD;
1178
1179 /* Loop through all entries. */
1180 while (i < pExtent->cGDEntries)
1181 {
1182 uint32_t uGTStart = *pGDTmp;
1183 uint32_t uRGTStart = *pRGDTmp;
1184 size_t cbGTRead = cbGT;
1185
1186 /* If no grain table is allocated skip the entry. */
1187 if (*pGDTmp == 0 && *pRGDTmp == 0)
1188 {
1189 i++;
1190 continue;
1191 }
1192
1193 if (*pGDTmp == 0 || *pRGDTmp == 0 || *pGDTmp == *pRGDTmp)
1194 {
1195 /* Just one grain directory entry refers to a not yet allocated
1196 * grain table or both grain directory copies refer to the same
1197 * grain table. Not allowed. */
1198 rc = vdIfError(pImage->pIfError, VERR_VD_VMDK_INVALID_HEADER, RT_SRC_POS,
1199 N_("VMDK: inconsistent references to grain directory in '%s'"), pExtent->pszFullname);
1200 break;
1201 }
1202
1203 i++;
1204 pGDTmp++;
1205 pRGDTmp++;
1206
1207 /*
1208 * Read a few tables at once if adjacent to decrease the number
1209 * of I/O requests. Read at maximum 1MB at once.
1210 */
1211 while ( i < pExtent->cGDEntries
1212 && cbGTRead < cbGTBuffersMax)
1213 {
1214 /* If no grain table is allocated skip the entry. */
1215 if (*pGDTmp == 0 && *pRGDTmp == 0)
1216 {
1217 i++;
1218 continue;
1219 }
1220
1221 if (*pGDTmp == 0 || *pRGDTmp == 0 || *pGDTmp == *pRGDTmp)
1222 {
1223 /* Just one grain directory entry refers to a not yet allocated
1224 * grain table or both grain directory copies refer to the same
1225 * grain table. Not allowed. */
1226 rc = vdIfError(pImage->pIfError, VERR_VD_VMDK_INVALID_HEADER, RT_SRC_POS,
1227 N_("VMDK: inconsistent references to grain directory in '%s'"), pExtent->pszFullname);
1228 break;
1229 }
1230
1231 /* Check that the start offsets are adjacent.*/
1232 if ( VMDK_SECTOR2BYTE(uGTStart) + cbGTRead != VMDK_SECTOR2BYTE(*pGDTmp)
1233 || VMDK_SECTOR2BYTE(uRGTStart) + cbGTRead != VMDK_SECTOR2BYTE(*pRGDTmp))
1234 break;
1235
1236 i++;
1237 pGDTmp++;
1238 pRGDTmp++;
1239 cbGTRead += cbGT;
1240 }
1241
1242 /* Increase buffers if required. */
1243 if ( RT_SUCCESS(rc)
1244 && cbGTBuffers < cbGTRead)
1245 {
1246 uint32_t *pTmp;
1247 pTmp = (uint32_t *)RTMemRealloc(pTmpGT1, cbGTRead);
1248 if (pTmp)
1249 {
1250 pTmpGT1 = pTmp;
1251 pTmp = (uint32_t *)RTMemRealloc(pTmpGT2, cbGTRead);
1252 if (pTmp)
1253 pTmpGT2 = pTmp;
1254 else
1255 rc = VERR_NO_MEMORY;
1256 }
1257 else
1258 rc = VERR_NO_MEMORY;
1259
1260 if (rc == VERR_NO_MEMORY)
1261 {
1262 /* Reset to the old values. */
1263 rc = VINF_SUCCESS;
1264 i -= cbGTRead / cbGT;
1265 cbGTRead = cbGT;
1266
1267 /* Don't try to increase the buffer again in the next run. */
1268 cbGTBuffersMax = cbGTBuffers;
1269 }
1270 }
1271
1272 if (RT_SUCCESS(rc))
1273 {
1274 /* The VMDK 1.1 spec seems to talk about compressed grain tables,
1275 * but in reality they are not compressed. */
1276 rc = vdIfIoIntFileReadSync(pImage->pIfIo, pExtent->pFile->pStorage,
1277 VMDK_SECTOR2BYTE(uGTStart),
1278 pTmpGT1, cbGTRead);
1279 if (RT_FAILURE(rc))
1280 {
1281 rc = vdIfError(pImage->pIfError, rc, RT_SRC_POS,
1282 N_("VMDK: error reading grain table in '%s'"), pExtent->pszFullname);
1283 break;
1284 }
1285 /* The VMDK 1.1 spec seems to talk about compressed grain tables,
1286 * but in reality they are not compressed. */
1287 rc = vdIfIoIntFileReadSync(pImage->pIfIo, pExtent->pFile->pStorage,
1288 VMDK_SECTOR2BYTE(uRGTStart),
1289 pTmpGT2, cbGTRead);
1290 if (RT_FAILURE(rc))
1291 {
1292 rc = vdIfError(pImage->pIfError, rc, RT_SRC_POS,
1293 N_("VMDK: error reading backup grain table in '%s'"), pExtent->pszFullname);
1294 break;
1295 }
1296 if (memcmp(pTmpGT1, pTmpGT2, cbGTRead))
1297 {
1298 rc = vdIfError(pImage->pIfError, VERR_VD_VMDK_INVALID_HEADER, RT_SRC_POS,
1299 N_("VMDK: inconsistency between grain table and backup grain table in '%s'"), pExtent->pszFullname);
1300 break;
1301 }
1302 }
1303 } /* while (i < pExtent->cGDEntries) */
1304
1305 /** @todo figure out what to do for unclean VMDKs. */
1306 if (pTmpGT1)
1307 RTMemFree(pTmpGT1);
1308 if (pTmpGT2)
1309 RTMemFree(pTmpGT2);
1310 }
1311 else
1312 rc = vdIfError(pImage->pIfError, rc, RT_SRC_POS,
1313 N_("VMDK: could not read redundant grain directory in '%s'"), pExtent->pszFullname);
1314 }
1315 }
1316 else
1317 rc = vdIfError(pImage->pIfError, rc, RT_SRC_POS,
1318 N_("VMDK: could not read grain directory in '%s': %Rrc"), pExtent->pszFullname, rc);
1319 }
1320
1321 if (RT_FAILURE(rc))
1322 vmdkFreeGrainDirectory(pExtent);
1323 return rc;
1324}
1325
1326/**
1327 * Creates a new grain directory for the given extent at the given start sector.
1328 *
1329 * @returns VBox status code.
1330 * @param pImage Image instance data.
1331 * @param pExtent The VMDK extent.
1332 * @param uStartSector Where the grain directory should be stored in the image.
1333 * @param fPreAlloc Flag whether to pre allocate the grain tables at this point.
1334 */
1335static int vmdkCreateGrainDirectory(PVMDKIMAGE pImage, PVMDKEXTENT pExtent,
1336 uint64_t uStartSector, bool fPreAlloc)
1337{
1338 int rc = VINF_SUCCESS;
1339 unsigned i;
1340 size_t cbGD = pExtent->cGDEntries * sizeof(uint32_t);
1341 size_t cbGDRounded = RT_ALIGN_64(cbGD, 512);
1342 size_t cbGTRounded;
1343 uint64_t cbOverhead;
1344
1345 if (fPreAlloc)
1346 {
1347 cbGTRounded = RT_ALIGN_64(pExtent->cGDEntries * pExtent->cGTEntries * sizeof(uint32_t), 512);
1348 cbOverhead = VMDK_SECTOR2BYTE(uStartSector) + cbGDRounded + cbGTRounded;
1349 }
1350 else
1351 {
1352 /* Use a dummy start sector for layout computation. */
1353 if (uStartSector == VMDK_GD_AT_END)
1354 uStartSector = 1;
1355 cbGTRounded = 0;
1356 cbOverhead = VMDK_SECTOR2BYTE(uStartSector) + cbGDRounded;
1357 }
1358
1359 /* For streamOptimized extents there is only one grain directory,
1360 * and for all others take redundant grain directory into account. */
1361 if (pImage->uImageFlags & VD_VMDK_IMAGE_FLAGS_STREAM_OPTIMIZED)
1362 {
1363 cbOverhead = RT_ALIGN_64(cbOverhead,
1364 VMDK_SECTOR2BYTE(pExtent->cSectorsPerGrain));
1365 }
1366 else
1367 {
1368 cbOverhead += cbGDRounded + cbGTRounded;
1369 cbOverhead = RT_ALIGN_64(cbOverhead,
1370 VMDK_SECTOR2BYTE(pExtent->cSectorsPerGrain));
1371 rc = vdIfIoIntFileSetSize(pImage->pIfIo, pExtent->pFile->pStorage, cbOverhead);
1372 }
1373
1374 if (RT_SUCCESS(rc))
1375 {
1376 pExtent->uAppendPosition = cbOverhead;
1377 pExtent->cOverheadSectors = VMDK_BYTE2SECTOR(cbOverhead);
1378
1379 if (pImage->uImageFlags & VD_VMDK_IMAGE_FLAGS_STREAM_OPTIMIZED)
1380 {
1381 pExtent->uSectorRGD = 0;
1382 pExtent->uSectorGD = uStartSector;
1383 }
1384 else
1385 {
1386 pExtent->uSectorRGD = uStartSector;
1387 pExtent->uSectorGD = uStartSector + VMDK_BYTE2SECTOR(cbGDRounded + cbGTRounded);
1388 }
1389
1390 rc = vmdkAllocStreamBuffers(pImage, pExtent);
1391 if (RT_SUCCESS(rc))
1392 {
1393 rc = vmdkAllocGrainDirectory(pImage, pExtent);
1394 if ( RT_SUCCESS(rc)
1395 && fPreAlloc)
1396 {
1397 uint32_t uGTSectorLE;
1398 uint64_t uOffsetSectors;
1399
1400 if (pExtent->pRGD)
1401 {
1402 uOffsetSectors = pExtent->uSectorRGD + VMDK_BYTE2SECTOR(cbGDRounded);
1403 for (i = 0; i < pExtent->cGDEntries; i++)
1404 {
1405 pExtent->pRGD[i] = uOffsetSectors;
1406 uGTSectorLE = RT_H2LE_U64(uOffsetSectors);
1407 /* Write the redundant grain directory entry to disk. */
1408 rc = vdIfIoIntFileWriteSync(pImage->pIfIo, pExtent->pFile->pStorage,
1409 VMDK_SECTOR2BYTE(pExtent->uSectorRGD) + i * sizeof(uGTSectorLE),
1410 &uGTSectorLE, sizeof(uGTSectorLE));
1411 if (RT_FAILURE(rc))
1412 {
1413 rc = vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: cannot write new redundant grain directory entry in '%s'"), pExtent->pszFullname);
1414 break;
1415 }
1416 uOffsetSectors += VMDK_BYTE2SECTOR(pExtent->cGTEntries * sizeof(uint32_t));
1417 }
1418 }
1419
1420 if (RT_SUCCESS(rc))
1421 {
1422 uOffsetSectors = pExtent->uSectorGD + VMDK_BYTE2SECTOR(cbGDRounded);
1423 for (i = 0; i < pExtent->cGDEntries; i++)
1424 {
1425 pExtent->pGD[i] = uOffsetSectors;
1426 uGTSectorLE = RT_H2LE_U64(uOffsetSectors);
1427 /* Write the grain directory entry to disk. */
1428 rc = vdIfIoIntFileWriteSync(pImage->pIfIo, pExtent->pFile->pStorage,
1429 VMDK_SECTOR2BYTE(pExtent->uSectorGD) + i * sizeof(uGTSectorLE),
1430 &uGTSectorLE, sizeof(uGTSectorLE));
1431 if (RT_FAILURE(rc))
1432 {
1433 rc = vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: cannot write new grain directory entry in '%s'"), pExtent->pszFullname);
1434 break;
1435 }
1436 uOffsetSectors += VMDK_BYTE2SECTOR(pExtent->cGTEntries * sizeof(uint32_t));
1437 }
1438 }
1439 }
1440 }
1441 }
1442
1443 if (RT_FAILURE(rc))
1444 vmdkFreeGrainDirectory(pExtent);
1445 return rc;
1446}
1447
1448/**
1449 * Unquotes the given string returning the result in a separate buffer.
1450 *
1451 * @returns VBox status code.
1452 * @param pImage The VMDK image state.
1453 * @param pszStr The string to unquote.
1454 * @param ppszUnquoted Where to store the return value, use RTMemTmpFree to
1455 * free.
1456 * @param ppszNext Where to store the pointer to any character following
1457 * the quoted value, optional.
1458 */
1459static int vmdkStringUnquote(PVMDKIMAGE pImage, const char *pszStr,
1460 char **ppszUnquoted, char **ppszNext)
1461{
1462 const char *pszStart = pszStr;
1463 char *pszQ;
1464 char *pszUnquoted;
1465
1466 /* Skip over whitespace. */
1467 while (*pszStr == ' ' || *pszStr == '\t')
1468 pszStr++;
1469
1470 if (*pszStr != '"')
1471 {
1472 pszQ = (char *)pszStr;
1473 while (*pszQ && *pszQ != ' ' && *pszQ != '\t')
1474 pszQ++;
1475 }
1476 else
1477 {
1478 pszStr++;
1479 pszQ = (char *)strchr(pszStr, '"');
1480 if (pszQ == NULL)
1481 return vdIfError(pImage->pIfError, VERR_VD_VMDK_INVALID_HEADER, RT_SRC_POS, N_("VMDK: incorrectly quoted value in descriptor in '%s' (raw value %s)"),
1482 pImage->pszFilename, pszStart);
1483 }
1484
1485 pszUnquoted = (char *)RTMemTmpAlloc(pszQ - pszStr + 1);
1486 if (!pszUnquoted)
1487 return VERR_NO_MEMORY;
1488 memcpy(pszUnquoted, pszStr, pszQ - pszStr);
1489 pszUnquoted[pszQ - pszStr] = '\0';
1490 *ppszUnquoted = pszUnquoted;
1491 if (ppszNext)
1492 *ppszNext = pszQ + 1;
1493 return VINF_SUCCESS;
1494}
1495
1496static int vmdkDescInitStr(PVMDKIMAGE pImage, PVMDKDESCRIPTOR pDescriptor,
1497 const char *pszLine)
1498{
1499 char *pEnd = pDescriptor->aLines[pDescriptor->cLines];
1500 ssize_t cbDiff = strlen(pszLine) + 1;
1501
1502 if ( pDescriptor->cLines >= VMDK_DESCRIPTOR_LINES_MAX - 1
1503 && pEnd - pDescriptor->aLines[0] > (ptrdiff_t)pDescriptor->cbDescAlloc - cbDiff)
1504 return vdIfError(pImage->pIfError, VERR_BUFFER_OVERFLOW, RT_SRC_POS, N_("VMDK: descriptor too big in '%s'"), pImage->pszFilename);
1505
1506 memcpy(pEnd, pszLine, cbDiff);
1507 pDescriptor->cLines++;
1508 pDescriptor->aLines[pDescriptor->cLines] = pEnd + cbDiff;
1509 pDescriptor->fDirty = true;
1510
1511 return VINF_SUCCESS;
1512}
1513
1514static bool vmdkDescGetStr(PVMDKDESCRIPTOR pDescriptor, unsigned uStart,
1515 const char *pszKey, const char **ppszValue)
1516{
1517 size_t cbKey = strlen(pszKey);
1518 const char *pszValue;
1519
1520 while (uStart != 0)
1521 {
1522 if (!strncmp(pDescriptor->aLines[uStart], pszKey, cbKey))
1523 {
1524 /* Key matches, check for a '=' (preceded by whitespace). */
1525 pszValue = pDescriptor->aLines[uStart] + cbKey;
1526 while (*pszValue == ' ' || *pszValue == '\t')
1527 pszValue++;
1528 if (*pszValue == '=')
1529 {
1530 *ppszValue = pszValue + 1;
1531 break;
1532 }
1533 }
1534 uStart = pDescriptor->aNextLines[uStart];
1535 }
1536 return !!uStart;
1537}
1538
/**
 * Sets, replaces or removes a "key=value" entry in the given descriptor list
 * chain.
 *
 * If @a pszValue is NULL an existing entry is removed (no-op when absent);
 * otherwise an existing value is replaced in place or a new line is appended
 * after the last entry of the chain.  All line pointers, chain links and the
 * section start indices (uFirstExtent/uFirstDDB) are adjusted accordingly.
 *
 * @returns VBox status code.
 * @retval  VERR_BUFFER_OVERFLOW when the descriptor buffer or line table is full.
 * @param   pImage      The VMDK image state (for error reporting).
 * @param   pDescriptor The descriptor to modify.
 * @param   uStart      First line of the list chain (desc/extent/DDB section).
 * @param   pszKey      The key to set.
 * @param   pszValue    The new value, or NULL to delete the key.
 */
static int vmdkDescSetStr(PVMDKIMAGE pImage, PVMDKDESCRIPTOR pDescriptor,
                          unsigned uStart,
                          const char *pszKey, const char *pszValue)
{
    char *pszTmp = NULL; /* (MSC naturally cannot figure this isn't used uninitialized) */
    size_t cbKey = strlen(pszKey);
    unsigned uLast = 0;

    /* Walk the chain looking for the key; remember the last chain member so a
     * new entry can be appended after it. */
    while (uStart != 0)
    {
        if (!strncmp(pDescriptor->aLines[uStart], pszKey, cbKey))
        {
            /* Key matches, check for a '=' (preceded by whitespace). */
            pszTmp = pDescriptor->aLines[uStart] + cbKey;
            while (*pszTmp == ' ' || *pszTmp == '\t')
                pszTmp++;
            if (*pszTmp == '=')
            {
                pszTmp++;
                /** @todo r=bird: Doesn't skipping trailing blanks here just cause unecessary
                 *        bloat and potentially out of space error? */
                while (*pszTmp == ' ' || *pszTmp == '\t')
                    pszTmp++;
                break;
            }
        }
        if (!pDescriptor->aNextLines[uStart])
            uLast = uStart;
        uStart = pDescriptor->aNextLines[uStart];
    }
    if (uStart)
    {
        if (pszValue)
        {
            /* Key already exists, replace existing value. */
            size_t cbOldVal = strlen(pszTmp);
            size_t cbNewVal = strlen(pszValue);
            ssize_t cbDiff = cbNewVal - cbOldVal;
            /* Check for buffer overflow. */
            if (   pDescriptor->aLines[pDescriptor->cLines] - pDescriptor->aLines[0]
                 > (ptrdiff_t)pDescriptor->cbDescAlloc - cbDiff)
                return vdIfError(pImage->pIfError, VERR_BUFFER_OVERFLOW, RT_SRC_POS, N_("VMDK: descriptor too big in '%s'"), pImage->pszFilename);

            /* Shift the tail text to make the new value fit, then splice it in
             * (including its terminator) and rebase all following line pointers. */
            memmove(pszTmp + cbNewVal, pszTmp + cbOldVal,
                    pDescriptor->aLines[pDescriptor->cLines] - pszTmp - cbOldVal);
            memcpy(pszTmp, pszValue, cbNewVal + 1);
            for (unsigned i = uStart + 1; i <= pDescriptor->cLines; i++)
                pDescriptor->aLines[i] += cbDiff;
        }
        else
        {
            /* Key exists and shall be removed: move the tail (incl. the final
             * terminator) down over the deleted line, then compact the line and
             * chain tables.
             * NOTE(review): unlike vmdkDescExtRemoveDummy, the pointer rebase
             * below does not subtract the removed line's length from aLines[i];
             * confirm whether the following line pointers end up correct after
             * the memmove. */
            memmove(pDescriptor->aLines[uStart], pDescriptor->aLines[uStart+1],
                    pDescriptor->aLines[pDescriptor->cLines] - pDescriptor->aLines[uStart+1] + 1);
            for (unsigned i = uStart + 1; i <= pDescriptor->cLines; i++)
            {
                pDescriptor->aLines[i-1] = pDescriptor->aLines[i];
                if (pDescriptor->aNextLines[i])
                    pDescriptor->aNextLines[i-1] = pDescriptor->aNextLines[i] - 1;
                else
                    pDescriptor->aNextLines[i-1] = 0;
            }
            pDescriptor->cLines--;
            /* Adjust starting line numbers of following descriptor sections. */
            if (uStart < pDescriptor->uFirstExtent)
                pDescriptor->uFirstExtent--;
            if (uStart < pDescriptor->uFirstDDB)
                pDescriptor->uFirstDDB--;
        }
    }
    else
    {
        /* Key doesn't exist, append after the last entry in this category. */
        if (!pszValue)
        {
            /* Key doesn't exist, and it should be removed. Simply a no-op. */
            return VINF_SUCCESS;
        }
        cbKey = strlen(pszKey);
        size_t cbValue = strlen(pszValue);
        ssize_t cbDiff = cbKey + 1 + cbValue + 1;
        /* Check for buffer overflow. */
        if (   (pDescriptor->cLines >= VMDK_DESCRIPTOR_LINES_MAX - 1)
            || (   pDescriptor->aLines[pDescriptor->cLines]
                 - pDescriptor->aLines[0] > (ptrdiff_t)pDescriptor->cbDescAlloc - cbDiff))
            return vdIfError(pImage->pIfError, VERR_BUFFER_OVERFLOW, RT_SRC_POS, N_("VMDK: descriptor too big in '%s'"), pImage->pszFilename);
        /* Open a slot in the line and chain tables after uLast. */
        for (unsigned i = pDescriptor->cLines + 1; i > uLast + 1; i--)
        {
            pDescriptor->aLines[i] = pDescriptor->aLines[i - 1];
            if (pDescriptor->aNextLines[i - 1])
                pDescriptor->aNextLines[i] = pDescriptor->aNextLines[i - 1] + 1;
            else
                pDescriptor->aNextLines[i] = 0;
        }
        uStart = uLast + 1;
        pDescriptor->aNextLines[uLast] = uStart;
        pDescriptor->aNextLines[uStart] = 0;
        pDescriptor->cLines++;
        /* Shift the tail text up to make room, then write "key=value\0". */
        pszTmp = pDescriptor->aLines[uStart];
        memmove(pszTmp + cbDiff, pszTmp,
                pDescriptor->aLines[pDescriptor->cLines] - pszTmp);
        memcpy(pDescriptor->aLines[uStart], pszKey, cbKey);
        pDescriptor->aLines[uStart][cbKey] = '=';
        memcpy(pDescriptor->aLines[uStart] + cbKey + 1, pszValue, cbValue + 1);
        for (unsigned i = uStart + 1; i <= pDescriptor->cLines; i++)
            pDescriptor->aLines[i] += cbDiff;

        /* Adjust starting line numbers of following descriptor sections. */
        if (uStart <= pDescriptor->uFirstExtent)
            pDescriptor->uFirstExtent++;
        if (uStart <= pDescriptor->uFirstDDB)
            pDescriptor->uFirstDDB++;
    }
    pDescriptor->fDirty = true;
    return VINF_SUCCESS;
}
1654
1655static int vmdkDescBaseGetU32(PVMDKDESCRIPTOR pDescriptor, const char *pszKey,
1656 uint32_t *puValue)
1657{
1658 const char *pszValue;
1659
1660 if (!vmdkDescGetStr(pDescriptor, pDescriptor->uFirstDesc, pszKey,
1661 &pszValue))
1662 return VERR_VD_VMDK_VALUE_NOT_FOUND;
1663 return RTStrToUInt32Ex(pszValue, NULL, 10, puValue);
1664}
1665
1666/**
1667 * Returns the value of the given key as a string allocating the necessary memory.
1668 *
1669 * @returns VBox status code.
1670 * @retval VERR_VD_VMDK_VALUE_NOT_FOUND if the value could not be found.
1671 * @param pImage The VMDK image state.
1672 * @param pDescriptor The descriptor to fetch the value from.
1673 * @param pszKey The key to get the value from.
1674 * @param ppszValue Where to store the return value, use RTMemTmpFree to
1675 * free.
1676 */
1677static int vmdkDescBaseGetStr(PVMDKIMAGE pImage, PVMDKDESCRIPTOR pDescriptor,
1678 const char *pszKey, char **ppszValue)
1679{
1680 const char *pszValue;
1681 char *pszValueUnquoted;
1682
1683 if (!vmdkDescGetStr(pDescriptor, pDescriptor->uFirstDesc, pszKey,
1684 &pszValue))
1685 return VERR_VD_VMDK_VALUE_NOT_FOUND;
1686 int rc = vmdkStringUnquote(pImage, pszValue, &pszValueUnquoted, NULL);
1687 if (RT_FAILURE(rc))
1688 return rc;
1689 *ppszValue = pszValueUnquoted;
1690 return rc;
1691}
1692
1693static int vmdkDescBaseSetStr(PVMDKIMAGE pImage, PVMDKDESCRIPTOR pDescriptor,
1694 const char *pszKey, const char *pszValue)
1695{
1696 char *pszValueQuoted;
1697
1698 RTStrAPrintf(&pszValueQuoted, "\"%s\"", pszValue);
1699 if (!pszValueQuoted)
1700 return VERR_NO_STR_MEMORY;
1701 int rc = vmdkDescSetStr(pImage, pDescriptor, pDescriptor->uFirstDesc, pszKey,
1702 pszValueQuoted);
1703 RTStrFree(pszValueQuoted);
1704 return rc;
1705}
1706
/**
 * Removes the first extent descriptor line (the dummy entry written when the
 * extent list was created) from the descriptor.
 *
 * All following line pointers are rebased by the removed line's length, the
 * chain table is compacted and the DDB section start index is adjusted.
 * No-op when no extent section exists.
 *
 * @param   pImage      The VMDK image state (unused).
 * @param   pDescriptor The descriptor to modify.
 */
static void vmdkDescExtRemoveDummy(PVMDKIMAGE pImage,
                                   PVMDKDESCRIPTOR pDescriptor)
{
    RT_NOREF1(pImage);
    unsigned uEntry = pDescriptor->uFirstExtent;
    ssize_t cbDiff;

    /* No extent section -> nothing to remove. */
    if (!uEntry)
        return;

    /* Length of the removed line including its terminator. */
    cbDiff = strlen(pDescriptor->aLines[uEntry]) + 1;
    /* Move everything including \0 in the entry marking the end of buffer. */
    memmove(pDescriptor->aLines[uEntry], pDescriptor->aLines[uEntry + 1],
            pDescriptor->aLines[pDescriptor->cLines] - pDescriptor->aLines[uEntry + 1] + 1);
    /* Rebase the following line pointers and compact the chain table. */
    for (unsigned i = uEntry + 1; i <= pDescriptor->cLines; i++)
    {
        pDescriptor->aLines[i - 1] = pDescriptor->aLines[i] - cbDiff;
        if (pDescriptor->aNextLines[i])
            pDescriptor->aNextLines[i - 1] = pDescriptor->aNextLines[i] - 1;
        else
            pDescriptor->aNextLines[i - 1] = 0;
    }
    pDescriptor->cLines--;
    /* The DDB section follows the extent section, so its start moves up. */
    if (pDescriptor->uFirstDDB)
        pDescriptor->uFirstDDB--;

    return;
}
1735
/**
 * Appends a new extent descriptor line ("ACCESS size TYPE [\"basename\"
 * [offset]]") after the last existing extent entry.
 *
 * @returns VBox status code.
 * @retval  VERR_BUFFER_OVERFLOW when the descriptor buffer or line table is full.
 * @param   pImage          The VMDK image state (for error reporting).
 * @param   pDescriptor     The descriptor to modify.
 * @param   enmAccess       Access mode of the extent (NOACCESS/RDONLY/RW).
 * @param   cNominalSectors Nominal size of the extent in sectors.
 * @param   enmType         Extent type (SPARSE/FLAT/ZERO/VMFS).
 * @param   pszBasename     Extent file name; unused for ZERO extents.
 * @param   uSectorOffset   Start offset within the file; only used for FLAT.
 */
static int vmdkDescExtInsert(PVMDKIMAGE pImage, PVMDKDESCRIPTOR pDescriptor,
                             VMDKACCESS enmAccess, uint64_t cNominalSectors,
                             VMDKETYPE enmType, const char *pszBasename,
                             uint64_t uSectorOffset)
{
    static const char *apszAccess[] = { "NOACCESS", "RDONLY", "RW" };
    static const char *apszType[] = { "", "SPARSE", "FLAT", "ZERO", "VMFS" };
    char *pszTmp;
    unsigned uStart = pDescriptor->uFirstExtent, uLast = 0;
    char szExt[1024];
    ssize_t cbDiff;

    Assert((unsigned)enmAccess < RT_ELEMENTS(apszAccess));
    Assert((unsigned)enmType < RT_ELEMENTS(apszType));

    /* Find last entry in extent description. */
    while (uStart)
    {
        if (!pDescriptor->aNextLines[uStart])
            uLast = uStart;
        uStart = pDescriptor->aNextLines[uStart];
    }

    /* Format the new extent line; ZERO extents have no file, FLAT extents
     * additionally carry the start offset within the file. */
    if (enmType == VMDKETYPE_ZERO)
    {
        RTStrPrintf(szExt, sizeof(szExt), "%s %llu %s ", apszAccess[enmAccess],
                    cNominalSectors, apszType[enmType]);
    }
    else if (enmType == VMDKETYPE_FLAT)
    {
        RTStrPrintf(szExt, sizeof(szExt), "%s %llu %s \"%s\" %llu",
                    apszAccess[enmAccess], cNominalSectors,
                    apszType[enmType], pszBasename, uSectorOffset);
    }
    else
    {
        RTStrPrintf(szExt, sizeof(szExt), "%s %llu %s \"%s\"",
                    apszAccess[enmAccess], cNominalSectors,
                    apszType[enmType], pszBasename);
    }
    cbDiff = strlen(szExt) + 1;

    /* Check for buffer overflow. */
    if (   (pDescriptor->cLines >= VMDK_DESCRIPTOR_LINES_MAX - 1)
        || (   pDescriptor->aLines[pDescriptor->cLines]
             - pDescriptor->aLines[0] > (ptrdiff_t)pDescriptor->cbDescAlloc - cbDiff))
        return vdIfError(pImage->pIfError, VERR_BUFFER_OVERFLOW, RT_SRC_POS, N_("VMDK: descriptor too big in '%s'"), pImage->pszFilename);

    /* Open a slot in the line and chain tables after the last extent entry. */
    for (unsigned i = pDescriptor->cLines + 1; i > uLast + 1; i--)
    {
        pDescriptor->aLines[i] = pDescriptor->aLines[i - 1];
        if (pDescriptor->aNextLines[i - 1])
            pDescriptor->aNextLines[i] = pDescriptor->aNextLines[i - 1] + 1;
        else
            pDescriptor->aNextLines[i] = 0;
    }
    uStart = uLast + 1;
    pDescriptor->aNextLines[uLast] = uStart;
    pDescriptor->aNextLines[uStart] = 0;
    pDescriptor->cLines++;
    /* Shift the tail text up and splice the new line in, then rebase the
     * following line pointers. */
    pszTmp = pDescriptor->aLines[uStart];
    memmove(pszTmp + cbDiff, pszTmp,
            pDescriptor->aLines[pDescriptor->cLines] - pszTmp);
    memcpy(pDescriptor->aLines[uStart], szExt, cbDiff);
    for (unsigned i = uStart + 1; i <= pDescriptor->cLines; i++)
        pDescriptor->aLines[i] += cbDiff;

    /* Adjust starting line numbers of following descriptor sections. */
    if (uStart <= pDescriptor->uFirstDDB)
        pDescriptor->uFirstDDB++;

    pDescriptor->fDirty = true;
    return VINF_SUCCESS;
}
1810
1811/**
1812 * Returns the value of the given key from the DDB as a string allocating
1813 * the necessary memory.
1814 *
1815 * @returns VBox status code.
1816 * @retval VERR_VD_VMDK_VALUE_NOT_FOUND if the value could not be found.
1817 * @param pImage The VMDK image state.
1818 * @param pDescriptor The descriptor to fetch the value from.
1819 * @param pszKey The key to get the value from.
1820 * @param ppszValue Where to store the return value, use RTMemTmpFree to
1821 * free.
1822 */
1823static int vmdkDescDDBGetStr(PVMDKIMAGE pImage, PVMDKDESCRIPTOR pDescriptor,
1824 const char *pszKey, char **ppszValue)
1825{
1826 const char *pszValue;
1827 char *pszValueUnquoted;
1828
1829 if (!vmdkDescGetStr(pDescriptor, pDescriptor->uFirstDDB, pszKey,
1830 &pszValue))
1831 return VERR_VD_VMDK_VALUE_NOT_FOUND;
1832 int rc = vmdkStringUnquote(pImage, pszValue, &pszValueUnquoted, NULL);
1833 if (RT_FAILURE(rc))
1834 return rc;
1835 *ppszValue = pszValueUnquoted;
1836 return rc;
1837}
1838
1839static int vmdkDescDDBGetU32(PVMDKIMAGE pImage, PVMDKDESCRIPTOR pDescriptor,
1840 const char *pszKey, uint32_t *puValue)
1841{
1842 const char *pszValue;
1843 char *pszValueUnquoted;
1844
1845 if (!vmdkDescGetStr(pDescriptor, pDescriptor->uFirstDDB, pszKey,
1846 &pszValue))
1847 return VERR_VD_VMDK_VALUE_NOT_FOUND;
1848 int rc = vmdkStringUnquote(pImage, pszValue, &pszValueUnquoted, NULL);
1849 if (RT_FAILURE(rc))
1850 return rc;
1851 rc = RTStrToUInt32Ex(pszValueUnquoted, NULL, 10, puValue);
1852 RTMemTmpFree(pszValueUnquoted);
1853 return rc;
1854}
1855
1856static int vmdkDescDDBGetUuid(PVMDKIMAGE pImage, PVMDKDESCRIPTOR pDescriptor,
1857 const char *pszKey, PRTUUID pUuid)
1858{
1859 const char *pszValue;
1860 char *pszValueUnquoted;
1861
1862 if (!vmdkDescGetStr(pDescriptor, pDescriptor->uFirstDDB, pszKey,
1863 &pszValue))
1864 return VERR_VD_VMDK_VALUE_NOT_FOUND;
1865 int rc = vmdkStringUnquote(pImage, pszValue, &pszValueUnquoted, NULL);
1866 if (RT_FAILURE(rc))
1867 return rc;
1868 rc = RTUuidFromStr(pUuid, pszValueUnquoted);
1869 RTMemTmpFree(pszValueUnquoted);
1870 return rc;
1871}
1872
1873static int vmdkDescDDBSetStr(PVMDKIMAGE pImage, PVMDKDESCRIPTOR pDescriptor,
1874 const char *pszKey, const char *pszVal)
1875{
1876 int rc;
1877 char *pszValQuoted;
1878
1879 if (pszVal)
1880 {
1881 RTStrAPrintf(&pszValQuoted, "\"%s\"", pszVal);
1882 if (!pszValQuoted)
1883 return VERR_NO_STR_MEMORY;
1884 }
1885 else
1886 pszValQuoted = NULL;
1887 rc = vmdkDescSetStr(pImage, pDescriptor, pDescriptor->uFirstDDB, pszKey,
1888 pszValQuoted);
1889 if (pszValQuoted)
1890 RTStrFree(pszValQuoted);
1891 return rc;
1892}
1893
1894static int vmdkDescDDBSetUuid(PVMDKIMAGE pImage, PVMDKDESCRIPTOR pDescriptor,
1895 const char *pszKey, PCRTUUID pUuid)
1896{
1897 char *pszUuid;
1898
1899 RTStrAPrintf(&pszUuid, "\"%RTuuid\"", pUuid);
1900 if (!pszUuid)
1901 return VERR_NO_STR_MEMORY;
1902 int rc = vmdkDescSetStr(pImage, pDescriptor, pDescriptor->uFirstDDB, pszKey,
1903 pszUuid);
1904 RTStrFree(pszUuid);
1905 return rc;
1906}
1907
1908static int vmdkDescDDBSetU32(PVMDKIMAGE pImage, PVMDKDESCRIPTOR pDescriptor,
1909 const char *pszKey, uint32_t uValue)
1910{
1911 char *pszValue;
1912
1913 RTStrAPrintf(&pszValue, "\"%d\"", uValue);
1914 if (!pszValue)
1915 return VERR_NO_STR_MEMORY;
1916 int rc = vmdkDescSetStr(pImage, pDescriptor, pDescriptor->uFirstDDB, pszKey,
1917 pszValue);
1918 RTStrFree(pszValue);
1919 return rc;
1920}
1921
1922/**
1923 * Splits the descriptor data into individual lines checking for correct line
1924 * endings and descriptor size.
1925 *
1926 * @returns VBox status code.
1927 * @param pImage The image instance.
1928 * @param pDesc The descriptor.
1929 * @param pszTmp The raw descriptor data from the image.
1930 */
1931static int vmdkDescSplitLines(PVMDKIMAGE pImage, PVMDKDESCRIPTOR pDesc, char *pszTmp)
1932{
1933 unsigned cLine = 0;
1934 int rc = VINF_SUCCESS;
1935
1936 while ( RT_SUCCESS(rc)
1937 && *pszTmp != '\0')
1938 {
1939 pDesc->aLines[cLine++] = pszTmp;
1940 if (cLine >= VMDK_DESCRIPTOR_LINES_MAX)
1941 {
1942 vdIfError(pImage->pIfError, VERR_VD_VMDK_INVALID_HEADER, RT_SRC_POS, N_("VMDK: descriptor too big in '%s'"), pImage->pszFilename);
1943 rc = VERR_VD_VMDK_INVALID_HEADER;
1944 break;
1945 }
1946
1947 while (*pszTmp != '\0' && *pszTmp != '\n')
1948 {
1949 if (*pszTmp == '\r')
1950 {
1951 if (*(pszTmp + 1) != '\n')
1952 {
1953 rc = vdIfError(pImage->pIfError, VERR_VD_VMDK_INVALID_HEADER, RT_SRC_POS, N_("VMDK: unsupported end of line in descriptor in '%s'"), pImage->pszFilename);
1954 break;
1955 }
1956 else
1957 {
1958 /* Get rid of CR character. */
1959 *pszTmp = '\0';
1960 }
1961 }
1962 pszTmp++;
1963 }
1964
1965 if (RT_FAILURE(rc))
1966 break;
1967
1968 /* Get rid of LF character. */
1969 if (*pszTmp == '\n')
1970 {
1971 *pszTmp = '\0';
1972 pszTmp++;
1973 }
1974 }
1975
1976 if (RT_SUCCESS(rc))
1977 {
1978 pDesc->cLines = cLine;
1979 /* Pointer right after the end of the used part of the buffer. */
1980 pDesc->aLines[cLine] = pszTmp;
1981 }
1982
1983 return rc;
1984}
1985
/**
 * Splits the raw descriptor into lines and locates its three sections:
 * the general header ("Disk DescriptorFile"), the extent descriptions and
 * the disk data base (ddb.*), verifying that they appear in that order.
 *
 * Side effects: fills pDescriptor->aLines/cLines (via vmdkDescSplitLines,
 * which mutates pDescData in place), the uFirst* section indices and the
 * aNextLines[] chain linking each non-empty line of a section to the next
 * non-empty line of the same section (the chain restarts at every section
 * boundary because uLastNonEmptyLine is reset to 0 there).
 *
 * @returns VBox status code.
 * @param   pImage      The image instance (used for error reporting).
 * @param   pDescData   The raw descriptor data, modified in place.
 * @param   cbDescData  Size of the descriptor buffer in bytes.
 * @param   pDescriptor The descriptor structure to fill.
 */
static int vmdkPreprocessDescriptor(PVMDKIMAGE pImage, char *pDescData,
                                    size_t cbDescData, PVMDKDESCRIPTOR pDescriptor)
{
    pDescriptor->cbDescAlloc = cbDescData;
    int rc = vmdkDescSplitLines(pImage, pDescriptor, pDescData);
    if (RT_SUCCESS(rc))
    {
        /* The first line must be one of the known header comment variants
         * (VMware is not consistent about spacing). */
        if (    strcmp(pDescriptor->aLines[0], "# Disk DescriptorFile")
            &&  strcmp(pDescriptor->aLines[0], "# Disk Descriptor File")
            &&  strcmp(pDescriptor->aLines[0], "#Disk Descriptor File")
            &&  strcmp(pDescriptor->aLines[0], "#Disk DescriptorFile"))
            rc = vdIfError(pImage->pIfError, VERR_VD_VMDK_INVALID_HEADER, RT_SRC_POS,
                           N_("VMDK: descriptor does not start as expected in '%s'"), pImage->pszFilename);
        else
        {
            unsigned uLastNonEmptyLine = 0;

            /* Initialize those, because we need to be able to reopen an image. */
            pDescriptor->uFirstDesc = 0;
            pDescriptor->uFirstExtent = 0;
            pDescriptor->uFirstDDB = 0;
            /* Index 0 is the header comment line, so 0 safely doubles as the
             * "section not found yet" sentinel for the uFirst* members. */
            for (unsigned i = 0; i < pDescriptor->cLines; i++)
            {
                if (*pDescriptor->aLines[i] != '#' && *pDescriptor->aLines[i] != '\0')
                {
                    if (    !strncmp(pDescriptor->aLines[i], "RW", 2)
                        ||  !strncmp(pDescriptor->aLines[i], "RDONLY", 6)
                        ||  !strncmp(pDescriptor->aLines[i], "NOACCESS", 8) )
                    {
                        /* An extent descriptor. */
                        if (!pDescriptor->uFirstDesc || pDescriptor->uFirstDDB)
                        {
                            /* Incorrect ordering of entries. */
                            rc = vdIfError(pImage->pIfError, VERR_VD_VMDK_INVALID_HEADER, RT_SRC_POS,
                                           N_("VMDK: incorrect ordering of entries in descriptor in '%s'"), pImage->pszFilename);
                            break;
                        }
                        if (!pDescriptor->uFirstExtent)
                        {
                            pDescriptor->uFirstExtent = i;
                            uLastNonEmptyLine = 0;
                        }
                    }
                    else if (!strncmp(pDescriptor->aLines[i], "ddb.", 4))
                    {
                        /* A disk database entry. */
                        if (!pDescriptor->uFirstDesc || !pDescriptor->uFirstExtent)
                        {
                            /* Incorrect ordering of entries. */
                            rc = vdIfError(pImage->pIfError, VERR_VD_VMDK_INVALID_HEADER, RT_SRC_POS,
                                           N_("VMDK: incorrect ordering of entries in descriptor in '%s'"), pImage->pszFilename);
                            break;
                        }
                        if (!pDescriptor->uFirstDDB)
                        {
                            pDescriptor->uFirstDDB = i;
                            uLastNonEmptyLine = 0;
                        }
                    }
                    else
                    {
                        /* A normal entry. */
                        if (pDescriptor->uFirstExtent || pDescriptor->uFirstDDB)
                        {
                            /* Incorrect ordering of entries. */
                            rc = vdIfError(pImage->pIfError, VERR_VD_VMDK_INVALID_HEADER, RT_SRC_POS,
                                           N_("VMDK: incorrect ordering of entries in descriptor in '%s'"), pImage->pszFilename);
                            break;
                        }
                        if (!pDescriptor->uFirstDesc)
                        {
                            pDescriptor->uFirstDesc = i;
                            uLastNonEmptyLine = 0;
                        }
                    }
                    /* Link the previous non-empty line of this section to the
                     * current one (index 0 means "no predecessor"). */
                    if (uLastNonEmptyLine)
                        pDescriptor->aNextLines[uLastNonEmptyLine] = i;
                    uLastNonEmptyLine = i;
                }
            }
        }
    }

    return rc;
}
2071
2072static int vmdkDescSetPCHSGeometry(PVMDKIMAGE pImage,
2073 PCVDGEOMETRY pPCHSGeometry)
2074{
2075 int rc = vmdkDescDDBSetU32(pImage, &pImage->Descriptor,
2076 VMDK_DDB_GEO_PCHS_CYLINDERS,
2077 pPCHSGeometry->cCylinders);
2078 if (RT_FAILURE(rc))
2079 return rc;
2080 rc = vmdkDescDDBSetU32(pImage, &pImage->Descriptor,
2081 VMDK_DDB_GEO_PCHS_HEADS,
2082 pPCHSGeometry->cHeads);
2083 if (RT_FAILURE(rc))
2084 return rc;
2085 rc = vmdkDescDDBSetU32(pImage, &pImage->Descriptor,
2086 VMDK_DDB_GEO_PCHS_SECTORS,
2087 pPCHSGeometry->cSectors);
2088 return rc;
2089}
2090
2091static int vmdkDescSetLCHSGeometry(PVMDKIMAGE pImage,
2092 PCVDGEOMETRY pLCHSGeometry)
2093{
2094 int rc = vmdkDescDDBSetU32(pImage, &pImage->Descriptor,
2095 VMDK_DDB_GEO_LCHS_CYLINDERS,
2096 pLCHSGeometry->cCylinders);
2097 if (RT_FAILURE(rc))
2098 return rc;
2099 rc = vmdkDescDDBSetU32(pImage, &pImage->Descriptor,
2100 VMDK_DDB_GEO_LCHS_HEADS,
2101
2102 pLCHSGeometry->cHeads);
2103 if (RT_FAILURE(rc))
2104 return rc;
2105 rc = vmdkDescDDBSetU32(pImage, &pImage->Descriptor,
2106 VMDK_DDB_GEO_LCHS_SECTORS,
2107 pLCHSGeometry->cSectors);
2108 return rc;
2109}
2110
2111static int vmdkCreateDescriptor(PVMDKIMAGE pImage, char *pDescData,
2112 size_t cbDescData, PVMDKDESCRIPTOR pDescriptor)
2113{
2114 pDescriptor->uFirstDesc = 0;
2115 pDescriptor->uFirstExtent = 0;
2116 pDescriptor->uFirstDDB = 0;
2117 pDescriptor->cLines = 0;
2118 pDescriptor->cbDescAlloc = cbDescData;
2119 pDescriptor->fDirty = false;
2120 pDescriptor->aLines[pDescriptor->cLines] = pDescData;
2121 memset(pDescriptor->aNextLines, '\0', sizeof(pDescriptor->aNextLines));
2122
2123 int rc = vmdkDescInitStr(pImage, pDescriptor, "# Disk DescriptorFile");
2124 if (RT_SUCCESS(rc))
2125 rc = vmdkDescInitStr(pImage, pDescriptor, "version=1");
2126 if (RT_SUCCESS(rc))
2127 {
2128 pDescriptor->uFirstDesc = pDescriptor->cLines - 1;
2129 rc = vmdkDescInitStr(pImage, pDescriptor, "");
2130 }
2131 if (RT_SUCCESS(rc))
2132 rc = vmdkDescInitStr(pImage, pDescriptor, "# Extent description");
2133 if (RT_SUCCESS(rc))
2134 rc = vmdkDescInitStr(pImage, pDescriptor, "NOACCESS 0 ZERO ");
2135 if (RT_SUCCESS(rc))
2136 {
2137 pDescriptor->uFirstExtent = pDescriptor->cLines - 1;
2138 rc = vmdkDescInitStr(pImage, pDescriptor, "");
2139 }
2140 if (RT_SUCCESS(rc))
2141 {
2142 /* The trailing space is created by VMware, too. */
2143 rc = vmdkDescInitStr(pImage, pDescriptor, "# The disk Data Base ");
2144 }
2145 if (RT_SUCCESS(rc))
2146 rc = vmdkDescInitStr(pImage, pDescriptor, "#DDB");
2147 if (RT_SUCCESS(rc))
2148 rc = vmdkDescInitStr(pImage, pDescriptor, "");
2149 if (RT_SUCCESS(rc))
2150 rc = vmdkDescInitStr(pImage, pDescriptor, "ddb.virtualHWVersion = \"4\"");
2151 if (RT_SUCCESS(rc))
2152 {
2153 pDescriptor->uFirstDDB = pDescriptor->cLines - 1;
2154
2155 /* Now that the framework is in place, use the normal functions to insert
2156 * the remaining keys. */
2157 char szBuf[9];
2158 RTStrPrintf(szBuf, sizeof(szBuf), "%08x", RTRandU32());
2159 rc = vmdkDescSetStr(pImage, pDescriptor, pDescriptor->uFirstDesc,
2160 "CID", szBuf);
2161 }
2162 if (RT_SUCCESS(rc))
2163 rc = vmdkDescSetStr(pImage, pDescriptor, pDescriptor->uFirstDesc,
2164 "parentCID", "ffffffff");
2165 if (RT_SUCCESS(rc))
2166 rc = vmdkDescDDBSetStr(pImage, pDescriptor, "ddb.adapterType", "ide");
2167
2168 return rc;
2169}
2170
/**
 * Parses the preprocessed descriptor: checks the format version and the
 * createType, builds the extent table from the extent lines, and reads
 * (or, for writable images, initializes) geometry and UUID values from
 * the disk data base section.
 *
 * @returns VBox status code.
 * @param   pImage      The image instance; its Descriptor, pExtents array,
 *                      uImageFlags, geometry and UUID members are filled.
 * @param   pDescData   Raw descriptor data (split into lines in place).
 * @param   cbDescData  Size of the descriptor buffer in bytes.
 */
static int vmdkParseDescriptor(PVMDKIMAGE pImage, char *pDescData, size_t cbDescData)
{
    int rc;
    unsigned cExtents;
    unsigned uLine;
    unsigned i;

    rc = vmdkPreprocessDescriptor(pImage, pDescData, cbDescData,
                                  &pImage->Descriptor);
    if (RT_FAILURE(rc))
        return rc;

    /* Check version, must be 1. */
    uint32_t uVersion;
    rc = vmdkDescBaseGetU32(&pImage->Descriptor, "version", &uVersion);
    if (RT_FAILURE(rc))
        return vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: error finding key 'version' in descriptor in '%s'"), pImage->pszFilename);
    if (uVersion != 1)
        return vdIfError(pImage->pIfError, VERR_VD_VMDK_UNSUPPORTED_VERSION, RT_SRC_POS, N_("VMDK: unsupported format version in descriptor in '%s'"), pImage->pszFilename);

    /* Get image creation type and determine image flags. */
    char *pszCreateType = NULL; /* initialized to make gcc shut up */
    rc = vmdkDescBaseGetStr(pImage, &pImage->Descriptor, "createType",
                            &pszCreateType);
    if (RT_FAILURE(rc))
        return vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: cannot get image type from descriptor in '%s'"), pImage->pszFilename);
    /* Note: any createType not matched below is tolerated and handled as a
     * plain hosted image (no extra flag set, no error raised). */
    if (    !strcmp(pszCreateType, "twoGbMaxExtentSparse")
        ||  !strcmp(pszCreateType, "twoGbMaxExtentFlat"))
        pImage->uImageFlags |= VD_VMDK_IMAGE_FLAGS_SPLIT_2G;
    else if (   !strcmp(pszCreateType, "partitionedDevice")
             || !strcmp(pszCreateType, "fullDevice"))
        pImage->uImageFlags |= VD_VMDK_IMAGE_FLAGS_RAWDISK;
    else if (!strcmp(pszCreateType, "streamOptimized"))
        pImage->uImageFlags |= VD_VMDK_IMAGE_FLAGS_STREAM_OPTIMIZED;
    else if (!strcmp(pszCreateType, "vmfs"))
        pImage->uImageFlags |= VD_IMAGE_FLAGS_FIXED | VD_VMDK_IMAGE_FLAGS_ESX;
    RTMemTmpFree(pszCreateType);

    /* Count the number of extent config entries. */
    for (uLine = pImage->Descriptor.uFirstExtent, cExtents = 0;
         uLine != 0;
         uLine = pImage->Descriptor.aNextLines[uLine], cExtents++)
        /* nothing */;

    if (!pImage->pDescData && cExtents != 1)
    {
        /* Monolithic image, must have only one extent (already opened). */
        return vdIfError(pImage->pIfError, VERR_VD_VMDK_INVALID_HEADER, RT_SRC_POS, N_("VMDK: monolithic image may only have one extent in '%s'"), pImage->pszFilename);
    }

    if (pImage->pDescData)
    {
        /* Non-monolithic image, extents need to be allocated. */
        rc = vmdkCreateExtents(pImage, cExtents);
        if (RT_FAILURE(rc))
            return rc;
    }

    /* Parse each extent line:
     *   <access> <nominal sectors> <type> ["<basename>" [offset]] */
    for (i = 0, uLine = pImage->Descriptor.uFirstExtent;
         i < cExtents; i++, uLine = pImage->Descriptor.aNextLines[uLine])
    {
        char *pszLine = pImage->Descriptor.aLines[uLine];

        /* Access type of the extent. */
        if (!strncmp(pszLine, "RW", 2))
        {
            pImage->pExtents[i].enmAccess = VMDKACCESS_READWRITE;
            pszLine += 2;
        }
        else if (!strncmp(pszLine, "RDONLY", 6))
        {
            pImage->pExtents[i].enmAccess = VMDKACCESS_READONLY;
            pszLine += 6;
        }
        else if (!strncmp(pszLine, "NOACCESS", 8))
        {
            pImage->pExtents[i].enmAccess = VMDKACCESS_NOACCESS;
            pszLine += 8;
        }
        else
            return vdIfError(pImage->pIfError, VERR_VD_VMDK_INVALID_HEADER, RT_SRC_POS, N_("VMDK: parse error in extent description in '%s'"), pImage->pszFilename);
        if (*pszLine++ != ' ')
            return vdIfError(pImage->pIfError, VERR_VD_VMDK_INVALID_HEADER, RT_SRC_POS, N_("VMDK: parse error in extent description in '%s'"), pImage->pszFilename);

        /* Nominal size of the extent. */
        rc = RTStrToUInt64Ex(pszLine, &pszLine, 10,
                             &pImage->pExtents[i].cNominalSectors);
        if (RT_FAILURE(rc))
            return vdIfError(pImage->pIfError, VERR_VD_VMDK_INVALID_HEADER, RT_SRC_POS, N_("VMDK: parse error in extent description in '%s'"), pImage->pszFilename);
        if (*pszLine++ != ' ')
            return vdIfError(pImage->pIfError, VERR_VD_VMDK_INVALID_HEADER, RT_SRC_POS, N_("VMDK: parse error in extent description in '%s'"), pImage->pszFilename);

        /* Type of the extent. */
        if (!strncmp(pszLine, "SPARSE", 6))
        {
            pImage->pExtents[i].enmType = VMDKETYPE_HOSTED_SPARSE;
            pszLine += 6;
        }
        else if (!strncmp(pszLine, "FLAT", 4))
        {
            pImage->pExtents[i].enmType = VMDKETYPE_FLAT;
            pszLine += 4;
        }
        else if (!strncmp(pszLine, "ZERO", 4))
        {
            pImage->pExtents[i].enmType = VMDKETYPE_ZERO;
            pszLine += 4;
        }
        else if (!strncmp(pszLine, "VMFS", 4))
        {
            pImage->pExtents[i].enmType = VMDKETYPE_VMFS;
            pszLine += 4;
        }
        else
            return vdIfError(pImage->pIfError, VERR_VD_VMDK_INVALID_HEADER, RT_SRC_POS, N_("VMDK: parse error in extent description in '%s'"), pImage->pszFilename);

        if (pImage->pExtents[i].enmType == VMDKETYPE_ZERO)
        {
            /* This one has no basename or offset. */
            if (*pszLine == ' ')
                pszLine++;
            if (*pszLine != '\0')
                return vdIfError(pImage->pIfError, VERR_VD_VMDK_INVALID_HEADER, RT_SRC_POS, N_("VMDK: parse error in extent description in '%s'"), pImage->pszFilename);
            pImage->pExtents[i].pszBasename = NULL;
        }
        else
        {
            /* All other extent types have basename and optional offset. */
            if (*pszLine++ != ' ')
                return vdIfError(pImage->pIfError, VERR_VD_VMDK_INVALID_HEADER, RT_SRC_POS, N_("VMDK: parse error in extent description in '%s'"), pImage->pszFilename);

            /* Basename of the image. Surrounded by quotes. */
            char *pszBasename;
            rc = vmdkStringUnquote(pImage, pszLine, &pszBasename, &pszLine);
            if (RT_FAILURE(rc))
                return rc;
            pImage->pExtents[i].pszBasename = pszBasename;
            if (*pszLine == ' ')
            {
                pszLine++;
                if (*pszLine != '\0')
                {
                    /* Optional offset in extent specified. */
                    rc = RTStrToUInt64Ex(pszLine, &pszLine, 10,
                                         &pImage->pExtents[i].uSectorOffset);
                    if (RT_FAILURE(rc))
                        return vdIfError(pImage->pIfError, VERR_VD_VMDK_INVALID_HEADER, RT_SRC_POS, N_("VMDK: parse error in extent description in '%s'"), pImage->pszFilename);
                }
            }

            if (*pszLine != '\0')
                return vdIfError(pImage->pIfError, VERR_VD_VMDK_INVALID_HEADER, RT_SRC_POS, N_("VMDK: parse error in extent description in '%s'"), pImage->pszFilename);
        }
    }

    /* Determine PCHS geometry (autogenerate if necessary). */
    rc = vmdkDescDDBGetU32(pImage, &pImage->Descriptor,
                           VMDK_DDB_GEO_PCHS_CYLINDERS,
                           &pImage->PCHSGeometry.cCylinders);
    if (rc == VERR_VD_VMDK_VALUE_NOT_FOUND)
        pImage->PCHSGeometry.cCylinders = 0;
    else if (RT_FAILURE(rc))
        return vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: error getting PCHS geometry from extent description in '%s'"), pImage->pszFilename);
    rc = vmdkDescDDBGetU32(pImage, &pImage->Descriptor,
                           VMDK_DDB_GEO_PCHS_HEADS,
                           &pImage->PCHSGeometry.cHeads);
    if (rc == VERR_VD_VMDK_VALUE_NOT_FOUND)
        pImage->PCHSGeometry.cHeads = 0;
    else if (RT_FAILURE(rc))
        return vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: error getting PCHS geometry from extent description in '%s'"), pImage->pszFilename);
    rc = vmdkDescDDBGetU32(pImage, &pImage->Descriptor,
                           VMDK_DDB_GEO_PCHS_SECTORS,
                           &pImage->PCHSGeometry.cSectors);
    if (rc == VERR_VD_VMDK_VALUE_NOT_FOUND)
        pImage->PCHSGeometry.cSectors = 0;
    else if (RT_FAILURE(rc))
        return vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: error getting PCHS geometry from extent description in '%s'"), pImage->pszFilename);
    /* Reject out-of-range PCHS values (heads > 16 or sectors > 63 exceed
     * the classic BIOS CHS limits) and fall back to defaults. */
    if (    pImage->PCHSGeometry.cCylinders == 0
        ||  pImage->PCHSGeometry.cHeads == 0
        ||  pImage->PCHSGeometry.cHeads > 16
        ||  pImage->PCHSGeometry.cSectors == 0
        ||  pImage->PCHSGeometry.cSectors > 63)
    {
        /* Mark PCHS geometry as not yet valid (can't do the calculation here
         * as the total image size isn't known yet). */
        pImage->PCHSGeometry.cCylinders = 0;
        pImage->PCHSGeometry.cHeads = 16;
        pImage->PCHSGeometry.cSectors = 63;
    }

    /* Determine LCHS geometry (set to 0 if not specified). */
    rc = vmdkDescDDBGetU32(pImage, &pImage->Descriptor,
                           VMDK_DDB_GEO_LCHS_CYLINDERS,
                           &pImage->LCHSGeometry.cCylinders);
    if (rc == VERR_VD_VMDK_VALUE_NOT_FOUND)
        pImage->LCHSGeometry.cCylinders = 0;
    else if (RT_FAILURE(rc))
        return vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: error getting LCHS geometry from extent description in '%s'"), pImage->pszFilename);
    rc = vmdkDescDDBGetU32(pImage, &pImage->Descriptor,
                           VMDK_DDB_GEO_LCHS_HEADS,
                           &pImage->LCHSGeometry.cHeads);
    if (rc == VERR_VD_VMDK_VALUE_NOT_FOUND)
        pImage->LCHSGeometry.cHeads = 0;
    else if (RT_FAILURE(rc))
        return vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: error getting LCHS geometry from extent description in '%s'"), pImage->pszFilename);
    rc = vmdkDescDDBGetU32(pImage, &pImage->Descriptor,
                           VMDK_DDB_GEO_LCHS_SECTORS,
                           &pImage->LCHSGeometry.cSectors);
    if (rc == VERR_VD_VMDK_VALUE_NOT_FOUND)
        pImage->LCHSGeometry.cSectors = 0;
    else if (RT_FAILURE(rc))
        return vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: error getting LCHS geometry from extent description in '%s'"), pImage->pszFilename);
    /* LCHS is all-or-nothing: any missing component invalidates the rest. */
    if (    pImage->LCHSGeometry.cCylinders == 0
        ||  pImage->LCHSGeometry.cHeads == 0
        ||  pImage->LCHSGeometry.cSectors == 0)
    {
        pImage->LCHSGeometry.cCylinders = 0;
        pImage->LCHSGeometry.cHeads = 0;
        pImage->LCHSGeometry.cSectors = 0;
    }

    /* Get image UUID. */
    rc = vmdkDescDDBGetUuid(pImage, &pImage->Descriptor, VMDK_DDB_IMAGE_UUID,
                            &pImage->ImageUuid);
    if (rc == VERR_VD_VMDK_VALUE_NOT_FOUND)
    {
        /* Image without UUID. Probably created by VMware and not yet used
         * by VirtualBox. Can only be added for images opened in read/write
         * mode, so don't bother producing a sensible UUID otherwise. */
        if (pImage->uOpenFlags & VD_OPEN_FLAGS_READONLY)
            RTUuidClear(&pImage->ImageUuid);
        else
        {
            rc = RTUuidCreate(&pImage->ImageUuid);
            if (RT_FAILURE(rc))
                return rc;
            rc = vmdkDescDDBSetUuid(pImage, &pImage->Descriptor,
                                    VMDK_DDB_IMAGE_UUID, &pImage->ImageUuid);
            if (RT_FAILURE(rc))
                return vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: error storing image UUID in descriptor in '%s'"), pImage->pszFilename);
        }
    }
    else if (RT_FAILURE(rc))
        return rc;

    /* Get image modification UUID. */
    rc = vmdkDescDDBGetUuid(pImage, &pImage->Descriptor,
                            VMDK_DDB_MODIFICATION_UUID,
                            &pImage->ModificationUuid);
    if (rc == VERR_VD_VMDK_VALUE_NOT_FOUND)
    {
        /* Image without UUID. Probably created by VMware and not yet used
         * by VirtualBox. Can only be added for images opened in read/write
         * mode, so don't bother producing a sensible UUID otherwise. */
        if (pImage->uOpenFlags & VD_OPEN_FLAGS_READONLY)
            RTUuidClear(&pImage->ModificationUuid);
        else
        {
            rc = RTUuidCreate(&pImage->ModificationUuid);
            if (RT_FAILURE(rc))
                return rc;
            rc = vmdkDescDDBSetUuid(pImage, &pImage->Descriptor,
                                    VMDK_DDB_MODIFICATION_UUID,
                                    &pImage->ModificationUuid);
            if (RT_FAILURE(rc))
                return vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: error storing image modification UUID in descriptor in '%s'"), pImage->pszFilename);
        }
    }
    else if (RT_FAILURE(rc))
        return rc;

    /* Get UUID of parent image. */
    rc = vmdkDescDDBGetUuid(pImage, &pImage->Descriptor, VMDK_DDB_PARENT_UUID,
                            &pImage->ParentUuid);
    if (rc == VERR_VD_VMDK_VALUE_NOT_FOUND)
    {
        /* Image without UUID. Probably created by VMware and not yet used
         * by VirtualBox. Can only be added for images opened in read/write
         * mode, so don't bother producing a sensible UUID otherwise. */
        if (pImage->uOpenFlags & VD_OPEN_FLAGS_READONLY)
            RTUuidClear(&pImage->ParentUuid);
        else
        {
            /* A cleared (nil) UUID is stored, meaning "no parent". */
            rc = RTUuidClear(&pImage->ParentUuid);
            if (RT_FAILURE(rc))
                return rc;
            rc = vmdkDescDDBSetUuid(pImage, &pImage->Descriptor,
                                    VMDK_DDB_PARENT_UUID, &pImage->ParentUuid);
            if (RT_FAILURE(rc))
                return vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: error storing parent UUID in descriptor in '%s'"), pImage->pszFilename);
        }
    }
    else if (RT_FAILURE(rc))
        return rc;

    /* Get parent image modification UUID. */
    rc = vmdkDescDDBGetUuid(pImage, &pImage->Descriptor,
                            VMDK_DDB_PARENT_MODIFICATION_UUID,
                            &pImage->ParentModificationUuid);
    if (rc == VERR_VD_VMDK_VALUE_NOT_FOUND)
    {
        /* Image without UUID. Probably created by VMware and not yet used
         * by VirtualBox. Can only be added for images opened in read/write
         * mode, so don't bother producing a sensible UUID otherwise. */
        if (pImage->uOpenFlags & VD_OPEN_FLAGS_READONLY)
            RTUuidClear(&pImage->ParentModificationUuid);
        else
        {
            RTUuidClear(&pImage->ParentModificationUuid);
            rc = vmdkDescDDBSetUuid(pImage, &pImage->Descriptor,
                                    VMDK_DDB_PARENT_MODIFICATION_UUID,
                                    &pImage->ParentModificationUuid);
            if (RT_FAILURE(rc))
                return vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: error storing parent modification UUID in descriptor in '%s'"), pImage->pszFilename);
        }
    }
    else if (RT_FAILURE(rc))
        return rc;

    return VINF_SUCCESS;
}
2492
2493/**
2494 * Internal : Prepares the descriptor to write to the image.
2495 */
2496static int vmdkDescriptorPrepare(PVMDKIMAGE pImage, uint64_t cbLimit,
2497 void **ppvData, size_t *pcbData)
2498{
2499 int rc = VINF_SUCCESS;
2500
2501 /*
2502 * Allocate temporary descriptor buffer.
2503 * In case there is no limit allocate a default
2504 * and increase if required.
2505 */
2506 size_t cbDescriptor = cbLimit ? cbLimit : 4 * _1K;
2507 char *pszDescriptor = (char *)RTMemAllocZ(cbDescriptor);
2508 size_t offDescriptor = 0;
2509
2510 if (!pszDescriptor)
2511 return VERR_NO_MEMORY;
2512
2513 for (unsigned i = 0; i < pImage->Descriptor.cLines; i++)
2514 {
2515 const char *psz = pImage->Descriptor.aLines[i];
2516 size_t cb = strlen(psz);
2517
2518 /*
2519 * Increase the descriptor if there is no limit and
2520 * there is not enough room left for this line.
2521 */
2522 if (offDescriptor + cb + 1 > cbDescriptor)
2523 {
2524 if (cbLimit)
2525 {
2526 rc = vdIfError(pImage->pIfError, VERR_BUFFER_OVERFLOW, RT_SRC_POS, N_("VMDK: descriptor too long in '%s'"), pImage->pszFilename);
2527 break;
2528 }
2529 else
2530 {
2531 char *pszDescriptorNew = NULL;
2532 LogFlow(("Increasing descriptor cache\n"));
2533
2534 pszDescriptorNew = (char *)RTMemRealloc(pszDescriptor, cbDescriptor + cb + 4 * _1K);
2535 if (!pszDescriptorNew)
2536 {
2537 rc = VERR_NO_MEMORY;
2538 break;
2539 }
2540 pszDescriptor = pszDescriptorNew;
2541 cbDescriptor += cb + 4 * _1K;
2542 }
2543 }
2544
2545 if (cb > 0)
2546 {
2547 memcpy(pszDescriptor + offDescriptor, psz, cb);
2548 offDescriptor += cb;
2549 }
2550
2551 memcpy(pszDescriptor + offDescriptor, "\n", 1);
2552 offDescriptor++;
2553 }
2554
2555 if (RT_SUCCESS(rc))
2556 {
2557 *ppvData = pszDescriptor;
2558 *pcbData = offDescriptor;
2559 }
2560 else if (pszDescriptor)
2561 RTMemFree(pszDescriptor);
2562
2563 return rc;
2564}
2565
2566/**
2567 * Internal: write/update the descriptor part of the image.
2568 */
2569static int vmdkWriteDescriptor(PVMDKIMAGE pImage, PVDIOCTX pIoCtx)
2570{
2571 int rc = VINF_SUCCESS;
2572 uint64_t cbLimit;
2573 uint64_t uOffset;
2574 PVMDKFILE pDescFile;
2575 void *pvDescriptor = NULL;
2576 size_t cbDescriptor;
2577
2578 if (pImage->pDescData)
2579 {
2580 /* Separate descriptor file. */
2581 uOffset = 0;
2582 cbLimit = 0;
2583 pDescFile = pImage->pFile;
2584 }
2585 else
2586 {
2587 /* Embedded descriptor file. */
2588 uOffset = VMDK_SECTOR2BYTE(pImage->pExtents[0].uDescriptorSector);
2589 cbLimit = VMDK_SECTOR2BYTE(pImage->pExtents[0].cDescriptorSectors);
2590 pDescFile = pImage->pExtents[0].pFile;
2591 }
2592 /* Bail out if there is no file to write to. */
2593 if (pDescFile == NULL)
2594 return VERR_INVALID_PARAMETER;
2595
2596 rc = vmdkDescriptorPrepare(pImage, cbLimit, &pvDescriptor, &cbDescriptor);
2597 if (RT_SUCCESS(rc))
2598 {
2599 rc = vdIfIoIntFileWriteMeta(pImage->pIfIo, pDescFile->pStorage,
2600 uOffset, pvDescriptor,
2601 cbLimit ? cbLimit : cbDescriptor,
2602 pIoCtx, NULL, NULL);
2603 if ( RT_FAILURE(rc)
2604 && rc != VERR_VD_ASYNC_IO_IN_PROGRESS)
2605 rc = vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: error writing descriptor in '%s'"), pImage->pszFilename);
2606 }
2607
2608 if (RT_SUCCESS(rc) && !cbLimit)
2609 {
2610 rc = vdIfIoIntFileSetSize(pImage->pIfIo, pDescFile->pStorage, cbDescriptor);
2611 if (RT_FAILURE(rc))
2612 rc = vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: error truncating descriptor in '%s'"), pImage->pszFilename);
2613 }
2614
2615 if (RT_SUCCESS(rc))
2616 pImage->Descriptor.fDirty = false;
2617
2618 if (pvDescriptor)
2619 RTMemFree(pvDescriptor);
2620 return rc;
2621
2622}
2623
2624/**
2625 * Internal: validate the consistency check values in a binary header.
2626 */
2627static int vmdkValidateHeader(PVMDKIMAGE pImage, PVMDKEXTENT pExtent, const SparseExtentHeader *pHeader)
2628{
2629 int rc = VINF_SUCCESS;
2630 if (RT_LE2H_U32(pHeader->magicNumber) != VMDK_SPARSE_MAGICNUMBER)
2631 {
2632 rc = vdIfError(pImage->pIfError, VERR_VD_VMDK_INVALID_HEADER, RT_SRC_POS, N_("VMDK: incorrect magic in sparse extent header in '%s'"), pExtent->pszFullname);
2633 return rc;
2634 }
2635 if (RT_LE2H_U32(pHeader->version) != 1 && RT_LE2H_U32(pHeader->version) != 3)
2636 {
2637 rc = vdIfError(pImage->pIfError, VERR_VD_VMDK_UNSUPPORTED_VERSION, RT_SRC_POS, N_("VMDK: incorrect version in sparse extent header in '%s', not a VMDK 1.0/1.1 conforming file"), pExtent->pszFullname);
2638 return rc;
2639 }
2640 if ( (RT_LE2H_U32(pHeader->flags) & 1)
2641 && ( pHeader->singleEndLineChar != '\n'
2642 || pHeader->nonEndLineChar != ' '
2643 || pHeader->doubleEndLineChar1 != '\r'
2644 || pHeader->doubleEndLineChar2 != '\n') )
2645 {
2646 rc = vdIfError(pImage->pIfError, VERR_VD_VMDK_INVALID_HEADER, RT_SRC_POS, N_("VMDK: corrupted by CR/LF translation in '%s'"), pExtent->pszFullname);
2647 return rc;
2648 }
2649 if (RT_LE2H_U64(pHeader->descriptorSize) > VMDK_SPARSE_DESCRIPTOR_SIZE_MAX)
2650 {
2651 rc = vdIfError(pImage->pIfError, VERR_VD_VMDK_INVALID_HEADER, RT_SRC_POS, N_("VMDK: descriptor size out of bounds (%llu vs %llu) '%s'"),
2652 pExtent->pszFullname, RT_LE2H_U64(pHeader->descriptorSize), VMDK_SPARSE_DESCRIPTOR_SIZE_MAX);
2653 return rc;
2654 }
2655 return rc;
2656}
2657
2658/**
2659 * Internal: read metadata belonging to an extent with binary header, i.e.
2660 * as found in monolithic files.
2661 */
2662static int vmdkReadBinaryMetaExtent(PVMDKIMAGE pImage, PVMDKEXTENT pExtent,
2663 bool fMagicAlreadyRead)
2664{
2665 SparseExtentHeader Header;
2666 int rc;
2667
2668 if (!fMagicAlreadyRead)
2669 rc = vdIfIoIntFileReadSync(pImage->pIfIo, pExtent->pFile->pStorage, 0,
2670 &Header, sizeof(Header));
2671 else
2672 {
2673 Header.magicNumber = RT_H2LE_U32(VMDK_SPARSE_MAGICNUMBER);
2674 rc = vdIfIoIntFileReadSync(pImage->pIfIo, pExtent->pFile->pStorage,
2675 RT_UOFFSETOF(SparseExtentHeader, version),
2676 &Header.version,
2677 sizeof(Header)
2678 - RT_UOFFSETOF(SparseExtentHeader, version));
2679 }
2680
2681 if (RT_SUCCESS(rc))
2682 {
2683 rc = vmdkValidateHeader(pImage, pExtent, &Header);
2684 if (RT_SUCCESS(rc))
2685 {
2686 uint64_t cbFile = 0;
2687
2688 if ( (RT_LE2H_U32(Header.flags) & RT_BIT(17))
2689 && RT_LE2H_U64(Header.gdOffset) == VMDK_GD_AT_END)
2690 pExtent->fFooter = true;
2691
2692 if ( !(pImage->uOpenFlags & VD_OPEN_FLAGS_READONLY)
2693 || ( pExtent->fFooter
2694 && !(pImage->uOpenFlags & VD_OPEN_FLAGS_SEQUENTIAL)))
2695 {
2696 rc = vdIfIoIntFileGetSize(pImage->pIfIo, pExtent->pFile->pStorage, &cbFile);
2697 if (RT_FAILURE(rc))
2698 rc = vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: cannot get size of '%s'"), pExtent->pszFullname);
2699 }
2700
2701 if (RT_SUCCESS(rc))
2702 {
2703 if (!(pImage->uOpenFlags & VD_OPEN_FLAGS_READONLY))
2704 pExtent->uAppendPosition = RT_ALIGN_64(cbFile, 512);
2705
2706 if ( pExtent->fFooter
2707 && ( !(pImage->uOpenFlags & VD_OPEN_FLAGS_READONLY)
2708 || !(pImage->uOpenFlags & VD_OPEN_FLAGS_SEQUENTIAL)))
2709 {
2710 /* Read the footer, which comes before the end-of-stream marker. */
2711 rc = vdIfIoIntFileReadSync(pImage->pIfIo, pExtent->pFile->pStorage,
2712 cbFile - 2*512, &Header,
2713 sizeof(Header));
2714 if (RT_FAILURE(rc))
2715 {
2716 vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: error reading extent footer in '%s'"), pExtent->pszFullname);
2717 rc = VERR_VD_VMDK_INVALID_HEADER;
2718 }
2719
2720 if (RT_SUCCESS(rc))
2721 rc = vmdkValidateHeader(pImage, pExtent, &Header);
2722 /* Prohibit any writes to this extent. */
2723 pExtent->uAppendPosition = 0;
2724 }
2725
2726 if (RT_SUCCESS(rc))
2727 {
2728 pExtent->uVersion = RT_LE2H_U32(Header.version);
2729 pExtent->enmType = VMDKETYPE_HOSTED_SPARSE; /* Just dummy value, changed later. */
2730 pExtent->cSectors = RT_LE2H_U64(Header.capacity);
2731 pExtent->cSectorsPerGrain = RT_LE2H_U64(Header.grainSize);
2732 pExtent->uDescriptorSector = RT_LE2H_U64(Header.descriptorOffset);
2733 pExtent->cDescriptorSectors = RT_LE2H_U64(Header.descriptorSize);
2734 pExtent->cGTEntries = RT_LE2H_U32(Header.numGTEsPerGT);
2735 pExtent->cOverheadSectors = RT_LE2H_U64(Header.overHead);
2736 pExtent->fUncleanShutdown = !!Header.uncleanShutdown;
2737 pExtent->uCompression = RT_LE2H_U16(Header.compressAlgorithm);
2738 if (RT_LE2H_U32(Header.flags) & RT_BIT(1))
2739 {
2740 pExtent->uSectorRGD = RT_LE2H_U64(Header.rgdOffset);
2741 pExtent->uSectorGD = RT_LE2H_U64(Header.gdOffset);
2742 }
2743 else
2744 {
2745 pExtent->uSectorGD = RT_LE2H_U64(Header.gdOffset);
2746 pExtent->uSectorRGD = 0;
2747 }
2748
2749 if (pExtent->uDescriptorSector && !pExtent->cDescriptorSectors)
2750 rc = vdIfError(pImage->pIfError, VERR_VD_VMDK_INVALID_HEADER, RT_SRC_POS,
2751 N_("VMDK: inconsistent embedded descriptor config in '%s'"), pExtent->pszFullname);
2752
2753 if ( RT_SUCCESS(rc)
2754 && ( pExtent->uSectorGD == VMDK_GD_AT_END
2755 || pExtent->uSectorRGD == VMDK_GD_AT_END)
2756 && ( !(pImage->uOpenFlags & VD_OPEN_FLAGS_READONLY)
2757 || !(pImage->uOpenFlags & VD_OPEN_FLAGS_SEQUENTIAL)))
2758 rc = vdIfError(pImage->pIfError, VERR_VD_VMDK_INVALID_HEADER, RT_SRC_POS,
2759 N_("VMDK: cannot resolve grain directory offset in '%s'"), pExtent->pszFullname);
2760
2761 if (RT_SUCCESS(rc))
2762 {
2763 uint64_t cSectorsPerGDE = pExtent->cGTEntries * pExtent->cSectorsPerGrain;
2764 if (!cSectorsPerGDE || cSectorsPerGDE > UINT32_MAX)
2765 rc = vdIfError(pImage->pIfError, VERR_VD_VMDK_INVALID_HEADER, RT_SRC_POS,
2766 N_("VMDK: incorrect grain directory size in '%s'"), pExtent->pszFullname);
2767 else
2768 {
2769 pExtent->cSectorsPerGDE = cSectorsPerGDE;
2770 pExtent->cGDEntries = (pExtent->cSectors + cSectorsPerGDE - 1) / cSectorsPerGDE;
2771
2772 /* Fix up the number of descriptor sectors, as some flat images have
2773 * really just one, and this causes failures when inserting the UUID
2774 * values and other extra information. */
2775 if (pExtent->cDescriptorSectors != 0 && pExtent->cDescriptorSectors < 4)
2776 {
2777 /* Do it the easy way - just fix it for flat images which have no
2778 * other complicated metadata which needs space too. */
2779 if ( pExtent->uDescriptorSector + 4 < pExtent->cOverheadSectors
2780 && pExtent->cGTEntries * pExtent->cGDEntries == 0)
2781 pExtent->cDescriptorSectors = 4;
2782 }
2783 }
2784 }
2785 }
2786 }
2787 }
2788 }
2789 else
2790 {
2791 vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: error reading extent header in '%s'"), pExtent->pszFullname);
2792 rc = VERR_VD_VMDK_INVALID_HEADER;
2793 }
2794
2795 if (RT_FAILURE(rc))
2796 vmdkFreeExtentData(pImage, pExtent, false);
2797
2798 return rc;
2799}
2800
2801/**
2802 * Internal: read additional metadata belonging to an extent. For those
2803 * extents which have no additional metadata just verify the information.
2804 */
2805static int vmdkReadMetaExtent(PVMDKIMAGE pImage, PVMDKEXTENT pExtent)
2806{
2807 int rc = VINF_SUCCESS;
2808
2809/* disabled the check as there are too many truncated vmdk images out there */
2810#ifdef VBOX_WITH_VMDK_STRICT_SIZE_CHECK
2811 uint64_t cbExtentSize;
2812 /* The image must be a multiple of a sector in size and contain the data
2813 * area (flat images only). If not, it means the image is at least
2814 * truncated, or even seriously garbled. */
2815 rc = vdIfIoIntFileGetSize(pImage->pIfIo, pExtent->pFile->pStorage, &cbExtentSize);
2816 if (RT_FAILURE(rc))
2817 rc = vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: error getting size in '%s'"), pExtent->pszFullname);
2818 else if ( cbExtentSize != RT_ALIGN_64(cbExtentSize, 512)
2819 && (pExtent->enmType != VMDKETYPE_FLAT || pExtent->cNominalSectors + pExtent->uSectorOffset > VMDK_BYTE2SECTOR(cbExtentSize)))
2820 rc = vdIfError(pImage->pIfError, VERR_VD_VMDK_INVALID_HEADER, RT_SRC_POS,
2821 N_("VMDK: file size is not a multiple of 512 in '%s', file is truncated or otherwise garbled"), pExtent->pszFullname);
2822#endif /* VBOX_WITH_VMDK_STRICT_SIZE_CHECK */
2823 if ( RT_SUCCESS(rc)
2824 && pExtent->enmType == VMDKETYPE_HOSTED_SPARSE)
2825 {
2826 /* The spec says that this must be a power of two and greater than 8,
2827 * but probably they meant not less than 8. */
2828 if ( (pExtent->cSectorsPerGrain & (pExtent->cSectorsPerGrain - 1))
2829 || pExtent->cSectorsPerGrain < 8)
2830 rc = vdIfError(pImage->pIfError, VERR_VD_VMDK_INVALID_HEADER, RT_SRC_POS,
2831 N_("VMDK: invalid extent grain size %u in '%s'"), pExtent->cSectorsPerGrain, pExtent->pszFullname);
2832 else
2833 {
2834 /* This code requires that a grain table must hold a power of two multiple
2835 * of the number of entries per GT cache entry. */
2836 if ( (pExtent->cGTEntries & (pExtent->cGTEntries - 1))
2837 || pExtent->cGTEntries < VMDK_GT_CACHELINE_SIZE)
2838 rc = vdIfError(pImage->pIfError, VERR_VD_VMDK_INVALID_HEADER, RT_SRC_POS,
2839 N_("VMDK: grain table cache size problem in '%s'"), pExtent->pszFullname);
2840 else
2841 {
2842 rc = vmdkAllocStreamBuffers(pImage, pExtent);
2843 if (RT_SUCCESS(rc))
2844 {
2845 /* Prohibit any writes to this streamOptimized extent. */
2846 if (pImage->uImageFlags & VD_VMDK_IMAGE_FLAGS_STREAM_OPTIMIZED)
2847 pExtent->uAppendPosition = 0;
2848
2849 if ( !(pImage->uImageFlags & VD_VMDK_IMAGE_FLAGS_STREAM_OPTIMIZED)
2850 || !(pImage->uOpenFlags & VD_OPEN_FLAGS_READONLY)
2851 || !(pImage->uOpenFlags & VD_OPEN_FLAGS_SEQUENTIAL))
2852 rc = vmdkReadGrainDirectory(pImage, pExtent);
2853 else
2854 {
2855 pExtent->uGrainSectorAbs = pExtent->cOverheadSectors;
2856 pExtent->cbGrainStreamRead = 0;
2857 }
2858 }
2859 }
2860 }
2861 }
2862
2863 if (RT_FAILURE(rc))
2864 vmdkFreeExtentData(pImage, pExtent, false);
2865
2866 return rc;
2867}
2868
2869/**
2870 * Internal: write/update the metadata for a sparse extent.
2871 */
2872static int vmdkWriteMetaSparseExtent(PVMDKIMAGE pImage, PVMDKEXTENT pExtent,
2873 uint64_t uOffset, PVDIOCTX pIoCtx)
2874{
2875 SparseExtentHeader Header;
2876
2877 memset(&Header, '\0', sizeof(Header));
2878 Header.magicNumber = RT_H2LE_U32(VMDK_SPARSE_MAGICNUMBER);
2879 Header.version = RT_H2LE_U32(pExtent->uVersion);
2880 Header.flags = RT_H2LE_U32(RT_BIT(0));
2881 if (pExtent->pRGD)
2882 Header.flags |= RT_H2LE_U32(RT_BIT(1));
2883 if (pImage->uImageFlags & VD_VMDK_IMAGE_FLAGS_STREAM_OPTIMIZED)
2884 Header.flags |= RT_H2LE_U32(RT_BIT(16) | RT_BIT(17));
2885 Header.capacity = RT_H2LE_U64(pExtent->cSectors);
2886 Header.grainSize = RT_H2LE_U64(pExtent->cSectorsPerGrain);
2887 Header.descriptorOffset = RT_H2LE_U64(pExtent->uDescriptorSector);
2888 Header.descriptorSize = RT_H2LE_U64(pExtent->cDescriptorSectors);
2889 Header.numGTEsPerGT = RT_H2LE_U32(pExtent->cGTEntries);
2890 if (pExtent->fFooter && uOffset == 0)
2891 {
2892 if (pExtent->pRGD)
2893 {
2894 Assert(pExtent->uSectorRGD);
2895 Header.rgdOffset = RT_H2LE_U64(VMDK_GD_AT_END);
2896 Header.gdOffset = RT_H2LE_U64(VMDK_GD_AT_END);
2897 }
2898 else
2899 Header.gdOffset = RT_H2LE_U64(VMDK_GD_AT_END);
2900 }
2901 else
2902 {
2903 if (pExtent->pRGD)
2904 {
2905 Assert(pExtent->uSectorRGD);
2906 Header.rgdOffset = RT_H2LE_U64(pExtent->uSectorRGD);
2907 Header.gdOffset = RT_H2LE_U64(pExtent->uSectorGD);
2908 }
2909 else
2910 Header.gdOffset = RT_H2LE_U64(pExtent->uSectorGD);
2911 }
2912 Header.overHead = RT_H2LE_U64(pExtent->cOverheadSectors);
2913 Header.uncleanShutdown = pExtent->fUncleanShutdown;
2914 Header.singleEndLineChar = '\n';
2915 Header.nonEndLineChar = ' ';
2916 Header.doubleEndLineChar1 = '\r';
2917 Header.doubleEndLineChar2 = '\n';
2918 Header.compressAlgorithm = RT_H2LE_U16(pExtent->uCompression);
2919
2920 int rc = vdIfIoIntFileWriteMeta(pImage->pIfIo, pExtent->pFile->pStorage,
2921 uOffset, &Header, sizeof(Header),
2922 pIoCtx, NULL, NULL);
2923 if (RT_FAILURE(rc) && (rc != VERR_VD_ASYNC_IO_IN_PROGRESS))
2924 rc = vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: error writing extent header in '%s'"), pExtent->pszFullname);
2925 return rc;
2926}
2927
2928/**
2929 * Internal: free the buffers used for streamOptimized images.
2930 */
2931static void vmdkFreeStreamBuffers(PVMDKEXTENT pExtent)
2932{
2933 if (pExtent->pvCompGrain)
2934 {
2935 RTMemFree(pExtent->pvCompGrain);
2936 pExtent->pvCompGrain = NULL;
2937 }
2938 if (pExtent->pvGrain)
2939 {
2940 RTMemFree(pExtent->pvGrain);
2941 pExtent->pvGrain = NULL;
2942 }
2943}
2944
2945/**
2946 * Internal: free the memory used by the extent data structure, optionally
2947 * deleting the referenced files.
2948 *
2949 * @returns VBox status code.
2950 * @param pImage Pointer to the image instance data.
2951 * @param pExtent The extent to free.
2952 * @param fDelete Flag whether to delete the backing storage.
2953 */
2954static int vmdkFreeExtentData(PVMDKIMAGE pImage, PVMDKEXTENT pExtent,
2955 bool fDelete)
2956{
2957 int rc = VINF_SUCCESS;
2958
2959 vmdkFreeGrainDirectory(pExtent);
2960 if (pExtent->pDescData)
2961 {
2962 RTMemFree(pExtent->pDescData);
2963 pExtent->pDescData = NULL;
2964 }
2965 if (pExtent->pFile != NULL)
2966 {
2967 /* Do not delete raw extents, these have full and base names equal. */
2968 rc = vmdkFileClose(pImage, &pExtent->pFile,
2969 fDelete
2970 && pExtent->pszFullname
2971 && pExtent->pszBasename
2972 && strcmp(pExtent->pszFullname, pExtent->pszBasename));
2973 }
2974 if (pExtent->pszBasename)
2975 {
2976 RTMemTmpFree((void *)pExtent->pszBasename);
2977 pExtent->pszBasename = NULL;
2978 }
2979 if (pExtent->pszFullname)
2980 {
2981 RTStrFree((char *)(void *)pExtent->pszFullname);
2982 pExtent->pszFullname = NULL;
2983 }
2984 vmdkFreeStreamBuffers(pExtent);
2985
2986 return rc;
2987}
2988
2989/**
2990 * Internal: allocate grain table cache if necessary for this image.
2991 */
2992static int vmdkAllocateGrainTableCache(PVMDKIMAGE pImage)
2993{
2994 PVMDKEXTENT pExtent;
2995
2996 /* Allocate grain table cache if any sparse extent is present. */
2997 for (unsigned i = 0; i < pImage->cExtents; i++)
2998 {
2999 pExtent = &pImage->pExtents[i];
3000 if (pExtent->enmType == VMDKETYPE_HOSTED_SPARSE)
3001 {
3002 /* Allocate grain table cache. */
3003 pImage->pGTCache = (PVMDKGTCACHE)RTMemAllocZ(sizeof(VMDKGTCACHE));
3004 if (!pImage->pGTCache)
3005 return VERR_NO_MEMORY;
3006 for (unsigned j = 0; j < VMDK_GT_CACHE_SIZE; j++)
3007 {
3008 PVMDKGTCACHEENTRY pGCE = &pImage->pGTCache->aGTCache[j];
3009 pGCE->uExtent = UINT32_MAX;
3010 }
3011 pImage->pGTCache->cEntries = VMDK_GT_CACHE_SIZE;
3012 break;
3013 }
3014 }
3015
3016 return VINF_SUCCESS;
3017}
3018
3019/**
3020 * Internal: allocate the given number of extents.
3021 */
3022static int vmdkCreateExtents(PVMDKIMAGE pImage, unsigned cExtents)
3023{
3024 int rc = VINF_SUCCESS;
3025 PVMDKEXTENT pExtents = (PVMDKEXTENT)RTMemAllocZ(cExtents * sizeof(VMDKEXTENT));
3026 if (pExtents)
3027 {
3028 for (unsigned i = 0; i < cExtents; i++)
3029 {
3030 pExtents[i].pFile = NULL;
3031 pExtents[i].pszBasename = NULL;
3032 pExtents[i].pszFullname = NULL;
3033 pExtents[i].pGD = NULL;
3034 pExtents[i].pRGD = NULL;
3035 pExtents[i].pDescData = NULL;
3036 pExtents[i].uVersion = 1;
3037 pExtents[i].uCompression = VMDK_COMPRESSION_NONE;
3038 pExtents[i].uExtent = i;
3039 pExtents[i].pImage = pImage;
3040 }
3041 pImage->pExtents = pExtents;
3042 pImage->cExtents = cExtents;
3043 }
3044 else
3045 rc = VERR_NO_MEMORY;
3046
3047 return rc;
3048}
3049
/**
 * Reads and processes the descriptor embedded in sparse images.
 *
 * @returns VBox status code.
 * @param   pImage  VMDK image instance.
 * @param   pFile   The sparse file handle; ownership moves to the single
 *                  extent created here (pImage->pFile is cleared).
 */
static int vmdkDescriptorReadSparse(PVMDKIMAGE pImage, PVMDKFILE pFile)
{
    /* It's a hosted single-extent image. */
    int rc = vmdkCreateExtents(pImage, 1);
    if (RT_SUCCESS(rc))
    {
        /* The opened file is passed to the extent. No separate descriptor
         * file, so no need to keep anything open for the image. */
        PVMDKEXTENT pExtent = &pImage->pExtents[0];
        pExtent->pFile = pFile;
        pImage->pFile = NULL;
        pExtent->pszFullname = RTPathAbsDup(pImage->pszFilename);
        if (RT_LIKELY(pExtent->pszFullname))
        {
            /* As we're dealing with a monolithic image here, there must
             * be a descriptor embedded in the image file. */
            rc = vmdkReadBinaryMetaExtent(pImage, pExtent, true /* fMagicAlreadyRead */);
            if (    RT_SUCCESS(rc)
                && pExtent->uDescriptorSector
                && pExtent->cDescriptorSectors)
            {
                /* HACK: extend the descriptor if it is unusually small and it fits in
                 * the unused space after the image header. Allows opening VMDK files
                 * with extremely small descriptor in read/write mode.
                 *
                 * The previous version introduced a possible regression for VMDK stream
                 * optimized images from VMware which tend to have only a single sector sized
                 * descriptor. Increasing the descriptor size resulted in adding the various uuid
                 * entries required to make it work with VBox but for stream optimized images
                 * the updated binary header wasn't written to the disk creating a mismatch
                 * between advertised and real descriptor size.
                 *
                 * The descriptor size will be increased even if opened readonly now if there
                 * enough room but the new value will not be written back to the image.
                 */
                if (    pExtent->cDescriptorSectors < 3
                    && (int64_t)pExtent->uSectorGD - pExtent->uDescriptorSector >= 4
                    && (!pExtent->uSectorRGD || (int64_t)pExtent->uSectorRGD - pExtent->uDescriptorSector >= 4))
                {
                    uint64_t cDescriptorSectorsOld = pExtent->cDescriptorSectors;

                    pExtent->cDescriptorSectors = 4;
                    if (!(pImage->uOpenFlags & VD_OPEN_FLAGS_READONLY))
                    {
                        /*
                         * Update the on disk number now to make sure we don't introduce inconsistencies
                         * in case of stream optimized images from VMware where the descriptor is just
                         * one sector big (the binary header is not written to disk for complete
                         * stream optimized images in vmdkFlushImage()).
                         */
                        uint64_t u64DescSizeNew = RT_H2LE_U64(pExtent->cDescriptorSectors);
                        rc = vdIfIoIntFileWriteSync(pImage->pIfIo, pFile->pStorage,
                                                    RT_UOFFSETOF(SparseExtentHeader, descriptorSize),
                                                    &u64DescSizeNew, sizeof(u64DescSizeNew));
                        if (RT_FAILURE(rc))
                        {
                            LogFlowFunc(("Increasing the descriptor size failed with %Rrc\n", rc));
                            /* Restore the old size and carry on. */
                            pExtent->cDescriptorSectors = cDescriptorSectorsOld;
                        }
                    }
                }
                /* Read the descriptor from the extent. */
                pExtent->pDescData = (char *)RTMemAllocZ(VMDK_SECTOR2BYTE(pExtent->cDescriptorSectors));
                if (RT_LIKELY(pExtent->pDescData))
                {
                    rc = vdIfIoIntFileReadSync(pImage->pIfIo, pExtent->pFile->pStorage,
                                               VMDK_SECTOR2BYTE(pExtent->uDescriptorSector),
                                               pExtent->pDescData,
                                               VMDK_SECTOR2BYTE(pExtent->cDescriptorSectors));
                    if (RT_SUCCESS(rc))
                    {
                        rc = vmdkParseDescriptor(pImage, pExtent->pDescData,
                                                 VMDK_SECTOR2BYTE(pExtent->cDescriptorSectors));
                        /* streamOptimized images combined with async I/O are
                         * rejected below with VERR_NOT_SUPPORTED. */
                        if (    RT_SUCCESS(rc)
                            && (    !(pImage->uImageFlags & VD_VMDK_IMAGE_FLAGS_STREAM_OPTIMIZED)
                                || !(pImage->uOpenFlags & VD_OPEN_FLAGS_ASYNC_IO)))
                        {
                            rc = vmdkReadMetaExtent(pImage, pExtent);
                            if (RT_SUCCESS(rc))
                            {
                                /* Mark the extent as unclean if opened in read-write mode. */
                                if (    !(pImage->uOpenFlags & VD_OPEN_FLAGS_READONLY)
                                    && !(pImage->uImageFlags & VD_VMDK_IMAGE_FLAGS_STREAM_OPTIMIZED))
                                {
                                    pExtent->fUncleanShutdown = true;
                                    pExtent->fMetaDirty = true;
                                }
                            }
                        }
                        else if (RT_SUCCESS(rc))
                            rc = VERR_NOT_SUPPORTED;
                    }
                    else
                        rc = vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: read error for descriptor in '%s'"), pExtent->pszFullname);
                }
                else
                    rc = VERR_NO_MEMORY;
            }
            else if (RT_SUCCESS(rc))
                rc = vdIfError(pImage->pIfError, VERR_VD_VMDK_INVALID_HEADER, RT_SRC_POS, N_("VMDK: monolithic image without descriptor in '%s'"), pImage->pszFilename);
        }
        else
            rc = VERR_NO_MEMORY;
    }

    return rc;
}
3165
/**
 * Reads the descriptor from a pure text file.
 *
 * @returns VBox status code.
 * @param   pImage  VMDK image instance.
 * @param   pFile   The descriptor file handle.
 */
static int vmdkDescriptorReadAscii(PVMDKIMAGE pImage, PVMDKFILE pFile)
{
    /* Allocate at least 10K, and make sure that there is 5K free space
     * in case new entries need to be added to the descriptor. Never
     * allocate more than 128K, because that's no valid descriptor file
     * and will result in the correct "truncated read" error handling. */
    uint64_t cbFileSize;
    int rc = vdIfIoIntFileGetSize(pImage->pIfIo, pFile->pStorage, &cbFileSize);
    if (    RT_SUCCESS(rc)
        && cbFileSize >= 50) /* anything shorter cannot be a valid descriptor */
    {
        /* Round the allocation up to a multiple of 10 sectors (5K) with at
         * least 5K of headroom, clamped to [10K, 128K]. */
        uint64_t cbSize = cbFileSize;
        if (cbSize % VMDK_SECTOR2BYTE(10))
            cbSize += VMDK_SECTOR2BYTE(20) - cbSize % VMDK_SECTOR2BYTE(10);
        else
            cbSize += VMDK_SECTOR2BYTE(10);
        cbSize = RT_MIN(cbSize, _128K);
        pImage->cbDescAlloc = RT_MAX(VMDK_SECTOR2BYTE(20), cbSize);
        pImage->pDescData = (char *)RTMemAllocZ(pImage->cbDescAlloc);
        if (RT_LIKELY(pImage->pDescData))
        {
            rc = vdIfIoIntFileReadSync(pImage->pIfIo, pFile->pStorage, 0, pImage->pDescData,
                                       RT_MIN(pImage->cbDescAlloc, cbFileSize));
            if (RT_SUCCESS(rc))
            {
#if 0 /** @todo Revisit */
                cbRead += sizeof(u32Magic);
                if (cbRead == pImage->cbDescAlloc)
                {
                    /* Likely the read is truncated. Better fail a bit too early
                     * (normally the descriptor is much smaller than our buffer). */
                    rc = vdIfError(pImage->pIfError, VERR_VD_VMDK_INVALID_HEADER, RT_SRC_POS, N_("VMDK: cannot read descriptor in '%s'"), pImage->pszFilename);
                    goto out;
                }
#endif
                rc = vmdkParseDescriptor(pImage, pImage->pDescData,
                                         pImage->cbDescAlloc);
                if (RT_SUCCESS(rc))
                {
                    /* Resolve the file name of every extent and open its
                     * backing storage; stop at the first failure. */
                    for (unsigned i = 0; i < pImage->cExtents && RT_SUCCESS(rc); i++)
                    {
                        PVMDKEXTENT pExtent = &pImage->pExtents[i];
                        if (pExtent->pszBasename)
                        {
                            /* Hack to figure out whether the specified name in the
                             * extent descriptor is absolute. Doesn't always work, but
                             * should be good enough for now. */
                            char *pszFullname;
                            /** @todo implement proper path absolute check. */
                            if (pExtent->pszBasename[0] == RTPATH_SLASH)
                            {
                                pszFullname = RTStrDup(pExtent->pszBasename);
                                if (!pszFullname)
                                {
                                    rc = VERR_NO_MEMORY;
                                    break;
                                }
                            }
                            else
                            {
                                /* Relative name: resolve it against the
                                 * directory containing the descriptor file. */
                                char *pszDirname = RTStrDup(pImage->pszFilename);
                                if (!pszDirname)
                                {
                                    rc = VERR_NO_MEMORY;
                                    break;
                                }
                                RTPathStripFilename(pszDirname);
                                pszFullname = RTPathJoinA(pszDirname, pExtent->pszBasename);
                                RTStrFree(pszDirname);
                                if (!pszFullname)
                                {
                                    rc = VERR_NO_STR_MEMORY;
                                    break;
                                }
                            }
                            pExtent->pszFullname = pszFullname;
                        }
                        else
                            pExtent->pszFullname = NULL;

                        /* Extents marked read-only in the descriptor are forced open read-only. */
                        unsigned uOpenFlags = pImage->uOpenFlags | ((pExtent->enmAccess == VMDKACCESS_READONLY) ? VD_OPEN_FLAGS_READONLY : 0);
                        switch (pExtent->enmType)
                        {
                            case VMDKETYPE_HOSTED_SPARSE:
                                rc = vmdkFileOpen(pImage, &pExtent->pFile, pExtent->pszBasename, pExtent->pszFullname,
                                                  VDOpenFlagsToFileOpenFlags(uOpenFlags, false /* fCreate */));
                                if (RT_FAILURE(rc))
                                {
                                    /* Do NOT signal an appropriate error here, as the VD
                                     * layer has the choice of retrying the open if it
                                     * failed. */
                                    break;
                                }
                                rc = vmdkReadBinaryMetaExtent(pImage, pExtent,
                                                              false /* fMagicAlreadyRead */);
                                if (RT_FAILURE(rc))
                                    break;
                                rc = vmdkReadMetaExtent(pImage, pExtent);
                                if (RT_FAILURE(rc))
                                    break;

                                /* Mark extent as unclean if opened in read-write mode. */
                                if (!(pImage->uOpenFlags & VD_OPEN_FLAGS_READONLY))
                                {
                                    pExtent->fUncleanShutdown = true;
                                    pExtent->fMetaDirty = true;
                                }
                                break;
                            case VMDKETYPE_VMFS:
                            case VMDKETYPE_FLAT:
                                rc = vmdkFileOpen(pImage, &pExtent->pFile, pExtent->pszBasename, pExtent->pszFullname,
                                                  VDOpenFlagsToFileOpenFlags(uOpenFlags, false /* fCreate */));
                                if (RT_FAILURE(rc))
                                {
                                    /* Do NOT signal an appropriate error here, as the VD
                                     * layer has the choice of retrying the open if it
                                     * failed. */
                                    break;
                                }
                                break;
                            case VMDKETYPE_ZERO:
                                /* Nothing to do. */
                                break;
                            default:
                                AssertMsgFailed(("unknown vmdk extent type %d\n", pExtent->enmType));
                        }
                    }
                }
            }
            else
                rc = vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: read error for descriptor in '%s'"), pImage->pszFilename);
        }
        else
            rc = VERR_NO_MEMORY;
    }
    else if (RT_SUCCESS(rc))
        rc = vdIfError(pImage->pIfError, VERR_VD_VMDK_INVALID_HEADER, RT_SRC_POS, N_("VMDK: descriptor in '%s' is too short"), pImage->pszFilename);

    return rc;
}
3313
3314/**
3315 * Read and process the descriptor based on the image type.
3316 *
3317 * @returns VBox status code.
3318 * @param pImage VMDK image instance.
3319 * @param pFile VMDK file handle.
3320 */
3321static int vmdkDescriptorRead(PVMDKIMAGE pImage, PVMDKFILE pFile)
3322{
3323 uint32_t u32Magic;
3324
3325 /* Read magic (if present). */
3326 int rc = vdIfIoIntFileReadSync(pImage->pIfIo, pFile->pStorage, 0,
3327 &u32Magic, sizeof(u32Magic));
3328 if (RT_SUCCESS(rc))
3329 {
3330 /* Handle the file according to its magic number. */
3331 if (RT_LE2H_U32(u32Magic) == VMDK_SPARSE_MAGICNUMBER)
3332 rc = vmdkDescriptorReadSparse(pImage, pFile);
3333 else
3334 rc = vmdkDescriptorReadAscii(pImage, pFile);
3335 }
3336 else
3337 {
3338 vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: error reading the magic number in '%s'"), pImage->pszFilename);
3339 rc = VERR_VD_VMDK_INVALID_HEADER;
3340 }
3341
3342 return rc;
3343}
3344
/**
 * Internal: Open an image, constructing all necessary data structures.
 *
 * @returns VBox status code.
 * @param   pImage      The image instance to initialize.
 * @param   uOpenFlags  VD_OPEN_FLAGS_* controlling the access mode.
 */
static int vmdkOpenImage(PVMDKIMAGE pImage, unsigned uOpenFlags)
{
    pImage->uOpenFlags = uOpenFlags;
    pImage->pIfError = VDIfErrorGet(pImage->pVDIfsDisk);
    pImage->pIfIo = VDIfIoIntGet(pImage->pVDIfsImage);
    AssertPtrReturn(pImage->pIfIo, VERR_INVALID_PARAMETER);

    /*
     * Open the image.
     * We don't have to check for asynchronous access because
     * we only support raw access and the opened file is a description
     * file were no data is stored.
     */
    PVMDKFILE pFile;
    int rc = vmdkFileOpen(pImage, &pFile, NULL, pImage->pszFilename,
                          VDOpenFlagsToFileOpenFlags(uOpenFlags, false /* fCreate */));
    if (RT_SUCCESS(rc))
    {
        pImage->pFile = pFile;

        /* Parse the descriptor (embedded or standalone) and open all extents. */
        rc = vmdkDescriptorRead(pImage, pFile);
        if (RT_SUCCESS(rc))
        {
            /* Determine PCHS geometry if not set. */
            if (pImage->PCHSGeometry.cCylinders == 0)
            {
                /* NOTE(review): assumes cHeads and cSectors are non-zero at
                 * this point -- presumably guaranteed by descriptor parsing;
                 * verify, otherwise this divides by zero. */
                uint64_t cCylinders = VMDK_BYTE2SECTOR(pImage->cbSize)
                                    / pImage->PCHSGeometry.cHeads
                                    / pImage->PCHSGeometry.cSectors;
                pImage->PCHSGeometry.cCylinders = (unsigned)RT_MIN(cCylinders, 16383);
                if (    !(pImage->uOpenFlags & VD_OPEN_FLAGS_READONLY)
                    && !(pImage->uImageFlags & VD_VMDK_IMAGE_FLAGS_STREAM_OPTIMIZED))
                {
                    rc = vmdkDescSetPCHSGeometry(pImage, &pImage->PCHSGeometry);
                    AssertRC(rc);
                }
            }

            /* Update the image metadata now in case has changed. */
            rc = vmdkFlushImage(pImage, NULL);
            if (RT_SUCCESS(rc))
            {
                /* Figure out a few per-image constants from the extents. */
                pImage->cbSize = 0;
                for (unsigned i = 0; i < pImage->cExtents; i++)
                {
                    PVMDKEXTENT pExtent = &pImage->pExtents[i];
                    if (pExtent->enmType == VMDKETYPE_HOSTED_SPARSE)
                    {
                        /* Here used to be a check whether the nominal size of an extent
                         * is a multiple of the grain size. The spec says that this is
                         * always the case, but unfortunately some files out there in the
                         * wild violate the spec (e.g. ReactOS 0.3.1). */
                    }
                    else if (    pExtent->enmType == VMDKETYPE_FLAT
                             || pExtent->enmType == VMDKETYPE_ZERO)
                        pImage->uImageFlags |= VD_IMAGE_FLAGS_FIXED;

                    pImage->cbSize += VMDK_SECTOR2BYTE(pExtent->cNominalSectors);
                }

                /* Skip the grain table cache only for streamOptimized images
                 * opened read-only for sequential access. */
                if (    !(pImage->uImageFlags & VD_VMDK_IMAGE_FLAGS_STREAM_OPTIMIZED)
                    || !(pImage->uOpenFlags & VD_OPEN_FLAGS_READONLY)
                    || !(pImage->uOpenFlags & VD_OPEN_FLAGS_SEQUENTIAL))
                    rc = vmdkAllocateGrainTableCache(pImage);
            }
        }
    }
    /* else: Do NOT signal an appropriate error here, as the VD layer has the
     * choice of retrying the open if it failed. */

    if (RT_SUCCESS(rc))
    {
        /* Publish a single raw region spanning the whole disk. */
        PVDREGIONDESC pRegion = &pImage->RegionList.aRegions[0];
        pImage->RegionList.fFlags = 0;
        pImage->RegionList.cRegions = 1;

        pRegion->offRegion = 0; /* Disk start. */
        pRegion->cbBlock = 512;
        pRegion->enmDataForm = VDREGIONDATAFORM_RAW;
        pRegion->enmMetadataForm = VDREGIONMETADATAFORM_NONE;
        pRegion->cbData = 512;
        pRegion->cbMetadata = 0;
        pRegion->cRegionBlocksOrBytes = pImage->cbSize;
    }
    else
        vmdkFreeImage(pImage, false, false /*fFlush*/); /* Don't try to flush anything if opening failed. */
    return rc;
}
3437
3438/**
3439 * Frees a raw descriptor.
3440 * @internal
3441 */
3442static int vmdkRawDescFree(PVDISKRAW pRawDesc)
3443{
3444 if (!pRawDesc)
3445 return VINF_SUCCESS;
3446
3447 RTStrFree(pRawDesc->pszRawDisk);
3448 pRawDesc->pszRawDisk = NULL;
3449
3450 /* Partitions: */
3451 for (unsigned i = 0; i < pRawDesc->cPartDescs; i++)
3452 {
3453 RTStrFree(pRawDesc->pPartDescs[i].pszRawDevice);
3454 pRawDesc->pPartDescs[i].pszRawDevice = NULL;
3455
3456 RTMemFree(pRawDesc->pPartDescs[i].pvPartitionData);
3457 pRawDesc->pPartDescs[i].pvPartitionData = NULL;
3458 }
3459
3460 RTMemFree(pRawDesc->pPartDescs);
3461 pRawDesc->pPartDescs = NULL;
3462
3463 RTMemFree(pRawDesc);
3464 return VINF_SUCCESS;
3465}
3466
3467/**
3468 * Helper that grows the raw partition descriptor table by @a cToAdd entries,
3469 * returning the pointer to the first new entry.
3470 * @internal
3471 */
3472static int vmdkRawDescAppendPartDesc(PVMDKIMAGE pImage, PVDISKRAW pRawDesc, uint32_t cToAdd, PVDISKRAWPARTDESC *ppRet)
3473{
3474 uint32_t const cOld = pRawDesc->cPartDescs;
3475 uint32_t const cNew = cOld + cToAdd;
3476 PVDISKRAWPARTDESC paNew = (PVDISKRAWPARTDESC)RTMemReallocZ(pRawDesc->pPartDescs,
3477 cOld * sizeof(pRawDesc->pPartDescs[0]),
3478 cNew * sizeof(pRawDesc->pPartDescs[0]));
3479 if (paNew)
3480 {
3481 pRawDesc->cPartDescs = cNew;
3482 pRawDesc->pPartDescs = paNew;
3483
3484 *ppRet = &paNew[cOld];
3485 return VINF_SUCCESS;
3486 }
3487 *ppRet = NULL;
3488 return vdIfError(pImage->pIfError, VERR_NO_MEMORY, RT_SRC_POS,
3489 N_("VMDK: Image path: '%s'. Out of memory growing the partition descriptors (%u -> %u)."),
3490 pImage->pszFilename, cOld, cNew);
3491}
3492
3493/**
3494 * @callback_method_impl{FNRTSORTCMP}
3495 */
3496static DECLCALLBACK(int) vmdkRawDescPartComp(void const *pvElement1, void const *pvElement2, void *pvUser)
3497{
3498 RT_NOREF(pvUser);
3499 int64_t const iDelta = ((PVDISKRAWPARTDESC)pvElement1)->offStartInVDisk - ((PVDISKRAWPARTDESC)pvElement2)->offStartInVDisk;
3500 return iDelta < 0 ? -1 : iDelta > 0 ? 1 : 0;
3501}
3502
/**
 * Post processes the partition descriptors.
 *
 * Sorts them and check that they don't overlap.
 *
 * @returns VBox status code.
 * @param   pImage    The image (for error reporting / file name).
 * @param   pRawDesc  The raw descriptor whose partition table is checked.
 * @param   cbSize    Total size of the virtual disk in bytes; partitions must
 *                    end before this.
 */
static int vmdkRawDescPostProcessPartitions(PVMDKIMAGE pImage, PVDISKRAW pRawDesc, uint64_t cbSize)
{
    /*
     * Sort data areas in ascending order of start.
     */
    RTSortShell(pRawDesc->pPartDescs, pRawDesc->cPartDescs, sizeof(pRawDesc->pPartDescs[0]), vmdkRawDescPartComp, NULL);

    /*
     * Check that we don't have overlapping descriptors. If we do, that's an
     * indication that the drive is corrupt or that the RTDvm code is buggy.
     */
    VDISKRAWPARTDESC const *paPartDescs = pRawDesc->pPartDescs;
    for (uint32_t i = 0; i < pRawDesc->cPartDescs; i++)
    {
        /* Reject zero-sized entries and offset+size wrap-around. */
        uint64_t offLast = paPartDescs[i].offStartInVDisk + paPartDescs[i].cbData;
        if (offLast <= paPartDescs[i].offStartInVDisk)
            return vdIfError(pImage->pIfError, VERR_FILESYSTEM_CORRUPT /*?*/, RT_SRC_POS,
                             N_("VMDK: Image path: '%s'. Bogus partition descriptor #%u (%#RX64 LB %#RX64%s): Wrap around or zero"),
                             pImage->pszFilename, i, paPartDescs[i].offStartInVDisk, paPartDescs[i].cbData,
                             paPartDescs[i].pvPartitionData ? " (data)" : "");
        offLast -= 1; /* now the last byte covered by this partition */

        /* The list is sorted, so overlap can only be with the next entry. */
        if (i + 1 < pRawDesc->cPartDescs && offLast >= paPartDescs[i + 1].offStartInVDisk)
            return vdIfError(pImage->pIfError, VERR_FILESYSTEM_CORRUPT /*?*/, RT_SRC_POS,
                             N_("VMDK: Image path: '%s'. Partition descriptor #%u (%#RX64 LB %#RX64%s) overlaps with the next (%#RX64 LB %#RX64%s)"),
                             pImage->pszFilename, i, paPartDescs[i].offStartInVDisk, paPartDescs[i].cbData,
                             paPartDescs[i].pvPartitionData ? " (data)" : "", paPartDescs[i + 1].offStartInVDisk,
                             paPartDescs[i + 1].cbData, paPartDescs[i + 1].pvPartitionData ? " (data)" : "");
        /* The partition must lie entirely within the drive. */
        if (offLast >= cbSize)
            return vdIfError(pImage->pIfError, VERR_FILESYSTEM_CORRUPT /*?*/, RT_SRC_POS,
                             N_("VMDK: Image path: '%s'. Partition descriptor #%u (%#RX64 LB %#RX64%s) goes beyond the end of the drive (%#RX64)"),
                             pImage->pszFilename, i, paPartDescs[i].offStartInVDisk, paPartDescs[i].cbData,
                             paPartDescs[i].pvPartitionData ? " (data)" : "", cbSize);
    }

    return VINF_SUCCESS;
}
3545
3546/**
3547 * Attempts to verify the raw partition path.
3548 *
3549 * We don't want to trust RTDvm and the partition device node morphing blindly.
3550 */
3551static int vmdkRawDescVerifyPartitionPath(PVMDKIMAGE pImage, PVDISKRAWPARTDESC pPartDesc, uint32_t idxPartition,
3552 const char *pszRawDrive, RTFILE hRawDrive, uint32_t cbSector, RTDVMVOLUME hVol)
3553{
3554 RT_NOREF(pImage, pPartDesc, idxPartition, pszRawDrive, hRawDrive, cbSector, hVol);
3555
3556 /*
3557 * Try open the raw partition device.
3558 */
3559 RTFILE hRawPart = NIL_RTFILE;
3560 int rc = RTFileOpen(&hRawPart, pPartDesc->pszRawDevice, RTFILE_O_READ | RTFILE_O_OPEN | RTFILE_O_DENY_NONE);
3561 if (RT_FAILURE(rc))
3562 return vdIfError(pImage->pIfError, rc, RT_SRC_POS,
3563 N_("VMDK: Image path: '%s'. Failed to open partition #%u on '%s' via '%s' (%Rrc)"),
3564 pImage->pszFilename, idxPartition, pszRawDrive, pPartDesc->pszRawDevice, rc);
3565
3566 /*
3567 * Compare the partition UUID if we can get it.
3568 */
3569#ifdef RT_OS_WINDOWS
3570 DWORD cbReturned;
3571
3572 /* 1. Get the device numbers for both handles, they should have the same disk. */
3573 STORAGE_DEVICE_NUMBER DevNum1;
3574 RT_ZERO(DevNum1);
3575 if (!DeviceIoControl((HANDLE)RTFileToNative(hRawDrive), IOCTL_STORAGE_GET_DEVICE_NUMBER,
3576 NULL /*pvInBuffer*/, 0 /*cbInBuffer*/, &DevNum1, sizeof(DevNum1), &cbReturned, NULL /*pOverlapped*/))
3577 rc = vdIfError(pImage->pIfError, RTErrConvertFromWin32(GetLastError()), RT_SRC_POS,
3578 N_("VMDK: Image path: '%s'. IOCTL_STORAGE_GET_DEVICE_NUMBER failed on '%s': %u"),
3579 pImage->pszFilename, pszRawDrive, GetLastError());
3580
3581 STORAGE_DEVICE_NUMBER DevNum2;
3582 RT_ZERO(DevNum2);
3583 if (!DeviceIoControl((HANDLE)RTFileToNative(hRawPart), IOCTL_STORAGE_GET_DEVICE_NUMBER,
3584 NULL /*pvInBuffer*/, 0 /*cbInBuffer*/, &DevNum2, sizeof(DevNum2), &cbReturned, NULL /*pOverlapped*/))
3585 rc = vdIfError(pImage->pIfError, RTErrConvertFromWin32(GetLastError()), RT_SRC_POS,
3586 N_("VMDK: Image path: '%s'. IOCTL_STORAGE_GET_DEVICE_NUMBER failed on '%s': %u"),
3587 pImage->pszFilename, pPartDesc->pszRawDevice, GetLastError());
3588 if ( RT_SUCCESS(rc)
3589 && ( DevNum1.DeviceNumber != DevNum2.DeviceNumber
3590 || DevNum1.DeviceType != DevNum2.DeviceType))
3591 rc = vdIfError(pImage->pIfError, VERR_MISMATCH, RT_SRC_POS,
3592 N_("VMDK: Image path: '%s'. Partition #%u path ('%s') verification failed on '%s' (%#x != %#x || %#x != %#x)"),
3593 pImage->pszFilename, idxPartition, pPartDesc->pszRawDevice, pszRawDrive,
3594 DevNum1.DeviceNumber, DevNum2.DeviceNumber, DevNum1.DeviceType, DevNum2.DeviceType);
3595 if (RT_SUCCESS(rc))
3596 {
3597 /* Get the partitions from the raw drive and match up with the volume info
3598 from RTDvm. The partition number is found in DevNum2. */
3599 DWORD cbNeeded = 0;
3600 if ( DeviceIoControl((HANDLE)RTFileToNative(hRawDrive), IOCTL_DISK_GET_DRIVE_LAYOUT_EX,
3601 NULL /*pvInBuffer*/, 0 /*cbInBuffer*/, NULL, 0, &cbNeeded, NULL /*pOverlapped*/)
3602 || cbNeeded < RT_UOFFSETOF_DYN(DRIVE_LAYOUT_INFORMATION_EX, PartitionEntry[1]))
3603 cbNeeded = RT_UOFFSETOF_DYN(DRIVE_LAYOUT_INFORMATION_EX, PartitionEntry[64]);
3604 cbNeeded += sizeof(PARTITION_INFORMATION_EX) * 2; /* just in case */
3605 DRIVE_LAYOUT_INFORMATION_EX *pLayout = (DRIVE_LAYOUT_INFORMATION_EX *)RTMemTmpAllocZ(cbNeeded);
3606 if (pLayout)
3607 {
3608 cbReturned = 0;
3609 if (DeviceIoControl((HANDLE)RTFileToNative(hRawDrive), IOCTL_DISK_GET_DRIVE_LAYOUT_EX,
3610 NULL /*pvInBuffer*/, 0 /*cbInBuffer*/, pLayout, cbNeeded, &cbReturned, NULL /*pOverlapped*/))
3611 {
3612 /* Find the entry with the given partition number (it's not an index, array contains empty MBR entries ++). */
3613 unsigned iEntry = 0;
3614 while ( iEntry < pLayout->PartitionCount
3615 && pLayout->PartitionEntry[iEntry].PartitionNumber != DevNum2.PartitionNumber)
3616 iEntry++;
3617 if (iEntry < pLayout->PartitionCount)
3618 {
3619 /* Compare the basics */
3620 PARTITION_INFORMATION_EX const * const pLayoutEntry = &pLayout->PartitionEntry[iEntry];
3621 if (pLayoutEntry->StartingOffset.QuadPart != (int64_t)pPartDesc->offStartInVDisk)
3622 rc = vdIfError(pImage->pIfError, VERR_MISMATCH, RT_SRC_POS,
3623 N_("VMDK: Image path: '%s'. Partition #%u path ('%s') verification failed on '%s': StartingOffset %RU64, expected %RU64"),
3624 pImage->pszFilename, idxPartition, pPartDesc->pszRawDevice, pszRawDrive,
3625 pLayoutEntry->StartingOffset.QuadPart, pPartDesc->offStartInVDisk);
3626 else if (pLayoutEntry->PartitionLength.QuadPart != (int64_t)pPartDesc->cbData)
3627 rc = vdIfError(pImage->pIfError, VERR_MISMATCH, RT_SRC_POS,
3628 N_("VMDK: Image path: '%s'. Partition #%u path ('%s') verification failed on '%s': PartitionLength %RU64, expected %RU64"),
3629 pImage->pszFilename, idxPartition, pPartDesc->pszRawDevice, pszRawDrive,
3630 pLayoutEntry->PartitionLength.QuadPart, pPartDesc->cbData);
3631 /** @todo We could compare the MBR type, GPT type and ID. */
3632 RT_NOREF(hVol);
3633 }
3634 else
3635 rc = vdIfError(pImage->pIfError, VERR_MISMATCH, RT_SRC_POS,
3636 N_("VMDK: Image path: '%s'. Partition #%u path ('%s') verification failed on '%s': PartitionCount (%#x vs %#x)"),
3637 pImage->pszFilename, idxPartition, pPartDesc->pszRawDevice, pszRawDrive,
3638 DevNum2.PartitionNumber, pLayout->PartitionCount);
3639# ifndef LOG_ENABLED
3640 if (RT_FAILURE(rc))
3641# endif
3642 {
3643 LogRel(("VMDK: Windows reports %u partitions for '%s':\n", pLayout->PartitionCount, pszRawDrive));
3644 PARTITION_INFORMATION_EX const *pEntry = &pLayout->PartitionEntry[0];
3645 for (DWORD i = 0; i < pLayout->PartitionCount; i++, pEntry++)
3646 {
3647 LogRel(("VMDK: #%u/%u: %016RU64 LB %016RU64 style=%d rewrite=%d",
3648 i, pEntry->PartitionNumber, pEntry->StartingOffset.QuadPart, pEntry->PartitionLength.QuadPart,
3649 pEntry->PartitionStyle, pEntry->RewritePartition));
3650 if (pEntry->PartitionStyle == PARTITION_STYLE_MBR)
3651 LogRel((" type=%#x boot=%d rec=%d hidden=%u\n", pEntry->Mbr.PartitionType, pEntry->Mbr.BootIndicator,
3652 pEntry->Mbr.RecognizedPartition, pEntry->Mbr.HiddenSectors));
3653 else if (pEntry->PartitionStyle == PARTITION_STYLE_GPT)
3654 LogRel((" type=%RTuuid id=%RTuuid aatrib=%RX64 name=%.36ls\n", &pEntry->Gpt.PartitionType,
3655 &pEntry->Gpt.PartitionId, pEntry->Gpt.Attributes, &pEntry->Gpt.Name[0]));
3656 else
3657 LogRel(("\n"));
3658 }
3659 LogRel(("VMDK: Looked for partition #%u (%u, '%s') at %RU64 LB %RU64\n", DevNum2.PartitionNumber,
3660 idxPartition, pPartDesc->pszRawDevice, pPartDesc->offStartInVDisk, pPartDesc->cbData));
3661 }
3662 }
3663 else
3664 rc = vdIfError(pImage->pIfError, RTErrConvertFromWin32(GetLastError()), RT_SRC_POS,
3665 N_("VMDK: Image path: '%s'. IOCTL_DISK_GET_DRIVE_LAYOUT_EX failed on '%s': %u (cb %u, cbRet %u)"),
3666 pImage->pszFilename, pPartDesc->pszRawDevice, GetLastError(), cbNeeded, cbReturned);
3667 RTMemTmpFree(pLayout);
3668 }
3669 else
3670 rc = VERR_NO_TMP_MEMORY;
3671 }
3672#else
3673 RT_NOREF(hVol); /* PORTME */
3674#endif
3675 if (RT_SUCCESS(rc))
3676 {
3677 /*
3678 * Compare the first 32 sectors of the partition.
3679 *
3680 * This might not be conclusive, but for partitions formatted with the more
3681 * common file systems it should be as they have a superblock copy at or near
3682 * the start of the partition (fat, fat32, ntfs, and ext4 does at least).
3683 */
3684 size_t const cbToCompare = (size_t)RT_MIN(pPartDesc->cbData / cbSector, 32) * cbSector;
3685 uint8_t *pbSector1 = (uint8_t *)RTMemTmpAlloc(cbToCompare * 2);
3686 if (pbSector1 != NULL)
3687 {
3688 uint8_t *pbSector2 = pbSector1 + cbToCompare;
3689
3690 /* Do the comparing, we repeat if it fails and the data might be volatile. */
3691 uint64_t uPrevCrc1 = 0;
3692 uint64_t uPrevCrc2 = 0;
3693 uint32_t cStable = 0;
3694 for (unsigned iTry = 0; iTry < 256; iTry++)
3695 {
3696 rc = RTFileReadAt(hRawDrive, pPartDesc->offStartInVDisk, pbSector1, cbToCompare, NULL);
3697 if (RT_SUCCESS(rc))
3698 {
3699 rc = RTFileReadAt(hRawPart, pPartDesc->offStartInDevice, pbSector2, cbToCompare, NULL);
3700 if (RT_SUCCESS(rc))
3701 {
3702 if (memcmp(pbSector1, pbSector2, cbToCompare) != 0)
3703 {
3704 rc = VERR_MISMATCH;
3705
3706 /* Do data stability checks before repeating: */
3707 uint64_t const uCrc1 = RTCrc64(pbSector1, cbToCompare);
3708 uint64_t const uCrc2 = RTCrc64(pbSector2, cbToCompare);
3709 if ( uPrevCrc1 != uCrc1
3710 || uPrevCrc2 != uCrc2)
3711 cStable = 0;
3712 else if (++cStable > 4)
3713 break;
3714 uPrevCrc1 = uCrc1;
3715 uPrevCrc2 = uCrc2;
3716 continue;
3717 }
3718 rc = VINF_SUCCESS;
3719 }
3720 else
3721 rc = vdIfError(pImage->pIfError, rc, RT_SRC_POS,
3722 N_("VMDK: Image path: '%s'. Error reading %zu bytes from '%s' at offset %RU64 (%Rrc)"),
3723 pImage->pszFilename, cbToCompare, pPartDesc->pszRawDevice, pPartDesc->offStartInDevice, rc);
3724 }
3725 else
3726 rc = vdIfError(pImage->pIfError, rc, RT_SRC_POS,
3727 N_("VMDK: Image path: '%s'. Error reading %zu bytes from '%s' at offset %RU64 (%Rrc)"),
3728 pImage->pszFilename, cbToCompare, pszRawDrive, pPartDesc->offStartInVDisk, rc);
3729 break;
3730 }
3731 if (rc == VERR_MISMATCH)
3732 {
3733 /* Find the first mismatching bytes: */
3734 size_t offMissmatch = 0;
3735 while (offMissmatch < cbToCompare && pbSector1[offMissmatch] == pbSector2[offMissmatch])
3736 offMissmatch++;
3737 int cbSample = (int)RT_MIN(cbToCompare - offMissmatch, 16);
3738
3739 if (cStable > 0)
3740 rc = vdIfError(pImage->pIfError, rc, RT_SRC_POS,
3741 N_("VMDK: Image path: '%s'. Partition #%u path ('%s') verification failed on '%s' (cStable=%d @%#zx: %.*Rhxs vs %.*Rhxs)"),
3742 pImage->pszFilename, idxPartition, pPartDesc->pszRawDevice, pszRawDrive, cStable,
3743 offMissmatch, cbSample, &pbSector1[offMissmatch], cbSample, &pbSector2[offMissmatch]);
3744 else
3745 {
3746 LogRel(("VMDK: Image path: '%s'. Partition #%u path ('%s') verification undecided on '%s' because of unstable data! (@%#zx: %.*Rhxs vs %.*Rhxs)\n",
3747 pImage->pszFilename, idxPartition, pPartDesc->pszRawDevice, pszRawDrive,
3748 offMissmatch, cbSample, &pbSector1[offMissmatch], cbSample, &pbSector2[offMissmatch]));
3749 rc = -rc;
3750 }
3751 }
3752
3753 RTMemTmpFree(pbSector1);
3754 }
3755 else
3756 rc = vdIfError(pImage->pIfError, VERR_NO_TMP_MEMORY, RT_SRC_POS,
3757 N_("VMDK: Image path: '%s'. Failed to allocate %zu bytes for a temporary read buffer\n"),
3758 pImage->pszFilename, cbToCompare * 2);
3759 }
3760 RTFileClose(hRawPart);
3761 return rc;
3762}
3763
3764#ifdef RT_OS_WINDOWS
3765/**
3766 * Construct the device name for the given partition number.
3767 */
3768static int vmdkRawDescWinMakePartitionName(PVMDKIMAGE pImage, const char *pszRawDrive, RTFILE hRawDrive, uint32_t idxPartition,
3769 char **ppszRawPartition)
3770{
3771 int rc = VINF_SUCCESS;
3772 DWORD cbReturned = 0;
3773 STORAGE_DEVICE_NUMBER DevNum;
3774 RT_ZERO(DevNum);
3775 if (DeviceIoControl((HANDLE)RTFileToNative(hRawDrive), IOCTL_STORAGE_GET_DEVICE_NUMBER,
3776 NULL /*pvInBuffer*/, 0 /*cbInBuffer*/, &DevNum, sizeof(DevNum), &cbReturned, NULL /*pOverlapped*/))
3777 RTStrAPrintf(ppszRawPartition, "\\\\.\\Harddisk%uPartition%u", DevNum.DeviceNumber, idxPartition);
3778 else
3779 rc = vdIfError(pImage->pIfError, RTErrConvertFromWin32(GetLastError()), RT_SRC_POS,
3780 N_("VMDK: Image path: '%s'. IOCTL_STORAGE_GET_DEVICE_NUMBER failed on '%s': %u"),
3781 pImage->pszFilename, pszRawDrive, GetLastError());
3782 return rc;
3783}
3784#endif /* RT_OS_WINDOWS */
3785
3786/**
3787 * Worker for vmdkMakeRawDescriptor that adds partition descriptors when the
3788 * 'Partitions' configuration value is present.
3789 *
3790 * @returns VBox status code, error message has been set on failure.
3791 *
3792 * @note Caller is assumed to clean up @a pRawDesc and release
3793 * @a *phVolToRelease.
3794 * @internal
3795 */
3796static int vmdkRawDescDoPartitions(PVMDKIMAGE pImage, RTDVM hVolMgr, PVDISKRAW pRawDesc,
3797 RTFILE hRawDrive, const char *pszRawDrive, uint32_t cbSector,
3798 uint32_t fPartitions, uint32_t fPartitionsReadOnly, bool fRelative,
3799 PRTDVMVOLUME phVolToRelease)
3800{
3801 *phVolToRelease = NIL_RTDVMVOLUME;
3802
3803 /* Check sanity/understanding. */
3804 Assert(fPartitions);
3805 Assert((fPartitions & fPartitionsReadOnly) == fPartitionsReadOnly); /* RO should be a sub-set */
3806
3807 /*
3808 * Allocate on descriptor for each volume up front.
3809 */
3810 uint32_t const cVolumes = RTDvmMapGetValidVolumes(hVolMgr);
3811
3812 PVDISKRAWPARTDESC paPartDescs = NULL;
3813 int rc = vmdkRawDescAppendPartDesc(pImage, pRawDesc, cVolumes, &paPartDescs);
3814 AssertRCReturn(rc, rc);
3815
3816 /*
3817 * Enumerate the partitions (volumes) on the disk and create descriptors for each of them.
3818 */
3819 uint32_t fPartitionsLeft = fPartitions;
3820 RTDVMVOLUME hVol = NIL_RTDVMVOLUME; /* the current volume, needed for getting the next. */
3821 for (uint32_t i = 0; i < cVolumes; i++)
3822 {
3823 /*
3824 * Get the next/first volume and release the current.
3825 */
3826 RTDVMVOLUME hVolNext = NIL_RTDVMVOLUME;
3827 if (i == 0)
3828 rc = RTDvmMapQueryFirstVolume(hVolMgr, &hVolNext);
3829 else
3830 rc = RTDvmMapQueryNextVolume(hVolMgr, hVol, &hVolNext);
3831 if (RT_FAILURE(rc))
3832 return vdIfError(pImage->pIfError, rc, RT_SRC_POS,
3833 N_("VMDK: Image path: '%s'. Volume enumeration failed at volume #%u on '%s' (%Rrc)"),
3834 pImage->pszFilename, i, pszRawDrive, rc);
3835 uint32_t cRefs = RTDvmVolumeRelease(hVol);
3836 Assert(cRefs != UINT32_MAX); RT_NOREF(cRefs);
3837 *phVolToRelease = hVol = hVolNext;
3838
3839 /*
3840 * Depending on the fPartitions selector and associated read-only mask,
3841 * the guest either gets read-write or read-only access (bits set)
3842 * or no access (selector bit clear, access directed to the VMDK).
3843 */
3844 paPartDescs[i].cbData = RTDvmVolumeGetSize(hVol);
3845
3846 uint64_t offVolumeEndIgnored = 0;
3847 rc = RTDvmVolumeQueryRange(hVol, &paPartDescs[i].offStartInVDisk, &offVolumeEndIgnored);
3848 if (RT_FAILURE(rc))
3849 return vdIfError(pImage->pIfError, rc, RT_SRC_POS,
3850 N_("VMDK: Image path: '%s'. Failed to get location of volume #%u on '%s' (%Rrc)"),
3851 pImage->pszFilename, i, pszRawDrive, rc);
3852 Assert(paPartDescs[i].cbData == offVolumeEndIgnored + 1 - paPartDescs[i].offStartInVDisk);
3853
3854 /* Note! The index must match IHostDrivePartition::number. */
3855 uint32_t idxPartition = RTDvmVolumeGetIndex(hVol, RTDVMVOLIDX_HOST);
3856 if ( idxPartition < 32
3857 && (fPartitions & RT_BIT_32(idxPartition)))
3858 {
3859 fPartitionsLeft &= ~RT_BIT_32(idxPartition);
3860 if (fPartitionsReadOnly & RT_BIT_32(idxPartition))
3861 paPartDescs[i].uFlags |= VDISKRAW_READONLY;
3862
3863 if (!fRelative)
3864 {
3865 /*
3866 * Accessing the drive thru the main device node (pRawDesc->pszRawDisk).
3867 */
3868 paPartDescs[i].offStartInDevice = paPartDescs[i].offStartInVDisk;
3869 paPartDescs[i].pszRawDevice = RTStrDup(pszRawDrive);
3870 AssertPtrReturn(paPartDescs[i].pszRawDevice, VERR_NO_STR_MEMORY);
3871 }
3872 else
3873 {
3874 /*
3875 * Relative means access the partition data via the device node for that
3876 * partition, allowing the sysadmin/OS to allow a user access to individual
3877 * partitions without necessarily being able to compromise the host OS.
3878 * Obviously, the creation of the VMDK requires read access to the main
3879 * device node for the drive, but that's a one-time thing and can be done
3880 * by the sysadmin. Here data starts at offset zero in the device node.
3881 */
3882 paPartDescs[i].offStartInDevice = 0;
3883
3884#if defined(RT_OS_DARWIN) || defined(RT_OS_FREEBSD)
3885 /* /dev/rdisk1 -> /dev/rdisk1s2 (s=slice) */
3886 RTStrAPrintf(&paPartDescs[i].pszRawDevice, "%ss%u", pszRawDrive, idxPartition);
3887#elif defined(RT_OS_LINUX)
3888 /* Two naming schemes here: /dev/nvme0n1 -> /dev/nvme0n1p1; /dev/sda -> /dev/sda1 */
3889 RTStrAPrintf(&paPartDescs[i].pszRawDevice,
3890 RT_C_IS_DIGIT(pszRawDrive[strlen(pszRawDrive) - 1]) ? "%sp%u" : "%s%u", pszRawDrive, idxPartition);
3891#elif defined(RT_OS_WINDOWS)
3892 rc = vmdkRawDescWinMakePartitionName(pImage, pszRawDrive, hRawDrive, idxPartition, &paPartDescs[i].pszRawDevice);
3893 AssertRCReturn(rc, rc);
3894#else
3895 AssertFailedReturn(VERR_INTERNAL_ERROR_4); /* The option parsing code should have prevented this - PORTME */
3896#endif
3897 AssertPtrReturn(paPartDescs[i].pszRawDevice, VERR_NO_STR_MEMORY);
3898
3899 rc = vmdkRawDescVerifyPartitionPath(pImage, &paPartDescs[i], idxPartition, pszRawDrive, hRawDrive, cbSector, hVol);
3900 AssertRCReturn(rc, rc);
3901 }
3902 }
3903 else
3904 {
3905 /* Not accessible to the guest. */
3906 paPartDescs[i].offStartInDevice = 0;
3907 paPartDescs[i].pszRawDevice = NULL;
3908 }
3909 } /* for each volume */
3910
3911 RTDvmVolumeRelease(hVol);
3912 *phVolToRelease = NIL_RTDVMVOLUME;
3913
3914 /*
3915 * Check that we found all the partitions the user selected.
3916 */
3917 if (fPartitionsLeft)
3918 {
3919 char szLeft[3 * sizeof(fPartitions) * 8];
3920 size_t cchLeft = 0;
3921 for (unsigned i = 0; i < sizeof(fPartitions) * 8; i++)
3922 if (fPartitionsLeft & RT_BIT_32(i))
3923 cchLeft += RTStrPrintf(&szLeft[cchLeft], sizeof(szLeft) - cchLeft, cchLeft ? "%u" : ",%u", i);
3924 return vdIfError(pImage->pIfError, VERR_INVALID_PARAMETER, RT_SRC_POS,
3925 N_("VMDK: Image path: '%s'. Not all the specified partitions for drive '%s' was found: %s"),
3926 pImage->pszFilename, pszRawDrive, szLeft);
3927 }
3928
3929 return VINF_SUCCESS;
3930}
3931
3932/**
3933 * Worker for vmdkMakeRawDescriptor that adds partition descriptors with copies
3934 * of the partition tables and associated padding areas when the 'Partitions'
3935 * configuration value is present.
3936 *
3937 * The guest is not allowed access to the partition tables, however it needs
3938 * them to be able to access the drive. So, create descriptors for each of the
3939 * tables and attach the current disk content. vmdkCreateRawImage() will later
3940 * write the content to the VMDK. Any changes the guest later makes to the
3941 * partition tables will then go to the VMDK copy, rather than the host drive.
3942 *
3943 * @returns VBox status code, error message has been set on failure.
3944 *
3945 * @note Caller is assumed to clean up @a pRawDesc
3946 * @internal
3947 */
3948static int vmdkRawDescDoCopyPartitionTables(PVMDKIMAGE pImage, RTDVM hVolMgr, PVDISKRAW pRawDesc,
3949 const char *pszRawDrive, RTFILE hRawDrive, void *pvBootSector, size_t cbBootSector)
3950{
3951 /*
3952 * Query the locations.
3953 */
3954 /* Determin how many locations there are: */
3955 size_t cLocations = 0;
3956 int rc = RTDvmMapQueryTableLocations(hVolMgr, RTDVMMAPQTABLOC_F_INCLUDE_LEGACY, NULL, 0, &cLocations);
3957 if (rc != VERR_BUFFER_OVERFLOW)
3958 return vdIfError(pImage->pIfError, rc, RT_SRC_POS,
3959 N_("VMDK: Image path: '%s'. RTDvmMapQueryTableLocations failed on '%s' (%Rrc)"),
3960 pImage->pszFilename, pszRawDrive, rc);
3961 AssertReturn(cLocations > 0 && cLocations < _16M, VERR_INTERNAL_ERROR_5);
3962
3963 /* We can allocate the partition descriptors here to save an intentation level. */
3964 PVDISKRAWPARTDESC paPartDescs = NULL;
3965 rc = vmdkRawDescAppendPartDesc(pImage, pRawDesc, (uint32_t)cLocations, &paPartDescs);
3966 AssertRCReturn(rc, rc);
3967
3968 /* Allocate the result table and repeat the location table query: */
3969 PRTDVMTABLELOCATION paLocations = (PRTDVMTABLELOCATION)RTMemAllocZ(sizeof(paLocations[0]) * cLocations);
3970 if (!paLocations)
3971 return vdIfError(pImage->pIfError, VERR_NO_MEMORY, RT_SRC_POS, N_("VMDK: Image path: '%s'. Failed to allocate %zu bytes"),
3972 pImage->pszFilename, sizeof(paLocations[0]) * cLocations);
3973 rc = RTDvmMapQueryTableLocations(hVolMgr, RTDVMMAPQTABLOC_F_INCLUDE_LEGACY, paLocations, cLocations, NULL);
3974 if (RT_SUCCESS(rc))
3975 {
3976 /*
3977 * Translate them into descriptors.
3978 *
3979 * We restrict the amount of partition alignment padding to 4MiB as more
3980 * will just be a waste of space. The use case for including the padding
3981 * are older boot loaders and boot manager (including one by a team member)
3982 * that put data and code in the 62 sectors between the MBR and the first
3983 * partition (total of 63). Later CHS was abandond and partition started
3984 * being aligned on power of two sector boundraries (typically 64KiB or
3985 * 1MiB depending on the media size).
3986 */
3987 for (size_t i = 0; i < cLocations && RT_SUCCESS(rc); i++)
3988 {
3989 Assert(paLocations[i].cb > 0);
3990 if (paLocations[i].cb <= _64M)
3991 {
3992 /* Create the partition descriptor entry: */
3993 //paPartDescs[i].pszRawDevice = NULL;
3994 //paPartDescs[i].offStartInDevice = 0;
3995 //paPartDescs[i].uFlags = 0;
3996 paPartDescs[i].offStartInVDisk = paLocations[i].off;
3997 paPartDescs[i].cbData = paLocations[i].cb;
3998 if (paPartDescs[i].cbData < _4M)
3999 paPartDescs[i].cbData = RT_MIN(paPartDescs[i].cbData + paLocations[i].cbPadding, _4M);
4000 paPartDescs[i].pvPartitionData = RTMemAllocZ((size_t)paPartDescs[i].cbData);
4001 if (paPartDescs[i].pvPartitionData)
4002 {
4003 /* Read the content from the drive: */
4004 rc = RTFileReadAt(hRawDrive, paPartDescs[i].offStartInVDisk, paPartDescs[i].pvPartitionData,
4005 (size_t)paPartDescs[i].cbData, NULL);
4006 if (RT_SUCCESS(rc))
4007 {
4008 /* Do we have custom boot sector code? */
4009 if (pvBootSector && cbBootSector && paPartDescs[i].offStartInVDisk == 0)
4010 {
4011 /* Note! Old code used to quietly drop the bootsector if it was considered too big.
4012 Instead we fail as we weren't able to do what the user requested us to do.
4013 Better if the user knows than starts questioning why the guest isn't
4014 booting as expected. */
4015 if (cbBootSector <= paPartDescs[i].cbData)
4016 memcpy(paPartDescs[i].pvPartitionData, pvBootSector, cbBootSector);
4017 else
4018 rc = vdIfError(pImage->pIfError, VERR_TOO_MUCH_DATA, RT_SRC_POS,
4019 N_("VMDK: Image path: '%s'. The custom boot sector is too big: %zu bytes, %RU64 bytes available"),
4020 pImage->pszFilename, cbBootSector, paPartDescs[i].cbData);
4021 }
4022 }
4023 else
4024 rc = vdIfError(pImage->pIfError, rc, RT_SRC_POS,
4025 N_("VMDK: Image path: '%s'. Failed to read partition at off %RU64 length %zu from '%s' (%Rrc)"),
4026 pImage->pszFilename, paPartDescs[i].offStartInVDisk,
4027 (size_t)paPartDescs[i].cbData, pszRawDrive, rc);
4028 }
4029 else
4030 rc = vdIfError(pImage->pIfError, rc, RT_SRC_POS,
4031 N_("VMDK: Image path: '%s'. Failed to allocate %zu bytes for copying the partition table at off %RU64"),
4032 pImage->pszFilename, (size_t)paPartDescs[i].cbData, paPartDescs[i].offStartInVDisk);
4033 }
4034 else
4035 rc = vdIfError(pImage->pIfError, VERR_TOO_MUCH_DATA, RT_SRC_POS,
4036 N_("VMDK: Image path: '%s'. Partition table #%u at offset %RU64 in '%s' is to big: %RU64 bytes"),
4037 pImage->pszFilename, i, paLocations[i].off, pszRawDrive, paLocations[i].cb);
4038 }
4039 }
4040 else
4041 rc = vdIfError(pImage->pIfError, rc, RT_SRC_POS,
4042 N_("VMDK: Image path: '%s'. RTDvmMapQueryTableLocations failed on '%s' (%Rrc)"),
4043 pImage->pszFilename, pszRawDrive, rc);
4044 RTMemFree(paLocations);
4045 return rc;
4046}
4047
4048/**
4049 * Opens the volume manager for the raw drive when in selected-partition mode.
4050 *
4051 * @param pImage The VMDK image (for errors).
4052 * @param hRawDrive The raw drive handle.
4053 * @param pszRawDrive The raw drive device path (for errors).
4054 * @param cbSector The sector size.
4055 * @param phVolMgr Where to return the handle to the volume manager on
4056 * success.
4057 * @returns VBox status code, errors have been reported.
4058 * @internal
4059 */
4060static int vmdkRawDescOpenVolMgr(PVMDKIMAGE pImage, RTFILE hRawDrive, const char *pszRawDrive, uint32_t cbSector, PRTDVM phVolMgr)
4061{
4062 *phVolMgr = NIL_RTDVM;
4063
4064 RTVFSFILE hVfsFile = NIL_RTVFSFILE;
4065 int rc = RTVfsFileFromRTFile(hRawDrive, RTFILE_O_READ | RTFILE_O_OPEN | RTFILE_O_DENY_NONE, true /*fLeaveOpen*/, &hVfsFile);
4066 if (RT_FAILURE(rc))
4067 return vdIfError(pImage->pIfError, rc, RT_SRC_POS,
4068 N_("VMDK: Image path: '%s'. RTVfsFileFromRTFile failed for '%s' handle (%Rrc)"),
4069 pImage->pszFilename, pszRawDrive, rc);
4070
4071 RTDVM hVolMgr = NIL_RTDVM;
4072 rc = RTDvmCreate(&hVolMgr, hVfsFile, cbSector, 0 /*fFlags*/);
4073
4074 RTVfsFileRelease(hVfsFile);
4075
4076 if (RT_FAILURE(rc))
4077 return vdIfError(pImage->pIfError, rc, RT_SRC_POS,
4078 N_("VMDK: Image path: '%s'. Failed to create volume manager instance for '%s' (%Rrc)"),
4079 pImage->pszFilename, pszRawDrive, rc);
4080
4081 rc = RTDvmMapOpen(hVolMgr);
4082 if (RT_SUCCESS(rc))
4083 {
4084 *phVolMgr = hVolMgr;
4085 return VINF_SUCCESS;
4086 }
4087 RTDvmRelease(hVolMgr);
4088 return vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: Image path: '%s'. RTDvmMapOpen failed for '%s' (%Rrc)"),
4089 pImage->pszFilename, pszRawDrive, rc);
4090}
4091
4092/**
4093 * Opens the raw drive device and get the sizes for it.
4094 *
4095 * @param pImage The image (for error reporting).
4096 * @param pszRawDrive The device/whatever to open.
4097 * @param phRawDrive Where to return the file handle.
4098 * @param pcbRawDrive Where to return the size.
4099 * @param pcbSector Where to return the sector size.
4100 * @returns IPRT status code, errors have been reported.
4101 * @internal
4102 */
4103static int vmkdRawDescOpenDevice(PVMDKIMAGE pImage, const char *pszRawDrive,
4104 PRTFILE phRawDrive, uint64_t *pcbRawDrive, uint32_t *pcbSector)
4105{
4106 /*
4107 * Open the device for the raw drive.
4108 */
4109 RTFILE hRawDrive = NIL_RTFILE;
4110 int rc = RTFileOpen(&hRawDrive, pszRawDrive, RTFILE_O_READ | RTFILE_O_OPEN | RTFILE_O_DENY_NONE);
4111 if (RT_FAILURE(rc))
4112 return vdIfError(pImage->pIfError, rc, RT_SRC_POS,
4113 N_("VMDK: Image path: '%s'. Failed to open the raw drive '%s' for reading (%Rrc)"),
4114 pImage->pszFilename, pszRawDrive, rc);
4115
4116 /*
4117 * Get the sector size.
4118 */
4119 uint32_t cbSector = 0;
4120 rc = RTFileQuerySectorSize(hRawDrive, &cbSector);
4121 if (RT_SUCCESS(rc))
4122 {
4123 /* sanity checks */
4124 if ( cbSector >= 512
4125 && cbSector <= _64K
4126 && RT_IS_POWER_OF_TWO(cbSector))
4127 {
4128 /*
4129 * Get the size.
4130 */
4131 uint64_t cbRawDrive = 0;
4132 rc = RTFileQuerySize(hRawDrive, &cbRawDrive);
4133 if (RT_SUCCESS(rc))
4134 {
4135 /* Check whether cbSize is actually sensible. */
4136 if (cbRawDrive > cbSector && (cbRawDrive % cbSector) == 0)
4137 {
4138 *phRawDrive = hRawDrive;
4139 *pcbRawDrive = cbRawDrive;
4140 *pcbSector = cbSector;
4141 return VINF_SUCCESS;
4142 }
4143 rc = vdIfError(pImage->pIfError, VERR_INVALID_PARAMETER, RT_SRC_POS,
4144 N_("VMDK: Image path: '%s'. Got a bogus size for the raw drive '%s': %RU64 (sector size %u)"),
4145 pImage->pszFilename, pszRawDrive, cbRawDrive, cbSector);
4146 }
4147 else
4148 rc = vdIfError(pImage->pIfError, rc, RT_SRC_POS,
4149 N_("VMDK: Image path: '%s'. Failed to query size of the drive '%s' (%Rrc)"),
4150 pImage->pszFilename, pszRawDrive, rc);
4151 }
4152 else
4153 rc = vdIfError(pImage->pIfError, VERR_OUT_OF_RANGE, RT_SRC_POS,
4154 N_("VMDK: Image path: '%s'. Unsupported sector size for '%s': %u (%#x)"),
4155 pImage->pszFilename, pszRawDrive, cbSector, cbSector);
4156 }
4157 else
4158 rc = vdIfError(pImage->pIfError, rc, RT_SRC_POS,
4159 N_("VMDK: Image path: '%s'. Failed to get the sector size for '%s' (%Rrc)"),
4160 pImage->pszFilename, pszRawDrive, rc);
4161 RTFileClose(hRawDrive);
4162 return rc;
4163}
4164
4165/**
4166 * Reads the raw disk configuration, leaving initalization and cleanup to the
4167 * caller (regardless of return status).
4168 *
4169 * @returns VBox status code, errors properly reported.
4170 * @internal
4171 */
4172static int vmdkRawDescParseConfig(PVMDKIMAGE pImage, char **ppszRawDrive,
4173 uint32_t *pfPartitions, uint32_t *pfPartitionsReadOnly,
4174 void **ppvBootSector, size_t *pcbBootSector, bool *pfRelative,
4175 char **ppszFreeMe)
4176{
4177 PVDINTERFACECONFIG pImgCfg = VDIfConfigGet(pImage->pVDIfsImage);
4178 if (!pImgCfg)
4179 return vdIfError(pImage->pIfError, VERR_INVALID_PARAMETER, RT_SRC_POS,
4180 N_("VMDK: Image path: '%s'. Getting config interface failed"), pImage->pszFilename);
4181
4182 /*
4183 * RawDrive = path
4184 */
4185 int rc = VDCFGQueryStringAlloc(pImgCfg, "RawDrive", ppszRawDrive);
4186 if (RT_FAILURE(rc))
4187 return vdIfError(pImage->pIfError, rc, RT_SRC_POS,
4188 N_("VMDK: Image path: '%s'. Getting 'RawDrive' configuration failed (%Rrc)"), pImage->pszFilename, rc);
4189 AssertPtrReturn(*ppszRawDrive, VERR_INTERNAL_ERROR_3);
4190
4191 /*
4192 * Partitions=n[r][,...]
4193 */
4194 uint32_t const cMaxPartitionBits = sizeof(*pfPartitions) * 8 /* ASSUMES 8 bits per char */;
4195 *pfPartitions = *pfPartitionsReadOnly = 0;
4196
4197 rc = VDCFGQueryStringAlloc(pImgCfg, "Partitions", ppszFreeMe);
4198 if (RT_SUCCESS(rc))
4199 {
4200 char *psz = *ppszFreeMe;
4201 while (*psz != '\0')
4202 {
4203 char *pszNext;
4204 uint32_t u32;
4205 rc = RTStrToUInt32Ex(psz, &pszNext, 0, &u32);
4206 if (rc == VWRN_NUMBER_TOO_BIG || rc == VWRN_NEGATIVE_UNSIGNED)
4207 rc = -rc;
4208 if (RT_FAILURE(rc))
4209 return vdIfError(pImage->pIfError, VERR_INVALID_PARAMETER, RT_SRC_POS,
4210 N_("VMDK: Image path: '%s'. Parsing 'Partitions' config value failed. Incorrect value (%Rrc): %s"),
4211 pImage->pszFilename, rc, psz);
4212 if (u32 >= cMaxPartitionBits)
4213 return vdIfError(pImage->pIfError, VERR_INVALID_PARAMETER, RT_SRC_POS,
4214 N_("VMDK: Image path: '%s'. 'Partitions' config sub-value out of range: %RU32, max %RU32"),
4215 pImage->pszFilename, u32, cMaxPartitionBits);
4216 *pfPartitions |= RT_BIT_32(u32);
4217 psz = pszNext;
4218 if (*psz == 'r')
4219 {
4220 *pfPartitionsReadOnly |= RT_BIT_32(u32);
4221 psz++;
4222 }
4223 if (*psz == ',')
4224 psz++;
4225 else if (*psz != '\0')
4226 return vdIfError(pImage->pIfError, VERR_INVALID_PARAMETER, RT_SRC_POS,
4227 N_("VMDK: Image path: '%s'. Malformed 'Partitions' config value, expected separator: %s"),
4228 pImage->pszFilename, psz);
4229 }
4230
4231 RTStrFree(*ppszFreeMe);
4232 *ppszFreeMe = NULL;
4233 }
4234 else if (rc != VERR_CFGM_VALUE_NOT_FOUND)
4235 return vdIfError(pImage->pIfError, rc, RT_SRC_POS,
4236 N_("VMDK: Image path: '%s'. Getting 'Partitions' configuration failed (%Rrc)"), pImage->pszFilename, rc);
4237
4238 /*
4239 * BootSector=base64
4240 */
4241 rc = VDCFGQueryStringAlloc(pImgCfg, "BootSector", ppszFreeMe);
4242 if (RT_SUCCESS(rc))
4243 {
4244 ssize_t cbBootSector = RTBase64DecodedSize(*ppszFreeMe, NULL);
4245 if (cbBootSector < 0)
4246 return vdIfError(pImage->pIfError, VERR_INVALID_BASE64_ENCODING, RT_SRC_POS,
4247 N_("VMDK: Image path: '%s'. BASE64 decoding failed on the custom bootsector for '%s'"),
4248 pImage->pszFilename, *ppszRawDrive);
4249 if (cbBootSector == 0)
4250 return vdIfError(pImage->pIfError, VERR_INVALID_PARAMETER, RT_SRC_POS,
4251 N_("VMDK: Image path: '%s'. Custom bootsector for '%s' is zero bytes big"),
4252 pImage->pszFilename, *ppszRawDrive);
4253 if (cbBootSector > _4M) /* this is just a preliminary max */
4254 return vdIfError(pImage->pIfError, VERR_INVALID_PARAMETER, RT_SRC_POS,
4255 N_("VMDK: Image path: '%s'. Custom bootsector for '%s' is way too big: %zu bytes, max 4MB"),
4256 pImage->pszFilename, *ppszRawDrive, cbBootSector);
4257
4258 /* Refuse the boot sector if whole-drive. This used to be done quietly,
4259 however, bird disagrees and thinks the user should be told that what
4260 he/she/it tries to do isn't possible. There should be less head
4261 scratching this way when the guest doesn't do the expected thing. */
4262 if (!*pfPartitions)
4263 return vdIfError(pImage->pIfError, VERR_INVALID_PARAMETER, RT_SRC_POS,
4264 N_("VMDK: Image path: '%s'. Custom bootsector for '%s' is not supported for whole-drive configurations, only when selecting partitions"),
4265 pImage->pszFilename, *ppszRawDrive);
4266
4267 *pcbBootSector = (size_t)cbBootSector;
4268 *ppvBootSector = RTMemAlloc((size_t)cbBootSector);
4269 if (!*ppvBootSector)
4270 return vdIfError(pImage->pIfError, VERR_NO_MEMORY, RT_SRC_POS,
4271 N_("VMDK: Image path: '%s'. Failed to allocate %zd bytes for the custom bootsector for '%s'"),
4272 pImage->pszFilename, cbBootSector, *ppszRawDrive);
4273
4274 rc = RTBase64Decode(*ppszFreeMe, *ppvBootSector, cbBootSector, NULL /*pcbActual*/, NULL /*ppszEnd*/);
4275 if (RT_FAILURE(rc))
4276 return vdIfError(pImage->pIfError, VERR_NO_MEMORY, RT_SRC_POS,
4277 N_("VMDK: Image path: '%s'. Base64 decoding of the custom boot sector for '%s' failed (%Rrc)"),
4278 pImage->pszFilename, *ppszRawDrive, rc);
4279
4280 RTStrFree(*ppszFreeMe);
4281 *ppszFreeMe = NULL;
4282 }
4283 else if (rc != VERR_CFGM_VALUE_NOT_FOUND)
4284 return vdIfError(pImage->pIfError, rc, RT_SRC_POS,
4285 N_("VMDK: Image path: '%s'. Getting 'BootSector' configuration failed (%Rrc)"), pImage->pszFilename, rc);
4286
4287 /*
4288 * Relative=0/1
4289 */
4290 *pfRelative = false;
4291 rc = VDCFGQueryBool(pImgCfg, "Relative", pfRelative);
4292 if (RT_SUCCESS(rc))
4293 {
4294 if (!*pfPartitions && *pfRelative != false)
4295 return vdIfError(pImage->pIfError, VERR_INVALID_PARAMETER, RT_SRC_POS,
4296 N_("VMDK: Image path: '%s'. The 'Relative' option is not supported for whole-drive configurations, only when selecting partitions"),
4297 pImage->pszFilename);
4298#if !defined(RT_OS_DARWIN) && !defined(RT_OS_LINUX) && !defined(RT_OS_FREEBSD) && !defined(RT_OS_WINDOWS) /* PORTME */
4299 if (*pfRelative == true)
4300 return vdIfError(pImage->pIfError, VERR_INVALID_PARAMETER, RT_SRC_POS,
4301 N_("VMDK: Image path: '%s'. The 'Relative' option is not supported on this host OS"),
4302 pImage->pszFilename);
4303#endif
4304 }
4305 else if (rc != VERR_CFGM_VALUE_NOT_FOUND)
4306 return vdIfError(pImage->pIfError, rc, RT_SRC_POS,
4307 N_("VMDK: Image path: '%s'. Getting 'Relative' configuration failed (%Rrc)"), pImage->pszFilename, rc);
4308 else
4309#ifdef RT_OS_DARWIN /* different default on macOS, see ticketref:1461 (comment 20). */
4310 *pfRelative = true;
4311#else
4312 *pfRelative = false;
4313#endif
4314
4315 return VINF_SUCCESS;
4316}
4317
4318/**
4319 * Creates a raw drive (nee disk) descriptor.
4320 *
4321 * This was originally done in VBoxInternalManage.cpp, but was copied (not move)
4322 * here much later. That's one of the reasons why we produce a descriptor just
4323 * like it does, rather than mixing directly into the vmdkCreateRawImage code.
4324 *
4325 * @returns VBox status code.
4326 * @param pImage The image.
4327 * @param ppRaw Where to return the raw drive descriptor. Caller must
4328 * free it using vmdkRawDescFree regardless of the status
4329 * code.
4330 * @internal
4331 */
4332static int vmdkMakeRawDescriptor(PVMDKIMAGE pImage, PVDISKRAW *ppRaw)
4333{
4334 /* Make sure it's NULL. */
4335 *ppRaw = NULL;
4336
4337 /*
4338 * Read the configuration.
4339 */
4340 char *pszRawDrive = NULL;
4341 uint32_t fPartitions = 0; /* zero if whole-drive */
4342 uint32_t fPartitionsReadOnly = 0; /* (subset of fPartitions) */
4343 void *pvBootSector = NULL;
4344 size_t cbBootSector = 0;
4345 bool fRelative = false;
4346 char *pszFreeMe = NULL; /* lazy bird cleanup. */
4347 int rc = vmdkRawDescParseConfig(pImage, &pszRawDrive, &fPartitions, &fPartitionsReadOnly,
4348 &pvBootSector, &cbBootSector, &fRelative, &pszFreeMe);
4349 RTStrFree(pszFreeMe);
4350 if (RT_SUCCESS(rc))
4351 {
4352 /*
4353 * Open the device, getting the sector size and drive size.
4354 */
4355 uint64_t cbSize = 0;
4356 uint32_t cbSector = 0;
4357 RTFILE hRawDrive = NIL_RTFILE;
4358 rc = vmkdRawDescOpenDevice(pImage, pszRawDrive, &hRawDrive, &cbSize, &cbSector);
4359 if (RT_SUCCESS(rc))
4360 {
4361 /*
4362 * Create the raw-drive descriptor
4363 */
4364 PVDISKRAW pRawDesc = (PVDISKRAW)RTMemAllocZ(sizeof(*pRawDesc));
4365 if (pRawDesc)
4366 {
4367 pRawDesc->szSignature[0] = 'R';
4368 pRawDesc->szSignature[1] = 'A';
4369 pRawDesc->szSignature[2] = 'W';
4370 //pRawDesc->szSignature[3] = '\0';
4371 if (!fPartitions)
4372 {
4373 /*
4374 * It's simple for when doing the whole drive.
4375 */
4376 pRawDesc->uFlags = VDISKRAW_DISK;
4377 rc = RTStrDupEx(&pRawDesc->pszRawDisk, pszRawDrive);
4378 }
4379 else
4380 {
4381 /*
4382 * In selected partitions mode we've got a lot more work ahead of us.
4383 */
4384 pRawDesc->uFlags = VDISKRAW_NORMAL;
4385 //pRawDesc->pszRawDisk = NULL;
4386 //pRawDesc->cPartDescs = 0;
4387 //pRawDesc->pPartDescs = NULL;
4388
4389 /* We need to parse the partition map to complete the descriptor: */
4390 RTDVM hVolMgr = NIL_RTDVM;
4391 rc = vmdkRawDescOpenVolMgr(pImage, hRawDrive, pszRawDrive, cbSector, &hVolMgr);
4392 if (RT_SUCCESS(rc))
4393 {
4394 RTDVMFORMATTYPE enmFormatType = RTDvmMapGetFormatType(hVolMgr);
4395 if ( enmFormatType == RTDVMFORMATTYPE_MBR
4396 || enmFormatType == RTDVMFORMATTYPE_GPT)
4397 {
4398 pRawDesc->enmPartitioningType = enmFormatType == RTDVMFORMATTYPE_MBR
4399 ? VDISKPARTTYPE_MBR : VDISKPARTTYPE_GPT;
4400
4401 /* Add copies of the partition tables: */
4402 rc = vmdkRawDescDoCopyPartitionTables(pImage, hVolMgr, pRawDesc, pszRawDrive, hRawDrive,
4403 pvBootSector, cbBootSector);
4404 if (RT_SUCCESS(rc))
4405 {
4406 /* Add descriptors for the partitions/volumes, indicating which
4407 should be accessible and how to access them: */
4408 RTDVMVOLUME hVolRelease = NIL_RTDVMVOLUME;
4409 rc = vmdkRawDescDoPartitions(pImage, hVolMgr, pRawDesc, hRawDrive, pszRawDrive, cbSector,
4410 fPartitions, fPartitionsReadOnly, fRelative, &hVolRelease);
4411 RTDvmVolumeRelease(hVolRelease);
4412
4413 /* Finally, sort the partition and check consistency (overlaps, etc): */
4414 if (RT_SUCCESS(rc))
4415 rc = vmdkRawDescPostProcessPartitions(pImage, pRawDesc, cbSize);
4416 }
4417 }
4418 else
4419 rc = vdIfError(pImage->pIfError, VERR_NOT_SUPPORTED, RT_SRC_POS,
4420 N_("VMDK: Image path: '%s'. Unsupported partitioning for the disk '%s': %s"),
4421 pImage->pszFilename, pszRawDrive, RTDvmMapGetFormatType(hVolMgr));
4422 RTDvmRelease(hVolMgr);
4423 }
4424 }
4425 if (RT_SUCCESS(rc))
4426 {
4427 /*
4428 * We succeeded.
4429 */
4430 *ppRaw = pRawDesc;
4431 Log(("vmdkMakeRawDescriptor: fFlags=%#x enmPartitioningType=%d cPartDescs=%u pszRawDisk=%s\n",
4432 pRawDesc->uFlags, pRawDesc->enmPartitioningType, pRawDesc->cPartDescs, pRawDesc->pszRawDisk));
4433 if (pRawDesc->cPartDescs)
4434 {
4435 Log(("# VMDK offset Length Device offset PartDataPtr Device\n"));
4436 for (uint32_t i = 0; i < pRawDesc->cPartDescs; i++)
4437 Log(("%2u %14RU64 %14RU64 %14RU64 %#18p %s\n", i, pRawDesc->pPartDescs[i].offStartInVDisk,
4438 pRawDesc->pPartDescs[i].cbData, pRawDesc->pPartDescs[i].offStartInDevice,
4439 pRawDesc->pPartDescs[i].pvPartitionData, pRawDesc->pPartDescs[i].pszRawDevice));
4440 }
4441 }
4442 else
4443 vmdkRawDescFree(pRawDesc);
4444 }
4445 else
4446 rc = vdIfError(pImage->pIfError, VERR_NOT_SUPPORTED, RT_SRC_POS,
4447 N_("VMDK: Image path: '%s'. Failed to allocate %u bytes for the raw drive descriptor"),
4448 pImage->pszFilename, sizeof(*pRawDesc));
4449 RTFileClose(hRawDrive);
4450 }
4451 }
4452 RTStrFree(pszRawDrive);
4453 RTMemFree(pvBootSector);
4454 return rc;
4455}
4456
4457/**
4458 * Internal: create VMDK images for raw disk/partition access.
4459 */
4460static int vmdkCreateRawImage(PVMDKIMAGE pImage, const PVDISKRAW pRaw,
4461 uint64_t cbSize)
4462{
4463 int rc = VINF_SUCCESS;
4464 PVMDKEXTENT pExtent;
4465
4466 if (pRaw->uFlags & VDISKRAW_DISK)
4467 {
4468 /* Full raw disk access. This requires setting up a descriptor
4469 * file and open the (flat) raw disk. */
4470 rc = vmdkCreateExtents(pImage, 1);
4471 if (RT_FAILURE(rc))
4472 return vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: could not create new extent list in '%s'"), pImage->pszFilename);
4473 pExtent = &pImage->pExtents[0];
4474 /* Create raw disk descriptor file. */
4475 rc = vmdkFileOpen(pImage, &pImage->pFile, NULL, pImage->pszFilename,
4476 VDOpenFlagsToFileOpenFlags(pImage->uOpenFlags,
4477 true /* fCreate */));
4478 if (RT_FAILURE(rc))
4479 return vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: could not create new file '%s'"), pImage->pszFilename);
4480
4481 /* Set up basename for extent description. Cannot use StrDup. */
4482 size_t cbBasename = strlen(pRaw->pszRawDisk) + 1;
4483 char *pszBasename = (char *)RTMemTmpAlloc(cbBasename);
4484 if (!pszBasename)
4485 return VERR_NO_MEMORY;
4486 memcpy(pszBasename, pRaw->pszRawDisk, cbBasename);
4487 pExtent->pszBasename = pszBasename;
4488 /* For raw disks the full name is identical to the base name. */
4489 pExtent->pszFullname = RTStrDup(pszBasename);
4490 if (!pExtent->pszFullname)
4491 return VERR_NO_MEMORY;
4492 pExtent->enmType = VMDKETYPE_FLAT;
4493 pExtent->cNominalSectors = VMDK_BYTE2SECTOR(cbSize);
4494 pExtent->uSectorOffset = 0;
4495 pExtent->enmAccess = (pRaw->uFlags & VDISKRAW_READONLY) ? VMDKACCESS_READONLY : VMDKACCESS_READWRITE;
4496 pExtent->fMetaDirty = false;
4497
4498 /* Open flat image, the raw disk. */
4499 rc = vmdkFileOpen(pImage, &pExtent->pFile, pExtent->pszBasename, pExtent->pszFullname,
4500 VDOpenFlagsToFileOpenFlags(pImage->uOpenFlags | ((pExtent->enmAccess == VMDKACCESS_READONLY) ? VD_OPEN_FLAGS_READONLY : 0),
4501 false /* fCreate */));
4502 if (RT_FAILURE(rc))
4503 return vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: could not open raw disk file '%s'"), pExtent->pszFullname);
4504 }
4505 else
4506 {
4507 /* Raw partition access. This requires setting up a descriptor
4508 * file, write the partition information to a flat extent and
4509 * open all the (flat) raw disk partitions. */
4510
4511 /* First pass over the partition data areas to determine how many
4512 * extents we need. One data area can require up to 2 extents, as
4513 * it might be necessary to skip over unpartitioned space. */
4514 unsigned cExtents = 0;
4515 uint64_t uStart = 0;
4516 for (unsigned i = 0; i < pRaw->cPartDescs; i++)
4517 {
4518 PVDISKRAWPARTDESC pPart = &pRaw->pPartDescs[i];
4519 if (uStart > pPart->offStartInVDisk)
4520 return vdIfError(pImage->pIfError, VERR_INVALID_PARAMETER, RT_SRC_POS,
4521 N_("VMDK: incorrect partition data area ordering set up by the caller in '%s'"), pImage->pszFilename);
4522
4523 if (uStart < pPart->offStartInVDisk)
4524 cExtents++;
4525 uStart = pPart->offStartInVDisk + pPart->cbData;
4526 cExtents++;
4527 }
4528 /* Another extent for filling up the rest of the image. */
4529 if (uStart != cbSize)
4530 cExtents++;
4531
4532 rc = vmdkCreateExtents(pImage, cExtents);
4533 if (RT_FAILURE(rc))
4534 return vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: could not create new extent list in '%s'"), pImage->pszFilename);
4535
4536 /* Create raw partition descriptor file. */
4537 rc = vmdkFileOpen(pImage, &pImage->pFile, NULL, pImage->pszFilename,
4538 VDOpenFlagsToFileOpenFlags(pImage->uOpenFlags,
4539 true /* fCreate */));
4540 if (RT_FAILURE(rc))
4541 return vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: could not create new file '%s'"), pImage->pszFilename);
4542
4543 /* Create base filename for the partition table extent. */
4544 /** @todo remove fixed buffer without creating memory leaks. */
4545 char pszPartition[1024];
4546 const char *pszBase = RTPathFilename(pImage->pszFilename);
4547 const char *pszSuff = RTPathSuffix(pszBase);
4548 if (pszSuff == NULL)
4549 return vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: invalid filename '%s'"), pImage->pszFilename);
4550 char *pszBaseBase = RTStrDup(pszBase);
4551 if (!pszBaseBase)
4552 return VERR_NO_MEMORY;
4553 RTPathStripSuffix(pszBaseBase);
4554 RTStrPrintf(pszPartition, sizeof(pszPartition), "%s-pt%s",
4555 pszBaseBase, pszSuff);
4556 RTStrFree(pszBaseBase);
4557
4558 /* Second pass over the partitions, now define all extents. */
4559 uint64_t uPartOffset = 0;
4560 cExtents = 0;
4561 uStart = 0;
4562 for (unsigned i = 0; i < pRaw->cPartDescs; i++)
4563 {
4564 PVDISKRAWPARTDESC pPart = &pRaw->pPartDescs[i];
4565 pExtent = &pImage->pExtents[cExtents++];
4566
4567 if (uStart < pPart->offStartInVDisk)
4568 {
4569 pExtent->pszBasename = NULL;
4570 pExtent->pszFullname = NULL;
4571 pExtent->enmType = VMDKETYPE_ZERO;
4572 pExtent->cNominalSectors = VMDK_BYTE2SECTOR(pPart->offStartInVDisk - uStart);
4573 pExtent->uSectorOffset = 0;
4574 pExtent->enmAccess = VMDKACCESS_READWRITE;
4575 pExtent->fMetaDirty = false;
4576 /* go to next extent */
4577 pExtent = &pImage->pExtents[cExtents++];
4578 }
4579 uStart = pPart->offStartInVDisk + pPart->cbData;
4580
4581 if (pPart->pvPartitionData)
4582 {
4583 /* Set up basename for extent description. Can't use StrDup. */
4584 size_t cbBasename = strlen(pszPartition) + 1;
4585 char *pszBasename = (char *)RTMemTmpAlloc(cbBasename);
4586 if (!pszBasename)
4587 return VERR_NO_MEMORY;
4588 memcpy(pszBasename, pszPartition, cbBasename);
4589 pExtent->pszBasename = pszBasename;
4590
4591 /* Set up full name for partition extent. */
4592 char *pszDirname = RTStrDup(pImage->pszFilename);
4593 if (!pszDirname)
4594 return VERR_NO_STR_MEMORY;
4595 RTPathStripFilename(pszDirname);
4596 char *pszFullname = RTPathJoinA(pszDirname, pExtent->pszBasename);
4597 RTStrFree(pszDirname);
4598 if (!pszFullname)
4599 return VERR_NO_STR_MEMORY;
4600 pExtent->pszFullname = pszFullname;
4601 pExtent->enmType = VMDKETYPE_FLAT;
4602 pExtent->cNominalSectors = VMDK_BYTE2SECTOR(pPart->cbData);
4603 pExtent->uSectorOffset = uPartOffset;
4604 pExtent->enmAccess = VMDKACCESS_READWRITE;
4605 pExtent->fMetaDirty = false;
4606
4607 /* Create partition table flat image. */
4608 rc = vmdkFileOpen(pImage, &pExtent->pFile, pExtent->pszBasename, pExtent->pszFullname,
4609 VDOpenFlagsToFileOpenFlags(pImage->uOpenFlags | ((pExtent->enmAccess == VMDKACCESS_READONLY) ? VD_OPEN_FLAGS_READONLY : 0),
4610 true /* fCreate */));
4611 if (RT_FAILURE(rc))
4612 return vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: could not create new partition data file '%s'"), pExtent->pszFullname);
4613 rc = vdIfIoIntFileWriteSync(pImage->pIfIo, pExtent->pFile->pStorage,
4614 VMDK_SECTOR2BYTE(uPartOffset),
4615 pPart->pvPartitionData,
4616 pPart->cbData);
4617 if (RT_FAILURE(rc))
4618 return vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: could not write partition data to '%s'"), pExtent->pszFullname);
4619 uPartOffset += VMDK_BYTE2SECTOR(pPart->cbData);
4620 }
4621 else
4622 {
4623 if (pPart->pszRawDevice)
4624 {
4625 /* Set up basename for extent descr. Can't use StrDup. */
4626 size_t cbBasename = strlen(pPart->pszRawDevice) + 1;
4627 char *pszBasename = (char *)RTMemTmpAlloc(cbBasename);
4628 if (!pszBasename)
4629 return VERR_NO_MEMORY;
4630 memcpy(pszBasename, pPart->pszRawDevice, cbBasename);
4631 pExtent->pszBasename = pszBasename;
4632 /* For raw disks full name is identical to base name. */
4633 pExtent->pszFullname = RTStrDup(pszBasename);
4634 if (!pExtent->pszFullname)
4635 return VERR_NO_MEMORY;
4636 pExtent->enmType = VMDKETYPE_FLAT;
4637 pExtent->cNominalSectors = VMDK_BYTE2SECTOR(pPart->cbData);
4638 pExtent->uSectorOffset = VMDK_BYTE2SECTOR(pPart->offStartInDevice);
4639 pExtent->enmAccess = (pPart->uFlags & VDISKRAW_READONLY) ? VMDKACCESS_READONLY : VMDKACCESS_READWRITE;
4640 pExtent->fMetaDirty = false;
4641
4642 /* Open flat image, the raw partition. */
4643 rc = vmdkFileOpen(pImage, &pExtent->pFile, pExtent->pszBasename, pExtent->pszFullname,
4644 VDOpenFlagsToFileOpenFlags(pImage->uOpenFlags | ((pExtent->enmAccess == VMDKACCESS_READONLY) ? VD_OPEN_FLAGS_READONLY : 0),
4645 false /* fCreate */));
4646 if (RT_FAILURE(rc))
4647 return vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: could not open raw partition file '%s'"), pExtent->pszFullname);
4648 }
4649 else
4650 {
4651 pExtent->pszBasename = NULL;
4652 pExtent->pszFullname = NULL;
4653 pExtent->enmType = VMDKETYPE_ZERO;
4654 pExtent->cNominalSectors = VMDK_BYTE2SECTOR(pPart->cbData);
4655 pExtent->uSectorOffset = 0;
4656 pExtent->enmAccess = VMDKACCESS_READWRITE;
4657 pExtent->fMetaDirty = false;
4658 }
4659 }
4660 }
4661 /* Another extent for filling up the rest of the image. */
4662 if (uStart != cbSize)
4663 {
4664 pExtent = &pImage->pExtents[cExtents++];
4665 pExtent->pszBasename = NULL;
4666 pExtent->pszFullname = NULL;
4667 pExtent->enmType = VMDKETYPE_ZERO;
4668 pExtent->cNominalSectors = VMDK_BYTE2SECTOR(cbSize - uStart);
4669 pExtent->uSectorOffset = 0;
4670 pExtent->enmAccess = VMDKACCESS_READWRITE;
4671 pExtent->fMetaDirty = false;
4672 }
4673 }
4674
4675 rc = vmdkDescBaseSetStr(pImage, &pImage->Descriptor, "createType",
4676 (pRaw->uFlags & VDISKRAW_DISK) ?
4677 "fullDevice" : "partitionedDevice");
4678 if (RT_FAILURE(rc))
4679 return vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: could not set the image type in '%s'"), pImage->pszFilename);
4680 return rc;
4681}
4682
4683/**
4684 * Internal: create a regular (i.e. file-backed) VMDK image.
4685 */
4686static int vmdkCreateRegularImage(PVMDKIMAGE pImage, uint64_t cbSize,
4687 unsigned uImageFlags, PVDINTERFACEPROGRESS pIfProgress,
4688 unsigned uPercentStart, unsigned uPercentSpan)
4689{
4690 int rc = VINF_SUCCESS;
4691 unsigned cExtents = 1;
4692 uint64_t cbOffset = 0;
4693 uint64_t cbRemaining = cbSize;
4694
4695 if (uImageFlags & VD_VMDK_IMAGE_FLAGS_SPLIT_2G)
4696 {
4697 cExtents = cbSize / VMDK_2G_SPLIT_SIZE;
4698 /* Do proper extent computation: need one smaller extent if the total
4699 * size isn't evenly divisible by the split size. */
4700 if (cbSize % VMDK_2G_SPLIT_SIZE)
4701 cExtents++;
4702 }
4703 rc = vmdkCreateExtents(pImage, cExtents);
4704 if (RT_FAILURE(rc))
4705 return vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: could not create new extent list in '%s'"), pImage->pszFilename);
4706
4707 /* Basename strings needed for constructing the extent names. */
4708 char *pszBasenameSubstr = RTPathFilename(pImage->pszFilename);
4709 AssertPtr(pszBasenameSubstr);
4710 size_t cbBasenameSubstr = strlen(pszBasenameSubstr) + 1;
4711
4712 /* Create separate descriptor file if necessary. */
4713 if (cExtents != 1 || (uImageFlags & VD_IMAGE_FLAGS_FIXED))
4714 {
4715 rc = vmdkFileOpen(pImage, &pImage->pFile, NULL, pImage->pszFilename,
4716 VDOpenFlagsToFileOpenFlags(pImage->uOpenFlags,
4717 true /* fCreate */));
4718 if (RT_FAILURE(rc))
4719 return vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: could not create new sparse descriptor file '%s'"), pImage->pszFilename);
4720 }
4721 else
4722 pImage->pFile = NULL;
4723
4724 /* Set up all extents. */
4725 for (unsigned i = 0; i < cExtents; i++)
4726 {
4727 PVMDKEXTENT pExtent = &pImage->pExtents[i];
4728 uint64_t cbExtent = cbRemaining;
4729
4730 /* Set up fullname/basename for extent description. Cannot use StrDup
4731 * for basename, as it is not guaranteed that the memory can be freed
4732 * with RTMemTmpFree, which must be used as in other code paths
4733 * StrDup is not usable. */
4734 if (cExtents == 1 && !(uImageFlags & VD_IMAGE_FLAGS_FIXED))
4735 {
4736 char *pszBasename = (char *)RTMemTmpAlloc(cbBasenameSubstr);
4737 if (!pszBasename)
4738 return VERR_NO_MEMORY;
4739 memcpy(pszBasename, pszBasenameSubstr, cbBasenameSubstr);
4740 pExtent->pszBasename = pszBasename;
4741 }
4742 else
4743 {
4744 char *pszBasenameSuff = RTPathSuffix(pszBasenameSubstr);
4745 char *pszBasenameBase = RTStrDup(pszBasenameSubstr);
4746 RTPathStripSuffix(pszBasenameBase);
4747 char *pszTmp;
4748 size_t cbTmp;
4749 if (uImageFlags & VD_IMAGE_FLAGS_FIXED)
4750 {
4751 if (cExtents == 1)
4752 RTStrAPrintf(&pszTmp, "%s-flat%s", pszBasenameBase,
4753 pszBasenameSuff);
4754 else
4755 RTStrAPrintf(&pszTmp, "%s-f%03d%s", pszBasenameBase,
4756 i+1, pszBasenameSuff);
4757 }
4758 else
4759 RTStrAPrintf(&pszTmp, "%s-s%03d%s", pszBasenameBase, i+1,
4760 pszBasenameSuff);
4761 RTStrFree(pszBasenameBase);
4762 if (!pszTmp)
4763 return VERR_NO_STR_MEMORY;
4764 cbTmp = strlen(pszTmp) + 1;
4765 char *pszBasename = (char *)RTMemTmpAlloc(cbTmp);
4766 if (!pszBasename)
4767 {
4768 RTStrFree(pszTmp);
4769 return VERR_NO_MEMORY;
4770 }
4771 memcpy(pszBasename, pszTmp, cbTmp);
4772 RTStrFree(pszTmp);
4773 pExtent->pszBasename = pszBasename;
4774 if (uImageFlags & VD_VMDK_IMAGE_FLAGS_SPLIT_2G)
4775 cbExtent = RT_MIN(cbRemaining, VMDK_2G_SPLIT_SIZE);
4776 }
4777 char *pszBasedirectory = RTStrDup(pImage->pszFilename);
4778 if (!pszBasedirectory)
4779 return VERR_NO_STR_MEMORY;
4780 RTPathStripFilename(pszBasedirectory);
4781 char *pszFullname = RTPathJoinA(pszBasedirectory, pExtent->pszBasename);
4782 RTStrFree(pszBasedirectory);
4783 if (!pszFullname)
4784 return VERR_NO_STR_MEMORY;
4785 pExtent->pszFullname = pszFullname;
4786
4787 /* Create file for extent. */
4788 rc = vmdkFileOpen(pImage, &pExtent->pFile, pExtent->pszBasename, pExtent->pszFullname,
4789 VDOpenFlagsToFileOpenFlags(pImage->uOpenFlags,
4790 true /* fCreate */));
4791 if (RT_FAILURE(rc))
4792 return vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: could not create new file '%s'"), pExtent->pszFullname);
4793 if (uImageFlags & VD_IMAGE_FLAGS_FIXED)
4794 {
4795 rc = vdIfIoIntFileSetAllocationSize(pImage->pIfIo, pExtent->pFile->pStorage, cbExtent,
4796 0 /* fFlags */, pIfProgress,
4797 uPercentStart + cbOffset * uPercentSpan / cbSize,
4798 cbExtent * uPercentSpan / cbSize);
4799 if (RT_FAILURE(rc))
4800 return vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: could not set size of new file '%s'"), pExtent->pszFullname);
4801 }
4802
4803 /* Place descriptor file information (where integrated). */
4804 if (cExtents == 1 && !(uImageFlags & VD_IMAGE_FLAGS_FIXED))
4805 {
4806 pExtent->uDescriptorSector = 1;
4807 pExtent->cDescriptorSectors = VMDK_BYTE2SECTOR(pImage->cbDescAlloc);
4808 /* The descriptor is part of the (only) extent. */
4809 pExtent->pDescData = pImage->pDescData;
4810 pImage->pDescData = NULL;
4811 }
4812
4813 if (!(uImageFlags & VD_IMAGE_FLAGS_FIXED))
4814 {
4815 uint64_t cSectorsPerGDE, cSectorsPerGD;
4816 pExtent->enmType = VMDKETYPE_HOSTED_SPARSE;
4817 pExtent->cSectors = VMDK_BYTE2SECTOR(RT_ALIGN_64(cbExtent, _64K));
4818 pExtent->cSectorsPerGrain = VMDK_BYTE2SECTOR(_64K);
4819 pExtent->cGTEntries = 512;
4820 cSectorsPerGDE = pExtent->cGTEntries * pExtent->cSectorsPerGrain;
4821 pExtent->cSectorsPerGDE = cSectorsPerGDE;
4822 pExtent->cGDEntries = (pExtent->cSectors + cSectorsPerGDE - 1) / cSectorsPerGDE;
4823 cSectorsPerGD = (pExtent->cGDEntries + (512 / sizeof(uint32_t) - 1)) / (512 / sizeof(uint32_t));
4824 if (pImage->uImageFlags & VD_VMDK_IMAGE_FLAGS_STREAM_OPTIMIZED)
4825 {
4826 /* The spec says version is 1 for all VMDKs, but the vast
4827 * majority of streamOptimized VMDKs actually contain
4828 * version 3 - so go with the majority. Both are accepted. */
4829 pExtent->uVersion = 3;
4830 pExtent->uCompression = VMDK_COMPRESSION_DEFLATE;
4831 }
4832 }
4833 else
4834 {
4835 if (uImageFlags & VD_VMDK_IMAGE_FLAGS_ESX)
4836 pExtent->enmType = VMDKETYPE_VMFS;
4837 else
4838 pExtent->enmType = VMDKETYPE_FLAT;
4839 }
4840
4841 pExtent->enmAccess = VMDKACCESS_READWRITE;
4842 pExtent->fUncleanShutdown = true;
4843 pExtent->cNominalSectors = VMDK_BYTE2SECTOR(cbExtent);
4844 pExtent->uSectorOffset = 0;
4845 pExtent->fMetaDirty = true;
4846
4847 if (!(uImageFlags & VD_IMAGE_FLAGS_FIXED))
4848 {
4849 /* fPreAlloc should never be false because VMware can't use such images. */
4850 rc = vmdkCreateGrainDirectory(pImage, pExtent,
4851 RT_MAX( pExtent->uDescriptorSector
4852 + pExtent->cDescriptorSectors,
4853 1),
4854 true /* fPreAlloc */);
4855 if (RT_FAILURE(rc))
4856 return vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: could not create new grain directory in '%s'"), pExtent->pszFullname);
4857 }
4858
4859 cbOffset += cbExtent;
4860
4861 if (RT_SUCCESS(rc))
4862 vdIfProgress(pIfProgress, uPercentStart + cbOffset * uPercentSpan / cbSize);
4863
4864 cbRemaining -= cbExtent;
4865 }
4866
4867 if (pImage->uImageFlags & VD_VMDK_IMAGE_FLAGS_ESX)
4868 {
4869 /* VirtualBox doesn't care, but VMWare ESX freaks out if the wrong
4870 * controller type is set in an image. */
4871 rc = vmdkDescDDBSetStr(pImage, &pImage->Descriptor, "ddb.adapterType", "lsilogic");
4872 if (RT_FAILURE(rc))
4873 return vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: could not set controller type to lsilogic in '%s'"), pImage->pszFilename);
4874 }
4875
4876 const char *pszDescType = NULL;
4877 if (uImageFlags & VD_IMAGE_FLAGS_FIXED)
4878 {
4879 if (pImage->uImageFlags & VD_VMDK_IMAGE_FLAGS_ESX)
4880 pszDescType = "vmfs";
4881 else
4882 pszDescType = (cExtents == 1)
4883 ? "monolithicFlat" : "twoGbMaxExtentFlat";
4884 }
4885 else
4886 {
4887 if (pImage->uImageFlags & VD_VMDK_IMAGE_FLAGS_STREAM_OPTIMIZED)
4888 pszDescType = "streamOptimized";
4889 else
4890 {
4891 pszDescType = (cExtents == 1)
4892 ? "monolithicSparse" : "twoGbMaxExtentSparse";
4893 }
4894 }
4895 rc = vmdkDescBaseSetStr(pImage, &pImage->Descriptor, "createType",
4896 pszDescType);
4897 if (RT_FAILURE(rc))
4898 return vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: could not set the image type in '%s'"), pImage->pszFilename);
4899 return rc;
4900}
4901
4902/**
4903 * Internal: Create a real stream optimized VMDK using only linear writes.
4904 */
4905static int vmdkCreateStreamImage(PVMDKIMAGE pImage, uint64_t cbSize)
4906{
4907 int rc = vmdkCreateExtents(pImage, 1);
4908 if (RT_FAILURE(rc))
4909 return vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: could not create new extent list in '%s'"), pImage->pszFilename);
4910
4911 /* Basename strings needed for constructing the extent names. */
4912 const char *pszBasenameSubstr = RTPathFilename(pImage->pszFilename);
4913 AssertPtr(pszBasenameSubstr);
4914 size_t cbBasenameSubstr = strlen(pszBasenameSubstr) + 1;
4915
4916 /* No separate descriptor file. */
4917 pImage->pFile = NULL;
4918
4919 /* Set up all extents. */
4920 PVMDKEXTENT pExtent = &pImage->pExtents[0];
4921
4922 /* Set up fullname/basename for extent description. Cannot use StrDup
4923 * for basename, as it is not guaranteed that the memory can be freed
4924 * with RTMemTmpFree, which must be used as in other code paths
4925 * StrDup is not usable. */
4926 char *pszBasename = (char *)RTMemTmpAlloc(cbBasenameSubstr);
4927 if (!pszBasename)
4928 return VERR_NO_MEMORY;
4929 memcpy(pszBasename, pszBasenameSubstr, cbBasenameSubstr);
4930 pExtent->pszBasename = pszBasename;
4931
4932 char *pszBasedirectory = RTStrDup(pImage->pszFilename);
4933 RTPathStripFilename(pszBasedirectory);
4934 char *pszFullname = RTPathJoinA(pszBasedirectory, pExtent->pszBasename);
4935 RTStrFree(pszBasedirectory);
4936 if (!pszFullname)
4937 return VERR_NO_STR_MEMORY;
4938 pExtent->pszFullname = pszFullname;
4939
4940 /* Create file for extent. Make it write only, no reading allowed. */
4941 rc = vmdkFileOpen(pImage, &pExtent->pFile, pExtent->pszBasename, pExtent->pszFullname,
4942 VDOpenFlagsToFileOpenFlags(pImage->uOpenFlags,
4943 true /* fCreate */)
4944 & ~RTFILE_O_READ);
4945 if (RT_FAILURE(rc))
4946 return vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: could not create new file '%s'"), pExtent->pszFullname);
4947
4948 /* Place descriptor file information. */
4949 pExtent->uDescriptorSector = 1;
4950 pExtent->cDescriptorSectors = VMDK_BYTE2SECTOR(pImage->cbDescAlloc);
4951 /* The descriptor is part of the (only) extent. */
4952 pExtent->pDescData = pImage->pDescData;
4953 pImage->pDescData = NULL;
4954
4955 uint64_t cSectorsPerGDE, cSectorsPerGD;
4956 pExtent->enmType = VMDKETYPE_HOSTED_SPARSE;
4957 pExtent->cSectors = VMDK_BYTE2SECTOR(RT_ALIGN_64(cbSize, _64K));
4958 pExtent->cSectorsPerGrain = VMDK_BYTE2SECTOR(_64K);
4959 pExtent->cGTEntries = 512;
4960 cSectorsPerGDE = pExtent->cGTEntries * pExtent->cSectorsPerGrain;
4961 pExtent->cSectorsPerGDE = cSectorsPerGDE;
4962 pExtent->cGDEntries = (pExtent->cSectors + cSectorsPerGDE - 1) / cSectorsPerGDE;
4963 cSectorsPerGD = (pExtent->cGDEntries + (512 / sizeof(uint32_t) - 1)) / (512 / sizeof(uint32_t));
4964
4965 /* The spec says version is 1 for all VMDKs, but the vast
4966 * majority of streamOptimized VMDKs actually contain
4967 * version 3 - so go with the majority. Both are accepted. */
4968 pExtent->uVersion = 3;
4969 pExtent->uCompression = VMDK_COMPRESSION_DEFLATE;
4970 pExtent->fFooter = true;
4971
4972 pExtent->enmAccess = VMDKACCESS_READONLY;
4973 pExtent->fUncleanShutdown = false;
4974 pExtent->cNominalSectors = VMDK_BYTE2SECTOR(cbSize);
4975 pExtent->uSectorOffset = 0;
4976 pExtent->fMetaDirty = true;
4977
4978 /* Create grain directory, without preallocating it straight away. It will
4979 * be constructed on the fly when writing out the data and written when
4980 * closing the image. The end effect is that the full grain directory is
4981 * allocated, which is a requirement of the VMDK specs. */
4982 rc = vmdkCreateGrainDirectory(pImage, pExtent, VMDK_GD_AT_END,
4983 false /* fPreAlloc */);
4984 if (RT_FAILURE(rc))
4985 return vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: could not create new grain directory in '%s'"), pExtent->pszFullname);
4986
4987 rc = vmdkDescBaseSetStr(pImage, &pImage->Descriptor, "createType",
4988 "streamOptimized");
4989 if (RT_FAILURE(rc))
4990 rc = vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: could not set the image type in '%s'"), pImage->pszFilename);
4991
4992 return rc;
4993}
4994
4995/**
4996 * Initializes the UUID fields in the DDB.
4997 *
4998 * @returns VBox status code.
4999 * @param pImage The VMDK image instance.
5000 */
5001static int vmdkCreateImageDdbUuidsInit(PVMDKIMAGE pImage)
5002{
5003 int rc = vmdkDescDDBSetUuid(pImage, &pImage->Descriptor, VMDK_DDB_IMAGE_UUID, &pImage->ImageUuid);
5004 if (RT_SUCCESS(rc))
5005 {
5006 rc = vmdkDescDDBSetUuid(pImage, &pImage->Descriptor, VMDK_DDB_PARENT_UUID, &pImage->ParentUuid);
5007 if (RT_SUCCESS(rc))
5008 {
5009 rc = vmdkDescDDBSetUuid(pImage, &pImage->Descriptor, VMDK_DDB_MODIFICATION_UUID,
5010 &pImage->ModificationUuid);
5011 if (RT_SUCCESS(rc))
5012 {
5013 rc = vmdkDescDDBSetUuid(pImage, &pImage->Descriptor, VMDK_DDB_PARENT_MODIFICATION_UUID,
5014 &pImage->ParentModificationUuid);
5015 if (RT_FAILURE(rc))
5016 rc = vdIfError(pImage->pIfError, rc, RT_SRC_POS,
5017 N_("VMDK: error storing parent modification UUID in new descriptor in '%s'"), pImage->pszFilename);
5018 }
5019 else
5020 rc = vdIfError(pImage->pIfError, rc, RT_SRC_POS,
5021 N_("VMDK: error storing modification UUID in new descriptor in '%s'"), pImage->pszFilename);
5022 }
5023 else
5024 rc = vdIfError(pImage->pIfError, rc, RT_SRC_POS,
5025 N_("VMDK: error storing parent image UUID in new descriptor in '%s'"), pImage->pszFilename);
5026 }
5027 else
5028 rc = vdIfError(pImage->pIfError, rc, RT_SRC_POS,
5029 N_("VMDK: error storing image UUID in new descriptor in '%s'"), pImage->pszFilename);
5030
5031 return rc;
5032}
5033
5034/**
5035 * Internal: The actual code for creating any VMDK variant currently in
5036 * existence on hosted environments.
5037 */
5038static int vmdkCreateImage(PVMDKIMAGE pImage, uint64_t cbSize,
5039 unsigned uImageFlags, const char *pszComment,
5040 PCVDGEOMETRY pPCHSGeometry,
5041 PCVDGEOMETRY pLCHSGeometry, PCRTUUID pUuid,
5042 PVDINTERFACEPROGRESS pIfProgress,
5043 unsigned uPercentStart, unsigned uPercentSpan)
5044{
5045 pImage->uImageFlags = uImageFlags;
5046
5047 pImage->pIfError = VDIfErrorGet(pImage->pVDIfsDisk);
5048 pImage->pIfIo = VDIfIoIntGet(pImage->pVDIfsImage);
5049 AssertPtrReturn(pImage->pIfIo, VERR_INVALID_PARAMETER);
5050
5051 int rc = vmdkCreateDescriptor(pImage, pImage->pDescData, pImage->cbDescAlloc,
5052 &pImage->Descriptor);
5053 if (RT_SUCCESS(rc))
5054 {
5055 if (uImageFlags & VD_VMDK_IMAGE_FLAGS_RAWDISK)
5056 {
5057 /* Raw disk image (includes raw partition). */
5058 PVDISKRAW pRaw = NULL;
5059 rc = vmdkMakeRawDescriptor(pImage, &pRaw);
5060 if (RT_FAILURE(rc))
5061 return vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: could get raw descriptor for '%s'"), pImage->pszFilename);
5062
5063 rc = vmdkCreateRawImage(pImage, pRaw, cbSize);
5064 vmdkRawDescFree(pRaw);
5065 }
5066 else if (uImageFlags & VD_VMDK_IMAGE_FLAGS_STREAM_OPTIMIZED)
5067 {
5068 /* Stream optimized sparse image (monolithic). */
5069 rc = vmdkCreateStreamImage(pImage, cbSize);
5070 }
5071 else
5072 {
5073 /* Regular fixed or sparse image (monolithic or split). */
5074 rc = vmdkCreateRegularImage(pImage, cbSize, uImageFlags,
5075 pIfProgress, uPercentStart,
5076 uPercentSpan * 95 / 100);
5077 }
5078
5079 if (RT_SUCCESS(rc))
5080 {
5081 vdIfProgress(pIfProgress, uPercentStart + uPercentSpan * 98 / 100);
5082
5083 pImage->cbSize = cbSize;
5084
5085 for (unsigned i = 0; i < pImage->cExtents; i++)
5086 {
5087 PVMDKEXTENT pExtent = &pImage->pExtents[i];
5088
5089 rc = vmdkDescExtInsert(pImage, &pImage->Descriptor, pExtent->enmAccess,
5090 pExtent->cNominalSectors, pExtent->enmType,
5091 pExtent->pszBasename, pExtent->uSectorOffset);
5092 if (RT_FAILURE(rc))
5093 {
5094 rc = vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: could not insert the extent list into descriptor in '%s'"), pImage->pszFilename);
5095 break;
5096 }
5097 }
5098
5099 if (RT_SUCCESS(rc))
5100 vmdkDescExtRemoveDummy(pImage, &pImage->Descriptor);
5101
5102 if ( RT_SUCCESS(rc)
5103 && pPCHSGeometry->cCylinders != 0
5104 && pPCHSGeometry->cHeads != 0
5105 && pPCHSGeometry->cSectors != 0)
5106 rc = vmdkDescSetPCHSGeometry(pImage, pPCHSGeometry);
5107
5108 if ( RT_SUCCESS(rc)
5109 && pLCHSGeometry->cCylinders != 0
5110 && pLCHSGeometry->cHeads != 0
5111 && pLCHSGeometry->cSectors != 0)
5112 rc = vmdkDescSetLCHSGeometry(pImage, pLCHSGeometry);
5113
5114 pImage->LCHSGeometry = *pLCHSGeometry;
5115 pImage->PCHSGeometry = *pPCHSGeometry;
5116
5117 pImage->ImageUuid = *pUuid;
5118 RTUuidClear(&pImage->ParentUuid);
5119 RTUuidClear(&pImage->ModificationUuid);
5120 RTUuidClear(&pImage->ParentModificationUuid);
5121
5122 if (RT_SUCCESS(rc))
5123 rc = vmdkCreateImageDdbUuidsInit(pImage);
5124
5125 if (RT_SUCCESS(rc))
5126 rc = vmdkAllocateGrainTableCache(pImage);
5127
5128 if (RT_SUCCESS(rc))
5129 {
5130 rc = vmdkSetImageComment(pImage, pszComment);
5131 if (RT_FAILURE(rc))
5132 rc = vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: cannot set image comment in '%s'"), pImage->pszFilename);
5133 }
5134
5135 if (RT_SUCCESS(rc))
5136 {
5137 vdIfProgress(pIfProgress, uPercentStart + uPercentSpan * 99 / 100);
5138
5139 if (pImage->uImageFlags & VD_VMDK_IMAGE_FLAGS_STREAM_OPTIMIZED)
5140 {
5141 /* streamOptimized is a bit special, we cannot trigger the flush
5142 * until all data has been written. So we write the necessary
5143 * information explicitly. */
5144 pImage->pExtents[0].cDescriptorSectors = VMDK_BYTE2SECTOR(RT_ALIGN_64( pImage->Descriptor.aLines[pImage->Descriptor.cLines]
5145 - pImage->Descriptor.aLines[0], 512));
5146 rc = vmdkWriteMetaSparseExtent(pImage, &pImage->pExtents[0], 0, NULL);
5147 if (RT_SUCCESS(rc))
5148 {
5149 rc = vmdkWriteDescriptor(pImage, NULL);
5150 if (RT_FAILURE(rc))
5151 rc = vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: cannot write VMDK descriptor in '%s'"), pImage->pszFilename);
5152 }
5153 else
5154 rc = vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: cannot write VMDK header in '%s'"), pImage->pszFilename);
5155 }
5156 else
5157 rc = vmdkFlushImage(pImage, NULL);
5158 }
5159 }
5160 }
5161 else
5162 rc = vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: could not create new descriptor in '%s'"), pImage->pszFilename);
5163
5164
5165 if (RT_SUCCESS(rc))
5166 {
5167 PVDREGIONDESC pRegion = &pImage->RegionList.aRegions[0];
5168 pImage->RegionList.fFlags = 0;
5169 pImage->RegionList.cRegions = 1;
5170
5171 pRegion->offRegion = 0; /* Disk start. */
5172 pRegion->cbBlock = 512;
5173 pRegion->enmDataForm = VDREGIONDATAFORM_RAW;
5174 pRegion->enmMetadataForm = VDREGIONMETADATAFORM_NONE;
5175 pRegion->cbData = 512;
5176 pRegion->cbMetadata = 0;
5177 pRegion->cRegionBlocksOrBytes = pImage->cbSize;
5178
5179 vdIfProgress(pIfProgress, uPercentStart + uPercentSpan);
5180 }
5181 else
5182 vmdkFreeImage(pImage, rc != VERR_ALREADY_EXISTS, false /*fFlush*/);
5183 return rc;
5184}
5185
5186/**
5187 * Internal: Update image comment.
5188 */
5189static int vmdkSetImageComment(PVMDKIMAGE pImage, const char *pszComment)
5190{
5191 char *pszCommentEncoded = NULL;
5192 if (pszComment)
5193 {
5194 pszCommentEncoded = vmdkEncodeString(pszComment);
5195 if (!pszCommentEncoded)
5196 return VERR_NO_MEMORY;
5197 }
5198
5199 int rc = vmdkDescDDBSetStr(pImage, &pImage->Descriptor,
5200 "ddb.comment", pszCommentEncoded);
5201 if (pszCommentEncoded)
5202 RTStrFree(pszCommentEncoded);
5203 if (RT_FAILURE(rc))
5204 return vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: error storing image comment in descriptor in '%s'"), pImage->pszFilename);
5205 return VINF_SUCCESS;
5206}
5207
5208/**
5209 * Internal. Clear the grain table buffer for real stream optimized writing.
5210 */
5211static void vmdkStreamClearGT(PVMDKIMAGE pImage, PVMDKEXTENT pExtent)
5212{
5213 uint32_t cCacheLines = RT_ALIGN(pExtent->cGTEntries, VMDK_GT_CACHELINE_SIZE) / VMDK_GT_CACHELINE_SIZE;
5214 for (uint32_t i = 0; i < cCacheLines; i++)
5215 memset(&pImage->pGTCache->aGTCache[i].aGTData[0], '\0',
5216 VMDK_GT_CACHELINE_SIZE * sizeof(uint32_t));
5217}
5218
5219/**
5220 * Internal. Flush the grain table buffer for real stream optimized writing.
5221 */
5222static int vmdkStreamFlushGT(PVMDKIMAGE pImage, PVMDKEXTENT pExtent,
5223 uint32_t uGDEntry)
5224{
5225 int rc = VINF_SUCCESS;
5226 uint32_t cCacheLines = RT_ALIGN(pExtent->cGTEntries, VMDK_GT_CACHELINE_SIZE) / VMDK_GT_CACHELINE_SIZE;
5227
5228 /* VMware does not write out completely empty grain tables in the case
5229 * of streamOptimized images, which according to my interpretation of
5230 * the VMDK 1.1 spec is bending the rules. Since they do it and we can
5231 * handle it without problems do it the same way and save some bytes. */
5232 bool fAllZero = true;
5233 for (uint32_t i = 0; i < cCacheLines; i++)
5234 {
5235 /* Convert the grain table to little endian in place, as it will not
5236 * be used at all after this function has been called. */
5237 uint32_t *pGTTmp = &pImage->pGTCache->aGTCache[i].aGTData[0];
5238 for (uint32_t j = 0; j < VMDK_GT_CACHELINE_SIZE; j++, pGTTmp++)
5239 if (*pGTTmp)
5240 {
5241 fAllZero = false;
5242 break;
5243 }
5244 if (!fAllZero)
5245 break;
5246 }
5247 if (fAllZero)
5248 return VINF_SUCCESS;
5249
5250 uint64_t uFileOffset = pExtent->uAppendPosition;
5251 if (!uFileOffset)
5252 return VERR_INTERNAL_ERROR;
5253 /* Align to sector, as the previous write could have been any size. */
5254 uFileOffset = RT_ALIGN_64(uFileOffset, 512);
5255
5256 /* Grain table marker. */
5257 uint8_t aMarker[512];
5258 PVMDKMARKER pMarker = (PVMDKMARKER)&aMarker[0];
5259 memset(pMarker, '\0', sizeof(aMarker));
5260 pMarker->uSector = RT_H2LE_U64(VMDK_BYTE2SECTOR((uint64_t)pExtent->cGTEntries * sizeof(uint32_t)));
5261 pMarker->uType = RT_H2LE_U32(VMDK_MARKER_GT);
5262 rc = vdIfIoIntFileWriteSync(pImage->pIfIo, pExtent->pFile->pStorage, uFileOffset,
5263 aMarker, sizeof(aMarker));
5264 AssertRC(rc);
5265 uFileOffset += 512;
5266
5267 if (!pExtent->pGD || pExtent->pGD[uGDEntry])
5268 return VERR_INTERNAL_ERROR;
5269
5270 pExtent->pGD[uGDEntry] = VMDK_BYTE2SECTOR(uFileOffset);
5271
5272 for (uint32_t i = 0; i < cCacheLines; i++)
5273 {
5274 /* Convert the grain table to little endian in place, as it will not
5275 * be used at all after this function has been called. */
5276 uint32_t *pGTTmp = &pImage->pGTCache->aGTCache[i].aGTData[0];
5277 for (uint32_t j = 0; j < VMDK_GT_CACHELINE_SIZE; j++, pGTTmp++)
5278 *pGTTmp = RT_H2LE_U32(*pGTTmp);
5279
5280 rc = vdIfIoIntFileWriteSync(pImage->pIfIo, pExtent->pFile->pStorage, uFileOffset,
5281 &pImage->pGTCache->aGTCache[i].aGTData[0],
5282 VMDK_GT_CACHELINE_SIZE * sizeof(uint32_t));
5283 uFileOffset += VMDK_GT_CACHELINE_SIZE * sizeof(uint32_t);
5284 if (RT_FAILURE(rc))
5285 break;
5286 }
5287 Assert(!(uFileOffset % 512));
5288 pExtent->uAppendPosition = RT_ALIGN_64(uFileOffset, 512);
5289 return rc;
5290}
5291
5292/**
5293 * Internal. Free all allocated space for representing an image, and optionally
5294 * delete the image from disk.
5295 */
5296static int vmdkFreeImage(PVMDKIMAGE pImage, bool fDelete, bool fFlush)
5297{
5298 int rc = VINF_SUCCESS;
5299
5300 /* Freeing a never allocated image (e.g. because the open failed) is
5301 * not signalled as an error. After all nothing bad happens. */
5302 if (pImage)
5303 {
5304 if (!(pImage->uOpenFlags & VD_OPEN_FLAGS_READONLY))
5305 {
5306 if (pImage->uImageFlags & VD_VMDK_IMAGE_FLAGS_STREAM_OPTIMIZED)
5307 {
5308 /* Check if all extents are clean. */
5309 for (unsigned i = 0; i < pImage->cExtents; i++)
5310 {
5311 Assert(!pImage->pExtents[i].fUncleanShutdown);
5312 }
5313 }
5314 else
5315 {
5316 /* Mark all extents as clean. */
5317 for (unsigned i = 0; i < pImage->cExtents; i++)
5318 {
5319 if ( pImage->pExtents[i].enmType == VMDKETYPE_HOSTED_SPARSE
5320 && pImage->pExtents[i].fUncleanShutdown)
5321 {
5322 pImage->pExtents[i].fUncleanShutdown = false;
5323 pImage->pExtents[i].fMetaDirty = true;
5324 }
5325
5326 /* From now on it's not safe to append any more data. */
5327 pImage->pExtents[i].uAppendPosition = 0;
5328 }
5329 }
5330 }
5331
5332 if (pImage->uImageFlags & VD_VMDK_IMAGE_FLAGS_STREAM_OPTIMIZED)
5333 {
5334 /* No need to write any pending data if the file will be deleted
5335 * or if the new file wasn't successfully created. */
5336 if ( !fDelete && pImage->pExtents
5337 && pImage->pExtents[0].cGTEntries
5338 && pImage->pExtents[0].uAppendPosition)
5339 {
5340 PVMDKEXTENT pExtent = &pImage->pExtents[0];
5341 uint32_t uLastGDEntry = pExtent->uLastGrainAccess / pExtent->cGTEntries;
5342 rc = vmdkStreamFlushGT(pImage, pExtent, uLastGDEntry);
5343 AssertRC(rc);
5344 vmdkStreamClearGT(pImage, pExtent);
5345 for (uint32_t i = uLastGDEntry + 1; i < pExtent->cGDEntries; i++)
5346 {
5347 rc = vmdkStreamFlushGT(pImage, pExtent, i);
5348 AssertRC(rc);
5349 }
5350
5351 uint64_t uFileOffset = pExtent->uAppendPosition;
5352 if (!uFileOffset)
5353 return VERR_INTERNAL_ERROR;
5354 uFileOffset = RT_ALIGN_64(uFileOffset, 512);
5355
5356 /* From now on it's not safe to append any more data. */
5357 pExtent->uAppendPosition = 0;
5358
5359 /* Grain directory marker. */
5360 uint8_t aMarker[512];
5361 PVMDKMARKER pMarker = (PVMDKMARKER)&aMarker[0];
5362 memset(pMarker, '\0', sizeof(aMarker));
5363 pMarker->uSector = VMDK_BYTE2SECTOR(RT_ALIGN_64(RT_H2LE_U64((uint64_t)pExtent->cGDEntries * sizeof(uint32_t)), 512));
5364 pMarker->uType = RT_H2LE_U32(VMDK_MARKER_GD);
5365 rc = vdIfIoIntFileWriteSync(pImage->pIfIo, pExtent->pFile->pStorage, uFileOffset,
5366 aMarker, sizeof(aMarker));
5367 AssertRC(rc);
5368 uFileOffset += 512;
5369
5370 /* Write grain directory in little endian style. The array will
5371 * not be used after this, so convert in place. */
5372 uint32_t *pGDTmp = pExtent->pGD;
5373 for (uint32_t i = 0; i < pExtent->cGDEntries; i++, pGDTmp++)
5374 *pGDTmp = RT_H2LE_U32(*pGDTmp);
5375 rc = vdIfIoIntFileWriteSync(pImage->pIfIo, pExtent->pFile->pStorage,
5376 uFileOffset, pExtent->pGD,
5377 pExtent->cGDEntries * sizeof(uint32_t));
5378 AssertRC(rc);
5379
5380 pExtent->uSectorGD = VMDK_BYTE2SECTOR(uFileOffset);
5381 pExtent->uSectorRGD = VMDK_BYTE2SECTOR(uFileOffset);
5382 uFileOffset = RT_ALIGN_64( uFileOffset
5383 + pExtent->cGDEntries * sizeof(uint32_t),
5384 512);
5385
5386 /* Footer marker. */
5387 memset(pMarker, '\0', sizeof(aMarker));
5388 pMarker->uSector = VMDK_BYTE2SECTOR(512);
5389 pMarker->uType = RT_H2LE_U32(VMDK_MARKER_FOOTER);
5390 rc = vdIfIoIntFileWriteSync(pImage->pIfIo, pExtent->pFile->pStorage,
5391 uFileOffset, aMarker, sizeof(aMarker));
5392 AssertRC(rc);
5393
5394 uFileOffset += 512;
5395 rc = vmdkWriteMetaSparseExtent(pImage, pExtent, uFileOffset, NULL);
5396 AssertRC(rc);
5397
5398 uFileOffset += 512;
5399 /* End-of-stream marker. */
5400 memset(pMarker, '\0', sizeof(aMarker));
5401 rc = vdIfIoIntFileWriteSync(pImage->pIfIo, pExtent->pFile->pStorage,
5402 uFileOffset, aMarker, sizeof(aMarker));
5403 AssertRC(rc);
5404 }
5405 }
5406 else if (!fDelete && fFlush)
5407 vmdkFlushImage(pImage, NULL);
5408
5409 if (pImage->pExtents != NULL)
5410 {
5411 for (unsigned i = 0 ; i < pImage->cExtents; i++)
5412 {
5413 int rc2 = vmdkFreeExtentData(pImage, &pImage->pExtents[i], fDelete);
5414 if (RT_SUCCESS(rc))
5415 rc = rc2; /* Propogate any error when closing the file. */
5416 }
5417 RTMemFree(pImage->pExtents);
5418 pImage->pExtents = NULL;
5419 }
5420 pImage->cExtents = 0;
5421 if (pImage->pFile != NULL)
5422 {
5423 int rc2 = vmdkFileClose(pImage, &pImage->pFile, fDelete);
5424 if (RT_SUCCESS(rc))
5425 rc = rc2; /* Propogate any error when closing the file. */
5426 }
5427 int rc2 = vmdkFileCheckAllClose(pImage);
5428 if (RT_SUCCESS(rc))
5429 rc = rc2; /* Propogate any error when closing the file. */
5430
5431 if (pImage->pGTCache)
5432 {
5433 RTMemFree(pImage->pGTCache);
5434 pImage->pGTCache = NULL;
5435 }
5436 if (pImage->pDescData)
5437 {
5438 RTMemFree(pImage->pDescData);
5439 pImage->pDescData = NULL;
5440 }
5441 }
5442
5443 LogFlowFunc(("returns %Rrc\n", rc));
5444 return rc;
5445}
5446
5447/**
5448 * Internal. Flush image data (and metadata) to disk.
5449 */
5450static int vmdkFlushImage(PVMDKIMAGE pImage, PVDIOCTX pIoCtx)
5451{
5452 PVMDKEXTENT pExtent;
5453 int rc = VINF_SUCCESS;
5454
5455 /* Update descriptor if changed. */
5456 if (pImage->Descriptor.fDirty)
5457 rc = vmdkWriteDescriptor(pImage, pIoCtx);
5458
5459 if (RT_SUCCESS(rc))
5460 {
5461 for (unsigned i = 0; i < pImage->cExtents; i++)
5462 {
5463 pExtent = &pImage->pExtents[i];
5464 if (pExtent->pFile != NULL && pExtent->fMetaDirty)
5465 {
5466 switch (pExtent->enmType)
5467 {
5468 case VMDKETYPE_HOSTED_SPARSE:
5469 if (!pExtent->fFooter)
5470 rc = vmdkWriteMetaSparseExtent(pImage, pExtent, 0, pIoCtx);
5471 else
5472 {
5473 uint64_t uFileOffset = pExtent->uAppendPosition;
5474 /* Simply skip writing anything if the streamOptimized
5475 * image hasn't been just created. */
5476 if (!uFileOffset)
5477 break;
5478 uFileOffset = RT_ALIGN_64(uFileOffset, 512);
5479 rc = vmdkWriteMetaSparseExtent(pImage, pExtent,
5480 uFileOffset, pIoCtx);
5481 }
5482 break;
5483 case VMDKETYPE_VMFS:
5484 case VMDKETYPE_FLAT:
5485 /* Nothing to do. */
5486 break;
5487 case VMDKETYPE_ZERO:
5488 default:
5489 AssertMsgFailed(("extent with type %d marked as dirty\n",
5490 pExtent->enmType));
5491 break;
5492 }
5493 }
5494
5495 if (RT_FAILURE(rc))
5496 break;
5497
5498 switch (pExtent->enmType)
5499 {
5500 case VMDKETYPE_HOSTED_SPARSE:
5501 case VMDKETYPE_VMFS:
5502 case VMDKETYPE_FLAT:
5503 /** @todo implement proper path absolute check. */
5504 if ( pExtent->pFile != NULL
5505 && !(pImage->uOpenFlags & VD_OPEN_FLAGS_READONLY)
5506 && !(pExtent->pszBasename[0] == RTPATH_SLASH))
5507 rc = vdIfIoIntFileFlush(pImage->pIfIo, pExtent->pFile->pStorage, pIoCtx,
5508 NULL, NULL);
5509 break;
5510 case VMDKETYPE_ZERO:
5511 /* No need to do anything for this extent. */
5512 break;
5513 default:
5514 AssertMsgFailed(("unknown extent type %d\n", pExtent->enmType));
5515 break;
5516 }
5517 }
5518 }
5519
5520 return rc;
5521}
5522
5523/**
5524 * Internal. Find extent corresponding to the sector number in the disk.
5525 */
5526static int vmdkFindExtent(PVMDKIMAGE pImage, uint64_t offSector,
5527 PVMDKEXTENT *ppExtent, uint64_t *puSectorInExtent)
5528{
5529 PVMDKEXTENT pExtent = NULL;
5530 int rc = VINF_SUCCESS;
5531
5532 for (unsigned i = 0; i < pImage->cExtents; i++)
5533 {
5534 if (offSector < pImage->pExtents[i].cNominalSectors)
5535 {
5536 pExtent = &pImage->pExtents[i];
5537 *puSectorInExtent = offSector + pImage->pExtents[i].uSectorOffset;
5538 break;
5539 }
5540 offSector -= pImage->pExtents[i].cNominalSectors;
5541 }
5542
5543 if (pExtent)
5544 *ppExtent = pExtent;
5545 else
5546 rc = VERR_IO_SECTOR_NOT_FOUND;
5547
5548 return rc;
5549}
5550
5551/**
5552 * Internal. Hash function for placing the grain table hash entries.
5553 */
5554static uint32_t vmdkGTCacheHash(PVMDKGTCACHE pCache, uint64_t uSector,
5555 unsigned uExtent)
5556{
5557 /** @todo this hash function is quite simple, maybe use a better one which
5558 * scrambles the bits better. */
5559 return (uSector + uExtent) % pCache->cEntries;
5560}
5561
5562/**
5563 * Internal. Get sector number in the extent file from the relative sector
5564 * number in the extent.
5565 */
5566static int vmdkGetSector(PVMDKIMAGE pImage, PVDIOCTX pIoCtx,
5567 PVMDKEXTENT pExtent, uint64_t uSector,
5568 uint64_t *puExtentSector)
5569{
5570 PVMDKGTCACHE pCache = pImage->pGTCache;
5571 uint64_t uGDIndex, uGTSector, uGTBlock;
5572 uint32_t uGTHash, uGTBlockIndex;
5573 PVMDKGTCACHEENTRY pGTCacheEntry;
5574 uint32_t aGTDataTmp[VMDK_GT_CACHELINE_SIZE];
5575 int rc;
5576
5577 /* For newly created and readonly/sequentially opened streamOptimized
5578 * images this must be a no-op, as the grain directory is not there. */
5579 if ( ( pImage->uImageFlags & VD_VMDK_IMAGE_FLAGS_STREAM_OPTIMIZED
5580 && pExtent->uAppendPosition)
5581 || ( pImage->uImageFlags & VD_VMDK_IMAGE_FLAGS_STREAM_OPTIMIZED
5582 && pImage->uOpenFlags & VD_OPEN_FLAGS_READONLY
5583 && pImage->uOpenFlags & VD_OPEN_FLAGS_SEQUENTIAL))
5584 {
5585 *puExtentSector = 0;
5586 return VINF_SUCCESS;
5587 }
5588
5589 uGDIndex = uSector / pExtent->cSectorsPerGDE;
5590 if (uGDIndex >= pExtent->cGDEntries)
5591 return VERR_OUT_OF_RANGE;
5592 uGTSector = pExtent->pGD[uGDIndex];
5593 if (!uGTSector)
5594 {
5595 /* There is no grain table referenced by this grain directory
5596 * entry. So there is absolutely no data in this area. */
5597 *puExtentSector = 0;
5598 return VINF_SUCCESS;
5599 }
5600
5601 uGTBlock = uSector / (pExtent->cSectorsPerGrain * VMDK_GT_CACHELINE_SIZE);
5602 uGTHash = vmdkGTCacheHash(pCache, uGTBlock, pExtent->uExtent);
5603 pGTCacheEntry = &pCache->aGTCache[uGTHash];
5604 if ( pGTCacheEntry->uExtent != pExtent->uExtent
5605 || pGTCacheEntry->uGTBlock != uGTBlock)
5606 {
5607 /* Cache miss, fetch data from disk. */
5608 PVDMETAXFER pMetaXfer;
5609 rc = vdIfIoIntFileReadMeta(pImage->pIfIo, pExtent->pFile->pStorage,
5610 VMDK_SECTOR2BYTE(uGTSector) + (uGTBlock % (pExtent->cGTEntries / VMDK_GT_CACHELINE_SIZE)) * sizeof(aGTDataTmp),
5611 aGTDataTmp, sizeof(aGTDataTmp), pIoCtx, &pMetaXfer, NULL, NULL);
5612 if (RT_FAILURE(rc))
5613 return rc;
5614 /* We can release the metadata transfer immediately. */
5615 vdIfIoIntMetaXferRelease(pImage->pIfIo, pMetaXfer);
5616 pGTCacheEntry->uExtent = pExtent->uExtent;
5617 pGTCacheEntry->uGTBlock = uGTBlock;
5618 for (unsigned i = 0; i < VMDK_GT_CACHELINE_SIZE; i++)
5619 pGTCacheEntry->aGTData[i] = RT_LE2H_U32(aGTDataTmp[i]);
5620 }
5621 uGTBlockIndex = (uSector / pExtent->cSectorsPerGrain) % VMDK_GT_CACHELINE_SIZE;
5622 uint32_t uGrainSector = pGTCacheEntry->aGTData[uGTBlockIndex];
5623 if (uGrainSector)
5624 *puExtentSector = uGrainSector + uSector % pExtent->cSectorsPerGrain;
5625 else
5626 *puExtentSector = 0;
5627 return VINF_SUCCESS;
5628}
5629
5630/**
5631 * Internal. Writes the grain and also if necessary the grain tables.
5632 * Uses the grain table cache as a true grain table.
5633 */
5634static int vmdkStreamAllocGrain(PVMDKIMAGE pImage, PVMDKEXTENT pExtent,
5635 uint64_t uSector, PVDIOCTX pIoCtx,
5636 uint64_t cbWrite)
5637{
5638 uint32_t uGrain;
5639 uint32_t uGDEntry, uLastGDEntry;
5640 uint32_t cbGrain = 0;
5641 uint32_t uCacheLine, uCacheEntry;
5642 const void *pData;
5643 int rc;
5644
5645 /* Very strict requirements: always write at least one full grain, with
5646 * proper alignment. Everything else would require reading of already
5647 * written data, which we don't support for obvious reasons. The only
5648 * exception is the last grain, and only if the image size specifies
5649 * that only some portion holds data. In any case the write must be
5650 * within the image limits, no "overshoot" allowed. */
5651 if ( cbWrite == 0
5652 || ( cbWrite < VMDK_SECTOR2BYTE(pExtent->cSectorsPerGrain)
5653 && pExtent->cNominalSectors - uSector >= pExtent->cSectorsPerGrain)
5654 || uSector % pExtent->cSectorsPerGrain
5655 || uSector + VMDK_BYTE2SECTOR(cbWrite) > pExtent->cNominalSectors)
5656 return VERR_INVALID_PARAMETER;
5657
5658 /* Clip write range to at most the rest of the grain. */
5659 cbWrite = RT_MIN(cbWrite, VMDK_SECTOR2BYTE(pExtent->cSectorsPerGrain - uSector % pExtent->cSectorsPerGrain));
5660
5661 /* Do not allow to go back. */
5662 uGrain = uSector / pExtent->cSectorsPerGrain;
5663 uCacheLine = uGrain % pExtent->cGTEntries / VMDK_GT_CACHELINE_SIZE;
5664 uCacheEntry = uGrain % VMDK_GT_CACHELINE_SIZE;
5665 uGDEntry = uGrain / pExtent->cGTEntries;
5666 uLastGDEntry = pExtent->uLastGrainAccess / pExtent->cGTEntries;
5667 if (uGrain < pExtent->uLastGrainAccess)
5668 return VERR_VD_VMDK_INVALID_WRITE;
5669
5670 /* Zero byte write optimization. Since we don't tell VBoxHDD that we need
5671 * to allocate something, we also need to detect the situation ourself. */
5672 if ( !(pImage->uOpenFlags & VD_OPEN_FLAGS_HONOR_ZEROES)
5673 && vdIfIoIntIoCtxIsZero(pImage->pIfIo, pIoCtx, cbWrite, true /* fAdvance */))
5674 return VINF_SUCCESS;
5675
5676 if (uGDEntry != uLastGDEntry)
5677 {
5678 rc = vmdkStreamFlushGT(pImage, pExtent, uLastGDEntry);
5679 if (RT_FAILURE(rc))
5680 return rc;
5681 vmdkStreamClearGT(pImage, pExtent);
5682 for (uint32_t i = uLastGDEntry + 1; i < uGDEntry; i++)
5683 {
5684 rc = vmdkStreamFlushGT(pImage, pExtent, i);
5685 if (RT_FAILURE(rc))
5686 return rc;
5687 }
5688 }
5689
5690 uint64_t uFileOffset;
5691 uFileOffset = pExtent->uAppendPosition;
5692 if (!uFileOffset)
5693 return VERR_INTERNAL_ERROR;
5694 /* Align to sector, as the previous write could have been any size. */
5695 uFileOffset = RT_ALIGN_64(uFileOffset, 512);
5696
5697 /* Paranoia check: extent type, grain table buffer presence and
5698 * grain table buffer space. Also grain table entry must be clear. */
5699 if ( pExtent->enmType != VMDKETYPE_HOSTED_SPARSE
5700 || !pImage->pGTCache
5701 || pExtent->cGTEntries > VMDK_GT_CACHE_SIZE * VMDK_GT_CACHELINE_SIZE
5702 || pImage->pGTCache->aGTCache[uCacheLine].aGTData[uCacheEntry])
5703 return VERR_INTERNAL_ERROR;
5704
5705 /* Update grain table entry. */
5706 pImage->pGTCache->aGTCache[uCacheLine].aGTData[uCacheEntry] = VMDK_BYTE2SECTOR(uFileOffset);
5707
5708 if (cbWrite != VMDK_SECTOR2BYTE(pExtent->cSectorsPerGrain))
5709 {
5710 vdIfIoIntIoCtxCopyFrom(pImage->pIfIo, pIoCtx, pExtent->pvGrain, cbWrite);
5711 memset((char *)pExtent->pvGrain + cbWrite, '\0',
5712 VMDK_SECTOR2BYTE(pExtent->cSectorsPerGrain) - cbWrite);
5713 pData = pExtent->pvGrain;
5714 }
5715 else
5716 {
5717 RTSGSEG Segment;
5718 unsigned cSegments = 1;
5719 size_t cbSeg = 0;
5720
5721 cbSeg = vdIfIoIntIoCtxSegArrayCreate(pImage->pIfIo, pIoCtx, &Segment,
5722 &cSegments, VMDK_SECTOR2BYTE(pExtent->cSectorsPerGrain));
5723 Assert(cbSeg == VMDK_SECTOR2BYTE(pExtent->cSectorsPerGrain));
5724 pData = Segment.pvSeg;
5725 }
5726 rc = vmdkFileDeflateSync(pImage, pExtent, uFileOffset, pData,
5727 VMDK_SECTOR2BYTE(pExtent->cSectorsPerGrain),
5728 uSector, &cbGrain);
5729 if (RT_FAILURE(rc))
5730 {
5731 pExtent->uGrainSectorAbs = 0;
5732 AssertRC(rc);
5733 return vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: cannot write compressed data block in '%s'"), pExtent->pszFullname);
5734 }
5735 pExtent->uLastGrainAccess = uGrain;
5736 pExtent->uAppendPosition += cbGrain;
5737
5738 return rc;
5739}
5740
5741/**
5742 * Internal: Updates the grain table during grain allocation.
5743 */
5744static int vmdkAllocGrainGTUpdate(PVMDKIMAGE pImage, PVMDKEXTENT pExtent, PVDIOCTX pIoCtx,
5745 PVMDKGRAINALLOCASYNC pGrainAlloc)
5746{
5747 int rc = VINF_SUCCESS;
5748 PVMDKGTCACHE pCache = pImage->pGTCache;
5749 uint32_t aGTDataTmp[VMDK_GT_CACHELINE_SIZE];
5750 uint32_t uGTHash, uGTBlockIndex;
5751 uint64_t uGTSector, uRGTSector, uGTBlock;
5752 uint64_t uSector = pGrainAlloc->uSector;
5753 PVMDKGTCACHEENTRY pGTCacheEntry;
5754
5755 LogFlowFunc(("pImage=%#p pExtent=%#p pCache=%#p pIoCtx=%#p pGrainAlloc=%#p\n",
5756 pImage, pExtent, pCache, pIoCtx, pGrainAlloc));
5757
5758 uGTSector = pGrainAlloc->uGTSector;
5759 uRGTSector = pGrainAlloc->uRGTSector;
5760 LogFlow(("uGTSector=%llu uRGTSector=%llu\n", uGTSector, uRGTSector));
5761
5762 /* Update the grain table (and the cache). */
5763 uGTBlock = uSector / (pExtent->cSectorsPerGrain * VMDK_GT_CACHELINE_SIZE);
5764 uGTHash = vmdkGTCacheHash(pCache, uGTBlock, pExtent->uExtent);
5765 pGTCacheEntry = &pCache->aGTCache[uGTHash];
5766 if ( pGTCacheEntry->uExtent != pExtent->uExtent
5767 || pGTCacheEntry->uGTBlock != uGTBlock)
5768 {
5769 /* Cache miss, fetch data from disk. */
5770 LogFlow(("Cache miss, fetch data from disk\n"));
5771 PVDMETAXFER pMetaXfer = NULL;
5772 rc = vdIfIoIntFileReadMeta(pImage->pIfIo, pExtent->pFile->pStorage,
5773 VMDK_SECTOR2BYTE(uGTSector) + (uGTBlock % (pExtent->cGTEntries / VMDK_GT_CACHELINE_SIZE)) * sizeof(aGTDataTmp),
5774 aGTDataTmp, sizeof(aGTDataTmp), pIoCtx,
5775 &pMetaXfer, vmdkAllocGrainComplete, pGrainAlloc);
5776 if (rc == VERR_VD_ASYNC_IO_IN_PROGRESS)
5777 {
5778 pGrainAlloc->cIoXfersPending++;
5779 pGrainAlloc->fGTUpdateNeeded = true;
5780 /* Leave early, we will be called again after the read completed. */
5781 LogFlowFunc(("Metadata read in progress, leaving\n"));
5782 return rc;
5783 }
5784 else if (RT_FAILURE(rc))
5785 return vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: cannot read allocated grain table entry in '%s'"), pExtent->pszFullname);
5786 vdIfIoIntMetaXferRelease(pImage->pIfIo, pMetaXfer);
5787 pGTCacheEntry->uExtent = pExtent->uExtent;
5788 pGTCacheEntry->uGTBlock = uGTBlock;
5789 for (unsigned i = 0; i < VMDK_GT_CACHELINE_SIZE; i++)
5790 pGTCacheEntry->aGTData[i] = RT_LE2H_U32(aGTDataTmp[i]);
5791 }
5792 else
5793 {
5794 /* Cache hit. Convert grain table block back to disk format, otherwise
5795 * the code below will write garbage for all but the updated entry. */
5796 for (unsigned i = 0; i < VMDK_GT_CACHELINE_SIZE; i++)
5797 aGTDataTmp[i] = RT_H2LE_U32(pGTCacheEntry->aGTData[i]);
5798 }
5799 pGrainAlloc->fGTUpdateNeeded = false;
5800 uGTBlockIndex = (uSector / pExtent->cSectorsPerGrain) % VMDK_GT_CACHELINE_SIZE;
5801 aGTDataTmp[uGTBlockIndex] = RT_H2LE_U32(VMDK_BYTE2SECTOR(pGrainAlloc->uGrainOffset));
5802 pGTCacheEntry->aGTData[uGTBlockIndex] = VMDK_BYTE2SECTOR(pGrainAlloc->uGrainOffset);
5803 /* Update grain table on disk. */
5804 rc = vdIfIoIntFileWriteMeta(pImage->pIfIo, pExtent->pFile->pStorage,
5805 VMDK_SECTOR2BYTE(uGTSector) + (uGTBlock % (pExtent->cGTEntries / VMDK_GT_CACHELINE_SIZE)) * sizeof(aGTDataTmp),
5806 aGTDataTmp, sizeof(aGTDataTmp), pIoCtx,
5807 vmdkAllocGrainComplete, pGrainAlloc);
5808 if (rc == VERR_VD_ASYNC_IO_IN_PROGRESS)
5809 pGrainAlloc->cIoXfersPending++;
5810 else if (RT_FAILURE(rc))
5811 return vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: cannot write updated grain table in '%s'"), pExtent->pszFullname);
5812 if (pExtent->pRGD)
5813 {
5814 /* Update backup grain table on disk. */
5815 rc = vdIfIoIntFileWriteMeta(pImage->pIfIo, pExtent->pFile->pStorage,
5816 VMDK_SECTOR2BYTE(uRGTSector) + (uGTBlock % (pExtent->cGTEntries / VMDK_GT_CACHELINE_SIZE)) * sizeof(aGTDataTmp),
5817 aGTDataTmp, sizeof(aGTDataTmp), pIoCtx,
5818 vmdkAllocGrainComplete, pGrainAlloc);
5819 if (rc == VERR_VD_ASYNC_IO_IN_PROGRESS)
5820 pGrainAlloc->cIoXfersPending++;
5821 else if (RT_FAILURE(rc))
5822 return vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: cannot write updated backup grain table in '%s'"), pExtent->pszFullname);
5823 }
5824
5825 LogFlowFunc(("leaving rc=%Rrc\n", rc));
5826 return rc;
5827}
5828
5829/**
5830 * Internal - complete the grain allocation by updating disk grain table if required.
5831 */
5832static DECLCALLBACK(int) vmdkAllocGrainComplete(void *pBackendData, PVDIOCTX pIoCtx, void *pvUser, int rcReq)
5833{
5834 RT_NOREF1(rcReq);
5835 int rc = VINF_SUCCESS;
5836 PVMDKIMAGE pImage = (PVMDKIMAGE)pBackendData;
5837 PVMDKGRAINALLOCASYNC pGrainAlloc = (PVMDKGRAINALLOCASYNC)pvUser;
5838
5839 LogFlowFunc(("pBackendData=%#p pIoCtx=%#p pvUser=%#p rcReq=%Rrc\n",
5840 pBackendData, pIoCtx, pvUser, rcReq));
5841
5842 pGrainAlloc->cIoXfersPending--;
5843 if (!pGrainAlloc->cIoXfersPending && pGrainAlloc->fGTUpdateNeeded)
5844 rc = vmdkAllocGrainGTUpdate(pImage, pGrainAlloc->pExtent, pIoCtx, pGrainAlloc);
5845
5846 if (!pGrainAlloc->cIoXfersPending)
5847 {
5848 /* Grain allocation completed. */
5849 RTMemFree(pGrainAlloc);
5850 }
5851
5852 LogFlowFunc(("Leaving rc=%Rrc\n", rc));
5853 return rc;
5854}
5855
5856/**
5857 * Internal. Allocates a new grain table (if necessary).
5858 */
5859static int vmdkAllocGrain(PVMDKIMAGE pImage, PVMDKEXTENT pExtent, PVDIOCTX pIoCtx,
5860 uint64_t uSector, uint64_t cbWrite)
5861{
5862 PVMDKGTCACHE pCache = pImage->pGTCache; NOREF(pCache);
5863 uint64_t uGDIndex, uGTSector, uRGTSector;
5864 uint64_t uFileOffset;
5865 PVMDKGRAINALLOCASYNC pGrainAlloc = NULL;
5866 int rc;
5867
5868 LogFlowFunc(("pCache=%#p pExtent=%#p pIoCtx=%#p uSector=%llu cbWrite=%llu\n",
5869 pCache, pExtent, pIoCtx, uSector, cbWrite));
5870
5871 pGrainAlloc = (PVMDKGRAINALLOCASYNC)RTMemAllocZ(sizeof(VMDKGRAINALLOCASYNC));
5872 if (!pGrainAlloc)
5873 return VERR_NO_MEMORY;
5874
5875 pGrainAlloc->pExtent = pExtent;
5876 pGrainAlloc->uSector = uSector;
5877
5878 uGDIndex = uSector / pExtent->cSectorsPerGDE;
5879 if (uGDIndex >= pExtent->cGDEntries)
5880 {
5881 RTMemFree(pGrainAlloc);
5882 return VERR_OUT_OF_RANGE;
5883 }
5884 uGTSector = pExtent->pGD[uGDIndex];
5885 if (pExtent->pRGD)
5886 uRGTSector = pExtent->pRGD[uGDIndex];
5887 else
5888 uRGTSector = 0; /**< avoid compiler warning */
5889 if (!uGTSector)
5890 {
5891 LogFlow(("Allocating new grain table\n"));
5892
5893 /* There is no grain table referenced by this grain directory
5894 * entry. So there is absolutely no data in this area. Allocate
5895 * a new grain table and put the reference to it in the GDs. */
5896 uFileOffset = pExtent->uAppendPosition;
5897 if (!uFileOffset)
5898 {
5899 RTMemFree(pGrainAlloc);
5900 return VERR_INTERNAL_ERROR;
5901 }
5902 Assert(!(uFileOffset % 512));
5903
5904 uFileOffset = RT_ALIGN_64(uFileOffset, 512);
5905 uGTSector = VMDK_BYTE2SECTOR(uFileOffset);
5906
5907 /* Normally the grain table is preallocated for hosted sparse extents
5908 * that support more than 32 bit sector numbers. So this shouldn't
5909 * ever happen on a valid extent. */
5910 if (uGTSector > UINT32_MAX)
5911 {
5912 RTMemFree(pGrainAlloc);
5913 return VERR_VD_VMDK_INVALID_HEADER;
5914 }
5915
5916 /* Write grain table by writing the required number of grain table
5917 * cache chunks. Allocate memory dynamically here or we flood the
5918 * metadata cache with very small entries. */
5919 size_t cbGTDataTmp = pExtent->cGTEntries * sizeof(uint32_t);
5920 uint32_t *paGTDataTmp = (uint32_t *)RTMemTmpAllocZ(cbGTDataTmp);
5921
5922 if (!paGTDataTmp)
5923 {
5924 RTMemFree(pGrainAlloc);
5925 return VERR_NO_MEMORY;
5926 }
5927
5928 memset(paGTDataTmp, '\0', cbGTDataTmp);
5929 rc = vdIfIoIntFileWriteMeta(pImage->pIfIo, pExtent->pFile->pStorage,
5930 VMDK_SECTOR2BYTE(uGTSector),
5931 paGTDataTmp, cbGTDataTmp, pIoCtx,
5932 vmdkAllocGrainComplete, pGrainAlloc);
5933 if (rc == VERR_VD_ASYNC_IO_IN_PROGRESS)
5934 pGrainAlloc->cIoXfersPending++;
5935 else if (RT_FAILURE(rc))
5936 {
5937 RTMemTmpFree(paGTDataTmp);
5938 RTMemFree(pGrainAlloc);
5939 return vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: cannot write grain table allocation in '%s'"), pExtent->pszFullname);
5940 }
5941 pExtent->uAppendPosition = RT_ALIGN_64( pExtent->uAppendPosition
5942 + cbGTDataTmp, 512);
5943
5944 if (pExtent->pRGD)
5945 {
5946 AssertReturn(!uRGTSector, VERR_VD_VMDK_INVALID_HEADER);
5947 uFileOffset = pExtent->uAppendPosition;
5948 if (!uFileOffset)
5949 return VERR_INTERNAL_ERROR;
5950 Assert(!(uFileOffset % 512));
5951 uRGTSector = VMDK_BYTE2SECTOR(uFileOffset);
5952
5953 /* Normally the redundant grain table is preallocated for hosted
5954 * sparse extents that support more than 32 bit sector numbers. So
5955 * this shouldn't ever happen on a valid extent. */
5956 if (uRGTSector > UINT32_MAX)
5957 {
5958 RTMemTmpFree(paGTDataTmp);
5959 return VERR_VD_VMDK_INVALID_HEADER;
5960 }
5961
5962 /* Write grain table by writing the required number of grain table
5963 * cache chunks. Allocate memory dynamically here or we flood the
5964 * metadata cache with very small entries. */
5965 rc = vdIfIoIntFileWriteMeta(pImage->pIfIo, pExtent->pFile->pStorage,
5966 VMDK_SECTOR2BYTE(uRGTSector),
5967 paGTDataTmp, cbGTDataTmp, pIoCtx,
5968 vmdkAllocGrainComplete, pGrainAlloc);
5969 if (rc == VERR_VD_ASYNC_IO_IN_PROGRESS)
5970 pGrainAlloc->cIoXfersPending++;
5971 else if (RT_FAILURE(rc))
5972 {
5973 RTMemTmpFree(paGTDataTmp);
5974 return vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: cannot write backup grain table allocation in '%s'"), pExtent->pszFullname);
5975 }
5976
5977 pExtent->uAppendPosition = pExtent->uAppendPosition + cbGTDataTmp;
5978 }
5979
5980 RTMemTmpFree(paGTDataTmp);
5981
5982 /* Update the grain directory on disk (doing it before writing the
5983 * grain table will result in a garbled extent if the operation is
5984 * aborted for some reason. Otherwise the worst that can happen is
5985 * some unused sectors in the extent. */
5986 uint32_t uGTSectorLE = RT_H2LE_U64(uGTSector);
5987 rc = vdIfIoIntFileWriteMeta(pImage->pIfIo, pExtent->pFile->pStorage,
5988 VMDK_SECTOR2BYTE(pExtent->uSectorGD) + uGDIndex * sizeof(uGTSectorLE),
5989 &uGTSectorLE, sizeof(uGTSectorLE), pIoCtx,
5990 vmdkAllocGrainComplete, pGrainAlloc);
5991 if (rc == VERR_VD_ASYNC_IO_IN_PROGRESS)
5992 pGrainAlloc->cIoXfersPending++;
5993 else if (RT_FAILURE(rc))
5994 return vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: cannot write grain directory entry in '%s'"), pExtent->pszFullname);
5995 if (pExtent->pRGD)
5996 {
5997 uint32_t uRGTSectorLE = RT_H2LE_U64(uRGTSector);
5998 rc = vdIfIoIntFileWriteMeta(pImage->pIfIo, pExtent->pFile->pStorage,
5999 VMDK_SECTOR2BYTE(pExtent->uSectorRGD) + uGDIndex * sizeof(uGTSectorLE),
6000 &uRGTSectorLE, sizeof(uRGTSectorLE), pIoCtx,
6001 vmdkAllocGrainComplete, pGrainAlloc);
6002 if (rc == VERR_VD_ASYNC_IO_IN_PROGRESS)
6003 pGrainAlloc->cIoXfersPending++;
6004 else if (RT_FAILURE(rc))
6005 return vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: cannot write backup grain directory entry in '%s'"), pExtent->pszFullname);
6006 }
6007
6008 /* As the final step update the in-memory copy of the GDs. */
6009 pExtent->pGD[uGDIndex] = uGTSector;
6010 if (pExtent->pRGD)
6011 pExtent->pRGD[uGDIndex] = uRGTSector;
6012 }
6013
6014 LogFlow(("uGTSector=%llu uRGTSector=%llu\n", uGTSector, uRGTSector));
6015 pGrainAlloc->uGTSector = uGTSector;
6016 pGrainAlloc->uRGTSector = uRGTSector;
6017
6018 uFileOffset = pExtent->uAppendPosition;
6019 if (!uFileOffset)
6020 return VERR_INTERNAL_ERROR;
6021 Assert(!(uFileOffset % 512));
6022
6023 pGrainAlloc->uGrainOffset = uFileOffset;
6024
6025 if (pImage->uImageFlags & VD_VMDK_IMAGE_FLAGS_STREAM_OPTIMIZED)
6026 {
6027 AssertMsgReturn(vdIfIoIntIoCtxIsSynchronous(pImage->pIfIo, pIoCtx),
6028 ("Accesses to stream optimized images must be synchronous\n"),
6029 VERR_INVALID_STATE);
6030
6031 if (cbWrite != VMDK_SECTOR2BYTE(pExtent->cSectorsPerGrain))
6032 return vdIfError(pImage->pIfError, VERR_INTERNAL_ERROR, RT_SRC_POS, N_("VMDK: not enough data for a compressed data block in '%s'"), pExtent->pszFullname);
6033
6034 /* Invalidate cache, just in case some code incorrectly allows mixing
6035 * of reads and writes. Normally shouldn't be needed. */
6036 pExtent->uGrainSectorAbs = 0;
6037
6038 /* Write compressed data block and the markers. */
6039 uint32_t cbGrain = 0;
6040 size_t cbSeg = 0;
6041 RTSGSEG Segment;
6042 unsigned cSegments = 1;
6043
6044 cbSeg = vdIfIoIntIoCtxSegArrayCreate(pImage->pIfIo, pIoCtx, &Segment,
6045 &cSegments, cbWrite);
6046 Assert(cbSeg == cbWrite);
6047
6048 rc = vmdkFileDeflateSync(pImage, pExtent, uFileOffset,
6049 Segment.pvSeg, cbWrite, uSector, &cbGrain);
6050 if (RT_FAILURE(rc))
6051 {
6052 AssertRC(rc);
6053 return vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: cannot write allocated compressed data block in '%s'"), pExtent->pszFullname);
6054 }
6055 pExtent->uLastGrainAccess = uSector / pExtent->cSectorsPerGrain;
6056 pExtent->uAppendPosition += cbGrain;
6057 }
6058 else
6059 {
6060 /* Write the data. Always a full grain, or we're in big trouble. */
6061 rc = vdIfIoIntFileWriteUser(pImage->pIfIo, pExtent->pFile->pStorage,
6062 uFileOffset, pIoCtx, cbWrite,
6063 vmdkAllocGrainComplete, pGrainAlloc);
6064 if (rc == VERR_VD_ASYNC_IO_IN_PROGRESS)
6065 pGrainAlloc->cIoXfersPending++;
6066 else if (RT_FAILURE(rc))
6067 return vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: cannot write allocated data block in '%s'"), pExtent->pszFullname);
6068
6069 pExtent->uAppendPosition += cbWrite;
6070 }
6071
6072 rc = vmdkAllocGrainGTUpdate(pImage, pExtent, pIoCtx, pGrainAlloc);
6073
6074 if (!pGrainAlloc->cIoXfersPending)
6075 {
6076 /* Grain allocation completed. */
6077 RTMemFree(pGrainAlloc);
6078 }
6079
6080 LogFlowFunc(("leaving rc=%Rrc\n", rc));
6081
6082 return rc;
6083}
6084
6085/**
6086 * Internal. Reads the contents by sequentially going over the compressed
6087 * grains (hoping that they are in sequence).
6088 */
6089static int vmdkStreamReadSequential(PVMDKIMAGE pImage, PVMDKEXTENT pExtent,
6090 uint64_t uSector, PVDIOCTX pIoCtx,
6091 uint64_t cbRead)
6092{
6093 int rc;
6094
6095 LogFlowFunc(("pImage=%#p pExtent=%#p uSector=%llu pIoCtx=%#p cbRead=%llu\n",
6096 pImage, pExtent, uSector, pIoCtx, cbRead));
6097
6098 AssertMsgReturn(vdIfIoIntIoCtxIsSynchronous(pImage->pIfIo, pIoCtx),
6099 ("Async I/O not supported for sequential stream optimized images\n"),
6100 VERR_INVALID_STATE);
6101
6102 /* Do not allow to go back. */
6103 uint32_t uGrain = uSector / pExtent->cSectorsPerGrain;
6104 if (uGrain < pExtent->uLastGrainAccess)
6105 return VERR_VD_VMDK_INVALID_STATE;
6106 pExtent->uLastGrainAccess = uGrain;
6107
6108 /* After a previous error do not attempt to recover, as it would need
6109 * seeking (in the general case backwards which is forbidden). */
6110 if (!pExtent->uGrainSectorAbs)
6111 return VERR_VD_VMDK_INVALID_STATE;
6112
6113 /* Check if we need to read something from the image or if what we have
6114 * in the buffer is good to fulfill the request. */
6115 if (!pExtent->cbGrainStreamRead || uGrain > pExtent->uGrain)
6116 {
6117 uint32_t uGrainSectorAbs = pExtent->uGrainSectorAbs
6118 + VMDK_BYTE2SECTOR(pExtent->cbGrainStreamRead);
6119
6120 /* Get the marker from the next data block - and skip everything which
6121 * is not a compressed grain. If it's a compressed grain which is for
6122 * the requested sector (or after), read it. */
6123 VMDKMARKER Marker;
6124 do
6125 {
6126 RT_ZERO(Marker);
6127 rc = vdIfIoIntFileReadSync(pImage->pIfIo, pExtent->pFile->pStorage,
6128 VMDK_SECTOR2BYTE(uGrainSectorAbs),
6129 &Marker, RT_UOFFSETOF(VMDKMARKER, uType));
6130 if (RT_FAILURE(rc))
6131 return rc;
6132 Marker.uSector = RT_LE2H_U64(Marker.uSector);
6133 Marker.cbSize = RT_LE2H_U32(Marker.cbSize);
6134
6135 if (Marker.cbSize == 0)
6136 {
6137 /* A marker for something else than a compressed grain. */
6138 rc = vdIfIoIntFileReadSync(pImage->pIfIo, pExtent->pFile->pStorage,
6139 VMDK_SECTOR2BYTE(uGrainSectorAbs)
6140 + RT_UOFFSETOF(VMDKMARKER, uType),
6141 &Marker.uType, sizeof(Marker.uType));
6142 if (RT_FAILURE(rc))
6143 return rc;
6144 Marker.uType = RT_LE2H_U32(Marker.uType);
6145 switch (Marker.uType)
6146 {
6147 case VMDK_MARKER_EOS:
6148 uGrainSectorAbs++;
6149 /* Read (or mostly skip) to the end of file. Uses the
6150 * Marker (LBA sector) as it is unused anyway. This
6151 * makes sure that really everything is read in the
6152 * success case. If this read fails it means the image
6153 * is truncated, but this is harmless so ignore. */
6154 vdIfIoIntFileReadSync(pImage->pIfIo, pExtent->pFile->pStorage,
6155 VMDK_SECTOR2BYTE(uGrainSectorAbs)
6156 + 511,
6157 &Marker.uSector, 1);
6158 break;
6159 case VMDK_MARKER_GT:
6160 uGrainSectorAbs += 1 + VMDK_BYTE2SECTOR(pExtent->cGTEntries * sizeof(uint32_t));
6161 break;
6162 case VMDK_MARKER_GD:
6163 uGrainSectorAbs += 1 + VMDK_BYTE2SECTOR(RT_ALIGN(pExtent->cGDEntries * sizeof(uint32_t), 512));
6164 break;
6165 case VMDK_MARKER_FOOTER:
6166 uGrainSectorAbs += 2;
6167 break;
6168 case VMDK_MARKER_UNSPECIFIED:
6169 /* Skip over the contents of the unspecified marker
6170 * type 4 which exists in some vSphere created files. */
6171 /** @todo figure out what the payload means. */
6172 uGrainSectorAbs += 1;
6173 break;
6174 default:
6175 AssertMsgFailed(("VMDK: corrupted marker, type=%#x\n", Marker.uType));
6176 pExtent->uGrainSectorAbs = 0;
6177 return VERR_VD_VMDK_INVALID_STATE;
6178 }
6179 pExtent->cbGrainStreamRead = 0;
6180 }
6181 else
6182 {
6183 /* A compressed grain marker. If it is at/after what we're
6184 * interested in read and decompress data. */
6185 if (uSector > Marker.uSector + pExtent->cSectorsPerGrain)
6186 {
6187 uGrainSectorAbs += VMDK_BYTE2SECTOR(RT_ALIGN(Marker.cbSize + RT_UOFFSETOF(VMDKMARKER, uType), 512));
6188 continue;
6189 }
6190 uint64_t uLBA = 0;
6191 uint32_t cbGrainStreamRead = 0;
6192 rc = vmdkFileInflateSync(pImage, pExtent,
6193 VMDK_SECTOR2BYTE(uGrainSectorAbs),
6194 pExtent->pvGrain,
6195 VMDK_SECTOR2BYTE(pExtent->cSectorsPerGrain),
6196 &Marker, &uLBA, &cbGrainStreamRead);
6197 if (RT_FAILURE(rc))
6198 {
6199 pExtent->uGrainSectorAbs = 0;
6200 return rc;
6201 }
6202 if ( pExtent->uGrain
6203 && uLBA / pExtent->cSectorsPerGrain <= pExtent->uGrain)
6204 {
6205 pExtent->uGrainSectorAbs = 0;
6206 return VERR_VD_VMDK_INVALID_STATE;
6207 }
6208 pExtent->uGrain = uLBA / pExtent->cSectorsPerGrain;
6209 pExtent->cbGrainStreamRead = cbGrainStreamRead;
6210 break;
6211 }
6212 } while (Marker.uType != VMDK_MARKER_EOS);
6213
6214 pExtent->uGrainSectorAbs = uGrainSectorAbs;
6215
6216 if (!pExtent->cbGrainStreamRead && Marker.uType == VMDK_MARKER_EOS)
6217 {
6218 pExtent->uGrain = UINT32_MAX;
6219 /* Must set a non-zero value for pExtent->cbGrainStreamRead or
6220 * the next read would try to get more data, and we're at EOF. */
6221 pExtent->cbGrainStreamRead = 1;
6222 }
6223 }
6224
6225 if (pExtent->uGrain > uSector / pExtent->cSectorsPerGrain)
6226 {
6227 /* The next data block we have is not for this area, so just return
6228 * that there is no data. */
6229 LogFlowFunc(("returns VERR_VD_BLOCK_FREE\n"));
6230 return VERR_VD_BLOCK_FREE;
6231 }
6232
6233 uint32_t uSectorInGrain = uSector % pExtent->cSectorsPerGrain;
6234 vdIfIoIntIoCtxCopyTo(pImage->pIfIo, pIoCtx,
6235 (uint8_t *)pExtent->pvGrain + VMDK_SECTOR2BYTE(uSectorInGrain),
6236 cbRead);
6237 LogFlowFunc(("returns VINF_SUCCESS\n"));
6238 return VINF_SUCCESS;
6239}
6240
6241/**
6242 * Replaces a fragment of a string with the specified string.
6243 *
6244 * @returns Pointer to the allocated UTF-8 string.
6245 * @param pszWhere UTF-8 string to search in.
6246 * @param pszWhat UTF-8 string to search for.
6247 * @param pszByWhat UTF-8 string to replace the found string with.
6248 *
6249 * @note r=bird: This is only used by vmdkRenameWorker(). The first use is
6250 * for updating the base name in the descriptor, the second is for
6251 * generating new filenames for extents. This code borked when
6252 * RTPathAbs started correcting the drive letter case on windows,
6253 * when strstr failed because the search string was not subjected
6254 * to RTPathAbs while pExtent->pszFullname was. I fixed this by
6255 * applying RTPathAbs to the places it wasn't applied.
6256 *
6257 * However, this highlights some undocumented ASSUMPTIONS as well as
6258 * terrible shortcomings of the approach.
6259 *
6260 * Given the right filename, it may also screw up the descriptor. Take
6261 * the descriptor text 'RW 2048 SPARSE "Test0.vmdk"' for instance,
6262 * we'll be asked to replace "Test0" with something, no problem. No,
6263 * imagine 'RW 2048 SPARSE "SPARSE.vmdk"', 'RW 2048 SPARSE "RW.vmdk"'
6264 * or 'RW 2048 SPARSE "2048.vmdk"', and the strstr approach falls on
6265 * its bum. The descriptor string must be parsed and reconstructed,
6266 * the lazy strstr approach doesn't cut it.
6267 *
6268 * I'm also curious as to what would be the correct escaping of '"' in
6269 * the file name and how that is supposed to be handled, because it
6270 * needs to be or such names must be rejected in several places (maybe
6271 * they are, I didn't check).
6272 *
6273 * When this function is used to replace the start of a path, I think
6274 * the assumption from the prep/setup code is that we kind of know
6275 * what we're working on (I could be wrong). However, using strstr
6276 * instead of strncmp/RTStrNICmp makes no sense and isn't future proof.
6277 * Especially on unix systems, weird stuff could happen if someone
6278 * unwittingly tinkers with the prep/setup code. What should really be
6279 * done here is using a new RTPathStartEx function that (via flags)
6280 * allows matching partial final component and returns the length of
6281 * what it matched up (in case it skipped slashes and '.' components).
6282 *
6283 */
6284static char *vmdkStrReplace(const char *pszWhere, const char *pszWhat,
6285 const char *pszByWhat)
6286{
6287 AssertPtr(pszWhere);
6288 AssertPtr(pszWhat);
6289 AssertPtr(pszByWhat);
6290 const char *pszFoundStr = strstr(pszWhere, pszWhat);
6291 if (!pszFoundStr)
6292 {
6293 LogFlowFunc(("Failed to find '%s' in '%s'!\n", pszWhat, pszWhere));
6294 return NULL;
6295 }
6296 size_t cbFinal = strlen(pszWhere) + 1 + strlen(pszByWhat) - strlen(pszWhat);
6297 char *pszNewStr = RTStrAlloc(cbFinal);
6298 if (pszNewStr)
6299 {
6300 char *pszTmp = pszNewStr;
6301 memcpy(pszTmp, pszWhere, pszFoundStr - pszWhere);
6302 pszTmp += pszFoundStr - pszWhere;
6303 memcpy(pszTmp, pszByWhat, strlen(pszByWhat));
6304 pszTmp += strlen(pszByWhat);
6305 strcpy(pszTmp, pszFoundStr + strlen(pszWhat));
6306 }
6307 return pszNewStr;
6308}
6309
6310
6311/** @copydoc VDIMAGEBACKEND::pfnProbe */
6312static DECLCALLBACK(int) vmdkProbe(const char *pszFilename, PVDINTERFACE pVDIfsDisk,
6313 PVDINTERFACE pVDIfsImage, VDTYPE enmDesiredType, VDTYPE *penmType)
6314{
6315 RT_NOREF(enmDesiredType);
6316 LogFlowFunc(("pszFilename=\"%s\" pVDIfsDisk=%#p pVDIfsImage=%#p penmType=%#p\n",
6317 pszFilename, pVDIfsDisk, pVDIfsImage, penmType));
6318
6319 AssertReturn((VALID_PTR(pszFilename) && *pszFilename), VERR_INVALID_PARAMETER);
6320
6321 int rc = VINF_SUCCESS;
6322 PVMDKIMAGE pImage = (PVMDKIMAGE)RTMemAllocZ(RT_UOFFSETOF(VMDKIMAGE, RegionList.aRegions[1]));
6323 if (RT_LIKELY(pImage))
6324 {
6325 pImage->pszFilename = pszFilename;
6326 pImage->pFile = NULL;
6327 pImage->pExtents = NULL;
6328 pImage->pFiles = NULL;
6329 pImage->pGTCache = NULL;
6330 pImage->pDescData = NULL;
6331 pImage->pVDIfsDisk = pVDIfsDisk;
6332 pImage->pVDIfsImage = pVDIfsImage;
6333 /** @todo speed up this test open (VD_OPEN_FLAGS_INFO) by skipping as
6334 * much as possible in vmdkOpenImage. */
6335 rc = vmdkOpenImage(pImage, VD_OPEN_FLAGS_INFO | VD_OPEN_FLAGS_READONLY);
6336 vmdkFreeImage(pImage, false, false /*fFlush*/);
6337 RTMemFree(pImage);
6338
6339 if (RT_SUCCESS(rc))
6340 *penmType = VDTYPE_HDD;
6341 }
6342 else
6343 rc = VERR_NO_MEMORY;
6344
6345 LogFlowFunc(("returns %Rrc\n", rc));
6346 return rc;
6347}
6348
6349/** @copydoc VDIMAGEBACKEND::pfnOpen */
6350static DECLCALLBACK(int) vmdkOpen(const char *pszFilename, unsigned uOpenFlags,
6351 PVDINTERFACE pVDIfsDisk, PVDINTERFACE pVDIfsImage,
6352 VDTYPE enmType, void **ppBackendData)
6353{
6354 RT_NOREF1(enmType); /**< @todo r=klaus make use of the type info. */
6355
6356 LogFlowFunc(("pszFilename=\"%s\" uOpenFlags=%#x pVDIfsDisk=%#p pVDIfsImage=%#p enmType=%u ppBackendData=%#p\n",
6357 pszFilename, uOpenFlags, pVDIfsDisk, pVDIfsImage, enmType, ppBackendData));
6358 int rc;
6359
6360 /* Check open flags. All valid flags are supported. */
6361 AssertReturn(!(uOpenFlags & ~VD_OPEN_FLAGS_MASK), VERR_INVALID_PARAMETER);
6362 AssertReturn((VALID_PTR(pszFilename) && *pszFilename), VERR_INVALID_PARAMETER);
6363
6364 PVMDKIMAGE pImage = (PVMDKIMAGE)RTMemAllocZ(RT_UOFFSETOF(VMDKIMAGE, RegionList.aRegions[1]));
6365 if (RT_LIKELY(pImage))
6366 {
6367 pImage->pszFilename = pszFilename;
6368 pImage->pFile = NULL;
6369 pImage->pExtents = NULL;
6370 pImage->pFiles = NULL;
6371 pImage->pGTCache = NULL;
6372 pImage->pDescData = NULL;
6373 pImage->pVDIfsDisk = pVDIfsDisk;
6374 pImage->pVDIfsImage = pVDIfsImage;
6375
6376 rc = vmdkOpenImage(pImage, uOpenFlags);
6377 if (RT_SUCCESS(rc))
6378 *ppBackendData = pImage;
6379 else
6380 RTMemFree(pImage);
6381 }
6382 else
6383 rc = VERR_NO_MEMORY;
6384
6385 LogFlowFunc(("returns %Rrc (pBackendData=%#p)\n", rc, *ppBackendData));
6386 return rc;
6387}
6388
/** @copydoc VDIMAGEBACKEND::pfnCreate */
static DECLCALLBACK(int) vmdkCreate(const char *pszFilename, uint64_t cbSize,
                                    unsigned uImageFlags, const char *pszComment,
                                    PCVDGEOMETRY pPCHSGeometry, PCVDGEOMETRY pLCHSGeometry,
                                    PCRTUUID pUuid, unsigned uOpenFlags,
                                    unsigned uPercentStart, unsigned uPercentSpan,
                                    PVDINTERFACE pVDIfsDisk, PVDINTERFACE pVDIfsImage,
                                    PVDINTERFACE pVDIfsOperation, VDTYPE enmType,
                                    void **ppBackendData)
{
    LogFlowFunc(("pszFilename=\"%s\" cbSize=%llu uImageFlags=%#x pszComment=\"%s\" pPCHSGeometry=%#p pLCHSGeometry=%#p Uuid=%RTuuid uOpenFlags=%#x uPercentStart=%u uPercentSpan=%u pVDIfsDisk=%#p pVDIfsImage=%#p pVDIfsOperation=%#p enmType=%u ppBackendData=%#p\n",
                 pszFilename, cbSize, uImageFlags, pszComment, pPCHSGeometry, pLCHSGeometry, pUuid, uOpenFlags, uPercentStart, uPercentSpan, pVDIfsDisk, pVDIfsImage, pVDIfsOperation, enmType, ppBackendData));
    int rc;

    /* Check the VD container type and image flags. VMDK only backs hard disks. */
    if (   enmType != VDTYPE_HDD
        || (uImageFlags & ~VD_VMDK_IMAGE_FLAGS_MASK) != 0)
        return VERR_VD_INVALID_TYPE;

    /* Check size. Maximum 256TB-64K for sparse images, otherwise unlimited.
     * Raw disk images are exempt from the size check entirely. */
    if (   !(uImageFlags & VD_VMDK_IMAGE_FLAGS_RAWDISK)
        && (   !cbSize
            || (!(uImageFlags & VD_IMAGE_FLAGS_FIXED) && cbSize >= _1T * 256 - _64K)))
        return VERR_VD_INVALID_SIZE;

    /* Check image flags for invalid combinations: stream optimized may only
     * be combined with the DIFF flag. */
    if (   (uImageFlags & VD_VMDK_IMAGE_FLAGS_STREAM_OPTIMIZED)
        && (uImageFlags & ~(VD_VMDK_IMAGE_FLAGS_STREAM_OPTIMIZED | VD_IMAGE_FLAGS_DIFF)))
        return VERR_INVALID_PARAMETER;

    /* Check open flags. All valid flags are supported. ESX images must be fixed. */
    AssertReturn(!(uOpenFlags & ~VD_OPEN_FLAGS_MASK), VERR_INVALID_PARAMETER);
    AssertReturn(   VALID_PTR(pszFilename)
                 && *pszFilename
                 && VALID_PTR(pPCHSGeometry)
                 && VALID_PTR(pLCHSGeometry)
                 && !(   uImageFlags & VD_VMDK_IMAGE_FLAGS_ESX
                      && !(uImageFlags & VD_IMAGE_FLAGS_FIXED)),
                 VERR_INVALID_PARAMETER);

    /* Allocate the image instance with a single region entry. */
    PVMDKIMAGE pImage = (PVMDKIMAGE)RTMemAllocZ(RT_UOFFSETOF(VMDKIMAGE, RegionList.aRegions[1]));
    if (RT_LIKELY(pImage))
    {
        PVDINTERFACEPROGRESS pIfProgress = VDIfProgressGet(pVDIfsOperation);

        pImage->pszFilename = pszFilename;
        pImage->pFile = NULL;
        pImage->pExtents = NULL;
        pImage->pFiles = NULL;
        pImage->pGTCache = NULL;
        pImage->pDescData = NULL;
        pImage->pVDIfsDisk = pVDIfsDisk;
        pImage->pVDIfsImage = pVDIfsImage;
        /* Descriptors for split images can be pretty large, especially if the
         * filename is long. So prepare for the worst, and allocate quite some
         * memory for the descriptor in this case. */
        if (uImageFlags & VD_VMDK_IMAGE_FLAGS_SPLIT_2G)
            pImage->cbDescAlloc = VMDK_SECTOR2BYTE(200);
        else
            pImage->cbDescAlloc = VMDK_SECTOR2BYTE(20);
        pImage->pDescData = (char *)RTMemAllocZ(pImage->cbDescAlloc);
        if (RT_LIKELY(pImage->pDescData))
        {
            rc = vmdkCreateImage(pImage, cbSize, uImageFlags, pszComment,
                                 pPCHSGeometry, pLCHSGeometry, pUuid,
                                 pIfProgress, uPercentStart, uPercentSpan);
            if (RT_SUCCESS(rc))
            {
                /* So far the image is opened in read/write mode. Make sure the
                 * image is opened in read-only mode if the caller requested that. */
                if (uOpenFlags & VD_OPEN_FLAGS_READONLY)
                {
                    vmdkFreeImage(pImage, false, true /*fFlush*/);
                    rc = vmdkOpenImage(pImage, uOpenFlags);
                }

                if (RT_SUCCESS(rc))
                    *ppBackendData = pImage;
            }

            /* Cleanup on any failure: descriptor buffer first, ... */
            if (RT_FAILURE(rc))
                RTMemFree(pImage->pDescData);
        }
        else
            rc = VERR_NO_MEMORY;

        /* ... then the instance itself. */
        if (RT_FAILURE(rc))
            RTMemFree(pImage);
    }
    else
        rc = VERR_NO_MEMORY;

    /* NOTE(review): on failure *ppBackendData is read uninitialized by the
     * log statement below; only affects logging output — confirm harmless. */
    LogFlowFunc(("returns %Rrc (pBackendData=%#p)\n", rc, *ppBackendData));
    return rc;
}
6484
/**
 * Prepares the state for renaming a VMDK image, setting up the state and allocating
 * memory.
 *
 * @returns VBox status code.
 * @param   pImage          VMDK image instance.
 * @param   pRenameState    The state to initialize.
 * @param   pszFilename     The new filename.
 *
 * @note    The AssertReturn paths below may return with partially allocated
 *          state; the caller (vmdkRename) always calls vmdkRenameStateDestroy
 *          afterwards, which frees whatever was allocated.
 */
static int vmdkRenameStatePrepare(PVMDKIMAGE pImage, PVMDKRENAMESTATE pRenameState, const char *pszFilename)
{
    /* The new name must contain an actual filename component. */
    AssertReturn(RTPathFilename(pszFilename) != NULL, VERR_INVALID_PARAMETER);

    int rc = VINF_SUCCESS;

    memset(&pRenameState->DescriptorCopy, 0, sizeof(pRenameState->DescriptorCopy));

    /*
     * Allocate an array to store both old and new names of renamed files
     * in case we have to roll back the changes. Arrays are initialized
     * with zeros. We actually save stuff when and if we change it.
     * The extra (+ 1) slot in the name arrays is reserved for the
     * descriptor file itself.
     */
    pRenameState->cExtents = pImage->cExtents;
    pRenameState->apszOldName = (char **)RTMemTmpAllocZ((pRenameState->cExtents + 1) * sizeof(char *));
    pRenameState->apszNewName = (char **)RTMemTmpAllocZ((pRenameState->cExtents + 1) * sizeof(char *));
    pRenameState->apszNewLines = (char **)RTMemTmpAllocZ(pRenameState->cExtents * sizeof(char *));
    if (   pRenameState->apszOldName
        && pRenameState->apszNewName
        && pRenameState->apszNewLines)
    {
        /* Save the descriptor size and position. */
        if (pImage->pDescData)
        {
            /* Separate descriptor file. */
            pRenameState->fEmbeddedDesc = false;
        }
        else
        {
            /* Embedded descriptor file: keep a copy of the first extent, the
             * descriptor lives inside it. */
            pRenameState->ExtentCopy = pImage->pExtents[0];
            pRenameState->fEmbeddedDesc = true;
        }

        /* Save the descriptor content (line by line) for rollback. */
        pRenameState->DescriptorCopy.cLines = pImage->Descriptor.cLines;
        for (unsigned i = 0; i < pRenameState->DescriptorCopy.cLines; i++)
        {
            pRenameState->DescriptorCopy.aLines[i] = RTStrDup(pImage->Descriptor.aLines[i]);
            if (!pRenameState->DescriptorCopy.aLines[i])
            {
                rc = VERR_NO_MEMORY;
                break;
            }
        }

        if (RT_SUCCESS(rc))
        {
            /* Prepare both old and new base names used for string replacement. */
            pRenameState->pszNewBaseName = RTStrDup(RTPathFilename(pszFilename));
            AssertReturn(pRenameState->pszNewBaseName, VERR_NO_STR_MEMORY);
            RTPathStripSuffix(pRenameState->pszNewBaseName);

            pRenameState->pszOldBaseName = RTStrDup(RTPathFilename(pImage->pszFilename));
            AssertReturn(pRenameState->pszOldBaseName, VERR_NO_STR_MEMORY);
            RTPathStripSuffix(pRenameState->pszOldBaseName);

            /* Prepare both old and new full names used for string replacement.
               Note! Must abspath the stuff here, so the strstr weirdness later in
                     the renaming process get a match against abspath'ed extent paths.
                     See RTPathAbsDup call in vmdkDescriptorReadSparse(). */
            pRenameState->pszNewFullName = RTPathAbsDup(pszFilename);
            AssertReturn(pRenameState->pszNewFullName, VERR_NO_STR_MEMORY);
            RTPathStripSuffix(pRenameState->pszNewFullName);

            pRenameState->pszOldFullName = RTPathAbsDup(pImage->pszFilename);
            AssertReturn(pRenameState->pszOldFullName, VERR_NO_STR_MEMORY);
            RTPathStripSuffix(pRenameState->pszOldFullName);

            /* Save the old name for easy access to the old descriptor file. */
            pRenameState->pszOldDescName = RTStrDup(pImage->pszFilename);
            AssertReturn(pRenameState->pszOldDescName, VERR_NO_STR_MEMORY);

            /* Save old image name (not a copy — points at the caller's string). */
            pRenameState->pszOldImageName = pImage->pszFilename;
        }
    }
    else
        rc = VERR_NO_TMP_MEMORY;

    return rc;
}
6576
6577/**
6578 * Destroys the given rename state, freeing all allocated memory.
6579 *
6580 * @returns nothing.
6581 * @param pRenameState The rename state to destroy.
6582 */
6583static void vmdkRenameStateDestroy(PVMDKRENAMESTATE pRenameState)
6584{
6585 for (unsigned i = 0; i < pRenameState->DescriptorCopy.cLines; i++)
6586 if (pRenameState->DescriptorCopy.aLines[i])
6587 RTStrFree(pRenameState->DescriptorCopy.aLines[i]);
6588 if (pRenameState->apszOldName)
6589 {
6590 for (unsigned i = 0; i <= pRenameState->cExtents; i++)
6591 if (pRenameState->apszOldName[i])
6592 RTStrFree(pRenameState->apszOldName[i]);
6593 RTMemTmpFree(pRenameState->apszOldName);
6594 }
6595 if (pRenameState->apszNewName)
6596 {
6597 for (unsigned i = 0; i <= pRenameState->cExtents; i++)
6598 if (pRenameState->apszNewName[i])
6599 RTStrFree(pRenameState->apszNewName[i]);
6600 RTMemTmpFree(pRenameState->apszNewName);
6601 }
6602 if (pRenameState->apszNewLines)
6603 {
6604 for (unsigned i = 0; i < pRenameState->cExtents; i++)
6605 if (pRenameState->apszNewLines[i])
6606 RTStrFree(pRenameState->apszNewLines[i]);
6607 RTMemTmpFree(pRenameState->apszNewLines);
6608 }
6609 if (pRenameState->pszOldDescName)
6610 RTStrFree(pRenameState->pszOldDescName);
6611 if (pRenameState->pszOldBaseName)
6612 RTStrFree(pRenameState->pszOldBaseName);
6613 if (pRenameState->pszNewBaseName)
6614 RTStrFree(pRenameState->pszNewBaseName);
6615 if (pRenameState->pszOldFullName)
6616 RTStrFree(pRenameState->pszOldFullName);
6617 if (pRenameState->pszNewFullName)
6618 RTStrFree(pRenameState->pszNewFullName);
6619}
6620
/**
 * Rolls back the rename operation to the original state.
 *
 * @returns VBox status code.
 * @param   pImage          VMDK image instance.
 * @param   pRenameState    The rename state.
 *
 * @note    Intermediate failures (file moves, descriptor open/write) are only
 *          asserted on; the returned status comes from the final re-open of
 *          the original image.
 */
static int vmdkRenameRollback(PVMDKIMAGE pImage, PVMDKRENAMESTATE pRenameState)
{
    int rc = VINF_SUCCESS;

    if (!pRenameState->fImageFreed)
    {
        /*
         * Some extents may have been closed, close the rest. We will
         * re-open the whole thing later.
         */
        vmdkFreeImage(pImage, false, true /*fFlush*/);
    }

    /* Rename files back. Only slots where a rename actually happened have a
     * non-NULL old name recorded by vmdkRenameWorker. */
    for (unsigned i = 0; i <= pRenameState->cExtents; i++)
    {
        if (pRenameState->apszOldName[i])
        {
            rc = vdIfIoIntFileMove(pImage->pIfIo, pRenameState->apszNewName[i], pRenameState->apszOldName[i], 0);
            AssertRC(rc);
        }
    }
    /* Restore the old descriptor: reopen the old descriptor file and write
     * back the saved descriptor lines. */
    PVMDKFILE pFile;
    rc = vmdkFileOpen(pImage, &pFile, NULL, pRenameState->pszOldDescName,
                      VDOpenFlagsToFileOpenFlags(VD_OPEN_FLAGS_NORMAL,
                                                 false /* fCreate */));
    AssertRC(rc);
    if (pRenameState->fEmbeddedDesc)
    {
        /* Descriptor lives in the first extent — implant the saved copy. */
        pRenameState->ExtentCopy.pFile = pFile;
        pImage->pExtents = &pRenameState->ExtentCopy;
    }
    else
    {
        /* Shouldn't be null for separate descriptor.
         * There will be no access to the actual content.
         */
        pImage->pDescData = pRenameState->pszOldDescName;
        pImage->pFile = pFile;
    }
    pImage->Descriptor = pRenameState->DescriptorCopy;
    vmdkWriteDescriptor(pImage, NULL);
    vmdkFileClose(pImage, &pFile, false);
    /* Get rid of the stuff we implanted (pDescData pointed at state-owned
     * memory which vmdkRenameStateDestroy will free — must be NULLed here). */
    pImage->pExtents = NULL;
    pImage->pFile = NULL;
    pImage->pDescData = NULL;
    /* Re-open the image back. */
    pImage->pszFilename = pRenameState->pszOldImageName;
    rc = vmdkOpenImage(pImage, pImage->uOpenFlags);

    return rc;
}
6682
6683/**
6684 * Rename worker doing the real work.
6685 *
6686 * @returns VBox status code.
6687 * @param pImage VMDK image instance.
6688 * @param pRenameState The rename state.
6689 * @param pszFilename The new filename.
6690 */
6691static int vmdkRenameWorker(PVMDKIMAGE pImage, PVMDKRENAMESTATE pRenameState, const char *pszFilename)
6692{
6693 int rc = VINF_SUCCESS;
6694 unsigned i, line;
6695
6696 /* Update the descriptor with modified extent names. */
6697 for (i = 0, line = pImage->Descriptor.uFirstExtent;
6698 i < pRenameState->cExtents;
6699 i++, line = pImage->Descriptor.aNextLines[line])
6700 {
6701 /* Update the descriptor. */
6702 pRenameState->apszNewLines[i] = vmdkStrReplace(pImage->Descriptor.aLines[line],
6703 pRenameState->pszOldBaseName,
6704 pRenameState->pszNewBaseName);
6705 if (!pRenameState->apszNewLines[i])
6706 {
6707 rc = VERR_NO_MEMORY;
6708 break;
6709 }
6710 pImage->Descriptor.aLines[line] = pRenameState->apszNewLines[i];
6711 }
6712
6713 if (RT_SUCCESS(rc))
6714 {
6715 /* Make sure the descriptor gets written back. */
6716 pImage->Descriptor.fDirty = true;
6717 /* Flush the descriptor now, in case it is embedded. */
6718 vmdkFlushImage(pImage, NULL);
6719
6720 /* Close and rename/move extents. */
6721 for (i = 0; i < pRenameState->cExtents; i++)
6722 {
6723 PVMDKEXTENT pExtent = &pImage->pExtents[i];
6724 /* Compose new name for the extent. */
6725 pRenameState->apszNewName[i] = vmdkStrReplace(pExtent->pszFullname,
6726 pRenameState->pszOldFullName,
6727 pRenameState->pszNewFullName);
6728 if (!pRenameState->apszNewName[i])
6729 {
6730 rc = VERR_NO_MEMORY;
6731 break;
6732 }
6733 /* Close the extent file. */
6734 rc = vmdkFileClose(pImage, &pExtent->pFile, false);
6735 if (RT_FAILURE(rc))
6736 break;;
6737
6738 /* Rename the extent file. */
6739 rc = vdIfIoIntFileMove(pImage->pIfIo, pExtent->pszFullname, pRenameState->apszNewName[i], 0);
6740 if (RT_FAILURE(rc))
6741 break;
6742 /* Remember the old name. */
6743 pRenameState->apszOldName[i] = RTStrDup(pExtent->pszFullname);
6744 }
6745
6746 if (RT_SUCCESS(rc))
6747 {
6748 /* Release all old stuff. */
6749 rc = vmdkFreeImage(pImage, false, true /*fFlush*/);
6750 if (RT_SUCCESS(rc))
6751 {
6752 pRenameState->fImageFreed = true;
6753
6754 /* Last elements of new/old name arrays are intended for
6755 * storing descriptor's names.
6756 */
6757 pRenameState->apszNewName[pRenameState->cExtents] = RTStrDup(pszFilename);
6758 /* Rename the descriptor file if it's separate. */
6759 if (!pRenameState->fEmbeddedDesc)
6760 {
6761 rc = vdIfIoIntFileMove(pImage->pIfIo, pImage->pszFilename, pRenameState->apszNewName[pRenameState->cExtents], 0);
6762 if (RT_SUCCESS(rc))
6763 {
6764 /* Save old name only if we may need to change it back. */
6765 pRenameState->apszOldName[pRenameState->cExtents] = RTStrDup(pszFilename);
6766 }
6767 }
6768
6769 /* Update pImage with the new information. */
6770 pImage->pszFilename = pszFilename;
6771
6772 /* Open the new image. */
6773 rc = vmdkOpenImage(pImage, pImage->uOpenFlags);
6774 }
6775 }
6776 }
6777
6778 return rc;
6779}
6780
6781/** @copydoc VDIMAGEBACKEND::pfnRename */
6782static DECLCALLBACK(int) vmdkRename(void *pBackendData, const char *pszFilename)
6783{
6784 LogFlowFunc(("pBackendData=%#p pszFilename=%#p\n", pBackendData, pszFilename));
6785
6786 PVMDKIMAGE pImage = (PVMDKIMAGE)pBackendData;
6787 VMDKRENAMESTATE RenameState;
6788
6789 memset(&RenameState, 0, sizeof(RenameState));
6790
6791 /* Check arguments. */
6792 AssertReturn(( pImage
6793 && VALID_PTR(pszFilename)
6794 && *pszFilename
6795 && !(pImage->uImageFlags & VD_VMDK_IMAGE_FLAGS_RAWDISK)), VERR_INVALID_PARAMETER);
6796
6797 int rc = vmdkRenameStatePrepare(pImage, &RenameState, pszFilename);
6798 if (RT_SUCCESS(rc))
6799 {
6800 /* --- Up to this point we have not done any damage yet. --- */
6801
6802 rc = vmdkRenameWorker(pImage, &RenameState, pszFilename);
6803 /* Roll back all changes in case of failure. */
6804 if (RT_FAILURE(rc))
6805 {
6806 int rrc = vmdkRenameRollback(pImage, &RenameState);
6807 AssertRC(rrc);
6808 }
6809 }
6810
6811 vmdkRenameStateDestroy(&RenameState);
6812 LogFlowFunc(("returns %Rrc\n", rc));
6813 return rc;
6814}
6815
6816/** @copydoc VDIMAGEBACKEND::pfnClose */
6817static DECLCALLBACK(int) vmdkClose(void *pBackendData, bool fDelete)
6818{
6819 LogFlowFunc(("pBackendData=%#p fDelete=%d\n", pBackendData, fDelete));
6820 PVMDKIMAGE pImage = (PVMDKIMAGE)pBackendData;
6821
6822 int rc = vmdkFreeImage(pImage, fDelete, true /*fFlush*/);
6823 RTMemFree(pImage);
6824
6825 LogFlowFunc(("returns %Rrc\n", rc));
6826 return rc;
6827}
6828
/** @copydoc VDIMAGEBACKEND::pfnRead */
static DECLCALLBACK(int) vmdkRead(void *pBackendData, uint64_t uOffset, size_t cbToRead,
                                  PVDIOCTX pIoCtx, size_t *pcbActuallyRead)
{
    LogFlowFunc(("pBackendData=%#p uOffset=%llu pIoCtx=%#p cbToRead=%zu pcbActuallyRead=%#p\n",
                 pBackendData, uOffset, pIoCtx, cbToRead, pcbActuallyRead));
    PVMDKIMAGE pImage = (PVMDKIMAGE)pBackendData;

    AssertPtr(pImage);
    Assert(uOffset % 512 == 0);
    Assert(cbToRead % 512 == 0);
    AssertReturn((VALID_PTR(pIoCtx) && cbToRead), VERR_INVALID_PARAMETER);
    AssertReturn(uOffset + cbToRead <= pImage->cbSize, VERR_INVALID_PARAMETER);

    /* Find the extent and check access permissions as defined in the extent descriptor. */
    PVMDKEXTENT pExtent;
    uint64_t uSectorExtentRel;
    int rc = vmdkFindExtent(pImage, VMDK_BYTE2SECTOR(uOffset),
                            &pExtent, &uSectorExtentRel);
    if (   RT_SUCCESS(rc)
        && pExtent->enmAccess != VMDKACCESS_NOACCESS)
    {
        /* Clip read range to remain in this extent. */
        cbToRead = RT_MIN(cbToRead, VMDK_SECTOR2BYTE(pExtent->uSectorOffset + pExtent->cNominalSectors - uSectorExtentRel));

        /* Handle the read according to the current extent type. */
        switch (pExtent->enmType)
        {
            case VMDKETYPE_HOSTED_SPARSE:
            {
                uint64_t uSectorExtentAbs;

                rc = vmdkGetSector(pImage, pIoCtx, pExtent, uSectorExtentRel, &uSectorExtentAbs);
                if (RT_FAILURE(rc))
                    break;
                /* Clip read range to at most the rest of the grain. */
                cbToRead = RT_MIN(cbToRead, VMDK_SECTOR2BYTE(pExtent->cSectorsPerGrain - uSectorExtentRel % pExtent->cSectorsPerGrain));
                Assert(!(cbToRead % 512));
                if (uSectorExtentAbs == 0)
                {
                    /* Grain not allocated in the image. Only a stream
                     * optimized image opened read-only + sequential can
                     * still serve data here (from the compressed stream);
                     * everything else reports the block as free. */
                    if (   !(pImage->uImageFlags & VD_VMDK_IMAGE_FLAGS_STREAM_OPTIMIZED)
                        || !(pImage->uOpenFlags & VD_OPEN_FLAGS_READONLY)
                        || !(pImage->uOpenFlags & VD_OPEN_FLAGS_SEQUENTIAL))
                        rc = VERR_VD_BLOCK_FREE;
                    else
                        rc = vmdkStreamReadSequential(pImage, pExtent,
                                                      uSectorExtentRel,
                                                      pIoCtx, cbToRead);
                }
                else
                {
                    if (pImage->uImageFlags & VD_VMDK_IMAGE_FLAGS_STREAM_OPTIMIZED)
                    {
                        AssertMsg(vdIfIoIntIoCtxIsSynchronous(pImage->pIfIo, pIoCtx),
                                  ("Async I/O is not supported for stream optimized VMDK's\n"));

                        /* One decompressed grain is cached in pvGrain; only
                         * re-inflate when a different grain is requested. */
                        uint32_t uSectorInGrain = uSectorExtentRel % pExtent->cSectorsPerGrain;
                        uSectorExtentAbs -= uSectorInGrain;
                        if (pExtent->uGrainSectorAbs != uSectorExtentAbs)
                        {
                            uint64_t uLBA = 0; /* gcc maybe uninitialized */
                            rc = vmdkFileInflateSync(pImage, pExtent,
                                                     VMDK_SECTOR2BYTE(uSectorExtentAbs),
                                                     pExtent->pvGrain,
                                                     VMDK_SECTOR2BYTE(pExtent->cSectorsPerGrain),
                                                     NULL, &uLBA, NULL);
                            if (RT_FAILURE(rc))
                            {
                                /* Invalidate the grain cache on error. */
                                pExtent->uGrainSectorAbs = 0;
                                break;
                            }
                            pExtent->uGrainSectorAbs = uSectorExtentAbs;
                            pExtent->uGrain = uSectorExtentRel / pExtent->cSectorsPerGrain;
                            Assert(uLBA == uSectorExtentRel);
                        }
                        vdIfIoIntIoCtxCopyTo(pImage->pIfIo, pIoCtx,
                                             (uint8_t *)pExtent->pvGrain
                                             + VMDK_SECTOR2BYTE(uSectorInGrain),
                                             cbToRead);
                    }
                    else
                        /* Plain sparse extent: read straight from the file. */
                        rc = vdIfIoIntFileReadUser(pImage->pIfIo, pExtent->pFile->pStorage,
                                                   VMDK_SECTOR2BYTE(uSectorExtentAbs),
                                                   pIoCtx, cbToRead);
                }
                break;
            }
            case VMDKETYPE_VMFS:
            case VMDKETYPE_FLAT:
                /* Flat extents map 1:1 to the file; extent-relative sector
                 * is the file offset. */
                rc = vdIfIoIntFileReadUser(pImage->pIfIo, pExtent->pFile->pStorage,
                                           VMDK_SECTOR2BYTE(uSectorExtentRel),
                                           pIoCtx, cbToRead);
                break;
            case VMDKETYPE_ZERO:
            {
                /* Zero extents have no backing storage — fill with zeros. */
                size_t cbSet;

                cbSet = vdIfIoIntIoCtxSet(pImage->pIfIo, pIoCtx, 0, cbToRead);
                Assert(cbSet == cbToRead);
                break;
            }
        }
        if (pcbActuallyRead)
            *pcbActuallyRead = cbToRead;
    }
    else if (RT_SUCCESS(rc))
        /* vmdkFindExtent succeeded but the extent is marked no-access. */
        rc = VERR_VD_VMDK_INVALID_STATE;

    LogFlowFunc(("returns %Rrc\n", rc));
    return rc;
}
6940
/** @copydoc VDIMAGEBACKEND::pfnWrite
 * @note For partial writes into an unallocated grain of a non-stream image
 *       this returns VERR_VD_BLOCK_FREE with *pcbPreRead/*pcbPostRead set to
 *       the byte counts before/after the write within the grain, so the
 *       caller can do a read-modify-write. */
static DECLCALLBACK(int) vmdkWrite(void *pBackendData, uint64_t uOffset, size_t cbToWrite,
                                   PVDIOCTX pIoCtx, size_t *pcbWriteProcess, size_t *pcbPreRead,
                                   size_t *pcbPostRead, unsigned fWrite)
{
    LogFlowFunc(("pBackendData=%#p uOffset=%llu pIoCtx=%#p cbToWrite=%zu pcbWriteProcess=%#p pcbPreRead=%#p pcbPostRead=%#p\n",
                 pBackendData, uOffset, pIoCtx, cbToWrite, pcbWriteProcess, pcbPreRead, pcbPostRead));
    PVMDKIMAGE pImage = (PVMDKIMAGE)pBackendData;
    int rc;

    AssertPtr(pImage);
    Assert(uOffset % 512 == 0);
    Assert(cbToWrite % 512 == 0);
    AssertReturn((VALID_PTR(pIoCtx) && cbToWrite), VERR_INVALID_PARAMETER);

    if (!(pImage->uOpenFlags & VD_OPEN_FLAGS_READONLY))
    {
        PVMDKEXTENT pExtent;
        uint64_t uSectorExtentRel;
        uint64_t uSectorExtentAbs;

        /* No size check here, will do that later when the extent is located.
         * There are sparse images out there which according to the spec are
         * invalid, because the total size is not a multiple of the grain size.
         * Also for sparse images which are stitched together in odd ways (not at
         * grain boundaries, and with the nominal size not being a multiple of the
         * grain size), this would prevent writing to the last grain. */

        rc = vmdkFindExtent(pImage, VMDK_BYTE2SECTOR(uOffset),
                            &pExtent, &uSectorExtentRel);
        if (RT_SUCCESS(rc))
        {
            /* NOTE(review): this access check rejects only NOACCESS extents of
             * plain images (not stream optimized / being appended); writes to
             * READONLY extents fall through to the switch below — confirm
             * whether that is intentional. */
            if (   pExtent->enmAccess != VMDKACCESS_READWRITE
                && (   !(pImage->uImageFlags & VD_VMDK_IMAGE_FLAGS_STREAM_OPTIMIZED)
                    && !pImage->pExtents[0].uAppendPosition
                    && pExtent->enmAccess != VMDKACCESS_READONLY))
                rc = VERR_VD_VMDK_INVALID_STATE;
            else
            {
                /* Handle the write according to the current extent type. */
                switch (pExtent->enmType)
                {
                    case VMDKETYPE_HOSTED_SPARSE:
                        rc = vmdkGetSector(pImage, pIoCtx, pExtent, uSectorExtentRel, &uSectorExtentAbs);
                        if (RT_SUCCESS(rc))
                        {
                            /* Stream optimized images are append-only: writing
                             * before the last accessed grain is not possible. */
                            if (   pImage->uImageFlags & VD_VMDK_IMAGE_FLAGS_STREAM_OPTIMIZED
                                && uSectorExtentRel < (uint64_t)pExtent->uLastGrainAccess * pExtent->cSectorsPerGrain)
                                rc = VERR_VD_VMDK_INVALID_WRITE;
                            else
                            {
                                /* Clip write range to at most the rest of the grain. */
                                cbToWrite = RT_MIN(cbToWrite,
                                                   VMDK_SECTOR2BYTE(  pExtent->cSectorsPerGrain
                                                                    - uSectorExtentRel % pExtent->cSectorsPerGrain));
                                if (uSectorExtentAbs == 0)
                                {
                                    /* Grain is not yet allocated. */
                                    if (!(pImage->uImageFlags & VD_VMDK_IMAGE_FLAGS_STREAM_OPTIMIZED))
                                    {
                                        if (cbToWrite == VMDK_SECTOR2BYTE(pExtent->cSectorsPerGrain))
                                        {
                                            /* Full block write to a previously unallocated block.
                                             * Check if the caller wants to avoid the automatic alloc. */
                                            if (!(fWrite & VD_WRITE_NO_ALLOC))
                                            {
                                                /* Allocate GT and find out where to store the grain. */
                                                rc = vmdkAllocGrain(pImage, pExtent, pIoCtx,
                                                                    uSectorExtentRel, cbToWrite);
                                            }
                                            else
                                                rc = VERR_VD_BLOCK_FREE;
                                            *pcbPreRead = 0;
                                            *pcbPostRead = 0;
                                        }
                                        else
                                        {
                                            /* Partial grain write: report the block as free and
                                             * tell the caller how much to read before/after for
                                             * the read-modify-write cycle. */
                                            /* Clip write range to remain in this extent. */
                                            cbToWrite = RT_MIN(cbToWrite,
                                                               VMDK_SECTOR2BYTE(  pExtent->uSectorOffset
                                                                                + pExtent->cNominalSectors - uSectorExtentRel));
                                            *pcbPreRead = VMDK_SECTOR2BYTE(uSectorExtentRel % pExtent->cSectorsPerGrain);
                                            *pcbPostRead = VMDK_SECTOR2BYTE(pExtent->cSectorsPerGrain) - cbToWrite - *pcbPreRead;
                                            rc = VERR_VD_BLOCK_FREE;
                                        }
                                    }
                                    else
                                        /* Stream optimized: append a new compressed grain. */
                                        rc = vmdkStreamAllocGrain(pImage, pExtent, uSectorExtentRel,
                                                                  pIoCtx, cbToWrite);
                                }
                                else
                                {
                                    if (pImage->uImageFlags & VD_VMDK_IMAGE_FLAGS_STREAM_OPTIMIZED)
                                    {
                                        /* A partial write to a streamOptimized image is simply
                                         * invalid. It requires rewriting already compressed data
                                         * which is somewhere between expensive and impossible. */
                                        rc = VERR_VD_VMDK_INVALID_STATE;
                                        pExtent->uGrainSectorAbs = 0;
                                        AssertRC(rc);
                                    }
                                    else
                                    {
                                        /* Plain sparse extent: write into the allocated grain. */
                                        Assert(!(pImage->uImageFlags & VD_VMDK_IMAGE_FLAGS_STREAM_OPTIMIZED));
                                        rc = vdIfIoIntFileWriteUser(pImage->pIfIo, pExtent->pFile->pStorage,
                                                                    VMDK_SECTOR2BYTE(uSectorExtentAbs),
                                                                    pIoCtx, cbToWrite, NULL, NULL);
                                    }
                                }
                            }
                        }
                        break;
                    case VMDKETYPE_VMFS:
                    case VMDKETYPE_FLAT:
                        /* Clip write range to remain in this extent. */
                        cbToWrite = RT_MIN(cbToWrite, VMDK_SECTOR2BYTE(pExtent->uSectorOffset + pExtent->cNominalSectors - uSectorExtentRel));
                        rc = vdIfIoIntFileWriteUser(pImage->pIfIo, pExtent->pFile->pStorage,
                                                    VMDK_SECTOR2BYTE(uSectorExtentRel),
                                                    pIoCtx, cbToWrite, NULL, NULL);
                        break;
                    case VMDKETYPE_ZERO:
                        /* Zero extents have no backing storage — discard the data.
                         * Clip write range to remain in this extent. */
                        cbToWrite = RT_MIN(cbToWrite, VMDK_SECTOR2BYTE(pExtent->uSectorOffset + pExtent->cNominalSectors - uSectorExtentRel));
                        break;
                }
            }

            if (pcbWriteProcess)
                *pcbWriteProcess = cbToWrite;
        }
    }
    else
        rc = VERR_VD_IMAGE_READ_ONLY;

    LogFlowFunc(("returns %Rrc\n", rc));
    return rc;
}
7077
7078/** @copydoc VDIMAGEBACKEND::pfnFlush */
7079static DECLCALLBACK(int) vmdkFlush(void *pBackendData, PVDIOCTX pIoCtx)
7080{
7081 PVMDKIMAGE pImage = (PVMDKIMAGE)pBackendData;
7082
7083 return vmdkFlushImage(pImage, pIoCtx);
7084}
7085
7086/** @copydoc VDIMAGEBACKEND::pfnGetVersion */
7087static DECLCALLBACK(unsigned) vmdkGetVersion(void *pBackendData)
7088{
7089 LogFlowFunc(("pBackendData=%#p\n", pBackendData));
7090 PVMDKIMAGE pImage = (PVMDKIMAGE)pBackendData;
7091
7092 AssertPtrReturn(pImage, 0);
7093
7094 return VMDK_IMAGE_VERSION;
7095}
7096
7097/** @copydoc VDIMAGEBACKEND::pfnGetFileSize */
7098static DECLCALLBACK(uint64_t) vmdkGetFileSize(void *pBackendData)
7099{
7100 LogFlowFunc(("pBackendData=%#p\n", pBackendData));
7101 PVMDKIMAGE pImage = (PVMDKIMAGE)pBackendData;
7102 uint64_t cb = 0;
7103
7104 AssertPtrReturn(pImage, 0);
7105
7106 if (pImage->pFile != NULL)
7107 {
7108 uint64_t cbFile;
7109 int rc = vdIfIoIntFileGetSize(pImage->pIfIo, pImage->pFile->pStorage, &cbFile);
7110 if (RT_SUCCESS(rc))
7111 cb += cbFile;
7112 }
7113 for (unsigned i = 0; i < pImage->cExtents; i++)
7114 {
7115 if (pImage->pExtents[i].pFile != NULL)
7116 {
7117 uint64_t cbFile;
7118 int rc = vdIfIoIntFileGetSize(pImage->pIfIo, pImage->pExtents[i].pFile->pStorage, &cbFile);
7119 if (RT_SUCCESS(rc))
7120 cb += cbFile;
7121 }
7122 }
7123
7124 LogFlowFunc(("returns %lld\n", cb));
7125 return cb;
7126}
7127
7128/** @copydoc VDIMAGEBACKEND::pfnGetPCHSGeometry */
7129static DECLCALLBACK(int) vmdkGetPCHSGeometry(void *pBackendData, PVDGEOMETRY pPCHSGeometry)
7130{
7131 LogFlowFunc(("pBackendData=%#p pPCHSGeometry=%#p\n", pBackendData, pPCHSGeometry));
7132 PVMDKIMAGE pImage = (PVMDKIMAGE)pBackendData;
7133 int rc = VINF_SUCCESS;
7134
7135 AssertPtrReturn(pImage, VERR_VD_NOT_OPENED);
7136
7137 if (pImage->PCHSGeometry.cCylinders)
7138 *pPCHSGeometry = pImage->PCHSGeometry;
7139 else
7140 rc = VERR_VD_GEOMETRY_NOT_SET;
7141
7142 LogFlowFunc(("returns %Rrc (PCHS=%u/%u/%u)\n", rc, pPCHSGeometry->cCylinders, pPCHSGeometry->cHeads, pPCHSGeometry->cSectors));
7143 return rc;
7144}
7145
7146/** @copydoc VDIMAGEBACKEND::pfnSetPCHSGeometry */
7147static DECLCALLBACK(int) vmdkSetPCHSGeometry(void *pBackendData, PCVDGEOMETRY pPCHSGeometry)
7148{
7149 LogFlowFunc(("pBackendData=%#p pPCHSGeometry=%#p PCHS=%u/%u/%u\n",
7150 pBackendData, pPCHSGeometry, pPCHSGeometry->cCylinders, pPCHSGeometry->cHeads, pPCHSGeometry->cSectors));
7151 PVMDKIMAGE pImage = (PVMDKIMAGE)pBackendData;
7152 int rc = VINF_SUCCESS;
7153
7154 AssertPtrReturn(pImage, VERR_VD_NOT_OPENED);
7155
7156 if (!(pImage->uOpenFlags & VD_OPEN_FLAGS_READONLY))
7157 {
7158 if (!(pImage->uOpenFlags & VD_VMDK_IMAGE_FLAGS_STREAM_OPTIMIZED))
7159 {
7160 rc = vmdkDescSetPCHSGeometry(pImage, pPCHSGeometry);
7161 if (RT_SUCCESS(rc))
7162 pImage->PCHSGeometry = *pPCHSGeometry;
7163 }
7164 else
7165 rc = VERR_NOT_SUPPORTED;
7166 }
7167 else
7168 rc = VERR_VD_IMAGE_READ_ONLY;
7169
7170 LogFlowFunc(("returns %Rrc\n", rc));
7171 return rc;
7172}
7173
7174/** @copydoc VDIMAGEBACKEND::pfnGetLCHSGeometry */
7175static DECLCALLBACK(int) vmdkGetLCHSGeometry(void *pBackendData, PVDGEOMETRY pLCHSGeometry)
7176{
7177 LogFlowFunc(("pBackendData=%#p pLCHSGeometry=%#p\n", pBackendData, pLCHSGeometry));
7178 PVMDKIMAGE pImage = (PVMDKIMAGE)pBackendData;
7179 int rc = VINF_SUCCESS;
7180
7181 AssertPtrReturn(pImage, VERR_VD_NOT_OPENED);
7182
7183 if (pImage->LCHSGeometry.cCylinders)
7184 *pLCHSGeometry = pImage->LCHSGeometry;
7185 else
7186 rc = VERR_VD_GEOMETRY_NOT_SET;
7187
7188 LogFlowFunc(("returns %Rrc (LCHS=%u/%u/%u)\n", rc, pLCHSGeometry->cCylinders, pLCHSGeometry->cHeads, pLCHSGeometry->cSectors));
7189 return rc;
7190}
7191
7192/** @copydoc VDIMAGEBACKEND::pfnSetLCHSGeometry */
7193static DECLCALLBACK(int) vmdkSetLCHSGeometry(void *pBackendData, PCVDGEOMETRY pLCHSGeometry)
7194{
7195 LogFlowFunc(("pBackendData=%#p pLCHSGeometry=%#p LCHS=%u/%u/%u\n",
7196 pBackendData, pLCHSGeometry, pLCHSGeometry->cCylinders, pLCHSGeometry->cHeads, pLCHSGeometry->cSectors));
7197 PVMDKIMAGE pImage = (PVMDKIMAGE)pBackendData;
7198 int rc = VINF_SUCCESS;
7199
7200 AssertPtrReturn(pImage, VERR_VD_NOT_OPENED);
7201
7202 if (!(pImage->uOpenFlags & VD_OPEN_FLAGS_READONLY))
7203 {
7204 if (!(pImage->uOpenFlags & VD_VMDK_IMAGE_FLAGS_STREAM_OPTIMIZED))
7205 {
7206 rc = vmdkDescSetLCHSGeometry(pImage, pLCHSGeometry);
7207 if (RT_SUCCESS(rc))
7208 pImage->LCHSGeometry = *pLCHSGeometry;
7209 }
7210 else
7211 rc = VERR_NOT_SUPPORTED;
7212 }
7213 else
7214 rc = VERR_VD_IMAGE_READ_ONLY;
7215
7216 LogFlowFunc(("returns %Rrc\n", rc));
7217 return rc;
7218}
7219
7220/** @copydoc VDIMAGEBACKEND::pfnQueryRegions */
7221static DECLCALLBACK(int) vmdkQueryRegions(void *pBackendData, PCVDREGIONLIST *ppRegionList)
7222{
7223 LogFlowFunc(("pBackendData=%#p ppRegionList=%#p\n", pBackendData, ppRegionList));
7224 PVMDKIMAGE pThis = (PVMDKIMAGE)pBackendData;
7225
7226 AssertPtrReturn(pThis, VERR_VD_NOT_OPENED);
7227
7228 *ppRegionList = &pThis->RegionList;
7229 LogFlowFunc(("returns %Rrc\n", VINF_SUCCESS));
7230 return VINF_SUCCESS;
7231}
7232
7233/** @copydoc VDIMAGEBACKEND::pfnRegionListRelease */
7234static DECLCALLBACK(void) vmdkRegionListRelease(void *pBackendData, PCVDREGIONLIST pRegionList)
7235{
7236 RT_NOREF1(pRegionList);
7237 LogFlowFunc(("pBackendData=%#p pRegionList=%#p\n", pBackendData, pRegionList));
7238 PVMDKIMAGE pThis = (PVMDKIMAGE)pBackendData;
7239 AssertPtr(pThis); RT_NOREF(pThis);
7240
7241 /* Nothing to do here. */
7242}
7243
7244/** @copydoc VDIMAGEBACKEND::pfnGetImageFlags */
7245static DECLCALLBACK(unsigned) vmdkGetImageFlags(void *pBackendData)
7246{
7247 LogFlowFunc(("pBackendData=%#p\n", pBackendData));
7248 PVMDKIMAGE pImage = (PVMDKIMAGE)pBackendData;
7249
7250 AssertPtrReturn(pImage, 0);
7251
7252 LogFlowFunc(("returns %#x\n", pImage->uImageFlags));
7253 return pImage->uImageFlags;
7254}
7255
7256/** @copydoc VDIMAGEBACKEND::pfnGetOpenFlags */
7257static DECLCALLBACK(unsigned) vmdkGetOpenFlags(void *pBackendData)
7258{
7259 LogFlowFunc(("pBackendData=%#p\n", pBackendData));
7260 PVMDKIMAGE pImage = (PVMDKIMAGE)pBackendData;
7261
7262 AssertPtrReturn(pImage, 0);
7263
7264 LogFlowFunc(("returns %#x\n", pImage->uOpenFlags));
7265 return pImage->uOpenFlags;
7266}
7267
7268/** @copydoc VDIMAGEBACKEND::pfnSetOpenFlags */
7269static DECLCALLBACK(int) vmdkSetOpenFlags(void *pBackendData, unsigned uOpenFlags)
7270{
7271 LogFlowFunc(("pBackendData=%#p uOpenFlags=%#x\n", pBackendData, uOpenFlags));
7272 PVMDKIMAGE pImage = (PVMDKIMAGE)pBackendData;
7273 int rc;
7274
7275 /* Image must be opened and the new flags must be valid. */
7276 if (!pImage || (uOpenFlags & ~( VD_OPEN_FLAGS_READONLY | VD_OPEN_FLAGS_INFO
7277 | VD_OPEN_FLAGS_ASYNC_IO | VD_OPEN_FLAGS_SHAREABLE
7278 | VD_OPEN_FLAGS_SEQUENTIAL | VD_OPEN_FLAGS_SKIP_CONSISTENCY_CHECKS)))
7279 rc = VERR_INVALID_PARAMETER;
7280 else
7281 {
7282 /* StreamOptimized images need special treatment: reopen is prohibited. */
7283 if (pImage->uImageFlags & VD_VMDK_IMAGE_FLAGS_STREAM_OPTIMIZED)
7284 {
7285 if (pImage->uOpenFlags == uOpenFlags)
7286 rc = VINF_SUCCESS;
7287 else
7288 rc = VERR_INVALID_PARAMETER;
7289 }
7290 else
7291 {
7292 /* Implement this operation via reopening the image. */
7293 vmdkFreeImage(pImage, false, true /*fFlush*/);
7294 rc = vmdkOpenImage(pImage, uOpenFlags);
7295 }
7296 }
7297
7298 LogFlowFunc(("returns %Rrc\n", rc));
7299 return rc;
7300}
7301
7302/** @copydoc VDIMAGEBACKEND::pfnGetComment */
7303static DECLCALLBACK(int) vmdkGetComment(void *pBackendData, char *pszComment, size_t cbComment)
7304{
7305 LogFlowFunc(("pBackendData=%#p pszComment=%#p cbComment=%zu\n", pBackendData, pszComment, cbComment));
7306 PVMDKIMAGE pImage = (PVMDKIMAGE)pBackendData;
7307
7308 AssertPtrReturn(pImage, VERR_VD_NOT_OPENED);
7309
7310 char *pszCommentEncoded = NULL;
7311 int rc = vmdkDescDDBGetStr(pImage, &pImage->Descriptor,
7312 "ddb.comment", &pszCommentEncoded);
7313 if (rc == VERR_VD_VMDK_VALUE_NOT_FOUND)
7314 {
7315 pszCommentEncoded = NULL;
7316 rc = VINF_SUCCESS;
7317 }
7318
7319 if (RT_SUCCESS(rc))
7320 {
7321 if (pszComment && pszCommentEncoded)
7322 rc = vmdkDecodeString(pszCommentEncoded, pszComment, cbComment);
7323 else if (pszComment)
7324 *pszComment = '\0';
7325
7326 if (pszCommentEncoded)
7327 RTMemTmpFree(pszCommentEncoded);
7328 }
7329
7330 LogFlowFunc(("returns %Rrc comment='%s'\n", rc, pszComment));
7331 return rc;
7332}
7333
7334/** @copydoc VDIMAGEBACKEND::pfnSetComment */
7335static DECLCALLBACK(int) vmdkSetComment(void *pBackendData, const char *pszComment)
7336{
7337 LogFlowFunc(("pBackendData=%#p pszComment=\"%s\"\n", pBackendData, pszComment));
7338 PVMDKIMAGE pImage = (PVMDKIMAGE)pBackendData;
7339 int rc;
7340
7341 AssertPtrReturn(pImage, VERR_VD_NOT_OPENED);
7342
7343 if (!(pImage->uOpenFlags & VD_OPEN_FLAGS_READONLY))
7344 {
7345 if (!(pImage->uOpenFlags & VD_VMDK_IMAGE_FLAGS_STREAM_OPTIMIZED))
7346 rc = vmdkSetImageComment(pImage, pszComment);
7347 else
7348 rc = VERR_NOT_SUPPORTED;
7349 }
7350 else
7351 rc = VERR_VD_IMAGE_READ_ONLY;
7352
7353 LogFlowFunc(("returns %Rrc\n", rc));
7354 return rc;
7355}
7356
7357/** @copydoc VDIMAGEBACKEND::pfnGetUuid */
7358static DECLCALLBACK(int) vmdkGetUuid(void *pBackendData, PRTUUID pUuid)
7359{
7360 LogFlowFunc(("pBackendData=%#p pUuid=%#p\n", pBackendData, pUuid));
7361 PVMDKIMAGE pImage = (PVMDKIMAGE)pBackendData;
7362
7363 AssertPtrReturn(pImage, VERR_VD_NOT_OPENED);
7364
7365 *pUuid = pImage->ImageUuid;
7366
7367 LogFlowFunc(("returns %Rrc (%RTuuid)\n", VINF_SUCCESS, pUuid));
7368 return VINF_SUCCESS;
7369}
7370
7371/** @copydoc VDIMAGEBACKEND::pfnSetUuid */
7372static DECLCALLBACK(int) vmdkSetUuid(void *pBackendData, PCRTUUID pUuid)
7373{
7374 LogFlowFunc(("pBackendData=%#p Uuid=%RTuuid\n", pBackendData, pUuid));
7375 PVMDKIMAGE pImage = (PVMDKIMAGE)pBackendData;
7376 int rc = VINF_SUCCESS;
7377
7378 AssertPtrReturn(pImage, VERR_VD_NOT_OPENED);
7379
7380 if (!(pImage->uOpenFlags & VD_OPEN_FLAGS_READONLY))
7381 {
7382 if (!(pImage->uOpenFlags & VD_VMDK_IMAGE_FLAGS_STREAM_OPTIMIZED))
7383 {
7384 pImage->ImageUuid = *pUuid;
7385 rc = vmdkDescDDBSetUuid(pImage, &pImage->Descriptor,
7386 VMDK_DDB_IMAGE_UUID, pUuid);
7387 if (RT_FAILURE(rc))
7388 rc = vdIfError(pImage->pIfError, rc, RT_SRC_POS,
7389 N_("VMDK: error storing image UUID in descriptor in '%s'"), pImage->pszFilename);
7390 }
7391 else
7392 rc = VERR_NOT_SUPPORTED;
7393 }
7394 else
7395 rc = VERR_VD_IMAGE_READ_ONLY;
7396
7397 LogFlowFunc(("returns %Rrc\n", rc));
7398 return rc;
7399}
7400
7401/** @copydoc VDIMAGEBACKEND::pfnGetModificationUuid */
7402static DECLCALLBACK(int) vmdkGetModificationUuid(void *pBackendData, PRTUUID pUuid)
7403{
7404 LogFlowFunc(("pBackendData=%#p pUuid=%#p\n", pBackendData, pUuid));
7405 PVMDKIMAGE pImage = (PVMDKIMAGE)pBackendData;
7406
7407 AssertPtrReturn(pImage, VERR_VD_NOT_OPENED);
7408
7409 *pUuid = pImage->ModificationUuid;
7410
7411 LogFlowFunc(("returns %Rrc (%RTuuid)\n", VINF_SUCCESS, pUuid));
7412 return VINF_SUCCESS;
7413}
7414
7415/** @copydoc VDIMAGEBACKEND::pfnSetModificationUuid */
7416static DECLCALLBACK(int) vmdkSetModificationUuid(void *pBackendData, PCRTUUID pUuid)
7417{
7418 LogFlowFunc(("pBackendData=%#p Uuid=%RTuuid\n", pBackendData, pUuid));
7419 PVMDKIMAGE pImage = (PVMDKIMAGE)pBackendData;
7420 int rc = VINF_SUCCESS;
7421
7422 AssertPtrReturn(pImage, VERR_VD_NOT_OPENED);
7423
7424 if (!(pImage->uOpenFlags & VD_OPEN_FLAGS_READONLY))
7425 {
7426 if (!(pImage->uOpenFlags & VD_VMDK_IMAGE_FLAGS_STREAM_OPTIMIZED))
7427 {
7428 /* Only touch the modification uuid if it changed. */
7429 if (RTUuidCompare(&pImage->ModificationUuid, pUuid))
7430 {
7431 pImage->ModificationUuid = *pUuid;
7432 rc = vmdkDescDDBSetUuid(pImage, &pImage->Descriptor,
7433 VMDK_DDB_MODIFICATION_UUID, pUuid);
7434 if (RT_FAILURE(rc))
7435 rc = vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: error storing modification UUID in descriptor in '%s'"), pImage->pszFilename);
7436 }
7437 }
7438 else
7439 rc = VERR_NOT_SUPPORTED;
7440 }
7441 else
7442 rc = VERR_VD_IMAGE_READ_ONLY;
7443
7444 LogFlowFunc(("returns %Rrc\n", rc));
7445 return rc;
7446}
7447
7448/** @copydoc VDIMAGEBACKEND::pfnGetParentUuid */
7449static DECLCALLBACK(int) vmdkGetParentUuid(void *pBackendData, PRTUUID pUuid)
7450{
7451 LogFlowFunc(("pBackendData=%#p pUuid=%#p\n", pBackendData, pUuid));
7452 PVMDKIMAGE pImage = (PVMDKIMAGE)pBackendData;
7453
7454 AssertPtrReturn(pImage, VERR_VD_NOT_OPENED);
7455
7456 *pUuid = pImage->ParentUuid;
7457
7458 LogFlowFunc(("returns %Rrc (%RTuuid)\n", VINF_SUCCESS, pUuid));
7459 return VINF_SUCCESS;
7460}
7461
7462/** @copydoc VDIMAGEBACKEND::pfnSetParentUuid */
7463static DECLCALLBACK(int) vmdkSetParentUuid(void *pBackendData, PCRTUUID pUuid)
7464{
7465 LogFlowFunc(("pBackendData=%#p Uuid=%RTuuid\n", pBackendData, pUuid));
7466 PVMDKIMAGE pImage = (PVMDKIMAGE)pBackendData;
7467 int rc = VINF_SUCCESS;
7468
7469 AssertPtrReturn(pImage, VERR_VD_NOT_OPENED);
7470
7471 if (!(pImage->uOpenFlags & VD_OPEN_FLAGS_READONLY))
7472 {
7473 if (!(pImage->uOpenFlags & VD_VMDK_IMAGE_FLAGS_STREAM_OPTIMIZED))
7474 {
7475 pImage->ParentUuid = *pUuid;
7476 rc = vmdkDescDDBSetUuid(pImage, &pImage->Descriptor,
7477 VMDK_DDB_PARENT_UUID, pUuid);
7478 if (RT_FAILURE(rc))
7479 rc = vdIfError(pImage->pIfError, rc, RT_SRC_POS,
7480 N_("VMDK: error storing parent image UUID in descriptor in '%s'"), pImage->pszFilename);
7481 }
7482 else
7483 rc = VERR_NOT_SUPPORTED;
7484 }
7485 else
7486 rc = VERR_VD_IMAGE_READ_ONLY;
7487
7488 LogFlowFunc(("returns %Rrc\n", rc));
7489 return rc;
7490}
7491
7492/** @copydoc VDIMAGEBACKEND::pfnGetParentModificationUuid */
7493static DECLCALLBACK(int) vmdkGetParentModificationUuid(void *pBackendData, PRTUUID pUuid)
7494{
7495 LogFlowFunc(("pBackendData=%#p pUuid=%#p\n", pBackendData, pUuid));
7496 PVMDKIMAGE pImage = (PVMDKIMAGE)pBackendData;
7497
7498 AssertPtrReturn(pImage, VERR_VD_NOT_OPENED);
7499
7500 *pUuid = pImage->ParentModificationUuid;
7501
7502 LogFlowFunc(("returns %Rrc (%RTuuid)\n", VINF_SUCCESS, pUuid));
7503 return VINF_SUCCESS;
7504}
7505
7506/** @copydoc VDIMAGEBACKEND::pfnSetParentModificationUuid */
7507static DECLCALLBACK(int) vmdkSetParentModificationUuid(void *pBackendData, PCRTUUID pUuid)
7508{
7509 LogFlowFunc(("pBackendData=%#p Uuid=%RTuuid\n", pBackendData, pUuid));
7510 PVMDKIMAGE pImage = (PVMDKIMAGE)pBackendData;
7511 int rc = VINF_SUCCESS;
7512
7513 AssertPtrReturn(pImage, VERR_VD_NOT_OPENED);
7514
7515 if (!(pImage->uOpenFlags & VD_OPEN_FLAGS_READONLY))
7516 {
7517 if (!(pImage->uOpenFlags & VD_VMDK_IMAGE_FLAGS_STREAM_OPTIMIZED))
7518 {
7519 pImage->ParentModificationUuid = *pUuid;
7520 rc = vmdkDescDDBSetUuid(pImage, &pImage->Descriptor,
7521 VMDK_DDB_PARENT_MODIFICATION_UUID, pUuid);
7522 if (RT_FAILURE(rc))
7523 rc = vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: error storing parent image UUID in descriptor in '%s'"), pImage->pszFilename);
7524 }
7525 else
7526 rc = VERR_NOT_SUPPORTED;
7527 }
7528 else
7529 rc = VERR_VD_IMAGE_READ_ONLY;
7530
7531 LogFlowFunc(("returns %Rrc\n", rc));
7532 return rc;
7533}
7534
7535/** @copydoc VDIMAGEBACKEND::pfnDump */
7536static DECLCALLBACK(void) vmdkDump(void *pBackendData)
7537{
7538 PVMDKIMAGE pImage = (PVMDKIMAGE)pBackendData;
7539
7540 AssertPtrReturnVoid(pImage);
7541 vdIfErrorMessage(pImage->pIfError, "Header: Geometry PCHS=%u/%u/%u LCHS=%u/%u/%u cbSector=%llu\n",
7542 pImage->PCHSGeometry.cCylinders, pImage->PCHSGeometry.cHeads, pImage->PCHSGeometry.cSectors,
7543 pImage->LCHSGeometry.cCylinders, pImage->LCHSGeometry.cHeads, pImage->LCHSGeometry.cSectors,
7544 VMDK_BYTE2SECTOR(pImage->cbSize));
7545 vdIfErrorMessage(pImage->pIfError, "Header: uuidCreation={%RTuuid}\n", &pImage->ImageUuid);
7546 vdIfErrorMessage(pImage->pIfError, "Header: uuidModification={%RTuuid}\n", &pImage->ModificationUuid);
7547 vdIfErrorMessage(pImage->pIfError, "Header: uuidParent={%RTuuid}\n", &pImage->ParentUuid);
7548 vdIfErrorMessage(pImage->pIfError, "Header: uuidParentModification={%RTuuid}\n", &pImage->ParentModificationUuid);
7549}
7550
7551
7552
/**
 * VMDK image backend descriptor registered with the VD container layer.
 *
 * Entries must stay in the exact order declared by VDIMAGEBACKEND; unsupported
 * optional operations are NULL.  The version fields at both ends let the
 * loader detect a structure mismatch.
 */
const VDIMAGEBACKEND g_VmdkBackend =
{
    /* u32Version */
    VD_IMGBACKEND_VERSION,
    /* pszBackendName */
    "VMDK",
    /* uBackendCaps */
    VD_CAP_UUID | VD_CAP_CREATE_FIXED | VD_CAP_CREATE_DYNAMIC
    | VD_CAP_CREATE_SPLIT_2G | VD_CAP_DIFF | VD_CAP_FILE | VD_CAP_ASYNC
    | VD_CAP_VFS | VD_CAP_PREFERRED,
    /* paFileExtensions */
    s_aVmdkFileExtensions,
    /* paConfigInfo */
    s_aVmdkConfigInfo,
    /* pfnProbe */
    vmdkProbe,
    /* pfnOpen */
    vmdkOpen,
    /* pfnCreate */
    vmdkCreate,
    /* pfnRename */
    vmdkRename,
    /* pfnClose */
    vmdkClose,
    /* pfnRead */
    vmdkRead,
    /* pfnWrite */
    vmdkWrite,
    /* pfnFlush */
    vmdkFlush,
    /* pfnDiscard */
    NULL,
    /* pfnGetVersion */
    vmdkGetVersion,
    /* pfnGetFileSize */
    vmdkGetFileSize,
    /* pfnGetPCHSGeometry */
    vmdkGetPCHSGeometry,
    /* pfnSetPCHSGeometry */
    vmdkSetPCHSGeometry,
    /* pfnGetLCHSGeometry */
    vmdkGetLCHSGeometry,
    /* pfnSetLCHSGeometry */
    vmdkSetLCHSGeometry,
    /* pfnQueryRegions */
    vmdkQueryRegions,
    /* pfnRegionListRelease */
    vmdkRegionListRelease,
    /* pfnGetImageFlags */
    vmdkGetImageFlags,
    /* pfnGetOpenFlags */
    vmdkGetOpenFlags,
    /* pfnSetOpenFlags */
    vmdkSetOpenFlags,
    /* pfnGetComment */
    vmdkGetComment,
    /* pfnSetComment */
    vmdkSetComment,
    /* pfnGetUuid */
    vmdkGetUuid,
    /* pfnSetUuid */
    vmdkSetUuid,
    /* pfnGetModificationUuid */
    vmdkGetModificationUuid,
    /* pfnSetModificationUuid */
    vmdkSetModificationUuid,
    /* pfnGetParentUuid */
    vmdkGetParentUuid,
    /* pfnSetParentUuid */
    vmdkSetParentUuid,
    /* pfnGetParentModificationUuid */
    vmdkGetParentModificationUuid,
    /* pfnSetParentModificationUuid */
    vmdkSetParentModificationUuid,
    /* pfnDump */
    vmdkDump,
    /* pfnGetTimestamp */
    NULL,
    /* pfnGetParentTimestamp */
    NULL,
    /* pfnSetParentTimestamp */
    NULL,
    /* pfnGetParentFilename */
    NULL,
    /* pfnSetParentFilename */
    NULL,
    /* pfnComposeLocation */
    genericFileComposeLocation,
    /* pfnComposeName */
    genericFileComposeName,
    /* pfnCompact */
    NULL,
    /* pfnResize */
    NULL,
    /* pfnRepair */
    NULL,
    /* pfnTraverseMetadata */
    NULL,
    /* u32VersionEnd */
    VD_IMGBACKEND_VERSION
};
Note: See TracBrowser for help on using the repository browser.

© 2024 Oracle Support Privacy / Do Not Sell My Info Terms of Use Trademark Policy Automated Access Etiquette