VirtualBox

source: vbox/trunk/src/VBox/Storage/VMDK.cpp@ 86719

Last change on this file since 86719 was 86604, checked in by vboxsync, 4 years ago

Main: bugref:9224: Added the drive path and partition checking for linux

  • Property svn:eol-style set to native
  • Property svn:keywords set to Author Date Id Revision
File size: 318.7 KB
Line 
1/* $Id: VMDK.cpp 86604 2020-10-16 12:00:49Z vboxsync $ */
2/** @file
3 * VMDK disk image, core code.
4 */
5
6/*
7 * Copyright (C) 2006-2020 Oracle Corporation
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 */
17
18
19/*********************************************************************************************************************************
20* Header Files *
21*********************************************************************************************************************************/
22#define LOG_GROUP LOG_GROUP_VD_VMDK
23#include <VBox/log.h> /* before VBox/vd-ifs.h */
24#include <VBox/vd-plugin.h>
25#include <VBox/err.h>
26
27#include <iprt/assert.h>
28#include <iprt/alloc.h>
29#include <iprt/base64.h>
30#include <iprt/ctype.h>
31#include <iprt/crc.h>
32#include <iprt/dvm.h>
33#include <iprt/uuid.h>
34#include <iprt/path.h>
35#include <iprt/rand.h>
36#include <iprt/string.h>
37#include <iprt/sort.h>
38#include <iprt/zip.h>
39#include <iprt/asm.h>
40#ifdef RT_OS_WINDOWS
41# include <iprt/utf16.h>
42# include <iprt/uni.h>
43# include <iprt/uni.h>
44# include <iprt/nt/nt-and-windows.h>
45# include <winioctl.h>
46#endif
47#ifdef RT_OS_LINUX
48# include <errno.h>
49# include <sys/stat.h>
50# include <iprt/dir.h>
51# include <iprt/symlink.h>
52# include <iprt/linux/sysfs.h>
53#endif
54
55#include "VDBackends.h"
56
57
58/*********************************************************************************************************************************
59* Constants And Macros, Structures and Typedefs *
60*********************************************************************************************************************************/
61
62/** Maximum encoded string size (including NUL) we allow for VMDK images.
63 * Deliberately not set high to avoid running out of descriptor space. */
64#define VMDK_ENCODED_COMMENT_MAX 1024
65
66/** VMDK descriptor DDB entry for PCHS cylinders. */
67#define VMDK_DDB_GEO_PCHS_CYLINDERS "ddb.geometry.cylinders"
68
69/** VMDK descriptor DDB entry for PCHS heads. */
70#define VMDK_DDB_GEO_PCHS_HEADS "ddb.geometry.heads"
71
72/** VMDK descriptor DDB entry for PCHS sectors. */
73#define VMDK_DDB_GEO_PCHS_SECTORS "ddb.geometry.sectors"
74
75/** VMDK descriptor DDB entry for LCHS cylinders. */
76#define VMDK_DDB_GEO_LCHS_CYLINDERS "ddb.geometry.biosCylinders"
77
78/** VMDK descriptor DDB entry for LCHS heads. */
79#define VMDK_DDB_GEO_LCHS_HEADS "ddb.geometry.biosHeads"
80
81/** VMDK descriptor DDB entry for LCHS sectors. */
82#define VMDK_DDB_GEO_LCHS_SECTORS "ddb.geometry.biosSectors"
83
84/** VMDK descriptor DDB entry for image UUID. */
85#define VMDK_DDB_IMAGE_UUID "ddb.uuid.image"
86
87/** VMDK descriptor DDB entry for image modification UUID. */
88#define VMDK_DDB_MODIFICATION_UUID "ddb.uuid.modification"
89
90/** VMDK descriptor DDB entry for parent image UUID. */
91#define VMDK_DDB_PARENT_UUID "ddb.uuid.parent"
92
93/** VMDK descriptor DDB entry for parent image modification UUID. */
94#define VMDK_DDB_PARENT_MODIFICATION_UUID "ddb.uuid.parentmodification"
95
96/** No compression for streamOptimized files. */
97#define VMDK_COMPRESSION_NONE 0
98
99/** Deflate compression for streamOptimized files. */
100#define VMDK_COMPRESSION_DEFLATE 1
101
102/** Marker that the actual GD value is stored in the footer. */
103#define VMDK_GD_AT_END 0xffffffffffffffffULL
104
105/** Marker for end-of-stream in streamOptimized images. */
106#define VMDK_MARKER_EOS 0
107
108/** Marker for grain table block in streamOptimized images. */
109#define VMDK_MARKER_GT 1
110
111/** Marker for grain directory block in streamOptimized images. */
112#define VMDK_MARKER_GD 2
113
114/** Marker for footer in streamOptimized images. */
115#define VMDK_MARKER_FOOTER 3
116
117/** Marker for unknown purpose in streamOptimized images.
118 * Shows up in very recent images created by vSphere, but only sporadically.
119 * They "forgot" to document that one in the VMDK specification. */
120#define VMDK_MARKER_UNSPECIFIED 4
121
122/** Dummy marker for "don't check the marker value". */
123#define VMDK_MARKER_IGNORE 0xffffffffU
124
125/**
126 * Magic number for hosted images created by VMware Workstation 4, VMware
127 * Workstation 5, VMware Server or VMware Player. Not necessarily sparse.
128 */
129#define VMDK_SPARSE_MAGICNUMBER 0x564d444b /* 'V' 'M' 'D' 'K' */
130
/**
 * VMDK hosted binary extent header. The "Sparse" is a total misnomer, as
 * this header is also used for monolithic flat images.
 *
 * On-disk layout, hence the packing; all fields sum to exactly one
 * 512-byte sector (see the pad[] member).
 */
#pragma pack(1)
typedef struct SparseExtentHeader
{
    /** Magic number, VMDK_SPARSE_MAGICNUMBER for hosted images. */
    uint32_t magicNumber;
    /** Extent format version. */
    uint32_t version;
    /** Feature flags. */
    uint32_t flags;
    /** Capacity of the extent in sectors. */
    uint64_t capacity;
    /** Grain size in sectors. */
    uint64_t grainSize;
    /** Starting sector of the embedded descriptor. */
    uint64_t descriptorOffset;
    /** Size of the embedded descriptor in sectors. */
    uint64_t descriptorSize;
    /** Number of grain table entries per grain table. */
    uint32_t numGTEsPerGT;
    /** Starting sector of the redundant grain directory. */
    uint64_t rgdOffset;
    /** Starting sector of the grain directory; VMDK_GD_AT_END means the
     * actual value is stored in the footer (streamOptimized). */
    uint64_t gdOffset;
    /** Number of metadata (overhead) sectors. */
    uint64_t overHead;
    /** Unclean shutdown flag, see VMDKEXTENT::fUncleanShutdown. */
    bool uncleanShutdown;
    /** Line-ending detection bytes -- used to spot FTP/text-mode corruption
     * of the image file (per the VMDK spec). */
    char singleEndLineChar;
    char nonEndLineChar;
    char doubleEndLineChar1;
    char doubleEndLineChar2;
    /** Compression algorithm (VMDK_COMPRESSION_NONE/DEFLATE). */
    uint16_t compressAlgorithm;
    /** Padding up to a full 512-byte sector. */
    uint8_t pad[433];
} SparseExtentHeader;
#pragma pack()
158
159/** The maximum allowed descriptor size in the extent header in sectors. */
160#define VMDK_SPARSE_DESCRIPTOR_SIZE_MAX UINT64_C(20480) /* 10MB */
161
162/** VMDK capacity for a single chunk when 2G splitting is turned on. Should be
163 * divisible by the default grain size (64K) */
164#define VMDK_2G_SPLIT_SIZE (2047 * 1024 * 1024)
165
/** VMDK streamOptimized file format marker. The type field may or may not
 * be actually valid, but there's always data to read there.
 *
 * On-disk structure, hence the packing. For compressed grains the
 * compressed data starts at offset RT_UOFFSETOF(VMDKMARKER, uType),
 * i.e. it overlays the uType field. */
#pragma pack(1)
typedef struct VMDKMARKER
{
    /** Sector number (LBA) the data belongs to; little endian on disk. */
    uint64_t uSector;
    /** Size of the compressed data following the marker, in bytes;
     * little endian on disk. */
    uint32_t cbSize;
    /** Marker type (VMDK_MARKER_EOS/GT/GD/FOOTER); only meaningful for
     * metadata markers, overlapped by data for compressed grains. */
    uint32_t uType;
} VMDKMARKER, *PVMDKMARKER;
#pragma pack()
176
177
178/** Convert sector number/size to byte offset/size. */
179#define VMDK_SECTOR2BYTE(u) ((uint64_t)(u) << 9)
180
181/** Convert byte offset/size to sector number/size. */
182#define VMDK_BYTE2SECTOR(u) ((u) >> 9)
183
/**
 * VMDK extent type. Numbering starts at 1, leaving 0 unused.
 */
typedef enum VMDKETYPE
{
    /** Hosted sparse extent. */
    VMDKETYPE_HOSTED_SPARSE = 1,
    /** Flat extent. */
    VMDKETYPE_FLAT,
    /** Zero extent. */
    VMDKETYPE_ZERO,
    /** VMFS extent, used by ESX. */
    VMDKETYPE_VMFS
} VMDKETYPE, *PVMDKETYPE;
198
/**
 * VMDK access type for an extent.
 */
typedef enum VMDKACCESS
{
    /** No access allowed. */
    VMDKACCESS_NOACCESS = 0,
    /** Read-only access. */
    VMDKACCESS_READONLY,
    /** Read-write access. */
    VMDKACCESS_READWRITE
} VMDKACCESS, *PVMDKACCESS;
211
212/** Forward declaration for PVMDKIMAGE. */
213typedef struct VMDKIMAGE *PVMDKIMAGE;
214
/**
 * Extents files entry. Used for opening a particular file only once.
 *
 * Entries form a doubly linked list anchored in VMDKIMAGE::pFiles and are
 * reference counted; see vmdkFileOpen()/vmdkFileClose().
 */
typedef struct VMDKFILE
{
    /** Pointer to file path. Local copy, freed on last close. */
    const char *pszFilename;
    /** Pointer to base name. Local copy, may be NULL. */
    const char *pszBasename;
    /** File open flags for consistency checking. */
    unsigned fOpen;
    /** Handle for sync/async file abstraction.*/
    PVDIOSTORAGE pStorage;
    /** Reference counter. */
    unsigned uReferences;
    /** Flag whether the file should be deleted on last close.
     * Sticky: once requested it stays set, see vmdkFileClose(). */
    bool fDelete;
    /** Pointer to the image we belong to (for debugging purposes). */
    PVMDKIMAGE pImage;
    /** Pointer to next file descriptor. */
    struct VMDKFILE *pNext;
    /** Pointer to the previous file descriptor. */
    struct VMDKFILE *pPrev;
} VMDKFILE, *PVMDKFILE;
239
/**
 * VMDK extent data structure.
 */
typedef struct VMDKEXTENT
{
    /** File handle. */
    PVMDKFILE pFile;
    /** Base name of the image extent. */
    const char *pszBasename;
    /** Full name of the image extent. */
    const char *pszFullname;
    /** Number of sectors in this extent. */
    uint64_t cSectors;
    /** Number of sectors per block (grain in VMDK speak). */
    uint64_t cSectorsPerGrain;
    /** Starting sector number of descriptor. */
    uint64_t uDescriptorSector;
    /** Size of descriptor in sectors. */
    uint64_t cDescriptorSectors;
    /** Starting sector number of grain directory. */
    uint64_t uSectorGD;
    /** Starting sector number of redundant grain directory. */
    uint64_t uSectorRGD;
    /** Total number of metadata sectors. */
    uint64_t cOverheadSectors;
    /** Nominal size (i.e. as described by the descriptor) of this extent. */
    uint64_t cNominalSectors;
    /** Sector offset (i.e. as described by the descriptor) of this extent. */
    uint64_t uSectorOffset;
    /** Number of entries in a grain table. */
    uint32_t cGTEntries;
    /** Number of sectors reachable via a grain directory entry. */
    uint32_t cSectorsPerGDE;
    /** Number of entries in the grain directory. */
    uint32_t cGDEntries;
    /** Pointer to the next free sector. Legacy information. Do not use. */
    uint32_t uFreeSector;
    /** Number of this extent in the list of images. */
    uint32_t uExtent;
    /** Pointer to the descriptor (NULL if no descriptor in this extent). */
    char *pDescData;
    /** Pointer to the grain directory. */
    uint32_t *pGD;
    /** Pointer to the redundant grain directory. */
    uint32_t *pRGD;
    /** VMDK version of this extent. 1=1.0/1.1 */
    uint32_t uVersion;
    /** Type of this extent. */
    VMDKETYPE enmType;
    /** Access to this extent. */
    VMDKACCESS enmAccess;
    /** Flag whether this extent is marked as unclean. */
    bool fUncleanShutdown;
    /** Flag whether the metadata in the extent header needs to be updated. */
    bool fMetaDirty;
    /** Flag whether there is a footer in this extent. */
    bool fFooter;
    /** Compression type for this extent
     * (VMDK_COMPRESSION_NONE or VMDK_COMPRESSION_DEFLATE). */
    uint16_t uCompression;
    /** Append position for writing new grain. Only for sparse extents. */
    uint64_t uAppendPosition;
    /** Last grain which was accessed. Only for streamOptimized extents. */
    uint32_t uLastGrainAccess;
    /** Starting sector corresponding to the grain buffer. */
    uint32_t uGrainSectorAbs;
    /** Grain number corresponding to the grain buffer. */
    uint32_t uGrain;
    /** Actual size of the compressed data, only valid for reading. */
    uint32_t cbGrainStreamRead;
    /** Size of compressed grain buffer for streamOptimized extents. */
    size_t cbCompGrain;
    /** Compressed grain buffer for streamOptimized extents, with marker. */
    void *pvCompGrain;
    /** Decompressed grain buffer for streamOptimized extents. */
    void *pvGrain;
    /** Reference to the image in which this extent is used. Do not use this
     * on a regular basis to avoid passing pImage references to functions
     * explicitly. */
    struct VMDKIMAGE *pImage;
} VMDKEXTENT, *PVMDKEXTENT;
320
321/**
322 * Grain table cache size. Allocated per image.
323 */
324#define VMDK_GT_CACHE_SIZE 256
325
326/**
327 * Grain table block size. Smaller than an actual grain table block to allow
328 * more grain table blocks to be cached without having to allocate excessive
329 * amounts of memory for the cache.
330 */
331#define VMDK_GT_CACHELINE_SIZE 128
332
333
334/**
335 * Maximum number of lines in a descriptor file. Not worth the effort of
336 * making it variable. Descriptor files are generally very short (~20 lines),
337 * with the exception of sparse files split in 2G chunks, which need for the
338 * maximum size (almost 2T) exactly 1025 lines for the disk database.
339 */
340#define VMDK_DESCRIPTOR_LINES_MAX 1100U
341
/**
 * Parsed descriptor information. Allows easy access and update of the
 * descriptor (whether separate file or not). Free form text files suck.
 */
typedef struct VMDKDESCRIPTOR
{
    /** Line number of first entry of the disk descriptor. */
    unsigned uFirstDesc;
    /** Line number of first entry in the extent description. */
    unsigned uFirstExtent;
    /** Line number of first disk database entry. */
    unsigned uFirstDDB;
    /** Total number of lines. */
    unsigned cLines;
    /** Total amount of memory available for the descriptor. */
    size_t cbDescAlloc;
    /** Set if descriptor has been changed and not yet written to disk. */
    bool fDirty;
    /** Array of pointers to the data in the descriptor
     * (at most VMDK_DESCRIPTOR_LINES_MAX lines). */
    char *aLines[VMDK_DESCRIPTOR_LINES_MAX];
    /** Array of line indices pointing to the next non-comment line. */
    unsigned aNextLines[VMDK_DESCRIPTOR_LINES_MAX];
} VMDKDESCRIPTOR, *PVMDKDESCRIPTOR;
365
366
/**
 * Cache entry for translating extent/sector to a sector number in that
 * extent. See VMDKGTCACHE for the cache organization.
 */
typedef struct VMDKGTCACHEENTRY
{
    /** Extent number for which this entry is valid. */
    uint32_t uExtent;
    /** GT data block number. */
    uint64_t uGTBlock;
    /** Data part of the cache entry: VMDK_GT_CACHELINE_SIZE grain table
     * entries. */
    uint32_t aGTData[VMDK_GT_CACHELINE_SIZE];
} VMDKGTCACHEENTRY, *PVMDKGTCACHEENTRY;
380
/**
 * Cache data structure for blocks of grain table entries. For now this is a
 * fixed size direct mapping cache, but this should be adapted to the size of
 * the sparse image and maybe converted to a set-associative cache. The
 * implementation below implements a write-through cache with write allocate.
 */
typedef struct VMDKGTCACHE
{
    /** Cache entries (direct mapped, VMDK_GT_CACHE_SIZE slots). */
    VMDKGTCACHEENTRY aGTCache[VMDK_GT_CACHE_SIZE];
    /** Number of cache entries (currently unused). */
    unsigned cEntries;
} VMDKGTCACHE, *PVMDKGTCACHE;
394
/**
 * Complete VMDK image data structure. Mainly a collection of extents and a few
 * extra global data fields.
 */
typedef struct VMDKIMAGE
{
    /** Image name. */
    const char *pszFilename;
    /** Descriptor file if applicable. */
    PVMDKFILE pFile;

    /** Pointer to the per-disk VD interface list. */
    PVDINTERFACE pVDIfsDisk;
    /** Pointer to the per-image VD interface list. */
    PVDINTERFACE pVDIfsImage;

    /** Error interface. */
    PVDINTERFACEERROR pIfError;
    /** I/O interface. */
    PVDINTERFACEIOINT pIfIo;


    /** Pointer to the image extents. */
    PVMDKEXTENT pExtents;
    /** Number of image extents. */
    unsigned cExtents;
    /** Pointer to the files list (doubly linked via VMDKFILE::pNext/pPrev),
     * for opening a file referenced multiple
     * times only once (happens mainly with raw partition access). */
    PVMDKFILE pFiles;

    /**
     * Pointer to an array of segment entries for async I/O.
     * This is an optimization because the task number to submit is not known
     * and allocating/freeing an array in the read/write functions every time
     * is too expensive.
     */
    PPDMDATASEG paSegments;
    /** Entries available in the segments array. */
    unsigned cSegments;

    /** Open flags passed by VBoxHD layer. */
    unsigned uOpenFlags;
    /** Image flags defined during creation or determined during open. */
    unsigned uImageFlags;
    /** Total size of the image. */
    uint64_t cbSize;
    /** Physical geometry of this image. */
    VDGEOMETRY PCHSGeometry;
    /** Logical geometry of this image. */
    VDGEOMETRY LCHSGeometry;
    /** Image UUID. */
    RTUUID ImageUuid;
    /** Image modification UUID. */
    RTUUID ModificationUuid;
    /** Parent image UUID. */
    RTUUID ParentUuid;
    /** Parent image modification UUID. */
    RTUUID ParentModificationUuid;

    /** Pointer to grain table cache, if this image contains sparse extents. */
    PVMDKGTCACHE pGTCache;
    /** Pointer to the descriptor (NULL if no separate descriptor file). */
    char *pDescData;
    /** Allocation size of the descriptor file. */
    size_t cbDescAlloc;
    /** Parsed descriptor file content. */
    VMDKDESCRIPTOR Descriptor;
    /** The static region list. */
    VDREGIONLIST RegionList;
} VMDKIMAGE;
465
466
/** State for the input/output callout of the inflate reader/deflate writer. */
typedef struct VMDKCOMPRESSIO
{
    /** Image this operation relates to. */
    PVMDKIMAGE pImage;
    /** Current read/write position within pvCompGrain. Initialized to -1 to
     * signal the first callback invocation, where the zlib stream-type byte
     * is injected/stripped (see vmdkFileInflateHelper/vmdkFileDeflateHelper). */
    ssize_t iOffset;
    /** Size of the compressed grain buffer (available data). */
    size_t cbCompGrain;
    /** Pointer to the compressed grain buffer. */
    void *pvCompGrain;
} VMDKCOMPRESSIO;
479
480
/** Tracks async grain allocation. */
typedef struct VMDKGRAINALLOCASYNC
{
    /** Flag whether the allocation failed. */
    bool fIoErr;
    /** Current number of transfers pending.
     * If reached 0 and there is an error the old state is restored. */
    unsigned cIoXfersPending;
    /** Sector number. */
    uint64_t uSector;
    /** Flag whether the grain table needs to be updated. */
    bool fGTUpdateNeeded;
    /** Extent the allocation happens in. */
    PVMDKEXTENT pExtent;
    /** Position of the new grain, required for the grain table update. */
    uint64_t uGrainOffset;
    /** Grain table sector. */
    uint64_t uGTSector;
    /** Backup grain table sector. */
    uint64_t uRGTSector;
} VMDKGRAINALLOCASYNC, *PVMDKGRAINALLOCASYNC;
502
/**
 * State information for vmdkRename() and helpers.
 */
typedef struct VMDKRENAMESTATE
{
    /** Array of old filenames. */
    char **apszOldName;
    /** Array of new filenames. */
    char **apszNewName;
    /** Array of new lines in the extent descriptor. */
    char **apszNewLines;
    /** Name of the old descriptor file if not a sparse image. */
    char *pszOldDescName;
    /** Flag whether we called vmdkFreeImage(). */
    bool fImageFreed;
    /** Flag whether the descriptor is embedded in the image (sparse) or
     * in a separate file. */
    bool fEmbeddedDesc;
    /** Number of extents in the image. */
    unsigned cExtents;
    /** New base filename. */
    char *pszNewBaseName;
    /** The old base filename. */
    char *pszOldBaseName;
    /** New full filename. */
    char *pszNewFullName;
    /** Old full filename. */
    char *pszOldFullName;
    /** The old image name. */
    const char *pszOldImageName;
    /** Copy of the original VMDK descriptor. */
    VMDKDESCRIPTOR DescriptorCopy;
    /** Copy of the extent state for sparse images. */
    VMDKEXTENT ExtentCopy;
} VMDKRENAMESTATE;
/** Pointer to a VMDK rename state. */
typedef VMDKRENAMESTATE *PVMDKRENAMESTATE;
540
541
542/*********************************************************************************************************************************
543* Static Variables *
544*********************************************************************************************************************************/
545
/** NULL-terminated array of supported file extensions. */
static const VDFILEEXTENSION s_aVmdkFileExtensions[] =
{
    {"vmdk", VDTYPE_HDD},
    {NULL, VDTYPE_INVALID} /* terminator entry */
};
552
/** NULL-terminated array of configuration options. */
static const VDCONFIGINFO s_aVmdkConfigInfo[] =
{
    /* Options for VMDK raw disks */
    { "RawDrive", NULL, VDCFGVALUETYPE_STRING, 0 },
    { "Partitions", NULL, VDCFGVALUETYPE_STRING, 0 },
    { "BootSector", NULL, VDCFGVALUETYPE_BYTES, 0 },
    { "Relative", NULL, VDCFGVALUETYPE_INTEGER, 0 },

    /* End of options list: NULL key terminates the array. */
    { NULL, NULL, VDCFGVALUETYPE_INTEGER, 0 }
};
565
566
567/*********************************************************************************************************************************
568* Internal Functions *
569*********************************************************************************************************************************/
570
571static void vmdkFreeStreamBuffers(PVMDKEXTENT pExtent);
572static int vmdkFreeExtentData(PVMDKIMAGE pImage, PVMDKEXTENT pExtent,
573 bool fDelete);
574
575static int vmdkCreateExtents(PVMDKIMAGE pImage, unsigned cExtents);
576static int vmdkFlushImage(PVMDKIMAGE pImage, PVDIOCTX pIoCtx);
577static int vmdkSetImageComment(PVMDKIMAGE pImage, const char *pszComment);
578static int vmdkFreeImage(PVMDKIMAGE pImage, bool fDelete, bool fFlush);
579
580static DECLCALLBACK(int) vmdkAllocGrainComplete(void *pBackendData, PVDIOCTX pIoCtx,
581 void *pvUser, int rcReq);
582
583/**
584 * Internal: open a file (using a file descriptor cache to ensure each file
585 * is only opened once - anything else can cause locking problems).
586 */
587static int vmdkFileOpen(PVMDKIMAGE pImage, PVMDKFILE *ppVmdkFile,
588 const char *pszBasename, const char *pszFilename, uint32_t fOpen)
589{
590 int rc = VINF_SUCCESS;
591 PVMDKFILE pVmdkFile;
592
593 for (pVmdkFile = pImage->pFiles;
594 pVmdkFile != NULL;
595 pVmdkFile = pVmdkFile->pNext)
596 {
597 if (!strcmp(pszFilename, pVmdkFile->pszFilename))
598 {
599 Assert(fOpen == pVmdkFile->fOpen);
600 pVmdkFile->uReferences++;
601
602 *ppVmdkFile = pVmdkFile;
603
604 return rc;
605 }
606 }
607
608 /* If we get here, there's no matching entry in the cache. */
609 pVmdkFile = (PVMDKFILE)RTMemAllocZ(sizeof(VMDKFILE));
610 if (!pVmdkFile)
611 {
612 *ppVmdkFile = NULL;
613 return VERR_NO_MEMORY;
614 }
615
616 pVmdkFile->pszFilename = RTStrDup(pszFilename);
617 if (!pVmdkFile->pszFilename)
618 {
619 RTMemFree(pVmdkFile);
620 *ppVmdkFile = NULL;
621 return VERR_NO_MEMORY;
622 }
623
624 if (pszBasename)
625 {
626 pVmdkFile->pszBasename = RTStrDup(pszBasename);
627 if (!pVmdkFile->pszBasename)
628 {
629 RTStrFree((char *)(void *)pVmdkFile->pszFilename);
630 RTMemFree(pVmdkFile);
631 *ppVmdkFile = NULL;
632 return VERR_NO_MEMORY;
633 }
634 }
635
636 pVmdkFile->fOpen = fOpen;
637
638 rc = vdIfIoIntFileOpen(pImage->pIfIo, pszFilename, fOpen,
639 &pVmdkFile->pStorage);
640 if (RT_SUCCESS(rc))
641 {
642 pVmdkFile->uReferences = 1;
643 pVmdkFile->pImage = pImage;
644 pVmdkFile->pNext = pImage->pFiles;
645 if (pImage->pFiles)
646 pImage->pFiles->pPrev = pVmdkFile;
647 pImage->pFiles = pVmdkFile;
648 *ppVmdkFile = pVmdkFile;
649 }
650 else
651 {
652 RTStrFree((char *)(void *)pVmdkFile->pszFilename);
653 RTMemFree(pVmdkFile);
654 *ppVmdkFile = NULL;
655 }
656
657 return rc;
658}
659
/**
 * Internal: close a file, updating the file descriptor cache.
 *
 * Drops one reference; when the last reference is released the entry is
 * unchained from the image's file list, the storage handle is closed and,
 * if requested and deemed safe, the file is deleted from disk.
 *
 * @returns VBox status code (first failure wins).
 * @param   pImage      The image instance owning the file cache.
 * @param   ppVmdkFile  The file entry to release; always set to NULL on return.
 * @param   fDelete     Whether to delete the file on last close; sticky
 *                      across multiple references.
 */
static int vmdkFileClose(PVMDKIMAGE pImage, PVMDKFILE *ppVmdkFile, bool fDelete)
{
    int rc = VINF_SUCCESS;
    PVMDKFILE pVmdkFile = *ppVmdkFile;

    AssertPtr(pVmdkFile);

    /* A delete request from any closer stays set until the last reference. */
    pVmdkFile->fDelete |= fDelete;
    Assert(pVmdkFile->uReferences);
    pVmdkFile->uReferences--;
    if (pVmdkFile->uReferences == 0)
    {
        PVMDKFILE pPrev;
        PVMDKFILE pNext;

        /* Unchain the element from the list. */
        pPrev = pVmdkFile->pPrev;
        pNext = pVmdkFile->pNext;

        if (pNext)
            pNext->pPrev = pPrev;
        if (pPrev)
            pPrev->pNext = pNext;
        else
            pImage->pFiles = pNext;

        rc = vdIfIoIntFileClose(pImage->pIfIo, pVmdkFile->pStorage);

        /* Safety net: only delete files that look like image data, i.e.
         * whose base name has no path components and carries a well-known
         * suffix. Anything else is refused (and logged below). */
        bool fFileDel = pVmdkFile->fDelete;
        if (   pVmdkFile->pszBasename
            && fFileDel)
        {
            const char *pszSuffix = RTPathSuffix(pVmdkFile->pszBasename);
            if (   RTPathHasPath(pVmdkFile->pszBasename)
                || !pszSuffix
                || (   strcmp(pszSuffix, ".vmdk")
                    && strcmp(pszSuffix, ".bin")
                    && strcmp(pszSuffix, ".img")))
                fFileDel = false;
        }

        if (fFileDel)
        {
            int rc2 = vdIfIoIntFileDelete(pImage->pIfIo, pVmdkFile->pszFilename);
            if (RT_SUCCESS(rc))
                rc = rc2; /* keep the first failure */
        }
        else if (pVmdkFile->fDelete)
            LogRel(("VMDK: Denying deletion of %s\n", pVmdkFile->pszBasename));
        RTStrFree((char *)(void *)pVmdkFile->pszFilename);
        if (pVmdkFile->pszBasename)
            RTStrFree((char *)(void *)pVmdkFile->pszBasename);
        RTMemFree(pVmdkFile);
    }

    *ppVmdkFile = NULL;
    return rc;
}
721
722/*#define VMDK_USE_BLOCK_DECOMP_API - test and enable */
723#ifndef VMDK_USE_BLOCK_DECOMP_API
/**
 * Input callback for RTZipDecompCreate: feeds the compressed grain buffer
 * to the decompressor.
 *
 * The IPRT decompressor expects a leading stream-type byte which is not
 * stored on disk; the very first call (signalled by iOffset < 0) injects
 * RTZIPTYPE_ZLIB and positions the read offset past the on-disk marker
 * header (uSector + cbSize) that precedes the compressed data.
 */
static DECLCALLBACK(int) vmdkFileInflateHelper(void *pvUser, void *pvBuf, size_t cbBuf, size_t *pcbBuf)
{
    VMDKCOMPRESSIO *pInflateState = (VMDKCOMPRESSIO *)pvUser;
    size_t cbInjected = 0;

    Assert(cbBuf);
    if (pInflateState->iOffset < 0)
    {
        /* First call: inject the stream-type byte and skip the marker header. */
        *(uint8_t *)pvBuf = RTZIPTYPE_ZLIB;
        pvBuf = (uint8_t *)pvBuf + 1;
        cbBuf--;
        cbInjected = 1;
        pInflateState->iOffset = RT_UOFFSETOF(VMDKMARKER, uType);
    }
    if (!cbBuf)
    {
        if (pcbBuf)
            *pcbBuf = cbInjected;
        return VINF_SUCCESS;
    }
    /* Hand out as much of the remaining compressed data as fits. */
    cbBuf = RT_MIN(cbBuf, pInflateState->cbCompGrain - pInflateState->iOffset);
    memcpy(pvBuf,
           (uint8_t *)pInflateState->pvCompGrain + pInflateState->iOffset,
           cbBuf);
    pInflateState->iOffset += cbBuf;
    Assert(pcbBuf);
    *pcbBuf = cbBuf + cbInjected;
    return VINF_SUCCESS;
}
753#endif
754
755/**
756 * Internal: read from a file and inflate the compressed data,
757 * distinguishing between async and normal operation
758 */
759DECLINLINE(int) vmdkFileInflateSync(PVMDKIMAGE pImage, PVMDKEXTENT pExtent,
760 uint64_t uOffset, void *pvBuf,
761 size_t cbToRead, const void *pcvMarker,
762 uint64_t *puLBA, uint32_t *pcbMarkerData)
763{
764 int rc;
765#ifndef VMDK_USE_BLOCK_DECOMP_API
766 PRTZIPDECOMP pZip = NULL;
767#endif
768 VMDKMARKER *pMarker = (VMDKMARKER *)pExtent->pvCompGrain;
769 size_t cbCompSize, cbActuallyRead;
770
771 if (!pcvMarker)
772 {
773 rc = vdIfIoIntFileReadSync(pImage->pIfIo, pExtent->pFile->pStorage,
774 uOffset, pMarker, RT_UOFFSETOF(VMDKMARKER, uType));
775 if (RT_FAILURE(rc))
776 return rc;
777 }
778 else
779 {
780 memcpy(pMarker, pcvMarker, RT_UOFFSETOF(VMDKMARKER, uType));
781 /* pcvMarker endianness has already been partially transformed, fix it */
782 pMarker->uSector = RT_H2LE_U64(pMarker->uSector);
783 pMarker->cbSize = RT_H2LE_U32(pMarker->cbSize);
784 }
785
786 cbCompSize = RT_LE2H_U32(pMarker->cbSize);
787 if (cbCompSize == 0)
788 {
789 AssertMsgFailed(("VMDK: corrupted marker\n"));
790 return VERR_VD_VMDK_INVALID_FORMAT;
791 }
792
793 /* Sanity check - the expansion ratio should be much less than 2. */
794 Assert(cbCompSize < 2 * cbToRead);
795 if (cbCompSize >= 2 * cbToRead)
796 return VERR_VD_VMDK_INVALID_FORMAT;
797
798 /* Compressed grain marker. Data follows immediately. */
799 rc = vdIfIoIntFileReadSync(pImage->pIfIo, pExtent->pFile->pStorage,
800 uOffset + RT_UOFFSETOF(VMDKMARKER, uType),
801 (uint8_t *)pExtent->pvCompGrain
802 + RT_UOFFSETOF(VMDKMARKER, uType),
803 RT_ALIGN_Z( cbCompSize
804 + RT_UOFFSETOF(VMDKMARKER, uType),
805 512)
806 - RT_UOFFSETOF(VMDKMARKER, uType));
807
808 if (puLBA)
809 *puLBA = RT_LE2H_U64(pMarker->uSector);
810 if (pcbMarkerData)
811 *pcbMarkerData = RT_ALIGN( cbCompSize
812 + RT_UOFFSETOF(VMDKMARKER, uType),
813 512);
814
815#ifdef VMDK_USE_BLOCK_DECOMP_API
816 rc = RTZipBlockDecompress(RTZIPTYPE_ZLIB, 0 /*fFlags*/,
817 pExtent->pvCompGrain, cbCompSize + RT_UOFFSETOF(VMDKMARKER, uType), NULL,
818 pvBuf, cbToRead, &cbActuallyRead);
819#else
820 VMDKCOMPRESSIO InflateState;
821 InflateState.pImage = pImage;
822 InflateState.iOffset = -1;
823 InflateState.cbCompGrain = cbCompSize + RT_UOFFSETOF(VMDKMARKER, uType);
824 InflateState.pvCompGrain = pExtent->pvCompGrain;
825
826 rc = RTZipDecompCreate(&pZip, &InflateState, vmdkFileInflateHelper);
827 if (RT_FAILURE(rc))
828 return rc;
829 rc = RTZipDecompress(pZip, pvBuf, cbToRead, &cbActuallyRead);
830 RTZipDecompDestroy(pZip);
831#endif /* !VMDK_USE_BLOCK_DECOMP_API */
832 if (RT_FAILURE(rc))
833 {
834 if (rc == VERR_ZIP_CORRUPTED)
835 rc = vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: Compressed image is corrupted '%s'"), pExtent->pszFullname);
836 return rc;
837 }
838 if (cbActuallyRead != cbToRead)
839 rc = VERR_VD_VMDK_INVALID_FORMAT;
840 return rc;
841}
842
/**
 * Output callback for RTZipCompCreate: collects compressed bytes into the
 * extent's grain buffer.
 *
 * Mirror image of vmdkFileInflateHelper: the first byte produced by the
 * compressor is the IPRT stream-type byte, which is not stored on disk and
 * is dropped here (iOffset < 0 on the first call); the write offset then
 * starts past the space reserved for the on-disk marker header.
 */
static DECLCALLBACK(int) vmdkFileDeflateHelper(void *pvUser, const void *pvBuf, size_t cbBuf)
{
    VMDKCOMPRESSIO *pDeflateState = (VMDKCOMPRESSIO *)pvUser;

    Assert(cbBuf);
    if (pDeflateState->iOffset < 0)
    {
        /* First call: drop the type byte, reserve room for the marker. */
        pvBuf = (const uint8_t *)pvBuf + 1;
        cbBuf--;
        pDeflateState->iOffset = RT_UOFFSETOF(VMDKMARKER, uType);
    }
    if (!cbBuf)
        return VINF_SUCCESS;
    if (pDeflateState->iOffset + cbBuf > pDeflateState->cbCompGrain)
        return VERR_BUFFER_OVERFLOW;
    memcpy((uint8_t *)pDeflateState->pvCompGrain + pDeflateState->iOffset,
           pvBuf, cbBuf);
    pDeflateState->iOffset += cbBuf;
    return VINF_SUCCESS;
}
863
864/**
865 * Internal: deflate the uncompressed data and write to a file,
866 * distinguishing between async and normal operation
867 */
868DECLINLINE(int) vmdkFileDeflateSync(PVMDKIMAGE pImage, PVMDKEXTENT pExtent,
869 uint64_t uOffset, const void *pvBuf,
870 size_t cbToWrite, uint64_t uLBA,
871 uint32_t *pcbMarkerData)
872{
873 int rc;
874 PRTZIPCOMP pZip = NULL;
875 VMDKCOMPRESSIO DeflateState;
876
877 DeflateState.pImage = pImage;
878 DeflateState.iOffset = -1;
879 DeflateState.cbCompGrain = pExtent->cbCompGrain;
880 DeflateState.pvCompGrain = pExtent->pvCompGrain;
881
882 rc = RTZipCompCreate(&pZip, &DeflateState, vmdkFileDeflateHelper,
883 RTZIPTYPE_ZLIB, RTZIPLEVEL_DEFAULT);
884 if (RT_FAILURE(rc))
885 return rc;
886 rc = RTZipCompress(pZip, pvBuf, cbToWrite);
887 if (RT_SUCCESS(rc))
888 rc = RTZipCompFinish(pZip);
889 RTZipCompDestroy(pZip);
890 if (RT_SUCCESS(rc))
891 {
892 Assert( DeflateState.iOffset > 0
893 && (size_t)DeflateState.iOffset <= DeflateState.cbCompGrain);
894
895 /* pad with zeroes to get to a full sector size */
896 uint32_t uSize = DeflateState.iOffset;
897 if (uSize % 512)
898 {
899 uint32_t uSizeAlign = RT_ALIGN(uSize, 512);
900 memset((uint8_t *)pExtent->pvCompGrain + uSize, '\0',
901 uSizeAlign - uSize);
902 uSize = uSizeAlign;
903 }
904
905 if (pcbMarkerData)
906 *pcbMarkerData = uSize;
907
908 /* Compressed grain marker. Data follows immediately. */
909 VMDKMARKER *pMarker = (VMDKMARKER *)pExtent->pvCompGrain;
910 pMarker->uSector = RT_H2LE_U64(uLBA);
911 pMarker->cbSize = RT_H2LE_U32( DeflateState.iOffset
912 - RT_UOFFSETOF(VMDKMARKER, uType));
913 rc = vdIfIoIntFileWriteSync(pImage->pIfIo, pExtent->pFile->pStorage,
914 uOffset, pMarker, uSize);
915 if (RT_FAILURE(rc))
916 return rc;
917 }
918 return rc;
919}
920
921
922/**
923 * Internal: check if all files are closed, prevent leaking resources.
924 */
925static int vmdkFileCheckAllClose(PVMDKIMAGE pImage)
926{
927 int rc = VINF_SUCCESS, rc2;
928 PVMDKFILE pVmdkFile;
929
930 Assert(pImage->pFiles == NULL);
931 for (pVmdkFile = pImage->pFiles;
932 pVmdkFile != NULL;
933 pVmdkFile = pVmdkFile->pNext)
934 {
935 LogRel(("VMDK: leaking reference to file \"%s\"\n",
936 pVmdkFile->pszFilename));
937 pImage->pFiles = pVmdkFile->pNext;
938
939 rc2 = vmdkFileClose(pImage, &pVmdkFile, pVmdkFile->fDelete);
940
941 if (RT_SUCCESS(rc))
942 rc = rc2;
943 }
944 return rc;
945}
946
947/**
948 * Internal: truncate a string (at a UTF8 code point boundary) and encode the
949 * critical non-ASCII characters.
950 */
951static char *vmdkEncodeString(const char *psz)
952{
953 char szEnc[VMDK_ENCODED_COMMENT_MAX + 3];
954 char *pszDst = szEnc;
955
956 AssertPtr(psz);
957
958 for (; *psz; psz = RTStrNextCp(psz))
959 {
960 char *pszDstPrev = pszDst;
961 RTUNICP Cp = RTStrGetCp(psz);
962 if (Cp == '\\')
963 {
964 pszDst = RTStrPutCp(pszDst, Cp);
965 pszDst = RTStrPutCp(pszDst, Cp);
966 }
967 else if (Cp == '\n')
968 {
969 pszDst = RTStrPutCp(pszDst, '\\');
970 pszDst = RTStrPutCp(pszDst, 'n');
971 }
972 else if (Cp == '\r')
973 {
974 pszDst = RTStrPutCp(pszDst, '\\');
975 pszDst = RTStrPutCp(pszDst, 'r');
976 }
977 else
978 pszDst = RTStrPutCp(pszDst, Cp);
979 if (pszDst - szEnc >= VMDK_ENCODED_COMMENT_MAX - 1)
980 {
981 pszDst = pszDstPrev;
982 break;
983 }
984 }
985 *pszDst = '\0';
986 return RTStrDup(szEnc);
987}
988
989/**
990 * Internal: decode a string and store it into the specified string.
991 */
992static int vmdkDecodeString(const char *pszEncoded, char *psz, size_t cb)
993{
994 int rc = VINF_SUCCESS;
995 char szBuf[4];
996
997 if (!cb)
998 return VERR_BUFFER_OVERFLOW;
999
1000 AssertPtr(psz);
1001
1002 for (; *pszEncoded; pszEncoded = RTStrNextCp(pszEncoded))
1003 {
1004 char *pszDst = szBuf;
1005 RTUNICP Cp = RTStrGetCp(pszEncoded);
1006 if (Cp == '\\')
1007 {
1008 pszEncoded = RTStrNextCp(pszEncoded);
1009 RTUNICP CpQ = RTStrGetCp(pszEncoded);
1010 if (CpQ == 'n')
1011 RTStrPutCp(pszDst, '\n');
1012 else if (CpQ == 'r')
1013 RTStrPutCp(pszDst, '\r');
1014 else if (CpQ == '\0')
1015 {
1016 rc = VERR_VD_VMDK_INVALID_HEADER;
1017 break;
1018 }
1019 else
1020 RTStrPutCp(pszDst, CpQ);
1021 }
1022 else
1023 pszDst = RTStrPutCp(pszDst, Cp);
1024
1025 /* Need to leave space for terminating NUL. */
1026 if ((size_t)(pszDst - szBuf) + 1 >= cb)
1027 {
1028 rc = VERR_BUFFER_OVERFLOW;
1029 break;
1030 }
1031 memcpy(psz, szBuf, pszDst - szBuf);
1032 psz += pszDst - szBuf;
1033 }
1034 *psz = '\0';
1035 return rc;
1036}
1037
1038/**
1039 * Internal: free all buffers associated with grain directories.
1040 */
1041static void vmdkFreeGrainDirectory(PVMDKEXTENT pExtent)
1042{
1043 if (pExtent->pGD)
1044 {
1045 RTMemFree(pExtent->pGD);
1046 pExtent->pGD = NULL;
1047 }
1048 if (pExtent->pRGD)
1049 {
1050 RTMemFree(pExtent->pRGD);
1051 pExtent->pRGD = NULL;
1052 }
1053}
1054
1055/**
1056 * Internal: allocate the compressed/uncompressed buffers for streamOptimized
1057 * images.
1058 */
1059static int vmdkAllocStreamBuffers(PVMDKIMAGE pImage, PVMDKEXTENT pExtent)
1060{
1061 int rc = VINF_SUCCESS;
1062
1063 if (pImage->uImageFlags & VD_VMDK_IMAGE_FLAGS_STREAM_OPTIMIZED)
1064 {
1065 /* streamOptimized extents need a compressed grain buffer, which must
1066 * be big enough to hold uncompressible data (which needs ~8 bytes
1067 * more than the uncompressed data), the marker and padding. */
1068 pExtent->cbCompGrain = RT_ALIGN_Z( VMDK_SECTOR2BYTE(pExtent->cSectorsPerGrain)
1069 + 8 + sizeof(VMDKMARKER), 512);
1070 pExtent->pvCompGrain = RTMemAlloc(pExtent->cbCompGrain);
1071 if (RT_LIKELY(pExtent->pvCompGrain))
1072 {
1073 /* streamOptimized extents need a decompressed grain buffer. */
1074 pExtent->pvGrain = RTMemAlloc(VMDK_SECTOR2BYTE(pExtent->cSectorsPerGrain));
1075 if (!pExtent->pvGrain)
1076 rc = VERR_NO_MEMORY;
1077 }
1078 else
1079 rc = VERR_NO_MEMORY;
1080 }
1081
1082 if (RT_FAILURE(rc))
1083 vmdkFreeStreamBuffers(pExtent);
1084 return rc;
1085}
1086
1087/**
1088 * Internal: allocate all buffers associated with grain directories.
1089 */
1090static int vmdkAllocGrainDirectory(PVMDKIMAGE pImage, PVMDKEXTENT pExtent)
1091{
1092 RT_NOREF1(pImage);
1093 int rc = VINF_SUCCESS;
1094 size_t cbGD = pExtent->cGDEntries * sizeof(uint32_t);
1095
1096 pExtent->pGD = (uint32_t *)RTMemAllocZ(cbGD);
1097 if (RT_LIKELY(pExtent->pGD))
1098 {
1099 if (pExtent->uSectorRGD)
1100 {
1101 pExtent->pRGD = (uint32_t *)RTMemAllocZ(cbGD);
1102 if (RT_UNLIKELY(!pExtent->pRGD))
1103 rc = VERR_NO_MEMORY;
1104 }
1105 }
1106 else
1107 rc = VERR_NO_MEMORY;
1108
1109 if (RT_FAILURE(rc))
1110 vmdkFreeGrainDirectory(pExtent);
1111 return rc;
1112}
1113
1114/**
1115 * Converts the grain directory from little to host endianess.
1116 *
1117 * @returns nothing.
1118 * @param pGD The grain directory.
1119 * @param cGDEntries Number of entries in the grain directory to convert.
1120 */
1121DECLINLINE(void) vmdkGrainDirectoryConvToHost(uint32_t *pGD, uint32_t cGDEntries)
1122{
1123 uint32_t *pGDTmp = pGD;
1124
1125 for (uint32_t i = 0; i < cGDEntries; i++, pGDTmp++)
1126 *pGDTmp = RT_LE2H_U32(*pGDTmp);
1127}
1128
/**
 * Read the grain directory and allocated grain tables verifying them against
 * their back up copies if available.
 *
 * @returns VBox status code.
 * @param   pImage    Image instance data.
 * @param   pExtent   The VMDK extent.
 */
static int vmdkReadGrainDirectory(PVMDKIMAGE pImage, PVMDKEXTENT pExtent)
{
    int rc = VINF_SUCCESS;
    size_t cbGD = pExtent->cGDEntries * sizeof(uint32_t);

    /* Only hosted sparse extents whose directories are at a fixed, known
     * location can be handled here (footer-located directories use
     * VMDK_GD_AT_END and are resolved elsewhere). */
    AssertReturn((   pExtent->enmType == VMDKETYPE_HOSTED_SPARSE
                  && pExtent->uSectorGD != VMDK_GD_AT_END
                  && pExtent->uSectorRGD != VMDK_GD_AT_END), VERR_INTERNAL_ERROR);

    rc = vmdkAllocGrainDirectory(pImage, pExtent);
    if (RT_SUCCESS(rc))
    {
        /* The VMDK 1.1 spec seems to talk about compressed grain directories,
         * but in reality they are not compressed. */
        rc = vdIfIoIntFileReadSync(pImage->pIfIo, pExtent->pFile->pStorage,
                                   VMDK_SECTOR2BYTE(pExtent->uSectorGD),
                                   pExtent->pGD, cbGD);
        if (RT_SUCCESS(rc))
        {
            vmdkGrainDirectoryConvToHost(pExtent->pGD, pExtent->cGDEntries);

            /* Cross-check against the redundant copy unless the caller asked
             * to skip consistency checking. */
            if (   pExtent->uSectorRGD
                && !(pImage->uOpenFlags & VD_OPEN_FLAGS_SKIP_CONSISTENCY_CHECKS))
            {
                /* The VMDK 1.1 spec seems to talk about compressed grain directories,
                 * but in reality they are not compressed. */
                rc = vdIfIoIntFileReadSync(pImage->pIfIo, pExtent->pFile->pStorage,
                                           VMDK_SECTOR2BYTE(pExtent->uSectorRGD),
                                           pExtent->pRGD, cbGD);
                if (RT_SUCCESS(rc))
                {
                    vmdkGrainDirectoryConvToHost(pExtent->pRGD, pExtent->cGDEntries);

                    /* Check grain table and redundant grain table for consistency. */
                    size_t cbGT = pExtent->cGTEntries * sizeof(uint32_t);
                    size_t cbGTBuffers = cbGT; /* Start with space for one GT. */
                    size_t cbGTBuffersMax = _1M;

                    uint32_t *pTmpGT1 = (uint32_t *)RTMemAlloc(cbGTBuffers);
                    uint32_t *pTmpGT2 = (uint32_t *)RTMemAlloc(cbGTBuffers);

                    /* On allocation failure the loop below still runs but all
                     * dereferencing work is guarded by RT_SUCCESS(rc). */
                    if (   !pTmpGT1
                        || !pTmpGT2)
                        rc = VERR_NO_MEMORY;

                    size_t i = 0;
                    uint32_t *pGDTmp = pExtent->pGD;
                    uint32_t *pRGDTmp = pExtent->pRGD;

                    /* Loop through all entries. */
                    while (i < pExtent->cGDEntries)
                    {
                        uint32_t uGTStart = *pGDTmp;
                        uint32_t uRGTStart = *pRGDTmp;
                        size_t cbGTRead = cbGT;

                        /* If no grain table is allocated skip the entry. */
                        if (*pGDTmp == 0 && *pRGDTmp == 0)
                        {
                            i++;
                            continue;
                        }

                        if (*pGDTmp == 0 || *pRGDTmp == 0 || *pGDTmp == *pRGDTmp)
                        {
                            /* Just one grain directory entry refers to a not yet allocated
                             * grain table or both grain directory copies refer to the same
                             * grain table. Not allowed. */
                            rc = vdIfError(pImage->pIfError, VERR_VD_VMDK_INVALID_HEADER, RT_SRC_POS,
                                           N_("VMDK: inconsistent references to grain directory in '%s'"), pExtent->pszFullname);
                            break;
                        }

                        i++;
                        pGDTmp++;
                        pRGDTmp++;

                        /*
                         * Read a few tables at once if adjacent to decrease the number
                         * of I/O requests. Read at maximum 1MB at once.
                         */
                        while (   i < pExtent->cGDEntries
                               && cbGTRead < cbGTBuffersMax)
                        {
                            /* If no grain table is allocated skip the entry. */
                            if (*pGDTmp == 0 && *pRGDTmp == 0)
                            {
                                i++;
                                continue;
                            }

                            if (*pGDTmp == 0 || *pRGDTmp == 0 || *pGDTmp == *pRGDTmp)
                            {
                                /* Just one grain directory entry refers to a not yet allocated
                                 * grain table or both grain directory copies refer to the same
                                 * grain table. Not allowed. */
                                rc = vdIfError(pImage->pIfError, VERR_VD_VMDK_INVALID_HEADER, RT_SRC_POS,
                                               N_("VMDK: inconsistent references to grain directory in '%s'"), pExtent->pszFullname);
                                break;
                            }

                            /* Check that the start offsets are adjacent.*/
                            if (   VMDK_SECTOR2BYTE(uGTStart) + cbGTRead != VMDK_SECTOR2BYTE(*pGDTmp)
                                || VMDK_SECTOR2BYTE(uRGTStart) + cbGTRead != VMDK_SECTOR2BYTE(*pRGDTmp))
                                break;

                            i++;
                            pGDTmp++;
                            pRGDTmp++;
                            cbGTRead += cbGT;
                        }

                        /* Increase buffers if required. */
                        if (   RT_SUCCESS(rc)
                            && cbGTBuffers < cbGTRead)
                        {
                            /* NOTE(review): cbGTBuffers is never updated after a
                             * successful realloc, so this grows the buffers again on
                             * every batched iteration - confirm whether that is
                             * intentional or an oversight. */
                            uint32_t *pTmp;
                            pTmp = (uint32_t *)RTMemRealloc(pTmpGT1, cbGTRead);
                            if (pTmp)
                            {
                                pTmpGT1 = pTmp;
                                pTmp = (uint32_t *)RTMemRealloc(pTmpGT2, cbGTRead);
                                if (pTmp)
                                    pTmpGT2 = pTmp;
                                else
                                    rc = VERR_NO_MEMORY;
                            }
                            else
                                rc = VERR_NO_MEMORY;

                            if (rc == VERR_NO_MEMORY)
                            {
                                /* Reset to the old values. */
                                /* NOTE(review): only i is rewound here; pGDTmp/pRGDTmp
                                 * keep their advanced positions, so index and pointers
                                 * go out of sync on this OOM fallback path - verify. */
                                rc = VINF_SUCCESS;
                                i -= cbGTRead / cbGT;
                                cbGTRead = cbGT;

                                /* Don't try to increase the buffer again in the next run. */
                                cbGTBuffersMax = cbGTBuffers;
                            }
                        }

                        if (RT_SUCCESS(rc))
                        {
                            /* The VMDK 1.1 spec seems to talk about compressed grain tables,
                             * but in reality they are not compressed. */
                            rc = vdIfIoIntFileReadSync(pImage->pIfIo, pExtent->pFile->pStorage,
                                                       VMDK_SECTOR2BYTE(uGTStart),
                                                       pTmpGT1, cbGTRead);
                            if (RT_FAILURE(rc))
                            {
                                rc = vdIfError(pImage->pIfError, rc, RT_SRC_POS,
                                               N_("VMDK: error reading grain table in '%s'"), pExtent->pszFullname);
                                break;
                            }
                            /* The VMDK 1.1 spec seems to talk about compressed grain tables,
                             * but in reality they are not compressed. */
                            rc = vdIfIoIntFileReadSync(pImage->pIfIo, pExtent->pFile->pStorage,
                                                       VMDK_SECTOR2BYTE(uRGTStart),
                                                       pTmpGT2, cbGTRead);
                            if (RT_FAILURE(rc))
                            {
                                rc = vdIfError(pImage->pIfError, rc, RT_SRC_POS,
                                               N_("VMDK: error reading backup grain table in '%s'"), pExtent->pszFullname);
                                break;
                            }
                            /* Primary and redundant grain tables must be identical. */
                            if (memcmp(pTmpGT1, pTmpGT2, cbGTRead))
                            {
                                rc = vdIfError(pImage->pIfError, VERR_VD_VMDK_INVALID_HEADER, RT_SRC_POS,
                                               N_("VMDK: inconsistency between grain table and backup grain table in '%s'"), pExtent->pszFullname);
                                break;
                            }
                        }
                    } /* while (i < pExtent->cGDEntries) */

                    /** @todo figure out what to do for unclean VMDKs. */
                    if (pTmpGT1)
                        RTMemFree(pTmpGT1);
                    if (pTmpGT2)
                        RTMemFree(pTmpGT2);
                }
                else
                    rc = vdIfError(pImage->pIfError, rc, RT_SRC_POS,
                                   N_("VMDK: could not read redundant grain directory in '%s'"), pExtent->pszFullname);
            }
        }
        else
            rc = vdIfError(pImage->pIfError, rc, RT_SRC_POS,
                           N_("VMDK: could not read grain directory in '%s': %Rrc"), pExtent->pszFullname, rc);
    }

    if (RT_FAILURE(rc))
        vmdkFreeGrainDirectory(pExtent);
    return rc;
}
1332
1333/**
1334 * Creates a new grain directory for the given extent at the given start sector.
1335 *
1336 * @returns VBox status code.
1337 * @param pImage Image instance data.
1338 * @param pExtent The VMDK extent.
1339 * @param uStartSector Where the grain directory should be stored in the image.
1340 * @param fPreAlloc Flag whether to pre allocate the grain tables at this point.
1341 */
1342static int vmdkCreateGrainDirectory(PVMDKIMAGE pImage, PVMDKEXTENT pExtent,
1343 uint64_t uStartSector, bool fPreAlloc)
1344{
1345 int rc = VINF_SUCCESS;
1346 unsigned i;
1347 size_t cbGD = pExtent->cGDEntries * sizeof(uint32_t);
1348 size_t cbGDRounded = RT_ALIGN_64(cbGD, 512);
1349 size_t cbGTRounded;
1350 uint64_t cbOverhead;
1351
1352 if (fPreAlloc)
1353 {
1354 cbGTRounded = RT_ALIGN_64(pExtent->cGDEntries * pExtent->cGTEntries * sizeof(uint32_t), 512);
1355 cbOverhead = VMDK_SECTOR2BYTE(uStartSector) + cbGDRounded + cbGTRounded;
1356 }
1357 else
1358 {
1359 /* Use a dummy start sector for layout computation. */
1360 if (uStartSector == VMDK_GD_AT_END)
1361 uStartSector = 1;
1362 cbGTRounded = 0;
1363 cbOverhead = VMDK_SECTOR2BYTE(uStartSector) + cbGDRounded;
1364 }
1365
1366 /* For streamOptimized extents there is only one grain directory,
1367 * and for all others take redundant grain directory into account. */
1368 if (pImage->uImageFlags & VD_VMDK_IMAGE_FLAGS_STREAM_OPTIMIZED)
1369 {
1370 cbOverhead = RT_ALIGN_64(cbOverhead,
1371 VMDK_SECTOR2BYTE(pExtent->cSectorsPerGrain));
1372 }
1373 else
1374 {
1375 cbOverhead += cbGDRounded + cbGTRounded;
1376 cbOverhead = RT_ALIGN_64(cbOverhead,
1377 VMDK_SECTOR2BYTE(pExtent->cSectorsPerGrain));
1378 rc = vdIfIoIntFileSetSize(pImage->pIfIo, pExtent->pFile->pStorage, cbOverhead);
1379 }
1380
1381 if (RT_SUCCESS(rc))
1382 {
1383 pExtent->uAppendPosition = cbOverhead;
1384 pExtent->cOverheadSectors = VMDK_BYTE2SECTOR(cbOverhead);
1385
1386 if (pImage->uImageFlags & VD_VMDK_IMAGE_FLAGS_STREAM_OPTIMIZED)
1387 {
1388 pExtent->uSectorRGD = 0;
1389 pExtent->uSectorGD = uStartSector;
1390 }
1391 else
1392 {
1393 pExtent->uSectorRGD = uStartSector;
1394 pExtent->uSectorGD = uStartSector + VMDK_BYTE2SECTOR(cbGDRounded + cbGTRounded);
1395 }
1396
1397 rc = vmdkAllocStreamBuffers(pImage, pExtent);
1398 if (RT_SUCCESS(rc))
1399 {
1400 rc = vmdkAllocGrainDirectory(pImage, pExtent);
1401 if ( RT_SUCCESS(rc)
1402 && fPreAlloc)
1403 {
1404 uint32_t uGTSectorLE;
1405 uint64_t uOffsetSectors;
1406
1407 if (pExtent->pRGD)
1408 {
1409 uOffsetSectors = pExtent->uSectorRGD + VMDK_BYTE2SECTOR(cbGDRounded);
1410 for (i = 0; i < pExtent->cGDEntries; i++)
1411 {
1412 pExtent->pRGD[i] = uOffsetSectors;
1413 uGTSectorLE = RT_H2LE_U64(uOffsetSectors);
1414 /* Write the redundant grain directory entry to disk. */
1415 rc = vdIfIoIntFileWriteSync(pImage->pIfIo, pExtent->pFile->pStorage,
1416 VMDK_SECTOR2BYTE(pExtent->uSectorRGD) + i * sizeof(uGTSectorLE),
1417 &uGTSectorLE, sizeof(uGTSectorLE));
1418 if (RT_FAILURE(rc))
1419 {
1420 rc = vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: cannot write new redundant grain directory entry in '%s'"), pExtent->pszFullname);
1421 break;
1422 }
1423 uOffsetSectors += VMDK_BYTE2SECTOR(pExtent->cGTEntries * sizeof(uint32_t));
1424 }
1425 }
1426
1427 if (RT_SUCCESS(rc))
1428 {
1429 uOffsetSectors = pExtent->uSectorGD + VMDK_BYTE2SECTOR(cbGDRounded);
1430 for (i = 0; i < pExtent->cGDEntries; i++)
1431 {
1432 pExtent->pGD[i] = uOffsetSectors;
1433 uGTSectorLE = RT_H2LE_U64(uOffsetSectors);
1434 /* Write the grain directory entry to disk. */
1435 rc = vdIfIoIntFileWriteSync(pImage->pIfIo, pExtent->pFile->pStorage,
1436 VMDK_SECTOR2BYTE(pExtent->uSectorGD) + i * sizeof(uGTSectorLE),
1437 &uGTSectorLE, sizeof(uGTSectorLE));
1438 if (RT_FAILURE(rc))
1439 {
1440 rc = vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: cannot write new grain directory entry in '%s'"), pExtent->pszFullname);
1441 break;
1442 }
1443 uOffsetSectors += VMDK_BYTE2SECTOR(pExtent->cGTEntries * sizeof(uint32_t));
1444 }
1445 }
1446 }
1447 }
1448 }
1449
1450 if (RT_FAILURE(rc))
1451 vmdkFreeGrainDirectory(pExtent);
1452 return rc;
1453}
1454
1455/**
1456 * Unquotes the given string returning the result in a separate buffer.
1457 *
1458 * @returns VBox status code.
1459 * @param pImage The VMDK image state.
1460 * @param pszStr The string to unquote.
1461 * @param ppszUnquoted Where to store the return value, use RTMemTmpFree to
1462 * free.
1463 * @param ppszNext Where to store the pointer to any character following
1464 * the quoted value, optional.
1465 */
1466static int vmdkStringUnquote(PVMDKIMAGE pImage, const char *pszStr,
1467 char **ppszUnquoted, char **ppszNext)
1468{
1469 const char *pszStart = pszStr;
1470 char *pszQ;
1471 char *pszUnquoted;
1472
1473 /* Skip over whitespace. */
1474 while (*pszStr == ' ' || *pszStr == '\t')
1475 pszStr++;
1476
1477 if (*pszStr != '"')
1478 {
1479 pszQ = (char *)pszStr;
1480 while (*pszQ && *pszQ != ' ' && *pszQ != '\t')
1481 pszQ++;
1482 }
1483 else
1484 {
1485 pszStr++;
1486 pszQ = (char *)strchr(pszStr, '"');
1487 if (pszQ == NULL)
1488 return vdIfError(pImage->pIfError, VERR_VD_VMDK_INVALID_HEADER, RT_SRC_POS, N_("VMDK: incorrectly quoted value in descriptor in '%s' (raw value %s)"),
1489 pImage->pszFilename, pszStart);
1490 }
1491
1492 pszUnquoted = (char *)RTMemTmpAlloc(pszQ - pszStr + 1);
1493 if (!pszUnquoted)
1494 return VERR_NO_MEMORY;
1495 memcpy(pszUnquoted, pszStr, pszQ - pszStr);
1496 pszUnquoted[pszQ - pszStr] = '\0';
1497 *ppszUnquoted = pszUnquoted;
1498 if (ppszNext)
1499 *ppszNext = pszQ + 1;
1500 return VINF_SUCCESS;
1501}
1502
1503static int vmdkDescInitStr(PVMDKIMAGE pImage, PVMDKDESCRIPTOR pDescriptor,
1504 const char *pszLine)
1505{
1506 char *pEnd = pDescriptor->aLines[pDescriptor->cLines];
1507 ssize_t cbDiff = strlen(pszLine) + 1;
1508
1509 if ( pDescriptor->cLines >= VMDK_DESCRIPTOR_LINES_MAX - 1
1510 && pEnd - pDescriptor->aLines[0] > (ptrdiff_t)pDescriptor->cbDescAlloc - cbDiff)
1511 return vdIfError(pImage->pIfError, VERR_BUFFER_OVERFLOW, RT_SRC_POS, N_("VMDK: descriptor too big in '%s'"), pImage->pszFilename);
1512
1513 memcpy(pEnd, pszLine, cbDiff);
1514 pDescriptor->cLines++;
1515 pDescriptor->aLines[pDescriptor->cLines] = pEnd + cbDiff;
1516 pDescriptor->fDirty = true;
1517
1518 return VINF_SUCCESS;
1519}
1520
1521static bool vmdkDescGetStr(PVMDKDESCRIPTOR pDescriptor, unsigned uStart,
1522 const char *pszKey, const char **ppszValue)
1523{
1524 size_t cbKey = strlen(pszKey);
1525 const char *pszValue;
1526
1527 while (uStart != 0)
1528 {
1529 if (!strncmp(pDescriptor->aLines[uStart], pszKey, cbKey))
1530 {
1531 /* Key matches, check for a '=' (preceded by whitespace). */
1532 pszValue = pDescriptor->aLines[uStart] + cbKey;
1533 while (*pszValue == ' ' || *pszValue == '\t')
1534 pszValue++;
1535 if (*pszValue == '=')
1536 {
1537 *ppszValue = pszValue + 1;
1538 break;
1539 }
1540 }
1541 uStart = pDescriptor->aNextLines[uStart];
1542 }
1543 return !!uStart;
1544}
1545
/**
 * Internal: set, replace or delete a key=value pair in a descriptor section.
 *
 * The section is the chain of lines starting at @a uStart, linked through
 * pDescriptor->aNextLines. Passing NULL for @a pszValue removes the key's
 * line; otherwise the value is replaced in place, or a new "key=value" line
 * is appended after the section's last line if the key does not exist yet.
 * All line pointers and section start indices are adjusted accordingly.
 *
 * @returns VBox status code.
 * @retval  VERR_BUFFER_OVERFLOW if the descriptor buffer or line table is full.
 * @param   pImage       The VMDK image state (for error reporting).
 * @param   pDescriptor  The descriptor to modify.
 * @param   uStart       Index of the first line of the section to search.
 * @param   pszKey       The key to set.
 * @param   pszValue     The new value, or NULL to remove the key.
 */
static int vmdkDescSetStr(PVMDKIMAGE pImage, PVMDKDESCRIPTOR pDescriptor,
                          unsigned uStart,
                          const char *pszKey, const char *pszValue)
{
    char *pszTmp = NULL; /* (MSC naturally cannot figure this isn't used uninitialized) */
    size_t cbKey = strlen(pszKey);
    unsigned uLast = 0;

    /* Search the section for the key; remember the section's last line so a
     * new entry can be appended in the right place if the key is missing. */
    while (uStart != 0)
    {
        if (!strncmp(pDescriptor->aLines[uStart], pszKey, cbKey))
        {
            /* Key matches, check for a '=' (preceded by whitespace). */
            pszTmp = pDescriptor->aLines[uStart] + cbKey;
            while (*pszTmp == ' ' || *pszTmp == '\t')
                pszTmp++;
            if (*pszTmp == '=')
            {
                pszTmp++;
                /** @todo r=bird: Doesn't skipping trailing blanks here just cause unecessary
                 *        bloat and potentially out of space error? */
                while (*pszTmp == ' ' || *pszTmp == '\t')
                    pszTmp++;
                break;
            }
        }
        if (!pDescriptor->aNextLines[uStart])
            uLast = uStart;
        uStart = pDescriptor->aNextLines[uStart];
    }
    if (uStart)
    {
        if (pszValue)
        {
            /* Key already exists, replace existing value. */
            size_t cbOldVal = strlen(pszTmp);
            size_t cbNewVal = strlen(pszValue);
            ssize_t cbDiff = cbNewVal - cbOldVal;
            /* Check for buffer overflow. */
            if (    pDescriptor->aLines[pDescriptor->cLines] - pDescriptor->aLines[0]
                >  (ptrdiff_t)pDescriptor->cbDescAlloc - cbDiff)
                return vdIfError(pImage->pIfError, VERR_BUFFER_OVERFLOW, RT_SRC_POS, N_("VMDK: descriptor too big in '%s'"), pImage->pszFilename);

            /* Shift the buffer tail to fit the new value, splice it in and
             * rebase all following line pointers by the size difference. */
            memmove(pszTmp + cbNewVal, pszTmp + cbOldVal,
                    pDescriptor->aLines[pDescriptor->cLines] - pszTmp - cbOldVal);
            memcpy(pszTmp, pszValue, cbNewVal + 1);
            for (unsigned i = uStart + 1; i <= pDescriptor->cLines; i++)
                pDescriptor->aLines[i] += cbDiff;
        }
        else
        {
            /* Key exists and shall be removed: close the gap in the text
             * buffer and shift the line/next-line tables down by one. */
            memmove(pDescriptor->aLines[uStart], pDescriptor->aLines[uStart+1],
                    pDescriptor->aLines[pDescriptor->cLines] - pDescriptor->aLines[uStart+1] + 1);
            for (unsigned i = uStart + 1; i <= pDescriptor->cLines; i++)
            {
                pDescriptor->aLines[i-1] = pDescriptor->aLines[i];
                if (pDescriptor->aNextLines[i])
                    pDescriptor->aNextLines[i-1] = pDescriptor->aNextLines[i] - 1;
                else
                    pDescriptor->aNextLines[i-1] = 0;
            }
            pDescriptor->cLines--;
            /* Adjust starting line numbers of following descriptor sections. */
            if (uStart < pDescriptor->uFirstExtent)
                pDescriptor->uFirstExtent--;
            if (uStart < pDescriptor->uFirstDDB)
                pDescriptor->uFirstDDB--;
        }
    }
    else
    {
        /* Key doesn't exist, append after the last entry in this category. */
        if (!pszValue)
        {
            /* Key doesn't exist, and it should be removed. Simply a no-op. */
            return VINF_SUCCESS;
        }
        cbKey = strlen(pszKey);
        size_t cbValue = strlen(pszValue);
        ssize_t cbDiff = cbKey + 1 + cbValue + 1;
        /* Check for buffer overflow. */
        if (   (pDescriptor->cLines >= VMDK_DESCRIPTOR_LINES_MAX - 1)
            || (   pDescriptor->aLines[pDescriptor->cLines]
                -  pDescriptor->aLines[0] > (ptrdiff_t)pDescriptor->cbDescAlloc - cbDiff))
            return vdIfError(pImage->pIfError, VERR_BUFFER_OVERFLOW, RT_SRC_POS, N_("VMDK: descriptor too big in '%s'"), pImage->pszFilename);
        /* Shift the line/next-line tables up by one to open a slot right
         * after the section's last line. */
        for (unsigned i = pDescriptor->cLines + 1; i > uLast + 1; i--)
        {
            pDescriptor->aLines[i] = pDescriptor->aLines[i - 1];
            if (pDescriptor->aNextLines[i - 1])
                pDescriptor->aNextLines[i] = pDescriptor->aNextLines[i - 1] + 1;
            else
                pDescriptor->aNextLines[i] = 0;
        }
        uStart = uLast + 1;
        pDescriptor->aNextLines[uLast] = uStart;
        pDescriptor->aNextLines[uStart] = 0;
        pDescriptor->cLines++;
        pszTmp = pDescriptor->aLines[uStart];
        /* Open a gap in the text buffer and write "key=value\0" into it. */
        memmove(pszTmp + cbDiff, pszTmp,
                pDescriptor->aLines[pDescriptor->cLines] - pszTmp);
        memcpy(pDescriptor->aLines[uStart], pszKey, cbKey);
        pDescriptor->aLines[uStart][cbKey] = '=';
        memcpy(pDescriptor->aLines[uStart] + cbKey + 1, pszValue, cbValue + 1);
        for (unsigned i = uStart + 1; i <= pDescriptor->cLines; i++)
            pDescriptor->aLines[i] += cbDiff;

        /* Adjust starting line numbers of following descriptor sections. */
        if (uStart <= pDescriptor->uFirstExtent)
            pDescriptor->uFirstExtent++;
        if (uStart <= pDescriptor->uFirstDDB)
            pDescriptor->uFirstDDB++;
    }
    pDescriptor->fDirty = true;
    return VINF_SUCCESS;
}
1661
1662static int vmdkDescBaseGetU32(PVMDKDESCRIPTOR pDescriptor, const char *pszKey,
1663 uint32_t *puValue)
1664{
1665 const char *pszValue;
1666
1667 if (!vmdkDescGetStr(pDescriptor, pDescriptor->uFirstDesc, pszKey,
1668 &pszValue))
1669 return VERR_VD_VMDK_VALUE_NOT_FOUND;
1670 return RTStrToUInt32Ex(pszValue, NULL, 10, puValue);
1671}
1672
1673/**
1674 * Returns the value of the given key as a string allocating the necessary memory.
1675 *
1676 * @returns VBox status code.
1677 * @retval VERR_VD_VMDK_VALUE_NOT_FOUND if the value could not be found.
1678 * @param pImage The VMDK image state.
1679 * @param pDescriptor The descriptor to fetch the value from.
1680 * @param pszKey The key to get the value from.
1681 * @param ppszValue Where to store the return value, use RTMemTmpFree to
1682 * free.
1683 */
1684static int vmdkDescBaseGetStr(PVMDKIMAGE pImage, PVMDKDESCRIPTOR pDescriptor,
1685 const char *pszKey, char **ppszValue)
1686{
1687 const char *pszValue;
1688 char *pszValueUnquoted;
1689
1690 if (!vmdkDescGetStr(pDescriptor, pDescriptor->uFirstDesc, pszKey,
1691 &pszValue))
1692 return VERR_VD_VMDK_VALUE_NOT_FOUND;
1693 int rc = vmdkStringUnquote(pImage, pszValue, &pszValueUnquoted, NULL);
1694 if (RT_FAILURE(rc))
1695 return rc;
1696 *ppszValue = pszValueUnquoted;
1697 return rc;
1698}
1699
1700static int vmdkDescBaseSetStr(PVMDKIMAGE pImage, PVMDKDESCRIPTOR pDescriptor,
1701 const char *pszKey, const char *pszValue)
1702{
1703 char *pszValueQuoted;
1704
1705 RTStrAPrintf(&pszValueQuoted, "\"%s\"", pszValue);
1706 if (!pszValueQuoted)
1707 return VERR_NO_STR_MEMORY;
1708 int rc = vmdkDescSetStr(pImage, pDescriptor, pDescriptor->uFirstDesc, pszKey,
1709 pszValueQuoted);
1710 RTStrFree(pszValueQuoted);
1711 return rc;
1712}
1713
/**
 * Internal: remove the first line of the extent section from the descriptor.
 *
 * NOTE(review): only the *first* extent line is removed; callers appear to
 * use this to drop a placeholder ("dummy") entry - confirm against callers.
 *
 * @param   pImage       The VMDK image state (unused).
 * @param   pDescriptor  The descriptor to modify.
 */
static void vmdkDescExtRemoveDummy(PVMDKIMAGE pImage,
                                   PVMDKDESCRIPTOR pDescriptor)
{
    RT_NOREF1(pImage);
    unsigned uEntry = pDescriptor->uFirstExtent;
    ssize_t cbDiff;

    /* Nothing to do when there is no extent section. */
    if (!uEntry)
        return;

    cbDiff = strlen(pDescriptor->aLines[uEntry]) + 1;
    /* Move everything including \0 in the entry marking the end of buffer. */
    memmove(pDescriptor->aLines[uEntry], pDescriptor->aLines[uEntry + 1],
            pDescriptor->aLines[pDescriptor->cLines] - pDescriptor->aLines[uEntry + 1] + 1);
    /* Rebase the following line pointers and shift the next-line links. */
    for (unsigned i = uEntry + 1; i <= pDescriptor->cLines; i++)
    {
        pDescriptor->aLines[i - 1] = pDescriptor->aLines[i] - cbDiff;
        if (pDescriptor->aNextLines[i])
            pDescriptor->aNextLines[i - 1] = pDescriptor->aNextLines[i] - 1;
        else
            pDescriptor->aNextLines[i - 1] = 0;
    }
    pDescriptor->cLines--;
    /* The DDB section moved up by one line. */
    if (pDescriptor->uFirstDDB)
        pDescriptor->uFirstDDB--;

    return;
}
1742
/**
 * Internal: append a new extent line to the descriptor's extent section.
 *
 * @returns VBox status code.
 * @retval  VERR_BUFFER_OVERFLOW if the descriptor buffer or line table is full.
 * @param   pImage           The VMDK image state (for error reporting).
 * @param   pDescriptor      The descriptor to modify.
 * @param   enmAccess        Access mode of the extent (NOACCESS/RDONLY/RW).
 * @param   cNominalSectors  Nominal size of the extent in sectors.
 * @param   enmType          Extent type (SPARSE/FLAT/ZERO/VMFS).
 * @param   pszBasename      Backing file name (unused for ZERO extents).
 * @param   uSectorOffset    Start offset within the file (FLAT extents only).
 */
static int vmdkDescExtInsert(PVMDKIMAGE pImage, PVMDKDESCRIPTOR pDescriptor,
                             VMDKACCESS enmAccess, uint64_t cNominalSectors,
                             VMDKETYPE enmType, const char *pszBasename,
                             uint64_t uSectorOffset)
{
    static const char *apszAccess[] = { "NOACCESS", "RDONLY", "RW" };
    static const char *apszType[] = { "", "SPARSE", "FLAT", "ZERO", "VMFS" };
    char *pszTmp;
    unsigned uStart = pDescriptor->uFirstExtent, uLast = 0;
    char szExt[1024];
    ssize_t cbDiff;

    Assert((unsigned)enmAccess < RT_ELEMENTS(apszAccess));
    Assert((unsigned)enmType < RT_ELEMENTS(apszType));

    /* Find last entry in extent description. */
    while (uStart)
    {
        if (!pDescriptor->aNextLines[uStart])
            uLast = uStart;
        uStart = pDescriptor->aNextLines[uStart];
    }

    if (enmType == VMDKETYPE_ZERO)
    {
        /* ZERO extents have no backing file. */
        RTStrPrintf(szExt, sizeof(szExt), "%s %llu %s ", apszAccess[enmAccess],
                    cNominalSectors, apszType[enmType]);
    }
    else if (enmType == VMDKETYPE_FLAT)
    {
        /* FLAT extents carry an explicit start offset within the file. */
        RTStrPrintf(szExt, sizeof(szExt), "%s %llu %s \"%s\" %llu",
                    apszAccess[enmAccess], cNominalSectors,
                    apszType[enmType], pszBasename, uSectorOffset);
    }
    else
    {
        RTStrPrintf(szExt, sizeof(szExt), "%s %llu %s \"%s\"",
                    apszAccess[enmAccess], cNominalSectors,
                    apszType[enmType], pszBasename);
    }
    cbDiff = strlen(szExt) + 1;

    /* Check for buffer overflow. */
    if (   (pDescriptor->cLines >= VMDK_DESCRIPTOR_LINES_MAX - 1)
        || (   pDescriptor->aLines[pDescriptor->cLines]
            -  pDescriptor->aLines[0] > (ptrdiff_t)pDescriptor->cbDescAlloc - cbDiff))
        return vdIfError(pImage->pIfError, VERR_BUFFER_OVERFLOW, RT_SRC_POS, N_("VMDK: descriptor too big in '%s'"), pImage->pszFilename);

    /* Shift the line/next-line tables up by one to open a slot after uLast. */
    for (unsigned i = pDescriptor->cLines + 1; i > uLast + 1; i--)
    {
        pDescriptor->aLines[i] = pDescriptor->aLines[i - 1];
        if (pDescriptor->aNextLines[i - 1])
            pDescriptor->aNextLines[i] = pDescriptor->aNextLines[i - 1] + 1;
        else
            pDescriptor->aNextLines[i] = 0;
    }
    uStart = uLast + 1;
    pDescriptor->aNextLines[uLast] = uStart;
    pDescriptor->aNextLines[uStart] = 0;
    pDescriptor->cLines++;
    pszTmp = pDescriptor->aLines[uStart];
    /* Open a gap in the text buffer and copy the new extent line in. */
    memmove(pszTmp + cbDiff, pszTmp,
            pDescriptor->aLines[pDescriptor->cLines] - pszTmp);
    memcpy(pDescriptor->aLines[uStart], szExt, cbDiff);
    /* Rebase all following line pointers. */
    for (unsigned i = uStart + 1; i <= pDescriptor->cLines; i++)
        pDescriptor->aLines[i] += cbDiff;

    /* Adjust starting line numbers of following descriptor sections. */
    if (uStart <= pDescriptor->uFirstDDB)
        pDescriptor->uFirstDDB++;

    pDescriptor->fDirty = true;
    return VINF_SUCCESS;
}
1817
1818/**
1819 * Returns the value of the given key from the DDB as a string allocating
1820 * the necessary memory.
1821 *
1822 * @returns VBox status code.
1823 * @retval VERR_VD_VMDK_VALUE_NOT_FOUND if the value could not be found.
1824 * @param pImage The VMDK image state.
1825 * @param pDescriptor The descriptor to fetch the value from.
1826 * @param pszKey The key to get the value from.
1827 * @param ppszValue Where to store the return value, use RTMemTmpFree to
1828 * free.
1829 */
1830static int vmdkDescDDBGetStr(PVMDKIMAGE pImage, PVMDKDESCRIPTOR pDescriptor,
1831 const char *pszKey, char **ppszValue)
1832{
1833 const char *pszValue;
1834 char *pszValueUnquoted;
1835
1836 if (!vmdkDescGetStr(pDescriptor, pDescriptor->uFirstDDB, pszKey,
1837 &pszValue))
1838 return VERR_VD_VMDK_VALUE_NOT_FOUND;
1839 int rc = vmdkStringUnquote(pImage, pszValue, &pszValueUnquoted, NULL);
1840 if (RT_FAILURE(rc))
1841 return rc;
1842 *ppszValue = pszValueUnquoted;
1843 return rc;
1844}
1845
1846static int vmdkDescDDBGetU32(PVMDKIMAGE pImage, PVMDKDESCRIPTOR pDescriptor,
1847 const char *pszKey, uint32_t *puValue)
1848{
1849 const char *pszValue;
1850 char *pszValueUnquoted;
1851
1852 if (!vmdkDescGetStr(pDescriptor, pDescriptor->uFirstDDB, pszKey,
1853 &pszValue))
1854 return VERR_VD_VMDK_VALUE_NOT_FOUND;
1855 int rc = vmdkStringUnquote(pImage, pszValue, &pszValueUnquoted, NULL);
1856 if (RT_FAILURE(rc))
1857 return rc;
1858 rc = RTStrToUInt32Ex(pszValueUnquoted, NULL, 10, puValue);
1859 RTMemTmpFree(pszValueUnquoted);
1860 return rc;
1861}
1862
1863static int vmdkDescDDBGetUuid(PVMDKIMAGE pImage, PVMDKDESCRIPTOR pDescriptor,
1864 const char *pszKey, PRTUUID pUuid)
1865{
1866 const char *pszValue;
1867 char *pszValueUnquoted;
1868
1869 if (!vmdkDescGetStr(pDescriptor, pDescriptor->uFirstDDB, pszKey,
1870 &pszValue))
1871 return VERR_VD_VMDK_VALUE_NOT_FOUND;
1872 int rc = vmdkStringUnquote(pImage, pszValue, &pszValueUnquoted, NULL);
1873 if (RT_FAILURE(rc))
1874 return rc;
1875 rc = RTUuidFromStr(pUuid, pszValueUnquoted);
1876 RTMemTmpFree(pszValueUnquoted);
1877 return rc;
1878}
1879
1880static int vmdkDescDDBSetStr(PVMDKIMAGE pImage, PVMDKDESCRIPTOR pDescriptor,
1881 const char *pszKey, const char *pszVal)
1882{
1883 int rc;
1884 char *pszValQuoted;
1885
1886 if (pszVal)
1887 {
1888 RTStrAPrintf(&pszValQuoted, "\"%s\"", pszVal);
1889 if (!pszValQuoted)
1890 return VERR_NO_STR_MEMORY;
1891 }
1892 else
1893 pszValQuoted = NULL;
1894 rc = vmdkDescSetStr(pImage, pDescriptor, pDescriptor->uFirstDDB, pszKey,
1895 pszValQuoted);
1896 if (pszValQuoted)
1897 RTStrFree(pszValQuoted);
1898 return rc;
1899}
1900
1901static int vmdkDescDDBSetUuid(PVMDKIMAGE pImage, PVMDKDESCRIPTOR pDescriptor,
1902 const char *pszKey, PCRTUUID pUuid)
1903{
1904 char *pszUuid;
1905
1906 RTStrAPrintf(&pszUuid, "\"%RTuuid\"", pUuid);
1907 if (!pszUuid)
1908 return VERR_NO_STR_MEMORY;
1909 int rc = vmdkDescSetStr(pImage, pDescriptor, pDescriptor->uFirstDDB, pszKey,
1910 pszUuid);
1911 RTStrFree(pszUuid);
1912 return rc;
1913}
1914
1915static int vmdkDescDDBSetU32(PVMDKIMAGE pImage, PVMDKDESCRIPTOR pDescriptor,
1916 const char *pszKey, uint32_t uValue)
1917{
1918 char *pszValue;
1919
1920 RTStrAPrintf(&pszValue, "\"%d\"", uValue);
1921 if (!pszValue)
1922 return VERR_NO_STR_MEMORY;
1923 int rc = vmdkDescSetStr(pImage, pDescriptor, pDescriptor->uFirstDDB, pszKey,
1924 pszValue);
1925 RTStrFree(pszValue);
1926 return rc;
1927}
1928
1929/**
1930 * Splits the descriptor data into individual lines checking for correct line
1931 * endings and descriptor size.
1932 *
1933 * @returns VBox status code.
1934 * @param pImage The image instance.
1935 * @param pDesc The descriptor.
1936 * @param pszTmp The raw descriptor data from the image.
1937 */
/**
 * Splits the descriptor data into individual lines checking for correct line
 * endings and descriptor size.
 *
 * The buffer is modified in place: every LF (and the CR of a CR/LF pair) is
 * overwritten with '\0' and the start of each line is recorded in
 * pDesc->aLines.  Only plain LF and CR/LF line endings are accepted; a lone
 * CR is treated as corruption.
 *
 * @returns VBox status code.
 * @param   pImage    The image instance.
 * @param   pDesc     The descriptor.
 * @param   pszTmp    The raw descriptor data from the image.
 */
static int vmdkDescSplitLines(PVMDKIMAGE pImage, PVMDKDESCRIPTOR pDesc, char *pszTmp)
{
    unsigned cLine = 0;
    int rc = VINF_SUCCESS;

    while (   RT_SUCCESS(rc)
           && *pszTmp != '\0')
    {
        /* Record the start of the current line. */
        pDesc->aLines[cLine++] = pszTmp;
        if (cLine >= VMDK_DESCRIPTOR_LINES_MAX)
        {
            vdIfError(pImage->pIfError, VERR_VD_VMDK_INVALID_HEADER, RT_SRC_POS, N_("VMDK: descriptor too big in '%s'"), pImage->pszFilename);
            rc = VERR_VD_VMDK_INVALID_HEADER;
            break;
        }

        /* Scan to the end of the current line. */
        while (*pszTmp != '\0' && *pszTmp != '\n')
        {
            if (*pszTmp == '\r')
            {
                if (*(pszTmp + 1) != '\n')
                {
                    /* A CR not followed by LF is not a valid line ending. */
                    rc = vdIfError(pImage->pIfError, VERR_VD_VMDK_INVALID_HEADER, RT_SRC_POS, N_("VMDK: unsupported end of line in descriptor in '%s'"), pImage->pszFilename);
                    break;
                }
                else
                {
                    /* Get rid of CR character. */
                    *pszTmp = '\0';
                }
            }
            pszTmp++;
        }

        if (RT_FAILURE(rc))
            break;

        /* Get rid of LF character. */
        if (*pszTmp == '\n')
        {
            *pszTmp = '\0';
            pszTmp++;
        }
    }

    if (RT_SUCCESS(rc))
    {
        pDesc->cLines = cLine;
        /* Pointer right after the end of the used part of the buffer. */
        pDesc->aLines[cLine] = pszTmp;
    }

    return rc;
}
1992
/**
 * Internal: Splits the raw descriptor into lines, verifies the header line
 * and locates the three descriptor sections (base config, extent list, disk
 * database), validating that they appear in that order.
 *
 * On success pDescriptor->uFirstDesc/uFirstExtent/uFirstDDB index the first
 * line of each section (0 meaning "not present") and aNextLines links the
 * non-empty, non-comment lines within each section into chains.
 *
 * @returns VBox status code.
 * @param   pImage      The image instance (used for error reporting).
 * @param   pDescData   The raw descriptor text (modified in place by the
 *                      line splitter).
 * @param   cbDescData  Size of the descriptor buffer.
 * @param   pDescriptor The descriptor structure to fill in.
 */
static int vmdkPreprocessDescriptor(PVMDKIMAGE pImage, char *pDescData,
                                    size_t cbDescData, PVMDKDESCRIPTOR pDescriptor)
{
    pDescriptor->cbDescAlloc = cbDescData;
    int rc = vmdkDescSplitLines(pImage, pDescriptor, pDescData);
    if (RT_SUCCESS(rc))
    {
        /* Accept the known spelling variants of the header line. */
        if (    strcmp(pDescriptor->aLines[0], "# Disk DescriptorFile")
            &&  strcmp(pDescriptor->aLines[0], "# Disk Descriptor File")
            &&  strcmp(pDescriptor->aLines[0], "#Disk Descriptor File")
            &&  strcmp(pDescriptor->aLines[0], "#Disk DescriptorFile"))
            rc = vdIfError(pImage->pIfError, VERR_VD_VMDK_INVALID_HEADER, RT_SRC_POS,
                           N_("VMDK: descriptor does not start as expected in '%s'"), pImage->pszFilename);
        else
        {
            unsigned uLastNonEmptyLine = 0;

            /* Initialize those, because we need to be able to reopen an image. */
            pDescriptor->uFirstDesc = 0;
            pDescriptor->uFirstExtent = 0;
            pDescriptor->uFirstDDB = 0;
            for (unsigned i = 0; i < pDescriptor->cLines; i++)
            {
                /* Skip comments and empty lines. */
                if (*pDescriptor->aLines[i] != '#' && *pDescriptor->aLines[i] != '\0')
                {
                    if (    !strncmp(pDescriptor->aLines[i], "RW", 2)
                        ||  !strncmp(pDescriptor->aLines[i], "RDONLY", 6)
                        ||  !strncmp(pDescriptor->aLines[i], "NOACCESS", 8) )
                    {
                        /* An extent descriptor. */
                        if (!pDescriptor->uFirstDesc || pDescriptor->uFirstDDB)
                        {
                            /* Incorrect ordering of entries. */
                            rc = vdIfError(pImage->pIfError, VERR_VD_VMDK_INVALID_HEADER, RT_SRC_POS,
                                           N_("VMDK: incorrect ordering of entries in descriptor in '%s'"), pImage->pszFilename);
                            break;
                        }
                        if (!pDescriptor->uFirstExtent)
                        {
                            /* First extent line starts a new chain. */
                            pDescriptor->uFirstExtent = i;
                            uLastNonEmptyLine = 0;
                        }
                    }
                    else if (!strncmp(pDescriptor->aLines[i], "ddb.", 4))
                    {
                        /* A disk database entry. */
                        if (!pDescriptor->uFirstDesc || !pDescriptor->uFirstExtent)
                        {
                            /* Incorrect ordering of entries. */
                            rc = vdIfError(pImage->pIfError, VERR_VD_VMDK_INVALID_HEADER, RT_SRC_POS,
                                           N_("VMDK: incorrect ordering of entries in descriptor in '%s'"), pImage->pszFilename);
                            break;
                        }
                        if (!pDescriptor->uFirstDDB)
                        {
                            /* First DDB line starts a new chain. */
                            pDescriptor->uFirstDDB = i;
                            uLastNonEmptyLine = 0;
                        }
                    }
                    else
                    {
                        /* A normal entry. */
                        if (pDescriptor->uFirstExtent || pDescriptor->uFirstDDB)
                        {
                            /* Incorrect ordering of entries. */
                            rc = vdIfError(pImage->pIfError, VERR_VD_VMDK_INVALID_HEADER, RT_SRC_POS,
                                           N_("VMDK: incorrect ordering of entries in descriptor in '%s'"), pImage->pszFilename);
                            break;
                        }
                        if (!pDescriptor->uFirstDesc)
                        {
                            pDescriptor->uFirstDesc = i;
                            uLastNonEmptyLine = 0;
                        }
                    }
                    /* Link the previous non-empty line of this section to us. */
                    if (uLastNonEmptyLine)
                        pDescriptor->aNextLines[uLastNonEmptyLine] = i;
                    uLastNonEmptyLine = i;
                }
            }
        }
    }

    return rc;
}
2078
2079static int vmdkDescSetPCHSGeometry(PVMDKIMAGE pImage,
2080 PCVDGEOMETRY pPCHSGeometry)
2081{
2082 int rc = vmdkDescDDBSetU32(pImage, &pImage->Descriptor,
2083 VMDK_DDB_GEO_PCHS_CYLINDERS,
2084 pPCHSGeometry->cCylinders);
2085 if (RT_FAILURE(rc))
2086 return rc;
2087 rc = vmdkDescDDBSetU32(pImage, &pImage->Descriptor,
2088 VMDK_DDB_GEO_PCHS_HEADS,
2089 pPCHSGeometry->cHeads);
2090 if (RT_FAILURE(rc))
2091 return rc;
2092 rc = vmdkDescDDBSetU32(pImage, &pImage->Descriptor,
2093 VMDK_DDB_GEO_PCHS_SECTORS,
2094 pPCHSGeometry->cSectors);
2095 return rc;
2096}
2097
2098static int vmdkDescSetLCHSGeometry(PVMDKIMAGE pImage,
2099 PCVDGEOMETRY pLCHSGeometry)
2100{
2101 int rc = vmdkDescDDBSetU32(pImage, &pImage->Descriptor,
2102 VMDK_DDB_GEO_LCHS_CYLINDERS,
2103 pLCHSGeometry->cCylinders);
2104 if (RT_FAILURE(rc))
2105 return rc;
2106 rc = vmdkDescDDBSetU32(pImage, &pImage->Descriptor,
2107 VMDK_DDB_GEO_LCHS_HEADS,
2108
2109 pLCHSGeometry->cHeads);
2110 if (RT_FAILURE(rc))
2111 return rc;
2112 rc = vmdkDescDDBSetU32(pImage, &pImage->Descriptor,
2113 VMDK_DDB_GEO_LCHS_SECTORS,
2114 pLCHSGeometry->cSectors);
2115 return rc;
2116}
2117
/**
 * Internal: Initializes a fresh descriptor in the given buffer, writing the
 * skeleton (header, version, extent section, DDB section) line by line and
 * recording the section anchors, then fills in the initial CID, parentCID
 * and adapter type via the regular setter functions.
 *
 * @returns VBox status code.
 * @param   pImage      The image instance.
 * @param   pDescData   Buffer receiving the descriptor text.
 * @param   cbDescData  Size of the buffer.
 * @param   pDescriptor The descriptor structure to initialize.
 */
static int vmdkCreateDescriptor(PVMDKIMAGE pImage, char *pDescData,
                                size_t cbDescData, PVMDKDESCRIPTOR pDescriptor)
{
    pDescriptor->uFirstDesc = 0;
    pDescriptor->uFirstExtent = 0;
    pDescriptor->uFirstDDB = 0;
    pDescriptor->cLines = 0;
    pDescriptor->cbDescAlloc = cbDescData;
    pDescriptor->fDirty = false;
    pDescriptor->aLines[pDescriptor->cLines] = pDescData;
    memset(pDescriptor->aNextLines, '\0', sizeof(pDescriptor->aNextLines));

    int rc = vmdkDescInitStr(pImage, pDescriptor, "# Disk DescriptorFile");
    if (RT_SUCCESS(rc))
        rc = vmdkDescInitStr(pImage, pDescriptor, "version=1");
    if (RT_SUCCESS(rc))
    {
        /* The base config section starts at the "version=1" line. */
        pDescriptor->uFirstDesc = pDescriptor->cLines - 1;
        rc = vmdkDescInitStr(pImage, pDescriptor, "");
    }
    if (RT_SUCCESS(rc))
        rc = vmdkDescInitStr(pImage, pDescriptor, "# Extent description");
    if (RT_SUCCESS(rc))
        rc = vmdkDescInitStr(pImage, pDescriptor, "NOACCESS 0 ZERO ");
    if (RT_SUCCESS(rc))
    {
        /* The extent section starts at the placeholder NOACCESS line. */
        pDescriptor->uFirstExtent = pDescriptor->cLines - 1;
        rc = vmdkDescInitStr(pImage, pDescriptor, "");
    }
    if (RT_SUCCESS(rc))
    {
        /* The trailing space is created by VMware, too. */
        rc = vmdkDescInitStr(pImage, pDescriptor, "# The disk Data Base ");
    }
    if (RT_SUCCESS(rc))
        rc = vmdkDescInitStr(pImage, pDescriptor, "#DDB");
    if (RT_SUCCESS(rc))
        rc = vmdkDescInitStr(pImage, pDescriptor, "");
    if (RT_SUCCESS(rc))
        rc = vmdkDescInitStr(pImage, pDescriptor, "ddb.virtualHWVersion = \"4\"");
    if (RT_SUCCESS(rc))
    {
        /* The DDB section starts at the virtualHWVersion line. */
        pDescriptor->uFirstDDB = pDescriptor->cLines - 1;

        /* Now that the framework is in place, use the normal functions to insert
         * the remaining keys. */
        char szBuf[9];
        RTStrPrintf(szBuf, sizeof(szBuf), "%08x", RTRandU32());
        rc = vmdkDescSetStr(pImage, pDescriptor, pDescriptor->uFirstDesc,
                            "CID", szBuf);
    }
    if (RT_SUCCESS(rc))
        rc = vmdkDescSetStr(pImage, pDescriptor, pDescriptor->uFirstDesc,
                            "parentCID", "ffffffff");
    if (RT_SUCCESS(rc))
        rc = vmdkDescDDBSetStr(pImage, pDescriptor, "ddb.adapterType", "ide");

    return rc;
}
2177
/**
 * Internal: Parses a complete descriptor: preprocesses it into sections,
 * validates the version, determines the image flags from the create type,
 * parses every extent line, reads the PCHS/LCHS geometry and reads (or, for
 * writable images, creates and stores) the image/modification/parent UUIDs.
 *
 * @returns VBox status code.
 * @param   pImage      The image instance; its Descriptor, pExtents,
 *                      geometry and UUID members are filled in.
 * @param   pDescData   The raw descriptor text (modified in place).
 * @param   cbDescData  Size of the descriptor buffer.
 */
static int vmdkParseDescriptor(PVMDKIMAGE pImage, char *pDescData, size_t cbDescData)
{
    int rc;
    unsigned cExtents;
    unsigned uLine;
    unsigned i;

    rc = vmdkPreprocessDescriptor(pImage, pDescData, cbDescData,
                                  &pImage->Descriptor);
    if (RT_FAILURE(rc))
        return rc;

    /* Check version, must be 1. */
    uint32_t uVersion;
    rc = vmdkDescBaseGetU32(&pImage->Descriptor, "version", &uVersion);
    if (RT_FAILURE(rc))
        return vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: error finding key 'version' in descriptor in '%s'"), pImage->pszFilename);
    if (uVersion != 1)
        return vdIfError(pImage->pIfError, VERR_VD_VMDK_UNSUPPORTED_VERSION, RT_SRC_POS, N_("VMDK: unsupported format version in descriptor in '%s'"), pImage->pszFilename);

    /* Get image creation type and determine image flags. */
    char *pszCreateType = NULL; /* initialized to make gcc shut up */
    rc = vmdkDescBaseGetStr(pImage, &pImage->Descriptor, "createType",
                            &pszCreateType);
    if (RT_FAILURE(rc))
        return vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: cannot get image type from descriptor in '%s'"), pImage->pszFilename);
    if (    !strcmp(pszCreateType, "twoGbMaxExtentSparse")
        ||  !strcmp(pszCreateType, "twoGbMaxExtentFlat"))
        pImage->uImageFlags |= VD_VMDK_IMAGE_FLAGS_SPLIT_2G;
    else if (    !strcmp(pszCreateType, "partitionedDevice")
             ||  !strcmp(pszCreateType, "fullDevice"))
        pImage->uImageFlags |= VD_VMDK_IMAGE_FLAGS_RAWDISK;
    else if (!strcmp(pszCreateType, "streamOptimized"))
        pImage->uImageFlags |= VD_VMDK_IMAGE_FLAGS_STREAM_OPTIMIZED;
    else if (!strcmp(pszCreateType, "vmfs"))
        pImage->uImageFlags |= VD_IMAGE_FLAGS_FIXED | VD_VMDK_IMAGE_FLAGS_ESX;
    RTMemTmpFree(pszCreateType);

    /* Count the number of extent config entries. */
    for (uLine = pImage->Descriptor.uFirstExtent, cExtents = 0;
         uLine != 0;
         uLine = pImage->Descriptor.aNextLines[uLine], cExtents++)
        /* nothing */;

    if (!pImage->pDescData && cExtents != 1)
    {
        /* Monolithic image, must have only one extent (already opened). */
        return vdIfError(pImage->pIfError, VERR_VD_VMDK_INVALID_HEADER, RT_SRC_POS, N_("VMDK: monolithic image may only have one extent in '%s'"), pImage->pszFilename);
    }

    if (pImage->pDescData)
    {
        /* Non-monolithic image, extents need to be allocated. */
        rc = vmdkCreateExtents(pImage, cExtents);
        if (RT_FAILURE(rc))
            return rc;
    }

    /* Parse each extent line: "<ACCESS> <size> <TYPE> [basename [offset]]". */
    for (i = 0, uLine = pImage->Descriptor.uFirstExtent;
         i < cExtents; i++, uLine = pImage->Descriptor.aNextLines[uLine])
    {
        char *pszLine = pImage->Descriptor.aLines[uLine];

        /* Access type of the extent. */
        if (!strncmp(pszLine, "RW", 2))
        {
            pImage->pExtents[i].enmAccess = VMDKACCESS_READWRITE;
            pszLine += 2;
        }
        else if (!strncmp(pszLine, "RDONLY", 6))
        {
            pImage->pExtents[i].enmAccess = VMDKACCESS_READONLY;
            pszLine += 6;
        }
        else if (!strncmp(pszLine, "NOACCESS", 8))
        {
            pImage->pExtents[i].enmAccess = VMDKACCESS_NOACCESS;
            pszLine += 8;
        }
        else
            return vdIfError(pImage->pIfError, VERR_VD_VMDK_INVALID_HEADER, RT_SRC_POS, N_("VMDK: parse error in extent description in '%s'"), pImage->pszFilename);
        if (*pszLine++ != ' ')
            return vdIfError(pImage->pIfError, VERR_VD_VMDK_INVALID_HEADER, RT_SRC_POS, N_("VMDK: parse error in extent description in '%s'"), pImage->pszFilename);

        /* Nominal size of the extent. */
        rc = RTStrToUInt64Ex(pszLine, &pszLine, 10,
                             &pImage->pExtents[i].cNominalSectors);
        if (RT_FAILURE(rc))
            return vdIfError(pImage->pIfError, VERR_VD_VMDK_INVALID_HEADER, RT_SRC_POS, N_("VMDK: parse error in extent description in '%s'"), pImage->pszFilename);
        if (*pszLine++ != ' ')
            return vdIfError(pImage->pIfError, VERR_VD_VMDK_INVALID_HEADER, RT_SRC_POS, N_("VMDK: parse error in extent description in '%s'"), pImage->pszFilename);

        /* Type of the extent. */
        if (!strncmp(pszLine, "SPARSE", 6))
        {
            pImage->pExtents[i].enmType = VMDKETYPE_HOSTED_SPARSE;
            pszLine += 6;
        }
        else if (!strncmp(pszLine, "FLAT", 4))
        {
            pImage->pExtents[i].enmType = VMDKETYPE_FLAT;
            pszLine += 4;
        }
        else if (!strncmp(pszLine, "ZERO", 4))
        {
            pImage->pExtents[i].enmType = VMDKETYPE_ZERO;
            pszLine += 4;
        }
        else if (!strncmp(pszLine, "VMFS", 4))
        {
            pImage->pExtents[i].enmType = VMDKETYPE_VMFS;
            pszLine += 4;
        }
        else
            return vdIfError(pImage->pIfError, VERR_VD_VMDK_INVALID_HEADER, RT_SRC_POS, N_("VMDK: parse error in extent description in '%s'"), pImage->pszFilename);

        if (pImage->pExtents[i].enmType == VMDKETYPE_ZERO)
        {
            /* This one has no basename or offset. */
            if (*pszLine == ' ')
                pszLine++;
            if (*pszLine != '\0')
                return vdIfError(pImage->pIfError, VERR_VD_VMDK_INVALID_HEADER, RT_SRC_POS, N_("VMDK: parse error in extent description in '%s'"), pImage->pszFilename);
            pImage->pExtents[i].pszBasename = NULL;
        }
        else
        {
            /* All other extent types have basename and optional offset. */
            if (*pszLine++ != ' ')
                return vdIfError(pImage->pIfError, VERR_VD_VMDK_INVALID_HEADER, RT_SRC_POS, N_("VMDK: parse error in extent description in '%s'"), pImage->pszFilename);

            /* Basename of the image. Surrounded by quotes. */
            char *pszBasename;
            rc = vmdkStringUnquote(pImage, pszLine, &pszBasename, &pszLine);
            if (RT_FAILURE(rc))
                return rc;
            pImage->pExtents[i].pszBasename = pszBasename;
            if (*pszLine == ' ')
            {
                pszLine++;
                if (*pszLine != '\0')
                {
                    /* Optional offset in extent specified. */
                    rc = RTStrToUInt64Ex(pszLine, &pszLine, 10,
                                         &pImage->pExtents[i].uSectorOffset);
                    if (RT_FAILURE(rc))
                        return vdIfError(pImage->pIfError, VERR_VD_VMDK_INVALID_HEADER, RT_SRC_POS, N_("VMDK: parse error in extent description in '%s'"), pImage->pszFilename);
                }
            }

            /* Anything left over after the optional offset is an error. */
            if (*pszLine != '\0')
                return vdIfError(pImage->pIfError, VERR_VD_VMDK_INVALID_HEADER, RT_SRC_POS, N_("VMDK: parse error in extent description in '%s'"), pImage->pszFilename);
        }
    }

    /* Determine PCHS geometry (autogenerate if necessary). */
    rc = vmdkDescDDBGetU32(pImage, &pImage->Descriptor,
                           VMDK_DDB_GEO_PCHS_CYLINDERS,
                           &pImage->PCHSGeometry.cCylinders);
    if (rc == VERR_VD_VMDK_VALUE_NOT_FOUND)
        pImage->PCHSGeometry.cCylinders = 0;
    else if (RT_FAILURE(rc))
        return vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: error getting PCHS geometry from extent description in '%s'"), pImage->pszFilename);
    rc = vmdkDescDDBGetU32(pImage, &pImage->Descriptor,
                           VMDK_DDB_GEO_PCHS_HEADS,
                           &pImage->PCHSGeometry.cHeads);
    if (rc == VERR_VD_VMDK_VALUE_NOT_FOUND)
        pImage->PCHSGeometry.cHeads = 0;
    else if (RT_FAILURE(rc))
        return vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: error getting PCHS geometry from extent description in '%s'"), pImage->pszFilename);
    rc = vmdkDescDDBGetU32(pImage, &pImage->Descriptor,
                           VMDK_DDB_GEO_PCHS_SECTORS,
                           &pImage->PCHSGeometry.cSectors);
    if (rc == VERR_VD_VMDK_VALUE_NOT_FOUND)
        pImage->PCHSGeometry.cSectors = 0;
    else if (RT_FAILURE(rc))
        return vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: error getting PCHS geometry from extent description in '%s'"), pImage->pszFilename);
    /* Reject out-of-range CHS values (heads <= 16, sectors <= 63 per the
     * classic BIOS CHS limits). */
    if (    pImage->PCHSGeometry.cCylinders == 0
        ||  pImage->PCHSGeometry.cHeads == 0
        ||  pImage->PCHSGeometry.cHeads > 16
        ||  pImage->PCHSGeometry.cSectors == 0
        ||  pImage->PCHSGeometry.cSectors > 63)
    {
        /* Mark PCHS geometry as not yet valid (can't do the calculation here
         * as the total image size isn't known yet). */
        pImage->PCHSGeometry.cCylinders = 0;
        pImage->PCHSGeometry.cHeads = 16;
        pImage->PCHSGeometry.cSectors = 63;
    }

    /* Determine LCHS geometry (set to 0 if not specified). */
    rc = vmdkDescDDBGetU32(pImage, &pImage->Descriptor,
                           VMDK_DDB_GEO_LCHS_CYLINDERS,
                           &pImage->LCHSGeometry.cCylinders);
    if (rc == VERR_VD_VMDK_VALUE_NOT_FOUND)
        pImage->LCHSGeometry.cCylinders = 0;
    else if (RT_FAILURE(rc))
        return vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: error getting LCHS geometry from extent description in '%s'"), pImage->pszFilename);
    rc = vmdkDescDDBGetU32(pImage, &pImage->Descriptor,
                           VMDK_DDB_GEO_LCHS_HEADS,
                           &pImage->LCHSGeometry.cHeads);
    if (rc == VERR_VD_VMDK_VALUE_NOT_FOUND)
        pImage->LCHSGeometry.cHeads = 0;
    else if (RT_FAILURE(rc))
        return vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: error getting LCHS geometry from extent description in '%s'"), pImage->pszFilename);
    rc = vmdkDescDDBGetU32(pImage, &pImage->Descriptor,
                           VMDK_DDB_GEO_LCHS_SECTORS,
                           &pImage->LCHSGeometry.cSectors);
    if (rc == VERR_VD_VMDK_VALUE_NOT_FOUND)
        pImage->LCHSGeometry.cSectors = 0;
    else if (RT_FAILURE(rc))
        return vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: error getting LCHS geometry from extent description in '%s'"), pImage->pszFilename);
    /* LCHS geometry is all-or-nothing: clear everything if any part is 0. */
    if (    pImage->LCHSGeometry.cCylinders == 0
        ||  pImage->LCHSGeometry.cHeads == 0
        ||  pImage->LCHSGeometry.cSectors == 0)
    {
        pImage->LCHSGeometry.cCylinders = 0;
        pImage->LCHSGeometry.cHeads = 0;
        pImage->LCHSGeometry.cSectors = 0;
    }

    /* Get image UUID. */
    rc = vmdkDescDDBGetUuid(pImage, &pImage->Descriptor, VMDK_DDB_IMAGE_UUID,
                            &pImage->ImageUuid);
    if (rc == VERR_VD_VMDK_VALUE_NOT_FOUND)
    {
        /* Image without UUID. Probably created by VMware and not yet used
         * by VirtualBox. Can only be added for images opened in read/write
         * mode, so don't bother producing a sensible UUID otherwise. */
        if (pImage->uOpenFlags & VD_OPEN_FLAGS_READONLY)
            RTUuidClear(&pImage->ImageUuid);
        else
        {
            rc = RTUuidCreate(&pImage->ImageUuid);
            if (RT_FAILURE(rc))
                return rc;
            rc = vmdkDescDDBSetUuid(pImage, &pImage->Descriptor,
                                    VMDK_DDB_IMAGE_UUID, &pImage->ImageUuid);
            if (RT_FAILURE(rc))
                return vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: error storing image UUID in descriptor in '%s'"), pImage->pszFilename);
        }
    }
    else if (RT_FAILURE(rc))
        return rc;

    /* Get image modification UUID. */
    rc = vmdkDescDDBGetUuid(pImage, &pImage->Descriptor,
                            VMDK_DDB_MODIFICATION_UUID,
                            &pImage->ModificationUuid);
    if (rc == VERR_VD_VMDK_VALUE_NOT_FOUND)
    {
        /* Image without UUID. Probably created by VMware and not yet used
         * by VirtualBox. Can only be added for images opened in read/write
         * mode, so don't bother producing a sensible UUID otherwise. */
        if (pImage->uOpenFlags & VD_OPEN_FLAGS_READONLY)
            RTUuidClear(&pImage->ModificationUuid);
        else
        {
            rc = RTUuidCreate(&pImage->ModificationUuid);
            if (RT_FAILURE(rc))
                return rc;
            rc = vmdkDescDDBSetUuid(pImage, &pImage->Descriptor,
                                    VMDK_DDB_MODIFICATION_UUID,
                                    &pImage->ModificationUuid);
            if (RT_FAILURE(rc))
                return vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: error storing image modification UUID in descriptor in '%s'"), pImage->pszFilename);
        }
    }
    else if (RT_FAILURE(rc))
        return rc;

    /* Get UUID of parent image. */
    rc = vmdkDescDDBGetUuid(pImage, &pImage->Descriptor, VMDK_DDB_PARENT_UUID,
                            &pImage->ParentUuid);
    if (rc == VERR_VD_VMDK_VALUE_NOT_FOUND)
    {
        /* Image without UUID. Probably created by VMware and not yet used
         * by VirtualBox. Can only be added for images opened in read/write
         * mode, so don't bother producing a sensible UUID otherwise. */
        if (pImage->uOpenFlags & VD_OPEN_FLAGS_READONLY)
            RTUuidClear(&pImage->ParentUuid);
        else
        {
            /* Missing parent UUID is stored as the nil UUID (no parent). */
            rc = RTUuidClear(&pImage->ParentUuid);
            if (RT_FAILURE(rc))
                return rc;
            rc = vmdkDescDDBSetUuid(pImage, &pImage->Descriptor,
                                    VMDK_DDB_PARENT_UUID, &pImage->ParentUuid);
            if (RT_FAILURE(rc))
                return vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: error storing parent UUID in descriptor in '%s'"), pImage->pszFilename);
        }
    }
    else if (RT_FAILURE(rc))
        return rc;

    /* Get parent image modification UUID. */
    rc = vmdkDescDDBGetUuid(pImage, &pImage->Descriptor,
                            VMDK_DDB_PARENT_MODIFICATION_UUID,
                            &pImage->ParentModificationUuid);
    if (rc == VERR_VD_VMDK_VALUE_NOT_FOUND)
    {
        /* Image without UUID. Probably created by VMware and not yet used
         * by VirtualBox. Can only be added for images opened in read/write
         * mode, so don't bother producing a sensible UUID otherwise. */
        if (pImage->uOpenFlags & VD_OPEN_FLAGS_READONLY)
            RTUuidClear(&pImage->ParentModificationUuid);
        else
        {
            RTUuidClear(&pImage->ParentModificationUuid);
            rc = vmdkDescDDBSetUuid(pImage, &pImage->Descriptor,
                                    VMDK_DDB_PARENT_MODIFICATION_UUID,
                                    &pImage->ParentModificationUuid);
            if (RT_FAILURE(rc))
                return vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: error storing parent modification UUID in descriptor in '%s'"), pImage->pszFilename);
        }
    }
    else if (RT_FAILURE(rc))
        return rc;

    return VINF_SUCCESS;
}
2499
2500/**
2501 * Internal : Prepares the descriptor to write to the image.
2502 */
/**
 * Internal : Prepares the descriptor to write to the image.
 *
 * Serializes all descriptor lines into a single newline-terminated buffer.
 * When cbLimit is non-zero the output must fit in exactly that many bytes
 * (embedded descriptor); when it is zero the buffer grows on demand
 * (separate descriptor file).
 *
 * @returns VBox status code.
 * @param   pImage    The image instance.
 * @param   cbLimit   Maximum output size, or 0 for unlimited.
 * @param   ppvData   Where to return the allocated buffer (caller frees
 *                    with RTMemFree on success).
 * @param   pcbData   Where to return the number of bytes used.
 */
static int vmdkDescriptorPrepare(PVMDKIMAGE pImage, uint64_t cbLimit,
                                 void **ppvData, size_t *pcbData)
{
    int rc = VINF_SUCCESS;

    /*
     * Allocate temporary descriptor buffer.
     * In case there is no limit allocate a default
     * and increase if required.
     */
    size_t cbDescriptor = cbLimit ? cbLimit : 4 * _1K;
    char *pszDescriptor = (char *)RTMemAllocZ(cbDescriptor);
    size_t offDescriptor = 0;

    if (!pszDescriptor)
        return VERR_NO_MEMORY;

    for (unsigned i = 0; i < pImage->Descriptor.cLines; i++)
    {
        const char *psz = pImage->Descriptor.aLines[i];
        size_t cb = strlen(psz);

        /*
         * Increase the descriptor if there is no limit and
         * there is not enough room left for this line.
         */
        if (offDescriptor + cb + 1 > cbDescriptor)
        {
            if (cbLimit)
            {
                /* Embedded descriptor must not exceed its reserved area. */
                rc = vdIfError(pImage->pIfError, VERR_BUFFER_OVERFLOW, RT_SRC_POS, N_("VMDK: descriptor too long in '%s'"), pImage->pszFilename);
                break;
            }
            else
            {
                char *pszDescriptorNew = NULL;
                LogFlow(("Increasing descriptor cache\n"));

                /* Grow via a temporary so the old buffer is not leaked on
                 * realloc failure. */
                pszDescriptorNew = (char *)RTMemRealloc(pszDescriptor, cbDescriptor + cb + 4 * _1K);
                if (!pszDescriptorNew)
                {
                    rc = VERR_NO_MEMORY;
                    break;
                }
                pszDescriptor = pszDescriptorNew;
                cbDescriptor += cb + 4 * _1K;
            }
        }

        if (cb > 0)
        {
            memcpy(pszDescriptor + offDescriptor, psz, cb);
            offDescriptor += cb;
        }

        /* Terminate every line, including empty ones, with a LF. */
        memcpy(pszDescriptor + offDescriptor, "\n", 1);
        offDescriptor++;
    }

    if (RT_SUCCESS(rc))
    {
        *ppvData = pszDescriptor;
        *pcbData = offDescriptor;
    }
    else if (pszDescriptor)
        RTMemFree(pszDescriptor);

    return rc;
}
2572
2573/**
2574 * Internal: write/update the descriptor part of the image.
2575 */
/**
 * Internal: write/update the descriptor part of the image.
 *
 * Handles both the separate-descriptor-file layout (written at offset 0,
 * file truncated to the actual size afterwards) and the embedded layout
 * (written into the reserved descriptor area of the first extent).
 *
 * @returns VBox status code.
 * @param   pImage  The image instance.
 * @param   pIoCtx  The I/O context for the metadata write.
 */
static int vmdkWriteDescriptor(PVMDKIMAGE pImage, PVDIOCTX pIoCtx)
{
    int rc = VINF_SUCCESS;
    uint64_t cbLimit;
    uint64_t uOffset;
    PVMDKFILE pDescFile;
    void *pvDescriptor = NULL;
    size_t cbDescriptor;

    if (pImage->pDescData)
    {
        /* Separate descriptor file. */
        uOffset = 0;
        cbLimit = 0;       /* no size limit, file is truncated below */
        pDescFile = pImage->pFile;
    }
    else
    {
        /* Embedded descriptor file. */
        uOffset = VMDK_SECTOR2BYTE(pImage->pExtents[0].uDescriptorSector);
        cbLimit = VMDK_SECTOR2BYTE(pImage->pExtents[0].cDescriptorSectors);
        pDescFile = pImage->pExtents[0].pFile;
    }
    /* Bail out if there is no file to write to. */
    if (pDescFile == NULL)
        return VERR_INVALID_PARAMETER;

    rc = vmdkDescriptorPrepare(pImage, cbLimit, &pvDescriptor, &cbDescriptor);
    if (RT_SUCCESS(rc))
    {
        /* For the embedded case always write the full reserved area. */
        rc = vdIfIoIntFileWriteMeta(pImage->pIfIo, pDescFile->pStorage,
                                    uOffset, pvDescriptor,
                                    cbLimit ? cbLimit : cbDescriptor,
                                    pIoCtx, NULL, NULL);
        if (   RT_FAILURE(rc)
            && rc != VERR_VD_ASYNC_IO_IN_PROGRESS)
            rc = vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: error writing descriptor in '%s'"), pImage->pszFilename);
    }

    if (RT_SUCCESS(rc) && !cbLimit)
    {
        /* Separate descriptor file: shrink to the size actually used. */
        rc = vdIfIoIntFileSetSize(pImage->pIfIo, pDescFile->pStorage, cbDescriptor);
        if (RT_FAILURE(rc))
            rc = vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: error truncating descriptor in '%s'"), pImage->pszFilename);
    }

    if (RT_SUCCESS(rc))
        pImage->Descriptor.fDirty = false;

    if (pvDescriptor)
        RTMemFree(pvDescriptor);
    return rc;

}
2630
2631/**
2632 * Internal: validate the consistency check values in a binary header.
2633 */
2634static int vmdkValidateHeader(PVMDKIMAGE pImage, PVMDKEXTENT pExtent, const SparseExtentHeader *pHeader)
2635{
2636 int rc = VINF_SUCCESS;
2637 if (RT_LE2H_U32(pHeader->magicNumber) != VMDK_SPARSE_MAGICNUMBER)
2638 {
2639 rc = vdIfError(pImage->pIfError, VERR_VD_VMDK_INVALID_HEADER, RT_SRC_POS, N_("VMDK: incorrect magic in sparse extent header in '%s'"), pExtent->pszFullname);
2640 return rc;
2641 }
2642 if (RT_LE2H_U32(pHeader->version) != 1 && RT_LE2H_U32(pHeader->version) != 3)
2643 {
2644 rc = vdIfError(pImage->pIfError, VERR_VD_VMDK_UNSUPPORTED_VERSION, RT_SRC_POS, N_("VMDK: incorrect version in sparse extent header in '%s', not a VMDK 1.0/1.1 conforming file"), pExtent->pszFullname);
2645 return rc;
2646 }
2647 if ( (RT_LE2H_U32(pHeader->flags) & 1)
2648 && ( pHeader->singleEndLineChar != '\n'
2649 || pHeader->nonEndLineChar != ' '
2650 || pHeader->doubleEndLineChar1 != '\r'
2651 || pHeader->doubleEndLineChar2 != '\n') )
2652 {
2653 rc = vdIfError(pImage->pIfError, VERR_VD_VMDK_INVALID_HEADER, RT_SRC_POS, N_("VMDK: corrupted by CR/LF translation in '%s'"), pExtent->pszFullname);
2654 return rc;
2655 }
2656 if (RT_LE2H_U64(pHeader->descriptorSize) > VMDK_SPARSE_DESCRIPTOR_SIZE_MAX)
2657 {
2658 rc = vdIfError(pImage->pIfError, VERR_VD_VMDK_INVALID_HEADER, RT_SRC_POS, N_("VMDK: descriptor size out of bounds (%llu vs %llu) '%s'"),
2659 pExtent->pszFullname, RT_LE2H_U64(pHeader->descriptorSize), VMDK_SPARSE_DESCRIPTOR_SIZE_MAX);
2660 return rc;
2661 }
2662 return rc;
2663}
2664
2665/**
2666 * Internal: read metadata belonging to an extent with binary header, i.e.
2667 * as found in monolithic files.
2668 */
/**
 * Internal: read metadata belonging to an extent with binary header, i.e.
 * as found in monolithic files.
 *
 * Reads and validates the sparse extent header, locates the footer for
 * stream-optimized images (which carries the authoritative grain directory
 * offset), and fills in the extent geometry fields (grain size, GT/GD
 * entries, descriptor location).  On any failure the extent data is freed.
 *
 * @returns VBox status code.
 * @param   pImage            The image instance.
 * @param   pExtent           The extent to read the metadata for.
 * @param   fMagicAlreadyRead Whether the magic number has already been
 *                            consumed from the file (read resumes at the
 *                            version field in that case).
 */
static int vmdkReadBinaryMetaExtent(PVMDKIMAGE pImage, PVMDKEXTENT pExtent,
                                    bool fMagicAlreadyRead)
{
    SparseExtentHeader Header;
    int rc;

    if (!fMagicAlreadyRead)
        rc = vdIfIoIntFileReadSync(pImage->pIfIo, pExtent->pFile->pStorage, 0,
                                   &Header, sizeof(Header));
    else
    {
        /* Caller consumed the magic; reconstruct it and read the rest. */
        Header.magicNumber = RT_H2LE_U32(VMDK_SPARSE_MAGICNUMBER);
        rc = vdIfIoIntFileReadSync(pImage->pIfIo, pExtent->pFile->pStorage,
                                   RT_UOFFSETOF(SparseExtentHeader, version),
                                   &Header.version,
                                   sizeof(Header)
                                 - RT_UOFFSETOF(SparseExtentHeader, version));
    }

    if (RT_SUCCESS(rc))
    {
        rc = vmdkValidateHeader(pImage, pExtent, &Header);
        if (RT_SUCCESS(rc))
        {
            uint64_t cbFile = 0;

            /* Flag bit 17 + GD-at-end marker => stream-optimized footer. */
            if (    (RT_LE2H_U32(Header.flags) & RT_BIT(17))
                &&  RT_LE2H_U64(Header.gdOffset) == VMDK_GD_AT_END)
                pExtent->fFooter = true;

            /* Need the file size for writable images (append position) and
             * for locating the footer in random-access mode. */
            if (   !(pImage->uOpenFlags & VD_OPEN_FLAGS_READONLY)
                || (   pExtent->fFooter
                    && !(pImage->uOpenFlags & VD_OPEN_FLAGS_SEQUENTIAL)))
            {
                rc = vdIfIoIntFileGetSize(pImage->pIfIo, pExtent->pFile->pStorage, &cbFile);
                if (RT_FAILURE(rc))
                    rc = vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: cannot get size of '%s'"), pExtent->pszFullname);
            }

            if (RT_SUCCESS(rc))
            {
                if (!(pImage->uOpenFlags & VD_OPEN_FLAGS_READONLY))
                    pExtent->uAppendPosition = RT_ALIGN_64(cbFile, 512);

                if (    pExtent->fFooter
                    &&  (   !(pImage->uOpenFlags & VD_OPEN_FLAGS_READONLY)
                         || !(pImage->uOpenFlags & VD_OPEN_FLAGS_SEQUENTIAL)))
                {
                    /* Read the footer, which comes before the end-of-stream marker. */
                    rc = vdIfIoIntFileReadSync(pImage->pIfIo, pExtent->pFile->pStorage,
                                               cbFile - 2*512, &Header,
                                               sizeof(Header));
                    if (RT_FAILURE(rc))
                    {
                        vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: error reading extent footer in '%s'"), pExtent->pszFullname);
                        rc = VERR_VD_VMDK_INVALID_HEADER;
                    }

                    if (RT_SUCCESS(rc))
                        rc = vmdkValidateHeader(pImage, pExtent, &Header);
                    /* Prohibit any writes to this extent. */
                    pExtent->uAppendPosition = 0;
                }

                if (RT_SUCCESS(rc))
                {
                    /* Transfer the (possibly footer-sourced) header fields
                     * into the extent, converting from little endian. */
                    pExtent->uVersion = RT_LE2H_U32(Header.version);
                    pExtent->enmType = VMDKETYPE_HOSTED_SPARSE; /* Just dummy value, changed later. */
                    pExtent->cSectors = RT_LE2H_U64(Header.capacity);
                    pExtent->cSectorsPerGrain = RT_LE2H_U64(Header.grainSize);
                    pExtent->uDescriptorSector = RT_LE2H_U64(Header.descriptorOffset);
                    pExtent->cDescriptorSectors = RT_LE2H_U64(Header.descriptorSize);
                    pExtent->cGTEntries = RT_LE2H_U32(Header.numGTEsPerGT);
                    pExtent->cOverheadSectors = RT_LE2H_U64(Header.overHead);
                    pExtent->fUncleanShutdown = !!Header.uncleanShutdown;
                    pExtent->uCompression = RT_LE2H_U16(Header.compressAlgorithm);
                    /* Flag bit 1: redundant grain directory present. */
                    if (RT_LE2H_U32(Header.flags) & RT_BIT(1))
                    {
                        pExtent->uSectorRGD = RT_LE2H_U64(Header.rgdOffset);
                        pExtent->uSectorGD = RT_LE2H_U64(Header.gdOffset);
                    }
                    else
                    {
                        pExtent->uSectorGD = RT_LE2H_U64(Header.gdOffset);
                        pExtent->uSectorRGD = 0;
                    }

                    if (pExtent->uDescriptorSector && !pExtent->cDescriptorSectors)
                        rc = vdIfError(pImage->pIfError, VERR_VD_VMDK_INVALID_HEADER, RT_SRC_POS,
                                       N_("VMDK: inconsistent embedded descriptor config in '%s'"), pExtent->pszFullname);

                    /* GD-at-end is only resolvable via the footer, which is
                     * only read in the cases handled above. */
                    if (   RT_SUCCESS(rc)
                        && (   pExtent->uSectorGD == VMDK_GD_AT_END
                            || pExtent->uSectorRGD == VMDK_GD_AT_END)
                        && (   !(pImage->uOpenFlags & VD_OPEN_FLAGS_READONLY)
                            || !(pImage->uOpenFlags & VD_OPEN_FLAGS_SEQUENTIAL)))
                        rc = vdIfError(pImage->pIfError, VERR_VD_VMDK_INVALID_HEADER, RT_SRC_POS,
                                       N_("VMDK: cannot resolve grain directory offset in '%s'"), pExtent->pszFullname);

                    if (RT_SUCCESS(rc))
                    {
                        uint64_t cSectorsPerGDE = pExtent->cGTEntries * pExtent->cSectorsPerGrain;
                        if (!cSectorsPerGDE || cSectorsPerGDE > UINT32_MAX)
                            rc = vdIfError(pImage->pIfError, VERR_VD_VMDK_INVALID_HEADER, RT_SRC_POS,
                                           N_("VMDK: incorrect grain directory size in '%s'"), pExtent->pszFullname);
                        else
                        {
                            pExtent->cSectorsPerGDE = cSectorsPerGDE;
                            pExtent->cGDEntries = (pExtent->cSectors + cSectorsPerGDE - 1) / cSectorsPerGDE;

                            /* Fix up the number of descriptor sectors, as some flat images have
                             * really just one, and this causes failures when inserting the UUID
                             * values and other extra information. */
                            if (pExtent->cDescriptorSectors != 0 && pExtent->cDescriptorSectors < 4)
                            {
                                /* Do it the easy way - just fix it for flat images which have no
                                 * other complicated metadata which needs space too. */
                                if (    pExtent->uDescriptorSector + 4 < pExtent->cOverheadSectors
                                    &&  pExtent->cGTEntries * pExtent->cGDEntries == 0)
                                    pExtent->cDescriptorSectors = 4;
                            }
                        }
                    }
                }
            }
        }
    }
    else
    {
        vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: error reading extent header in '%s'"), pExtent->pszFullname);
        rc = VERR_VD_VMDK_INVALID_HEADER;
    }

    if (RT_FAILURE(rc))
        vmdkFreeExtentData(pImage, pExtent, false);

    return rc;
}
2807
2808/**
2809 * Internal: read additional metadata belonging to an extent. For those
2810 * extents which have no additional metadata just verify the information.
2811 */
/**
 * Internal: read additional metadata belonging to an extent. For those
 * extents which have no additional metadata just verify the information.
 *
 * For hosted sparse extents this validates the grain size and grain table
 * geometry, allocates the streaming buffers and either reads the grain
 * directory or (for sequential streamOptimized access) prepares sequential
 * grain reading.  On any failure the extent data is freed.
 *
 * @returns VBox status code.
 * @param   pImage   The image instance.
 * @param   pExtent  The extent to process.
 */
static int vmdkReadMetaExtent(PVMDKIMAGE pImage, PVMDKEXTENT pExtent)
{
    int rc = VINF_SUCCESS;

/* disabled the check as there are too many truncated vmdk images out there */
#ifdef VBOX_WITH_VMDK_STRICT_SIZE_CHECK
    uint64_t cbExtentSize;
    /* The image must be a multiple of a sector in size and contain the data
     * area (flat images only). If not, it means the image is at least
     * truncated, or even seriously garbled. */
    rc = vdIfIoIntFileGetSize(pImage->pIfIo, pExtent->pFile->pStorage, &cbExtentSize);
    if (RT_FAILURE(rc))
        rc = vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: error getting size in '%s'"), pExtent->pszFullname);
    else if (    cbExtentSize != RT_ALIGN_64(cbExtentSize, 512)
             &&  (pExtent->enmType != VMDKETYPE_FLAT || pExtent->cNominalSectors + pExtent->uSectorOffset > VMDK_BYTE2SECTOR(cbExtentSize)))
        rc = vdIfError(pImage->pIfError, VERR_VD_VMDK_INVALID_HEADER, RT_SRC_POS,
                       N_("VMDK: file size is not a multiple of 512 in '%s', file is truncated or otherwise garbled"), pExtent->pszFullname);
#endif /* VBOX_WITH_VMDK_STRICT_SIZE_CHECK */
    if (    RT_SUCCESS(rc)
        &&  pExtent->enmType == VMDKETYPE_HOSTED_SPARSE)
    {
        /* The spec says that this must be a power of two and greater than 8,
         * but probably they meant not less than 8. */
        if (    (pExtent->cSectorsPerGrain & (pExtent->cSectorsPerGrain - 1))
            ||  pExtent->cSectorsPerGrain < 8)
            rc = vdIfError(pImage->pIfError, VERR_VD_VMDK_INVALID_HEADER, RT_SRC_POS,
                           N_("VMDK: invalid extent grain size %u in '%s'"), pExtent->cSectorsPerGrain, pExtent->pszFullname);
        else
        {
            /* This code requires that a grain table must hold a power of two multiple
             * of the number of entries per GT cache entry. */
            if (    (pExtent->cGTEntries & (pExtent->cGTEntries - 1))
                ||  pExtent->cGTEntries < VMDK_GT_CACHELINE_SIZE)
                rc = vdIfError(pImage->pIfError, VERR_VD_VMDK_INVALID_HEADER, RT_SRC_POS,
                               N_("VMDK: grain table cache size problem in '%s'"), pExtent->pszFullname);
            else
            {
                rc = vmdkAllocStreamBuffers(pImage, pExtent);
                if (RT_SUCCESS(rc))
                {
                    /* Prohibit any writes to this streamOptimized extent. */
                    if (pImage->uImageFlags & VD_VMDK_IMAGE_FLAGS_STREAM_OPTIMIZED)
                        pExtent->uAppendPosition = 0;

                    if (   !(pImage->uImageFlags & VD_VMDK_IMAGE_FLAGS_STREAM_OPTIMIZED)
                        || !(pImage->uOpenFlags & VD_OPEN_FLAGS_READONLY)
                        || !(pImage->uOpenFlags & VD_OPEN_FLAGS_SEQUENTIAL))
                        rc = vmdkReadGrainDirectory(pImage, pExtent);
                    else
                    {
                        /* Sequential streamOptimized read: start right after
                         * the overhead area, no grain directory needed. */
                        pExtent->uGrainSectorAbs = pExtent->cOverheadSectors;
                        pExtent->cbGrainStreamRead = 0;
                    }
                }
            }
        }
    }

    if (RT_FAILURE(rc))
        vmdkFreeExtentData(pImage, pExtent, false);

    return rc;
}
2875
2876/**
2877 * Internal: write/update the metadata for a sparse extent.
2878 */
2879static int vmdkWriteMetaSparseExtent(PVMDKIMAGE pImage, PVMDKEXTENT pExtent,
2880 uint64_t uOffset, PVDIOCTX pIoCtx)
2881{
2882 SparseExtentHeader Header;
2883
2884 memset(&Header, '\0', sizeof(Header));
2885 Header.magicNumber = RT_H2LE_U32(VMDK_SPARSE_MAGICNUMBER);
2886 Header.version = RT_H2LE_U32(pExtent->uVersion);
2887 Header.flags = RT_H2LE_U32(RT_BIT(0));
2888 if (pExtent->pRGD)
2889 Header.flags |= RT_H2LE_U32(RT_BIT(1));
2890 if (pImage->uImageFlags & VD_VMDK_IMAGE_FLAGS_STREAM_OPTIMIZED)
2891 Header.flags |= RT_H2LE_U32(RT_BIT(16) | RT_BIT(17));
2892 Header.capacity = RT_H2LE_U64(pExtent->cSectors);
2893 Header.grainSize = RT_H2LE_U64(pExtent->cSectorsPerGrain);
2894 Header.descriptorOffset = RT_H2LE_U64(pExtent->uDescriptorSector);
2895 Header.descriptorSize = RT_H2LE_U64(pExtent->cDescriptorSectors);
2896 Header.numGTEsPerGT = RT_H2LE_U32(pExtent->cGTEntries);
2897 if (pExtent->fFooter && uOffset == 0)
2898 {
2899 if (pExtent->pRGD)
2900 {
2901 Assert(pExtent->uSectorRGD);
2902 Header.rgdOffset = RT_H2LE_U64(VMDK_GD_AT_END);
2903 Header.gdOffset = RT_H2LE_U64(VMDK_GD_AT_END);
2904 }
2905 else
2906 Header.gdOffset = RT_H2LE_U64(VMDK_GD_AT_END);
2907 }
2908 else
2909 {
2910 if (pExtent->pRGD)
2911 {
2912 Assert(pExtent->uSectorRGD);
2913 Header.rgdOffset = RT_H2LE_U64(pExtent->uSectorRGD);
2914 Header.gdOffset = RT_H2LE_U64(pExtent->uSectorGD);
2915 }
2916 else
2917 Header.gdOffset = RT_H2LE_U64(pExtent->uSectorGD);
2918 }
2919 Header.overHead = RT_H2LE_U64(pExtent->cOverheadSectors);
2920 Header.uncleanShutdown = pExtent->fUncleanShutdown;
2921 Header.singleEndLineChar = '\n';
2922 Header.nonEndLineChar = ' ';
2923 Header.doubleEndLineChar1 = '\r';
2924 Header.doubleEndLineChar2 = '\n';
2925 Header.compressAlgorithm = RT_H2LE_U16(pExtent->uCompression);
2926
2927 int rc = vdIfIoIntFileWriteMeta(pImage->pIfIo, pExtent->pFile->pStorage,
2928 uOffset, &Header, sizeof(Header),
2929 pIoCtx, NULL, NULL);
2930 if (RT_FAILURE(rc) && (rc != VERR_VD_ASYNC_IO_IN_PROGRESS))
2931 rc = vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: error writing extent header in '%s'"), pExtent->pszFullname);
2932 return rc;
2933}
2934
2935/**
2936 * Internal: free the buffers used for streamOptimized images.
2937 */
2938static void vmdkFreeStreamBuffers(PVMDKEXTENT pExtent)
2939{
2940 if (pExtent->pvCompGrain)
2941 {
2942 RTMemFree(pExtent->pvCompGrain);
2943 pExtent->pvCompGrain = NULL;
2944 }
2945 if (pExtent->pvGrain)
2946 {
2947 RTMemFree(pExtent->pvGrain);
2948 pExtent->pvGrain = NULL;
2949 }
2950}
2951
2952/**
2953 * Internal: free the memory used by the extent data structure, optionally
2954 * deleting the referenced files.
2955 *
2956 * @returns VBox status code.
2957 * @param pImage Pointer to the image instance data.
2958 * @param pExtent The extent to free.
2959 * @param fDelete Flag whether to delete the backing storage.
2960 */
2961static int vmdkFreeExtentData(PVMDKIMAGE pImage, PVMDKEXTENT pExtent,
2962 bool fDelete)
2963{
2964 int rc = VINF_SUCCESS;
2965
2966 vmdkFreeGrainDirectory(pExtent);
2967 if (pExtent->pDescData)
2968 {
2969 RTMemFree(pExtent->pDescData);
2970 pExtent->pDescData = NULL;
2971 }
2972 if (pExtent->pFile != NULL)
2973 {
2974 /* Do not delete raw extents, these have full and base names equal. */
2975 rc = vmdkFileClose(pImage, &pExtent->pFile,
2976 fDelete
2977 && pExtent->pszFullname
2978 && pExtent->pszBasename
2979 && strcmp(pExtent->pszFullname, pExtent->pszBasename));
2980 }
2981 if (pExtent->pszBasename)
2982 {
2983 RTMemTmpFree((void *)pExtent->pszBasename);
2984 pExtent->pszBasename = NULL;
2985 }
2986 if (pExtent->pszFullname)
2987 {
2988 RTStrFree((char *)(void *)pExtent->pszFullname);
2989 pExtent->pszFullname = NULL;
2990 }
2991 vmdkFreeStreamBuffers(pExtent);
2992
2993 return rc;
2994}
2995
2996/**
2997 * Internal: allocate grain table cache if necessary for this image.
2998 */
2999static int vmdkAllocateGrainTableCache(PVMDKIMAGE pImage)
3000{
3001 PVMDKEXTENT pExtent;
3002
3003 /* Allocate grain table cache if any sparse extent is present. */
3004 for (unsigned i = 0; i < pImage->cExtents; i++)
3005 {
3006 pExtent = &pImage->pExtents[i];
3007 if (pExtent->enmType == VMDKETYPE_HOSTED_SPARSE)
3008 {
3009 /* Allocate grain table cache. */
3010 pImage->pGTCache = (PVMDKGTCACHE)RTMemAllocZ(sizeof(VMDKGTCACHE));
3011 if (!pImage->pGTCache)
3012 return VERR_NO_MEMORY;
3013 for (unsigned j = 0; j < VMDK_GT_CACHE_SIZE; j++)
3014 {
3015 PVMDKGTCACHEENTRY pGCE = &pImage->pGTCache->aGTCache[j];
3016 pGCE->uExtent = UINT32_MAX;
3017 }
3018 pImage->pGTCache->cEntries = VMDK_GT_CACHE_SIZE;
3019 break;
3020 }
3021 }
3022
3023 return VINF_SUCCESS;
3024}
3025
3026/**
3027 * Internal: allocate the given number of extents.
3028 */
3029static int vmdkCreateExtents(PVMDKIMAGE pImage, unsigned cExtents)
3030{
3031 int rc = VINF_SUCCESS;
3032 PVMDKEXTENT pExtents = (PVMDKEXTENT)RTMemAllocZ(cExtents * sizeof(VMDKEXTENT));
3033 if (pExtents)
3034 {
3035 for (unsigned i = 0; i < cExtents; i++)
3036 {
3037 pExtents[i].pFile = NULL;
3038 pExtents[i].pszBasename = NULL;
3039 pExtents[i].pszFullname = NULL;
3040 pExtents[i].pGD = NULL;
3041 pExtents[i].pRGD = NULL;
3042 pExtents[i].pDescData = NULL;
3043 pExtents[i].uVersion = 1;
3044 pExtents[i].uCompression = VMDK_COMPRESSION_NONE;
3045 pExtents[i].uExtent = i;
3046 pExtents[i].pImage = pImage;
3047 }
3048 pImage->pExtents = pExtents;
3049 pImage->cExtents = cExtents;
3050 }
3051 else
3052 rc = VERR_NO_MEMORY;
3053
3054 return rc;
3055}
3056
3057/**
3058 * Reads and processes the descriptor embedded in sparse images.
3059 *
3060 * @returns VBox status code.
3061 * @param pImage VMDK image instance.
3062 * @param pFile The sparse file handle.
3063 */
3064static int vmdkDescriptorReadSparse(PVMDKIMAGE pImage, PVMDKFILE pFile)
3065{
3066 /* It's a hosted single-extent image. */
3067 int rc = vmdkCreateExtents(pImage, 1);
3068 if (RT_SUCCESS(rc))
3069 {
3070 /* The opened file is passed to the extent. No separate descriptor
3071 * file, so no need to keep anything open for the image. */
3072 PVMDKEXTENT pExtent = &pImage->pExtents[0];
3073 pExtent->pFile = pFile;
3074 pImage->pFile = NULL;
3075 pExtent->pszFullname = RTPathAbsDup(pImage->pszFilename);
3076 if (RT_LIKELY(pExtent->pszFullname))
3077 {
3078 /* As we're dealing with a monolithic image here, there must
3079 * be a descriptor embedded in the image file. */
3080 rc = vmdkReadBinaryMetaExtent(pImage, pExtent, true /* fMagicAlreadyRead */);
3081 if ( RT_SUCCESS(rc)
3082 && pExtent->uDescriptorSector
3083 && pExtent->cDescriptorSectors)
3084 {
3085 /* HACK: extend the descriptor if it is unusually small and it fits in
3086 * the unused space after the image header. Allows opening VMDK files
3087 * with extremely small descriptor in read/write mode.
3088 *
3089 * The previous version introduced a possible regression for VMDK stream
3090 * optimized images from VMware which tend to have only a single sector sized
3091 * descriptor. Increasing the descriptor size resulted in adding the various uuid
3092 * entries required to make it work with VBox but for stream optimized images
3093 * the updated binary header wasn't written to the disk creating a mismatch
3094 * between advertised and real descriptor size.
3095 *
3096 * The descriptor size will be increased even if opened readonly now if there
3097 * enough room but the new value will not be written back to the image.
3098 */
3099 if ( pExtent->cDescriptorSectors < 3
3100 && (int64_t)pExtent->uSectorGD - pExtent->uDescriptorSector >= 4
3101 && (!pExtent->uSectorRGD || (int64_t)pExtent->uSectorRGD - pExtent->uDescriptorSector >= 4))
3102 {
3103 uint64_t cDescriptorSectorsOld = pExtent->cDescriptorSectors;
3104
3105 pExtent->cDescriptorSectors = 4;
3106 if (!(pImage->uOpenFlags & VD_OPEN_FLAGS_READONLY))
3107 {
3108 /*
3109 * Update the on disk number now to make sure we don't introduce inconsistencies
3110 * in case of stream optimized images from VMware where the descriptor is just
3111 * one sector big (the binary header is not written to disk for complete
3112 * stream optimized images in vmdkFlushImage()).
3113 */
3114 uint64_t u64DescSizeNew = RT_H2LE_U64(pExtent->cDescriptorSectors);
3115 rc = vdIfIoIntFileWriteSync(pImage->pIfIo, pFile->pStorage,
3116 RT_UOFFSETOF(SparseExtentHeader, descriptorSize),
3117 &u64DescSizeNew, sizeof(u64DescSizeNew));
3118 if (RT_FAILURE(rc))
3119 {
3120 LogFlowFunc(("Increasing the descriptor size failed with %Rrc\n", rc));
3121 /* Restore the old size and carry on. */
3122 pExtent->cDescriptorSectors = cDescriptorSectorsOld;
3123 }
3124 }
3125 }
3126 /* Read the descriptor from the extent. */
3127 pExtent->pDescData = (char *)RTMemAllocZ(VMDK_SECTOR2BYTE(pExtent->cDescriptorSectors));
3128 if (RT_LIKELY(pExtent->pDescData))
3129 {
3130 rc = vdIfIoIntFileReadSync(pImage->pIfIo, pExtent->pFile->pStorage,
3131 VMDK_SECTOR2BYTE(pExtent->uDescriptorSector),
3132 pExtent->pDescData,
3133 VMDK_SECTOR2BYTE(pExtent->cDescriptorSectors));
3134 if (RT_SUCCESS(rc))
3135 {
3136 rc = vmdkParseDescriptor(pImage, pExtent->pDescData,
3137 VMDK_SECTOR2BYTE(pExtent->cDescriptorSectors));
3138 if ( RT_SUCCESS(rc)
3139 && ( !(pImage->uImageFlags & VD_VMDK_IMAGE_FLAGS_STREAM_OPTIMIZED)
3140 || !(pImage->uOpenFlags & VD_OPEN_FLAGS_ASYNC_IO)))
3141 {
3142 rc = vmdkReadMetaExtent(pImage, pExtent);
3143 if (RT_SUCCESS(rc))
3144 {
3145 /* Mark the extent as unclean if opened in read-write mode. */
3146 if ( !(pImage->uOpenFlags & VD_OPEN_FLAGS_READONLY)
3147 && !(pImage->uImageFlags & VD_VMDK_IMAGE_FLAGS_STREAM_OPTIMIZED))
3148 {
3149 pExtent->fUncleanShutdown = true;
3150 pExtent->fMetaDirty = true;
3151 }
3152 }
3153 }
3154 else if (RT_SUCCESS(rc))
3155 rc = VERR_NOT_SUPPORTED;
3156 }
3157 else
3158 rc = vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: read error for descriptor in '%s'"), pExtent->pszFullname);
3159 }
3160 else
3161 rc = VERR_NO_MEMORY;
3162 }
3163 else if (RT_SUCCESS(rc))
3164 rc = vdIfError(pImage->pIfError, VERR_VD_VMDK_INVALID_HEADER, RT_SRC_POS, N_("VMDK: monolithic image without descriptor in '%s'"), pImage->pszFilename);
3165 }
3166 else
3167 rc = VERR_NO_MEMORY;
3168 }
3169
3170 return rc;
3171}
3172
3173/**
3174 * Reads the descriptor from a pure text file.
3175 *
3176 * @returns VBox status code.
3177 * @param pImage VMDK image instance.
3178 * @param pFile The descriptor file handle.
3179 */
3180static int vmdkDescriptorReadAscii(PVMDKIMAGE pImage, PVMDKFILE pFile)
3181{
3182 /* Allocate at least 10K, and make sure that there is 5K free space
3183 * in case new entries need to be added to the descriptor. Never
3184 * allocate more than 128K, because that's no valid descriptor file
3185 * and will result in the correct "truncated read" error handling. */
3186 uint64_t cbFileSize;
3187 int rc = vdIfIoIntFileGetSize(pImage->pIfIo, pFile->pStorage, &cbFileSize);
3188 if ( RT_SUCCESS(rc)
3189 && cbFileSize >= 50)
3190 {
3191 uint64_t cbSize = cbFileSize;
3192 if (cbSize % VMDK_SECTOR2BYTE(10))
3193 cbSize += VMDK_SECTOR2BYTE(20) - cbSize % VMDK_SECTOR2BYTE(10);
3194 else
3195 cbSize += VMDK_SECTOR2BYTE(10);
3196 cbSize = RT_MIN(cbSize, _128K);
3197 pImage->cbDescAlloc = RT_MAX(VMDK_SECTOR2BYTE(20), cbSize);
3198 pImage->pDescData = (char *)RTMemAllocZ(pImage->cbDescAlloc);
3199 if (RT_LIKELY(pImage->pDescData))
3200 {
3201 rc = vdIfIoIntFileReadSync(pImage->pIfIo, pFile->pStorage, 0, pImage->pDescData,
3202 RT_MIN(pImage->cbDescAlloc, cbFileSize));
3203 if (RT_SUCCESS(rc))
3204 {
3205#if 0 /** @todo Revisit */
3206 cbRead += sizeof(u32Magic);
3207 if (cbRead == pImage->cbDescAlloc)
3208 {
3209 /* Likely the read is truncated. Better fail a bit too early
3210 * (normally the descriptor is much smaller than our buffer). */
3211 rc = vdIfError(pImage->pIfError, VERR_VD_VMDK_INVALID_HEADER, RT_SRC_POS, N_("VMDK: cannot read descriptor in '%s'"), pImage->pszFilename);
3212 goto out;
3213 }
3214#endif
3215 rc = vmdkParseDescriptor(pImage, pImage->pDescData,
3216 pImage->cbDescAlloc);
3217 if (RT_SUCCESS(rc))
3218 {
3219 for (unsigned i = 0; i < pImage->cExtents && RT_SUCCESS(rc); i++)
3220 {
3221 PVMDKEXTENT pExtent = &pImage->pExtents[i];
3222 if (pExtent->pszBasename)
3223 {
3224 /* Hack to figure out whether the specified name in the
3225 * extent descriptor is absolute. Doesn't always work, but
3226 * should be good enough for now. */
3227 char *pszFullname;
3228 /** @todo implement proper path absolute check. */
3229 if (pExtent->pszBasename[0] == RTPATH_SLASH)
3230 {
3231 pszFullname = RTStrDup(pExtent->pszBasename);
3232 if (!pszFullname)
3233 {
3234 rc = VERR_NO_MEMORY;
3235 break;
3236 }
3237 }
3238 else
3239 {
3240 char *pszDirname = RTStrDup(pImage->pszFilename);
3241 if (!pszDirname)
3242 {
3243 rc = VERR_NO_MEMORY;
3244 break;
3245 }
3246 RTPathStripFilename(pszDirname);
3247 pszFullname = RTPathJoinA(pszDirname, pExtent->pszBasename);
3248 RTStrFree(pszDirname);
3249 if (!pszFullname)
3250 {
3251 rc = VERR_NO_STR_MEMORY;
3252 break;
3253 }
3254 }
3255 pExtent->pszFullname = pszFullname;
3256 }
3257 else
3258 pExtent->pszFullname = NULL;
3259
3260 unsigned uOpenFlags = pImage->uOpenFlags | ((pExtent->enmAccess == VMDKACCESS_READONLY) ? VD_OPEN_FLAGS_READONLY : 0);
3261 switch (pExtent->enmType)
3262 {
3263 case VMDKETYPE_HOSTED_SPARSE:
3264 rc = vmdkFileOpen(pImage, &pExtent->pFile, pExtent->pszBasename, pExtent->pszFullname,
3265 VDOpenFlagsToFileOpenFlags(uOpenFlags, false /* fCreate */));
3266 if (RT_FAILURE(rc))
3267 {
3268 /* Do NOT signal an appropriate error here, as the VD
3269 * layer has the choice of retrying the open if it
3270 * failed. */
3271 break;
3272 }
3273 rc = vmdkReadBinaryMetaExtent(pImage, pExtent,
3274 false /* fMagicAlreadyRead */);
3275 if (RT_FAILURE(rc))
3276 break;
3277 rc = vmdkReadMetaExtent(pImage, pExtent);
3278 if (RT_FAILURE(rc))
3279 break;
3280
3281 /* Mark extent as unclean if opened in read-write mode. */
3282 if (!(pImage->uOpenFlags & VD_OPEN_FLAGS_READONLY))
3283 {
3284 pExtent->fUncleanShutdown = true;
3285 pExtent->fMetaDirty = true;
3286 }
3287 break;
3288 case VMDKETYPE_VMFS:
3289 case VMDKETYPE_FLAT:
3290 rc = vmdkFileOpen(pImage, &pExtent->pFile, pExtent->pszBasename, pExtent->pszFullname,
3291 VDOpenFlagsToFileOpenFlags(uOpenFlags, false /* fCreate */));
3292 if (RT_FAILURE(rc))
3293 {
3294 /* Do NOT signal an appropriate error here, as the VD
3295 * layer has the choice of retrying the open if it
3296 * failed. */
3297 break;
3298 }
3299 break;
3300 case VMDKETYPE_ZERO:
3301 /* Nothing to do. */
3302 break;
3303 default:
3304 AssertMsgFailed(("unknown vmdk extent type %d\n", pExtent->enmType));
3305 }
3306 }
3307 }
3308 }
3309 else
3310 rc = vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: read error for descriptor in '%s'"), pImage->pszFilename);
3311 }
3312 else
3313 rc = VERR_NO_MEMORY;
3314 }
3315 else if (RT_SUCCESS(rc))
3316 rc = vdIfError(pImage->pIfError, VERR_VD_VMDK_INVALID_HEADER, RT_SRC_POS, N_("VMDK: descriptor in '%s' is too short"), pImage->pszFilename);
3317
3318 return rc;
3319}
3320
3321/**
3322 * Read and process the descriptor based on the image type.
3323 *
3324 * @returns VBox status code.
3325 * @param pImage VMDK image instance.
3326 * @param pFile VMDK file handle.
3327 */
3328static int vmdkDescriptorRead(PVMDKIMAGE pImage, PVMDKFILE pFile)
3329{
3330 uint32_t u32Magic;
3331
3332 /* Read magic (if present). */
3333 int rc = vdIfIoIntFileReadSync(pImage->pIfIo, pFile->pStorage, 0,
3334 &u32Magic, sizeof(u32Magic));
3335 if (RT_SUCCESS(rc))
3336 {
3337 /* Handle the file according to its magic number. */
3338 if (RT_LE2H_U32(u32Magic) == VMDK_SPARSE_MAGICNUMBER)
3339 rc = vmdkDescriptorReadSparse(pImage, pFile);
3340 else
3341 rc = vmdkDescriptorReadAscii(pImage, pFile);
3342 }
3343 else
3344 {
3345 vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: error reading the magic number in '%s'"), pImage->pszFilename);
3346 rc = VERR_VD_VMDK_INVALID_HEADER;
3347 }
3348
3349 return rc;
3350}
3351
3352/**
3353 * Internal: Open an image, constructing all necessary data structures.
3354 */
3355static int vmdkOpenImage(PVMDKIMAGE pImage, unsigned uOpenFlags)
3356{
3357 pImage->uOpenFlags = uOpenFlags;
3358 pImage->pIfError = VDIfErrorGet(pImage->pVDIfsDisk);
3359 pImage->pIfIo = VDIfIoIntGet(pImage->pVDIfsImage);
3360 AssertPtrReturn(pImage->pIfIo, VERR_INVALID_PARAMETER);
3361
3362 /*
3363 * Open the image.
3364 * We don't have to check for asynchronous access because
3365 * we only support raw access and the opened file is a description
3366 * file were no data is stored.
3367 */
3368 PVMDKFILE pFile;
3369 int rc = vmdkFileOpen(pImage, &pFile, NULL, pImage->pszFilename,
3370 VDOpenFlagsToFileOpenFlags(uOpenFlags, false /* fCreate */));
3371 if (RT_SUCCESS(rc))
3372 {
3373 pImage->pFile = pFile;
3374
3375 rc = vmdkDescriptorRead(pImage, pFile);
3376 if (RT_SUCCESS(rc))
3377 {
3378 /* Determine PCHS geometry if not set. */
3379 if (pImage->PCHSGeometry.cCylinders == 0)
3380 {
3381 uint64_t cCylinders = VMDK_BYTE2SECTOR(pImage->cbSize)
3382 / pImage->PCHSGeometry.cHeads
3383 / pImage->PCHSGeometry.cSectors;
3384 pImage->PCHSGeometry.cCylinders = (unsigned)RT_MIN(cCylinders, 16383);
3385 if ( !(pImage->uOpenFlags & VD_OPEN_FLAGS_READONLY)
3386 && !(pImage->uImageFlags & VD_VMDK_IMAGE_FLAGS_STREAM_OPTIMIZED))
3387 {
3388 rc = vmdkDescSetPCHSGeometry(pImage, &pImage->PCHSGeometry);
3389 AssertRC(rc);
3390 }
3391 }
3392
3393 /* Update the image metadata now in case has changed. */
3394 rc = vmdkFlushImage(pImage, NULL);
3395 if (RT_SUCCESS(rc))
3396 {
3397 /* Figure out a few per-image constants from the extents. */
3398 pImage->cbSize = 0;
3399 for (unsigned i = 0; i < pImage->cExtents; i++)
3400 {
3401 PVMDKEXTENT pExtent = &pImage->pExtents[i];
3402 if (pExtent->enmType == VMDKETYPE_HOSTED_SPARSE)
3403 {
3404 /* Here used to be a check whether the nominal size of an extent
3405 * is a multiple of the grain size. The spec says that this is
3406 * always the case, but unfortunately some files out there in the
3407 * wild violate the spec (e.g. ReactOS 0.3.1). */
3408 }
3409 else if ( pExtent->enmType == VMDKETYPE_FLAT
3410 || pExtent->enmType == VMDKETYPE_ZERO)
3411 pImage->uImageFlags |= VD_IMAGE_FLAGS_FIXED;
3412
3413 pImage->cbSize += VMDK_SECTOR2BYTE(pExtent->cNominalSectors);
3414 }
3415
3416 if ( !(pImage->uImageFlags & VD_VMDK_IMAGE_FLAGS_STREAM_OPTIMIZED)
3417 || !(pImage->uOpenFlags & VD_OPEN_FLAGS_READONLY)
3418 || !(pImage->uOpenFlags & VD_OPEN_FLAGS_SEQUENTIAL))
3419 rc = vmdkAllocateGrainTableCache(pImage);
3420 }
3421 }
3422 }
3423 /* else: Do NOT signal an appropriate error here, as the VD layer has the
3424 * choice of retrying the open if it failed. */
3425
3426 if (RT_SUCCESS(rc))
3427 {
3428 PVDREGIONDESC pRegion = &pImage->RegionList.aRegions[0];
3429 pImage->RegionList.fFlags = 0;
3430 pImage->RegionList.cRegions = 1;
3431
3432 pRegion->offRegion = 0; /* Disk start. */
3433 pRegion->cbBlock = 512;
3434 pRegion->enmDataForm = VDREGIONDATAFORM_RAW;
3435 pRegion->enmMetadataForm = VDREGIONMETADATAFORM_NONE;
3436 pRegion->cbData = 512;
3437 pRegion->cbMetadata = 0;
3438 pRegion->cRegionBlocksOrBytes = pImage->cbSize;
3439 }
3440 else
3441 vmdkFreeImage(pImage, false, false /*fFlush*/); /* Don't try to flush anything if opening failed. */
3442 return rc;
3443}
3444
3445/**
3446 * Frees a raw descriptor.
3447 * @internal
3448 */
3449static int vmdkRawDescFree(PVDISKRAW pRawDesc)
3450{
3451 if (!pRawDesc)
3452 return VINF_SUCCESS;
3453
3454 RTStrFree(pRawDesc->pszRawDisk);
3455 pRawDesc->pszRawDisk = NULL;
3456
3457 /* Partitions: */
3458 for (unsigned i = 0; i < pRawDesc->cPartDescs; i++)
3459 {
3460 RTStrFree(pRawDesc->pPartDescs[i].pszRawDevice);
3461 pRawDesc->pPartDescs[i].pszRawDevice = NULL;
3462
3463 RTMemFree(pRawDesc->pPartDescs[i].pvPartitionData);
3464 pRawDesc->pPartDescs[i].pvPartitionData = NULL;
3465 }
3466
3467 RTMemFree(pRawDesc->pPartDescs);
3468 pRawDesc->pPartDescs = NULL;
3469
3470 RTMemFree(pRawDesc);
3471 return VINF_SUCCESS;
3472}
3473
3474/**
3475 * Helper that grows the raw partition descriptor table by @a cToAdd entries,
3476 * returning the pointer to the first new entry.
3477 * @internal
3478 */
3479static int vmdkRawDescAppendPartDesc(PVMDKIMAGE pImage, PVDISKRAW pRawDesc, uint32_t cToAdd, PVDISKRAWPARTDESC *ppRet)
3480{
3481 uint32_t const cOld = pRawDesc->cPartDescs;
3482 uint32_t const cNew = cOld + cToAdd;
3483 PVDISKRAWPARTDESC paNew = (PVDISKRAWPARTDESC)RTMemReallocZ(pRawDesc->pPartDescs,
3484 cOld * sizeof(pRawDesc->pPartDescs[0]),
3485 cNew * sizeof(pRawDesc->pPartDescs[0]));
3486 if (paNew)
3487 {
3488 pRawDesc->cPartDescs = cNew;
3489 pRawDesc->pPartDescs = paNew;
3490
3491 *ppRet = &paNew[cOld];
3492 return VINF_SUCCESS;
3493 }
3494 *ppRet = NULL;
3495 return vdIfError(pImage->pIfError, VERR_NO_MEMORY, RT_SRC_POS,
3496 N_("VMDK: Image path: '%s'. Out of memory growing the partition descriptors (%u -> %u)."),
3497 pImage->pszFilename, cOld, cNew);
3498}
3499
3500/**
3501 * @callback_method_impl{FNRTSORTCMP}
3502 */
3503static DECLCALLBACK(int) vmdkRawDescPartComp(void const *pvElement1, void const *pvElement2, void *pvUser)
3504{
3505 RT_NOREF(pvUser);
3506 int64_t const iDelta = ((PVDISKRAWPARTDESC)pvElement1)->offStartInVDisk - ((PVDISKRAWPARTDESC)pvElement2)->offStartInVDisk;
3507 return iDelta < 0 ? -1 : iDelta > 0 ? 1 : 0;
3508}
3509
3510/**
3511 * Post processes the partition descriptors.
3512 *
3513 * Sorts them and check that they don't overlap.
3514 */
3515static int vmdkRawDescPostProcessPartitions(PVMDKIMAGE pImage, PVDISKRAW pRawDesc, uint64_t cbSize)
3516{
3517 /*
3518 * Sort data areas in ascending order of start.
3519 */
3520 RTSortShell(pRawDesc->pPartDescs, pRawDesc->cPartDescs, sizeof(pRawDesc->pPartDescs[0]), vmdkRawDescPartComp, NULL);
3521
3522 /*
3523 * Check that we don't have overlapping descriptors. If we do, that's an
3524 * indication that the drive is corrupt or that the RTDvm code is buggy.
3525 */
3526 VDISKRAWPARTDESC const *paPartDescs = pRawDesc->pPartDescs;
3527 for (uint32_t i = 0; i < pRawDesc->cPartDescs; i++)
3528 {
3529 uint64_t offLast = paPartDescs[i].offStartInVDisk + paPartDescs[i].cbData;
3530 if (offLast <= paPartDescs[i].offStartInVDisk)
3531 return vdIfError(pImage->pIfError, VERR_FILESYSTEM_CORRUPT /*?*/, RT_SRC_POS,
3532 N_("VMDK: Image path: '%s'. Bogus partition descriptor #%u (%#RX64 LB %#RX64%s): Wrap around or zero"),
3533 pImage->pszFilename, i, paPartDescs[i].offStartInVDisk, paPartDescs[i].cbData,
3534 paPartDescs[i].pvPartitionData ? " (data)" : "");
3535 offLast -= 1;
3536
3537 if (i + 1 < pRawDesc->cPartDescs && offLast >= paPartDescs[i + 1].offStartInVDisk)
3538 return vdIfError(pImage->pIfError, VERR_FILESYSTEM_CORRUPT /*?*/, RT_SRC_POS,
3539 N_("VMDK: Image path: '%s'. Partition descriptor #%u (%#RX64 LB %#RX64%s) overlaps with the next (%#RX64 LB %#RX64%s)"),
3540 pImage->pszFilename, i, paPartDescs[i].offStartInVDisk, paPartDescs[i].cbData,
3541 paPartDescs[i].pvPartitionData ? " (data)" : "", paPartDescs[i + 1].offStartInVDisk,
3542 paPartDescs[i + 1].cbData, paPartDescs[i + 1].pvPartitionData ? " (data)" : "");
3543 if (offLast >= cbSize)
3544 return vdIfError(pImage->pIfError, VERR_FILESYSTEM_CORRUPT /*?*/, RT_SRC_POS,
3545 N_("VMDK: Image path: '%s'. Partition descriptor #%u (%#RX64 LB %#RX64%s) goes beyond the end of the drive (%#RX64)"),
3546 pImage->pszFilename, i, paPartDescs[i].offStartInVDisk, paPartDescs[i].cbData,
3547 paPartDescs[i].pvPartitionData ? " (data)" : "", cbSize);
3548 }
3549
3550 return VINF_SUCCESS;
3551}
3552
3553
3554#ifdef RT_OS_LINUX
3555/**
3556 * Searches the dir specified in @a pszBlockDevDir for subdirectories with a
3557 * 'dev' file matching @a uDevToLocate.
3558 *
3559 * This is used both
3560 *
3561 * @returns IPRT status code, errors have been reported properly.
3562 * @param pImage For error reporting.
3563 * @param pszBlockDevDir Input: Path to the directory search under.
3564 * Output: Path to the directory containing information
3565 * for @a uDevToLocate.
3566 * @param cbBlockDevDir The size of the buffer @a pszBlockDevDir points to.
3567 * @param uDevToLocate The device number of the block device info dir to
3568 * locate.
3569 * @param pszDevToLocate For error reporting.
3570 */
3571static int vmdkFindSysBlockDevPath(PVMDKIMAGE pImage, char *pszBlockDevDir, size_t cbBlockDevDir,
3572 dev_t uDevToLocate, const char *pszDevToLocate)
3573{
3574 size_t const cchDir = RTPathEnsureTrailingSeparator(pszBlockDevDir, cbBlockDevDir);
3575 AssertReturn(cchDir > 0, VERR_BUFFER_OVERFLOW);
3576
3577 RTDIR hDir = NIL_RTDIR;
3578 int rc = RTDirOpen(&hDir, pszBlockDevDir);
3579 if (RT_SUCCESS(rc))
3580 {
3581 for (;;)
3582 {
3583 RTDIRENTRY Entry;
3584 rc = RTDirRead(hDir, &Entry, NULL);
3585 if (RT_SUCCESS(rc))
3586 {
3587 /* We're interested in directories and symlinks. */
3588 if ( Entry.enmType == RTDIRENTRYTYPE_DIRECTORY
3589 || Entry.enmType == RTDIRENTRYTYPE_SYMLINK
3590 || Entry.enmType == RTDIRENTRYTYPE_UNKNOWN)
3591 {
3592 rc = RTStrCopy(&pszBlockDevDir[cchDir], cbBlockDevDir - cchDir, Entry.szName);
3593 AssertContinue(RT_SUCCESS(rc)); /* should not happen! */
3594
3595 dev_t uThisDevNo = ~uDevToLocate;
3596 rc = RTLinuxSysFsReadDevNumFile(&uThisDevNo, "%s/dev", pszBlockDevDir);
3597 if (RT_SUCCESS(rc) && uThisDevNo == uDevToLocate)
3598 break;
3599 }
3600 }
3601 else
3602 {
3603 pszBlockDevDir[cchDir] = '\0';
3604 if (rc == VERR_NO_MORE_FILES)
3605 rc = vdIfError(pImage->pIfError, VERR_NOT_FOUND, RT_SRC_POS,
3606 N_("VMDK: Image path: '%s'. Failed to locate device corresponding to '%s' under '%s'"),
3607 pImage->pszFilename, pszDevToLocate, pszBlockDevDir);
3608 else
3609 rc = vdIfError(pImage->pIfError, rc, RT_SRC_POS,
3610 N_("VMDK: Image path: '%s'. RTDirRead failed enumerating '%s': %Rrc"),
3611 pImage->pszFilename, pszBlockDevDir, rc);
3612 break;
3613 }
3614 }
3615 RTDirClose(hDir);
3616 }
3617 else
3618 rc = vdIfError(pImage->pIfError, rc, RT_SRC_POS,
3619 N_("VMDK: Image path: '%s'. Failed to open dir '%s' for listing: %Rrc"),
3620 pImage->pszFilename, pszBlockDevDir, rc);
3621 return rc;
3622}
3623#endif /* RT_OS_LINUX */
3624
3625
3626/**
3627 * Attempts to verify the raw partition path.
3628 *
3629 * We don't want to trust RTDvm and the partition device node morphing blindly.
3630 */
3631static int vmdkRawDescVerifyPartitionPath(PVMDKIMAGE pImage, PVDISKRAWPARTDESC pPartDesc, uint32_t idxPartition,
3632 const char *pszRawDrive, RTFILE hRawDrive, uint32_t cbSector, RTDVMVOLUME hVol)
3633{
3634 RT_NOREF(pImage, pPartDesc, idxPartition, pszRawDrive, hRawDrive, cbSector, hVol);
3635
3636 /*
3637 * Try open the raw partition device.
3638 */
3639 RTFILE hRawPart = NIL_RTFILE;
3640 int rc = RTFileOpen(&hRawPart, pPartDesc->pszRawDevice, RTFILE_O_READ | RTFILE_O_OPEN | RTFILE_O_DENY_NONE);
3641 if (RT_FAILURE(rc))
3642 return vdIfError(pImage->pIfError, rc, RT_SRC_POS,
3643 N_("VMDK: Image path: '%s'. Failed to open partition #%u on '%s' via '%s' (%Rrc)"),
3644 pImage->pszFilename, idxPartition, pszRawDrive, pPartDesc->pszRawDevice, rc);
3645
3646 /*
3647 * Compare the partition UUID if we can get it.
3648 */
3649#ifdef RT_OS_WINDOWS
3650 DWORD cbReturned;
3651
3652 /* 1. Get the device numbers for both handles, they should have the same disk. */
3653 STORAGE_DEVICE_NUMBER DevNum1;
3654 RT_ZERO(DevNum1);
3655 if (!DeviceIoControl((HANDLE)RTFileToNative(hRawDrive), IOCTL_STORAGE_GET_DEVICE_NUMBER,
3656 NULL /*pvInBuffer*/, 0 /*cbInBuffer*/, &DevNum1, sizeof(DevNum1), &cbReturned, NULL /*pOverlapped*/))
3657 rc = vdIfError(pImage->pIfError, RTErrConvertFromWin32(GetLastError()), RT_SRC_POS,
3658 N_("VMDK: Image path: '%s'. IOCTL_STORAGE_GET_DEVICE_NUMBER failed on '%s': %u"),
3659 pImage->pszFilename, pszRawDrive, GetLastError());
3660
3661 STORAGE_DEVICE_NUMBER DevNum2;
3662 RT_ZERO(DevNum2);
3663 if (!DeviceIoControl((HANDLE)RTFileToNative(hRawPart), IOCTL_STORAGE_GET_DEVICE_NUMBER,
3664 NULL /*pvInBuffer*/, 0 /*cbInBuffer*/, &DevNum2, sizeof(DevNum2), &cbReturned, NULL /*pOverlapped*/))
3665 rc = vdIfError(pImage->pIfError, RTErrConvertFromWin32(GetLastError()), RT_SRC_POS,
3666 N_("VMDK: Image path: '%s'. IOCTL_STORAGE_GET_DEVICE_NUMBER failed on '%s': %u"),
3667 pImage->pszFilename, pPartDesc->pszRawDevice, GetLastError());
3668 if ( RT_SUCCESS(rc)
3669 && ( DevNum1.DeviceNumber != DevNum2.DeviceNumber
3670 || DevNum1.DeviceType != DevNum2.DeviceType))
3671 rc = vdIfError(pImage->pIfError, VERR_MISMATCH, RT_SRC_POS,
3672 N_("VMDK: Image path: '%s'. Partition #%u path ('%s') verification failed on '%s' (%#x != %#x || %#x != %#x)"),
3673 pImage->pszFilename, idxPartition, pPartDesc->pszRawDevice, pszRawDrive,
3674 DevNum1.DeviceNumber, DevNum2.DeviceNumber, DevNum1.DeviceType, DevNum2.DeviceType);
3675 if (RT_SUCCESS(rc))
3676 {
3677 /* Get the partitions from the raw drive and match up with the volume info
3678 from RTDvm. The partition number is found in DevNum2. */
3679 DWORD cbNeeded = 0;
3680 if ( DeviceIoControl((HANDLE)RTFileToNative(hRawDrive), IOCTL_DISK_GET_DRIVE_LAYOUT_EX,
3681 NULL /*pvInBuffer*/, 0 /*cbInBuffer*/, NULL, 0, &cbNeeded, NULL /*pOverlapped*/)
3682 || cbNeeded < RT_UOFFSETOF_DYN(DRIVE_LAYOUT_INFORMATION_EX, PartitionEntry[1]))
3683 cbNeeded = RT_UOFFSETOF_DYN(DRIVE_LAYOUT_INFORMATION_EX, PartitionEntry[64]);
3684 cbNeeded += sizeof(PARTITION_INFORMATION_EX) * 2; /* just in case */
3685 DRIVE_LAYOUT_INFORMATION_EX *pLayout = (DRIVE_LAYOUT_INFORMATION_EX *)RTMemTmpAllocZ(cbNeeded);
3686 if (pLayout)
3687 {
3688 cbReturned = 0;
3689 if (DeviceIoControl((HANDLE)RTFileToNative(hRawDrive), IOCTL_DISK_GET_DRIVE_LAYOUT_EX,
3690 NULL /*pvInBuffer*/, 0 /*cbInBuffer*/, pLayout, cbNeeded, &cbReturned, NULL /*pOverlapped*/))
3691 {
3692 /* Find the entry with the given partition number (it's not an index, array contains empty MBR entries ++). */
3693 unsigned iEntry = 0;
3694 while ( iEntry < pLayout->PartitionCount
3695 && pLayout->PartitionEntry[iEntry].PartitionNumber != DevNum2.PartitionNumber)
3696 iEntry++;
3697 if (iEntry < pLayout->PartitionCount)
3698 {
3699 /* Compare the basics */
3700 PARTITION_INFORMATION_EX const * const pLayoutEntry = &pLayout->PartitionEntry[iEntry];
3701 if (pLayoutEntry->StartingOffset.QuadPart != (int64_t)pPartDesc->offStartInVDisk)
3702 rc = vdIfError(pImage->pIfError, VERR_MISMATCH, RT_SRC_POS,
3703 N_("VMDK: Image path: '%s'. Partition #%u path ('%s') verification failed on '%s': StartingOffset %RU64, expected %RU64"),
3704 pImage->pszFilename, idxPartition, pPartDesc->pszRawDevice, pszRawDrive,
3705 pLayoutEntry->StartingOffset.QuadPart, pPartDesc->offStartInVDisk);
3706 else if (pLayoutEntry->PartitionLength.QuadPart != (int64_t)pPartDesc->cbData)
3707 rc = vdIfError(pImage->pIfError, VERR_MISMATCH, RT_SRC_POS,
3708 N_("VMDK: Image path: '%s'. Partition #%u path ('%s') verification failed on '%s': PartitionLength %RU64, expected %RU64"),
3709 pImage->pszFilename, idxPartition, pPartDesc->pszRawDevice, pszRawDrive,
3710 pLayoutEntry->PartitionLength.QuadPart, pPartDesc->cbData);
3711 /** @todo We could compare the MBR type, GPT type and ID. */
3712 RT_NOREF(hVol);
3713 }
3714 else
3715 rc = vdIfError(pImage->pIfError, VERR_MISMATCH, RT_SRC_POS,
3716 N_("VMDK: Image path: '%s'. Partition #%u path ('%s') verification failed on '%s': PartitionCount (%#x vs %#x)"),
3717 pImage->pszFilename, idxPartition, pPartDesc->pszRawDevice, pszRawDrive,
3718 DevNum2.PartitionNumber, pLayout->PartitionCount);
3719# ifndef LOG_ENABLED
3720 if (RT_FAILURE(rc))
3721# endif
3722 {
3723 LogRel(("VMDK: Windows reports %u partitions for '%s':\n", pLayout->PartitionCount, pszRawDrive));
3724 PARTITION_INFORMATION_EX const *pEntry = &pLayout->PartitionEntry[0];
3725 for (DWORD i = 0; i < pLayout->PartitionCount; i++, pEntry++)
3726 {
3727 LogRel(("VMDK: #%u/%u: %016RU64 LB %016RU64 style=%d rewrite=%d",
3728 i, pEntry->PartitionNumber, pEntry->StartingOffset.QuadPart, pEntry->PartitionLength.QuadPart,
3729 pEntry->PartitionStyle, pEntry->RewritePartition));
3730 if (pEntry->PartitionStyle == PARTITION_STYLE_MBR)
3731 LogRel((" type=%#x boot=%d rec=%d hidden=%u\n", pEntry->Mbr.PartitionType, pEntry->Mbr.BootIndicator,
3732 pEntry->Mbr.RecognizedPartition, pEntry->Mbr.HiddenSectors));
3733 else if (pEntry->PartitionStyle == PARTITION_STYLE_GPT)
3734 LogRel((" type=%RTuuid id=%RTuuid aatrib=%RX64 name=%.36ls\n", &pEntry->Gpt.PartitionType,
3735 &pEntry->Gpt.PartitionId, pEntry->Gpt.Attributes, &pEntry->Gpt.Name[0]));
3736 else
3737 LogRel(("\n"));
3738 }
3739 LogRel(("VMDK: Looked for partition #%u (%u, '%s') at %RU64 LB %RU64\n", DevNum2.PartitionNumber,
3740 idxPartition, pPartDesc->pszRawDevice, pPartDesc->offStartInVDisk, pPartDesc->cbData));
3741 }
3742 }
3743 else
3744 rc = vdIfError(pImage->pIfError, RTErrConvertFromWin32(GetLastError()), RT_SRC_POS,
3745 N_("VMDK: Image path: '%s'. IOCTL_DISK_GET_DRIVE_LAYOUT_EX failed on '%s': %u (cb %u, cbRet %u)"),
3746 pImage->pszFilename, pPartDesc->pszRawDevice, GetLastError(), cbNeeded, cbReturned);
3747 RTMemTmpFree(pLayout);
3748 }
3749 else
3750 rc = VERR_NO_TMP_MEMORY;
3751 }
3752
3753#elif defined(RT_OS_LINUX)
3754 RT_NOREF(hVol);
3755
3756 /* Stat the two devices first to get their device numbers. (We probably
3757 could make some assumptions here about the major & minor number assignments
3758 for legacy nodes, but it doesn't hold up for nvme, so we'll skip that.) */
3759 struct stat StDrive, StPart;
3760 if (fstat((int)RTFileToNative(hRawDrive), &StDrive) != 0)
3761 rc = vdIfError(pImage->pIfError, RTErrConvertFromErrno(errno), RT_SRC_POS,
3762 N_("VMDK: Image path: '%s'. fstat failed on '%s': %d"), pImage->pszFilename, pszRawDrive, errno);
3763 else if (fstat((int)RTFileToNative(hRawPart), &StPart) != 0)
3764 rc = vdIfError(pImage->pIfError, RTErrConvertFromErrno(errno), RT_SRC_POS,
3765 N_("VMDK: Image path: '%s'. fstat failed on '%s': %d"), pImage->pszFilename, pPartDesc->pszRawDevice, errno);
3766 else
3767 {
3768 /* Scan the directories immediately under /sys/block/ for one with a
3769 'dev' file matching the drive's device number: */
3770 char szSysPath[RTPATH_MAX];
3771 rc = RTLinuxConstructPath(szSysPath, sizeof(szSysPath), "block/");
3772 AssertRCReturn(rc, rc); /* this shall not fail */
3773 if (RTDirExists(szSysPath))
3774 {
3775 rc = vmdkFindSysBlockDevPath(pImage, szSysPath, sizeof(szSysPath), StDrive.st_rdev, pszRawDrive);
3776
3777 /* Now, scan the directories under that again for a partition device
3778 matching the hRawPart device's number: */
3779 if (RT_SUCCESS(rc))
3780 rc = vmdkFindSysBlockDevPath(pImage, szSysPath, sizeof(szSysPath), StPart.st_rdev, pPartDesc->pszRawDevice);
3781
3782 /* Having found the /sys/block/device/partition/ path, we can finally
3783 read the partition attributes and compare with hVol. */
3784 if (RT_SUCCESS(rc))
3785 {
3786 /* partition number: */
3787 int64_t iLnxPartition = 0;
3788 rc = RTLinuxSysFsReadIntFile(10, &iLnxPartition, "%s/partition", szSysPath);
3789 if (RT_SUCCESS(rc) && iLnxPartition != idxPartition)
3790 rc = vdIfError(pImage->pIfError, VERR_MISMATCH, RT_SRC_POS,
3791 N_("VMDK: Image path: '%s'. Partition #%u path ('%s') verification failed on '%s': Partition number %RI64, expected %RU32"),
3792 pImage->pszFilename, idxPartition, pPartDesc->pszRawDevice, pszRawDrive, iLnxPartition, idxPartition);
3793 /* else: ignore failure? */
3794
3795 /* start offset: */
3796 uint32_t const cbLnxSector = 512; /* It's hardcoded in the Linux kernel */
3797 if (RT_SUCCESS(rc))
3798 {
3799 int64_t offLnxStart = -1;
3800 rc = RTLinuxSysFsReadIntFile(10, &offLnxStart, "%s/start", szSysPath);
3801 offLnxStart *= cbLnxSector;
3802 if (RT_SUCCESS(rc) && offLnxStart != (int64_t)pPartDesc->offStartInVDisk)
3803 rc = vdIfError(pImage->pIfError, VERR_MISMATCH, RT_SRC_POS,
3804 N_("VMDK: Image path: '%s'. Partition #%u path ('%s') verification failed on '%s': Start offset %RI64, expected %RU64"),
3805 pImage->pszFilename, idxPartition, pPartDesc->pszRawDevice, pszRawDrive, offLnxStart, pPartDesc->offStartInVDisk);
3806 /* else: ignore failure? */
3807 }
3808
3809 /* the size: */
3810 if (RT_SUCCESS(rc))
3811 {
3812 int64_t cbLnxData = -1;
3813 rc = RTLinuxSysFsReadIntFile(10, &cbLnxData, "%s/size", szSysPath);
3814 cbLnxData *= cbLnxSector;
3815 if (RT_SUCCESS(rc) && cbLnxData != (int64_t)pPartDesc->cbData)
3816 rc = vdIfError(pImage->pIfError, VERR_MISMATCH, RT_SRC_POS,
3817 N_("VMDK: Image path: '%s'. Partition #%u path ('%s') verification failed on '%s': Size %RI64, expected %RU64"),
3818 pImage->pszFilename, idxPartition, pPartDesc->pszRawDevice, pszRawDrive, cbLnxData, pPartDesc->cbData);
3819 /* else: ignore failure? */
3820 }
3821 }
3822 }
3823 /* else: We've got nothing to work on, so only do content comparison. */
3824 }
3825#else
3826 RT_NOREF(hVol); /* PORTME */
3827#endif
3828 if (RT_SUCCESS(rc))
3829 {
3830 /*
3831 * Compare the first 32 sectors of the partition.
3832 *
3833 * This might not be conclusive, but for partitions formatted with the more
3834 * common file systems it should be as they have a superblock copy at or near
3835 * the start of the partition (fat, fat32, ntfs, and ext4 does at least).
3836 */
3837 size_t const cbToCompare = (size_t)RT_MIN(pPartDesc->cbData / cbSector, 32) * cbSector;
3838 uint8_t *pbSector1 = (uint8_t *)RTMemTmpAlloc(cbToCompare * 2);
3839 if (pbSector1 != NULL)
3840 {
3841 uint8_t *pbSector2 = pbSector1 + cbToCompare;
3842
3843 /* Do the comparing, we repeat if it fails and the data might be volatile. */
3844 uint64_t uPrevCrc1 = 0;
3845 uint64_t uPrevCrc2 = 0;
3846 uint32_t cStable = 0;
3847 for (unsigned iTry = 0; iTry < 256; iTry++)
3848 {
3849 rc = RTFileReadAt(hRawDrive, pPartDesc->offStartInVDisk, pbSector1, cbToCompare, NULL);
3850 if (RT_SUCCESS(rc))
3851 {
3852 rc = RTFileReadAt(hRawPart, pPartDesc->offStartInDevice, pbSector2, cbToCompare, NULL);
3853 if (RT_SUCCESS(rc))
3854 {
3855 if (memcmp(pbSector1, pbSector2, cbToCompare) != 0)
3856 {
3857 rc = VERR_MISMATCH;
3858
3859 /* Do data stability checks before repeating: */
3860 uint64_t const uCrc1 = RTCrc64(pbSector1, cbToCompare);
3861 uint64_t const uCrc2 = RTCrc64(pbSector2, cbToCompare);
3862 if ( uPrevCrc1 != uCrc1
3863 || uPrevCrc2 != uCrc2)
3864 cStable = 0;
3865 else if (++cStable > 4)
3866 break;
3867 uPrevCrc1 = uCrc1;
3868 uPrevCrc2 = uCrc2;
3869 continue;
3870 }
3871 rc = VINF_SUCCESS;
3872 }
3873 else
3874 rc = vdIfError(pImage->pIfError, rc, RT_SRC_POS,
3875 N_("VMDK: Image path: '%s'. Error reading %zu bytes from '%s' at offset %RU64 (%Rrc)"),
3876 pImage->pszFilename, cbToCompare, pPartDesc->pszRawDevice, pPartDesc->offStartInDevice, rc);
3877 }
3878 else
3879 rc = vdIfError(pImage->pIfError, rc, RT_SRC_POS,
3880 N_("VMDK: Image path: '%s'. Error reading %zu bytes from '%s' at offset %RU64 (%Rrc)"),
3881 pImage->pszFilename, cbToCompare, pszRawDrive, pPartDesc->offStartInVDisk, rc);
3882 break;
3883 }
3884 if (rc == VERR_MISMATCH)
3885 {
3886 /* Find the first mismatching bytes: */
3887 size_t offMissmatch = 0;
3888 while (offMissmatch < cbToCompare && pbSector1[offMissmatch] == pbSector2[offMissmatch])
3889 offMissmatch++;
3890 int cbSample = (int)RT_MIN(cbToCompare - offMissmatch, 16);
3891
3892 if (cStable > 0)
3893 rc = vdIfError(pImage->pIfError, rc, RT_SRC_POS,
3894 N_("VMDK: Image path: '%s'. Partition #%u path ('%s') verification failed on '%s' (cStable=%d @%#zx: %.*Rhxs vs %.*Rhxs)"),
3895 pImage->pszFilename, idxPartition, pPartDesc->pszRawDevice, pszRawDrive, cStable,
3896 offMissmatch, cbSample, &pbSector1[offMissmatch], cbSample, &pbSector2[offMissmatch]);
3897 else
3898 {
3899 LogRel(("VMDK: Image path: '%s'. Partition #%u path ('%s') verification undecided on '%s' because of unstable data! (@%#zx: %.*Rhxs vs %.*Rhxs)\n",
3900 pImage->pszFilename, idxPartition, pPartDesc->pszRawDevice, pszRawDrive,
3901 offMissmatch, cbSample, &pbSector1[offMissmatch], cbSample, &pbSector2[offMissmatch]));
3902 rc = -rc;
3903 }
3904 }
3905
3906 RTMemTmpFree(pbSector1);
3907 }
3908 else
3909 rc = vdIfError(pImage->pIfError, VERR_NO_TMP_MEMORY, RT_SRC_POS,
3910 N_("VMDK: Image path: '%s'. Failed to allocate %zu bytes for a temporary read buffer\n"),
3911 pImage->pszFilename, cbToCompare * 2);
3912 }
3913 RTFileClose(hRawPart);
3914 return rc;
3915}
3916
3917#ifdef RT_OS_WINDOWS
3918/**
3919 * Construct the device name for the given partition number.
3920 */
3921static int vmdkRawDescWinMakePartitionName(PVMDKIMAGE pImage, const char *pszRawDrive, RTFILE hRawDrive, uint32_t idxPartition,
3922 char **ppszRawPartition)
3923{
3924 int rc = VINF_SUCCESS;
3925 DWORD cbReturned = 0;
3926 STORAGE_DEVICE_NUMBER DevNum;
3927 RT_ZERO(DevNum);
3928 if (DeviceIoControl((HANDLE)RTFileToNative(hRawDrive), IOCTL_STORAGE_GET_DEVICE_NUMBER,
3929 NULL /*pvInBuffer*/, 0 /*cbInBuffer*/, &DevNum, sizeof(DevNum), &cbReturned, NULL /*pOverlapped*/))
3930 RTStrAPrintf(ppszRawPartition, "\\\\.\\Harddisk%uPartition%u", DevNum.DeviceNumber, idxPartition);
3931 else
3932 rc = vdIfError(pImage->pIfError, RTErrConvertFromWin32(GetLastError()), RT_SRC_POS,
3933 N_("VMDK: Image path: '%s'. IOCTL_STORAGE_GET_DEVICE_NUMBER failed on '%s': %u"),
3934 pImage->pszFilename, pszRawDrive, GetLastError());
3935 return rc;
3936}
3937#endif /* RT_OS_WINDOWS */
3938
3939/**
3940 * Worker for vmdkMakeRawDescriptor that adds partition descriptors when the
3941 * 'Partitions' configuration value is present.
3942 *
3943 * @returns VBox status code, error message has been set on failure.
3944 *
3945 * @note Caller is assumed to clean up @a pRawDesc and release
3946 * @a *phVolToRelease.
3947 * @internal
3948 */
3949static int vmdkRawDescDoPartitions(PVMDKIMAGE pImage, RTDVM hVolMgr, PVDISKRAW pRawDesc,
3950 RTFILE hRawDrive, const char *pszRawDrive, uint32_t cbSector,
3951 uint32_t fPartitions, uint32_t fPartitionsReadOnly, bool fRelative,
3952 PRTDVMVOLUME phVolToRelease)
3953{
3954 *phVolToRelease = NIL_RTDVMVOLUME;
3955
3956 /* Check sanity/understanding. */
3957 Assert(fPartitions);
3958 Assert((fPartitions & fPartitionsReadOnly) == fPartitionsReadOnly); /* RO should be a sub-set */
3959
3960 /*
3961 * Allocate on descriptor for each volume up front.
3962 */
3963 uint32_t const cVolumes = RTDvmMapGetValidVolumes(hVolMgr);
3964
3965 PVDISKRAWPARTDESC paPartDescs = NULL;
3966 int rc = vmdkRawDescAppendPartDesc(pImage, pRawDesc, cVolumes, &paPartDescs);
3967 AssertRCReturn(rc, rc);
3968
3969 /*
3970 * Enumerate the partitions (volumes) on the disk and create descriptors for each of them.
3971 */
3972 uint32_t fPartitionsLeft = fPartitions;
3973 RTDVMVOLUME hVol = NIL_RTDVMVOLUME; /* the current volume, needed for getting the next. */
3974 for (uint32_t i = 0; i < cVolumes; i++)
3975 {
3976 /*
3977 * Get the next/first volume and release the current.
3978 */
3979 RTDVMVOLUME hVolNext = NIL_RTDVMVOLUME;
3980 if (i == 0)
3981 rc = RTDvmMapQueryFirstVolume(hVolMgr, &hVolNext);
3982 else
3983 rc = RTDvmMapQueryNextVolume(hVolMgr, hVol, &hVolNext);
3984 if (RT_FAILURE(rc))
3985 return vdIfError(pImage->pIfError, rc, RT_SRC_POS,
3986 N_("VMDK: Image path: '%s'. Volume enumeration failed at volume #%u on '%s' (%Rrc)"),
3987 pImage->pszFilename, i, pszRawDrive, rc);
3988 uint32_t cRefs = RTDvmVolumeRelease(hVol);
3989 Assert(cRefs != UINT32_MAX); RT_NOREF(cRefs);
3990 *phVolToRelease = hVol = hVolNext;
3991
3992 /*
3993 * Depending on the fPartitions selector and associated read-only mask,
3994 * the guest either gets read-write or read-only access (bits set)
3995 * or no access (selector bit clear, access directed to the VMDK).
3996 */
3997 paPartDescs[i].cbData = RTDvmVolumeGetSize(hVol);
3998
3999 uint64_t offVolumeEndIgnored = 0;
4000 rc = RTDvmVolumeQueryRange(hVol, &paPartDescs[i].offStartInVDisk, &offVolumeEndIgnored);
4001 if (RT_FAILURE(rc))
4002 return vdIfError(pImage->pIfError, rc, RT_SRC_POS,
4003 N_("VMDK: Image path: '%s'. Failed to get location of volume #%u on '%s' (%Rrc)"),
4004 pImage->pszFilename, i, pszRawDrive, rc);
4005 Assert(paPartDescs[i].cbData == offVolumeEndIgnored + 1 - paPartDescs[i].offStartInVDisk);
4006
4007 /* Note! The index must match IHostDrivePartition::number. */
4008 uint32_t idxPartition = RTDvmVolumeGetIndex(hVol, RTDVMVOLIDX_HOST);
4009 if ( idxPartition < 32
4010 && (fPartitions & RT_BIT_32(idxPartition)))
4011 {
4012 fPartitionsLeft &= ~RT_BIT_32(idxPartition);
4013 if (fPartitionsReadOnly & RT_BIT_32(idxPartition))
4014 paPartDescs[i].uFlags |= VDISKRAW_READONLY;
4015
4016 if (!fRelative)
4017 {
4018 /*
4019 * Accessing the drive thru the main device node (pRawDesc->pszRawDisk).
4020 */
4021 paPartDescs[i].offStartInDevice = paPartDescs[i].offStartInVDisk;
4022 paPartDescs[i].pszRawDevice = RTStrDup(pszRawDrive);
4023 AssertPtrReturn(paPartDescs[i].pszRawDevice, VERR_NO_STR_MEMORY);
4024 }
4025 else
4026 {
4027 /*
4028 * Relative means access the partition data via the device node for that
4029 * partition, allowing the sysadmin/OS to allow a user access to individual
4030 * partitions without necessarily being able to compromise the host OS.
4031 * Obviously, the creation of the VMDK requires read access to the main
4032 * device node for the drive, but that's a one-time thing and can be done
4033 * by the sysadmin. Here data starts at offset zero in the device node.
4034 */
4035 paPartDescs[i].offStartInDevice = 0;
4036
4037#if defined(RT_OS_DARWIN) || defined(RT_OS_FREEBSD)
4038 /* /dev/rdisk1 -> /dev/rdisk1s2 (s=slice) */
4039 RTStrAPrintf(&paPartDescs[i].pszRawDevice, "%ss%u", pszRawDrive, idxPartition);
4040#elif defined(RT_OS_LINUX)
4041 /* Two naming schemes here: /dev/nvme0n1 -> /dev/nvme0n1p1; /dev/sda -> /dev/sda1 */
4042 RTStrAPrintf(&paPartDescs[i].pszRawDevice,
4043 RT_C_IS_DIGIT(pszRawDrive[strlen(pszRawDrive) - 1]) ? "%sp%u" : "%s%u", pszRawDrive, idxPartition);
4044#elif defined(RT_OS_WINDOWS)
4045 rc = vmdkRawDescWinMakePartitionName(pImage, pszRawDrive, hRawDrive, idxPartition, &paPartDescs[i].pszRawDevice);
4046 AssertRCReturn(rc, rc);
4047#else
4048 AssertFailedReturn(VERR_INTERNAL_ERROR_4); /* The option parsing code should have prevented this - PORTME */
4049#endif
4050 AssertPtrReturn(paPartDescs[i].pszRawDevice, VERR_NO_STR_MEMORY);
4051
4052 rc = vmdkRawDescVerifyPartitionPath(pImage, &paPartDescs[i], idxPartition, pszRawDrive, hRawDrive, cbSector, hVol);
4053 AssertRCReturn(rc, rc);
4054 }
4055 }
4056 else
4057 {
4058 /* Not accessible to the guest. */
4059 paPartDescs[i].offStartInDevice = 0;
4060 paPartDescs[i].pszRawDevice = NULL;
4061 }
4062 } /* for each volume */
4063
4064 RTDvmVolumeRelease(hVol);
4065 *phVolToRelease = NIL_RTDVMVOLUME;
4066
4067 /*
4068 * Check that we found all the partitions the user selected.
4069 */
4070 if (fPartitionsLeft)
4071 {
4072 char szLeft[3 * sizeof(fPartitions) * 8];
4073 size_t cchLeft = 0;
4074 for (unsigned i = 0; i < sizeof(fPartitions) * 8; i++)
4075 if (fPartitionsLeft & RT_BIT_32(i))
4076 cchLeft += RTStrPrintf(&szLeft[cchLeft], sizeof(szLeft) - cchLeft, cchLeft ? "%u" : ",%u", i);
4077 return vdIfError(pImage->pIfError, VERR_INVALID_PARAMETER, RT_SRC_POS,
4078 N_("VMDK: Image path: '%s'. Not all the specified partitions for drive '%s' was found: %s"),
4079 pImage->pszFilename, pszRawDrive, szLeft);
4080 }
4081
4082 return VINF_SUCCESS;
4083}
4084
4085/**
4086 * Worker for vmdkMakeRawDescriptor that adds partition descriptors with copies
4087 * of the partition tables and associated padding areas when the 'Partitions'
4088 * configuration value is present.
4089 *
4090 * The guest is not allowed access to the partition tables, however it needs
4091 * them to be able to access the drive. So, create descriptors for each of the
4092 * tables and attach the current disk content. vmdkCreateRawImage() will later
4093 * write the content to the VMDK. Any changes the guest later makes to the
4094 * partition tables will then go to the VMDK copy, rather than the host drive.
4095 *
4096 * @returns VBox status code, error message has been set on failure.
4097 *
4098 * @note Caller is assumed to clean up @a pRawDesc
4099 * @internal
4100 */
4101static int vmdkRawDescDoCopyPartitionTables(PVMDKIMAGE pImage, RTDVM hVolMgr, PVDISKRAW pRawDesc,
4102 const char *pszRawDrive, RTFILE hRawDrive, void *pvBootSector, size_t cbBootSector)
4103{
4104 /*
4105 * Query the locations.
4106 */
4107 /* Determin how many locations there are: */
4108 size_t cLocations = 0;
4109 int rc = RTDvmMapQueryTableLocations(hVolMgr, RTDVMMAPQTABLOC_F_INCLUDE_LEGACY, NULL, 0, &cLocations);
4110 if (rc != VERR_BUFFER_OVERFLOW)
4111 return vdIfError(pImage->pIfError, rc, RT_SRC_POS,
4112 N_("VMDK: Image path: '%s'. RTDvmMapQueryTableLocations failed on '%s' (%Rrc)"),
4113 pImage->pszFilename, pszRawDrive, rc);
4114 AssertReturn(cLocations > 0 && cLocations < _16M, VERR_INTERNAL_ERROR_5);
4115
4116 /* We can allocate the partition descriptors here to save an intentation level. */
4117 PVDISKRAWPARTDESC paPartDescs = NULL;
4118 rc = vmdkRawDescAppendPartDesc(pImage, pRawDesc, (uint32_t)cLocations, &paPartDescs);
4119 AssertRCReturn(rc, rc);
4120
4121 /* Allocate the result table and repeat the location table query: */
4122 PRTDVMTABLELOCATION paLocations = (PRTDVMTABLELOCATION)RTMemAllocZ(sizeof(paLocations[0]) * cLocations);
4123 if (!paLocations)
4124 return vdIfError(pImage->pIfError, VERR_NO_MEMORY, RT_SRC_POS, N_("VMDK: Image path: '%s'. Failed to allocate %zu bytes"),
4125 pImage->pszFilename, sizeof(paLocations[0]) * cLocations);
4126 rc = RTDvmMapQueryTableLocations(hVolMgr, RTDVMMAPQTABLOC_F_INCLUDE_LEGACY, paLocations, cLocations, NULL);
4127 if (RT_SUCCESS(rc))
4128 {
4129 /*
4130 * Translate them into descriptors.
4131 *
4132 * We restrict the amount of partition alignment padding to 4MiB as more
4133 * will just be a waste of space. The use case for including the padding
4134 * are older boot loaders and boot manager (including one by a team member)
4135 * that put data and code in the 62 sectors between the MBR and the first
4136 * partition (total of 63). Later CHS was abandond and partition started
4137 * being aligned on power of two sector boundraries (typically 64KiB or
4138 * 1MiB depending on the media size).
4139 */
4140 for (size_t i = 0; i < cLocations && RT_SUCCESS(rc); i++)
4141 {
4142 Assert(paLocations[i].cb > 0);
4143 if (paLocations[i].cb <= _64M)
4144 {
4145 /* Create the partition descriptor entry: */
4146 //paPartDescs[i].pszRawDevice = NULL;
4147 //paPartDescs[i].offStartInDevice = 0;
4148 //paPartDescs[i].uFlags = 0;
4149 paPartDescs[i].offStartInVDisk = paLocations[i].off;
4150 paPartDescs[i].cbData = paLocations[i].cb;
4151 if (paPartDescs[i].cbData < _4M)
4152 paPartDescs[i].cbData = RT_MIN(paPartDescs[i].cbData + paLocations[i].cbPadding, _4M);
4153 paPartDescs[i].pvPartitionData = RTMemAllocZ((size_t)paPartDescs[i].cbData);
4154 if (paPartDescs[i].pvPartitionData)
4155 {
4156 /* Read the content from the drive: */
4157 rc = RTFileReadAt(hRawDrive, paPartDescs[i].offStartInVDisk, paPartDescs[i].pvPartitionData,
4158 (size_t)paPartDescs[i].cbData, NULL);
4159 if (RT_SUCCESS(rc))
4160 {
4161 /* Do we have custom boot sector code? */
4162 if (pvBootSector && cbBootSector && paPartDescs[i].offStartInVDisk == 0)
4163 {
4164 /* Note! Old code used to quietly drop the bootsector if it was considered too big.
4165 Instead we fail as we weren't able to do what the user requested us to do.
4166 Better if the user knows than starts questioning why the guest isn't
4167 booting as expected. */
4168 if (cbBootSector <= paPartDescs[i].cbData)
4169 memcpy(paPartDescs[i].pvPartitionData, pvBootSector, cbBootSector);
4170 else
4171 rc = vdIfError(pImage->pIfError, VERR_TOO_MUCH_DATA, RT_SRC_POS,
4172 N_("VMDK: Image path: '%s'. The custom boot sector is too big: %zu bytes, %RU64 bytes available"),
4173 pImage->pszFilename, cbBootSector, paPartDescs[i].cbData);
4174 }
4175 }
4176 else
4177 rc = vdIfError(pImage->pIfError, rc, RT_SRC_POS,
4178 N_("VMDK: Image path: '%s'. Failed to read partition at off %RU64 length %zu from '%s' (%Rrc)"),
4179 pImage->pszFilename, paPartDescs[i].offStartInVDisk,
4180 (size_t)paPartDescs[i].cbData, pszRawDrive, rc);
4181 }
4182 else
4183 rc = vdIfError(pImage->pIfError, rc, RT_SRC_POS,
4184 N_("VMDK: Image path: '%s'. Failed to allocate %zu bytes for copying the partition table at off %RU64"),
4185 pImage->pszFilename, (size_t)paPartDescs[i].cbData, paPartDescs[i].offStartInVDisk);
4186 }
4187 else
4188 rc = vdIfError(pImage->pIfError, VERR_TOO_MUCH_DATA, RT_SRC_POS,
4189 N_("VMDK: Image path: '%s'. Partition table #%u at offset %RU64 in '%s' is to big: %RU64 bytes"),
4190 pImage->pszFilename, i, paLocations[i].off, pszRawDrive, paLocations[i].cb);
4191 }
4192 }
4193 else
4194 rc = vdIfError(pImage->pIfError, rc, RT_SRC_POS,
4195 N_("VMDK: Image path: '%s'. RTDvmMapQueryTableLocations failed on '%s' (%Rrc)"),
4196 pImage->pszFilename, pszRawDrive, rc);
4197 RTMemFree(paLocations);
4198 return rc;
4199}
4200
4201/**
4202 * Opens the volume manager for the raw drive when in selected-partition mode.
4203 *
4204 * @param pImage The VMDK image (for errors).
4205 * @param hRawDrive The raw drive handle.
4206 * @param pszRawDrive The raw drive device path (for errors).
4207 * @param cbSector The sector size.
4208 * @param phVolMgr Where to return the handle to the volume manager on
4209 * success.
4210 * @returns VBox status code, errors have been reported.
4211 * @internal
4212 */
4213static int vmdkRawDescOpenVolMgr(PVMDKIMAGE pImage, RTFILE hRawDrive, const char *pszRawDrive, uint32_t cbSector, PRTDVM phVolMgr)
4214{
4215 *phVolMgr = NIL_RTDVM;
4216
4217 RTVFSFILE hVfsFile = NIL_RTVFSFILE;
4218 int rc = RTVfsFileFromRTFile(hRawDrive, RTFILE_O_READ | RTFILE_O_OPEN | RTFILE_O_DENY_NONE, true /*fLeaveOpen*/, &hVfsFile);
4219 if (RT_FAILURE(rc))
4220 return vdIfError(pImage->pIfError, rc, RT_SRC_POS,
4221 N_("VMDK: Image path: '%s'. RTVfsFileFromRTFile failed for '%s' handle (%Rrc)"),
4222 pImage->pszFilename, pszRawDrive, rc);
4223
4224 RTDVM hVolMgr = NIL_RTDVM;
4225 rc = RTDvmCreate(&hVolMgr, hVfsFile, cbSector, 0 /*fFlags*/);
4226
4227 RTVfsFileRelease(hVfsFile);
4228
4229 if (RT_FAILURE(rc))
4230 return vdIfError(pImage->pIfError, rc, RT_SRC_POS,
4231 N_("VMDK: Image path: '%s'. Failed to create volume manager instance for '%s' (%Rrc)"),
4232 pImage->pszFilename, pszRawDrive, rc);
4233
4234 rc = RTDvmMapOpen(hVolMgr);
4235 if (RT_SUCCESS(rc))
4236 {
4237 *phVolMgr = hVolMgr;
4238 return VINF_SUCCESS;
4239 }
4240 RTDvmRelease(hVolMgr);
4241 return vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: Image path: '%s'. RTDvmMapOpen failed for '%s' (%Rrc)"),
4242 pImage->pszFilename, pszRawDrive, rc);
4243}
4244
4245/**
4246 * Opens the raw drive device and get the sizes for it.
4247 *
4248 * @param pImage The image (for error reporting).
4249 * @param pszRawDrive The device/whatever to open.
4250 * @param phRawDrive Where to return the file handle.
4251 * @param pcbRawDrive Where to return the size.
4252 * @param pcbSector Where to return the sector size.
4253 * @returns IPRT status code, errors have been reported.
4254 * @internal
4255 */
4256static int vmkdRawDescOpenDevice(PVMDKIMAGE pImage, const char *pszRawDrive,
4257 PRTFILE phRawDrive, uint64_t *pcbRawDrive, uint32_t *pcbSector)
4258{
4259 /*
4260 * Open the device for the raw drive.
4261 */
4262 RTFILE hRawDrive = NIL_RTFILE;
4263 int rc = RTFileOpen(&hRawDrive, pszRawDrive, RTFILE_O_READ | RTFILE_O_OPEN | RTFILE_O_DENY_NONE);
4264 if (RT_FAILURE(rc))
4265 return vdIfError(pImage->pIfError, rc, RT_SRC_POS,
4266 N_("VMDK: Image path: '%s'. Failed to open the raw drive '%s' for reading (%Rrc)"),
4267 pImage->pszFilename, pszRawDrive, rc);
4268
4269 /*
4270 * Get the sector size.
4271 */
4272 uint32_t cbSector = 0;
4273 rc = RTFileQuerySectorSize(hRawDrive, &cbSector);
4274 if (RT_SUCCESS(rc))
4275 {
4276 /* sanity checks */
4277 if ( cbSector >= 512
4278 && cbSector <= _64K
4279 && RT_IS_POWER_OF_TWO(cbSector))
4280 {
4281 /*
4282 * Get the size.
4283 */
4284 uint64_t cbRawDrive = 0;
4285 rc = RTFileQuerySize(hRawDrive, &cbRawDrive);
4286 if (RT_SUCCESS(rc))
4287 {
4288 /* Check whether cbSize is actually sensible. */
4289 if (cbRawDrive > cbSector && (cbRawDrive % cbSector) == 0)
4290 {
4291 *phRawDrive = hRawDrive;
4292 *pcbRawDrive = cbRawDrive;
4293 *pcbSector = cbSector;
4294 return VINF_SUCCESS;
4295 }
4296 rc = vdIfError(pImage->pIfError, VERR_INVALID_PARAMETER, RT_SRC_POS,
4297 N_("VMDK: Image path: '%s'. Got a bogus size for the raw drive '%s': %RU64 (sector size %u)"),
4298 pImage->pszFilename, pszRawDrive, cbRawDrive, cbSector);
4299 }
4300 else
4301 rc = vdIfError(pImage->pIfError, rc, RT_SRC_POS,
4302 N_("VMDK: Image path: '%s'. Failed to query size of the drive '%s' (%Rrc)"),
4303 pImage->pszFilename, pszRawDrive, rc);
4304 }
4305 else
4306 rc = vdIfError(pImage->pIfError, VERR_OUT_OF_RANGE, RT_SRC_POS,
4307 N_("VMDK: Image path: '%s'. Unsupported sector size for '%s': %u (%#x)"),
4308 pImage->pszFilename, pszRawDrive, cbSector, cbSector);
4309 }
4310 else
4311 rc = vdIfError(pImage->pIfError, rc, RT_SRC_POS,
4312 N_("VMDK: Image path: '%s'. Failed to get the sector size for '%s' (%Rrc)"),
4313 pImage->pszFilename, pszRawDrive, rc);
4314 RTFileClose(hRawDrive);
4315 return rc;
4316}
4317
4318/**
4319 * Reads the raw disk configuration, leaving initalization and cleanup to the
4320 * caller (regardless of return status).
4321 *
4322 * @returns VBox status code, errors properly reported.
4323 * @internal
4324 */
4325static int vmdkRawDescParseConfig(PVMDKIMAGE pImage, char **ppszRawDrive,
4326 uint32_t *pfPartitions, uint32_t *pfPartitionsReadOnly,
4327 void **ppvBootSector, size_t *pcbBootSector, bool *pfRelative,
4328 char **ppszFreeMe)
4329{
4330 PVDINTERFACECONFIG pImgCfg = VDIfConfigGet(pImage->pVDIfsImage);
4331 if (!pImgCfg)
4332 return vdIfError(pImage->pIfError, VERR_INVALID_PARAMETER, RT_SRC_POS,
4333 N_("VMDK: Image path: '%s'. Getting config interface failed"), pImage->pszFilename);
4334
4335 /*
4336 * RawDrive = path
4337 */
4338 int rc = VDCFGQueryStringAlloc(pImgCfg, "RawDrive", ppszRawDrive);
4339 if (RT_FAILURE(rc))
4340 return vdIfError(pImage->pIfError, rc, RT_SRC_POS,
4341 N_("VMDK: Image path: '%s'. Getting 'RawDrive' configuration failed (%Rrc)"), pImage->pszFilename, rc);
4342 AssertPtrReturn(*ppszRawDrive, VERR_INTERNAL_ERROR_3);
4343
4344 /*
4345 * Partitions=n[r][,...]
4346 */
4347 uint32_t const cMaxPartitionBits = sizeof(*pfPartitions) * 8 /* ASSUMES 8 bits per char */;
4348 *pfPartitions = *pfPartitionsReadOnly = 0;
4349
4350 rc = VDCFGQueryStringAlloc(pImgCfg, "Partitions", ppszFreeMe);
4351 if (RT_SUCCESS(rc))
4352 {
4353 char *psz = *ppszFreeMe;
4354 while (*psz != '\0')
4355 {
4356 char *pszNext;
4357 uint32_t u32;
4358 rc = RTStrToUInt32Ex(psz, &pszNext, 0, &u32);
4359 if (rc == VWRN_NUMBER_TOO_BIG || rc == VWRN_NEGATIVE_UNSIGNED)
4360 rc = -rc;
4361 if (RT_FAILURE(rc))
4362 return vdIfError(pImage->pIfError, VERR_INVALID_PARAMETER, RT_SRC_POS,
4363 N_("VMDK: Image path: '%s'. Parsing 'Partitions' config value failed. Incorrect value (%Rrc): %s"),
4364 pImage->pszFilename, rc, psz);
4365 if (u32 >= cMaxPartitionBits)
4366 return vdIfError(pImage->pIfError, VERR_INVALID_PARAMETER, RT_SRC_POS,
4367 N_("VMDK: Image path: '%s'. 'Partitions' config sub-value out of range: %RU32, max %RU32"),
4368 pImage->pszFilename, u32, cMaxPartitionBits);
4369 *pfPartitions |= RT_BIT_32(u32);
4370 psz = pszNext;
4371 if (*psz == 'r')
4372 {
4373 *pfPartitionsReadOnly |= RT_BIT_32(u32);
4374 psz++;
4375 }
4376 if (*psz == ',')
4377 psz++;
4378 else if (*psz != '\0')
4379 return vdIfError(pImage->pIfError, VERR_INVALID_PARAMETER, RT_SRC_POS,
4380 N_("VMDK: Image path: '%s'. Malformed 'Partitions' config value, expected separator: %s"),
4381 pImage->pszFilename, psz);
4382 }
4383
4384 RTStrFree(*ppszFreeMe);
4385 *ppszFreeMe = NULL;
4386 }
4387 else if (rc != VERR_CFGM_VALUE_NOT_FOUND)
4388 return vdIfError(pImage->pIfError, rc, RT_SRC_POS,
4389 N_("VMDK: Image path: '%s'. Getting 'Partitions' configuration failed (%Rrc)"), pImage->pszFilename, rc);
4390
4391 /*
4392 * BootSector=base64
4393 */
4394 rc = VDCFGQueryStringAlloc(pImgCfg, "BootSector", ppszFreeMe);
4395 if (RT_SUCCESS(rc))
4396 {
4397 ssize_t cbBootSector = RTBase64DecodedSize(*ppszFreeMe, NULL);
4398 if (cbBootSector < 0)
4399 return vdIfError(pImage->pIfError, VERR_INVALID_BASE64_ENCODING, RT_SRC_POS,
4400 N_("VMDK: Image path: '%s'. BASE64 decoding failed on the custom bootsector for '%s'"),
4401 pImage->pszFilename, *ppszRawDrive);
4402 if (cbBootSector == 0)
4403 return vdIfError(pImage->pIfError, VERR_INVALID_PARAMETER, RT_SRC_POS,
4404 N_("VMDK: Image path: '%s'. Custom bootsector for '%s' is zero bytes big"),
4405 pImage->pszFilename, *ppszRawDrive);
4406 if (cbBootSector > _4M) /* this is just a preliminary max */
4407 return vdIfError(pImage->pIfError, VERR_INVALID_PARAMETER, RT_SRC_POS,
4408 N_("VMDK: Image path: '%s'. Custom bootsector for '%s' is way too big: %zu bytes, max 4MB"),
4409 pImage->pszFilename, *ppszRawDrive, cbBootSector);
4410
4411 /* Refuse the boot sector if whole-drive. This used to be done quietly,
4412 however, bird disagrees and thinks the user should be told that what
4413 he/she/it tries to do isn't possible. There should be less head
4414 scratching this way when the guest doesn't do the expected thing. */
4415 if (!*pfPartitions)
4416 return vdIfError(pImage->pIfError, VERR_INVALID_PARAMETER, RT_SRC_POS,
4417 N_("VMDK: Image path: '%s'. Custom bootsector for '%s' is not supported for whole-drive configurations, only when selecting partitions"),
4418 pImage->pszFilename, *ppszRawDrive);
4419
4420 *pcbBootSector = (size_t)cbBootSector;
4421 *ppvBootSector = RTMemAlloc((size_t)cbBootSector);
4422 if (!*ppvBootSector)
4423 return vdIfError(pImage->pIfError, VERR_NO_MEMORY, RT_SRC_POS,
4424 N_("VMDK: Image path: '%s'. Failed to allocate %zd bytes for the custom bootsector for '%s'"),
4425 pImage->pszFilename, cbBootSector, *ppszRawDrive);
4426
4427 rc = RTBase64Decode(*ppszFreeMe, *ppvBootSector, cbBootSector, NULL /*pcbActual*/, NULL /*ppszEnd*/);
4428 if (RT_FAILURE(rc))
4429 return vdIfError(pImage->pIfError, VERR_NO_MEMORY, RT_SRC_POS,
4430 N_("VMDK: Image path: '%s'. Base64 decoding of the custom boot sector for '%s' failed (%Rrc)"),
4431 pImage->pszFilename, *ppszRawDrive, rc);
4432
4433 RTStrFree(*ppszFreeMe);
4434 *ppszFreeMe = NULL;
4435 }
4436 else if (rc != VERR_CFGM_VALUE_NOT_FOUND)
4437 return vdIfError(pImage->pIfError, rc, RT_SRC_POS,
4438 N_("VMDK: Image path: '%s'. Getting 'BootSector' configuration failed (%Rrc)"), pImage->pszFilename, rc);
4439
4440 /*
4441 * Relative=0/1
4442 */
4443 *pfRelative = false;
4444 rc = VDCFGQueryBool(pImgCfg, "Relative", pfRelative);
4445 if (RT_SUCCESS(rc))
4446 {
4447 if (!*pfPartitions && *pfRelative != false)
4448 return vdIfError(pImage->pIfError, VERR_INVALID_PARAMETER, RT_SRC_POS,
4449 N_("VMDK: Image path: '%s'. The 'Relative' option is not supported for whole-drive configurations, only when selecting partitions"),
4450 pImage->pszFilename);
4451#if !defined(RT_OS_DARWIN) && !defined(RT_OS_LINUX) && !defined(RT_OS_FREEBSD) && !defined(RT_OS_WINDOWS) /* PORTME */
4452 if (*pfRelative == true)
4453 return vdIfError(pImage->pIfError, VERR_INVALID_PARAMETER, RT_SRC_POS,
4454 N_("VMDK: Image path: '%s'. The 'Relative' option is not supported on this host OS"),
4455 pImage->pszFilename);
4456#endif
4457 }
4458 else if (rc != VERR_CFGM_VALUE_NOT_FOUND)
4459 return vdIfError(pImage->pIfError, rc, RT_SRC_POS,
4460 N_("VMDK: Image path: '%s'. Getting 'Relative' configuration failed (%Rrc)"), pImage->pszFilename, rc);
4461 else
4462#ifdef RT_OS_DARWIN /* different default on macOS, see ticketref:1461 (comment 20). */
4463 *pfRelative = true;
4464#else
4465 *pfRelative = false;
4466#endif
4467
4468 return VINF_SUCCESS;
4469}
4470
4471/**
4472 * Creates a raw drive (nee disk) descriptor.
4473 *
4474 * This was originally done in VBoxInternalManage.cpp, but was copied (not move)
4475 * here much later. That's one of the reasons why we produce a descriptor just
4476 * like it does, rather than mixing directly into the vmdkCreateRawImage code.
4477 *
4478 * @returns VBox status code.
4479 * @param pImage The image.
4480 * @param ppRaw Where to return the raw drive descriptor. Caller must
4481 * free it using vmdkRawDescFree regardless of the status
4482 * code.
4483 * @internal
4484 */
4485static int vmdkMakeRawDescriptor(PVMDKIMAGE pImage, PVDISKRAW *ppRaw)
4486{
4487 /* Make sure it's NULL. */
4488 *ppRaw = NULL;
4489
4490 /*
4491 * Read the configuration.
4492 */
4493 char *pszRawDrive = NULL;
4494 uint32_t fPartitions = 0; /* zero if whole-drive */
4495 uint32_t fPartitionsReadOnly = 0; /* (subset of fPartitions) */
4496 void *pvBootSector = NULL;
4497 size_t cbBootSector = 0;
4498 bool fRelative = false;
4499 char *pszFreeMe = NULL; /* lazy bird cleanup. */
4500 int rc = vmdkRawDescParseConfig(pImage, &pszRawDrive, &fPartitions, &fPartitionsReadOnly,
4501 &pvBootSector, &cbBootSector, &fRelative, &pszFreeMe);
4502 RTStrFree(pszFreeMe);
4503 if (RT_SUCCESS(rc))
4504 {
4505 /*
4506 * Open the device, getting the sector size and drive size.
4507 */
4508 uint64_t cbSize = 0;
4509 uint32_t cbSector = 0;
4510 RTFILE hRawDrive = NIL_RTFILE;
4511 rc = vmkdRawDescOpenDevice(pImage, pszRawDrive, &hRawDrive, &cbSize, &cbSector);
4512 if (RT_SUCCESS(rc))
4513 {
4514 /*
4515 * Create the raw-drive descriptor
4516 */
4517 PVDISKRAW pRawDesc = (PVDISKRAW)RTMemAllocZ(sizeof(*pRawDesc));
4518 if (pRawDesc)
4519 {
4520 pRawDesc->szSignature[0] = 'R';
4521 pRawDesc->szSignature[1] = 'A';
4522 pRawDesc->szSignature[2] = 'W';
4523 //pRawDesc->szSignature[3] = '\0';
4524 if (!fPartitions)
4525 {
4526 /*
4527 * It's simple for when doing the whole drive.
4528 */
4529 pRawDesc->uFlags = VDISKRAW_DISK;
4530 rc = RTStrDupEx(&pRawDesc->pszRawDisk, pszRawDrive);
4531 }
4532 else
4533 {
4534 /*
4535 * In selected partitions mode we've got a lot more work ahead of us.
4536 */
4537 pRawDesc->uFlags = VDISKRAW_NORMAL;
4538 //pRawDesc->pszRawDisk = NULL;
4539 //pRawDesc->cPartDescs = 0;
4540 //pRawDesc->pPartDescs = NULL;
4541
4542 /* We need to parse the partition map to complete the descriptor: */
4543 RTDVM hVolMgr = NIL_RTDVM;
4544 rc = vmdkRawDescOpenVolMgr(pImage, hRawDrive, pszRawDrive, cbSector, &hVolMgr);
4545 if (RT_SUCCESS(rc))
4546 {
4547 RTDVMFORMATTYPE enmFormatType = RTDvmMapGetFormatType(hVolMgr);
4548 if ( enmFormatType == RTDVMFORMATTYPE_MBR
4549 || enmFormatType == RTDVMFORMATTYPE_GPT)
4550 {
4551 pRawDesc->enmPartitioningType = enmFormatType == RTDVMFORMATTYPE_MBR
4552 ? VDISKPARTTYPE_MBR : VDISKPARTTYPE_GPT;
4553
4554 /* Add copies of the partition tables: */
4555 rc = vmdkRawDescDoCopyPartitionTables(pImage, hVolMgr, pRawDesc, pszRawDrive, hRawDrive,
4556 pvBootSector, cbBootSector);
4557 if (RT_SUCCESS(rc))
4558 {
4559 /* Add descriptors for the partitions/volumes, indicating which
4560 should be accessible and how to access them: */
4561 RTDVMVOLUME hVolRelease = NIL_RTDVMVOLUME;
4562 rc = vmdkRawDescDoPartitions(pImage, hVolMgr, pRawDesc, hRawDrive, pszRawDrive, cbSector,
4563 fPartitions, fPartitionsReadOnly, fRelative, &hVolRelease);
4564 RTDvmVolumeRelease(hVolRelease);
4565
4566 /* Finally, sort the partition and check consistency (overlaps, etc): */
4567 if (RT_SUCCESS(rc))
4568 rc = vmdkRawDescPostProcessPartitions(pImage, pRawDesc, cbSize);
4569 }
4570 }
4571 else
4572 rc = vdIfError(pImage->pIfError, VERR_NOT_SUPPORTED, RT_SRC_POS,
4573 N_("VMDK: Image path: '%s'. Unsupported partitioning for the disk '%s': %s"),
4574 pImage->pszFilename, pszRawDrive, RTDvmMapGetFormatType(hVolMgr));
4575 RTDvmRelease(hVolMgr);
4576 }
4577 }
4578 if (RT_SUCCESS(rc))
4579 {
4580 /*
4581 * We succeeded.
4582 */
4583 *ppRaw = pRawDesc;
4584 Log(("vmdkMakeRawDescriptor: fFlags=%#x enmPartitioningType=%d cPartDescs=%u pszRawDisk=%s\n",
4585 pRawDesc->uFlags, pRawDesc->enmPartitioningType, pRawDesc->cPartDescs, pRawDesc->pszRawDisk));
4586 if (pRawDesc->cPartDescs)
4587 {
4588 Log(("# VMDK offset Length Device offset PartDataPtr Device\n"));
4589 for (uint32_t i = 0; i < pRawDesc->cPartDescs; i++)
4590 Log(("%2u %14RU64 %14RU64 %14RU64 %#18p %s\n", i, pRawDesc->pPartDescs[i].offStartInVDisk,
4591 pRawDesc->pPartDescs[i].cbData, pRawDesc->pPartDescs[i].offStartInDevice,
4592 pRawDesc->pPartDescs[i].pvPartitionData, pRawDesc->pPartDescs[i].pszRawDevice));
4593 }
4594 }
4595 else
4596 vmdkRawDescFree(pRawDesc);
4597 }
4598 else
4599 rc = vdIfError(pImage->pIfError, VERR_NOT_SUPPORTED, RT_SRC_POS,
4600 N_("VMDK: Image path: '%s'. Failed to allocate %u bytes for the raw drive descriptor"),
4601 pImage->pszFilename, sizeof(*pRawDesc));
4602 RTFileClose(hRawDrive);
4603 }
4604 }
4605 RTStrFree(pszRawDrive);
4606 RTMemFree(pvBootSector);
4607 return rc;
4608}
4609
4610/**
4611 * Internal: create VMDK images for raw disk/partition access.
4612 */
4613static int vmdkCreateRawImage(PVMDKIMAGE pImage, const PVDISKRAW pRaw,
4614 uint64_t cbSize)
4615{
4616 int rc = VINF_SUCCESS;
4617 PVMDKEXTENT pExtent;
4618
4619 if (pRaw->uFlags & VDISKRAW_DISK)
4620 {
4621 /* Full raw disk access. This requires setting up a descriptor
4622 * file and open the (flat) raw disk. */
4623 rc = vmdkCreateExtents(pImage, 1);
4624 if (RT_FAILURE(rc))
4625 return vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: could not create new extent list in '%s'"), pImage->pszFilename);
4626 pExtent = &pImage->pExtents[0];
4627 /* Create raw disk descriptor file. */
4628 rc = vmdkFileOpen(pImage, &pImage->pFile, NULL, pImage->pszFilename,
4629 VDOpenFlagsToFileOpenFlags(pImage->uOpenFlags,
4630 true /* fCreate */));
4631 if (RT_FAILURE(rc))
4632 return vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: could not create new file '%s'"), pImage->pszFilename);
4633
4634 /* Set up basename for extent description. Cannot use StrDup. */
4635 size_t cbBasename = strlen(pRaw->pszRawDisk) + 1;
4636 char *pszBasename = (char *)RTMemTmpAlloc(cbBasename);
4637 if (!pszBasename)
4638 return VERR_NO_MEMORY;
4639 memcpy(pszBasename, pRaw->pszRawDisk, cbBasename);
4640 pExtent->pszBasename = pszBasename;
4641 /* For raw disks the full name is identical to the base name. */
4642 pExtent->pszFullname = RTStrDup(pszBasename);
4643 if (!pExtent->pszFullname)
4644 return VERR_NO_MEMORY;
4645 pExtent->enmType = VMDKETYPE_FLAT;
4646 pExtent->cNominalSectors = VMDK_BYTE2SECTOR(cbSize);
4647 pExtent->uSectorOffset = 0;
4648 pExtent->enmAccess = (pRaw->uFlags & VDISKRAW_READONLY) ? VMDKACCESS_READONLY : VMDKACCESS_READWRITE;
4649 pExtent->fMetaDirty = false;
4650
4651 /* Open flat image, the raw disk. */
4652 rc = vmdkFileOpen(pImage, &pExtent->pFile, pExtent->pszBasename, pExtent->pszFullname,
4653 VDOpenFlagsToFileOpenFlags(pImage->uOpenFlags | ((pExtent->enmAccess == VMDKACCESS_READONLY) ? VD_OPEN_FLAGS_READONLY : 0),
4654 false /* fCreate */));
4655 if (RT_FAILURE(rc))
4656 return vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: could not open raw disk file '%s'"), pExtent->pszFullname);
4657 }
4658 else
4659 {
4660 /* Raw partition access. This requires setting up a descriptor
4661 * file, write the partition information to a flat extent and
4662 * open all the (flat) raw disk partitions. */
4663
4664 /* First pass over the partition data areas to determine how many
4665 * extents we need. One data area can require up to 2 extents, as
4666 * it might be necessary to skip over unpartitioned space. */
4667 unsigned cExtents = 0;
4668 uint64_t uStart = 0;
4669 for (unsigned i = 0; i < pRaw->cPartDescs; i++)
4670 {
4671 PVDISKRAWPARTDESC pPart = &pRaw->pPartDescs[i];
4672 if (uStart > pPart->offStartInVDisk)
4673 return vdIfError(pImage->pIfError, VERR_INVALID_PARAMETER, RT_SRC_POS,
4674 N_("VMDK: incorrect partition data area ordering set up by the caller in '%s'"), pImage->pszFilename);
4675
4676 if (uStart < pPart->offStartInVDisk)
4677 cExtents++;
4678 uStart = pPart->offStartInVDisk + pPart->cbData;
4679 cExtents++;
4680 }
4681 /* Another extent for filling up the rest of the image. */
4682 if (uStart != cbSize)
4683 cExtents++;
4684
4685 rc = vmdkCreateExtents(pImage, cExtents);
4686 if (RT_FAILURE(rc))
4687 return vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: could not create new extent list in '%s'"), pImage->pszFilename);
4688
4689 /* Create raw partition descriptor file. */
4690 rc = vmdkFileOpen(pImage, &pImage->pFile, NULL, pImage->pszFilename,
4691 VDOpenFlagsToFileOpenFlags(pImage->uOpenFlags,
4692 true /* fCreate */));
4693 if (RT_FAILURE(rc))
4694 return vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: could not create new file '%s'"), pImage->pszFilename);
4695
4696 /* Create base filename for the partition table extent. */
4697 /** @todo remove fixed buffer without creating memory leaks. */
4698 char pszPartition[1024];
4699 const char *pszBase = RTPathFilename(pImage->pszFilename);
4700 const char *pszSuff = RTPathSuffix(pszBase);
4701 if (pszSuff == NULL)
4702 return vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: invalid filename '%s'"), pImage->pszFilename);
4703 char *pszBaseBase = RTStrDup(pszBase);
4704 if (!pszBaseBase)
4705 return VERR_NO_MEMORY;
4706 RTPathStripSuffix(pszBaseBase);
4707 RTStrPrintf(pszPartition, sizeof(pszPartition), "%s-pt%s",
4708 pszBaseBase, pszSuff);
4709 RTStrFree(pszBaseBase);
4710
4711 /* Second pass over the partitions, now define all extents. */
4712 uint64_t uPartOffset = 0;
4713 cExtents = 0;
4714 uStart = 0;
4715 for (unsigned i = 0; i < pRaw->cPartDescs; i++)
4716 {
4717 PVDISKRAWPARTDESC pPart = &pRaw->pPartDescs[i];
4718 pExtent = &pImage->pExtents[cExtents++];
4719
4720 if (uStart < pPart->offStartInVDisk)
4721 {
4722 pExtent->pszBasename = NULL;
4723 pExtent->pszFullname = NULL;
4724 pExtent->enmType = VMDKETYPE_ZERO;
4725 pExtent->cNominalSectors = VMDK_BYTE2SECTOR(pPart->offStartInVDisk - uStart);
4726 pExtent->uSectorOffset = 0;
4727 pExtent->enmAccess = VMDKACCESS_READWRITE;
4728 pExtent->fMetaDirty = false;
4729 /* go to next extent */
4730 pExtent = &pImage->pExtents[cExtents++];
4731 }
4732 uStart = pPart->offStartInVDisk + pPart->cbData;
4733
4734 if (pPart->pvPartitionData)
4735 {
4736 /* Set up basename for extent description. Can't use StrDup. */
4737 size_t cbBasename = strlen(pszPartition) + 1;
4738 char *pszBasename = (char *)RTMemTmpAlloc(cbBasename);
4739 if (!pszBasename)
4740 return VERR_NO_MEMORY;
4741 memcpy(pszBasename, pszPartition, cbBasename);
4742 pExtent->pszBasename = pszBasename;
4743
4744 /* Set up full name for partition extent. */
4745 char *pszDirname = RTStrDup(pImage->pszFilename);
4746 if (!pszDirname)
4747 return VERR_NO_STR_MEMORY;
4748 RTPathStripFilename(pszDirname);
4749 char *pszFullname = RTPathJoinA(pszDirname, pExtent->pszBasename);
4750 RTStrFree(pszDirname);
4751 if (!pszFullname)
4752 return VERR_NO_STR_MEMORY;
4753 pExtent->pszFullname = pszFullname;
4754 pExtent->enmType = VMDKETYPE_FLAT;
4755 pExtent->cNominalSectors = VMDK_BYTE2SECTOR(pPart->cbData);
4756 pExtent->uSectorOffset = uPartOffset;
4757 pExtent->enmAccess = VMDKACCESS_READWRITE;
4758 pExtent->fMetaDirty = false;
4759
4760 /* Create partition table flat image. */
4761 rc = vmdkFileOpen(pImage, &pExtent->pFile, pExtent->pszBasename, pExtent->pszFullname,
4762 VDOpenFlagsToFileOpenFlags(pImage->uOpenFlags | ((pExtent->enmAccess == VMDKACCESS_READONLY) ? VD_OPEN_FLAGS_READONLY : 0),
4763 true /* fCreate */));
4764 if (RT_FAILURE(rc))
4765 return vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: could not create new partition data file '%s'"), pExtent->pszFullname);
4766 rc = vdIfIoIntFileWriteSync(pImage->pIfIo, pExtent->pFile->pStorage,
4767 VMDK_SECTOR2BYTE(uPartOffset),
4768 pPart->pvPartitionData,
4769 pPart->cbData);
4770 if (RT_FAILURE(rc))
4771 return vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: could not write partition data to '%s'"), pExtent->pszFullname);
4772 uPartOffset += VMDK_BYTE2SECTOR(pPart->cbData);
4773 }
4774 else
4775 {
4776 if (pPart->pszRawDevice)
4777 {
4778 /* Set up basename for extent descr. Can't use StrDup. */
4779 size_t cbBasename = strlen(pPart->pszRawDevice) + 1;
4780 char *pszBasename = (char *)RTMemTmpAlloc(cbBasename);
4781 if (!pszBasename)
4782 return VERR_NO_MEMORY;
4783 memcpy(pszBasename, pPart->pszRawDevice, cbBasename);
4784 pExtent->pszBasename = pszBasename;
4785 /* For raw disks full name is identical to base name. */
4786 pExtent->pszFullname = RTStrDup(pszBasename);
4787 if (!pExtent->pszFullname)
4788 return VERR_NO_MEMORY;
4789 pExtent->enmType = VMDKETYPE_FLAT;
4790 pExtent->cNominalSectors = VMDK_BYTE2SECTOR(pPart->cbData);
4791 pExtent->uSectorOffset = VMDK_BYTE2SECTOR(pPart->offStartInDevice);
4792 pExtent->enmAccess = (pPart->uFlags & VDISKRAW_READONLY) ? VMDKACCESS_READONLY : VMDKACCESS_READWRITE;
4793 pExtent->fMetaDirty = false;
4794
4795 /* Open flat image, the raw partition. */
4796 rc = vmdkFileOpen(pImage, &pExtent->pFile, pExtent->pszBasename, pExtent->pszFullname,
4797 VDOpenFlagsToFileOpenFlags(pImage->uOpenFlags | ((pExtent->enmAccess == VMDKACCESS_READONLY) ? VD_OPEN_FLAGS_READONLY : 0),
4798 false /* fCreate */));
4799 if (RT_FAILURE(rc))
4800 return vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: could not open raw partition file '%s'"), pExtent->pszFullname);
4801 }
4802 else
4803 {
4804 pExtent->pszBasename = NULL;
4805 pExtent->pszFullname = NULL;
4806 pExtent->enmType = VMDKETYPE_ZERO;
4807 pExtent->cNominalSectors = VMDK_BYTE2SECTOR(pPart->cbData);
4808 pExtent->uSectorOffset = 0;
4809 pExtent->enmAccess = VMDKACCESS_READWRITE;
4810 pExtent->fMetaDirty = false;
4811 }
4812 }
4813 }
4814 /* Another extent for filling up the rest of the image. */
4815 if (uStart != cbSize)
4816 {
4817 pExtent = &pImage->pExtents[cExtents++];
4818 pExtent->pszBasename = NULL;
4819 pExtent->pszFullname = NULL;
4820 pExtent->enmType = VMDKETYPE_ZERO;
4821 pExtent->cNominalSectors = VMDK_BYTE2SECTOR(cbSize - uStart);
4822 pExtent->uSectorOffset = 0;
4823 pExtent->enmAccess = VMDKACCESS_READWRITE;
4824 pExtent->fMetaDirty = false;
4825 }
4826 }
4827
4828 rc = vmdkDescBaseSetStr(pImage, &pImage->Descriptor, "createType",
4829 (pRaw->uFlags & VDISKRAW_DISK) ?
4830 "fullDevice" : "partitionedDevice");
4831 if (RT_FAILURE(rc))
4832 return vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: could not set the image type in '%s'"), pImage->pszFilename);
4833 return rc;
4834}
4835
4836/**
4837 * Internal: create a regular (i.e. file-backed) VMDK image.
4838 */
4839static int vmdkCreateRegularImage(PVMDKIMAGE pImage, uint64_t cbSize,
4840 unsigned uImageFlags, PVDINTERFACEPROGRESS pIfProgress,
4841 unsigned uPercentStart, unsigned uPercentSpan)
4842{
4843 int rc = VINF_SUCCESS;
4844 unsigned cExtents = 1;
4845 uint64_t cbOffset = 0;
4846 uint64_t cbRemaining = cbSize;
4847
4848 if (uImageFlags & VD_VMDK_IMAGE_FLAGS_SPLIT_2G)
4849 {
4850 cExtents = cbSize / VMDK_2G_SPLIT_SIZE;
4851 /* Do proper extent computation: need one smaller extent if the total
4852 * size isn't evenly divisible by the split size. */
4853 if (cbSize % VMDK_2G_SPLIT_SIZE)
4854 cExtents++;
4855 }
4856 rc = vmdkCreateExtents(pImage, cExtents);
4857 if (RT_FAILURE(rc))
4858 return vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: could not create new extent list in '%s'"), pImage->pszFilename);
4859
4860 /* Basename strings needed for constructing the extent names. */
4861 char *pszBasenameSubstr = RTPathFilename(pImage->pszFilename);
4862 AssertPtr(pszBasenameSubstr);
4863 size_t cbBasenameSubstr = strlen(pszBasenameSubstr) + 1;
4864
4865 /* Create separate descriptor file if necessary. */
4866 if (cExtents != 1 || (uImageFlags & VD_IMAGE_FLAGS_FIXED))
4867 {
4868 rc = vmdkFileOpen(pImage, &pImage->pFile, NULL, pImage->pszFilename,
4869 VDOpenFlagsToFileOpenFlags(pImage->uOpenFlags,
4870 true /* fCreate */));
4871 if (RT_FAILURE(rc))
4872 return vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: could not create new sparse descriptor file '%s'"), pImage->pszFilename);
4873 }
4874 else
4875 pImage->pFile = NULL;
4876
4877 /* Set up all extents. */
4878 for (unsigned i = 0; i < cExtents; i++)
4879 {
4880 PVMDKEXTENT pExtent = &pImage->pExtents[i];
4881 uint64_t cbExtent = cbRemaining;
4882
4883 /* Set up fullname/basename for extent description. Cannot use StrDup
4884 * for basename, as it is not guaranteed that the memory can be freed
4885 * with RTMemTmpFree, which must be used as in other code paths
4886 * StrDup is not usable. */
4887 if (cExtents == 1 && !(uImageFlags & VD_IMAGE_FLAGS_FIXED))
4888 {
4889 char *pszBasename = (char *)RTMemTmpAlloc(cbBasenameSubstr);
4890 if (!pszBasename)
4891 return VERR_NO_MEMORY;
4892 memcpy(pszBasename, pszBasenameSubstr, cbBasenameSubstr);
4893 pExtent->pszBasename = pszBasename;
4894 }
4895 else
4896 {
4897 char *pszBasenameSuff = RTPathSuffix(pszBasenameSubstr);
4898 char *pszBasenameBase = RTStrDup(pszBasenameSubstr);
4899 RTPathStripSuffix(pszBasenameBase);
4900 char *pszTmp;
4901 size_t cbTmp;
4902 if (uImageFlags & VD_IMAGE_FLAGS_FIXED)
4903 {
4904 if (cExtents == 1)
4905 RTStrAPrintf(&pszTmp, "%s-flat%s", pszBasenameBase,
4906 pszBasenameSuff);
4907 else
4908 RTStrAPrintf(&pszTmp, "%s-f%03d%s", pszBasenameBase,
4909 i+1, pszBasenameSuff);
4910 }
4911 else
4912 RTStrAPrintf(&pszTmp, "%s-s%03d%s", pszBasenameBase, i+1,
4913 pszBasenameSuff);
4914 RTStrFree(pszBasenameBase);
4915 if (!pszTmp)
4916 return VERR_NO_STR_MEMORY;
4917 cbTmp = strlen(pszTmp) + 1;
4918 char *pszBasename = (char *)RTMemTmpAlloc(cbTmp);
4919 if (!pszBasename)
4920 {
4921 RTStrFree(pszTmp);
4922 return VERR_NO_MEMORY;
4923 }
4924 memcpy(pszBasename, pszTmp, cbTmp);
4925 RTStrFree(pszTmp);
4926 pExtent->pszBasename = pszBasename;
4927 if (uImageFlags & VD_VMDK_IMAGE_FLAGS_SPLIT_2G)
4928 cbExtent = RT_MIN(cbRemaining, VMDK_2G_SPLIT_SIZE);
4929 }
4930 char *pszBasedirectory = RTStrDup(pImage->pszFilename);
4931 if (!pszBasedirectory)
4932 return VERR_NO_STR_MEMORY;
4933 RTPathStripFilename(pszBasedirectory);
4934 char *pszFullname = RTPathJoinA(pszBasedirectory, pExtent->pszBasename);
4935 RTStrFree(pszBasedirectory);
4936 if (!pszFullname)
4937 return VERR_NO_STR_MEMORY;
4938 pExtent->pszFullname = pszFullname;
4939
4940 /* Create file for extent. */
4941 rc = vmdkFileOpen(pImage, &pExtent->pFile, pExtent->pszBasename, pExtent->pszFullname,
4942 VDOpenFlagsToFileOpenFlags(pImage->uOpenFlags,
4943 true /* fCreate */));
4944 if (RT_FAILURE(rc))
4945 return vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: could not create new file '%s'"), pExtent->pszFullname);
4946 if (uImageFlags & VD_IMAGE_FLAGS_FIXED)
4947 {
4948 rc = vdIfIoIntFileSetAllocationSize(pImage->pIfIo, pExtent->pFile->pStorage, cbExtent,
4949 0 /* fFlags */, pIfProgress,
4950 uPercentStart + cbOffset * uPercentSpan / cbSize,
4951 cbExtent * uPercentSpan / cbSize);
4952 if (RT_FAILURE(rc))
4953 return vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: could not set size of new file '%s'"), pExtent->pszFullname);
4954 }
4955
4956 /* Place descriptor file information (where integrated). */
4957 if (cExtents == 1 && !(uImageFlags & VD_IMAGE_FLAGS_FIXED))
4958 {
4959 pExtent->uDescriptorSector = 1;
4960 pExtent->cDescriptorSectors = VMDK_BYTE2SECTOR(pImage->cbDescAlloc);
4961 /* The descriptor is part of the (only) extent. */
4962 pExtent->pDescData = pImage->pDescData;
4963 pImage->pDescData = NULL;
4964 }
4965
4966 if (!(uImageFlags & VD_IMAGE_FLAGS_FIXED))
4967 {
4968 uint64_t cSectorsPerGDE, cSectorsPerGD;
4969 pExtent->enmType = VMDKETYPE_HOSTED_SPARSE;
4970 pExtent->cSectors = VMDK_BYTE2SECTOR(RT_ALIGN_64(cbExtent, _64K));
4971 pExtent->cSectorsPerGrain = VMDK_BYTE2SECTOR(_64K);
4972 pExtent->cGTEntries = 512;
4973 cSectorsPerGDE = pExtent->cGTEntries * pExtent->cSectorsPerGrain;
4974 pExtent->cSectorsPerGDE = cSectorsPerGDE;
4975 pExtent->cGDEntries = (pExtent->cSectors + cSectorsPerGDE - 1) / cSectorsPerGDE;
4976 cSectorsPerGD = (pExtent->cGDEntries + (512 / sizeof(uint32_t) - 1)) / (512 / sizeof(uint32_t));
4977 if (pImage->uImageFlags & VD_VMDK_IMAGE_FLAGS_STREAM_OPTIMIZED)
4978 {
4979 /* The spec says version is 1 for all VMDKs, but the vast
4980 * majority of streamOptimized VMDKs actually contain
4981 * version 3 - so go with the majority. Both are accepted. */
4982 pExtent->uVersion = 3;
4983 pExtent->uCompression = VMDK_COMPRESSION_DEFLATE;
4984 }
4985 }
4986 else
4987 {
4988 if (uImageFlags & VD_VMDK_IMAGE_FLAGS_ESX)
4989 pExtent->enmType = VMDKETYPE_VMFS;
4990 else
4991 pExtent->enmType = VMDKETYPE_FLAT;
4992 }
4993
4994 pExtent->enmAccess = VMDKACCESS_READWRITE;
4995 pExtent->fUncleanShutdown = true;
4996 pExtent->cNominalSectors = VMDK_BYTE2SECTOR(cbExtent);
4997 pExtent->uSectorOffset = 0;
4998 pExtent->fMetaDirty = true;
4999
5000 if (!(uImageFlags & VD_IMAGE_FLAGS_FIXED))
5001 {
5002 /* fPreAlloc should never be false because VMware can't use such images. */
5003 rc = vmdkCreateGrainDirectory(pImage, pExtent,
5004 RT_MAX( pExtent->uDescriptorSector
5005 + pExtent->cDescriptorSectors,
5006 1),
5007 true /* fPreAlloc */);
5008 if (RT_FAILURE(rc))
5009 return vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: could not create new grain directory in '%s'"), pExtent->pszFullname);
5010 }
5011
5012 cbOffset += cbExtent;
5013
5014 if (RT_SUCCESS(rc))
5015 vdIfProgress(pIfProgress, uPercentStart + cbOffset * uPercentSpan / cbSize);
5016
5017 cbRemaining -= cbExtent;
5018 }
5019
5020 if (pImage->uImageFlags & VD_VMDK_IMAGE_FLAGS_ESX)
5021 {
5022 /* VirtualBox doesn't care, but VMWare ESX freaks out if the wrong
5023 * controller type is set in an image. */
5024 rc = vmdkDescDDBSetStr(pImage, &pImage->Descriptor, "ddb.adapterType", "lsilogic");
5025 if (RT_FAILURE(rc))
5026 return vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: could not set controller type to lsilogic in '%s'"), pImage->pszFilename);
5027 }
5028
5029 const char *pszDescType = NULL;
5030 if (uImageFlags & VD_IMAGE_FLAGS_FIXED)
5031 {
5032 if (pImage->uImageFlags & VD_VMDK_IMAGE_FLAGS_ESX)
5033 pszDescType = "vmfs";
5034 else
5035 pszDescType = (cExtents == 1)
5036 ? "monolithicFlat" : "twoGbMaxExtentFlat";
5037 }
5038 else
5039 {
5040 if (pImage->uImageFlags & VD_VMDK_IMAGE_FLAGS_STREAM_OPTIMIZED)
5041 pszDescType = "streamOptimized";
5042 else
5043 {
5044 pszDescType = (cExtents == 1)
5045 ? "monolithicSparse" : "twoGbMaxExtentSparse";
5046 }
5047 }
5048 rc = vmdkDescBaseSetStr(pImage, &pImage->Descriptor, "createType",
5049 pszDescType);
5050 if (RT_FAILURE(rc))
5051 return vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: could not set the image type in '%s'"), pImage->pszFilename);
5052 return rc;
5053}
5054
5055/**
5056 * Internal: Create a real stream optimized VMDK using only linear writes.
5057 */
5058static int vmdkCreateStreamImage(PVMDKIMAGE pImage, uint64_t cbSize)
5059{
5060 int rc = vmdkCreateExtents(pImage, 1);
5061 if (RT_FAILURE(rc))
5062 return vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: could not create new extent list in '%s'"), pImage->pszFilename);
5063
5064 /* Basename strings needed for constructing the extent names. */
5065 const char *pszBasenameSubstr = RTPathFilename(pImage->pszFilename);
5066 AssertPtr(pszBasenameSubstr);
5067 size_t cbBasenameSubstr = strlen(pszBasenameSubstr) + 1;
5068
5069 /* No separate descriptor file. */
5070 pImage->pFile = NULL;
5071
5072 /* Set up all extents. */
5073 PVMDKEXTENT pExtent = &pImage->pExtents[0];
5074
5075 /* Set up fullname/basename for extent description. Cannot use StrDup
5076 * for basename, as it is not guaranteed that the memory can be freed
5077 * with RTMemTmpFree, which must be used as in other code paths
5078 * StrDup is not usable. */
5079 char *pszBasename = (char *)RTMemTmpAlloc(cbBasenameSubstr);
5080 if (!pszBasename)
5081 return VERR_NO_MEMORY;
5082 memcpy(pszBasename, pszBasenameSubstr, cbBasenameSubstr);
5083 pExtent->pszBasename = pszBasename;
5084
5085 char *pszBasedirectory = RTStrDup(pImage->pszFilename);
5086 RTPathStripFilename(pszBasedirectory);
5087 char *pszFullname = RTPathJoinA(pszBasedirectory, pExtent->pszBasename);
5088 RTStrFree(pszBasedirectory);
5089 if (!pszFullname)
5090 return VERR_NO_STR_MEMORY;
5091 pExtent->pszFullname = pszFullname;
5092
5093 /* Create file for extent. Make it write only, no reading allowed. */
5094 rc = vmdkFileOpen(pImage, &pExtent->pFile, pExtent->pszBasename, pExtent->pszFullname,
5095 VDOpenFlagsToFileOpenFlags(pImage->uOpenFlags,
5096 true /* fCreate */)
5097 & ~RTFILE_O_READ);
5098 if (RT_FAILURE(rc))
5099 return vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: could not create new file '%s'"), pExtent->pszFullname);
5100
5101 /* Place descriptor file information. */
5102 pExtent->uDescriptorSector = 1;
5103 pExtent->cDescriptorSectors = VMDK_BYTE2SECTOR(pImage->cbDescAlloc);
5104 /* The descriptor is part of the (only) extent. */
5105 pExtent->pDescData = pImage->pDescData;
5106 pImage->pDescData = NULL;
5107
5108 uint64_t cSectorsPerGDE, cSectorsPerGD;
5109 pExtent->enmType = VMDKETYPE_HOSTED_SPARSE;
5110 pExtent->cSectors = VMDK_BYTE2SECTOR(RT_ALIGN_64(cbSize, _64K));
5111 pExtent->cSectorsPerGrain = VMDK_BYTE2SECTOR(_64K);
5112 pExtent->cGTEntries = 512;
5113 cSectorsPerGDE = pExtent->cGTEntries * pExtent->cSectorsPerGrain;
5114 pExtent->cSectorsPerGDE = cSectorsPerGDE;
5115 pExtent->cGDEntries = (pExtent->cSectors + cSectorsPerGDE - 1) / cSectorsPerGDE;
5116 cSectorsPerGD = (pExtent->cGDEntries + (512 / sizeof(uint32_t) - 1)) / (512 / sizeof(uint32_t));
5117
5118 /* The spec says version is 1 for all VMDKs, but the vast
5119 * majority of streamOptimized VMDKs actually contain
5120 * version 3 - so go with the majority. Both are accepted. */
5121 pExtent->uVersion = 3;
5122 pExtent->uCompression = VMDK_COMPRESSION_DEFLATE;
5123 pExtent->fFooter = true;
5124
5125 pExtent->enmAccess = VMDKACCESS_READONLY;
5126 pExtent->fUncleanShutdown = false;
5127 pExtent->cNominalSectors = VMDK_BYTE2SECTOR(cbSize);
5128 pExtent->uSectorOffset = 0;
5129 pExtent->fMetaDirty = true;
5130
5131 /* Create grain directory, without preallocating it straight away. It will
5132 * be constructed on the fly when writing out the data and written when
5133 * closing the image. The end effect is that the full grain directory is
5134 * allocated, which is a requirement of the VMDK specs. */
5135 rc = vmdkCreateGrainDirectory(pImage, pExtent, VMDK_GD_AT_END,
5136 false /* fPreAlloc */);
5137 if (RT_FAILURE(rc))
5138 return vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: could not create new grain directory in '%s'"), pExtent->pszFullname);
5139
5140 rc = vmdkDescBaseSetStr(pImage, &pImage->Descriptor, "createType",
5141 "streamOptimized");
5142 if (RT_FAILURE(rc))
5143 rc = vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: could not set the image type in '%s'"), pImage->pszFilename);
5144
5145 return rc;
5146}
5147
5148/**
5149 * Initializes the UUID fields in the DDB.
5150 *
5151 * @returns VBox status code.
5152 * @param pImage The VMDK image instance.
5153 */
5154static int vmdkCreateImageDdbUuidsInit(PVMDKIMAGE pImage)
5155{
5156 int rc = vmdkDescDDBSetUuid(pImage, &pImage->Descriptor, VMDK_DDB_IMAGE_UUID, &pImage->ImageUuid);
5157 if (RT_SUCCESS(rc))
5158 {
5159 rc = vmdkDescDDBSetUuid(pImage, &pImage->Descriptor, VMDK_DDB_PARENT_UUID, &pImage->ParentUuid);
5160 if (RT_SUCCESS(rc))
5161 {
5162 rc = vmdkDescDDBSetUuid(pImage, &pImage->Descriptor, VMDK_DDB_MODIFICATION_UUID,
5163 &pImage->ModificationUuid);
5164 if (RT_SUCCESS(rc))
5165 {
5166 rc = vmdkDescDDBSetUuid(pImage, &pImage->Descriptor, VMDK_DDB_PARENT_MODIFICATION_UUID,
5167 &pImage->ParentModificationUuid);
5168 if (RT_FAILURE(rc))
5169 rc = vdIfError(pImage->pIfError, rc, RT_SRC_POS,
5170 N_("VMDK: error storing parent modification UUID in new descriptor in '%s'"), pImage->pszFilename);
5171 }
5172 else
5173 rc = vdIfError(pImage->pIfError, rc, RT_SRC_POS,
5174 N_("VMDK: error storing modification UUID in new descriptor in '%s'"), pImage->pszFilename);
5175 }
5176 else
5177 rc = vdIfError(pImage->pIfError, rc, RT_SRC_POS,
5178 N_("VMDK: error storing parent image UUID in new descriptor in '%s'"), pImage->pszFilename);
5179 }
5180 else
5181 rc = vdIfError(pImage->pIfError, rc, RT_SRC_POS,
5182 N_("VMDK: error storing image UUID in new descriptor in '%s'"), pImage->pszFilename);
5183
5184 return rc;
5185}
5186
5187/**
5188 * Internal: The actual code for creating any VMDK variant currently in
5189 * existence on hosted environments.
5190 */
5191static int vmdkCreateImage(PVMDKIMAGE pImage, uint64_t cbSize,
5192 unsigned uImageFlags, const char *pszComment,
5193 PCVDGEOMETRY pPCHSGeometry,
5194 PCVDGEOMETRY pLCHSGeometry, PCRTUUID pUuid,
5195 PVDINTERFACEPROGRESS pIfProgress,
5196 unsigned uPercentStart, unsigned uPercentSpan)
5197{
5198 pImage->uImageFlags = uImageFlags;
5199
5200 pImage->pIfError = VDIfErrorGet(pImage->pVDIfsDisk);
5201 pImage->pIfIo = VDIfIoIntGet(pImage->pVDIfsImage);
5202 AssertPtrReturn(pImage->pIfIo, VERR_INVALID_PARAMETER);
5203
5204 int rc = vmdkCreateDescriptor(pImage, pImage->pDescData, pImage->cbDescAlloc,
5205 &pImage->Descriptor);
5206 if (RT_SUCCESS(rc))
5207 {
5208 if (uImageFlags & VD_VMDK_IMAGE_FLAGS_RAWDISK)
5209 {
5210 /* Raw disk image (includes raw partition). */
5211 PVDISKRAW pRaw = NULL;
5212 rc = vmdkMakeRawDescriptor(pImage, &pRaw);
5213 if (RT_FAILURE(rc))
5214 return vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: could get raw descriptor for '%s'"), pImage->pszFilename);
5215
5216 rc = vmdkCreateRawImage(pImage, pRaw, cbSize);
5217 vmdkRawDescFree(pRaw);
5218 }
5219 else if (uImageFlags & VD_VMDK_IMAGE_FLAGS_STREAM_OPTIMIZED)
5220 {
5221 /* Stream optimized sparse image (monolithic). */
5222 rc = vmdkCreateStreamImage(pImage, cbSize);
5223 }
5224 else
5225 {
5226 /* Regular fixed or sparse image (monolithic or split). */
5227 rc = vmdkCreateRegularImage(pImage, cbSize, uImageFlags,
5228 pIfProgress, uPercentStart,
5229 uPercentSpan * 95 / 100);
5230 }
5231
5232 if (RT_SUCCESS(rc))
5233 {
5234 vdIfProgress(pIfProgress, uPercentStart + uPercentSpan * 98 / 100);
5235
5236 pImage->cbSize = cbSize;
5237
5238 for (unsigned i = 0; i < pImage->cExtents; i++)
5239 {
5240 PVMDKEXTENT pExtent = &pImage->pExtents[i];
5241
5242 rc = vmdkDescExtInsert(pImage, &pImage->Descriptor, pExtent->enmAccess,
5243 pExtent->cNominalSectors, pExtent->enmType,
5244 pExtent->pszBasename, pExtent->uSectorOffset);
5245 if (RT_FAILURE(rc))
5246 {
5247 rc = vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: could not insert the extent list into descriptor in '%s'"), pImage->pszFilename);
5248 break;
5249 }
5250 }
5251
5252 if (RT_SUCCESS(rc))
5253 vmdkDescExtRemoveDummy(pImage, &pImage->Descriptor);
5254
5255 if ( RT_SUCCESS(rc)
5256 && pPCHSGeometry->cCylinders != 0
5257 && pPCHSGeometry->cHeads != 0
5258 && pPCHSGeometry->cSectors != 0)
5259 rc = vmdkDescSetPCHSGeometry(pImage, pPCHSGeometry);
5260
5261 if ( RT_SUCCESS(rc)
5262 && pLCHSGeometry->cCylinders != 0
5263 && pLCHSGeometry->cHeads != 0
5264 && pLCHSGeometry->cSectors != 0)
5265 rc = vmdkDescSetLCHSGeometry(pImage, pLCHSGeometry);
5266
5267 pImage->LCHSGeometry = *pLCHSGeometry;
5268 pImage->PCHSGeometry = *pPCHSGeometry;
5269
5270 pImage->ImageUuid = *pUuid;
5271 RTUuidClear(&pImage->ParentUuid);
5272 RTUuidClear(&pImage->ModificationUuid);
5273 RTUuidClear(&pImage->ParentModificationUuid);
5274
5275 if (RT_SUCCESS(rc))
5276 rc = vmdkCreateImageDdbUuidsInit(pImage);
5277
5278 if (RT_SUCCESS(rc))
5279 rc = vmdkAllocateGrainTableCache(pImage);
5280
5281 if (RT_SUCCESS(rc))
5282 {
5283 rc = vmdkSetImageComment(pImage, pszComment);
5284 if (RT_FAILURE(rc))
5285 rc = vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: cannot set image comment in '%s'"), pImage->pszFilename);
5286 }
5287
5288 if (RT_SUCCESS(rc))
5289 {
5290 vdIfProgress(pIfProgress, uPercentStart + uPercentSpan * 99 / 100);
5291
5292 if (pImage->uImageFlags & VD_VMDK_IMAGE_FLAGS_STREAM_OPTIMIZED)
5293 {
5294 /* streamOptimized is a bit special, we cannot trigger the flush
5295 * until all data has been written. So we write the necessary
5296 * information explicitly. */
5297 pImage->pExtents[0].cDescriptorSectors = VMDK_BYTE2SECTOR(RT_ALIGN_64( pImage->Descriptor.aLines[pImage->Descriptor.cLines]
5298 - pImage->Descriptor.aLines[0], 512));
5299 rc = vmdkWriteMetaSparseExtent(pImage, &pImage->pExtents[0], 0, NULL);
5300 if (RT_SUCCESS(rc))
5301 {
5302 rc = vmdkWriteDescriptor(pImage, NULL);
5303 if (RT_FAILURE(rc))
5304 rc = vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: cannot write VMDK descriptor in '%s'"), pImage->pszFilename);
5305 }
5306 else
5307 rc = vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: cannot write VMDK header in '%s'"), pImage->pszFilename);
5308 }
5309 else
5310 rc = vmdkFlushImage(pImage, NULL);
5311 }
5312 }
5313 }
5314 else
5315 rc = vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: could not create new descriptor in '%s'"), pImage->pszFilename);
5316
5317
5318 if (RT_SUCCESS(rc))
5319 {
5320 PVDREGIONDESC pRegion = &pImage->RegionList.aRegions[0];
5321 pImage->RegionList.fFlags = 0;
5322 pImage->RegionList.cRegions = 1;
5323
5324 pRegion->offRegion = 0; /* Disk start. */
5325 pRegion->cbBlock = 512;
5326 pRegion->enmDataForm = VDREGIONDATAFORM_RAW;
5327 pRegion->enmMetadataForm = VDREGIONMETADATAFORM_NONE;
5328 pRegion->cbData = 512;
5329 pRegion->cbMetadata = 0;
5330 pRegion->cRegionBlocksOrBytes = pImage->cbSize;
5331
5332 vdIfProgress(pIfProgress, uPercentStart + uPercentSpan);
5333 }
5334 else
5335 vmdkFreeImage(pImage, rc != VERR_ALREADY_EXISTS, false /*fFlush*/);
5336 return rc;
5337}
5338
5339/**
5340 * Internal: Update image comment.
5341 */
5342static int vmdkSetImageComment(PVMDKIMAGE pImage, const char *pszComment)
5343{
5344 char *pszCommentEncoded = NULL;
5345 if (pszComment)
5346 {
5347 pszCommentEncoded = vmdkEncodeString(pszComment);
5348 if (!pszCommentEncoded)
5349 return VERR_NO_MEMORY;
5350 }
5351
5352 int rc = vmdkDescDDBSetStr(pImage, &pImage->Descriptor,
5353 "ddb.comment", pszCommentEncoded);
5354 if (pszCommentEncoded)
5355 RTStrFree(pszCommentEncoded);
5356 if (RT_FAILURE(rc))
5357 return vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: error storing image comment in descriptor in '%s'"), pImage->pszFilename);
5358 return VINF_SUCCESS;
5359}
5360
5361/**
5362 * Internal. Clear the grain table buffer for real stream optimized writing.
5363 */
5364static void vmdkStreamClearGT(PVMDKIMAGE pImage, PVMDKEXTENT pExtent)
5365{
5366 uint32_t cCacheLines = RT_ALIGN(pExtent->cGTEntries, VMDK_GT_CACHELINE_SIZE) / VMDK_GT_CACHELINE_SIZE;
5367 for (uint32_t i = 0; i < cCacheLines; i++)
5368 memset(&pImage->pGTCache->aGTCache[i].aGTData[0], '\0',
5369 VMDK_GT_CACHELINE_SIZE * sizeof(uint32_t));
5370}
5371
5372/**
5373 * Internal. Flush the grain table buffer for real stream optimized writing.
5374 */
5375static int vmdkStreamFlushGT(PVMDKIMAGE pImage, PVMDKEXTENT pExtent,
5376 uint32_t uGDEntry)
5377{
5378 int rc = VINF_SUCCESS;
5379 uint32_t cCacheLines = RT_ALIGN(pExtent->cGTEntries, VMDK_GT_CACHELINE_SIZE) / VMDK_GT_CACHELINE_SIZE;
5380
5381 /* VMware does not write out completely empty grain tables in the case
5382 * of streamOptimized images, which according to my interpretation of
5383 * the VMDK 1.1 spec is bending the rules. Since they do it and we can
5384 * handle it without problems do it the same way and save some bytes. */
5385 bool fAllZero = true;
5386 for (uint32_t i = 0; i < cCacheLines; i++)
5387 {
5388 /* Convert the grain table to little endian in place, as it will not
5389 * be used at all after this function has been called. */
5390 uint32_t *pGTTmp = &pImage->pGTCache->aGTCache[i].aGTData[0];
5391 for (uint32_t j = 0; j < VMDK_GT_CACHELINE_SIZE; j++, pGTTmp++)
5392 if (*pGTTmp)
5393 {
5394 fAllZero = false;
5395 break;
5396 }
5397 if (!fAllZero)
5398 break;
5399 }
5400 if (fAllZero)
5401 return VINF_SUCCESS;
5402
5403 uint64_t uFileOffset = pExtent->uAppendPosition;
5404 if (!uFileOffset)
5405 return VERR_INTERNAL_ERROR;
5406 /* Align to sector, as the previous write could have been any size. */
5407 uFileOffset = RT_ALIGN_64(uFileOffset, 512);
5408
5409 /* Grain table marker. */
5410 uint8_t aMarker[512];
5411 PVMDKMARKER pMarker = (PVMDKMARKER)&aMarker[0];
5412 memset(pMarker, '\0', sizeof(aMarker));
5413 pMarker->uSector = RT_H2LE_U64(VMDK_BYTE2SECTOR((uint64_t)pExtent->cGTEntries * sizeof(uint32_t)));
5414 pMarker->uType = RT_H2LE_U32(VMDK_MARKER_GT);
5415 rc = vdIfIoIntFileWriteSync(pImage->pIfIo, pExtent->pFile->pStorage, uFileOffset,
5416 aMarker, sizeof(aMarker));
5417 AssertRC(rc);
5418 uFileOffset += 512;
5419
5420 if (!pExtent->pGD || pExtent->pGD[uGDEntry])
5421 return VERR_INTERNAL_ERROR;
5422
5423 pExtent->pGD[uGDEntry] = VMDK_BYTE2SECTOR(uFileOffset);
5424
5425 for (uint32_t i = 0; i < cCacheLines; i++)
5426 {
5427 /* Convert the grain table to little endian in place, as it will not
5428 * be used at all after this function has been called. */
5429 uint32_t *pGTTmp = &pImage->pGTCache->aGTCache[i].aGTData[0];
5430 for (uint32_t j = 0; j < VMDK_GT_CACHELINE_SIZE; j++, pGTTmp++)
5431 *pGTTmp = RT_H2LE_U32(*pGTTmp);
5432
5433 rc = vdIfIoIntFileWriteSync(pImage->pIfIo, pExtent->pFile->pStorage, uFileOffset,
5434 &pImage->pGTCache->aGTCache[i].aGTData[0],
5435 VMDK_GT_CACHELINE_SIZE * sizeof(uint32_t));
5436 uFileOffset += VMDK_GT_CACHELINE_SIZE * sizeof(uint32_t);
5437 if (RT_FAILURE(rc))
5438 break;
5439 }
5440 Assert(!(uFileOffset % 512));
5441 pExtent->uAppendPosition = RT_ALIGN_64(uFileOffset, 512);
5442 return rc;
5443}
5444
5445/**
5446 * Internal. Free all allocated space for representing an image, and optionally
5447 * delete the image from disk.
5448 */
5449static int vmdkFreeImage(PVMDKIMAGE pImage, bool fDelete, bool fFlush)
5450{
5451 int rc = VINF_SUCCESS;
5452
5453 /* Freeing a never allocated image (e.g. because the open failed) is
5454 * not signalled as an error. After all nothing bad happens. */
5455 if (pImage)
5456 {
5457 if (!(pImage->uOpenFlags & VD_OPEN_FLAGS_READONLY))
5458 {
5459 if (pImage->uImageFlags & VD_VMDK_IMAGE_FLAGS_STREAM_OPTIMIZED)
5460 {
5461 /* Check if all extents are clean. */
5462 for (unsigned i = 0; i < pImage->cExtents; i++)
5463 {
5464 Assert(!pImage->pExtents[i].fUncleanShutdown);
5465 }
5466 }
5467 else
5468 {
5469 /* Mark all extents as clean. */
5470 for (unsigned i = 0; i < pImage->cExtents; i++)
5471 {
5472 if ( pImage->pExtents[i].enmType == VMDKETYPE_HOSTED_SPARSE
5473 && pImage->pExtents[i].fUncleanShutdown)
5474 {
5475 pImage->pExtents[i].fUncleanShutdown = false;
5476 pImage->pExtents[i].fMetaDirty = true;
5477 }
5478
5479 /* From now on it's not safe to append any more data. */
5480 pImage->pExtents[i].uAppendPosition = 0;
5481 }
5482 }
5483 }
5484
5485 if (pImage->uImageFlags & VD_VMDK_IMAGE_FLAGS_STREAM_OPTIMIZED)
5486 {
5487 /* No need to write any pending data if the file will be deleted
5488 * or if the new file wasn't successfully created. */
5489 if ( !fDelete && pImage->pExtents
5490 && pImage->pExtents[0].cGTEntries
5491 && pImage->pExtents[0].uAppendPosition)
5492 {
5493 PVMDKEXTENT pExtent = &pImage->pExtents[0];
5494 uint32_t uLastGDEntry = pExtent->uLastGrainAccess / pExtent->cGTEntries;
5495 rc = vmdkStreamFlushGT(pImage, pExtent, uLastGDEntry);
5496 AssertRC(rc);
5497 vmdkStreamClearGT(pImage, pExtent);
5498 for (uint32_t i = uLastGDEntry + 1; i < pExtent->cGDEntries; i++)
5499 {
5500 rc = vmdkStreamFlushGT(pImage, pExtent, i);
5501 AssertRC(rc);
5502 }
5503
5504 uint64_t uFileOffset = pExtent->uAppendPosition;
5505 if (!uFileOffset)
5506 return VERR_INTERNAL_ERROR;
5507 uFileOffset = RT_ALIGN_64(uFileOffset, 512);
5508
5509 /* From now on it's not safe to append any more data. */
5510 pExtent->uAppendPosition = 0;
5511
5512 /* Grain directory marker. */
5513 uint8_t aMarker[512];
5514 PVMDKMARKER pMarker = (PVMDKMARKER)&aMarker[0];
5515 memset(pMarker, '\0', sizeof(aMarker));
5516 pMarker->uSector = VMDK_BYTE2SECTOR(RT_ALIGN_64(RT_H2LE_U64((uint64_t)pExtent->cGDEntries * sizeof(uint32_t)), 512));
5517 pMarker->uType = RT_H2LE_U32(VMDK_MARKER_GD);
5518 rc = vdIfIoIntFileWriteSync(pImage->pIfIo, pExtent->pFile->pStorage, uFileOffset,
5519 aMarker, sizeof(aMarker));
5520 AssertRC(rc);
5521 uFileOffset += 512;
5522
5523 /* Write grain directory in little endian style. The array will
5524 * not be used after this, so convert in place. */
5525 uint32_t *pGDTmp = pExtent->pGD;
5526 for (uint32_t i = 0; i < pExtent->cGDEntries; i++, pGDTmp++)
5527 *pGDTmp = RT_H2LE_U32(*pGDTmp);
5528 rc = vdIfIoIntFileWriteSync(pImage->pIfIo, pExtent->pFile->pStorage,
5529 uFileOffset, pExtent->pGD,
5530 pExtent->cGDEntries * sizeof(uint32_t));
5531 AssertRC(rc);
5532
5533 pExtent->uSectorGD = VMDK_BYTE2SECTOR(uFileOffset);
5534 pExtent->uSectorRGD = VMDK_BYTE2SECTOR(uFileOffset);
5535 uFileOffset = RT_ALIGN_64( uFileOffset
5536 + pExtent->cGDEntries * sizeof(uint32_t),
5537 512);
5538
5539 /* Footer marker. */
5540 memset(pMarker, '\0', sizeof(aMarker));
5541 pMarker->uSector = VMDK_BYTE2SECTOR(512);
5542 pMarker->uType = RT_H2LE_U32(VMDK_MARKER_FOOTER);
5543 rc = vdIfIoIntFileWriteSync(pImage->pIfIo, pExtent->pFile->pStorage,
5544 uFileOffset, aMarker, sizeof(aMarker));
5545 AssertRC(rc);
5546
5547 uFileOffset += 512;
5548 rc = vmdkWriteMetaSparseExtent(pImage, pExtent, uFileOffset, NULL);
5549 AssertRC(rc);
5550
5551 uFileOffset += 512;
5552 /* End-of-stream marker. */
5553 memset(pMarker, '\0', sizeof(aMarker));
5554 rc = vdIfIoIntFileWriteSync(pImage->pIfIo, pExtent->pFile->pStorage,
5555 uFileOffset, aMarker, sizeof(aMarker));
5556 AssertRC(rc);
5557 }
5558 }
5559 else if (!fDelete && fFlush)
5560 vmdkFlushImage(pImage, NULL);
5561
5562 if (pImage->pExtents != NULL)
5563 {
5564 for (unsigned i = 0 ; i < pImage->cExtents; i++)
5565 {
5566 int rc2 = vmdkFreeExtentData(pImage, &pImage->pExtents[i], fDelete);
5567 if (RT_SUCCESS(rc))
5568 rc = rc2; /* Propogate any error when closing the file. */
5569 }
5570 RTMemFree(pImage->pExtents);
5571 pImage->pExtents = NULL;
5572 }
5573 pImage->cExtents = 0;
5574 if (pImage->pFile != NULL)
5575 {
5576 int rc2 = vmdkFileClose(pImage, &pImage->pFile, fDelete);
5577 if (RT_SUCCESS(rc))
5578 rc = rc2; /* Propogate any error when closing the file. */
5579 }
5580 int rc2 = vmdkFileCheckAllClose(pImage);
5581 if (RT_SUCCESS(rc))
5582 rc = rc2; /* Propogate any error when closing the file. */
5583
5584 if (pImage->pGTCache)
5585 {
5586 RTMemFree(pImage->pGTCache);
5587 pImage->pGTCache = NULL;
5588 }
5589 if (pImage->pDescData)
5590 {
5591 RTMemFree(pImage->pDescData);
5592 pImage->pDescData = NULL;
5593 }
5594 }
5595
5596 LogFlowFunc(("returns %Rrc\n", rc));
5597 return rc;
5598}
5599
5600/**
5601 * Internal. Flush image data (and metadata) to disk.
5602 */
5603static int vmdkFlushImage(PVMDKIMAGE pImage, PVDIOCTX pIoCtx)
5604{
5605 PVMDKEXTENT pExtent;
5606 int rc = VINF_SUCCESS;
5607
5608 /* Update descriptor if changed. */
5609 if (pImage->Descriptor.fDirty)
5610 rc = vmdkWriteDescriptor(pImage, pIoCtx);
5611
5612 if (RT_SUCCESS(rc))
5613 {
5614 for (unsigned i = 0; i < pImage->cExtents; i++)
5615 {
5616 pExtent = &pImage->pExtents[i];
5617 if (pExtent->pFile != NULL && pExtent->fMetaDirty)
5618 {
5619 switch (pExtent->enmType)
5620 {
5621 case VMDKETYPE_HOSTED_SPARSE:
5622 if (!pExtent->fFooter)
5623 rc = vmdkWriteMetaSparseExtent(pImage, pExtent, 0, pIoCtx);
5624 else
5625 {
5626 uint64_t uFileOffset = pExtent->uAppendPosition;
5627 /* Simply skip writing anything if the streamOptimized
5628 * image hasn't been just created. */
5629 if (!uFileOffset)
5630 break;
5631 uFileOffset = RT_ALIGN_64(uFileOffset, 512);
5632 rc = vmdkWriteMetaSparseExtent(pImage, pExtent,
5633 uFileOffset, pIoCtx);
5634 }
5635 break;
5636 case VMDKETYPE_VMFS:
5637 case VMDKETYPE_FLAT:
5638 /* Nothing to do. */
5639 break;
5640 case VMDKETYPE_ZERO:
5641 default:
5642 AssertMsgFailed(("extent with type %d marked as dirty\n",
5643 pExtent->enmType));
5644 break;
5645 }
5646 }
5647
5648 if (RT_FAILURE(rc))
5649 break;
5650
5651 switch (pExtent->enmType)
5652 {
5653 case VMDKETYPE_HOSTED_SPARSE:
5654 case VMDKETYPE_VMFS:
5655 case VMDKETYPE_FLAT:
5656 /** @todo implement proper path absolute check. */
5657 if ( pExtent->pFile != NULL
5658 && !(pImage->uOpenFlags & VD_OPEN_FLAGS_READONLY)
5659 && !(pExtent->pszBasename[0] == RTPATH_SLASH))
5660 rc = vdIfIoIntFileFlush(pImage->pIfIo, pExtent->pFile->pStorage, pIoCtx,
5661 NULL, NULL);
5662 break;
5663 case VMDKETYPE_ZERO:
5664 /* No need to do anything for this extent. */
5665 break;
5666 default:
5667 AssertMsgFailed(("unknown extent type %d\n", pExtent->enmType));
5668 break;
5669 }
5670 }
5671 }
5672
5673 return rc;
5674}
5675
5676/**
5677 * Internal. Find extent corresponding to the sector number in the disk.
5678 */
5679static int vmdkFindExtent(PVMDKIMAGE pImage, uint64_t offSector,
5680 PVMDKEXTENT *ppExtent, uint64_t *puSectorInExtent)
5681{
5682 PVMDKEXTENT pExtent = NULL;
5683 int rc = VINF_SUCCESS;
5684
5685 for (unsigned i = 0; i < pImage->cExtents; i++)
5686 {
5687 if (offSector < pImage->pExtents[i].cNominalSectors)
5688 {
5689 pExtent = &pImage->pExtents[i];
5690 *puSectorInExtent = offSector + pImage->pExtents[i].uSectorOffset;
5691 break;
5692 }
5693 offSector -= pImage->pExtents[i].cNominalSectors;
5694 }
5695
5696 if (pExtent)
5697 *ppExtent = pExtent;
5698 else
5699 rc = VERR_IO_SECTOR_NOT_FOUND;
5700
5701 return rc;
5702}
5703
5704/**
5705 * Internal. Hash function for placing the grain table hash entries.
5706 */
5707static uint32_t vmdkGTCacheHash(PVMDKGTCACHE pCache, uint64_t uSector,
5708 unsigned uExtent)
5709{
5710 /** @todo this hash function is quite simple, maybe use a better one which
5711 * scrambles the bits better. */
5712 return (uSector + uExtent) % pCache->cEntries;
5713}
5714
5715/**
5716 * Internal. Get sector number in the extent file from the relative sector
5717 * number in the extent.
5718 */
5719static int vmdkGetSector(PVMDKIMAGE pImage, PVDIOCTX pIoCtx,
5720 PVMDKEXTENT pExtent, uint64_t uSector,
5721 uint64_t *puExtentSector)
5722{
5723 PVMDKGTCACHE pCache = pImage->pGTCache;
5724 uint64_t uGDIndex, uGTSector, uGTBlock;
5725 uint32_t uGTHash, uGTBlockIndex;
5726 PVMDKGTCACHEENTRY pGTCacheEntry;
5727 uint32_t aGTDataTmp[VMDK_GT_CACHELINE_SIZE];
5728 int rc;
5729
5730 /* For newly created and readonly/sequentially opened streamOptimized
5731 * images this must be a no-op, as the grain directory is not there. */
5732 if ( ( pImage->uImageFlags & VD_VMDK_IMAGE_FLAGS_STREAM_OPTIMIZED
5733 && pExtent->uAppendPosition)
5734 || ( pImage->uImageFlags & VD_VMDK_IMAGE_FLAGS_STREAM_OPTIMIZED
5735 && pImage->uOpenFlags & VD_OPEN_FLAGS_READONLY
5736 && pImage->uOpenFlags & VD_OPEN_FLAGS_SEQUENTIAL))
5737 {
5738 *puExtentSector = 0;
5739 return VINF_SUCCESS;
5740 }
5741
5742 uGDIndex = uSector / pExtent->cSectorsPerGDE;
5743 if (uGDIndex >= pExtent->cGDEntries)
5744 return VERR_OUT_OF_RANGE;
5745 uGTSector = pExtent->pGD[uGDIndex];
5746 if (!uGTSector)
5747 {
5748 /* There is no grain table referenced by this grain directory
5749 * entry. So there is absolutely no data in this area. */
5750 *puExtentSector = 0;
5751 return VINF_SUCCESS;
5752 }
5753
5754 uGTBlock = uSector / (pExtent->cSectorsPerGrain * VMDK_GT_CACHELINE_SIZE);
5755 uGTHash = vmdkGTCacheHash(pCache, uGTBlock, pExtent->uExtent);
5756 pGTCacheEntry = &pCache->aGTCache[uGTHash];
5757 if ( pGTCacheEntry->uExtent != pExtent->uExtent
5758 || pGTCacheEntry->uGTBlock != uGTBlock)
5759 {
5760 /* Cache miss, fetch data from disk. */
5761 PVDMETAXFER pMetaXfer;
5762 rc = vdIfIoIntFileReadMeta(pImage->pIfIo, pExtent->pFile->pStorage,
5763 VMDK_SECTOR2BYTE(uGTSector) + (uGTBlock % (pExtent->cGTEntries / VMDK_GT_CACHELINE_SIZE)) * sizeof(aGTDataTmp),
5764 aGTDataTmp, sizeof(aGTDataTmp), pIoCtx, &pMetaXfer, NULL, NULL);
5765 if (RT_FAILURE(rc))
5766 return rc;
5767 /* We can release the metadata transfer immediately. */
5768 vdIfIoIntMetaXferRelease(pImage->pIfIo, pMetaXfer);
5769 pGTCacheEntry->uExtent = pExtent->uExtent;
5770 pGTCacheEntry->uGTBlock = uGTBlock;
5771 for (unsigned i = 0; i < VMDK_GT_CACHELINE_SIZE; i++)
5772 pGTCacheEntry->aGTData[i] = RT_LE2H_U32(aGTDataTmp[i]);
5773 }
5774 uGTBlockIndex = (uSector / pExtent->cSectorsPerGrain) % VMDK_GT_CACHELINE_SIZE;
5775 uint32_t uGrainSector = pGTCacheEntry->aGTData[uGTBlockIndex];
5776 if (uGrainSector)
5777 *puExtentSector = uGrainSector + uSector % pExtent->cSectorsPerGrain;
5778 else
5779 *puExtentSector = 0;
5780 return VINF_SUCCESS;
5781}
5782
5783/**
5784 * Internal. Writes the grain and also if necessary the grain tables.
5785 * Uses the grain table cache as a true grain table.
5786 */
5787static int vmdkStreamAllocGrain(PVMDKIMAGE pImage, PVMDKEXTENT pExtent,
5788 uint64_t uSector, PVDIOCTX pIoCtx,
5789 uint64_t cbWrite)
5790{
5791 uint32_t uGrain;
5792 uint32_t uGDEntry, uLastGDEntry;
5793 uint32_t cbGrain = 0;
5794 uint32_t uCacheLine, uCacheEntry;
5795 const void *pData;
5796 int rc;
5797
5798 /* Very strict requirements: always write at least one full grain, with
5799 * proper alignment. Everything else would require reading of already
5800 * written data, which we don't support for obvious reasons. The only
5801 * exception is the last grain, and only if the image size specifies
5802 * that only some portion holds data. In any case the write must be
5803 * within the image limits, no "overshoot" allowed. */
5804 if ( cbWrite == 0
5805 || ( cbWrite < VMDK_SECTOR2BYTE(pExtent->cSectorsPerGrain)
5806 && pExtent->cNominalSectors - uSector >= pExtent->cSectorsPerGrain)
5807 || uSector % pExtent->cSectorsPerGrain
5808 || uSector + VMDK_BYTE2SECTOR(cbWrite) > pExtent->cNominalSectors)
5809 return VERR_INVALID_PARAMETER;
5810
5811 /* Clip write range to at most the rest of the grain. */
5812 cbWrite = RT_MIN(cbWrite, VMDK_SECTOR2BYTE(pExtent->cSectorsPerGrain - uSector % pExtent->cSectorsPerGrain));
5813
5814 /* Do not allow to go back. */
5815 uGrain = uSector / pExtent->cSectorsPerGrain;
5816 uCacheLine = uGrain % pExtent->cGTEntries / VMDK_GT_CACHELINE_SIZE;
5817 uCacheEntry = uGrain % VMDK_GT_CACHELINE_SIZE;
5818 uGDEntry = uGrain / pExtent->cGTEntries;
5819 uLastGDEntry = pExtent->uLastGrainAccess / pExtent->cGTEntries;
5820 if (uGrain < pExtent->uLastGrainAccess)
5821 return VERR_VD_VMDK_INVALID_WRITE;
5822
5823 /* Zero byte write optimization. Since we don't tell VBoxHDD that we need
5824 * to allocate something, we also need to detect the situation ourself. */
5825 if ( !(pImage->uOpenFlags & VD_OPEN_FLAGS_HONOR_ZEROES)
5826 && vdIfIoIntIoCtxIsZero(pImage->pIfIo, pIoCtx, cbWrite, true /* fAdvance */))
5827 return VINF_SUCCESS;
5828
5829 if (uGDEntry != uLastGDEntry)
5830 {
5831 rc = vmdkStreamFlushGT(pImage, pExtent, uLastGDEntry);
5832 if (RT_FAILURE(rc))
5833 return rc;
5834 vmdkStreamClearGT(pImage, pExtent);
5835 for (uint32_t i = uLastGDEntry + 1; i < uGDEntry; i++)
5836 {
5837 rc = vmdkStreamFlushGT(pImage, pExtent, i);
5838 if (RT_FAILURE(rc))
5839 return rc;
5840 }
5841 }
5842
5843 uint64_t uFileOffset;
5844 uFileOffset = pExtent->uAppendPosition;
5845 if (!uFileOffset)
5846 return VERR_INTERNAL_ERROR;
5847 /* Align to sector, as the previous write could have been any size. */
5848 uFileOffset = RT_ALIGN_64(uFileOffset, 512);
5849
5850 /* Paranoia check: extent type, grain table buffer presence and
5851 * grain table buffer space. Also grain table entry must be clear. */
5852 if ( pExtent->enmType != VMDKETYPE_HOSTED_SPARSE
5853 || !pImage->pGTCache
5854 || pExtent->cGTEntries > VMDK_GT_CACHE_SIZE * VMDK_GT_CACHELINE_SIZE
5855 || pImage->pGTCache->aGTCache[uCacheLine].aGTData[uCacheEntry])
5856 return VERR_INTERNAL_ERROR;
5857
5858 /* Update grain table entry. */
5859 pImage->pGTCache->aGTCache[uCacheLine].aGTData[uCacheEntry] = VMDK_BYTE2SECTOR(uFileOffset);
5860
5861 if (cbWrite != VMDK_SECTOR2BYTE(pExtent->cSectorsPerGrain))
5862 {
5863 vdIfIoIntIoCtxCopyFrom(pImage->pIfIo, pIoCtx, pExtent->pvGrain, cbWrite);
5864 memset((char *)pExtent->pvGrain + cbWrite, '\0',
5865 VMDK_SECTOR2BYTE(pExtent->cSectorsPerGrain) - cbWrite);
5866 pData = pExtent->pvGrain;
5867 }
5868 else
5869 {
5870 RTSGSEG Segment;
5871 unsigned cSegments = 1;
5872 size_t cbSeg = 0;
5873
5874 cbSeg = vdIfIoIntIoCtxSegArrayCreate(pImage->pIfIo, pIoCtx, &Segment,
5875 &cSegments, VMDK_SECTOR2BYTE(pExtent->cSectorsPerGrain));
5876 Assert(cbSeg == VMDK_SECTOR2BYTE(pExtent->cSectorsPerGrain));
5877 pData = Segment.pvSeg;
5878 }
5879 rc = vmdkFileDeflateSync(pImage, pExtent, uFileOffset, pData,
5880 VMDK_SECTOR2BYTE(pExtent->cSectorsPerGrain),
5881 uSector, &cbGrain);
5882 if (RT_FAILURE(rc))
5883 {
5884 pExtent->uGrainSectorAbs = 0;
5885 AssertRC(rc);
5886 return vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: cannot write compressed data block in '%s'"), pExtent->pszFullname);
5887 }
5888 pExtent->uLastGrainAccess = uGrain;
5889 pExtent->uAppendPosition += cbGrain;
5890
5891 return rc;
5892}
5893
5894/**
5895 * Internal: Updates the grain table during grain allocation.
5896 */
5897static int vmdkAllocGrainGTUpdate(PVMDKIMAGE pImage, PVMDKEXTENT pExtent, PVDIOCTX pIoCtx,
5898 PVMDKGRAINALLOCASYNC pGrainAlloc)
5899{
5900 int rc = VINF_SUCCESS;
5901 PVMDKGTCACHE pCache = pImage->pGTCache;
5902 uint32_t aGTDataTmp[VMDK_GT_CACHELINE_SIZE];
5903 uint32_t uGTHash, uGTBlockIndex;
5904 uint64_t uGTSector, uRGTSector, uGTBlock;
5905 uint64_t uSector = pGrainAlloc->uSector;
5906 PVMDKGTCACHEENTRY pGTCacheEntry;
5907
5908 LogFlowFunc(("pImage=%#p pExtent=%#p pCache=%#p pIoCtx=%#p pGrainAlloc=%#p\n",
5909 pImage, pExtent, pCache, pIoCtx, pGrainAlloc));
5910
5911 uGTSector = pGrainAlloc->uGTSector;
5912 uRGTSector = pGrainAlloc->uRGTSector;
5913 LogFlow(("uGTSector=%llu uRGTSector=%llu\n", uGTSector, uRGTSector));
5914
5915 /* Update the grain table (and the cache). */
5916 uGTBlock = uSector / (pExtent->cSectorsPerGrain * VMDK_GT_CACHELINE_SIZE);
5917 uGTHash = vmdkGTCacheHash(pCache, uGTBlock, pExtent->uExtent);
5918 pGTCacheEntry = &pCache->aGTCache[uGTHash];
5919 if ( pGTCacheEntry->uExtent != pExtent->uExtent
5920 || pGTCacheEntry->uGTBlock != uGTBlock)
5921 {
5922 /* Cache miss, fetch data from disk. */
5923 LogFlow(("Cache miss, fetch data from disk\n"));
5924 PVDMETAXFER pMetaXfer = NULL;
5925 rc = vdIfIoIntFileReadMeta(pImage->pIfIo, pExtent->pFile->pStorage,
5926 VMDK_SECTOR2BYTE(uGTSector) + (uGTBlock % (pExtent->cGTEntries / VMDK_GT_CACHELINE_SIZE)) * sizeof(aGTDataTmp),
5927 aGTDataTmp, sizeof(aGTDataTmp), pIoCtx,
5928 &pMetaXfer, vmdkAllocGrainComplete, pGrainAlloc);
5929 if (rc == VERR_VD_ASYNC_IO_IN_PROGRESS)
5930 {
5931 pGrainAlloc->cIoXfersPending++;
5932 pGrainAlloc->fGTUpdateNeeded = true;
5933 /* Leave early, we will be called again after the read completed. */
5934 LogFlowFunc(("Metadata read in progress, leaving\n"));
5935 return rc;
5936 }
5937 else if (RT_FAILURE(rc))
5938 return vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: cannot read allocated grain table entry in '%s'"), pExtent->pszFullname);
5939 vdIfIoIntMetaXferRelease(pImage->pIfIo, pMetaXfer);
5940 pGTCacheEntry->uExtent = pExtent->uExtent;
5941 pGTCacheEntry->uGTBlock = uGTBlock;
5942 for (unsigned i = 0; i < VMDK_GT_CACHELINE_SIZE; i++)
5943 pGTCacheEntry->aGTData[i] = RT_LE2H_U32(aGTDataTmp[i]);
5944 }
5945 else
5946 {
5947 /* Cache hit. Convert grain table block back to disk format, otherwise
5948 * the code below will write garbage for all but the updated entry. */
5949 for (unsigned i = 0; i < VMDK_GT_CACHELINE_SIZE; i++)
5950 aGTDataTmp[i] = RT_H2LE_U32(pGTCacheEntry->aGTData[i]);
5951 }
5952 pGrainAlloc->fGTUpdateNeeded = false;
5953 uGTBlockIndex = (uSector / pExtent->cSectorsPerGrain) % VMDK_GT_CACHELINE_SIZE;
5954 aGTDataTmp[uGTBlockIndex] = RT_H2LE_U32(VMDK_BYTE2SECTOR(pGrainAlloc->uGrainOffset));
5955 pGTCacheEntry->aGTData[uGTBlockIndex] = VMDK_BYTE2SECTOR(pGrainAlloc->uGrainOffset);
5956 /* Update grain table on disk. */
5957 rc = vdIfIoIntFileWriteMeta(pImage->pIfIo, pExtent->pFile->pStorage,
5958 VMDK_SECTOR2BYTE(uGTSector) + (uGTBlock % (pExtent->cGTEntries / VMDK_GT_CACHELINE_SIZE)) * sizeof(aGTDataTmp),
5959 aGTDataTmp, sizeof(aGTDataTmp), pIoCtx,
5960 vmdkAllocGrainComplete, pGrainAlloc);
5961 if (rc == VERR_VD_ASYNC_IO_IN_PROGRESS)
5962 pGrainAlloc->cIoXfersPending++;
5963 else if (RT_FAILURE(rc))
5964 return vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: cannot write updated grain table in '%s'"), pExtent->pszFullname);
5965 if (pExtent->pRGD)
5966 {
5967 /* Update backup grain table on disk. */
5968 rc = vdIfIoIntFileWriteMeta(pImage->pIfIo, pExtent->pFile->pStorage,
5969 VMDK_SECTOR2BYTE(uRGTSector) + (uGTBlock % (pExtent->cGTEntries / VMDK_GT_CACHELINE_SIZE)) * sizeof(aGTDataTmp),
5970 aGTDataTmp, sizeof(aGTDataTmp), pIoCtx,
5971 vmdkAllocGrainComplete, pGrainAlloc);
5972 if (rc == VERR_VD_ASYNC_IO_IN_PROGRESS)
5973 pGrainAlloc->cIoXfersPending++;
5974 else if (RT_FAILURE(rc))
5975 return vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: cannot write updated backup grain table in '%s'"), pExtent->pszFullname);
5976 }
5977
5978 LogFlowFunc(("leaving rc=%Rrc\n", rc));
5979 return rc;
5980}
5981
5982/**
5983 * Internal - complete the grain allocation by updating disk grain table if required.
5984 */
5985static DECLCALLBACK(int) vmdkAllocGrainComplete(void *pBackendData, PVDIOCTX pIoCtx, void *pvUser, int rcReq)
5986{
5987 RT_NOREF1(rcReq);
5988 int rc = VINF_SUCCESS;
5989 PVMDKIMAGE pImage = (PVMDKIMAGE)pBackendData;
5990 PVMDKGRAINALLOCASYNC pGrainAlloc = (PVMDKGRAINALLOCASYNC)pvUser;
5991
5992 LogFlowFunc(("pBackendData=%#p pIoCtx=%#p pvUser=%#p rcReq=%Rrc\n",
5993 pBackendData, pIoCtx, pvUser, rcReq));
5994
5995 pGrainAlloc->cIoXfersPending--;
5996 if (!pGrainAlloc->cIoXfersPending && pGrainAlloc->fGTUpdateNeeded)
5997 rc = vmdkAllocGrainGTUpdate(pImage, pGrainAlloc->pExtent, pIoCtx, pGrainAlloc);
5998
5999 if (!pGrainAlloc->cIoXfersPending)
6000 {
6001 /* Grain allocation completed. */
6002 RTMemFree(pGrainAlloc);
6003 }
6004
6005 LogFlowFunc(("Leaving rc=%Rrc\n", rc));
6006 return rc;
6007}
6008
6009/**
6010 * Internal. Allocates a new grain table (if necessary).
6011 */
6012static int vmdkAllocGrain(PVMDKIMAGE pImage, PVMDKEXTENT pExtent, PVDIOCTX pIoCtx,
6013 uint64_t uSector, uint64_t cbWrite)
6014{
6015 PVMDKGTCACHE pCache = pImage->pGTCache; NOREF(pCache);
6016 uint64_t uGDIndex, uGTSector, uRGTSector;
6017 uint64_t uFileOffset;
6018 PVMDKGRAINALLOCASYNC pGrainAlloc = NULL;
6019 int rc;
6020
6021 LogFlowFunc(("pCache=%#p pExtent=%#p pIoCtx=%#p uSector=%llu cbWrite=%llu\n",
6022 pCache, pExtent, pIoCtx, uSector, cbWrite));
6023
6024 pGrainAlloc = (PVMDKGRAINALLOCASYNC)RTMemAllocZ(sizeof(VMDKGRAINALLOCASYNC));
6025 if (!pGrainAlloc)
6026 return VERR_NO_MEMORY;
6027
6028 pGrainAlloc->pExtent = pExtent;
6029 pGrainAlloc->uSector = uSector;
6030
6031 uGDIndex = uSector / pExtent->cSectorsPerGDE;
6032 if (uGDIndex >= pExtent->cGDEntries)
6033 {
6034 RTMemFree(pGrainAlloc);
6035 return VERR_OUT_OF_RANGE;
6036 }
6037 uGTSector = pExtent->pGD[uGDIndex];
6038 if (pExtent->pRGD)
6039 uRGTSector = pExtent->pRGD[uGDIndex];
6040 else
6041 uRGTSector = 0; /**< avoid compiler warning */
6042 if (!uGTSector)
6043 {
6044 LogFlow(("Allocating new grain table\n"));
6045
6046 /* There is no grain table referenced by this grain directory
6047 * entry. So there is absolutely no data in this area. Allocate
6048 * a new grain table and put the reference to it in the GDs. */
6049 uFileOffset = pExtent->uAppendPosition;
6050 if (!uFileOffset)
6051 {
6052 RTMemFree(pGrainAlloc);
6053 return VERR_INTERNAL_ERROR;
6054 }
6055 Assert(!(uFileOffset % 512));
6056
6057 uFileOffset = RT_ALIGN_64(uFileOffset, 512);
6058 uGTSector = VMDK_BYTE2SECTOR(uFileOffset);
6059
6060 /* Normally the grain table is preallocated for hosted sparse extents
6061 * that support more than 32 bit sector numbers. So this shouldn't
6062 * ever happen on a valid extent. */
6063 if (uGTSector > UINT32_MAX)
6064 {
6065 RTMemFree(pGrainAlloc);
6066 return VERR_VD_VMDK_INVALID_HEADER;
6067 }
6068
6069 /* Write grain table by writing the required number of grain table
6070 * cache chunks. Allocate memory dynamically here or we flood the
6071 * metadata cache with very small entries. */
6072 size_t cbGTDataTmp = pExtent->cGTEntries * sizeof(uint32_t);
6073 uint32_t *paGTDataTmp = (uint32_t *)RTMemTmpAllocZ(cbGTDataTmp);
6074
6075 if (!paGTDataTmp)
6076 {
6077 RTMemFree(pGrainAlloc);
6078 return VERR_NO_MEMORY;
6079 }
6080
6081 memset(paGTDataTmp, '\0', cbGTDataTmp);
6082 rc = vdIfIoIntFileWriteMeta(pImage->pIfIo, pExtent->pFile->pStorage,
6083 VMDK_SECTOR2BYTE(uGTSector),
6084 paGTDataTmp, cbGTDataTmp, pIoCtx,
6085 vmdkAllocGrainComplete, pGrainAlloc);
6086 if (rc == VERR_VD_ASYNC_IO_IN_PROGRESS)
6087 pGrainAlloc->cIoXfersPending++;
6088 else if (RT_FAILURE(rc))
6089 {
6090 RTMemTmpFree(paGTDataTmp);
6091 RTMemFree(pGrainAlloc);
6092 return vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: cannot write grain table allocation in '%s'"), pExtent->pszFullname);
6093 }
6094 pExtent->uAppendPosition = RT_ALIGN_64( pExtent->uAppendPosition
6095 + cbGTDataTmp, 512);
6096
6097 if (pExtent->pRGD)
6098 {
6099 AssertReturn(!uRGTSector, VERR_VD_VMDK_INVALID_HEADER);
6100 uFileOffset = pExtent->uAppendPosition;
6101 if (!uFileOffset)
6102 return VERR_INTERNAL_ERROR;
6103 Assert(!(uFileOffset % 512));
6104 uRGTSector = VMDK_BYTE2SECTOR(uFileOffset);
6105
6106 /* Normally the redundant grain table is preallocated for hosted
6107 * sparse extents that support more than 32 bit sector numbers. So
6108 * this shouldn't ever happen on a valid extent. */
6109 if (uRGTSector > UINT32_MAX)
6110 {
6111 RTMemTmpFree(paGTDataTmp);
6112 return VERR_VD_VMDK_INVALID_HEADER;
6113 }
6114
6115 /* Write grain table by writing the required number of grain table
6116 * cache chunks. Allocate memory dynamically here or we flood the
6117 * metadata cache with very small entries. */
6118 rc = vdIfIoIntFileWriteMeta(pImage->pIfIo, pExtent->pFile->pStorage,
6119 VMDK_SECTOR2BYTE(uRGTSector),
6120 paGTDataTmp, cbGTDataTmp, pIoCtx,
6121 vmdkAllocGrainComplete, pGrainAlloc);
6122 if (rc == VERR_VD_ASYNC_IO_IN_PROGRESS)
6123 pGrainAlloc->cIoXfersPending++;
6124 else if (RT_FAILURE(rc))
6125 {
6126 RTMemTmpFree(paGTDataTmp);
6127 return vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: cannot write backup grain table allocation in '%s'"), pExtent->pszFullname);
6128 }
6129
6130 pExtent->uAppendPosition = pExtent->uAppendPosition + cbGTDataTmp;
6131 }
6132
6133 RTMemTmpFree(paGTDataTmp);
6134
6135 /* Update the grain directory on disk (doing it before writing the
6136 * grain table will result in a garbled extent if the operation is
6137 * aborted for some reason. Otherwise the worst that can happen is
6138 * some unused sectors in the extent. */
6139 uint32_t uGTSectorLE = RT_H2LE_U64(uGTSector);
6140 rc = vdIfIoIntFileWriteMeta(pImage->pIfIo, pExtent->pFile->pStorage,
6141 VMDK_SECTOR2BYTE(pExtent->uSectorGD) + uGDIndex * sizeof(uGTSectorLE),
6142 &uGTSectorLE, sizeof(uGTSectorLE), pIoCtx,
6143 vmdkAllocGrainComplete, pGrainAlloc);
6144 if (rc == VERR_VD_ASYNC_IO_IN_PROGRESS)
6145 pGrainAlloc->cIoXfersPending++;
6146 else if (RT_FAILURE(rc))
6147 return vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: cannot write grain directory entry in '%s'"), pExtent->pszFullname);
6148 if (pExtent->pRGD)
6149 {
6150 uint32_t uRGTSectorLE = RT_H2LE_U64(uRGTSector);
6151 rc = vdIfIoIntFileWriteMeta(pImage->pIfIo, pExtent->pFile->pStorage,
6152 VMDK_SECTOR2BYTE(pExtent->uSectorRGD) + uGDIndex * sizeof(uGTSectorLE),
6153 &uRGTSectorLE, sizeof(uRGTSectorLE), pIoCtx,
6154 vmdkAllocGrainComplete, pGrainAlloc);
6155 if (rc == VERR_VD_ASYNC_IO_IN_PROGRESS)
6156 pGrainAlloc->cIoXfersPending++;
6157 else if (RT_FAILURE(rc))
6158 return vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: cannot write backup grain directory entry in '%s'"), pExtent->pszFullname);
6159 }
6160
6161 /* As the final step update the in-memory copy of the GDs. */
6162 pExtent->pGD[uGDIndex] = uGTSector;
6163 if (pExtent->pRGD)
6164 pExtent->pRGD[uGDIndex] = uRGTSector;
6165 }
6166
6167 LogFlow(("uGTSector=%llu uRGTSector=%llu\n", uGTSector, uRGTSector));
6168 pGrainAlloc->uGTSector = uGTSector;
6169 pGrainAlloc->uRGTSector = uRGTSector;
6170
6171 uFileOffset = pExtent->uAppendPosition;
6172 if (!uFileOffset)
6173 return VERR_INTERNAL_ERROR;
6174 Assert(!(uFileOffset % 512));
6175
6176 pGrainAlloc->uGrainOffset = uFileOffset;
6177
6178 if (pImage->uImageFlags & VD_VMDK_IMAGE_FLAGS_STREAM_OPTIMIZED)
6179 {
6180 AssertMsgReturn(vdIfIoIntIoCtxIsSynchronous(pImage->pIfIo, pIoCtx),
6181 ("Accesses to stream optimized images must be synchronous\n"),
6182 VERR_INVALID_STATE);
6183
6184 if (cbWrite != VMDK_SECTOR2BYTE(pExtent->cSectorsPerGrain))
6185 return vdIfError(pImage->pIfError, VERR_INTERNAL_ERROR, RT_SRC_POS, N_("VMDK: not enough data for a compressed data block in '%s'"), pExtent->pszFullname);
6186
6187 /* Invalidate cache, just in case some code incorrectly allows mixing
6188 * of reads and writes. Normally shouldn't be needed. */
6189 pExtent->uGrainSectorAbs = 0;
6190
6191 /* Write compressed data block and the markers. */
6192 uint32_t cbGrain = 0;
6193 size_t cbSeg = 0;
6194 RTSGSEG Segment;
6195 unsigned cSegments = 1;
6196
6197 cbSeg = vdIfIoIntIoCtxSegArrayCreate(pImage->pIfIo, pIoCtx, &Segment,
6198 &cSegments, cbWrite);
6199 Assert(cbSeg == cbWrite);
6200
6201 rc = vmdkFileDeflateSync(pImage, pExtent, uFileOffset,
6202 Segment.pvSeg, cbWrite, uSector, &cbGrain);
6203 if (RT_FAILURE(rc))
6204 {
6205 AssertRC(rc);
6206 return vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: cannot write allocated compressed data block in '%s'"), pExtent->pszFullname);
6207 }
6208 pExtent->uLastGrainAccess = uSector / pExtent->cSectorsPerGrain;
6209 pExtent->uAppendPosition += cbGrain;
6210 }
6211 else
6212 {
6213 /* Write the data. Always a full grain, or we're in big trouble. */
6214 rc = vdIfIoIntFileWriteUser(pImage->pIfIo, pExtent->pFile->pStorage,
6215 uFileOffset, pIoCtx, cbWrite,
6216 vmdkAllocGrainComplete, pGrainAlloc);
6217 if (rc == VERR_VD_ASYNC_IO_IN_PROGRESS)
6218 pGrainAlloc->cIoXfersPending++;
6219 else if (RT_FAILURE(rc))
6220 return vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: cannot write allocated data block in '%s'"), pExtent->pszFullname);
6221
6222 pExtent->uAppendPosition += cbWrite;
6223 }
6224
6225 rc = vmdkAllocGrainGTUpdate(pImage, pExtent, pIoCtx, pGrainAlloc);
6226
6227 if (!pGrainAlloc->cIoXfersPending)
6228 {
6229 /* Grain allocation completed. */
6230 RTMemFree(pGrainAlloc);
6231 }
6232
6233 LogFlowFunc(("leaving rc=%Rrc\n", rc));
6234
6235 return rc;
6236}
6237
/**
 * Internal. Reads the contents by sequentially going over the compressed
 * grains (hoping that they are in sequence).
 *
 * Only usable for stream optimized images accessed synchronously and
 * sequentially; the extent members uLastGrainAccess, uGrainSectorAbs,
 * uGrain, cbGrainStreamRead and pvGrain act as a one-grain read cache.
 *
 * @returns VBox status code.
 * @retval  VERR_VD_BLOCK_FREE if the next stored grain lies beyond the
 *          requested sector, i.e. there is no data for this range.
 * @retval  VERR_VD_VMDK_INVALID_STATE on backward seeks, after a previous
 *          stream error, or when the image layout is corrupt.
 * @param   pImage      VMDK image instance.
 * @param   pExtent     The extent to read from.
 * @param   uSector     Extent-relative sector to start reading at.
 * @param   pIoCtx      I/O context receiving the data.
 * @param   cbRead      Number of bytes to read.
 */
static int vmdkStreamReadSequential(PVMDKIMAGE pImage, PVMDKEXTENT pExtent,
                                    uint64_t uSector, PVDIOCTX pIoCtx,
                                    uint64_t cbRead)
{
    int rc;

    LogFlowFunc(("pImage=%#p pExtent=%#p uSector=%llu pIoCtx=%#p cbRead=%llu\n",
                 pImage, pExtent, uSector, pIoCtx, cbRead));

    AssertMsgReturn(vdIfIoIntIoCtxIsSynchronous(pImage->pIfIo, pIoCtx),
                    ("Async I/O not supported for sequential stream optimized images\n"),
                    VERR_INVALID_STATE);

    /* Do not allow to go back. */
    uint32_t uGrain = uSector / pExtent->cSectorsPerGrain;
    if (uGrain < pExtent->uLastGrainAccess)
        return VERR_VD_VMDK_INVALID_STATE;
    pExtent->uLastGrainAccess = uGrain;

    /* After a previous error do not attempt to recover, as it would need
     * seeking (in the general case backwards which is forbidden).
     * uGrainSectorAbs == 0 is the poison value set on error below. */
    if (!pExtent->uGrainSectorAbs)
        return VERR_VD_VMDK_INVALID_STATE;

    /* Check if we need to read something from the image or if what we have
     * in the buffer is good to fulfill the request. */
    if (!pExtent->cbGrainStreamRead || uGrain > pExtent->uGrain)
    {
        /* First sector of the block following the currently buffered grain. */
        uint32_t uGrainSectorAbs = pExtent->uGrainSectorAbs
                                 + VMDK_BYTE2SECTOR(pExtent->cbGrainStreamRead);

        /* Get the marker from the next data block - and skip everything which
         * is not a compressed grain. If it's a compressed grain which is for
         * the requested sector (or after), read it. */
        VMDKMARKER Marker;
        do
        {
            RT_ZERO(Marker);
            /* Only the uSector/cbSize fields are fetched here; uType is read
             * separately below, and only when cbSize says this is a marker. */
            rc = vdIfIoIntFileReadSync(pImage->pIfIo, pExtent->pFile->pStorage,
                                       VMDK_SECTOR2BYTE(uGrainSectorAbs),
                                       &Marker, RT_UOFFSETOF(VMDKMARKER, uType));
            if (RT_FAILURE(rc))
                return rc;
            Marker.uSector = RT_LE2H_U64(Marker.uSector);
            Marker.cbSize = RT_LE2H_U32(Marker.cbSize);

            if (Marker.cbSize == 0)
            {
                /* A marker for something else than a compressed grain. */
                rc = vdIfIoIntFileReadSync(pImage->pIfIo, pExtent->pFile->pStorage,
                                           VMDK_SECTOR2BYTE(uGrainSectorAbs)
                                           + RT_UOFFSETOF(VMDKMARKER, uType),
                                           &Marker.uType, sizeof(Marker.uType));
                if (RT_FAILURE(rc))
                    return rc;
                Marker.uType = RT_LE2H_U32(Marker.uType);
                switch (Marker.uType)
                {
                    case VMDK_MARKER_EOS:
                        uGrainSectorAbs++;
                        /* Read (or mostly skip) to the end of file. Uses the
                         * Marker (LBA sector) as it is unused anyway. This
                         * makes sure that really everything is read in the
                         * success case. If this read fails it means the image
                         * is truncated, but this is harmless so ignore. */
                        vdIfIoIntFileReadSync(pImage->pIfIo, pExtent->pFile->pStorage,
                                              VMDK_SECTOR2BYTE(uGrainSectorAbs)
                                              + 511,
                                              &Marker.uSector, 1);
                        break;
                    case VMDK_MARKER_GT:
                        /* Skip the marker sector plus the full grain table. */
                        uGrainSectorAbs += 1 + VMDK_BYTE2SECTOR(pExtent->cGTEntries * sizeof(uint32_t));
                        break;
                    case VMDK_MARKER_GD:
                        /* Skip the marker sector plus the sector-padded grain directory. */
                        uGrainSectorAbs += 1 + VMDK_BYTE2SECTOR(RT_ALIGN(pExtent->cGDEntries * sizeof(uint32_t), 512));
                        break;
                    case VMDK_MARKER_FOOTER:
                        /* Marker sector + footer sector. */
                        uGrainSectorAbs += 2;
                        break;
                    case VMDK_MARKER_UNSPECIFIED:
                        /* Skip over the contents of the unspecified marker
                         * type 4 which exists in some vSphere created files. */
                        /** @todo figure out what the payload means. */
                        uGrainSectorAbs += 1;
                        break;
                    default:
                        AssertMsgFailed(("VMDK: corrupted marker, type=%#x\n", Marker.uType));
                        pExtent->uGrainSectorAbs = 0; /* Poison stream state, see top of function. */
                        return VERR_VD_VMDK_INVALID_STATE;
                }
                pExtent->cbGrainStreamRead = 0;
            }
            else
            {
                /* A compressed grain marker. If it is at/after what we're
                 * interested in read and decompress data. */
                if (uSector > Marker.uSector + pExtent->cSectorsPerGrain)
                {
                    /* NOTE(review): '>' lets a grain whose range ends exactly at
                     * uSector through to the decompress path; '>=' looks more
                     * precise — confirm against the format spec before changing. */
                    uGrainSectorAbs += VMDK_BYTE2SECTOR(RT_ALIGN(Marker.cbSize + RT_UOFFSETOF(VMDKMARKER, uType), 512));
                    continue;
                }
                uint64_t uLBA = 0;
                uint32_t cbGrainStreamRead = 0;
                rc = vmdkFileInflateSync(pImage, pExtent,
                                         VMDK_SECTOR2BYTE(uGrainSectorAbs),
                                         pExtent->pvGrain,
                                         VMDK_SECTOR2BYTE(pExtent->cSectorsPerGrain),
                                         &Marker, &uLBA, &cbGrainStreamRead);
                if (RT_FAILURE(rc))
                {
                    pExtent->uGrainSectorAbs = 0;
                    return rc;
                }
                /* Grains must appear in strictly ascending LBA order. */
                if (   pExtent->uGrain
                    && uLBA / pExtent->cSectorsPerGrain <= pExtent->uGrain)
                {
                    pExtent->uGrainSectorAbs = 0;
                    return VERR_VD_VMDK_INVALID_STATE;
                }
                pExtent->uGrain = uLBA / pExtent->cSectorsPerGrain;
                pExtent->cbGrainStreamRead = cbGrainStreamRead;
                break;
            }
        } while (Marker.uType != VMDK_MARKER_EOS);

        pExtent->uGrainSectorAbs = uGrainSectorAbs;

        if (!pExtent->cbGrainStreamRead && Marker.uType == VMDK_MARKER_EOS)
        {
            pExtent->uGrain = UINT32_MAX;
            /* Must set a non-zero value for pExtent->cbGrainStreamRead or
             * the next read would try to get more data, and we're at EOF. */
            pExtent->cbGrainStreamRead = 1;
        }
    }

    if (pExtent->uGrain > uSector / pExtent->cSectorsPerGrain)
    {
        /* The next data block we have is not for this area, so just return
         * that there is no data. */
        LogFlowFunc(("returns VERR_VD_BLOCK_FREE\n"));
        return VERR_VD_BLOCK_FREE;
    }

    /* Serve the request straight from the decompressed grain buffer. */
    uint32_t uSectorInGrain = uSector % pExtent->cSectorsPerGrain;
    vdIfIoIntIoCtxCopyTo(pImage->pIfIo, pIoCtx,
                         (uint8_t *)pExtent->pvGrain + VMDK_SECTOR2BYTE(uSectorInGrain),
                         cbRead);
    LogFlowFunc(("returns VINF_SUCCESS\n"));
    return VINF_SUCCESS;
}
6393
6394/**
6395 * Replaces a fragment of a string with the specified string.
6396 *
6397 * @returns Pointer to the allocated UTF-8 string.
6398 * @param pszWhere UTF-8 string to search in.
6399 * @param pszWhat UTF-8 string to search for.
6400 * @param pszByWhat UTF-8 string to replace the found string with.
6401 *
6402 * @note r=bird: This is only used by vmdkRenameWorker(). The first use is
6403 * for updating the base name in the descriptor, the second is for
6404 * generating new filenames for extents. This code borked when
6405 * RTPathAbs started correcting the driver letter case on windows,
6406 * when strstr failed because the pExtent->pszFullname was not
6407 * subjected to RTPathAbs but while pExtent->pszFullname was. I fixed
6408 * this by apply RTPathAbs to the places it wasn't applied.
6409 *
6410 * However, this highlights some undocumented ASSUMPTIONS as well as
6411 * terrible short commings of the approach.
6412 *
6413 * Given the right filename, it may also screw up the descriptor. Take
6414 * the descriptor text 'RW 2048 SPARSE "Test0.vmdk"' for instance,
6415 * we'll be asked to replace "Test0" with something, no problem. No,
6416 * imagine 'RW 2048 SPARSE "SPARSE.vmdk"', 'RW 2048 SPARSE "RW.vmdk"'
6417 * or 'RW 2048 SPARSE "2048.vmdk"', and the strstr approach falls on
6418 * its bum. The descriptor string must be parsed and reconstructed,
6419 * the lazy strstr approach doesn't cut it.
6420 *
6421 * I'm also curious as to what would be the correct escaping of '"' in
6422 * the file name and how that is supposed to be handled, because it
6423 * needs to be or such names must be rejected in several places (maybe
6424 * they are, I didn't check).
6425 *
6426 * When this function is used to replace the start of a path, I think
6427 * the assumption from the prep/setup code is that we kind of knows
6428 * what we're working on (I could be wrong). However, using strstr
6429 * instead of strncmp/RTStrNICmp makes no sense and isn't future proof.
6430 * Especially on unix systems, weird stuff could happen if someone
6431 * unwittingly tinkers with the prep/setup code. What should really be
6432 * done here is using a new RTPathStartEx function that (via flags)
6433 * allows matching partial final component and returns the length of
6434 * what it matched up (in case it skipped slashes and '.' components).
6435 *
6436 */
6437static char *vmdkStrReplace(const char *pszWhere, const char *pszWhat,
6438 const char *pszByWhat)
6439{
6440 AssertPtr(pszWhere);
6441 AssertPtr(pszWhat);
6442 AssertPtr(pszByWhat);
6443 const char *pszFoundStr = strstr(pszWhere, pszWhat);
6444 if (!pszFoundStr)
6445 {
6446 LogFlowFunc(("Failed to find '%s' in '%s'!\n", pszWhat, pszWhere));
6447 return NULL;
6448 }
6449 size_t cbFinal = strlen(pszWhere) + 1 + strlen(pszByWhat) - strlen(pszWhat);
6450 char *pszNewStr = RTStrAlloc(cbFinal);
6451 if (pszNewStr)
6452 {
6453 char *pszTmp = pszNewStr;
6454 memcpy(pszTmp, pszWhere, pszFoundStr - pszWhere);
6455 pszTmp += pszFoundStr - pszWhere;
6456 memcpy(pszTmp, pszByWhat, strlen(pszByWhat));
6457 pszTmp += strlen(pszByWhat);
6458 strcpy(pszTmp, pszFoundStr + strlen(pszWhat));
6459 }
6460 return pszNewStr;
6461}
6462
6463
6464/** @copydoc VDIMAGEBACKEND::pfnProbe */
6465static DECLCALLBACK(int) vmdkProbe(const char *pszFilename, PVDINTERFACE pVDIfsDisk,
6466 PVDINTERFACE pVDIfsImage, VDTYPE enmDesiredType, VDTYPE *penmType)
6467{
6468 RT_NOREF(enmDesiredType);
6469 LogFlowFunc(("pszFilename=\"%s\" pVDIfsDisk=%#p pVDIfsImage=%#p penmType=%#p\n",
6470 pszFilename, pVDIfsDisk, pVDIfsImage, penmType));
6471
6472 AssertReturn((VALID_PTR(pszFilename) && *pszFilename), VERR_INVALID_PARAMETER);
6473
6474 int rc = VINF_SUCCESS;
6475 PVMDKIMAGE pImage = (PVMDKIMAGE)RTMemAllocZ(RT_UOFFSETOF(VMDKIMAGE, RegionList.aRegions[1]));
6476 if (RT_LIKELY(pImage))
6477 {
6478 pImage->pszFilename = pszFilename;
6479 pImage->pFile = NULL;
6480 pImage->pExtents = NULL;
6481 pImage->pFiles = NULL;
6482 pImage->pGTCache = NULL;
6483 pImage->pDescData = NULL;
6484 pImage->pVDIfsDisk = pVDIfsDisk;
6485 pImage->pVDIfsImage = pVDIfsImage;
6486 /** @todo speed up this test open (VD_OPEN_FLAGS_INFO) by skipping as
6487 * much as possible in vmdkOpenImage. */
6488 rc = vmdkOpenImage(pImage, VD_OPEN_FLAGS_INFO | VD_OPEN_FLAGS_READONLY);
6489 vmdkFreeImage(pImage, false, false /*fFlush*/);
6490 RTMemFree(pImage);
6491
6492 if (RT_SUCCESS(rc))
6493 *penmType = VDTYPE_HDD;
6494 }
6495 else
6496 rc = VERR_NO_MEMORY;
6497
6498 LogFlowFunc(("returns %Rrc\n", rc));
6499 return rc;
6500}
6501
6502/** @copydoc VDIMAGEBACKEND::pfnOpen */
6503static DECLCALLBACK(int) vmdkOpen(const char *pszFilename, unsigned uOpenFlags,
6504 PVDINTERFACE pVDIfsDisk, PVDINTERFACE pVDIfsImage,
6505 VDTYPE enmType, void **ppBackendData)
6506{
6507 RT_NOREF1(enmType); /**< @todo r=klaus make use of the type info. */
6508
6509 LogFlowFunc(("pszFilename=\"%s\" uOpenFlags=%#x pVDIfsDisk=%#p pVDIfsImage=%#p enmType=%u ppBackendData=%#p\n",
6510 pszFilename, uOpenFlags, pVDIfsDisk, pVDIfsImage, enmType, ppBackendData));
6511 int rc;
6512
6513 /* Check open flags. All valid flags are supported. */
6514 AssertReturn(!(uOpenFlags & ~VD_OPEN_FLAGS_MASK), VERR_INVALID_PARAMETER);
6515 AssertReturn((VALID_PTR(pszFilename) && *pszFilename), VERR_INVALID_PARAMETER);
6516
6517 PVMDKIMAGE pImage = (PVMDKIMAGE)RTMemAllocZ(RT_UOFFSETOF(VMDKIMAGE, RegionList.aRegions[1]));
6518 if (RT_LIKELY(pImage))
6519 {
6520 pImage->pszFilename = pszFilename;
6521 pImage->pFile = NULL;
6522 pImage->pExtents = NULL;
6523 pImage->pFiles = NULL;
6524 pImage->pGTCache = NULL;
6525 pImage->pDescData = NULL;
6526 pImage->pVDIfsDisk = pVDIfsDisk;
6527 pImage->pVDIfsImage = pVDIfsImage;
6528
6529 rc = vmdkOpenImage(pImage, uOpenFlags);
6530 if (RT_SUCCESS(rc))
6531 *ppBackendData = pImage;
6532 else
6533 RTMemFree(pImage);
6534 }
6535 else
6536 rc = VERR_NO_MEMORY;
6537
6538 LogFlowFunc(("returns %Rrc (pBackendData=%#p)\n", rc, *ppBackendData));
6539 return rc;
6540}
6541
/** @copydoc VDIMAGEBACKEND::pfnCreate */
static DECLCALLBACK(int) vmdkCreate(const char *pszFilename, uint64_t cbSize,
                                    unsigned uImageFlags, const char *pszComment,
                                    PCVDGEOMETRY pPCHSGeometry, PCVDGEOMETRY pLCHSGeometry,
                                    PCRTUUID pUuid, unsigned uOpenFlags,
                                    unsigned uPercentStart, unsigned uPercentSpan,
                                    PVDINTERFACE pVDIfsDisk, PVDINTERFACE pVDIfsImage,
                                    PVDINTERFACE pVDIfsOperation, VDTYPE enmType,
                                    void **ppBackendData)
{
    LogFlowFunc(("pszFilename=\"%s\" cbSize=%llu uImageFlags=%#x pszComment=\"%s\" pPCHSGeometry=%#p pLCHSGeometry=%#p Uuid=%RTuuid uOpenFlags=%#x uPercentStart=%u uPercentSpan=%u pVDIfsDisk=%#p pVDIfsImage=%#p pVDIfsOperation=%#p enmType=%u ppBackendData=%#p\n",
                 pszFilename, cbSize, uImageFlags, pszComment, pPCHSGeometry, pLCHSGeometry, pUuid, uOpenFlags, uPercentStart, uPercentSpan, pVDIfsDisk, pVDIfsImage, pVDIfsOperation, enmType, ppBackendData));
    int rc;

    /* Check the VD container type and image flags. */
    if (   enmType != VDTYPE_HDD
        || (uImageFlags & ~VD_VMDK_IMAGE_FLAGS_MASK) != 0)
        return VERR_VD_INVALID_TYPE;

    /* Check size. Maximum 256TB-64K for sparse images, otherwise unlimited. */
    if (   !(uImageFlags & VD_VMDK_IMAGE_FLAGS_RAWDISK)
        && (   !cbSize
            || (!(uImageFlags & VD_IMAGE_FLAGS_FIXED) && cbSize >= _1T * 256 - _64K)))
        return VERR_VD_INVALID_SIZE;

    /* Check image flags for invalid combinations. Stream optimized images
     * may only additionally carry the DIFF flag. */
    if (   (uImageFlags & VD_VMDK_IMAGE_FLAGS_STREAM_OPTIMIZED)
        && (uImageFlags & ~(VD_VMDK_IMAGE_FLAGS_STREAM_OPTIMIZED | VD_IMAGE_FLAGS_DIFF)))
        return VERR_INVALID_PARAMETER;

    /* Check open flags. All valid flags are supported. Note that ESX style
     * images must be fixed size. */
    AssertReturn(!(uOpenFlags & ~VD_OPEN_FLAGS_MASK), VERR_INVALID_PARAMETER);
    AssertReturn(   VALID_PTR(pszFilename)
                 && *pszFilename
                 && VALID_PTR(pPCHSGeometry)
                 && VALID_PTR(pLCHSGeometry)
                 && !(   uImageFlags & VD_VMDK_IMAGE_FLAGS_ESX
                      && !(uImageFlags & VD_IMAGE_FLAGS_FIXED)),
                 VERR_INVALID_PARAMETER);

    PVMDKIMAGE pImage = (PVMDKIMAGE)RTMemAllocZ(RT_UOFFSETOF(VMDKIMAGE, RegionList.aRegions[1]));
    if (RT_LIKELY(pImage))
    {
        PVDINTERFACEPROGRESS pIfProgress = VDIfProgressGet(pVDIfsOperation);

        /* Minimal instance init before the actual creation. */
        pImage->pszFilename = pszFilename;
        pImage->pFile = NULL;
        pImage->pExtents = NULL;
        pImage->pFiles = NULL;
        pImage->pGTCache = NULL;
        pImage->pDescData = NULL;
        pImage->pVDIfsDisk = pVDIfsDisk;
        pImage->pVDIfsImage = pVDIfsImage;
        /* Descriptors for split images can be pretty large, especially if the
         * filename is long. So prepare for the worst, and allocate quite some
         * memory for the descriptor in this case. */
        if (uImageFlags & VD_VMDK_IMAGE_FLAGS_SPLIT_2G)
            pImage->cbDescAlloc = VMDK_SECTOR2BYTE(200);
        else
            pImage->cbDescAlloc = VMDK_SECTOR2BYTE(20);
        pImage->pDescData = (char *)RTMemAllocZ(pImage->cbDescAlloc);
        if (RT_LIKELY(pImage->pDescData))
        {
            rc = vmdkCreateImage(pImage, cbSize, uImageFlags, pszComment,
                                 pPCHSGeometry, pLCHSGeometry, pUuid,
                                 pIfProgress, uPercentStart, uPercentSpan);
            if (RT_SUCCESS(rc))
            {
                /* So far the image is opened in read/write mode. Make sure the
                 * image is opened in read-only mode if the caller requested that. */
                if (uOpenFlags & VD_OPEN_FLAGS_READONLY)
                {
                    vmdkFreeImage(pImage, false, true /*fFlush*/);
                    rc = vmdkOpenImage(pImage, uOpenFlags);
                }

                if (RT_SUCCESS(rc))
                    *ppBackendData = pImage;
            }

            /* NOTE(review): assumes neither vmdkCreateImage nor the
             * free/re-open path above frees pDescData on failure — verify,
             * as that would make this a double free. */
            if (RT_FAILURE(rc))
                RTMemFree(pImage->pDescData);
        }
        else
            rc = VERR_NO_MEMORY;

        if (RT_FAILURE(rc))
            RTMemFree(pImage);
    }
    else
        rc = VERR_NO_MEMORY;

    LogFlowFunc(("returns %Rrc (pBackendData=%#p)\n", rc, *ppBackendData));
    return rc;
}
6637
/**
 * Prepares the state for renaming a VMDK image, setting up the state and allocating
 * memory.
 *
 * @returns VBox status code.
 * @param   pImage          VMDK image instance.
 * @param   pRenameState    The state to initialize.
 * @param   pszFilename     The new filename.
 *
 * @note    On failure the state may be partially initialized; the caller is
 *          expected to clean up with vmdkRenameStateDestroy() in all cases
 *          (the AssertReturn exits below rely on this).
 */
static int vmdkRenameStatePrepare(PVMDKIMAGE pImage, PVMDKRENAMESTATE pRenameState, const char *pszFilename)
{
    AssertReturn(RTPathFilename(pszFilename) != NULL, VERR_INVALID_PARAMETER);

    int rc = VINF_SUCCESS;

    memset(&pRenameState->DescriptorCopy, 0, sizeof(pRenameState->DescriptorCopy));

    /*
     * Allocate an array to store both old and new names of renamed files
     * in case we have to roll back the changes. Arrays are initialized
     * with zeros. We actually save stuff when and if we change it.
     */
    pRenameState->cExtents = pImage->cExtents;
    /* The name arrays get one extra slot for the descriptor file name. */
    pRenameState->apszOldName = (char **)RTMemTmpAllocZ((pRenameState->cExtents + 1) * sizeof(char *));
    pRenameState->apszNewName = (char **)RTMemTmpAllocZ((pRenameState->cExtents + 1) * sizeof(char *));
    pRenameState->apszNewLines = (char **)RTMemTmpAllocZ(pRenameState->cExtents * sizeof(char *));
    if (   pRenameState->apszOldName
        && pRenameState->apszNewName
        && pRenameState->apszNewLines)
    {
        /* Save the descriptor size and position. */
        if (pImage->pDescData)
        {
            /* Separate descriptor file. */
            pRenameState->fEmbeddedDesc = false;
        }
        else
        {
            /* Embedded descriptor file. Keep a copy of the first extent,
             * which carries the descriptor, for the rollback path. */
            pRenameState->ExtentCopy = pImage->pExtents[0];
            pRenameState->fEmbeddedDesc = true;
        }

        /* Save the descriptor content. */
        pRenameState->DescriptorCopy.cLines = pImage->Descriptor.cLines;
        for (unsigned i = 0; i < pRenameState->DescriptorCopy.cLines; i++)
        {
            pRenameState->DescriptorCopy.aLines[i] = RTStrDup(pImage->Descriptor.aLines[i]);
            if (!pRenameState->DescriptorCopy.aLines[i])
            {
                rc = VERR_NO_MEMORY;
                break;
            }
        }

        if (RT_SUCCESS(rc))
        {
            /* Prepare both old and new base names used for string replacement. */
            pRenameState->pszNewBaseName = RTStrDup(RTPathFilename(pszFilename));
            AssertReturn(pRenameState->pszNewBaseName, VERR_NO_STR_MEMORY);
            RTPathStripSuffix(pRenameState->pszNewBaseName);

            pRenameState->pszOldBaseName = RTStrDup(RTPathFilename(pImage->pszFilename));
            AssertReturn(pRenameState->pszOldBaseName, VERR_NO_STR_MEMORY);
            RTPathStripSuffix(pRenameState->pszOldBaseName);

            /* Prepare both old and new full names used for string replacement.
               Note! Must abspath the stuff here, so the strstr weirdness later in
                     the renaming process get a match against abspath'ed extent paths.
                     See RTPathAbsDup call in vmdkDescriptorReadSparse(). */
            pRenameState->pszNewFullName = RTPathAbsDup(pszFilename);
            AssertReturn(pRenameState->pszNewFullName, VERR_NO_STR_MEMORY);
            RTPathStripSuffix(pRenameState->pszNewFullName);

            pRenameState->pszOldFullName = RTPathAbsDup(pImage->pszFilename);
            AssertReturn(pRenameState->pszOldFullName, VERR_NO_STR_MEMORY);
            RTPathStripSuffix(pRenameState->pszOldFullName);

            /* Save the old name for easy access to the old descriptor file. */
            pRenameState->pszOldDescName = RTStrDup(pImage->pszFilename);
            AssertReturn(pRenameState->pszOldDescName, VERR_NO_STR_MEMORY);

            /* Save old image name. */
            pRenameState->pszOldImageName = pImage->pszFilename;
        }
    }
    else
        rc = VERR_NO_TMP_MEMORY;

    return rc;
}
6729
6730/**
6731 * Destroys the given rename state, freeing all allocated memory.
6732 *
6733 * @returns nothing.
6734 * @param pRenameState The rename state to destroy.
6735 */
6736static void vmdkRenameStateDestroy(PVMDKRENAMESTATE pRenameState)
6737{
6738 for (unsigned i = 0; i < pRenameState->DescriptorCopy.cLines; i++)
6739 if (pRenameState->DescriptorCopy.aLines[i])
6740 RTStrFree(pRenameState->DescriptorCopy.aLines[i]);
6741 if (pRenameState->apszOldName)
6742 {
6743 for (unsigned i = 0; i <= pRenameState->cExtents; i++)
6744 if (pRenameState->apszOldName[i])
6745 RTStrFree(pRenameState->apszOldName[i]);
6746 RTMemTmpFree(pRenameState->apszOldName);
6747 }
6748 if (pRenameState->apszNewName)
6749 {
6750 for (unsigned i = 0; i <= pRenameState->cExtents; i++)
6751 if (pRenameState->apszNewName[i])
6752 RTStrFree(pRenameState->apszNewName[i]);
6753 RTMemTmpFree(pRenameState->apszNewName);
6754 }
6755 if (pRenameState->apszNewLines)
6756 {
6757 for (unsigned i = 0; i < pRenameState->cExtents; i++)
6758 if (pRenameState->apszNewLines[i])
6759 RTStrFree(pRenameState->apszNewLines[i]);
6760 RTMemTmpFree(pRenameState->apszNewLines);
6761 }
6762 if (pRenameState->pszOldDescName)
6763 RTStrFree(pRenameState->pszOldDescName);
6764 if (pRenameState->pszOldBaseName)
6765 RTStrFree(pRenameState->pszOldBaseName);
6766 if (pRenameState->pszNewBaseName)
6767 RTStrFree(pRenameState->pszNewBaseName);
6768 if (pRenameState->pszOldFullName)
6769 RTStrFree(pRenameState->pszOldFullName);
6770 if (pRenameState->pszNewFullName)
6771 RTStrFree(pRenameState->pszNewFullName);
6772}
6773
/**
 * Rolls back the rename operation to the original state.
 *
 * Best effort: intermediate failures are asserted on, but the rollback
 * continues regardless in order to restore as much as possible.
 *
 * @returns VBox status code (of the final image re-open).
 * @param   pImage          VMDK image instance.
 * @param   pRenameState    The rename state.
 */
static int vmdkRenameRollback(PVMDKIMAGE pImage, PVMDKRENAMESTATE pRenameState)
{
    int rc = VINF_SUCCESS;

    if (!pRenameState->fImageFreed)
    {
        /*
         * Some extents may have been closed, close the rest. We will
         * re-open the whole thing later.
         */
        vmdkFreeImage(pImage, false, true /*fFlush*/);
    }

    /* Rename files back. Only entries with a saved old name were actually
     * renamed by the worker, so the others are skipped. */
    for (unsigned i = 0; i <= pRenameState->cExtents; i++)
    {
        if (pRenameState->apszOldName[i])
        {
            rc = vdIfIoIntFileMove(pImage->pIfIo, pRenameState->apszNewName[i], pRenameState->apszOldName[i], 0);
            AssertRC(rc);
        }
    }
    /* Restore the old descriptor. */
    PVMDKFILE pFile;
    rc = vmdkFileOpen(pImage, &pFile, NULL, pRenameState->pszOldDescName,
                      VDOpenFlagsToFileOpenFlags(VD_OPEN_FLAGS_NORMAL,
                                                 false /* fCreate */));
    AssertRC(rc);
    if (pRenameState->fEmbeddedDesc)
    {
        /* Descriptor lives in the first extent; use the saved extent copy. */
        pRenameState->ExtentCopy.pFile = pFile;
        pImage->pExtents = &pRenameState->ExtentCopy;
    }
    else
    {
        /* Shouldn't be null for separate descriptor.
         * There will be no access to the actual content.
         */
        pImage->pDescData = pRenameState->pszOldDescName;
        pImage->pFile = pFile;
    }
    pImage->Descriptor = pRenameState->DescriptorCopy;
    /* Write back the saved descriptor; result deliberately ignored (best effort). */
    vmdkWriteDescriptor(pImage, NULL);
    vmdkFileClose(pImage, &pFile, false);
    /* Get rid of the stuff we implanted. */
    pImage->pExtents = NULL;
    pImage->pFile = NULL;
    pImage->pDescData = NULL;
    /* Re-open the image back. */
    pImage->pszFilename = pRenameState->pszOldImageName;
    rc = vmdkOpenImage(pImage, pImage->uOpenFlags);

    return rc;
}
6835
6836/**
6837 * Rename worker doing the real work.
6838 *
6839 * @returns VBox status code.
6840 * @param pImage VMDK image instance.
6841 * @param pRenameState The rename state.
6842 * @param pszFilename The new filename.
6843 */
6844static int vmdkRenameWorker(PVMDKIMAGE pImage, PVMDKRENAMESTATE pRenameState, const char *pszFilename)
6845{
6846 int rc = VINF_SUCCESS;
6847 unsigned i, line;
6848
6849 /* Update the descriptor with modified extent names. */
6850 for (i = 0, line = pImage->Descriptor.uFirstExtent;
6851 i < pRenameState->cExtents;
6852 i++, line = pImage->Descriptor.aNextLines[line])
6853 {
6854 /* Update the descriptor. */
6855 pRenameState->apszNewLines[i] = vmdkStrReplace(pImage->Descriptor.aLines[line],
6856 pRenameState->pszOldBaseName,
6857 pRenameState->pszNewBaseName);
6858 if (!pRenameState->apszNewLines[i])
6859 {
6860 rc = VERR_NO_MEMORY;
6861 break;
6862 }
6863 pImage->Descriptor.aLines[line] = pRenameState->apszNewLines[i];
6864 }
6865
6866 if (RT_SUCCESS(rc))
6867 {
6868 /* Make sure the descriptor gets written back. */
6869 pImage->Descriptor.fDirty = true;
6870 /* Flush the descriptor now, in case it is embedded. */
6871 vmdkFlushImage(pImage, NULL);
6872
6873 /* Close and rename/move extents. */
6874 for (i = 0; i < pRenameState->cExtents; i++)
6875 {
6876 PVMDKEXTENT pExtent = &pImage->pExtents[i];
6877 /* Compose new name for the extent. */
6878 pRenameState->apszNewName[i] = vmdkStrReplace(pExtent->pszFullname,
6879 pRenameState->pszOldFullName,
6880 pRenameState->pszNewFullName);
6881 if (!pRenameState->apszNewName[i])
6882 {
6883 rc = VERR_NO_MEMORY;
6884 break;
6885 }
6886 /* Close the extent file. */
6887 rc = vmdkFileClose(pImage, &pExtent->pFile, false);
6888 if (RT_FAILURE(rc))
6889 break;;
6890
6891 /* Rename the extent file. */
6892 rc = vdIfIoIntFileMove(pImage->pIfIo, pExtent->pszFullname, pRenameState->apszNewName[i], 0);
6893 if (RT_FAILURE(rc))
6894 break;
6895 /* Remember the old name. */
6896 pRenameState->apszOldName[i] = RTStrDup(pExtent->pszFullname);
6897 }
6898
6899 if (RT_SUCCESS(rc))
6900 {
6901 /* Release all old stuff. */
6902 rc = vmdkFreeImage(pImage, false, true /*fFlush*/);
6903 if (RT_SUCCESS(rc))
6904 {
6905 pRenameState->fImageFreed = true;
6906
6907 /* Last elements of new/old name arrays are intended for
6908 * storing descriptor's names.
6909 */
6910 pRenameState->apszNewName[pRenameState->cExtents] = RTStrDup(pszFilename);
6911 /* Rename the descriptor file if it's separate. */
6912 if (!pRenameState->fEmbeddedDesc)
6913 {
6914 rc = vdIfIoIntFileMove(pImage->pIfIo, pImage->pszFilename, pRenameState->apszNewName[pRenameState->cExtents], 0);
6915 if (RT_SUCCESS(rc))
6916 {
6917 /* Save old name only if we may need to change it back. */
6918 pRenameState->apszOldName[pRenameState->cExtents] = RTStrDup(pszFilename);
6919 }
6920 }
6921
6922 /* Update pImage with the new information. */
6923 pImage->pszFilename = pszFilename;
6924
6925 /* Open the new image. */
6926 rc = vmdkOpenImage(pImage, pImage->uOpenFlags);
6927 }
6928 }
6929 }
6930
6931 return rc;
6932}
6933
6934/** @copydoc VDIMAGEBACKEND::pfnRename */
6935static DECLCALLBACK(int) vmdkRename(void *pBackendData, const char *pszFilename)
6936{
6937 LogFlowFunc(("pBackendData=%#p pszFilename=%#p\n", pBackendData, pszFilename));
6938
6939 PVMDKIMAGE pImage = (PVMDKIMAGE)pBackendData;
6940 VMDKRENAMESTATE RenameState;
6941
6942 memset(&RenameState, 0, sizeof(RenameState));
6943
6944 /* Check arguments. */
6945 AssertReturn(( pImage
6946 && VALID_PTR(pszFilename)
6947 && *pszFilename
6948 && !(pImage->uImageFlags & VD_VMDK_IMAGE_FLAGS_RAWDISK)), VERR_INVALID_PARAMETER);
6949
6950 int rc = vmdkRenameStatePrepare(pImage, &RenameState, pszFilename);
6951 if (RT_SUCCESS(rc))
6952 {
6953 /* --- Up to this point we have not done any damage yet. --- */
6954
6955 rc = vmdkRenameWorker(pImage, &RenameState, pszFilename);
6956 /* Roll back all changes in case of failure. */
6957 if (RT_FAILURE(rc))
6958 {
6959 int rrc = vmdkRenameRollback(pImage, &RenameState);
6960 AssertRC(rrc);
6961 }
6962 }
6963
6964 vmdkRenameStateDestroy(&RenameState);
6965 LogFlowFunc(("returns %Rrc\n", rc));
6966 return rc;
6967}
6968
6969/** @copydoc VDIMAGEBACKEND::pfnClose */
6970static DECLCALLBACK(int) vmdkClose(void *pBackendData, bool fDelete)
6971{
6972 LogFlowFunc(("pBackendData=%#p fDelete=%d\n", pBackendData, fDelete));
6973 PVMDKIMAGE pImage = (PVMDKIMAGE)pBackendData;
6974
6975 int rc = vmdkFreeImage(pImage, fDelete, true /*fFlush*/);
6976 RTMemFree(pImage);
6977
6978 LogFlowFunc(("returns %Rrc\n", rc));
6979 return rc;
6980}
6981
/** @copydoc VDIMAGEBACKEND::pfnRead
 *
 * Reads up to cbToRead bytes starting at byte offset uOffset into the I/O
 * context.  The read is clipped to the containing extent and (for sparse
 * extents) to the containing grain; *pcbActuallyRead receives the clipped
 * size, so the caller may have to issue follow-up reads.  Offset and size
 * must be multiples of the 512-byte sector size.
 */
static DECLCALLBACK(int) vmdkRead(void *pBackendData, uint64_t uOffset, size_t cbToRead,
                                  PVDIOCTX pIoCtx, size_t *pcbActuallyRead)
{
    LogFlowFunc(("pBackendData=%#p uOffset=%llu pIoCtx=%#p cbToRead=%zu pcbActuallyRead=%#p\n",
                 pBackendData, uOffset, pIoCtx, cbToRead, pcbActuallyRead));
    PVMDKIMAGE pImage = (PVMDKIMAGE)pBackendData;

    AssertPtr(pImage);
    Assert(uOffset % 512 == 0);
    Assert(cbToRead % 512 == 0);
    AssertReturn((VALID_PTR(pIoCtx) && cbToRead), VERR_INVALID_PARAMETER);
    AssertReturn(uOffset + cbToRead <= pImage->cbSize, VERR_INVALID_PARAMETER);

    /* Find the extent and check access permissions as defined in the extent descriptor. */
    PVMDKEXTENT pExtent;
    uint64_t uSectorExtentRel;
    int rc = vmdkFindExtent(pImage, VMDK_BYTE2SECTOR(uOffset),
                            &pExtent, &uSectorExtentRel);
    if (   RT_SUCCESS(rc)
        && pExtent->enmAccess != VMDKACCESS_NOACCESS)
    {
        /* Clip read range to remain in this extent. */
        cbToRead = RT_MIN(cbToRead, VMDK_SECTOR2BYTE(pExtent->uSectorOffset + pExtent->cNominalSectors - uSectorExtentRel));

        /* Handle the read according to the current extent type. */
        switch (pExtent->enmType)
        {
            case VMDKETYPE_HOSTED_SPARSE:
            {
                uint64_t uSectorExtentAbs;

                /* Translate the extent-relative sector to an absolute file
                 * sector via the grain table. */
                rc = vmdkGetSector(pImage, pIoCtx, pExtent, uSectorExtentRel, &uSectorExtentAbs);
                if (RT_FAILURE(rc))
                    break;
                /* Clip read range to at most the rest of the grain. */
                cbToRead = RT_MIN(cbToRead, VMDK_SECTOR2BYTE(pExtent->cSectorsPerGrain - uSectorExtentRel % pExtent->cSectorsPerGrain));
                Assert(!(cbToRead % 512));
                if (uSectorExtentAbs == 0)
                {
                    /* Grain is unallocated.  Report it as a free block unless
                     * this is a streamOptimized image opened read-only and
                     * sequential, in which case the data is pulled from the
                     * compressed stream. */
                    if (   !(pImage->uImageFlags & VD_VMDK_IMAGE_FLAGS_STREAM_OPTIMIZED)
                        || !(pImage->uOpenFlags & VD_OPEN_FLAGS_READONLY)
                        || !(pImage->uOpenFlags & VD_OPEN_FLAGS_SEQUENTIAL))
                        rc = VERR_VD_BLOCK_FREE;
                    else
                        rc = vmdkStreamReadSequential(pImage, pExtent,
                                                      uSectorExtentRel,
                                                      pIoCtx, cbToRead);
                }
                else
                {
                    if (pImage->uImageFlags & VD_VMDK_IMAGE_FLAGS_STREAM_OPTIMIZED)
                    {
                        AssertMsg(vdIfIoIntIoCtxIsSynchronous(pImage->pIfIo, pIoCtx),
                                  ("Async I/O is not supported for stream optimized VMDK's\n"));

                        /* Compressed grain: inflate the whole grain into the
                         * per-extent cache buffer if it isn't cached already,
                         * then copy the requested slice out of it. */
                        uint32_t uSectorInGrain = uSectorExtentRel % pExtent->cSectorsPerGrain;
                        uSectorExtentAbs -= uSectorInGrain;
                        if (pExtent->uGrainSectorAbs != uSectorExtentAbs)
                        {
                            uint64_t uLBA = 0; /* gcc maybe uninitialized */
                            rc = vmdkFileInflateSync(pImage, pExtent,
                                                     VMDK_SECTOR2BYTE(uSectorExtentAbs),
                                                     pExtent->pvGrain,
                                                     VMDK_SECTOR2BYTE(pExtent->cSectorsPerGrain),
                                                     NULL, &uLBA, NULL);
                            if (RT_FAILURE(rc))
                            {
                                /* Invalidate the cache on failure so a retry
                                 * re-reads the grain from the file. */
                                pExtent->uGrainSectorAbs = 0;
                                break;
                            }
                            pExtent->uGrainSectorAbs = uSectorExtentAbs;
                            pExtent->uGrain = uSectorExtentRel / pExtent->cSectorsPerGrain;
                            Assert(uLBA == uSectorExtentRel);
                        }
                        vdIfIoIntIoCtxCopyTo(pImage->pIfIo, pIoCtx,
                                             (uint8_t *)pExtent->pvGrain
                                             + VMDK_SECTOR2BYTE(uSectorInGrain),
                                             cbToRead);
                    }
                    else
                        /* Uncompressed sparse grain: plain file read. */
                        rc = vdIfIoIntFileReadUser(pImage->pIfIo, pExtent->pFile->pStorage,
                                                   VMDK_SECTOR2BYTE(uSectorExtentAbs),
                                                   pIoCtx, cbToRead);
                }
                break;
            }
            case VMDKETYPE_VMFS:
            case VMDKETYPE_FLAT:
                /* Flat extents map 1:1 to the file; read directly. */
                rc = vdIfIoIntFileReadUser(pImage->pIfIo, pExtent->pFile->pStorage,
                                           VMDK_SECTOR2BYTE(uSectorExtentRel),
                                           pIoCtx, cbToRead);
                break;
            case VMDKETYPE_ZERO:
            {
                /* Zero extents have no backing storage; synthesize zeros. */
                size_t cbSet;

                cbSet = vdIfIoIntIoCtxSet(pImage->pIfIo, pIoCtx, 0, cbToRead);
                Assert(cbSet == cbToRead);
                break;
            }
        }
        if (pcbActuallyRead)
            *pcbActuallyRead = cbToRead;
    }
    else if (RT_SUCCESS(rc))
        /* Extent found but marked NOACCESS in the descriptor. */
        rc = VERR_VD_VMDK_INVALID_STATE;

    LogFlowFunc(("returns %Rrc\n", rc));
    return rc;
}
7093
/** @copydoc VDIMAGEBACKEND::pfnWrite
 *
 * Writes up to cbToWrite bytes at byte offset uOffset from the I/O context.
 * The write is clipped to the containing extent/grain; *pcbWriteProcess
 * receives the clipped size.  For partial writes into an unallocated grain
 * the function returns VERR_VD_BLOCK_FREE and reports via *pcbPreRead /
 * *pcbPostRead how much surrounding data the caller must supply to make it
 * a full-grain write.
 */
static DECLCALLBACK(int) vmdkWrite(void *pBackendData, uint64_t uOffset, size_t cbToWrite,
                                   PVDIOCTX pIoCtx, size_t *pcbWriteProcess, size_t *pcbPreRead,
                                   size_t *pcbPostRead, unsigned fWrite)
{
    LogFlowFunc(("pBackendData=%#p uOffset=%llu pIoCtx=%#p cbToWrite=%zu pcbWriteProcess=%#p pcbPreRead=%#p pcbPostRead=%#p\n",
                 pBackendData, uOffset, pIoCtx, cbToWrite, pcbWriteProcess, pcbPreRead, pcbPostRead));
    PVMDKIMAGE pImage = (PVMDKIMAGE)pBackendData;
    int rc;

    AssertPtr(pImage);
    Assert(uOffset % 512 == 0);
    Assert(cbToWrite % 512 == 0);
    AssertReturn((VALID_PTR(pIoCtx) && cbToWrite), VERR_INVALID_PARAMETER);

    if (!(pImage->uOpenFlags & VD_OPEN_FLAGS_READONLY))
    {
        PVMDKEXTENT pExtent;
        uint64_t uSectorExtentRel;
        uint64_t uSectorExtentAbs;

        /* No size check here, will do that later when the extent is located.
         * There are sparse images out there which according to the spec are
         * invalid, because the total size is not a multiple of the grain size.
         * Also for sparse images which are stitched together in odd ways (not at
         * grain boundaries, and with the nominal size not being a multiple of the
         * grain size), this would prevent writing to the last grain. */

        rc = vmdkFindExtent(pImage, VMDK_BYTE2SECTOR(uOffset),
                            &pExtent, &uSectorExtentRel);
        if (RT_SUCCESS(rc))
        {
            /* NOTE(review): the access check below looks asymmetric — a
             * non-READWRITE extent is only rejected when the image is not
             * stream-optimized-in-progress and the extent is not READONLY;
             * preserved as-is since callers may rely on it. */
            if (   pExtent->enmAccess != VMDKACCESS_READWRITE
                && (   !(pImage->uImageFlags & VD_VMDK_IMAGE_FLAGS_STREAM_OPTIMIZED)
                    && !pImage->pExtents[0].uAppendPosition
                    && pExtent->enmAccess != VMDKACCESS_READONLY))
                rc = VERR_VD_VMDK_INVALID_STATE;
            else
            {
                /* Handle the write according to the current extent type. */
                switch (pExtent->enmType)
                {
                    case VMDKETYPE_HOSTED_SPARSE:
                        rc = vmdkGetSector(pImage, pIoCtx, pExtent, uSectorExtentRel, &uSectorExtentAbs);
                        if (RT_SUCCESS(rc))
                        {
                            /* streamOptimized images only allow appending past
                             * the last grain ever written. */
                            if (    pImage->uImageFlags & VD_VMDK_IMAGE_FLAGS_STREAM_OPTIMIZED
                                &&  uSectorExtentRel < (uint64_t)pExtent->uLastGrainAccess * pExtent->cSectorsPerGrain)
                                rc = VERR_VD_VMDK_INVALID_WRITE;
                            else
                            {
                                /* Clip write range to at most the rest of the grain. */
                                cbToWrite = RT_MIN(cbToWrite,
                                                   VMDK_SECTOR2BYTE(  pExtent->cSectorsPerGrain
                                                                    - uSectorExtentRel % pExtent->cSectorsPerGrain));
                                if (uSectorExtentAbs == 0)
                                {
                                    /* Grain not yet allocated. */
                                    if (!(pImage->uImageFlags & VD_VMDK_IMAGE_FLAGS_STREAM_OPTIMIZED))
                                    {
                                        if (cbToWrite == VMDK_SECTOR2BYTE(pExtent->cSectorsPerGrain))
                                        {
                                            /* Full block write to a previously unallocated block.
                                             * Check if the caller wants to avoid the automatic alloc. */
                                            if (!(fWrite & VD_WRITE_NO_ALLOC))
                                            {
                                                /* Allocate GT and find out where to store the grain. */
                                                rc = vmdkAllocGrain(pImage, pExtent, pIoCtx,
                                                                    uSectorExtentRel, cbToWrite);
                                            }
                                            else
                                                rc = VERR_VD_BLOCK_FREE;
                                            *pcbPreRead = 0;
                                            *pcbPostRead = 0;
                                        }
                                        else
                                        {
                                            /* Partial grain write: ask the caller to
                                             * read-modify-write the full grain. */
                                            /* Clip write range to remain in this extent. */
                                            cbToWrite = RT_MIN(cbToWrite,
                                                               VMDK_SECTOR2BYTE(  pExtent->uSectorOffset
                                                                                + pExtent->cNominalSectors - uSectorExtentRel));
                                            *pcbPreRead = VMDK_SECTOR2BYTE(uSectorExtentRel % pExtent->cSectorsPerGrain);
                                            *pcbPostRead = VMDK_SECTOR2BYTE(pExtent->cSectorsPerGrain) - cbToWrite - *pcbPreRead;
                                            rc = VERR_VD_BLOCK_FREE;
                                        }
                                    }
                                    else
                                        /* streamOptimized: append a new compressed grain. */
                                        rc = vmdkStreamAllocGrain(pImage, pExtent, uSectorExtentRel,
                                                                  pIoCtx, cbToWrite);
                                }
                                else
                                {
                                    if (pImage->uImageFlags & VD_VMDK_IMAGE_FLAGS_STREAM_OPTIMIZED)
                                    {
                                        /* A partial write to a streamOptimized image is simply
                                         * invalid. It requires rewriting already compressed data
                                         * which is somewhere between expensive and impossible. */
                                        rc = VERR_VD_VMDK_INVALID_STATE;
                                        pExtent->uGrainSectorAbs = 0;
                                        AssertRC(rc);
                                    }
                                    else
                                    {
                                        /* Grain already allocated: write in place. */
                                        Assert(!(pImage->uImageFlags & VD_VMDK_IMAGE_FLAGS_STREAM_OPTIMIZED));
                                        rc = vdIfIoIntFileWriteUser(pImage->pIfIo, pExtent->pFile->pStorage,
                                                                    VMDK_SECTOR2BYTE(uSectorExtentAbs),
                                                                    pIoCtx, cbToWrite, NULL, NULL);
                                    }
                                }
                            }
                        }
                        break;
                    case VMDKETYPE_VMFS:
                    case VMDKETYPE_FLAT:
                        /* Clip write range to remain in this extent. */
                        cbToWrite = RT_MIN(cbToWrite, VMDK_SECTOR2BYTE(pExtent->uSectorOffset + pExtent->cNominalSectors - uSectorExtentRel));
                        rc = vdIfIoIntFileWriteUser(pImage->pIfIo, pExtent->pFile->pStorage,
                                                    VMDK_SECTOR2BYTE(uSectorExtentRel),
                                                    pIoCtx, cbToWrite, NULL, NULL);
                        break;
                    case VMDKETYPE_ZERO:
                        /* Writes to zero extents are silently dropped. */
                        /* Clip write range to remain in this extent. */
                        cbToWrite = RT_MIN(cbToWrite, VMDK_SECTOR2BYTE(pExtent->uSectorOffset + pExtent->cNominalSectors - uSectorExtentRel));
                        break;
                }
            }

            if (pcbWriteProcess)
                *pcbWriteProcess = cbToWrite;
        }
    }
    else
        rc = VERR_VD_IMAGE_READ_ONLY;

    LogFlowFunc(("returns %Rrc\n", rc));
    return rc;
}
7230
7231/** @copydoc VDIMAGEBACKEND::pfnFlush */
7232static DECLCALLBACK(int) vmdkFlush(void *pBackendData, PVDIOCTX pIoCtx)
7233{
7234 PVMDKIMAGE pImage = (PVMDKIMAGE)pBackendData;
7235
7236 return vmdkFlushImage(pImage, pIoCtx);
7237}
7238
7239/** @copydoc VDIMAGEBACKEND::pfnGetVersion */
7240static DECLCALLBACK(unsigned) vmdkGetVersion(void *pBackendData)
7241{
7242 LogFlowFunc(("pBackendData=%#p\n", pBackendData));
7243 PVMDKIMAGE pImage = (PVMDKIMAGE)pBackendData;
7244
7245 AssertPtrReturn(pImage, 0);
7246
7247 return VMDK_IMAGE_VERSION;
7248}
7249
7250/** @copydoc VDIMAGEBACKEND::pfnGetFileSize */
7251static DECLCALLBACK(uint64_t) vmdkGetFileSize(void *pBackendData)
7252{
7253 LogFlowFunc(("pBackendData=%#p\n", pBackendData));
7254 PVMDKIMAGE pImage = (PVMDKIMAGE)pBackendData;
7255 uint64_t cb = 0;
7256
7257 AssertPtrReturn(pImage, 0);
7258
7259 if (pImage->pFile != NULL)
7260 {
7261 uint64_t cbFile;
7262 int rc = vdIfIoIntFileGetSize(pImage->pIfIo, pImage->pFile->pStorage, &cbFile);
7263 if (RT_SUCCESS(rc))
7264 cb += cbFile;
7265 }
7266 for (unsigned i = 0; i < pImage->cExtents; i++)
7267 {
7268 if (pImage->pExtents[i].pFile != NULL)
7269 {
7270 uint64_t cbFile;
7271 int rc = vdIfIoIntFileGetSize(pImage->pIfIo, pImage->pExtents[i].pFile->pStorage, &cbFile);
7272 if (RT_SUCCESS(rc))
7273 cb += cbFile;
7274 }
7275 }
7276
7277 LogFlowFunc(("returns %lld\n", cb));
7278 return cb;
7279}
7280
7281/** @copydoc VDIMAGEBACKEND::pfnGetPCHSGeometry */
7282static DECLCALLBACK(int) vmdkGetPCHSGeometry(void *pBackendData, PVDGEOMETRY pPCHSGeometry)
7283{
7284 LogFlowFunc(("pBackendData=%#p pPCHSGeometry=%#p\n", pBackendData, pPCHSGeometry));
7285 PVMDKIMAGE pImage = (PVMDKIMAGE)pBackendData;
7286 int rc = VINF_SUCCESS;
7287
7288 AssertPtrReturn(pImage, VERR_VD_NOT_OPENED);
7289
7290 if (pImage->PCHSGeometry.cCylinders)
7291 *pPCHSGeometry = pImage->PCHSGeometry;
7292 else
7293 rc = VERR_VD_GEOMETRY_NOT_SET;
7294
7295 LogFlowFunc(("returns %Rrc (PCHS=%u/%u/%u)\n", rc, pPCHSGeometry->cCylinders, pPCHSGeometry->cHeads, pPCHSGeometry->cSectors));
7296 return rc;
7297}
7298
7299/** @copydoc VDIMAGEBACKEND::pfnSetPCHSGeometry */
7300static DECLCALLBACK(int) vmdkSetPCHSGeometry(void *pBackendData, PCVDGEOMETRY pPCHSGeometry)
7301{
7302 LogFlowFunc(("pBackendData=%#p pPCHSGeometry=%#p PCHS=%u/%u/%u\n",
7303 pBackendData, pPCHSGeometry, pPCHSGeometry->cCylinders, pPCHSGeometry->cHeads, pPCHSGeometry->cSectors));
7304 PVMDKIMAGE pImage = (PVMDKIMAGE)pBackendData;
7305 int rc = VINF_SUCCESS;
7306
7307 AssertPtrReturn(pImage, VERR_VD_NOT_OPENED);
7308
7309 if (!(pImage->uOpenFlags & VD_OPEN_FLAGS_READONLY))
7310 {
7311 if (!(pImage->uOpenFlags & VD_VMDK_IMAGE_FLAGS_STREAM_OPTIMIZED))
7312 {
7313 rc = vmdkDescSetPCHSGeometry(pImage, pPCHSGeometry);
7314 if (RT_SUCCESS(rc))
7315 pImage->PCHSGeometry = *pPCHSGeometry;
7316 }
7317 else
7318 rc = VERR_NOT_SUPPORTED;
7319 }
7320 else
7321 rc = VERR_VD_IMAGE_READ_ONLY;
7322
7323 LogFlowFunc(("returns %Rrc\n", rc));
7324 return rc;
7325}
7326
7327/** @copydoc VDIMAGEBACKEND::pfnGetLCHSGeometry */
7328static DECLCALLBACK(int) vmdkGetLCHSGeometry(void *pBackendData, PVDGEOMETRY pLCHSGeometry)
7329{
7330 LogFlowFunc(("pBackendData=%#p pLCHSGeometry=%#p\n", pBackendData, pLCHSGeometry));
7331 PVMDKIMAGE pImage = (PVMDKIMAGE)pBackendData;
7332 int rc = VINF_SUCCESS;
7333
7334 AssertPtrReturn(pImage, VERR_VD_NOT_OPENED);
7335
7336 if (pImage->LCHSGeometry.cCylinders)
7337 *pLCHSGeometry = pImage->LCHSGeometry;
7338 else
7339 rc = VERR_VD_GEOMETRY_NOT_SET;
7340
7341 LogFlowFunc(("returns %Rrc (LCHS=%u/%u/%u)\n", rc, pLCHSGeometry->cCylinders, pLCHSGeometry->cHeads, pLCHSGeometry->cSectors));
7342 return rc;
7343}
7344
7345/** @copydoc VDIMAGEBACKEND::pfnSetLCHSGeometry */
7346static DECLCALLBACK(int) vmdkSetLCHSGeometry(void *pBackendData, PCVDGEOMETRY pLCHSGeometry)
7347{
7348 LogFlowFunc(("pBackendData=%#p pLCHSGeometry=%#p LCHS=%u/%u/%u\n",
7349 pBackendData, pLCHSGeometry, pLCHSGeometry->cCylinders, pLCHSGeometry->cHeads, pLCHSGeometry->cSectors));
7350 PVMDKIMAGE pImage = (PVMDKIMAGE)pBackendData;
7351 int rc = VINF_SUCCESS;
7352
7353 AssertPtrReturn(pImage, VERR_VD_NOT_OPENED);
7354
7355 if (!(pImage->uOpenFlags & VD_OPEN_FLAGS_READONLY))
7356 {
7357 if (!(pImage->uOpenFlags & VD_VMDK_IMAGE_FLAGS_STREAM_OPTIMIZED))
7358 {
7359 rc = vmdkDescSetLCHSGeometry(pImage, pLCHSGeometry);
7360 if (RT_SUCCESS(rc))
7361 pImage->LCHSGeometry = *pLCHSGeometry;
7362 }
7363 else
7364 rc = VERR_NOT_SUPPORTED;
7365 }
7366 else
7367 rc = VERR_VD_IMAGE_READ_ONLY;
7368
7369 LogFlowFunc(("returns %Rrc\n", rc));
7370 return rc;
7371}
7372
7373/** @copydoc VDIMAGEBACKEND::pfnQueryRegions */
7374static DECLCALLBACK(int) vmdkQueryRegions(void *pBackendData, PCVDREGIONLIST *ppRegionList)
7375{
7376 LogFlowFunc(("pBackendData=%#p ppRegionList=%#p\n", pBackendData, ppRegionList));
7377 PVMDKIMAGE pThis = (PVMDKIMAGE)pBackendData;
7378
7379 AssertPtrReturn(pThis, VERR_VD_NOT_OPENED);
7380
7381 *ppRegionList = &pThis->RegionList;
7382 LogFlowFunc(("returns %Rrc\n", VINF_SUCCESS));
7383 return VINF_SUCCESS;
7384}
7385
7386/** @copydoc VDIMAGEBACKEND::pfnRegionListRelease */
7387static DECLCALLBACK(void) vmdkRegionListRelease(void *pBackendData, PCVDREGIONLIST pRegionList)
7388{
7389 RT_NOREF1(pRegionList);
7390 LogFlowFunc(("pBackendData=%#p pRegionList=%#p\n", pBackendData, pRegionList));
7391 PVMDKIMAGE pThis = (PVMDKIMAGE)pBackendData;
7392 AssertPtr(pThis); RT_NOREF(pThis);
7393
7394 /* Nothing to do here. */
7395}
7396
7397/** @copydoc VDIMAGEBACKEND::pfnGetImageFlags */
7398static DECLCALLBACK(unsigned) vmdkGetImageFlags(void *pBackendData)
7399{
7400 LogFlowFunc(("pBackendData=%#p\n", pBackendData));
7401 PVMDKIMAGE pImage = (PVMDKIMAGE)pBackendData;
7402
7403 AssertPtrReturn(pImage, 0);
7404
7405 LogFlowFunc(("returns %#x\n", pImage->uImageFlags));
7406 return pImage->uImageFlags;
7407}
7408
7409/** @copydoc VDIMAGEBACKEND::pfnGetOpenFlags */
7410static DECLCALLBACK(unsigned) vmdkGetOpenFlags(void *pBackendData)
7411{
7412 LogFlowFunc(("pBackendData=%#p\n", pBackendData));
7413 PVMDKIMAGE pImage = (PVMDKIMAGE)pBackendData;
7414
7415 AssertPtrReturn(pImage, 0);
7416
7417 LogFlowFunc(("returns %#x\n", pImage->uOpenFlags));
7418 return pImage->uOpenFlags;
7419}
7420
7421/** @copydoc VDIMAGEBACKEND::pfnSetOpenFlags */
7422static DECLCALLBACK(int) vmdkSetOpenFlags(void *pBackendData, unsigned uOpenFlags)
7423{
7424 LogFlowFunc(("pBackendData=%#p uOpenFlags=%#x\n", pBackendData, uOpenFlags));
7425 PVMDKIMAGE pImage = (PVMDKIMAGE)pBackendData;
7426 int rc;
7427
7428 /* Image must be opened and the new flags must be valid. */
7429 if (!pImage || (uOpenFlags & ~( VD_OPEN_FLAGS_READONLY | VD_OPEN_FLAGS_INFO
7430 | VD_OPEN_FLAGS_ASYNC_IO | VD_OPEN_FLAGS_SHAREABLE
7431 | VD_OPEN_FLAGS_SEQUENTIAL | VD_OPEN_FLAGS_SKIP_CONSISTENCY_CHECKS)))
7432 rc = VERR_INVALID_PARAMETER;
7433 else
7434 {
7435 /* StreamOptimized images need special treatment: reopen is prohibited. */
7436 if (pImage->uImageFlags & VD_VMDK_IMAGE_FLAGS_STREAM_OPTIMIZED)
7437 {
7438 if (pImage->uOpenFlags == uOpenFlags)
7439 rc = VINF_SUCCESS;
7440 else
7441 rc = VERR_INVALID_PARAMETER;
7442 }
7443 else
7444 {
7445 /* Implement this operation via reopening the image. */
7446 vmdkFreeImage(pImage, false, true /*fFlush*/);
7447 rc = vmdkOpenImage(pImage, uOpenFlags);
7448 }
7449 }
7450
7451 LogFlowFunc(("returns %Rrc\n", rc));
7452 return rc;
7453}
7454
7455/** @copydoc VDIMAGEBACKEND::pfnGetComment */
7456static DECLCALLBACK(int) vmdkGetComment(void *pBackendData, char *pszComment, size_t cbComment)
7457{
7458 LogFlowFunc(("pBackendData=%#p pszComment=%#p cbComment=%zu\n", pBackendData, pszComment, cbComment));
7459 PVMDKIMAGE pImage = (PVMDKIMAGE)pBackendData;
7460
7461 AssertPtrReturn(pImage, VERR_VD_NOT_OPENED);
7462
7463 char *pszCommentEncoded = NULL;
7464 int rc = vmdkDescDDBGetStr(pImage, &pImage->Descriptor,
7465 "ddb.comment", &pszCommentEncoded);
7466 if (rc == VERR_VD_VMDK_VALUE_NOT_FOUND)
7467 {
7468 pszCommentEncoded = NULL;
7469 rc = VINF_SUCCESS;
7470 }
7471
7472 if (RT_SUCCESS(rc))
7473 {
7474 if (pszComment && pszCommentEncoded)
7475 rc = vmdkDecodeString(pszCommentEncoded, pszComment, cbComment);
7476 else if (pszComment)
7477 *pszComment = '\0';
7478
7479 if (pszCommentEncoded)
7480 RTMemTmpFree(pszCommentEncoded);
7481 }
7482
7483 LogFlowFunc(("returns %Rrc comment='%s'\n", rc, pszComment));
7484 return rc;
7485}
7486
7487/** @copydoc VDIMAGEBACKEND::pfnSetComment */
7488static DECLCALLBACK(int) vmdkSetComment(void *pBackendData, const char *pszComment)
7489{
7490 LogFlowFunc(("pBackendData=%#p pszComment=\"%s\"\n", pBackendData, pszComment));
7491 PVMDKIMAGE pImage = (PVMDKIMAGE)pBackendData;
7492 int rc;
7493
7494 AssertPtrReturn(pImage, VERR_VD_NOT_OPENED);
7495
7496 if (!(pImage->uOpenFlags & VD_OPEN_FLAGS_READONLY))
7497 {
7498 if (!(pImage->uOpenFlags & VD_VMDK_IMAGE_FLAGS_STREAM_OPTIMIZED))
7499 rc = vmdkSetImageComment(pImage, pszComment);
7500 else
7501 rc = VERR_NOT_SUPPORTED;
7502 }
7503 else
7504 rc = VERR_VD_IMAGE_READ_ONLY;
7505
7506 LogFlowFunc(("returns %Rrc\n", rc));
7507 return rc;
7508}
7509
7510/** @copydoc VDIMAGEBACKEND::pfnGetUuid */
7511static DECLCALLBACK(int) vmdkGetUuid(void *pBackendData, PRTUUID pUuid)
7512{
7513 LogFlowFunc(("pBackendData=%#p pUuid=%#p\n", pBackendData, pUuid));
7514 PVMDKIMAGE pImage = (PVMDKIMAGE)pBackendData;
7515
7516 AssertPtrReturn(pImage, VERR_VD_NOT_OPENED);
7517
7518 *pUuid = pImage->ImageUuid;
7519
7520 LogFlowFunc(("returns %Rrc (%RTuuid)\n", VINF_SUCCESS, pUuid));
7521 return VINF_SUCCESS;
7522}
7523
7524/** @copydoc VDIMAGEBACKEND::pfnSetUuid */
7525static DECLCALLBACK(int) vmdkSetUuid(void *pBackendData, PCRTUUID pUuid)
7526{
7527 LogFlowFunc(("pBackendData=%#p Uuid=%RTuuid\n", pBackendData, pUuid));
7528 PVMDKIMAGE pImage = (PVMDKIMAGE)pBackendData;
7529 int rc = VINF_SUCCESS;
7530
7531 AssertPtrReturn(pImage, VERR_VD_NOT_OPENED);
7532
7533 if (!(pImage->uOpenFlags & VD_OPEN_FLAGS_READONLY))
7534 {
7535 if (!(pImage->uOpenFlags & VD_VMDK_IMAGE_FLAGS_STREAM_OPTIMIZED))
7536 {
7537 pImage->ImageUuid = *pUuid;
7538 rc = vmdkDescDDBSetUuid(pImage, &pImage->Descriptor,
7539 VMDK_DDB_IMAGE_UUID, pUuid);
7540 if (RT_FAILURE(rc))
7541 rc = vdIfError(pImage->pIfError, rc, RT_SRC_POS,
7542 N_("VMDK: error storing image UUID in descriptor in '%s'"), pImage->pszFilename);
7543 }
7544 else
7545 rc = VERR_NOT_SUPPORTED;
7546 }
7547 else
7548 rc = VERR_VD_IMAGE_READ_ONLY;
7549
7550 LogFlowFunc(("returns %Rrc\n", rc));
7551 return rc;
7552}
7553
7554/** @copydoc VDIMAGEBACKEND::pfnGetModificationUuid */
7555static DECLCALLBACK(int) vmdkGetModificationUuid(void *pBackendData, PRTUUID pUuid)
7556{
7557 LogFlowFunc(("pBackendData=%#p pUuid=%#p\n", pBackendData, pUuid));
7558 PVMDKIMAGE pImage = (PVMDKIMAGE)pBackendData;
7559
7560 AssertPtrReturn(pImage, VERR_VD_NOT_OPENED);
7561
7562 *pUuid = pImage->ModificationUuid;
7563
7564 LogFlowFunc(("returns %Rrc (%RTuuid)\n", VINF_SUCCESS, pUuid));
7565 return VINF_SUCCESS;
7566}
7567
7568/** @copydoc VDIMAGEBACKEND::pfnSetModificationUuid */
7569static DECLCALLBACK(int) vmdkSetModificationUuid(void *pBackendData, PCRTUUID pUuid)
7570{
7571 LogFlowFunc(("pBackendData=%#p Uuid=%RTuuid\n", pBackendData, pUuid));
7572 PVMDKIMAGE pImage = (PVMDKIMAGE)pBackendData;
7573 int rc = VINF_SUCCESS;
7574
7575 AssertPtrReturn(pImage, VERR_VD_NOT_OPENED);
7576
7577 if (!(pImage->uOpenFlags & VD_OPEN_FLAGS_READONLY))
7578 {
7579 if (!(pImage->uOpenFlags & VD_VMDK_IMAGE_FLAGS_STREAM_OPTIMIZED))
7580 {
7581 /* Only touch the modification uuid if it changed. */
7582 if (RTUuidCompare(&pImage->ModificationUuid, pUuid))
7583 {
7584 pImage->ModificationUuid = *pUuid;
7585 rc = vmdkDescDDBSetUuid(pImage, &pImage->Descriptor,
7586 VMDK_DDB_MODIFICATION_UUID, pUuid);
7587 if (RT_FAILURE(rc))
7588 rc = vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: error storing modification UUID in descriptor in '%s'"), pImage->pszFilename);
7589 }
7590 }
7591 else
7592 rc = VERR_NOT_SUPPORTED;
7593 }
7594 else
7595 rc = VERR_VD_IMAGE_READ_ONLY;
7596
7597 LogFlowFunc(("returns %Rrc\n", rc));
7598 return rc;
7599}
7600
7601/** @copydoc VDIMAGEBACKEND::pfnGetParentUuid */
7602static DECLCALLBACK(int) vmdkGetParentUuid(void *pBackendData, PRTUUID pUuid)
7603{
7604 LogFlowFunc(("pBackendData=%#p pUuid=%#p\n", pBackendData, pUuid));
7605 PVMDKIMAGE pImage = (PVMDKIMAGE)pBackendData;
7606
7607 AssertPtrReturn(pImage, VERR_VD_NOT_OPENED);
7608
7609 *pUuid = pImage->ParentUuid;
7610
7611 LogFlowFunc(("returns %Rrc (%RTuuid)\n", VINF_SUCCESS, pUuid));
7612 return VINF_SUCCESS;
7613}
7614
7615/** @copydoc VDIMAGEBACKEND::pfnSetParentUuid */
7616static DECLCALLBACK(int) vmdkSetParentUuid(void *pBackendData, PCRTUUID pUuid)
7617{
7618 LogFlowFunc(("pBackendData=%#p Uuid=%RTuuid\n", pBackendData, pUuid));
7619 PVMDKIMAGE pImage = (PVMDKIMAGE)pBackendData;
7620 int rc = VINF_SUCCESS;
7621
7622 AssertPtrReturn(pImage, VERR_VD_NOT_OPENED);
7623
7624 if (!(pImage->uOpenFlags & VD_OPEN_FLAGS_READONLY))
7625 {
7626 if (!(pImage->uOpenFlags & VD_VMDK_IMAGE_FLAGS_STREAM_OPTIMIZED))
7627 {
7628 pImage->ParentUuid = *pUuid;
7629 rc = vmdkDescDDBSetUuid(pImage, &pImage->Descriptor,
7630 VMDK_DDB_PARENT_UUID, pUuid);
7631 if (RT_FAILURE(rc))
7632 rc = vdIfError(pImage->pIfError, rc, RT_SRC_POS,
7633 N_("VMDK: error storing parent image UUID in descriptor in '%s'"), pImage->pszFilename);
7634 }
7635 else
7636 rc = VERR_NOT_SUPPORTED;
7637 }
7638 else
7639 rc = VERR_VD_IMAGE_READ_ONLY;
7640
7641 LogFlowFunc(("returns %Rrc\n", rc));
7642 return rc;
7643}
7644
7645/** @copydoc VDIMAGEBACKEND::pfnGetParentModificationUuid */
7646static DECLCALLBACK(int) vmdkGetParentModificationUuid(void *pBackendData, PRTUUID pUuid)
7647{
7648 LogFlowFunc(("pBackendData=%#p pUuid=%#p\n", pBackendData, pUuid));
7649 PVMDKIMAGE pImage = (PVMDKIMAGE)pBackendData;
7650
7651 AssertPtrReturn(pImage, VERR_VD_NOT_OPENED);
7652
7653 *pUuid = pImage->ParentModificationUuid;
7654
7655 LogFlowFunc(("returns %Rrc (%RTuuid)\n", VINF_SUCCESS, pUuid));
7656 return VINF_SUCCESS;
7657}
7658
7659/** @copydoc VDIMAGEBACKEND::pfnSetParentModificationUuid */
7660static DECLCALLBACK(int) vmdkSetParentModificationUuid(void *pBackendData, PCRTUUID pUuid)
7661{
7662 LogFlowFunc(("pBackendData=%#p Uuid=%RTuuid\n", pBackendData, pUuid));
7663 PVMDKIMAGE pImage = (PVMDKIMAGE)pBackendData;
7664 int rc = VINF_SUCCESS;
7665
7666 AssertPtrReturn(pImage, VERR_VD_NOT_OPENED);
7667
7668 if (!(pImage->uOpenFlags & VD_OPEN_FLAGS_READONLY))
7669 {
7670 if (!(pImage->uOpenFlags & VD_VMDK_IMAGE_FLAGS_STREAM_OPTIMIZED))
7671 {
7672 pImage->ParentModificationUuid = *pUuid;
7673 rc = vmdkDescDDBSetUuid(pImage, &pImage->Descriptor,
7674 VMDK_DDB_PARENT_MODIFICATION_UUID, pUuid);
7675 if (RT_FAILURE(rc))
7676 rc = vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: error storing parent image UUID in descriptor in '%s'"), pImage->pszFilename);
7677 }
7678 else
7679 rc = VERR_NOT_SUPPORTED;
7680 }
7681 else
7682 rc = VERR_VD_IMAGE_READ_ONLY;
7683
7684 LogFlowFunc(("returns %Rrc\n", rc));
7685 return rc;
7686}
7687
7688/** @copydoc VDIMAGEBACKEND::pfnDump */
7689static DECLCALLBACK(void) vmdkDump(void *pBackendData)
7690{
7691 PVMDKIMAGE pImage = (PVMDKIMAGE)pBackendData;
7692
7693 AssertPtrReturnVoid(pImage);
7694 vdIfErrorMessage(pImage->pIfError, "Header: Geometry PCHS=%u/%u/%u LCHS=%u/%u/%u cbSector=%llu\n",
7695 pImage->PCHSGeometry.cCylinders, pImage->PCHSGeometry.cHeads, pImage->PCHSGeometry.cSectors,
7696 pImage->LCHSGeometry.cCylinders, pImage->LCHSGeometry.cHeads, pImage->LCHSGeometry.cSectors,
7697 VMDK_BYTE2SECTOR(pImage->cbSize));
7698 vdIfErrorMessage(pImage->pIfError, "Header: uuidCreation={%RTuuid}\n", &pImage->ImageUuid);
7699 vdIfErrorMessage(pImage->pIfError, "Header: uuidModification={%RTuuid}\n", &pImage->ModificationUuid);
7700 vdIfErrorMessage(pImage->pIfError, "Header: uuidParent={%RTuuid}\n", &pImage->ParentUuid);
7701 vdIfErrorMessage(pImage->pIfError, "Header: uuidParentModification={%RTuuid}\n", &pImage->ParentModificationUuid);
7702}
7703
7704
7705
/**
 * The VMDK backend method table registered with the VD core.
 * Unimplemented optional operations (discard, compact, resize, repair,
 * timestamps, parent filename) are NULL.
 */
const VDIMAGEBACKEND g_VmdkBackend =
{
    /* u32Version */
    VD_IMGBACKEND_VERSION,
    /* pszBackendName */
    "VMDK",
    /* uBackendCaps */
    VD_CAP_UUID | VD_CAP_CREATE_FIXED | VD_CAP_CREATE_DYNAMIC
    | VD_CAP_CREATE_SPLIT_2G | VD_CAP_DIFF | VD_CAP_FILE | VD_CAP_ASYNC
    | VD_CAP_VFS | VD_CAP_PREFERRED,
    /* paFileExtensions */
    s_aVmdkFileExtensions,
    /* paConfigInfo */
    s_aVmdkConfigInfo,
    /* pfnProbe */
    vmdkProbe,
    /* pfnOpen */
    vmdkOpen,
    /* pfnCreate */
    vmdkCreate,
    /* pfnRename */
    vmdkRename,
    /* pfnClose */
    vmdkClose,
    /* pfnRead */
    vmdkRead,
    /* pfnWrite */
    vmdkWrite,
    /* pfnFlush */
    vmdkFlush,
    /* pfnDiscard */
    NULL,
    /* pfnGetVersion */
    vmdkGetVersion,
    /* pfnGetFileSize */
    vmdkGetFileSize,
    /* pfnGetPCHSGeometry */
    vmdkGetPCHSGeometry,
    /* pfnSetPCHSGeometry */
    vmdkSetPCHSGeometry,
    /* pfnGetLCHSGeometry */
    vmdkGetLCHSGeometry,
    /* pfnSetLCHSGeometry */
    vmdkSetLCHSGeometry,
    /* pfnQueryRegions */
    vmdkQueryRegions,
    /* pfnRegionListRelease */
    vmdkRegionListRelease,
    /* pfnGetImageFlags */
    vmdkGetImageFlags,
    /* pfnGetOpenFlags */
    vmdkGetOpenFlags,
    /* pfnSetOpenFlags */
    vmdkSetOpenFlags,
    /* pfnGetComment */
    vmdkGetComment,
    /* pfnSetComment */
    vmdkSetComment,
    /* pfnGetUuid */
    vmdkGetUuid,
    /* pfnSetUuid */
    vmdkSetUuid,
    /* pfnGetModificationUuid */
    vmdkGetModificationUuid,
    /* pfnSetModificationUuid */
    vmdkSetModificationUuid,
    /* pfnGetParentUuid */
    vmdkGetParentUuid,
    /* pfnSetParentUuid */
    vmdkSetParentUuid,
    /* pfnGetParentModificationUuid */
    vmdkGetParentModificationUuid,
    /* pfnSetParentModificationUuid */
    vmdkSetParentModificationUuid,
    /* pfnDump */
    vmdkDump,
    /* pfnGetTimestamp */
    NULL,
    /* pfnGetParentTimestamp */
    NULL,
    /* pfnSetParentTimestamp */
    NULL,
    /* pfnGetParentFilename */
    NULL,
    /* pfnSetParentFilename */
    NULL,
    /* pfnComposeLocation */
    genericFileComposeLocation,
    /* pfnComposeName */
    genericFileComposeName,
    /* pfnCompact */
    NULL,
    /* pfnResize */
    NULL,
    /* pfnRepair */
    NULL,
    /* pfnTraverseMetadata */
    NULL,
    /* u32VersionEnd */
    VD_IMGBACKEND_VERSION
};
Note: See TracBrowser for help on using the repository browser.

© 2024 Oracle Support Privacy / Do Not Sell My Info Terms of Use Trademark Policy Automated Access Etiquette