VirtualBox

source: vbox/trunk/src/VBox/Storage/VMDK.cpp@ 96833

Last change on this file was 96833, checked in by vboxsync on 2022-09-22

Storage: added resize functionality for VMDK monolithicFlat and twoGbMaxExtentFlat. bugref:8707

  • Property svn:eol-style set to native
  • Property svn:keywords set to Author Date Id Revision
File size: 352.7 KB
1/* $Id: VMDK.cpp 96833 2022-09-22 19:07:02Z vboxsync $ */
2/** @file
3 * VMDK disk image, core code.
4 */
5/*
6 * Copyright (C) 2006-2022 Oracle and/or its affiliates.
7 *
8 * This file is part of VirtualBox base platform packages, as
9 * available from https://www.virtualbox.org.
10 *
11 * This program is free software; you can redistribute it and/or
12 * modify it under the terms of the GNU General Public License
13 * as published by the Free Software Foundation, in version 3 of the
14 * License.
15 *
16 * This program is distributed in the hope that it will be useful, but
17 * WITHOUT ANY WARRANTY; without even the implied warranty of
18 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
19 * General Public License for more details.
20 *
21 * You should have received a copy of the GNU General Public License
22 * along with this program; if not, see <https://www.gnu.org/licenses>.
23 *
24 * SPDX-License-Identifier: GPL-3.0-only
25 */
26
27
28/*********************************************************************************************************************************
29* Header Files *
30*********************************************************************************************************************************/
31#define LOG_GROUP LOG_GROUP_VD_VMDK
32#include <VBox/log.h> /* before VBox/vd-ifs.h */
33#include <VBox/vd-plugin.h>
34#include <VBox/err.h>
35#include <iprt/assert.h>
36#include <iprt/alloc.h>
37#include <iprt/base64.h>
38#include <iprt/ctype.h>
39#include <iprt/crc.h>
40#include <iprt/dvm.h>
41#include <iprt/uuid.h>
42#include <iprt/path.h>
43#include <iprt/rand.h>
44#include <iprt/string.h>
45#include <iprt/sort.h>
46#include <iprt/zip.h>
47#include <iprt/asm.h>
48#ifdef RT_OS_WINDOWS
49# include <iprt/utf16.h>
50# include <iprt/uni.h>
51# include <iprt/uni.h>
52# include <iprt/nt/nt-and-windows.h>
53# include <winioctl.h>
54#endif
55#ifdef RT_OS_LINUX
56# include <errno.h>
57# include <sys/stat.h>
58# include <iprt/dir.h>
59# include <iprt/symlink.h>
60# include <iprt/linux/sysfs.h>
61#endif
62#ifdef RT_OS_FREEBSD
63#include <libgeom.h>
64#include <sys/stat.h>
65#include <stdlib.h>
66#endif
67#ifdef RT_OS_SOLARIS
68#include <sys/dkio.h>
69#include <sys/vtoc.h>
70#include <sys/efi_partition.h>
71#include <unistd.h>
72#include <errno.h>
73#endif
74#ifdef RT_OS_DARWIN
75# include <sys/stat.h>
76# include <sys/disk.h>
77# include <errno.h>
78/* The following structure and IOCTLs are defined in xnu bsd/sys/disk.h but
79 inside KERNEL ifdefs and thus stripped from the SDK edition of the header.
80 While we could try include the header from the Kernel.framework, it's a lot
81 easier to just add the structure and 4 defines here. */
82typedef struct
83{
84 uint64_t offset;
85 uint64_t length;
86 uint8_t reserved0128[12];
87 dev_t dev;
88} dk_physical_extent_t;
89# define DKIOCGETBASE _IOR( 'd', 73, uint64_t)
90# define DKIOCLOCKPHYSICALEXTENTS _IO( 'd', 81)
91# define DKIOCGETPHYSICALEXTENT _IOWR('d', 82, dk_physical_extent_t)
92# define DKIOCUNLOCKPHYSICALEXTENTS _IO( 'd', 83)
93#endif /* RT_OS_DARWIN */
94#include "VDBackends.h"
95
96
97/*********************************************************************************************************************************
98* Constants And Macros, Structures and Typedefs *
99*********************************************************************************************************************************/
100/** Maximum encoded string size (including NUL) we allow for VMDK images.
101 * Deliberately not set high to avoid running out of descriptor space. */
102#define VMDK_ENCODED_COMMENT_MAX 1024
103/** VMDK descriptor DDB entry for PCHS cylinders. */
104#define VMDK_DDB_GEO_PCHS_CYLINDERS "ddb.geometry.cylinders"
105/** VMDK descriptor DDB entry for PCHS heads. */
106#define VMDK_DDB_GEO_PCHS_HEADS "ddb.geometry.heads"
107/** VMDK descriptor DDB entry for PCHS sectors. */
108#define VMDK_DDB_GEO_PCHS_SECTORS "ddb.geometry.sectors"
109/** VMDK descriptor DDB entry for LCHS cylinders. */
110#define VMDK_DDB_GEO_LCHS_CYLINDERS "ddb.geometry.biosCylinders"
111/** VMDK descriptor DDB entry for LCHS heads. */
112#define VMDK_DDB_GEO_LCHS_HEADS "ddb.geometry.biosHeads"
113/** VMDK descriptor DDB entry for LCHS sectors. */
114#define VMDK_DDB_GEO_LCHS_SECTORS "ddb.geometry.biosSectors"
115/** VMDK descriptor DDB entry for image UUID. */
116#define VMDK_DDB_IMAGE_UUID "ddb.uuid.image"
117/** VMDK descriptor DDB entry for image modification UUID. */
118#define VMDK_DDB_MODIFICATION_UUID "ddb.uuid.modification"
119/** VMDK descriptor DDB entry for parent image UUID. */
120#define VMDK_DDB_PARENT_UUID "ddb.uuid.parent"
121/** VMDK descriptor DDB entry for parent image modification UUID. */
122#define VMDK_DDB_PARENT_MODIFICATION_UUID "ddb.uuid.parentmodification"
123/** No compression for streamOptimized files. */
124#define VMDK_COMPRESSION_NONE 0
125/** Deflate compression for streamOptimized files. */
126#define VMDK_COMPRESSION_DEFLATE 1
127/** Marker that the actual GD value is stored in the footer. */
128#define VMDK_GD_AT_END 0xffffffffffffffffULL
129/** Marker for end-of-stream in streamOptimized images. */
130#define VMDK_MARKER_EOS 0
131/** Marker for grain table block in streamOptimized images. */
132#define VMDK_MARKER_GT 1
133/** Marker for grain directory block in streamOptimized images. */
134#define VMDK_MARKER_GD 2
135/** Marker for footer in streamOptimized images. */
136#define VMDK_MARKER_FOOTER 3
137/** Marker for unknown purpose in streamOptimized images.
138 * Shows up in very recent images created by vSphere, but only sporadically.
139 * They "forgot" to document that one in the VMDK specification. */
140#define VMDK_MARKER_UNSPECIFIED 4
141/** Dummy marker for "don't check the marker value". */
142#define VMDK_MARKER_IGNORE 0xffffffffU
143/**
144 * Magic number for hosted images created by VMware Workstation 4, VMware
145 * Workstation 5, VMware Server or VMware Player. Not necessarily sparse.
146 */
147#define VMDK_SPARSE_MAGICNUMBER 0x564d444b /* 'V' 'M' 'D' 'K' */
148/** VMDK sector size in bytes. */
149#define VMDK_SECTOR_SIZE 512
150/** Max string buffer size for uint64_t with null term */
151#define UINT64_MAX_BUFF_SIZE 21
152/** Grain directory entry size in bytes */
153#define VMDK_GRAIN_DIR_ENTRY_SIZE 4
154/** Grain table size in bytes */
155#define VMDK_GRAIN_TABLE_SIZE 2048
156/**
157 * VMDK hosted binary extent header. The "Sparse" is a total misnomer, as
158 * this header is also used for monolithic flat images.
159 */
160#pragma pack(1)
161typedef struct SparseExtentHeader
162{
163 uint32_t magicNumber;
164 uint32_t version;
165 uint32_t flags;
166 uint64_t capacity;
167 uint64_t grainSize;
168 uint64_t descriptorOffset;
169 uint64_t descriptorSize;
170 uint32_t numGTEsPerGT;
171 uint64_t rgdOffset;
172 uint64_t gdOffset;
173 uint64_t overHead;
174 bool uncleanShutdown;
175 char singleEndLineChar;
176 char nonEndLineChar;
177 char doubleEndLineChar1;
178 char doubleEndLineChar2;
179 uint16_t compressAlgorithm;
180 uint8_t pad[433];
181} SparseExtentHeader;
182#pragma pack()
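/**
 * Editorial sketch, not part of the original source: how the 512-byte
 * SparseExtentHeader defined above could be read and sanity-checked. The use
 * of a plain FILE handle and fread() is an assumption for the example; the
 * backend itself goes through the VD I/O interface (vdIfIoIntFileReadSync).
 *
 * @code
 *     static int exampleReadSparseHeader(FILE *pFile, SparseExtentHeader *pHdr)
 *     {
 *         if (fread(pHdr, sizeof(*pHdr), 1, pFile) != 1) // header is exactly 512 bytes
 *             return -1;
 *         // All fields are stored little endian; the magic reads "KDMV" byte by byte.
 *         if (RT_LE2H_U32(pHdr->magicNumber) != VMDK_SPARSE_MAGICNUMBER)
 *             return -1;
 *         // grainSize is given in sectors; the default grain is 128 sectors (64K).
 *         if (RT_LE2H_U64(pHdr->grainSize) == 0)
 *             return -1;
 *         return 0;
 *     }
 * @endcode
 */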
183/** The maximum allowed descriptor size in the extent header in sectors. */
184#define VMDK_SPARSE_DESCRIPTOR_SIZE_MAX UINT64_C(20480) /* 10MB */
185/** VMDK capacity for a single chunk when 2G splitting is turned on. Should be
186 * divisible by the default grain size (64K) */
187#define VMDK_2G_SPLIT_SIZE (2047 * 1024 * 1024)
188/** VMDK streamOptimized file format marker. The type field may or may not
189 * be actually valid, but there's always data to read there. */
190#pragma pack(1)
191typedef struct VMDKMARKER
192{
193 uint64_t uSector;
194 uint32_t cbSize;
195 uint32_t uType;
196} VMDKMARKER, *PVMDKMARKER;
197#pragma pack()
198/** Convert sector number/size to byte offset/size. */
199#define VMDK_SECTOR2BYTE(u) ((uint64_t)(u) << 9)
200/** Convert byte offset/size to sector number/size. */
201#define VMDK_BYTE2SECTOR(u) ((u) >> 9)
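/**
 * Editorial sketch, not part of the original source: the two macros above are
 * plain shifts by 9 because a VMDK sector is VMDK_SECTOR_SIZE (512) bytes.
 *
 * @code
 *     uint64_t cbGrain  = VMDK_SECTOR2BYTE(128);   // 128 sectors -> 65536 bytes (one default 64K grain)
 *     uint64_t cSectors = VMDK_BYTE2SECTOR(65536); // 65536 bytes -> 128 sectors
 *     Assert(VMDK_SECTOR2BYTE(VMDK_BYTE2SECTOR(cbGrain)) == cbGrain); // exact only for 512-byte multiples
 * @endcode
 */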
202/**
203 * VMDK extent type.
204 */
205typedef enum VMDKETYPE
206{
207 /** Hosted sparse extent. */
208 VMDKETYPE_HOSTED_SPARSE = 1,
209 /** Flat extent. */
210 VMDKETYPE_FLAT,
211 /** Zero extent. */
212 VMDKETYPE_ZERO,
213 /** VMFS extent, used by ESX. */
214 VMDKETYPE_VMFS
215} VMDKETYPE, *PVMDKETYPE;
216/**
217 * VMDK access type for an extent.
218 */
219typedef enum VMDKACCESS
220{
221 /** No access allowed. */
222 VMDKACCESS_NOACCESS = 0,
223 /** Read-only access. */
224 VMDKACCESS_READONLY,
225 /** Read-write access. */
226 VMDKACCESS_READWRITE
227} VMDKACCESS, *PVMDKACCESS;
228/** Forward declaration for PVMDKIMAGE. */
229typedef struct VMDKIMAGE *PVMDKIMAGE;
230/**
231 * Extents files entry. Used for opening a particular file only once.
232 */
233typedef struct VMDKFILE
234{
235 /** Pointer to file path. Local copy. */
236 const char *pszFilename;
237 /** Pointer to base name. Local copy. */
238 const char *pszBasename;
239 /** File open flags for consistency checking. */
240 unsigned fOpen;
241 /** Handle for sync/async file abstraction.*/
242 PVDIOSTORAGE pStorage;
243 /** Reference counter. */
244 unsigned uReferences;
245 /** Flag whether the file should be deleted on last close. */
246 bool fDelete;
247 /** Pointer to the image we belong to (for debugging purposes). */
248 PVMDKIMAGE pImage;
249 /** Pointer to next file descriptor. */
250 struct VMDKFILE *pNext;
251 /** Pointer to the previous file descriptor. */
252 struct VMDKFILE *pPrev;
253} VMDKFILE, *PVMDKFILE;
254/**
255 * VMDK extent data structure.
256 */
257typedef struct VMDKEXTENT
258{
259 /** File handle. */
260 PVMDKFILE pFile;
261 /** Base name of the image extent. */
262 const char *pszBasename;
263 /** Full name of the image extent. */
264 const char *pszFullname;
265 /** Number of sectors in this extent. */
266 uint64_t cSectors;
267 /** Number of sectors per block (grain in VMDK speak). */
268 uint64_t cSectorsPerGrain;
269 /** Starting sector number of descriptor. */
270 uint64_t uDescriptorSector;
271 /** Size of descriptor in sectors. */
272 uint64_t cDescriptorSectors;
273 /** Starting sector number of grain directory. */
274 uint64_t uSectorGD;
275 /** Starting sector number of redundant grain directory. */
276 uint64_t uSectorRGD;
277 /** Total number of metadata sectors. */
278 uint64_t cOverheadSectors;
279 /** Nominal size (i.e. as described by the descriptor) of this extent. */
280 uint64_t cNominalSectors;
281 /** Sector offset (i.e. as described by the descriptor) of this extent. */
282 uint64_t uSectorOffset;
283 /** Number of entries in a grain table. */
284 uint32_t cGTEntries;
285 /** Number of sectors reachable via a grain directory entry. */
286 uint32_t cSectorsPerGDE;
287 /** Number of entries in the grain directory. */
288 uint32_t cGDEntries;
289 /** Pointer to the next free sector. Legacy information. Do not use. */
290 uint32_t uFreeSector;
291 /** Number of this extent in the list of images. */
292 uint32_t uExtent;
293 /** Pointer to the descriptor (NULL if no descriptor in this extent). */
294 char *pDescData;
295 /** Pointer to the grain directory. */
296 uint32_t *pGD;
297 /** Pointer to the redundant grain directory. */
298 uint32_t *pRGD;
299 /** VMDK version of this extent. 1=1.0/1.1 */
300 uint32_t uVersion;
301 /** Type of this extent. */
302 VMDKETYPE enmType;
303 /** Access to this extent. */
304 VMDKACCESS enmAccess;
305 /** Flag whether this extent is marked as unclean. */
306 bool fUncleanShutdown;
307 /** Flag whether the metadata in the extent header needs to be updated. */
308 bool fMetaDirty;
309 /** Flag whether there is a footer in this extent. */
310 bool fFooter;
311 /** Compression type for this extent. */
312 uint16_t uCompression;
313 /** Append position for writing new grain. Only for sparse extents. */
314 uint64_t uAppendPosition;
315 /** Last grain which was accessed. Only for streamOptimized extents. */
316 uint32_t uLastGrainAccess;
317 /** Starting sector corresponding to the grain buffer. */
318 uint32_t uGrainSectorAbs;
319 /** Grain number corresponding to the grain buffer. */
320 uint32_t uGrain;
321 /** Actual size of the compressed data, only valid for reading. */
322 uint32_t cbGrainStreamRead;
323 /** Size of compressed grain buffer for streamOptimized extents. */
324 size_t cbCompGrain;
325 /** Compressed grain buffer for streamOptimized extents, with marker. */
326 void *pvCompGrain;
327 /** Decompressed grain buffer for streamOptimized extents. */
328 void *pvGrain;
329 /** Reference to the image in which this extent is used. Do not use this
330 * on a regular basis to avoid passing pImage references to functions
331 * explicitly. */
332 struct VMDKIMAGE *pImage;
333} VMDKEXTENT, *PVMDKEXTENT;
334/**
335 * Grain table cache size. Allocated per image.
336 */
337#define VMDK_GT_CACHE_SIZE 256
338/**
339 * Grain table block size. Smaller than an actual grain table block to allow
340 * more grain table blocks to be cached without having to allocate excessive
341 * amounts of memory for the cache.
342 */
343#define VMDK_GT_CACHELINE_SIZE 128
344/**
345 * Maximum number of lines in a descriptor file. Not worth the effort of
346 * making it variable. Descriptor files are generally very short (~20 lines),
347 * with the exception of sparse files split into 2G chunks, which at the
348 * maximum size (almost 2T) need exactly 1025 lines for the disk database.
349 */
350#define VMDK_DESCRIPTOR_LINES_MAX 1100U
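/* Editorial note, not part of the original source: the 1025 figure above
 * follows from the 2G split size. Each chunk covers VMDK_2G_SPLIT_SIZE
 * = 2047 MiB, so an almost-2T image needs about
 * 2 * 1024 * 1024 MiB / 2047 MiB ~= 1024.5, i.e. up to 1025 extent lines,
 * plus a handful of header and DDB lines - comfortably below the 1100 limit. */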
351/**
352 * Parsed descriptor information. Allows easy access and update of the
353 * descriptor (whether separate file or not). Free form text files suck.
354 */
355typedef struct VMDKDESCRIPTOR
356{
357 /** Line number of first entry of the disk descriptor. */
358 unsigned uFirstDesc;
359 /** Line number of first entry in the extent description. */
360 unsigned uFirstExtent;
361 /** Line number of first disk database entry. */
362 unsigned uFirstDDB;
363 /** Total number of lines. */
364 unsigned cLines;
365 /** Total amount of memory available for the descriptor. */
366 size_t cbDescAlloc;
367 /** Set if descriptor has been changed and not yet written to disk. */
368 bool fDirty;
369 /** Array of pointers to the data in the descriptor. */
370 char *aLines[VMDK_DESCRIPTOR_LINES_MAX];
371 /** Array of line indices pointing to the next non-comment line. */
372 unsigned aNextLines[VMDK_DESCRIPTOR_LINES_MAX];
373} VMDKDESCRIPTOR, *PVMDKDESCRIPTOR;
374/**
375 * Cache entry for translating extent/sector to a sector number in that
376 * extent.
377 */
378typedef struct VMDKGTCACHEENTRY
379{
380 /** Extent number for which this entry is valid. */
381 uint32_t uExtent;
382 /** GT data block number. */
383 uint64_t uGTBlock;
384 /** Data part of the cache entry. */
385 uint32_t aGTData[VMDK_GT_CACHELINE_SIZE];
386} VMDKGTCACHEENTRY, *PVMDKGTCACHEENTRY;
387/**
388 * Cache data structure for blocks of grain table entries. For now this is a
389 * fixed size direct mapping cache, but this should be adapted to the size of
390 * the sparse image and maybe converted to a set-associative cache. The
391 * implementation below implements a write-through cache with write allocate.
392 */
393typedef struct VMDKGTCACHE
394{
395 /** Cache entries. */
396 VMDKGTCACHEENTRY aGTCache[VMDK_GT_CACHE_SIZE];
397 /** Number of cache entries (currently unused). */
398 unsigned cEntries;
399} VMDKGTCACHE, *PVMDKGTCACHE;
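/**
 * Editorial sketch, not part of the original source: what a lookup in the
 * direct-mapped, write-through cache described above typically looks like.
 * The modulo indexing and the helper name are assumptions for the example;
 * the hash actually used by this backend appears further down in the file.
 *
 * @code
 *     static uint32_t exampleGTCacheLookup(PVMDKGTCACHE pCache, uint32_t uExtent,
 *                                          uint64_t uGTBlock, uint32_t iGTE)
 *     {
 *         PVMDKGTCACHEENTRY pEntry = &pCache->aGTCache[uGTBlock % VMDK_GT_CACHE_SIZE];
 *         if (pEntry->uExtent != uExtent || pEntry->uGTBlock != uGTBlock)
 *             return UINT32_MAX; // miss: the caller reads the GT block from disk and refills the entry
 *         return pEntry->aGTData[iGTE % VMDK_GT_CACHELINE_SIZE]; // hit
 *     }
 * @endcode
 */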
400/**
401 * Complete VMDK image data structure. Mainly a collection of extents and a few
402 * extra global data fields.
403 */
404typedef struct VMDKIMAGE
405{
406 /** Image name. */
407 const char *pszFilename;
408 /** Descriptor file if applicable. */
409 PVMDKFILE pFile;
410 /** Pointer to the per-disk VD interface list. */
411 PVDINTERFACE pVDIfsDisk;
412 /** Pointer to the per-image VD interface list. */
413 PVDINTERFACE pVDIfsImage;
414 /** Error interface. */
415 PVDINTERFACEERROR pIfError;
416 /** I/O interface. */
417 PVDINTERFACEIOINT pIfIo;
418 /** Pointer to the image extents. */
419 PVMDKEXTENT pExtents;
420 /** Number of image extents. */
421 unsigned cExtents;
422 /** Pointer to the files list, for opening a file referenced multiple
423 * times only once (happens mainly with raw partition access). */
424 PVMDKFILE pFiles;
425 /**
426 * Pointer to an array of segment entries for async I/O.
427 * This is an optimization because the task number to submit is not known
428 * and allocating/freeing an array in the read/write functions every time
429 * is too expensive.
430 */
431 PPDMDATASEG paSegments;
432 /** Entries available in the segments array. */
433 unsigned cSegments;
434 /** Open flags passed by VBoxHD layer. */
435 unsigned uOpenFlags;
436 /** Image flags defined during creation or determined during open. */
437 unsigned uImageFlags;
438 /** Total size of the image. */
439 uint64_t cbSize;
440 /** Physical geometry of this image. */
441 VDGEOMETRY PCHSGeometry;
442 /** Logical geometry of this image. */
443 VDGEOMETRY LCHSGeometry;
444 /** Image UUID. */
445 RTUUID ImageUuid;
446 /** Image modification UUID. */
447 RTUUID ModificationUuid;
448 /** Parent image UUID. */
449 RTUUID ParentUuid;
450 /** Parent image modification UUID. */
451 RTUUID ParentModificationUuid;
452 /** Pointer to grain table cache, if this image contains sparse extents. */
453 PVMDKGTCACHE pGTCache;
454 /** Pointer to the descriptor (NULL if no separate descriptor file). */
455 char *pDescData;
456 /** Allocation size of the descriptor file. */
457 size_t cbDescAlloc;
458 /** Parsed descriptor file content. */
459 VMDKDESCRIPTOR Descriptor;
460 /** The static region list. */
461 VDREGIONLIST RegionList;
462} VMDKIMAGE;
463/** State for the input/output callout of the inflate reader/deflate writer. */
464typedef struct VMDKCOMPRESSIO
465{
466 /* Image this operation relates to. */
467 PVMDKIMAGE pImage;
468 /* Current read position. */
469 ssize_t iOffset;
470 /* Size of the compressed grain buffer (available data). */
471 size_t cbCompGrain;
472 /* Pointer to the compressed grain buffer. */
473 void *pvCompGrain;
474} VMDKCOMPRESSIO;
475/** Tracks async grain allocation. */
476typedef struct VMDKGRAINALLOCASYNC
477{
478 /** Flag whether the allocation failed. */
479 bool fIoErr;
480 /** Current number of transfers pending.
481 * If it reaches 0 and there is an error, the old state is restored. */
482 unsigned cIoXfersPending;
483 /** Sector number */
484 uint64_t uSector;
485 /** Flag whether the grain table needs to be updated. */
486 bool fGTUpdateNeeded;
488 /** Extent in which the allocation happens. */
488 PVMDKEXTENT pExtent;
489 /** Position of the new grain, required for the grain table update. */
490 uint64_t uGrainOffset;
491 /** Grain table sector. */
492 uint64_t uGTSector;
493 /** Backup grain table sector. */
494 uint64_t uRGTSector;
495} VMDKGRAINALLOCASYNC, *PVMDKGRAINALLOCASYNC;
496/**
497 * State information for vmdkRename() and helpers.
498 */
499typedef struct VMDKRENAMESTATE
500{
501 /** Array of old filenames. */
502 char **apszOldName;
503 /** Array of new filenames. */
504 char **apszNewName;
505 /** Array of new lines in the extent descriptor. */
506 char **apszNewLines;
507 /** Name of the old descriptor file if not a sparse image. */
508 char *pszOldDescName;
509 /** Flag whether we called vmdkFreeImage(). */
510 bool fImageFreed;
511 /** Flag whether the descriptor is embedded in the image (sparse) or
512 * in a separate file. */
513 bool fEmbeddedDesc;
514 /** Number of extents in the image. */
515 unsigned cExtents;
516 /** New base filename. */
517 char *pszNewBaseName;
518 /** The old base filename. */
519 char *pszOldBaseName;
520 /** New full filename. */
521 char *pszNewFullName;
522 /** Old full filename. */
523 char *pszOldFullName;
524 /** The old image name. */
525 const char *pszOldImageName;
526 /** Copy of the original VMDK descriptor. */
527 VMDKDESCRIPTOR DescriptorCopy;
528 /** Copy of the extent state for sparse images. */
529 VMDKEXTENT ExtentCopy;
530} VMDKRENAMESTATE;
531/** Pointer to a VMDK rename state. */
532typedef VMDKRENAMESTATE *PVMDKRENAMESTATE;
533
534
535/*********************************************************************************************************************************
536* Static Variables *
537*********************************************************************************************************************************/
538/** NULL-terminated array of supported file extensions. */
539static const VDFILEEXTENSION s_aVmdkFileExtensions[] =
540{
541 {"vmdk", VDTYPE_HDD},
542 {NULL, VDTYPE_INVALID}
543};
544/** NULL-terminated array of configuration options. */
545static const VDCONFIGINFO s_aVmdkConfigInfo[] =
546{
547 /* Options for VMDK raw disks */
548 { "RawDrive", NULL, VDCFGVALUETYPE_STRING, 0 },
549 { "Partitions", NULL, VDCFGVALUETYPE_STRING, 0 },
550 { "BootSector", NULL, VDCFGVALUETYPE_BYTES, 0 },
551 { "Relative", NULL, VDCFGVALUETYPE_INTEGER, 0 },
552 /* End of options list */
553 { NULL, NULL, VDCFGVALUETYPE_INTEGER, 0 }
554};
555
556
557/*********************************************************************************************************************************
558* Internal Functions *
559*********************************************************************************************************************************/
560static void vmdkFreeStreamBuffers(PVMDKEXTENT pExtent);
561static int vmdkFreeExtentData(PVMDKIMAGE pImage, PVMDKEXTENT pExtent,
562 bool fDelete);
563static int vmdkCreateExtents(PVMDKIMAGE pImage, unsigned cExtents);
564static int vmdkFlushImage(PVMDKIMAGE pImage, PVDIOCTX pIoCtx);
565static int vmdkSetImageComment(PVMDKIMAGE pImage, const char *pszComment);
566static int vmdkFreeImage(PVMDKIMAGE pImage, bool fDelete, bool fFlush);
567static DECLCALLBACK(int) vmdkAllocGrainComplete(void *pBackendData, PVDIOCTX pIoCtx,
568 void *pvUser, int rcReq);
569/**
570 * Internal: open a file (using a file descriptor cache to ensure each file
571 * is only opened once - anything else can cause locking problems).
572 */
573static int vmdkFileOpen(PVMDKIMAGE pImage, PVMDKFILE *ppVmdkFile,
574 const char *pszBasename, const char *pszFilename, uint32_t fOpen)
575{
576 int rc = VINF_SUCCESS;
577 PVMDKFILE pVmdkFile;
578 for (pVmdkFile = pImage->pFiles;
579 pVmdkFile != NULL;
580 pVmdkFile = pVmdkFile->pNext)
581 {
582 if (!strcmp(pszFilename, pVmdkFile->pszFilename))
583 {
584 Assert(fOpen == pVmdkFile->fOpen);
585 pVmdkFile->uReferences++;
586 *ppVmdkFile = pVmdkFile;
587 return rc;
588 }
589 }
590 /* If we get here, there's no matching entry in the cache. */
591 pVmdkFile = (PVMDKFILE)RTMemAllocZ(sizeof(VMDKFILE));
592 if (!pVmdkFile)
593 {
594 *ppVmdkFile = NULL;
595 return VERR_NO_MEMORY;
596 }
597 pVmdkFile->pszFilename = RTStrDup(pszFilename);
598 if (!pVmdkFile->pszFilename)
599 {
600 RTMemFree(pVmdkFile);
601 *ppVmdkFile = NULL;
602 return VERR_NO_MEMORY;
603 }
604 if (pszBasename)
605 {
606 pVmdkFile->pszBasename = RTStrDup(pszBasename);
607 if (!pVmdkFile->pszBasename)
608 {
609 RTStrFree((char *)(void *)pVmdkFile->pszFilename);
610 RTMemFree(pVmdkFile);
611 *ppVmdkFile = NULL;
612 return VERR_NO_MEMORY;
613 }
614 }
615 pVmdkFile->fOpen = fOpen;
616 rc = vdIfIoIntFileOpen(pImage->pIfIo, pszFilename, fOpen,
617 &pVmdkFile->pStorage);
618 if (RT_SUCCESS(rc))
619 {
620 pVmdkFile->uReferences = 1;
621 pVmdkFile->pImage = pImage;
622 pVmdkFile->pNext = pImage->pFiles;
623 if (pImage->pFiles)
624 pImage->pFiles->pPrev = pVmdkFile;
625 pImage->pFiles = pVmdkFile;
626 *ppVmdkFile = pVmdkFile;
627 }
628 else
629 {
630 RTStrFree((char *)(void *)pVmdkFile->pszFilename);
631 RTMemFree(pVmdkFile);
632 *ppVmdkFile = NULL;
633 }
634 return rc;
635}
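/**
 * Editorial sketch, not part of the original source: how the file descriptor
 * cache implemented by vmdkFileOpen()/vmdkFileClose() behaves when the same
 * file is referenced twice (e.g. by two extent lines of a raw disk image).
 * The file name and the fOpen flags are assumptions for the example.
 *
 * @code
 *     PVMDKFILE pFile1 = NULL, pFile2 = NULL;
 *     int rc = vmdkFileOpen(pImage, &pFile1, NULL, "disk-flat.vmdk", fOpen);
 *     if (RT_SUCCESS(rc))
 *         rc = vmdkFileOpen(pImage, &pFile2, NULL, "disk-flat.vmdk", fOpen);
 *     // The second call hits the cache: same handle, reference count is now 2.
 *     Assert(pFile1 == pFile2 && pFile1->uReferences == 2);
 *     // Each open must be balanced by a close; the storage handle is only
 *     // really closed (and optionally deleted) once the count drops to 0.
 *     vmdkFileClose(pImage, &pFile2, false /*fDelete*/);
 *     vmdkFileClose(pImage, &pFile1, false /*fDelete*/);
 * @endcode
 */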
636/**
637 * Internal: close a file, updating the file descriptor cache.
638 */
639static int vmdkFileClose(PVMDKIMAGE pImage, PVMDKFILE *ppVmdkFile, bool fDelete)
640{
641 int rc = VINF_SUCCESS;
642 PVMDKFILE pVmdkFile = *ppVmdkFile;
643 AssertPtr(pVmdkFile);
644 pVmdkFile->fDelete |= fDelete;
645 Assert(pVmdkFile->uReferences);
646 pVmdkFile->uReferences--;
647 if (pVmdkFile->uReferences == 0)
648 {
649 PVMDKFILE pPrev;
650 PVMDKFILE pNext;
651 /* Unchain the element from the list. */
652 pPrev = pVmdkFile->pPrev;
653 pNext = pVmdkFile->pNext;
654 if (pNext)
655 pNext->pPrev = pPrev;
656 if (pPrev)
657 pPrev->pNext = pNext;
658 else
659 pImage->pFiles = pNext;
660 rc = vdIfIoIntFileClose(pImage->pIfIo, pVmdkFile->pStorage);
661 bool fFileDel = pVmdkFile->fDelete;
662 if ( pVmdkFile->pszBasename
663 && fFileDel)
664 {
665 const char *pszSuffix = RTPathSuffix(pVmdkFile->pszBasename);
666 if ( RTPathHasPath(pVmdkFile->pszBasename)
667 || !pszSuffix
668 || ( strcmp(pszSuffix, ".vmdk")
669 && strcmp(pszSuffix, ".bin")
670 && strcmp(pszSuffix, ".img")))
671 fFileDel = false;
672 }
673 if (fFileDel)
674 {
675 int rc2 = vdIfIoIntFileDelete(pImage->pIfIo, pVmdkFile->pszFilename);
676 if (RT_SUCCESS(rc))
677 rc = rc2;
678 }
679 else if (pVmdkFile->fDelete)
680 LogRel(("VMDK: Denying deletion of %s\n", pVmdkFile->pszBasename));
681 RTStrFree((char *)(void *)pVmdkFile->pszFilename);
682 if (pVmdkFile->pszBasename)
683 RTStrFree((char *)(void *)pVmdkFile->pszBasename);
684 RTMemFree(pVmdkFile);
685 }
686 *ppVmdkFile = NULL;
687 return rc;
688}
689/*#define VMDK_USE_BLOCK_DECOMP_API - test and enable */
690#ifndef VMDK_USE_BLOCK_DECOMP_API
691static DECLCALLBACK(int) vmdkFileInflateHelper(void *pvUser, void *pvBuf, size_t cbBuf, size_t *pcbBuf)
692{
693 VMDKCOMPRESSIO *pInflateState = (VMDKCOMPRESSIO *)pvUser;
694 size_t cbInjected = 0;
695 Assert(cbBuf);
696 if (pInflateState->iOffset < 0)
697 {
698 *(uint8_t *)pvBuf = RTZIPTYPE_ZLIB;
699 pvBuf = (uint8_t *)pvBuf + 1;
700 cbBuf--;
701 cbInjected = 1;
702 pInflateState->iOffset = RT_UOFFSETOF(VMDKMARKER, uType);
703 }
704 if (!cbBuf)
705 {
706 if (pcbBuf)
707 *pcbBuf = cbInjected;
708 return VINF_SUCCESS;
709 }
710 cbBuf = RT_MIN(cbBuf, pInflateState->cbCompGrain - pInflateState->iOffset);
711 memcpy(pvBuf,
712 (uint8_t *)pInflateState->pvCompGrain + pInflateState->iOffset,
713 cbBuf);
714 pInflateState->iOffset += cbBuf;
715 Assert(pcbBuf);
716 *pcbBuf = cbBuf + cbInjected;
717 return VINF_SUCCESS;
718}
719#endif
720/**
721 * Internal: read from a file and inflate the compressed data,
722 * distinguishing between async and normal operation
723 */
724DECLINLINE(int) vmdkFileInflateSync(PVMDKIMAGE pImage, PVMDKEXTENT pExtent,
725 uint64_t uOffset, void *pvBuf,
726 size_t cbToRead, const void *pcvMarker,
727 uint64_t *puLBA, uint32_t *pcbMarkerData)
728{
729 int rc;
730#ifndef VMDK_USE_BLOCK_DECOMP_API
731 PRTZIPDECOMP pZip = NULL;
732#endif
733 VMDKMARKER *pMarker = (VMDKMARKER *)pExtent->pvCompGrain;
734 size_t cbCompSize, cbActuallyRead;
735 if (!pcvMarker)
736 {
737 rc = vdIfIoIntFileReadSync(pImage->pIfIo, pExtent->pFile->pStorage,
738 uOffset, pMarker, RT_UOFFSETOF(VMDKMARKER, uType));
739 if (RT_FAILURE(rc))
740 return rc;
741 }
742 else
743 {
744 memcpy(pMarker, pcvMarker, RT_UOFFSETOF(VMDKMARKER, uType));
745 /* pcvMarker endianness has already been partially transformed, fix it */
746 pMarker->uSector = RT_H2LE_U64(pMarker->uSector);
747 pMarker->cbSize = RT_H2LE_U32(pMarker->cbSize);
748 }
749 cbCompSize = RT_LE2H_U32(pMarker->cbSize);
750 if (cbCompSize == 0)
751 {
752 AssertMsgFailed(("VMDK: corrupted marker\n"));
753 return VERR_VD_VMDK_INVALID_FORMAT;
754 }
755 /* Sanity check - the expansion ratio should be much less than 2. */
756 Assert(cbCompSize < 2 * cbToRead);
757 if (cbCompSize >= 2 * cbToRead)
758 return VERR_VD_VMDK_INVALID_FORMAT;
759 /* Compressed grain marker. Data follows immediately. */
760 rc = vdIfIoIntFileReadSync(pImage->pIfIo, pExtent->pFile->pStorage,
761 uOffset + RT_UOFFSETOF(VMDKMARKER, uType),
762 (uint8_t *)pExtent->pvCompGrain
763 + RT_UOFFSETOF(VMDKMARKER, uType),
764 RT_ALIGN_Z( cbCompSize
765 + RT_UOFFSETOF(VMDKMARKER, uType),
766 512)
767 - RT_UOFFSETOF(VMDKMARKER, uType));
768 if (puLBA)
769 *puLBA = RT_LE2H_U64(pMarker->uSector);
770 if (pcbMarkerData)
771 *pcbMarkerData = RT_ALIGN( cbCompSize
772 + RT_UOFFSETOF(VMDKMARKER, uType),
773 512);
774#ifdef VMDK_USE_BLOCK_DECOMP_API
775 rc = RTZipBlockDecompress(RTZIPTYPE_ZLIB, 0 /*fFlags*/,
776 pExtent->pvCompGrain, cbCompSize + RT_UOFFSETOF(VMDKMARKER, uType), NULL,
777 pvBuf, cbToRead, &cbActuallyRead);
778#else
779 VMDKCOMPRESSIO InflateState;
780 InflateState.pImage = pImage;
781 InflateState.iOffset = -1;
782 InflateState.cbCompGrain = cbCompSize + RT_UOFFSETOF(VMDKMARKER, uType);
783 InflateState.pvCompGrain = pExtent->pvCompGrain;
784 rc = RTZipDecompCreate(&pZip, &InflateState, vmdkFileInflateHelper);
785 if (RT_FAILURE(rc))
786 return rc;
787 rc = RTZipDecompress(pZip, pvBuf, cbToRead, &cbActuallyRead);
788 RTZipDecompDestroy(pZip);
789#endif /* !VMDK_USE_BLOCK_DECOMP_API */
790 if (RT_FAILURE(rc))
791 {
792 if (rc == VERR_ZIP_CORRUPTED)
793 rc = vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: Compressed image is corrupted '%s'"), pExtent->pszFullname);
794 return rc;
795 }
796 if (cbActuallyRead != cbToRead)
797 rc = VERR_VD_VMDK_INVALID_FORMAT;
798 return rc;
799}
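/**
 * Editorial sketch, not part of the original source: the on-disk layout of a
 * compressed grain as handled by vmdkFileInflateSync()/vmdkFileDeflateSync().
 * A grain starts with the first RT_UOFFSETOF(VMDKMARKER, uType) = 12 bytes of
 * the marker (uSector + cbSize), is followed immediately by cbSize bytes of
 * RTZIPTYPE_ZLIB data and is zero-padded up to the next 512-byte boundary.
 *
 * @code
 *     // Bytes one compressed grain occupies on disk, matching the value the
 *     // two helpers above return in *pcbMarkerData:
 *     uint32_t cbOnDisk = RT_ALIGN(cbCompSize + RT_UOFFSETOF(VMDKMARKER, uType), 512);
 * @endcode
 */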
800static DECLCALLBACK(int) vmdkFileDeflateHelper(void *pvUser, const void *pvBuf, size_t cbBuf)
801{
802 VMDKCOMPRESSIO *pDeflateState = (VMDKCOMPRESSIO *)pvUser;
803 Assert(cbBuf);
804 if (pDeflateState->iOffset < 0)
805 {
806 pvBuf = (const uint8_t *)pvBuf + 1;
807 cbBuf--;
808 pDeflateState->iOffset = RT_UOFFSETOF(VMDKMARKER, uType);
809 }
810 if (!cbBuf)
811 return VINF_SUCCESS;
812 if (pDeflateState->iOffset + cbBuf > pDeflateState->cbCompGrain)
813 return VERR_BUFFER_OVERFLOW;
814 memcpy((uint8_t *)pDeflateState->pvCompGrain + pDeflateState->iOffset,
815 pvBuf, cbBuf);
816 pDeflateState->iOffset += cbBuf;
817 return VINF_SUCCESS;
818}
819/**
820 * Internal: deflate the uncompressed data and write to a file,
821 * distinguishing between async and normal operation
822 */
823DECLINLINE(int) vmdkFileDeflateSync(PVMDKIMAGE pImage, PVMDKEXTENT pExtent,
824 uint64_t uOffset, const void *pvBuf,
825 size_t cbToWrite, uint64_t uLBA,
826 uint32_t *pcbMarkerData)
827{
828 int rc;
829 PRTZIPCOMP pZip = NULL;
830 VMDKCOMPRESSIO DeflateState;
831 DeflateState.pImage = pImage;
832 DeflateState.iOffset = -1;
833 DeflateState.cbCompGrain = pExtent->cbCompGrain;
834 DeflateState.pvCompGrain = pExtent->pvCompGrain;
835 rc = RTZipCompCreate(&pZip, &DeflateState, vmdkFileDeflateHelper,
836 RTZIPTYPE_ZLIB, RTZIPLEVEL_DEFAULT);
837 if (RT_FAILURE(rc))
838 return rc;
839 rc = RTZipCompress(pZip, pvBuf, cbToWrite);
840 if (RT_SUCCESS(rc))
841 rc = RTZipCompFinish(pZip);
842 RTZipCompDestroy(pZip);
843 if (RT_SUCCESS(rc))
844 {
845 Assert( DeflateState.iOffset > 0
846 && (size_t)DeflateState.iOffset <= DeflateState.cbCompGrain);
847 /* pad with zeroes to get to a full sector size */
848 uint32_t uSize = DeflateState.iOffset;
849 if (uSize % 512)
850 {
851 uint32_t uSizeAlign = RT_ALIGN(uSize, 512);
852 memset((uint8_t *)pExtent->pvCompGrain + uSize, '\0',
853 uSizeAlign - uSize);
854 uSize = uSizeAlign;
855 }
856 if (pcbMarkerData)
857 *pcbMarkerData = uSize;
858 /* Compressed grain marker. Data follows immediately. */
859 VMDKMARKER *pMarker = (VMDKMARKER *)pExtent->pvCompGrain;
860 pMarker->uSector = RT_H2LE_U64(uLBA);
861 pMarker->cbSize = RT_H2LE_U32( DeflateState.iOffset
862 - RT_UOFFSETOF(VMDKMARKER, uType));
863 rc = vdIfIoIntFileWriteSync(pImage->pIfIo, pExtent->pFile->pStorage,
864 uOffset, pMarker, uSize);
865 if (RT_FAILURE(rc))
866 return rc;
867 }
868 return rc;
869}
870/**
871 * Internal: check if all files are closed, prevent leaking resources.
872 */
873static int vmdkFileCheckAllClose(PVMDKIMAGE pImage)
874{
875 int rc = VINF_SUCCESS, rc2;
876 PVMDKFILE pVmdkFile;
877 Assert(pImage->pFiles == NULL);
878 for (pVmdkFile = pImage->pFiles;
879 pVmdkFile != NULL;
880 pVmdkFile = pVmdkFile->pNext)
881 {
882 LogRel(("VMDK: leaking reference to file \"%s\"\n",
883 pVmdkFile->pszFilename));
884 pImage->pFiles = pVmdkFile->pNext;
885 rc2 = vmdkFileClose(pImage, &pVmdkFile, pVmdkFile->fDelete);
886 if (RT_SUCCESS(rc))
887 rc = rc2;
888 }
889 return rc;
890}
891/**
892 * Internal: truncate a string (at a UTF8 code point boundary) and encode the
893 * critical non-ASCII characters.
894 */
895static char *vmdkEncodeString(const char *psz)
896{
897 char szEnc[VMDK_ENCODED_COMMENT_MAX + 3];
898 char *pszDst = szEnc;
899 AssertPtr(psz);
900 for (; *psz; psz = RTStrNextCp(psz))
901 {
902 char *pszDstPrev = pszDst;
903 RTUNICP Cp = RTStrGetCp(psz);
904 if (Cp == '\\')
905 {
906 pszDst = RTStrPutCp(pszDst, Cp);
907 pszDst = RTStrPutCp(pszDst, Cp);
908 }
909 else if (Cp == '\n')
910 {
911 pszDst = RTStrPutCp(pszDst, '\\');
912 pszDst = RTStrPutCp(pszDst, 'n');
913 }
914 else if (Cp == '\r')
915 {
916 pszDst = RTStrPutCp(pszDst, '\\');
917 pszDst = RTStrPutCp(pszDst, 'r');
918 }
919 else
920 pszDst = RTStrPutCp(pszDst, Cp);
921 if (pszDst - szEnc >= VMDK_ENCODED_COMMENT_MAX - 1)
922 {
923 pszDst = pszDstPrev;
924 break;
925 }
926 }
927 *pszDst = '\0';
928 return RTStrDup(szEnc);
929}
930/**
931 * Internal: decode a string and store it into the specified string.
932 */
933static int vmdkDecodeString(const char *pszEncoded, char *psz, size_t cb)
934{
935 int rc = VINF_SUCCESS;
936 char szBuf[4];
937 if (!cb)
938 return VERR_BUFFER_OVERFLOW;
939 AssertPtr(psz);
940 for (; *pszEncoded; pszEncoded = RTStrNextCp(pszEncoded))
941 {
942 char *pszDst = szBuf;
943 RTUNICP Cp = RTStrGetCp(pszEncoded);
944 if (Cp == '\\')
945 {
946 pszEncoded = RTStrNextCp(pszEncoded);
947 RTUNICP CpQ = RTStrGetCp(pszEncoded);
948 if (CpQ == 'n')
949 RTStrPutCp(pszDst, '\n');
950 else if (CpQ == 'r')
951 RTStrPutCp(pszDst, '\r');
952 else if (CpQ == '\0')
953 {
954 rc = VERR_VD_VMDK_INVALID_HEADER;
955 break;
956 }
957 else
958 RTStrPutCp(pszDst, CpQ);
959 }
960 else
961 pszDst = RTStrPutCp(pszDst, Cp);
962 /* Need to leave space for terminating NUL. */
963 if ((size_t)(pszDst - szBuf) + 1 >= cb)
964 {
965 rc = VERR_BUFFER_OVERFLOW;
966 break;
967 }
968 memcpy(psz, szBuf, pszDst - szBuf);
969 psz += pszDst - szBuf;
970 }
971 *psz = '\0';
972 return rc;
973}
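/**
 * Editorial sketch, not part of the original source: the escaping performed by
 * vmdkEncodeString()/vmdkDecodeString(). Backslashes are doubled and line
 * breaks become two-character escapes so a comment always stays on a single
 * descriptor line. The buffer size below is an arbitrary example value.
 *
 * @code
 *     char *pszEnc = vmdkEncodeString("line1\nline2"); // real newline in the input
 *     // pszEnc now contains a literal backslash followed by 'n' instead of the newline.
 *     char szPlain[32];
 *     int rc = vmdkDecodeString(pszEnc, szPlain, sizeof(szPlain)); // restores the newline
 *     RTStrFree(pszEnc);
 * @endcode
 */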
974/**
975 * Internal: free all buffers associated with grain directories.
976 */
977static void vmdkFreeGrainDirectory(PVMDKEXTENT pExtent)
978{
979 if (pExtent->pGD)
980 {
981 RTMemFree(pExtent->pGD);
982 pExtent->pGD = NULL;
983 }
984 if (pExtent->pRGD)
985 {
986 RTMemFree(pExtent->pRGD);
987 pExtent->pRGD = NULL;
988 }
989}
990/**
991 * Internal: allocate the compressed/uncompressed buffers for streamOptimized
992 * images.
993 */
994static int vmdkAllocStreamBuffers(PVMDKIMAGE pImage, PVMDKEXTENT pExtent)
995{
996 int rc = VINF_SUCCESS;
997 if (pImage->uImageFlags & VD_VMDK_IMAGE_FLAGS_STREAM_OPTIMIZED)
998 {
999 /* streamOptimized extents need a compressed grain buffer, which must
1000 * be big enough to hold uncompressible data (which needs ~8 bytes
1001 * more than the uncompressed data), the marker and padding. */
1002 pExtent->cbCompGrain = RT_ALIGN_Z( VMDK_SECTOR2BYTE(pExtent->cSectorsPerGrain)
1003 + 8 + sizeof(VMDKMARKER), 512);
1004 pExtent->pvCompGrain = RTMemAlloc(pExtent->cbCompGrain);
1005 if (RT_LIKELY(pExtent->pvCompGrain))
1006 {
1007 /* streamOptimized extents need a decompressed grain buffer. */
1008 pExtent->pvGrain = RTMemAlloc(VMDK_SECTOR2BYTE(pExtent->cSectorsPerGrain));
1009 if (!pExtent->pvGrain)
1010 rc = VERR_NO_MEMORY;
1011 }
1012 else
1013 rc = VERR_NO_MEMORY;
1014 }
1015 if (RT_FAILURE(rc))
1016 vmdkFreeStreamBuffers(pExtent);
1017 return rc;
1018}
1019/**
1020 * Internal: allocate all buffers associated with grain directories.
1021 */
1022static int vmdkAllocGrainDirectory(PVMDKIMAGE pImage, PVMDKEXTENT pExtent)
1023{
1024 RT_NOREF1(pImage);
1025 int rc = VINF_SUCCESS;
1026 size_t cbGD = pExtent->cGDEntries * sizeof(uint32_t);
1027 pExtent->pGD = (uint32_t *)RTMemAllocZ(cbGD);
1028 if (RT_LIKELY(pExtent->pGD))
1029 {
1030 if (pExtent->uSectorRGD)
1031 {
1032 pExtent->pRGD = (uint32_t *)RTMemAllocZ(cbGD);
1033 if (RT_UNLIKELY(!pExtent->pRGD))
1034 rc = VERR_NO_MEMORY;
1035 }
1036 }
1037 else
1038 rc = VERR_NO_MEMORY;
1039 if (RT_FAILURE(rc))
1040 vmdkFreeGrainDirectory(pExtent);
1041 return rc;
1042}
1043/**
1044 * Converts the grain directory from little endian to host endianness.
1045 *
1046 * @returns nothing.
1047 * @param pGD The grain directory.
1048 * @param cGDEntries Number of entries in the grain directory to convert.
1049 */
1050DECLINLINE(void) vmdkGrainDirectoryConvToHost(uint32_t *pGD, uint32_t cGDEntries)
1051{
1052 uint32_t *pGDTmp = pGD;
1053 for (uint32_t i = 0; i < cGDEntries; i++, pGDTmp++)
1054 *pGDTmp = RT_LE2H_U32(*pGDTmp);
1055}
1056/**
1057 * Read the grain directory and allocated grain tables, verifying them against
1058 * their backup copies if available.
1059 *
1060 * @returns VBox status code.
1061 * @param pImage Image instance data.
1062 * @param pExtent The VMDK extent.
1063 */
1064static int vmdkReadGrainDirectory(PVMDKIMAGE pImage, PVMDKEXTENT pExtent)
1065{
1066 int rc = VINF_SUCCESS;
1067 size_t cbGD = pExtent->cGDEntries * sizeof(uint32_t);
1068 AssertReturn(( pExtent->enmType == VMDKETYPE_HOSTED_SPARSE
1069 && pExtent->uSectorGD != VMDK_GD_AT_END
1070 && pExtent->uSectorRGD != VMDK_GD_AT_END), VERR_INTERNAL_ERROR);
1071 rc = vmdkAllocGrainDirectory(pImage, pExtent);
1072 if (RT_SUCCESS(rc))
1073 {
1074 /* The VMDK 1.1 spec seems to talk about compressed grain directories,
1075 * but in reality they are not compressed. */
1076 rc = vdIfIoIntFileReadSync(pImage->pIfIo, pExtent->pFile->pStorage,
1077 VMDK_SECTOR2BYTE(pExtent->uSectorGD),
1078 pExtent->pGD, cbGD);
1079 if (RT_SUCCESS(rc))
1080 {
1081 vmdkGrainDirectoryConvToHost(pExtent->pGD, pExtent->cGDEntries);
1082 if ( pExtent->uSectorRGD
1083 && !(pImage->uOpenFlags & VD_OPEN_FLAGS_SKIP_CONSISTENCY_CHECKS))
1084 {
1085 /* The VMDK 1.1 spec seems to talk about compressed grain directories,
1086 * but in reality they are not compressed. */
1087 rc = vdIfIoIntFileReadSync(pImage->pIfIo, pExtent->pFile->pStorage,
1088 VMDK_SECTOR2BYTE(pExtent->uSectorRGD),
1089 pExtent->pRGD, cbGD);
1090 if (RT_SUCCESS(rc))
1091 {
1092 vmdkGrainDirectoryConvToHost(pExtent->pRGD, pExtent->cGDEntries);
1093 /* Check grain table and redundant grain table for consistency. */
1094 size_t cbGT = pExtent->cGTEntries * sizeof(uint32_t);
1095 size_t cbGTBuffers = cbGT; /* Start with space for one GT. */
1096 size_t cbGTBuffersMax = _1M;
1097 uint32_t *pTmpGT1 = (uint32_t *)RTMemAlloc(cbGTBuffers);
1098 uint32_t *pTmpGT2 = (uint32_t *)RTMemAlloc(cbGTBuffers);
1099 if ( !pTmpGT1
1100 || !pTmpGT2)
1101 rc = VERR_NO_MEMORY;
1102 size_t i = 0;
1103 uint32_t *pGDTmp = pExtent->pGD;
1104 uint32_t *pRGDTmp = pExtent->pRGD;
1105 /* Loop through all entries. */
1106 while (i < pExtent->cGDEntries)
1107 {
1108 uint32_t uGTStart = *pGDTmp;
1109 uint32_t uRGTStart = *pRGDTmp;
1110 size_t cbGTRead = cbGT;
1111 /* If no grain table is allocated skip the entry. */
1112 if (*pGDTmp == 0 && *pRGDTmp == 0)
1113 {
1114 i++;
1115 continue;
1116 }
1117 if (*pGDTmp == 0 || *pRGDTmp == 0 || *pGDTmp == *pRGDTmp)
1118 {
1119 /* Just one grain directory entry refers to a not yet allocated
1120 * grain table or both grain directory copies refer to the same
1121 * grain table. Not allowed. */
1122 rc = vdIfError(pImage->pIfError, VERR_VD_VMDK_INVALID_HEADER, RT_SRC_POS,
1123 N_("VMDK: inconsistent references to grain directory in '%s'"), pExtent->pszFullname);
1124 break;
1125 }
1126 i++;
1127 pGDTmp++;
1128 pRGDTmp++;
1129 /*
1130 * Read a few tables at once if adjacent to decrease the number
1131 * of I/O requests. Read at maximum 1MB at once.
1132 */
1133 while ( i < pExtent->cGDEntries
1134 && cbGTRead < cbGTBuffersMax)
1135 {
1136 /* If no grain table is allocated skip the entry. */
1137 if (*pGDTmp == 0 && *pRGDTmp == 0)
1138 {
1139 i++;
1140 continue;
1141 }
1142 if (*pGDTmp == 0 || *pRGDTmp == 0 || *pGDTmp == *pRGDTmp)
1143 {
1144 /* Just one grain directory entry refers to a not yet allocated
1145 * grain table or both grain directory copies refer to the same
1146 * grain table. Not allowed. */
1147 rc = vdIfError(pImage->pIfError, VERR_VD_VMDK_INVALID_HEADER, RT_SRC_POS,
1148 N_("VMDK: inconsistent references to grain directory in '%s'"), pExtent->pszFullname);
1149 break;
1150 }
1151 /* Check that the start offsets are adjacent.*/
1152 if ( VMDK_SECTOR2BYTE(uGTStart) + cbGTRead != VMDK_SECTOR2BYTE(*pGDTmp)
1153 || VMDK_SECTOR2BYTE(uRGTStart) + cbGTRead != VMDK_SECTOR2BYTE(*pRGDTmp))
1154 break;
1155 i++;
1156 pGDTmp++;
1157 pRGDTmp++;
1158 cbGTRead += cbGT;
1159 }
1160 /* Increase buffers if required. */
1161 if ( RT_SUCCESS(rc)
1162 && cbGTBuffers < cbGTRead)
1163 {
1164 uint32_t *pTmp;
1165 pTmp = (uint32_t *)RTMemRealloc(pTmpGT1, cbGTRead);
1166 if (pTmp)
1167 {
1168 pTmpGT1 = pTmp;
1169 pTmp = (uint32_t *)RTMemRealloc(pTmpGT2, cbGTRead);
1170 if (pTmp)
1171 pTmpGT2 = pTmp;
1172 else
1173 rc = VERR_NO_MEMORY;
1174 }
1175 else
1176 rc = VERR_NO_MEMORY;
1177 if (rc == VERR_NO_MEMORY)
1178 {
1179 /* Reset to the old values. */
1180 rc = VINF_SUCCESS;
1181 i -= cbGTRead / cbGT;
1182 cbGTRead = cbGT;
1183 /* Don't try to increase the buffer again in the next run. */
1184 cbGTBuffersMax = cbGTBuffers;
1185 }
1186 }
1187 if (RT_SUCCESS(rc))
1188 {
1189 /* The VMDK 1.1 spec seems to talk about compressed grain tables,
1190 * but in reality they are not compressed. */
1191 rc = vdIfIoIntFileReadSync(pImage->pIfIo, pExtent->pFile->pStorage,
1192 VMDK_SECTOR2BYTE(uGTStart),
1193 pTmpGT1, cbGTRead);
1194 if (RT_FAILURE(rc))
1195 {
1196 rc = vdIfError(pImage->pIfError, rc, RT_SRC_POS,
1197 N_("VMDK: error reading grain table in '%s'"), pExtent->pszFullname);
1198 break;
1199 }
1200 /* The VMDK 1.1 spec seems to talk about compressed grain tables,
1201 * but in reality they are not compressed. */
1202 rc = vdIfIoIntFileReadSync(pImage->pIfIo, pExtent->pFile->pStorage,
1203 VMDK_SECTOR2BYTE(uRGTStart),
1204 pTmpGT2, cbGTRead);
1205 if (RT_FAILURE(rc))
1206 {
1207 rc = vdIfError(pImage->pIfError, rc, RT_SRC_POS,
1208 N_("VMDK: error reading backup grain table in '%s'"), pExtent->pszFullname);
1209 break;
1210 }
1211 if (memcmp(pTmpGT1, pTmpGT2, cbGTRead))
1212 {
1213 rc = vdIfError(pImage->pIfError, VERR_VD_VMDK_INVALID_HEADER, RT_SRC_POS,
1214 N_("VMDK: inconsistency between grain table and backup grain table in '%s'"), pExtent->pszFullname);
1215 break;
1216 }
1217 }
1218 } /* while (i < pExtent->cGDEntries) */
1219 /** @todo figure out what to do for unclean VMDKs. */
1220 if (pTmpGT1)
1221 RTMemFree(pTmpGT1);
1222 if (pTmpGT2)
1223 RTMemFree(pTmpGT2);
1224 }
1225 else
1226 rc = vdIfError(pImage->pIfError, rc, RT_SRC_POS,
1227 N_("VMDK: could not read redundant grain directory in '%s'"), pExtent->pszFullname);
1228 }
1229 }
1230 else
1231 rc = vdIfError(pImage->pIfError, rc, RT_SRC_POS,
1232 N_("VMDK: could not read grain directory in '%s': %Rrc"), pExtent->pszFullname, rc);
1233 }
1234 if (RT_FAILURE(rc))
1235 vmdkFreeGrainDirectory(pExtent);
1236 return rc;
1237}
1238/**
1239 * Creates a new grain directory for the given extent at the given start sector.
1240 *
1241 * @returns VBox status code.
1242 * @param pImage Image instance data.
1243 * @param pExtent The VMDK extent.
1244 * @param uStartSector Where the grain directory should be stored in the image.
1245 * @param fPreAlloc Flag whether to pre-allocate the grain tables at this point.
1246 */
1247static int vmdkCreateGrainDirectory(PVMDKIMAGE pImage, PVMDKEXTENT pExtent,
1248 uint64_t uStartSector, bool fPreAlloc,
1249 bool fExisting = false)
1250{
1251 int rc = VINF_SUCCESS;
1252 unsigned i;
1253 size_t cbGD = pExtent->cGDEntries * sizeof(uint32_t);
1254 size_t cbGDRounded = RT_ALIGN_64(cbGD, 512);
1255 size_t cbGTRounded;
1256 uint64_t cbOverhead;
1257 if (fPreAlloc)
1258 {
1259 cbGTRounded = RT_ALIGN_64(pExtent->cGDEntries * pExtent->cGTEntries * sizeof(uint32_t), 512);
1260 cbOverhead = VMDK_SECTOR2BYTE(uStartSector) + cbGDRounded + cbGTRounded;
1261 }
1262 else
1263 {
1264 /* Use a dummy start sector for layout computation. */
1265 if (uStartSector == VMDK_GD_AT_END)
1266 uStartSector = 1;
1267 cbGTRounded = 0;
1268 cbOverhead = VMDK_SECTOR2BYTE(uStartSector) + cbGDRounded;
1269 }
1270 /* For streamOptimized extents there is only one grain directory,
1271 * and for all others take the redundant grain directory into account. */
1272 if (pImage->uImageFlags & VD_VMDK_IMAGE_FLAGS_STREAM_OPTIMIZED)
1273 {
1274 cbOverhead = RT_ALIGN_64(cbOverhead,
1275 VMDK_SECTOR2BYTE(pExtent->cSectorsPerGrain));
1276 }
1277 else
1278 {
1279 cbOverhead += cbGDRounded + cbGTRounded;
1280 cbOverhead = RT_ALIGN_64(cbOverhead,
1281 VMDK_SECTOR2BYTE(pExtent->cSectorsPerGrain));
1282
1283 if (!fExisting)
1284 rc = vdIfIoIntFileSetSize(pImage->pIfIo, pExtent->pFile->pStorage, cbOverhead);
1285 }
1286 if (RT_SUCCESS(rc))
1287 {
1288 pExtent->uAppendPosition = cbOverhead;
1289 pExtent->cOverheadSectors = VMDK_BYTE2SECTOR(cbOverhead);
1290 if (pImage->uImageFlags & VD_VMDK_IMAGE_FLAGS_STREAM_OPTIMIZED)
1291 {
1292 pExtent->uSectorRGD = 0;
1293 pExtent->uSectorGD = uStartSector;
1294 }
1295 else
1296 {
1297 pExtent->uSectorRGD = uStartSector;
1298 pExtent->uSectorGD = uStartSector + VMDK_BYTE2SECTOR(cbGDRounded + cbGTRounded);
1299 }
1300 rc = vmdkAllocStreamBuffers(pImage, pExtent);
1301 if (RT_SUCCESS(rc))
1302 {
1303 rc = vmdkAllocGrainDirectory(pImage, pExtent);
1304 if ( RT_SUCCESS(rc)
1305 && fPreAlloc)
1306 {
1307 uint32_t uGTSectorLE;
1308 uint64_t uOffsetSectors;
1309 if (pExtent->pRGD)
1310 {
1311 uOffsetSectors = pExtent->uSectorRGD + VMDK_BYTE2SECTOR(cbGDRounded);
1312 for (i = 0; i < pExtent->cGDEntries; i++)
1313 {
1314 pExtent->pRGD[i] = uOffsetSectors;
1315 uGTSectorLE = RT_H2LE_U64(uOffsetSectors);
1316 /* Write the redundant grain directory entry to disk. */
1317 rc = vdIfIoIntFileWriteSync(pImage->pIfIo, pExtent->pFile->pStorage,
1318 VMDK_SECTOR2BYTE(pExtent->uSectorRGD) + i * sizeof(uGTSectorLE),
1319 &uGTSectorLE, sizeof(uGTSectorLE));
1320 if (RT_FAILURE(rc))
1321 {
1322 rc = vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: cannot write new redundant grain directory entry in '%s'"), pExtent->pszFullname);
1323 break;
1324 }
1325 uOffsetSectors += VMDK_BYTE2SECTOR(pExtent->cGTEntries * sizeof(uint32_t));
1326 }
1327 }
1328 if (RT_SUCCESS(rc))
1329 {
1330 uOffsetSectors = pExtent->uSectorGD + VMDK_BYTE2SECTOR(cbGDRounded);
1331 for (i = 0; i < pExtent->cGDEntries; i++)
1332 {
1333 pExtent->pGD[i] = uOffsetSectors;
1334 uGTSectorLE = RT_H2LE_U64(uOffsetSectors);
1335 /* Write the grain directory entry to disk. */
1336 rc = vdIfIoIntFileWriteSync(pImage->pIfIo, pExtent->pFile->pStorage,
1337 VMDK_SECTOR2BYTE(pExtent->uSectorGD) + i * sizeof(uGTSectorLE),
1338 &uGTSectorLE, sizeof(uGTSectorLE));
1339 if (RT_FAILURE(rc))
1340 {
1341 rc = vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: cannot write new grain directory entry in '%s'"), pExtent->pszFullname);
1342 break;
1343 }
1344 uOffsetSectors += VMDK_BYTE2SECTOR(pExtent->cGTEntries * sizeof(uint32_t));
1345 }
1346 }
1347 }
1348 }
1349 }
1350 if (RT_FAILURE(rc))
1351 vmdkFreeGrainDirectory(pExtent);
1352 return rc;
1353}
1354/**
1355 * Unquotes the given string returning the result in a separate buffer.
1356 *
1357 * @returns VBox status code.
1358 * @param pImage The VMDK image state.
1359 * @param pszStr The string to unquote.
1360 * @param ppszUnquoted Where to store the return value, use RTMemTmpFree to
1361 * free.
1362 * @param ppszNext Where to store the pointer to any character following
1363 * the quoted value, optional.
1364 */
1365static int vmdkStringUnquote(PVMDKIMAGE pImage, const char *pszStr,
1366 char **ppszUnquoted, char **ppszNext)
1367{
1368 const char *pszStart = pszStr;
1369 char *pszQ;
1370 char *pszUnquoted;
1371 /* Skip over whitespace. */
1372 while (*pszStr == ' ' || *pszStr == '\t')
1373 pszStr++;
1374 if (*pszStr != '"')
1375 {
1376 pszQ = (char *)pszStr;
1377 while (*pszQ && *pszQ != ' ' && *pszQ != '\t')
1378 pszQ++;
1379 }
1380 else
1381 {
1382 pszStr++;
1383 pszQ = (char *)strchr(pszStr, '"');
1384 if (pszQ == NULL)
1385 return vdIfError(pImage->pIfError, VERR_VD_VMDK_INVALID_HEADER, RT_SRC_POS, N_("VMDK: incorrectly quoted value in descriptor in '%s' (raw value %s)"),
1386 pImage->pszFilename, pszStart);
1387 }
1388 pszUnquoted = (char *)RTMemTmpAlloc(pszQ - pszStr + 1);
1389 if (!pszUnquoted)
1390 return VERR_NO_MEMORY;
1391 memcpy(pszUnquoted, pszStr, pszQ - pszStr);
1392 pszUnquoted[pszQ - pszStr] = '\0';
1393 *ppszUnquoted = pszUnquoted;
1394 if (ppszNext)
1395 *ppszNext = pszQ + 1;
1396 return VINF_SUCCESS;
1397}
1398static int vmdkDescInitStr(PVMDKIMAGE pImage, PVMDKDESCRIPTOR pDescriptor,
1399 const char *pszLine)
1400{
1401 char *pEnd = pDescriptor->aLines[pDescriptor->cLines];
1402 ssize_t cbDiff = strlen(pszLine) + 1;
1403 if ( pDescriptor->cLines >= VMDK_DESCRIPTOR_LINES_MAX - 1
1404 && pEnd - pDescriptor->aLines[0] > (ptrdiff_t)pDescriptor->cbDescAlloc - cbDiff)
1405 return vdIfError(pImage->pIfError, VERR_BUFFER_OVERFLOW, RT_SRC_POS, N_("VMDK: descriptor too big in '%s'"), pImage->pszFilename);
1406 memcpy(pEnd, pszLine, cbDiff);
1407 pDescriptor->cLines++;
1408 pDescriptor->aLines[pDescriptor->cLines] = pEnd + cbDiff;
1409 pDescriptor->fDirty = true;
1410 return VINF_SUCCESS;
1411}
1412static bool vmdkDescGetStr(PVMDKDESCRIPTOR pDescriptor, unsigned uStart,
1413 const char *pszKey, const char **ppszValue)
1414{
1415 size_t cbKey = strlen(pszKey);
1416 const char *pszValue;
1417 while (uStart != 0)
1418 {
1419 if (!strncmp(pDescriptor->aLines[uStart], pszKey, cbKey))
1420 {
1421 /* Key matches, check for a '=' (preceded by whitespace). */
1422 pszValue = pDescriptor->aLines[uStart] + cbKey;
1423 while (*pszValue == ' ' || *pszValue == '\t')
1424 pszValue++;
1425 if (*pszValue == '=')
1426 {
1427 *ppszValue = pszValue + 1;
1428 break;
1429 }
1430 }
1431 uStart = pDescriptor->aNextLines[uStart];
1432 }
1433 return !!uStart;
1434}
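/**
 * Editorial sketch, not part of the original source: the descriptor lines that
 * vmdkDescGetStr()/vmdkDescSetStr() operate on are simple key=value pairs,
 * optionally with whitespace around the '=' and with quoted string values.
 * The values below are made up for illustration:
 *
 * @code
 *     version=1
 *     CID=fffffffe
 *     parentCID=ffffffff
 *     createType="monolithicSparse"
 * @endcode
 *
 * vmdkDescGetStr() returns a pointer just past the '=', which is why callers
 * such as vmdkDescBaseGetStr() run the result through vmdkStringUnquote().
 */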
1435static int vmdkDescSetStr(PVMDKIMAGE pImage, PVMDKDESCRIPTOR pDescriptor,
1436 unsigned uStart,
1437 const char *pszKey, const char *pszValue)
1438{
1439 char *pszTmp = NULL; /* (MSC naturally cannot figure this isn't used uninitialized) */
1440 size_t cbKey = strlen(pszKey);
1441 unsigned uLast = 0;
1442 while (uStart != 0)
1443 {
1444 if (!strncmp(pDescriptor->aLines[uStart], pszKey, cbKey))
1445 {
1446 /* Key matches, check for a '=' (preceded by whitespace). */
1447 pszTmp = pDescriptor->aLines[uStart] + cbKey;
1448 while (*pszTmp == ' ' || *pszTmp == '\t')
1449 pszTmp++;
1450 if (*pszTmp == '=')
1451 {
1452 pszTmp++;
1453 /** @todo r=bird: Doesn't skipping trailing blanks here just cause unnecessary
1454 * bloat and potentially an out-of-space error? */
1455 while (*pszTmp == ' ' || *pszTmp == '\t')
1456 pszTmp++;
1457 break;
1458 }
1459 }
1460 if (!pDescriptor->aNextLines[uStart])
1461 uLast = uStart;
1462 uStart = pDescriptor->aNextLines[uStart];
1463 }
1464 if (uStart)
1465 {
1466 if (pszValue)
1467 {
1468 /* Key already exists, replace existing value. */
1469 size_t cbOldVal = strlen(pszTmp);
1470 size_t cbNewVal = strlen(pszValue);
1471 ssize_t cbDiff = cbNewVal - cbOldVal;
1472 /* Check for buffer overflow. */
1473 if ( pDescriptor->aLines[pDescriptor->cLines] - pDescriptor->aLines[0]
1474 > (ptrdiff_t)pDescriptor->cbDescAlloc - cbDiff)
1475 return vdIfError(pImage->pIfError, VERR_BUFFER_OVERFLOW, RT_SRC_POS, N_("VMDK: descriptor too big in '%s'"), pImage->pszFilename);
1476 memmove(pszTmp + cbNewVal, pszTmp + cbOldVal,
1477 pDescriptor->aLines[pDescriptor->cLines] - pszTmp - cbOldVal);
1478 memcpy(pszTmp, pszValue, cbNewVal + 1);
1479 for (unsigned i = uStart + 1; i <= pDescriptor->cLines; i++)
1480 pDescriptor->aLines[i] += cbDiff;
1481 }
1482 else
1483 {
1484 memmove(pDescriptor->aLines[uStart], pDescriptor->aLines[uStart+1],
1485 pDescriptor->aLines[pDescriptor->cLines] - pDescriptor->aLines[uStart+1] + 1);
1486 for (unsigned i = uStart + 1; i <= pDescriptor->cLines; i++)
1487 {
1488 pDescriptor->aLines[i-1] = pDescriptor->aLines[i];
1489 if (pDescriptor->aNextLines[i])
1490 pDescriptor->aNextLines[i-1] = pDescriptor->aNextLines[i] - 1;
1491 else
1492 pDescriptor->aNextLines[i-1] = 0;
1493 }
1494 pDescriptor->cLines--;
1495 /* Adjust starting line numbers of following descriptor sections. */
1496 if (uStart < pDescriptor->uFirstExtent)
1497 pDescriptor->uFirstExtent--;
1498 if (uStart < pDescriptor->uFirstDDB)
1499 pDescriptor->uFirstDDB--;
1500 }
1501 }
1502 else
1503 {
1504 /* Key doesn't exist, append after the last entry in this category. */
1505 if (!pszValue)
1506 {
1507 /* Key doesn't exist, and it should be removed. Simply a no-op. */
1508 return VINF_SUCCESS;
1509 }
1510 cbKey = strlen(pszKey);
1511 size_t cbValue = strlen(pszValue);
1512 ssize_t cbDiff = cbKey + 1 + cbValue + 1;
1513 /* Check for buffer overflow. */
1514 if ( (pDescriptor->cLines >= VMDK_DESCRIPTOR_LINES_MAX - 1)
1515 || ( pDescriptor->aLines[pDescriptor->cLines]
1516 - pDescriptor->aLines[0] > (ptrdiff_t)pDescriptor->cbDescAlloc - cbDiff))
1517 return vdIfError(pImage->pIfError, VERR_BUFFER_OVERFLOW, RT_SRC_POS, N_("VMDK: descriptor too big in '%s'"), pImage->pszFilename);
1518 for (unsigned i = pDescriptor->cLines + 1; i > uLast + 1; i--)
1519 {
1520 pDescriptor->aLines[i] = pDescriptor->aLines[i - 1];
1521 if (pDescriptor->aNextLines[i - 1])
1522 pDescriptor->aNextLines[i] = pDescriptor->aNextLines[i - 1] + 1;
1523 else
1524 pDescriptor->aNextLines[i] = 0;
1525 }
1526 uStart = uLast + 1;
1527 pDescriptor->aNextLines[uLast] = uStart;
1528 pDescriptor->aNextLines[uStart] = 0;
1529 pDescriptor->cLines++;
1530 pszTmp = pDescriptor->aLines[uStart];
1531 memmove(pszTmp + cbDiff, pszTmp,
1532 pDescriptor->aLines[pDescriptor->cLines] - pszTmp);
1533 memcpy(pDescriptor->aLines[uStart], pszKey, cbKey);
1534 pDescriptor->aLines[uStart][cbKey] = '=';
1535 memcpy(pDescriptor->aLines[uStart] + cbKey + 1, pszValue, cbValue + 1);
1536 for (unsigned i = uStart + 1; i <= pDescriptor->cLines; i++)
1537 pDescriptor->aLines[i] += cbDiff;
1538 /* Adjust starting line numbers of following descriptor sections. */
1539 if (uStart <= pDescriptor->uFirstExtent)
1540 pDescriptor->uFirstExtent++;
1541 if (uStart <= pDescriptor->uFirstDDB)
1542 pDescriptor->uFirstDDB++;
1543 }
1544 pDescriptor->fDirty = true;
1545 return VINF_SUCCESS;
1546}
1547static int vmdkDescBaseGetU32(PVMDKDESCRIPTOR pDescriptor, const char *pszKey,
1548 uint32_t *puValue)
1549{
1550 const char *pszValue;
1551 if (!vmdkDescGetStr(pDescriptor, pDescriptor->uFirstDesc, pszKey,
1552 &pszValue))
1553 return VERR_VD_VMDK_VALUE_NOT_FOUND;
1554 return RTStrToUInt32Ex(pszValue, NULL, 10, puValue);
1555}
1556/**
1557 * Returns the value of the given key as a string allocating the necessary memory.
1558 *
1559 * @returns VBox status code.
1560 * @retval VERR_VD_VMDK_VALUE_NOT_FOUND if the value could not be found.
1561 * @param pImage The VMDK image state.
1562 * @param pDescriptor The descriptor to fetch the value from.
1563 * @param pszKey The key to get the value from.
1564 * @param ppszValue Where to store the return value, use RTMemTmpFree to
1565 * free.
1566 */
1567static int vmdkDescBaseGetStr(PVMDKIMAGE pImage, PVMDKDESCRIPTOR pDescriptor,
1568 const char *pszKey, char **ppszValue)
1569{
1570 const char *pszValue;
1571 char *pszValueUnquoted;
1572 if (!vmdkDescGetStr(pDescriptor, pDescriptor->uFirstDesc, pszKey,
1573 &pszValue))
1574 return VERR_VD_VMDK_VALUE_NOT_FOUND;
1575 int rc = vmdkStringUnquote(pImage, pszValue, &pszValueUnquoted, NULL);
1576 if (RT_FAILURE(rc))
1577 return rc;
1578 *ppszValue = pszValueUnquoted;
1579 return rc;
1580}
1581static int vmdkDescBaseSetStr(PVMDKIMAGE pImage, PVMDKDESCRIPTOR pDescriptor,
1582 const char *pszKey, const char *pszValue)
1583{
1584 char *pszValueQuoted;
1585 RTStrAPrintf(&pszValueQuoted, "\"%s\"", pszValue);
1586 if (!pszValueQuoted)
1587 return VERR_NO_STR_MEMORY;
1588 int rc = vmdkDescSetStr(pImage, pDescriptor, pDescriptor->uFirstDesc, pszKey,
1589 pszValueQuoted);
1590 RTStrFree(pszValueQuoted);
1591 return rc;
1592}
1593static void vmdkDescExtRemoveDummy(PVMDKIMAGE pImage,
1594 PVMDKDESCRIPTOR pDescriptor)
1595{
1596 RT_NOREF1(pImage);
1597 unsigned uEntry = pDescriptor->uFirstExtent;
1598 ssize_t cbDiff;
1599 if (!uEntry)
1600 return;
1601 cbDiff = strlen(pDescriptor->aLines[uEntry]) + 1;
1602 /* Move everything including \0 in the entry marking the end of buffer. */
1603 memmove(pDescriptor->aLines[uEntry], pDescriptor->aLines[uEntry + 1],
1604 pDescriptor->aLines[pDescriptor->cLines] - pDescriptor->aLines[uEntry + 1] + 1);
1605 for (unsigned i = uEntry + 1; i <= pDescriptor->cLines; i++)
1606 {
1607 pDescriptor->aLines[i - 1] = pDescriptor->aLines[i] - cbDiff;
1608 if (pDescriptor->aNextLines[i])
1609 pDescriptor->aNextLines[i - 1] = pDescriptor->aNextLines[i] - 1;
1610 else
1611 pDescriptor->aNextLines[i - 1] = 0;
1612 }
1613 pDescriptor->cLines--;
1614 if (pDescriptor->uFirstDDB)
1615 pDescriptor->uFirstDDB--;
1616 return;
1617}
1618static int vmdkDescExtInsert(PVMDKIMAGE pImage, PVMDKDESCRIPTOR pDescriptor,
1619 VMDKACCESS enmAccess, uint64_t cNominalSectors,
1620 VMDKETYPE enmType, const char *pszBasename,
1621 uint64_t uSectorOffset)
1622{
1623 static const char *apszAccess[] = { "NOACCESS", "RDONLY", "RW" };
1624 static const char *apszType[] = { "", "SPARSE", "FLAT", "ZERO", "VMFS" };
1625 char *pszTmp;
1626 unsigned uStart = pDescriptor->uFirstExtent, uLast = 0;
1627 char szExt[1024];
1628 ssize_t cbDiff;
1629 Assert((unsigned)enmAccess < RT_ELEMENTS(apszAccess));
1630 Assert((unsigned)enmType < RT_ELEMENTS(apszType));
1631 /* Find last entry in extent description. */
1632 while (uStart)
1633 {
1634 if (!pDescriptor->aNextLines[uStart])
1635 uLast = uStart;
1636 uStart = pDescriptor->aNextLines[uStart];
1637 }
1638 if (enmType == VMDKETYPE_ZERO)
1639 {
1640 RTStrPrintf(szExt, sizeof(szExt), "%s %llu %s ", apszAccess[enmAccess],
1641 cNominalSectors, apszType[enmType]);
1642 }
1643 else if (enmType == VMDKETYPE_FLAT)
1644 {
1645 RTStrPrintf(szExt, sizeof(szExt), "%s %llu %s \"%s\" %llu",
1646 apszAccess[enmAccess], cNominalSectors,
1647 apszType[enmType], pszBasename, uSectorOffset);
1648 }
1649 else
1650 {
1651 RTStrPrintf(szExt, sizeof(szExt), "%s %llu %s \"%s\"",
1652 apszAccess[enmAccess], cNominalSectors,
1653 apszType[enmType], pszBasename);
1654 }
1655 cbDiff = strlen(szExt) + 1;
1656 /* Check for buffer overflow. */
1657 if ( (pDescriptor->cLines >= VMDK_DESCRIPTOR_LINES_MAX - 1)
1658 || ( pDescriptor->aLines[pDescriptor->cLines]
1659 - pDescriptor->aLines[0] > (ptrdiff_t)pDescriptor->cbDescAlloc - cbDiff))
1660 return vdIfError(pImage->pIfError, VERR_BUFFER_OVERFLOW, RT_SRC_POS, N_("VMDK: descriptor too big in '%s'"), pImage->pszFilename);
1661 for (unsigned i = pDescriptor->cLines + 1; i > uLast + 1; i--)
1662 {
1663 pDescriptor->aLines[i] = pDescriptor->aLines[i - 1];
1664 if (pDescriptor->aNextLines[i - 1])
1665 pDescriptor->aNextLines[i] = pDescriptor->aNextLines[i - 1] + 1;
1666 else
1667 pDescriptor->aNextLines[i] = 0;
1668 }
1669 uStart = uLast + 1;
1670 pDescriptor->aNextLines[uLast] = uStart;
1671 pDescriptor->aNextLines[uStart] = 0;
1672 pDescriptor->cLines++;
1673 pszTmp = pDescriptor->aLines[uStart];
1674 memmove(pszTmp + cbDiff, pszTmp,
1675 pDescriptor->aLines[pDescriptor->cLines] - pszTmp);
1676 memcpy(pDescriptor->aLines[uStart], szExt, cbDiff);
1677 for (unsigned i = uStart + 1; i <= pDescriptor->cLines; i++)
1678 pDescriptor->aLines[i] += cbDiff;
1679 /* Adjust starting line numbers of following descriptor sections. */
1680 if (uStart <= pDescriptor->uFirstDDB)
1681 pDescriptor->uFirstDDB++;
1682 pDescriptor->fDirty = true;
1683 return VINF_SUCCESS;
1684}
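/* Illustrative usage sketch for vmdkDescExtInsert (the file name is made up):
 * appending a 2 GiB flat extent via
 *
 *     rc = vmdkDescExtInsert(pImage, &pImage->Descriptor, VMDKACCESS_READWRITE,
 *                            VMDK_BYTE2SECTOR(2ULL * _1G), VMDKETYPE_FLAT,
 *                            "disk-f001.vmdk", 0);
 *
 * emits a descriptor line of the form
 *
 *     RW 4194304 FLAT "disk-f001.vmdk" 0
 *
 * ZERO extents are written without basename and offset (e.g. "RW 4194304 ZERO ")
 * and sparse extents carry a basename but no trailing sector offset. */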
1685/**
1686 * Returns the value of the given key from the DDB as a string, allocating
1687 * the necessary memory.
1688 *
1689 * @returns VBox status code.
1690 * @retval VERR_VD_VMDK_VALUE_NOT_FOUND if the value could not be found.
1691 * @param pImage The VMDK image state.
1692 * @param pDescriptor The descriptor to fetch the value from.
1693 * @param pszKey The key to get the value from.
1694 * @param ppszValue Where to store the return value, use RTMemTmpFree to
1695 * free.
1696 */
1697static int vmdkDescDDBGetStr(PVMDKIMAGE pImage, PVMDKDESCRIPTOR pDescriptor,
1698 const char *pszKey, char **ppszValue)
1699{
1700 const char *pszValue;
1701 char *pszValueUnquoted;
1702 if (!vmdkDescGetStr(pDescriptor, pDescriptor->uFirstDDB, pszKey,
1703 &pszValue))
1704 return VERR_VD_VMDK_VALUE_NOT_FOUND;
1705 int rc = vmdkStringUnquote(pImage, pszValue, &pszValueUnquoted, NULL);
1706 if (RT_FAILURE(rc))
1707 return rc;
1708 *ppszValue = pszValueUnquoted;
1709 return rc;
1710}
1711static int vmdkDescDDBGetU32(PVMDKIMAGE pImage, PVMDKDESCRIPTOR pDescriptor,
1712 const char *pszKey, uint32_t *puValue)
1713{
1714 const char *pszValue;
1715 char *pszValueUnquoted;
1716 if (!vmdkDescGetStr(pDescriptor, pDescriptor->uFirstDDB, pszKey,
1717 &pszValue))
1718 return VERR_VD_VMDK_VALUE_NOT_FOUND;
1719 int rc = vmdkStringUnquote(pImage, pszValue, &pszValueUnquoted, NULL);
1720 if (RT_FAILURE(rc))
1721 return rc;
1722 rc = RTStrToUInt32Ex(pszValueUnquoted, NULL, 10, puValue);
1723 RTMemTmpFree(pszValueUnquoted);
1724 return rc;
1725}
1726static int vmdkDescDDBGetUuid(PVMDKIMAGE pImage, PVMDKDESCRIPTOR pDescriptor,
1727 const char *pszKey, PRTUUID pUuid)
1728{
1729 const char *pszValue;
1730 char *pszValueUnquoted;
1731 if (!vmdkDescGetStr(pDescriptor, pDescriptor->uFirstDDB, pszKey,
1732 &pszValue))
1733 return VERR_VD_VMDK_VALUE_NOT_FOUND;
1734 int rc = vmdkStringUnquote(pImage, pszValue, &pszValueUnquoted, NULL);
1735 if (RT_FAILURE(rc))
1736 return rc;
1737 rc = RTUuidFromStr(pUuid, pszValueUnquoted);
1738 RTMemTmpFree(pszValueUnquoted);
1739 return rc;
1740}
1741static int vmdkDescDDBSetStr(PVMDKIMAGE pImage, PVMDKDESCRIPTOR pDescriptor,
1742 const char *pszKey, const char *pszVal)
1743{
1744 int rc;
1745 char *pszValQuoted;
1746 if (pszVal)
1747 {
1748 RTStrAPrintf(&pszValQuoted, "\"%s\"", pszVal);
1749 if (!pszValQuoted)
1750 return VERR_NO_STR_MEMORY;
1751 }
1752 else
1753 pszValQuoted = NULL;
1754 rc = vmdkDescSetStr(pImage, pDescriptor, pDescriptor->uFirstDDB, pszKey,
1755 pszValQuoted);
1756 if (pszValQuoted)
1757 RTStrFree(pszValQuoted);
1758 return rc;
1759}
1760static int vmdkDescDDBSetUuid(PVMDKIMAGE pImage, PVMDKDESCRIPTOR pDescriptor,
1761 const char *pszKey, PCRTUUID pUuid)
1762{
1763 char *pszUuid;
1764 RTStrAPrintf(&pszUuid, "\"%RTuuid\"", pUuid);
1765 if (!pszUuid)
1766 return VERR_NO_STR_MEMORY;
1767 int rc = vmdkDescSetStr(pImage, pDescriptor, pDescriptor->uFirstDDB, pszKey,
1768 pszUuid);
1769 RTStrFree(pszUuid);
1770 return rc;
1771}
1772static int vmdkDescDDBSetU32(PVMDKIMAGE pImage, PVMDKDESCRIPTOR pDescriptor,
1773 const char *pszKey, uint32_t uValue)
1774{
1775 char *pszValue;
1776 RTStrAPrintf(&pszValue, "\"%u\"", uValue);
1777 if (!pszValue)
1778 return VERR_NO_STR_MEMORY;
1779 int rc = vmdkDescSetStr(pImage, pDescriptor, pDescriptor->uFirstDDB, pszKey,
1780 pszValue);
1781 RTStrFree(pszValue);
1782 return rc;
1783}
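/* Illustrative usage sketch for the DDB helpers above, matching how they are
 * called further below:
 *
 *     rc = vmdkDescDDBSetUuid(pImage, &pImage->Descriptor,
 *                             VMDK_DDB_IMAGE_UUID, &pImage->ImageUuid);
 *     rc = vmdkDescDDBSetU32(pImage, &pImage->Descriptor,
 *                            VMDK_DDB_GEO_PCHS_CYLINDERS,
 *                            pPCHSGeometry->cCylinders);
 *
 * Numeric and UUID values are stored quoted, so a 32-bit value of 16383 ends
 * up as a DDB line of the form <key>="16383". */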
1784/**
1785 * Splits the descriptor data into individual lines, checking for correct line
1786 * endings and descriptor size.
1787 *
1788 * @returns VBox status code.
1789 * @param pImage The image instance.
1790 * @param pDesc The descriptor.
1791 * @param pszTmp The raw descriptor data from the image.
1792 */
1793static int vmdkDescSplitLines(PVMDKIMAGE pImage, PVMDKDESCRIPTOR pDesc, char *pszTmp)
1794{
1795 unsigned cLine = 0;
1796 int rc = VINF_SUCCESS;
1797 while ( RT_SUCCESS(rc)
1798 && *pszTmp != '\0')
1799 {
1800 pDesc->aLines[cLine++] = pszTmp;
1801 if (cLine >= VMDK_DESCRIPTOR_LINES_MAX)
1802 {
1803 vdIfError(pImage->pIfError, VERR_VD_VMDK_INVALID_HEADER, RT_SRC_POS, N_("VMDK: descriptor too big in '%s'"), pImage->pszFilename);
1804 rc = VERR_VD_VMDK_INVALID_HEADER;
1805 break;
1806 }
1807 while (*pszTmp != '\0' && *pszTmp != '\n')
1808 {
1809 if (*pszTmp == '\r')
1810 {
1811 if (*(pszTmp + 1) != '\n')
1812 {
1813 rc = vdIfError(pImage->pIfError, VERR_VD_VMDK_INVALID_HEADER, RT_SRC_POS, N_("VMDK: unsupported end of line in descriptor in '%s'"), pImage->pszFilename);
1814 break;
1815 }
1816 else
1817 {
1818 /* Get rid of CR character. */
1819 *pszTmp = '\0';
1820 }
1821 }
1822 pszTmp++;
1823 }
1824 if (RT_FAILURE(rc))
1825 break;
1826 /* Get rid of LF character. */
1827 if (*pszTmp == '\n')
1828 {
1829 *pszTmp = '\0';
1830 pszTmp++;
1831 }
1832 }
1833 if (RT_SUCCESS(rc))
1834 {
1835 pDesc->cLines = cLine;
1836 /* Pointer right after the end of the used part of the buffer. */
1837 pDesc->aLines[cLine] = pszTmp;
1838 }
1839 return rc;
1840}
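/* Illustrative example of the in-place split done by vmdkDescSplitLines: a
 * hypothetical raw buffer containing
 *
 *     "# Disk DescriptorFile\r\nversion=1\n"
 *
 * becomes two NUL-terminated strings, "# Disk DescriptorFile" and "version=1",
 * with aLines[0] and aLines[1] pointing at them and aLines[2] pointing right
 * after the used part of the buffer. A lone '\r' that is not followed by '\n'
 * is rejected as an unsupported line ending. */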
1841static int vmdkPreprocessDescriptor(PVMDKIMAGE pImage, char *pDescData,
1842 size_t cbDescData, PVMDKDESCRIPTOR pDescriptor)
1843{
1844 pDescriptor->cbDescAlloc = cbDescData;
1845 int rc = vmdkDescSplitLines(pImage, pDescriptor, pDescData);
1846 if (RT_SUCCESS(rc))
1847 {
1848 if ( strcmp(pDescriptor->aLines[0], "# Disk DescriptorFile")
1849 && strcmp(pDescriptor->aLines[0], "# Disk Descriptor File")
1850 && strcmp(pDescriptor->aLines[0], "#Disk Descriptor File")
1851 && strcmp(pDescriptor->aLines[0], "#Disk DescriptorFile"))
1852 rc = vdIfError(pImage->pIfError, VERR_VD_VMDK_INVALID_HEADER, RT_SRC_POS,
1853 N_("VMDK: descriptor does not start as expected in '%s'"), pImage->pszFilename);
1854 else
1855 {
1856 unsigned uLastNonEmptyLine = 0;
1857 /* Initialize these, because we need to be able to reopen an image. */
1858 pDescriptor->uFirstDesc = 0;
1859 pDescriptor->uFirstExtent = 0;
1860 pDescriptor->uFirstDDB = 0;
1861 for (unsigned i = 0; i < pDescriptor->cLines; i++)
1862 {
1863 if (*pDescriptor->aLines[i] != '#' && *pDescriptor->aLines[i] != '\0')
1864 {
1865 if ( !strncmp(pDescriptor->aLines[i], "RW", 2)
1866 || !strncmp(pDescriptor->aLines[i], "RDONLY", 6)
1867 || !strncmp(pDescriptor->aLines[i], "NOACCESS", 8) )
1868 {
1869 /* An extent descriptor. */
1870 if (!pDescriptor->uFirstDesc || pDescriptor->uFirstDDB)
1871 {
1872 /* Incorrect ordering of entries. */
1873 rc = vdIfError(pImage->pIfError, VERR_VD_VMDK_INVALID_HEADER, RT_SRC_POS,
1874 N_("VMDK: incorrect ordering of entries in descriptor in '%s'"), pImage->pszFilename);
1875 break;
1876 }
1877 if (!pDescriptor->uFirstExtent)
1878 {
1879 pDescriptor->uFirstExtent = i;
1880 uLastNonEmptyLine = 0;
1881 }
1882 }
1883 else if (!strncmp(pDescriptor->aLines[i], "ddb.", 4))
1884 {
1885 /* A disk database entry. */
1886 if (!pDescriptor->uFirstDesc || !pDescriptor->uFirstExtent)
1887 {
1888 /* Incorrect ordering of entries. */
1889 rc = vdIfError(pImage->pIfError, VERR_VD_VMDK_INVALID_HEADER, RT_SRC_POS,
1890 N_("VMDK: incorrect ordering of entries in descriptor in '%s'"), pImage->pszFilename);
1891 break;
1892 }
1893 if (!pDescriptor->uFirstDDB)
1894 {
1895 pDescriptor->uFirstDDB = i;
1896 uLastNonEmptyLine = 0;
1897 }
1898 }
1899 else
1900 {
1901 /* A normal entry. */
1902 if (pDescriptor->uFirstExtent || pDescriptor->uFirstDDB)
1903 {
1904 /* Incorrect ordering of entries. */
1905 rc = vdIfError(pImage->pIfError, VERR_VD_VMDK_INVALID_HEADER, RT_SRC_POS,
1906 N_("VMDK: incorrect ordering of entries in descriptor in '%s'"), pImage->pszFilename);
1907 break;
1908 }
1909 if (!pDescriptor->uFirstDesc)
1910 {
1911 pDescriptor->uFirstDesc = i;
1912 uLastNonEmptyLine = 0;
1913 }
1914 }
1915 if (uLastNonEmptyLine)
1916 pDescriptor->aNextLines[uLastNonEmptyLine] = i;
1917 uLastNonEmptyLine = i;
1918 }
1919 }
1920 }
1921 }
1922 return rc;
1923}
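/* Illustrative sketch of the section bookkeeping done by the preprocessing
 * above: for a typical descriptor the indices end up as, e.g.,
 *
 *     uFirstDesc   -> the "version=1" line (first plain key=value line),
 *     uFirstExtent -> the first "RW/RDONLY/NOACCESS ..." extent line,
 *     uFirstDDB    -> the first "ddb.*" line,
 *
 * with aNextLines[] chaining the non-empty, non-comment lines within each
 * section. Any other ordering of the three sections is rejected as an
 * invalid header. */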
1924static int vmdkDescSetPCHSGeometry(PVMDKIMAGE pImage,
1925 PCVDGEOMETRY pPCHSGeometry)
1926{
1927 int rc = vmdkDescDDBSetU32(pImage, &pImage->Descriptor,
1928 VMDK_DDB_GEO_PCHS_CYLINDERS,
1929 pPCHSGeometry->cCylinders);
1930 if (RT_FAILURE(rc))
1931 return rc;
1932 rc = vmdkDescDDBSetU32(pImage, &pImage->Descriptor,
1933 VMDK_DDB_GEO_PCHS_HEADS,
1934 pPCHSGeometry->cHeads);
1935 if (RT_FAILURE(rc))
1936 return rc;
1937 rc = vmdkDescDDBSetU32(pImage, &pImage->Descriptor,
1938 VMDK_DDB_GEO_PCHS_SECTORS,
1939 pPCHSGeometry->cSectors);
1940 return rc;
1941}
1942static int vmdkDescSetLCHSGeometry(PVMDKIMAGE pImage,
1943 PCVDGEOMETRY pLCHSGeometry)
1944{
1945 int rc = vmdkDescDDBSetU32(pImage, &pImage->Descriptor,
1946 VMDK_DDB_GEO_LCHS_CYLINDERS,
1947 pLCHSGeometry->cCylinders);
1948 if (RT_FAILURE(rc))
1949 return rc;
1950 rc = vmdkDescDDBSetU32(pImage, &pImage->Descriptor,
1951 VMDK_DDB_GEO_LCHS_HEADS,
1952 pLCHSGeometry->cHeads);
1953 if (RT_FAILURE(rc))
1954 return rc;
1955 rc = vmdkDescDDBSetU32(pImage, &pImage->Descriptor,
1956 VMDK_DDB_GEO_LCHS_SECTORS,
1957 pLCHSGeometry->cSectors);
1958 return rc;
1959}
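/* Illustrative usage sketch for the geometry setters above (hypothetical
 * values):
 *
 *     VDGEOMETRY LCHSGeometry;
 *     LCHSGeometry.cCylinders = 1024;
 *     LCHSGeometry.cHeads     = 255;
 *     LCHSGeometry.cSectors   = 63;
 *     rc = vmdkDescSetLCHSGeometry(pImage, &LCHSGeometry);
 *
 * Each of the three values is stored as its own quoted ddb entry via
 * vmdkDescDDBSetU32. */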
1960static int vmdkCreateDescriptor(PVMDKIMAGE pImage, char *pDescData,
1961 size_t cbDescData, PVMDKDESCRIPTOR pDescriptor)
1962{
1963 pDescriptor->uFirstDesc = 0;
1964 pDescriptor->uFirstExtent = 0;
1965 pDescriptor->uFirstDDB = 0;
1966 pDescriptor->cLines = 0;
1967 pDescriptor->cbDescAlloc = cbDescData;
1968 pDescriptor->fDirty = false;
1969 pDescriptor->aLines[pDescriptor->cLines] = pDescData;
1970 memset(pDescriptor->aNextLines, '\0', sizeof(pDescriptor->aNextLines));
1971 int rc = vmdkDescInitStr(pImage, pDescriptor, "# Disk DescriptorFile");
1972 if (RT_SUCCESS(rc))
1973 rc = vmdkDescInitStr(pImage, pDescriptor, "version=1");
1974 if (RT_SUCCESS(rc))
1975 {
1976 pDescriptor->uFirstDesc = pDescriptor->cLines - 1;
1977 rc = vmdkDescInitStr(pImage, pDescriptor, "");
1978 }
1979 if (RT_SUCCESS(rc))
1980 rc = vmdkDescInitStr(pImage, pDescriptor, "# Extent description");
1981 if (RT_SUCCESS(rc))
1982 rc = vmdkDescInitStr(pImage, pDescriptor, "NOACCESS 0 ZERO ");
1983 if (RT_SUCCESS(rc))
1984 {
1985 pDescriptor->uFirstExtent = pDescriptor->cLines - 1;
1986 rc = vmdkDescInitStr(pImage, pDescriptor, "");
1987 }
1988 if (RT_SUCCESS(rc))
1989 {
1990 /* The trailing space is created by VMware, too. */
1991 rc = vmdkDescInitStr(pImage, pDescriptor, "# The disk Data Base ");
1992 }
1993 if (RT_SUCCESS(rc))
1994 rc = vmdkDescInitStr(pImage, pDescriptor, "#DDB");
1995 if (RT_SUCCESS(rc))
1996 rc = vmdkDescInitStr(pImage, pDescriptor, "");
1997 if (RT_SUCCESS(rc))
1998 rc = vmdkDescInitStr(pImage, pDescriptor, "ddb.virtualHWVersion = \"4\"");
1999 if (RT_SUCCESS(rc))
2000 {
2001 pDescriptor->uFirstDDB = pDescriptor->cLines - 1;
2002 /* Now that the framework is in place, use the normal functions to insert
2003 * the remaining keys. */
2004 char szBuf[9];
2005 RTStrPrintf(szBuf, sizeof(szBuf), "%08x", RTRandU32());
2006 rc = vmdkDescSetStr(pImage, pDescriptor, pDescriptor->uFirstDesc,
2007 "CID", szBuf);
2008 }
2009 if (RT_SUCCESS(rc))
2010 rc = vmdkDescSetStr(pImage, pDescriptor, pDescriptor->uFirstDesc,
2011 "parentCID", "ffffffff");
2012 if (RT_SUCCESS(rc))
2013 rc = vmdkDescDDBSetStr(pImage, pDescriptor, "ddb.adapterType", "ide");
2014 return rc;
2015}
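/* Illustrative sketch of the descriptor skeleton produced by
 * vmdkCreateDescriptor (the CID value is random; a1b2c3d4 is a placeholder):
 *
 *     # Disk DescriptorFile
 *     version=1
 *     CID=a1b2c3d4
 *     parentCID=ffffffff
 *
 *     # Extent description
 *     NOACCESS 0 ZERO
 *
 *     # The disk Data Base
 *     #DDB
 *
 *     ddb.virtualHWVersion = "4"
 *     ddb.adapterType="ide"
 *
 * Lines added through vmdkDescSetStr use the compact key=value form, while
 * the literal init strings above mimic VMware's formatting. */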
2016static int vmdkParseDescriptor(PVMDKIMAGE pImage, char *pDescData, size_t cbDescData)
2017{
2018 int rc;
2019 unsigned cExtents;
2020 unsigned uLine;
2021 unsigned i;
2022 rc = vmdkPreprocessDescriptor(pImage, pDescData, cbDescData,
2023 &pImage->Descriptor);
2024 if (RT_FAILURE(rc))
2025 return rc;
2026 /* Check version, must be 1. */
2027 uint32_t uVersion;
2028 rc = vmdkDescBaseGetU32(&pImage->Descriptor, "version", &uVersion);
2029 if (RT_FAILURE(rc))
2030 return vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: error finding key 'version' in descriptor in '%s'"), pImage->pszFilename);
2031 if (uVersion != 1)
2032 return vdIfError(pImage->pIfError, VERR_VD_VMDK_UNSUPPORTED_VERSION, RT_SRC_POS, N_("VMDK: unsupported format version in descriptor in '%s'"), pImage->pszFilename);
2033 /* Get image creation type and determine image flags. */
2034 char *pszCreateType = NULL; /* initialized to make gcc shut up */
2035 rc = vmdkDescBaseGetStr(pImage, &pImage->Descriptor, "createType",
2036 &pszCreateType);
2037 if (RT_FAILURE(rc))
2038 return vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: cannot get image type from descriptor in '%s'"), pImage->pszFilename);
2039 if ( !strcmp(pszCreateType, "twoGbMaxExtentSparse")
2040 || !strcmp(pszCreateType, "twoGbMaxExtentFlat"))
2041 pImage->uImageFlags |= VD_VMDK_IMAGE_FLAGS_SPLIT_2G;
2042 else if ( !strcmp(pszCreateType, "partitionedDevice")
2043 || !strcmp(pszCreateType, "fullDevice"))
2044 pImage->uImageFlags |= VD_VMDK_IMAGE_FLAGS_RAWDISK;
2045 else if (!strcmp(pszCreateType, "streamOptimized"))
2046 pImage->uImageFlags |= VD_VMDK_IMAGE_FLAGS_STREAM_OPTIMIZED;
2047 else if (!strcmp(pszCreateType, "vmfs"))
2048 pImage->uImageFlags |= VD_IMAGE_FLAGS_FIXED | VD_VMDK_IMAGE_FLAGS_ESX;
2049 RTMemTmpFree(pszCreateType);
2050 /* Count the number of extent config entries. */
2051 for (uLine = pImage->Descriptor.uFirstExtent, cExtents = 0;
2052 uLine != 0;
2053 uLine = pImage->Descriptor.aNextLines[uLine], cExtents++)
2054 /* nothing */;
2055 if (!pImage->pDescData && cExtents != 1)
2056 {
2057 /* Monolithic image, must have only one extent (already opened). */
2058 return vdIfError(pImage->pIfError, VERR_VD_VMDK_INVALID_HEADER, RT_SRC_POS, N_("VMDK: monolithic image may only have one extent in '%s'"), pImage->pszFilename);
2059 }
2060 if (pImage->pDescData)
2061 {
2062 /* Non-monolithic image, extents need to be allocated. */
2063 rc = vmdkCreateExtents(pImage, cExtents);
2064 if (RT_FAILURE(rc))
2065 return rc;
2066 }
2067 for (i = 0, uLine = pImage->Descriptor.uFirstExtent;
2068 i < cExtents; i++, uLine = pImage->Descriptor.aNextLines[uLine])
2069 {
2070 char *pszLine = pImage->Descriptor.aLines[uLine];
2071 /* Access type of the extent. */
2072 if (!strncmp(pszLine, "RW", 2))
2073 {
2074 pImage->pExtents[i].enmAccess = VMDKACCESS_READWRITE;
2075 pszLine += 2;
2076 }
2077 else if (!strncmp(pszLine, "RDONLY", 6))
2078 {
2079 pImage->pExtents[i].enmAccess = VMDKACCESS_READONLY;
2080 pszLine += 6;
2081 }
2082 else if (!strncmp(pszLine, "NOACCESS", 8))
2083 {
2084 pImage->pExtents[i].enmAccess = VMDKACCESS_NOACCESS;
2085 pszLine += 8;
2086 }
2087 else
2088 return vdIfError(pImage->pIfError, VERR_VD_VMDK_INVALID_HEADER, RT_SRC_POS, N_("VMDK: parse error in extent description in '%s'"), pImage->pszFilename);
2089 if (*pszLine++ != ' ')
2090 return vdIfError(pImage->pIfError, VERR_VD_VMDK_INVALID_HEADER, RT_SRC_POS, N_("VMDK: parse error in extent description in '%s'"), pImage->pszFilename);
2091 /* Nominal size of the extent. */
2092 rc = RTStrToUInt64Ex(pszLine, &pszLine, 10,
2093 &pImage->pExtents[i].cNominalSectors);
2094 if (RT_FAILURE(rc))
2095 return vdIfError(pImage->pIfError, VERR_VD_VMDK_INVALID_HEADER, RT_SRC_POS, N_("VMDK: parse error in extent description in '%s'"), pImage->pszFilename);
2096 if (*pszLine++ != ' ')
2097 return vdIfError(pImage->pIfError, VERR_VD_VMDK_INVALID_HEADER, RT_SRC_POS, N_("VMDK: parse error in extent description in '%s'"), pImage->pszFilename);
2098 /* Type of the extent. */
2099 if (!strncmp(pszLine, "SPARSE", 6))
2100 {
2101 pImage->pExtents[i].enmType = VMDKETYPE_HOSTED_SPARSE;
2102 pszLine += 6;
2103 }
2104 else if (!strncmp(pszLine, "FLAT", 4))
2105 {
2106 pImage->pExtents[i].enmType = VMDKETYPE_FLAT;
2107 pszLine += 4;
2108 }
2109 else if (!strncmp(pszLine, "ZERO", 4))
2110 {
2111 pImage->pExtents[i].enmType = VMDKETYPE_ZERO;
2112 pszLine += 4;
2113 }
2114 else if (!strncmp(pszLine, "VMFS", 4))
2115 {
2116 pImage->pExtents[i].enmType = VMDKETYPE_VMFS;
2117 pszLine += 4;
2118 }
2119 else
2120 return vdIfError(pImage->pIfError, VERR_VD_VMDK_INVALID_HEADER, RT_SRC_POS, N_("VMDK: parse error in extent description in '%s'"), pImage->pszFilename);
2121 if (pImage->pExtents[i].enmType == VMDKETYPE_ZERO)
2122 {
2123 /* This one has no basename or offset. */
2124 if (*pszLine == ' ')
2125 pszLine++;
2126 if (*pszLine != '\0')
2127 return vdIfError(pImage->pIfError, VERR_VD_VMDK_INVALID_HEADER, RT_SRC_POS, N_("VMDK: parse error in extent description in '%s'"), pImage->pszFilename);
2128 pImage->pExtents[i].pszBasename = NULL;
2129 }
2130 else
2131 {
2132 /* All other extent types have basename and optional offset. */
2133 if (*pszLine++ != ' ')
2134 return vdIfError(pImage->pIfError, VERR_VD_VMDK_INVALID_HEADER, RT_SRC_POS, N_("VMDK: parse error in extent description in '%s'"), pImage->pszFilename);
2135 /* Basename of the image. Surrounded by quotes. */
2136 char *pszBasename;
2137 rc = vmdkStringUnquote(pImage, pszLine, &pszBasename, &pszLine);
2138 if (RT_FAILURE(rc))
2139 return rc;
2140 pImage->pExtents[i].pszBasename = pszBasename;
2141 if (*pszLine == ' ')
2142 {
2143 pszLine++;
2144 if (*pszLine != '\0')
2145 {
2146 /* Optional offset in extent specified. */
2147 rc = RTStrToUInt64Ex(pszLine, &pszLine, 10,
2148 &pImage->pExtents[i].uSectorOffset);
2149 if (RT_FAILURE(rc))
2150 return vdIfError(pImage->pIfError, VERR_VD_VMDK_INVALID_HEADER, RT_SRC_POS, N_("VMDK: parse error in extent description in '%s'"), pImage->pszFilename);
2151 }
2152 }
2153 if (*pszLine != '\0')
2154 return vdIfError(pImage->pIfError, VERR_VD_VMDK_INVALID_HEADER, RT_SRC_POS, N_("VMDK: parse error in extent description in '%s'"), pImage->pszFilename);
2155 }
2156 }
2157 /* Determine PCHS geometry (autogenerate if necessary). */
2158 rc = vmdkDescDDBGetU32(pImage, &pImage->Descriptor,
2159 VMDK_DDB_GEO_PCHS_CYLINDERS,
2160 &pImage->PCHSGeometry.cCylinders);
2161 if (rc == VERR_VD_VMDK_VALUE_NOT_FOUND)
2162 pImage->PCHSGeometry.cCylinders = 0;
2163 else if (RT_FAILURE(rc))
2164 return vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: error getting PCHS geometry from extent description in '%s'"), pImage->pszFilename);
2165 rc = vmdkDescDDBGetU32(pImage, &pImage->Descriptor,
2166 VMDK_DDB_GEO_PCHS_HEADS,
2167 &pImage->PCHSGeometry.cHeads);
2168 if (rc == VERR_VD_VMDK_VALUE_NOT_FOUND)
2169 pImage->PCHSGeometry.cHeads = 0;
2170 else if (RT_FAILURE(rc))
2171 return vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: error getting PCHS geometry from extent description in '%s'"), pImage->pszFilename);
2172 rc = vmdkDescDDBGetU32(pImage, &pImage->Descriptor,
2173 VMDK_DDB_GEO_PCHS_SECTORS,
2174 &pImage->PCHSGeometry.cSectors);
2175 if (rc == VERR_VD_VMDK_VALUE_NOT_FOUND)
2176 pImage->PCHSGeometry.cSectors = 0;
2177 else if (RT_FAILURE(rc))
2178 return vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: error getting PCHS geometry from extent description in '%s'"), pImage->pszFilename);
2179 if ( pImage->PCHSGeometry.cCylinders == 0
2180 || pImage->PCHSGeometry.cHeads == 0
2181 || pImage->PCHSGeometry.cHeads > 16
2182 || pImage->PCHSGeometry.cSectors == 0
2183 || pImage->PCHSGeometry.cSectors > 63)
2184 {
2185 /* Mark PCHS geometry as not yet valid (can't do the calculation here
2186 * as the total image size isn't known yet). */
2187 pImage->PCHSGeometry.cCylinders = 0;
2188 pImage->PCHSGeometry.cHeads = 16;
2189 pImage->PCHSGeometry.cSectors = 63;
2190 }
2191 /* Determine LCHS geometry (set to 0 if not specified). */
2192 rc = vmdkDescDDBGetU32(pImage, &pImage->Descriptor,
2193 VMDK_DDB_GEO_LCHS_CYLINDERS,
2194 &pImage->LCHSGeometry.cCylinders);
2195 if (rc == VERR_VD_VMDK_VALUE_NOT_FOUND)
2196 pImage->LCHSGeometry.cCylinders = 0;
2197 else if (RT_FAILURE(rc))
2198 return vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: error getting LCHS geometry from extent description in '%s'"), pImage->pszFilename);
2199 rc = vmdkDescDDBGetU32(pImage, &pImage->Descriptor,
2200 VMDK_DDB_GEO_LCHS_HEADS,
2201 &pImage->LCHSGeometry.cHeads);
2202 if (rc == VERR_VD_VMDK_VALUE_NOT_FOUND)
2203 pImage->LCHSGeometry.cHeads = 0;
2204 else if (RT_FAILURE(rc))
2205 return vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: error getting LCHS geometry from extent description in '%s'"), pImage->pszFilename);
2206 rc = vmdkDescDDBGetU32(pImage, &pImage->Descriptor,
2207 VMDK_DDB_GEO_LCHS_SECTORS,
2208 &pImage->LCHSGeometry.cSectors);
2209 if (rc == VERR_VD_VMDK_VALUE_NOT_FOUND)
2210 pImage->LCHSGeometry.cSectors = 0;
2211 else if (RT_FAILURE(rc))
2212 return vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: error getting LCHS geometry from extent description in '%s'"), pImage->pszFilename);
2213 if ( pImage->LCHSGeometry.cCylinders == 0
2214 || pImage->LCHSGeometry.cHeads == 0
2215 || pImage->LCHSGeometry.cSectors == 0)
2216 {
2217 pImage->LCHSGeometry.cCylinders = 0;
2218 pImage->LCHSGeometry.cHeads = 0;
2219 pImage->LCHSGeometry.cSectors = 0;
2220 }
2221 /* Get image UUID. */
2222 rc = vmdkDescDDBGetUuid(pImage, &pImage->Descriptor, VMDK_DDB_IMAGE_UUID,
2223 &pImage->ImageUuid);
2224 if (rc == VERR_VD_VMDK_VALUE_NOT_FOUND)
2225 {
2226 /* Image without UUID. Probably created by VMware and not yet used
2227 * by VirtualBox. Can only be added for images opened in read/write
2228 * mode, so don't bother producing a sensible UUID otherwise. */
2229 if (pImage->uOpenFlags & VD_OPEN_FLAGS_READONLY)
2230 RTUuidClear(&pImage->ImageUuid);
2231 else
2232 {
2233 rc = RTUuidCreate(&pImage->ImageUuid);
2234 if (RT_FAILURE(rc))
2235 return rc;
2236 rc = vmdkDescDDBSetUuid(pImage, &pImage->Descriptor,
2237 VMDK_DDB_IMAGE_UUID, &pImage->ImageUuid);
2238 if (RT_FAILURE(rc))
2239 return vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: error storing image UUID in descriptor in '%s'"), pImage->pszFilename);
2240 }
2241 }
2242 else if (RT_FAILURE(rc))
2243 return rc;
2244 /* Get image modification UUID. */
2245 rc = vmdkDescDDBGetUuid(pImage, &pImage->Descriptor,
2246 VMDK_DDB_MODIFICATION_UUID,
2247 &pImage->ModificationUuid);
2248 if (rc == VERR_VD_VMDK_VALUE_NOT_FOUND)
2249 {
2250 /* Image without UUID. Probably created by VMware and not yet used
2251 * by VirtualBox. Can only be added for images opened in read/write
2252 * mode, so don't bother producing a sensible UUID otherwise. */
2253 if (pImage->uOpenFlags & VD_OPEN_FLAGS_READONLY)
2254 RTUuidClear(&pImage->ModificationUuid);
2255 else
2256 {
2257 rc = RTUuidCreate(&pImage->ModificationUuid);
2258 if (RT_FAILURE(rc))
2259 return rc;
2260 rc = vmdkDescDDBSetUuid(pImage, &pImage->Descriptor,
2261 VMDK_DDB_MODIFICATION_UUID,
2262 &pImage->ModificationUuid);
2263 if (RT_FAILURE(rc))
2264 return vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: error storing image modification UUID in descriptor in '%s'"), pImage->pszFilename);
2265 }
2266 }
2267 else if (RT_FAILURE(rc))
2268 return rc;
2269 /* Get UUID of parent image. */
2270 rc = vmdkDescDDBGetUuid(pImage, &pImage->Descriptor, VMDK_DDB_PARENT_UUID,
2271 &pImage->ParentUuid);
2272 if (rc == VERR_VD_VMDK_VALUE_NOT_FOUND)
2273 {
2274 /* Image without UUID. Probably created by VMware and not yet used
2275 * by VirtualBox. Can only be added for images opened in read/write
2276 * mode, so don't bother producing a sensible UUID otherwise. */
2277 if (pImage->uOpenFlags & VD_OPEN_FLAGS_READONLY)
2278 RTUuidClear(&pImage->ParentUuid);
2279 else
2280 {
2281 rc = RTUuidClear(&pImage->ParentUuid);
2282 if (RT_FAILURE(rc))
2283 return rc;
2284 rc = vmdkDescDDBSetUuid(pImage, &pImage->Descriptor,
2285 VMDK_DDB_PARENT_UUID, &pImage->ParentUuid);
2286 if (RT_FAILURE(rc))
2287 return vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: error storing parent UUID in descriptor in '%s'"), pImage->pszFilename);
2288 }
2289 }
2290 else if (RT_FAILURE(rc))
2291 return rc;
2292 /* Get parent image modification UUID. */
2293 rc = vmdkDescDDBGetUuid(pImage, &pImage->Descriptor,
2294 VMDK_DDB_PARENT_MODIFICATION_UUID,
2295 &pImage->ParentModificationUuid);
2296 if (rc == VERR_VD_VMDK_VALUE_NOT_FOUND)
2297 {
2298 /* Image without UUID. Probably created by VMware and not yet used
2299 * by VirtualBox. Can only be added for images opened in read/write
2300 * mode, so don't bother producing a sensible UUID otherwise. */
2301 if (pImage->uOpenFlags & VD_OPEN_FLAGS_READONLY)
2302 RTUuidClear(&pImage->ParentModificationUuid);
2303 else
2304 {
2305 RTUuidClear(&pImage->ParentModificationUuid);
2306 rc = vmdkDescDDBSetUuid(pImage, &pImage->Descriptor,
2307 VMDK_DDB_PARENT_MODIFICATION_UUID,
2308 &pImage->ParentModificationUuid);
2309 if (RT_FAILURE(rc))
2310 return vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: error storing parent modification UUID in descriptor in '%s'"), pImage->pszFilename);
2311 }
2312 }
2313 else if (RT_FAILURE(rc))
2314 return rc;
2315 return VINF_SUCCESS;
2316}
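/* Illustrative summary of the extent lines accepted by the parser above:
 *
 *     <access> <size in sectors> <type> ["<basename>" [<sector offset>]]
 *
 * for example (file names are made up):
 *
 *     RW 4194304 SPARSE "disk-s001.vmdk"
 *     RW 2048 FLAT "disk-pt.vmdk" 0
 *     RW 4192256 ZERO
 *
 * where <access> is NOACCESS, RDONLY or RW and <type> is SPARSE, FLAT, ZERO
 * or VMFS; ZERO extents carry neither a basename nor an offset. */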
2317/**
2318 * Internal: Prepares the descriptor for writing to the image.
2319 */
2320static int vmdkDescriptorPrepare(PVMDKIMAGE pImage, uint64_t cbLimit,
2321 void **ppvData, size_t *pcbData)
2322{
2323 int rc = VINF_SUCCESS;
2324 /*
2325 * Allocate temporary descriptor buffer.
2326 * If there is no limit, allocate a default size
2327 * and grow it as required.
2328 */
2329 size_t cbDescriptor = cbLimit ? cbLimit : 4 * _1K;
2330 char *pszDescriptor = (char *)RTMemAllocZ(cbDescriptor);
2331 size_t offDescriptor = 0;
2332 if (!pszDescriptor)
2333 return VERR_NO_MEMORY;
2334 for (unsigned i = 0; i < pImage->Descriptor.cLines; i++)
2335 {
2336 const char *psz = pImage->Descriptor.aLines[i];
2337 size_t cb = strlen(psz);
2338 /*
2339 * Grow the descriptor buffer if there is no limit and
2340 * there is not enough room left for this line.
2341 */
2342 if (offDescriptor + cb + 1 > cbDescriptor)
2343 {
2344 if (cbLimit)
2345 {
2346 rc = vdIfError(pImage->pIfError, VERR_BUFFER_OVERFLOW, RT_SRC_POS, N_("VMDK: descriptor too long in '%s'"), pImage->pszFilename);
2347 break;
2348 }
2349 else
2350 {
2351 char *pszDescriptorNew = NULL;
2352 LogFlow(("Increasing descriptor cache\n"));
2353 pszDescriptorNew = (char *)RTMemRealloc(pszDescriptor, cbDescriptor + cb + 4 * _1K);
2354 if (!pszDescriptorNew)
2355 {
2356 rc = VERR_NO_MEMORY;
2357 break;
2358 }
2359 pszDescriptor = pszDescriptorNew;
2360 cbDescriptor += cb + 4 * _1K;
2361 }
2362 }
2363 if (cb > 0)
2364 {
2365 memcpy(pszDescriptor + offDescriptor, psz, cb);
2366 offDescriptor += cb;
2367 }
2368 memcpy(pszDescriptor + offDescriptor, "\n", 1);
2369 offDescriptor++;
2370 }
2371 if (RT_SUCCESS(rc))
2372 {
2373 *ppvData = pszDescriptor;
2374 *pcbData = offDescriptor;
2375 }
2376 else if (pszDescriptor)
2377 RTMemFree(pszDescriptor);
2378 return rc;
2379}
2380/**
2381 * Internal: write/update the descriptor part of the image.
2382 */
2383static int vmdkWriteDescriptor(PVMDKIMAGE pImage, PVDIOCTX pIoCtx)
2384{
2385 int rc = VINF_SUCCESS;
2386 uint64_t cbLimit;
2387 uint64_t uOffset;
2388 PVMDKFILE pDescFile;
2389 void *pvDescriptor = NULL;
2390 size_t cbDescriptor;
2391 if (pImage->pDescData)
2392 {
2393 /* Separate descriptor file. */
2394 uOffset = 0;
2395 cbLimit = 0;
2396 pDescFile = pImage->pFile;
2397 }
2398 else
2399 {
2400 /* Embedded descriptor file. */
2401 uOffset = VMDK_SECTOR2BYTE(pImage->pExtents[0].uDescriptorSector);
2402 cbLimit = VMDK_SECTOR2BYTE(pImage->pExtents[0].cDescriptorSectors);
2403 pDescFile = pImage->pExtents[0].pFile;
2404 }
2405 /* Bail out if there is no file to write to. */
2406 if (pDescFile == NULL)
2407 return VERR_INVALID_PARAMETER;
2408 rc = vmdkDescriptorPrepare(pImage, cbLimit, &pvDescriptor, &cbDescriptor);
2409 if (RT_SUCCESS(rc))
2410 {
2411 rc = vdIfIoIntFileWriteMeta(pImage->pIfIo, pDescFile->pStorage,
2412 uOffset, pvDescriptor,
2413 cbLimit ? cbLimit : cbDescriptor,
2414 pIoCtx, NULL, NULL);
2415 if ( RT_FAILURE(rc)
2416 && rc != VERR_VD_ASYNC_IO_IN_PROGRESS)
2417 rc = vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: error writing descriptor in '%s'"), pImage->pszFilename);
2418 }
2419 if (RT_SUCCESS(rc) && !cbLimit)
2420 {
2421 rc = vdIfIoIntFileSetSize(pImage->pIfIo, pDescFile->pStorage, cbDescriptor);
2422 if (RT_FAILURE(rc))
2423 rc = vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: error truncating descriptor in '%s'"), pImage->pszFilename);
2424 }
2425 if (RT_SUCCESS(rc))
2426 pImage->Descriptor.fDirty = false;
2427 if (pvDescriptor)
2428 RTMemFree(pvDescriptor);
2429 return rc;
2430}
2431/**
2432 * Internal: validate the consistency check values in a binary header.
2433 */
2434static int vmdkValidateHeader(PVMDKIMAGE pImage, PVMDKEXTENT pExtent, const SparseExtentHeader *pHeader)
2435{
2436 int rc = VINF_SUCCESS;
2437 if (RT_LE2H_U32(pHeader->magicNumber) != VMDK_SPARSE_MAGICNUMBER)
2438 {
2439 rc = vdIfError(pImage->pIfError, VERR_VD_VMDK_INVALID_HEADER, RT_SRC_POS, N_("VMDK: incorrect magic in sparse extent header in '%s'"), pExtent->pszFullname);
2440 return rc;
2441 }
2442 if (RT_LE2H_U32(pHeader->version) != 1 && RT_LE2H_U32(pHeader->version) != 3)
2443 {
2444 rc = vdIfError(pImage->pIfError, VERR_VD_VMDK_UNSUPPORTED_VERSION, RT_SRC_POS, N_("VMDK: incorrect version in sparse extent header in '%s', not a VMDK 1.0/1.1 conforming file"), pExtent->pszFullname);
2445 return rc;
2446 }
2447 if ( (RT_LE2H_U32(pHeader->flags) & 1)
2448 && ( pHeader->singleEndLineChar != '\n'
2449 || pHeader->nonEndLineChar != ' '
2450 || pHeader->doubleEndLineChar1 != '\r'
2451 || pHeader->doubleEndLineChar2 != '\n') )
2452 {
2453 rc = vdIfError(pImage->pIfError, VERR_VD_VMDK_INVALID_HEADER, RT_SRC_POS, N_("VMDK: corrupted by CR/LF translation in '%s'"), pExtent->pszFullname);
2454 return rc;
2455 }
2456 if (RT_LE2H_U64(pHeader->descriptorSize) > VMDK_SPARSE_DESCRIPTOR_SIZE_MAX)
2457 {
2458 rc = vdIfError(pImage->pIfError, VERR_VD_VMDK_INVALID_HEADER, RT_SRC_POS, N_("VMDK: descriptor size out of bounds in '%s' (%llu vs %llu)"),
2459 pExtent->pszFullname, RT_LE2H_U64(pHeader->descriptorSize), VMDK_SPARSE_DESCRIPTOR_SIZE_MAX);
2460 return rc;
2461 }
2462 return rc;
2463}
2464/**
2465 * Internal: read metadata belonging to an extent with binary header, i.e.
2466 * as found in monolithic files.
2467 */
2468static int vmdkReadBinaryMetaExtent(PVMDKIMAGE pImage, PVMDKEXTENT pExtent,
2469 bool fMagicAlreadyRead)
2470{
2471 SparseExtentHeader Header;
2472 int rc;
2473 if (!fMagicAlreadyRead)
2474 rc = vdIfIoIntFileReadSync(pImage->pIfIo, pExtent->pFile->pStorage, 0,
2475 &Header, sizeof(Header));
2476 else
2477 {
2478 Header.magicNumber = RT_H2LE_U32(VMDK_SPARSE_MAGICNUMBER);
2479 rc = vdIfIoIntFileReadSync(pImage->pIfIo, pExtent->pFile->pStorage,
2480 RT_UOFFSETOF(SparseExtentHeader, version),
2481 &Header.version,
2482 sizeof(Header)
2483 - RT_UOFFSETOF(SparseExtentHeader, version));
2484 }
2485 if (RT_SUCCESS(rc))
2486 {
2487 rc = vmdkValidateHeader(pImage, pExtent, &Header);
2488 if (RT_SUCCESS(rc))
2489 {
2490 uint64_t cbFile = 0;
2491 if ( (RT_LE2H_U32(Header.flags) & RT_BIT(17))
2492 && RT_LE2H_U64(Header.gdOffset) == VMDK_GD_AT_END)
2493 pExtent->fFooter = true;
2494 if ( !(pImage->uOpenFlags & VD_OPEN_FLAGS_READONLY)
2495 || ( pExtent->fFooter
2496 && !(pImage->uOpenFlags & VD_OPEN_FLAGS_SEQUENTIAL)))
2497 {
2498 rc = vdIfIoIntFileGetSize(pImage->pIfIo, pExtent->pFile->pStorage, &cbFile);
2499 if (RT_FAILURE(rc))
2500 rc = vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: cannot get size of '%s'"), pExtent->pszFullname);
2501 }
2502 if (RT_SUCCESS(rc))
2503 {
2504 if (!(pImage->uOpenFlags & VD_OPEN_FLAGS_READONLY))
2505 pExtent->uAppendPosition = RT_ALIGN_64(cbFile, 512);
2506 if ( pExtent->fFooter
2507 && ( !(pImage->uOpenFlags & VD_OPEN_FLAGS_READONLY)
2508 || !(pImage->uOpenFlags & VD_OPEN_FLAGS_SEQUENTIAL)))
2509 {
2510 /* Read the footer, which comes before the end-of-stream marker. */
2511 rc = vdIfIoIntFileReadSync(pImage->pIfIo, pExtent->pFile->pStorage,
2512 cbFile - 2*512, &Header,
2513 sizeof(Header));
2514 if (RT_FAILURE(rc))
2515 {
2516 vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: error reading extent footer in '%s'"), pExtent->pszFullname);
2517 rc = VERR_VD_VMDK_INVALID_HEADER;
2518 }
2519 if (RT_SUCCESS(rc))
2520 rc = vmdkValidateHeader(pImage, pExtent, &Header);
2521 /* Prohibit any writes to this extent. */
2522 pExtent->uAppendPosition = 0;
2523 }
2524 if (RT_SUCCESS(rc))
2525 {
2526 pExtent->uVersion = RT_LE2H_U32(Header.version);
2527 pExtent->enmType = VMDKETYPE_HOSTED_SPARSE; /* Just dummy value, changed later. */
2528 pExtent->cSectors = RT_LE2H_U64(Header.capacity);
2529 pExtent->cSectorsPerGrain = RT_LE2H_U64(Header.grainSize);
2530 pExtent->uDescriptorSector = RT_LE2H_U64(Header.descriptorOffset);
2531 pExtent->cDescriptorSectors = RT_LE2H_U64(Header.descriptorSize);
2532 pExtent->cGTEntries = RT_LE2H_U32(Header.numGTEsPerGT);
2533 pExtent->cOverheadSectors = RT_LE2H_U64(Header.overHead);
2534 pExtent->fUncleanShutdown = !!Header.uncleanShutdown;
2535 pExtent->uCompression = RT_LE2H_U16(Header.compressAlgorithm);
2536 if (RT_LE2H_U32(Header.flags) & RT_BIT(1))
2537 {
2538 pExtent->uSectorRGD = RT_LE2H_U64(Header.rgdOffset);
2539 pExtent->uSectorGD = RT_LE2H_U64(Header.gdOffset);
2540 }
2541 else
2542 {
2543 pExtent->uSectorGD = RT_LE2H_U64(Header.gdOffset);
2544 pExtent->uSectorRGD = 0;
2545 }
2546 if (pExtent->uDescriptorSector && !pExtent->cDescriptorSectors)
2547 rc = vdIfError(pImage->pIfError, VERR_VD_VMDK_INVALID_HEADER, RT_SRC_POS,
2548 N_("VMDK: inconsistent embedded descriptor config in '%s'"), pExtent->pszFullname);
2549 if ( RT_SUCCESS(rc)
2550 && ( pExtent->uSectorGD == VMDK_GD_AT_END
2551 || pExtent->uSectorRGD == VMDK_GD_AT_END)
2552 && ( !(pImage->uOpenFlags & VD_OPEN_FLAGS_READONLY)
2553 || !(pImage->uOpenFlags & VD_OPEN_FLAGS_SEQUENTIAL)))
2554 rc = vdIfError(pImage->pIfError, VERR_VD_VMDK_INVALID_HEADER, RT_SRC_POS,
2555 N_("VMDK: cannot resolve grain directory offset in '%s'"), pExtent->pszFullname);
2556 if (RT_SUCCESS(rc))
2557 {
2558 uint64_t cSectorsPerGDE = pExtent->cGTEntries * pExtent->cSectorsPerGrain;
2559 if (!cSectorsPerGDE || cSectorsPerGDE > UINT32_MAX)
2560 rc = vdIfError(pImage->pIfError, VERR_VD_VMDK_INVALID_HEADER, RT_SRC_POS,
2561 N_("VMDK: incorrect grain directory size in '%s'"), pExtent->pszFullname);
2562 else
2563 {
2564 pExtent->cSectorsPerGDE = cSectorsPerGDE;
2565 pExtent->cGDEntries = (pExtent->cSectors + cSectorsPerGDE - 1) / cSectorsPerGDE;
2566 /* Fix up the number of descriptor sectors, as some flat images have
2567 * really just one, and this causes failures when inserting the UUID
2568 * values and other extra information. */
2569 if (pExtent->cDescriptorSectors != 0 && pExtent->cDescriptorSectors < 4)
2570 {
2571 /* Do it the easy way: just fix it for flat images, which have no
2572 * other complicated metadata that needs space too. */
2573 if ( pExtent->uDescriptorSector + 4 < pExtent->cOverheadSectors
2574 && pExtent->cGTEntries * pExtent->cGDEntries == 0)
2575 pExtent->cDescriptorSectors = 4;
2576 }
2577 }
2578 }
2579 }
2580 }
2581 }
2582 }
2583 else
2584 {
2585 vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: error reading extent header in '%s'"), pExtent->pszFullname);
2586 rc = VERR_VD_VMDK_INVALID_HEADER;
2587 }
2588 if (RT_FAILURE(rc))
2589 vmdkFreeExtentData(pImage, pExtent, false);
2590 return rc;
2591}
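/* Worked example for the grain directory sizing computed above: one grain
 * directory entry (GDE) covers cGTEntries * cSectorsPerGrain sectors.
 * Assuming the common VMDK defaults of 512 grain table entries per grain
 * table and a 128-sector (64 KiB) grain, which is an assumption here since
 * the real values come from the binary header,
 *
 *     cSectorsPerGDE = 512 * 128 = 65536 sectors (32 MiB)
 *     cGDEntries     = (4194304 + 65536 - 1) / 65536 = 64
 *
 * for a 2 GiB (4194304 sector) sparse extent. */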
2592/**
2593 * Internal: read additional metadata belonging to an extent. For those
2594 * extents which have no additional metadata, just verify the information.
2595 */
2596static int vmdkReadMetaExtent(PVMDKIMAGE pImage, PVMDKEXTENT pExtent)
2597{
2598 int rc = VINF_SUCCESS;
2599/* Disabled the check as there are too many truncated VMDK images out there. */
2600#ifdef VBOX_WITH_VMDK_STRICT_SIZE_CHECK
2601 uint64_t cbExtentSize;
2602 /* The image must be a multiple of a sector in size and contain the data
2603 * area (flat images only). If not, it means the image is at least
2604 * truncated, or even seriously garbled. */
2605 rc = vdIfIoIntFileGetSize(pImage->pIfIo, pExtent->pFile->pStorage, &cbExtentSize);
2606 if (RT_FAILURE(rc))
2607 rc = vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: error getting size in '%s'"), pExtent->pszFullname);
2608 else if ( cbExtentSize != RT_ALIGN_64(cbExtentSize, 512)
2609 && (pExtent->enmType != VMDKETYPE_FLAT || pExtent->cNominalSectors + pExtent->uSectorOffset > VMDK_BYTE2SECTOR(cbExtentSize)))
2610 rc = vdIfError(pImage->pIfError, VERR_VD_VMDK_INVALID_HEADER, RT_SRC_POS,
2611 N_("VMDK: file size is not a multiple of 512 in '%s', file is truncated or otherwise garbled"), pExtent->pszFullname);
2612#endif /* VBOX_WITH_VMDK_STRICT_SIZE_CHECK */
2613 if ( RT_SUCCESS(rc)
2614 && pExtent->enmType == VMDKETYPE_HOSTED_SPARSE)
2615 {
2616 /* The spec says that this must be a power of two and greater than 8,
2617 * but they probably meant not less than 8. */
2618 if ( (pExtent->cSectorsPerGrain & (pExtent->cSectorsPerGrain - 1))
2619 || pExtent->cSectorsPerGrain < 8)
2620 rc = vdIfError(pImage->pIfError, VERR_VD_VMDK_INVALID_HEADER, RT_SRC_POS,
2621 N_("VMDK: invalid extent grain size %u in '%s'"), pExtent->cSectorsPerGrain, pExtent->pszFullname);
2622 else
2623 {
2624 /* This code requires that a grain table hold a power-of-two multiple
2625 * of the number of entries per GT cache entry. */
2626 if ( (pExtent->cGTEntries & (pExtent->cGTEntries - 1))
2627 || pExtent->cGTEntries < VMDK_GT_CACHELINE_SIZE)
2628 rc = vdIfError(pImage->pIfError, VERR_VD_VMDK_INVALID_HEADER, RT_SRC_POS,
2629 N_("VMDK: grain table cache size problem in '%s'"), pExtent->pszFullname);
2630 else
2631 {
2632 rc = vmdkAllocStreamBuffers(pImage, pExtent);
2633 if (RT_SUCCESS(rc))
2634 {
2635 /* Prohibit any writes to this streamOptimized extent. */
2636 if (pImage->uImageFlags & VD_VMDK_IMAGE_FLAGS_STREAM_OPTIMIZED)
2637 pExtent->uAppendPosition = 0;
2638 if ( !(pImage->uImageFlags & VD_VMDK_IMAGE_FLAGS_STREAM_OPTIMIZED)
2639 || !(pImage->uOpenFlags & VD_OPEN_FLAGS_READONLY)
2640 || !(pImage->uOpenFlags & VD_OPEN_FLAGS_SEQUENTIAL))
2641 rc = vmdkReadGrainDirectory(pImage, pExtent);
2642 else
2643 {
2644 pExtent->uGrainSectorAbs = pExtent->cOverheadSectors;
2645 pExtent->cbGrainStreamRead = 0;
2646 }
2647 }
2648 }
2649 }
2650 }
2651 if (RT_FAILURE(rc))
2652 vmdkFreeExtentData(pImage, pExtent, false);
2653 return rc;
2654}
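/* Note on the power-of-two checks used above: for an unsigned x != 0 the
 * expression
 *
 *     (x & (x - 1)) == 0
 *
 * is true exactly when x is a power of two, because clearing the lowest set
 * bit must leave nothing behind. The grain size lower bound follows the spec
 * remark above, while the grain table entry requirement stems from this
 * backend's GT cache line size. */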
2655/**
2656 * Internal: write/update the metadata for a sparse extent.
2657 */
2658static int vmdkWriteMetaSparseExtent(PVMDKIMAGE pImage, PVMDKEXTENT pExtent,
2659 uint64_t uOffset, PVDIOCTX pIoCtx)
2660{
2661 SparseExtentHeader Header;
2662 memset(&Header, '\0', sizeof(Header));
2663 Header.magicNumber = RT_H2LE_U32(VMDK_SPARSE_MAGICNUMBER);
2664 Header.version = RT_H2LE_U32(pExtent->uVersion);
2665 Header.flags = RT_H2LE_U32(RT_BIT(0));
2666 if (pExtent->pRGD)
2667 Header.flags |= RT_H2LE_U32(RT_BIT(1));
2668 if (pImage->uImageFlags & VD_VMDK_IMAGE_FLAGS_STREAM_OPTIMIZED)
2669 Header.flags |= RT_H2LE_U32(RT_BIT(16) | RT_BIT(17));
2670 Header.capacity = RT_H2LE_U64(pExtent->cSectors);
2671 Header.grainSize = RT_H2LE_U64(pExtent->cSectorsPerGrain);
2672 Header.descriptorOffset = RT_H2LE_U64(pExtent->uDescriptorSector);
2673 Header.descriptorSize = RT_H2LE_U64(pExtent->cDescriptorSectors);
2674 Header.numGTEsPerGT = RT_H2LE_U32(pExtent->cGTEntries);
2675 if (pExtent->fFooter && uOffset == 0)
2676 {
2677 if (pExtent->pRGD)
2678 {
2679 Assert(pExtent->uSectorRGD);
2680 Header.rgdOffset = RT_H2LE_U64(VMDK_GD_AT_END);
2681 Header.gdOffset = RT_H2LE_U64(VMDK_GD_AT_END);
2682 }
2683 else
2684 Header.gdOffset = RT_H2LE_U64(VMDK_GD_AT_END);
2685 }
2686 else
2687 {
2688 if (pExtent->pRGD)
2689 {
2690 Assert(pExtent->uSectorRGD);
2691 Header.rgdOffset = RT_H2LE_U64(pExtent->uSectorRGD);
2692 Header.gdOffset = RT_H2LE_U64(pExtent->uSectorGD);
2693 }
2694 else
2695 Header.gdOffset = RT_H2LE_U64(pExtent->uSectorGD);
2696 }
2697 Header.overHead = RT_H2LE_U64(pExtent->cOverheadSectors);
2698 Header.uncleanShutdown = pExtent->fUncleanShutdown;
2699 Header.singleEndLineChar = '\n';
2700 Header.nonEndLineChar = ' ';
2701 Header.doubleEndLineChar1 = '\r';
2702 Header.doubleEndLineChar2 = '\n';
2703 Header.compressAlgorithm = RT_H2LE_U16(pExtent->uCompression);
2704 int rc = vdIfIoIntFileWriteMeta(pImage->pIfIo, pExtent->pFile->pStorage,
2705 uOffset, &Header, sizeof(Header),
2706 pIoCtx, NULL, NULL);
2707 if (RT_FAILURE(rc) && (rc != VERR_VD_ASYNC_IO_IN_PROGRESS))
2708 rc = vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: error writing extent header in '%s'"), pExtent->pszFullname);
2709 return rc;
2710}
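/* Note on the header flag bits set by vmdkWriteMetaSparseExtent above:
 * RT_BIT(0) accompanies the newline detection test characters written below
 * it, RT_BIT(1) is set when a redundant grain directory (pRGD) exists, and
 * RT_BIT(16) | RT_BIT(17) are set for streamOptimized images, commonly
 * denoting compressed grains and marker usage (that interpretation is an
 * assumption here). For footer-style extents the header copy (uOffset == 0)
 * stores VMDK_GD_AT_END instead of the real grain directory offsets. */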
2711/**
2712 * Internal: free the buffers used for streamOptimized images.
2713 */
2714static void vmdkFreeStreamBuffers(PVMDKEXTENT pExtent)
2715{
2716 if (pExtent->pvCompGrain)
2717 {
2718 RTMemFree(pExtent->pvCompGrain);
2719 pExtent->pvCompGrain = NULL;
2720 }
2721 if (pExtent->pvGrain)
2722 {
2723 RTMemFree(pExtent->pvGrain);
2724 pExtent->pvGrain = NULL;
2725 }
2726}
2727/**
2728 * Internal: free the memory used by the extent data structure, optionally
2729 * deleting the referenced files.
2730 *
2731 * @returns VBox status code.
2732 * @param pImage Pointer to the image instance data.
2733 * @param pExtent The extent to free.
2734 * @param fDelete Flag whether to delete the backing storage.
2735 */
2736static int vmdkFreeExtentData(PVMDKIMAGE pImage, PVMDKEXTENT pExtent,
2737 bool fDelete)
2738{
2739 int rc = VINF_SUCCESS;
2740 vmdkFreeGrainDirectory(pExtent);
2741 if (pExtent->pDescData)
2742 {
2743 RTMemFree(pExtent->pDescData);
2744 pExtent->pDescData = NULL;
2745 }
2746 if (pExtent->pFile != NULL)
2747 {
2748 /* Do not delete raw extents; these have identical full and base names. */
2749 rc = vmdkFileClose(pImage, &pExtent->pFile,
2750 fDelete
2751 && pExtent->pszFullname
2752 && pExtent->pszBasename
2753 && strcmp(pExtent->pszFullname, pExtent->pszBasename));
2754 }
2755 if (pExtent->pszBasename)
2756 {
2757 RTMemTmpFree((void *)pExtent->pszBasename);
2758 pExtent->pszBasename = NULL;
2759 }
2760 if (pExtent->pszFullname)
2761 {
2762 RTStrFree((char *)(void *)pExtent->pszFullname);
2763 pExtent->pszFullname = NULL;
2764 }
2765 vmdkFreeStreamBuffers(pExtent);
2766 return rc;
2767}
2768/**
2769 * Internal: allocate grain table cache if necessary for this image.
2770 */
2771static int vmdkAllocateGrainTableCache(PVMDKIMAGE pImage)
2772{
2773 PVMDKEXTENT pExtent;
2774 /* Allocate grain table cache if any sparse extent is present. */
2775 for (unsigned i = 0; i < pImage->cExtents; i++)
2776 {
2777 pExtent = &pImage->pExtents[i];
2778 if (pExtent->enmType == VMDKETYPE_HOSTED_SPARSE)
2779 {
2780 /* Allocate grain table cache. */
2781 pImage->pGTCache = (PVMDKGTCACHE)RTMemAllocZ(sizeof(VMDKGTCACHE));
2782 if (!pImage->pGTCache)
2783 return VERR_NO_MEMORY;
2784 for (unsigned j = 0; j < VMDK_GT_CACHE_SIZE; j++)
2785 {
2786 PVMDKGTCACHEENTRY pGCE = &pImage->pGTCache->aGTCache[j];
2787 pGCE->uExtent = UINT32_MAX;
2788 }
2789 pImage->pGTCache->cEntries = VMDK_GT_CACHE_SIZE;
2790 break;
2791 }
2792 }
2793 return VINF_SUCCESS;
2794}
2795/**
2796 * Internal: allocate the given number of extents.
2797 */
2798static int vmdkCreateExtents(PVMDKIMAGE pImage, unsigned cExtents)
2799{
2800 int rc = VINF_SUCCESS;
2801 PVMDKEXTENT pExtents = (PVMDKEXTENT)RTMemAllocZ(cExtents * sizeof(VMDKEXTENT));
2802 if (pExtents)
2803 {
2804 for (unsigned i = 0; i < cExtents; i++)
2805 {
2806 pExtents[i].pFile = NULL;
2807 pExtents[i].pszBasename = NULL;
2808 pExtents[i].pszFullname = NULL;
2809 pExtents[i].pGD = NULL;
2810 pExtents[i].pRGD = NULL;
2811 pExtents[i].pDescData = NULL;
2812 pExtents[i].uVersion = 1;
2813 pExtents[i].uCompression = VMDK_COMPRESSION_NONE;
2814 pExtents[i].uExtent = i;
2815 pExtents[i].pImage = pImage;
2816 }
2817 pImage->pExtents = pExtents;
2818 pImage->cExtents = cExtents;
2819 }
2820 else
2821 rc = VERR_NO_MEMORY;
2822 return rc;
2823}
2824/**
2825 * Internal: allocates and describes an additional, file-backed extent
2826 * for the given size. Preserves the original extents.
2827 */
2828static int vmdkAddFileBackedExtent(PVMDKIMAGE pImage, uint64_t cbSize)
2829{
2830 int rc = VINF_SUCCESS;
2831 PVMDKEXTENT pNewExtents = (PVMDKEXTENT)RTMemAllocZ((pImage->cExtents + 1) * sizeof(VMDKEXTENT));
2832 if (pNewExtents)
2833 {
2834 memcpy(pNewExtents, pImage->pExtents, pImage->cExtents * sizeof(VMDKEXTENT));
2835 PVMDKEXTENT pExtent = &pNewExtents[pImage->cExtents];
2836
2837 pExtent->pFile = NULL;
2838 pExtent->pszBasename = NULL;
2839 pExtent->pszFullname = NULL;
2840 pExtent->pGD = NULL;
2841 pExtent->pRGD = NULL;
2842 pExtent->pDescData = NULL;
2843 pExtent->uVersion = 1;
2844 pExtent->uCompression = VMDK_COMPRESSION_NONE;
2845 pExtent->uExtent = pImage->cExtents;
2846 pExtent->pImage = pImage;
2847 pExtent->cNominalSectors = VMDK_BYTE2SECTOR(cbSize);
2848 pExtent->enmType = VMDKETYPE_FLAT;
2849 pExtent->enmAccess = VMDKACCESS_READWRITE;
2850 pExtent->uSectorOffset = 0;
2851
2852 char *pszBasenameSubstr = RTPathFilename(pImage->pszFilename);
2853 AssertPtr(pszBasenameSubstr);
2854
2855 char *pszBasenameSuff = RTPathSuffix(pszBasenameSubstr);
2856 char *pszBasenameBase = RTStrDup(pszBasenameSubstr);
2857 RTPathStripSuffix(pszBasenameBase);
2858 char *pszTmp;
2859 size_t cbTmp;
2860
2861 if (pImage->uImageFlags & VD_IMAGE_FLAGS_FIXED)
2862 RTStrAPrintf(&pszTmp, "%s-f%03d%s", pszBasenameBase,
2863 pExtent->uExtent + 1, pszBasenameSuff);
2864 else
2865 RTStrAPrintf(&pszTmp, "%s-s%03d%s", pszBasenameBase, pExtent->uExtent + 1,
2866 pszBasenameSuff);
2867
2868 RTStrFree(pszBasenameBase);
2869 if (!pszTmp)
2870 return VERR_NO_STR_MEMORY;
2871 cbTmp = strlen(pszTmp) + 1;
2872 char *pszBasename = (char *)RTMemTmpAlloc(cbTmp);
2873 if (!pszBasename)
2874 {
2875 RTStrFree(pszTmp);
2876 return VERR_NO_MEMORY;
2877 }
2878
2879 memcpy(pszBasename, pszTmp, cbTmp);
2880 RTStrFree(pszTmp);
2881
2882 pExtent->pszBasename = pszBasename;
2883
2884 char *pszBasedirectory = RTStrDup(pImage->pszFilename);
2885 if (!pszBasedirectory)
2886 return VERR_NO_STR_MEMORY;
2887 RTPathStripFilename(pszBasedirectory);
2888 char *pszFullname = RTPathJoinA(pszBasedirectory, pExtent->pszBasename);
2889 RTStrFree(pszBasedirectory);
2890 if (!pszFullname)
2891 return VERR_NO_STR_MEMORY;
2892 pExtent->pszFullname = pszFullname;
2893
2894 /* Create file for extent. */
2895 rc = vmdkFileOpen(pImage, &pExtent->pFile, pExtent->pszBasename, pExtent->pszFullname,
2896 VDOpenFlagsToFileOpenFlags(pImage->uOpenFlags,
2897 true /* fCreate */));
2898 if (RT_FAILURE(rc))
2899 return vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: could not create new file '%s'"), pExtent->pszFullname);
2900
2901 rc = vmdkDescExtInsert(pImage, &pImage->Descriptor, pExtent->enmAccess,
2902 pExtent->cNominalSectors, pExtent->enmType,
2903 pExtent->pszBasename, pExtent->uSectorOffset);
2904 if (RT_FAILURE(rc))
2905 return vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: could not insert the extent list into descriptor in '%s'"), pImage->pszFilename);
2906
2907 rc = vdIfIoIntFileSetAllocationSize(pImage->pIfIo, pExtent->pFile->pStorage, cbSize,
2908 0 /* fFlags */, NULL, 0, 0);
2909
2910 if (RT_FAILURE(rc))
2911 return vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: could not set size of new file '%s'"), pExtent->pszFullname);
2912
2913 pImage->pExtents = pNewExtents;
2914 pImage->cExtents++;
2915 }
2916 else
2917 rc = VERR_NO_MEMORY;
2918 return rc;
2919}
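/* Illustrative example of the extent file names generated above: for a
 * hypothetical image "disk.vmdk", the second extent (uExtent == 1) is named
 *
 *     disk-f002.vmdk   for fixed (VD_IMAGE_FLAGS_FIXED) images, and
 *     disk-s002.vmdk   otherwise,
 *
 * per the "%s-f%03d%s" and "%s-s%03d%s" format strings. */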
2920/**
2921 * Reads and processes the descriptor embedded in sparse images.
2922 *
2923 * @returns VBox status code.
2924 * @param pImage VMDK image instance.
2925 * @param pFile The sparse file handle.
2926 */
2927static int vmdkDescriptorReadSparse(PVMDKIMAGE pImage, PVMDKFILE pFile)
2928{
2929 /* It's a hosted single-extent image. */
2930 int rc = vmdkCreateExtents(pImage, 1);
2931 if (RT_SUCCESS(rc))
2932 {
2933 /* The opened file is passed to the extent. No separate descriptor
2934 * file, so no need to keep anything open for the image. */
2935 PVMDKEXTENT pExtent = &pImage->pExtents[0];
2936 pExtent->pFile = pFile;
2937 pImage->pFile = NULL;
2938 pExtent->pszFullname = RTPathAbsDup(pImage->pszFilename);
2939 if (RT_LIKELY(pExtent->pszFullname))
2940 {
2941 /* As we're dealing with a monolithic image here, there must
2942 * be a descriptor embedded in the image file. */
2943 rc = vmdkReadBinaryMetaExtent(pImage, pExtent, true /* fMagicAlreadyRead */);
2944 if ( RT_SUCCESS(rc)
2945 && pExtent->uDescriptorSector
2946 && pExtent->cDescriptorSectors)
2947 {
2948 /* HACK: extend the descriptor if it is unusually small and it fits in
2949 * the unused space after the image header. This allows opening VMDK files
2950 * with an extremely small descriptor in read/write mode.
2951 *
2952 * The previous version introduced a possible regression for VMDK stream
2953 * optimized images from VMware, which tend to have only a single-sector
2954 * descriptor. Increasing the descriptor size resulted in adding the various
2955 * UUID entries required to make it work with VBox, but for stream optimized
2956 * images the updated binary header wasn't written to the disk, creating a
2957 * mismatch between the advertised and the real descriptor size.
2958 *
2959 * The descriptor size is now increased even for images opened read-only if
2960 * there is enough room, but the new value is not written back to the image.
2961 */
2962 if ( pExtent->cDescriptorSectors < 3
2963 && (int64_t)pExtent->uSectorGD - pExtent->uDescriptorSector >= 4
2964 && (!pExtent->uSectorRGD || (int64_t)pExtent->uSectorRGD - pExtent->uDescriptorSector >= 4))
2965 {
2966 uint64_t cDescriptorSectorsOld = pExtent->cDescriptorSectors;
2967 pExtent->cDescriptorSectors = 4;
2968 if (!(pImage->uOpenFlags & VD_OPEN_FLAGS_READONLY))
2969 {
2970 /*
2971 * Update the on-disk value now to make sure we don't introduce inconsistencies
2972 * for stream optimized images from VMware, where the descriptor is just
2973 * one sector big (the binary header is not written to disk for complete
2974 * stream optimized images in vmdkFlushImage()).
2975 */
2976 uint64_t u64DescSizeNew = RT_H2LE_U64(pExtent->cDescriptorSectors);
2977 rc = vdIfIoIntFileWriteSync(pImage->pIfIo, pFile->pStorage,
2978 RT_UOFFSETOF(SparseExtentHeader, descriptorSize),
2979 &u64DescSizeNew, sizeof(u64DescSizeNew));
2980 if (RT_FAILURE(rc))
2981 {
2982 LogFlowFunc(("Increasing the descriptor size failed with %Rrc\n", rc));
2983 /* Restore the old size and carry on. */
2984 pExtent->cDescriptorSectors = cDescriptorSectorsOld;
2985 }
2986 }
2987 }
2988 /* Read the descriptor from the extent. */
2989 pExtent->pDescData = (char *)RTMemAllocZ(VMDK_SECTOR2BYTE(pExtent->cDescriptorSectors));
2990 if (RT_LIKELY(pExtent->pDescData))
2991 {
2992 rc = vdIfIoIntFileReadSync(pImage->pIfIo, pExtent->pFile->pStorage,
2993 VMDK_SECTOR2BYTE(pExtent->uDescriptorSector),
2994 pExtent->pDescData,
2995 VMDK_SECTOR2BYTE(pExtent->cDescriptorSectors));
2996 if (RT_SUCCESS(rc))
2997 {
2998 rc = vmdkParseDescriptor(pImage, pExtent->pDescData,
2999 VMDK_SECTOR2BYTE(pExtent->cDescriptorSectors));
3000 if ( RT_SUCCESS(rc)
3001 && ( !(pImage->uImageFlags & VD_VMDK_IMAGE_FLAGS_STREAM_OPTIMIZED)
3002 || !(pImage->uOpenFlags & VD_OPEN_FLAGS_ASYNC_IO)))
3003 {
3004 rc = vmdkReadMetaExtent(pImage, pExtent);
3005 if (RT_SUCCESS(rc))
3006 {
3007 /* Mark the extent as unclean if opened in read-write mode. */
3008 if ( !(pImage->uOpenFlags & VD_OPEN_FLAGS_READONLY)
3009 && !(pImage->uImageFlags & VD_VMDK_IMAGE_FLAGS_STREAM_OPTIMIZED))
3010 {
3011 pExtent->fUncleanShutdown = true;
3012 pExtent->fMetaDirty = true;
3013 }
3014 }
3015 }
3016 else if (RT_SUCCESS(rc))
3017 rc = VERR_NOT_SUPPORTED;
3018 }
3019 else
3020 rc = vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: read error for descriptor in '%s'"), pExtent->pszFullname);
3021 }
3022 else
3023 rc = VERR_NO_MEMORY;
3024 }
3025 else if (RT_SUCCESS(rc))
3026 rc = vdIfError(pImage->pIfError, VERR_VD_VMDK_INVALID_HEADER, RT_SRC_POS, N_("VMDK: monolithic image without descriptor in '%s'"), pImage->pszFilename);
3027 }
3028 else
3029 rc = VERR_NO_MEMORY;
3030 }
3031 return rc;
3032}
3033/**
3034 * Reads the descriptor from a pure text file.
3035 *
3036 * @returns VBox status code.
3037 * @param pImage VMDK image instance.
3038 * @param pFile The descriptor file handle.
3039 */
3040static int vmdkDescriptorReadAscii(PVMDKIMAGE pImage, PVMDKFILE pFile)
3041{
3042 /* Allocate at least 10K, and make sure that there is 5K free space
3043 * in case new entries need to be added to the descriptor. Never
3044 * allocate more than 128K, because that is not a valid descriptor file
3045 * and will result in the correct "truncated read" error handling. */
3046 uint64_t cbFileSize;
3047 int rc = vdIfIoIntFileGetSize(pImage->pIfIo, pFile->pStorage, &cbFileSize);
3048 if ( RT_SUCCESS(rc)
3049 && cbFileSize >= 50)
3050 {
3051 uint64_t cbSize = cbFileSize;
3052 if (cbSize % VMDK_SECTOR2BYTE(10))
3053 cbSize += VMDK_SECTOR2BYTE(20) - cbSize % VMDK_SECTOR2BYTE(10);
3054 else
3055 cbSize += VMDK_SECTOR2BYTE(10);
3056 cbSize = RT_MIN(cbSize, _128K);
3057 pImage->cbDescAlloc = RT_MAX(VMDK_SECTOR2BYTE(20), cbSize);
3058 pImage->pDescData = (char *)RTMemAllocZ(pImage->cbDescAlloc);
3059 if (RT_LIKELY(pImage->pDescData))
3060 {
3061 rc = vdIfIoIntFileReadSync(pImage->pIfIo, pFile->pStorage, 0, pImage->pDescData,
3062 RT_MIN(pImage->cbDescAlloc, cbFileSize));
3063 if (RT_SUCCESS(rc))
3064 {
3065#if 0 /** @todo Revisit */
3066 cbRead += sizeof(u32Magic);
3067 if (cbRead == pImage->cbDescAlloc)
3068 {
3069 /* Likely the read is truncated. Better fail a bit too early
3070 * (normally the descriptor is much smaller than our buffer). */
3071 rc = vdIfError(pImage->pIfError, VERR_VD_VMDK_INVALID_HEADER, RT_SRC_POS, N_("VMDK: cannot read descriptor in '%s'"), pImage->pszFilename);
3072 goto out;
3073 }
3074#endif
3075 rc = vmdkParseDescriptor(pImage, pImage->pDescData,
3076 pImage->cbDescAlloc);
3077 if (RT_SUCCESS(rc))
3078 {
3079 for (unsigned i = 0; i < pImage->cExtents && RT_SUCCESS(rc); i++)
3080 {
3081 PVMDKEXTENT pExtent = &pImage->pExtents[i];
3082 if (pExtent->pszBasename)
3083 {
3084 /* Hack to figure out whether the specified name in the
3085 * extent descriptor is absolute. Doesn't always work, but
3086 * should be good enough for now. */
3087 char *pszFullname;
3088 /** @todo implement proper path absolute check. */
3089 if (pExtent->pszBasename[0] == RTPATH_SLASH)
3090 {
3091 pszFullname = RTStrDup(pExtent->pszBasename);
3092 if (!pszFullname)
3093 {
3094 rc = VERR_NO_MEMORY;
3095 break;
3096 }
3097 }
3098 else
3099 {
3100 char *pszDirname = RTStrDup(pImage->pszFilename);
3101 if (!pszDirname)
3102 {
3103 rc = VERR_NO_MEMORY;
3104 break;
3105 }
3106 RTPathStripFilename(pszDirname);
3107 pszFullname = RTPathJoinA(pszDirname, pExtent->pszBasename);
3108 RTStrFree(pszDirname);
3109 if (!pszFullname)
3110 {
3111 rc = VERR_NO_STR_MEMORY;
3112 break;
3113 }
3114 }
3115 pExtent->pszFullname = pszFullname;
3116 }
3117 else
3118 pExtent->pszFullname = NULL;
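                        /* Extents marked read-only in the descriptor are opened read-only regardless of the image open flags. */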
3119 unsigned uOpenFlags = pImage->uOpenFlags | ((pExtent->enmAccess == VMDKACCESS_READONLY) ? VD_OPEN_FLAGS_READONLY : 0);
3120 switch (pExtent->enmType)
3121 {
3122 case VMDKETYPE_HOSTED_SPARSE:
3123 rc = vmdkFileOpen(pImage, &pExtent->pFile, pExtent->pszBasename, pExtent->pszFullname,
3124 VDOpenFlagsToFileOpenFlags(uOpenFlags, false /* fCreate */));
3125 if (RT_FAILURE(rc))
3126 {
3127 /* Do NOT signal an appropriate error here, as the VD
3128 * layer has the choice of retrying the open if it
3129 * failed. */
3130 break;
3131 }
3132 rc = vmdkReadBinaryMetaExtent(pImage, pExtent,
3133 false /* fMagicAlreadyRead */);
3134 if (RT_FAILURE(rc))
3135 break;
3136 rc = vmdkReadMetaExtent(pImage, pExtent);
3137 if (RT_FAILURE(rc))
3138 break;
3139 /* Mark extent as unclean if opened in read-write mode. */
3140 if (!(pImage->uOpenFlags & VD_OPEN_FLAGS_READONLY))
3141 {
3142 pExtent->fUncleanShutdown = true;
3143 pExtent->fMetaDirty = true;
3144 }
3145 break;
3146 case VMDKETYPE_VMFS:
3147 case VMDKETYPE_FLAT:
3148 rc = vmdkFileOpen(pImage, &pExtent->pFile, pExtent->pszBasename, pExtent->pszFullname,
3149 VDOpenFlagsToFileOpenFlags(uOpenFlags, false /* fCreate */));
3150 if (RT_FAILURE(rc))
3151 {
3152 /* Do NOT signal an appropriate error here, as the VD
3153 * layer has the choice of retrying the open if it
3154 * failed. */
3155 break;
3156 }
3157 break;
3158 case VMDKETYPE_ZERO:
3159 /* Nothing to do. */
3160 break;
3161 default:
3162 AssertMsgFailed(("unknown vmdk extent type %d\n", pExtent->enmType));
3163 }
3164 }
3165 }
3166 }
3167 else
3168 rc = vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: read error for descriptor in '%s'"), pImage->pszFilename);
3169 }
3170 else
3171 rc = VERR_NO_MEMORY;
3172 }
3173 else if (RT_SUCCESS(rc))
3174 rc = vdIfError(pImage->pIfError, VERR_VD_VMDK_INVALID_HEADER, RT_SRC_POS, N_("VMDK: descriptor in '%s' is too short"), pImage->pszFilename);
3175 return rc;
3176}
3177/**
3178 * Read and process the descriptor based on the image type.
3179 *
3180 * @returns VBox status code.
3181 * @param pImage VMDK image instance.
3182 * @param pFile VMDK file handle.
3183 */
3184static int vmdkDescriptorRead(PVMDKIMAGE pImage, PVMDKFILE pFile)
3185{
3186 uint32_t u32Magic;
3187 /* Read magic (if present). */
3188 int rc = vdIfIoIntFileReadSync(pImage->pIfIo, pFile->pStorage, 0,
3189 &u32Magic, sizeof(u32Magic));
3190 if (RT_SUCCESS(rc))
3191 {
3192 /* Handle the file according to its magic number. */
3193 if (RT_LE2H_U32(u32Magic) == VMDK_SPARSE_MAGICNUMBER)
3194 rc = vmdkDescriptorReadSparse(pImage, pFile);
3195 else
3196 rc = vmdkDescriptorReadAscii(pImage, pFile);
3197 }
3198 else
3199 {
3200 vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: error reading the magic number in '%s'"), pImage->pszFilename);
3201 rc = VERR_VD_VMDK_INVALID_HEADER;
3202 }
3203 return rc;
3204}
3205/**
3206 * Internal: Open an image, constructing all necessary data structures.
3207 */
3208static int vmdkOpenImage(PVMDKIMAGE pImage, unsigned uOpenFlags)
3209{
3210 pImage->uOpenFlags = uOpenFlags;
3211 pImage->pIfError = VDIfErrorGet(pImage->pVDIfsDisk);
3212 pImage->pIfIo = VDIfIoIntGet(pImage->pVDIfsImage);
3213 AssertPtrReturn(pImage->pIfIo, VERR_INVALID_PARAMETER);
3214 /*
3215 * Open the image.
3216 * We don't have to check for asynchronous access because
3217 * we only support raw access and the opened file is a description
3218 * file where no data is stored.
3219 */
3220 PVMDKFILE pFile;
3221 int rc = vmdkFileOpen(pImage, &pFile, NULL, pImage->pszFilename,
3222 VDOpenFlagsToFileOpenFlags(uOpenFlags, false /* fCreate */));
3223 if (RT_SUCCESS(rc))
3224 {
3225 pImage->pFile = pFile;
3226 rc = vmdkDescriptorRead(pImage, pFile);
3227 if (RT_SUCCESS(rc))
3228 {
3229 /* Determine PCHS geometry if not set. */
3230 if (pImage->PCHSGeometry.cCylinders == 0)
3231 {
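            /* Derive the cylinder count from the image size, clamped to the traditional 16383-cylinder CHS limit. */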
3232 uint64_t cCylinders = VMDK_BYTE2SECTOR(pImage->cbSize)
3233 / pImage->PCHSGeometry.cHeads
3234 / pImage->PCHSGeometry.cSectors;
3235 pImage->PCHSGeometry.cCylinders = (unsigned)RT_MIN(cCylinders, 16383);
3236 if ( !(pImage->uOpenFlags & VD_OPEN_FLAGS_READONLY)
3237 && !(pImage->uImageFlags & VD_VMDK_IMAGE_FLAGS_STREAM_OPTIMIZED))
3238 {
3239 rc = vmdkDescSetPCHSGeometry(pImage, &pImage->PCHSGeometry);
3240 AssertRC(rc);
3241 }
3242 }
3243            /* Update the image metadata now in case it has changed. */
3244 rc = vmdkFlushImage(pImage, NULL);
3245 if (RT_SUCCESS(rc))
3246 {
3247 /* Figure out a few per-image constants from the extents. */
3248 pImage->cbSize = 0;
3249 for (unsigned i = 0; i < pImage->cExtents; i++)
3250 {
3251 PVMDKEXTENT pExtent = &pImage->pExtents[i];
3252 if (pExtent->enmType == VMDKETYPE_HOSTED_SPARSE)
3253 {
3254                    /* There used to be a check here whether the nominal size of an extent
3255 * is a multiple of the grain size. The spec says that this is
3256 * always the case, but unfortunately some files out there in the
3257 * wild violate the spec (e.g. ReactOS 0.3.1). */
3258 }
3259 else if ( pExtent->enmType == VMDKETYPE_FLAT
3260 || pExtent->enmType == VMDKETYPE_ZERO)
3261 pImage->uImageFlags |= VD_IMAGE_FLAGS_FIXED;
3262 pImage->cbSize += VMDK_SECTOR2BYTE(pExtent->cNominalSectors);
3263 }
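                /* A grain table cache is needed unless the image is stream optimized and opened read-only for sequential access. */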
3264 if ( !(pImage->uImageFlags & VD_VMDK_IMAGE_FLAGS_STREAM_OPTIMIZED)
3265 || !(pImage->uOpenFlags & VD_OPEN_FLAGS_READONLY)
3266 || !(pImage->uOpenFlags & VD_OPEN_FLAGS_SEQUENTIAL))
3267 rc = vmdkAllocateGrainTableCache(pImage);
3268 }
3269 }
3270 }
3271 /* else: Do NOT signal an appropriate error here, as the VD layer has the
3272 * choice of retrying the open if it failed. */
3273 if (RT_SUCCESS(rc))
3274 {
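        /* Publish a single region describing the whole disk as raw 512-byte sectors. */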
3275 PVDREGIONDESC pRegion = &pImage->RegionList.aRegions[0];
3276 pImage->RegionList.fFlags = 0;
3277 pImage->RegionList.cRegions = 1;
3278 pRegion->offRegion = 0; /* Disk start. */
3279 pRegion->cbBlock = 512;
3280 pRegion->enmDataForm = VDREGIONDATAFORM_RAW;
3281 pRegion->enmMetadataForm = VDREGIONMETADATAFORM_NONE;
3282 pRegion->cbData = 512;
3283 pRegion->cbMetadata = 0;
3284 pRegion->cRegionBlocksOrBytes = pImage->cbSize;
3285 }
3286 else
3287 vmdkFreeImage(pImage, false, false /*fFlush*/); /* Don't try to flush anything if opening failed. */
3288 return rc;
3289}
3290/**
3291 * Frees a raw descriptor.
3292 * @internal
3293 */
3294static int vmdkRawDescFree(PVDISKRAW pRawDesc)
3295{
3296 if (!pRawDesc)
3297 return VINF_SUCCESS;
3298 RTStrFree(pRawDesc->pszRawDisk);
3299 pRawDesc->pszRawDisk = NULL;
3300 /* Partitions: */
3301 for (unsigned i = 0; i < pRawDesc->cPartDescs; i++)
3302 {
3303 RTStrFree(pRawDesc->pPartDescs[i].pszRawDevice);
3304 pRawDesc->pPartDescs[i].pszRawDevice = NULL;
3305 RTMemFree(pRawDesc->pPartDescs[i].pvPartitionData);
3306 pRawDesc->pPartDescs[i].pvPartitionData = NULL;
3307 }
3308 RTMemFree(pRawDesc->pPartDescs);
3309 pRawDesc->pPartDescs = NULL;
3310 RTMemFree(pRawDesc);
3311 return VINF_SUCCESS;
3312}
3313/**
3314 * Helper that grows the raw partition descriptor table by @a cToAdd entries,
3315 * returning the pointer to the first new entry.
3316 * @internal
3317 */
3318static int vmdkRawDescAppendPartDesc(PVMDKIMAGE pImage, PVDISKRAW pRawDesc, uint32_t cToAdd, PVDISKRAWPARTDESC *ppRet)
3319{
3320 uint32_t const cOld = pRawDesc->cPartDescs;
3321 uint32_t const cNew = cOld + cToAdd;
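    /* Note: RTMemReallocZ zeroes the newly added tail, so the appended descriptors start out blank. */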
3322 PVDISKRAWPARTDESC paNew = (PVDISKRAWPARTDESC)RTMemReallocZ(pRawDesc->pPartDescs,
3323 cOld * sizeof(pRawDesc->pPartDescs[0]),
3324 cNew * sizeof(pRawDesc->pPartDescs[0]));
3325 if (paNew)
3326 {
3327 pRawDesc->cPartDescs = cNew;
3328 pRawDesc->pPartDescs = paNew;
3329 *ppRet = &paNew[cOld];
3330 return VINF_SUCCESS;
3331 }
3332 *ppRet = NULL;
3333 return vdIfError(pImage->pIfError, VERR_NO_MEMORY, RT_SRC_POS,
3334 N_("VMDK: Image path: '%s'. Out of memory growing the partition descriptors (%u -> %u)."),
3335 pImage->pszFilename, cOld, cNew);
3336}
3337/**
3338 * @callback_method_impl{FNRTSORTCMP}
3339 */
3340static DECLCALLBACK(int) vmdkRawDescPartComp(void const *pvElement1, void const *pvElement2, void *pvUser)
3341{
3342 RT_NOREF(pvUser);
3343 int64_t const iDelta = ((PVDISKRAWPARTDESC)pvElement1)->offStartInVDisk - ((PVDISKRAWPARTDESC)pvElement2)->offStartInVDisk;
3344 return iDelta < 0 ? -1 : iDelta > 0 ? 1 : 0;
3345}
3346/**
3347 * Post processes the partition descriptors.
3348 *
3349 * Sorts them and checks that they don't overlap.
3350 */
3351static int vmdkRawDescPostProcessPartitions(PVMDKIMAGE pImage, PVDISKRAW pRawDesc, uint64_t cbSize)
3352{
3353 /*
3354 * Sort data areas in ascending order of start.
3355 */
3356 RTSortShell(pRawDesc->pPartDescs, pRawDesc->cPartDescs, sizeof(pRawDesc->pPartDescs[0]), vmdkRawDescPartComp, NULL);
3357 /*
3358 * Check that we don't have overlapping descriptors. If we do, that's an
3359 * indication that the drive is corrupt or that the RTDvm code is buggy.
3360 */
3361 VDISKRAWPARTDESC const *paPartDescs = pRawDesc->pPartDescs;
3362 for (uint32_t i = 0; i < pRawDesc->cPartDescs; i++)
3363 {
3364 uint64_t offLast = paPartDescs[i].offStartInVDisk + paPartDescs[i].cbData;
3365 if (offLast <= paPartDescs[i].offStartInVDisk)
3366 return vdIfError(pImage->pIfError, VERR_FILESYSTEM_CORRUPT /*?*/, RT_SRC_POS,
3367 N_("VMDK: Image path: '%s'. Bogus partition descriptor #%u (%#RX64 LB %#RX64%s): Wrap around or zero"),
3368 pImage->pszFilename, i, paPartDescs[i].offStartInVDisk, paPartDescs[i].cbData,
3369 paPartDescs[i].pvPartitionData ? " (data)" : "");
3370 offLast -= 1;
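        /* offLast is now the inclusive offset of the last byte covered by this descriptor. */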
3371 if (i + 1 < pRawDesc->cPartDescs && offLast >= paPartDescs[i + 1].offStartInVDisk)
3372 return vdIfError(pImage->pIfError, VERR_FILESYSTEM_CORRUPT /*?*/, RT_SRC_POS,
3373 N_("VMDK: Image path: '%s'. Partition descriptor #%u (%#RX64 LB %#RX64%s) overlaps with the next (%#RX64 LB %#RX64%s)"),
3374 pImage->pszFilename, i, paPartDescs[i].offStartInVDisk, paPartDescs[i].cbData,
3375 paPartDescs[i].pvPartitionData ? " (data)" : "", paPartDescs[i + 1].offStartInVDisk,
3376 paPartDescs[i + 1].cbData, paPartDescs[i + 1].pvPartitionData ? " (data)" : "");
3377 if (offLast >= cbSize)
3378 return vdIfError(pImage->pIfError, VERR_FILESYSTEM_CORRUPT /*?*/, RT_SRC_POS,
3379 N_("VMDK: Image path: '%s'. Partition descriptor #%u (%#RX64 LB %#RX64%s) goes beyond the end of the drive (%#RX64)"),
3380 pImage->pszFilename, i, paPartDescs[i].offStartInVDisk, paPartDescs[i].cbData,
3381 paPartDescs[i].pvPartitionData ? " (data)" : "", cbSize);
3382 }
3383 return VINF_SUCCESS;
3384}
3385#ifdef RT_OS_LINUX
3386/**
3387 * Searches the dir specified in @a pszBlockDevDir for subdirectories with a
3388 * 'dev' file matching @a uDevToLocate.
3389 *
3390 * This is used both for locating the drive itself and the individual partition under /sys/block.
3391 *
3392 * @returns IPRT status code, errors have been reported properly.
3393 * @param pImage For error reporting.
3394 * @param pszBlockDevDir Input: Path to the directory to search under.
3395 * Output: Path to the directory containing information
3396 * for @a uDevToLocate.
3397 * @param cbBlockDevDir The size of the buffer @a pszBlockDevDir points to.
3398 * @param uDevToLocate The device number of the block device info dir to
3399 * locate.
3400 * @param pszDevToLocate For error reporting.
3401 */
3402static int vmdkFindSysBlockDevPath(PVMDKIMAGE pImage, char *pszBlockDevDir, size_t cbBlockDevDir,
3403 dev_t uDevToLocate, const char *pszDevToLocate)
3404{
3405 size_t const cchDir = RTPathEnsureTrailingSeparator(pszBlockDevDir, cbBlockDevDir);
3406 AssertReturn(cchDir > 0, VERR_BUFFER_OVERFLOW);
3407 RTDIR hDir = NIL_RTDIR;
3408 int rc = RTDirOpen(&hDir, pszBlockDevDir);
3409 if (RT_SUCCESS(rc))
3410 {
3411 for (;;)
3412 {
3413 RTDIRENTRY Entry;
3414 rc = RTDirRead(hDir, &Entry, NULL);
3415 if (RT_SUCCESS(rc))
3416 {
3417 /* We're interested in directories and symlinks. */
3418 if ( Entry.enmType == RTDIRENTRYTYPE_DIRECTORY
3419 || Entry.enmType == RTDIRENTRYTYPE_SYMLINK
3420 || Entry.enmType == RTDIRENTRYTYPE_UNKNOWN)
3421 {
3422 rc = RTStrCopy(&pszBlockDevDir[cchDir], cbBlockDevDir - cchDir, Entry.szName);
3423 AssertContinue(RT_SUCCESS(rc)); /* should not happen! */
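                    /* Initialize to a value that cannot match, so a failed sysfs read cannot produce a false positive. */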
3424 dev_t uThisDevNo = ~uDevToLocate;
3425 rc = RTLinuxSysFsReadDevNumFile(&uThisDevNo, "%s/dev", pszBlockDevDir);
3426 if (RT_SUCCESS(rc) && uThisDevNo == uDevToLocate)
3427 break;
3428 }
3429 }
3430 else
3431 {
3432 pszBlockDevDir[cchDir] = '\0';
3433 if (rc == VERR_NO_MORE_FILES)
3434 rc = vdIfError(pImage->pIfError, VERR_NOT_FOUND, RT_SRC_POS,
3435 N_("VMDK: Image path: '%s'. Failed to locate device corresponding to '%s' under '%s'"),
3436 pImage->pszFilename, pszDevToLocate, pszBlockDevDir);
3437 else
3438 rc = vdIfError(pImage->pIfError, rc, RT_SRC_POS,
3439 N_("VMDK: Image path: '%s'. RTDirRead failed enumerating '%s': %Rrc"),
3440 pImage->pszFilename, pszBlockDevDir, rc);
3441 break;
3442 }
3443 }
3444 RTDirClose(hDir);
3445 }
3446 else
3447 rc = vdIfError(pImage->pIfError, rc, RT_SRC_POS,
3448 N_("VMDK: Image path: '%s'. Failed to open dir '%s' for listing: %Rrc"),
3449 pImage->pszFilename, pszBlockDevDir, rc);
3450 return rc;
3451}
3452#endif /* RT_OS_LINUX */
3453#ifdef RT_OS_FREEBSD
3454/**
3455 * Reads the config data from the provider and returns offset and size
3456 *
3457 * @return IPRT status code
3458 * @param pProvider GEOM provider representing partition
3459 * @param pcbOffset Placeholder for the offset of the partition
3460 * @param pcbSize Placeholder for the size of the partition
3461 */
3462static int vmdkReadPartitionsParamsFromProvider(gprovider *pProvider, uint64_t *pcbOffset, uint64_t *pcbSize)
3463{
3464 gconfig *pConfEntry;
3465 int rc = VERR_NOT_FOUND;
3466 /*
3467 * Required parameters are located in the list containing key/value pairs.
3468 * Both key and value are in text form. The manual says nothing about whether
3469 * both parameters must be present in the list, so there are
3470 * cases where only one of them is. To handle such cases we treat
3471 * absent params as zero, letting the caller decide whether the result is correct
3472 * or an error.
3473 */
3474 uint64_t cbOffset = 0;
3475 uint64_t cbSize = 0;
3476 LIST_FOREACH(pConfEntry, &pProvider->lg_config, lg_config)
3477 {
3478 if (RTStrCmp(pConfEntry->lg_name, "offset") == 0)
3479 {
3480 cbOffset = RTStrToUInt64(pConfEntry->lg_val);
3481 rc = VINF_SUCCESS;
3482 }
3483 else if (RTStrCmp(pConfEntry->lg_name, "length") == 0)
3484 {
3485 cbSize = RTStrToUInt64(pConfEntry->lg_val);
3486 rc = VINF_SUCCESS;
3487 }
3488 }
3489 if (RT_SUCCESS(rc))
3490 {
3491 *pcbOffset = cbOffset;
3492 *pcbSize = cbSize;
3493 }
3494 return rc;
3495}
3496/**
3497 * Searches the partition specified by name and calculates its size and absolute offset.
3498 *
3499 * @return IPRT status code.
3500 * @param pParentClass Class containing pParentGeom
3501 * @param pszParentGeomName Name of the parent geom where we are looking for provider
3502 * @param pszProviderName Name of the provider we are looking for
3503 * @param pcbAbsoluteOffset Placeholder for the absolute offset of the partition, i.e. offset from the beginning of the disk
3504 * @param pcbSize Placeholder for the size of the partition.
3505 */
3506static int vmdkFindPartitionParamsByName(gclass *pParentClass, const char *pszParentGeomName, const char *pszProviderName,
3507 uint64_t *pcbAbsoluteOffset, uint64_t *pcbSize)
3508{
3509 AssertReturn(pParentClass, VERR_INVALID_PARAMETER);
3510 AssertReturn(pszParentGeomName, VERR_INVALID_PARAMETER);
3511 AssertReturn(pszProviderName, VERR_INVALID_PARAMETER);
3512 AssertReturn(pcbAbsoluteOffset, VERR_INVALID_PARAMETER);
3513 AssertReturn(pcbSize, VERR_INVALID_PARAMETER);
3514 ggeom *pParentGeom;
3515 int rc = VERR_NOT_FOUND;
3516 LIST_FOREACH(pParentGeom, &pParentClass->lg_geom, lg_geom)
3517 {
3518 if (RTStrCmp(pParentGeom->lg_name, pszParentGeomName) == 0)
3519 {
3520 rc = VINF_SUCCESS;
3521 break;
3522 }
3523 }
3524 if (RT_FAILURE(rc))
3525 return rc;
3526 gprovider *pProvider;
3527 /*
3528 * First, go over the providers without handling EBR or BSDLabel
3529 * partitions, for the case when the provider we are looking for is a
3530 * direct child of the given geom; this reduces the search time.
3531 */
3532 LIST_FOREACH(pProvider, &pParentGeom->lg_provider, lg_provider)
3533 {
3534 if (RTStrCmp(pProvider->lg_name, pszProviderName) == 0)
3535 return vmdkReadPartitionsParamsFromProvider(pProvider, pcbAbsoluteOffset, pcbSize);
3536 }
3537 /*
3538 * No provider found. Go over the parent geom again
3539 * and recurse if a geom represents an EBR or BSDLabel.
3540 * In that case the given parent geom contains only the EBR or BSDLabel
3541 * partition itself and its sub-partitions live in separate
3542 * geoms. Also, partition offsets are relative to the geom, so
3543 * we have to add the offset from the child provider to that of the
3544 * parent geom's provider.
3545 */
3546 LIST_FOREACH(pProvider, &pParentGeom->lg_provider, lg_provider)
3547 {
3548 uint64_t cbOffset = 0;
3549 uint64_t cbSize = 0;
3550 rc = vmdkReadPartitionsParamsFromProvider(pProvider, &cbOffset, &cbSize);
3551 if (RT_FAILURE(rc))
3552 return rc;
3553 uint64_t cbProviderOffset = 0;
3554 uint64_t cbProviderSize = 0;
3555 rc = vmdkFindPartitionParamsByName(pParentClass, pProvider->lg_name, pszProviderName, &cbProviderOffset, &cbProviderSize);
3556 if (RT_SUCCESS(rc))
3557 {
3558 *pcbAbsoluteOffset = cbOffset + cbProviderOffset;
3559 *pcbSize = cbProviderSize;
3560 return rc;
3561 }
3562 }
3563 return VERR_NOT_FOUND;
3564}
3565#endif
3566/**
3567 * Attempts to verify the raw partition path.
3568 *
3569 * We don't want to trust RTDvm and the partition device node morphing blindly.
3570 */
3571static int vmdkRawDescVerifyPartitionPath(PVMDKIMAGE pImage, PVDISKRAWPARTDESC pPartDesc, uint32_t idxPartition,
3572 const char *pszRawDrive, RTFILE hRawDrive, uint32_t cbSector, RTDVMVOLUME hVol)
3573{
3574 RT_NOREF(pImage, pPartDesc, idxPartition, pszRawDrive, hRawDrive, cbSector, hVol);
3575 /*
3576 * Try open the raw partition device.
3577 */
3578 RTFILE hRawPart = NIL_RTFILE;
3579 int rc = RTFileOpen(&hRawPart, pPartDesc->pszRawDevice, RTFILE_O_READ | RTFILE_O_OPEN | RTFILE_O_DENY_NONE);
3580 if (RT_FAILURE(rc))
3581 return vdIfError(pImage->pIfError, rc, RT_SRC_POS,
3582 N_("VMDK: Image path: '%s'. Failed to open partition #%u on '%s' via '%s' (%Rrc)"),
3583 pImage->pszFilename, idxPartition, pszRawDrive, pPartDesc->pszRawDevice, rc);
3584 /*
3585 * Compare the partition UUID if we can get it.
3586 */
3587#ifdef RT_OS_WINDOWS
3588 DWORD cbReturned;
3589 /* 1. Get the device numbers for both handles, they should have the same disk. */
3590 STORAGE_DEVICE_NUMBER DevNum1;
3591 RT_ZERO(DevNum1);
3592 if (!DeviceIoControl((HANDLE)RTFileToNative(hRawDrive), IOCTL_STORAGE_GET_DEVICE_NUMBER,
3593 NULL /*pvInBuffer*/, 0 /*cbInBuffer*/, &DevNum1, sizeof(DevNum1), &cbReturned, NULL /*pOverlapped*/))
3594 rc = vdIfError(pImage->pIfError, RTErrConvertFromWin32(GetLastError()), RT_SRC_POS,
3595 N_("VMDK: Image path: '%s'. IOCTL_STORAGE_GET_DEVICE_NUMBER failed on '%s': %u"),
3596 pImage->pszFilename, pszRawDrive, GetLastError());
3597 STORAGE_DEVICE_NUMBER DevNum2;
3598 RT_ZERO(DevNum2);
3599 if (!DeviceIoControl((HANDLE)RTFileToNative(hRawPart), IOCTL_STORAGE_GET_DEVICE_NUMBER,
3600 NULL /*pvInBuffer*/, 0 /*cbInBuffer*/, &DevNum2, sizeof(DevNum2), &cbReturned, NULL /*pOverlapped*/))
3601 rc = vdIfError(pImage->pIfError, RTErrConvertFromWin32(GetLastError()), RT_SRC_POS,
3602 N_("VMDK: Image path: '%s'. IOCTL_STORAGE_GET_DEVICE_NUMBER failed on '%s': %u"),
3603 pImage->pszFilename, pPartDesc->pszRawDevice, GetLastError());
3604 if ( RT_SUCCESS(rc)
3605 && ( DevNum1.DeviceNumber != DevNum2.DeviceNumber
3606 || DevNum1.DeviceType != DevNum2.DeviceType))
3607 rc = vdIfError(pImage->pIfError, VERR_MISMATCH, RT_SRC_POS,
3608 N_("VMDK: Image path: '%s'. Partition #%u path ('%s') verification failed on '%s' (%#x != %#x || %#x != %#x)"),
3609 pImage->pszFilename, idxPartition, pPartDesc->pszRawDevice, pszRawDrive,
3610 DevNum1.DeviceNumber, DevNum2.DeviceNumber, DevNum1.DeviceType, DevNum2.DeviceType);
3611 if (RT_SUCCESS(rc))
3612 {
3613 /* Get the partitions from the raw drive and match up with the volume info
3614 from RTDvm. The partition number is found in DevNum2. */
3615 DWORD cbNeeded = 0;
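        /* Probe for the required buffer size; the IOCTL typically doesn't report it on failure, so fall back to a generous guess of 64 entries plus slack. */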
3616 if ( DeviceIoControl((HANDLE)RTFileToNative(hRawDrive), IOCTL_DISK_GET_DRIVE_LAYOUT_EX,
3617 NULL /*pvInBuffer*/, 0 /*cbInBuffer*/, NULL, 0, &cbNeeded, NULL /*pOverlapped*/)
3618 || cbNeeded < RT_UOFFSETOF_DYN(DRIVE_LAYOUT_INFORMATION_EX, PartitionEntry[1]))
3619 cbNeeded = RT_UOFFSETOF_DYN(DRIVE_LAYOUT_INFORMATION_EX, PartitionEntry[64]);
3620 cbNeeded += sizeof(PARTITION_INFORMATION_EX) * 2; /* just in case */
3621 DRIVE_LAYOUT_INFORMATION_EX *pLayout = (DRIVE_LAYOUT_INFORMATION_EX *)RTMemTmpAllocZ(cbNeeded);
3622 if (pLayout)
3623 {
3624 cbReturned = 0;
3625 if (DeviceIoControl((HANDLE)RTFileToNative(hRawDrive), IOCTL_DISK_GET_DRIVE_LAYOUT_EX,
3626 NULL /*pvInBuffer*/, 0 /*cbInBuffer*/, pLayout, cbNeeded, &cbReturned, NULL /*pOverlapped*/))
3627 {
3628 /* Find the entry with the given partition number (it's not an index, array contains empty MBR entries ++). */
3629 unsigned iEntry = 0;
3630 while ( iEntry < pLayout->PartitionCount
3631 && pLayout->PartitionEntry[iEntry].PartitionNumber != DevNum2.PartitionNumber)
3632 iEntry++;
3633 if (iEntry < pLayout->PartitionCount)
3634 {
3635 /* Compare the basics */
3636 PARTITION_INFORMATION_EX const * const pLayoutEntry = &pLayout->PartitionEntry[iEntry];
3637 if (pLayoutEntry->StartingOffset.QuadPart != (int64_t)pPartDesc->offStartInVDisk)
3638 rc = vdIfError(pImage->pIfError, VERR_MISMATCH, RT_SRC_POS,
3639 N_("VMDK: Image path: '%s'. Partition #%u path ('%s') verification failed on '%s': StartingOffset %RU64, expected %RU64"),
3640 pImage->pszFilename, idxPartition, pPartDesc->pszRawDevice, pszRawDrive,
3641 pLayoutEntry->StartingOffset.QuadPart, pPartDesc->offStartInVDisk);
3642 else if (pLayoutEntry->PartitionLength.QuadPart != (int64_t)pPartDesc->cbData)
3643 rc = vdIfError(pImage->pIfError, VERR_MISMATCH, RT_SRC_POS,
3644 N_("VMDK: Image path: '%s'. Partition #%u path ('%s') verification failed on '%s': PartitionLength %RU64, expected %RU64"),
3645 pImage->pszFilename, idxPartition, pPartDesc->pszRawDevice, pszRawDrive,
3646 pLayoutEntry->PartitionLength.QuadPart, pPartDesc->cbData);
3647 /** @todo We could compare the MBR type, GPT type and ID. */
3648 RT_NOREF(hVol);
3649 }
3650 else
3651 rc = vdIfError(pImage->pIfError, VERR_MISMATCH, RT_SRC_POS,
3652 N_("VMDK: Image path: '%s'. Partition #%u path ('%s') verification failed on '%s': PartitionCount (%#x vs %#x)"),
3653 pImage->pszFilename, idxPartition, pPartDesc->pszRawDevice, pszRawDrive,
3654 DevNum2.PartitionNumber, pLayout->PartitionCount);
3655# ifndef LOG_ENABLED
3656 if (RT_FAILURE(rc))
3657# endif
3658 {
3659 LogRel(("VMDK: Windows reports %u partitions for '%s':\n", pLayout->PartitionCount, pszRawDrive));
3660 PARTITION_INFORMATION_EX const *pEntry = &pLayout->PartitionEntry[0];
3661 for (DWORD i = 0; i < pLayout->PartitionCount; i++, pEntry++)
3662 {
3663 LogRel(("VMDK: #%u/%u: %016RU64 LB %016RU64 style=%d rewrite=%d",
3664 i, pEntry->PartitionNumber, pEntry->StartingOffset.QuadPart, pEntry->PartitionLength.QuadPart,
3665 pEntry->PartitionStyle, pEntry->RewritePartition));
3666 if (pEntry->PartitionStyle == PARTITION_STYLE_MBR)
3667 LogRel((" type=%#x boot=%d rec=%d hidden=%u\n", pEntry->Mbr.PartitionType, pEntry->Mbr.BootIndicator,
3668 pEntry->Mbr.RecognizedPartition, pEntry->Mbr.HiddenSectors));
3669 else if (pEntry->PartitionStyle == PARTITION_STYLE_GPT)
3670 LogRel((" type=%RTuuid id=%RTuuid aatrib=%RX64 name=%.36ls\n", &pEntry->Gpt.PartitionType,
3671 &pEntry->Gpt.PartitionId, pEntry->Gpt.Attributes, &pEntry->Gpt.Name[0]));
3672 else
3673 LogRel(("\n"));
3674 }
3675 LogRel(("VMDK: Looked for partition #%u (%u, '%s') at %RU64 LB %RU64\n", DevNum2.PartitionNumber,
3676 idxPartition, pPartDesc->pszRawDevice, pPartDesc->offStartInVDisk, pPartDesc->cbData));
3677 }
3678 }
3679 else
3680 rc = vdIfError(pImage->pIfError, RTErrConvertFromWin32(GetLastError()), RT_SRC_POS,
3681 N_("VMDK: Image path: '%s'. IOCTL_DISK_GET_DRIVE_LAYOUT_EX failed on '%s': %u (cb %u, cbRet %u)"),
3682 pImage->pszFilename, pPartDesc->pszRawDevice, GetLastError(), cbNeeded, cbReturned);
3683 RTMemTmpFree(pLayout);
3684 }
3685 else
3686 rc = VERR_NO_TMP_MEMORY;
3687 }
3688#elif defined(RT_OS_LINUX)
3689 RT_NOREF(hVol);
3690 /* Stat the two devices first to get their device numbers. (We probably
3691 could make some assumptions here about the major & minor number assignments
3692 for legacy nodes, but it doesn't hold up for nvme, so we'll skip that.) */
3693 struct stat StDrive, StPart;
3694 if (fstat((int)RTFileToNative(hRawDrive), &StDrive) != 0)
3695 rc = vdIfError(pImage->pIfError, RTErrConvertFromErrno(errno), RT_SRC_POS,
3696 N_("VMDK: Image path: '%s'. fstat failed on '%s': %d"), pImage->pszFilename, pszRawDrive, errno);
3697 else if (fstat((int)RTFileToNative(hRawPart), &StPart) != 0)
3698 rc = vdIfError(pImage->pIfError, RTErrConvertFromErrno(errno), RT_SRC_POS,
3699 N_("VMDK: Image path: '%s'. fstat failed on '%s': %d"), pImage->pszFilename, pPartDesc->pszRawDevice, errno);
3700 else
3701 {
3702 /* Scan the directories immediately under /sys/block/ for one with a
3703 'dev' file matching the drive's device number: */
3704 char szSysPath[RTPATH_MAX];
3705 rc = RTLinuxConstructPath(szSysPath, sizeof(szSysPath), "block/");
3706 AssertRCReturn(rc, rc); /* this shall not fail */
3707 if (RTDirExists(szSysPath))
3708 {
3709 rc = vmdkFindSysBlockDevPath(pImage, szSysPath, sizeof(szSysPath), StDrive.st_rdev, pszRawDrive);
3710 /* Now, scan the directories under that again for a partition device
3711 matching the hRawPart device's number: */
3712 if (RT_SUCCESS(rc))
3713 rc = vmdkFindSysBlockDevPath(pImage, szSysPath, sizeof(szSysPath), StPart.st_rdev, pPartDesc->pszRawDevice);
3714 /* Having found the /sys/block/device/partition/ path, we can finally
3715 read the partition attributes and compare with hVol. */
3716 if (RT_SUCCESS(rc))
3717 {
3718 /* partition number: */
3719 int64_t iLnxPartition = 0;
3720 rc = RTLinuxSysFsReadIntFile(10, &iLnxPartition, "%s/partition", szSysPath);
3721 if (RT_SUCCESS(rc) && iLnxPartition != idxPartition)
3722 rc = vdIfError(pImage->pIfError, VERR_MISMATCH, RT_SRC_POS,
3723 N_("VMDK: Image path: '%s'. Partition #%u path ('%s') verification failed on '%s': Partition number %RI64, expected %RU32"),
3724 pImage->pszFilename, idxPartition, pPartDesc->pszRawDevice, pszRawDrive, iLnxPartition, idxPartition);
3725 /* else: ignore failure? */
3726 /* start offset: */
3727 uint32_t const cbLnxSector = 512; /* It's hardcoded in the Linux kernel */
3728 if (RT_SUCCESS(rc))
3729 {
3730 int64_t offLnxStart = -1;
3731 rc = RTLinuxSysFsReadIntFile(10, &offLnxStart, "%s/start", szSysPath);
3732 offLnxStart *= cbLnxSector;
3733 if (RT_SUCCESS(rc) && offLnxStart != (int64_t)pPartDesc->offStartInVDisk)
3734 rc = vdIfError(pImage->pIfError, VERR_MISMATCH, RT_SRC_POS,
3735 N_("VMDK: Image path: '%s'. Partition #%u path ('%s') verification failed on '%s': Start offset %RI64, expected %RU64"),
3736 pImage->pszFilename, idxPartition, pPartDesc->pszRawDevice, pszRawDrive, offLnxStart, pPartDesc->offStartInVDisk);
3737 /* else: ignore failure? */
3738 }
3739 /* the size: */
3740 if (RT_SUCCESS(rc))
3741 {
3742 int64_t cbLnxData = -1;
3743 rc = RTLinuxSysFsReadIntFile(10, &cbLnxData, "%s/size", szSysPath);
3744 cbLnxData *= cbLnxSector;
3745 if (RT_SUCCESS(rc) && cbLnxData != (int64_t)pPartDesc->cbData)
3746 rc = vdIfError(pImage->pIfError, VERR_MISMATCH, RT_SRC_POS,
3747 N_("VMDK: Image path: '%s'. Partition #%u path ('%s') verification failed on '%s': Size %RI64, expected %RU64"),
3748 pImage->pszFilename, idxPartition, pPartDesc->pszRawDevice, pszRawDrive, cbLnxData, pPartDesc->cbData);
3749 /* else: ignore failure? */
3750 }
3751 }
3752 }
3753 /* else: We've got nothing to work on, so only do content comparison. */
3754 }
3755#elif defined(RT_OS_FREEBSD)
3756 char szDriveDevName[256];
3757 char* pszDevName = fdevname_r(RTFileToNative(hRawDrive), szDriveDevName, 256);
3758 if (pszDevName == NULL)
3759 rc = vdIfError(pImage->pIfError, VERR_INVALID_PARAMETER, RT_SRC_POS,
3760 N_("VMDK: Image path: '%s'. '%s' is not a drive path"), pImage->pszFilename, pszRawDrive);
3761 char szPartDevName[256];
3762 if (RT_SUCCESS(rc))
3763 {
3764 pszDevName = fdevname_r(RTFileToNative(hRawPart), szPartDevName, 256);
3765 if (pszDevName == NULL)
3766 rc = vdIfError(pImage->pIfError, VERR_INVALID_PARAMETER, RT_SRC_POS,
3767 N_("VMDK: Image path: '%s'. '%s' is not a partition path"), pImage->pszFilename, pPartDesc->pszRawDevice);
3768 }
3769 if (RT_SUCCESS(rc))
3770 {
3771 gmesh geomMesh;
3772 int err = geom_gettree(&geomMesh);
3773 if (err == 0)
3774 {
3775            /* Find the root class containing the partition info. */
3776 gclass* pPartClass;
3777 LIST_FOREACH(pPartClass, &geomMesh.lg_class, lg_class)
3778 {
3779 if (RTStrCmp(pPartClass->lg_name, "PART") == 0)
3780 break;
3781 }
3782 if (pPartClass == NULL || RTStrCmp(pPartClass->lg_name, "PART") != 0)
3783 rc = vdIfError(pImage->pIfError, VERR_GENERAL_FAILURE, RT_SRC_POS,
3784 N_("VMDK: Image path: '%s'. 'PART' class not found in the GEOM tree"), pImage->pszFilename);
3785 if (RT_SUCCESS(rc))
3786 {
3787 /* Find provider representing partition device */
3788 uint64_t cbOffset;
3789 uint64_t cbSize;
3790 rc = vmdkFindPartitionParamsByName(pPartClass, szDriveDevName, szPartDevName, &cbOffset, &cbSize);
3791 if (RT_SUCCESS(rc))
3792 {
3793 if (cbOffset != pPartDesc->offStartInVDisk)
3794 rc = vdIfError(pImage->pIfError, VERR_MISMATCH, RT_SRC_POS,
3795 N_("VMDK: Image path: '%s'. Partition #%u path ('%s') verification failed on '%s': Start offset %RU64, expected %RU64"),
3796 pImage->pszFilename, idxPartition, pPartDesc->pszRawDevice, pszRawDrive, cbOffset, pPartDesc->offStartInVDisk);
3797 if (cbSize != pPartDesc->cbData)
3798 rc = vdIfError(pImage->pIfError, VERR_MISMATCH, RT_SRC_POS,
3799 N_("VMDK: Image path: '%s'. Partition #%u path ('%s') verification failed on '%s': Size %RU64, expected %RU64"),
3800 pImage->pszFilename, idxPartition, pPartDesc->pszRawDevice, pszRawDrive, cbSize, pPartDesc->cbData);
3801 }
3802 else
3803 rc = vdIfError(pImage->pIfError, rc, RT_SRC_POS,
3804 N_("VMDK: Image path: '%s'. Error getting geom provider for the partition '%s' of the drive '%s' in the GEOM tree: %Rrc"),
3805 pImage->pszFilename, pPartDesc->pszRawDevice, pszRawDrive, rc);
3806 }
3807 geom_deletetree(&geomMesh);
3808 }
3809 else
3810 rc = vdIfError(pImage->pIfError, RTErrConvertFromErrno(err), RT_SRC_POS,
3811 N_("VMDK: Image path: '%s'. geom_gettree failed: %d"), pImage->pszFilename, err);
3812 }
3813#elif defined(RT_OS_SOLARIS)
3814 RT_NOREF(hVol);
3815 dk_cinfo dkiDriveInfo;
3816 dk_cinfo dkiPartInfo;
3817 if (ioctl(RTFileToNative(hRawDrive), DKIOCINFO, (caddr_t)&dkiDriveInfo) == -1)
3818 rc = vdIfError(pImage->pIfError, RTErrConvertFromErrno(errno), RT_SRC_POS,
3819 N_("VMDK: Image path: '%s'. DKIOCINFO failed on '%s': %d"), pImage->pszFilename, pszRawDrive, errno);
3820 else if (ioctl(RTFileToNative(hRawPart), DKIOCINFO, (caddr_t)&dkiPartInfo) == -1)
3821 rc = vdIfError(pImage->pIfError, RTErrConvertFromErrno(errno), RT_SRC_POS,
3822 N_("VMDK: Image path: '%s'. DKIOCINFO failed on '%s': %d"), pImage->pszFilename, pszRawDrive, errno);
3823 else if ( dkiDriveInfo.dki_ctype != dkiPartInfo.dki_ctype
3824 || dkiDriveInfo.dki_cnum != dkiPartInfo.dki_cnum
3825 || dkiDriveInfo.dki_addr != dkiPartInfo.dki_addr
3826 || dkiDriveInfo.dki_unit != dkiPartInfo.dki_unit
3827 || dkiDriveInfo.dki_slave != dkiPartInfo.dki_slave)
3828 rc = vdIfError(pImage->pIfError, VERR_MISMATCH, RT_SRC_POS,
3829 N_("VMDK: Image path: '%s'. Partition #%u path ('%s') verification failed on '%s' (%#x != %#x || %#x != %#x || %#x != %#x || %#x != %#x || %#x != %#x)"),
3830 pImage->pszFilename, idxPartition, pPartDesc->pszRawDevice, pszRawDrive,
3831 dkiDriveInfo.dki_ctype, dkiPartInfo.dki_ctype, dkiDriveInfo.dki_cnum, dkiPartInfo.dki_cnum,
3832 dkiDriveInfo.dki_addr, dkiPartInfo.dki_addr, dkiDriveInfo.dki_unit, dkiPartInfo.dki_unit,
3833 dkiDriveInfo.dki_slave, dkiPartInfo.dki_slave);
3834 else
3835 {
3836 uint64_t cbOffset = 0;
3837 uint64_t cbSize = 0;
3838 dk_gpt *pEfi = NULL;
3839 int idxEfiPart = efi_alloc_and_read(RTFileToNative(hRawPart), &pEfi);
3840 if (idxEfiPart >= 0)
3841 {
3842 if ((uint32_t)dkiPartInfo.dki_partition + 1 == idxPartition)
3843 {
3844 cbOffset = pEfi->efi_parts[idxEfiPart].p_start * pEfi->efi_lbasize;
3845 cbSize = pEfi->efi_parts[idxEfiPart].p_size * pEfi->efi_lbasize;
3846 }
3847 else
3848 rc = vdIfError(pImage->pIfError, VERR_MISMATCH, RT_SRC_POS,
3849 N_("VMDK: Image path: '%s'. Partition #%u number ('%s') verification failed on '%s' (%#x != %#x)"),
3850 pImage->pszFilename, idxPartition, pPartDesc->pszRawDevice, pszRawDrive,
3851 idxPartition, (uint32_t)dkiPartInfo.dki_partition + 1);
3852 efi_free(pEfi);
3853 }
3854 else
3855 {
3856 /*
3857             * The manual says efi_alloc_and_read returns VT_EINVAL if no EFI partition table is found.
3858             * In practice the function can return other errors, e.g. VT_ERROR, so we cannot tell whether
3859             * this is a real error or just a missing EFI table. Therefore, try to obtain the partition
3860             * info another way; if that fails, it returns an errno which is handled below.
3861 */
3862 uint32_t numPartition = (uint32_t)dkiPartInfo.dki_partition;
3863 if (numPartition > NDKMAP)
3864 numPartition -= NDKMAP;
3865 if (numPartition != idxPartition)
3866 rc = vdIfError(pImage->pIfError, VERR_MISMATCH, RT_SRC_POS,
3867 N_("VMDK: Image path: '%s'. Partition #%u number ('%s') verification failed on '%s' (%#x != %#x)"),
3868 pImage->pszFilename, idxPartition, pPartDesc->pszRawDevice, pszRawDrive,
3869 idxPartition, numPartition);
3870 else
3871 {
3872 dk_minfo_ext mediaInfo;
3873 if (ioctl(RTFileToNative(hRawPart), DKIOCGMEDIAINFOEXT, (caddr_t)&mediaInfo) == -1)
3874 rc = vdIfError(pImage->pIfError, RTErrConvertFromErrno(errno), RT_SRC_POS,
3875 N_("VMDK: Image path: '%s'. Partition #%u number ('%s') verification failed on '%s'. Can not obtain partition info: %d"),
3876 pImage->pszFilename, idxPartition, pPartDesc->pszRawDevice, pszRawDrive, errno);
3877 else
3878 {
3879 extpart_info extPartInfo;
3880 if (ioctl(RTFileToNative(hRawPart), DKIOCEXTPARTINFO, (caddr_t)&extPartInfo) != -1)
3881 {
3882 cbOffset = (uint64_t)extPartInfo.p_start * mediaInfo.dki_lbsize;
3883 cbSize = (uint64_t)extPartInfo.p_length * mediaInfo.dki_lbsize;
3884 }
3885 else
3886 rc = vdIfError(pImage->pIfError, RTErrConvertFromErrno(errno), RT_SRC_POS,
3887 N_("VMDK: Image path: '%s'. Partition #%u number ('%s') verification failed on '%s'. Can not obtain partition info: %d"),
3888 pImage->pszFilename, idxPartition, pPartDesc->pszRawDevice, pszRawDrive, errno);
3889 }
3890 }
3891 }
3892 if (RT_SUCCESS(rc) && cbOffset != pPartDesc->offStartInVDisk)
3893 rc = vdIfError(pImage->pIfError, VERR_MISMATCH, RT_SRC_POS,
3894 N_("VMDK: Image path: '%s'. Partition #%u path ('%s') verification failed on '%s': Start offset %RI64, expected %RU64"),
3895 pImage->pszFilename, idxPartition, pPartDesc->pszRawDevice, pszRawDrive, cbOffset, pPartDesc->offStartInVDisk);
3896 if (RT_SUCCESS(rc) && cbSize != pPartDesc->cbData)
3897 rc = vdIfError(pImage->pIfError, VERR_MISMATCH, RT_SRC_POS,
3898 N_("VMDK: Image path: '%s'. Partition #%u path ('%s') verification failed on '%s': Size %RI64, expected %RU64"),
3899 pImage->pszFilename, idxPartition, pPartDesc->pszRawDevice, pszRawDrive, cbSize, pPartDesc->cbData);
3900 }
3901
3902#elif defined(RT_OS_DARWIN)
3903    /* Stat the drive to get its device number. */
3904 struct stat StDrive;
3905 if (fstat((int)RTFileToNative(hRawDrive), &StDrive) != 0)
3906 rc = vdIfError(pImage->pIfError, RTErrConvertFromErrno(errno), RT_SRC_POS,
3907 N_("VMDK: Image path: '%s'. fstat failed on '%s' (errno=%d)"), pImage->pszFilename, pszRawDrive, errno);
3908 else
3909 {
3910 if (ioctl(RTFileToNative(hRawPart), DKIOCLOCKPHYSICALEXTENTS, NULL) == -1)
3911 rc = vdIfError(pImage->pIfError, RTErrConvertFromErrno(errno), RT_SRC_POS,
3912 N_("VMDK: Image path: '%s'. Partition #%u number ('%s') verification failed on '%s': Unable to lock the partition (errno=%d)"),
3913 pImage->pszFilename, idxPartition, pPartDesc->pszRawDevice, pszRawDrive, errno);
3914 else
3915 {
3916 uint32_t cbBlockSize = 0;
3917 uint64_t cbOffset = 0;
3918 uint64_t cbSize = 0;
3919 if (ioctl(RTFileToNative(hRawPart), DKIOCGETBLOCKSIZE, (caddr_t)&cbBlockSize) == -1)
3920 rc = vdIfError(pImage->pIfError, RTErrConvertFromErrno(errno), RT_SRC_POS,
3921 N_("VMDK: Image path: '%s'. Partition #%u number ('%s') verification failed on '%s': Unable to obtain the sector size of the partition (errno=%d)"),
3922 pImage->pszFilename, idxPartition, pPartDesc->pszRawDevice, pszRawDrive, errno);
3923 else if (ioctl(RTFileToNative(hRawPart), DKIOCGETBASE, (caddr_t)&cbOffset) == -1)
3924 rc = vdIfError(pImage->pIfError, RTErrConvertFromErrno(errno), RT_SRC_POS,
3925 N_("VMDK: Image path: '%s'. Partition #%u number ('%s') verification failed on '%s': Unable to obtain the start offset of the partition (errno=%d)"),
3926 pImage->pszFilename, idxPartition, pPartDesc->pszRawDevice, pszRawDrive, errno);
3927 else if (ioctl(RTFileToNative(hRawPart), DKIOCGETBLOCKCOUNT, (caddr_t)&cbSize) == -1)
3928 rc = vdIfError(pImage->pIfError, RTErrConvertFromErrno(errno), RT_SRC_POS,
3929 N_("VMDK: Image path: '%s'. Partition #%u number ('%s') verification failed on '%s': Unable to obtain the size of the partition (errno=%d)"),
3930 pImage->pszFilename, idxPartition, pPartDesc->pszRawDevice, pszRawDrive, errno);
3931 else
3932 {
3933 cbSize *= (uint64_t)cbBlockSize;
3934 dk_physical_extent_t dkPartExtent = {0};
3935 dkPartExtent.offset = 0;
3936 dkPartExtent.length = cbSize;
3937 if (ioctl(RTFileToNative(hRawPart), DKIOCGETPHYSICALEXTENT, (caddr_t)&dkPartExtent) == -1)
3938 rc = vdIfError(pImage->pIfError, RTErrConvertFromErrno(errno), RT_SRC_POS,
3939 N_("VMDK: Image path: '%s'. Partition #%u number ('%s') verification failed on '%s': Unable to obtain partition info (errno=%d)"),
3940 pImage->pszFilename, idxPartition, pPartDesc->pszRawDevice, pszRawDrive, errno);
3941 else
3942 {
3943 if (dkPartExtent.dev != StDrive.st_rdev)
3944 rc = vdIfError(pImage->pIfError, VERR_MISMATCH, RT_SRC_POS,
3945 N_("VMDK: Image path: '%s'. Partition #%u path ('%s') verification failed on '%s': Drive does not contain the partition"),
3946 pImage->pszFilename, idxPartition, pPartDesc->pszRawDevice, pszRawDrive);
3947 else if (cbOffset != pPartDesc->offStartInVDisk)
3948 rc = vdIfError(pImage->pIfError, VERR_MISMATCH, RT_SRC_POS,
3949 N_("VMDK: Image path: '%s'. Partition #%u path ('%s') verification failed on '%s': Start offset %RU64, expected %RU64"),
3950 pImage->pszFilename, idxPartition, pPartDesc->pszRawDevice, pszRawDrive, cbOffset, pPartDesc->offStartInVDisk);
3951 else if (cbSize != pPartDesc->cbData)
3952 rc = vdIfError(pImage->pIfError, VERR_MISMATCH, RT_SRC_POS,
3953 N_("VMDK: Image path: '%s'. Partition #%u path ('%s') verification failed on '%s': Size %RU64, expected %RU64"),
3954 pImage->pszFilename, idxPartition, pPartDesc->pszRawDevice, pszRawDrive, cbSize, pPartDesc->cbData);
3955 }
3956 }
3957
3958 if (ioctl(RTFileToNative(hRawPart), DKIOCUNLOCKPHYSICALEXTENTS, NULL) == -1)
3959 {
3960 int rc2 = vdIfError(pImage->pIfError, RTErrConvertFromErrno(errno), RT_SRC_POS,
3961 N_("VMDK: Image path: '%s'. Partition #%u number ('%s') verification failed on '%s': Unable to unlock the partition (errno=%d)"),
3962 pImage->pszFilename, idxPartition, pPartDesc->pszRawDevice, pszRawDrive, errno);
3963 if (RT_SUCCESS(rc))
3964 rc = rc2;
3965 }
3966 }
3967 }
3968
3969#else
3970 RT_NOREF(hVol); /* PORTME */
3971#endif
3972 if (RT_SUCCESS(rc))
3973 {
3974 /*
3975 * Compare the first 32 sectors of the partition.
3976 *
3977 * This might not be conclusive, but for partitions formatted with the more
3978 * common file systems it should be as they have a superblock copy at or near
3979         * the start of the partition (fat, fat32, ntfs, and ext4 do at least).
3980 */
3981 size_t const cbToCompare = (size_t)RT_MIN(pPartDesc->cbData / cbSector, 32) * cbSector;
3982 uint8_t *pbSector1 = (uint8_t *)RTMemTmpAlloc(cbToCompare * 2);
3983 if (pbSector1 != NULL)
3984 {
3985 uint8_t *pbSector2 = pbSector1 + cbToCompare;
3986 /* Do the comparing, we repeat if it fails and the data might be volatile. */
3987 uint64_t uPrevCrc1 = 0;
3988 uint64_t uPrevCrc2 = 0;
3989 uint32_t cStable = 0;
3990 for (unsigned iTry = 0; iTry < 256; iTry++)
3991 {
3992 rc = RTFileReadAt(hRawDrive, pPartDesc->offStartInVDisk, pbSector1, cbToCompare, NULL);
3993 if (RT_SUCCESS(rc))
3994 {
3995 rc = RTFileReadAt(hRawPart, pPartDesc->offStartInDevice, pbSector2, cbToCompare, NULL);
3996 if (RT_SUCCESS(rc))
3997 {
3998 if (memcmp(pbSector1, pbSector2, cbToCompare) != 0)
3999 {
4000 rc = VERR_MISMATCH;
4001 /* Do data stability checks before repeating: */
4002 uint64_t const uCrc1 = RTCrc64(pbSector1, cbToCompare);
4003 uint64_t const uCrc2 = RTCrc64(pbSector2, cbToCompare);
4004 if ( uPrevCrc1 != uCrc1
4005 || uPrevCrc2 != uCrc2)
4006 cStable = 0;
4007 else if (++cStable > 4)
4008 break;
4009 uPrevCrc1 = uCrc1;
4010 uPrevCrc2 = uCrc2;
4011 continue;
4012 }
4013 rc = VINF_SUCCESS;
4014 }
4015 else
4016 rc = vdIfError(pImage->pIfError, rc, RT_SRC_POS,
4017 N_("VMDK: Image path: '%s'. Error reading %zu bytes from '%s' at offset %RU64 (%Rrc)"),
4018 pImage->pszFilename, cbToCompare, pPartDesc->pszRawDevice, pPartDesc->offStartInDevice, rc);
4019 }
4020 else
4021 rc = vdIfError(pImage->pIfError, rc, RT_SRC_POS,
4022 N_("VMDK: Image path: '%s'. Error reading %zu bytes from '%s' at offset %RU64 (%Rrc)"),
4023 pImage->pszFilename, cbToCompare, pszRawDrive, pPartDesc->offStartInVDisk, rc);
4024 break;
4025 }
4026 if (rc == VERR_MISMATCH)
4027 {
4028 /* Find the first mismatching bytes: */
4029 size_t offMissmatch = 0;
4030 while (offMissmatch < cbToCompare && pbSector1[offMissmatch] == pbSector2[offMissmatch])
4031 offMissmatch++;
4032 int cbSample = (int)RT_MIN(cbToCompare - offMissmatch, 16);
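                    /* Include up to 16 bytes from each buffer at the first mismatching offset in the message. */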
4033 if (cStable > 0)
4034 rc = vdIfError(pImage->pIfError, rc, RT_SRC_POS,
4035 N_("VMDK: Image path: '%s'. Partition #%u path ('%s') verification failed on '%s' (cStable=%d @%#zx: %.*Rhxs vs %.*Rhxs)"),
4036 pImage->pszFilename, idxPartition, pPartDesc->pszRawDevice, pszRawDrive, cStable,
4037 offMissmatch, cbSample, &pbSector1[offMissmatch], cbSample, &pbSector2[offMissmatch]);
4038 else
4039 {
4040 LogRel(("VMDK: Image path: '%s'. Partition #%u path ('%s') verification undecided on '%s' because of unstable data! (@%#zx: %.*Rhxs vs %.*Rhxs)\n",
4041 pImage->pszFilename, idxPartition, pPartDesc->pszRawDevice, pszRawDrive,
4042 offMissmatch, cbSample, &pbSector1[offMissmatch], cbSample, &pbSector2[offMissmatch]));
4043 rc = -rc;
4044 }
4045 }
4046 RTMemTmpFree(pbSector1);
4047 }
4048 else
4049 rc = vdIfError(pImage->pIfError, VERR_NO_TMP_MEMORY, RT_SRC_POS,
4050 N_("VMDK: Image path: '%s'. Failed to allocate %zu bytes for a temporary read buffer\n"),
4051 pImage->pszFilename, cbToCompare * 2);
4052 }
4053 RTFileClose(hRawPart);
4054 return rc;
4055}
4056#ifdef RT_OS_WINDOWS
4057/**
4058 * Construct the device name for the given partition number.
4059 */
4060static int vmdkRawDescWinMakePartitionName(PVMDKIMAGE pImage, const char *pszRawDrive, RTFILE hRawDrive, uint32_t idxPartition,
4061 char **ppszRawPartition)
4062{
4063 int rc = VINF_SUCCESS;
4064 DWORD cbReturned = 0;
4065 STORAGE_DEVICE_NUMBER DevNum;
4066 RT_ZERO(DevNum);
4067 if (DeviceIoControl((HANDLE)RTFileToNative(hRawDrive), IOCTL_STORAGE_GET_DEVICE_NUMBER,
4068 NULL /*pvInBuffer*/, 0 /*cbInBuffer*/, &DevNum, sizeof(DevNum), &cbReturned, NULL /*pOverlapped*/))
4069 RTStrAPrintf(ppszRawPartition, "\\\\.\\Harddisk%uPartition%u", DevNum.DeviceNumber, idxPartition);
4070 else
4071 rc = vdIfError(pImage->pIfError, RTErrConvertFromWin32(GetLastError()), RT_SRC_POS,
4072 N_("VMDK: Image path: '%s'. IOCTL_STORAGE_GET_DEVICE_NUMBER failed on '%s': %u"),
4073 pImage->pszFilename, pszRawDrive, GetLastError());
4074 return rc;
4075}
4076#endif /* RT_OS_WINDOWS */
4077/**
4078 * Worker for vmdkMakeRawDescriptor that adds partition descriptors when the
4079 * 'Partitions' configuration value is present.
4080 *
4081 * @returns VBox status code, error message has been set on failure.
4082 *
4083 * @note Caller is assumed to clean up @a pRawDesc and release
4084 * @a *phVolToRelease.
4085 * @internal
4086 */
4087static int vmdkRawDescDoPartitions(PVMDKIMAGE pImage, RTDVM hVolMgr, PVDISKRAW pRawDesc,
4088 RTFILE hRawDrive, const char *pszRawDrive, uint32_t cbSector,
4089 uint32_t fPartitions, uint32_t fPartitionsReadOnly, bool fRelative,
4090 PRTDVMVOLUME phVolToRelease)
4091{
4092 *phVolToRelease = NIL_RTDVMVOLUME;
4093 /* Check sanity/understanding. */
4094 Assert(fPartitions);
4095 Assert((fPartitions & fPartitionsReadOnly) == fPartitionsReadOnly); /* RO should be a sub-set */
4096 /*
4097     * Allocate one descriptor for each volume up front.
4098 */
4099 uint32_t const cVolumes = RTDvmMapGetValidVolumes(hVolMgr);
4100 PVDISKRAWPARTDESC paPartDescs = NULL;
4101 int rc = vmdkRawDescAppendPartDesc(pImage, pRawDesc, cVolumes, &paPartDescs);
4102 AssertRCReturn(rc, rc);
4103 /*
4104 * Enumerate the partitions (volumes) on the disk and create descriptors for each of them.
4105 */
4106 uint32_t fPartitionsLeft = fPartitions;
4107 RTDVMVOLUME hVol = NIL_RTDVMVOLUME; /* the current volume, needed for getting the next. */
4108 for (uint32_t i = 0; i < cVolumes; i++)
4109 {
4110 /*
4111 * Get the next/first volume and release the current.
4112 */
4113 RTDVMVOLUME hVolNext = NIL_RTDVMVOLUME;
4114 if (i == 0)
4115 rc = RTDvmMapQueryFirstVolume(hVolMgr, &hVolNext);
4116 else
4117 rc = RTDvmMapQueryNextVolume(hVolMgr, hVol, &hVolNext);
4118 if (RT_FAILURE(rc))
4119 return vdIfError(pImage->pIfError, rc, RT_SRC_POS,
4120 N_("VMDK: Image path: '%s'. Volume enumeration failed at volume #%u on '%s' (%Rrc)"),
4121 pImage->pszFilename, i, pszRawDrive, rc);
4122 uint32_t cRefs = RTDvmVolumeRelease(hVol);
4123 Assert(cRefs != UINT32_MAX); RT_NOREF(cRefs);
4124 *phVolToRelease = hVol = hVolNext;
4125 /*
4126 * Depending on the fPartitions selector and associated read-only mask,
4127 * the guest either gets read-write or read-only access (bits set)
4128 * or no access (selector bit clear, access directed to the VMDK).
4129 */
4130 paPartDescs[i].cbData = RTDvmVolumeGetSize(hVol);
4131 uint64_t offVolumeEndIgnored = 0;
4132 rc = RTDvmVolumeQueryRange(hVol, &paPartDescs[i].offStartInVDisk, &offVolumeEndIgnored);
4133 if (RT_FAILURE(rc))
4134 return vdIfError(pImage->pIfError, rc, RT_SRC_POS,
4135 N_("VMDK: Image path: '%s'. Failed to get location of volume #%u on '%s' (%Rrc)"),
4136 pImage->pszFilename, i, pszRawDrive, rc);
4137 Assert(paPartDescs[i].cbData == offVolumeEndIgnored + 1 - paPartDescs[i].offStartInVDisk);
4138 /* Note! The index must match IHostDrivePartition::number. */
4139 uint32_t idxPartition = RTDvmVolumeGetIndex(hVol, RTDVMVOLIDX_HOST);
4140 if ( idxPartition < 32
4141 && (fPartitions & RT_BIT_32(idxPartition)))
4142 {
4143 fPartitionsLeft &= ~RT_BIT_32(idxPartition);
4144 if (fPartitionsReadOnly & RT_BIT_32(idxPartition))
4145 paPartDescs[i].uFlags |= VDISKRAW_READONLY;
4146 if (!fRelative)
4147 {
4148 /*
4149 * Accessing the drive thru the main device node (pRawDesc->pszRawDisk).
4150 */
4151 paPartDescs[i].offStartInDevice = paPartDescs[i].offStartInVDisk;
4152 paPartDescs[i].pszRawDevice = RTStrDup(pszRawDrive);
4153 AssertPtrReturn(paPartDescs[i].pszRawDevice, VERR_NO_STR_MEMORY);
4154 }
4155 else
4156 {
4157 /*
4158 * Relative means access the partition data via the device node for that
4159 * partition, allowing the sysadmin/OS to allow a user access to individual
4160 * partitions without necessarily being able to compromise the host OS.
4161 * Obviously, the creation of the VMDK requires read access to the main
4162 * device node for the drive, but that's a one-time thing and can be done
4163 * by the sysadmin. Here data starts at offset zero in the device node.
4164 */
4165 paPartDescs[i].offStartInDevice = 0;
4166#if defined(RT_OS_DARWIN) || defined(RT_OS_FREEBSD)
4167 /* /dev/rdisk1 -> /dev/rdisk1s2 (s=slice) */
4168 RTStrAPrintf(&paPartDescs[i].pszRawDevice, "%ss%u", pszRawDrive, idxPartition);
4169#elif defined(RT_OS_LINUX)
4170 /* Two naming schemes here: /dev/nvme0n1 -> /dev/nvme0n1p1; /dev/sda -> /dev/sda1 */
4171 RTStrAPrintf(&paPartDescs[i].pszRawDevice,
4172 RT_C_IS_DIGIT(pszRawDrive[strlen(pszRawDrive) - 1]) ? "%sp%u" : "%s%u", pszRawDrive, idxPartition);
4173#elif defined(RT_OS_WINDOWS)
4174 rc = vmdkRawDescWinMakePartitionName(pImage, pszRawDrive, hRawDrive, idxPartition, &paPartDescs[i].pszRawDevice);
4175 AssertRCReturn(rc, rc);
4176#elif defined(RT_OS_SOLARIS)
4177 if (pRawDesc->enmPartitioningType == VDISKPARTTYPE_MBR)
4178 {
4179 /*
4180 * MBR partitions have device nodes in form /dev/(r)dsk/cXtYdZpK
4181 * where X is the controller,
4182 * Y is target (SCSI device number),
4183 * Z is disk number,
4184 * K is partition number,
4185 * where p0 is the whole disk
4186 * p1-pN are the partitions of the disk
4187 */
4188 const char *pszRawDrivePath = pszRawDrive;
4189 char szDrivePath[RTPATH_MAX];
4190 size_t cbRawDrive = strlen(pszRawDrive);
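                /* If given the whole-disk node (ending in 'p0'), strip that suffix before appending the partition number. */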
4191 if ( cbRawDrive > 1 && strcmp(&pszRawDrive[cbRawDrive - 2], "p0") == 0)
4192 {
4193 memcpy(szDrivePath, pszRawDrive, cbRawDrive - 2);
4194 szDrivePath[cbRawDrive - 2] = '\0';
4195 pszRawDrivePath = szDrivePath;
4196 }
4197 RTStrAPrintf(&paPartDescs[i].pszRawDevice, "%sp%u", pszRawDrivePath, idxPartition);
4198 }
4199 else /* GPT */
4200 {
4201 /*
4202 * GPT partitions have device nodes in form /dev/(r)dsk/cXtYdZsK
4203 * where X is the controller,
4204 * Y is target (SCSI device number),
4205 * Z is disk number,
4206 * K is partition number, zero based. Can be only from 0 to 6.
4207 * Thus, only partitions numbered 0 through 6 have device nodes.
4208 */
4209 if (idxPartition > 7)
4210 return vdIfError(pImage->pIfError, VERR_INVALID_PARAMETER, RT_SRC_POS,
4211                                          N_("VMDK: Image path: '%s'. Partition #%u on '%s' has no device node and cannot be specified with the 'Relative' property"),
4212 pImage->pszFilename, idxPartition, pszRawDrive);
4213 RTStrAPrintf(&paPartDescs[i].pszRawDevice, "%ss%u", pszRawDrive, idxPartition - 1);
4214 }
4215#else
4216 AssertFailedReturn(VERR_INTERNAL_ERROR_4); /* The option parsing code should have prevented this - PORTME */
4217#endif
4218 AssertPtrReturn(paPartDescs[i].pszRawDevice, VERR_NO_STR_MEMORY);
4219 rc = vmdkRawDescVerifyPartitionPath(pImage, &paPartDescs[i], idxPartition, pszRawDrive, hRawDrive, cbSector, hVol);
4220 AssertRCReturn(rc, rc);
4221 }
4222 }
4223 else
4224 {
4225 /* Not accessible to the guest. */
4226 paPartDescs[i].offStartInDevice = 0;
4227 paPartDescs[i].pszRawDevice = NULL;
4228 }
4229 } /* for each volume */
4230 RTDvmVolumeRelease(hVol);
4231 *phVolToRelease = NIL_RTDVMVOLUME;
4232 /*
4233 * Check that we found all the partitions the user selected.
4234 */
4235 if (fPartitionsLeft)
4236 {
4237 char szLeft[3 * sizeof(fPartitions) * 8];
4238 size_t cchLeft = 0;
4239 for (unsigned i = 0; i < sizeof(fPartitions) * 8; i++)
4240 if (fPartitionsLeft & RT_BIT_32(i))
4241                cchLeft += RTStrPrintf(&szLeft[cchLeft], sizeof(szLeft) - cchLeft, cchLeft ? ",%u" : "%u", i);
4242 return vdIfError(pImage->pIfError, VERR_INVALID_PARAMETER, RT_SRC_POS,
4243                          N_("VMDK: Image path: '%s'. Not all the specified partitions for drive '%s' were found: %s"),
4244 pImage->pszFilename, pszRawDrive, szLeft);
4245 }
4246 return VINF_SUCCESS;
4247}
4248/**
4249 * Worker for vmdkMakeRawDescriptor that adds partition descriptors with copies
4250 * of the partition tables and associated padding areas when the 'Partitions'
4251 * configuration value is present.
4252 *
4253 * The guest is not allowed access to the partition tables; however, it needs
4254 * them to be able to access the drive. So, create descriptors for each of the
4255 * tables and attach the current disk content. vmdkCreateRawImage() will later
4256 * write the content to the VMDK. Any changes the guest later makes to the
4257 * partition tables will then go to the VMDK copy, rather than the host drive.
4258 *
4259 * @returns VBox status code, error message has been set on failure.
4260 *
4261 * @note Caller is assumed to clean up @a pRawDesc
4262 * @internal
4263 */
4264static int vmdkRawDescDoCopyPartitionTables(PVMDKIMAGE pImage, RTDVM hVolMgr, PVDISKRAW pRawDesc,
4265 const char *pszRawDrive, RTFILE hRawDrive, void *pvBootSector, size_t cbBootSector)
4266{
4267 /*
4268 * Query the locations.
4269 */
4270    /* Determine how many locations there are: */
4271 size_t cLocations = 0;
4272 int rc = RTDvmMapQueryTableLocations(hVolMgr, RTDVMMAPQTABLOC_F_INCLUDE_LEGACY, NULL, 0, &cLocations);
4273 if (rc != VERR_BUFFER_OVERFLOW)
4274 return vdIfError(pImage->pIfError, rc, RT_SRC_POS,
4275 N_("VMDK: Image path: '%s'. RTDvmMapQueryTableLocations failed on '%s' (%Rrc)"),
4276 pImage->pszFilename, pszRawDrive, rc);
4277 AssertReturn(cLocations > 0 && cLocations < _16M, VERR_INTERNAL_ERROR_5);
4278    /* We can allocate the partition descriptors here to save an indentation level. */
4279 PVDISKRAWPARTDESC paPartDescs = NULL;
4280 rc = vmdkRawDescAppendPartDesc(pImage, pRawDesc, (uint32_t)cLocations, &paPartDescs);
4281 AssertRCReturn(rc, rc);
4282 /* Allocate the result table and repeat the location table query: */
4283 PRTDVMTABLELOCATION paLocations = (PRTDVMTABLELOCATION)RTMemAllocZ(sizeof(paLocations[0]) * cLocations);
4284 if (!paLocations)
4285 return vdIfError(pImage->pIfError, VERR_NO_MEMORY, RT_SRC_POS, N_("VMDK: Image path: '%s'. Failed to allocate %zu bytes"),
4286 pImage->pszFilename, sizeof(paLocations[0]) * cLocations);
4287 rc = RTDvmMapQueryTableLocations(hVolMgr, RTDVMMAPQTABLOC_F_INCLUDE_LEGACY, paLocations, cLocations, NULL);
4288 if (RT_SUCCESS(rc))
4289 {
4290 /*
4291 * Translate them into descriptors.
4292 *
4293 * We restrict the amount of partition alignment padding to 4MiB as more
4294         * will just be a waste of space. The use case for including the padding
4295         * is older boot loaders and boot managers (including one by a team member)
4296         * that put data and code in the 62 sectors between the MBR and the first
4297         * partition (63 in total). Later CHS was abandoned and partitions started
4298         * being aligned on power-of-two sector boundaries (typically 64KiB or
4299         * 1MiB depending on the media size).
4300 */
4301 for (size_t i = 0; i < cLocations && RT_SUCCESS(rc); i++)
4302 {
4303 Assert(paLocations[i].cb > 0);
4304 if (paLocations[i].cb <= _64M)
4305 {
4306 /* Create the partition descriptor entry: */
4307 //paPartDescs[i].pszRawDevice = NULL;
4308 //paPartDescs[i].offStartInDevice = 0;
4309 //paPartDescs[i].uFlags = 0;
4310 paPartDescs[i].offStartInVDisk = paLocations[i].off;
4311 paPartDescs[i].cbData = paLocations[i].cb;
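                /* Include the alignment padding following the table, but cap the total at 4MiB (see the note above). */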
4312 if (paPartDescs[i].cbData < _4M)
4313 paPartDescs[i].cbData = RT_MIN(paPartDescs[i].cbData + paLocations[i].cbPadding, _4M);
4314 paPartDescs[i].pvPartitionData = RTMemAllocZ((size_t)paPartDescs[i].cbData);
4315 if (paPartDescs[i].pvPartitionData)
4316 {
4317 /* Read the content from the drive: */
4318 rc = RTFileReadAt(hRawDrive, paPartDescs[i].offStartInVDisk, paPartDescs[i].pvPartitionData,
4319 (size_t)paPartDescs[i].cbData, NULL);
4320 if (RT_SUCCESS(rc))
4321 {
4322 /* Do we have custom boot sector code? */
4323 if (pvBootSector && cbBootSector && paPartDescs[i].offStartInVDisk == 0)
4324 {
4325 /* Note! Old code used to quietly drop the bootsector if it was considered too big.
4326 Instead we fail as we weren't able to do what the user requested us to do.
4327                                   Better that the user knows than that they start questioning why the guest isn't
4328 booting as expected. */
4329 if (cbBootSector <= paPartDescs[i].cbData)
4330 memcpy(paPartDescs[i].pvPartitionData, pvBootSector, cbBootSector);
4331 else
4332 rc = vdIfError(pImage->pIfError, VERR_TOO_MUCH_DATA, RT_SRC_POS,
4333 N_("VMDK: Image path: '%s'. The custom boot sector is too big: %zu bytes, %RU64 bytes available"),
4334 pImage->pszFilename, cbBootSector, paPartDescs[i].cbData);
4335 }
4336 }
4337 else
4338 rc = vdIfError(pImage->pIfError, rc, RT_SRC_POS,
4339 N_("VMDK: Image path: '%s'. Failed to read partition at off %RU64 length %zu from '%s' (%Rrc)"),
4340 pImage->pszFilename, paPartDescs[i].offStartInVDisk,
4341 (size_t)paPartDescs[i].cbData, pszRawDrive, rc);
4342 }
4343 else
4344 rc = vdIfError(pImage->pIfError, VERR_NO_MEMORY, RT_SRC_POS,
4345 N_("VMDK: Image path: '%s'. Failed to allocate %zu bytes for copying the partition table at off %RU64"),
4346 pImage->pszFilename, (size_t)paPartDescs[i].cbData, paPartDescs[i].offStartInVDisk);
4347 }
4348 else
4349 rc = vdIfError(pImage->pIfError, VERR_TOO_MUCH_DATA, RT_SRC_POS,
4350 N_("VMDK: Image path: '%s'. Partition table #%u at offset %RU64 in '%s' is to big: %RU64 bytes"),
4351 pImage->pszFilename, i, paLocations[i].off, pszRawDrive, paLocations[i].cb);
4352 }
4353 }
4354 else
4355 rc = vdIfError(pImage->pIfError, rc, RT_SRC_POS,
4356 N_("VMDK: Image path: '%s'. RTDvmMapQueryTableLocations failed on '%s' (%Rrc)"),
4357 pImage->pszFilename, pszRawDrive, rc);
4358 RTMemFree(paLocations);
4359 return rc;
4360}
4361/**
4362 * Opens the volume manager for the raw drive when in selected-partition mode.
4363 *
4364 * @param pImage The VMDK image (for errors).
4365 * @param hRawDrive The raw drive handle.
4366 * @param pszRawDrive The raw drive device path (for errors).
4367 * @param cbSector The sector size.
4368 * @param phVolMgr Where to return the handle to the volume manager on
4369 * success.
4370 * @returns VBox status code, errors have been reported.
4371 * @internal
4372 */
4373static int vmdkRawDescOpenVolMgr(PVMDKIMAGE pImage, RTFILE hRawDrive, const char *pszRawDrive, uint32_t cbSector, PRTDVM phVolMgr)
4374{
4375 *phVolMgr = NIL_RTDVM;
4376 RTVFSFILE hVfsFile = NIL_RTVFSFILE;
4377 int rc = RTVfsFileFromRTFile(hRawDrive, RTFILE_O_READ | RTFILE_O_OPEN | RTFILE_O_DENY_NONE, true /*fLeaveOpen*/, &hVfsFile);
4378 if (RT_FAILURE(rc))
4379 return vdIfError(pImage->pIfError, rc, RT_SRC_POS,
4380 N_("VMDK: Image path: '%s'. RTVfsFileFromRTFile failed for '%s' handle (%Rrc)"),
4381 pImage->pszFilename, pszRawDrive, rc);
4382 RTDVM hVolMgr = NIL_RTDVM;
4383 rc = RTDvmCreate(&hVolMgr, hVfsFile, cbSector, 0 /*fFlags*/);
4384 RTVfsFileRelease(hVfsFile);
4385 if (RT_FAILURE(rc))
4386 return vdIfError(pImage->pIfError, rc, RT_SRC_POS,
4387 N_("VMDK: Image path: '%s'. Failed to create volume manager instance for '%s' (%Rrc)"),
4388 pImage->pszFilename, pszRawDrive, rc);
4389 rc = RTDvmMapOpen(hVolMgr);
4390 if (RT_SUCCESS(rc))
4391 {
4392 *phVolMgr = hVolMgr;
4393 return VINF_SUCCESS;
4394 }
4395 RTDvmRelease(hVolMgr);
4396 return vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: Image path: '%s'. RTDvmMapOpen failed for '%s' (%Rrc)"),
4397 pImage->pszFilename, pszRawDrive, rc);
4398}
4399/**
4400 * Opens the raw drive device and gets the sizes for it.
4401 *
4402 * @param pImage The image (for error reporting).
4403 * @param pszRawDrive The device/whatever to open.
4404 * @param phRawDrive Where to return the file handle.
4405 * @param pcbRawDrive Where to return the size.
4406 * @param pcbSector Where to return the sector size.
4407 * @returns IPRT status code, errors have been reported.
4408 * @internal
4409 */
4410static int vmkdRawDescOpenDevice(PVMDKIMAGE pImage, const char *pszRawDrive,
4411 PRTFILE phRawDrive, uint64_t *pcbRawDrive, uint32_t *pcbSector)
4412{
4413 /*
4414 * Open the device for the raw drive.
4415 */
4416 RTFILE hRawDrive = NIL_RTFILE;
4417 int rc = RTFileOpen(&hRawDrive, pszRawDrive, RTFILE_O_READ | RTFILE_O_OPEN | RTFILE_O_DENY_NONE);
4418 if (RT_FAILURE(rc))
4419 return vdIfError(pImage->pIfError, rc, RT_SRC_POS,
4420 N_("VMDK: Image path: '%s'. Failed to open the raw drive '%s' for reading (%Rrc)"),
4421 pImage->pszFilename, pszRawDrive, rc);
4422 /*
4423 * Get the sector size.
4424 */
4425 uint32_t cbSector = 0;
4426 rc = RTFileQuerySectorSize(hRawDrive, &cbSector);
4427 if (RT_SUCCESS(rc))
4428 {
4429 /* sanity checks */
4430 if ( cbSector >= 512
4431 && cbSector <= _64K
4432 && RT_IS_POWER_OF_TWO(cbSector))
4433 {
4434 /*
4435 * Get the size.
4436 */
4437 uint64_t cbRawDrive = 0;
4438 rc = RTFileQuerySize(hRawDrive, &cbRawDrive);
4439 if (RT_SUCCESS(rc))
4440 {
4441 /* Check whether cbRawDrive is actually sensible. */
4442 if (cbRawDrive > cbSector && (cbRawDrive % cbSector) == 0)
4443 {
4444 *phRawDrive = hRawDrive;
4445 *pcbRawDrive = cbRawDrive;
4446 *pcbSector = cbSector;
4447 return VINF_SUCCESS;
4448 }
4449 rc = vdIfError(pImage->pIfError, VERR_INVALID_PARAMETER, RT_SRC_POS,
4450 N_("VMDK: Image path: '%s'. Got a bogus size for the raw drive '%s': %RU64 (sector size %u)"),
4451 pImage->pszFilename, pszRawDrive, cbRawDrive, cbSector);
4452 }
4453 else
4454 rc = vdIfError(pImage->pIfError, rc, RT_SRC_POS,
4455 N_("VMDK: Image path: '%s'. Failed to query size of the drive '%s' (%Rrc)"),
4456 pImage->pszFilename, pszRawDrive, rc);
4457 }
4458 else
4459 rc = vdIfError(pImage->pIfError, VERR_OUT_OF_RANGE, RT_SRC_POS,
4460 N_("VMDK: Image path: '%s'. Unsupported sector size for '%s': %u (%#x)"),
4461 pImage->pszFilename, pszRawDrive, cbSector, cbSector);
4462 }
4463 else
4464 rc = vdIfError(pImage->pIfError, rc, RT_SRC_POS,
4465 N_("VMDK: Image path: '%s'. Failed to get the sector size for '%s' (%Rrc)"),
4466 pImage->pszFilename, pszRawDrive, rc);
4467 RTFileClose(hRawDrive);
4468 return rc;
4469}
4470/**
4471 * Reads the raw disk configuration, leaving initialization and cleanup to the
4472 * caller (regardless of return status).
4473 *
4474 * @returns VBox status code, errors properly reported.
4475 * @internal
4476 */
4477static int vmdkRawDescParseConfig(PVMDKIMAGE pImage, char **ppszRawDrive,
4478 uint32_t *pfPartitions, uint32_t *pfPartitionsReadOnly,
4479 void **ppvBootSector, size_t *pcbBootSector, bool *pfRelative,
4480 char **ppszFreeMe)
4481{
4482 PVDINTERFACECONFIG pImgCfg = VDIfConfigGet(pImage->pVDIfsImage);
4483 if (!pImgCfg)
4484 return vdIfError(pImage->pIfError, VERR_INVALID_PARAMETER, RT_SRC_POS,
4485 N_("VMDK: Image path: '%s'. Getting config interface failed"), pImage->pszFilename);
4486 /*
4487 * RawDrive = path
4488 */
4489 int rc = VDCFGQueryStringAlloc(pImgCfg, "RawDrive", ppszRawDrive);
4490 if (RT_FAILURE(rc))
4491 return vdIfError(pImage->pIfError, rc, RT_SRC_POS,
4492 N_("VMDK: Image path: '%s'. Getting 'RawDrive' configuration failed (%Rrc)"), pImage->pszFilename, rc);
4493 AssertPtrReturn(*ppszRawDrive, VERR_INTERNAL_ERROR_3);
4494 /*
4495 * Partitions=n[r][,...]
4496 */
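/* Illustration: "Partitions=1,3r,4" selects partitions 1, 3 and 4 and marks
   partition 3 read-only, i.e. the loop below yields *pfPartitions = 0x1a
   and *pfPartitionsReadOnly = 0x08. */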
4497 uint32_t const cMaxPartitionBits = sizeof(*pfPartitions) * 8 /* ASSUMES 8 bits per char */;
4498 *pfPartitions = *pfPartitionsReadOnly = 0;
4499 rc = VDCFGQueryStringAlloc(pImgCfg, "Partitions", ppszFreeMe);
4500 if (RT_SUCCESS(rc))
4501 {
4502 char *psz = *ppszFreeMe;
4503 while (*psz != '\0')
4504 {
4505 char *pszNext;
4506 uint32_t u32;
4507 rc = RTStrToUInt32Ex(psz, &pszNext, 0, &u32);
4508 if (rc == VWRN_NUMBER_TOO_BIG || rc == VWRN_NEGATIVE_UNSIGNED)
4509 rc = -rc;
4510 if (RT_FAILURE(rc))
4511 return vdIfError(pImage->pIfError, VERR_INVALID_PARAMETER, RT_SRC_POS,
4512 N_("VMDK: Image path: '%s'. Parsing 'Partitions' config value failed. Incorrect value (%Rrc): %s"),
4513 pImage->pszFilename, rc, psz);
4514 if (u32 >= cMaxPartitionBits)
4515 return vdIfError(pImage->pIfError, VERR_INVALID_PARAMETER, RT_SRC_POS,
4516 N_("VMDK: Image path: '%s'. 'Partitions' config sub-value out of range: %RU32, max %RU32"),
4517 pImage->pszFilename, u32, cMaxPartitionBits);
4518 *pfPartitions |= RT_BIT_32(u32);
4519 psz = pszNext;
4520 if (*psz == 'r')
4521 {
4522 *pfPartitionsReadOnly |= RT_BIT_32(u32);
4523 psz++;
4524 }
4525 if (*psz == ',')
4526 psz++;
4527 else if (*psz != '\0')
4528 return vdIfError(pImage->pIfError, VERR_INVALID_PARAMETER, RT_SRC_POS,
4529 N_("VMDK: Image path: '%s'. Malformed 'Partitions' config value, expected separator: %s"),
4530 pImage->pszFilename, psz);
4531 }
4532 RTStrFree(*ppszFreeMe);
4533 *ppszFreeMe = NULL;
4534 }
4535 else if (rc != VERR_CFGM_VALUE_NOT_FOUND)
4536 return vdIfError(pImage->pIfError, rc, RT_SRC_POS,
4537 N_("VMDK: Image path: '%s'. Getting 'Partitions' configuration failed (%Rrc)"), pImage->pszFilename, rc);
4538 /*
4539 * BootSector=base64
4540 */
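/* Illustration: "BootSector=<base64 data>" supplies a replacement boot
   sector; it is decoded below and later copied over the start of the copied
   partition table, and is only accepted together with a 'Partitions'
   selection. */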
4541 rc = VDCFGQueryStringAlloc(pImgCfg, "BootSector", ppszFreeMe);
4542 if (RT_SUCCESS(rc))
4543 {
4544 ssize_t cbBootSector = RTBase64DecodedSize(*ppszFreeMe, NULL);
4545 if (cbBootSector < 0)
4546 return vdIfError(pImage->pIfError, VERR_INVALID_BASE64_ENCODING, RT_SRC_POS,
4547 N_("VMDK: Image path: '%s'. BASE64 decoding failed on the custom bootsector for '%s'"),
4548 pImage->pszFilename, *ppszRawDrive);
4549 if (cbBootSector == 0)
4550 return vdIfError(pImage->pIfError, VERR_INVALID_PARAMETER, RT_SRC_POS,
4551 N_("VMDK: Image path: '%s'. Custom bootsector for '%s' is zero bytes big"),
4552 pImage->pszFilename, *ppszRawDrive);
4553 if (cbBootSector > _4M) /* this is just a preliminary max */
4554 return vdIfError(pImage->pIfError, VERR_INVALID_PARAMETER, RT_SRC_POS,
4555 N_("VMDK: Image path: '%s'. Custom bootsector for '%s' is way too big: %zu bytes, max 4MB"),
4556 pImage->pszFilename, *ppszRawDrive, cbBootSector);
4557 /* Refuse the boot sector if whole-drive. This used to be done quietly,
4558 however, bird disagrees and thinks the user should be told that what
4559 he/she/it tries to do isn't possible. There should be less head
4560 scratching this way when the guest doesn't do the expected thing. */
4561 if (!*pfPartitions)
4562 return vdIfError(pImage->pIfError, VERR_INVALID_PARAMETER, RT_SRC_POS,
4563 N_("VMDK: Image path: '%s'. Custom bootsector for '%s' is not supported for whole-drive configurations, only when selecting partitions"),
4564 pImage->pszFilename, *ppszRawDrive);
4565 *pcbBootSector = (size_t)cbBootSector;
4566 *ppvBootSector = RTMemAlloc((size_t)cbBootSector);
4567 if (!*ppvBootSector)
4568 return vdIfError(pImage->pIfError, VERR_NO_MEMORY, RT_SRC_POS,
4569 N_("VMDK: Image path: '%s'. Failed to allocate %zd bytes for the custom bootsector for '%s'"),
4570 pImage->pszFilename, cbBootSector, *ppszRawDrive);
4571 rc = RTBase64Decode(*ppszFreeMe, *ppvBootSector, cbBootSector, NULL /*pcbActual*/, NULL /*ppszEnd*/);
4572 if (RT_FAILURE(rc))
4573 return vdIfError(pImage->pIfError, rc, RT_SRC_POS,
4574 N_("VMDK: Image path: '%s'. Base64 decoding of the custom boot sector for '%s' failed (%Rrc)"),
4575 pImage->pszFilename, *ppszRawDrive, rc);
4576 RTStrFree(*ppszFreeMe);
4577 *ppszFreeMe = NULL;
4578 }
4579 else if (rc != VERR_CFGM_VALUE_NOT_FOUND)
4580 return vdIfError(pImage->pIfError, rc, RT_SRC_POS,
4581 N_("VMDK: Image path: '%s'. Getting 'BootSector' configuration failed (%Rrc)"), pImage->pszFilename, rc);
4582 /*
4583 * Relative=0/1
4584 */
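/* Illustration: "Relative=1" requests that partition extents reference the
   per-partition device nodes (e.g. /dev/sda1 on Linux) instead of offsets
   into the whole-disk device; it is only meaningful when selecting
   partitions and is host dependent (see the checks below). */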
4585 *pfRelative = false;
4586 rc = VDCFGQueryBool(pImgCfg, "Relative", pfRelative);
4587 if (RT_SUCCESS(rc))
4588 {
4589 if (!*pfPartitions && *pfRelative != false)
4590 return vdIfError(pImage->pIfError, VERR_INVALID_PARAMETER, RT_SRC_POS,
4591 N_("VMDK: Image path: '%s'. The 'Relative' option is not supported for whole-drive configurations, only when selecting partitions"),
4592 pImage->pszFilename);
4593#if !defined(RT_OS_DARWIN) && !defined(RT_OS_LINUX) && !defined(RT_OS_FREEBSD) && !defined(RT_OS_WINDOWS) && !defined(RT_OS_SOLARIS) /* PORTME */
4594 if (*pfRelative == true)
4595 return vdIfError(pImage->pIfError, VERR_INVALID_PARAMETER, RT_SRC_POS,
4596 N_("VMDK: Image path: '%s'. The 'Relative' option is not supported on this host OS"),
4597 pImage->pszFilename);
4598#endif
4599 }
4600 else if (rc != VERR_CFGM_VALUE_NOT_FOUND)
4601 return vdIfError(pImage->pIfError, rc, RT_SRC_POS,
4602 N_("VMDK: Image path: '%s'. Getting 'Relative' configuration failed (%Rrc)"), pImage->pszFilename, rc);
4603 else
4604#ifdef RT_OS_DARWIN /* different default on macOS, see ticketref:1461 (comment 20). */
4605 *pfRelative = true;
4606#else
4607 *pfRelative = false;
4608#endif
4609 return VINF_SUCCESS;
4610}
4611/**
4612 * Creates a raw drive (nee disk) descriptor.
4613 *
4614 * This was originally done in VBoxInternalManage.cpp, but was copied (not moved)
4615 * here much later. That's one of the reasons why we produce a descriptor just
4616 * like it does, rather than mixing directly into the vmdkCreateRawImage code.
4617 *
4618 * @returns VBox status code.
4619 * @param pImage The image.
4620 * @param ppRaw Where to return the raw drive descriptor. Caller must
4621 * free it using vmdkRawDescFree regardless of the status
4622 * code.
4623 * @internal
4624 */
4625static int vmdkMakeRawDescriptor(PVMDKIMAGE pImage, PVDISKRAW *ppRaw)
4626{
4627 /* Make sure it's NULL. */
4628 *ppRaw = NULL;
4629 /*
4630 * Read the configuration.
4631 */
4632 char *pszRawDrive = NULL;
4633 uint32_t fPartitions = 0; /* zero if whole-drive */
4634 uint32_t fPartitionsReadOnly = 0; /* (subset of fPartitions) */
4635 void *pvBootSector = NULL;
4636 size_t cbBootSector = 0;
4637 bool fRelative = false;
4638 char *pszFreeMe = NULL; /* lazy bird cleanup. */
4639 int rc = vmdkRawDescParseConfig(pImage, &pszRawDrive, &fPartitions, &fPartitionsReadOnly,
4640 &pvBootSector, &cbBootSector, &fRelative, &pszFreeMe);
4641 RTStrFree(pszFreeMe);
4642 if (RT_SUCCESS(rc))
4643 {
4644 /*
4645 * Open the device, getting the sector size and drive size.
4646 */
4647 uint64_t cbSize = 0;
4648 uint32_t cbSector = 0;
4649 RTFILE hRawDrive = NIL_RTFILE;
4650 rc = vmkdRawDescOpenDevice(pImage, pszRawDrive, &hRawDrive, &cbSize, &cbSector);
4651 if (RT_SUCCESS(rc))
4652 {
4653 /*
4654 * Create the raw-drive descriptor
4655 */
4656 PVDISKRAW pRawDesc = (PVDISKRAW)RTMemAllocZ(sizeof(*pRawDesc));
4657 if (pRawDesc)
4658 {
4659 pRawDesc->szSignature[0] = 'R';
4660 pRawDesc->szSignature[1] = 'A';
4661 pRawDesc->szSignature[2] = 'W';
4662 //pRawDesc->szSignature[3] = '\0';
4663 if (!fPartitions)
4664 {
4665 /*
4666 * It's simple for when doing the whole drive.
4667 */
4668 pRawDesc->uFlags = VDISKRAW_DISK;
4669 rc = RTStrDupEx(&pRawDesc->pszRawDisk, pszRawDrive);
4670 }
4671 else
4672 {
4673 /*
4674 * In selected partitions mode we've got a lot more work ahead of us.
4675 */
4676 pRawDesc->uFlags = VDISKRAW_NORMAL;
4677 //pRawDesc->pszRawDisk = NULL;
4678 //pRawDesc->cPartDescs = 0;
4679 //pRawDesc->pPartDescs = NULL;
4680 /* We need to parse the partition map to complete the descriptor: */
4681 RTDVM hVolMgr = NIL_RTDVM;
4682 rc = vmdkRawDescOpenVolMgr(pImage, hRawDrive, pszRawDrive, cbSector, &hVolMgr);
4683 if (RT_SUCCESS(rc))
4684 {
4685 RTDVMFORMATTYPE enmFormatType = RTDvmMapGetFormatType(hVolMgr);
4686 if ( enmFormatType == RTDVMFORMATTYPE_MBR
4687 || enmFormatType == RTDVMFORMATTYPE_GPT)
4688 {
4689 pRawDesc->enmPartitioningType = enmFormatType == RTDVMFORMATTYPE_MBR
4690 ? VDISKPARTTYPE_MBR : VDISKPARTTYPE_GPT;
4691 /* Add copies of the partition tables: */
4692 rc = vmdkRawDescDoCopyPartitionTables(pImage, hVolMgr, pRawDesc, pszRawDrive, hRawDrive,
4693 pvBootSector, cbBootSector);
4694 if (RT_SUCCESS(rc))
4695 {
4696 /* Add descriptors for the partitions/volumes, indicating which
4697 should be accessible and how to access them: */
4698 RTDVMVOLUME hVolRelease = NIL_RTDVMVOLUME;
4699 rc = vmdkRawDescDoPartitions(pImage, hVolMgr, pRawDesc, hRawDrive, pszRawDrive, cbSector,
4700 fPartitions, fPartitionsReadOnly, fRelative, &hVolRelease);
4701 RTDvmVolumeRelease(hVolRelease);
4702 /* Finally, sort the partition and check consistency (overlaps, etc): */
4703 if (RT_SUCCESS(rc))
4704 rc = vmdkRawDescPostProcessPartitions(pImage, pRawDesc, cbSize);
4705 }
4706 }
4707 else
4708 rc = vdIfError(pImage->pIfError, VERR_NOT_SUPPORTED, RT_SRC_POS,
4709 N_("VMDK: Image path: '%s'. Unsupported partitioning for the disk '%s': %s"),
4710 pImage->pszFilename, pszRawDrive, RTDvmMapGetFormatType(hVolMgr));
4711 RTDvmRelease(hVolMgr);
4712 }
4713 }
4714 if (RT_SUCCESS(rc))
4715 {
4716 /*
4717 * We succeeded.
4718 */
4719 *ppRaw = pRawDesc;
4720 Log(("vmdkMakeRawDescriptor: fFlags=%#x enmPartitioningType=%d cPartDescs=%u pszRawDisk=%s\n",
4721 pRawDesc->uFlags, pRawDesc->enmPartitioningType, pRawDesc->cPartDescs, pRawDesc->pszRawDisk));
4722 if (pRawDesc->cPartDescs)
4723 {
4724 Log(("# VMDK offset Length Device offset PartDataPtr Device\n"));
4725 for (uint32_t i = 0; i < pRawDesc->cPartDescs; i++)
4726 Log(("%2u %14RU64 %14RU64 %14RU64 %#18p %s\n", i, pRawDesc->pPartDescs[i].offStartInVDisk,
4727 pRawDesc->pPartDescs[i].cbData, pRawDesc->pPartDescs[i].offStartInDevice,
4728 pRawDesc->pPartDescs[i].pvPartitionData, pRawDesc->pPartDescs[i].pszRawDevice));
4729 }
4730 }
4731 else
4732 vmdkRawDescFree(pRawDesc);
4733 }
4734 else
4735 rc = vdIfError(pImage->pIfError, VERR_NO_MEMORY, RT_SRC_POS,
4736 N_("VMDK: Image path: '%s'. Failed to allocate %zu bytes for the raw drive descriptor"),
4737 pImage->pszFilename, sizeof(*pRawDesc));
4738 RTFileClose(hRawDrive);
4739 }
4740 }
4741 RTStrFree(pszRawDrive);
4742 RTMemFree(pvBootSector);
4743 return rc;
4744}
4745/**
4746 * Internal: create VMDK images for raw disk/partition access.
4747 */
4748static int vmdkCreateRawImage(PVMDKIMAGE pImage, const PVDISKRAW pRaw,
4749 uint64_t cbSize)
4750{
4751 int rc = VINF_SUCCESS;
4752 PVMDKEXTENT pExtent;
4753 if (pRaw->uFlags & VDISKRAW_DISK)
4754 {
4755 /* Full raw disk access. This requires setting up a descriptor
4756 * file and opening the (flat) raw disk. */
4757 rc = vmdkCreateExtents(pImage, 1);
4758 if (RT_FAILURE(rc))
4759 return vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: could not create new extent list in '%s'"), pImage->pszFilename);
4760 pExtent = &pImage->pExtents[0];
4761 /* Create raw disk descriptor file. */
4762 rc = vmdkFileOpen(pImage, &pImage->pFile, NULL, pImage->pszFilename,
4763 VDOpenFlagsToFileOpenFlags(pImage->uOpenFlags,
4764 true /* fCreate */));
4765 if (RT_FAILURE(rc))
4766 return vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: could not create new file '%s'"), pImage->pszFilename);
4767 /* Set up basename for extent description. Cannot use StrDup. */
4768 size_t cbBasename = strlen(pRaw->pszRawDisk) + 1;
4769 char *pszBasename = (char *)RTMemTmpAlloc(cbBasename);
4770 if (!pszBasename)
4771 return VERR_NO_MEMORY;
4772 memcpy(pszBasename, pRaw->pszRawDisk, cbBasename);
4773 pExtent->pszBasename = pszBasename;
4774 /* For raw disks the full name is identical to the base name. */
4775 pExtent->pszFullname = RTStrDup(pszBasename);
4776 if (!pExtent->pszFullname)
4777 return VERR_NO_MEMORY;
4778 pExtent->enmType = VMDKETYPE_FLAT;
4779 pExtent->cNominalSectors = VMDK_BYTE2SECTOR(cbSize);
4780 pExtent->uSectorOffset = 0;
4781 pExtent->enmAccess = (pRaw->uFlags & VDISKRAW_READONLY) ? VMDKACCESS_READONLY : VMDKACCESS_READWRITE;
4782 pExtent->fMetaDirty = false;
4783 /* Open flat image, the raw disk. */
4784 rc = vmdkFileOpen(pImage, &pExtent->pFile, pExtent->pszBasename, pExtent->pszFullname,
4785 VDOpenFlagsToFileOpenFlags(pImage->uOpenFlags | ((pExtent->enmAccess == VMDKACCESS_READONLY) ? VD_OPEN_FLAGS_READONLY : 0),
4786 false /* fCreate */));
4787 if (RT_FAILURE(rc))
4788 return vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: could not open raw disk file '%s'"), pExtent->pszFullname);
4789 }
4790 else
4791 {
4792 /* Raw partition access. This requires setting up a descriptor
4793 * file, writing the partition information to a flat extent and
4794 * opening all the (flat) raw disk partitions. */
4795 /* First pass over the partition data areas to determine how many
4796 * extents we need. One data area can require up to 2 extents, as
4797 * it might be necessary to skip over unpartitioned space. */
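/* Illustration: data areas at 1..2MiB and 4..6MiB on an 8MiB disk yield a
   ZERO extent for 0..1MiB, a FLAT extent for the first area, a ZERO extent
   for the 2..4MiB gap, a FLAT extent for the second area and a trailing
   ZERO extent for 6..8MiB - five extents in total. */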
4798 unsigned cExtents = 0;
4799 uint64_t uStart = 0;
4800 for (unsigned i = 0; i < pRaw->cPartDescs; i++)
4801 {
4802 PVDISKRAWPARTDESC pPart = &pRaw->pPartDescs[i];
4803 if (uStart > pPart->offStartInVDisk)
4804 return vdIfError(pImage->pIfError, VERR_INVALID_PARAMETER, RT_SRC_POS,
4805 N_("VMDK: incorrect partition data area ordering set up by the caller in '%s'"), pImage->pszFilename);
4806 if (uStart < pPart->offStartInVDisk)
4807 cExtents++;
4808 uStart = pPart->offStartInVDisk + pPart->cbData;
4809 cExtents++;
4810 }
4811 /* Another extent for filling up the rest of the image. */
4812 if (uStart != cbSize)
4813 cExtents++;
4814 rc = vmdkCreateExtents(pImage, cExtents);
4815 if (RT_FAILURE(rc))
4816 return vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: could not create new extent list in '%s'"), pImage->pszFilename);
4817 /* Create raw partition descriptor file. */
4818 rc = vmdkFileOpen(pImage, &pImage->pFile, NULL, pImage->pszFilename,
4819 VDOpenFlagsToFileOpenFlags(pImage->uOpenFlags,
4820 true /* fCreate */));
4821 if (RT_FAILURE(rc))
4822 return vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: could not create new file '%s'"), pImage->pszFilename);
4823 /* Create base filename for the partition table extent. */
4824 /** @todo remove fixed buffer without creating memory leaks. */
4825 char pszPartition[1024];
4826 const char *pszBase = RTPathFilename(pImage->pszFilename);
4827 const char *pszSuff = RTPathSuffix(pszBase);
4828 if (pszSuff == NULL)
4829 return vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: invalid filename '%s'"), pImage->pszFilename);
4830 char *pszBaseBase = RTStrDup(pszBase);
4831 if (!pszBaseBase)
4832 return VERR_NO_MEMORY;
4833 RTPathStripSuffix(pszBaseBase);
4834 RTStrPrintf(pszPartition, sizeof(pszPartition), "%s-pt%s",
4835 pszBaseBase, pszSuff);
4836 RTStrFree(pszBaseBase);
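/* E.g. for "image.vmdk" the partition table extent file becomes "image-pt.vmdk". */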
4837 /* Second pass over the partitions, now define all extents. */
4838 uint64_t uPartOffset = 0;
4839 cExtents = 0;
4840 uStart = 0;
4841 for (unsigned i = 0; i < pRaw->cPartDescs; i++)
4842 {
4843 PVDISKRAWPARTDESC pPart = &pRaw->pPartDescs[i];
4844 pExtent = &pImage->pExtents[cExtents++];
4845 if (uStart < pPart->offStartInVDisk)
4846 {
4847 pExtent->pszBasename = NULL;
4848 pExtent->pszFullname = NULL;
4849 pExtent->enmType = VMDKETYPE_ZERO;
4850 pExtent->cNominalSectors = VMDK_BYTE2SECTOR(pPart->offStartInVDisk - uStart);
4851 pExtent->uSectorOffset = 0;
4852 pExtent->enmAccess = VMDKACCESS_READWRITE;
4853 pExtent->fMetaDirty = false;
4854 /* go to next extent */
4855 pExtent = &pImage->pExtents[cExtents++];
4856 }
4857 uStart = pPart->offStartInVDisk + pPart->cbData;
4858 if (pPart->pvPartitionData)
4859 {
4860 /* Set up basename for extent description. Can't use StrDup. */
4861 size_t cbBasename = strlen(pszPartition) + 1;
4862 char *pszBasename = (char *)RTMemTmpAlloc(cbBasename);
4863 if (!pszBasename)
4864 return VERR_NO_MEMORY;
4865 memcpy(pszBasename, pszPartition, cbBasename);
4866 pExtent->pszBasename = pszBasename;
4867 /* Set up full name for partition extent. */
4868 char *pszDirname = RTStrDup(pImage->pszFilename);
4869 if (!pszDirname)
4870 return VERR_NO_STR_MEMORY;
4871 RTPathStripFilename(pszDirname);
4872 char *pszFullname = RTPathJoinA(pszDirname, pExtent->pszBasename);
4873 RTStrFree(pszDirname);
4874 if (!pszFullname)
4875 return VERR_NO_STR_MEMORY;
4876 pExtent->pszFullname = pszFullname;
4877 pExtent->enmType = VMDKETYPE_FLAT;
4878 pExtent->cNominalSectors = VMDK_BYTE2SECTOR(pPart->cbData);
4879 pExtent->uSectorOffset = uPartOffset;
4880 pExtent->enmAccess = VMDKACCESS_READWRITE;
4881 pExtent->fMetaDirty = false;
4882 /* Create partition table flat image. */
4883 rc = vmdkFileOpen(pImage, &pExtent->pFile, pExtent->pszBasename, pExtent->pszFullname,
4884 VDOpenFlagsToFileOpenFlags(pImage->uOpenFlags | ((pExtent->enmAccess == VMDKACCESS_READONLY) ? VD_OPEN_FLAGS_READONLY : 0),
4885 true /* fCreate */));
4886 if (RT_FAILURE(rc))
4887 return vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: could not create new partition data file '%s'"), pExtent->pszFullname);
4888 rc = vdIfIoIntFileWriteSync(pImage->pIfIo, pExtent->pFile->pStorage,
4889 VMDK_SECTOR2BYTE(uPartOffset),
4890 pPart->pvPartitionData,
4891 pPart->cbData);
4892 if (RT_FAILURE(rc))
4893 return vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: could not write partition data to '%s'"), pExtent->pszFullname);
4894 uPartOffset += VMDK_BYTE2SECTOR(pPart->cbData);
4895 }
4896 else
4897 {
4898 if (pPart->pszRawDevice)
4899 {
4900 /* Set up basename for extent descr. Can't use StrDup. */
4901 size_t cbBasename = strlen(pPart->pszRawDevice) + 1;
4902 char *pszBasename = (char *)RTMemTmpAlloc(cbBasename);
4903 if (!pszBasename)
4904 return VERR_NO_MEMORY;
4905 memcpy(pszBasename, pPart->pszRawDevice, cbBasename);
4906 pExtent->pszBasename = pszBasename;
4907 /* For raw disks the full name is identical to the base name. */
4908 pExtent->pszFullname = RTStrDup(pszBasename);
4909 if (!pExtent->pszFullname)
4910 return VERR_NO_MEMORY;
4911 pExtent->enmType = VMDKETYPE_FLAT;
4912 pExtent->cNominalSectors = VMDK_BYTE2SECTOR(pPart->cbData);
4913 pExtent->uSectorOffset = VMDK_BYTE2SECTOR(pPart->offStartInDevice);
4914 pExtent->enmAccess = (pPart->uFlags & VDISKRAW_READONLY) ? VMDKACCESS_READONLY : VMDKACCESS_READWRITE;
4915 pExtent->fMetaDirty = false;
4916 /* Open flat image, the raw partition. */
4917 rc = vmdkFileOpen(pImage, &pExtent->pFile, pExtent->pszBasename, pExtent->pszFullname,
4918 VDOpenFlagsToFileOpenFlags(pImage->uOpenFlags | ((pExtent->enmAccess == VMDKACCESS_READONLY) ? VD_OPEN_FLAGS_READONLY : 0),
4919 false /* fCreate */));
4920 if (RT_FAILURE(rc))
4921 return vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: could not open raw partition file '%s'"), pExtent->pszFullname);
4922 }
4923 else
4924 {
4925 pExtent->pszBasename = NULL;
4926 pExtent->pszFullname = NULL;
4927 pExtent->enmType = VMDKETYPE_ZERO;
4928 pExtent->cNominalSectors = VMDK_BYTE2SECTOR(pPart->cbData);
4929 pExtent->uSectorOffset = 0;
4930 pExtent->enmAccess = VMDKACCESS_READWRITE;
4931 pExtent->fMetaDirty = false;
4932 }
4933 }
4934 }
4935 /* Another extent for filling up the rest of the image. */
4936 if (uStart != cbSize)
4937 {
4938 pExtent = &pImage->pExtents[cExtents++];
4939 pExtent->pszBasename = NULL;
4940 pExtent->pszFullname = NULL;
4941 pExtent->enmType = VMDKETYPE_ZERO;
4942 pExtent->cNominalSectors = VMDK_BYTE2SECTOR(cbSize - uStart);
4943 pExtent->uSectorOffset = 0;
4944 pExtent->enmAccess = VMDKACCESS_READWRITE;
4945 pExtent->fMetaDirty = false;
4946 }
4947 }
4948 rc = vmdkDescBaseSetStr(pImage, &pImage->Descriptor, "createType",
4949 (pRaw->uFlags & VDISKRAW_DISK) ?
4950 "fullDevice" : "partitionedDevice");
4951 if (RT_FAILURE(rc))
4952 return vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: could not set the image type in '%s'"), pImage->pszFilename);
4953 return rc;
4954}
4955/**
4956 * Internal: create a regular (i.e. file-backed) VMDK image.
4957 */
4958static int vmdkCreateRegularImage(PVMDKIMAGE pImage, uint64_t cbSize,
4959 unsigned uImageFlags, PVDINTERFACEPROGRESS pIfProgress,
4960 unsigned uPercentStart, unsigned uPercentSpan)
4961{
4962 int rc = VINF_SUCCESS;
4963 unsigned cExtents = 1;
4964 uint64_t cbOffset = 0;
4965 uint64_t cbRemaining = cbSize;
4966 if (uImageFlags & VD_VMDK_IMAGE_FLAGS_SPLIT_2G)
4967 {
4968 cExtents = cbSize / VMDK_2G_SPLIT_SIZE;
4969 /* Do proper extent computation: need one smaller extent if the total
4970 * size isn't evenly divisible by the split size. */
4971 if (cbSize % VMDK_2G_SPLIT_SIZE)
4972 cExtents++;
4973 }
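/* Illustration: with the roughly 2GiB VMDK_2G_SPLIT_SIZE a 5GiB image needs
   two full-size extents plus one smaller remainder extent, i.e. cExtents = 3. */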
4974 rc = vmdkCreateExtents(pImage, cExtents);
4975 if (RT_FAILURE(rc))
4976 return vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: could not create new extent list in '%s'"), pImage->pszFilename);
4977 /* Basename strings needed for constructing the extent names. */
4978 char *pszBasenameSubstr = RTPathFilename(pImage->pszFilename);
4979 AssertPtr(pszBasenameSubstr);
4980 size_t cbBasenameSubstr = strlen(pszBasenameSubstr) + 1;
4981 /* Create separate descriptor file if necessary. */
4982 if (cExtents != 1 || (uImageFlags & VD_IMAGE_FLAGS_FIXED))
4983 {
4984 rc = vmdkFileOpen(pImage, &pImage->pFile, NULL, pImage->pszFilename,
4985 VDOpenFlagsToFileOpenFlags(pImage->uOpenFlags,
4986 true /* fCreate */));
4987 if (RT_FAILURE(rc))
4988 return vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: could not create new sparse descriptor file '%s'"), pImage->pszFilename);
4989 }
4990 else
4991 pImage->pFile = NULL;
4992 /* Set up all extents. */
4993 for (unsigned i = 0; i < cExtents; i++)
4994 {
4995 PVMDKEXTENT pExtent = &pImage->pExtents[i];
4996 uint64_t cbExtent = cbRemaining;
4997 /* Set up fullname/basename for extent description. Cannot use StrDup
4998 * for basename, as it is not guaranteed that the memory can be freed
4999 * with RTMemTmpFree, which must be used as in other code paths
5000 * StrDup is not usable. */
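/* Naming scheme used below: a single sparse extent reuses the image name
   itself; fixed images use <base>-flat<suffix> (single) or <base>-f001<suffix>,
   <base>-f002<suffix>, ... (split); split sparse images use
   <base>-s001<suffix>, <base>-s002<suffix>, ... */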
5001 if (cExtents == 1 && !(uImageFlags & VD_IMAGE_FLAGS_FIXED))
5002 {
5003 char *pszBasename = (char *)RTMemTmpAlloc(cbBasenameSubstr);
5004 if (!pszBasename)
5005 return VERR_NO_MEMORY;
5006 memcpy(pszBasename, pszBasenameSubstr, cbBasenameSubstr);
5007 pExtent->pszBasename = pszBasename;
5008 }
5009 else
5010 {
5011 char *pszBasenameSuff = RTPathSuffix(pszBasenameSubstr);
5012 char *pszBasenameBase = RTStrDup(pszBasenameSubstr);
5013 RTPathStripSuffix(pszBasenameBase);
5014 char *pszTmp;
5015 size_t cbTmp;
5016 if (uImageFlags & VD_IMAGE_FLAGS_FIXED)
5017 {
5018 if (cExtents == 1)
5019 RTStrAPrintf(&pszTmp, "%s-flat%s", pszBasenameBase,
5020 pszBasenameSuff);
5021 else
5022 RTStrAPrintf(&pszTmp, "%s-f%03d%s", pszBasenameBase,
5023 i+1, pszBasenameSuff);
5024 }
5025 else
5026 RTStrAPrintf(&pszTmp, "%s-s%03d%s", pszBasenameBase, i+1,
5027 pszBasenameSuff);
5028 RTStrFree(pszBasenameBase);
5029 if (!pszTmp)
5030 return VERR_NO_STR_MEMORY;
5031 cbTmp = strlen(pszTmp) + 1;
5032 char *pszBasename = (char *)RTMemTmpAlloc(cbTmp);
5033 if (!pszBasename)
5034 {
5035 RTStrFree(pszTmp);
5036 return VERR_NO_MEMORY;
5037 }
5038 memcpy(pszBasename, pszTmp, cbTmp);
5039 RTStrFree(pszTmp);
5040 pExtent->pszBasename = pszBasename;
5041 if (uImageFlags & VD_VMDK_IMAGE_FLAGS_SPLIT_2G)
5042 cbExtent = RT_MIN(cbRemaining, VMDK_2G_SPLIT_SIZE);
5043 }
5044 char *pszBasedirectory = RTStrDup(pImage->pszFilename);
5045 if (!pszBasedirectory)
5046 return VERR_NO_STR_MEMORY;
5047 RTPathStripFilename(pszBasedirectory);
5048 char *pszFullname = RTPathJoinA(pszBasedirectory, pExtent->pszBasename);
5049 RTStrFree(pszBasedirectory);
5050 if (!pszFullname)
5051 return VERR_NO_STR_MEMORY;
5052 pExtent->pszFullname = pszFullname;
5053 /* Create file for extent. */
5054 rc = vmdkFileOpen(pImage, &pExtent->pFile, pExtent->pszBasename, pExtent->pszFullname,
5055 VDOpenFlagsToFileOpenFlags(pImage->uOpenFlags,
5056 true /* fCreate */));
5057 if (RT_FAILURE(rc))
5058 return vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: could not create new file '%s'"), pExtent->pszFullname);
5059 if (uImageFlags & VD_IMAGE_FLAGS_FIXED)
5060 {
5061 rc = vdIfIoIntFileSetAllocationSize(pImage->pIfIo, pExtent->pFile->pStorage, cbExtent,
5062 0 /* fFlags */, pIfProgress,
5063 uPercentStart + cbOffset * uPercentSpan / cbSize,
5064 cbExtent * uPercentSpan / cbSize);
5065 if (RT_FAILURE(rc))
5066 return vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: could not set size of new file '%s'"), pExtent->pszFullname);
5067 }
5068 /* Place descriptor file information (where integrated). */
5069 if (cExtents == 1 && !(uImageFlags & VD_IMAGE_FLAGS_FIXED))
5070 {
5071 pExtent->uDescriptorSector = 1;
5072 pExtent->cDescriptorSectors = VMDK_BYTE2SECTOR(pImage->cbDescAlloc);
5073 /* The descriptor is part of the (only) extent. */
5074 pExtent->pDescData = pImage->pDescData;
5075 pImage->pDescData = NULL;
5076 }
5077 if (!(uImageFlags & VD_IMAGE_FLAGS_FIXED))
5078 {
5079 uint64_t cSectorsPerGDE, cSectorsPerGD;
5080 pExtent->enmType = VMDKETYPE_HOSTED_SPARSE;
5081 pExtent->cSectors = VMDK_BYTE2SECTOR(RT_ALIGN_64(cbExtent, _64K));
5082 pExtent->cSectorsPerGrain = VMDK_BYTE2SECTOR(_64K);
5083 pExtent->cGTEntries = 512;
5084 cSectorsPerGDE = pExtent->cGTEntries * pExtent->cSectorsPerGrain;
5085 pExtent->cSectorsPerGDE = cSectorsPerGDE;
5086 pExtent->cGDEntries = (pExtent->cSectors + cSectorsPerGDE - 1) / cSectorsPerGDE;
5087 cSectorsPerGD = (pExtent->cGDEntries + (512 / sizeof(uint32_t) - 1)) / (512 / sizeof(uint32_t));
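/* With 64KiB grains (128 sectors) and 512 grain table entries each grain
   table covers 512 * 128 = 65536 sectors (32MiB); a 2GiB extent thus needs
   64 grain directory entries. */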
5088 if (pImage->uImageFlags & VD_VMDK_IMAGE_FLAGS_STREAM_OPTIMIZED)
5089 {
5090 /* The spec says version is 1 for all VMDKs, but the vast
5091 * majority of streamOptimized VMDKs actually contain
5092 * version 3 - so go with the majority. Both are accepted. */
5093 pExtent->uVersion = 3;
5094 pExtent->uCompression = VMDK_COMPRESSION_DEFLATE;
5095 }
5096 }
5097 else
5098 {
5099 if (uImageFlags & VD_VMDK_IMAGE_FLAGS_ESX)
5100 pExtent->enmType = VMDKETYPE_VMFS;
5101 else
5102 pExtent->enmType = VMDKETYPE_FLAT;
5103 }
5104 pExtent->enmAccess = VMDKACCESS_READWRITE;
5105 pExtent->fUncleanShutdown = true;
5106 pExtent->cNominalSectors = VMDK_BYTE2SECTOR(cbExtent);
5107 pExtent->uSectorOffset = 0;
5108 pExtent->fMetaDirty = true;
5109 if (!(uImageFlags & VD_IMAGE_FLAGS_FIXED))
5110 {
5111 /* fPreAlloc should never be false because VMware can't use such images. */
5112 rc = vmdkCreateGrainDirectory(pImage, pExtent,
5113 RT_MAX( pExtent->uDescriptorSector
5114 + pExtent->cDescriptorSectors,
5115 1),
5116 true /* fPreAlloc */);
5117 if (RT_FAILURE(rc))
5118 return vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: could not create new grain directory in '%s'"), pExtent->pszFullname);
5119 }
5120 cbOffset += cbExtent;
5121 if (RT_SUCCESS(rc))
5122 vdIfProgress(pIfProgress, uPercentStart + cbOffset * uPercentSpan / cbSize);
5123 cbRemaining -= cbExtent;
5124 }
5125 if (pImage->uImageFlags & VD_VMDK_IMAGE_FLAGS_ESX)
5126 {
5127 /* VirtualBox doesn't care, but VMware ESX freaks out if the wrong
5128 * controller type is set in an image. */
5129 rc = vmdkDescDDBSetStr(pImage, &pImage->Descriptor, "ddb.adapterType", "lsilogic");
5130 if (RT_FAILURE(rc))
5131 return vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: could not set controller type to lsilogic in '%s'"), pImage->pszFilename);
5132 }
5133 const char *pszDescType = NULL;
5134 if (uImageFlags & VD_IMAGE_FLAGS_FIXED)
5135 {
5136 if (pImage->uImageFlags & VD_VMDK_IMAGE_FLAGS_ESX)
5137 pszDescType = "vmfs";
5138 else
5139 pszDescType = (cExtents == 1)
5140 ? "monolithicFlat" : "twoGbMaxExtentFlat";
5141 }
5142 else
5143 {
5144 if (pImage->uImageFlags & VD_VMDK_IMAGE_FLAGS_STREAM_OPTIMIZED)
5145 pszDescType = "streamOptimized";
5146 else
5147 {
5148 pszDescType = (cExtents == 1)
5149 ? "monolithicSparse" : "twoGbMaxExtentSparse";
5150 }
5151 }
5152 rc = vmdkDescBaseSetStr(pImage, &pImage->Descriptor, "createType",
5153 pszDescType);
5154 if (RT_FAILURE(rc))
5155 return vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: could not set the image type in '%s'"), pImage->pszFilename);
5156 return rc;
5157}
5158/**
5159 * Internal: Create a real stream optimized VMDK using only linear writes.
5160 */
5161static int vmdkCreateStreamImage(PVMDKIMAGE pImage, uint64_t cbSize)
5162{
5163 int rc = vmdkCreateExtents(pImage, 1);
5164 if (RT_FAILURE(rc))
5165 return vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: could not create new extent list in '%s'"), pImage->pszFilename);
5166 /* Basename strings needed for constructing the extent names. */
5167 const char *pszBasenameSubstr = RTPathFilename(pImage->pszFilename);
5168 AssertPtr(pszBasenameSubstr);
5169 size_t cbBasenameSubstr = strlen(pszBasenameSubstr) + 1;
5170 /* No separate descriptor file. */
5171 pImage->pFile = NULL;
5172 /* Set up all extents. */
5173 PVMDKEXTENT pExtent = &pImage->pExtents[0];
5174 /* Set up fullname/basename for extent description. Cannot use StrDup
5175 * for basename, as it is not guaranteed that the memory can be freed
5176 * with RTMemTmpFree, which must be used as in other code paths
5177 * StrDup is not usable. */
5178 char *pszBasename = (char *)RTMemTmpAlloc(cbBasenameSubstr);
5179 if (!pszBasename)
5180 return VERR_NO_MEMORY;
5181 memcpy(pszBasename, pszBasenameSubstr, cbBasenameSubstr);
5182 pExtent->pszBasename = pszBasename;
5183 char *pszBasedirectory = RTStrDup(pImage->pszFilename);
5184 RTPathStripFilename(pszBasedirectory);
5185 char *pszFullname = RTPathJoinA(pszBasedirectory, pExtent->pszBasename);
5186 RTStrFree(pszBasedirectory);
5187 if (!pszFullname)
5188 return VERR_NO_STR_MEMORY;
5189 pExtent->pszFullname = pszFullname;
5190 /* Create file for extent. Make it write only, no reading allowed. */
5191 rc = vmdkFileOpen(pImage, &pExtent->pFile, pExtent->pszBasename, pExtent->pszFullname,
5192 VDOpenFlagsToFileOpenFlags(pImage->uOpenFlags,
5193 true /* fCreate */)
5194 & ~RTFILE_O_READ);
5195 if (RT_FAILURE(rc))
5196 return vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: could not create new file '%s'"), pExtent->pszFullname);
5197 /* Place descriptor file information. */
5198 pExtent->uDescriptorSector = 1;
5199 pExtent->cDescriptorSectors = VMDK_BYTE2SECTOR(pImage->cbDescAlloc);
5200 /* The descriptor is part of the (only) extent. */
5201 pExtent->pDescData = pImage->pDescData;
5202 pImage->pDescData = NULL;
5203 uint64_t cSectorsPerGDE, cSectorsPerGD;
5204 pExtent->enmType = VMDKETYPE_HOSTED_SPARSE;
5205 pExtent->cSectors = VMDK_BYTE2SECTOR(RT_ALIGN_64(cbSize, _64K));
5206 pExtent->cSectorsPerGrain = VMDK_BYTE2SECTOR(_64K);
5207 pExtent->cGTEntries = 512;
5208 cSectorsPerGDE = pExtent->cGTEntries * pExtent->cSectorsPerGrain;
5209 pExtent->cSectorsPerGDE = cSectorsPerGDE;
5210 pExtent->cGDEntries = (pExtent->cSectors + cSectorsPerGDE - 1) / cSectorsPerGDE;
5211 cSectorsPerGD = (pExtent->cGDEntries + (512 / sizeof(uint32_t) - 1)) / (512 / sizeof(uint32_t));
5212 /* The spec says version is 1 for all VMDKs, but the vast
5213 * majority of streamOptimized VMDKs actually contain
5214 * version 3 - so go with the majority. Both are accepted. */
5215 pExtent->uVersion = 3;
5216 pExtent->uCompression = VMDK_COMPRESSION_DEFLATE;
5217 pExtent->fFooter = true;
5218 pExtent->enmAccess = VMDKACCESS_READONLY;
5219 pExtent->fUncleanShutdown = false;
5220 pExtent->cNominalSectors = VMDK_BYTE2SECTOR(cbSize);
5221 pExtent->uSectorOffset = 0;
5222 pExtent->fMetaDirty = true;
5223 /* Create grain directory, without preallocating it straight away. It will
5224 * be constructed on the fly when writing out the data and written when
5225 * closing the image. The end effect is that the full grain directory is
5226 * allocated, which is a requirement of the VMDK specs. */
5227 rc = vmdkCreateGrainDirectory(pImage, pExtent, VMDK_GD_AT_END,
5228 false /* fPreAlloc */);
5229 if (RT_FAILURE(rc))
5230 return vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: could not create new grain directory in '%s'"), pExtent->pszFullname);
5231 rc = vmdkDescBaseSetStr(pImage, &pImage->Descriptor, "createType",
5232 "streamOptimized");
5233 if (RT_FAILURE(rc))
5234 rc = vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: could not set the image type in '%s'"), pImage->pszFilename);
5235 return rc;
5236}
5237/**
5238 * Initializes the UUID fields in the DDB.
5239 *
5240 * @returns VBox status code.
5241 * @param pImage The VMDK image instance.
5242 */
5243static int vmdkCreateImageDdbUuidsInit(PVMDKIMAGE pImage)
5244{
5245 int rc = vmdkDescDDBSetUuid(pImage, &pImage->Descriptor, VMDK_DDB_IMAGE_UUID, &pImage->ImageUuid);
5246 if (RT_SUCCESS(rc))
5247 {
5248 rc = vmdkDescDDBSetUuid(pImage, &pImage->Descriptor, VMDK_DDB_PARENT_UUID, &pImage->ParentUuid);
5249 if (RT_SUCCESS(rc))
5250 {
5251 rc = vmdkDescDDBSetUuid(pImage, &pImage->Descriptor, VMDK_DDB_MODIFICATION_UUID,
5252 &pImage->ModificationUuid);
5253 if (RT_SUCCESS(rc))
5254 {
5255 rc = vmdkDescDDBSetUuid(pImage, &pImage->Descriptor, VMDK_DDB_PARENT_MODIFICATION_UUID,
5256 &pImage->ParentModificationUuid);
5257 if (RT_FAILURE(rc))
5258 rc = vdIfError(pImage->pIfError, rc, RT_SRC_POS,
5259 N_("VMDK: error storing parent modification UUID in new descriptor in '%s'"), pImage->pszFilename);
5260 }
5261 else
5262 rc = vdIfError(pImage->pIfError, rc, RT_SRC_POS,
5263 N_("VMDK: error storing modification UUID in new descriptor in '%s'"), pImage->pszFilename);
5264 }
5265 else
5266 rc = vdIfError(pImage->pIfError, rc, RT_SRC_POS,
5267 N_("VMDK: error storing parent image UUID in new descriptor in '%s'"), pImage->pszFilename);
5268 }
5269 else
5270 rc = vdIfError(pImage->pIfError, rc, RT_SRC_POS,
5271 N_("VMDK: error storing image UUID in new descriptor in '%s'"), pImage->pszFilename);
5272 return rc;
5273}
5274/**
5275 * Internal: The actual code for creating any VMDK variant currently in
5276 * existence on hosted environments.
5277 */
5278static int vmdkCreateImage(PVMDKIMAGE pImage, uint64_t cbSize,
5279 unsigned uImageFlags, const char *pszComment,
5280 PCVDGEOMETRY pPCHSGeometry,
5281 PCVDGEOMETRY pLCHSGeometry, PCRTUUID pUuid,
5282 PVDINTERFACEPROGRESS pIfProgress,
5283 unsigned uPercentStart, unsigned uPercentSpan)
5284{
5285 pImage->uImageFlags = uImageFlags;
5286 pImage->pIfError = VDIfErrorGet(pImage->pVDIfsDisk);
5287 pImage->pIfIo = VDIfIoIntGet(pImage->pVDIfsImage);
5288 AssertPtrReturn(pImage->pIfIo, VERR_INVALID_PARAMETER);
5289 int rc = vmdkCreateDescriptor(pImage, pImage->pDescData, pImage->cbDescAlloc,
5290 &pImage->Descriptor);
5291 if (RT_SUCCESS(rc))
5292 {
5293 if (uImageFlags & VD_VMDK_IMAGE_FLAGS_RAWDISK)
5294 {
5295 /* Raw disk image (includes raw partition). */
5296 PVDISKRAW pRaw = NULL;
5297 rc = vmdkMakeRawDescriptor(pImage, &pRaw);
5298 if (RT_FAILURE(rc))
5299 return vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: could get raw descriptor for '%s'"), pImage->pszFilename);
5300 rc = vmdkCreateRawImage(pImage, pRaw, cbSize);
5301 vmdkRawDescFree(pRaw);
5302 }
5303 else if (uImageFlags & VD_VMDK_IMAGE_FLAGS_STREAM_OPTIMIZED)
5304 {
5305 /* Stream optimized sparse image (monolithic). */
5306 rc = vmdkCreateStreamImage(pImage, cbSize);
5307 }
5308 else
5309 {
5310 /* Regular fixed or sparse image (monolithic or split). */
5311 rc = vmdkCreateRegularImage(pImage, cbSize, uImageFlags,
5312 pIfProgress, uPercentStart,
5313 uPercentSpan * 95 / 100);
5314 }
5315 if (RT_SUCCESS(rc))
5316 {
5317 vdIfProgress(pIfProgress, uPercentStart + uPercentSpan * 98 / 100);
5318 pImage->cbSize = cbSize;
5319 for (unsigned i = 0; i < pImage->cExtents; i++)
5320 {
5321 PVMDKEXTENT pExtent = &pImage->pExtents[i];
5322 rc = vmdkDescExtInsert(pImage, &pImage->Descriptor, pExtent->enmAccess,
5323 pExtent->cNominalSectors, pExtent->enmType,
5324 pExtent->pszBasename, pExtent->uSectorOffset);
5325 if (RT_FAILURE(rc))
5326 {
5327 rc = vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: could not insert the extent list into descriptor in '%s'"), pImage->pszFilename);
5328 break;
5329 }
5330 }
5331 if (RT_SUCCESS(rc))
5332 vmdkDescExtRemoveDummy(pImage, &pImage->Descriptor);
5333 if ( RT_SUCCESS(rc)
5334 && pPCHSGeometry->cCylinders != 0
5335 && pPCHSGeometry->cHeads != 0
5336 && pPCHSGeometry->cSectors != 0)
5337 rc = vmdkDescSetPCHSGeometry(pImage, pPCHSGeometry);
5338 if ( RT_SUCCESS(rc)
5339 && pLCHSGeometry->cCylinders != 0
5340 && pLCHSGeometry->cHeads != 0
5341 && pLCHSGeometry->cSectors != 0)
5342 rc = vmdkDescSetLCHSGeometry(pImage, pLCHSGeometry);
5343 pImage->LCHSGeometry = *pLCHSGeometry;
5344 pImage->PCHSGeometry = *pPCHSGeometry;
5345 pImage->ImageUuid = *pUuid;
5346 RTUuidClear(&pImage->ParentUuid);
5347 RTUuidClear(&pImage->ModificationUuid);
5348 RTUuidClear(&pImage->ParentModificationUuid);
5349 if (RT_SUCCESS(rc))
5350 rc = vmdkCreateImageDdbUuidsInit(pImage);
5351 if (RT_SUCCESS(rc))
5352 rc = vmdkAllocateGrainTableCache(pImage);
5353 if (RT_SUCCESS(rc))
5354 {
5355 rc = vmdkSetImageComment(pImage, pszComment);
5356 if (RT_FAILURE(rc))
5357 rc = vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: cannot set image comment in '%s'"), pImage->pszFilename);
5358 }
5359 if (RT_SUCCESS(rc))
5360 {
5361 vdIfProgress(pIfProgress, uPercentStart + uPercentSpan * 99 / 100);
5362 if (pImage->uImageFlags & VD_VMDK_IMAGE_FLAGS_STREAM_OPTIMIZED)
5363 {
5364 /* streamOptimized is a bit special, we cannot trigger the flush
5365 * until all data has been written. So we write the necessary
5366 * information explicitly. */
5367 pImage->pExtents[0].cDescriptorSectors = VMDK_BYTE2SECTOR(RT_ALIGN_64( pImage->Descriptor.aLines[pImage->Descriptor.cLines]
5368 - pImage->Descriptor.aLines[0], 512));
5369 rc = vmdkWriteMetaSparseExtent(pImage, &pImage->pExtents[0], 0, NULL);
5370 if (RT_SUCCESS(rc))
5371 {
5372 rc = vmdkWriteDescriptor(pImage, NULL);
5373 if (RT_FAILURE(rc))
5374 rc = vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: cannot write VMDK descriptor in '%s'"), pImage->pszFilename);
5375 }
5376 else
5377 rc = vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: cannot write VMDK header in '%s'"), pImage->pszFilename);
5378 }
5379 else
5380 rc = vmdkFlushImage(pImage, NULL);
5381 }
5382 }
5383 }
5384 else
5385 rc = vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: could not create new descriptor in '%s'"), pImage->pszFilename);
5386 if (RT_SUCCESS(rc))
5387 {
5388 PVDREGIONDESC pRegion = &pImage->RegionList.aRegions[0];
5389 pImage->RegionList.fFlags = 0;
5390 pImage->RegionList.cRegions = 1;
5391 pRegion->offRegion = 0; /* Disk start. */
5392 pRegion->cbBlock = 512;
5393 pRegion->enmDataForm = VDREGIONDATAFORM_RAW;
5394 pRegion->enmMetadataForm = VDREGIONMETADATAFORM_NONE;
5395 pRegion->cbData = 512;
5396 pRegion->cbMetadata = 0;
5397 pRegion->cRegionBlocksOrBytes = pImage->cbSize;
5398 vdIfProgress(pIfProgress, uPercentStart + uPercentSpan);
5399 }
5400 else
5401 vmdkFreeImage(pImage, rc != VERR_ALREADY_EXISTS, false /*fFlush*/);
5402 return rc;
5403}
5404/**
5405 * Internal: Update image comment.
5406 */
5407static int vmdkSetImageComment(PVMDKIMAGE pImage, const char *pszComment)
5408{
5409 char *pszCommentEncoded = NULL;
5410 if (pszComment)
5411 {
5412 pszCommentEncoded = vmdkEncodeString(pszComment);
5413 if (!pszCommentEncoded)
5414 return VERR_NO_MEMORY;
5415 }
5416 int rc = vmdkDescDDBSetStr(pImage, &pImage->Descriptor,
5417 "ddb.comment", pszCommentEncoded);
5418 if (pszCommentEncoded)
5419 RTStrFree(pszCommentEncoded);
5420 if (RT_FAILURE(rc))
5421 return vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: error storing image comment in descriptor in '%s'"), pImage->pszFilename);
5422 return VINF_SUCCESS;
5423}
5424/**
5425 * Internal. Clear the grain table buffer for real stream optimized writing.
5426 */
5427static void vmdkStreamClearGT(PVMDKIMAGE pImage, PVMDKEXTENT pExtent)
5428{
5429 uint32_t cCacheLines = RT_ALIGN(pExtent->cGTEntries, VMDK_GT_CACHELINE_SIZE) / VMDK_GT_CACHELINE_SIZE;
5430 for (uint32_t i = 0; i < cCacheLines; i++)
5431 memset(&pImage->pGTCache->aGTCache[i].aGTData[0], '\0',
5432 VMDK_GT_CACHELINE_SIZE * sizeof(uint32_t));
5433}
5434/**
5435 * Internal. Flush the grain table buffer for real stream optimized writing.
5436 */
5437static int vmdkStreamFlushGT(PVMDKIMAGE pImage, PVMDKEXTENT pExtent,
5438 uint32_t uGDEntry)
5439{
5440 int rc = VINF_SUCCESS;
5441 uint32_t cCacheLines = RT_ALIGN(pExtent->cGTEntries, VMDK_GT_CACHELINE_SIZE) / VMDK_GT_CACHELINE_SIZE;
5442 /* VMware does not write out completely empty grain tables in the case
5443 * of streamOptimized images, which according to my interpretation of
5444 * the VMDK 1.1 spec is bending the rules. Since they do it and we can
5445 * handle it without problems, do it the same way and save some bytes. */
5446 bool fAllZero = true;
5447 for (uint32_t i = 0; i < cCacheLines; i++)
5448 {
5449 /* Convert the grain table to little endian in place, as it will not
5450 * be used at all after this function has been called. */
5451 uint32_t *pGTTmp = &pImage->pGTCache->aGTCache[i].aGTData[0];
5452 for (uint32_t j = 0; j < VMDK_GT_CACHELINE_SIZE; j++, pGTTmp++)
5453 if (*pGTTmp)
5454 {
5455 fAllZero = false;
5456 break;
5457 }
5458 if (!fAllZero)
5459 break;
5460 }
5461 if (fAllZero)
5462 return VINF_SUCCESS;
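/* Layout written below: a one-sector grain table marker followed by the
   grain table itself (cGTEntries * sizeof(uint32_t) bytes, little endian),
   with the append position re-aligned to a sector boundary afterwards. */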
5463 uint64_t uFileOffset = pExtent->uAppendPosition;
5464 if (!uFileOffset)
5465 return VERR_INTERNAL_ERROR;
5466 /* Align to sector, as the previous write could have been any size. */
5467 uFileOffset = RT_ALIGN_64(uFileOffset, 512);
5468 /* Grain table marker. */
5469 uint8_t aMarker[512];
5470 PVMDKMARKER pMarker = (PVMDKMARKER)&aMarker[0];
5471 memset(pMarker, '\0', sizeof(aMarker));
5472 pMarker->uSector = RT_H2LE_U64(VMDK_BYTE2SECTOR((uint64_t)pExtent->cGTEntries * sizeof(uint32_t)));
5473 pMarker->uType = RT_H2LE_U32(VMDK_MARKER_GT);
5474 rc = vdIfIoIntFileWriteSync(pImage->pIfIo, pExtent->pFile->pStorage, uFileOffset,
5475 aMarker, sizeof(aMarker));
5476 AssertRC(rc);
5477 uFileOffset += 512;
5478 if (!pExtent->pGD || pExtent->pGD[uGDEntry])
5479 return VERR_INTERNAL_ERROR;
5480 pExtent->pGD[uGDEntry] = VMDK_BYTE2SECTOR(uFileOffset);
5481 for (uint32_t i = 0; i < cCacheLines; i++)
5482 {
5483 /* Convert the grain table to little endian in place, as it will not
5484 * be used at all after this function has been called. */
5485 uint32_t *pGTTmp = &pImage->pGTCache->aGTCache[i].aGTData[0];
5486 for (uint32_t j = 0; j < VMDK_GT_CACHELINE_SIZE; j++, pGTTmp++)
5487 *pGTTmp = RT_H2LE_U32(*pGTTmp);
5488 rc = vdIfIoIntFileWriteSync(pImage->pIfIo, pExtent->pFile->pStorage, uFileOffset,
5489 &pImage->pGTCache->aGTCache[i].aGTData[0],
5490 VMDK_GT_CACHELINE_SIZE * sizeof(uint32_t));
5491 uFileOffset += VMDK_GT_CACHELINE_SIZE * sizeof(uint32_t);
5492 if (RT_FAILURE(rc))
5493 break;
5494 }
5495 Assert(!(uFileOffset % 512));
5496 pExtent->uAppendPosition = RT_ALIGN_64(uFileOffset, 512);
5497 return rc;
5498}
5499/**
5500 * Internal. Free all allocated space for representing an image, and optionally
5501 * delete the image from disk.
5502 */
5503static int vmdkFreeImage(PVMDKIMAGE pImage, bool fDelete, bool fFlush)
5504{
5505 int rc = VINF_SUCCESS;
5506 /* Freeing a never allocated image (e.g. because the open failed) is
5507 * not signalled as an error. After all nothing bad happens. */
5508 if (pImage)
5509 {
5510 if (!(pImage->uOpenFlags & VD_OPEN_FLAGS_READONLY))
5511 {
5512 if (pImage->uImageFlags & VD_VMDK_IMAGE_FLAGS_STREAM_OPTIMIZED)
5513 {
5514 /* Check if all extents are clean. */
5515 for (unsigned i = 0; i < pImage->cExtents; i++)
5516 {
5517 Assert(!pImage->pExtents[i].fUncleanShutdown);
5518 }
5519 }
5520 else
5521 {
5522 /* Mark all extents as clean. */
5523 for (unsigned i = 0; i < pImage->cExtents; i++)
5524 {
5525 if ( pImage->pExtents[i].enmType == VMDKETYPE_HOSTED_SPARSE
5526 && pImage->pExtents[i].fUncleanShutdown)
5527 {
5528 pImage->pExtents[i].fUncleanShutdown = false;
5529 pImage->pExtents[i].fMetaDirty = true;
5530 }
5531 /* From now on it's not safe to append any more data. */
5532 pImage->pExtents[i].uAppendPosition = 0;
5533 }
5534 }
5535 }
5536 if (pImage->uImageFlags & VD_VMDK_IMAGE_FLAGS_STREAM_OPTIMIZED)
5537 {
5538 /* No need to write any pending data if the file will be deleted
5539 * or if the new file wasn't successfully created. */
5540 if ( !fDelete && pImage->pExtents
5541 && pImage->pExtents[0].cGTEntries
5542 && pImage->pExtents[0].uAppendPosition)
5543 {
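/* This block appends the streamOptimized closing sequence: the remaining
   grain tables are flushed, then a grain directory marker plus the grain
   directory, a footer marker plus the footer (a copy of the sparse header)
   and finally the end-of-stream marker are written, each sector aligned. */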
5544 PVMDKEXTENT pExtent = &pImage->pExtents[0];
5545 uint32_t uLastGDEntry = pExtent->uLastGrainAccess / pExtent->cGTEntries;
5546 rc = vmdkStreamFlushGT(pImage, pExtent, uLastGDEntry);
5547 AssertRC(rc);
5548 vmdkStreamClearGT(pImage, pExtent);
5549 for (uint32_t i = uLastGDEntry + 1; i < pExtent->cGDEntries; i++)
5550 {
5551 rc = vmdkStreamFlushGT(pImage, pExtent, i);
5552 AssertRC(rc);
5553 }
5554 uint64_t uFileOffset = pExtent->uAppendPosition;
5555 if (!uFileOffset)
5556 return VERR_INTERNAL_ERROR;
5557 uFileOffset = RT_ALIGN_64(uFileOffset, 512);
5558 /* From now on it's not safe to append any more data. */
5559 pExtent->uAppendPosition = 0;
5560 /* Grain directory marker. */
5561 uint8_t aMarker[512];
5562 PVMDKMARKER pMarker = (PVMDKMARKER)&aMarker[0];
5563 memset(pMarker, '\0', sizeof(aMarker));
5564 pMarker->uSector = VMDK_BYTE2SECTOR(RT_ALIGN_64(RT_H2LE_U64((uint64_t)pExtent->cGDEntries * sizeof(uint32_t)), 512));
5565 pMarker->uType = RT_H2LE_U32(VMDK_MARKER_GD);
5566 rc = vdIfIoIntFileWriteSync(pImage->pIfIo, pExtent->pFile->pStorage, uFileOffset,
5567 aMarker, sizeof(aMarker));
5568 AssertRC(rc);
5569 uFileOffset += 512;
5570 /* Write grain directory in little endian style. The array will
5571 * not be used after this, so convert in place. */
5572 uint32_t *pGDTmp = pExtent->pGD;
5573 for (uint32_t i = 0; i < pExtent->cGDEntries; i++, pGDTmp++)
5574 *pGDTmp = RT_H2LE_U32(*pGDTmp);
5575 rc = vdIfIoIntFileWriteSync(pImage->pIfIo, pExtent->pFile->pStorage,
5576 uFileOffset, pExtent->pGD,
5577 pExtent->cGDEntries * sizeof(uint32_t));
5578 AssertRC(rc);
5579 pExtent->uSectorGD = VMDK_BYTE2SECTOR(uFileOffset);
5580 pExtent->uSectorRGD = VMDK_BYTE2SECTOR(uFileOffset);
5581 uFileOffset = RT_ALIGN_64( uFileOffset
5582 + pExtent->cGDEntries * sizeof(uint32_t),
5583 512);
5584 /* Footer marker. */
5585 memset(pMarker, '\0', sizeof(aMarker));
5586 pMarker->uSector = VMDK_BYTE2SECTOR(512);
5587 pMarker->uType = RT_H2LE_U32(VMDK_MARKER_FOOTER);
5588 rc = vdIfIoIntFileWriteSync(pImage->pIfIo, pExtent->pFile->pStorage,
5589 uFileOffset, aMarker, sizeof(aMarker));
5590 AssertRC(rc);
5591 uFileOffset += 512;
5592 rc = vmdkWriteMetaSparseExtent(pImage, pExtent, uFileOffset, NULL);
5593 AssertRC(rc);
5594 uFileOffset += 512;
5595 /* End-of-stream marker. */
5596 memset(pMarker, '\0', sizeof(aMarker));
5597 rc = vdIfIoIntFileWriteSync(pImage->pIfIo, pExtent->pFile->pStorage,
5598 uFileOffset, aMarker, sizeof(aMarker));
5599 AssertRC(rc);
5600 }
5601 }
5602 else if (!fDelete && fFlush)
5603 vmdkFlushImage(pImage, NULL);
5604 if (pImage->pExtents != NULL)
5605 {
5606 for (unsigned i = 0 ; i < pImage->cExtents; i++)
5607 {
5608 int rc2 = vmdkFreeExtentData(pImage, &pImage->pExtents[i], fDelete);
5609 if (RT_SUCCESS(rc))
5610 rc = rc2; /* Propagate any error when closing the file. */
5611 }
5612 RTMemFree(pImage->pExtents);
5613 pImage->pExtents = NULL;
5614 }
5615 pImage->cExtents = 0;
5616 if (pImage->pFile != NULL)
5617 {
5618 int rc2 = vmdkFileClose(pImage, &pImage->pFile, fDelete);
5619 if (RT_SUCCESS(rc))
5620 rc = rc2; /* Propagate any error when closing the file. */
5621 }
5622 int rc2 = vmdkFileCheckAllClose(pImage);
5623 if (RT_SUCCESS(rc))
5624 rc = rc2; /* Propagate any error when closing the file. */
5625 if (pImage->pGTCache)
5626 {
5627 RTMemFree(pImage->pGTCache);
5628 pImage->pGTCache = NULL;
5629 }
5630 if (pImage->pDescData)
5631 {
5632 RTMemFree(pImage->pDescData);
5633 pImage->pDescData = NULL;
5634 }
5635 }
5636 LogFlowFunc(("returns %Rrc\n", rc));
5637 return rc;
5638}
5639/**
5640 * Internal. Flush image data (and metadata) to disk.
5641 */
5642static int vmdkFlushImage(PVMDKIMAGE pImage, PVDIOCTX pIoCtx)
5643{
5644 PVMDKEXTENT pExtent;
5645 int rc = VINF_SUCCESS;
5646 /* Update descriptor if changed. */
5647 if (pImage->Descriptor.fDirty)
5648 rc = vmdkWriteDescriptor(pImage, pIoCtx);
5649 if (RT_SUCCESS(rc))
5650 {
5651 for (unsigned i = 0; i < pImage->cExtents; i++)
5652 {
5653 pExtent = &pImage->pExtents[i];
5654 if (pExtent->pFile != NULL && pExtent->fMetaDirty)
5655 {
5656 switch (pExtent->enmType)
5657 {
5658 case VMDKETYPE_HOSTED_SPARSE:
5659 if (!pExtent->fFooter)
5660 rc = vmdkWriteMetaSparseExtent(pImage, pExtent, 0, pIoCtx);
5661 else
5662 {
5663 uint64_t uFileOffset = pExtent->uAppendPosition;
5664 /* Simply skip writing anything if the streamOptimized
5665 * image hasn't been just created. */
5666 if (!uFileOffset)
5667 break;
5668 uFileOffset = RT_ALIGN_64(uFileOffset, 512);
5669 rc = vmdkWriteMetaSparseExtent(pImage, pExtent,
5670 uFileOffset, pIoCtx);
5671 }
5672 break;
5673 case VMDKETYPE_VMFS:
5674 case VMDKETYPE_FLAT:
5675 /* Nothing to do. */
5676 break;
5677 case VMDKETYPE_ZERO:
5678 default:
5679 AssertMsgFailed(("extent with type %d marked as dirty\n",
5680 pExtent->enmType));
5681 break;
5682 }
5683 }
5684 if (RT_FAILURE(rc))
5685 break;
5686 switch (pExtent->enmType)
5687 {
5688 case VMDKETYPE_HOSTED_SPARSE:
5689 case VMDKETYPE_VMFS:
5690 case VMDKETYPE_FLAT:
5691 /** @todo implement proper path absolute check. */
5692 if ( pExtent->pFile != NULL
5693 && !(pImage->uOpenFlags & VD_OPEN_FLAGS_READONLY)
5694 && !(pExtent->pszBasename[0] == RTPATH_SLASH))
5695 rc = vdIfIoIntFileFlush(pImage->pIfIo, pExtent->pFile->pStorage, pIoCtx,
5696 NULL, NULL);
5697 break;
5698 case VMDKETYPE_ZERO:
5699 /* No need to do anything for this extent. */
5700 break;
5701 default:
5702 AssertMsgFailed(("unknown extent type %d\n", pExtent->enmType));
5703 break;
5704 }
5705 }
5706 }
5707 return rc;
5708}
5709/**
5710 * Internal. Find extent corresponding to the sector number in the disk.
5711 */
5712static int vmdkFindExtent(PVMDKIMAGE pImage, uint64_t offSector,
5713 PVMDKEXTENT *ppExtent, uint64_t *puSectorInExtent)
5714{
5715 PVMDKEXTENT pExtent = NULL;
5716 int rc = VINF_SUCCESS;
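/* Illustration: for an image made of a 2048-sector ZERO extent followed by
   a FLAT partition extent, disk sector 2100 resolves to the second extent
   with *puSectorInExtent = (2100 - 2048) + uSectorOffset. */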
5717 for (unsigned i = 0; i < pImage->cExtents; i++)
5718 {
5719 if (offSector < pImage->pExtents[i].cNominalSectors)
5720 {
5721 pExtent = &pImage->pExtents[i];
5722 *puSectorInExtent = offSector + pImage->pExtents[i].uSectorOffset;
5723 break;
5724 }
5725 offSector -= pImage->pExtents[i].cNominalSectors;
5726 }
5727 if (pExtent)
5728 *ppExtent = pExtent;
5729 else
5730 rc = VERR_IO_SECTOR_NOT_FOUND;
5731 return rc;
5732}
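/*
 * Worked sketch of the mapping done by vmdkFindExtent() above (not part of
 * the backend; the two extent sizes and the plain arrays are made up for
 * illustration and stand in for the real VMDKEXTENT descriptors):
 *
 * @code
 *  #include <stdint.h>
 *
 *  // Two hypothetical extents of 2048 and 4096 nominal sectors each.
 *  static const uint64_t g_acNominalSectors[2] = { 2048, 4096 };
 *  static const uint64_t g_auSectorOffset[2]   = {    0,    0 };
 *
 *  // Returns the extent index and the sector within that extent, or -1 if
 *  // the sector lies beyond the last extent (VERR_IO_SECTOR_NOT_FOUND above).
 *  static int exampleFindExtent(uint64_t offSector, uint64_t *puSectorInExtent)
 *  {
 *      for (unsigned i = 0; i < 2; i++)
 *      {
 *          if (offSector < g_acNominalSectors[i])
 *          {
 *              *puSectorInExtent = offSector + g_auSectorOffset[i];
 *              return (int)i;
 *          }
 *          offSector -= g_acNominalSectors[i]; // skip past this extent
 *      }
 *      return -1;
 *  }
 *
 *  // exampleFindExtent(3000, &uSec) -> extent 1 with uSec == 952.
 * @endcode
 */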
5733/**
5734 * Internal. Hash function for placing the grain table hash entries.
5735 */
5736static uint32_t vmdkGTCacheHash(PVMDKGTCACHE pCache, uint64_t uSector,
5737 unsigned uExtent)
5738{
5739 /** @todo this hash function is quite simple, maybe use a better one which
5740 * scrambles the bits better. */
5741 return (uSector + uExtent) % pCache->cEntries;
5742}
5743/**
5744 * Internal. Get sector number in the extent file from the relative sector
5745 * number in the extent.
5746 */
5747static int vmdkGetSector(PVMDKIMAGE pImage, PVDIOCTX pIoCtx,
5748 PVMDKEXTENT pExtent, uint64_t uSector,
5749 uint64_t *puExtentSector)
5750{
5751 PVMDKGTCACHE pCache = pImage->pGTCache;
5752 uint64_t uGDIndex, uGTSector, uGTBlock;
5753 uint32_t uGTHash, uGTBlockIndex;
5754 PVMDKGTCACHEENTRY pGTCacheEntry;
5755 uint32_t aGTDataTmp[VMDK_GT_CACHELINE_SIZE];
5756 int rc;
5757 /* For newly created and readonly/sequentially opened streamOptimized
5758 * images this must be a no-op, as the grain directory is not there. */
5759 if ( ( pImage->uImageFlags & VD_VMDK_IMAGE_FLAGS_STREAM_OPTIMIZED
5760 && pExtent->uAppendPosition)
5761 || ( pImage->uImageFlags & VD_VMDK_IMAGE_FLAGS_STREAM_OPTIMIZED
5762 && pImage->uOpenFlags & VD_OPEN_FLAGS_READONLY
5763 && pImage->uOpenFlags & VD_OPEN_FLAGS_SEQUENTIAL))
5764 {
5765 *puExtentSector = 0;
5766 return VINF_SUCCESS;
5767 }
5768 uGDIndex = uSector / pExtent->cSectorsPerGDE;
5769 if (uGDIndex >= pExtent->cGDEntries)
5770 return VERR_OUT_OF_RANGE;
5771 uGTSector = pExtent->pGD[uGDIndex];
5772 if (!uGTSector)
5773 {
5774 /* There is no grain table referenced by this grain directory
5775 * entry. So there is absolutely no data in this area. */
5776 *puExtentSector = 0;
5777 return VINF_SUCCESS;
5778 }
5779 uGTBlock = uSector / (pExtent->cSectorsPerGrain * VMDK_GT_CACHELINE_SIZE);
5780 uGTHash = vmdkGTCacheHash(pCache, uGTBlock, pExtent->uExtent);
5781 pGTCacheEntry = &pCache->aGTCache[uGTHash];
5782 if ( pGTCacheEntry->uExtent != pExtent->uExtent
5783 || pGTCacheEntry->uGTBlock != uGTBlock)
5784 {
5785 /* Cache miss, fetch data from disk. */
5786 PVDMETAXFER pMetaXfer;
5787 rc = vdIfIoIntFileReadMeta(pImage->pIfIo, pExtent->pFile->pStorage,
5788 VMDK_SECTOR2BYTE(uGTSector) + (uGTBlock % (pExtent->cGTEntries / VMDK_GT_CACHELINE_SIZE)) * sizeof(aGTDataTmp),
5789 aGTDataTmp, sizeof(aGTDataTmp), pIoCtx, &pMetaXfer, NULL, NULL);
5790 if (RT_FAILURE(rc))
5791 return rc;
5792 /* We can release the metadata transfer immediately. */
5793 vdIfIoIntMetaXferRelease(pImage->pIfIo, pMetaXfer);
5794 pGTCacheEntry->uExtent = pExtent->uExtent;
5795 pGTCacheEntry->uGTBlock = uGTBlock;
5796 for (unsigned i = 0; i < VMDK_GT_CACHELINE_SIZE; i++)
5797 pGTCacheEntry->aGTData[i] = RT_LE2H_U32(aGTDataTmp[i]);
5798 }
5799 uGTBlockIndex = (uSector / pExtent->cSectorsPerGrain) % VMDK_GT_CACHELINE_SIZE;
5800 uint32_t uGrainSector = pGTCacheEntry->aGTData[uGTBlockIndex];
5801 if (uGrainSector)
5802 *puExtentSector = uGrainSector + uSector % pExtent->cSectorsPerGrain;
5803 else
5804 *puExtentSector = 0;
5805 return VINF_SUCCESS;
5806}
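/*
 * Worked lookup example for vmdkGetSector() (all sizes are assumptions for
 * illustration, not constants taken from this file): with 128 sectors per
 * grain and 512 grain table entries, one grain directory entry covers
 * 128 * 512 = 65536 sectors.  For uSector = 200000:
 *      uGDIndex             = 200000 / 65536 = 3
 *      grain index          = 200000 / 128   = 1562
 *      entry within that GT = 1562 % 512     = 26
 * If that grain table entry holds grain sector 4096, the data lives at extent
 * sector 4096 + (200000 % 128) = 4160, which is what *puExtentSector receives.
 * An all-zero entry instead means "no data here" and *puExtentSector is 0.
 */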
5807/**
5808 * Internal. Writes the grain and also if necessary the grain tables.
5809 * Uses the grain table cache as a true grain table.
5810 */
5811static int vmdkStreamAllocGrain(PVMDKIMAGE pImage, PVMDKEXTENT pExtent,
5812 uint64_t uSector, PVDIOCTX pIoCtx,
5813 uint64_t cbWrite)
5814{
5815 uint32_t uGrain;
5816 uint32_t uGDEntry, uLastGDEntry;
5817 uint32_t cbGrain = 0;
5818 uint32_t uCacheLine, uCacheEntry;
5819 const void *pData;
5820 int rc;
5821 /* Very strict requirements: always write at least one full grain, with
5822 * proper alignment. Everything else would require reading of already
5823 * written data, which we don't support for obvious reasons. The only
5824 * exception is the last grain, and only if the image size specifies
5825 * that only some portion holds data. In any case the write must be
5826 * within the image limits, no "overshoot" allowed. */
5827 if ( cbWrite == 0
5828 || ( cbWrite < VMDK_SECTOR2BYTE(pExtent->cSectorsPerGrain)
5829 && pExtent->cNominalSectors - uSector >= pExtent->cSectorsPerGrain)
5830 || uSector % pExtent->cSectorsPerGrain
5831 || uSector + VMDK_BYTE2SECTOR(cbWrite) > pExtent->cNominalSectors)
5832 return VERR_INVALID_PARAMETER;
5833 /* Clip write range to at most the rest of the grain. */
5834 cbWrite = RT_MIN(cbWrite, VMDK_SECTOR2BYTE(pExtent->cSectorsPerGrain - uSector % pExtent->cSectorsPerGrain));
5835    /* Do not allow going backwards. */
5836 uGrain = uSector / pExtent->cSectorsPerGrain;
5837 uCacheLine = uGrain % pExtent->cGTEntries / VMDK_GT_CACHELINE_SIZE;
5838 uCacheEntry = uGrain % VMDK_GT_CACHELINE_SIZE;
5839 uGDEntry = uGrain / pExtent->cGTEntries;
5840 uLastGDEntry = pExtent->uLastGrainAccess / pExtent->cGTEntries;
5841 if (uGrain < pExtent->uLastGrainAccess)
5842 return VERR_VD_VMDK_INVALID_WRITE;
5843 /* Zero byte write optimization. Since we don't tell VBoxHDD that we need
5844     * to allocate something, we also need to detect the situation ourselves. */
5845 if ( !(pImage->uOpenFlags & VD_OPEN_FLAGS_HONOR_ZEROES)
5846 && vdIfIoIntIoCtxIsZero(pImage->pIfIo, pIoCtx, cbWrite, true /* fAdvance */))
5847 return VINF_SUCCESS;
5848 if (uGDEntry != uLastGDEntry)
5849 {
5850 rc = vmdkStreamFlushGT(pImage, pExtent, uLastGDEntry);
5851 if (RT_FAILURE(rc))
5852 return rc;
5853 vmdkStreamClearGT(pImage, pExtent);
5854 for (uint32_t i = uLastGDEntry + 1; i < uGDEntry; i++)
5855 {
5856 rc = vmdkStreamFlushGT(pImage, pExtent, i);
5857 if (RT_FAILURE(rc))
5858 return rc;
5859 }
5860 }
5861 uint64_t uFileOffset;
5862 uFileOffset = pExtent->uAppendPosition;
5863 if (!uFileOffset)
5864 return VERR_INTERNAL_ERROR;
5865 /* Align to sector, as the previous write could have been any size. */
5866 uFileOffset = RT_ALIGN_64(uFileOffset, 512);
5867 /* Paranoia check: extent type, grain table buffer presence and
5868 * grain table buffer space. Also grain table entry must be clear. */
5869 if ( pExtent->enmType != VMDKETYPE_HOSTED_SPARSE
5870 || !pImage->pGTCache
5871 || pExtent->cGTEntries > VMDK_GT_CACHE_SIZE * VMDK_GT_CACHELINE_SIZE
5872 || pImage->pGTCache->aGTCache[uCacheLine].aGTData[uCacheEntry])
5873 return VERR_INTERNAL_ERROR;
5874 /* Update grain table entry. */
5875 pImage->pGTCache->aGTCache[uCacheLine].aGTData[uCacheEntry] = VMDK_BYTE2SECTOR(uFileOffset);
5876 if (cbWrite != VMDK_SECTOR2BYTE(pExtent->cSectorsPerGrain))
5877 {
5878 vdIfIoIntIoCtxCopyFrom(pImage->pIfIo, pIoCtx, pExtent->pvGrain, cbWrite);
5879 memset((char *)pExtent->pvGrain + cbWrite, '\0',
5880 VMDK_SECTOR2BYTE(pExtent->cSectorsPerGrain) - cbWrite);
5881 pData = pExtent->pvGrain;
5882 }
5883 else
5884 {
5885 RTSGSEG Segment;
5886 unsigned cSegments = 1;
5887 size_t cbSeg = 0;
5888 cbSeg = vdIfIoIntIoCtxSegArrayCreate(pImage->pIfIo, pIoCtx, &Segment,
5889 &cSegments, VMDK_SECTOR2BYTE(pExtent->cSectorsPerGrain));
5890 Assert(cbSeg == VMDK_SECTOR2BYTE(pExtent->cSectorsPerGrain));
5891 pData = Segment.pvSeg;
5892 }
5893 rc = vmdkFileDeflateSync(pImage, pExtent, uFileOffset, pData,
5894 VMDK_SECTOR2BYTE(pExtent->cSectorsPerGrain),
5895 uSector, &cbGrain);
5896 if (RT_FAILURE(rc))
5897 {
5898 pExtent->uGrainSectorAbs = 0;
5899 AssertRC(rc);
5900 return vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: cannot write compressed data block in '%s'"), pExtent->pszFullname);
5901 }
5902 pExtent->uLastGrainAccess = uGrain;
5903 pExtent->uAppendPosition += cbGrain;
5904 return rc;
5905}
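/*
 * Minimal sketch of the write-validation rule enforced at the top of
 * vmdkStreamAllocGrain() above, assuming 512-byte sectors (which is what
 * VMDK_SECTOR2BYTE()/VMDK_BYTE2SECTOR() encode); the helper name is made up:
 *
 * @code
 *  #include <stdbool.h>
 *  #include <stdint.h>
 *
 *  static bool exampleIsValidStreamWrite(uint64_t uSector, uint64_t cbWrite,
 *                                        uint64_t cSectorsPerGrain,
 *                                        uint64_t cNominalSectors)
 *  {
 *      if (cbWrite == 0)
 *          return false;                           // nothing to write
 *      if (   cbWrite < cSectorsPerGrain * 512
 *          && cNominalSectors - uSector >= cSectorsPerGrain)
 *          return false;                           // short write, but not the final partial grain
 *      if (uSector % cSectorsPerGrain)
 *          return false;                           // not grain aligned
 *      if (uSector + cbWrite / 512 > cNominalSectors)
 *          return false;                           // overshoots the image
 *      return true;
 *  }
 * @endcode
 *
 * E.g. with 128 sectors per grain, a 64 KiB write at sector 256 passes, while
 * a 4 KiB write at sector 256 in the middle of the image is rejected.
 */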
5906/**
5907 * Internal: Updates the grain table during grain allocation.
5908 */
5909static int vmdkAllocGrainGTUpdate(PVMDKIMAGE pImage, PVMDKEXTENT pExtent, PVDIOCTX pIoCtx,
5910 PVMDKGRAINALLOCASYNC pGrainAlloc)
5911{
5912 int rc = VINF_SUCCESS;
5913 PVMDKGTCACHE pCache = pImage->pGTCache;
5914 uint32_t aGTDataTmp[VMDK_GT_CACHELINE_SIZE];
5915 uint32_t uGTHash, uGTBlockIndex;
5916 uint64_t uGTSector, uRGTSector, uGTBlock;
5917 uint64_t uSector = pGrainAlloc->uSector;
5918 PVMDKGTCACHEENTRY pGTCacheEntry;
5919 LogFlowFunc(("pImage=%#p pExtent=%#p pCache=%#p pIoCtx=%#p pGrainAlloc=%#p\n",
5920 pImage, pExtent, pCache, pIoCtx, pGrainAlloc));
5921 uGTSector = pGrainAlloc->uGTSector;
5922 uRGTSector = pGrainAlloc->uRGTSector;
5923 LogFlow(("uGTSector=%llu uRGTSector=%llu\n", uGTSector, uRGTSector));
5924 /* Update the grain table (and the cache). */
5925 uGTBlock = uSector / (pExtent->cSectorsPerGrain * VMDK_GT_CACHELINE_SIZE);
5926 uGTHash = vmdkGTCacheHash(pCache, uGTBlock, pExtent->uExtent);
5927 pGTCacheEntry = &pCache->aGTCache[uGTHash];
5928 if ( pGTCacheEntry->uExtent != pExtent->uExtent
5929 || pGTCacheEntry->uGTBlock != uGTBlock)
5930 {
5931 /* Cache miss, fetch data from disk. */
5932 LogFlow(("Cache miss, fetch data from disk\n"));
5933 PVDMETAXFER pMetaXfer = NULL;
5934 rc = vdIfIoIntFileReadMeta(pImage->pIfIo, pExtent->pFile->pStorage,
5935 VMDK_SECTOR2BYTE(uGTSector) + (uGTBlock % (pExtent->cGTEntries / VMDK_GT_CACHELINE_SIZE)) * sizeof(aGTDataTmp),
5936 aGTDataTmp, sizeof(aGTDataTmp), pIoCtx,
5937 &pMetaXfer, vmdkAllocGrainComplete, pGrainAlloc);
5938 if (rc == VERR_VD_ASYNC_IO_IN_PROGRESS)
5939 {
5940 pGrainAlloc->cIoXfersPending++;
5941 pGrainAlloc->fGTUpdateNeeded = true;
5942 /* Leave early, we will be called again after the read completed. */
5943 LogFlowFunc(("Metadata read in progress, leaving\n"));
5944 return rc;
5945 }
5946 else if (RT_FAILURE(rc))
5947 return vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: cannot read allocated grain table entry in '%s'"), pExtent->pszFullname);
5948 vdIfIoIntMetaXferRelease(pImage->pIfIo, pMetaXfer);
5949 pGTCacheEntry->uExtent = pExtent->uExtent;
5950 pGTCacheEntry->uGTBlock = uGTBlock;
5951 for (unsigned i = 0; i < VMDK_GT_CACHELINE_SIZE; i++)
5952 pGTCacheEntry->aGTData[i] = RT_LE2H_U32(aGTDataTmp[i]);
5953 }
5954 else
5955 {
5956 /* Cache hit. Convert grain table block back to disk format, otherwise
5957 * the code below will write garbage for all but the updated entry. */
5958 for (unsigned i = 0; i < VMDK_GT_CACHELINE_SIZE; i++)
5959 aGTDataTmp[i] = RT_H2LE_U32(pGTCacheEntry->aGTData[i]);
5960 }
5961 pGrainAlloc->fGTUpdateNeeded = false;
5962 uGTBlockIndex = (uSector / pExtent->cSectorsPerGrain) % VMDK_GT_CACHELINE_SIZE;
5963 aGTDataTmp[uGTBlockIndex] = RT_H2LE_U32(VMDK_BYTE2SECTOR(pGrainAlloc->uGrainOffset));
5964 pGTCacheEntry->aGTData[uGTBlockIndex] = VMDK_BYTE2SECTOR(pGrainAlloc->uGrainOffset);
5965 /* Update grain table on disk. */
5966 rc = vdIfIoIntFileWriteMeta(pImage->pIfIo, pExtent->pFile->pStorage,
5967 VMDK_SECTOR2BYTE(uGTSector) + (uGTBlock % (pExtent->cGTEntries / VMDK_GT_CACHELINE_SIZE)) * sizeof(aGTDataTmp),
5968 aGTDataTmp, sizeof(aGTDataTmp), pIoCtx,
5969 vmdkAllocGrainComplete, pGrainAlloc);
5970 if (rc == VERR_VD_ASYNC_IO_IN_PROGRESS)
5971 pGrainAlloc->cIoXfersPending++;
5972 else if (RT_FAILURE(rc))
5973 return vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: cannot write updated grain table in '%s'"), pExtent->pszFullname);
5974 if (pExtent->pRGD)
5975 {
5976 /* Update backup grain table on disk. */
5977 rc = vdIfIoIntFileWriteMeta(pImage->pIfIo, pExtent->pFile->pStorage,
5978 VMDK_SECTOR2BYTE(uRGTSector) + (uGTBlock % (pExtent->cGTEntries / VMDK_GT_CACHELINE_SIZE)) * sizeof(aGTDataTmp),
5979 aGTDataTmp, sizeof(aGTDataTmp), pIoCtx,
5980 vmdkAllocGrainComplete, pGrainAlloc);
5981 if (rc == VERR_VD_ASYNC_IO_IN_PROGRESS)
5982 pGrainAlloc->cIoXfersPending++;
5983 else if (RT_FAILURE(rc))
5984 return vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: cannot write updated backup grain table in '%s'"), pExtent->pszFullname);
5985 }
5986 LogFlowFunc(("leaving rc=%Rrc\n", rc));
5987 return rc;
5988}
5989/**
5990 * Internal - complete the grain allocation by updating disk grain table if required.
5991 */
5992static DECLCALLBACK(int) vmdkAllocGrainComplete(void *pBackendData, PVDIOCTX pIoCtx, void *pvUser, int rcReq)
5993{
5994 RT_NOREF1(rcReq);
5995 int rc = VINF_SUCCESS;
5996 PVMDKIMAGE pImage = (PVMDKIMAGE)pBackendData;
5997 PVMDKGRAINALLOCASYNC pGrainAlloc = (PVMDKGRAINALLOCASYNC)pvUser;
5998 LogFlowFunc(("pBackendData=%#p pIoCtx=%#p pvUser=%#p rcReq=%Rrc\n",
5999 pBackendData, pIoCtx, pvUser, rcReq));
6000 pGrainAlloc->cIoXfersPending--;
6001 if (!pGrainAlloc->cIoXfersPending && pGrainAlloc->fGTUpdateNeeded)
6002 rc = vmdkAllocGrainGTUpdate(pImage, pGrainAlloc->pExtent, pIoCtx, pGrainAlloc);
6003 if (!pGrainAlloc->cIoXfersPending)
6004 {
6005 /* Grain allocation completed. */
6006 RTMemFree(pGrainAlloc);
6007 }
6008 LogFlowFunc(("Leaving rc=%Rrc\n", rc));
6009 return rc;
6010}
6011/**
6012 * Internal. Allocates a new grain table (if necessary).
6013 */
6014static int vmdkAllocGrain(PVMDKIMAGE pImage, PVMDKEXTENT pExtent, PVDIOCTX pIoCtx,
6015 uint64_t uSector, uint64_t cbWrite)
6016{
6017 PVMDKGTCACHE pCache = pImage->pGTCache; NOREF(pCache);
6018 uint64_t uGDIndex, uGTSector, uRGTSector;
6019 uint64_t uFileOffset;
6020 PVMDKGRAINALLOCASYNC pGrainAlloc = NULL;
6021 int rc;
6022 LogFlowFunc(("pCache=%#p pExtent=%#p pIoCtx=%#p uSector=%llu cbWrite=%llu\n",
6023 pCache, pExtent, pIoCtx, uSector, cbWrite));
6024 pGrainAlloc = (PVMDKGRAINALLOCASYNC)RTMemAllocZ(sizeof(VMDKGRAINALLOCASYNC));
6025 if (!pGrainAlloc)
6026 return VERR_NO_MEMORY;
6027 pGrainAlloc->pExtent = pExtent;
6028 pGrainAlloc->uSector = uSector;
6029 uGDIndex = uSector / pExtent->cSectorsPerGDE;
6030 if (uGDIndex >= pExtent->cGDEntries)
6031 {
6032 RTMemFree(pGrainAlloc);
6033 return VERR_OUT_OF_RANGE;
6034 }
6035 uGTSector = pExtent->pGD[uGDIndex];
6036 if (pExtent->pRGD)
6037 uRGTSector = pExtent->pRGD[uGDIndex];
6038 else
6039 uRGTSector = 0; /**< avoid compiler warning */
6040 if (!uGTSector)
6041 {
6042 LogFlow(("Allocating new grain table\n"));
6043 /* There is no grain table referenced by this grain directory
6044 * entry. So there is absolutely no data in this area. Allocate
6045 * a new grain table and put the reference to it in the GDs. */
6046 uFileOffset = pExtent->uAppendPosition;
6047 if (!uFileOffset)
6048 {
6049 RTMemFree(pGrainAlloc);
6050 return VERR_INTERNAL_ERROR;
6051 }
6052 Assert(!(uFileOffset % 512));
6053 uFileOffset = RT_ALIGN_64(uFileOffset, 512);
6054 uGTSector = VMDK_BYTE2SECTOR(uFileOffset);
6055 /* Normally the grain table is preallocated for hosted sparse extents
6056 * that support more than 32 bit sector numbers. So this shouldn't
6057 * ever happen on a valid extent. */
6058 if (uGTSector > UINT32_MAX)
6059 {
6060 RTMemFree(pGrainAlloc);
6061 return VERR_VD_VMDK_INVALID_HEADER;
6062 }
6063 /* Write grain table by writing the required number of grain table
6064 * cache chunks. Allocate memory dynamically here or we flood the
6065 * metadata cache with very small entries. */
6066 size_t cbGTDataTmp = pExtent->cGTEntries * sizeof(uint32_t);
6067 uint32_t *paGTDataTmp = (uint32_t *)RTMemTmpAllocZ(cbGTDataTmp);
6068 if (!paGTDataTmp)
6069 {
6070 RTMemFree(pGrainAlloc);
6071 return VERR_NO_MEMORY;
6072 }
6073 memset(paGTDataTmp, '\0', cbGTDataTmp);
6074 rc = vdIfIoIntFileWriteMeta(pImage->pIfIo, pExtent->pFile->pStorage,
6075 VMDK_SECTOR2BYTE(uGTSector),
6076 paGTDataTmp, cbGTDataTmp, pIoCtx,
6077 vmdkAllocGrainComplete, pGrainAlloc);
6078 if (rc == VERR_VD_ASYNC_IO_IN_PROGRESS)
6079 pGrainAlloc->cIoXfersPending++;
6080 else if (RT_FAILURE(rc))
6081 {
6082 RTMemTmpFree(paGTDataTmp);
6083 RTMemFree(pGrainAlloc);
6084 return vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: cannot write grain table allocation in '%s'"), pExtent->pszFullname);
6085 }
6086 pExtent->uAppendPosition = RT_ALIGN_64( pExtent->uAppendPosition
6087 + cbGTDataTmp, 512);
6088 if (pExtent->pRGD)
6089 {
6090 AssertReturn(!uRGTSector, VERR_VD_VMDK_INVALID_HEADER);
6091 uFileOffset = pExtent->uAppendPosition;
6092 if (!uFileOffset)
6093 return VERR_INTERNAL_ERROR;
6094 Assert(!(uFileOffset % 512));
6095 uRGTSector = VMDK_BYTE2SECTOR(uFileOffset);
6096 /* Normally the redundant grain table is preallocated for hosted
6097 * sparse extents that support more than 32 bit sector numbers. So
6098 * this shouldn't ever happen on a valid extent. */
6099 if (uRGTSector > UINT32_MAX)
6100 {
6101 RTMemTmpFree(paGTDataTmp);
6102 return VERR_VD_VMDK_INVALID_HEADER;
6103 }
6104 /* Write grain table by writing the required number of grain table
6105 * cache chunks. Allocate memory dynamically here or we flood the
6106 * metadata cache with very small entries. */
6107 rc = vdIfIoIntFileWriteMeta(pImage->pIfIo, pExtent->pFile->pStorage,
6108 VMDK_SECTOR2BYTE(uRGTSector),
6109 paGTDataTmp, cbGTDataTmp, pIoCtx,
6110 vmdkAllocGrainComplete, pGrainAlloc);
6111 if (rc == VERR_VD_ASYNC_IO_IN_PROGRESS)
6112 pGrainAlloc->cIoXfersPending++;
6113 else if (RT_FAILURE(rc))
6114 {
6115 RTMemTmpFree(paGTDataTmp);
6116 return vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: cannot write backup grain table allocation in '%s'"), pExtent->pszFullname);
6117 }
6118 pExtent->uAppendPosition = pExtent->uAppendPosition + cbGTDataTmp;
6119 }
6120 RTMemTmpFree(paGTDataTmp);
6121        /* Update the grain directory on disk (doing it before writing the
6122         * grain table would result in a garbled extent if the operation is
6123         * aborted for some reason; this way the worst that can happen is
6124         * some unused sectors in the extent). */
6125 uint32_t uGTSectorLE = RT_H2LE_U64(uGTSector);
6126 rc = vdIfIoIntFileWriteMeta(pImage->pIfIo, pExtent->pFile->pStorage,
6127 VMDK_SECTOR2BYTE(pExtent->uSectorGD) + uGDIndex * sizeof(uGTSectorLE),
6128 &uGTSectorLE, sizeof(uGTSectorLE), pIoCtx,
6129 vmdkAllocGrainComplete, pGrainAlloc);
6130 if (rc == VERR_VD_ASYNC_IO_IN_PROGRESS)
6131 pGrainAlloc->cIoXfersPending++;
6132 else if (RT_FAILURE(rc))
6133 return vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: cannot write grain directory entry in '%s'"), pExtent->pszFullname);
6134 if (pExtent->pRGD)
6135 {
6136 uint32_t uRGTSectorLE = RT_H2LE_U64(uRGTSector);
6137 rc = vdIfIoIntFileWriteMeta(pImage->pIfIo, pExtent->pFile->pStorage,
6138 VMDK_SECTOR2BYTE(pExtent->uSectorRGD) + uGDIndex * sizeof(uGTSectorLE),
6139 &uRGTSectorLE, sizeof(uRGTSectorLE), pIoCtx,
6140 vmdkAllocGrainComplete, pGrainAlloc);
6141 if (rc == VERR_VD_ASYNC_IO_IN_PROGRESS)
6142 pGrainAlloc->cIoXfersPending++;
6143 else if (RT_FAILURE(rc))
6144 return vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: cannot write backup grain directory entry in '%s'"), pExtent->pszFullname);
6145 }
6146 /* As the final step update the in-memory copy of the GDs. */
6147 pExtent->pGD[uGDIndex] = uGTSector;
6148 if (pExtent->pRGD)
6149 pExtent->pRGD[uGDIndex] = uRGTSector;
6150 }
6151 LogFlow(("uGTSector=%llu uRGTSector=%llu\n", uGTSector, uRGTSector));
6152 pGrainAlloc->uGTSector = uGTSector;
6153 pGrainAlloc->uRGTSector = uRGTSector;
6154 uFileOffset = pExtent->uAppendPosition;
6155 if (!uFileOffset)
6156 return VERR_INTERNAL_ERROR;
6157 Assert(!(uFileOffset % 512));
6158 pGrainAlloc->uGrainOffset = uFileOffset;
6159 if (pImage->uImageFlags & VD_VMDK_IMAGE_FLAGS_STREAM_OPTIMIZED)
6160 {
6161 AssertMsgReturn(vdIfIoIntIoCtxIsSynchronous(pImage->pIfIo, pIoCtx),
6162 ("Accesses to stream optimized images must be synchronous\n"),
6163 VERR_INVALID_STATE);
6164 if (cbWrite != VMDK_SECTOR2BYTE(pExtent->cSectorsPerGrain))
6165 return vdIfError(pImage->pIfError, VERR_INTERNAL_ERROR, RT_SRC_POS, N_("VMDK: not enough data for a compressed data block in '%s'"), pExtent->pszFullname);
6166 /* Invalidate cache, just in case some code incorrectly allows mixing
6167 * of reads and writes. Normally shouldn't be needed. */
6168 pExtent->uGrainSectorAbs = 0;
6169 /* Write compressed data block and the markers. */
6170 uint32_t cbGrain = 0;
6171 size_t cbSeg = 0;
6172 RTSGSEG Segment;
6173 unsigned cSegments = 1;
6174 cbSeg = vdIfIoIntIoCtxSegArrayCreate(pImage->pIfIo, pIoCtx, &Segment,
6175 &cSegments, cbWrite);
6176 Assert(cbSeg == cbWrite);
6177 rc = vmdkFileDeflateSync(pImage, pExtent, uFileOffset,
6178 Segment.pvSeg, cbWrite, uSector, &cbGrain);
6179 if (RT_FAILURE(rc))
6180 {
6181 AssertRC(rc);
6182 return vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: cannot write allocated compressed data block in '%s'"), pExtent->pszFullname);
6183 }
6184 pExtent->uLastGrainAccess = uSector / pExtent->cSectorsPerGrain;
6185 pExtent->uAppendPosition += cbGrain;
6186 }
6187 else
6188 {
6189 /* Write the data. Always a full grain, or we're in big trouble. */
6190 rc = vdIfIoIntFileWriteUser(pImage->pIfIo, pExtent->pFile->pStorage,
6191 uFileOffset, pIoCtx, cbWrite,
6192 vmdkAllocGrainComplete, pGrainAlloc);
6193 if (rc == VERR_VD_ASYNC_IO_IN_PROGRESS)
6194 pGrainAlloc->cIoXfersPending++;
6195 else if (RT_FAILURE(rc))
6196 return vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: cannot write allocated data block in '%s'"), pExtent->pszFullname);
6197 pExtent->uAppendPosition += cbWrite;
6198 }
6199 rc = vmdkAllocGrainGTUpdate(pImage, pExtent, pIoCtx, pGrainAlloc);
6200 if (!pGrainAlloc->cIoXfersPending)
6201 {
6202 /* Grain allocation completed. */
6203 RTMemFree(pGrainAlloc);
6204 }
6205 LogFlowFunc(("leaving rc=%Rrc\n", rc));
6206 return rc;
6207}
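/*
 * vmdkAllocGrainGTUpdate(), vmdkAllocGrainComplete() and vmdkAllocGrain()
 * above share a simple completion-counting pattern: every transfer that comes
 * back with VERR_VD_ASYNC_IO_IN_PROGRESS bumps cIoXfersPending, and only the
 * completion that drops the counter to zero frees the VMDKGRAINALLOCASYNC
 * tracker (re-running the deferred grain table update first if a metadata
 * read was still outstanding).  A stripped-down sketch of the idea, with
 * made-up names:
 *
 * @code
 *  #include <stdbool.h>
 *  #include <stdlib.h>
 *
 *  typedef struct EXAMPLETRACKER
 *  {
 *      unsigned cPending;        // outstanding async transfers
 *      bool     fDeferredUpdate; // grain table update postponed until a read completes
 *  } EXAMPLETRACKER;
 *
 *  static void exampleIoStarted(EXAMPLETRACKER *pTracker)
 *  {
 *      pTracker->cPending++;
 *  }
 *
 *  static void exampleIoCompleted(EXAMPLETRACKER *pTracker)
 *  {
 *      pTracker->cPending--;
 *      if (!pTracker->cPending && pTracker->fDeferredUpdate)
 *          pTracker->fDeferredUpdate = false;  // re-issue the grain table update here
 *      if (!pTracker->cPending)
 *          free(pTracker);                     // the last completion owns the cleanup
 *  }
 * @endcode
 */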
6208/**
6209 * Internal. Reads the contents by sequentially going over the compressed
6210 * grains (hoping that they are in sequence).
6211 */
6212static int vmdkStreamReadSequential(PVMDKIMAGE pImage, PVMDKEXTENT pExtent,
6213 uint64_t uSector, PVDIOCTX pIoCtx,
6214 uint64_t cbRead)
6215{
6216 int rc;
6217 LogFlowFunc(("pImage=%#p pExtent=%#p uSector=%llu pIoCtx=%#p cbRead=%llu\n",
6218 pImage, pExtent, uSector, pIoCtx, cbRead));
6219 AssertMsgReturn(vdIfIoIntIoCtxIsSynchronous(pImage->pIfIo, pIoCtx),
6220 ("Async I/O not supported for sequential stream optimized images\n"),
6221 VERR_INVALID_STATE);
6222    /* Do not allow going backwards. */
6223 uint32_t uGrain = uSector / pExtent->cSectorsPerGrain;
6224 if (uGrain < pExtent->uLastGrainAccess)
6225 return VERR_VD_VMDK_INVALID_STATE;
6226 pExtent->uLastGrainAccess = uGrain;
6227 /* After a previous error do not attempt to recover, as it would need
6228 * seeking (in the general case backwards which is forbidden). */
6229 if (!pExtent->uGrainSectorAbs)
6230 return VERR_VD_VMDK_INVALID_STATE;
6231 /* Check if we need to read something from the image or if what we have
6232 * in the buffer is good to fulfill the request. */
6233 if (!pExtent->cbGrainStreamRead || uGrain > pExtent->uGrain)
6234 {
6235 uint32_t uGrainSectorAbs = pExtent->uGrainSectorAbs
6236 + VMDK_BYTE2SECTOR(pExtent->cbGrainStreamRead);
6237 /* Get the marker from the next data block - and skip everything which
6238 * is not a compressed grain. If it's a compressed grain which is for
6239 * the requested sector (or after), read it. */
6240 VMDKMARKER Marker;
6241 do
6242 {
6243 RT_ZERO(Marker);
6244 rc = vdIfIoIntFileReadSync(pImage->pIfIo, pExtent->pFile->pStorage,
6245 VMDK_SECTOR2BYTE(uGrainSectorAbs),
6246 &Marker, RT_UOFFSETOF(VMDKMARKER, uType));
6247 if (RT_FAILURE(rc))
6248 return rc;
6249 Marker.uSector = RT_LE2H_U64(Marker.uSector);
6250 Marker.cbSize = RT_LE2H_U32(Marker.cbSize);
6251 if (Marker.cbSize == 0)
6252 {
6253 /* A marker for something else than a compressed grain. */
6254 rc = vdIfIoIntFileReadSync(pImage->pIfIo, pExtent->pFile->pStorage,
6255 VMDK_SECTOR2BYTE(uGrainSectorAbs)
6256 + RT_UOFFSETOF(VMDKMARKER, uType),
6257 &Marker.uType, sizeof(Marker.uType));
6258 if (RT_FAILURE(rc))
6259 return rc;
6260 Marker.uType = RT_LE2H_U32(Marker.uType);
6261 switch (Marker.uType)
6262 {
6263 case VMDK_MARKER_EOS:
6264 uGrainSectorAbs++;
6265 /* Read (or mostly skip) to the end of file. Uses the
6266 * Marker (LBA sector) as it is unused anyway. This
6267 * makes sure that really everything is read in the
6268 * success case. If this read fails it means the image
6269 * is truncated, but this is harmless so ignore. */
6270 vdIfIoIntFileReadSync(pImage->pIfIo, pExtent->pFile->pStorage,
6271 VMDK_SECTOR2BYTE(uGrainSectorAbs)
6272 + 511,
6273 &Marker.uSector, 1);
6274 break;
6275 case VMDK_MARKER_GT:
6276 uGrainSectorAbs += 1 + VMDK_BYTE2SECTOR(pExtent->cGTEntries * sizeof(uint32_t));
6277 break;
6278 case VMDK_MARKER_GD:
6279 uGrainSectorAbs += 1 + VMDK_BYTE2SECTOR(RT_ALIGN(pExtent->cGDEntries * sizeof(uint32_t), 512));
6280 break;
6281 case VMDK_MARKER_FOOTER:
6282 uGrainSectorAbs += 2;
6283 break;
6284 case VMDK_MARKER_UNSPECIFIED:
6285 /* Skip over the contents of the unspecified marker
6286 * type 4 which exists in some vSphere created files. */
6287 /** @todo figure out what the payload means. */
6288 uGrainSectorAbs += 1;
6289 break;
6290 default:
6291 AssertMsgFailed(("VMDK: corrupted marker, type=%#x\n", Marker.uType));
6292 pExtent->uGrainSectorAbs = 0;
6293 return VERR_VD_VMDK_INVALID_STATE;
6294 }
6295 pExtent->cbGrainStreamRead = 0;
6296 }
6297 else
6298 {
6299 /* A compressed grain marker. If it is at/after what we're
6300 * interested in read and decompress data. */
6301 if (uSector > Marker.uSector + pExtent->cSectorsPerGrain)
6302 {
6303 uGrainSectorAbs += VMDK_BYTE2SECTOR(RT_ALIGN(Marker.cbSize + RT_UOFFSETOF(VMDKMARKER, uType), 512));
6304 continue;
6305 }
6306 uint64_t uLBA = 0;
6307 uint32_t cbGrainStreamRead = 0;
6308 rc = vmdkFileInflateSync(pImage, pExtent,
6309 VMDK_SECTOR2BYTE(uGrainSectorAbs),
6310 pExtent->pvGrain,
6311 VMDK_SECTOR2BYTE(pExtent->cSectorsPerGrain),
6312 &Marker, &uLBA, &cbGrainStreamRead);
6313 if (RT_FAILURE(rc))
6314 {
6315 pExtent->uGrainSectorAbs = 0;
6316 return rc;
6317 }
6318 if ( pExtent->uGrain
6319 && uLBA / pExtent->cSectorsPerGrain <= pExtent->uGrain)
6320 {
6321 pExtent->uGrainSectorAbs = 0;
6322 return VERR_VD_VMDK_INVALID_STATE;
6323 }
6324 pExtent->uGrain = uLBA / pExtent->cSectorsPerGrain;
6325 pExtent->cbGrainStreamRead = cbGrainStreamRead;
6326 break;
6327 }
6328 } while (Marker.uType != VMDK_MARKER_EOS);
6329 pExtent->uGrainSectorAbs = uGrainSectorAbs;
6330 if (!pExtent->cbGrainStreamRead && Marker.uType == VMDK_MARKER_EOS)
6331 {
6332 pExtent->uGrain = UINT32_MAX;
6333 /* Must set a non-zero value for pExtent->cbGrainStreamRead or
6334 * the next read would try to get more data, and we're at EOF. */
6335 pExtent->cbGrainStreamRead = 1;
6336 }
6337 }
6338 if (pExtent->uGrain > uSector / pExtent->cSectorsPerGrain)
6339 {
6340 /* The next data block we have is not for this area, so just return
6341 * that there is no data. */
6342 LogFlowFunc(("returns VERR_VD_BLOCK_FREE\n"));
6343 return VERR_VD_BLOCK_FREE;
6344 }
6345 uint32_t uSectorInGrain = uSector % pExtent->cSectorsPerGrain;
6346 vdIfIoIntIoCtxCopyTo(pImage->pIfIo, pIoCtx,
6347 (uint8_t *)pExtent->pvGrain + VMDK_SECTOR2BYTE(uSectorInGrain),
6348 cbRead);
6349 LogFlowFunc(("returns VINF_SUCCESS\n"));
6350 return VINF_SUCCESS;
6351}
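/*
 * Sketch of the skip computation used by the sequential reader above: a
 * compressed-grain block consists of the 12-byte marker header (64-bit LBA
 * plus 32-bit payload size, assuming the usual layout without padding)
 * followed by the compressed payload, padded up to a whole 512-byte sector.
 * The helper name is made up:
 *
 * @code
 *  #include <stdint.h>
 *
 *  // How many sectors does a compressed block with the given payload size occupy?
 *  static uint64_t exampleCompressedBlockSectors(uint32_t cbSize)
 *  {
 *      uint64_t cbBlock = (uint64_t)cbSize + 12;       // payload + marker header
 *      cbBlock = (cbBlock + 511) & ~(uint64_t)511;     // RT_ALIGN(cbBlock, 512)
 *      return cbBlock / 512;                           // VMDK_BYTE2SECTOR()
 *  }
 *
 *  // exampleCompressedBlockSectors(3000) == 6, i.e. the reader advances by
 *  // six sectors to reach the next marker.
 * @endcode
 */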
6352/**
6353 * Replaces a fragment of a string with the specified string.
6354 *
6355 * @returns Pointer to the allocated UTF-8 string.
6356 * @param pszWhere UTF-8 string to search in.
6357 * @param pszWhat UTF-8 string to search for.
6358 * @param pszByWhat UTF-8 string to replace the found string with.
6359 *
6360 * @note r=bird: This is only used by vmdkRenameWorker(). The first use is
6361 * for updating the base name in the descriptor, the second is for
6362 * generating new filenames for extents. This code borked when
6363 *          RTPathAbs started correcting the drive letter case on windows,
6364 *          when strstr failed because the search string was not subjected
6365 *          to RTPathAbs while pExtent->pszFullname was.  I fixed this by
6366 *          applying RTPathAbs to the places where it wasn't applied.
6367 *
6368 * However, this highlights some undocumented ASSUMPTIONS as well as
6369 *          terrible shortcomings of the approach.
6370 *
6371 * Given the right filename, it may also screw up the descriptor. Take
6372 * the descriptor text 'RW 2048 SPARSE "Test0.vmdk"' for instance,
6373 * we'll be asked to replace "Test0" with something, no problem. No,
6374 *          we'll be asked to replace "Test0" with something, no problem. Now,
6375 * or 'RW 2048 SPARSE "2048.vmdk"', and the strstr approach falls on
6376 * its bum. The descriptor string must be parsed and reconstructed,
6377 * the lazy strstr approach doesn't cut it.
6378 *
6379 * I'm also curious as to what would be the correct escaping of '"' in
6380 * the file name and how that is supposed to be handled, because it
6381 *          needs to be, or such names must be rejected in several places (maybe
6382 * they are, I didn't check).
6383 *
6384 * When this function is used to replace the start of a path, I think
6385 *          the assumption from the prep/setup code is that we kind of know
6386 * what we're working on (I could be wrong). However, using strstr
6387 * instead of strncmp/RTStrNICmp makes no sense and isn't future proof.
6388 * Especially on unix systems, weird stuff could happen if someone
6389 * unwittingly tinkers with the prep/setup code. What should really be
6390 * done here is using a new RTPathStartEx function that (via flags)
6391 * allows matching partial final component and returns the length of
6392 * what it matched up (in case it skipped slashes and '.' components).
6393 *
6394 */
6395static char *vmdkStrReplace(const char *pszWhere, const char *pszWhat,
6396 const char *pszByWhat)
6397{
6398 AssertPtr(pszWhere);
6399 AssertPtr(pszWhat);
6400 AssertPtr(pszByWhat);
6401 const char *pszFoundStr = strstr(pszWhere, pszWhat);
6402 if (!pszFoundStr)
6403 {
6404 LogFlowFunc(("Failed to find '%s' in '%s'!\n", pszWhat, pszWhere));
6405 return NULL;
6406 }
6407 size_t cbFinal = strlen(pszWhere) + 1 + strlen(pszByWhat) - strlen(pszWhat);
6408 char *pszNewStr = RTStrAlloc(cbFinal);
6409 if (pszNewStr)
6410 {
6411 char *pszTmp = pszNewStr;
6412 memcpy(pszTmp, pszWhere, pszFoundStr - pszWhere);
6413 pszTmp += pszFoundStr - pszWhere;
6414 memcpy(pszTmp, pszByWhat, strlen(pszByWhat));
6415 pszTmp += strlen(pszByWhat);
6416 strcpy(pszTmp, pszFoundStr + strlen(pszWhat));
6417 }
6418 return pszNewStr;
6419}
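/*
 * Worked example of the shortcoming described in the note above (the inputs
 * are made up): for the descriptor line 'RW 2048 SPARSE "SPARSE.vmdk"' and an
 * old base name of "SPARSE", strstr() finds the extent-type token first, so
 * the type keyword gets replaced instead of the file name:
 *
 * @code
 *  char *pszNew = vmdkStrReplace("RW 2048 SPARSE \"SPARSE.vmdk\"", "SPARSE", "NewName");
 *  // pszNew == "RW 2048 NewName \"SPARSE.vmdk\"" -- wrong occurrence replaced.
 *  RTStrFree(pszNew);
 * @endcode
 */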
6420/** @copydoc VDIMAGEBACKEND::pfnProbe */
6421static DECLCALLBACK(int) vmdkProbe(const char *pszFilename, PVDINTERFACE pVDIfsDisk,
6422 PVDINTERFACE pVDIfsImage, VDTYPE enmDesiredType, VDTYPE *penmType)
6423{
6424 RT_NOREF(enmDesiredType);
6425 LogFlowFunc(("pszFilename=\"%s\" pVDIfsDisk=%#p pVDIfsImage=%#p penmType=%#p\n",
6426 pszFilename, pVDIfsDisk, pVDIfsImage, penmType));
6427 AssertPtrReturn(pszFilename, VERR_INVALID_POINTER);
6428 AssertReturn(*pszFilename != '\0', VERR_INVALID_PARAMETER);
6429
6430 int rc = VINF_SUCCESS;
6431 PVMDKIMAGE pImage = (PVMDKIMAGE)RTMemAllocZ(RT_UOFFSETOF(VMDKIMAGE, RegionList.aRegions[1]));
6432 if (RT_LIKELY(pImage))
6433 {
6434 pImage->pszFilename = pszFilename;
6435 pImage->pFile = NULL;
6436 pImage->pExtents = NULL;
6437 pImage->pFiles = NULL;
6438 pImage->pGTCache = NULL;
6439 pImage->pDescData = NULL;
6440 pImage->pVDIfsDisk = pVDIfsDisk;
6441 pImage->pVDIfsImage = pVDIfsImage;
6442 /** @todo speed up this test open (VD_OPEN_FLAGS_INFO) by skipping as
6443 * much as possible in vmdkOpenImage. */
6444 rc = vmdkOpenImage(pImage, VD_OPEN_FLAGS_INFO | VD_OPEN_FLAGS_READONLY);
6445 vmdkFreeImage(pImage, false, false /*fFlush*/);
6446 RTMemFree(pImage);
6447 if (RT_SUCCESS(rc))
6448 *penmType = VDTYPE_HDD;
6449 }
6450 else
6451 rc = VERR_NO_MEMORY;
6452 LogFlowFunc(("returns %Rrc\n", rc));
6453 return rc;
6454}
6455/** @copydoc VDIMAGEBACKEND::pfnOpen */
6456static DECLCALLBACK(int) vmdkOpen(const char *pszFilename, unsigned uOpenFlags,
6457 PVDINTERFACE pVDIfsDisk, PVDINTERFACE pVDIfsImage,
6458 VDTYPE enmType, void **ppBackendData)
6459{
6460 RT_NOREF1(enmType); /**< @todo r=klaus make use of the type info. */
6461 LogFlowFunc(("pszFilename=\"%s\" uOpenFlags=%#x pVDIfsDisk=%#p pVDIfsImage=%#p enmType=%u ppBackendData=%#p\n",
6462 pszFilename, uOpenFlags, pVDIfsDisk, pVDIfsImage, enmType, ppBackendData));
6463 int rc;
6464 /* Check open flags. All valid flags are supported. */
6465 AssertReturn(!(uOpenFlags & ~VD_OPEN_FLAGS_MASK), VERR_INVALID_PARAMETER);
6466 AssertPtrReturn(pszFilename, VERR_INVALID_POINTER);
6467 AssertReturn(*pszFilename != '\0', VERR_INVALID_PARAMETER);
6468
6469 PVMDKIMAGE pImage = (PVMDKIMAGE)RTMemAllocZ(RT_UOFFSETOF(VMDKIMAGE, RegionList.aRegions[1]));
6470 if (RT_LIKELY(pImage))
6471 {
6472 pImage->pszFilename = pszFilename;
6473 pImage->pFile = NULL;
6474 pImage->pExtents = NULL;
6475 pImage->pFiles = NULL;
6476 pImage->pGTCache = NULL;
6477 pImage->pDescData = NULL;
6478 pImage->pVDIfsDisk = pVDIfsDisk;
6479 pImage->pVDIfsImage = pVDIfsImage;
6480 rc = vmdkOpenImage(pImage, uOpenFlags);
6481 if (RT_SUCCESS(rc))
6482 *ppBackendData = pImage;
6483 else
6484 RTMemFree(pImage);
6485 }
6486 else
6487 rc = VERR_NO_MEMORY;
6488 LogFlowFunc(("returns %Rrc (pBackendData=%#p)\n", rc, *ppBackendData));
6489 return rc;
6490}
6491/** @copydoc VDIMAGEBACKEND::pfnCreate */
6492static DECLCALLBACK(int) vmdkCreate(const char *pszFilename, uint64_t cbSize,
6493 unsigned uImageFlags, const char *pszComment,
6494 PCVDGEOMETRY pPCHSGeometry, PCVDGEOMETRY pLCHSGeometry,
6495 PCRTUUID pUuid, unsigned uOpenFlags,
6496 unsigned uPercentStart, unsigned uPercentSpan,
6497 PVDINTERFACE pVDIfsDisk, PVDINTERFACE pVDIfsImage,
6498 PVDINTERFACE pVDIfsOperation, VDTYPE enmType,
6499 void **ppBackendData)
6500{
6501 LogFlowFunc(("pszFilename=\"%s\" cbSize=%llu uImageFlags=%#x pszComment=\"%s\" pPCHSGeometry=%#p pLCHSGeometry=%#p Uuid=%RTuuid uOpenFlags=%#x uPercentStart=%u uPercentSpan=%u pVDIfsDisk=%#p pVDIfsImage=%#p pVDIfsOperation=%#p enmType=%u ppBackendData=%#p\n",
6502 pszFilename, cbSize, uImageFlags, pszComment, pPCHSGeometry, pLCHSGeometry, pUuid, uOpenFlags, uPercentStart, uPercentSpan, pVDIfsDisk, pVDIfsImage, pVDIfsOperation, enmType, ppBackendData));
6503 int rc;
6504 /* Check the VD container type and image flags. */
6505 if ( enmType != VDTYPE_HDD
6506 || (uImageFlags & ~VD_VMDK_IMAGE_FLAGS_MASK) != 0)
6507 return VERR_VD_INVALID_TYPE;
6508 /* Check size. Maximum 256TB-64K for sparse images, otherwise unlimited. */
6509 if ( !(uImageFlags & VD_VMDK_IMAGE_FLAGS_RAWDISK)
6510 && ( !cbSize
6511 || (!(uImageFlags & VD_IMAGE_FLAGS_FIXED) && cbSize >= _1T * 256 - _64K)))
6512 return VERR_VD_INVALID_SIZE;
6513 /* Check image flags for invalid combinations. */
6514 if ( (uImageFlags & VD_VMDK_IMAGE_FLAGS_STREAM_OPTIMIZED)
6515 && (uImageFlags & ~(VD_VMDK_IMAGE_FLAGS_STREAM_OPTIMIZED | VD_IMAGE_FLAGS_DIFF)))
6516 return VERR_INVALID_PARAMETER;
6517 /* Check open flags. All valid flags are supported. */
6518 AssertReturn(!(uOpenFlags & ~VD_OPEN_FLAGS_MASK), VERR_INVALID_PARAMETER);
6519 AssertPtrReturn(pszFilename, VERR_INVALID_POINTER);
6520 AssertReturn(*pszFilename != '\0', VERR_INVALID_PARAMETER);
6521 AssertPtrReturn(pPCHSGeometry, VERR_INVALID_POINTER);
6522 AssertPtrReturn(pLCHSGeometry, VERR_INVALID_POINTER);
6523 AssertReturn(!( uImageFlags & VD_VMDK_IMAGE_FLAGS_ESX
6524 && !(uImageFlags & VD_IMAGE_FLAGS_FIXED)),
6525 VERR_INVALID_PARAMETER);
6526 PVMDKIMAGE pImage = (PVMDKIMAGE)RTMemAllocZ(RT_UOFFSETOF(VMDKIMAGE, RegionList.aRegions[1]));
6527 if (RT_LIKELY(pImage))
6528 {
6529 PVDINTERFACEPROGRESS pIfProgress = VDIfProgressGet(pVDIfsOperation);
6530 pImage->pszFilename = pszFilename;
6531 pImage->pFile = NULL;
6532 pImage->pExtents = NULL;
6533 pImage->pFiles = NULL;
6534 pImage->pGTCache = NULL;
6535 pImage->pDescData = NULL;
6536 pImage->pVDIfsDisk = pVDIfsDisk;
6537 pImage->pVDIfsImage = pVDIfsImage;
6538 /* Descriptors for split images can be pretty large, especially if the
6539 * filename is long. So prepare for the worst, and allocate quite some
6540 * memory for the descriptor in this case. */
6541 if (uImageFlags & VD_VMDK_IMAGE_FLAGS_SPLIT_2G)
6542 pImage->cbDescAlloc = VMDK_SECTOR2BYTE(200);
6543 else
6544 pImage->cbDescAlloc = VMDK_SECTOR2BYTE(20);
6545 pImage->pDescData = (char *)RTMemAllocZ(pImage->cbDescAlloc);
6546 if (RT_LIKELY(pImage->pDescData))
6547 {
6548 rc = vmdkCreateImage(pImage, cbSize, uImageFlags, pszComment,
6549 pPCHSGeometry, pLCHSGeometry, pUuid,
6550 pIfProgress, uPercentStart, uPercentSpan);
6551 if (RT_SUCCESS(rc))
6552 {
6553 /* So far the image is opened in read/write mode. Make sure the
6554 * image is opened in read-only mode if the caller requested that. */
6555 if (uOpenFlags & VD_OPEN_FLAGS_READONLY)
6556 {
6557 vmdkFreeImage(pImage, false, true /*fFlush*/);
6558 rc = vmdkOpenImage(pImage, uOpenFlags);
6559 }
6560 if (RT_SUCCESS(rc))
6561 *ppBackendData = pImage;
6562 }
6563 if (RT_FAILURE(rc))
6564 RTMemFree(pImage->pDescData);
6565 }
6566 else
6567 rc = VERR_NO_MEMORY;
6568 if (RT_FAILURE(rc))
6569 RTMemFree(pImage);
6570 }
6571 else
6572 rc = VERR_NO_MEMORY;
6573 LogFlowFunc(("returns %Rrc (pBackendData=%#p)\n", rc, *ppBackendData));
6574 return rc;
6575}
6576/**
6577 * Prepares the state for renaming a VMDK image, setting up the state and allocating
6578 * memory.
6579 *
6580 * @returns VBox status code.
6581 * @param pImage VMDK image instance.
6582 * @param pRenameState The state to initialize.
6583 * @param pszFilename The new filename.
6584 */
6585static int vmdkRenameStatePrepare(PVMDKIMAGE pImage, PVMDKRENAMESTATE pRenameState, const char *pszFilename)
6586{
6587 AssertReturn(RTPathFilename(pszFilename) != NULL, VERR_INVALID_PARAMETER);
6588 int rc = VINF_SUCCESS;
6589 memset(&pRenameState->DescriptorCopy, 0, sizeof(pRenameState->DescriptorCopy));
6590 /*
6591 * Allocate an array to store both old and new names of renamed files
6592 * in case we have to roll back the changes. Arrays are initialized
6593 * with zeros. We actually save stuff when and if we change it.
6594 */
6595 pRenameState->cExtents = pImage->cExtents;
6596 pRenameState->apszOldName = (char **)RTMemTmpAllocZ((pRenameState->cExtents + 1) * sizeof(char *));
6597 pRenameState->apszNewName = (char **)RTMemTmpAllocZ((pRenameState->cExtents + 1) * sizeof(char *));
6598 pRenameState->apszNewLines = (char **)RTMemTmpAllocZ(pRenameState->cExtents * sizeof(char *));
6599 if ( pRenameState->apszOldName
6600 && pRenameState->apszNewName
6601 && pRenameState->apszNewLines)
6602 {
6603 /* Save the descriptor size and position. */
6604 if (pImage->pDescData)
6605 {
6606 /* Separate descriptor file. */
6607 pRenameState->fEmbeddedDesc = false;
6608 }
6609 else
6610 {
6611 /* Embedded descriptor file. */
6612 pRenameState->ExtentCopy = pImage->pExtents[0];
6613 pRenameState->fEmbeddedDesc = true;
6614 }
6615 /* Save the descriptor content. */
6616 pRenameState->DescriptorCopy.cLines = pImage->Descriptor.cLines;
6617 for (unsigned i = 0; i < pRenameState->DescriptorCopy.cLines; i++)
6618 {
6619 pRenameState->DescriptorCopy.aLines[i] = RTStrDup(pImage->Descriptor.aLines[i]);
6620 if (!pRenameState->DescriptorCopy.aLines[i])
6621 {
6622 rc = VERR_NO_MEMORY;
6623 break;
6624 }
6625 }
6626 if (RT_SUCCESS(rc))
6627 {
6628 /* Prepare both old and new base names used for string replacement. */
6629 pRenameState->pszNewBaseName = RTStrDup(RTPathFilename(pszFilename));
6630 AssertReturn(pRenameState->pszNewBaseName, VERR_NO_STR_MEMORY);
6631 RTPathStripSuffix(pRenameState->pszNewBaseName);
6632 pRenameState->pszOldBaseName = RTStrDup(RTPathFilename(pImage->pszFilename));
6633 AssertReturn(pRenameState->pszOldBaseName, VERR_NO_STR_MEMORY);
6634 RTPathStripSuffix(pRenameState->pszOldBaseName);
6635 /* Prepare both old and new full names used for string replacement.
6636 Note! Must abspath the stuff here, so the strstr weirdness later in
6637 the renaming process get a match against abspath'ed extent paths.
6638            the renaming process gets a match against abspath'ed extent paths.
6639 pRenameState->pszNewFullName = RTPathAbsDup(pszFilename);
6640 AssertReturn(pRenameState->pszNewFullName, VERR_NO_STR_MEMORY);
6641 RTPathStripSuffix(pRenameState->pszNewFullName);
6642 pRenameState->pszOldFullName = RTPathAbsDup(pImage->pszFilename);
6643 AssertReturn(pRenameState->pszOldFullName, VERR_NO_STR_MEMORY);
6644 RTPathStripSuffix(pRenameState->pszOldFullName);
6645 /* Save the old name for easy access to the old descriptor file. */
6646 pRenameState->pszOldDescName = RTStrDup(pImage->pszFilename);
6647 AssertReturn(pRenameState->pszOldDescName, VERR_NO_STR_MEMORY);
6648 /* Save old image name. */
6649 pRenameState->pszOldImageName = pImage->pszFilename;
6650 }
6651 }
6652 else
6653 rc = VERR_NO_TMP_MEMORY;
6654 return rc;
6655}
6656/**
6657 * Destroys the given rename state, freeing all allocated memory.
6658 *
6659 * @returns nothing.
6660 * @param pRenameState The rename state to destroy.
6661 */
6662static void vmdkRenameStateDestroy(PVMDKRENAMESTATE pRenameState)
6663{
6664 for (unsigned i = 0; i < pRenameState->DescriptorCopy.cLines; i++)
6665 if (pRenameState->DescriptorCopy.aLines[i])
6666 RTStrFree(pRenameState->DescriptorCopy.aLines[i]);
6667 if (pRenameState->apszOldName)
6668 {
6669 for (unsigned i = 0; i <= pRenameState->cExtents; i++)
6670 if (pRenameState->apszOldName[i])
6671 RTStrFree(pRenameState->apszOldName[i]);
6672 RTMemTmpFree(pRenameState->apszOldName);
6673 }
6674 if (pRenameState->apszNewName)
6675 {
6676 for (unsigned i = 0; i <= pRenameState->cExtents; i++)
6677 if (pRenameState->apszNewName[i])
6678 RTStrFree(pRenameState->apszNewName[i]);
6679 RTMemTmpFree(pRenameState->apszNewName);
6680 }
6681 if (pRenameState->apszNewLines)
6682 {
6683 for (unsigned i = 0; i < pRenameState->cExtents; i++)
6684 if (pRenameState->apszNewLines[i])
6685 RTStrFree(pRenameState->apszNewLines[i]);
6686 RTMemTmpFree(pRenameState->apszNewLines);
6687 }
6688 if (pRenameState->pszOldDescName)
6689 RTStrFree(pRenameState->pszOldDescName);
6690 if (pRenameState->pszOldBaseName)
6691 RTStrFree(pRenameState->pszOldBaseName);
6692 if (pRenameState->pszNewBaseName)
6693 RTStrFree(pRenameState->pszNewBaseName);
6694 if (pRenameState->pszOldFullName)
6695 RTStrFree(pRenameState->pszOldFullName);
6696 if (pRenameState->pszNewFullName)
6697 RTStrFree(pRenameState->pszNewFullName);
6698}
6699/**
6700 * Rolls back the rename operation to the original state.
6701 *
6702 * @returns VBox status code.
6703 * @param pImage VMDK image instance.
6704 * @param pRenameState The rename state.
6705 */
6706static int vmdkRenameRollback(PVMDKIMAGE pImage, PVMDKRENAMESTATE pRenameState)
6707{
6708 int rc = VINF_SUCCESS;
6709 if (!pRenameState->fImageFreed)
6710 {
6711 /*
6712 * Some extents may have been closed, close the rest. We will
6713 * re-open the whole thing later.
6714 */
6715 vmdkFreeImage(pImage, false, true /*fFlush*/);
6716 }
6717 /* Rename files back. */
6718 for (unsigned i = 0; i <= pRenameState->cExtents; i++)
6719 {
6720 if (pRenameState->apszOldName[i])
6721 {
6722 rc = vdIfIoIntFileMove(pImage->pIfIo, pRenameState->apszNewName[i], pRenameState->apszOldName[i], 0);
6723 AssertRC(rc);
6724 }
6725 }
6726 /* Restore the old descriptor. */
6727 PVMDKFILE pFile;
6728 rc = vmdkFileOpen(pImage, &pFile, NULL, pRenameState->pszOldDescName,
6729 VDOpenFlagsToFileOpenFlags(VD_OPEN_FLAGS_NORMAL,
6730 false /* fCreate */));
6731 AssertRC(rc);
6732 if (pRenameState->fEmbeddedDesc)
6733 {
6734 pRenameState->ExtentCopy.pFile = pFile;
6735 pImage->pExtents = &pRenameState->ExtentCopy;
6736 }
6737 else
6738 {
6739 /* Shouldn't be null for separate descriptor.
6740 * There will be no access to the actual content.
6741 */
6742 pImage->pDescData = pRenameState->pszOldDescName;
6743 pImage->pFile = pFile;
6744 }
6745 pImage->Descriptor = pRenameState->DescriptorCopy;
6746 vmdkWriteDescriptor(pImage, NULL);
6747 vmdkFileClose(pImage, &pFile, false);
6748 /* Get rid of the stuff we implanted. */
6749 pImage->pExtents = NULL;
6750 pImage->pFile = NULL;
6751 pImage->pDescData = NULL;
6752 /* Re-open the image back. */
6753 pImage->pszFilename = pRenameState->pszOldImageName;
6754 rc = vmdkOpenImage(pImage, pImage->uOpenFlags);
6755 return rc;
6756}
6757/**
6758 * Rename worker doing the real work.
6759 *
6760 * @returns VBox status code.
6761 * @param pImage VMDK image instance.
6762 * @param pRenameState The rename state.
6763 * @param pszFilename The new filename.
6764 */
6765static int vmdkRenameWorker(PVMDKIMAGE pImage, PVMDKRENAMESTATE pRenameState, const char *pszFilename)
6766{
6767 int rc = VINF_SUCCESS;
6768 unsigned i, line;
6769 /* Update the descriptor with modified extent names. */
6770 for (i = 0, line = pImage->Descriptor.uFirstExtent;
6771 i < pRenameState->cExtents;
6772 i++, line = pImage->Descriptor.aNextLines[line])
6773 {
6774 /* Update the descriptor. */
6775 pRenameState->apszNewLines[i] = vmdkStrReplace(pImage->Descriptor.aLines[line],
6776 pRenameState->pszOldBaseName,
6777 pRenameState->pszNewBaseName);
6778 if (!pRenameState->apszNewLines[i])
6779 {
6780 rc = VERR_NO_MEMORY;
6781 break;
6782 }
6783 pImage->Descriptor.aLines[line] = pRenameState->apszNewLines[i];
6784 }
6785 if (RT_SUCCESS(rc))
6786 {
6787 /* Make sure the descriptor gets written back. */
6788 pImage->Descriptor.fDirty = true;
6789 /* Flush the descriptor now, in case it is embedded. */
6790 vmdkFlushImage(pImage, NULL);
6791 /* Close and rename/move extents. */
6792 for (i = 0; i < pRenameState->cExtents; i++)
6793 {
6794 PVMDKEXTENT pExtent = &pImage->pExtents[i];
6795 /* Compose new name for the extent. */
6796 pRenameState->apszNewName[i] = vmdkStrReplace(pExtent->pszFullname,
6797 pRenameState->pszOldFullName,
6798 pRenameState->pszNewFullName);
6799 if (!pRenameState->apszNewName[i])
6800 {
6801 rc = VERR_NO_MEMORY;
6802 break;
6803 }
6804 /* Close the extent file. */
6805 rc = vmdkFileClose(pImage, &pExtent->pFile, false);
6806 if (RT_FAILURE(rc))
6807                break;
6808 /* Rename the extent file. */
6809 rc = vdIfIoIntFileMove(pImage->pIfIo, pExtent->pszFullname, pRenameState->apszNewName[i], 0);
6810 if (RT_FAILURE(rc))
6811 break;
6812 /* Remember the old name. */
6813 pRenameState->apszOldName[i] = RTStrDup(pExtent->pszFullname);
6814 }
6815 if (RT_SUCCESS(rc))
6816 {
6817 /* Release all old stuff. */
6818 rc = vmdkFreeImage(pImage, false, true /*fFlush*/);
6819 if (RT_SUCCESS(rc))
6820 {
6821 pRenameState->fImageFreed = true;
6822 /* Last elements of new/old name arrays are intended for
6823 * storing descriptor's names.
6824 */
6825 pRenameState->apszNewName[pRenameState->cExtents] = RTStrDup(pszFilename);
6826 /* Rename the descriptor file if it's separate. */
6827 if (!pRenameState->fEmbeddedDesc)
6828 {
6829 rc = vdIfIoIntFileMove(pImage->pIfIo, pImage->pszFilename, pRenameState->apszNewName[pRenameState->cExtents], 0);
6830 if (RT_SUCCESS(rc))
6831 {
6832 /* Save old name only if we may need to change it back. */
6833 pRenameState->apszOldName[pRenameState->cExtents] = RTStrDup(pszFilename);
6834 }
6835 }
6836 /* Update pImage with the new information. */
6837 pImage->pszFilename = pszFilename;
6838 /* Open the new image. */
6839 rc = vmdkOpenImage(pImage, pImage->uOpenFlags);
6840 }
6841 }
6842 }
6843 return rc;
6844}
6845/** @copydoc VDIMAGEBACKEND::pfnRename */
6846static DECLCALLBACK(int) vmdkRename(void *pBackendData, const char *pszFilename)
6847{
6848 LogFlowFunc(("pBackendData=%#p pszFilename=%#p\n", pBackendData, pszFilename));
6849 PVMDKIMAGE pImage = (PVMDKIMAGE)pBackendData;
6850 VMDKRENAMESTATE RenameState;
6851 memset(&RenameState, 0, sizeof(RenameState));
6852 /* Check arguments. */
6853 AssertPtrReturn(pImage, VERR_INVALID_POINTER);
6854 AssertPtrReturn(pszFilename, VERR_INVALID_POINTER);
6855 AssertReturn(*pszFilename != '\0', VERR_INVALID_PARAMETER);
6856 AssertReturn(!(pImage->uImageFlags & VD_VMDK_IMAGE_FLAGS_RAWDISK), VERR_INVALID_PARAMETER);
6857 int rc = vmdkRenameStatePrepare(pImage, &RenameState, pszFilename);
6858 if (RT_SUCCESS(rc))
6859 {
6860 /* --- Up to this point we have not done any damage yet. --- */
6861 rc = vmdkRenameWorker(pImage, &RenameState, pszFilename);
6862 /* Roll back all changes in case of failure. */
6863 if (RT_FAILURE(rc))
6864 {
6865 int rrc = vmdkRenameRollback(pImage, &RenameState);
6866 AssertRC(rrc);
6867 }
6868 }
6869 vmdkRenameStateDestroy(&RenameState);
6870 LogFlowFunc(("returns %Rrc\n", rc));
6871 return rc;
6872}
6873/** @copydoc VDIMAGEBACKEND::pfnClose */
6874static DECLCALLBACK(int) vmdkClose(void *pBackendData, bool fDelete)
6875{
6876 LogFlowFunc(("pBackendData=%#p fDelete=%d\n", pBackendData, fDelete));
6877 PVMDKIMAGE pImage = (PVMDKIMAGE)pBackendData;
6878 int rc = vmdkFreeImage(pImage, fDelete, true /*fFlush*/);
6879 RTMemFree(pImage);
6880 LogFlowFunc(("returns %Rrc\n", rc));
6881 return rc;
6882}
6883/** @copydoc VDIMAGEBACKEND::pfnRead */
6884static DECLCALLBACK(int) vmdkRead(void *pBackendData, uint64_t uOffset, size_t cbToRead,
6885 PVDIOCTX pIoCtx, size_t *pcbActuallyRead)
6886{
6887 LogFlowFunc(("pBackendData=%#p uOffset=%llu pIoCtx=%#p cbToRead=%zu pcbActuallyRead=%#p\n",
6888 pBackendData, uOffset, pIoCtx, cbToRead, pcbActuallyRead));
6889 PVMDKIMAGE pImage = (PVMDKIMAGE)pBackendData;
6890 AssertPtr(pImage);
6891 Assert(uOffset % 512 == 0);
6892 Assert(cbToRead % 512 == 0);
6893 AssertPtrReturn(pIoCtx, VERR_INVALID_POINTER);
6894 AssertReturn(cbToRead, VERR_INVALID_PARAMETER);
6895 AssertReturn(uOffset + cbToRead <= pImage->cbSize, VERR_INVALID_PARAMETER);
6896 /* Find the extent and check access permissions as defined in the extent descriptor. */
6897 PVMDKEXTENT pExtent;
6898 uint64_t uSectorExtentRel;
6899 int rc = vmdkFindExtent(pImage, VMDK_BYTE2SECTOR(uOffset),
6900 &pExtent, &uSectorExtentRel);
6901 if ( RT_SUCCESS(rc)
6902 && pExtent->enmAccess != VMDKACCESS_NOACCESS)
6903 {
6904 /* Clip read range to remain in this extent. */
6905 cbToRead = RT_MIN(cbToRead, VMDK_SECTOR2BYTE(pExtent->uSectorOffset + pExtent->cNominalSectors - uSectorExtentRel));
6906 /* Handle the read according to the current extent type. */
6907 switch (pExtent->enmType)
6908 {
6909 case VMDKETYPE_HOSTED_SPARSE:
6910 {
6911 uint64_t uSectorExtentAbs;
6912 rc = vmdkGetSector(pImage, pIoCtx, pExtent, uSectorExtentRel, &uSectorExtentAbs);
6913 if (RT_FAILURE(rc))
6914 break;
6915 /* Clip read range to at most the rest of the grain. */
6916 cbToRead = RT_MIN(cbToRead, VMDK_SECTOR2BYTE(pExtent->cSectorsPerGrain - uSectorExtentRel % pExtent->cSectorsPerGrain));
6917 Assert(!(cbToRead % 512));
6918 if (uSectorExtentAbs == 0)
6919 {
6920 if ( !(pImage->uImageFlags & VD_VMDK_IMAGE_FLAGS_STREAM_OPTIMIZED)
6921 || !(pImage->uOpenFlags & VD_OPEN_FLAGS_READONLY)
6922 || !(pImage->uOpenFlags & VD_OPEN_FLAGS_SEQUENTIAL))
6923 rc = VERR_VD_BLOCK_FREE;
6924 else
6925 rc = vmdkStreamReadSequential(pImage, pExtent,
6926 uSectorExtentRel,
6927 pIoCtx, cbToRead);
6928 }
6929 else
6930 {
6931 if (pImage->uImageFlags & VD_VMDK_IMAGE_FLAGS_STREAM_OPTIMIZED)
6932 {
6933 AssertMsg(vdIfIoIntIoCtxIsSynchronous(pImage->pIfIo, pIoCtx),
6934 ("Async I/O is not supported for stream optimized VMDK's\n"));
6935 uint32_t uSectorInGrain = uSectorExtentRel % pExtent->cSectorsPerGrain;
6936 uSectorExtentAbs -= uSectorInGrain;
6937 if (pExtent->uGrainSectorAbs != uSectorExtentAbs)
6938 {
6939 uint64_t uLBA = 0; /* gcc maybe uninitialized */
6940 rc = vmdkFileInflateSync(pImage, pExtent,
6941 VMDK_SECTOR2BYTE(uSectorExtentAbs),
6942 pExtent->pvGrain,
6943 VMDK_SECTOR2BYTE(pExtent->cSectorsPerGrain),
6944 NULL, &uLBA, NULL);
6945 if (RT_FAILURE(rc))
6946 {
6947 pExtent->uGrainSectorAbs = 0;
6948 break;
6949 }
6950 pExtent->uGrainSectorAbs = uSectorExtentAbs;
6951 pExtent->uGrain = uSectorExtentRel / pExtent->cSectorsPerGrain;
6952 Assert(uLBA == uSectorExtentRel);
6953 }
6954 vdIfIoIntIoCtxCopyTo(pImage->pIfIo, pIoCtx,
6955 (uint8_t *)pExtent->pvGrain
6956 + VMDK_SECTOR2BYTE(uSectorInGrain),
6957 cbToRead);
6958 }
6959 else
6960 rc = vdIfIoIntFileReadUser(pImage->pIfIo, pExtent->pFile->pStorage,
6961 VMDK_SECTOR2BYTE(uSectorExtentAbs),
6962 pIoCtx, cbToRead);
6963 }
6964 break;
6965 }
6966 case VMDKETYPE_VMFS:
6967 case VMDKETYPE_FLAT:
6968 rc = vdIfIoIntFileReadUser(pImage->pIfIo, pExtent->pFile->pStorage,
6969 VMDK_SECTOR2BYTE(uSectorExtentRel),
6970 pIoCtx, cbToRead);
6971 break;
6972 case VMDKETYPE_ZERO:
6973 {
6974 size_t cbSet;
6975 cbSet = vdIfIoIntIoCtxSet(pImage->pIfIo, pIoCtx, 0, cbToRead);
6976 Assert(cbSet == cbToRead);
6977 break;
6978 }
6979 }
6980 if (pcbActuallyRead)
6981 *pcbActuallyRead = cbToRead;
6982 }
6983 else if (RT_SUCCESS(rc))
6984 rc = VERR_VD_VMDK_INVALID_STATE;
6985 LogFlowFunc(("returns %Rrc\n", rc));
6986 return rc;
6987}
6988/** @copydoc VDIMAGEBACKEND::pfnWrite */
6989static DECLCALLBACK(int) vmdkWrite(void *pBackendData, uint64_t uOffset, size_t cbToWrite,
6990 PVDIOCTX pIoCtx, size_t *pcbWriteProcess, size_t *pcbPreRead,
6991 size_t *pcbPostRead, unsigned fWrite)
6992{
6993 LogFlowFunc(("pBackendData=%#p uOffset=%llu pIoCtx=%#p cbToWrite=%zu pcbWriteProcess=%#p pcbPreRead=%#p pcbPostRead=%#p\n",
6994 pBackendData, uOffset, pIoCtx, cbToWrite, pcbWriteProcess, pcbPreRead, pcbPostRead));
6995 PVMDKIMAGE pImage = (PVMDKIMAGE)pBackendData;
6996 int rc;
6997 AssertPtr(pImage);
6998 Assert(uOffset % 512 == 0);
6999 Assert(cbToWrite % 512 == 0);
7000 AssertPtrReturn(pIoCtx, VERR_INVALID_POINTER);
7001 AssertReturn(cbToWrite, VERR_INVALID_PARAMETER);
7002 if (!(pImage->uOpenFlags & VD_OPEN_FLAGS_READONLY))
7003 {
7004 PVMDKEXTENT pExtent;
7005 uint64_t uSectorExtentRel;
7006 uint64_t uSectorExtentAbs;
7007 /* No size check here, will do that later when the extent is located.
7008 * There are sparse images out there which according to the spec are
7009 * invalid, because the total size is not a multiple of the grain size.
7010 * Also for sparse images which are stitched together in odd ways (not at
7011 * grain boundaries, and with the nominal size not being a multiple of the
7012 * grain size), this would prevent writing to the last grain. */
7013 rc = vmdkFindExtent(pImage, VMDK_BYTE2SECTOR(uOffset),
7014 &pExtent, &uSectorExtentRel);
7015 if (RT_SUCCESS(rc))
7016 {
7017 if ( pExtent->enmAccess != VMDKACCESS_READWRITE
7018 && ( !(pImage->uImageFlags & VD_VMDK_IMAGE_FLAGS_STREAM_OPTIMIZED)
7019 && !pImage->pExtents[0].uAppendPosition
7020 && pExtent->enmAccess != VMDKACCESS_READONLY))
7021 rc = VERR_VD_VMDK_INVALID_STATE;
7022 else
7023 {
7024 /* Handle the write according to the current extent type. */
7025 switch (pExtent->enmType)
7026 {
7027 case VMDKETYPE_HOSTED_SPARSE:
7028 rc = vmdkGetSector(pImage, pIoCtx, pExtent, uSectorExtentRel, &uSectorExtentAbs);
7029 if (RT_SUCCESS(rc))
7030 {
7031 if ( pImage->uImageFlags & VD_VMDK_IMAGE_FLAGS_STREAM_OPTIMIZED
7032 && uSectorExtentRel < (uint64_t)pExtent->uLastGrainAccess * pExtent->cSectorsPerGrain)
7033 rc = VERR_VD_VMDK_INVALID_WRITE;
7034 else
7035 {
7036 /* Clip write range to at most the rest of the grain. */
7037 cbToWrite = RT_MIN(cbToWrite,
7038 VMDK_SECTOR2BYTE( pExtent->cSectorsPerGrain
7039 - uSectorExtentRel % pExtent->cSectorsPerGrain));
7040 if (uSectorExtentAbs == 0)
7041 {
7042 if (!(pImage->uImageFlags & VD_VMDK_IMAGE_FLAGS_STREAM_OPTIMIZED))
7043 {
7044 if (cbToWrite == VMDK_SECTOR2BYTE(pExtent->cSectorsPerGrain))
7045 {
7046 /* Full block write to a previously unallocated block.
7047 * Check if the caller wants to avoid the automatic alloc. */
7048 if (!(fWrite & VD_WRITE_NO_ALLOC))
7049 {
7050 /* Allocate GT and find out where to store the grain. */
7051 rc = vmdkAllocGrain(pImage, pExtent, pIoCtx,
7052 uSectorExtentRel, cbToWrite);
7053 }
7054 else
7055 rc = VERR_VD_BLOCK_FREE;
7056 *pcbPreRead = 0;
7057 *pcbPostRead = 0;
7058 }
7059 else
7060 {
7061 /* Clip write range to remain in this extent. */
7062 cbToWrite = RT_MIN(cbToWrite,
7063 VMDK_SECTOR2BYTE( pExtent->uSectorOffset
7064 + pExtent->cNominalSectors - uSectorExtentRel));
7065 *pcbPreRead = VMDK_SECTOR2BYTE(uSectorExtentRel % pExtent->cSectorsPerGrain);
7066 *pcbPostRead = VMDK_SECTOR2BYTE(pExtent->cSectorsPerGrain) - cbToWrite - *pcbPreRead;
7067 rc = VERR_VD_BLOCK_FREE;
7068 }
7069 }
7070 else
7071 rc = vmdkStreamAllocGrain(pImage, pExtent, uSectorExtentRel,
7072 pIoCtx, cbToWrite);
7073 }
7074 else
7075 {
7076 if (pImage->uImageFlags & VD_VMDK_IMAGE_FLAGS_STREAM_OPTIMIZED)
7077 {
7078 /* A partial write to a streamOptimized image is simply
7079 * invalid. It requires rewriting already compressed data
7080 * which is somewhere between expensive and impossible. */
7081 rc = VERR_VD_VMDK_INVALID_STATE;
7082 pExtent->uGrainSectorAbs = 0;
7083 AssertRC(rc);
7084 }
7085 else
7086 {
7087 Assert(!(pImage->uImageFlags & VD_VMDK_IMAGE_FLAGS_STREAM_OPTIMIZED));
7088 rc = vdIfIoIntFileWriteUser(pImage->pIfIo, pExtent->pFile->pStorage,
7089 VMDK_SECTOR2BYTE(uSectorExtentAbs),
7090 pIoCtx, cbToWrite, NULL, NULL);
7091 }
7092 }
7093 }
7094 }
7095 break;
7096 case VMDKETYPE_VMFS:
7097 case VMDKETYPE_FLAT:
7098 /* Clip write range to remain in this extent. */
7099 cbToWrite = RT_MIN(cbToWrite, VMDK_SECTOR2BYTE(pExtent->uSectorOffset + pExtent->cNominalSectors - uSectorExtentRel));
7100 rc = vdIfIoIntFileWriteUser(pImage->pIfIo, pExtent->pFile->pStorage,
7101 VMDK_SECTOR2BYTE(uSectorExtentRel),
7102 pIoCtx, cbToWrite, NULL, NULL);
7103 break;
7104 case VMDKETYPE_ZERO:
7105 /* Clip write range to remain in this extent. */
7106 cbToWrite = RT_MIN(cbToWrite, VMDK_SECTOR2BYTE(pExtent->uSectorOffset + pExtent->cNominalSectors - uSectorExtentRel));
7107 break;
7108 }
7109 }
7110 if (pcbWriteProcess)
7111 *pcbWriteProcess = cbToWrite;
7112 }
7113 }
7114 else
7115 rc = VERR_VD_IMAGE_READ_ONLY;
7116 LogFlowFunc(("returns %Rrc\n", rc));
7117 return rc;
7118}
7119/** @copydoc VDIMAGEBACKEND::pfnFlush */
7120static DECLCALLBACK(int) vmdkFlush(void *pBackendData, PVDIOCTX pIoCtx)
7121{
7122 PVMDKIMAGE pImage = (PVMDKIMAGE)pBackendData;
7123 return vmdkFlushImage(pImage, pIoCtx);
7124}
7125/** @copydoc VDIMAGEBACKEND::pfnGetVersion */
7126static DECLCALLBACK(unsigned) vmdkGetVersion(void *pBackendData)
7127{
7128 LogFlowFunc(("pBackendData=%#p\n", pBackendData));
7129 PVMDKIMAGE pImage = (PVMDKIMAGE)pBackendData;
7130 AssertPtrReturn(pImage, 0);
7131 return VMDK_IMAGE_VERSION;
7132}
7133/** @copydoc VDIMAGEBACKEND::pfnGetFileSize */
7134static DECLCALLBACK(uint64_t) vmdkGetFileSize(void *pBackendData)
7135{
7136 LogFlowFunc(("pBackendData=%#p\n", pBackendData));
7137 PVMDKIMAGE pImage = (PVMDKIMAGE)pBackendData;
7138 uint64_t cb = 0;
7139 AssertPtrReturn(pImage, 0);
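    /* Sum the on-disk sizes of the descriptor/base file and of all extent files. */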
7140 if (pImage->pFile != NULL)
7141 {
7142 uint64_t cbFile;
7143 int rc = vdIfIoIntFileGetSize(pImage->pIfIo, pImage->pFile->pStorage, &cbFile);
7144 if (RT_SUCCESS(rc))
7145 cb += cbFile;
7146 }
7147 for (unsigned i = 0; i < pImage->cExtents; i++)
7148 {
7149 if (pImage->pExtents[i].pFile != NULL)
7150 {
7151 uint64_t cbFile;
7152 int rc = vdIfIoIntFileGetSize(pImage->pIfIo, pImage->pExtents[i].pFile->pStorage, &cbFile);
7153 if (RT_SUCCESS(rc))
7154 cb += cbFile;
7155 }
7156 }
7157 LogFlowFunc(("returns %lld\n", cb));
7158 return cb;
7159}
7160/** @copydoc VDIMAGEBACKEND::pfnGetPCHSGeometry */
7161static DECLCALLBACK(int) vmdkGetPCHSGeometry(void *pBackendData, PVDGEOMETRY pPCHSGeometry)
7162{
7163 LogFlowFunc(("pBackendData=%#p pPCHSGeometry=%#p\n", pBackendData, pPCHSGeometry));
7164 PVMDKIMAGE pImage = (PVMDKIMAGE)pBackendData;
7165 int rc = VINF_SUCCESS;
7166 AssertPtrReturn(pImage, VERR_VD_NOT_OPENED);
7167 if (pImage->PCHSGeometry.cCylinders)
7168 *pPCHSGeometry = pImage->PCHSGeometry;
7169 else
7170 rc = VERR_VD_GEOMETRY_NOT_SET;
7171 LogFlowFunc(("returns %Rrc (PCHS=%u/%u/%u)\n", rc, pPCHSGeometry->cCylinders, pPCHSGeometry->cHeads, pPCHSGeometry->cSectors));
7172 return rc;
7173}
7174/** @copydoc VDIMAGEBACKEND::pfnSetPCHSGeometry */
7175static DECLCALLBACK(int) vmdkSetPCHSGeometry(void *pBackendData, PCVDGEOMETRY pPCHSGeometry)
7176{
7177 LogFlowFunc(("pBackendData=%#p pPCHSGeometry=%#p PCHS=%u/%u/%u\n",
7178 pBackendData, pPCHSGeometry, pPCHSGeometry->cCylinders, pPCHSGeometry->cHeads, pPCHSGeometry->cSectors));
7179 PVMDKIMAGE pImage = (PVMDKIMAGE)pBackendData;
7180 int rc = VINF_SUCCESS;
7181 AssertPtrReturn(pImage, VERR_VD_NOT_OPENED);
7182 if (!(pImage->uOpenFlags & VD_OPEN_FLAGS_READONLY))
7183 {
7184         if (!(pImage->uImageFlags & VD_VMDK_IMAGE_FLAGS_STREAM_OPTIMIZED))
7185 {
7186 rc = vmdkDescSetPCHSGeometry(pImage, pPCHSGeometry);
7187 if (RT_SUCCESS(rc))
7188 pImage->PCHSGeometry = *pPCHSGeometry;
7189 }
7190 else
7191 rc = VERR_NOT_SUPPORTED;
7192 }
7193 else
7194 rc = VERR_VD_IMAGE_READ_ONLY;
7195 LogFlowFunc(("returns %Rrc\n", rc));
7196 return rc;
7197}
7198/** @copydoc VDIMAGEBACKEND::pfnGetLCHSGeometry */
7199static DECLCALLBACK(int) vmdkGetLCHSGeometry(void *pBackendData, PVDGEOMETRY pLCHSGeometry)
7200{
7201 LogFlowFunc(("pBackendData=%#p pLCHSGeometry=%#p\n", pBackendData, pLCHSGeometry));
7202 PVMDKIMAGE pImage = (PVMDKIMAGE)pBackendData;
7203 int rc = VINF_SUCCESS;
7204 AssertPtrReturn(pImage, VERR_VD_NOT_OPENED);
7205 if (pImage->LCHSGeometry.cCylinders)
7206 *pLCHSGeometry = pImage->LCHSGeometry;
7207 else
7208 rc = VERR_VD_GEOMETRY_NOT_SET;
7209 LogFlowFunc(("returns %Rrc (LCHS=%u/%u/%u)\n", rc, pLCHSGeometry->cCylinders, pLCHSGeometry->cHeads, pLCHSGeometry->cSectors));
7210 return rc;
7211}
7212/** @copydoc VDIMAGEBACKEND::pfnSetLCHSGeometry */
7213static DECLCALLBACK(int) vmdkSetLCHSGeometry(void *pBackendData, PCVDGEOMETRY pLCHSGeometry)
7214{
7215 LogFlowFunc(("pBackendData=%#p pLCHSGeometry=%#p LCHS=%u/%u/%u\n",
7216 pBackendData, pLCHSGeometry, pLCHSGeometry->cCylinders, pLCHSGeometry->cHeads, pLCHSGeometry->cSectors));
7217 PVMDKIMAGE pImage = (PVMDKIMAGE)pBackendData;
7218 int rc = VINF_SUCCESS;
7219 AssertPtrReturn(pImage, VERR_VD_NOT_OPENED);
7220 if (!(pImage->uOpenFlags & VD_OPEN_FLAGS_READONLY))
7221 {
7222         if (!(pImage->uImageFlags & VD_VMDK_IMAGE_FLAGS_STREAM_OPTIMIZED))
7223 {
7224 rc = vmdkDescSetLCHSGeometry(pImage, pLCHSGeometry);
7225 if (RT_SUCCESS(rc))
7226 pImage->LCHSGeometry = *pLCHSGeometry;
7227 }
7228 else
7229 rc = VERR_NOT_SUPPORTED;
7230 }
7231 else
7232 rc = VERR_VD_IMAGE_READ_ONLY;
7233 LogFlowFunc(("returns %Rrc\n", rc));
7234 return rc;
7235}
7236/** @copydoc VDIMAGEBACKEND::pfnQueryRegions */
7237static DECLCALLBACK(int) vmdkQueryRegions(void *pBackendData, PCVDREGIONLIST *ppRegionList)
7238{
7239 LogFlowFunc(("pBackendData=%#p ppRegionList=%#p\n", pBackendData, ppRegionList));
7240 PVMDKIMAGE pThis = (PVMDKIMAGE)pBackendData;
7241 AssertPtrReturn(pThis, VERR_VD_NOT_OPENED);
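    /* The returned region list is owned by the image; vmdkRegionListRelease below is a no-op. */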
7242 *ppRegionList = &pThis->RegionList;
7243 LogFlowFunc(("returns %Rrc\n", VINF_SUCCESS));
7244 return VINF_SUCCESS;
7245}
7246/** @copydoc VDIMAGEBACKEND::pfnRegionListRelease */
7247static DECLCALLBACK(void) vmdkRegionListRelease(void *pBackendData, PCVDREGIONLIST pRegionList)
7248{
7249 RT_NOREF1(pRegionList);
7250 LogFlowFunc(("pBackendData=%#p pRegionList=%#p\n", pBackendData, pRegionList));
7251 PVMDKIMAGE pThis = (PVMDKIMAGE)pBackendData;
7252 AssertPtr(pThis); RT_NOREF(pThis);
7253 /* Nothing to do here. */
7254}
7255/** @copydoc VDIMAGEBACKEND::pfnGetImageFlags */
7256static DECLCALLBACK(unsigned) vmdkGetImageFlags(void *pBackendData)
7257{
7258 LogFlowFunc(("pBackendData=%#p\n", pBackendData));
7259 PVMDKIMAGE pImage = (PVMDKIMAGE)pBackendData;
7260 AssertPtrReturn(pImage, 0);
7261 LogFlowFunc(("returns %#x\n", pImage->uImageFlags));
7262 return pImage->uImageFlags;
7263}
7264/** @copydoc VDIMAGEBACKEND::pfnGetOpenFlags */
7265static DECLCALLBACK(unsigned) vmdkGetOpenFlags(void *pBackendData)
7266{
7267 LogFlowFunc(("pBackendData=%#p\n", pBackendData));
7268 PVMDKIMAGE pImage = (PVMDKIMAGE)pBackendData;
7269 AssertPtrReturn(pImage, 0);
7270 LogFlowFunc(("returns %#x\n", pImage->uOpenFlags));
7271 return pImage->uOpenFlags;
7272}
7273/** @copydoc VDIMAGEBACKEND::pfnSetOpenFlags */
7274static DECLCALLBACK(int) vmdkSetOpenFlags(void *pBackendData, unsigned uOpenFlags)
7275{
7276 LogFlowFunc(("pBackendData=%#p uOpenFlags=%#x\n", pBackendData, uOpenFlags));
7277 PVMDKIMAGE pImage = (PVMDKIMAGE)pBackendData;
7278 int rc;
7279 /* Image must be opened and the new flags must be valid. */
7280 if (!pImage || (uOpenFlags & ~( VD_OPEN_FLAGS_READONLY | VD_OPEN_FLAGS_INFO
7281 | VD_OPEN_FLAGS_ASYNC_IO | VD_OPEN_FLAGS_SHAREABLE
7282 | VD_OPEN_FLAGS_SEQUENTIAL | VD_OPEN_FLAGS_SKIP_CONSISTENCY_CHECKS)))
7283 rc = VERR_INVALID_PARAMETER;
7284 else
7285 {
7286 /* StreamOptimized images need special treatment: reopen is prohibited. */
7287 if (pImage->uImageFlags & VD_VMDK_IMAGE_FLAGS_STREAM_OPTIMIZED)
7288 {
7289 if (pImage->uOpenFlags == uOpenFlags)
7290 rc = VINF_SUCCESS;
7291 else
7292 rc = VERR_INVALID_PARAMETER;
7293 }
7294 else
7295 {
7296 /* Implement this operation via reopening the image. */
7297 vmdkFreeImage(pImage, false, true /*fFlush*/);
7298 rc = vmdkOpenImage(pImage, uOpenFlags);
7299 }
7300 }
7301 LogFlowFunc(("returns %Rrc\n", rc));
7302 return rc;
7303}
7304/** @copydoc VDIMAGEBACKEND::pfnGetComment */
7305static DECLCALLBACK(int) vmdkGetComment(void *pBackendData, char *pszComment, size_t cbComment)
7306{
7307 LogFlowFunc(("pBackendData=%#p pszComment=%#p cbComment=%zu\n", pBackendData, pszComment, cbComment));
7308 PVMDKIMAGE pImage = (PVMDKIMAGE)pBackendData;
7309 AssertPtrReturn(pImage, VERR_VD_NOT_OPENED);
7310 char *pszCommentEncoded = NULL;
7311 int rc = vmdkDescDDBGetStr(pImage, &pImage->Descriptor,
7312 "ddb.comment", &pszCommentEncoded);
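    /* A missing "ddb.comment" key simply means no comment is set; this is not an error. */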
7313 if (rc == VERR_VD_VMDK_VALUE_NOT_FOUND)
7314 {
7315 pszCommentEncoded = NULL;
7316 rc = VINF_SUCCESS;
7317 }
7318 if (RT_SUCCESS(rc))
7319 {
7320 if (pszComment && pszCommentEncoded)
7321 rc = vmdkDecodeString(pszCommentEncoded, pszComment, cbComment);
7322 else if (pszComment)
7323 *pszComment = '\0';
7324 if (pszCommentEncoded)
7325 RTMemTmpFree(pszCommentEncoded);
7326 }
7327 LogFlowFunc(("returns %Rrc comment='%s'\n", rc, pszComment));
7328 return rc;
7329}
7330/** @copydoc VDIMAGEBACKEND::pfnSetComment */
7331static DECLCALLBACK(int) vmdkSetComment(void *pBackendData, const char *pszComment)
7332{
7333 LogFlowFunc(("pBackendData=%#p pszComment=\"%s\"\n", pBackendData, pszComment));
7334 PVMDKIMAGE pImage = (PVMDKIMAGE)pBackendData;
7335 int rc;
7336 AssertPtrReturn(pImage, VERR_VD_NOT_OPENED);
7337 if (!(pImage->uOpenFlags & VD_OPEN_FLAGS_READONLY))
7338 {
7339         if (!(pImage->uImageFlags & VD_VMDK_IMAGE_FLAGS_STREAM_OPTIMIZED))
7340 rc = vmdkSetImageComment(pImage, pszComment);
7341 else
7342 rc = VERR_NOT_SUPPORTED;
7343 }
7344 else
7345 rc = VERR_VD_IMAGE_READ_ONLY;
7346 LogFlowFunc(("returns %Rrc\n", rc));
7347 return rc;
7348}
7349/** @copydoc VDIMAGEBACKEND::pfnGetUuid */
7350static DECLCALLBACK(int) vmdkGetUuid(void *pBackendData, PRTUUID pUuid)
7351{
7352 LogFlowFunc(("pBackendData=%#p pUuid=%#p\n", pBackendData, pUuid));
7353 PVMDKIMAGE pImage = (PVMDKIMAGE)pBackendData;
7354 AssertPtrReturn(pImage, VERR_VD_NOT_OPENED);
7355 *pUuid = pImage->ImageUuid;
7356 LogFlowFunc(("returns %Rrc (%RTuuid)\n", VINF_SUCCESS, pUuid));
7357 return VINF_SUCCESS;
7358}
7359/** @copydoc VDIMAGEBACKEND::pfnSetUuid */
7360static DECLCALLBACK(int) vmdkSetUuid(void *pBackendData, PCRTUUID pUuid)
7361{
7362 LogFlowFunc(("pBackendData=%#p Uuid=%RTuuid\n", pBackendData, pUuid));
7363 PVMDKIMAGE pImage = (PVMDKIMAGE)pBackendData;
7364 int rc = VINF_SUCCESS;
7365 AssertPtrReturn(pImage, VERR_VD_NOT_OPENED);
7366 if (!(pImage->uOpenFlags & VD_OPEN_FLAGS_READONLY))
7367 {
7368         if (!(pImage->uImageFlags & VD_VMDK_IMAGE_FLAGS_STREAM_OPTIMIZED))
7369 {
7370 pImage->ImageUuid = *pUuid;
7371 rc = vmdkDescDDBSetUuid(pImage, &pImage->Descriptor,
7372 VMDK_DDB_IMAGE_UUID, pUuid);
7373 if (RT_FAILURE(rc))
7374 rc = vdIfError(pImage->pIfError, rc, RT_SRC_POS,
7375 N_("VMDK: error storing image UUID in descriptor in '%s'"), pImage->pszFilename);
7376 }
7377 else
7378 rc = VERR_NOT_SUPPORTED;
7379 }
7380 else
7381 rc = VERR_VD_IMAGE_READ_ONLY;
7382 LogFlowFunc(("returns %Rrc\n", rc));
7383 return rc;
7384}
7385/** @copydoc VDIMAGEBACKEND::pfnGetModificationUuid */
7386static DECLCALLBACK(int) vmdkGetModificationUuid(void *pBackendData, PRTUUID pUuid)
7387{
7388 LogFlowFunc(("pBackendData=%#p pUuid=%#p\n", pBackendData, pUuid));
7389 PVMDKIMAGE pImage = (PVMDKIMAGE)pBackendData;
7390 AssertPtrReturn(pImage, VERR_VD_NOT_OPENED);
7391 *pUuid = pImage->ModificationUuid;
7392 LogFlowFunc(("returns %Rrc (%RTuuid)\n", VINF_SUCCESS, pUuid));
7393 return VINF_SUCCESS;
7394}
7395/** @copydoc VDIMAGEBACKEND::pfnSetModificationUuid */
7396static DECLCALLBACK(int) vmdkSetModificationUuid(void *pBackendData, PCRTUUID pUuid)
7397{
7398 LogFlowFunc(("pBackendData=%#p Uuid=%RTuuid\n", pBackendData, pUuid));
7399 PVMDKIMAGE pImage = (PVMDKIMAGE)pBackendData;
7400 int rc = VINF_SUCCESS;
7401 AssertPtrReturn(pImage, VERR_VD_NOT_OPENED);
7402 if (!(pImage->uOpenFlags & VD_OPEN_FLAGS_READONLY))
7403 {
7404         if (!(pImage->uImageFlags & VD_VMDK_IMAGE_FLAGS_STREAM_OPTIMIZED))
7405 {
7406 /* Only touch the modification uuid if it changed. */
7407 if (RTUuidCompare(&pImage->ModificationUuid, pUuid))
7408 {
7409 pImage->ModificationUuid = *pUuid;
7410 rc = vmdkDescDDBSetUuid(pImage, &pImage->Descriptor,
7411 VMDK_DDB_MODIFICATION_UUID, pUuid);
7412 if (RT_FAILURE(rc))
7413 rc = vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: error storing modification UUID in descriptor in '%s'"), pImage->pszFilename);
7414 }
7415 }
7416 else
7417 rc = VERR_NOT_SUPPORTED;
7418 }
7419 else
7420 rc = VERR_VD_IMAGE_READ_ONLY;
7421 LogFlowFunc(("returns %Rrc\n", rc));
7422 return rc;
7423}
7424/** @copydoc VDIMAGEBACKEND::pfnGetParentUuid */
7425static DECLCALLBACK(int) vmdkGetParentUuid(void *pBackendData, PRTUUID pUuid)
7426{
7427 LogFlowFunc(("pBackendData=%#p pUuid=%#p\n", pBackendData, pUuid));
7428 PVMDKIMAGE pImage = (PVMDKIMAGE)pBackendData;
7429 AssertPtrReturn(pImage, VERR_VD_NOT_OPENED);
7430 *pUuid = pImage->ParentUuid;
7431 LogFlowFunc(("returns %Rrc (%RTuuid)\n", VINF_SUCCESS, pUuid));
7432 return VINF_SUCCESS;
7433}
7434/** @copydoc VDIMAGEBACKEND::pfnSetParentUuid */
7435static DECLCALLBACK(int) vmdkSetParentUuid(void *pBackendData, PCRTUUID pUuid)
7436{
7437 LogFlowFunc(("pBackendData=%#p Uuid=%RTuuid\n", pBackendData, pUuid));
7438 PVMDKIMAGE pImage = (PVMDKIMAGE)pBackendData;
7439 int rc = VINF_SUCCESS;
7440 AssertPtrReturn(pImage, VERR_VD_NOT_OPENED);
7441 if (!(pImage->uOpenFlags & VD_OPEN_FLAGS_READONLY))
7442 {
7443         if (!(pImage->uImageFlags & VD_VMDK_IMAGE_FLAGS_STREAM_OPTIMIZED))
7444 {
7445 pImage->ParentUuid = *pUuid;
7446 rc = vmdkDescDDBSetUuid(pImage, &pImage->Descriptor,
7447 VMDK_DDB_PARENT_UUID, pUuid);
7448 if (RT_FAILURE(rc))
7449 rc = vdIfError(pImage->pIfError, rc, RT_SRC_POS,
7450 N_("VMDK: error storing parent image UUID in descriptor in '%s'"), pImage->pszFilename);
7451 }
7452 else
7453 rc = VERR_NOT_SUPPORTED;
7454 }
7455 else
7456 rc = VERR_VD_IMAGE_READ_ONLY;
7457 LogFlowFunc(("returns %Rrc\n", rc));
7458 return rc;
7459}
7460/** @copydoc VDIMAGEBACKEND::pfnGetParentModificationUuid */
7461static DECLCALLBACK(int) vmdkGetParentModificationUuid(void *pBackendData, PRTUUID pUuid)
7462{
7463 LogFlowFunc(("pBackendData=%#p pUuid=%#p\n", pBackendData, pUuid));
7464 PVMDKIMAGE pImage = (PVMDKIMAGE)pBackendData;
7465 AssertPtrReturn(pImage, VERR_VD_NOT_OPENED);
7466 *pUuid = pImage->ParentModificationUuid;
7467 LogFlowFunc(("returns %Rrc (%RTuuid)\n", VINF_SUCCESS, pUuid));
7468 return VINF_SUCCESS;
7469}
7470/** @copydoc VDIMAGEBACKEND::pfnSetParentModificationUuid */
7471static DECLCALLBACK(int) vmdkSetParentModificationUuid(void *pBackendData, PCRTUUID pUuid)
7472{
7473 LogFlowFunc(("pBackendData=%#p Uuid=%RTuuid\n", pBackendData, pUuid));
7474 PVMDKIMAGE pImage = (PVMDKIMAGE)pBackendData;
7475 int rc = VINF_SUCCESS;
7476 AssertPtrReturn(pImage, VERR_VD_NOT_OPENED);
7477 if (!(pImage->uOpenFlags & VD_OPEN_FLAGS_READONLY))
7478 {
7479         if (!(pImage->uImageFlags & VD_VMDK_IMAGE_FLAGS_STREAM_OPTIMIZED))
7480 {
7481 pImage->ParentModificationUuid = *pUuid;
7482 rc = vmdkDescDDBSetUuid(pImage, &pImage->Descriptor,
7483 VMDK_DDB_PARENT_MODIFICATION_UUID, pUuid);
7484 if (RT_FAILURE(rc))
7485 rc = vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: error storing parent image UUID in descriptor in '%s'"), pImage->pszFilename);
7486 }
7487 else
7488 rc = VERR_NOT_SUPPORTED;
7489 }
7490 else
7491 rc = VERR_VD_IMAGE_READ_ONLY;
7492 LogFlowFunc(("returns %Rrc\n", rc));
7493 return rc;
7494}
7495/** @copydoc VDIMAGEBACKEND::pfnDump */
7496static DECLCALLBACK(void) vmdkDump(void *pBackendData)
7497{
7498 PVMDKIMAGE pImage = (PVMDKIMAGE)pBackendData;
7499 AssertPtrReturnVoid(pImage);
7500 vdIfErrorMessage(pImage->pIfError, "Header: Geometry PCHS=%u/%u/%u LCHS=%u/%u/%u cbSector=%llu\n",
7501 pImage->PCHSGeometry.cCylinders, pImage->PCHSGeometry.cHeads, pImage->PCHSGeometry.cSectors,
7502 pImage->LCHSGeometry.cCylinders, pImage->LCHSGeometry.cHeads, pImage->LCHSGeometry.cSectors,
7503 VMDK_BYTE2SECTOR(pImage->cbSize));
7504 vdIfErrorMessage(pImage->pIfError, "Header: uuidCreation={%RTuuid}\n", &pImage->ImageUuid);
7505 vdIfErrorMessage(pImage->pIfError, "Header: uuidModification={%RTuuid}\n", &pImage->ModificationUuid);
7506 vdIfErrorMessage(pImage->pIfError, "Header: uuidParent={%RTuuid}\n", &pImage->ParentUuid);
7507 vdIfErrorMessage(pImage->pIfError, "Header: uuidParentModification={%RTuuid}\n", &pImage->ParentModificationUuid);
7508}
7509
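/**
 * Replaces the sector count in a descriptor extent line (resize helper).
 *
 * Rewrites descriptor line @a line so that the old extent size in sectors is
 * replaced by the new one, e.g. turning a line like
 *     RW 2048 FLAT "disk-flat.vmdk" 0
 * into
 *     RW 4096 FLAT "disk-flat.vmdk" 0
 * (illustrative values only).
 *
 * @returns VBox status code.
 * @param   pImage       VMDK image instance data.
 * @param   line         Index of the descriptor line describing the extent.
 * @param   cSectorsOld  Current size of the extent in sectors.
 * @param   cSectorsNew  New size of the extent in sectors.
 */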
7510static int vmdkRepaceExtentSize(PVMDKIMAGE pImage, unsigned line, uint64_t cSectorsOld,
7511 uint64_t cSectorsNew)
7512{
7513 char * szOldExtentSectors = (char *)RTMemAlloc(UINT64_MAX_BUFF_SIZE);
7514 if (!szOldExtentSectors)
7515 return VERR_NO_MEMORY;
7516
7517 int cbWritten = RTStrPrintf2(szOldExtentSectors, UINT64_MAX_BUFF_SIZE, "%llu", cSectorsOld);
7518 if (cbWritten <= 0 || cbWritten > UINT64_MAX_BUFF_SIZE)
7519 {
7520 RTMemFree(szOldExtentSectors);
7521 szOldExtentSectors = NULL;
7522
7523 return VERR_BUFFER_OVERFLOW;
7524 }
7525
7526     char * szNewExtentSectors = (char *)RTMemAlloc(UINT64_MAX_BUFF_SIZE);
7527     if (!szNewExtentSectors)
     {
         RTMemFree(szOldExtentSectors);
7528         return VERR_NO_MEMORY;
     }
7529
7530 cbWritten = RTStrPrintf2(szNewExtentSectors, UINT64_MAX_BUFF_SIZE, "%llu", cSectorsNew);
7531 if (cbWritten <= 0 || cbWritten > UINT64_MAX_BUFF_SIZE)
7532 {
7533 RTMemFree(szOldExtentSectors);
7534 szOldExtentSectors = NULL;
7535
7536 RTMemFree(szNewExtentSectors);
7537 szNewExtentSectors = NULL;
7538
7539 return VERR_BUFFER_OVERFLOW;
7540 }
7541
7542 char * szNewExtentLine = vmdkStrReplace(pImage->Descriptor.aLines[line],
7543 szOldExtentSectors,
7544 szNewExtentSectors);
7545
7546 RTMemFree(szOldExtentSectors);
7547 szOldExtentSectors = NULL;
7548
7549 RTMemFree(szNewExtentSectors);
7550 szNewExtentSectors = NULL;
7551
7552 if (!szNewExtentLine)
7553 return VERR_INVALID_PARAMETER;
7554
7555 pImage->Descriptor.aLines[line] = szNewExtentLine;
7556
7557 return VINF_SUCCESS;
7558}
7559
7560/** @copydoc VDIMAGEBACKEND::pfnResize */
7561static DECLCALLBACK(int) vmdkResize(void *pBackendData, uint64_t cbSize,
7562 PCVDGEOMETRY pPCHSGeometry, PCVDGEOMETRY pLCHSGeometry,
7563 unsigned uPercentStart, unsigned uPercentSpan,
7564 PVDINTERFACE pVDIfsDisk, PVDINTERFACE pVDIfsImage,
7565 PVDINTERFACE pVDIfsOperation)
7566{
7567 RT_NOREF5(uPercentStart, uPercentSpan, pVDIfsDisk, pVDIfsImage, pVDIfsOperation);
7568
7569     /* Establish the variables and objects needed for the resize. */
7570 int rc = VINF_SUCCESS;
7571 PVMDKIMAGE pImage = (PVMDKIMAGE)pBackendData;
7572 unsigned uImageFlags = pImage->uImageFlags;
7573 PVMDKEXTENT pExtent = &pImage->pExtents[0];
7574
7575     uint64_t cSectorsNew = cbSize / VMDK_SECTOR_SIZE; /**< New number of sectors in the image after the resize */
7576 if (cbSize % VMDK_SECTOR_SIZE)
7577 cSectorsNew++;
7578
7579     uint64_t cSectorsOld = pImage->cbSize / VMDK_SECTOR_SIZE; /**< Number of sectors before the resize. Only for FLAT images. */
7580 if (pImage->cbSize % VMDK_SECTOR_SIZE)
7581 cSectorsOld++;
7582 unsigned cExtents = pImage->cExtents;
7583
7584 /* Check size is within min/max bounds. */
7585 if ( !(uImageFlags & VD_VMDK_IMAGE_FLAGS_RAWDISK)
7586 && ( !cbSize
7587 || (!(uImageFlags & VD_IMAGE_FLAGS_FIXED) && cbSize >= _1T * 256 - _64K)) )
7588 return VERR_VD_INVALID_SIZE;
7589
7590 /*
7591 * Making the image smaller is not supported at the moment.
7592 */
7593 /** @todo implement making the image smaller, it is the responsibility of
7594 * the user to know what he's doing. */
7595 if (cbSize < pImage->cbSize)
7596 rc = VERR_VD_SHRINK_NOT_SUPPORTED;
7597 else if (cbSize > pImage->cbSize)
7598 {
7599 /**
7600 * monolithicFlat. FIXED flag and not split up into 2 GB parts.
7601 */
7602 if ((uImageFlags & VD_IMAGE_FLAGS_FIXED) && !(uImageFlags & VD_VMDK_IMAGE_FLAGS_SPLIT_2G))
7603 {
7604 /** Required space in bytes for the extent after the resize. */
7605 uint64_t cbSectorSpaceNew = cSectorsNew * VMDK_SECTOR_SIZE;
7606 pExtent = &pImage->pExtents[0];
7607
7608 rc = vdIfIoIntFileSetAllocationSize(pImage->pIfIo, pExtent->pFile->pStorage, cbSectorSpaceNew,
7609 0 /* fFlags */, NULL,
7610 uPercentStart, uPercentSpan);
7611 if (RT_FAILURE(rc))
7612 return vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: could not set size of new file '%s'"), pExtent->pszFullname);
7613
7614 rc = vmdkRepaceExtentSize(pImage, pImage->Descriptor.uFirstExtent, cSectorsOld, cSectorsNew);
7615 if (RT_FAILURE(rc))
7616 return rc;
7617 }
7618
7619 /**
7620 * twoGbMaxExtentFlat. FIXED flag and SPLIT into 2 GB parts.
7621 */
7622 if ((uImageFlags & VD_IMAGE_FLAGS_FIXED) && (uImageFlags & VD_VMDK_IMAGE_FLAGS_SPLIT_2G))
7623 {
7624             /* Check how much space remains in the last extent. */
7625             bool fSpaceAvailable = false;
7626 uint64_t cLastExtentRemSectors = cSectorsOld % VMDK_BYTE2SECTOR(VMDK_2G_SPLIT_SIZE);
7627 if (cLastExtentRemSectors)
7628                 fSpaceAvailable = true;
7629
7630 uint64_t cSectorsNeeded = cSectorsNew - cSectorsOld;
7631             if (fSpaceAvailable && cSectorsNeeded + cLastExtentRemSectors <= VMDK_BYTE2SECTOR(VMDK_2G_SPLIT_SIZE))
7632 {
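                /* The requested growth still fits into the last extent: just enlarge that file. */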
7633 pExtent = &pImage->pExtents[cExtents - 1];
7634 rc = vdIfIoIntFileSetAllocationSize(pImage->pIfIo, pExtent->pFile->pStorage,
7635 VMDK_SECTOR2BYTE(cSectorsNeeded + cLastExtentRemSectors),
7636 0 /* fFlags */, NULL, uPercentStart, uPercentSpan);
7637 if (RT_FAILURE(rc))
7638 return vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: could not set size of new file '%s'"), pExtent->pszFullname);
7639
7640 rc = vmdkRepaceExtentSize(pImage, pImage->Descriptor.uFirstExtent + cExtents - 1,
7641 pExtent->cNominalSectors, cSectorsNeeded + cLastExtentRemSectors);
7642 if (RT_FAILURE(rc))
7643 return rc;
7644 }
7645 else
7646 {
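                /* The growth does not fit into the last extent: fill it up to 2 GB first
                 * (if it still has room) and then append additional extents for the rest. */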
7647                 if (fSpaceAvailable)
7648 {
7649 pExtent = &pImage->pExtents[cExtents - 1];
7650 rc = vdIfIoIntFileSetAllocationSize(pImage->pIfIo, pExtent->pFile->pStorage, VMDK_2G_SPLIT_SIZE,
7651 0 /* fFlags */, NULL,
7652 uPercentStart, uPercentSpan);
7653 if (RT_FAILURE(rc))
7654 return vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: could not set size of new file '%s'"), pExtent->pszFullname);
7655
7656 cSectorsNeeded = cSectorsNeeded - VMDK_BYTE2SECTOR(VMDK_2G_SPLIT_SIZE) + cLastExtentRemSectors;
7657
7658 rc = vmdkRepaceExtentSize(pImage, pImage->Descriptor.uFirstExtent + cExtents - 1,
7659 pExtent->cNominalSectors, VMDK_BYTE2SECTOR(VMDK_2G_SPLIT_SIZE));
7660 if (RT_FAILURE(rc))
7661 return rc;
7662 }
7663
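                /* Number of additional full 2 GB extents needed for the remaining sectors (rounded up). */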
7664                 unsigned cNewExtents = VMDK_SECTOR2BYTE(cSectorsNeeded) / VMDK_2G_SPLIT_SIZE;
7665                 if (VMDK_SECTOR2BYTE(cSectorsNeeded) % VMDK_2G_SPLIT_SIZE)
7666                     cNewExtents++;
7667
7668 for (unsigned i = cExtents;
7669 i < cExtents + cNewExtents && cSectorsNeeded >= VMDK_BYTE2SECTOR(VMDK_2G_SPLIT_SIZE);
7670 i++)
7671 {
7672 rc = vmdkAddFileBackedExtent(pImage, VMDK_2G_SPLIT_SIZE);
7673 if (RT_FAILURE(rc))
7674 return rc;
7675
7676 pExtent = &pImage->pExtents[i];
7677
7678 pExtent->cSectors = VMDK_BYTE2SECTOR(VMDK_2G_SPLIT_SIZE);
7679 cSectorsNeeded -= VMDK_BYTE2SECTOR(VMDK_2G_SPLIT_SIZE);
7680 }
7681
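                /* Any remainder smaller than a full 2 GB extent goes into one final, smaller extent. */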
7682 if (cSectorsNeeded)
7683 {
7684 rc = vmdkAddFileBackedExtent(pImage, VMDK_SECTOR2BYTE(cSectorsNeeded));
7685 if (RT_FAILURE(rc))
7686 return rc;
7687 }
7688 }
7689 }
7690
7691 /* Successful resize. Update metadata */
7692 if (RT_SUCCESS(rc))
7693 {
7694 /* Update size and new block count. */
7695 pImage->cbSize = cbSize;
7696 /** @todo r=jack: update cExtents if needed */
7697 pExtent->cNominalSectors = VMDK_BYTE2SECTOR(cbSize);
7698
7699 /* Update geometry. */
7700 pImage->PCHSGeometry = *pPCHSGeometry;
7701 pImage->LCHSGeometry = *pLCHSGeometry;
7702 }
7703
7704 /* Update header information in base image file. */
7705 rc = vmdkWriteDescriptor(pImage, NULL);
7706
7707 if (RT_FAILURE(rc))
7708 return rc;
7709
7710 rc = vmdkFlushImage(pImage, NULL);
7711
7712 if (RT_FAILURE(rc))
7713 return rc;
7714 }
7715 /* Same size doesn't change the image at all. */
7716
7717 LogFlowFunc(("returns %Rrc\n", rc));
7718 return rc;
7719}
7720
7721const VDIMAGEBACKEND g_VmdkBackend =
7722{
7723 /* u32Version */
7724 VD_IMGBACKEND_VERSION,
7725 /* pszBackendName */
7726 "VMDK",
7727 /* uBackendCaps */
7728 VD_CAP_UUID | VD_CAP_CREATE_FIXED | VD_CAP_CREATE_DYNAMIC
7729 | VD_CAP_CREATE_SPLIT_2G | VD_CAP_DIFF | VD_CAP_FILE | VD_CAP_ASYNC
7730 | VD_CAP_VFS | VD_CAP_PREFERRED,
7731 /* paFileExtensions */
7732 s_aVmdkFileExtensions,
7733 /* paConfigInfo */
7734 s_aVmdkConfigInfo,
7735 /* pfnProbe */
7736 vmdkProbe,
7737 /* pfnOpen */
7738 vmdkOpen,
7739 /* pfnCreate */
7740 vmdkCreate,
7741 /* pfnRename */
7742 vmdkRename,
7743 /* pfnClose */
7744 vmdkClose,
7745 /* pfnRead */
7746 vmdkRead,
7747 /* pfnWrite */
7748 vmdkWrite,
7749 /* pfnFlush */
7750 vmdkFlush,
7751 /* pfnDiscard */
7752 NULL,
7753 /* pfnGetVersion */
7754 vmdkGetVersion,
7755 /* pfnGetFileSize */
7756 vmdkGetFileSize,
7757 /* pfnGetPCHSGeometry */
7758 vmdkGetPCHSGeometry,
7759 /* pfnSetPCHSGeometry */
7760 vmdkSetPCHSGeometry,
7761 /* pfnGetLCHSGeometry */
7762 vmdkGetLCHSGeometry,
7763 /* pfnSetLCHSGeometry */
7764 vmdkSetLCHSGeometry,
7765 /* pfnQueryRegions */
7766 vmdkQueryRegions,
7767 /* pfnRegionListRelease */
7768 vmdkRegionListRelease,
7769 /* pfnGetImageFlags */
7770 vmdkGetImageFlags,
7771 /* pfnGetOpenFlags */
7772 vmdkGetOpenFlags,
7773 /* pfnSetOpenFlags */
7774 vmdkSetOpenFlags,
7775 /* pfnGetComment */
7776 vmdkGetComment,
7777 /* pfnSetComment */
7778 vmdkSetComment,
7779 /* pfnGetUuid */
7780 vmdkGetUuid,
7781 /* pfnSetUuid */
7782 vmdkSetUuid,
7783 /* pfnGetModificationUuid */
7784 vmdkGetModificationUuid,
7785 /* pfnSetModificationUuid */
7786 vmdkSetModificationUuid,
7787 /* pfnGetParentUuid */
7788 vmdkGetParentUuid,
7789 /* pfnSetParentUuid */
7790 vmdkSetParentUuid,
7791 /* pfnGetParentModificationUuid */
7792 vmdkGetParentModificationUuid,
7793 /* pfnSetParentModificationUuid */
7794 vmdkSetParentModificationUuid,
7795 /* pfnDump */
7796 vmdkDump,
7797 /* pfnGetTimestamp */
7798 NULL,
7799 /* pfnGetParentTimestamp */
7800 NULL,
7801 /* pfnSetParentTimestamp */
7802 NULL,
7803 /* pfnGetParentFilename */
7804 NULL,
7805 /* pfnSetParentFilename */
7806 NULL,
7807 /* pfnComposeLocation */
7808 genericFileComposeLocation,
7809 /* pfnComposeName */
7810 genericFileComposeName,
7811 /* pfnCompact */
7812 NULL,
7813 /* pfnResize */
7814 vmdkResize,
7815 /* pfnRepair */
7816 NULL,
7817 /* pfnTraverseMetadata */
7818 NULL,
7819 /* u32VersionEnd */
7820 VD_IMGBACKEND_VERSION
7821};