VirtualBox

source: vbox/trunk/src/VBox/Storage/VMDK.cpp@ 99813

Last change on this file since 99813 was 99739, checked in by vboxsync, 19 months ago

*: doxygen corrections (mostly about removing @returns from functions returning void).

  • Property svn:eol-style set to native
  • Property svn:keywords set to Author Date Id Revision
File size: 379.2 KB
Line 
1/* $Id: VMDK.cpp 99739 2023-05-11 01:01:08Z vboxsync $ */
2/** @file
3 * VMDK disk image, core code.
4 */
5
6/*
7 * Copyright (C) 2006-2023 Oracle and/or its affiliates.
8 *
9 * This file is part of VirtualBox base platform packages, as
10 * available from https://www.virtualbox.org.
11 *
12 * This program is free software; you can redistribute it and/or
13 * modify it under the terms of the GNU General Public License
14 * as published by the Free Software Foundation, in version 3 of the
15 * License.
16 *
17 * This program is distributed in the hope that it will be useful, but
18 * WITHOUT ANY WARRANTY; without even the implied warranty of
19 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
20 * General Public License for more details.
21 *
22 * You should have received a copy of the GNU General Public License
23 * along with this program; if not, see <https://www.gnu.org/licenses>.
24 *
25 * SPDX-License-Identifier: GPL-3.0-only
26 */
27
28
29/*********************************************************************************************************************************
30* Header Files *
31*********************************************************************************************************************************/
32#define LOG_GROUP LOG_GROUP_VD_VMDK
33#include <VBox/log.h> /* before VBox/vd-ifs.h */
34#include <VBox/vd-plugin.h>
35#include <VBox/err.h>
36
37#include <iprt/assert.h>
38#include <iprt/alloc.h>
39#include <iprt/base64.h>
40#include <iprt/ctype.h>
41#include <iprt/crc.h>
42#include <iprt/dvm.h>
43#include <iprt/uuid.h>
44#include <iprt/path.h>
45#include <iprt/rand.h>
46#include <iprt/string.h>
47#include <iprt/sort.h>
48#include <iprt/zip.h>
49#include <iprt/asm.h>
50#ifdef RT_OS_WINDOWS
51# include <iprt/utf16.h>
52# include <iprt/uni.h>
53# include <iprt/uni.h>
54# include <iprt/nt/nt-and-windows.h>
55# include <winioctl.h>
56#endif
57#ifdef RT_OS_LINUX
58# include <errno.h>
59# include <sys/stat.h>
60# include <iprt/dir.h>
61# include <iprt/symlink.h>
62# include <iprt/linux/sysfs.h>
63#endif
64#ifdef RT_OS_FREEBSD
65#include <libgeom.h>
66#include <sys/stat.h>
67#include <stdlib.h>
68#endif
69#ifdef RT_OS_SOLARIS
70#include <sys/dkio.h>
71#include <sys/vtoc.h>
72#include <sys/efi_partition.h>
73#include <unistd.h>
74#include <errno.h>
75#endif
76#ifdef RT_OS_DARWIN
77# include <sys/stat.h>
78# include <sys/disk.h>
79# include <errno.h>
/* The following structure and IOCTLs are defined in xnu bsd/sys/disk.h but
   inside KERNEL ifdefs and thus stripped from the SDK edition of the header.
   While we could try to include the header from the Kernel.framework, it's a
   lot easier to just add the structure and 4 defines here. */
84typedef struct
85{
86 uint64_t offset;
87 uint64_t length;
88 uint8_t reserved0128[12];
89 dev_t dev;
90} dk_physical_extent_t;
91# define DKIOCGETBASE _IOR( 'd', 73, uint64_t)
92# define DKIOCLOCKPHYSICALEXTENTS _IO( 'd', 81)
93# define DKIOCGETPHYSICALEXTENT _IOWR('d', 82, dk_physical_extent_t)
94# define DKIOCUNLOCKPHYSICALEXTENTS _IO( 'd', 83)
95#endif /* RT_OS_DARWIN */
96
97#include "VDBackends.h"
98
99
100/*********************************************************************************************************************************
101* Constants And Macros, Structures and Typedefs *
102*********************************************************************************************************************************/
103
104/** Maximum encoded string size (including NUL) we allow for VMDK images.
105 * Deliberately not set high to avoid running out of descriptor space. */
106#define VMDK_ENCODED_COMMENT_MAX 1024
107
108/** VMDK descriptor DDB entry for PCHS cylinders. */
109#define VMDK_DDB_GEO_PCHS_CYLINDERS "ddb.geometry.cylinders"
110
111/** VMDK descriptor DDB entry for PCHS heads. */
112#define VMDK_DDB_GEO_PCHS_HEADS "ddb.geometry.heads"
113
114/** VMDK descriptor DDB entry for PCHS sectors. */
115#define VMDK_DDB_GEO_PCHS_SECTORS "ddb.geometry.sectors"
116
117/** VMDK descriptor DDB entry for LCHS cylinders. */
118#define VMDK_DDB_GEO_LCHS_CYLINDERS "ddb.geometry.biosCylinders"
119
120/** VMDK descriptor DDB entry for LCHS heads. */
121#define VMDK_DDB_GEO_LCHS_HEADS "ddb.geometry.biosHeads"
122
123/** VMDK descriptor DDB entry for LCHS sectors. */
124#define VMDK_DDB_GEO_LCHS_SECTORS "ddb.geometry.biosSectors"
125
126/** VMDK descriptor DDB entry for image UUID. */
127#define VMDK_DDB_IMAGE_UUID "ddb.uuid.image"
128
129/** VMDK descriptor DDB entry for image modification UUID. */
130#define VMDK_DDB_MODIFICATION_UUID "ddb.uuid.modification"
131
132/** VMDK descriptor DDB entry for parent image UUID. */
133#define VMDK_DDB_PARENT_UUID "ddb.uuid.parent"
134
135/** VMDK descriptor DDB entry for parent image modification UUID. */
136#define VMDK_DDB_PARENT_MODIFICATION_UUID "ddb.uuid.parentmodification"
137
138/** No compression for streamOptimized files. */
139#define VMDK_COMPRESSION_NONE 0
140
141/** Deflate compression for streamOptimized files. */
142#define VMDK_COMPRESSION_DEFLATE 1
143
144/** Marker that the actual GD value is stored in the footer. */
145#define VMDK_GD_AT_END 0xffffffffffffffffULL
146
147/** Marker for end-of-stream in streamOptimized images. */
148#define VMDK_MARKER_EOS 0
149
150/** Marker for grain table block in streamOptimized images. */
151#define VMDK_MARKER_GT 1
152
153/** Marker for grain directory block in streamOptimized images. */
154#define VMDK_MARKER_GD 2
155
156/** Marker for footer in streamOptimized images. */
157#define VMDK_MARKER_FOOTER 3
158
159/** Marker for unknown purpose in streamOptimized images.
160 * Shows up in very recent images created by vSphere, but only sporadically.
161 * They "forgot" to document that one in the VMDK specification. */
162#define VMDK_MARKER_UNSPECIFIED 4
163
164/** Dummy marker for "don't check the marker value". */
165#define VMDK_MARKER_IGNORE 0xffffffffU
166
167/**
168 * Magic number for hosted images created by VMware Workstation 4, VMware
169 * Workstation 5, VMware Server or VMware Player. Not necessarily sparse.
170 */
171#define VMDK_SPARSE_MAGICNUMBER 0x564d444b /* 'V' 'M' 'D' 'K' */
172
173/** VMDK sector size in bytes. */
174#define VMDK_SECTOR_SIZE 512
175/** Max string buffer size for uint64_t with null term */
176#define UINT64_MAX_BUFF_SIZE 21
177/** Grain directory entry size in bytes */
178#define VMDK_GRAIN_DIR_ENTRY_SIZE 4
179/** Grain table size in bytes */
180#define VMDK_GRAIN_TABLE_SIZE 2048
181
/**
 * VMDK hosted binary extent header. The "Sparse" is a total misnomer, as
 * this header is also used for monolithic flat images.
 *
 * This is the on-disk layout (packed, little-endian per the VMDK spec); the
 * fields below add up to exactly 512 bytes, i.e. one VMDK_SECTOR_SIZE sector.
 */
#pragma pack(1)
typedef struct SparseExtentHeader
{
    /** Magic number, VMDK_SPARSE_MAGICNUMBER ('VMDK') for hosted images. */
    uint32_t magicNumber;
    /** Format version of the extent. */
    uint32_t version;
    /** Feature/compatibility flags — see the VMDK specification. */
    uint32_t flags;
    /** Capacity of the extent (in sectors per the VMDK spec). */
    uint64_t capacity;
    /** Grain size (in sectors per the VMDK spec). */
    uint64_t grainSize;
    /** Start of the embedded descriptor, in sectors; 0 if none. */
    uint64_t descriptorOffset;
    /** Size of the embedded descriptor in sectors (bounded by
     * VMDK_SPARSE_DESCRIPTOR_SIZE_MAX when read back). */
    uint64_t descriptorSize;
    /** Number of grain table entries per grain table. */
    uint32_t numGTEsPerGT;
    /** Offset of the redundant grain directory, in sectors. */
    uint64_t rgdOffset;
    /** Offset of the grain directory, in sectors; VMDK_GD_AT_END means the
     * actual value is stored in the footer (streamOptimized). */
    uint64_t gdOffset;
    /** Total number of metadata (overhead) sectors preceding the data. */
    uint64_t overHead;
    /** Unclean shutdown flag; set while the extent is dirty/open for write —
     * see VMDKEXTENT::fUncleanShutdown. */
    bool uncleanShutdown;
    /** Line-ending detection characters (catch FTP text-mode corruption). */
    char singleEndLineChar;
    char nonEndLineChar;
    char doubleEndLineChar1;
    char doubleEndLineChar2;
    /** Compression algorithm, VMDK_COMPRESSION_NONE or _DEFLATE. */
    uint16_t compressAlgorithm;
    /** Padding bringing the structure to a full 512-byte sector. */
    uint8_t pad[433];
} SparseExtentHeader;
#pragma pack()
209
210/** The maximum allowed descriptor size in the extent header in sectors. */
211#define VMDK_SPARSE_DESCRIPTOR_SIZE_MAX UINT64_C(20480) /* 10MB */
212
213/** VMDK capacity for a single chunk when 2G splitting is turned on. Should be
214 * divisible by the default grain size (64K) */
215#define VMDK_2G_SPLIT_SIZE (2047 * 1024 * 1024)
216
217/** VMDK streamOptimized file format marker. The type field may or may not
218 * be actually valid, but there's always data to read there. */
219#pragma pack(1)
220typedef struct VMDKMARKER
221{
222 uint64_t uSector;
223 uint32_t cbSize;
224 uint32_t uType;
225} VMDKMARKER, *PVMDKMARKER;
226#pragma pack()
227
228
229/** Convert sector number/size to byte offset/size. */
230#define VMDK_SECTOR2BYTE(u) ((uint64_t)(u) << 9)
231
232/** Convert byte offset/size to sector number/size. */
233#define VMDK_BYTE2SECTOR(u) ((u) >> 9)
234
235/**
236 * VMDK extent type.
237 */
238typedef enum VMDKETYPE
239{
240 /** Hosted sparse extent. */
241 VMDKETYPE_HOSTED_SPARSE = 1,
242 /** Flat extent. */
243 VMDKETYPE_FLAT,
244 /** Zero extent. */
245 VMDKETYPE_ZERO,
246 /** VMFS extent, used by ESX. */
247 VMDKETYPE_VMFS
248} VMDKETYPE, *PVMDKETYPE;
249
250/**
251 * VMDK access type for a extent.
252 */
253typedef enum VMDKACCESS
254{
255 /** No access allowed. */
256 VMDKACCESS_NOACCESS = 0,
257 /** Read-only access. */
258 VMDKACCESS_READONLY,
259 /** Read-write access. */
260 VMDKACCESS_READWRITE
261} VMDKACCESS, *PVMDKACCESS;
262
263/** Forward declaration for PVMDKIMAGE. */
264typedef struct VMDKIMAGE *PVMDKIMAGE;
265
266/**
267 * Extents files entry. Used for opening a particular file only once.
268 */
269typedef struct VMDKFILE
270{
271 /** Pointer to file path. Local copy. */
272 const char *pszFilename;
273 /** Pointer to base name. Local copy. */
274 const char *pszBasename;
275 /** File open flags for consistency checking. */
276 unsigned fOpen;
277 /** Handle for sync/async file abstraction.*/
278 PVDIOSTORAGE pStorage;
279 /** Reference counter. */
280 unsigned uReferences;
281 /** Flag whether the file should be deleted on last close. */
282 bool fDelete;
283 /** Pointer to the image we belong to (for debugging purposes). */
284 PVMDKIMAGE pImage;
285 /** Pointer to next file descriptor. */
286 struct VMDKFILE *pNext;
287 /** Pointer to the previous file descriptor. */
288 struct VMDKFILE *pPrev;
289} VMDKFILE, *PVMDKFILE;
290
291/**
292 * VMDK extent data structure.
293 */
294typedef struct VMDKEXTENT
295{
296 /** File handle. */
297 PVMDKFILE pFile;
298 /** Base name of the image extent. */
299 const char *pszBasename;
300 /** Full name of the image extent. */
301 const char *pszFullname;
302 /** Number of sectors in this extent. */
303 uint64_t cSectors;
304 /** Number of sectors per block (grain in VMDK speak). */
305 uint64_t cSectorsPerGrain;
306 /** Starting sector number of descriptor. */
307 uint64_t uDescriptorSector;
308 /** Size of descriptor in sectors. */
309 uint64_t cDescriptorSectors;
310 /** Starting sector number of grain directory. */
311 uint64_t uSectorGD;
312 /** Starting sector number of redundant grain directory. */
313 uint64_t uSectorRGD;
314 /** Total number of metadata sectors. */
315 uint64_t cOverheadSectors;
316 /** Nominal size (i.e. as described by the descriptor) of this extent. */
317 uint64_t cNominalSectors;
318 /** Sector offset (i.e. as described by the descriptor) of this extent. */
319 uint64_t uSectorOffset;
320 /** Number of entries in a grain table. */
321 uint32_t cGTEntries;
322 /** Number of sectors reachable via a grain directory entry. */
323 uint32_t cSectorsPerGDE;
324 /** Number of entries in the grain directory. */
325 uint32_t cGDEntries;
326 /** Pointer to the next free sector. Legacy information. Do not use. */
327 uint32_t uFreeSector;
328 /** Number of this extent in the list of images. */
329 uint32_t uExtent;
330 /** Pointer to the descriptor (NULL if no descriptor in this extent). */
331 char *pDescData;
332 /** Pointer to the grain directory. */
333 uint32_t *pGD;
334 /** Pointer to the redundant grain directory. */
335 uint32_t *pRGD;
336 /** VMDK version of this extent. 1=1.0/1.1 */
337 uint32_t uVersion;
338 /** Type of this extent. */
339 VMDKETYPE enmType;
340 /** Access to this extent. */
341 VMDKACCESS enmAccess;
342 /** Flag whether this extent is marked as unclean. */
343 bool fUncleanShutdown;
344 /** Flag whether the metadata in the extent header needs to be updated. */
345 bool fMetaDirty;
346 /** Flag whether there is a footer in this extent. */
347 bool fFooter;
348 /** Compression type for this extent. */
349 uint16_t uCompression;
350 /** Append position for writing new grain. Only for sparse extents. */
351 uint64_t uAppendPosition;
352 /** Last grain which was accessed. Only for streamOptimized extents. */
353 uint32_t uLastGrainAccess;
354 /** Starting sector corresponding to the grain buffer. */
355 uint32_t uGrainSectorAbs;
356 /** Grain number corresponding to the grain buffer. */
357 uint32_t uGrain;
358 /** Actual size of the compressed data, only valid for reading. */
359 uint32_t cbGrainStreamRead;
360 /** Size of compressed grain buffer for streamOptimized extents. */
361 size_t cbCompGrain;
362 /** Compressed grain buffer for streamOptimized extents, with marker. */
363 void *pvCompGrain;
364 /** Decompressed grain buffer for streamOptimized extents. */
365 void *pvGrain;
366 /** Reference to the image in which this extent is used. Do not use this
367 * on a regular basis to avoid passing pImage references to functions
368 * explicitly. */
369 struct VMDKIMAGE *pImage;
370} VMDKEXTENT, *PVMDKEXTENT;
371
372/**
373 * Grain table cache size. Allocated per image.
374 */
375#define VMDK_GT_CACHE_SIZE 256
376
377/**
378 * Grain table block size. Smaller than an actual grain table block to allow
379 * more grain table blocks to be cached without having to allocate excessive
380 * amounts of memory for the cache.
381 */
382#define VMDK_GT_CACHELINE_SIZE 128
383
384
385/**
386 * Maximum number of lines in a descriptor file. Not worth the effort of
387 * making it variable. Descriptor files are generally very short (~20 lines),
388 * with the exception of sparse files split in 2G chunks, which need for the
389 * maximum size (almost 2T) exactly 1025 lines for the disk database.
390 */
391#define VMDK_DESCRIPTOR_LINES_MAX 1100U
392
393/**
394 * Parsed descriptor information. Allows easy access and update of the
395 * descriptor (whether separate file or not). Free form text files suck.
396 */
397typedef struct VMDKDESCRIPTOR
398{
399 /** Line number of first entry of the disk descriptor. */
400 unsigned uFirstDesc;
401 /** Line number of first entry in the extent description. */
402 unsigned uFirstExtent;
403 /** Line number of first disk database entry. */
404 unsigned uFirstDDB;
405 /** Total number of lines. */
406 unsigned cLines;
407 /** Total amount of memory available for the descriptor. */
408 size_t cbDescAlloc;
409 /** Set if descriptor has been changed and not yet written to disk. */
410 bool fDirty;
411 /** Array of pointers to the data in the descriptor. */
412 char *aLines[VMDK_DESCRIPTOR_LINES_MAX];
413 /** Array of line indices pointing to the next non-comment line. */
414 unsigned aNextLines[VMDK_DESCRIPTOR_LINES_MAX];
415} VMDKDESCRIPTOR, *PVMDKDESCRIPTOR;
416
417
418/**
419 * Cache entry for translating extent/sector to a sector number in that
420 * extent.
421 */
422typedef struct VMDKGTCACHEENTRY
423{
424 /** Extent number for which this entry is valid. */
425 uint32_t uExtent;
426 /** GT data block number. */
427 uint64_t uGTBlock;
428 /** Data part of the cache entry. */
429 uint32_t aGTData[VMDK_GT_CACHELINE_SIZE];
430} VMDKGTCACHEENTRY, *PVMDKGTCACHEENTRY;
431
432/**
433 * Cache data structure for blocks of grain table entries. For now this is a
434 * fixed size direct mapping cache, but this should be adapted to the size of
435 * the sparse image and maybe converted to a set-associative cache. The
436 * implementation below implements a write-through cache with write allocate.
437 */
438typedef struct VMDKGTCACHE
439{
440 /** Cache entries. */
441 VMDKGTCACHEENTRY aGTCache[VMDK_GT_CACHE_SIZE];
442 /** Number of cache entries (currently unused). */
443 unsigned cEntries;
444} VMDKGTCACHE, *PVMDKGTCACHE;
445
446/**
447 * Complete VMDK image data structure. Mainly a collection of extents and a few
448 * extra global data fields.
449 */
450typedef struct VMDKIMAGE
451{
452 /** Image name. */
453 const char *pszFilename;
454 /** Descriptor file if applicable. */
455 PVMDKFILE pFile;
456
457 /** Pointer to the per-disk VD interface list. */
458 PVDINTERFACE pVDIfsDisk;
459 /** Pointer to the per-image VD interface list. */
460 PVDINTERFACE pVDIfsImage;
461
462 /** Error interface. */
463 PVDINTERFACEERROR pIfError;
464 /** I/O interface. */
465 PVDINTERFACEIOINT pIfIo;
466
467
468 /** Pointer to the image extents. */
469 PVMDKEXTENT pExtents;
470 /** Number of image extents. */
471 unsigned cExtents;
472 /** Pointer to the files list, for opening a file referenced multiple
473 * times only once (happens mainly with raw partition access). */
474 PVMDKFILE pFiles;
475
476 /**
477 * Pointer to an array of segment entries for async I/O.
478 * This is an optimization because the task number to submit is not known
479 * and allocating/freeing an array in the read/write functions every time
480 * is too expensive.
481 */
482 PPDMDATASEG paSegments;
483 /** Entries available in the segments array. */
484 unsigned cSegments;
485
486 /** Open flags passed by VBoxHD layer. */
487 unsigned uOpenFlags;
488 /** Image flags defined during creation or determined during open. */
489 unsigned uImageFlags;
490 /** Total size of the image. */
491 uint64_t cbSize;
492 /** Physical geometry of this image. */
493 VDGEOMETRY PCHSGeometry;
494 /** Logical geometry of this image. */
495 VDGEOMETRY LCHSGeometry;
496 /** Image UUID. */
497 RTUUID ImageUuid;
498 /** Image modification UUID. */
499 RTUUID ModificationUuid;
500 /** Parent image UUID. */
501 RTUUID ParentUuid;
502 /** Parent image modification UUID. */
503 RTUUID ParentModificationUuid;
504
505 /** Pointer to grain table cache, if this image contains sparse extents. */
506 PVMDKGTCACHE pGTCache;
507 /** Pointer to the descriptor (NULL if no separate descriptor file). */
508 char *pDescData;
509 /** Allocation size of the descriptor file. */
510 size_t cbDescAlloc;
511 /** Parsed descriptor file content. */
512 VMDKDESCRIPTOR Descriptor;
513 /** The static region list. */
514 VDREGIONLIST RegionList;
515} VMDKIMAGE;
516
517
/** State for the input/output callout of the inflate reader/deflate writer. */
typedef struct VMDKCOMPRESSIO
{
    /** Image this operation relates to. */
    PVMDKIMAGE pImage;
    /** Current read/write position within pvCompGrain. Initialized to -1 so
     * the first helper callback knows to handle the RTZip type byte and skip
     * the grain marker header. */
    ssize_t iOffset;
    /** Size of the compressed grain buffer (available data). */
    size_t cbCompGrain;
    /** Pointer to the compressed grain buffer. */
    void *pvCompGrain;
} VMDKCOMPRESSIO;
530
531
532/** Tracks async grain allocation. */
533typedef struct VMDKGRAINALLOCASYNC
534{
535 /** Flag whether the allocation failed. */
536 bool fIoErr;
537 /** Current number of transfers pending.
538 * If reached 0 and there is an error the old state is restored. */
539 unsigned cIoXfersPending;
540 /** Sector number */
541 uint64_t uSector;
542 /** Flag whether the grain table needs to be updated. */
543 bool fGTUpdateNeeded;
544 /** Extent the allocation happens. */
545 PVMDKEXTENT pExtent;
546 /** Position of the new grain, required for the grain table update. */
547 uint64_t uGrainOffset;
548 /** Grain table sector. */
549 uint64_t uGTSector;
550 /** Backup grain table sector. */
551 uint64_t uRGTSector;
552} VMDKGRAINALLOCASYNC, *PVMDKGRAINALLOCASYNC;
553
/**
 * State information for vmdkRename() and helpers.
 */
typedef struct VMDKRENAMESTATE
{
    /** Array of old filenames. */
    char **apszOldName;
    /** Array of new filenames. */
    char **apszNewName;
    /** Array of new lines in the extent descriptor. */
    char **apszNewLines;
    /** Name of the old descriptor file if not a sparse image. */
    char *pszOldDescName;
    /** Flag whether we called vmdkFreeImage(). */
    bool fImageFreed;
    /** Flag whether the descriptor is embedded in the image (sparse) or
     * in a separate file. */
    bool fEmbeddedDesc;
    /** Number of extents in the image. */
    unsigned cExtents;
    /** New base filename. */
    char *pszNewBaseName;
    /** The old base filename. */
    char *pszOldBaseName;
    /** New full filename. */
    char *pszNewFullName;
    /** Old full filename. */
    char *pszOldFullName;
    /** The old image name. */
    const char *pszOldImageName;
    /** Copy of the original VMDK descriptor, kept for rollback on failure. */
    VMDKDESCRIPTOR DescriptorCopy;
    /** Copy of the extent state for sparse images. */
    VMDKEXTENT ExtentCopy;
} VMDKRENAMESTATE;
/** Pointer to a VMDK rename state. */
typedef VMDKRENAMESTATE *PVMDKRENAMESTATE;
591
592
593/*********************************************************************************************************************************
594* Static Variables *
595*********************************************************************************************************************************/
596
597/** NULL-terminated array of supported file extensions. */
598static const VDFILEEXTENSION s_aVmdkFileExtensions[] =
599{
600 {"vmdk", VDTYPE_HDD},
601 {NULL, VDTYPE_INVALID}
602};
603
604/** NULL-terminated array of configuration option. */
605static const VDCONFIGINFO s_aVmdkConfigInfo[] =
606{
607 /* Options for VMDK raw disks */
608 { "RawDrive", NULL, VDCFGVALUETYPE_STRING, 0 },
609 { "Partitions", NULL, VDCFGVALUETYPE_STRING, 0 },
610 { "BootSector", NULL, VDCFGVALUETYPE_BYTES, 0 },
611 { "Relative", NULL, VDCFGVALUETYPE_INTEGER, 0 },
612
613 /* End of options list */
614 { NULL, NULL, VDCFGVALUETYPE_INTEGER, 0 }
615};
616
617
618/*********************************************************************************************************************************
619* Internal Functions *
620*********************************************************************************************************************************/
621
622static void vmdkFreeStreamBuffers(PVMDKEXTENT pExtent);
623static int vmdkFreeExtentData(PVMDKIMAGE pImage, PVMDKEXTENT pExtent,
624 bool fDelete);
625
626static int vmdkCreateExtents(PVMDKIMAGE pImage, unsigned cExtents);
627static int vmdkFlushImage(PVMDKIMAGE pImage, PVDIOCTX pIoCtx);
628static int vmdkSetImageComment(PVMDKIMAGE pImage, const char *pszComment);
629static int vmdkFreeImage(PVMDKIMAGE pImage, bool fDelete, bool fFlush);
630
631static DECLCALLBACK(int) vmdkAllocGrainComplete(void *pBackendData, PVDIOCTX pIoCtx,
632 void *pvUser, int rcReq);
633
634/**
635 * Internal: open a file (using a file descriptor cache to ensure each file
636 * is only opened once - anything else can cause locking problems).
637 */
638static int vmdkFileOpen(PVMDKIMAGE pImage, PVMDKFILE *ppVmdkFile,
639 const char *pszBasename, const char *pszFilename, uint32_t fOpen)
640{
641 int rc = VINF_SUCCESS;
642 PVMDKFILE pVmdkFile;
643
644 for (pVmdkFile = pImage->pFiles;
645 pVmdkFile != NULL;
646 pVmdkFile = pVmdkFile->pNext)
647 {
648 if (!strcmp(pszFilename, pVmdkFile->pszFilename))
649 {
650 Assert(fOpen == pVmdkFile->fOpen);
651 pVmdkFile->uReferences++;
652
653 *ppVmdkFile = pVmdkFile;
654
655 return rc;
656 }
657 }
658
659 /* If we get here, there's no matching entry in the cache. */
660 pVmdkFile = (PVMDKFILE)RTMemAllocZ(sizeof(VMDKFILE));
661 if (!pVmdkFile)
662 {
663 *ppVmdkFile = NULL;
664 return VERR_NO_MEMORY;
665 }
666
667 pVmdkFile->pszFilename = RTStrDup(pszFilename);
668 if (!pVmdkFile->pszFilename)
669 {
670 RTMemFree(pVmdkFile);
671 *ppVmdkFile = NULL;
672 return VERR_NO_MEMORY;
673 }
674
675 if (pszBasename)
676 {
677 pVmdkFile->pszBasename = RTStrDup(pszBasename);
678 if (!pVmdkFile->pszBasename)
679 {
680 RTStrFree((char *)(void *)pVmdkFile->pszFilename);
681 RTMemFree(pVmdkFile);
682 *ppVmdkFile = NULL;
683 return VERR_NO_MEMORY;
684 }
685 }
686
687 pVmdkFile->fOpen = fOpen;
688
689 rc = vdIfIoIntFileOpen(pImage->pIfIo, pszFilename, fOpen,
690 &pVmdkFile->pStorage);
691 if (RT_SUCCESS(rc))
692 {
693 pVmdkFile->uReferences = 1;
694 pVmdkFile->pImage = pImage;
695 pVmdkFile->pNext = pImage->pFiles;
696 if (pImage->pFiles)
697 pImage->pFiles->pPrev = pVmdkFile;
698 pImage->pFiles = pVmdkFile;
699 *ppVmdkFile = pVmdkFile;
700 }
701 else
702 {
703 RTStrFree((char *)(void *)pVmdkFile->pszFilename);
704 RTMemFree(pVmdkFile);
705 *ppVmdkFile = NULL;
706 }
707
708 return rc;
709}
710
711/**
712 * Internal: close a file, updating the file descriptor cache.
713 */
714static int vmdkFileClose(PVMDKIMAGE pImage, PVMDKFILE *ppVmdkFile, bool fDelete)
715{
716 int rc = VINF_SUCCESS;
717 PVMDKFILE pVmdkFile = *ppVmdkFile;
718
719 AssertPtr(pVmdkFile);
720
721 pVmdkFile->fDelete |= fDelete;
722 Assert(pVmdkFile->uReferences);
723 pVmdkFile->uReferences--;
724 if (pVmdkFile->uReferences == 0)
725 {
726 PVMDKFILE pPrev;
727 PVMDKFILE pNext;
728
729 /* Unchain the element from the list. */
730 pPrev = pVmdkFile->pPrev;
731 pNext = pVmdkFile->pNext;
732
733 if (pNext)
734 pNext->pPrev = pPrev;
735 if (pPrev)
736 pPrev->pNext = pNext;
737 else
738 pImage->pFiles = pNext;
739
740 rc = vdIfIoIntFileClose(pImage->pIfIo, pVmdkFile->pStorage);
741
742 bool fFileDel = pVmdkFile->fDelete;
743 if ( pVmdkFile->pszBasename
744 && fFileDel)
745 {
746 const char *pszSuffix = RTPathSuffix(pVmdkFile->pszBasename);
747 if ( RTPathHasPath(pVmdkFile->pszBasename)
748 || !pszSuffix
749 || ( strcmp(pszSuffix, ".vmdk")
750 && strcmp(pszSuffix, ".bin")
751 && strcmp(pszSuffix, ".img")))
752 fFileDel = false;
753 }
754
755 if (fFileDel)
756 {
757 int rc2 = vdIfIoIntFileDelete(pImage->pIfIo, pVmdkFile->pszFilename);
758 if (RT_SUCCESS(rc))
759 rc = rc2;
760 }
761 else if (pVmdkFile->fDelete)
762 LogRel(("VMDK: Denying deletion of %s\n", pVmdkFile->pszBasename));
763 RTStrFree((char *)(void *)pVmdkFile->pszFilename);
764 if (pVmdkFile->pszBasename)
765 RTStrFree((char *)(void *)pVmdkFile->pszBasename);
766 RTMemFree(pVmdkFile);
767 }
768
769 *ppVmdkFile = NULL;
770 return rc;
771}
772
773/*#define VMDK_USE_BLOCK_DECOMP_API - test and enable */
774#ifndef VMDK_USE_BLOCK_DECOMP_API
775static DECLCALLBACK(int) vmdkFileInflateHelper(void *pvUser, void *pvBuf, size_t cbBuf, size_t *pcbBuf)
776{
777 VMDKCOMPRESSIO *pInflateState = (VMDKCOMPRESSIO *)pvUser;
778 size_t cbInjected = 0;
779
780 Assert(cbBuf);
781 if (pInflateState->iOffset < 0)
782 {
783 *(uint8_t *)pvBuf = RTZIPTYPE_ZLIB;
784 pvBuf = (uint8_t *)pvBuf + 1;
785 cbBuf--;
786 cbInjected = 1;
787 pInflateState->iOffset = RT_UOFFSETOF(VMDKMARKER, uType);
788 }
789 if (!cbBuf)
790 {
791 if (pcbBuf)
792 *pcbBuf = cbInjected;
793 return VINF_SUCCESS;
794 }
795 cbBuf = RT_MIN(cbBuf, pInflateState->cbCompGrain - pInflateState->iOffset);
796 memcpy(pvBuf,
797 (uint8_t *)pInflateState->pvCompGrain + pInflateState->iOffset,
798 cbBuf);
799 pInflateState->iOffset += cbBuf;
800 Assert(pcbBuf);
801 *pcbBuf = cbBuf + cbInjected;
802 return VINF_SUCCESS;
803}
804#endif
805
/**
 * Internal: read from a file and inflate the compressed data,
 * distinguishing between async and normal operation
 *
 * @returns VBox status code.
 * @param   pImage          Image instance providing the error and I/O interfaces.
 * @param   pExtent         streamOptimized extent to read from; its
 *                          pvCompGrain buffer is used as scratch space for the
 *                          marker plus compressed payload.
 * @param   uOffset         Byte offset of the compressed grain marker in the file.
 * @param   pvBuf           Where to store the decompressed data.
 * @param   cbToRead        Expected number of decompressed bytes.
 * @param   pcvMarker       Optional marker header already read by the caller
 *                          (endianness partially transformed); NULL to read it
 *                          from the file here.
 * @param   puLBA           Where to return the grain's start sector, optional.
 * @param   pcbMarkerData   Where to return the 512-byte aligned on-disk size of
 *                          marker plus compressed data, optional.
 */
DECLINLINE(int) vmdkFileInflateSync(PVMDKIMAGE pImage, PVMDKEXTENT pExtent,
                                    uint64_t uOffset, void *pvBuf,
                                    size_t cbToRead, const void *pcvMarker,
                                    uint64_t *puLBA, uint32_t *pcbMarkerData)
{
    int rc;
#ifndef VMDK_USE_BLOCK_DECOMP_API
    PRTZIPDECOMP pZip = NULL;
#endif
    VMDKMARKER *pMarker = (VMDKMARKER *)pExtent->pvCompGrain;
    size_t cbCompSize, cbActuallyRead;

    if (!pcvMarker)
    {
        /* Read just the marker header (uSector + cbSize, not uType). */
        rc = vdIfIoIntFileReadSync(pImage->pIfIo, pExtent->pFile->pStorage,
                                   uOffset, pMarker, RT_UOFFSETOF(VMDKMARKER, uType));
        if (RT_FAILURE(rc))
            return rc;
    }
    else
    {
        memcpy(pMarker, pcvMarker, RT_UOFFSETOF(VMDKMARKER, uType));
        /* pcvMarker endianness has already been partially transformed, fix it */
        pMarker->uSector = RT_H2LE_U64(pMarker->uSector);
        pMarker->cbSize = RT_H2LE_U32(pMarker->cbSize);
    }

    cbCompSize = RT_LE2H_U32(pMarker->cbSize);
    if (cbCompSize == 0)
    {
        AssertMsgFailed(("VMDK: corrupted marker\n"));
        return VERR_VD_VMDK_INVALID_FORMAT;
    }

    /* Sanity check - the expansion ratio should be much less than 2. */
    Assert(cbCompSize < 2 * cbToRead);
    if (cbCompSize >= 2 * cbToRead)
        return VERR_VD_VMDK_INVALID_FORMAT;

    /* Compressed grain marker. Data follows immediately. */
    /* Read the payload (sector-aligned on disk) into pvCompGrain, right
       after the marker header that is already there. */
    rc = vdIfIoIntFileReadSync(pImage->pIfIo, pExtent->pFile->pStorage,
                               uOffset + RT_UOFFSETOF(VMDKMARKER, uType),
                               (uint8_t *)pExtent->pvCompGrain
                               + RT_UOFFSETOF(VMDKMARKER, uType),
                               RT_ALIGN_Z(  cbCompSize
                                          + RT_UOFFSETOF(VMDKMARKER, uType),
                                          512)
                               - RT_UOFFSETOF(VMDKMARKER, uType));

    if (puLBA)
        *puLBA = RT_LE2H_U64(pMarker->uSector);
    if (pcbMarkerData)
        *pcbMarkerData = RT_ALIGN(  cbCompSize
                                  + RT_UOFFSETOF(VMDKMARKER, uType),
                                  512);

#ifdef VMDK_USE_BLOCK_DECOMP_API
    rc = RTZipBlockDecompress(RTZIPTYPE_ZLIB, 0 /*fFlags*/,
                              pExtent->pvCompGrain, cbCompSize + RT_UOFFSETOF(VMDKMARKER, uType), NULL,
                              pvBuf, cbToRead, &cbActuallyRead);
#else
    /* Stream decompression: the helper injects the zlib type byte and skips
       the marker header (iOffset == -1 signals the first call). */
    VMDKCOMPRESSIO InflateState;
    InflateState.pImage = pImage;
    InflateState.iOffset = -1;
    InflateState.cbCompGrain = cbCompSize + RT_UOFFSETOF(VMDKMARKER, uType);
    InflateState.pvCompGrain = pExtent->pvCompGrain;

    rc = RTZipDecompCreate(&pZip, &InflateState, vmdkFileInflateHelper);
    if (RT_FAILURE(rc))
        return rc;
    rc = RTZipDecompress(pZip, pvBuf, cbToRead, &cbActuallyRead);
    RTZipDecompDestroy(pZip);
#endif /* !VMDK_USE_BLOCK_DECOMP_API */
    if (RT_FAILURE(rc))
    {
        if (rc == VERR_ZIP_CORRUPTED)
            rc = vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: Compressed image is corrupted '%s'"), pExtent->pszFullname);
        return rc;
    }
    /* A short decompression result means the grain is malformed. */
    if (cbActuallyRead != cbToRead)
        rc = VERR_VD_VMDK_INVALID_FORMAT;
    return rc;
}
893
894static DECLCALLBACK(int) vmdkFileDeflateHelper(void *pvUser, const void *pvBuf, size_t cbBuf)
895{
896 VMDKCOMPRESSIO *pDeflateState = (VMDKCOMPRESSIO *)pvUser;
897
898 Assert(cbBuf);
899 if (pDeflateState->iOffset < 0)
900 {
901 pvBuf = (const uint8_t *)pvBuf + 1;
902 cbBuf--;
903 pDeflateState->iOffset = RT_UOFFSETOF(VMDKMARKER, uType);
904 }
905 if (!cbBuf)
906 return VINF_SUCCESS;
907 if (pDeflateState->iOffset + cbBuf > pDeflateState->cbCompGrain)
908 return VERR_BUFFER_OVERFLOW;
909 memcpy((uint8_t *)pDeflateState->pvCompGrain + pDeflateState->iOffset,
910 pvBuf, cbBuf);
911 pDeflateState->iOffset += cbBuf;
912 return VINF_SUCCESS;
913}
914
/**
 * Internal: deflate the uncompressed data and write to a file,
 * distinguishing between async and normal operation
 *
 * @returns VBox status code.
 * @param   pImage        Image instance providing the I/O interface.
 * @param   pExtent       streamOptimized extent; its pvCompGrain buffer is used
 *                        to assemble the marker plus compressed data.
 * @param   uOffset       Byte offset in the extent file to write at.
 * @param   pvBuf         The uncompressed grain data to compress.
 * @param   cbToWrite     Size of the uncompressed data in bytes.
 * @param   uLBA          Start sector of the grain, stored in the marker.
 * @param   pcbMarkerData Where to return the number of bytes written (marker
 *                        plus compressed data, padded to 512 bytes), optional.
 */
DECLINLINE(int) vmdkFileDeflateSync(PVMDKIMAGE pImage, PVMDKEXTENT pExtent,
                                    uint64_t uOffset, const void *pvBuf,
                                    size_t cbToWrite, uint64_t uLBA,
                                    uint32_t *pcbMarkerData)
{
    int rc;
    PRTZIPCOMP pZip = NULL;
    VMDKCOMPRESSIO DeflateState;

    DeflateState.pImage = pImage;
    DeflateState.iOffset = -1; /* tells the helper to strip the RTZip type byte */
    DeflateState.cbCompGrain = pExtent->cbCompGrain;
    DeflateState.pvCompGrain = pExtent->pvCompGrain;

    rc = RTZipCompCreate(&pZip, &DeflateState, vmdkFileDeflateHelper,
                         RTZIPTYPE_ZLIB, RTZIPLEVEL_DEFAULT);
    if (RT_FAILURE(rc))
        return rc;
    rc = RTZipCompress(pZip, pvBuf, cbToWrite);
    if (RT_SUCCESS(rc))
        rc = RTZipCompFinish(pZip);
    RTZipCompDestroy(pZip);
    if (RT_SUCCESS(rc))
    {
        /* iOffset now is the total of marker header + compressed bytes. */
        Assert(   DeflateState.iOffset > 0
               && (size_t)DeflateState.iOffset <= DeflateState.cbCompGrain);

        /* pad with zeroes to get to a full sector size */
        uint32_t uSize = DeflateState.iOffset;
        if (uSize % 512)
        {
            uint32_t uSizeAlign = RT_ALIGN(uSize, 512);
            memset((uint8_t *)pExtent->pvCompGrain + uSize, '\0',
                   uSizeAlign - uSize);
            uSize = uSizeAlign;
        }

        if (pcbMarkerData)
            *pcbMarkerData = uSize;

        /* Compressed grain marker. Data follows immediately. */
        VMDKMARKER *pMarker = (VMDKMARKER *)pExtent->pvCompGrain;
        pMarker->uSector = RT_H2LE_U64(uLBA);
        pMarker->cbSize = RT_H2LE_U32(  DeflateState.iOffset
                                      - RT_UOFFSETOF(VMDKMARKER, uType));
        rc = vdIfIoIntFileWriteSync(pImage->pIfIo, pExtent->pFile->pStorage,
                                    uOffset, pMarker, uSize);
        if (RT_FAILURE(rc))
            return rc;
    }
    return rc;
}
971
972
973/**
974 * Internal: check if all files are closed, prevent leaking resources.
975 */
976static int vmdkFileCheckAllClose(PVMDKIMAGE pImage)
977{
978 int rc = VINF_SUCCESS, rc2;
979 PVMDKFILE pVmdkFile;
980
981 Assert(pImage->pFiles == NULL);
982 for (pVmdkFile = pImage->pFiles;
983 pVmdkFile != NULL;
984 pVmdkFile = pVmdkFile->pNext)
985 {
986 LogRel(("VMDK: leaking reference to file \"%s\"\n",
987 pVmdkFile->pszFilename));
988 pImage->pFiles = pVmdkFile->pNext;
989
990 rc2 = vmdkFileClose(pImage, &pVmdkFile, pVmdkFile->fDelete);
991
992 if (RT_SUCCESS(rc))
993 rc = rc2;
994 }
995 return rc;
996}
997
998/**
999 * Internal: truncate a string (at a UTF8 code point boundary) and encode the
1000 * critical non-ASCII characters.
1001 */
1002static char *vmdkEncodeString(const char *psz)
1003{
1004 char szEnc[VMDK_ENCODED_COMMENT_MAX + 3];
1005 char *pszDst = szEnc;
1006
1007 AssertPtr(psz);
1008
1009 for (; *psz; psz = RTStrNextCp(psz))
1010 {
1011 char *pszDstPrev = pszDst;
1012 RTUNICP Cp = RTStrGetCp(psz);
1013 if (Cp == '\\')
1014 {
1015 pszDst = RTStrPutCp(pszDst, Cp);
1016 pszDst = RTStrPutCp(pszDst, Cp);
1017 }
1018 else if (Cp == '\n')
1019 {
1020 pszDst = RTStrPutCp(pszDst, '\\');
1021 pszDst = RTStrPutCp(pszDst, 'n');
1022 }
1023 else if (Cp == '\r')
1024 {
1025 pszDst = RTStrPutCp(pszDst, '\\');
1026 pszDst = RTStrPutCp(pszDst, 'r');
1027 }
1028 else
1029 pszDst = RTStrPutCp(pszDst, Cp);
1030 if (pszDst - szEnc >= VMDK_ENCODED_COMMENT_MAX - 1)
1031 {
1032 pszDst = pszDstPrev;
1033 break;
1034 }
1035 }
1036 *pszDst = '\0';
1037 return RTStrDup(szEnc);
1038}
1039
1040/**
1041 * Internal: decode a string and store it into the specified string.
1042 */
1043static int vmdkDecodeString(const char *pszEncoded, char *psz, size_t cb)
1044{
1045 int rc = VINF_SUCCESS;
1046 char szBuf[4];
1047
1048 if (!cb)
1049 return VERR_BUFFER_OVERFLOW;
1050
1051 AssertPtr(psz);
1052
1053 for (; *pszEncoded; pszEncoded = RTStrNextCp(pszEncoded))
1054 {
1055 char *pszDst = szBuf;
1056 RTUNICP Cp = RTStrGetCp(pszEncoded);
1057 if (Cp == '\\')
1058 {
1059 pszEncoded = RTStrNextCp(pszEncoded);
1060 RTUNICP CpQ = RTStrGetCp(pszEncoded);
1061 if (CpQ == 'n')
1062 RTStrPutCp(pszDst, '\n');
1063 else if (CpQ == 'r')
1064 RTStrPutCp(pszDst, '\r');
1065 else if (CpQ == '\0')
1066 {
1067 rc = VERR_VD_VMDK_INVALID_HEADER;
1068 break;
1069 }
1070 else
1071 RTStrPutCp(pszDst, CpQ);
1072 }
1073 else
1074 pszDst = RTStrPutCp(pszDst, Cp);
1075
1076 /* Need to leave space for terminating NUL. */
1077 if ((size_t)(pszDst - szBuf) + 1 >= cb)
1078 {
1079 rc = VERR_BUFFER_OVERFLOW;
1080 break;
1081 }
1082 memcpy(psz, szBuf, pszDst - szBuf);
1083 psz += pszDst - szBuf;
1084 }
1085 *psz = '\0';
1086 return rc;
1087}
1088
1089/**
1090 * Internal: free all buffers associated with grain directories.
1091 */
1092static void vmdkFreeGrainDirectory(PVMDKEXTENT pExtent)
1093{
1094 if (pExtent->pGD)
1095 {
1096 RTMemFree(pExtent->pGD);
1097 pExtent->pGD = NULL;
1098 }
1099 if (pExtent->pRGD)
1100 {
1101 RTMemFree(pExtent->pRGD);
1102 pExtent->pRGD = NULL;
1103 }
1104}
1105
1106/**
1107 * Internal: allocate the compressed/uncompressed buffers for streamOptimized
1108 * images.
1109 */
1110static int vmdkAllocStreamBuffers(PVMDKIMAGE pImage, PVMDKEXTENT pExtent)
1111{
1112 int rc = VINF_SUCCESS;
1113
1114 if (pImage->uImageFlags & VD_VMDK_IMAGE_FLAGS_STREAM_OPTIMIZED)
1115 {
1116 /* streamOptimized extents need a compressed grain buffer, which must
1117 * be big enough to hold uncompressible data (which needs ~8 bytes
1118 * more than the uncompressed data), the marker and padding. */
1119 pExtent->cbCompGrain = RT_ALIGN_Z( VMDK_SECTOR2BYTE(pExtent->cSectorsPerGrain)
1120 + 8 + sizeof(VMDKMARKER), 512);
1121 pExtent->pvCompGrain = RTMemAlloc(pExtent->cbCompGrain);
1122 if (RT_LIKELY(pExtent->pvCompGrain))
1123 {
1124 /* streamOptimized extents need a decompressed grain buffer. */
1125 pExtent->pvGrain = RTMemAlloc(VMDK_SECTOR2BYTE(pExtent->cSectorsPerGrain));
1126 if (!pExtent->pvGrain)
1127 rc = VERR_NO_MEMORY;
1128 }
1129 else
1130 rc = VERR_NO_MEMORY;
1131 }
1132
1133 if (RT_FAILURE(rc))
1134 vmdkFreeStreamBuffers(pExtent);
1135 return rc;
1136}
1137
1138/**
1139 * Internal: allocate all buffers associated with grain directories.
1140 */
1141static int vmdkAllocGrainDirectory(PVMDKIMAGE pImage, PVMDKEXTENT pExtent)
1142{
1143 RT_NOREF1(pImage);
1144 int rc = VINF_SUCCESS;
1145 size_t cbGD = pExtent->cGDEntries * sizeof(uint32_t);
1146
1147 pExtent->pGD = (uint32_t *)RTMemAllocZ(cbGD);
1148 if (RT_LIKELY(pExtent->pGD))
1149 {
1150 if (pExtent->uSectorRGD)
1151 {
1152 pExtent->pRGD = (uint32_t *)RTMemAllocZ(cbGD);
1153 if (RT_UNLIKELY(!pExtent->pRGD))
1154 rc = VERR_NO_MEMORY;
1155 }
1156 }
1157 else
1158 rc = VERR_NO_MEMORY;
1159
1160 if (RT_FAILURE(rc))
1161 vmdkFreeGrainDirectory(pExtent);
1162 return rc;
1163}
1164
1165/**
1166 * Converts the grain directory from little to host endianess.
1167 *
1168 * @param pGD The grain directory.
1169 * @param cGDEntries Number of entries in the grain directory to convert.
1170 */
1171DECLINLINE(void) vmdkGrainDirectoryConvToHost(uint32_t *pGD, uint32_t cGDEntries)
1172{
1173 uint32_t *pGDTmp = pGD;
1174
1175 for (uint32_t i = 0; i < cGDEntries; i++, pGDTmp++)
1176 *pGDTmp = RT_LE2H_U32(*pGDTmp);
1177}
1178
1179/**
1180 * Read the grain directory and allocated grain tables verifying them against
1181 * their back up copies if available.
1182 *
1183 * @returns VBox status code.
1184 * @param pImage Image instance data.
1185 * @param pExtent The VMDK extent.
1186 */
1187static int vmdkReadGrainDirectory(PVMDKIMAGE pImage, PVMDKEXTENT pExtent)
1188{
1189 int rc = VINF_SUCCESS;
1190 size_t cbGD = pExtent->cGDEntries * sizeof(uint32_t);
1191
1192 AssertReturn(( pExtent->enmType == VMDKETYPE_HOSTED_SPARSE
1193 && pExtent->uSectorGD != VMDK_GD_AT_END
1194 && pExtent->uSectorRGD != VMDK_GD_AT_END), VERR_INTERNAL_ERROR);
1195
1196 rc = vmdkAllocGrainDirectory(pImage, pExtent);
1197 if (RT_SUCCESS(rc))
1198 {
1199 /* The VMDK 1.1 spec seems to talk about compressed grain directories,
1200 * but in reality they are not compressed. */
1201 rc = vdIfIoIntFileReadSync(pImage->pIfIo, pExtent->pFile->pStorage,
1202 VMDK_SECTOR2BYTE(pExtent->uSectorGD),
1203 pExtent->pGD, cbGD);
1204 if (RT_SUCCESS(rc))
1205 {
1206 vmdkGrainDirectoryConvToHost(pExtent->pGD, pExtent->cGDEntries);
1207
1208 if ( pExtent->uSectorRGD
1209 && !(pImage->uOpenFlags & VD_OPEN_FLAGS_SKIP_CONSISTENCY_CHECKS))
1210 {
1211 /* The VMDK 1.1 spec seems to talk about compressed grain directories,
1212 * but in reality they are not compressed. */
1213 rc = vdIfIoIntFileReadSync(pImage->pIfIo, pExtent->pFile->pStorage,
1214 VMDK_SECTOR2BYTE(pExtent->uSectorRGD),
1215 pExtent->pRGD, cbGD);
1216 if (RT_SUCCESS(rc))
1217 {
1218 vmdkGrainDirectoryConvToHost(pExtent->pRGD, pExtent->cGDEntries);
1219
1220 /* Check grain table and redundant grain table for consistency. */
1221 size_t cbGT = pExtent->cGTEntries * sizeof(uint32_t);
1222 size_t cbGTBuffers = cbGT; /* Start with space for one GT. */
1223 size_t cbGTBuffersMax = _1M;
1224
1225 uint32_t *pTmpGT1 = (uint32_t *)RTMemAlloc(cbGTBuffers);
1226 uint32_t *pTmpGT2 = (uint32_t *)RTMemAlloc(cbGTBuffers);
1227
1228 if ( !pTmpGT1
1229 || !pTmpGT2)
1230 rc = VERR_NO_MEMORY;
1231
1232 size_t i = 0;
1233 uint32_t *pGDTmp = pExtent->pGD;
1234 uint32_t *pRGDTmp = pExtent->pRGD;
1235
1236 /* Loop through all entries. */
1237 while (i < pExtent->cGDEntries)
1238 {
1239 uint32_t uGTStart = *pGDTmp;
1240 uint32_t uRGTStart = *pRGDTmp;
1241 size_t cbGTRead = cbGT;
1242
1243 /* If no grain table is allocated skip the entry. */
1244 if (*pGDTmp == 0 && *pRGDTmp == 0)
1245 {
1246 i++;
1247 continue;
1248 }
1249
1250 if (*pGDTmp == 0 || *pRGDTmp == 0 || *pGDTmp == *pRGDTmp)
1251 {
1252 /* Just one grain directory entry refers to a not yet allocated
1253 * grain table or both grain directory copies refer to the same
1254 * grain table. Not allowed. */
1255 rc = vdIfError(pImage->pIfError, VERR_VD_VMDK_INVALID_HEADER, RT_SRC_POS,
1256 N_("VMDK: inconsistent references to grain directory in '%s'"), pExtent->pszFullname);
1257 break;
1258 }
1259
1260 i++;
1261 pGDTmp++;
1262 pRGDTmp++;
1263
1264 /*
1265 * Read a few tables at once if adjacent to decrease the number
1266 * of I/O requests. Read at maximum 1MB at once.
1267 */
1268 while ( i < pExtent->cGDEntries
1269 && cbGTRead < cbGTBuffersMax)
1270 {
1271 /* If no grain table is allocated skip the entry. */
1272 if (*pGDTmp == 0 && *pRGDTmp == 0)
1273 {
1274 i++;
1275 continue;
1276 }
1277
1278 if (*pGDTmp == 0 || *pRGDTmp == 0 || *pGDTmp == *pRGDTmp)
1279 {
1280 /* Just one grain directory entry refers to a not yet allocated
1281 * grain table or both grain directory copies refer to the same
1282 * grain table. Not allowed. */
1283 rc = vdIfError(pImage->pIfError, VERR_VD_VMDK_INVALID_HEADER, RT_SRC_POS,
1284 N_("VMDK: inconsistent references to grain directory in '%s'"), pExtent->pszFullname);
1285 break;
1286 }
1287
1288 /* Check that the start offsets are adjacent.*/
1289 if ( VMDK_SECTOR2BYTE(uGTStart) + cbGTRead != VMDK_SECTOR2BYTE(*pGDTmp)
1290 || VMDK_SECTOR2BYTE(uRGTStart) + cbGTRead != VMDK_SECTOR2BYTE(*pRGDTmp))
1291 break;
1292
1293 i++;
1294 pGDTmp++;
1295 pRGDTmp++;
1296 cbGTRead += cbGT;
1297 }
1298
1299 /* Increase buffers if required. */
1300 if ( RT_SUCCESS(rc)
1301 && cbGTBuffers < cbGTRead)
1302 {
1303 uint32_t *pTmp;
1304 pTmp = (uint32_t *)RTMemRealloc(pTmpGT1, cbGTRead);
1305 if (pTmp)
1306 {
1307 pTmpGT1 = pTmp;
1308 pTmp = (uint32_t *)RTMemRealloc(pTmpGT2, cbGTRead);
1309 if (pTmp)
1310 pTmpGT2 = pTmp;
1311 else
1312 rc = VERR_NO_MEMORY;
1313 }
1314 else
1315 rc = VERR_NO_MEMORY;
1316
1317 if (rc == VERR_NO_MEMORY)
1318 {
1319 /* Reset to the old values. */
1320 rc = VINF_SUCCESS;
1321 i -= cbGTRead / cbGT;
1322 cbGTRead = cbGT;
1323
1324 /* Don't try to increase the buffer again in the next run. */
1325 cbGTBuffersMax = cbGTBuffers;
1326 }
1327 }
1328
1329 if (RT_SUCCESS(rc))
1330 {
1331 /* The VMDK 1.1 spec seems to talk about compressed grain tables,
1332 * but in reality they are not compressed. */
1333 rc = vdIfIoIntFileReadSync(pImage->pIfIo, pExtent->pFile->pStorage,
1334 VMDK_SECTOR2BYTE(uGTStart),
1335 pTmpGT1, cbGTRead);
1336 if (RT_FAILURE(rc))
1337 {
1338 rc = vdIfError(pImage->pIfError, rc, RT_SRC_POS,
1339 N_("VMDK: error reading grain table in '%s'"), pExtent->pszFullname);
1340 break;
1341 }
1342 /* The VMDK 1.1 spec seems to talk about compressed grain tables,
1343 * but in reality they are not compressed. */
1344 rc = vdIfIoIntFileReadSync(pImage->pIfIo, pExtent->pFile->pStorage,
1345 VMDK_SECTOR2BYTE(uRGTStart),
1346 pTmpGT2, cbGTRead);
1347 if (RT_FAILURE(rc))
1348 {
1349 rc = vdIfError(pImage->pIfError, rc, RT_SRC_POS,
1350 N_("VMDK: error reading backup grain table in '%s'"), pExtent->pszFullname);
1351 break;
1352 }
1353 if (memcmp(pTmpGT1, pTmpGT2, cbGTRead))
1354 {
1355 rc = vdIfError(pImage->pIfError, VERR_VD_VMDK_INVALID_HEADER, RT_SRC_POS,
1356 N_("VMDK: inconsistency between grain table and backup grain table in '%s'"), pExtent->pszFullname);
1357 break;
1358 }
1359 }
1360 } /* while (i < pExtent->cGDEntries) */
1361
1362 /** @todo figure out what to do for unclean VMDKs. */
1363 if (pTmpGT1)
1364 RTMemFree(pTmpGT1);
1365 if (pTmpGT2)
1366 RTMemFree(pTmpGT2);
1367 }
1368 else
1369 rc = vdIfError(pImage->pIfError, rc, RT_SRC_POS,
1370 N_("VMDK: could not read redundant grain directory in '%s'"), pExtent->pszFullname);
1371 }
1372 }
1373 else
1374 rc = vdIfError(pImage->pIfError, rc, RT_SRC_POS,
1375 N_("VMDK: could not read grain directory in '%s': %Rrc"), pExtent->pszFullname, rc);
1376 }
1377
1378 if (RT_FAILURE(rc))
1379 vmdkFreeGrainDirectory(pExtent);
1380 return rc;
1381}
1382
1383/**
1384 * Creates a new grain directory for the given extent at the given start sector.
1385 *
1386 * @returns VBox status code.
1387 * @param pImage Image instance data.
1388 * @param pExtent The VMDK extent.
1389 * @param uStartSector Where the grain directory should be stored in the image.
1390 * @param fPreAlloc Flag whether to pre allocate the grain tables at this point.
1391 */
1392static int vmdkCreateGrainDirectory(PVMDKIMAGE pImage, PVMDKEXTENT pExtent,
1393 uint64_t uStartSector, bool fPreAlloc)
1394{
1395 int rc = VINF_SUCCESS;
1396 unsigned i;
1397 size_t cbGD = pExtent->cGDEntries * sizeof(uint32_t);
1398 size_t cbGDRounded = RT_ALIGN_64(cbGD, 512);
1399 size_t cbGTRounded;
1400 uint64_t cbOverhead;
1401
1402 if (fPreAlloc)
1403 {
1404 cbGTRounded = RT_ALIGN_64(pExtent->cGDEntries * pExtent->cGTEntries * sizeof(uint32_t), 512);
1405 cbOverhead = VMDK_SECTOR2BYTE(uStartSector) + cbGDRounded + cbGTRounded;
1406 }
1407 else
1408 {
1409 /* Use a dummy start sector for layout computation. */
1410 if (uStartSector == VMDK_GD_AT_END)
1411 uStartSector = 1;
1412 cbGTRounded = 0;
1413 cbOverhead = VMDK_SECTOR2BYTE(uStartSector) + cbGDRounded;
1414 }
1415
1416 /* For streamOptimized extents there is only one grain directory,
1417 * and for all others take redundant grain directory into account. */
1418 if (pImage->uImageFlags & VD_VMDK_IMAGE_FLAGS_STREAM_OPTIMIZED)
1419 {
1420 cbOverhead = RT_ALIGN_64(cbOverhead,
1421 VMDK_SECTOR2BYTE(pExtent->cSectorsPerGrain));
1422 }
1423 else
1424 {
1425 cbOverhead += cbGDRounded + cbGTRounded;
1426 cbOverhead = RT_ALIGN_64(cbOverhead,
1427 VMDK_SECTOR2BYTE(pExtent->cSectorsPerGrain));
1428 rc = vdIfIoIntFileSetSize(pImage->pIfIo, pExtent->pFile->pStorage, cbOverhead);
1429 }
1430
1431 if (RT_SUCCESS(rc))
1432 {
1433 pExtent->uAppendPosition = cbOverhead;
1434 pExtent->cOverheadSectors = VMDK_BYTE2SECTOR(cbOverhead);
1435
1436 if (pImage->uImageFlags & VD_VMDK_IMAGE_FLAGS_STREAM_OPTIMIZED)
1437 {
1438 pExtent->uSectorRGD = 0;
1439 pExtent->uSectorGD = uStartSector;
1440 }
1441 else
1442 {
1443 pExtent->uSectorRGD = uStartSector;
1444 pExtent->uSectorGD = uStartSector + VMDK_BYTE2SECTOR(cbGDRounded + cbGTRounded);
1445 }
1446
1447 rc = vmdkAllocStreamBuffers(pImage, pExtent);
1448 if (RT_SUCCESS(rc))
1449 {
1450 rc = vmdkAllocGrainDirectory(pImage, pExtent);
1451 if ( RT_SUCCESS(rc)
1452 && fPreAlloc)
1453 {
1454 uint32_t uGTSectorLE;
1455 uint64_t uOffsetSectors;
1456
1457 if (pExtent->pRGD)
1458 {
1459 uOffsetSectors = pExtent->uSectorRGD + VMDK_BYTE2SECTOR(cbGDRounded);
1460 for (i = 0; i < pExtent->cGDEntries; i++)
1461 {
1462 pExtent->pRGD[i] = uOffsetSectors;
1463 uGTSectorLE = RT_H2LE_U64(uOffsetSectors);
1464 /* Write the redundant grain directory entry to disk. */
1465 rc = vdIfIoIntFileWriteSync(pImage->pIfIo, pExtent->pFile->pStorage,
1466 VMDK_SECTOR2BYTE(pExtent->uSectorRGD) + i * sizeof(uGTSectorLE),
1467 &uGTSectorLE, sizeof(uGTSectorLE));
1468 if (RT_FAILURE(rc))
1469 {
1470 rc = vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: cannot write new redundant grain directory entry in '%s'"), pExtent->pszFullname);
1471 break;
1472 }
1473 uOffsetSectors += VMDK_BYTE2SECTOR(pExtent->cGTEntries * sizeof(uint32_t));
1474 }
1475 }
1476
1477 if (RT_SUCCESS(rc))
1478 {
1479 uOffsetSectors = pExtent->uSectorGD + VMDK_BYTE2SECTOR(cbGDRounded);
1480 for (i = 0; i < pExtent->cGDEntries; i++)
1481 {
1482 pExtent->pGD[i] = uOffsetSectors;
1483 uGTSectorLE = RT_H2LE_U64(uOffsetSectors);
1484 /* Write the grain directory entry to disk. */
1485 rc = vdIfIoIntFileWriteSync(pImage->pIfIo, pExtent->pFile->pStorage,
1486 VMDK_SECTOR2BYTE(pExtent->uSectorGD) + i * sizeof(uGTSectorLE),
1487 &uGTSectorLE, sizeof(uGTSectorLE));
1488 if (RT_FAILURE(rc))
1489 {
1490 rc = vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: cannot write new grain directory entry in '%s'"), pExtent->pszFullname);
1491 break;
1492 }
1493 uOffsetSectors += VMDK_BYTE2SECTOR(pExtent->cGTEntries * sizeof(uint32_t));
1494 }
1495 }
1496 }
1497 }
1498 }
1499
1500 if (RT_FAILURE(rc))
1501 vmdkFreeGrainDirectory(pExtent);
1502 return rc;
1503}
1504
1505/**
1506 * Unquotes the given string returning the result in a separate buffer.
1507 *
1508 * @returns VBox status code.
1509 * @param pImage The VMDK image state.
1510 * @param pszStr The string to unquote.
1511 * @param ppszUnquoted Where to store the return value, use RTMemTmpFree to
1512 * free.
1513 * @param ppszNext Where to store the pointer to any character following
1514 * the quoted value, optional.
1515 */
1516static int vmdkStringUnquote(PVMDKIMAGE pImage, const char *pszStr,
1517 char **ppszUnquoted, char **ppszNext)
1518{
1519 const char *pszStart = pszStr;
1520 char *pszQ;
1521 char *pszUnquoted;
1522
1523 /* Skip over whitespace. */
1524 while (*pszStr == ' ' || *pszStr == '\t')
1525 pszStr++;
1526
1527 if (*pszStr != '"')
1528 {
1529 pszQ = (char *)pszStr;
1530 while (*pszQ && *pszQ != ' ' && *pszQ != '\t')
1531 pszQ++;
1532 }
1533 else
1534 {
1535 pszStr++;
1536 pszQ = (char *)strchr(pszStr, '"');
1537 if (pszQ == NULL)
1538 return vdIfError(pImage->pIfError, VERR_VD_VMDK_INVALID_HEADER, RT_SRC_POS, N_("VMDK: incorrectly quoted value in descriptor in '%s' (raw value %s)"),
1539 pImage->pszFilename, pszStart);
1540 }
1541
1542 pszUnquoted = (char *)RTMemTmpAlloc(pszQ - pszStr + 1);
1543 if (!pszUnquoted)
1544 return VERR_NO_MEMORY;
1545 memcpy(pszUnquoted, pszStr, pszQ - pszStr);
1546 pszUnquoted[pszQ - pszStr] = '\0';
1547 *ppszUnquoted = pszUnquoted;
1548 if (ppszNext)
1549 *ppszNext = pszQ + 1;
1550 return VINF_SUCCESS;
1551}
1552
1553static int vmdkDescInitStr(PVMDKIMAGE pImage, PVMDKDESCRIPTOR pDescriptor,
1554 const char *pszLine)
1555{
1556 char *pEnd = pDescriptor->aLines[pDescriptor->cLines];
1557 ssize_t cbDiff = strlen(pszLine) + 1;
1558
1559 if ( pDescriptor->cLines >= VMDK_DESCRIPTOR_LINES_MAX - 1
1560 && pEnd - pDescriptor->aLines[0] > (ptrdiff_t)pDescriptor->cbDescAlloc - cbDiff)
1561 return vdIfError(pImage->pIfError, VERR_BUFFER_OVERFLOW, RT_SRC_POS, N_("VMDK: descriptor too big in '%s'"), pImage->pszFilename);
1562
1563 memcpy(pEnd, pszLine, cbDiff);
1564 pDescriptor->cLines++;
1565 pDescriptor->aLines[pDescriptor->cLines] = pEnd + cbDiff;
1566 pDescriptor->fDirty = true;
1567
1568 return VINF_SUCCESS;
1569}
1570
1571static bool vmdkDescGetStr(PVMDKDESCRIPTOR pDescriptor, unsigned uStart,
1572 const char *pszKey, const char **ppszValue)
1573{
1574 size_t cbKey = strlen(pszKey);
1575 const char *pszValue;
1576
1577 while (uStart != 0)
1578 {
1579 if (!strncmp(pDescriptor->aLines[uStart], pszKey, cbKey))
1580 {
1581 /* Key matches, check for a '=' (preceded by whitespace). */
1582 pszValue = pDescriptor->aLines[uStart] + cbKey;
1583 while (*pszValue == ' ' || *pszValue == '\t')
1584 pszValue++;
1585 if (*pszValue == '=')
1586 {
1587 *ppszValue = pszValue + 1;
1588 break;
1589 }
1590 }
1591 uStart = pDescriptor->aNextLines[uStart];
1592 }
1593 return !!uStart;
1594}
1595
/**
 * Sets, replaces or removes a "key=value" line in the descriptor section
 * whose line chain starts at @a uStart.
 *
 * @returns VBox status code.
 * @param   pImage       The VMDK image state (for error reporting).
 * @param   pDescriptor  The descriptor to modify.
 * @param   uStart       First line index of the section to operate on.
 * @param   pszKey       The key to set.
 * @param   pszValue     The new value, or NULL to remove the key's line.
 */
static int vmdkDescSetStr(PVMDKIMAGE pImage, PVMDKDESCRIPTOR pDescriptor,
                          unsigned uStart,
                          const char *pszKey, const char *pszValue)
{
    char *pszTmp = NULL; /* (MSC naturally cannot figure this isn't used uninitialized) */
    size_t cbKey = strlen(pszKey);
    unsigned uLast = 0;

    /* Scan the section for the key, remembering the last line for appending. */
    while (uStart != 0)
    {
        if (!strncmp(pDescriptor->aLines[uStart], pszKey, cbKey))
        {
            /* Key matches, check for a '=' (preceded by whitespace). */
            pszTmp = pDescriptor->aLines[uStart] + cbKey;
            while (*pszTmp == ' ' || *pszTmp == '\t')
                pszTmp++;
            if (*pszTmp == '=')
            {
                pszTmp++;
                /** @todo r=bird: Doesn't skipping trailing blanks here just cause unecessary
                 * bloat and potentially out of space error? */
                while (*pszTmp == ' ' || *pszTmp == '\t')
                    pszTmp++;
                break;
            }
        }
        if (!pDescriptor->aNextLines[uStart])
            uLast = uStart;
        uStart = pDescriptor->aNextLines[uStart];
    }
    if (uStart)
    {
        if (pszValue)
        {
            /* Key already exists, replace existing value. */
            size_t cbOldVal = strlen(pszTmp);
            size_t cbNewVal = strlen(pszValue);
            ssize_t cbDiff = cbNewVal - cbOldVal;
            /* Check for buffer overflow. */
            if (   pDescriptor->aLines[pDescriptor->cLines] - pDescriptor->aLines[0]
                 > (ptrdiff_t)pDescriptor->cbDescAlloc - cbDiff)
                return vdIfError(pImage->pIfError, VERR_BUFFER_OVERFLOW, RT_SRC_POS, N_("VMDK: descriptor too big in '%s'"), pImage->pszFilename);

            /* Shift the tail (which starts at the old value's NUL) and splice
             * in the new value; the copied NUL re-terminates this line. */
            memmove(pszTmp + cbNewVal, pszTmp + cbOldVal,
                    pDescriptor->aLines[pDescriptor->cLines] - pszTmp - cbOldVal);
            memcpy(pszTmp, pszValue, cbNewVal + 1);
            for (unsigned i = uStart + 1; i <= pDescriptor->cLines; i++)
                pDescriptor->aLines[i] += cbDiff;
        }
        else
        {
            /* Key exists and shall be removed: drop the whole line. */
            memmove(pDescriptor->aLines[uStart], pDescriptor->aLines[uStart+1],
                    pDescriptor->aLines[pDescriptor->cLines] - pDescriptor->aLines[uStart+1] + 1);
            /* NOTE(review): unlike vmdkDescExtRemoveDummy/ByLine this does not
             * rebase aLines[i-1] by the removed line's length — verify whether
             * the line pointers are really meant to stay unadjusted here. */
            for (unsigned i = uStart + 1; i <= pDescriptor->cLines; i++)
            {
                pDescriptor->aLines[i-1] = pDescriptor->aLines[i];
                if (pDescriptor->aNextLines[i])
                    pDescriptor->aNextLines[i-1] = pDescriptor->aNextLines[i] - 1;
                else
                    pDescriptor->aNextLines[i-1] = 0;
            }
            pDescriptor->cLines--;
            /* Adjust starting line numbers of following descriptor sections. */
            if (uStart < pDescriptor->uFirstExtent)
                pDescriptor->uFirstExtent--;
            if (uStart < pDescriptor->uFirstDDB)
                pDescriptor->uFirstDDB--;
        }
    }
    else
    {
        /* Key doesn't exist, append after the last entry in this category. */
        if (!pszValue)
        {
            /* Key doesn't exist, and it should be removed. Simply a no-op. */
            return VINF_SUCCESS;
        }
        cbKey = strlen(pszKey);
        size_t cbValue = strlen(pszValue);
        ssize_t cbDiff = cbKey + 1 + cbValue + 1;
        /* Check for buffer overflow. */
        if (   (pDescriptor->cLines >= VMDK_DESCRIPTOR_LINES_MAX - 1)
            || (   pDescriptor->aLines[pDescriptor->cLines]
                 - pDescriptor->aLines[0] > (ptrdiff_t)pDescriptor->cbDescAlloc - cbDiff))
            return vdIfError(pImage->pIfError, VERR_BUFFER_OVERFLOW, RT_SRC_POS, N_("VMDK: descriptor too big in '%s'"), pImage->pszFilename);
        /* Open a slot in aLines/aNextLines right behind the section's last line. */
        for (unsigned i = pDescriptor->cLines + 1; i > uLast + 1; i--)
        {
            pDescriptor->aLines[i] = pDescriptor->aLines[i - 1];
            if (pDescriptor->aNextLines[i - 1])
                pDescriptor->aNextLines[i] = pDescriptor->aNextLines[i - 1] + 1;
            else
                pDescriptor->aNextLines[i] = 0;
        }
        uStart = uLast + 1;
        pDescriptor->aNextLines[uLast] = uStart;
        pDescriptor->aNextLines[uStart] = 0;
        pDescriptor->cLines++;
        pszTmp = pDescriptor->aLines[uStart];
        /* Make room in the text buffer and write "key=value\0". */
        memmove(pszTmp + cbDiff, pszTmp,
                pDescriptor->aLines[pDescriptor->cLines] - pszTmp);
        memcpy(pDescriptor->aLines[uStart], pszKey, cbKey);
        pDescriptor->aLines[uStart][cbKey] = '=';
        memcpy(pDescriptor->aLines[uStart] + cbKey + 1, pszValue, cbValue + 1);
        for (unsigned i = uStart + 1; i <= pDescriptor->cLines; i++)
            pDescriptor->aLines[i] += cbDiff;

        /* Adjust starting line numbers of following descriptor sections. */
        if (uStart <= pDescriptor->uFirstExtent)
            pDescriptor->uFirstExtent++;
        if (uStart <= pDescriptor->uFirstDDB)
            pDescriptor->uFirstDDB++;
    }
    pDescriptor->fDirty = true;
    return VINF_SUCCESS;
}
1711
1712static int vmdkDescBaseGetU32(PVMDKDESCRIPTOR pDescriptor, const char *pszKey,
1713 uint32_t *puValue)
1714{
1715 const char *pszValue;
1716
1717 if (!vmdkDescGetStr(pDescriptor, pDescriptor->uFirstDesc, pszKey,
1718 &pszValue))
1719 return VERR_VD_VMDK_VALUE_NOT_FOUND;
1720 return RTStrToUInt32Ex(pszValue, NULL, 10, puValue);
1721}
1722
1723/**
1724 * Returns the value of the given key as a string allocating the necessary memory.
1725 *
1726 * @returns VBox status code.
1727 * @retval VERR_VD_VMDK_VALUE_NOT_FOUND if the value could not be found.
1728 * @param pImage The VMDK image state.
1729 * @param pDescriptor The descriptor to fetch the value from.
1730 * @param pszKey The key to get the value from.
1731 * @param ppszValue Where to store the return value, use RTMemTmpFree to
1732 * free.
1733 */
1734static int vmdkDescBaseGetStr(PVMDKIMAGE pImage, PVMDKDESCRIPTOR pDescriptor,
1735 const char *pszKey, char **ppszValue)
1736{
1737 const char *pszValue;
1738 char *pszValueUnquoted;
1739
1740 if (!vmdkDescGetStr(pDescriptor, pDescriptor->uFirstDesc, pszKey,
1741 &pszValue))
1742 return VERR_VD_VMDK_VALUE_NOT_FOUND;
1743 int rc = vmdkStringUnquote(pImage, pszValue, &pszValueUnquoted, NULL);
1744 if (RT_FAILURE(rc))
1745 return rc;
1746 *ppszValue = pszValueUnquoted;
1747 return rc;
1748}
1749
1750static int vmdkDescBaseSetStr(PVMDKIMAGE pImage, PVMDKDESCRIPTOR pDescriptor,
1751 const char *pszKey, const char *pszValue)
1752{
1753 char *pszValueQuoted;
1754
1755 RTStrAPrintf(&pszValueQuoted, "\"%s\"", pszValue);
1756 if (!pszValueQuoted)
1757 return VERR_NO_STR_MEMORY;
1758 int rc = vmdkDescSetStr(pImage, pDescriptor, pDescriptor->uFirstDesc, pszKey,
1759 pszValueQuoted);
1760 RTStrFree(pszValueQuoted);
1761 return rc;
1762}
1763
/**
 * Internal: remove the first line of the extent section (a dummy/placeholder
 * entry), compacting the text buffer and the aLines/aNextLines bookkeeping.
 *
 * No-op when the descriptor has no extent section.
 */
static void vmdkDescExtRemoveDummy(PVMDKIMAGE pImage,
                                   PVMDKDESCRIPTOR pDescriptor)
{
    RT_NOREF1(pImage);
    unsigned uEntry = pDescriptor->uFirstExtent;
    ssize_t cbDiff;

    if (!uEntry)
        return;

    /* Bytes occupied by the removed line, including its NUL terminator. */
    cbDiff = strlen(pDescriptor->aLines[uEntry]) + 1;
    /* Move everything including \0 in the entry marking the end of buffer. */
    memmove(pDescriptor->aLines[uEntry], pDescriptor->aLines[uEntry + 1],
            pDescriptor->aLines[pDescriptor->cLines] - pDescriptor->aLines[uEntry + 1] + 1);
    for (unsigned i = uEntry + 1; i <= pDescriptor->cLines; i++)
    {
        /* Rebase every following line start by the removed line's length and
         * shift the next-line chain indices down by one. */
        pDescriptor->aLines[i - 1] = pDescriptor->aLines[i] - cbDiff;
        if (pDescriptor->aNextLines[i])
            pDescriptor->aNextLines[i - 1] = pDescriptor->aNextLines[i] - 1;
        else
            pDescriptor->aNextLines[i - 1] = 0;
    }
    pDescriptor->cLines--;
    /* The DDB section follows the extent list, so it moved up by one line. */
    if (pDescriptor->uFirstDDB)
        pDescriptor->uFirstDDB--;

    return;
}
1792
/**
 * Internal: remove the given extent description line from the descriptor,
 * compacting the text buffer and the aLines/aNextLines bookkeeping.
 *
 * @param   pImage       Unused (kept for signature symmetry).
 * @param   pDescriptor  The descriptor to modify.
 * @param   uLine        Index of the line to remove; 0 is a no-op.
 */
static void vmdkDescExtRemoveByLine(PVMDKIMAGE pImage,
                                    PVMDKDESCRIPTOR pDescriptor, unsigned uLine)
{
    RT_NOREF1(pImage);
    unsigned uEntry = uLine;
    ssize_t cbDiff;
    if (!uEntry)
        return;
    /* Bytes occupied by the removed line, including its NUL terminator. */
    cbDiff = strlen(pDescriptor->aLines[uEntry]) + 1;
    /* Move everything including \0 in the entry marking the end of buffer. */
    memmove(pDescriptor->aLines[uEntry], pDescriptor->aLines[uEntry + 1],
            pDescriptor->aLines[pDescriptor->cLines] - pDescriptor->aLines[uEntry + 1] + 1);
    for (unsigned i = uEntry; i <= pDescriptor->cLines; i++)
    {
        /* Rebase the start pointers of all lines behind the removed one and
         * shift the next-line chain indices down by one. */
        if (i != uEntry)
            pDescriptor->aLines[i - 1] = pDescriptor->aLines[i] - cbDiff;
        if (pDescriptor->aNextLines[i])
            pDescriptor->aNextLines[i - 1] = pDescriptor->aNextLines[i] - 1;
        else
            pDescriptor->aNextLines[i - 1] = 0;
    }
    pDescriptor->cLines--;
    /* The DDB section follows the extent list, so it moved up by one line. */
    if (pDescriptor->uFirstDDB)
        pDescriptor->uFirstDDB--;
    return;
}
1819
/**
 * Internal: append a new extent description line
 * ("ACCESS SIZE TYPE [\"file\" [offset]]") to the extent section.
 *
 * @returns VBox status code.
 * @param   pImage           The VMDK image state.
 * @param   pDescriptor      The descriptor to append to.
 * @param   enmAccess        Access mode of the extent.
 * @param   cNominalSectors  Nominal extent size in sectors.
 * @param   enmType          Extent type (SPARSE/FLAT/ZERO/VMFS).
 * @param   pszBasename      Extent file name (not used for ZERO extents).
 * @param   uSectorOffset    Start offset within the file (FLAT extents only).
 */
static int vmdkDescExtInsert(PVMDKIMAGE pImage, PVMDKDESCRIPTOR pDescriptor,
                             VMDKACCESS enmAccess, uint64_t cNominalSectors,
                             VMDKETYPE enmType, const char *pszBasename,
                             uint64_t uSectorOffset)
{
    static const char *apszAccess[] = { "NOACCESS", "RDONLY", "RW" };
    static const char *apszType[] = { "", "SPARSE", "FLAT", "ZERO", "VMFS" };
    char *pszTmp;
    unsigned uStart = pDescriptor->uFirstExtent, uLast = 0;
    char szExt[1024];
    ssize_t cbDiff;

    Assert((unsigned)enmAccess < RT_ELEMENTS(apszAccess));
    Assert((unsigned)enmType < RT_ELEMENTS(apszType));

    /* Find last entry in extent description. */
    while (uStart)
    {
        if (!pDescriptor->aNextLines[uStart])
            uLast = uStart;
        uStart = pDescriptor->aNextLines[uStart];
    }

    /* Format the new line: ZERO extents have no file, FLAT ones carry an
     * additional in-file sector offset. */
    if (enmType == VMDKETYPE_ZERO)
    {
        RTStrPrintf(szExt, sizeof(szExt), "%s %llu %s ", apszAccess[enmAccess],
                    cNominalSectors, apszType[enmType]);
    }
    else if (enmType == VMDKETYPE_FLAT)
    {
        RTStrPrintf(szExt, sizeof(szExt), "%s %llu %s \"%s\" %llu",
                    apszAccess[enmAccess], cNominalSectors,
                    apszType[enmType], pszBasename, uSectorOffset);
    }
    else
    {
        RTStrPrintf(szExt, sizeof(szExt), "%s %llu %s \"%s\"",
                    apszAccess[enmAccess], cNominalSectors,
                    apszType[enmType], pszBasename);
    }
    cbDiff = strlen(szExt) + 1;

    /* Check for buffer overflow. */
    if (   (pDescriptor->cLines >= VMDK_DESCRIPTOR_LINES_MAX - 1)
        || (   pDescriptor->aLines[pDescriptor->cLines]
             - pDescriptor->aLines[0] > (ptrdiff_t)pDescriptor->cbDescAlloc - cbDiff))
    {
        if ((pImage->uImageFlags & VD_VMDK_IMAGE_FLAGS_SPLIT_2G)
            && !(pDescriptor->cLines >= VMDK_DESCRIPTOR_LINES_MAX - 1))
        {
            /* NOTE(review): this only doubles the allocation size bookkeeping;
             * the descriptor text buffer itself is not reallocated here —
             * confirm the caller guarantees enough slack for split-2G images. */
            pImage->cbDescAlloc *= 2;
            pDescriptor->cbDescAlloc *= 2;
        }
        else
            return vdIfError(pImage->pIfError, VERR_BUFFER_OVERFLOW, RT_SRC_POS, N_("VMDK: descriptor too big in '%s'"), pImage->pszFilename);
    }

    /* Open a slot in aLines/aNextLines right behind the section's last line. */
    for (unsigned i = pDescriptor->cLines + 1; i > uLast + 1; i--)
    {
        pDescriptor->aLines[i] = pDescriptor->aLines[i - 1];
        if (pDescriptor->aNextLines[i - 1])
            pDescriptor->aNextLines[i] = pDescriptor->aNextLines[i - 1] + 1;
        else
            pDescriptor->aNextLines[i] = 0;
    }
    uStart = uLast + 1;
    pDescriptor->aNextLines[uLast] = uStart;
    pDescriptor->aNextLines[uStart] = 0;
    pDescriptor->cLines++;
    pszTmp = pDescriptor->aLines[uStart];
    /* Make room in the text buffer and copy the formatted line in. */
    memmove(pszTmp + cbDiff, pszTmp,
            pDescriptor->aLines[pDescriptor->cLines] - pszTmp);
    memcpy(pDescriptor->aLines[uStart], szExt, cbDiff);
    for (unsigned i = uStart + 1; i <= pDescriptor->cLines; i++)
        pDescriptor->aLines[i] += cbDiff;

    /* Adjust starting line numbers of following descriptor sections. */
    if (uStart <= pDescriptor->uFirstDDB)
        pDescriptor->uFirstDDB++;

    pDescriptor->fDirty = true;
    return VINF_SUCCESS;
}
1903
1904/**
1905 * Returns the value of the given key from the DDB as a string allocating
1906 * the necessary memory.
1907 *
1908 * @returns VBox status code.
1909 * @retval VERR_VD_VMDK_VALUE_NOT_FOUND if the value could not be found.
1910 * @param pImage The VMDK image state.
1911 * @param pDescriptor The descriptor to fetch the value from.
1912 * @param pszKey The key to get the value from.
1913 * @param ppszValue Where to store the return value, use RTMemTmpFree to
1914 * free.
1915 */
1916static int vmdkDescDDBGetStr(PVMDKIMAGE pImage, PVMDKDESCRIPTOR pDescriptor,
1917 const char *pszKey, char **ppszValue)
1918{
1919 const char *pszValue;
1920 char *pszValueUnquoted;
1921
1922 if (!vmdkDescGetStr(pDescriptor, pDescriptor->uFirstDDB, pszKey,
1923 &pszValue))
1924 return VERR_VD_VMDK_VALUE_NOT_FOUND;
1925 int rc = vmdkStringUnquote(pImage, pszValue, &pszValueUnquoted, NULL);
1926 if (RT_FAILURE(rc))
1927 return rc;
1928 *ppszValue = pszValueUnquoted;
1929 return rc;
1930}
1931
1932static int vmdkDescDDBGetU32(PVMDKIMAGE pImage, PVMDKDESCRIPTOR pDescriptor,
1933 const char *pszKey, uint32_t *puValue)
1934{
1935 const char *pszValue;
1936 char *pszValueUnquoted;
1937
1938 if (!vmdkDescGetStr(pDescriptor, pDescriptor->uFirstDDB, pszKey,
1939 &pszValue))
1940 return VERR_VD_VMDK_VALUE_NOT_FOUND;
1941 int rc = vmdkStringUnquote(pImage, pszValue, &pszValueUnquoted, NULL);
1942 if (RT_FAILURE(rc))
1943 return rc;
1944 rc = RTStrToUInt32Ex(pszValueUnquoted, NULL, 10, puValue);
1945 RTMemTmpFree(pszValueUnquoted);
1946 return rc;
1947}
1948
1949static int vmdkDescDDBGetUuid(PVMDKIMAGE pImage, PVMDKDESCRIPTOR pDescriptor,
1950 const char *pszKey, PRTUUID pUuid)
1951{
1952 const char *pszValue;
1953 char *pszValueUnquoted;
1954
1955 if (!vmdkDescGetStr(pDescriptor, pDescriptor->uFirstDDB, pszKey,
1956 &pszValue))
1957 return VERR_VD_VMDK_VALUE_NOT_FOUND;
1958 int rc = vmdkStringUnquote(pImage, pszValue, &pszValueUnquoted, NULL);
1959 if (RT_FAILURE(rc))
1960 return rc;
1961 rc = RTUuidFromStr(pUuid, pszValueUnquoted);
1962 RTMemTmpFree(pszValueUnquoted);
1963 return rc;
1964}
1965
1966static int vmdkDescDDBSetStr(PVMDKIMAGE pImage, PVMDKDESCRIPTOR pDescriptor,
1967 const char *pszKey, const char *pszVal)
1968{
1969 int rc;
1970 char *pszValQuoted;
1971
1972 if (pszVal)
1973 {
1974 RTStrAPrintf(&pszValQuoted, "\"%s\"", pszVal);
1975 if (!pszValQuoted)
1976 return VERR_NO_STR_MEMORY;
1977 }
1978 else
1979 pszValQuoted = NULL;
1980 rc = vmdkDescSetStr(pImage, pDescriptor, pDescriptor->uFirstDDB, pszKey,
1981 pszValQuoted);
1982 if (pszValQuoted)
1983 RTStrFree(pszValQuoted);
1984 return rc;
1985}
1986
1987static int vmdkDescDDBSetUuid(PVMDKIMAGE pImage, PVMDKDESCRIPTOR pDescriptor,
1988 const char *pszKey, PCRTUUID pUuid)
1989{
1990 char *pszUuid;
1991
1992 RTStrAPrintf(&pszUuid, "\"%RTuuid\"", pUuid);
1993 if (!pszUuid)
1994 return VERR_NO_STR_MEMORY;
1995 int rc = vmdkDescSetStr(pImage, pDescriptor, pDescriptor->uFirstDDB, pszKey,
1996 pszUuid);
1997 RTStrFree(pszUuid);
1998 return rc;
1999}
2000
2001static int vmdkDescDDBSetU32(PVMDKIMAGE pImage, PVMDKDESCRIPTOR pDescriptor,
2002 const char *pszKey, uint32_t uValue)
2003{
2004 char *pszValue;
2005
2006 RTStrAPrintf(&pszValue, "\"%d\"", uValue);
2007 if (!pszValue)
2008 return VERR_NO_STR_MEMORY;
2009 int rc = vmdkDescSetStr(pImage, pDescriptor, pDescriptor->uFirstDDB, pszKey,
2010 pszValue);
2011 RTStrFree(pszValue);
2012 return rc;
2013}
2014
2015/**
2016 * Splits the descriptor data into individual lines checking for correct line
2017 * endings and descriptor size.
2018 *
2019 * @returns VBox status code.
2020 * @param pImage The image instance.
2021 * @param pDesc The descriptor.
2022 * @param pszTmp The raw descriptor data from the image.
2023 */
2024static int vmdkDescSplitLines(PVMDKIMAGE pImage, PVMDKDESCRIPTOR pDesc, char *pszTmp)
2025{
2026 unsigned cLine = 0;
2027 int rc = VINF_SUCCESS;
2028
2029 while ( RT_SUCCESS(rc)
2030 && *pszTmp != '\0')
2031 {
2032 pDesc->aLines[cLine++] = pszTmp;
2033 if (cLine >= VMDK_DESCRIPTOR_LINES_MAX)
2034 {
2035 vdIfError(pImage->pIfError, VERR_VD_VMDK_INVALID_HEADER, RT_SRC_POS, N_("VMDK: descriptor too big in '%s'"), pImage->pszFilename);
2036 rc = VERR_VD_VMDK_INVALID_HEADER;
2037 break;
2038 }
2039
2040 while (*pszTmp != '\0' && *pszTmp != '\n')
2041 {
2042 if (*pszTmp == '\r')
2043 {
2044 if (*(pszTmp + 1) != '\n')
2045 {
2046 rc = vdIfError(pImage->pIfError, VERR_VD_VMDK_INVALID_HEADER, RT_SRC_POS, N_("VMDK: unsupported end of line in descriptor in '%s'"), pImage->pszFilename);
2047 break;
2048 }
2049 else
2050 {
2051 /* Get rid of CR character. */
2052 *pszTmp = '\0';
2053 }
2054 }
2055 pszTmp++;
2056 }
2057
2058 if (RT_FAILURE(rc))
2059 break;
2060
2061 /* Get rid of LF character. */
2062 if (*pszTmp == '\n')
2063 {
2064 *pszTmp = '\0';
2065 pszTmp++;
2066 }
2067 }
2068
2069 if (RT_SUCCESS(rc))
2070 {
2071 pDesc->cLines = cLine;
2072 /* Pointer right after the end of the used part of the buffer. */
2073 pDesc->aLines[cLine] = pszTmp;
2074 }
2075
2076 return rc;
2077}
2078
/**
 * Preprocesses the raw descriptor: splits it into lines, verifies the
 * signature line and locates the first line of each of the three sections
 * (header settings, extent descriptions, disk data base), enforcing that
 * they appear in that order.
 *
 * @returns VBox status code.
 * @param   pImage       The VMDK image state (for error reporting).
 * @param   pDescData    The raw descriptor data, split up in place.
 * @param   cbDescData   Size of the descriptor buffer.
 * @param   pDescriptor  The descriptor state to initialize.
 */
static int vmdkPreprocessDescriptor(PVMDKIMAGE pImage, char *pDescData,
                                    size_t cbDescData, PVMDKDESCRIPTOR pDescriptor)
{
    pDescriptor->cbDescAlloc = cbDescData;
    int rc = vmdkDescSplitLines(pImage, pDescriptor, pDescData);
    if (RT_SUCCESS(rc))
    {
        /* The first line must be one of the known signature variants. */
        if (    strcmp(pDescriptor->aLines[0], "# Disk DescriptorFile")
            &&  strcmp(pDescriptor->aLines[0], "# Disk Descriptor File")
            &&  strcmp(pDescriptor->aLines[0], "#Disk Descriptor File")
            &&  strcmp(pDescriptor->aLines[0], "#Disk DescriptorFile"))
            rc = vdIfError(pImage->pIfError, VERR_VD_VMDK_INVALID_HEADER, RT_SRC_POS,
                           N_("VMDK: descriptor does not start as expected in '%s'"), pImage->pszFilename);
        else
        {
            unsigned uLastNonEmptyLine = 0;

            /* Initialize those, because we need to be able to reopen an image. */
            pDescriptor->uFirstDesc = 0;
            pDescriptor->uFirstExtent = 0;
            pDescriptor->uFirstDDB = 0;
            for (unsigned i = 0; i < pDescriptor->cLines; i++)
            {
                /* Skip comments and empty lines; classify everything else. */
                if (*pDescriptor->aLines[i] != '#' && *pDescriptor->aLines[i] != '\0')
                {
                    if (    !strncmp(pDescriptor->aLines[i], "RW", 2)
                        ||  !strncmp(pDescriptor->aLines[i], "RDONLY", 6)
                        ||  !strncmp(pDescriptor->aLines[i], "NOACCESS", 8) )
                    {
                        /* An extent descriptor. */
                        if (!pDescriptor->uFirstDesc || pDescriptor->uFirstDDB)
                        {
                            /* Incorrect ordering of entries. */
                            rc = vdIfError(pImage->pIfError, VERR_VD_VMDK_INVALID_HEADER, RT_SRC_POS,
                                           N_("VMDK: incorrect ordering of entries in descriptor in '%s'"), pImage->pszFilename);
                            break;
                        }
                        if (!pDescriptor->uFirstExtent)
                        {
                            pDescriptor->uFirstExtent = i;
                            uLastNonEmptyLine = 0;
                        }
                    }
                    else if (!strncmp(pDescriptor->aLines[i], "ddb.", 4))
                    {
                        /* A disk database entry. */
                        if (!pDescriptor->uFirstDesc || !pDescriptor->uFirstExtent)
                        {
                            /* Incorrect ordering of entries. */
                            rc = vdIfError(pImage->pIfError, VERR_VD_VMDK_INVALID_HEADER, RT_SRC_POS,
                                           N_("VMDK: incorrect ordering of entries in descriptor in '%s'"), pImage->pszFilename);
                            break;
                        }
                        if (!pDescriptor->uFirstDDB)
                        {
                            pDescriptor->uFirstDDB = i;
                            uLastNonEmptyLine = 0;
                        }
                    }
                    else
                    {
                        /* A normal entry. */
                        if (pDescriptor->uFirstExtent || pDescriptor->uFirstDDB)
                        {
                            /* Incorrect ordering of entries. */
                            rc = vdIfError(pImage->pIfError, VERR_VD_VMDK_INVALID_HEADER, RT_SRC_POS,
                                           N_("VMDK: incorrect ordering of entries in descriptor in '%s'"), pImage->pszFilename);
                            break;
                        }
                        if (!pDescriptor->uFirstDesc)
                        {
                            pDescriptor->uFirstDesc = i;
                            uLastNonEmptyLine = 0;
                        }
                    }
                    /* Chain this line to the previous non-empty line of the
                     * same section via aNextLines (reset at section starts). */
                    if (uLastNonEmptyLine)
                        pDescriptor->aNextLines[uLastNonEmptyLine] = i;
                    uLastNonEmptyLine = i;
                }
            }
        }
    }

    return rc;
}
2164
2165static int vmdkDescSetPCHSGeometry(PVMDKIMAGE pImage,
2166 PCVDGEOMETRY pPCHSGeometry)
2167{
2168 int rc = vmdkDescDDBSetU32(pImage, &pImage->Descriptor,
2169 VMDK_DDB_GEO_PCHS_CYLINDERS,
2170 pPCHSGeometry->cCylinders);
2171 if (RT_FAILURE(rc))
2172 return rc;
2173 rc = vmdkDescDDBSetU32(pImage, &pImage->Descriptor,
2174 VMDK_DDB_GEO_PCHS_HEADS,
2175 pPCHSGeometry->cHeads);
2176 if (RT_FAILURE(rc))
2177 return rc;
2178 rc = vmdkDescDDBSetU32(pImage, &pImage->Descriptor,
2179 VMDK_DDB_GEO_PCHS_SECTORS,
2180 pPCHSGeometry->cSectors);
2181 return rc;
2182}
2183
2184static int vmdkDescSetLCHSGeometry(PVMDKIMAGE pImage,
2185 PCVDGEOMETRY pLCHSGeometry)
2186{
2187 int rc = vmdkDescDDBSetU32(pImage, &pImage->Descriptor,
2188 VMDK_DDB_GEO_LCHS_CYLINDERS,
2189 pLCHSGeometry->cCylinders);
2190 if (RT_FAILURE(rc))
2191 return rc;
2192 rc = vmdkDescDDBSetU32(pImage, &pImage->Descriptor,
2193 VMDK_DDB_GEO_LCHS_HEADS,
2194
2195 pLCHSGeometry->cHeads);
2196 if (RT_FAILURE(rc))
2197 return rc;
2198 rc = vmdkDescDDBSetU32(pImage, &pImage->Descriptor,
2199 VMDK_DDB_GEO_LCHS_SECTORS,
2200 pLCHSGeometry->cSectors);
2201 return rc;
2202}
2203
2204static int vmdkCreateDescriptor(PVMDKIMAGE pImage, char *pDescData,
2205 size_t cbDescData, PVMDKDESCRIPTOR pDescriptor)
2206{
2207 pDescriptor->uFirstDesc = 0;
2208 pDescriptor->uFirstExtent = 0;
2209 pDescriptor->uFirstDDB = 0;
2210 pDescriptor->cLines = 0;
2211 pDescriptor->cbDescAlloc = cbDescData;
2212 pDescriptor->fDirty = false;
2213 pDescriptor->aLines[pDescriptor->cLines] = pDescData;
2214 memset(pDescriptor->aNextLines, '\0', sizeof(pDescriptor->aNextLines));
2215
2216 int rc = vmdkDescInitStr(pImage, pDescriptor, "# Disk DescriptorFile");
2217 if (RT_SUCCESS(rc))
2218 rc = vmdkDescInitStr(pImage, pDescriptor, "version=1");
2219 if (RT_SUCCESS(rc))
2220 {
2221 pDescriptor->uFirstDesc = pDescriptor->cLines - 1;
2222 rc = vmdkDescInitStr(pImage, pDescriptor, "");
2223 }
2224 if (RT_SUCCESS(rc))
2225 rc = vmdkDescInitStr(pImage, pDescriptor, "# Extent description");
2226 if (RT_SUCCESS(rc))
2227 rc = vmdkDescInitStr(pImage, pDescriptor, "NOACCESS 0 ZERO ");
2228 if (RT_SUCCESS(rc))
2229 {
2230 pDescriptor->uFirstExtent = pDescriptor->cLines - 1;
2231 rc = vmdkDescInitStr(pImage, pDescriptor, "");
2232 }
2233 if (RT_SUCCESS(rc))
2234 {
2235 /* The trailing space is created by VMware, too. */
2236 rc = vmdkDescInitStr(pImage, pDescriptor, "# The disk Data Base ");
2237 }
2238 if (RT_SUCCESS(rc))
2239 rc = vmdkDescInitStr(pImage, pDescriptor, "#DDB");
2240 if (RT_SUCCESS(rc))
2241 rc = vmdkDescInitStr(pImage, pDescriptor, "");
2242 if (RT_SUCCESS(rc))
2243 rc = vmdkDescInitStr(pImage, pDescriptor, "ddb.virtualHWVersion = \"4\"");
2244 if (RT_SUCCESS(rc))
2245 {
2246 pDescriptor->uFirstDDB = pDescriptor->cLines - 1;
2247
2248 /* Now that the framework is in place, use the normal functions to insert
2249 * the remaining keys. */
2250 char szBuf[9];
2251 RTStrPrintf(szBuf, sizeof(szBuf), "%08x", RTRandU32());
2252 rc = vmdkDescSetStr(pImage, pDescriptor, pDescriptor->uFirstDesc,
2253 "CID", szBuf);
2254 }
2255 if (RT_SUCCESS(rc))
2256 rc = vmdkDescSetStr(pImage, pDescriptor, pDescriptor->uFirstDesc,
2257 "parentCID", "ffffffff");
2258 if (RT_SUCCESS(rc))
2259 rc = vmdkDescDDBSetStr(pImage, pDescriptor, "ddb.adapterType", "ide");
2260
2261 return rc;
2262}
2263
/**
 * Internal: parses the whole descriptor and sets up the image state.
 *
 * Checks the format version (must be 1), derives image flags from the
 * createType value, parses every extent description line, and loads the
 * PCHS/LCHS geometry plus the image/modification/parent UUIDs from the
 * disk data base. Missing geometry is marked for later autogeneration;
 * missing UUIDs are generated and written back when the image is opened
 * in read/write mode.
 *
 * @returns VBox status code.
 * @param   pImage      The VMDK image state.
 * @param   pDescData   The raw descriptor data (split up in place).
 * @param   cbDescData  Size of the raw descriptor data.
 */
static int vmdkParseDescriptor(PVMDKIMAGE pImage, char *pDescData, size_t cbDescData)
{
    int rc;
    unsigned cExtents;
    unsigned uLine;
    unsigned i;

    rc = vmdkPreprocessDescriptor(pImage, pDescData, cbDescData,
                                  &pImage->Descriptor);
    if (RT_FAILURE(rc))
        return rc;

    /* Check version, must be 1. */
    uint32_t uVersion;
    rc = vmdkDescBaseGetU32(&pImage->Descriptor, "version", &uVersion);
    if (RT_FAILURE(rc))
        return vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: error finding key 'version' in descriptor in '%s'"), pImage->pszFilename);
    if (uVersion != 1)
        return vdIfError(pImage->pIfError, VERR_VD_VMDK_UNSUPPORTED_VERSION, RT_SRC_POS, N_("VMDK: unsupported format version in descriptor in '%s'"), pImage->pszFilename);

    /* Get image creation type and determine image flags. */
    char *pszCreateType = NULL; /* initialized to make gcc shut up */
    rc = vmdkDescBaseGetStr(pImage, &pImage->Descriptor, "createType",
                            &pszCreateType);
    if (RT_FAILURE(rc))
        return vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: cannot get image type from descriptor in '%s'"), pImage->pszFilename);
    if (    !strcmp(pszCreateType, "twoGbMaxExtentSparse")
        ||  !strcmp(pszCreateType, "twoGbMaxExtentFlat"))
        pImage->uImageFlags |= VD_VMDK_IMAGE_FLAGS_SPLIT_2G;
    else if (   !strcmp(pszCreateType, "partitionedDevice")
             || !strcmp(pszCreateType, "fullDevice"))
        pImage->uImageFlags |= VD_VMDK_IMAGE_FLAGS_RAWDISK;
    else if (!strcmp(pszCreateType, "streamOptimized"))
        pImage->uImageFlags |= VD_VMDK_IMAGE_FLAGS_STREAM_OPTIMIZED;
    else if (!strcmp(pszCreateType, "vmfs"))
        pImage->uImageFlags |= VD_IMAGE_FLAGS_FIXED | VD_VMDK_IMAGE_FLAGS_ESX;
    RTMemTmpFree(pszCreateType);

    /* Count the number of extent config entries. */
    for (uLine = pImage->Descriptor.uFirstExtent, cExtents = 0;
         uLine != 0;
         uLine = pImage->Descriptor.aNextLines[uLine], cExtents++)
        /* nothing */;

    if (!pImage->pDescData && cExtents != 1)
    {
        /* Monolithic image, must have only one extent (already opened). */
        return vdIfError(pImage->pIfError, VERR_VD_VMDK_INVALID_HEADER, RT_SRC_POS, N_("VMDK: monolithic image may only have one extent in '%s'"), pImage->pszFilename);
    }

    if (pImage->pDescData)
    {
        /* Non-monolithic image, extents need to be allocated. */
        rc = vmdkCreateExtents(pImage, cExtents);
        if (RT_FAILURE(rc))
            return rc;
    }

    /* Parse each extent line: "<ACCESS> <size> <TYPE> [\"basename\" [offset]]". */
    for (i = 0, uLine = pImage->Descriptor.uFirstExtent;
         i < cExtents; i++, uLine = pImage->Descriptor.aNextLines[uLine])
    {
        char *pszLine = pImage->Descriptor.aLines[uLine];

        /* Access type of the extent. */
        if (!strncmp(pszLine, "RW", 2))
        {
            pImage->pExtents[i].enmAccess = VMDKACCESS_READWRITE;
            pszLine += 2;
        }
        else if (!strncmp(pszLine, "RDONLY", 6))
        {
            pImage->pExtents[i].enmAccess = VMDKACCESS_READONLY;
            pszLine += 6;
        }
        else if (!strncmp(pszLine, "NOACCESS", 8))
        {
            pImage->pExtents[i].enmAccess = VMDKACCESS_NOACCESS;
            pszLine += 8;
        }
        else
            return vdIfError(pImage->pIfError, VERR_VD_VMDK_INVALID_HEADER, RT_SRC_POS, N_("VMDK: parse error in extent description in '%s'"), pImage->pszFilename);
        if (*pszLine++ != ' ')
            return vdIfError(pImage->pIfError, VERR_VD_VMDK_INVALID_HEADER, RT_SRC_POS, N_("VMDK: parse error in extent description in '%s'"), pImage->pszFilename);

        /* Nominal size of the extent. */
        rc = RTStrToUInt64Ex(pszLine, &pszLine, 10,
                             &pImage->pExtents[i].cNominalSectors);
        if (RT_FAILURE(rc))
            return vdIfError(pImage->pIfError, VERR_VD_VMDK_INVALID_HEADER, RT_SRC_POS, N_("VMDK: parse error in extent description in '%s'"), pImage->pszFilename);
        if (*pszLine++ != ' ')
            return vdIfError(pImage->pIfError, VERR_VD_VMDK_INVALID_HEADER, RT_SRC_POS, N_("VMDK: parse error in extent description in '%s'"), pImage->pszFilename);

        /* Type of the extent. */
        if (!strncmp(pszLine, "SPARSE", 6))
        {
            pImage->pExtents[i].enmType = VMDKETYPE_HOSTED_SPARSE;
            pszLine += 6;
        }
        else if (!strncmp(pszLine, "FLAT", 4))
        {
            pImage->pExtents[i].enmType = VMDKETYPE_FLAT;
            pszLine += 4;
        }
        else if (!strncmp(pszLine, "ZERO", 4))
        {
            pImage->pExtents[i].enmType = VMDKETYPE_ZERO;
            pszLine += 4;
        }
        else if (!strncmp(pszLine, "VMFS", 4))
        {
            pImage->pExtents[i].enmType = VMDKETYPE_VMFS;
            pszLine += 4;
        }
        else
            return vdIfError(pImage->pIfError, VERR_VD_VMDK_INVALID_HEADER, RT_SRC_POS, N_("VMDK: parse error in extent description in '%s'"), pImage->pszFilename);

        if (pImage->pExtents[i].enmType == VMDKETYPE_ZERO)
        {
            /* This one has no basename or offset. */
            if (*pszLine == ' ')
                pszLine++;
            if (*pszLine != '\0')
                return vdIfError(pImage->pIfError, VERR_VD_VMDK_INVALID_HEADER, RT_SRC_POS, N_("VMDK: parse error in extent description in '%s'"), pImage->pszFilename);
            pImage->pExtents[i].pszBasename = NULL;
        }
        else
        {
            /* All other extent types have basename and optional offset. */
            if (*pszLine++ != ' ')
                return vdIfError(pImage->pIfError, VERR_VD_VMDK_INVALID_HEADER, RT_SRC_POS, N_("VMDK: parse error in extent description in '%s'"), pImage->pszFilename);

            /* Basename of the image. Surrounded by quotes. */
            char *pszBasename;
            rc = vmdkStringUnquote(pImage, pszLine, &pszBasename, &pszLine);
            if (RT_FAILURE(rc))
                return rc;
            pImage->pExtents[i].pszBasename = pszBasename;
            if (*pszLine == ' ')
            {
                pszLine++;
                if (*pszLine != '\0')
                {
                    /* Optional offset in extent specified. */
                    rc = RTStrToUInt64Ex(pszLine, &pszLine, 10,
                                         &pImage->pExtents[i].uSectorOffset);
                    if (RT_FAILURE(rc))
                        return vdIfError(pImage->pIfError, VERR_VD_VMDK_INVALID_HEADER, RT_SRC_POS, N_("VMDK: parse error in extent description in '%s'"), pImage->pszFilename);
                }
            }

            if (*pszLine != '\0')
                return vdIfError(pImage->pIfError, VERR_VD_VMDK_INVALID_HEADER, RT_SRC_POS, N_("VMDK: parse error in extent description in '%s'"), pImage->pszFilename);
        }
    }

    /* Determine PCHS geometry (autogenerate if necessary). */
    rc = vmdkDescDDBGetU32(pImage, &pImage->Descriptor,
                           VMDK_DDB_GEO_PCHS_CYLINDERS,
                           &pImage->PCHSGeometry.cCylinders);
    if (rc == VERR_VD_VMDK_VALUE_NOT_FOUND)
        pImage->PCHSGeometry.cCylinders = 0;
    else if (RT_FAILURE(rc))
        return vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: error getting PCHS geometry from extent description in '%s'"), pImage->pszFilename);
    rc = vmdkDescDDBGetU32(pImage, &pImage->Descriptor,
                           VMDK_DDB_GEO_PCHS_HEADS,
                           &pImage->PCHSGeometry.cHeads);
    if (rc == VERR_VD_VMDK_VALUE_NOT_FOUND)
        pImage->PCHSGeometry.cHeads = 0;
    else if (RT_FAILURE(rc))
        return vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: error getting PCHS geometry from extent description in '%s'"), pImage->pszFilename);
    rc = vmdkDescDDBGetU32(pImage, &pImage->Descriptor,
                           VMDK_DDB_GEO_PCHS_SECTORS,
                           &pImage->PCHSGeometry.cSectors);
    if (rc == VERR_VD_VMDK_VALUE_NOT_FOUND)
        pImage->PCHSGeometry.cSectors = 0;
    else if (RT_FAILURE(rc))
        return vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: error getting PCHS geometry from extent description in '%s'"), pImage->pszFilename);
    /* Sanity-check the physical geometry against BIOS limits (16 heads, 63 sectors). */
    if (    pImage->PCHSGeometry.cCylinders == 0
        ||  pImage->PCHSGeometry.cHeads == 0
        ||  pImage->PCHSGeometry.cHeads > 16
        ||  pImage->PCHSGeometry.cSectors == 0
        ||  pImage->PCHSGeometry.cSectors > 63)
    {
        /* Mark PCHS geometry as not yet valid (can't do the calculation here
         * as the total image size isn't known yet). */
        pImage->PCHSGeometry.cCylinders = 0;
        pImage->PCHSGeometry.cHeads = 16;
        pImage->PCHSGeometry.cSectors = 63;
    }

    /* Determine LCHS geometry (set to 0 if not specified). */
    rc = vmdkDescDDBGetU32(pImage, &pImage->Descriptor,
                           VMDK_DDB_GEO_LCHS_CYLINDERS,
                           &pImage->LCHSGeometry.cCylinders);
    if (rc == VERR_VD_VMDK_VALUE_NOT_FOUND)
        pImage->LCHSGeometry.cCylinders = 0;
    else if (RT_FAILURE(rc))
        return vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: error getting LCHS geometry from extent description in '%s'"), pImage->pszFilename);
    rc = vmdkDescDDBGetU32(pImage, &pImage->Descriptor,
                           VMDK_DDB_GEO_LCHS_HEADS,
                           &pImage->LCHSGeometry.cHeads);
    if (rc == VERR_VD_VMDK_VALUE_NOT_FOUND)
        pImage->LCHSGeometry.cHeads = 0;
    else if (RT_FAILURE(rc))
        return vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: error getting LCHS geometry from extent description in '%s'"), pImage->pszFilename);
    rc = vmdkDescDDBGetU32(pImage, &pImage->Descriptor,
                           VMDK_DDB_GEO_LCHS_SECTORS,
                           &pImage->LCHSGeometry.cSectors);
    if (rc == VERR_VD_VMDK_VALUE_NOT_FOUND)
        pImage->LCHSGeometry.cSectors = 0;
    else if (RT_FAILURE(rc))
        return vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: error getting LCHS geometry from extent description in '%s'"), pImage->pszFilename);
    /* All three values must be present for the logical geometry to count. */
    if (    pImage->LCHSGeometry.cCylinders == 0
        ||  pImage->LCHSGeometry.cHeads == 0
        ||  pImage->LCHSGeometry.cSectors == 0)
    {
        pImage->LCHSGeometry.cCylinders = 0;
        pImage->LCHSGeometry.cHeads = 0;
        pImage->LCHSGeometry.cSectors = 0;
    }

    /* Get image UUID. */
    rc = vmdkDescDDBGetUuid(pImage, &pImage->Descriptor, VMDK_DDB_IMAGE_UUID,
                            &pImage->ImageUuid);
    if (rc == VERR_VD_VMDK_VALUE_NOT_FOUND)
    {
        /* Image without UUID. Probably created by VMware and not yet used
         * by VirtualBox. Can only be added for images opened in read/write
         * mode, so don't bother producing a sensible UUID otherwise. */
        if (pImage->uOpenFlags & VD_OPEN_FLAGS_READONLY)
            RTUuidClear(&pImage->ImageUuid);
        else
        {
            rc = RTUuidCreate(&pImage->ImageUuid);
            if (RT_FAILURE(rc))
                return rc;
            rc = vmdkDescDDBSetUuid(pImage, &pImage->Descriptor,
                                    VMDK_DDB_IMAGE_UUID, &pImage->ImageUuid);
            if (RT_FAILURE(rc))
                return vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: error storing image UUID in descriptor in '%s'"), pImage->pszFilename);
        }
    }
    else if (RT_FAILURE(rc))
        return rc;

    /* Get image modification UUID. */
    rc = vmdkDescDDBGetUuid(pImage, &pImage->Descriptor,
                            VMDK_DDB_MODIFICATION_UUID,
                            &pImage->ModificationUuid);
    if (rc == VERR_VD_VMDK_VALUE_NOT_FOUND)
    {
        /* Image without UUID. Probably created by VMware and not yet used
         * by VirtualBox. Can only be added for images opened in read/write
         * mode, so don't bother producing a sensible UUID otherwise. */
        if (pImage->uOpenFlags & VD_OPEN_FLAGS_READONLY)
            RTUuidClear(&pImage->ModificationUuid);
        else
        {
            rc = RTUuidCreate(&pImage->ModificationUuid);
            if (RT_FAILURE(rc))
                return rc;
            rc = vmdkDescDDBSetUuid(pImage, &pImage->Descriptor,
                                    VMDK_DDB_MODIFICATION_UUID,
                                    &pImage->ModificationUuid);
            if (RT_FAILURE(rc))
                return vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: error storing image modification UUID in descriptor in '%s'"), pImage->pszFilename);
        }
    }
    else if (RT_FAILURE(rc))
        return rc;

    /* Get UUID of parent image. */
    rc = vmdkDescDDBGetUuid(pImage, &pImage->Descriptor, VMDK_DDB_PARENT_UUID,
                            &pImage->ParentUuid);
    if (rc == VERR_VD_VMDK_VALUE_NOT_FOUND)
    {
        /* Image without UUID. Probably created by VMware and not yet used
         * by VirtualBox. Can only be added for images opened in read/write
         * mode, so don't bother producing a sensible UUID otherwise. */
        if (pImage->uOpenFlags & VD_OPEN_FLAGS_READONLY)
            RTUuidClear(&pImage->ParentUuid);
        else
        {
            rc = RTUuidClear(&pImage->ParentUuid);
            if (RT_FAILURE(rc))
                return rc;
            rc = vmdkDescDDBSetUuid(pImage, &pImage->Descriptor,
                                    VMDK_DDB_PARENT_UUID, &pImage->ParentUuid);
            if (RT_FAILURE(rc))
                return vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: error storing parent UUID in descriptor in '%s'"), pImage->pszFilename);
        }
    }
    else if (RT_FAILURE(rc))
        return rc;

    /* Get parent image modification UUID. */
    rc = vmdkDescDDBGetUuid(pImage, &pImage->Descriptor,
                            VMDK_DDB_PARENT_MODIFICATION_UUID,
                            &pImage->ParentModificationUuid);
    if (rc == VERR_VD_VMDK_VALUE_NOT_FOUND)
    {
        /* Image without UUID. Probably created by VMware and not yet used
         * by VirtualBox. Can only be added for images opened in read/write
         * mode, so don't bother producing a sensible UUID otherwise. */
        if (pImage->uOpenFlags & VD_OPEN_FLAGS_READONLY)
            RTUuidClear(&pImage->ParentModificationUuid);
        else
        {
            RTUuidClear(&pImage->ParentModificationUuid);
            rc = vmdkDescDDBSetUuid(pImage, &pImage->Descriptor,
                                    VMDK_DDB_PARENT_MODIFICATION_UUID,
                                    &pImage->ParentModificationUuid);
            if (RT_FAILURE(rc))
                return vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: error storing parent modification UUID in descriptor in '%s'"), pImage->pszFilename);
        }
    }
    else if (RT_FAILURE(rc))
        return rc;

    return VINF_SUCCESS;
}
2585
2586/**
2587 * Internal : Prepares the descriptor to write to the image.
2588 */
2589static int vmdkDescriptorPrepare(PVMDKIMAGE pImage, uint64_t cbLimit,
2590 void **ppvData, size_t *pcbData)
2591{
2592 int rc = VINF_SUCCESS;
2593
2594 /*
2595 * Allocate temporary descriptor buffer.
2596 * In case there is no limit allocate a default
2597 * and increase if required.
2598 */
2599 size_t cbDescriptor = cbLimit ? cbLimit : 4 * _1K;
2600 char *pszDescriptor = (char *)RTMemAllocZ(cbDescriptor);
2601 size_t offDescriptor = 0;
2602
2603 if (!pszDescriptor)
2604 return VERR_NO_MEMORY;
2605
2606 for (unsigned i = 0; i < pImage->Descriptor.cLines; i++)
2607 {
2608 const char *psz = pImage->Descriptor.aLines[i];
2609 size_t cb = strlen(psz);
2610
2611 /*
2612 * Increase the descriptor if there is no limit and
2613 * there is not enough room left for this line.
2614 */
2615 if (offDescriptor + cb + 1 > cbDescriptor)
2616 {
2617 if (cbLimit)
2618 {
2619 rc = vdIfError(pImage->pIfError, VERR_BUFFER_OVERFLOW, RT_SRC_POS, N_("VMDK: descriptor too long in '%s'"), pImage->pszFilename);
2620 break;
2621 }
2622 else
2623 {
2624 char *pszDescriptorNew = NULL;
2625 LogFlow(("Increasing descriptor cache\n"));
2626
2627 pszDescriptorNew = (char *)RTMemRealloc(pszDescriptor, cbDescriptor + cb + 4 * _1K);
2628 if (!pszDescriptorNew)
2629 {
2630 rc = VERR_NO_MEMORY;
2631 break;
2632 }
2633 pszDescriptor = pszDescriptorNew;
2634 cbDescriptor += cb + 4 * _1K;
2635 }
2636 }
2637
2638 if (cb > 0)
2639 {
2640 memcpy(pszDescriptor + offDescriptor, psz, cb);
2641 offDescriptor += cb;
2642 }
2643
2644 memcpy(pszDescriptor + offDescriptor, "\n", 1);
2645 offDescriptor++;
2646 }
2647
2648 if (RT_SUCCESS(rc))
2649 {
2650 *ppvData = pszDescriptor;
2651 *pcbData = offDescriptor;
2652 }
2653 else if (pszDescriptor)
2654 RTMemFree(pszDescriptor);
2655
2656 return rc;
2657}
2658
2659/**
2660 * Internal: write/update the descriptor part of the image.
2661 */
2662static int vmdkWriteDescriptor(PVMDKIMAGE pImage, PVDIOCTX pIoCtx)
2663{
2664 int rc = VINF_SUCCESS;
2665 uint64_t cbLimit;
2666 uint64_t uOffset;
2667 PVMDKFILE pDescFile;
2668 void *pvDescriptor = NULL;
2669 size_t cbDescriptor;
2670
2671 if (pImage->pDescData)
2672 {
2673 /* Separate descriptor file. */
2674 uOffset = 0;
2675 cbLimit = 0;
2676 pDescFile = pImage->pFile;
2677 }
2678 else
2679 {
2680 /* Embedded descriptor file. */
2681 uOffset = VMDK_SECTOR2BYTE(pImage->pExtents[0].uDescriptorSector);
2682 cbLimit = VMDK_SECTOR2BYTE(pImage->pExtents[0].cDescriptorSectors);
2683 pDescFile = pImage->pExtents[0].pFile;
2684 }
2685 /* Bail out if there is no file to write to. */
2686 if (pDescFile == NULL)
2687 return VERR_INVALID_PARAMETER;
2688
2689 rc = vmdkDescriptorPrepare(pImage, cbLimit, &pvDescriptor, &cbDescriptor);
2690 if (RT_SUCCESS(rc))
2691 {
2692 rc = vdIfIoIntFileWriteMeta(pImage->pIfIo, pDescFile->pStorage,
2693 uOffset, pvDescriptor,
2694 cbLimit ? cbLimit : cbDescriptor,
2695 pIoCtx, NULL, NULL);
2696 if ( RT_FAILURE(rc)
2697 && rc != VERR_VD_ASYNC_IO_IN_PROGRESS)
2698 rc = vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: error writing descriptor in '%s'"), pImage->pszFilename);
2699 }
2700
2701 if (RT_SUCCESS(rc) && !cbLimit)
2702 {
2703 rc = vdIfIoIntFileSetSize(pImage->pIfIo, pDescFile->pStorage, cbDescriptor);
2704 if (RT_FAILURE(rc))
2705 rc = vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: error truncating descriptor in '%s'"), pImage->pszFilename);
2706 }
2707
2708 if (RT_SUCCESS(rc))
2709 pImage->Descriptor.fDirty = false;
2710
2711 if (pvDescriptor)
2712 RTMemFree(pvDescriptor);
2713 return rc;
2714
2715}
2716
2717/**
2718 * Internal: validate the consistency check values in a binary header.
2719 */
2720static int vmdkValidateHeader(PVMDKIMAGE pImage, PVMDKEXTENT pExtent, const SparseExtentHeader *pHeader)
2721{
2722 int rc = VINF_SUCCESS;
2723 if (RT_LE2H_U32(pHeader->magicNumber) != VMDK_SPARSE_MAGICNUMBER)
2724 {
2725 rc = vdIfError(pImage->pIfError, VERR_VD_VMDK_INVALID_HEADER, RT_SRC_POS, N_("VMDK: incorrect magic in sparse extent header in '%s'"), pExtent->pszFullname);
2726 return rc;
2727 }
2728 if (RT_LE2H_U32(pHeader->version) != 1 && RT_LE2H_U32(pHeader->version) != 3)
2729 {
2730 rc = vdIfError(pImage->pIfError, VERR_VD_VMDK_UNSUPPORTED_VERSION, RT_SRC_POS, N_("VMDK: incorrect version in sparse extent header in '%s', not a VMDK 1.0/1.1 conforming file"), pExtent->pszFullname);
2731 return rc;
2732 }
2733 if ( (RT_LE2H_U32(pHeader->flags) & 1)
2734 && ( pHeader->singleEndLineChar != '\n'
2735 || pHeader->nonEndLineChar != ' '
2736 || pHeader->doubleEndLineChar1 != '\r'
2737 || pHeader->doubleEndLineChar2 != '\n') )
2738 {
2739 rc = vdIfError(pImage->pIfError, VERR_VD_VMDK_INVALID_HEADER, RT_SRC_POS, N_("VMDK: corrupted by CR/LF translation in '%s'"), pExtent->pszFullname);
2740 return rc;
2741 }
2742 if (RT_LE2H_U64(pHeader->descriptorSize) > VMDK_SPARSE_DESCRIPTOR_SIZE_MAX)
2743 {
2744 rc = vdIfError(pImage->pIfError, VERR_VD_VMDK_INVALID_HEADER, RT_SRC_POS, N_("VMDK: descriptor size out of bounds (%llu vs %llu) '%s'"),
2745 pExtent->pszFullname, RT_LE2H_U64(pHeader->descriptorSize), VMDK_SPARSE_DESCRIPTOR_SIZE_MAX);
2746 return rc;
2747 }
2748 return rc;
2749}
2750
2751/**
2752 * Internal: read metadata belonging to an extent with binary header, i.e.
2753 * as found in monolithic files.
2754 */
2755static int vmdkReadBinaryMetaExtent(PVMDKIMAGE pImage, PVMDKEXTENT pExtent,
2756 bool fMagicAlreadyRead)
2757{
2758 SparseExtentHeader Header;
2759 int rc;
2760
2761 if (!fMagicAlreadyRead)
2762 rc = vdIfIoIntFileReadSync(pImage->pIfIo, pExtent->pFile->pStorage, 0,
2763 &Header, sizeof(Header));
2764 else
2765 {
2766 Header.magicNumber = RT_H2LE_U32(VMDK_SPARSE_MAGICNUMBER);
2767 rc = vdIfIoIntFileReadSync(pImage->pIfIo, pExtent->pFile->pStorage,
2768 RT_UOFFSETOF(SparseExtentHeader, version),
2769 &Header.version,
2770 sizeof(Header)
2771 - RT_UOFFSETOF(SparseExtentHeader, version));
2772 }
2773
2774 if (RT_SUCCESS(rc))
2775 {
2776 rc = vmdkValidateHeader(pImage, pExtent, &Header);
2777 if (RT_SUCCESS(rc))
2778 {
2779 uint64_t cbFile = 0;
2780
2781 if ( (RT_LE2H_U32(Header.flags) & RT_BIT(17))
2782 && RT_LE2H_U64(Header.gdOffset) == VMDK_GD_AT_END)
2783 pExtent->fFooter = true;
2784
2785 if ( !(pImage->uOpenFlags & VD_OPEN_FLAGS_READONLY)
2786 || ( pExtent->fFooter
2787 && !(pImage->uOpenFlags & VD_OPEN_FLAGS_SEQUENTIAL)))
2788 {
2789 rc = vdIfIoIntFileGetSize(pImage->pIfIo, pExtent->pFile->pStorage, &cbFile);
2790 if (RT_FAILURE(rc))
2791 rc = vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: cannot get size of '%s'"), pExtent->pszFullname);
2792 }
2793
2794 if (RT_SUCCESS(rc))
2795 {
2796 if (!(pImage->uOpenFlags & VD_OPEN_FLAGS_READONLY))
2797 pExtent->uAppendPosition = RT_ALIGN_64(cbFile, 512);
2798
2799 if ( pExtent->fFooter
2800 && ( !(pImage->uOpenFlags & VD_OPEN_FLAGS_READONLY)
2801 || !(pImage->uOpenFlags & VD_OPEN_FLAGS_SEQUENTIAL)))
2802 {
2803 /* Read the footer, which comes before the end-of-stream marker. */
2804 rc = vdIfIoIntFileReadSync(pImage->pIfIo, pExtent->pFile->pStorage,
2805 cbFile - 2*512, &Header,
2806 sizeof(Header));
2807 if (RT_FAILURE(rc))
2808 {
2809 vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: error reading extent footer in '%s'"), pExtent->pszFullname);
2810 rc = VERR_VD_VMDK_INVALID_HEADER;
2811 }
2812
2813 if (RT_SUCCESS(rc))
2814 rc = vmdkValidateHeader(pImage, pExtent, &Header);
2815 /* Prohibit any writes to this extent. */
2816 pExtent->uAppendPosition = 0;
2817 }
2818
2819 if (RT_SUCCESS(rc))
2820 {
2821 pExtent->uVersion = RT_LE2H_U32(Header.version);
2822 pExtent->enmType = VMDKETYPE_HOSTED_SPARSE; /* Just dummy value, changed later. */
2823 pExtent->cSectors = RT_LE2H_U64(Header.capacity);
2824 pExtent->cSectorsPerGrain = RT_LE2H_U64(Header.grainSize);
2825 pExtent->uDescriptorSector = RT_LE2H_U64(Header.descriptorOffset);
2826 pExtent->cDescriptorSectors = RT_LE2H_U64(Header.descriptorSize);
2827 pExtent->cGTEntries = RT_LE2H_U32(Header.numGTEsPerGT);
2828 pExtent->cOverheadSectors = RT_LE2H_U64(Header.overHead);
2829 pExtent->fUncleanShutdown = !!Header.uncleanShutdown;
2830 pExtent->uCompression = RT_LE2H_U16(Header.compressAlgorithm);
2831 if (RT_LE2H_U32(Header.flags) & RT_BIT(1))
2832 {
2833 pExtent->uSectorRGD = RT_LE2H_U64(Header.rgdOffset);
2834 pExtent->uSectorGD = RT_LE2H_U64(Header.gdOffset);
2835 }
2836 else
2837 {
2838 pExtent->uSectorGD = RT_LE2H_U64(Header.gdOffset);
2839 pExtent->uSectorRGD = 0;
2840 }
2841
2842 if (pExtent->uDescriptorSector && !pExtent->cDescriptorSectors)
2843 rc = vdIfError(pImage->pIfError, VERR_VD_VMDK_INVALID_HEADER, RT_SRC_POS,
2844 N_("VMDK: inconsistent embedded descriptor config in '%s'"), pExtent->pszFullname);
2845
2846 if ( RT_SUCCESS(rc)
2847 && ( pExtent->uSectorGD == VMDK_GD_AT_END
2848 || pExtent->uSectorRGD == VMDK_GD_AT_END)
2849 && ( !(pImage->uOpenFlags & VD_OPEN_FLAGS_READONLY)
2850 || !(pImage->uOpenFlags & VD_OPEN_FLAGS_SEQUENTIAL)))
2851 rc = vdIfError(pImage->pIfError, VERR_VD_VMDK_INVALID_HEADER, RT_SRC_POS,
2852 N_("VMDK: cannot resolve grain directory offset in '%s'"), pExtent->pszFullname);
2853
2854 if (RT_SUCCESS(rc))
2855 {
2856 uint64_t cSectorsPerGDE = pExtent->cGTEntries * pExtent->cSectorsPerGrain;
2857 if (!cSectorsPerGDE || cSectorsPerGDE > UINT32_MAX)
2858 rc = vdIfError(pImage->pIfError, VERR_VD_VMDK_INVALID_HEADER, RT_SRC_POS,
2859 N_("VMDK: incorrect grain directory size in '%s'"), pExtent->pszFullname);
2860 else
2861 {
2862 pExtent->cSectorsPerGDE = cSectorsPerGDE;
2863 pExtent->cGDEntries = (pExtent->cSectors + cSectorsPerGDE - 1) / cSectorsPerGDE;
2864
2865 /* Fix up the number of descriptor sectors, as some flat images have
2866 * really just one, and this causes failures when inserting the UUID
2867 * values and other extra information. */
2868 if (pExtent->cDescriptorSectors != 0 && pExtent->cDescriptorSectors < 4)
2869 {
2870 /* Do it the easy way - just fix it for flat images which have no
2871 * other complicated metadata which needs space too. */
2872 if ( pExtent->uDescriptorSector + 4 < pExtent->cOverheadSectors
2873 && pExtent->cGTEntries * pExtent->cGDEntries == 0)
2874 pExtent->cDescriptorSectors = 4;
2875 }
2876 }
2877 }
2878 }
2879 }
2880 }
2881 }
2882 else
2883 {
2884 vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: error reading extent header in '%s'"), pExtent->pszFullname);
2885 rc = VERR_VD_VMDK_INVALID_HEADER;
2886 }
2887
2888 if (RT_FAILURE(rc))
2889 vmdkFreeExtentData(pImage, pExtent, false);
2890
2891 return rc;
2892}
2893
2894/**
2895 * Internal: read additional metadata belonging to an extent. For those
2896 * extents which have no additional metadata just verify the information.
2897 */
2898static int vmdkReadMetaExtent(PVMDKIMAGE pImage, PVMDKEXTENT pExtent)
2899{
2900 int rc = VINF_SUCCESS;
2901
2902/* disabled the check as there are too many truncated vmdk images out there */
2903#ifdef VBOX_WITH_VMDK_STRICT_SIZE_CHECK
2904 uint64_t cbExtentSize;
2905 /* The image must be a multiple of a sector in size and contain the data
2906 * area (flat images only). If not, it means the image is at least
2907 * truncated, or even seriously garbled. */
2908 rc = vdIfIoIntFileGetSize(pImage->pIfIo, pExtent->pFile->pStorage, &cbExtentSize);
2909 if (RT_FAILURE(rc))
2910 rc = vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: error getting size in '%s'"), pExtent->pszFullname);
2911 else if ( cbExtentSize != RT_ALIGN_64(cbExtentSize, 512)
2912 && (pExtent->enmType != VMDKETYPE_FLAT || pExtent->cNominalSectors + pExtent->uSectorOffset > VMDK_BYTE2SECTOR(cbExtentSize)))
2913 rc = vdIfError(pImage->pIfError, VERR_VD_VMDK_INVALID_HEADER, RT_SRC_POS,
2914 N_("VMDK: file size is not a multiple of 512 in '%s', file is truncated or otherwise garbled"), pExtent->pszFullname);
2915#endif /* VBOX_WITH_VMDK_STRICT_SIZE_CHECK */
2916 if ( RT_SUCCESS(rc)
2917 && pExtent->enmType == VMDKETYPE_HOSTED_SPARSE)
2918 {
2919 /* The spec says that this must be a power of two and greater than 8,
2920 * but probably they meant not less than 8. */
2921 if ( (pExtent->cSectorsPerGrain & (pExtent->cSectorsPerGrain - 1))
2922 || pExtent->cSectorsPerGrain < 8)
2923 rc = vdIfError(pImage->pIfError, VERR_VD_VMDK_INVALID_HEADER, RT_SRC_POS,
2924 N_("VMDK: invalid extent grain size %u in '%s'"), pExtent->cSectorsPerGrain, pExtent->pszFullname);
2925 else
2926 {
2927 /* This code requires that a grain table must hold a power of two multiple
2928 * of the number of entries per GT cache entry. */
2929 if ( (pExtent->cGTEntries & (pExtent->cGTEntries - 1))
2930 || pExtent->cGTEntries < VMDK_GT_CACHELINE_SIZE)
2931 rc = vdIfError(pImage->pIfError, VERR_VD_VMDK_INVALID_HEADER, RT_SRC_POS,
2932 N_("VMDK: grain table cache size problem in '%s'"), pExtent->pszFullname);
2933 else
2934 {
2935 rc = vmdkAllocStreamBuffers(pImage, pExtent);
2936 if (RT_SUCCESS(rc))
2937 {
2938 /* Prohibit any writes to this streamOptimized extent. */
2939 if (pImage->uImageFlags & VD_VMDK_IMAGE_FLAGS_STREAM_OPTIMIZED)
2940 pExtent->uAppendPosition = 0;
2941
2942 if ( !(pImage->uImageFlags & VD_VMDK_IMAGE_FLAGS_STREAM_OPTIMIZED)
2943 || !(pImage->uOpenFlags & VD_OPEN_FLAGS_READONLY)
2944 || !(pImage->uOpenFlags & VD_OPEN_FLAGS_SEQUENTIAL))
2945 rc = vmdkReadGrainDirectory(pImage, pExtent);
2946 else
2947 {
2948 pExtent->uGrainSectorAbs = pExtent->cOverheadSectors;
2949 pExtent->cbGrainStreamRead = 0;
2950 }
2951 }
2952 }
2953 }
2954 }
2955
2956 if (RT_FAILURE(rc))
2957 vmdkFreeExtentData(pImage, pExtent, false);
2958
2959 return rc;
2960}
2961
2962/**
2963 * Internal: write/update the metadata for a sparse extent.
2964 */
2965static int vmdkWriteMetaSparseExtent(PVMDKIMAGE pImage, PVMDKEXTENT pExtent,
2966 uint64_t uOffset, PVDIOCTX pIoCtx)
2967{
2968 SparseExtentHeader Header;
2969
2970 memset(&Header, '\0', sizeof(Header));
2971 Header.magicNumber = RT_H2LE_U32(VMDK_SPARSE_MAGICNUMBER);
2972 Header.version = RT_H2LE_U32(pExtent->uVersion);
2973 Header.flags = RT_H2LE_U32(RT_BIT(0));
2974 if (pExtent->pRGD)
2975 Header.flags |= RT_H2LE_U32(RT_BIT(1));
2976 if (pImage->uImageFlags & VD_VMDK_IMAGE_FLAGS_STREAM_OPTIMIZED)
2977 Header.flags |= RT_H2LE_U32(RT_BIT(16) | RT_BIT(17));
2978 Header.capacity = RT_H2LE_U64(pExtent->cSectors);
2979 Header.grainSize = RT_H2LE_U64(pExtent->cSectorsPerGrain);
2980 Header.descriptorOffset = RT_H2LE_U64(pExtent->uDescriptorSector);
2981 Header.descriptorSize = RT_H2LE_U64(pExtent->cDescriptorSectors);
2982 Header.numGTEsPerGT = RT_H2LE_U32(pExtent->cGTEntries);
2983 if (pExtent->fFooter && uOffset == 0)
2984 {
2985 if (pExtent->pRGD)
2986 {
2987 Assert(pExtent->uSectorRGD);
2988 Header.rgdOffset = RT_H2LE_U64(VMDK_GD_AT_END);
2989 Header.gdOffset = RT_H2LE_U64(VMDK_GD_AT_END);
2990 }
2991 else
2992 Header.gdOffset = RT_H2LE_U64(VMDK_GD_AT_END);
2993 }
2994 else
2995 {
2996 if (pExtent->pRGD)
2997 {
2998 Assert(pExtent->uSectorRGD);
2999 Header.rgdOffset = RT_H2LE_U64(pExtent->uSectorRGD);
3000 Header.gdOffset = RT_H2LE_U64(pExtent->uSectorGD);
3001 }
3002 else
3003 Header.gdOffset = RT_H2LE_U64(pExtent->uSectorGD);
3004 }
3005 Header.overHead = RT_H2LE_U64(pExtent->cOverheadSectors);
3006 Header.uncleanShutdown = pExtent->fUncleanShutdown;
3007 Header.singleEndLineChar = '\n';
3008 Header.nonEndLineChar = ' ';
3009 Header.doubleEndLineChar1 = '\r';
3010 Header.doubleEndLineChar2 = '\n';
3011 Header.compressAlgorithm = RT_H2LE_U16(pExtent->uCompression);
3012
3013 int rc = vdIfIoIntFileWriteMeta(pImage->pIfIo, pExtent->pFile->pStorage,
3014 uOffset, &Header, sizeof(Header),
3015 pIoCtx, NULL, NULL);
3016 if (RT_FAILURE(rc) && (rc != VERR_VD_ASYNC_IO_IN_PROGRESS))
3017 rc = vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: error writing extent header in '%s'"), pExtent->pszFullname);
3018 return rc;
3019}
3020
3021/**
3022 * Internal: free the buffers used for streamOptimized images.
3023 */
3024static void vmdkFreeStreamBuffers(PVMDKEXTENT pExtent)
3025{
3026 if (pExtent->pvCompGrain)
3027 {
3028 RTMemFree(pExtent->pvCompGrain);
3029 pExtent->pvCompGrain = NULL;
3030 }
3031 if (pExtent->pvGrain)
3032 {
3033 RTMemFree(pExtent->pvGrain);
3034 pExtent->pvGrain = NULL;
3035 }
3036}
3037
3038/**
3039 * Internal: free the memory used by the extent data structure, optionally
3040 * deleting the referenced files.
3041 *
3042 * @returns VBox status code.
3043 * @param pImage Pointer to the image instance data.
3044 * @param pExtent The extent to free.
3045 * @param fDelete Flag whether to delete the backing storage.
3046 */
3047static int vmdkFreeExtentData(PVMDKIMAGE pImage, PVMDKEXTENT pExtent,
3048 bool fDelete)
3049{
3050 int rc = VINF_SUCCESS;
3051
3052 vmdkFreeGrainDirectory(pExtent);
3053 if (pExtent->pDescData)
3054 {
3055 RTMemFree(pExtent->pDescData);
3056 pExtent->pDescData = NULL;
3057 }
3058 if (pExtent->pFile != NULL)
3059 {
3060 /* Do not delete raw extents, these have full and base names equal. */
3061 rc = vmdkFileClose(pImage, &pExtent->pFile,
3062 fDelete
3063 && pExtent->pszFullname
3064 && pExtent->pszBasename
3065 && strcmp(pExtent->pszFullname, pExtent->pszBasename));
3066 }
3067 if (pExtent->pszBasename)
3068 {
3069 RTMemTmpFree((void *)pExtent->pszBasename);
3070 pExtent->pszBasename = NULL;
3071 }
3072 if (pExtent->pszFullname)
3073 {
3074 RTStrFree((char *)(void *)pExtent->pszFullname);
3075 pExtent->pszFullname = NULL;
3076 }
3077 vmdkFreeStreamBuffers(pExtent);
3078
3079 return rc;
3080}
3081
3082/**
3083 * Internal: allocate grain table cache if necessary for this image.
3084 */
3085static int vmdkAllocateGrainTableCache(PVMDKIMAGE pImage)
3086{
3087 PVMDKEXTENT pExtent;
3088
3089 /* Allocate grain table cache if any sparse extent is present. */
3090 for (unsigned i = 0; i < pImage->cExtents; i++)
3091 {
3092 pExtent = &pImage->pExtents[i];
3093 if (pExtent->enmType == VMDKETYPE_HOSTED_SPARSE)
3094 {
3095 /* Allocate grain table cache. */
3096 pImage->pGTCache = (PVMDKGTCACHE)RTMemAllocZ(sizeof(VMDKGTCACHE));
3097 if (!pImage->pGTCache)
3098 return VERR_NO_MEMORY;
3099 for (unsigned j = 0; j < VMDK_GT_CACHE_SIZE; j++)
3100 {
3101 PVMDKGTCACHEENTRY pGCE = &pImage->pGTCache->aGTCache[j];
3102 pGCE->uExtent = UINT32_MAX;
3103 }
3104 pImage->pGTCache->cEntries = VMDK_GT_CACHE_SIZE;
3105 break;
3106 }
3107 }
3108
3109 return VINF_SUCCESS;
3110}
3111
3112/**
3113 * Internal: allocate the given number of extents.
3114 */
3115static int vmdkCreateExtents(PVMDKIMAGE pImage, unsigned cExtents)
3116{
3117 int rc = VINF_SUCCESS;
3118 PVMDKEXTENT pExtents = (PVMDKEXTENT)RTMemAllocZ(cExtents * sizeof(VMDKEXTENT));
3119 if (pExtents)
3120 {
3121 for (unsigned i = 0; i < cExtents; i++)
3122 {
3123 pExtents[i].pFile = NULL;
3124 pExtents[i].pszBasename = NULL;
3125 pExtents[i].pszFullname = NULL;
3126 pExtents[i].pGD = NULL;
3127 pExtents[i].pRGD = NULL;
3128 pExtents[i].pDescData = NULL;
3129 pExtents[i].uVersion = 1;
3130 pExtents[i].uCompression = VMDK_COMPRESSION_NONE;
3131 pExtents[i].uExtent = i;
3132 pExtents[i].pImage = pImage;
3133 }
3134 pImage->pExtents = pExtents;
3135 pImage->cExtents = cExtents;
3136 }
3137 else
3138 rc = VERR_NO_MEMORY;
3139
3140 return rc;
3141}
3142
3143/**
3144 * Internal: Create an additional file backed extent in split images.
3145 * Supports split sparse and flat images.
3146 *
3147 * @returns VBox status code.
3148 * @param pImage VMDK image instance.
3149 * @param cbSize Desiried size in bytes of new extent.
3150 */
3151static int vmdkAddFileBackedExtent(PVMDKIMAGE pImage, uint64_t cbSize)
3152{
3153 int rc = VINF_SUCCESS;
3154 unsigned uImageFlags = pImage->uImageFlags;
3155
3156 /* Check for unsupported image type. */
3157 if ((uImageFlags & VD_VMDK_IMAGE_FLAGS_ESX)
3158 || (uImageFlags & VD_VMDK_IMAGE_FLAGS_STREAM_OPTIMIZED)
3159 || (uImageFlags & VD_VMDK_IMAGE_FLAGS_RAWDISK))
3160 {
3161 return VERR_NOT_SUPPORTED;
3162 }
3163
3164 /* Allocate array of extents and copy existing extents to it. */
3165 PVMDKEXTENT pNewExtents = (PVMDKEXTENT)RTMemAllocZ((pImage->cExtents + 1) * sizeof(VMDKEXTENT));
3166 if (!pNewExtents)
3167 {
3168 return VERR_NO_MEMORY;
3169 }
3170
3171 memcpy(pNewExtents, pImage->pExtents, pImage->cExtents * sizeof(VMDKEXTENT));
3172
3173 /* Locate newly created extent and populate default metadata. */
3174 PVMDKEXTENT pExtent = &pNewExtents[pImage->cExtents];
3175
3176 pExtent->pFile = NULL;
3177 pExtent->pszBasename = NULL;
3178 pExtent->pszFullname = NULL;
3179 pExtent->pGD = NULL;
3180 pExtent->pRGD = NULL;
3181 pExtent->pDescData = NULL;
3182 pExtent->uVersion = 1;
3183 pExtent->uCompression = VMDK_COMPRESSION_NONE;
3184 pExtent->uExtent = pImage->cExtents;
3185 pExtent->pImage = pImage;
3186 pExtent->cNominalSectors = VMDK_BYTE2SECTOR(cbSize);
3187 pExtent->enmAccess = VMDKACCESS_READWRITE;
3188 pExtent->uSectorOffset = 0;
3189 pExtent->fMetaDirty = true;
3190
3191 /* Apply image type specific meta data. */
3192 if (uImageFlags & VD_IMAGE_FLAGS_FIXED)
3193 {
3194 pExtent->enmType = VMDKETYPE_FLAT;
3195 }
3196 else
3197 {
3198 uint64_t cSectorsPerGDE, cSectorsPerGD;
3199 pExtent->enmType = VMDKETYPE_HOSTED_SPARSE;
3200 pExtent->cSectors = VMDK_BYTE2SECTOR(RT_ALIGN_64(cbSize, _64K));
3201 pExtent->cSectorsPerGrain = VMDK_BYTE2SECTOR(_64K);
3202 pExtent->cGTEntries = 512;
3203 cSectorsPerGDE = pExtent->cGTEntries * pExtent->cSectorsPerGrain;
3204 pExtent->cSectorsPerGDE = cSectorsPerGDE;
3205 pExtent->cGDEntries = (pExtent->cSectors + cSectorsPerGDE - 1) / cSectorsPerGDE;
3206 cSectorsPerGD = (pExtent->cGDEntries + (512 / sizeof(uint32_t) - 1)) / (512 / sizeof(uint32_t));
3207 }
3208
3209 /* Allocate and set file name for extent. */
3210 char *pszBasenameSubstr = RTPathFilename(pImage->pszFilename);
3211 AssertPtr(pszBasenameSubstr);
3212
3213 char *pszBasenameSuff = RTPathSuffix(pszBasenameSubstr);
3214 char *pszBasenameBase = RTStrDup(pszBasenameSubstr);
3215 RTPathStripSuffix(pszBasenameBase);
3216 char *pszTmp;
3217 size_t cbTmp;
3218
3219 if (pImage->uImageFlags & VD_IMAGE_FLAGS_FIXED)
3220 RTStrAPrintf(&pszTmp, "%s-f%03d%s", pszBasenameBase,
3221 pExtent->uExtent + 1, pszBasenameSuff);
3222 else
3223 RTStrAPrintf(&pszTmp, "%s-s%03d%s", pszBasenameBase, pExtent->uExtent + 1,
3224 pszBasenameSuff);
3225
3226 RTStrFree(pszBasenameBase);
3227 if (!pszTmp)
3228 return VERR_NO_STR_MEMORY;
3229 cbTmp = strlen(pszTmp) + 1;
3230 char *pszBasename = (char *)RTMemTmpAlloc(cbTmp);
3231 if (!pszBasename)
3232 {
3233 RTStrFree(pszTmp);
3234 return VERR_NO_MEMORY;
3235 }
3236
3237 memcpy(pszBasename, pszTmp, cbTmp);
3238 RTStrFree(pszTmp);
3239
3240 pExtent->pszBasename = pszBasename;
3241
3242 char *pszBasedirectory = RTStrDup(pImage->pszFilename);
3243 if (!pszBasedirectory)
3244 return VERR_NO_STR_MEMORY;
3245 RTPathStripFilename(pszBasedirectory);
3246 char *pszFullname = RTPathJoinA(pszBasedirectory, pExtent->pszBasename);
3247 RTStrFree(pszBasedirectory);
3248 if (!pszFullname)
3249 return VERR_NO_STR_MEMORY;
3250 pExtent->pszFullname = pszFullname;
3251
3252 /* Create file for extent. */
3253 rc = vmdkFileOpen(pImage, &pExtent->pFile, pExtent->pszBasename, pExtent->pszFullname,
3254 VDOpenFlagsToFileOpenFlags(pImage->uOpenFlags,
3255 true /* fCreate */));
3256 if (RT_FAILURE(rc))
3257 return vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: could not create new file '%s'"), pExtent->pszFullname);
3258
3259 if (uImageFlags & VD_IMAGE_FLAGS_FIXED)
3260 {
3261 /* For flat images: Pre allocate file space. */
3262 rc = vdIfIoIntFileSetAllocationSize(pImage->pIfIo, pExtent->pFile->pStorage, cbSize,
3263 0 /* fFlags */, NULL, 0, 0);
3264 if (RT_FAILURE(rc))
3265 return vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: could not set size of new file '%s'"), pExtent->pszFullname);
3266 }
3267 else
3268 {
3269 /* For sparse images: Allocate new grain directories/tables. */
3270 /* fPreAlloc should never be false because VMware can't use such images. */
3271 rc = vmdkCreateGrainDirectory(pImage, pExtent,
3272 RT_MAX( pExtent->uDescriptorSector
3273 + pExtent->cDescriptorSectors,
3274 1),
3275 true /* fPreAlloc */);
3276 if (RT_FAILURE(rc))
3277 return vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: could not create new grain directory in '%s'"), pExtent->pszFullname);
3278 }
3279
3280 /* Insert new extent into descriptor file. */
3281 rc = vmdkDescExtInsert(pImage, &pImage->Descriptor, pExtent->enmAccess,
3282 pExtent->cNominalSectors, pExtent->enmType,
3283 pExtent->pszBasename, pExtent->uSectorOffset);
3284 if (RT_FAILURE(rc))
3285 return vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: could not insert the extent list into descriptor in '%s'"), pImage->pszFilename);
3286
3287 pImage->pExtents = pNewExtents;
3288 pImage->cExtents++;
3289
3290 return rc;
3291}
3292
3293/**
3294 * Reads and processes the descriptor embedded in sparse images.
3295 *
3296 * @returns VBox status code.
3297 * @param pImage VMDK image instance.
3298 * @param pFile The sparse file handle.
3299 */
3300static int vmdkDescriptorReadSparse(PVMDKIMAGE pImage, PVMDKFILE pFile)
3301{
3302 /* It's a hosted single-extent image. */
3303 int rc = vmdkCreateExtents(pImage, 1);
3304 if (RT_SUCCESS(rc))
3305 {
3306 /* The opened file is passed to the extent. No separate descriptor
3307 * file, so no need to keep anything open for the image. */
3308 PVMDKEXTENT pExtent = &pImage->pExtents[0];
3309 pExtent->pFile = pFile;
3310 pImage->pFile = NULL;
3311 pExtent->pszFullname = RTPathAbsDup(pImage->pszFilename);
3312 if (RT_LIKELY(pExtent->pszFullname))
3313 {
3314 /* As we're dealing with a monolithic image here, there must
3315 * be a descriptor embedded in the image file. */
3316 rc = vmdkReadBinaryMetaExtent(pImage, pExtent, true /* fMagicAlreadyRead */);
3317 if ( RT_SUCCESS(rc)
3318 && pExtent->uDescriptorSector
3319 && pExtent->cDescriptorSectors)
3320 {
3321 /* HACK: extend the descriptor if it is unusually small and it fits in
3322 * the unused space after the image header. Allows opening VMDK files
3323 * with extremely small descriptor in read/write mode.
3324 *
3325 * The previous version introduced a possible regression for VMDK stream
3326 * optimized images from VMware which tend to have only a single sector sized
3327 * descriptor. Increasing the descriptor size resulted in adding the various uuid
3328 * entries required to make it work with VBox but for stream optimized images
3329 * the updated binary header wasn't written to the disk creating a mismatch
3330 * between advertised and real descriptor size.
3331 *
3332 * The descriptor size will be increased even if opened readonly now if there
3333 * enough room but the new value will not be written back to the image.
3334 */
3335 if ( pExtent->cDescriptorSectors < 3
3336 && (int64_t)pExtent->uSectorGD - pExtent->uDescriptorSector >= 4
3337 && (!pExtent->uSectorRGD || (int64_t)pExtent->uSectorRGD - pExtent->uDescriptorSector >= 4))
3338 {
3339 uint64_t cDescriptorSectorsOld = pExtent->cDescriptorSectors;
3340
3341 pExtent->cDescriptorSectors = 4;
3342 if (!(pImage->uOpenFlags & VD_OPEN_FLAGS_READONLY))
3343 {
3344 /*
3345 * Update the on disk number now to make sure we don't introduce inconsistencies
3346 * in case of stream optimized images from VMware where the descriptor is just
3347 * one sector big (the binary header is not written to disk for complete
3348 * stream optimized images in vmdkFlushImage()).
3349 */
3350 uint64_t u64DescSizeNew = RT_H2LE_U64(pExtent->cDescriptorSectors);
3351 rc = vdIfIoIntFileWriteSync(pImage->pIfIo, pFile->pStorage,
3352 RT_UOFFSETOF(SparseExtentHeader, descriptorSize),
3353 &u64DescSizeNew, sizeof(u64DescSizeNew));
3354 if (RT_FAILURE(rc))
3355 {
3356 LogFlowFunc(("Increasing the descriptor size failed with %Rrc\n", rc));
3357 /* Restore the old size and carry on. */
3358 pExtent->cDescriptorSectors = cDescriptorSectorsOld;
3359 }
3360 }
3361 }
3362 /* Read the descriptor from the extent. */
3363 pExtent->pDescData = (char *)RTMemAllocZ(VMDK_SECTOR2BYTE(pExtent->cDescriptorSectors));
3364 if (RT_LIKELY(pExtent->pDescData))
3365 {
3366 rc = vdIfIoIntFileReadSync(pImage->pIfIo, pExtent->pFile->pStorage,
3367 VMDK_SECTOR2BYTE(pExtent->uDescriptorSector),
3368 pExtent->pDescData,
3369 VMDK_SECTOR2BYTE(pExtent->cDescriptorSectors));
3370 if (RT_SUCCESS(rc))
3371 {
3372 rc = vmdkParseDescriptor(pImage, pExtent->pDescData,
3373 VMDK_SECTOR2BYTE(pExtent->cDescriptorSectors));
3374 if ( RT_SUCCESS(rc)
3375 && ( !(pImage->uImageFlags & VD_VMDK_IMAGE_FLAGS_STREAM_OPTIMIZED)
3376 || !(pImage->uOpenFlags & VD_OPEN_FLAGS_ASYNC_IO)))
3377 {
3378 rc = vmdkReadMetaExtent(pImage, pExtent);
3379 if (RT_SUCCESS(rc))
3380 {
3381 /* Mark the extent as unclean if opened in read-write mode. */
3382 if ( !(pImage->uOpenFlags & VD_OPEN_FLAGS_READONLY)
3383 && !(pImage->uImageFlags & VD_VMDK_IMAGE_FLAGS_STREAM_OPTIMIZED))
3384 {
3385 pExtent->fUncleanShutdown = true;
3386 pExtent->fMetaDirty = true;
3387 }
3388 }
3389 }
3390 else if (RT_SUCCESS(rc))
3391 rc = VERR_NOT_SUPPORTED;
3392 }
3393 else
3394 rc = vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: read error for descriptor in '%s'"), pExtent->pszFullname);
3395 }
3396 else
3397 rc = VERR_NO_MEMORY;
3398 }
3399 else if (RT_SUCCESS(rc))
3400 rc = vdIfError(pImage->pIfError, VERR_VD_VMDK_INVALID_HEADER, RT_SRC_POS, N_("VMDK: monolithic image without descriptor in '%s'"), pImage->pszFilename);
3401 }
3402 else
3403 rc = VERR_NO_MEMORY;
3404 }
3405
3406 return rc;
3407}
3408
3409/**
3410 * Reads the descriptor from a pure text file.
3411 *
3412 * @returns VBox status code.
3413 * @param pImage VMDK image instance.
3414 * @param pFile The descriptor file handle.
3415 */
3416static int vmdkDescriptorReadAscii(PVMDKIMAGE pImage, PVMDKFILE pFile)
3417{
3418 /* Allocate at least 10K, and make sure that there is 5K free space
3419 * in case new entries need to be added to the descriptor. Never
3420 * allocate more than 128K, because that's no valid descriptor file
3421 * and will result in the correct "truncated read" error handling. */
3422 uint64_t cbFileSize;
3423 int rc = vdIfIoIntFileGetSize(pImage->pIfIo, pFile->pStorage, &cbFileSize);
3424 if ( RT_SUCCESS(rc)
3425 && cbFileSize >= 50)
3426 {
3427 uint64_t cbSize = cbFileSize;
3428 if (cbSize % VMDK_SECTOR2BYTE(10))
3429 cbSize += VMDK_SECTOR2BYTE(20) - cbSize % VMDK_SECTOR2BYTE(10);
3430 else
3431 cbSize += VMDK_SECTOR2BYTE(10);
3432 cbSize = RT_MIN(cbSize, _128K);
3433 pImage->cbDescAlloc = RT_MAX(VMDK_SECTOR2BYTE(20), cbSize);
3434 pImage->pDescData = (char *)RTMemAllocZ(pImage->cbDescAlloc);
3435 if (RT_LIKELY(pImage->pDescData))
3436 {
3437 rc = vdIfIoIntFileReadSync(pImage->pIfIo, pFile->pStorage, 0, pImage->pDescData,
3438 RT_MIN(pImage->cbDescAlloc, cbFileSize));
3439 if (RT_SUCCESS(rc))
3440 {
3441#if 0 /** @todo Revisit */
3442 cbRead += sizeof(u32Magic);
3443 if (cbRead == pImage->cbDescAlloc)
3444 {
3445 /* Likely the read is truncated. Better fail a bit too early
3446 * (normally the descriptor is much smaller than our buffer). */
3447 rc = vdIfError(pImage->pIfError, VERR_VD_VMDK_INVALID_HEADER, RT_SRC_POS, N_("VMDK: cannot read descriptor in '%s'"), pImage->pszFilename);
3448 goto out;
3449 }
3450#endif
3451 rc = vmdkParseDescriptor(pImage, pImage->pDescData,
3452 pImage->cbDescAlloc);
3453 if (RT_SUCCESS(rc))
3454 {
3455 for (unsigned i = 0; i < pImage->cExtents && RT_SUCCESS(rc); i++)
3456 {
3457 PVMDKEXTENT pExtent = &pImage->pExtents[i];
3458 if (pExtent->pszBasename)
3459 {
3460 /* Hack to figure out whether the specified name in the
3461 * extent descriptor is absolute. Doesn't always work, but
3462 * should be good enough for now. */
3463 char *pszFullname;
3464 /** @todo implement proper path absolute check. */
3465 if (pExtent->pszBasename[0] == RTPATH_SLASH)
3466 {
3467 pszFullname = RTStrDup(pExtent->pszBasename);
3468 if (!pszFullname)
3469 {
3470 rc = VERR_NO_MEMORY;
3471 break;
3472 }
3473 }
3474 else
3475 {
3476 char *pszDirname = RTStrDup(pImage->pszFilename);
3477 if (!pszDirname)
3478 {
3479 rc = VERR_NO_MEMORY;
3480 break;
3481 }
3482 RTPathStripFilename(pszDirname);
3483 pszFullname = RTPathJoinA(pszDirname, pExtent->pszBasename);
3484 RTStrFree(pszDirname);
3485 if (!pszFullname)
3486 {
3487 rc = VERR_NO_STR_MEMORY;
3488 break;
3489 }
3490 }
3491 pExtent->pszFullname = pszFullname;
3492 }
3493 else
3494 pExtent->pszFullname = NULL;
3495
3496 unsigned uOpenFlags = pImage->uOpenFlags | ((pExtent->enmAccess == VMDKACCESS_READONLY) ? VD_OPEN_FLAGS_READONLY : 0);
3497 switch (pExtent->enmType)
3498 {
3499 case VMDKETYPE_HOSTED_SPARSE:
3500 rc = vmdkFileOpen(pImage, &pExtent->pFile, pExtent->pszBasename, pExtent->pszFullname,
3501 VDOpenFlagsToFileOpenFlags(uOpenFlags, false /* fCreate */));
3502 if (RT_FAILURE(rc))
3503 {
3504 /* Do NOT signal an appropriate error here, as the VD
3505 * layer has the choice of retrying the open if it
3506 * failed. */
3507 break;
3508 }
3509 rc = vmdkReadBinaryMetaExtent(pImage, pExtent,
3510 false /* fMagicAlreadyRead */);
3511 if (RT_FAILURE(rc))
3512 break;
3513 rc = vmdkReadMetaExtent(pImage, pExtent);
3514 if (RT_FAILURE(rc))
3515 break;
3516
3517 /* Mark extent as unclean if opened in read-write mode. */
3518 if (!(pImage->uOpenFlags & VD_OPEN_FLAGS_READONLY))
3519 {
3520 pExtent->fUncleanShutdown = true;
3521 pExtent->fMetaDirty = true;
3522 }
3523 break;
3524 case VMDKETYPE_VMFS:
3525 case VMDKETYPE_FLAT:
3526 rc = vmdkFileOpen(pImage, &pExtent->pFile, pExtent->pszBasename, pExtent->pszFullname,
3527 VDOpenFlagsToFileOpenFlags(uOpenFlags, false /* fCreate */));
3528 if (RT_FAILURE(rc))
3529 {
3530 /* Do NOT signal an appropriate error here, as the VD
3531 * layer has the choice of retrying the open if it
3532 * failed. */
3533 break;
3534 }
3535 break;
3536 case VMDKETYPE_ZERO:
3537 /* Nothing to do. */
3538 break;
3539 default:
3540 AssertMsgFailed(("unknown vmdk extent type %d\n", pExtent->enmType));
3541 }
3542 }
3543 }
3544 }
3545 else
3546 rc = vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: read error for descriptor in '%s'"), pImage->pszFilename);
3547 }
3548 else
3549 rc = VERR_NO_MEMORY;
3550 }
3551 else if (RT_SUCCESS(rc))
3552 rc = vdIfError(pImage->pIfError, VERR_VD_VMDK_INVALID_HEADER, RT_SRC_POS, N_("VMDK: descriptor in '%s' is too short"), pImage->pszFilename);
3553
3554 return rc;
3555}
3556
3557/**
3558 * Read and process the descriptor based on the image type.
3559 *
3560 * @returns VBox status code.
3561 * @param pImage VMDK image instance.
3562 * @param pFile VMDK file handle.
3563 */
3564static int vmdkDescriptorRead(PVMDKIMAGE pImage, PVMDKFILE pFile)
3565{
3566 uint32_t u32Magic;
3567
3568 /* Read magic (if present). */
3569 int rc = vdIfIoIntFileReadSync(pImage->pIfIo, pFile->pStorage, 0,
3570 &u32Magic, sizeof(u32Magic));
3571 if (RT_SUCCESS(rc))
3572 {
3573 /* Handle the file according to its magic number. */
3574 if (RT_LE2H_U32(u32Magic) == VMDK_SPARSE_MAGICNUMBER)
3575 rc = vmdkDescriptorReadSparse(pImage, pFile);
3576 else
3577 rc = vmdkDescriptorReadAscii(pImage, pFile);
3578 }
3579 else
3580 {
3581 vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: error reading the magic number in '%s'"), pImage->pszFilename);
3582 rc = VERR_VD_VMDK_INVALID_HEADER;
3583 }
3584
3585 return rc;
3586}
3587
3588/**
3589 * Internal: Open an image, constructing all necessary data structures.
3590 */
3591static int vmdkOpenImage(PVMDKIMAGE pImage, unsigned uOpenFlags)
3592{
3593 pImage->uOpenFlags = uOpenFlags;
3594 pImage->pIfError = VDIfErrorGet(pImage->pVDIfsDisk);
3595 pImage->pIfIo = VDIfIoIntGet(pImage->pVDIfsImage);
3596 AssertPtrReturn(pImage->pIfIo, VERR_INVALID_PARAMETER);
3597
3598 /*
3599 * Open the image.
3600 * We don't have to check for asynchronous access because
3601 * we only support raw access and the opened file is a description
3602 * file were no data is stored.
3603 */
3604 PVMDKFILE pFile;
3605 int rc = vmdkFileOpen(pImage, &pFile, NULL, pImage->pszFilename,
3606 VDOpenFlagsToFileOpenFlags(uOpenFlags, false /* fCreate */));
3607 if (RT_SUCCESS(rc))
3608 {
3609 pImage->pFile = pFile;
3610
3611 rc = vmdkDescriptorRead(pImage, pFile);
3612 if (RT_SUCCESS(rc))
3613 {
3614 /* Determine PCHS geometry if not set. */
3615 if (pImage->PCHSGeometry.cCylinders == 0)
3616 {
3617 uint64_t cCylinders = VMDK_BYTE2SECTOR(pImage->cbSize)
3618 / pImage->PCHSGeometry.cHeads
3619 / pImage->PCHSGeometry.cSectors;
3620 pImage->PCHSGeometry.cCylinders = (unsigned)RT_MIN(cCylinders, 16383);
3621 if ( !(pImage->uOpenFlags & VD_OPEN_FLAGS_READONLY)
3622 && !(pImage->uImageFlags & VD_VMDK_IMAGE_FLAGS_STREAM_OPTIMIZED))
3623 {
3624 rc = vmdkDescSetPCHSGeometry(pImage, &pImage->PCHSGeometry);
3625 AssertRC(rc);
3626 }
3627 }
3628
3629 /* Update the image metadata now in case has changed. */
3630 rc = vmdkFlushImage(pImage, NULL);
3631 if (RT_SUCCESS(rc))
3632 {
3633 /* Figure out a few per-image constants from the extents. */
3634 pImage->cbSize = 0;
3635 for (unsigned i = 0; i < pImage->cExtents; i++)
3636 {
3637 PVMDKEXTENT pExtent = &pImage->pExtents[i];
3638 if (pExtent->enmType == VMDKETYPE_HOSTED_SPARSE)
3639 {
3640 /* Here used to be a check whether the nominal size of an extent
3641 * is a multiple of the grain size. The spec says that this is
3642 * always the case, but unfortunately some files out there in the
3643 * wild violate the spec (e.g. ReactOS 0.3.1). */
3644 }
3645 else if ( pExtent->enmType == VMDKETYPE_FLAT
3646 || pExtent->enmType == VMDKETYPE_ZERO)
3647 pImage->uImageFlags |= VD_IMAGE_FLAGS_FIXED;
3648
3649 pImage->cbSize += VMDK_SECTOR2BYTE(pExtent->cNominalSectors);
3650 }
3651
3652 if ( !(pImage->uImageFlags & VD_VMDK_IMAGE_FLAGS_STREAM_OPTIMIZED)
3653 || !(pImage->uOpenFlags & VD_OPEN_FLAGS_READONLY)
3654 || !(pImage->uOpenFlags & VD_OPEN_FLAGS_SEQUENTIAL))
3655 rc = vmdkAllocateGrainTableCache(pImage);
3656 }
3657 }
3658 }
3659 /* else: Do NOT signal an appropriate error here, as the VD layer has the
3660 * choice of retrying the open if it failed. */
3661
3662 if (RT_SUCCESS(rc))
3663 {
3664 PVDREGIONDESC pRegion = &pImage->RegionList.aRegions[0];
3665 pImage->RegionList.fFlags = 0;
3666 pImage->RegionList.cRegions = 1;
3667
3668 pRegion->offRegion = 0; /* Disk start. */
3669 pRegion->cbBlock = 512;
3670 pRegion->enmDataForm = VDREGIONDATAFORM_RAW;
3671 pRegion->enmMetadataForm = VDREGIONMETADATAFORM_NONE;
3672 pRegion->cbData = 512;
3673 pRegion->cbMetadata = 0;
3674 pRegion->cRegionBlocksOrBytes = pImage->cbSize;
3675 }
3676 else
3677 vmdkFreeImage(pImage, false, false /*fFlush*/); /* Don't try to flush anything if opening failed. */
3678 return rc;
3679}
3680
3681/**
3682 * Frees a raw descriptor.
3683 * @internal
3684 */
3685static int vmdkRawDescFree(PVDISKRAW pRawDesc)
3686{
3687 if (!pRawDesc)
3688 return VINF_SUCCESS;
3689
3690 RTStrFree(pRawDesc->pszRawDisk);
3691 pRawDesc->pszRawDisk = NULL;
3692
3693 /* Partitions: */
3694 for (unsigned i = 0; i < pRawDesc->cPartDescs; i++)
3695 {
3696 RTStrFree(pRawDesc->pPartDescs[i].pszRawDevice);
3697 pRawDesc->pPartDescs[i].pszRawDevice = NULL;
3698
3699 RTMemFree(pRawDesc->pPartDescs[i].pvPartitionData);
3700 pRawDesc->pPartDescs[i].pvPartitionData = NULL;
3701 }
3702
3703 RTMemFree(pRawDesc->pPartDescs);
3704 pRawDesc->pPartDescs = NULL;
3705
3706 RTMemFree(pRawDesc);
3707 return VINF_SUCCESS;
3708}
3709
3710/**
3711 * Helper that grows the raw partition descriptor table by @a cToAdd entries,
3712 * returning the pointer to the first new entry.
3713 * @internal
3714 */
3715static int vmdkRawDescAppendPartDesc(PVMDKIMAGE pImage, PVDISKRAW pRawDesc, uint32_t cToAdd, PVDISKRAWPARTDESC *ppRet)
3716{
3717 uint32_t const cOld = pRawDesc->cPartDescs;
3718 uint32_t const cNew = cOld + cToAdd;
3719 PVDISKRAWPARTDESC paNew = (PVDISKRAWPARTDESC)RTMemReallocZ(pRawDesc->pPartDescs,
3720 cOld * sizeof(pRawDesc->pPartDescs[0]),
3721 cNew * sizeof(pRawDesc->pPartDescs[0]));
3722 if (paNew)
3723 {
3724 pRawDesc->cPartDescs = cNew;
3725 pRawDesc->pPartDescs = paNew;
3726
3727 *ppRet = &paNew[cOld];
3728 return VINF_SUCCESS;
3729 }
3730 *ppRet = NULL;
3731 return vdIfError(pImage->pIfError, VERR_NO_MEMORY, RT_SRC_POS,
3732 N_("VMDK: Image path: '%s'. Out of memory growing the partition descriptors (%u -> %u)."),
3733 pImage->pszFilename, cOld, cNew);
3734}
3735
3736/**
3737 * @callback_method_impl{FNRTSORTCMP}
3738 */
3739static DECLCALLBACK(int) vmdkRawDescPartComp(void const *pvElement1, void const *pvElement2, void *pvUser)
3740{
3741 RT_NOREF(pvUser);
3742 int64_t const iDelta = ((PVDISKRAWPARTDESC)pvElement1)->offStartInVDisk - ((PVDISKRAWPARTDESC)pvElement2)->offStartInVDisk;
3743 return iDelta < 0 ? -1 : iDelta > 0 ? 1 : 0;
3744}
3745
3746/**
3747 * Post processes the partition descriptors.
3748 *
3749 * Sorts them and check that they don't overlap.
3750 */
3751static int vmdkRawDescPostProcessPartitions(PVMDKIMAGE pImage, PVDISKRAW pRawDesc, uint64_t cbSize)
3752{
3753 /*
3754 * Sort data areas in ascending order of start.
3755 */
3756 RTSortShell(pRawDesc->pPartDescs, pRawDesc->cPartDescs, sizeof(pRawDesc->pPartDescs[0]), vmdkRawDescPartComp, NULL);
3757
3758 /*
3759 * Check that we don't have overlapping descriptors. If we do, that's an
3760 * indication that the drive is corrupt or that the RTDvm code is buggy.
3761 */
3762 VDISKRAWPARTDESC const *paPartDescs = pRawDesc->pPartDescs;
3763 for (uint32_t i = 0; i < pRawDesc->cPartDescs; i++)
3764 {
3765 uint64_t offLast = paPartDescs[i].offStartInVDisk + paPartDescs[i].cbData;
3766 if (offLast <= paPartDescs[i].offStartInVDisk)
3767 return vdIfError(pImage->pIfError, VERR_FILESYSTEM_CORRUPT /*?*/, RT_SRC_POS,
3768 N_("VMDK: Image path: '%s'. Bogus partition descriptor #%u (%#RX64 LB %#RX64%s): Wrap around or zero"),
3769 pImage->pszFilename, i, paPartDescs[i].offStartInVDisk, paPartDescs[i].cbData,
3770 paPartDescs[i].pvPartitionData ? " (data)" : "");
3771 offLast -= 1;
3772
3773 if (i + 1 < pRawDesc->cPartDescs && offLast >= paPartDescs[i + 1].offStartInVDisk)
3774 return vdIfError(pImage->pIfError, VERR_FILESYSTEM_CORRUPT /*?*/, RT_SRC_POS,
3775 N_("VMDK: Image path: '%s'. Partition descriptor #%u (%#RX64 LB %#RX64%s) overlaps with the next (%#RX64 LB %#RX64%s)"),
3776 pImage->pszFilename, i, paPartDescs[i].offStartInVDisk, paPartDescs[i].cbData,
3777 paPartDescs[i].pvPartitionData ? " (data)" : "", paPartDescs[i + 1].offStartInVDisk,
3778 paPartDescs[i + 1].cbData, paPartDescs[i + 1].pvPartitionData ? " (data)" : "");
3779 if (offLast >= cbSize)
3780 return vdIfError(pImage->pIfError, VERR_FILESYSTEM_CORRUPT /*?*/, RT_SRC_POS,
3781 N_("VMDK: Image path: '%s'. Partition descriptor #%u (%#RX64 LB %#RX64%s) goes beyond the end of the drive (%#RX64)"),
3782 pImage->pszFilename, i, paPartDescs[i].offStartInVDisk, paPartDescs[i].cbData,
3783 paPartDescs[i].pvPartitionData ? " (data)" : "", cbSize);
3784 }
3785
3786 return VINF_SUCCESS;
3787}
3788
3789
3790#ifdef RT_OS_LINUX
3791/**
3792 * Searches the dir specified in @a pszBlockDevDir for subdirectories with a
3793 * 'dev' file matching @a uDevToLocate.
3794 *
3795 * This is used both
3796 *
3797 * @returns IPRT status code, errors have been reported properly.
3798 * @param pImage For error reporting.
3799 * @param pszBlockDevDir Input: Path to the directory search under.
3800 * Output: Path to the directory containing information
3801 * for @a uDevToLocate.
3802 * @param cbBlockDevDir The size of the buffer @a pszBlockDevDir points to.
3803 * @param uDevToLocate The device number of the block device info dir to
3804 * locate.
3805 * @param pszDevToLocate For error reporting.
3806 */
3807static int vmdkFindSysBlockDevPath(PVMDKIMAGE pImage, char *pszBlockDevDir, size_t cbBlockDevDir,
3808 dev_t uDevToLocate, const char *pszDevToLocate)
3809{
3810 size_t const cchDir = RTPathEnsureTrailingSeparator(pszBlockDevDir, cbBlockDevDir);
3811 AssertReturn(cchDir > 0, VERR_BUFFER_OVERFLOW);
3812
3813 RTDIR hDir = NIL_RTDIR;
3814 int rc = RTDirOpen(&hDir, pszBlockDevDir);
3815 if (RT_SUCCESS(rc))
3816 {
3817 for (;;)
3818 {
3819 RTDIRENTRY Entry;
3820 rc = RTDirRead(hDir, &Entry, NULL);
3821 if (RT_SUCCESS(rc))
3822 {
3823 /* We're interested in directories and symlinks. */
3824 if ( Entry.enmType == RTDIRENTRYTYPE_DIRECTORY
3825 || Entry.enmType == RTDIRENTRYTYPE_SYMLINK
3826 || Entry.enmType == RTDIRENTRYTYPE_UNKNOWN)
3827 {
3828 rc = RTStrCopy(&pszBlockDevDir[cchDir], cbBlockDevDir - cchDir, Entry.szName);
3829 AssertContinue(RT_SUCCESS(rc)); /* should not happen! */
3830
3831 dev_t uThisDevNo = ~uDevToLocate;
3832 rc = RTLinuxSysFsReadDevNumFile(&uThisDevNo, "%s/dev", pszBlockDevDir);
3833 if (RT_SUCCESS(rc) && uThisDevNo == uDevToLocate)
3834 break;
3835 }
3836 }
3837 else
3838 {
3839 pszBlockDevDir[cchDir] = '\0';
3840 if (rc == VERR_NO_MORE_FILES)
3841 rc = vdIfError(pImage->pIfError, VERR_NOT_FOUND, RT_SRC_POS,
3842 N_("VMDK: Image path: '%s'. Failed to locate device corresponding to '%s' under '%s'"),
3843 pImage->pszFilename, pszDevToLocate, pszBlockDevDir);
3844 else
3845 rc = vdIfError(pImage->pIfError, rc, RT_SRC_POS,
3846 N_("VMDK: Image path: '%s'. RTDirRead failed enumerating '%s': %Rrc"),
3847 pImage->pszFilename, pszBlockDevDir, rc);
3848 break;
3849 }
3850 }
3851 RTDirClose(hDir);
3852 }
3853 else
3854 rc = vdIfError(pImage->pIfError, rc, RT_SRC_POS,
3855 N_("VMDK: Image path: '%s'. Failed to open dir '%s' for listing: %Rrc"),
3856 pImage->pszFilename, pszBlockDevDir, rc);
3857 return rc;
3858}
3859#endif /* RT_OS_LINUX */
3860
3861#ifdef RT_OS_FREEBSD
3862
3863
3864/**
3865 * Reads the config data from the provider and returns offset and size
3866 *
3867 * @return IPRT status code
3868 * @param pProvider GEOM provider representing partition
3869 * @param pcbOffset Placeholder for the offset of the partition
3870 * @param pcbSize Placeholder for the size of the partition
3871 */
3872static int vmdkReadPartitionsParamsFromProvider(gprovider *pProvider, uint64_t *pcbOffset, uint64_t *pcbSize)
3873{
3874 gconfig *pConfEntry;
3875 int rc = VERR_NOT_FOUND;
3876
3877 /*
3878 * Required parameters are located in the list containing key/value pairs.
3879 * Both key and value are in text form. Manuals tells nothing about the fact
3880 * that the both parameters should be present in the list. Thus, there are
3881 * cases when only one parameter is presented. To handle such cases we treat
3882 * absent params as zero allowing the caller decide the case is either correct
3883 * or an error.
3884 */
3885 uint64_t cbOffset = 0;
3886 uint64_t cbSize = 0;
3887 LIST_FOREACH(pConfEntry, &pProvider->lg_config, lg_config)
3888 {
3889 if (RTStrCmp(pConfEntry->lg_name, "offset") == 0)
3890 {
3891 cbOffset = RTStrToUInt64(pConfEntry->lg_val);
3892 rc = VINF_SUCCESS;
3893 }
3894 else if (RTStrCmp(pConfEntry->lg_name, "length") == 0)
3895 {
3896 cbSize = RTStrToUInt64(pConfEntry->lg_val);
3897 rc = VINF_SUCCESS;
3898 }
3899 }
3900 if (RT_SUCCESS(rc))
3901 {
3902 *pcbOffset = cbOffset;
3903 *pcbSize = cbSize;
3904 }
3905 return rc;
3906}
3907
3908
3909/**
3910 * Searches the partition specified by name and calculates its size and absolute offset.
3911 *
3912 * @return IPRT status code.
3913 * @param pParentClass Class containing pParentGeom
3914 * @param pszParentGeomName Name of the parent geom where we are looking for provider
3915 * @param pszProviderName Name of the provider we are looking for
3916 * @param pcbAbsoluteOffset Placeholder for the absolute offset of the partition, i.e. offset from the beginning of the disk
3917 * @param psbSize Placeholder for the size of the partition.
3918 */
3919static int vmdkFindPartitionParamsByName(gclass *pParentClass, const char *pszParentGeomName, const char *pszProviderName,
3920 uint64_t *pcbAbsoluteOffset, uint64_t *pcbSize)
3921{
3922 AssertReturn(pParentClass, VERR_INVALID_PARAMETER);
3923 AssertReturn(pszParentGeomName, VERR_INVALID_PARAMETER);
3924 AssertReturn(pszProviderName, VERR_INVALID_PARAMETER);
3925 AssertReturn(pcbAbsoluteOffset, VERR_INVALID_PARAMETER);
3926 AssertReturn(pcbSize, VERR_INVALID_PARAMETER);
3927
3928 ggeom *pParentGeom;
3929 int rc = VERR_NOT_FOUND;
3930 LIST_FOREACH(pParentGeom, &pParentClass->lg_geom, lg_geom)
3931 {
3932 if (RTStrCmp(pParentGeom->lg_name, pszParentGeomName) == 0)
3933 {
3934 rc = VINF_SUCCESS;
3935 break;
3936 }
3937 }
3938 if (RT_FAILURE(rc))
3939 return rc;
3940
3941 gprovider *pProvider;
3942 /*
3943 * First, go over providers without handling EBR or BSDLabel
3944 * partitions for case when looking provider is child
3945 * of the givng geom, to reduce searching time
3946 */
3947 LIST_FOREACH(pProvider, &pParentGeom->lg_provider, lg_provider)
3948 {
3949 if (RTStrCmp(pProvider->lg_name, pszProviderName) == 0)
3950 return vmdkReadPartitionsParamsFromProvider(pProvider, pcbAbsoluteOffset, pcbSize);
3951 }
3952
3953 /*
3954 * No provider found. Go over the parent geom again
3955 * and make recursions if geom represents EBR or BSDLabel.
3956 * In this case given parent geom contains only EBR or BSDLabel
3957 * partition itself and their own partitions are in the separate
3958 * geoms. Also, partition offsets are relative to geom, so
3959 * we have to add offset from child provider with parent geoms
3960 * provider
3961 */
3962
3963 LIST_FOREACH(pProvider, &pParentGeom->lg_provider, lg_provider)
3964 {
3965 uint64_t cbOffset = 0;
3966 uint64_t cbSize = 0;
3967 rc = vmdkReadPartitionsParamsFromProvider(pProvider, &cbOffset, &cbSize);
3968 if (RT_FAILURE(rc))
3969 return rc;
3970
3971 uint64_t cbProviderOffset = 0;
3972 uint64_t cbProviderSize = 0;
3973 rc = vmdkFindPartitionParamsByName(pParentClass, pProvider->lg_name, pszProviderName, &cbProviderOffset, &cbProviderSize);
3974 if (RT_SUCCESS(rc))
3975 {
3976 *pcbAbsoluteOffset = cbOffset + cbProviderOffset;
3977 *pcbSize = cbProviderSize;
3978 return rc;
3979 }
3980 }
3981
3982 return VERR_NOT_FOUND;
3983}
3984#endif
3985
3986
3987/**
3988 * Attempts to verify the raw partition path.
3989 *
3990 * We don't want to trust RTDvm and the partition device node morphing blindly.
3991 */
3992static int vmdkRawDescVerifyPartitionPath(PVMDKIMAGE pImage, PVDISKRAWPARTDESC pPartDesc, uint32_t idxPartition,
3993 const char *pszRawDrive, RTFILE hRawDrive, uint32_t cbSector, RTDVMVOLUME hVol)
3994{
3995 RT_NOREF(pImage, pPartDesc, idxPartition, pszRawDrive, hRawDrive, cbSector, hVol);
3996
3997 /*
3998 * Try open the raw partition device.
3999 */
4000 RTFILE hRawPart = NIL_RTFILE;
4001 int rc = RTFileOpen(&hRawPart, pPartDesc->pszRawDevice, RTFILE_O_READ | RTFILE_O_OPEN | RTFILE_O_DENY_NONE);
4002 if (RT_FAILURE(rc))
4003 return vdIfError(pImage->pIfError, rc, RT_SRC_POS,
4004 N_("VMDK: Image path: '%s'. Failed to open partition #%u on '%s' via '%s' (%Rrc)"),
4005 pImage->pszFilename, idxPartition, pszRawDrive, pPartDesc->pszRawDevice, rc);
4006
4007 /*
4008 * Compare the partition UUID if we can get it.
4009 */
4010#ifdef RT_OS_WINDOWS
4011 DWORD cbReturned;
4012
4013 /* 1. Get the device numbers for both handles, they should have the same disk. */
4014 STORAGE_DEVICE_NUMBER DevNum1;
4015 RT_ZERO(DevNum1);
4016 if (!DeviceIoControl((HANDLE)RTFileToNative(hRawDrive), IOCTL_STORAGE_GET_DEVICE_NUMBER,
4017 NULL /*pvInBuffer*/, 0 /*cbInBuffer*/, &DevNum1, sizeof(DevNum1), &cbReturned, NULL /*pOverlapped*/))
4018 rc = vdIfError(pImage->pIfError, RTErrConvertFromWin32(GetLastError()), RT_SRC_POS,
4019 N_("VMDK: Image path: '%s'. IOCTL_STORAGE_GET_DEVICE_NUMBER failed on '%s': %u"),
4020 pImage->pszFilename, pszRawDrive, GetLastError());
4021
4022 STORAGE_DEVICE_NUMBER DevNum2;
4023 RT_ZERO(DevNum2);
4024 if (!DeviceIoControl((HANDLE)RTFileToNative(hRawPart), IOCTL_STORAGE_GET_DEVICE_NUMBER,
4025 NULL /*pvInBuffer*/, 0 /*cbInBuffer*/, &DevNum2, sizeof(DevNum2), &cbReturned, NULL /*pOverlapped*/))
4026 rc = vdIfError(pImage->pIfError, RTErrConvertFromWin32(GetLastError()), RT_SRC_POS,
4027 N_("VMDK: Image path: '%s'. IOCTL_STORAGE_GET_DEVICE_NUMBER failed on '%s': %u"),
4028 pImage->pszFilename, pPartDesc->pszRawDevice, GetLastError());
4029 if ( RT_SUCCESS(rc)
4030 && ( DevNum1.DeviceNumber != DevNum2.DeviceNumber
4031 || DevNum1.DeviceType != DevNum2.DeviceType))
4032 rc = vdIfError(pImage->pIfError, VERR_MISMATCH, RT_SRC_POS,
4033 N_("VMDK: Image path: '%s'. Partition #%u path ('%s') verification failed on '%s' (%#x != %#x || %#x != %#x)"),
4034 pImage->pszFilename, idxPartition, pPartDesc->pszRawDevice, pszRawDrive,
4035 DevNum1.DeviceNumber, DevNum2.DeviceNumber, DevNum1.DeviceType, DevNum2.DeviceType);
4036 if (RT_SUCCESS(rc))
4037 {
4038 /* Get the partitions from the raw drive and match up with the volume info
4039 from RTDvm. The partition number is found in DevNum2. */
4040 DWORD cbNeeded = 0;
4041 if ( DeviceIoControl((HANDLE)RTFileToNative(hRawDrive), IOCTL_DISK_GET_DRIVE_LAYOUT_EX,
4042 NULL /*pvInBuffer*/, 0 /*cbInBuffer*/, NULL, 0, &cbNeeded, NULL /*pOverlapped*/)
4043 || cbNeeded < RT_UOFFSETOF_DYN(DRIVE_LAYOUT_INFORMATION_EX, PartitionEntry[1]))
4044 cbNeeded = RT_UOFFSETOF_DYN(DRIVE_LAYOUT_INFORMATION_EX, PartitionEntry[64]);
4045 cbNeeded += sizeof(PARTITION_INFORMATION_EX) * 2; /* just in case */
4046 DRIVE_LAYOUT_INFORMATION_EX *pLayout = (DRIVE_LAYOUT_INFORMATION_EX *)RTMemTmpAllocZ(cbNeeded);
4047 if (pLayout)
4048 {
4049 cbReturned = 0;
4050 if (DeviceIoControl((HANDLE)RTFileToNative(hRawDrive), IOCTL_DISK_GET_DRIVE_LAYOUT_EX,
4051 NULL /*pvInBuffer*/, 0 /*cbInBuffer*/, pLayout, cbNeeded, &cbReturned, NULL /*pOverlapped*/))
4052 {
4053 /* Find the entry with the given partition number (it's not an index, array contains empty MBR entries ++). */
4054 unsigned iEntry = 0;
4055 while ( iEntry < pLayout->PartitionCount
4056 && pLayout->PartitionEntry[iEntry].PartitionNumber != DevNum2.PartitionNumber)
4057 iEntry++;
4058 if (iEntry < pLayout->PartitionCount)
4059 {
4060 /* Compare the basics */
4061 PARTITION_INFORMATION_EX const * const pLayoutEntry = &pLayout->PartitionEntry[iEntry];
4062 if (pLayoutEntry->StartingOffset.QuadPart != (int64_t)pPartDesc->offStartInVDisk)
4063 rc = vdIfError(pImage->pIfError, VERR_MISMATCH, RT_SRC_POS,
4064 N_("VMDK: Image path: '%s'. Partition #%u path ('%s') verification failed on '%s': StartingOffset %RU64, expected %RU64"),
4065 pImage->pszFilename, idxPartition, pPartDesc->pszRawDevice, pszRawDrive,
4066 pLayoutEntry->StartingOffset.QuadPart, pPartDesc->offStartInVDisk);
4067 else if (pLayoutEntry->PartitionLength.QuadPart != (int64_t)pPartDesc->cbData)
4068 rc = vdIfError(pImage->pIfError, VERR_MISMATCH, RT_SRC_POS,
4069 N_("VMDK: Image path: '%s'. Partition #%u path ('%s') verification failed on '%s': PartitionLength %RU64, expected %RU64"),
4070 pImage->pszFilename, idxPartition, pPartDesc->pszRawDevice, pszRawDrive,
4071 pLayoutEntry->PartitionLength.QuadPart, pPartDesc->cbData);
4072 /** @todo We could compare the MBR type, GPT type and ID. */
4073 RT_NOREF(hVol);
4074 }
4075 else
4076 rc = vdIfError(pImage->pIfError, VERR_MISMATCH, RT_SRC_POS,
4077 N_("VMDK: Image path: '%s'. Partition #%u path ('%s') verification failed on '%s': PartitionCount (%#x vs %#x)"),
4078 pImage->pszFilename, idxPartition, pPartDesc->pszRawDevice, pszRawDrive,
4079 DevNum2.PartitionNumber, pLayout->PartitionCount);
4080# ifndef LOG_ENABLED
4081 if (RT_FAILURE(rc))
4082# endif
4083 {
4084 LogRel(("VMDK: Windows reports %u partitions for '%s':\n", pLayout->PartitionCount, pszRawDrive));
4085 PARTITION_INFORMATION_EX const *pEntry = &pLayout->PartitionEntry[0];
4086 for (DWORD i = 0; i < pLayout->PartitionCount; i++, pEntry++)
4087 {
4088 LogRel(("VMDK: #%u/%u: %016RU64 LB %016RU64 style=%d rewrite=%d",
4089 i, pEntry->PartitionNumber, pEntry->StartingOffset.QuadPart, pEntry->PartitionLength.QuadPart,
4090 pEntry->PartitionStyle, pEntry->RewritePartition));
4091 if (pEntry->PartitionStyle == PARTITION_STYLE_MBR)
4092 LogRel((" type=%#x boot=%d rec=%d hidden=%u\n", pEntry->Mbr.PartitionType, pEntry->Mbr.BootIndicator,
4093 pEntry->Mbr.RecognizedPartition, pEntry->Mbr.HiddenSectors));
4094 else if (pEntry->PartitionStyle == PARTITION_STYLE_GPT)
4095 LogRel((" type=%RTuuid id=%RTuuid aatrib=%RX64 name=%.36ls\n", &pEntry->Gpt.PartitionType,
4096 &pEntry->Gpt.PartitionId, pEntry->Gpt.Attributes, &pEntry->Gpt.Name[0]));
4097 else
4098 LogRel(("\n"));
4099 }
4100 LogRel(("VMDK: Looked for partition #%u (%u, '%s') at %RU64 LB %RU64\n", DevNum2.PartitionNumber,
4101 idxPartition, pPartDesc->pszRawDevice, pPartDesc->offStartInVDisk, pPartDesc->cbData));
4102 }
4103 }
4104 else
4105 rc = vdIfError(pImage->pIfError, RTErrConvertFromWin32(GetLastError()), RT_SRC_POS,
4106 N_("VMDK: Image path: '%s'. IOCTL_DISK_GET_DRIVE_LAYOUT_EX failed on '%s': %u (cb %u, cbRet %u)"),
4107 pImage->pszFilename, pPartDesc->pszRawDevice, GetLastError(), cbNeeded, cbReturned);
4108 RTMemTmpFree(pLayout);
4109 }
4110 else
4111 rc = VERR_NO_TMP_MEMORY;
4112 }
4113
4114#elif defined(RT_OS_LINUX)
4115 RT_NOREF(hVol);
4116
4117 /* Stat the two devices first to get their device numbers. (We probably
4118 could make some assumptions here about the major & minor number assignments
4119 for legacy nodes, but it doesn't hold up for nvme, so we'll skip that.) */
4120 struct stat StDrive, StPart;
4121 if (fstat((int)RTFileToNative(hRawDrive), &StDrive) != 0)
4122 rc = vdIfError(pImage->pIfError, RTErrConvertFromErrno(errno), RT_SRC_POS,
4123 N_("VMDK: Image path: '%s'. fstat failed on '%s': %d"), pImage->pszFilename, pszRawDrive, errno);
4124 else if (fstat((int)RTFileToNative(hRawPart), &StPart) != 0)
4125 rc = vdIfError(pImage->pIfError, RTErrConvertFromErrno(errno), RT_SRC_POS,
4126 N_("VMDK: Image path: '%s'. fstat failed on '%s': %d"), pImage->pszFilename, pPartDesc->pszRawDevice, errno);
4127 else
4128 {
4129 /* Scan the directories immediately under /sys/block/ for one with a
4130 'dev' file matching the drive's device number: */
4131 char szSysPath[RTPATH_MAX];
4132 rc = RTLinuxConstructPath(szSysPath, sizeof(szSysPath), "block/");
4133 AssertRCReturn(rc, rc); /* this shall not fail */
4134 if (RTDirExists(szSysPath))
4135 {
4136 rc = vmdkFindSysBlockDevPath(pImage, szSysPath, sizeof(szSysPath), StDrive.st_rdev, pszRawDrive);
4137
4138 /* Now, scan the directories under that again for a partition device
4139 matching the hRawPart device's number: */
4140 if (RT_SUCCESS(rc))
4141 rc = vmdkFindSysBlockDevPath(pImage, szSysPath, sizeof(szSysPath), StPart.st_rdev, pPartDesc->pszRawDevice);
4142
4143 /* Having found the /sys/block/device/partition/ path, we can finally
4144 read the partition attributes and compare with hVol. */
4145 if (RT_SUCCESS(rc))
4146 {
4147 /* partition number: */
4148 int64_t iLnxPartition = 0;
4149 rc = RTLinuxSysFsReadIntFile(10, &iLnxPartition, "%s/partition", szSysPath);
4150 if (RT_SUCCESS(rc) && iLnxPartition != idxPartition)
4151 rc = vdIfError(pImage->pIfError, VERR_MISMATCH, RT_SRC_POS,
4152 N_("VMDK: Image path: '%s'. Partition #%u path ('%s') verification failed on '%s': Partition number %RI64, expected %RU32"),
4153 pImage->pszFilename, idxPartition, pPartDesc->pszRawDevice, pszRawDrive, iLnxPartition, idxPartition);
4154 /* else: ignore failure? */
4155
4156 /* start offset: */
4157 uint32_t const cbLnxSector = 512; /* It's hardcoded in the Linux kernel */
4158 if (RT_SUCCESS(rc))
4159 {
4160 int64_t offLnxStart = -1;
4161 rc = RTLinuxSysFsReadIntFile(10, &offLnxStart, "%s/start", szSysPath);
4162 offLnxStart *= cbLnxSector;
4163 if (RT_SUCCESS(rc) && offLnxStart != (int64_t)pPartDesc->offStartInVDisk)
4164 rc = vdIfError(pImage->pIfError, VERR_MISMATCH, RT_SRC_POS,
4165 N_("VMDK: Image path: '%s'. Partition #%u path ('%s') verification failed on '%s': Start offset %RI64, expected %RU64"),
4166 pImage->pszFilename, idxPartition, pPartDesc->pszRawDevice, pszRawDrive, offLnxStart, pPartDesc->offStartInVDisk);
4167 /* else: ignore failure? */
4168 }
4169
4170 /* the size: */
4171 if (RT_SUCCESS(rc))
4172 {
4173 int64_t cbLnxData = -1;
4174 rc = RTLinuxSysFsReadIntFile(10, &cbLnxData, "%s/size", szSysPath);
4175 cbLnxData *= cbLnxSector;
4176 if (RT_SUCCESS(rc) && cbLnxData != (int64_t)pPartDesc->cbData)
4177 rc = vdIfError(pImage->pIfError, VERR_MISMATCH, RT_SRC_POS,
4178 N_("VMDK: Image path: '%s'. Partition #%u path ('%s') verification failed on '%s': Size %RI64, expected %RU64"),
4179 pImage->pszFilename, idxPartition, pPartDesc->pszRawDevice, pszRawDrive, cbLnxData, pPartDesc->cbData);
4180 /* else: ignore failure? */
4181 }
4182 }
4183 }
4184 /* else: We've got nothing to work on, so only do content comparison. */
4185 }
4186
4187#elif defined(RT_OS_FREEBSD)
4188 char szDriveDevName[256];
4189 char* pszDevName = fdevname_r(RTFileToNative(hRawDrive), szDriveDevName, 256);
4190 if (pszDevName == NULL)
4191 rc = vdIfError(pImage->pIfError, VERR_INVALID_PARAMETER, RT_SRC_POS,
4192 N_("VMDK: Image path: '%s'. '%s' is not a drive path"), pImage->pszFilename, pszRawDrive);
4193 char szPartDevName[256];
4194 if (RT_SUCCESS(rc))
4195 {
4196 pszDevName = fdevname_r(RTFileToNative(hRawPart), szPartDevName, 256);
4197 if (pszDevName == NULL)
4198 rc = vdIfError(pImage->pIfError, VERR_INVALID_PARAMETER, RT_SRC_POS,
4199 N_("VMDK: Image path: '%s'. '%s' is not a partition path"), pImage->pszFilename, pPartDesc->pszRawDevice);
4200 }
4201 if (RT_SUCCESS(rc))
4202 {
4203 gmesh geomMesh;
4204 int err = geom_gettree(&geomMesh);
4205 if (err == 0)
4206 {
4207 /* Find root class containg partitions info */
4208 gclass* pPartClass;
4209 LIST_FOREACH(pPartClass, &geomMesh.lg_class, lg_class)
4210 {
4211 if (RTStrCmp(pPartClass->lg_name, "PART") == 0)
4212 break;
4213 }
4214 if (pPartClass == NULL || RTStrCmp(pPartClass->lg_name, "PART") != 0)
4215 rc = vdIfError(pImage->pIfError, VERR_GENERAL_FAILURE, RT_SRC_POS,
4216 N_("VMDK: Image path: '%s'. 'PART' class not found in the GEOM tree"), pImage->pszFilename);
4217
4218
4219 if (RT_SUCCESS(rc))
4220 {
4221 /* Find provider representing partition device */
4222 uint64_t cbOffset;
4223 uint64_t cbSize;
4224 rc = vmdkFindPartitionParamsByName(pPartClass, szDriveDevName, szPartDevName, &cbOffset, &cbSize);
4225 if (RT_SUCCESS(rc))
4226 {
4227 if (cbOffset != pPartDesc->offStartInVDisk)
4228 rc = vdIfError(pImage->pIfError, VERR_MISMATCH, RT_SRC_POS,
4229 N_("VMDK: Image path: '%s'. Partition #%u path ('%s') verification failed on '%s': Start offset %RU64, expected %RU64"),
4230 pImage->pszFilename, idxPartition, pPartDesc->pszRawDevice, pszRawDrive, cbOffset, pPartDesc->offStartInVDisk);
4231 if (cbSize != pPartDesc->cbData)
4232 rc = vdIfError(pImage->pIfError, VERR_MISMATCH, RT_SRC_POS,
4233 N_("VMDK: Image path: '%s'. Partition #%u path ('%s') verification failed on '%s': Size %RU64, expected %RU64"),
4234 pImage->pszFilename, idxPartition, pPartDesc->pszRawDevice, pszRawDrive, cbSize, pPartDesc->cbData);
4235 }
4236 else
4237 rc = vdIfError(pImage->pIfError, rc, RT_SRC_POS,
4238 N_("VMDK: Image path: '%s'. Error getting geom provider for the partition '%s' of the drive '%s' in the GEOM tree: %Rrc"),
4239 pImage->pszFilename, pPartDesc->pszRawDevice, pszRawDrive, rc);
4240 }
4241
4242 geom_deletetree(&geomMesh);
4243 }
4244 else
4245 rc = vdIfError(pImage->pIfError, RTErrConvertFromErrno(err), RT_SRC_POS,
4246 N_("VMDK: Image path: '%s'. geom_gettree failed: %d"), pImage->pszFilename, err);
4247 }
4248
4249#elif defined(RT_OS_SOLARIS)
4250 RT_NOREF(hVol);
4251
4252 dk_cinfo dkiDriveInfo;
4253 dk_cinfo dkiPartInfo;
4254 if (ioctl(RTFileToNative(hRawDrive), DKIOCINFO, (caddr_t)&dkiDriveInfo) == -1)
4255 rc = vdIfError(pImage->pIfError, RTErrConvertFromErrno(errno), RT_SRC_POS,
4256 N_("VMDK: Image path: '%s'. DKIOCINFO failed on '%s': %d"), pImage->pszFilename, pszRawDrive, errno);
4257 else if (ioctl(RTFileToNative(hRawPart), DKIOCINFO, (caddr_t)&dkiPartInfo) == -1)
4258 rc = vdIfError(pImage->pIfError, RTErrConvertFromErrno(errno), RT_SRC_POS,
4259 N_("VMDK: Image path: '%s'. DKIOCINFO failed on '%s': %d"), pImage->pszFilename, pszRawDrive, errno);
4260 else if ( dkiDriveInfo.dki_ctype != dkiPartInfo.dki_ctype
4261 || dkiDriveInfo.dki_cnum != dkiPartInfo.dki_cnum
4262 || dkiDriveInfo.dki_addr != dkiPartInfo.dki_addr
4263 || dkiDriveInfo.dki_unit != dkiPartInfo.dki_unit
4264 || dkiDriveInfo.dki_slave != dkiPartInfo.dki_slave)
4265 rc = vdIfError(pImage->pIfError, VERR_MISMATCH, RT_SRC_POS,
4266 N_("VMDK: Image path: '%s'. Partition #%u path ('%s') verification failed on '%s' (%#x != %#x || %#x != %#x || %#x != %#x || %#x != %#x || %#x != %#x)"),
4267 pImage->pszFilename, idxPartition, pPartDesc->pszRawDevice, pszRawDrive,
4268 dkiDriveInfo.dki_ctype, dkiPartInfo.dki_ctype, dkiDriveInfo.dki_cnum, dkiPartInfo.dki_cnum,
4269 dkiDriveInfo.dki_addr, dkiPartInfo.dki_addr, dkiDriveInfo.dki_unit, dkiPartInfo.dki_unit,
4270 dkiDriveInfo.dki_slave, dkiPartInfo.dki_slave);
4271 else
4272 {
4273 uint64_t cbOffset = 0;
4274 uint64_t cbSize = 0;
4275 dk_gpt *pEfi = NULL;
4276 int idxEfiPart = efi_alloc_and_read(RTFileToNative(hRawPart), &pEfi);
4277 if (idxEfiPart >= 0)
4278 {
4279 if ((uint32_t)dkiPartInfo.dki_partition + 1 == idxPartition)
4280 {
4281 cbOffset = pEfi->efi_parts[idxEfiPart].p_start * pEfi->efi_lbasize;
4282 cbSize = pEfi->efi_parts[idxEfiPart].p_size * pEfi->efi_lbasize;
4283 }
4284 else
4285 rc = vdIfError(pImage->pIfError, VERR_MISMATCH, RT_SRC_POS,
4286 N_("VMDK: Image path: '%s'. Partition #%u number ('%s') verification failed on '%s' (%#x != %#x)"),
4287 pImage->pszFilename, idxPartition, pPartDesc->pszRawDevice, pszRawDrive,
4288 idxPartition, (uint32_t)dkiPartInfo.dki_partition + 1);
4289 efi_free(pEfi);
4290 }
4291 else
4292 {
4293 /*
4294 * Manual says the efi_alloc_and_read returns VT_EINVAL if no EFI partition table found.
4295 * Actually, the function returns any error, e.g. VT_ERROR. Thus, we are not sure, is it
4296 * real error or just no EFI table found. Therefore, let's try to obtain partition info
4297 * using another way. If there is an error, it returns errno which will be handled below.
4298 */
4299
4300 uint32_t numPartition = (uint32_t)dkiPartInfo.dki_partition;
4301 if (numPartition > NDKMAP)
4302 numPartition -= NDKMAP;
4303 if (numPartition != idxPartition)
4304 rc = vdIfError(pImage->pIfError, VERR_MISMATCH, RT_SRC_POS,
4305 N_("VMDK: Image path: '%s'. Partition #%u number ('%s') verification failed on '%s' (%#x != %#x)"),
4306 pImage->pszFilename, idxPartition, pPartDesc->pszRawDevice, pszRawDrive,
4307 idxPartition, numPartition);
4308 else
4309 {
4310 dk_minfo_ext mediaInfo;
4311 if (ioctl(RTFileToNative(hRawPart), DKIOCGMEDIAINFOEXT, (caddr_t)&mediaInfo) == -1)
4312 rc = vdIfError(pImage->pIfError, RTErrConvertFromErrno(errno), RT_SRC_POS,
4313 N_("VMDK: Image path: '%s'. Partition #%u number ('%s') verification failed on '%s'. Can not obtain partition info: %d"),
4314 pImage->pszFilename, idxPartition, pPartDesc->pszRawDevice, pszRawDrive, errno);
4315 else
4316 {
4317 extpart_info extPartInfo;
4318 if (ioctl(RTFileToNative(hRawPart), DKIOCEXTPARTINFO, (caddr_t)&extPartInfo) != -1)
4319 {
4320 cbOffset = (uint64_t)extPartInfo.p_start * mediaInfo.dki_lbsize;
4321 cbSize = (uint64_t)extPartInfo.p_length * mediaInfo.dki_lbsize;
4322 }
4323 else
4324 rc = vdIfError(pImage->pIfError, RTErrConvertFromErrno(errno), RT_SRC_POS,
4325 N_("VMDK: Image path: '%s'. Partition #%u number ('%s') verification failed on '%s'. Can not obtain partition info: %d"),
4326 pImage->pszFilename, idxPartition, pPartDesc->pszRawDevice, pszRawDrive, errno);
4327 }
4328 }
4329 }
4330 if (RT_SUCCESS(rc) && cbOffset != pPartDesc->offStartInVDisk)
4331 rc = vdIfError(pImage->pIfError, VERR_MISMATCH, RT_SRC_POS,
4332 N_("VMDK: Image path: '%s'. Partition #%u path ('%s') verification failed on '%s': Start offset %RI64, expected %RU64"),
4333 pImage->pszFilename, idxPartition, pPartDesc->pszRawDevice, pszRawDrive, cbOffset, pPartDesc->offStartInVDisk);
4334
4335 if (RT_SUCCESS(rc) && cbSize != pPartDesc->cbData)
4336 rc = vdIfError(pImage->pIfError, VERR_MISMATCH, RT_SRC_POS,
4337 N_("VMDK: Image path: '%s'. Partition #%u path ('%s') verification failed on '%s': Size %RI64, expected %RU64"),
4338 pImage->pszFilename, idxPartition, pPartDesc->pszRawDevice, pszRawDrive, cbSize, pPartDesc->cbData);
4339 }
4340
4341#elif defined(RT_OS_DARWIN)
4342 /* Stat the drive get its device number. */
4343 struct stat StDrive;
4344 if (fstat((int)RTFileToNative(hRawDrive), &StDrive) != 0)
4345 rc = vdIfError(pImage->pIfError, RTErrConvertFromErrno(errno), RT_SRC_POS,
4346 N_("VMDK: Image path: '%s'. fstat failed on '%s' (errno=%d)"), pImage->pszFilename, pszRawDrive, errno);
4347 else
4348 {
4349 if (ioctl(RTFileToNative(hRawPart), DKIOCLOCKPHYSICALEXTENTS, NULL) == -1)
4350 rc = vdIfError(pImage->pIfError, RTErrConvertFromErrno(errno), RT_SRC_POS,
4351 N_("VMDK: Image path: '%s'. Partition #%u number ('%s') verification failed on '%s': Unable to lock the partition (errno=%d)"),
4352 pImage->pszFilename, idxPartition, pPartDesc->pszRawDevice, pszRawDrive, errno);
4353 else
4354 {
4355 uint32_t cbBlockSize = 0;
4356 uint64_t cbOffset = 0;
4357 uint64_t cbSize = 0;
4358 if (ioctl(RTFileToNative(hRawPart), DKIOCGETBLOCKSIZE, (caddr_t)&cbBlockSize) == -1)
4359 rc = vdIfError(pImage->pIfError, RTErrConvertFromErrno(errno), RT_SRC_POS,
4360 N_("VMDK: Image path: '%s'. Partition #%u number ('%s') verification failed on '%s': Unable to obtain the sector size of the partition (errno=%d)"),
4361 pImage->pszFilename, idxPartition, pPartDesc->pszRawDevice, pszRawDrive, errno);
4362 else if (ioctl(RTFileToNative(hRawPart), DKIOCGETBASE, (caddr_t)&cbOffset) == -1)
4363 rc = vdIfError(pImage->pIfError, RTErrConvertFromErrno(errno), RT_SRC_POS,
4364 N_("VMDK: Image path: '%s'. Partition #%u number ('%s') verification failed on '%s': Unable to obtain the start offset of the partition (errno=%d)"),
4365 pImage->pszFilename, idxPartition, pPartDesc->pszRawDevice, pszRawDrive, errno);
4366 else if (ioctl(RTFileToNative(hRawPart), DKIOCGETBLOCKCOUNT, (caddr_t)&cbSize) == -1)
4367 rc = vdIfError(pImage->pIfError, RTErrConvertFromErrno(errno), RT_SRC_POS,
4368 N_("VMDK: Image path: '%s'. Partition #%u number ('%s') verification failed on '%s': Unable to obtain the size of the partition (errno=%d)"),
4369 pImage->pszFilename, idxPartition, pPartDesc->pszRawDevice, pszRawDrive, errno);
4370 else
4371 {
4372 cbSize *= (uint64_t)cbBlockSize;
4373 dk_physical_extent_t dkPartExtent = {0};
4374 dkPartExtent.offset = 0;
4375 dkPartExtent.length = cbSize;
4376 if (ioctl(RTFileToNative(hRawPart), DKIOCGETPHYSICALEXTENT, (caddr_t)&dkPartExtent) == -1)
4377 rc = vdIfError(pImage->pIfError, RTErrConvertFromErrno(errno), RT_SRC_POS,
4378 N_("VMDK: Image path: '%s'. Partition #%u number ('%s') verification failed on '%s': Unable to obtain partition info (errno=%d)"),
4379 pImage->pszFilename, idxPartition, pPartDesc->pszRawDevice, pszRawDrive, errno);
4380 else
4381 {
4382 if (dkPartExtent.dev != StDrive.st_rdev)
4383 rc = vdIfError(pImage->pIfError, VERR_MISMATCH, RT_SRC_POS,
4384 N_("VMDK: Image path: '%s'. Partition #%u path ('%s') verification failed on '%s': Drive does not contain the partition"),
4385 pImage->pszFilename, idxPartition, pPartDesc->pszRawDevice, pszRawDrive);
4386 else if (cbOffset != pPartDesc->offStartInVDisk)
4387 rc = vdIfError(pImage->pIfError, VERR_MISMATCH, RT_SRC_POS,
4388 N_("VMDK: Image path: '%s'. Partition #%u path ('%s') verification failed on '%s': Start offset %RU64, expected %RU64"),
4389 pImage->pszFilename, idxPartition, pPartDesc->pszRawDevice, pszRawDrive, cbOffset, pPartDesc->offStartInVDisk);
4390 else if (cbSize != pPartDesc->cbData)
4391 rc = vdIfError(pImage->pIfError, VERR_MISMATCH, RT_SRC_POS,
4392 N_("VMDK: Image path: '%s'. Partition #%u path ('%s') verification failed on '%s': Size %RU64, expected %RU64"),
4393 pImage->pszFilename, idxPartition, pPartDesc->pszRawDevice, pszRawDrive, cbSize, pPartDesc->cbData);
4394 }
4395 }
4396
4397 if (ioctl(RTFileToNative(hRawPart), DKIOCUNLOCKPHYSICALEXTENTS, NULL) == -1)
4398 {
4399 int rc2 = vdIfError(pImage->pIfError, RTErrConvertFromErrno(errno), RT_SRC_POS,
4400 N_("VMDK: Image path: '%s'. Partition #%u number ('%s') verification failed on '%s': Unable to unlock the partition (errno=%d)"),
4401 pImage->pszFilename, idxPartition, pPartDesc->pszRawDevice, pszRawDrive, errno);
4402 if (RT_SUCCESS(rc))
4403 rc = rc2;
4404 }
4405 }
4406 }
4407
4408#else
4409 RT_NOREF(hVol); /* PORTME */
4410 rc = VERR_NOT_SUPPORTED;
4411#endif
4412 if (RT_SUCCESS(rc))
4413 {
4414 /*
4415 * Compare the first 32 sectors of the partition.
4416 *
4417 * This might not be conclusive, but for partitions formatted with the more
4418 * common file systems it should be as they have a superblock copy at or near
4419 * the start of the partition (fat, fat32, ntfs, and ext4 does at least).
4420 */
4421 size_t const cbToCompare = (size_t)RT_MIN(pPartDesc->cbData / cbSector, 32) * cbSector;
4422 uint8_t *pbSector1 = (uint8_t *)RTMemTmpAlloc(cbToCompare * 2);
4423 if (pbSector1 != NULL)
4424 {
4425 uint8_t *pbSector2 = pbSector1 + cbToCompare;
4426
4427 /* Do the comparing, we repeat if it fails and the data might be volatile. */
4428 uint64_t uPrevCrc1 = 0;
4429 uint64_t uPrevCrc2 = 0;
4430 uint32_t cStable = 0;
4431 for (unsigned iTry = 0; iTry < 256; iTry++)
4432 {
4433 rc = RTFileReadAt(hRawDrive, pPartDesc->offStartInVDisk, pbSector1, cbToCompare, NULL);
4434 if (RT_SUCCESS(rc))
4435 {
4436 rc = RTFileReadAt(hRawPart, pPartDesc->offStartInDevice, pbSector2, cbToCompare, NULL);
4437 if (RT_SUCCESS(rc))
4438 {
4439 if (memcmp(pbSector1, pbSector2, cbToCompare) != 0)
4440 {
4441 rc = VERR_MISMATCH;
4442
4443 /* Do data stability checks before repeating: */
4444 uint64_t const uCrc1 = RTCrc64(pbSector1, cbToCompare);
4445 uint64_t const uCrc2 = RTCrc64(pbSector2, cbToCompare);
4446 if ( uPrevCrc1 != uCrc1
4447 || uPrevCrc2 != uCrc2)
4448 cStable = 0;
4449 else if (++cStable > 4)
4450 break;
4451 uPrevCrc1 = uCrc1;
4452 uPrevCrc2 = uCrc2;
4453 continue;
4454 }
4455 rc = VINF_SUCCESS;
4456 }
4457 else
4458 rc = vdIfError(pImage->pIfError, rc, RT_SRC_POS,
4459 N_("VMDK: Image path: '%s'. Error reading %zu bytes from '%s' at offset %RU64 (%Rrc)"),
4460 pImage->pszFilename, cbToCompare, pPartDesc->pszRawDevice, pPartDesc->offStartInDevice, rc);
4461 }
4462 else
4463 rc = vdIfError(pImage->pIfError, rc, RT_SRC_POS,
4464 N_("VMDK: Image path: '%s'. Error reading %zu bytes from '%s' at offset %RU64 (%Rrc)"),
4465 pImage->pszFilename, cbToCompare, pszRawDrive, pPartDesc->offStartInVDisk, rc);
4466 break;
4467 }
4468 if (rc == VERR_MISMATCH)
4469 {
4470 /* Find the first mismatching bytes: */
4471 size_t offMissmatch = 0;
4472 while (offMissmatch < cbToCompare && pbSector1[offMissmatch] == pbSector2[offMissmatch])
4473 offMissmatch++;
4474 int cbSample = (int)RT_MIN(cbToCompare - offMissmatch, 16);
4475
4476 if (cStable > 0)
4477 rc = vdIfError(pImage->pIfError, rc, RT_SRC_POS,
4478 N_("VMDK: Image path: '%s'. Partition #%u path ('%s') verification failed on '%s' (cStable=%d @%#zx: %.*Rhxs vs %.*Rhxs)"),
4479 pImage->pszFilename, idxPartition, pPartDesc->pszRawDevice, pszRawDrive, cStable,
4480 offMissmatch, cbSample, &pbSector1[offMissmatch], cbSample, &pbSector2[offMissmatch]);
4481 else
4482 {
4483 LogRel(("VMDK: Image path: '%s'. Partition #%u path ('%s') verification undecided on '%s' because of unstable data! (@%#zx: %.*Rhxs vs %.*Rhxs)\n",
4484 pImage->pszFilename, idxPartition, pPartDesc->pszRawDevice, pszRawDrive,
4485 offMissmatch, cbSample, &pbSector1[offMissmatch], cbSample, &pbSector2[offMissmatch]));
4486 rc = -rc;
4487 }
4488 }
4489
4490 RTMemTmpFree(pbSector1);
4491 }
4492 else
4493 rc = vdIfError(pImage->pIfError, VERR_NO_TMP_MEMORY, RT_SRC_POS,
4494 N_("VMDK: Image path: '%s'. Failed to allocate %zu bytes for a temporary read buffer\n"),
4495 pImage->pszFilename, cbToCompare * 2);
4496 }
4497 RTFileClose(hRawPart);
4498 return rc;
4499}
4500
4501#ifdef RT_OS_WINDOWS
4502/**
4503 * Construct the device name for the given partition number.
4504 */
4505static int vmdkRawDescWinMakePartitionName(PVMDKIMAGE pImage, const char *pszRawDrive, RTFILE hRawDrive, uint32_t idxPartition,
4506 char **ppszRawPartition)
4507{
4508 int rc = VINF_SUCCESS;
4509 DWORD cbReturned = 0;
4510 STORAGE_DEVICE_NUMBER DevNum;
4511 RT_ZERO(DevNum);
4512 if (DeviceIoControl((HANDLE)RTFileToNative(hRawDrive), IOCTL_STORAGE_GET_DEVICE_NUMBER,
4513 NULL /*pvInBuffer*/, 0 /*cbInBuffer*/, &DevNum, sizeof(DevNum), &cbReturned, NULL /*pOverlapped*/))
4514 RTStrAPrintf(ppszRawPartition, "\\\\.\\Harddisk%uPartition%u", DevNum.DeviceNumber, idxPartition);
4515 else
4516 rc = vdIfError(pImage->pIfError, RTErrConvertFromWin32(GetLastError()), RT_SRC_POS,
4517 N_("VMDK: Image path: '%s'. IOCTL_STORAGE_GET_DEVICE_NUMBER failed on '%s': %u"),
4518 pImage->pszFilename, pszRawDrive, GetLastError());
4519 return rc;
4520}
4521#endif /* RT_OS_WINDOWS */
4522
4523/**
4524 * Worker for vmdkMakeRawDescriptor that adds partition descriptors when the
4525 * 'Partitions' configuration value is present.
4526 *
4527 * @returns VBox status code, error message has been set on failure.
4528 *
4529 * @note Caller is assumed to clean up @a pRawDesc and release
4530 * @a *phVolToRelease.
4531 * @internal
4532 */
4533static int vmdkRawDescDoPartitions(PVMDKIMAGE pImage, RTDVM hVolMgr, PVDISKRAW pRawDesc,
4534 RTFILE hRawDrive, const char *pszRawDrive, uint32_t cbSector,
4535 uint32_t fPartitions, uint32_t fPartitionsReadOnly, bool fRelative,
4536 PRTDVMVOLUME phVolToRelease)
4537{
4538 *phVolToRelease = NIL_RTDVMVOLUME;
4539
4540 /* Check sanity/understanding. */
4541 Assert(fPartitions);
4542 Assert((fPartitions & fPartitionsReadOnly) == fPartitionsReadOnly); /* RO should be a sub-set */
4543
4544 /*
4545 * Allocate on descriptor for each volume up front.
4546 */
4547 uint32_t const cVolumes = RTDvmMapGetValidVolumes(hVolMgr);
4548
4549 PVDISKRAWPARTDESC paPartDescs = NULL;
4550 int rc = vmdkRawDescAppendPartDesc(pImage, pRawDesc, cVolumes, &paPartDescs);
4551 AssertRCReturn(rc, rc);
4552
4553 /*
4554 * Enumerate the partitions (volumes) on the disk and create descriptors for each of them.
4555 */
4556 uint32_t fPartitionsLeft = fPartitions;
4557 RTDVMVOLUME hVol = NIL_RTDVMVOLUME; /* the current volume, needed for getting the next. */
4558 for (uint32_t i = 0; i < cVolumes; i++)
4559 {
4560 /*
4561 * Get the next/first volume and release the current.
4562 */
4563 RTDVMVOLUME hVolNext = NIL_RTDVMVOLUME;
4564 if (i == 0)
4565 rc = RTDvmMapQueryFirstVolume(hVolMgr, &hVolNext);
4566 else
4567 rc = RTDvmMapQueryNextVolume(hVolMgr, hVol, &hVolNext);
4568 if (RT_FAILURE(rc))
4569 return vdIfError(pImage->pIfError, rc, RT_SRC_POS,
4570 N_("VMDK: Image path: '%s'. Volume enumeration failed at volume #%u on '%s' (%Rrc)"),
4571 pImage->pszFilename, i, pszRawDrive, rc);
4572 uint32_t cRefs = RTDvmVolumeRelease(hVol);
4573 Assert(cRefs != UINT32_MAX); RT_NOREF(cRefs);
4574 *phVolToRelease = hVol = hVolNext;
4575
4576 /*
4577 * Depending on the fPartitions selector and associated read-only mask,
4578 * the guest either gets read-write or read-only access (bits set)
4579 * or no access (selector bit clear, access directed to the VMDK).
4580 */
4581 paPartDescs[i].cbData = RTDvmVolumeGetSize(hVol);
4582
4583 uint64_t offVolumeEndIgnored = 0;
4584 rc = RTDvmVolumeQueryRange(hVol, &paPartDescs[i].offStartInVDisk, &offVolumeEndIgnored);
4585 if (RT_FAILURE(rc))
4586 return vdIfError(pImage->pIfError, rc, RT_SRC_POS,
4587 N_("VMDK: Image path: '%s'. Failed to get location of volume #%u on '%s' (%Rrc)"),
4588 pImage->pszFilename, i, pszRawDrive, rc);
4589 Assert(paPartDescs[i].cbData == offVolumeEndIgnored + 1 - paPartDescs[i].offStartInVDisk);
4590
4591 /* Note! The index must match IHostDrivePartition::number. */
4592 uint32_t idxPartition = RTDvmVolumeGetIndex(hVol, RTDVMVOLIDX_HOST);
4593 if ( idxPartition < 32
4594 && (fPartitions & RT_BIT_32(idxPartition)))
4595 {
4596 fPartitionsLeft &= ~RT_BIT_32(idxPartition);
4597 if (fPartitionsReadOnly & RT_BIT_32(idxPartition))
4598 paPartDescs[i].uFlags |= VDISKRAW_READONLY;
4599
4600 if (!fRelative)
4601 {
4602 /*
4603 * Accessing the drive thru the main device node (pRawDesc->pszRawDisk).
4604 */
4605 paPartDescs[i].offStartInDevice = paPartDescs[i].offStartInVDisk;
4606 paPartDescs[i].pszRawDevice = RTStrDup(pszRawDrive);
4607 AssertPtrReturn(paPartDescs[i].pszRawDevice, VERR_NO_STR_MEMORY);
4608 }
4609 else
4610 {
4611 /*
4612 * Relative means access the partition data via the device node for that
4613 * partition, allowing the sysadmin/OS to allow a user access to individual
4614 * partitions without necessarily being able to compromise the host OS.
4615 * Obviously, the creation of the VMDK requires read access to the main
4616 * device node for the drive, but that's a one-time thing and can be done
4617 * by the sysadmin. Here data starts at offset zero in the device node.
4618 */
4619 paPartDescs[i].offStartInDevice = 0;
4620
4621#if defined(RT_OS_DARWIN) || defined(RT_OS_FREEBSD)
4622 /* /dev/rdisk1 -> /dev/rdisk1s2 (s=slice) */
4623 RTStrAPrintf(&paPartDescs[i].pszRawDevice, "%ss%u", pszRawDrive, idxPartition);
4624#elif defined(RT_OS_LINUX)
4625 /* Two naming schemes here: /dev/nvme0n1 -> /dev/nvme0n1p1; /dev/sda -> /dev/sda1 */
4626 RTStrAPrintf(&paPartDescs[i].pszRawDevice,
4627 RT_C_IS_DIGIT(pszRawDrive[strlen(pszRawDrive) - 1]) ? "%sp%u" : "%s%u", pszRawDrive, idxPartition);
4628#elif defined(RT_OS_WINDOWS)
4629 rc = vmdkRawDescWinMakePartitionName(pImage, pszRawDrive, hRawDrive, idxPartition, &paPartDescs[i].pszRawDevice);
4630 AssertRCReturn(rc, rc);
4631#elif defined(RT_OS_SOLARIS)
4632 if (pRawDesc->enmPartitioningType == VDISKPARTTYPE_MBR)
4633 {
4634 /*
4635 * MBR partitions have device nodes in form /dev/(r)dsk/cXtYdZpK
4636 * where X is the controller,
4637 * Y is target (SCSI device number),
4638 * Z is disk number,
4639 * K is partition number,
4640 * where p0 is the whole disk
4641 * p1-pN are the partitions of the disk
4642 */
4643 const char *pszRawDrivePath = pszRawDrive;
4644 char szDrivePath[RTPATH_MAX];
4645 size_t cbRawDrive = strlen(pszRawDrive);
4646 if ( cbRawDrive > 1 && strcmp(&pszRawDrive[cbRawDrive - 2], "p0") == 0)
4647 {
4648 memcpy(szDrivePath, pszRawDrive, cbRawDrive - 2);
4649 szDrivePath[cbRawDrive - 2] = '\0';
4650 pszRawDrivePath = szDrivePath;
4651 }
4652 RTStrAPrintf(&paPartDescs[i].pszRawDevice, "%sp%u", pszRawDrivePath, idxPartition);
4653 }
4654 else /* GPT */
4655 {
4656 /*
4657 * GPT partitions have device nodes in form /dev/(r)dsk/cXtYdZsK
4658 * where X is the controller,
4659 * Y is target (SCSI device number),
4660 * Z is disk number,
4661 * K is partition number, zero based. Can be only from 0 to 6.
4662 * Thus, only partitions numbered 0 through 6 have device nodes.
4663 */
4664 if (idxPartition > 7)
4665 return vdIfError(pImage->pIfError, VERR_INVALID_PARAMETER, RT_SRC_POS,
4666 N_("VMDK: Image path: '%s'. the partition #%u on '%s' has no device node and can not be specified with 'Relative' property"),
4667 pImage->pszFilename, idxPartition, pszRawDrive);
4668 RTStrAPrintf(&paPartDescs[i].pszRawDevice, "%ss%u", pszRawDrive, idxPartition - 1);
4669 }
4670#else
4671 AssertFailedReturn(VERR_INTERNAL_ERROR_4); /* The option parsing code should have prevented this - PORTME */
4672#endif
4673 AssertPtrReturn(paPartDescs[i].pszRawDevice, VERR_NO_STR_MEMORY);
4674
4675 rc = vmdkRawDescVerifyPartitionPath(pImage, &paPartDescs[i], idxPartition, pszRawDrive, hRawDrive, cbSector, hVol);
4676 AssertRCReturn(rc, rc);
4677 }
4678 }
4679 else
4680 {
4681 /* Not accessible to the guest. */
4682 paPartDescs[i].offStartInDevice = 0;
4683 paPartDescs[i].pszRawDevice = NULL;
4684 }
4685 } /* for each volume */
4686
4687 RTDvmVolumeRelease(hVol);
4688 *phVolToRelease = NIL_RTDVMVOLUME;
4689
4690 /*
4691 * Check that we found all the partitions the user selected.
4692 */
4693 if (fPartitionsLeft)
4694 {
4695 char szLeft[3 * sizeof(fPartitions) * 8];
4696 size_t cchLeft = 0;
4697 for (unsigned i = 0; i < sizeof(fPartitions) * 8; i++)
4698 if (fPartitionsLeft & RT_BIT_32(i))
4699 cchLeft += RTStrPrintf(&szLeft[cchLeft], sizeof(szLeft) - cchLeft, cchLeft ? "%u" : ",%u", i);
4700 return vdIfError(pImage->pIfError, VERR_INVALID_PARAMETER, RT_SRC_POS,
4701 N_("VMDK: Image path: '%s'. Not all the specified partitions for drive '%s' was found: %s"),
4702 pImage->pszFilename, pszRawDrive, szLeft);
4703 }
4704
4705 return VINF_SUCCESS;
4706}
4707
4708/**
4709 * Worker for vmdkMakeRawDescriptor that adds partition descriptors with copies
4710 * of the partition tables and associated padding areas when the 'Partitions'
4711 * configuration value is present.
4712 *
4713 * The guest is not allowed access to the partition tables, however it needs
4714 * them to be able to access the drive. So, create descriptors for each of the
4715 * tables and attach the current disk content. vmdkCreateRawImage() will later
4716 * write the content to the VMDK. Any changes the guest later makes to the
4717 * partition tables will then go to the VMDK copy, rather than the host drive.
4718 *
4719 * @returns VBox status code, error message has been set on failure.
4720 *
4721 * @note Caller is assumed to clean up @a pRawDesc
4722 * @internal
4723 */
4724static int vmdkRawDescDoCopyPartitionTables(PVMDKIMAGE pImage, RTDVM hVolMgr, PVDISKRAW pRawDesc,
4725 const char *pszRawDrive, RTFILE hRawDrive, void *pvBootSector, size_t cbBootSector)
4726{
4727 /*
4728 * Query the locations.
4729 */
4730 /* Determin how many locations there are: */
4731 size_t cLocations = 0;
4732 int rc = RTDvmMapQueryTableLocations(hVolMgr, RTDVMMAPQTABLOC_F_INCLUDE_LEGACY, NULL, 0, &cLocations);
4733 if (rc != VERR_BUFFER_OVERFLOW)
4734 return vdIfError(pImage->pIfError, rc, RT_SRC_POS,
4735 N_("VMDK: Image path: '%s'. RTDvmMapQueryTableLocations failed on '%s' (%Rrc)"),
4736 pImage->pszFilename, pszRawDrive, rc);
4737 AssertReturn(cLocations > 0 && cLocations < _16M, VERR_INTERNAL_ERROR_5);
4738
4739 /* We can allocate the partition descriptors here to save an intentation level. */
4740 PVDISKRAWPARTDESC paPartDescs = NULL;
4741 rc = vmdkRawDescAppendPartDesc(pImage, pRawDesc, (uint32_t)cLocations, &paPartDescs);
4742 AssertRCReturn(rc, rc);
4743
4744 /* Allocate the result table and repeat the location table query: */
4745 PRTDVMTABLELOCATION paLocations = (PRTDVMTABLELOCATION)RTMemAllocZ(sizeof(paLocations[0]) * cLocations);
4746 if (!paLocations)
4747 return vdIfError(pImage->pIfError, VERR_NO_MEMORY, RT_SRC_POS, N_("VMDK: Image path: '%s'. Failed to allocate %zu bytes"),
4748 pImage->pszFilename, sizeof(paLocations[0]) * cLocations);
4749 rc = RTDvmMapQueryTableLocations(hVolMgr, RTDVMMAPQTABLOC_F_INCLUDE_LEGACY, paLocations, cLocations, NULL);
4750 if (RT_SUCCESS(rc))
4751 {
4752 /*
4753 * Translate them into descriptors.
4754 *
4755 * We restrict the amount of partition alignment padding to 4MiB as more
4756 * will just be a waste of space. The use case for including the padding
4757 * are older boot loaders and boot manager (including one by a team member)
4758 * that put data and code in the 62 sectors between the MBR and the first
4759 * partition (total of 63). Later CHS was abandond and partition started
4760 * being aligned on power of two sector boundraries (typically 64KiB or
4761 * 1MiB depending on the media size).
4762 */
4763 for (size_t i = 0; i < cLocations && RT_SUCCESS(rc); i++)
4764 {
4765 Assert(paLocations[i].cb > 0);
4766 if (paLocations[i].cb <= _64M)
4767 {
4768 /* Create the partition descriptor entry: */
4769 //paPartDescs[i].pszRawDevice = NULL;
4770 //paPartDescs[i].offStartInDevice = 0;
4771 //paPartDescs[i].uFlags = 0;
4772 paPartDescs[i].offStartInVDisk = paLocations[i].off;
4773 paPartDescs[i].cbData = paLocations[i].cb;
4774 if (paPartDescs[i].cbData < _4M)
4775 paPartDescs[i].cbData = RT_MIN(paPartDescs[i].cbData + paLocations[i].cbPadding, _4M);
4776 paPartDescs[i].pvPartitionData = RTMemAllocZ((size_t)paPartDescs[i].cbData);
4777 if (paPartDescs[i].pvPartitionData)
4778 {
4779 /* Read the content from the drive: */
4780 rc = RTFileReadAt(hRawDrive, paPartDescs[i].offStartInVDisk, paPartDescs[i].pvPartitionData,
4781 (size_t)paPartDescs[i].cbData, NULL);
4782 if (RT_SUCCESS(rc))
4783 {
4784 /* Do we have custom boot sector code? */
4785 if (pvBootSector && cbBootSector && paPartDescs[i].offStartInVDisk == 0)
4786 {
4787 /* Note! Old code used to quietly drop the bootsector if it was considered too big.
4788 Instead we fail as we weren't able to do what the user requested us to do.
4789 Better if the user knows than starts questioning why the guest isn't
4790 booting as expected. */
4791 if (cbBootSector <= paPartDescs[i].cbData)
4792 memcpy(paPartDescs[i].pvPartitionData, pvBootSector, cbBootSector);
4793 else
4794 rc = vdIfError(pImage->pIfError, VERR_TOO_MUCH_DATA, RT_SRC_POS,
4795 N_("VMDK: Image path: '%s'. The custom boot sector is too big: %zu bytes, %RU64 bytes available"),
4796 pImage->pszFilename, cbBootSector, paPartDescs[i].cbData);
4797 }
4798 }
4799 else
4800 rc = vdIfError(pImage->pIfError, rc, RT_SRC_POS,
4801 N_("VMDK: Image path: '%s'. Failed to read partition at off %RU64 length %zu from '%s' (%Rrc)"),
4802 pImage->pszFilename, paPartDescs[i].offStartInVDisk,
4803 (size_t)paPartDescs[i].cbData, pszRawDrive, rc);
4804 }
4805 else
4806 rc = vdIfError(pImage->pIfError, rc, RT_SRC_POS,
4807 N_("VMDK: Image path: '%s'. Failed to allocate %zu bytes for copying the partition table at off %RU64"),
4808 pImage->pszFilename, (size_t)paPartDescs[i].cbData, paPartDescs[i].offStartInVDisk);
4809 }
4810 else
4811 rc = vdIfError(pImage->pIfError, VERR_TOO_MUCH_DATA, RT_SRC_POS,
4812 N_("VMDK: Image path: '%s'. Partition table #%u at offset %RU64 in '%s' is to big: %RU64 bytes"),
4813 pImage->pszFilename, i, paLocations[i].off, pszRawDrive, paLocations[i].cb);
4814 }
4815 }
4816 else
4817 rc = vdIfError(pImage->pIfError, rc, RT_SRC_POS,
4818 N_("VMDK: Image path: '%s'. RTDvmMapQueryTableLocations failed on '%s' (%Rrc)"),
4819 pImage->pszFilename, pszRawDrive, rc);
4820 RTMemFree(paLocations);
4821 return rc;
4822}
4823
4824/**
4825 * Opens the volume manager for the raw drive when in selected-partition mode.
4826 *
4827 * @param pImage The VMDK image (for errors).
4828 * @param hRawDrive The raw drive handle.
4829 * @param pszRawDrive The raw drive device path (for errors).
4830 * @param cbSector The sector size.
4831 * @param phVolMgr Where to return the handle to the volume manager on
4832 * success.
4833 * @returns VBox status code, errors have been reported.
4834 * @internal
4835 */
4836static int vmdkRawDescOpenVolMgr(PVMDKIMAGE pImage, RTFILE hRawDrive, const char *pszRawDrive, uint32_t cbSector, PRTDVM phVolMgr)
4837{
4838 *phVolMgr = NIL_RTDVM;
4839
4840 RTVFSFILE hVfsFile = NIL_RTVFSFILE;
4841 int rc = RTVfsFileFromRTFile(hRawDrive, RTFILE_O_READ | RTFILE_O_OPEN | RTFILE_O_DENY_NONE, true /*fLeaveOpen*/, &hVfsFile);
4842 if (RT_FAILURE(rc))
4843 return vdIfError(pImage->pIfError, rc, RT_SRC_POS,
4844 N_("VMDK: Image path: '%s'. RTVfsFileFromRTFile failed for '%s' handle (%Rrc)"),
4845 pImage->pszFilename, pszRawDrive, rc);
4846
4847 RTDVM hVolMgr = NIL_RTDVM;
4848 rc = RTDvmCreate(&hVolMgr, hVfsFile, cbSector, 0 /*fFlags*/);
4849
4850 RTVfsFileRelease(hVfsFile);
4851
4852 if (RT_FAILURE(rc))
4853 return vdIfError(pImage->pIfError, rc, RT_SRC_POS,
4854 N_("VMDK: Image path: '%s'. Failed to create volume manager instance for '%s' (%Rrc)"),
4855 pImage->pszFilename, pszRawDrive, rc);
4856
4857 rc = RTDvmMapOpen(hVolMgr);
4858 if (RT_SUCCESS(rc))
4859 {
4860 *phVolMgr = hVolMgr;
4861 return VINF_SUCCESS;
4862 }
4863 RTDvmRelease(hVolMgr);
4864 return vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: Image path: '%s'. RTDvmMapOpen failed for '%s' (%Rrc)"),
4865 pImage->pszFilename, pszRawDrive, rc);
4866}
4867
4868/**
4869 * Opens the raw drive device and get the sizes for it.
4870 *
4871 * @param pImage The image (for error reporting).
4872 * @param pszRawDrive The device/whatever to open.
4873 * @param phRawDrive Where to return the file handle.
4874 * @param pcbRawDrive Where to return the size.
4875 * @param pcbSector Where to return the sector size.
4876 * @returns IPRT status code, errors have been reported.
4877 * @internal
4878 */
4879static int vmkdRawDescOpenDevice(PVMDKIMAGE pImage, const char *pszRawDrive,
4880 PRTFILE phRawDrive, uint64_t *pcbRawDrive, uint32_t *pcbSector)
4881{
4882 /*
4883 * Open the device for the raw drive.
4884 */
4885 RTFILE hRawDrive = NIL_RTFILE;
4886 int rc = RTFileOpen(&hRawDrive, pszRawDrive, RTFILE_O_READ | RTFILE_O_OPEN | RTFILE_O_DENY_NONE);
4887 if (RT_FAILURE(rc))
4888 return vdIfError(pImage->pIfError, rc, RT_SRC_POS,
4889 N_("VMDK: Image path: '%s'. Failed to open the raw drive '%s' for reading (%Rrc)"),
4890 pImage->pszFilename, pszRawDrive, rc);
4891
4892 /*
4893 * Get the sector size.
4894 */
4895 uint32_t cbSector = 0;
4896 rc = RTFileQuerySectorSize(hRawDrive, &cbSector);
4897 if (RT_SUCCESS(rc))
4898 {
4899 /* sanity checks */
4900 if ( cbSector >= 512
4901 && cbSector <= _64K
4902 && RT_IS_POWER_OF_TWO(cbSector))
4903 {
4904 /*
4905 * Get the size.
4906 */
4907 uint64_t cbRawDrive = 0;
4908 rc = RTFileQuerySize(hRawDrive, &cbRawDrive);
4909 if (RT_SUCCESS(rc))
4910 {
4911 /* Check whether cbSize is actually sensible. */
4912 if (cbRawDrive > cbSector && (cbRawDrive % cbSector) == 0)
4913 {
4914 *phRawDrive = hRawDrive;
4915 *pcbRawDrive = cbRawDrive;
4916 *pcbSector = cbSector;
4917 return VINF_SUCCESS;
4918 }
4919 rc = vdIfError(pImage->pIfError, VERR_INVALID_PARAMETER, RT_SRC_POS,
4920 N_("VMDK: Image path: '%s'. Got a bogus size for the raw drive '%s': %RU64 (sector size %u)"),
4921 pImage->pszFilename, pszRawDrive, cbRawDrive, cbSector);
4922 }
4923 else
4924 rc = vdIfError(pImage->pIfError, rc, RT_SRC_POS,
4925 N_("VMDK: Image path: '%s'. Failed to query size of the drive '%s' (%Rrc)"),
4926 pImage->pszFilename, pszRawDrive, rc);
4927 }
4928 else
4929 rc = vdIfError(pImage->pIfError, VERR_OUT_OF_RANGE, RT_SRC_POS,
4930 N_("VMDK: Image path: '%s'. Unsupported sector size for '%s': %u (%#x)"),
4931 pImage->pszFilename, pszRawDrive, cbSector, cbSector);
4932 }
4933 else
4934 rc = vdIfError(pImage->pIfError, rc, RT_SRC_POS,
4935 N_("VMDK: Image path: '%s'. Failed to get the sector size for '%s' (%Rrc)"),
4936 pImage->pszFilename, pszRawDrive, rc);
4937 RTFileClose(hRawDrive);
4938 return rc;
4939}
4940
4941/**
4942 * Reads the raw disk configuration, leaving initalization and cleanup to the
4943 * caller (regardless of return status).
4944 *
4945 * @returns VBox status code, errors properly reported.
4946 * @internal
4947 */
4948static int vmdkRawDescParseConfig(PVMDKIMAGE pImage, char **ppszRawDrive,
4949 uint32_t *pfPartitions, uint32_t *pfPartitionsReadOnly,
4950 void **ppvBootSector, size_t *pcbBootSector, bool *pfRelative,
4951 char **ppszFreeMe)
4952{
4953 PVDINTERFACECONFIG pImgCfg = VDIfConfigGet(pImage->pVDIfsImage);
4954 if (!pImgCfg)
4955 return vdIfError(pImage->pIfError, VERR_INVALID_PARAMETER, RT_SRC_POS,
4956 N_("VMDK: Image path: '%s'. Getting config interface failed"), pImage->pszFilename);
4957
4958 /*
4959 * RawDrive = path
4960 */
4961 int rc = VDCFGQueryStringAlloc(pImgCfg, "RawDrive", ppszRawDrive);
4962 if (RT_FAILURE(rc))
4963 return vdIfError(pImage->pIfError, rc, RT_SRC_POS,
4964 N_("VMDK: Image path: '%s'. Getting 'RawDrive' configuration failed (%Rrc)"), pImage->pszFilename, rc);
4965 AssertPtrReturn(*ppszRawDrive, VERR_INTERNAL_ERROR_3);
4966
4967 /*
4968 * Partitions=n[r][,...]
4969 */
4970 uint32_t const cMaxPartitionBits = sizeof(*pfPartitions) * 8 /* ASSUMES 8 bits per char */;
4971 *pfPartitions = *pfPartitionsReadOnly = 0;
4972
4973 rc = VDCFGQueryStringAlloc(pImgCfg, "Partitions", ppszFreeMe);
4974 if (RT_SUCCESS(rc))
4975 {
4976 char *psz = *ppszFreeMe;
4977 while (*psz != '\0')
4978 {
4979 char *pszNext;
4980 uint32_t u32;
4981 rc = RTStrToUInt32Ex(psz, &pszNext, 0, &u32);
4982 if (rc == VWRN_NUMBER_TOO_BIG || rc == VWRN_NEGATIVE_UNSIGNED)
4983 rc = -rc;
4984 if (RT_FAILURE(rc))
4985 return vdIfError(pImage->pIfError, VERR_INVALID_PARAMETER, RT_SRC_POS,
4986 N_("VMDK: Image path: '%s'. Parsing 'Partitions' config value failed. Incorrect value (%Rrc): %s"),
4987 pImage->pszFilename, rc, psz);
4988 if (u32 >= cMaxPartitionBits)
4989 return vdIfError(pImage->pIfError, VERR_INVALID_PARAMETER, RT_SRC_POS,
4990 N_("VMDK: Image path: '%s'. 'Partitions' config sub-value out of range: %RU32, max %RU32"),
4991 pImage->pszFilename, u32, cMaxPartitionBits);
4992 *pfPartitions |= RT_BIT_32(u32);
4993 psz = pszNext;
4994 if (*psz == 'r')
4995 {
4996 *pfPartitionsReadOnly |= RT_BIT_32(u32);
4997 psz++;
4998 }
4999 if (*psz == ',')
5000 psz++;
5001 else if (*psz != '\0')
5002 return vdIfError(pImage->pIfError, VERR_INVALID_PARAMETER, RT_SRC_POS,
5003 N_("VMDK: Image path: '%s'. Malformed 'Partitions' config value, expected separator: %s"),
5004 pImage->pszFilename, psz);
5005 }
5006
5007 RTStrFree(*ppszFreeMe);
5008 *ppszFreeMe = NULL;
5009 }
5010 else if (rc != VERR_CFGM_VALUE_NOT_FOUND)
5011 return vdIfError(pImage->pIfError, rc, RT_SRC_POS,
5012 N_("VMDK: Image path: '%s'. Getting 'Partitions' configuration failed (%Rrc)"), pImage->pszFilename, rc);
5013
5014 /*
5015 * BootSector=base64
5016 */
5017 rc = VDCFGQueryStringAlloc(pImgCfg, "BootSector", ppszFreeMe);
5018 if (RT_SUCCESS(rc))
5019 {
5020 ssize_t cbBootSector = RTBase64DecodedSize(*ppszFreeMe, NULL);
5021 if (cbBootSector < 0)
5022 return vdIfError(pImage->pIfError, VERR_INVALID_BASE64_ENCODING, RT_SRC_POS,
5023 N_("VMDK: Image path: '%s'. BASE64 decoding failed on the custom bootsector for '%s'"),
5024 pImage->pszFilename, *ppszRawDrive);
5025 if (cbBootSector == 0)
5026 return vdIfError(pImage->pIfError, VERR_INVALID_PARAMETER, RT_SRC_POS,
5027 N_("VMDK: Image path: '%s'. Custom bootsector for '%s' is zero bytes big"),
5028 pImage->pszFilename, *ppszRawDrive);
5029 if (cbBootSector > _4M) /* this is just a preliminary max */
5030 return vdIfError(pImage->pIfError, VERR_INVALID_PARAMETER, RT_SRC_POS,
5031 N_("VMDK: Image path: '%s'. Custom bootsector for '%s' is way too big: %zu bytes, max 4MB"),
5032 pImage->pszFilename, *ppszRawDrive, cbBootSector);
5033
5034 /* Refuse the boot sector if whole-drive. This used to be done quietly,
5035 however, bird disagrees and thinks the user should be told that what
5036 he/she/it tries to do isn't possible. There should be less head
5037 scratching this way when the guest doesn't do the expected thing. */
5038 if (!*pfPartitions)
5039 return vdIfError(pImage->pIfError, VERR_INVALID_PARAMETER, RT_SRC_POS,
5040 N_("VMDK: Image path: '%s'. Custom bootsector for '%s' is not supported for whole-drive configurations, only when selecting partitions"),
5041 pImage->pszFilename, *ppszRawDrive);
5042
5043 *pcbBootSector = (size_t)cbBootSector;
5044 *ppvBootSector = RTMemAlloc((size_t)cbBootSector);
5045 if (!*ppvBootSector)
5046 return vdIfError(pImage->pIfError, VERR_NO_MEMORY, RT_SRC_POS,
5047 N_("VMDK: Image path: '%s'. Failed to allocate %zd bytes for the custom bootsector for '%s'"),
5048 pImage->pszFilename, cbBootSector, *ppszRawDrive);
5049
5050 rc = RTBase64Decode(*ppszFreeMe, *ppvBootSector, cbBootSector, NULL /*pcbActual*/, NULL /*ppszEnd*/);
5051 if (RT_FAILURE(rc))
5052 return vdIfError(pImage->pIfError, VERR_NO_MEMORY, RT_SRC_POS,
5053 N_("VMDK: Image path: '%s'. Base64 decoding of the custom boot sector for '%s' failed (%Rrc)"),
5054 pImage->pszFilename, *ppszRawDrive, rc);
5055
5056 RTStrFree(*ppszFreeMe);
5057 *ppszFreeMe = NULL;
5058 }
5059 else if (rc != VERR_CFGM_VALUE_NOT_FOUND)
5060 return vdIfError(pImage->pIfError, rc, RT_SRC_POS,
5061 N_("VMDK: Image path: '%s'. Getting 'BootSector' configuration failed (%Rrc)"), pImage->pszFilename, rc);
5062
5063 /*
5064 * Relative=0/1
5065 */
5066 *pfRelative = false;
5067 rc = VDCFGQueryBool(pImgCfg, "Relative", pfRelative);
5068 if (RT_SUCCESS(rc))
5069 {
5070 if (!*pfPartitions && *pfRelative != false)
5071 return vdIfError(pImage->pIfError, VERR_INVALID_PARAMETER, RT_SRC_POS,
5072 N_("VMDK: Image path: '%s'. The 'Relative' option is not supported for whole-drive configurations, only when selecting partitions"),
5073 pImage->pszFilename);
5074#if !defined(RT_OS_DARWIN) && !defined(RT_OS_LINUX) && !defined(RT_OS_FREEBSD) && !defined(RT_OS_WINDOWS) && !defined(RT_OS_SOLARIS) /* PORTME */
5075 if (*pfRelative == true)
5076 return vdIfError(pImage->pIfError, VERR_INVALID_PARAMETER, RT_SRC_POS,
5077 N_("VMDK: Image path: '%s'. The 'Relative' option is not supported on this host OS"),
5078 pImage->pszFilename);
5079#endif
5080 }
5081 else if (rc != VERR_CFGM_VALUE_NOT_FOUND)
5082 return vdIfError(pImage->pIfError, rc, RT_SRC_POS,
5083 N_("VMDK: Image path: '%s'. Getting 'Relative' configuration failed (%Rrc)"), pImage->pszFilename, rc);
5084 else
5085#ifdef RT_OS_DARWIN /* different default on macOS, see ticketref:1461 (comment 20). */
5086 *pfRelative = true;
5087#else
5088 *pfRelative = false;
5089#endif
5090
5091 return VINF_SUCCESS;
5092}
5093
5094/**
5095 * Creates a raw drive (nee disk) descriptor.
5096 *
5097 * This was originally done in VBoxInternalManage.cpp, but was copied (not move)
5098 * here much later. That's one of the reasons why we produce a descriptor just
5099 * like it does, rather than mixing directly into the vmdkCreateRawImage code.
5100 *
5101 * @returns VBox status code.
5102 * @param pImage The image.
5103 * @param ppRaw Where to return the raw drive descriptor. Caller must
5104 * free it using vmdkRawDescFree regardless of the status
5105 * code.
5106 * @internal
5107 */
5108static int vmdkMakeRawDescriptor(PVMDKIMAGE pImage, PVDISKRAW *ppRaw)
5109{
5110 /* Make sure it's NULL. */
5111 *ppRaw = NULL;
5112
5113 /*
5114 * Read the configuration.
5115 */
5116 char *pszRawDrive = NULL;
5117 uint32_t fPartitions = 0; /* zero if whole-drive */
5118 uint32_t fPartitionsReadOnly = 0; /* (subset of fPartitions) */
5119 void *pvBootSector = NULL;
5120 size_t cbBootSector = 0;
5121 bool fRelative = false;
5122 char *pszFreeMe = NULL; /* lazy bird cleanup. */
5123 int rc = vmdkRawDescParseConfig(pImage, &pszRawDrive, &fPartitions, &fPartitionsReadOnly,
5124 &pvBootSector, &cbBootSector, &fRelative, &pszFreeMe);
5125 RTStrFree(pszFreeMe);
5126 if (RT_SUCCESS(rc))
5127 {
5128 /*
5129 * Open the device, getting the sector size and drive size.
5130 */
5131 uint64_t cbSize = 0;
5132 uint32_t cbSector = 0;
5133 RTFILE hRawDrive = NIL_RTFILE;
5134 rc = vmkdRawDescOpenDevice(pImage, pszRawDrive, &hRawDrive, &cbSize, &cbSector);
5135 if (RT_SUCCESS(rc))
5136 {
5137 pImage->cbSize = cbSize;
5138 /*
5139 * Create the raw-drive descriptor
5140 */
5141 PVDISKRAW pRawDesc = (PVDISKRAW)RTMemAllocZ(sizeof(*pRawDesc));
5142 if (pRawDesc)
5143 {
5144 pRawDesc->szSignature[0] = 'R';
5145 pRawDesc->szSignature[1] = 'A';
5146 pRawDesc->szSignature[2] = 'W';
5147 //pRawDesc->szSignature[3] = '\0';
5148 if (!fPartitions)
5149 {
5150 /*
5151 * It's simple for when doing the whole drive.
5152 */
5153 pRawDesc->uFlags = VDISKRAW_DISK;
5154 rc = RTStrDupEx(&pRawDesc->pszRawDisk, pszRawDrive);
5155 }
5156 else
5157 {
5158 /*
5159 * In selected partitions mode we've got a lot more work ahead of us.
5160 */
5161 pRawDesc->uFlags = VDISKRAW_NORMAL;
5162 //pRawDesc->pszRawDisk = NULL;
5163 //pRawDesc->cPartDescs = 0;
5164 //pRawDesc->pPartDescs = NULL;
5165
5166 /* We need to parse the partition map to complete the descriptor: */
5167 RTDVM hVolMgr = NIL_RTDVM;
5168 rc = vmdkRawDescOpenVolMgr(pImage, hRawDrive, pszRawDrive, cbSector, &hVolMgr);
5169 if (RT_SUCCESS(rc))
5170 {
5171 RTDVMFORMATTYPE enmFormatType = RTDvmMapGetFormatType(hVolMgr);
5172 if ( enmFormatType == RTDVMFORMATTYPE_MBR
5173 || enmFormatType == RTDVMFORMATTYPE_GPT)
5174 {
5175 pRawDesc->enmPartitioningType = enmFormatType == RTDVMFORMATTYPE_MBR
5176 ? VDISKPARTTYPE_MBR : VDISKPARTTYPE_GPT;
5177
5178 /* Add copies of the partition tables: */
5179 rc = vmdkRawDescDoCopyPartitionTables(pImage, hVolMgr, pRawDesc, pszRawDrive, hRawDrive,
5180 pvBootSector, cbBootSector);
5181 if (RT_SUCCESS(rc))
5182 {
5183 /* Add descriptors for the partitions/volumes, indicating which
5184 should be accessible and how to access them: */
5185 RTDVMVOLUME hVolRelease = NIL_RTDVMVOLUME;
5186 rc = vmdkRawDescDoPartitions(pImage, hVolMgr, pRawDesc, hRawDrive, pszRawDrive, cbSector,
5187 fPartitions, fPartitionsReadOnly, fRelative, &hVolRelease);
5188 RTDvmVolumeRelease(hVolRelease);
5189
5190 /* Finally, sort the partition and check consistency (overlaps, etc): */
5191 if (RT_SUCCESS(rc))
5192 rc = vmdkRawDescPostProcessPartitions(pImage, pRawDesc, cbSize);
5193 }
5194 }
5195 else
5196 rc = vdIfError(pImage->pIfError, VERR_NOT_SUPPORTED, RT_SRC_POS,
5197 N_("VMDK: Image path: '%s'. Unsupported partitioning for the disk '%s': %s"),
5198 pImage->pszFilename, pszRawDrive, RTDvmMapGetFormatType(hVolMgr));
5199 RTDvmRelease(hVolMgr);
5200 }
5201 }
5202 if (RT_SUCCESS(rc))
5203 {
5204 /*
5205 * We succeeded.
5206 */
5207 *ppRaw = pRawDesc;
5208 Log(("vmdkMakeRawDescriptor: fFlags=%#x enmPartitioningType=%d cPartDescs=%u pszRawDisk=%s\n",
5209 pRawDesc->uFlags, pRawDesc->enmPartitioningType, pRawDesc->cPartDescs, pRawDesc->pszRawDisk));
5210 if (pRawDesc->cPartDescs)
5211 {
5212 Log(("# VMDK offset Length Device offset PartDataPtr Device\n"));
5213 for (uint32_t i = 0; i < pRawDesc->cPartDescs; i++)
5214 Log(("%2u %14RU64 %14RU64 %14RU64 %#18p %s\n", i, pRawDesc->pPartDescs[i].offStartInVDisk,
5215 pRawDesc->pPartDescs[i].cbData, pRawDesc->pPartDescs[i].offStartInDevice,
5216 pRawDesc->pPartDescs[i].pvPartitionData, pRawDesc->pPartDescs[i].pszRawDevice));
5217 }
5218 }
5219 else
5220 vmdkRawDescFree(pRawDesc);
5221 }
5222 else
5223 rc = vdIfError(pImage->pIfError, VERR_NOT_SUPPORTED, RT_SRC_POS,
5224 N_("VMDK: Image path: '%s'. Failed to allocate %u bytes for the raw drive descriptor"),
5225 pImage->pszFilename, sizeof(*pRawDesc));
5226 RTFileClose(hRawDrive);
5227 }
5228 }
5229 RTStrFree(pszRawDrive);
5230 RTMemFree(pvBootSector);
5231 return rc;
5232}
5233
5234/**
5235 * Internal: create VMDK images for raw disk/partition access.
5236 */
5237static int vmdkCreateRawImage(PVMDKIMAGE pImage, const PVDISKRAW pRaw,
5238 uint64_t cbSize)
5239{
5240 int rc = VINF_SUCCESS;
5241 PVMDKEXTENT pExtent;
5242
5243 if (pRaw->uFlags & VDISKRAW_DISK)
5244 {
5245 /* Full raw disk access. This requires setting up a descriptor
5246 * file and open the (flat) raw disk. */
5247 rc = vmdkCreateExtents(pImage, 1);
5248 if (RT_FAILURE(rc))
5249 return vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: could not create new extent list in '%s'"), pImage->pszFilename);
5250 pExtent = &pImage->pExtents[0];
5251 /* Create raw disk descriptor file. */
5252 rc = vmdkFileOpen(pImage, &pImage->pFile, NULL, pImage->pszFilename,
5253 VDOpenFlagsToFileOpenFlags(pImage->uOpenFlags,
5254 true /* fCreate */));
5255 if (RT_FAILURE(rc))
5256 return vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: could not create new file '%s'"), pImage->pszFilename);
5257
5258 /* Set up basename for extent description. Cannot use StrDup. */
5259 size_t cbBasename = strlen(pRaw->pszRawDisk) + 1;
5260 char *pszBasename = (char *)RTMemTmpAlloc(cbBasename);
5261 if (!pszBasename)
5262 return VERR_NO_MEMORY;
5263 memcpy(pszBasename, pRaw->pszRawDisk, cbBasename);
5264 pExtent->pszBasename = pszBasename;
5265 /* For raw disks the full name is identical to the base name. */
5266 pExtent->pszFullname = RTStrDup(pszBasename);
5267 if (!pExtent->pszFullname)
5268 return VERR_NO_MEMORY;
5269 pExtent->enmType = VMDKETYPE_FLAT;
5270 pExtent->cNominalSectors = VMDK_BYTE2SECTOR(cbSize);
5271 pExtent->uSectorOffset = 0;
5272 pExtent->enmAccess = (pRaw->uFlags & VDISKRAW_READONLY) ? VMDKACCESS_READONLY : VMDKACCESS_READWRITE;
5273 pExtent->fMetaDirty = false;
5274
5275 /* Open flat image, the raw disk. */
5276 rc = vmdkFileOpen(pImage, &pExtent->pFile, pExtent->pszBasename, pExtent->pszFullname,
5277 VDOpenFlagsToFileOpenFlags(pImage->uOpenFlags | ((pExtent->enmAccess == VMDKACCESS_READONLY) ? VD_OPEN_FLAGS_READONLY : 0),
5278 false /* fCreate */));
5279 if (RT_FAILURE(rc))
5280 return vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: could not open raw disk file '%s'"), pExtent->pszFullname);
5281 }
5282 else
5283 {
5284 /* Raw partition access. This requires setting up a descriptor
5285 * file, write the partition information to a flat extent and
5286 * open all the (flat) raw disk partitions. */
5287
5288 /* First pass over the partition data areas to determine how many
5289 * extents we need. One data area can require up to 2 extents, as
5290 * it might be necessary to skip over unpartitioned space. */
5291 unsigned cExtents = 0;
5292 uint64_t uStart = 0;
5293 for (unsigned i = 0; i < pRaw->cPartDescs; i++)
5294 {
5295 PVDISKRAWPARTDESC pPart = &pRaw->pPartDescs[i];
5296 if (uStart > pPart->offStartInVDisk)
5297 return vdIfError(pImage->pIfError, VERR_INVALID_PARAMETER, RT_SRC_POS,
5298 N_("VMDK: incorrect partition data area ordering set up by the caller in '%s'"), pImage->pszFilename);
5299
5300 if (uStart < pPart->offStartInVDisk)
5301 cExtents++;
5302 uStart = pPart->offStartInVDisk + pPart->cbData;
5303 cExtents++;
5304 }
5305 /* Another extent for filling up the rest of the image. */
5306 if (uStart != cbSize)
5307 cExtents++;
5308
5309 rc = vmdkCreateExtents(pImage, cExtents);
5310 if (RT_FAILURE(rc))
5311 return vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: could not create new extent list in '%s'"), pImage->pszFilename);
5312
5313 /* Create raw partition descriptor file. */
5314 rc = vmdkFileOpen(pImage, &pImage->pFile, NULL, pImage->pszFilename,
5315 VDOpenFlagsToFileOpenFlags(pImage->uOpenFlags,
5316 true /* fCreate */));
5317 if (RT_FAILURE(rc))
5318 return vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: could not create new file '%s'"), pImage->pszFilename);
5319
5320 /* Create base filename for the partition table extent. */
5321 /** @todo remove fixed buffer without creating memory leaks. */
5322 char pszPartition[1024];
5323 const char *pszBase = RTPathFilename(pImage->pszFilename);
5324 const char *pszSuff = RTPathSuffix(pszBase);
5325 if (pszSuff == NULL)
5326 return vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: invalid filename '%s'"), pImage->pszFilename);
5327 char *pszBaseBase = RTStrDup(pszBase);
5328 if (!pszBaseBase)
5329 return VERR_NO_MEMORY;
5330 RTPathStripSuffix(pszBaseBase);
5331 RTStrPrintf(pszPartition, sizeof(pszPartition), "%s-pt%s",
5332 pszBaseBase, pszSuff);
5333 RTStrFree(pszBaseBase);
5334
5335 /* Second pass over the partitions, now define all extents. */
5336 uint64_t uPartOffset = 0;
5337 cExtents = 0;
5338 uStart = 0;
5339 for (unsigned i = 0; i < pRaw->cPartDescs; i++)
5340 {
5341 PVDISKRAWPARTDESC pPart = &pRaw->pPartDescs[i];
5342 pExtent = &pImage->pExtents[cExtents++];
5343
5344 if (uStart < pPart->offStartInVDisk)
5345 {
5346 pExtent->pszBasename = NULL;
5347 pExtent->pszFullname = NULL;
5348 pExtent->enmType = VMDKETYPE_ZERO;
5349 pExtent->cNominalSectors = VMDK_BYTE2SECTOR(pPart->offStartInVDisk - uStart);
5350 pExtent->uSectorOffset = 0;
5351 pExtent->enmAccess = VMDKACCESS_READWRITE;
5352 pExtent->fMetaDirty = false;
5353 /* go to next extent */
5354 pExtent = &pImage->pExtents[cExtents++];
5355 }
5356 uStart = pPart->offStartInVDisk + pPart->cbData;
5357
5358 if (pPart->pvPartitionData)
5359 {
5360 /* Set up basename for extent description. Can't use StrDup. */
5361 size_t cbBasename = strlen(pszPartition) + 1;
5362 char *pszBasename = (char *)RTMemTmpAlloc(cbBasename);
5363 if (!pszBasename)
5364 return VERR_NO_MEMORY;
5365 memcpy(pszBasename, pszPartition, cbBasename);
5366 pExtent->pszBasename = pszBasename;
5367
5368 /* Set up full name for partition extent. */
5369 char *pszDirname = RTStrDup(pImage->pszFilename);
5370 if (!pszDirname)
5371 return VERR_NO_STR_MEMORY;
5372 RTPathStripFilename(pszDirname);
5373 char *pszFullname = RTPathJoinA(pszDirname, pExtent->pszBasename);
5374 RTStrFree(pszDirname);
5375 if (!pszFullname)
5376 return VERR_NO_STR_MEMORY;
5377 pExtent->pszFullname = pszFullname;
5378 pExtent->enmType = VMDKETYPE_FLAT;
5379 pExtent->cNominalSectors = VMDK_BYTE2SECTOR(pPart->cbData);
5380 pExtent->uSectorOffset = uPartOffset;
5381 pExtent->enmAccess = VMDKACCESS_READWRITE;
5382 pExtent->fMetaDirty = false;
5383
5384 /* Create partition table flat image. */
5385 rc = vmdkFileOpen(pImage, &pExtent->pFile, pExtent->pszBasename, pExtent->pszFullname,
5386 VDOpenFlagsToFileOpenFlags(pImage->uOpenFlags | ((pExtent->enmAccess == VMDKACCESS_READONLY) ? VD_OPEN_FLAGS_READONLY : 0),
5387 true /* fCreate */));
5388 if (RT_FAILURE(rc))
5389 return vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: could not create new partition data file '%s'"), pExtent->pszFullname);
5390 rc = vdIfIoIntFileWriteSync(pImage->pIfIo, pExtent->pFile->pStorage,
5391 VMDK_SECTOR2BYTE(uPartOffset),
5392 pPart->pvPartitionData,
5393 pPart->cbData);
5394 if (RT_FAILURE(rc))
5395 return vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: could not write partition data to '%s'"), pExtent->pszFullname);
5396 uPartOffset += VMDK_BYTE2SECTOR(pPart->cbData);
5397 }
5398 else
5399 {
5400 if (pPart->pszRawDevice)
5401 {
5402 /* Set up basename for extent descr. Can't use StrDup. */
5403 size_t cbBasename = strlen(pPart->pszRawDevice) + 1;
5404 char *pszBasename = (char *)RTMemTmpAlloc(cbBasename);
5405 if (!pszBasename)
5406 return VERR_NO_MEMORY;
5407 memcpy(pszBasename, pPart->pszRawDevice, cbBasename);
5408 pExtent->pszBasename = pszBasename;
5409 /* For raw disks full name is identical to base name. */
5410 pExtent->pszFullname = RTStrDup(pszBasename);
5411 if (!pExtent->pszFullname)
5412 return VERR_NO_MEMORY;
5413 pExtent->enmType = VMDKETYPE_FLAT;
5414 pExtent->cNominalSectors = VMDK_BYTE2SECTOR(pPart->cbData);
5415 pExtent->uSectorOffset = VMDK_BYTE2SECTOR(pPart->offStartInDevice);
5416 pExtent->enmAccess = (pPart->uFlags & VDISKRAW_READONLY) ? VMDKACCESS_READONLY : VMDKACCESS_READWRITE;
5417 pExtent->fMetaDirty = false;
5418
5419 /* Open flat image, the raw partition. */
5420 rc = vmdkFileOpen(pImage, &pExtent->pFile, pExtent->pszBasename, pExtent->pszFullname,
5421 VDOpenFlagsToFileOpenFlags(pImage->uOpenFlags | ((pExtent->enmAccess == VMDKACCESS_READONLY) ? VD_OPEN_FLAGS_READONLY : 0),
5422 false /* fCreate */));
5423 if (RT_FAILURE(rc))
5424 return vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: could not open raw partition file '%s'"), pExtent->pszFullname);
5425 }
5426 else
5427 {
5428 pExtent->pszBasename = NULL;
5429 pExtent->pszFullname = NULL;
5430 pExtent->enmType = VMDKETYPE_ZERO;
5431 pExtent->cNominalSectors = VMDK_BYTE2SECTOR(pPart->cbData);
5432 pExtent->uSectorOffset = 0;
5433 pExtent->enmAccess = VMDKACCESS_READWRITE;
5434 pExtent->fMetaDirty = false;
5435 }
5436 }
5437 }
5438 /* Another extent for filling up the rest of the image. */
5439 if (uStart != cbSize)
5440 {
5441 pExtent = &pImage->pExtents[cExtents++];
5442 pExtent->pszBasename = NULL;
5443 pExtent->pszFullname = NULL;
5444 pExtent->enmType = VMDKETYPE_ZERO;
5445 pExtent->cNominalSectors = VMDK_BYTE2SECTOR(cbSize - uStart);
5446 pExtent->uSectorOffset = 0;
5447 pExtent->enmAccess = VMDKACCESS_READWRITE;
5448 pExtent->fMetaDirty = false;
5449 }
5450 }
5451
5452 rc = vmdkDescBaseSetStr(pImage, &pImage->Descriptor, "createType",
5453 (pRaw->uFlags & VDISKRAW_DISK) ?
5454 "fullDevice" : "partitionedDevice");
5455 if (RT_FAILURE(rc))
5456 return vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: could not set the image type in '%s'"), pImage->pszFilename);
5457 return rc;
5458}
5459
5460/**
5461 * Internal: create a regular (i.e. file-backed) VMDK image.
5462 */
5463static int vmdkCreateRegularImage(PVMDKIMAGE pImage, uint64_t cbSize,
5464 unsigned uImageFlags, PVDINTERFACEPROGRESS pIfProgress,
5465 unsigned uPercentStart, unsigned uPercentSpan)
5466{
5467 int rc = VINF_SUCCESS;
5468 unsigned cExtents = 1;
5469 uint64_t cbOffset = 0;
5470 uint64_t cbRemaining = cbSize;
5471
5472 if (uImageFlags & VD_VMDK_IMAGE_FLAGS_SPLIT_2G)
5473 {
5474 cExtents = cbSize / VMDK_2G_SPLIT_SIZE;
5475 /* Do proper extent computation: need one smaller extent if the total
5476 * size isn't evenly divisible by the split size. */
5477 if (cbSize % VMDK_2G_SPLIT_SIZE)
5478 cExtents++;
5479 }
5480 rc = vmdkCreateExtents(pImage, cExtents);
5481 if (RT_FAILURE(rc))
5482 return vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: could not create new extent list in '%s'"), pImage->pszFilename);
5483
5484 /* Basename strings needed for constructing the extent names. */
5485 char *pszBasenameSubstr = RTPathFilename(pImage->pszFilename);
5486 AssertPtr(pszBasenameSubstr);
5487 size_t cbBasenameSubstr = strlen(pszBasenameSubstr) + 1;
5488
5489 /* Create separate descriptor file if necessary. */
5490 if (cExtents != 1 || (uImageFlags & VD_IMAGE_FLAGS_FIXED))
5491 {
5492 rc = vmdkFileOpen(pImage, &pImage->pFile, NULL, pImage->pszFilename,
5493 VDOpenFlagsToFileOpenFlags(pImage->uOpenFlags,
5494 true /* fCreate */));
5495 if (RT_FAILURE(rc))
5496 return vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: could not create new sparse descriptor file '%s'"), pImage->pszFilename);
5497 }
5498 else
5499 pImage->pFile = NULL;
5500
5501 /* Set up all extents. */
5502 for (unsigned i = 0; i < cExtents; i++)
5503 {
5504 PVMDKEXTENT pExtent = &pImage->pExtents[i];
5505 uint64_t cbExtent = cbRemaining;
5506
5507 /* Set up fullname/basename for extent description. Cannot use StrDup
5508 * for basename, as it is not guaranteed that the memory can be freed
5509 * with RTMemTmpFree, which must be used as in other code paths
5510 * StrDup is not usable. */
5511 if (cExtents == 1 && !(uImageFlags & VD_IMAGE_FLAGS_FIXED))
5512 {
5513 char *pszBasename = (char *)RTMemTmpAlloc(cbBasenameSubstr);
5514 if (!pszBasename)
5515 return VERR_NO_MEMORY;
5516 memcpy(pszBasename, pszBasenameSubstr, cbBasenameSubstr);
5517 pExtent->pszBasename = pszBasename;
5518 }
5519 else
5520 {
5521 char *pszBasenameSuff = RTPathSuffix(pszBasenameSubstr);
5522 char *pszBasenameBase = RTStrDup(pszBasenameSubstr);
5523 RTPathStripSuffix(pszBasenameBase);
5524 char *pszTmp;
5525 size_t cbTmp;
5526 if (uImageFlags & VD_IMAGE_FLAGS_FIXED)
5527 {
5528 if (cExtents == 1)
5529 RTStrAPrintf(&pszTmp, "%s-flat%s", pszBasenameBase,
5530 pszBasenameSuff);
5531 else
5532 RTStrAPrintf(&pszTmp, "%s-f%03d%s", pszBasenameBase,
5533 i+1, pszBasenameSuff);
5534 }
5535 else
5536 RTStrAPrintf(&pszTmp, "%s-s%03d%s", pszBasenameBase, i+1,
5537 pszBasenameSuff);
5538 RTStrFree(pszBasenameBase);
5539 if (!pszTmp)
5540 return VERR_NO_STR_MEMORY;
5541 cbTmp = strlen(pszTmp) + 1;
5542 char *pszBasename = (char *)RTMemTmpAlloc(cbTmp);
5543 if (!pszBasename)
5544 {
5545 RTStrFree(pszTmp);
5546 return VERR_NO_MEMORY;
5547 }
5548 memcpy(pszBasename, pszTmp, cbTmp);
5549 RTStrFree(pszTmp);
5550 pExtent->pszBasename = pszBasename;
5551 if (uImageFlags & VD_VMDK_IMAGE_FLAGS_SPLIT_2G)
5552 cbExtent = RT_MIN(cbRemaining, VMDK_2G_SPLIT_SIZE);
5553 }
5554 char *pszBasedirectory = RTStrDup(pImage->pszFilename);
5555 if (!pszBasedirectory)
5556 return VERR_NO_STR_MEMORY;
5557 RTPathStripFilename(pszBasedirectory);
5558 char *pszFullname = RTPathJoinA(pszBasedirectory, pExtent->pszBasename);
5559 RTStrFree(pszBasedirectory);
5560 if (!pszFullname)
5561 return VERR_NO_STR_MEMORY;
5562 pExtent->pszFullname = pszFullname;
5563
5564 /* Create file for extent. */
5565 rc = vmdkFileOpen(pImage, &pExtent->pFile, pExtent->pszBasename, pExtent->pszFullname,
5566 VDOpenFlagsToFileOpenFlags(pImage->uOpenFlags,
5567 true /* fCreate */));
5568 if (RT_FAILURE(rc))
5569 return vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: could not create new file '%s'"), pExtent->pszFullname);
5570 if (uImageFlags & VD_IMAGE_FLAGS_FIXED)
5571 {
5572 rc = vdIfIoIntFileSetAllocationSize(pImage->pIfIo, pExtent->pFile->pStorage, cbExtent,
5573 0 /* fFlags */, pIfProgress,
5574 uPercentStart + cbOffset * uPercentSpan / cbSize,
5575 cbExtent * uPercentSpan / cbSize);
5576 if (RT_FAILURE(rc))
5577 return vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: could not set size of new file '%s'"), pExtent->pszFullname);
5578 }
5579
5580 /* Place descriptor file information (where integrated). */
5581 if (cExtents == 1 && !(uImageFlags & VD_IMAGE_FLAGS_FIXED))
5582 {
5583 pExtent->uDescriptorSector = 1;
5584 pExtent->cDescriptorSectors = VMDK_BYTE2SECTOR(pImage->cbDescAlloc);
5585 /* The descriptor is part of the (only) extent. */
5586 pExtent->pDescData = pImage->pDescData;
5587 pImage->pDescData = NULL;
5588 }
5589
5590 if (!(uImageFlags & VD_IMAGE_FLAGS_FIXED))
5591 {
5592 uint64_t cSectorsPerGDE, cSectorsPerGD;
5593 pExtent->enmType = VMDKETYPE_HOSTED_SPARSE;
5594 pExtent->cSectors = VMDK_BYTE2SECTOR(RT_ALIGN_64(cbExtent, _64K));
5595 pExtent->cSectorsPerGrain = VMDK_BYTE2SECTOR(_64K);
5596 pExtent->cGTEntries = 512;
5597 cSectorsPerGDE = pExtent->cGTEntries * pExtent->cSectorsPerGrain;
5598 pExtent->cSectorsPerGDE = cSectorsPerGDE;
5599 pExtent->cGDEntries = (pExtent->cSectors + cSectorsPerGDE - 1) / cSectorsPerGDE;
5600 cSectorsPerGD = (pExtent->cGDEntries + (512 / sizeof(uint32_t) - 1)) / (512 / sizeof(uint32_t));
5601 if (pImage->uImageFlags & VD_VMDK_IMAGE_FLAGS_STREAM_OPTIMIZED)
5602 {
5603 /* The spec says version is 1 for all VMDKs, but the vast
5604 * majority of streamOptimized VMDKs actually contain
5605 * version 3 - so go with the majority. Both are accepted. */
5606 pExtent->uVersion = 3;
5607 pExtent->uCompression = VMDK_COMPRESSION_DEFLATE;
5608 }
5609 }
5610 else
5611 {
5612 if (uImageFlags & VD_VMDK_IMAGE_FLAGS_ESX)
5613 pExtent->enmType = VMDKETYPE_VMFS;
5614 else
5615 pExtent->enmType = VMDKETYPE_FLAT;
5616 }
5617
5618 pExtent->enmAccess = VMDKACCESS_READWRITE;
5619 pExtent->fUncleanShutdown = true;
5620 pExtent->cNominalSectors = VMDK_BYTE2SECTOR(cbExtent);
5621 pExtent->uSectorOffset = 0;
5622 pExtent->fMetaDirty = true;
5623
5624 if (!(uImageFlags & VD_IMAGE_FLAGS_FIXED))
5625 {
5626 /* fPreAlloc should never be false because VMware can't use such images. */
5627 rc = vmdkCreateGrainDirectory(pImage, pExtent,
5628 RT_MAX( pExtent->uDescriptorSector
5629 + pExtent->cDescriptorSectors,
5630 1),
5631 true /* fPreAlloc */);
5632 if (RT_FAILURE(rc))
5633 return vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: could not create new grain directory in '%s'"), pExtent->pszFullname);
5634 }
5635
5636 cbOffset += cbExtent;
5637
5638 if (RT_SUCCESS(rc))
5639 vdIfProgress(pIfProgress, uPercentStart + cbOffset * uPercentSpan / cbSize);
5640
5641 cbRemaining -= cbExtent;
5642 }
5643
5644 if (pImage->uImageFlags & VD_VMDK_IMAGE_FLAGS_ESX)
5645 {
5646 /* VirtualBox doesn't care, but VMWare ESX freaks out if the wrong
5647 * controller type is set in an image. */
5648 rc = vmdkDescDDBSetStr(pImage, &pImage->Descriptor, "ddb.adapterType", "lsilogic");
5649 if (RT_FAILURE(rc))
5650 return vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: could not set controller type to lsilogic in '%s'"), pImage->pszFilename);
5651 }
5652
5653 const char *pszDescType = NULL;
5654 if (uImageFlags & VD_IMAGE_FLAGS_FIXED)
5655 {
5656 if (pImage->uImageFlags & VD_VMDK_IMAGE_FLAGS_ESX)
5657 pszDescType = "vmfs";
5658 else
5659 pszDescType = (cExtents == 1)
5660 ? "monolithicFlat" : "twoGbMaxExtentFlat";
5661 }
5662 else
5663 {
5664 if (pImage->uImageFlags & VD_VMDK_IMAGE_FLAGS_STREAM_OPTIMIZED)
5665 pszDescType = "streamOptimized";
5666 else
5667 {
5668 pszDescType = (cExtents == 1)
5669 ? "monolithicSparse" : "twoGbMaxExtentSparse";
5670 }
5671 }
5672 rc = vmdkDescBaseSetStr(pImage, &pImage->Descriptor, "createType",
5673 pszDescType);
5674 if (RT_FAILURE(rc))
5675 return vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: could not set the image type in '%s'"), pImage->pszFilename);
5676 return rc;
5677}
5678
5679/**
5680 * Internal: Create a real stream optimized VMDK using only linear writes.
5681 */
5682static int vmdkCreateStreamImage(PVMDKIMAGE pImage, uint64_t cbSize)
5683{
5684 int rc = vmdkCreateExtents(pImage, 1);
5685 if (RT_FAILURE(rc))
5686 return vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: could not create new extent list in '%s'"), pImage->pszFilename);
5687
5688 /* Basename strings needed for constructing the extent names. */
5689 const char *pszBasenameSubstr = RTPathFilename(pImage->pszFilename);
5690 AssertPtr(pszBasenameSubstr);
5691 size_t cbBasenameSubstr = strlen(pszBasenameSubstr) + 1;
5692
5693 /* No separate descriptor file. */
5694 pImage->pFile = NULL;
5695
5696 /* Set up all extents. */
5697 PVMDKEXTENT pExtent = &pImage->pExtents[0];
5698
5699 /* Set up fullname/basename for extent description. Cannot use StrDup
5700 * for basename, as it is not guaranteed that the memory can be freed
5701 * with RTMemTmpFree, which must be used as in other code paths
5702 * StrDup is not usable. */
5703 char *pszBasename = (char *)RTMemTmpAlloc(cbBasenameSubstr);
5704 if (!pszBasename)
5705 return VERR_NO_MEMORY;
5706 memcpy(pszBasename, pszBasenameSubstr, cbBasenameSubstr);
5707 pExtent->pszBasename = pszBasename;
5708
5709 char *pszBasedirectory = RTStrDup(pImage->pszFilename);
5710 RTPathStripFilename(pszBasedirectory);
5711 char *pszFullname = RTPathJoinA(pszBasedirectory, pExtent->pszBasename);
5712 RTStrFree(pszBasedirectory);
5713 if (!pszFullname)
5714 return VERR_NO_STR_MEMORY;
5715 pExtent->pszFullname = pszFullname;
5716
5717 /* Create file for extent. Make it write only, no reading allowed. */
5718 rc = vmdkFileOpen(pImage, &pExtent->pFile, pExtent->pszBasename, pExtent->pszFullname,
5719 VDOpenFlagsToFileOpenFlags(pImage->uOpenFlags,
5720 true /* fCreate */)
5721 & ~RTFILE_O_READ);
5722 if (RT_FAILURE(rc))
5723 return vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: could not create new file '%s'"), pExtent->pszFullname);
5724
5725 /* Place descriptor file information. */
5726 pExtent->uDescriptorSector = 1;
5727 pExtent->cDescriptorSectors = VMDK_BYTE2SECTOR(pImage->cbDescAlloc);
5728 /* The descriptor is part of the (only) extent. */
5729 pExtent->pDescData = pImage->pDescData;
5730 pImage->pDescData = NULL;
5731
5732 uint64_t cSectorsPerGDE, cSectorsPerGD;
5733 pExtent->enmType = VMDKETYPE_HOSTED_SPARSE;
5734 pExtent->cSectors = VMDK_BYTE2SECTOR(RT_ALIGN_64(cbSize, _64K));
5735 pExtent->cSectorsPerGrain = VMDK_BYTE2SECTOR(_64K);
5736 pExtent->cGTEntries = 512;
5737 cSectorsPerGDE = pExtent->cGTEntries * pExtent->cSectorsPerGrain;
5738 pExtent->cSectorsPerGDE = cSectorsPerGDE;
5739 pExtent->cGDEntries = (pExtent->cSectors + cSectorsPerGDE - 1) / cSectorsPerGDE;
5740 cSectorsPerGD = (pExtent->cGDEntries + (512 / sizeof(uint32_t) - 1)) / (512 / sizeof(uint32_t));
5741
5742 /* The spec says version is 1 for all VMDKs, but the vast
5743 * majority of streamOptimized VMDKs actually contain
5744 * version 3 - so go with the majority. Both are accepted. */
5745 pExtent->uVersion = 3;
5746 pExtent->uCompression = VMDK_COMPRESSION_DEFLATE;
5747 pExtent->fFooter = true;
5748
5749 pExtent->enmAccess = VMDKACCESS_READONLY;
5750 pExtent->fUncleanShutdown = false;
5751 pExtent->cNominalSectors = VMDK_BYTE2SECTOR(cbSize);
5752 pExtent->uSectorOffset = 0;
5753 pExtent->fMetaDirty = true;
5754
5755 /* Create grain directory, without preallocating it straight away. It will
5756 * be constructed on the fly when writing out the data and written when
5757 * closing the image. The end effect is that the full grain directory is
5758 * allocated, which is a requirement of the VMDK specs. */
5759 rc = vmdkCreateGrainDirectory(pImage, pExtent, VMDK_GD_AT_END,
5760 false /* fPreAlloc */);
5761 if (RT_FAILURE(rc))
5762 return vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: could not create new grain directory in '%s'"), pExtent->pszFullname);
5763
5764 rc = vmdkDescBaseSetStr(pImage, &pImage->Descriptor, "createType",
5765 "streamOptimized");
5766 if (RT_FAILURE(rc))
5767 rc = vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: could not set the image type in '%s'"), pImage->pszFilename);
5768
5769 return rc;
5770}
5771
5772/**
5773 * Initializes the UUID fields in the DDB.
5774 *
5775 * @returns VBox status code.
5776 * @param pImage The VMDK image instance.
5777 */
5778static int vmdkCreateImageDdbUuidsInit(PVMDKIMAGE pImage)
5779{
5780 int rc = vmdkDescDDBSetUuid(pImage, &pImage->Descriptor, VMDK_DDB_IMAGE_UUID, &pImage->ImageUuid);
5781 if (RT_SUCCESS(rc))
5782 {
5783 rc = vmdkDescDDBSetUuid(pImage, &pImage->Descriptor, VMDK_DDB_PARENT_UUID, &pImage->ParentUuid);
5784 if (RT_SUCCESS(rc))
5785 {
5786 rc = vmdkDescDDBSetUuid(pImage, &pImage->Descriptor, VMDK_DDB_MODIFICATION_UUID,
5787 &pImage->ModificationUuid);
5788 if (RT_SUCCESS(rc))
5789 {
5790 rc = vmdkDescDDBSetUuid(pImage, &pImage->Descriptor, VMDK_DDB_PARENT_MODIFICATION_UUID,
5791 &pImage->ParentModificationUuid);
5792 if (RT_FAILURE(rc))
5793 rc = vdIfError(pImage->pIfError, rc, RT_SRC_POS,
5794 N_("VMDK: error storing parent modification UUID in new descriptor in '%s'"), pImage->pszFilename);
5795 }
5796 else
5797 rc = vdIfError(pImage->pIfError, rc, RT_SRC_POS,
5798 N_("VMDK: error storing modification UUID in new descriptor in '%s'"), pImage->pszFilename);
5799 }
5800 else
5801 rc = vdIfError(pImage->pIfError, rc, RT_SRC_POS,
5802 N_("VMDK: error storing parent image UUID in new descriptor in '%s'"), pImage->pszFilename);
5803 }
5804 else
5805 rc = vdIfError(pImage->pIfError, rc, RT_SRC_POS,
5806 N_("VMDK: error storing image UUID in new descriptor in '%s'"), pImage->pszFilename);
5807
5808 return rc;
5809}
5810
5811/**
5812 * Internal: The actual code for creating any VMDK variant currently in
5813 * existence on hosted environments.
5814 */
5815static int vmdkCreateImage(PVMDKIMAGE pImage, uint64_t cbSize,
5816 unsigned uImageFlags, const char *pszComment,
5817 PCVDGEOMETRY pPCHSGeometry,
5818 PCVDGEOMETRY pLCHSGeometry, PCRTUUID pUuid,
5819 PVDINTERFACEPROGRESS pIfProgress,
5820 unsigned uPercentStart, unsigned uPercentSpan)
5821{
5822 pImage->uImageFlags = uImageFlags;
5823
5824 pImage->pIfError = VDIfErrorGet(pImage->pVDIfsDisk);
5825 pImage->pIfIo = VDIfIoIntGet(pImage->pVDIfsImage);
5826 AssertPtrReturn(pImage->pIfIo, VERR_INVALID_PARAMETER);
5827
5828 int rc = vmdkCreateDescriptor(pImage, pImage->pDescData, pImage->cbDescAlloc,
5829 &pImage->Descriptor);
5830 if (RT_SUCCESS(rc))
5831 {
5832 if (uImageFlags & VD_VMDK_IMAGE_FLAGS_RAWDISK)
5833 {
5834 /* Raw disk image (includes raw partition). */
5835 PVDISKRAW pRaw = NULL;
5836 rc = vmdkMakeRawDescriptor(pImage, &pRaw);
5837 if (RT_FAILURE(rc))
5838 return vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: could not create raw descriptor for '%s'"),
5839 pImage->pszFilename);
5840 if (!cbSize)
5841 cbSize = pImage->cbSize;
5842
5843 rc = vmdkCreateRawImage(pImage, pRaw, cbSize);
5844 vmdkRawDescFree(pRaw);
5845 }
5846 else if (uImageFlags & VD_VMDK_IMAGE_FLAGS_STREAM_OPTIMIZED)
5847 {
5848 /* Stream optimized sparse image (monolithic). */
5849 rc = vmdkCreateStreamImage(pImage, cbSize);
5850 }
5851 else
5852 {
5853 /* Regular fixed or sparse image (monolithic or split). */
5854 rc = vmdkCreateRegularImage(pImage, cbSize, uImageFlags,
5855 pIfProgress, uPercentStart,
5856 uPercentSpan * 95 / 100);
5857 }
5858
5859 if (RT_SUCCESS(rc))
5860 {
5861 vdIfProgress(pIfProgress, uPercentStart + uPercentSpan * 98 / 100);
5862
5863 pImage->cbSize = cbSize;
5864
5865 for (unsigned i = 0; i < pImage->cExtents; i++)
5866 {
5867 PVMDKEXTENT pExtent = &pImage->pExtents[i];
5868
5869 rc = vmdkDescExtInsert(pImage, &pImage->Descriptor, pExtent->enmAccess,
5870 pExtent->cNominalSectors, pExtent->enmType,
5871 pExtent->pszBasename, pExtent->uSectorOffset);
5872 if (RT_FAILURE(rc))
5873 {
5874 rc = vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: could not insert the extent list into descriptor in '%s'"), pImage->pszFilename);
5875 break;
5876 }
5877 }
5878
5879 if (RT_SUCCESS(rc))
5880 vmdkDescExtRemoveDummy(pImage, &pImage->Descriptor);
5881
5882 pImage->LCHSGeometry = *pLCHSGeometry;
5883 pImage->PCHSGeometry = *pPCHSGeometry;
5884
5885 if (RT_SUCCESS(rc))
5886 {
5887 if ( pPCHSGeometry->cCylinders != 0
5888 && pPCHSGeometry->cHeads != 0
5889 && pPCHSGeometry->cSectors != 0)
5890 rc = vmdkDescSetPCHSGeometry(pImage, pPCHSGeometry);
5891 else if (uImageFlags & VD_VMDK_IMAGE_FLAGS_RAWDISK)
5892 {
5893 VDGEOMETRY RawDiskPCHSGeometry;
5894 RawDiskPCHSGeometry.cCylinders = (uint32_t)RT_MIN(pImage->cbSize / 512 / 16 / 63, 16383);
5895 RawDiskPCHSGeometry.cHeads = 16;
5896 RawDiskPCHSGeometry.cSectors = 63;
5897 rc = vmdkDescSetPCHSGeometry(pImage, &RawDiskPCHSGeometry);
5898 }
5899 }
5900
5901 if ( RT_SUCCESS(rc)
5902 && pLCHSGeometry->cCylinders != 0
5903 && pLCHSGeometry->cHeads != 0
5904 && pLCHSGeometry->cSectors != 0)
5905 rc = vmdkDescSetLCHSGeometry(pImage, pLCHSGeometry);
5906
5907 pImage->ImageUuid = *pUuid;
5908 RTUuidClear(&pImage->ParentUuid);
5909 RTUuidClear(&pImage->ModificationUuid);
5910 RTUuidClear(&pImage->ParentModificationUuid);
5911
5912 if (RT_SUCCESS(rc))
5913 rc = vmdkCreateImageDdbUuidsInit(pImage);
5914
5915 if (RT_SUCCESS(rc))
5916 rc = vmdkAllocateGrainTableCache(pImage);
5917
5918 if (RT_SUCCESS(rc))
5919 {
5920 rc = vmdkSetImageComment(pImage, pszComment);
5921 if (RT_FAILURE(rc))
5922 rc = vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: cannot set image comment in '%s'"), pImage->pszFilename);
5923 }
5924
5925 if (RT_SUCCESS(rc))
5926 {
5927 vdIfProgress(pIfProgress, uPercentStart + uPercentSpan * 99 / 100);
5928
5929 if (pImage->uImageFlags & VD_VMDK_IMAGE_FLAGS_STREAM_OPTIMIZED)
5930 {
5931 /* streamOptimized is a bit special, we cannot trigger the flush
5932 * until all data has been written. So we write the necessary
5933 * information explicitly. */
5934 pImage->pExtents[0].cDescriptorSectors = VMDK_BYTE2SECTOR(RT_ALIGN_64( pImage->Descriptor.aLines[pImage->Descriptor.cLines]
5935 - pImage->Descriptor.aLines[0], 512));
5936 rc = vmdkWriteMetaSparseExtent(pImage, &pImage->pExtents[0], 0, NULL);
5937 if (RT_SUCCESS(rc))
5938 {
5939 rc = vmdkWriteDescriptor(pImage, NULL);
5940 if (RT_FAILURE(rc))
5941 rc = vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: cannot write VMDK descriptor in '%s'"), pImage->pszFilename);
5942 }
5943 else
5944 rc = vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: cannot write VMDK header in '%s'"), pImage->pszFilename);
5945 }
5946 else
5947 rc = vmdkFlushImage(pImage, NULL);
5948 }
5949 }
5950 }
5951 else
5952 rc = vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: could not create new descriptor in '%s'"), pImage->pszFilename);
5953
5954
5955 if (RT_SUCCESS(rc))
5956 {
5957 PVDREGIONDESC pRegion = &pImage->RegionList.aRegions[0];
5958 pImage->RegionList.fFlags = 0;
5959 pImage->RegionList.cRegions = 1;
5960
5961 pRegion->offRegion = 0; /* Disk start. */
5962 pRegion->cbBlock = 512;
5963 pRegion->enmDataForm = VDREGIONDATAFORM_RAW;
5964 pRegion->enmMetadataForm = VDREGIONMETADATAFORM_NONE;
5965 pRegion->cbData = 512;
5966 pRegion->cbMetadata = 0;
5967 pRegion->cRegionBlocksOrBytes = pImage->cbSize;
5968
5969 vdIfProgress(pIfProgress, uPercentStart + uPercentSpan);
5970 }
5971 else
5972 vmdkFreeImage(pImage, rc != VERR_ALREADY_EXISTS, false /*fFlush*/);
5973 return rc;
5974}
5975
5976/**
5977 * Internal: Update image comment.
5978 */
5979static int vmdkSetImageComment(PVMDKIMAGE pImage, const char *pszComment)
5980{
5981 char *pszCommentEncoded = NULL;
5982 if (pszComment)
5983 {
5984 pszCommentEncoded = vmdkEncodeString(pszComment);
5985 if (!pszCommentEncoded)
5986 return VERR_NO_MEMORY;
5987 }
5988
5989 int rc = vmdkDescDDBSetStr(pImage, &pImage->Descriptor,
5990 "ddb.comment", pszCommentEncoded);
5991 if (pszCommentEncoded)
5992 RTStrFree(pszCommentEncoded);
5993 if (RT_FAILURE(rc))
5994 return vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: error storing image comment in descriptor in '%s'"), pImage->pszFilename);
5995 return VINF_SUCCESS;
5996}
5997
5998/**
5999 * Internal. Clear the grain table buffer for real stream optimized writing.
6000 */
6001static void vmdkStreamClearGT(PVMDKIMAGE pImage, PVMDKEXTENT pExtent)
6002{
6003 uint32_t cCacheLines = RT_ALIGN(pExtent->cGTEntries, VMDK_GT_CACHELINE_SIZE) / VMDK_GT_CACHELINE_SIZE;
6004 for (uint32_t i = 0; i < cCacheLines; i++)
6005 memset(&pImage->pGTCache->aGTCache[i].aGTData[0], '\0',
6006 VMDK_GT_CACHELINE_SIZE * sizeof(uint32_t));
6007}
6008
6009/**
6010 * Internal. Flush the grain table buffer for real stream optimized writing.
6011 */
6012static int vmdkStreamFlushGT(PVMDKIMAGE pImage, PVMDKEXTENT pExtent,
6013 uint32_t uGDEntry)
6014{
6015 int rc = VINF_SUCCESS;
6016 uint32_t cCacheLines = RT_ALIGN(pExtent->cGTEntries, VMDK_GT_CACHELINE_SIZE) / VMDK_GT_CACHELINE_SIZE;
6017
6018 /* VMware does not write out completely empty grain tables in the case
6019 * of streamOptimized images, which according to my interpretation of
6020 * the VMDK 1.1 spec is bending the rules. Since they do it and we can
6021 * handle it without problems do it the same way and save some bytes. */
6022 bool fAllZero = true;
6023 for (uint32_t i = 0; i < cCacheLines; i++)
6024 {
6025 /* Convert the grain table to little endian in place, as it will not
6026 * be used at all after this function has been called. */
6027 uint32_t *pGTTmp = &pImage->pGTCache->aGTCache[i].aGTData[0];
6028 for (uint32_t j = 0; j < VMDK_GT_CACHELINE_SIZE; j++, pGTTmp++)
6029 if (*pGTTmp)
6030 {
6031 fAllZero = false;
6032 break;
6033 }
6034 if (!fAllZero)
6035 break;
6036 }
6037 if (fAllZero)
6038 return VINF_SUCCESS;
6039
6040 uint64_t uFileOffset = pExtent->uAppendPosition;
6041 if (!uFileOffset)
6042 return VERR_INTERNAL_ERROR;
6043 /* Align to sector, as the previous write could have been any size. */
6044 uFileOffset = RT_ALIGN_64(uFileOffset, 512);
6045
6046 /* Grain table marker. */
6047 uint8_t aMarker[512];
6048 PVMDKMARKER pMarker = (PVMDKMARKER)&aMarker[0];
6049 memset(pMarker, '\0', sizeof(aMarker));
6050 pMarker->uSector = RT_H2LE_U64(VMDK_BYTE2SECTOR((uint64_t)pExtent->cGTEntries * sizeof(uint32_t)));
6051 pMarker->uType = RT_H2LE_U32(VMDK_MARKER_GT);
6052 rc = vdIfIoIntFileWriteSync(pImage->pIfIo, pExtent->pFile->pStorage, uFileOffset,
6053 aMarker, sizeof(aMarker));
6054 AssertRC(rc);
6055 uFileOffset += 512;
6056
6057 if (!pExtent->pGD || pExtent->pGD[uGDEntry])
6058 return VERR_INTERNAL_ERROR;
6059
6060 pExtent->pGD[uGDEntry] = VMDK_BYTE2SECTOR(uFileOffset);
6061
6062 for (uint32_t i = 0; i < cCacheLines; i++)
6063 {
6064 /* Convert the grain table to little endian in place, as it will not
6065 * be used at all after this function has been called. */
6066 uint32_t *pGTTmp = &pImage->pGTCache->aGTCache[i].aGTData[0];
6067 for (uint32_t j = 0; j < VMDK_GT_CACHELINE_SIZE; j++, pGTTmp++)
6068 *pGTTmp = RT_H2LE_U32(*pGTTmp);
6069
6070 rc = vdIfIoIntFileWriteSync(pImage->pIfIo, pExtent->pFile->pStorage, uFileOffset,
6071 &pImage->pGTCache->aGTCache[i].aGTData[0],
6072 VMDK_GT_CACHELINE_SIZE * sizeof(uint32_t));
6073 uFileOffset += VMDK_GT_CACHELINE_SIZE * sizeof(uint32_t);
6074 if (RT_FAILURE(rc))
6075 break;
6076 }
6077 Assert(!(uFileOffset % 512));
6078 pExtent->uAppendPosition = RT_ALIGN_64(uFileOffset, 512);
6079 return rc;
6080}
6081
6082/**
6083 * Internal. Free all allocated space for representing an image, and optionally
6084 * delete the image from disk.
6085 */
6086static int vmdkFreeImage(PVMDKIMAGE pImage, bool fDelete, bool fFlush)
6087{
6088 int rc = VINF_SUCCESS;
6089
6090 /* Freeing a never allocated image (e.g. because the open failed) is
6091 * not signalled as an error. After all nothing bad happens. */
6092 if (pImage)
6093 {
6094 if (!(pImage->uOpenFlags & VD_OPEN_FLAGS_READONLY))
6095 {
6096 if (pImage->uImageFlags & VD_VMDK_IMAGE_FLAGS_STREAM_OPTIMIZED)
6097 {
6098 /* Check if all extents are clean. */
6099 for (unsigned i = 0; i < pImage->cExtents; i++)
6100 {
6101 Assert(!pImage->pExtents[i].fUncleanShutdown);
6102 }
6103 }
6104 else
6105 {
6106 /* Mark all extents as clean. */
6107 for (unsigned i = 0; i < pImage->cExtents; i++)
6108 {
6109 if ( pImage->pExtents[i].enmType == VMDKETYPE_HOSTED_SPARSE
6110 && pImage->pExtents[i].fUncleanShutdown)
6111 {
6112 pImage->pExtents[i].fUncleanShutdown = false;
6113 pImage->pExtents[i].fMetaDirty = true;
6114 }
6115
6116 /* From now on it's not safe to append any more data. */
6117 pImage->pExtents[i].uAppendPosition = 0;
6118 }
6119 }
6120 }
6121
6122 if (pImage->uImageFlags & VD_VMDK_IMAGE_FLAGS_STREAM_OPTIMIZED)
6123 {
6124 /* No need to write any pending data if the file will be deleted
6125 * or if the new file wasn't successfully created. */
6126 if ( !fDelete && pImage->pExtents
6127 && pImage->pExtents[0].cGTEntries
6128 && pImage->pExtents[0].uAppendPosition)
6129 {
6130 PVMDKEXTENT pExtent = &pImage->pExtents[0];
6131 uint32_t uLastGDEntry = pExtent->uLastGrainAccess / pExtent->cGTEntries;
6132 rc = vmdkStreamFlushGT(pImage, pExtent, uLastGDEntry);
6133 AssertRC(rc);
6134 vmdkStreamClearGT(pImage, pExtent);
6135 for (uint32_t i = uLastGDEntry + 1; i < pExtent->cGDEntries; i++)
6136 {
6137 rc = vmdkStreamFlushGT(pImage, pExtent, i);
6138 AssertRC(rc);
6139 }
6140
6141 uint64_t uFileOffset = pExtent->uAppendPosition;
6142 if (!uFileOffset)
6143 return VERR_INTERNAL_ERROR;
6144 uFileOffset = RT_ALIGN_64(uFileOffset, 512);
6145
6146 /* From now on it's not safe to append any more data. */
6147 pExtent->uAppendPosition = 0;
6148
6149 /* Grain directory marker. */
6150 uint8_t aMarker[512];
6151 PVMDKMARKER pMarker = (PVMDKMARKER)&aMarker[0];
6152 memset(pMarker, '\0', sizeof(aMarker));
6153 pMarker->uSector = VMDK_BYTE2SECTOR(RT_ALIGN_64(RT_H2LE_U64((uint64_t)pExtent->cGDEntries * sizeof(uint32_t)), 512));
6154 pMarker->uType = RT_H2LE_U32(VMDK_MARKER_GD);
6155 rc = vdIfIoIntFileWriteSync(pImage->pIfIo, pExtent->pFile->pStorage, uFileOffset,
6156 aMarker, sizeof(aMarker));
6157 AssertRC(rc);
6158 uFileOffset += 512;
6159
6160 /* Write grain directory in little endian style. The array will
6161 * not be used after this, so convert in place. */
6162 uint32_t *pGDTmp = pExtent->pGD;
6163 for (uint32_t i = 0; i < pExtent->cGDEntries; i++, pGDTmp++)
6164 *pGDTmp = RT_H2LE_U32(*pGDTmp);
6165 rc = vdIfIoIntFileWriteSync(pImage->pIfIo, pExtent->pFile->pStorage,
6166 uFileOffset, pExtent->pGD,
6167 pExtent->cGDEntries * sizeof(uint32_t));
6168 AssertRC(rc);
6169
6170 pExtent->uSectorGD = VMDK_BYTE2SECTOR(uFileOffset);
6171 pExtent->uSectorRGD = VMDK_BYTE2SECTOR(uFileOffset);
6172 uFileOffset = RT_ALIGN_64( uFileOffset
6173 + pExtent->cGDEntries * sizeof(uint32_t),
6174 512);
6175
6176 /* Footer marker. */
6177 memset(pMarker, '\0', sizeof(aMarker));
6178 pMarker->uSector = VMDK_BYTE2SECTOR(512);
6179 pMarker->uType = RT_H2LE_U32(VMDK_MARKER_FOOTER);
6180 rc = vdIfIoIntFileWriteSync(pImage->pIfIo, pExtent->pFile->pStorage,
6181 uFileOffset, aMarker, sizeof(aMarker));
6182 AssertRC(rc);
6183
6184 uFileOffset += 512;
6185 rc = vmdkWriteMetaSparseExtent(pImage, pExtent, uFileOffset, NULL);
6186 AssertRC(rc);
6187
6188 uFileOffset += 512;
6189 /* End-of-stream marker. */
6190 memset(pMarker, '\0', sizeof(aMarker));
6191 rc = vdIfIoIntFileWriteSync(pImage->pIfIo, pExtent->pFile->pStorage,
6192 uFileOffset, aMarker, sizeof(aMarker));
6193 AssertRC(rc);
6194 }
6195 }
6196 else if (!fDelete && fFlush)
6197 vmdkFlushImage(pImage, NULL);
6198
6199 if (pImage->pExtents != NULL)
6200 {
6201 for (unsigned i = 0 ; i < pImage->cExtents; i++)
6202 {
6203 int rc2 = vmdkFreeExtentData(pImage, &pImage->pExtents[i], fDelete);
6204 if (RT_SUCCESS(rc))
6205 rc = rc2; /* Propogate any error when closing the file. */
6206 }
6207 RTMemFree(pImage->pExtents);
6208 pImage->pExtents = NULL;
6209 }
6210 pImage->cExtents = 0;
6211 if (pImage->pFile != NULL)
6212 {
6213 int rc2 = vmdkFileClose(pImage, &pImage->pFile, fDelete);
6214 if (RT_SUCCESS(rc))
6215 rc = rc2; /* Propogate any error when closing the file. */
6216 }
6217 int rc2 = vmdkFileCheckAllClose(pImage);
6218 if (RT_SUCCESS(rc))
6219 rc = rc2; /* Propogate any error when closing the file. */
6220
6221 if (pImage->pGTCache)
6222 {
6223 RTMemFree(pImage->pGTCache);
6224 pImage->pGTCache = NULL;
6225 }
6226 if (pImage->pDescData)
6227 {
6228 RTMemFree(pImage->pDescData);
6229 pImage->pDescData = NULL;
6230 }
6231 }
6232
6233 LogFlowFunc(("returns %Rrc\n", rc));
6234 return rc;
6235}
6236
/**
 * Internal. Flush image data (and metadata) to disk.
 *
 * Order matters: the descriptor is written first, then dirty extent metadata
 * (sparse headers, or footers for streamOptimized), and finally the
 * underlying storage of each file backed extent is flushed.
 *
 * @returns VBox status code.
 * @param   pImage  Image instance data.
 * @param   pIoCtx  I/O context to use (NULL on synchronous close paths).
 */
static int vmdkFlushImage(PVMDKIMAGE pImage, PVDIOCTX pIoCtx)
{
    PVMDKEXTENT pExtent;
    int rc = VINF_SUCCESS;

    /* Update descriptor if changed. */
    if (pImage->Descriptor.fDirty)
        rc = vmdkWriteDescriptor(pImage, pIoCtx);

    if (RT_SUCCESS(rc))
    {
        for (unsigned i = 0; i < pImage->cExtents; i++)
        {
            pExtent = &pImage->pExtents[i];
            if (pExtent->pFile != NULL && pExtent->fMetaDirty)
            {
                switch (pExtent->enmType)
                {
                    case VMDKETYPE_HOSTED_SPARSE:
                        if (!pExtent->fFooter)
                            /* Header at the start of the extent file. */
                            rc = vmdkWriteMetaSparseExtent(pImage, pExtent, 0, pIoCtx);
                        else
                        {
                            /* Footer-style (streamOptimized) extent: metadata
                             * goes at the current append position. */
                            uint64_t uFileOffset = pExtent->uAppendPosition;
                            /* Simply skip writing anything if the streamOptimized
                             * image hasn't been just created. */
                            if (!uFileOffset)
                                break;
                            uFileOffset = RT_ALIGN_64(uFileOffset, 512);
                            rc = vmdkWriteMetaSparseExtent(pImage, pExtent,
                                                           uFileOffset, pIoCtx);
                        }
                        break;
                    case VMDKETYPE_VMFS:
                    case VMDKETYPE_FLAT:
                        /* Nothing to do. */
                        break;
                    case VMDKETYPE_ZERO:
                    default:
                        AssertMsgFailed(("extent with type %d marked as dirty\n",
                                         pExtent->enmType));
                        break;
                }
            }

            if (RT_FAILURE(rc))
                break;

            switch (pExtent->enmType)
            {
                case VMDKETYPE_HOSTED_SPARSE:
                case VMDKETYPE_VMFS:
                case VMDKETYPE_FLAT:
                    /** @todo implement proper path absolute check. */
                    /* NOTE(review): extents whose basename starts with a path
                     * separator (absolute path) are deliberately not flushed
                     * here — presumably raw/device backed; confirm intent. */
                    if (   pExtent->pFile != NULL
                        && !(pImage->uOpenFlags & VD_OPEN_FLAGS_READONLY)
                        && !(pExtent->pszBasename[0] == RTPATH_SLASH))
                        rc = vdIfIoIntFileFlush(pImage->pIfIo, pExtent->pFile->pStorage, pIoCtx,
                                                NULL, NULL);
                    break;
                case VMDKETYPE_ZERO:
                    /* No need to do anything for this extent. */
                    break;
                default:
                    AssertMsgFailed(("unknown extent type %d\n", pExtent->enmType));
                    break;
            }
        }
    }

    return rc;
}
6312
6313/**
6314 * Internal. Find extent corresponding to the sector number in the disk.
6315 */
6316static int vmdkFindExtent(PVMDKIMAGE pImage, uint64_t offSector,
6317 PVMDKEXTENT *ppExtent, uint64_t *puSectorInExtent)
6318{
6319 PVMDKEXTENT pExtent = NULL;
6320 int rc = VINF_SUCCESS;
6321
6322 for (unsigned i = 0; i < pImage->cExtents; i++)
6323 {
6324 if (offSector < pImage->pExtents[i].cNominalSectors)
6325 {
6326 pExtent = &pImage->pExtents[i];
6327 *puSectorInExtent = offSector + pImage->pExtents[i].uSectorOffset;
6328 break;
6329 }
6330 offSector -= pImage->pExtents[i].cNominalSectors;
6331 }
6332
6333 if (pExtent)
6334 *ppExtent = pExtent;
6335 else
6336 rc = VERR_IO_SECTOR_NOT_FOUND;
6337
6338 return rc;
6339}
6340
6341/**
6342 * Internal. Hash function for placing the grain table hash entries.
6343 */
6344static uint32_t vmdkGTCacheHash(PVMDKGTCACHE pCache, uint64_t uSector,
6345 unsigned uExtent)
6346{
6347 /** @todo this hash function is quite simple, maybe use a better one which
6348 * scrambles the bits better. */
6349 return (uSector + uExtent) % pCache->cEntries;
6350}
6351
/**
 * Internal. Get sector number in the extent file from the relative sector
 * number in the extent.
 *
 * Looks up the grain table entry for @a uSector via the GT cache (reading the
 * GT block from disk on a cache miss) and returns the absolute sector of the
 * grain in the extent file, or 0 if the grain is unallocated.
 *
 * @returns VBox status code.
 * @param   pImage          Image instance data.
 * @param   pIoCtx          I/O context used for metadata reads.
 * @param   pExtent         The extent to look the sector up in.
 * @param   uSector         Sector number relative to the extent start.
 * @param   puExtentSector  Where to store the sector in the extent file
 *                          (0 means "no data / unallocated grain").
 */
static int vmdkGetSector(PVMDKIMAGE pImage, PVDIOCTX pIoCtx,
                         PVMDKEXTENT pExtent, uint64_t uSector,
                         uint64_t *puExtentSector)
{
    PVMDKGTCACHE pCache = pImage->pGTCache;
    uint64_t uGDIndex, uGTSector, uGTBlock;
    uint32_t uGTHash, uGTBlockIndex;
    PVMDKGTCACHEENTRY pGTCacheEntry;
    uint32_t aGTDataTmp[VMDK_GT_CACHELINE_SIZE];
    int rc;

    /* For newly created and readonly/sequentially opened streamOptimized
     * images this must be a no-op, as the grain directory is not there. */
    if (   (   pImage->uImageFlags & VD_VMDK_IMAGE_FLAGS_STREAM_OPTIMIZED
            && pExtent->uAppendPosition)
        || (   pImage->uImageFlags & VD_VMDK_IMAGE_FLAGS_STREAM_OPTIMIZED
            && pImage->uOpenFlags & VD_OPEN_FLAGS_READONLY
            && pImage->uOpenFlags & VD_OPEN_FLAGS_SEQUENTIAL))
    {
        *puExtentSector = 0;
        return VINF_SUCCESS;
    }

    uGDIndex = uSector / pExtent->cSectorsPerGDE;
    if (uGDIndex >= pExtent->cGDEntries)
        return VERR_OUT_OF_RANGE;
    uGTSector = pExtent->pGD[uGDIndex];
    if (!uGTSector)
    {
        /* There is no grain table referenced by this grain directory
         * entry. So there is absolutely no data in this area. */
        *puExtentSector = 0;
        return VINF_SUCCESS;
    }

    /* GT cache granularity is one cache line (VMDK_GT_CACHELINE_SIZE entries),
     * identified by extent number and GT block number. */
    uGTBlock = uSector / (pExtent->cSectorsPerGrain * VMDK_GT_CACHELINE_SIZE);
    uGTHash = vmdkGTCacheHash(pCache, uGTBlock, pExtent->uExtent);
    pGTCacheEntry = &pCache->aGTCache[uGTHash];
    if (   pGTCacheEntry->uExtent != pExtent->uExtent
        || pGTCacheEntry->uGTBlock != uGTBlock)
    {
        /* Cache miss, fetch data from disk. */
        PVDMETAXFER pMetaXfer;
        rc = vdIfIoIntFileReadMeta(pImage->pIfIo, pExtent->pFile->pStorage,
                                   VMDK_SECTOR2BYTE(uGTSector) + (uGTBlock % (pExtent->cGTEntries / VMDK_GT_CACHELINE_SIZE)) * sizeof(aGTDataTmp),
                                   aGTDataTmp, sizeof(aGTDataTmp), pIoCtx, &pMetaXfer, NULL, NULL);
        if (RT_FAILURE(rc))
            return rc;
        /* We can release the metadata transfer immediately. */
        vdIfIoIntMetaXferRelease(pImage->pIfIo, pMetaXfer);
        pGTCacheEntry->uExtent = pExtent->uExtent;
        pGTCacheEntry->uGTBlock = uGTBlock;
        /* Cached entries are kept in host byte order. */
        for (unsigned i = 0; i < VMDK_GT_CACHELINE_SIZE; i++)
            pGTCacheEntry->aGTData[i] = RT_LE2H_U32(aGTDataTmp[i]);
    }
    uGTBlockIndex = (uSector / pExtent->cSectorsPerGrain) % VMDK_GT_CACHELINE_SIZE;
    uint32_t uGrainSector = pGTCacheEntry->aGTData[uGTBlockIndex];
    if (uGrainSector)
        *puExtentSector = uGrainSector + uSector % pExtent->cSectorsPerGrain;
    else
        *puExtentSector = 0; /* Unallocated grain. */
    return VINF_SUCCESS;
}
6419
/**
 * Internal. Writes the grain and also if necessary the grain tables.
 * Uses the grain table cache as a true grain table.
 *
 * Stream optimized images are strictly append-only, so writes must arrive
 * grain-aligned and in ascending order; going backwards is rejected.
 *
 * @returns VBox status code.
 * @param   pImage   Image instance data.
 * @param   pExtent  The (stream optimized, hosted sparse) extent to write to.
 * @param   uSector  First sector to write, relative to the extent.
 * @param   pIoCtx   I/O context holding the data (must be synchronous).
 * @param   cbWrite  Number of bytes to write.
 */
static int vmdkStreamAllocGrain(PVMDKIMAGE pImage, PVMDKEXTENT pExtent,
                                uint64_t uSector, PVDIOCTX pIoCtx,
                                uint64_t cbWrite)
{
    uint32_t uGrain;
    uint32_t uGDEntry, uLastGDEntry;
    uint32_t cbGrain = 0;
    uint32_t uCacheLine, uCacheEntry;
    const void *pData;
    int rc;

    /* Very strict requirements: always write at least one full grain, with
     * proper alignment. Everything else would require reading of already
     * written data, which we don't support for obvious reasons. The only
     * exception is the last grain, and only if the image size specifies
     * that only some portion holds data. In any case the write must be
     * within the image limits, no "overshoot" allowed. */
    if (   cbWrite == 0
        || (   cbWrite < VMDK_SECTOR2BYTE(pExtent->cSectorsPerGrain)
            && pExtent->cNominalSectors - uSector >= pExtent->cSectorsPerGrain)
        || uSector % pExtent->cSectorsPerGrain
        || uSector + VMDK_BYTE2SECTOR(cbWrite) > pExtent->cNominalSectors)
        return VERR_INVALID_PARAMETER;

    /* Clip write range to at most the rest of the grain. */
    cbWrite = RT_MIN(cbWrite, VMDK_SECTOR2BYTE(pExtent->cSectorsPerGrain - uSector % pExtent->cSectorsPerGrain));

    /* Do not allow to go back. */
    uGrain = uSector / pExtent->cSectorsPerGrain;
    uCacheLine = uGrain % pExtent->cGTEntries / VMDK_GT_CACHELINE_SIZE;
    uCacheEntry = uGrain % VMDK_GT_CACHELINE_SIZE;
    uGDEntry = uGrain / pExtent->cGTEntries;
    uLastGDEntry = pExtent->uLastGrainAccess / pExtent->cGTEntries;
    if (uGrain < pExtent->uLastGrainAccess)
        return VERR_VD_VMDK_INVALID_WRITE;

    /* Zero byte write optimization. Since we don't tell VBoxHDD that we need
     * to allocate something, we also need to detect the situation ourself. */
    if (   !(pImage->uOpenFlags & VD_OPEN_FLAGS_HONOR_ZEROES)
        && vdIfIoIntIoCtxIsZero(pImage->pIfIo, pIoCtx, cbWrite, true /* fAdvance */))
        return VINF_SUCCESS;

    /* Crossing into a new grain directory entry: flush the buffered grain
     * table and (empty) tables for any skipped GD entries in between. */
    if (uGDEntry != uLastGDEntry)
    {
        rc = vmdkStreamFlushGT(pImage, pExtent, uLastGDEntry);
        if (RT_FAILURE(rc))
            return rc;
        vmdkStreamClearGT(pImage, pExtent);
        for (uint32_t i = uLastGDEntry + 1; i < uGDEntry; i++)
        {
            rc = vmdkStreamFlushGT(pImage, pExtent, i);
            if (RT_FAILURE(rc))
                return rc;
        }
    }

    uint64_t uFileOffset;
    uFileOffset = pExtent->uAppendPosition;
    if (!uFileOffset)
        return VERR_INTERNAL_ERROR;
    /* Align to sector, as the previous write could have been any size. */
    uFileOffset = RT_ALIGN_64(uFileOffset, 512);

    /* Paranoia check: extent type, grain table buffer presence and
     * grain table buffer space. Also grain table entry must be clear. */
    if (   pExtent->enmType != VMDKETYPE_HOSTED_SPARSE
        || !pImage->pGTCache
        || pExtent->cGTEntries > VMDK_GT_CACHE_SIZE * VMDK_GT_CACHELINE_SIZE
        || pImage->pGTCache->aGTCache[uCacheLine].aGTData[uCacheEntry])
        return VERR_INTERNAL_ERROR;

    /* Update grain table entry. */
    pImage->pGTCache->aGTCache[uCacheLine].aGTData[uCacheEntry] = VMDK_BYTE2SECTOR(uFileOffset);

    if (cbWrite != VMDK_SECTOR2BYTE(pExtent->cSectorsPerGrain))
    {
        /* Partial (tail) grain: copy into the scratch buffer and pad the
         * rest of the grain with zeros before compressing. */
        vdIfIoIntIoCtxCopyFrom(pImage->pIfIo, pIoCtx, pExtent->pvGrain, cbWrite);
        memset((char *)pExtent->pvGrain + cbWrite, '\0',
               VMDK_SECTOR2BYTE(pExtent->cSectorsPerGrain) - cbWrite);
        pData = pExtent->pvGrain;
    }
    else
    {
        /* Full grain: compress straight out of the I/O context buffer. */
        RTSGSEG Segment;
        unsigned cSegments = 1;
        size_t cbSeg = 0;

        cbSeg = vdIfIoIntIoCtxSegArrayCreate(pImage->pIfIo, pIoCtx, &Segment,
                                             &cSegments, VMDK_SECTOR2BYTE(pExtent->cSectorsPerGrain));
        Assert(cbSeg == VMDK_SECTOR2BYTE(pExtent->cSectorsPerGrain));
        pData = Segment.pvSeg;
    }
    rc = vmdkFileDeflateSync(pImage, pExtent, uFileOffset, pData,
                             VMDK_SECTOR2BYTE(pExtent->cSectorsPerGrain),
                             uSector, &cbGrain);
    if (RT_FAILURE(rc))
    {
        /* Invalidate the sequential-read position on error. */
        pExtent->uGrainSectorAbs = 0;
        AssertRC(rc);
        return vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: cannot write compressed data block in '%s'"), pExtent->pszFullname);
    }
    pExtent->uLastGrainAccess = uGrain;
    pExtent->uAppendPosition += cbGrain;

    return rc;
}
6530
/**
 * Internal: Updates the grain table during grain allocation.
 *
 * Brings the relevant GT cache line up to date (reading it from disk on a
 * cache miss, possibly asynchronously) and writes the updated grain table
 * entry to the primary and, if present, the redundant grain table.
 *
 * @returns VBox status code; VERR_VD_ASYNC_IO_IN_PROGRESS means this
 *          function will be re-invoked via vmdkAllocGrainComplete.
 * @param   pImage      Image instance data.
 * @param   pExtent     The extent being allocated in.
 * @param   pIoCtx      I/O context for the metadata transfers.
 * @param   pGrainAlloc Allocation tracking state (GT sectors, grain offset,
 *                      pending-transfer count).
 */
static int vmdkAllocGrainGTUpdate(PVMDKIMAGE pImage, PVMDKEXTENT pExtent, PVDIOCTX pIoCtx,
                                  PVMDKGRAINALLOCASYNC pGrainAlloc)
{
    int rc = VINF_SUCCESS;
    PVMDKGTCACHE pCache = pImage->pGTCache;
    uint32_t aGTDataTmp[VMDK_GT_CACHELINE_SIZE];
    uint32_t uGTHash, uGTBlockIndex;
    uint64_t uGTSector, uRGTSector, uGTBlock;
    uint64_t uSector = pGrainAlloc->uSector;
    PVMDKGTCACHEENTRY pGTCacheEntry;

    LogFlowFunc(("pImage=%#p pExtent=%#p pCache=%#p pIoCtx=%#p pGrainAlloc=%#p\n",
                 pImage, pExtent, pCache, pIoCtx, pGrainAlloc));

    uGTSector = pGrainAlloc->uGTSector;
    uRGTSector = pGrainAlloc->uRGTSector;
    LogFlow(("uGTSector=%llu uRGTSector=%llu\n", uGTSector, uRGTSector));

    /* Update the grain table (and the cache). */
    uGTBlock = uSector / (pExtent->cSectorsPerGrain * VMDK_GT_CACHELINE_SIZE);
    uGTHash = vmdkGTCacheHash(pCache, uGTBlock, pExtent->uExtent);
    pGTCacheEntry = &pCache->aGTCache[uGTHash];
    if (   pGTCacheEntry->uExtent != pExtent->uExtent
        || pGTCacheEntry->uGTBlock != uGTBlock)
    {
        /* Cache miss, fetch data from disk. */
        LogFlow(("Cache miss, fetch data from disk\n"));
        PVDMETAXFER pMetaXfer = NULL;
        rc = vdIfIoIntFileReadMeta(pImage->pIfIo, pExtent->pFile->pStorage,
                                   VMDK_SECTOR2BYTE(uGTSector) + (uGTBlock % (pExtent->cGTEntries / VMDK_GT_CACHELINE_SIZE)) * sizeof(aGTDataTmp),
                                   aGTDataTmp, sizeof(aGTDataTmp), pIoCtx,
                                   &pMetaXfer, vmdkAllocGrainComplete, pGrainAlloc);
        if (rc == VERR_VD_ASYNC_IO_IN_PROGRESS)
        {
            pGrainAlloc->cIoXfersPending++;
            pGrainAlloc->fGTUpdateNeeded = true;
            /* Leave early, we will be called again after the read completed. */
            LogFlowFunc(("Metadata read in progress, leaving\n"));
            return rc;
        }
        else if (RT_FAILURE(rc))
            return vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: cannot read allocated grain table entry in '%s'"), pExtent->pszFullname);
        vdIfIoIntMetaXferRelease(pImage->pIfIo, pMetaXfer);
        /* Populate the cache line in host byte order. */
        pGTCacheEntry->uExtent = pExtent->uExtent;
        pGTCacheEntry->uGTBlock = uGTBlock;
        for (unsigned i = 0; i < VMDK_GT_CACHELINE_SIZE; i++)
            pGTCacheEntry->aGTData[i] = RT_LE2H_U32(aGTDataTmp[i]);
    }
    else
    {
        /* Cache hit. Convert grain table block back to disk format, otherwise
         * the code below will write garbage for all but the updated entry. */
        for (unsigned i = 0; i < VMDK_GT_CACHELINE_SIZE; i++)
            aGTDataTmp[i] = RT_H2LE_U32(pGTCacheEntry->aGTData[i]);
    }
    pGrainAlloc->fGTUpdateNeeded = false;
    /* Patch in the new entry: little endian in the on-disk buffer, host byte
     * order in the cache. */
    uGTBlockIndex = (uSector / pExtent->cSectorsPerGrain) % VMDK_GT_CACHELINE_SIZE;
    aGTDataTmp[uGTBlockIndex] = RT_H2LE_U32(VMDK_BYTE2SECTOR(pGrainAlloc->uGrainOffset));
    pGTCacheEntry->aGTData[uGTBlockIndex] = VMDK_BYTE2SECTOR(pGrainAlloc->uGrainOffset);
    /* Update grain table on disk. */
    rc = vdIfIoIntFileWriteMeta(pImage->pIfIo, pExtent->pFile->pStorage,
                                VMDK_SECTOR2BYTE(uGTSector) + (uGTBlock % (pExtent->cGTEntries / VMDK_GT_CACHELINE_SIZE)) * sizeof(aGTDataTmp),
                                aGTDataTmp, sizeof(aGTDataTmp), pIoCtx,
                                vmdkAllocGrainComplete, pGrainAlloc);
    if (rc == VERR_VD_ASYNC_IO_IN_PROGRESS)
        pGrainAlloc->cIoXfersPending++;
    else if (RT_FAILURE(rc))
        return vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: cannot write updated grain table in '%s'"), pExtent->pszFullname);
    if (pExtent->pRGD)
    {
        /* Update backup grain table on disk. */
        rc = vdIfIoIntFileWriteMeta(pImage->pIfIo, pExtent->pFile->pStorage,
                                    VMDK_SECTOR2BYTE(uRGTSector) + (uGTBlock % (pExtent->cGTEntries / VMDK_GT_CACHELINE_SIZE)) * sizeof(aGTDataTmp),
                                    aGTDataTmp, sizeof(aGTDataTmp), pIoCtx,
                                    vmdkAllocGrainComplete, pGrainAlloc);
        if (rc == VERR_VD_ASYNC_IO_IN_PROGRESS)
            pGrainAlloc->cIoXfersPending++;
        else if (RT_FAILURE(rc))
            return vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: cannot write updated backup grain table in '%s'"), pExtent->pszFullname);
    }

    LogFlowFunc(("leaving rc=%Rrc\n", rc));
    return rc;
}
6618
6619/**
6620 * Internal - complete the grain allocation by updating disk grain table if required.
6621 */
6622static DECLCALLBACK(int) vmdkAllocGrainComplete(void *pBackendData, PVDIOCTX pIoCtx, void *pvUser, int rcReq)
6623{
6624 RT_NOREF1(rcReq);
6625 int rc = VINF_SUCCESS;
6626 PVMDKIMAGE pImage = (PVMDKIMAGE)pBackendData;
6627 PVMDKGRAINALLOCASYNC pGrainAlloc = (PVMDKGRAINALLOCASYNC)pvUser;
6628
6629 LogFlowFunc(("pBackendData=%#p pIoCtx=%#p pvUser=%#p rcReq=%Rrc\n",
6630 pBackendData, pIoCtx, pvUser, rcReq));
6631
6632 pGrainAlloc->cIoXfersPending--;
6633 if (!pGrainAlloc->cIoXfersPending && pGrainAlloc->fGTUpdateNeeded)
6634 rc = vmdkAllocGrainGTUpdate(pImage, pGrainAlloc->pExtent, pIoCtx, pGrainAlloc);
6635
6636 if (!pGrainAlloc->cIoXfersPending)
6637 {
6638 /* Grain allocation completed. */
6639 RTMemFree(pGrainAlloc);
6640 }
6641
6642 LogFlowFunc(("Leaving rc=%Rrc\n", rc));
6643 return rc;
6644}
6645
/**
 * Internal. Allocates a new grain table (if necessary).
 *
 * Allocates the grain (and, when the covering grain directory entry is still
 * empty, a new grain table plus optional redundant copy) at the extent's
 * append position and writes the data. The grain table update itself is
 * delegated to vmdkAllocGrainGTUpdate(). The VMDKGRAINALLOCASYNC tracking
 * structure is freed here when no async transfers remain pending, otherwise
 * by vmdkAllocGrainComplete() once the last transfer finishes.
 *
 * @returns VBox status code.
 * @param   pImage   Image instance data.
 * @param   pExtent  The extent to allocate in.
 * @param   pIoCtx   I/O context holding the data to write.
 * @param   uSector  First sector to write, relative to the extent.
 * @param   cbWrite  Number of bytes to write (a full grain except possibly
 *                   for stream optimized images at the image tail).
 */
static int vmdkAllocGrain(PVMDKIMAGE pImage, PVMDKEXTENT pExtent, PVDIOCTX pIoCtx,
                          uint64_t uSector, uint64_t cbWrite)
{
    PVMDKGTCACHE pCache = pImage->pGTCache; NOREF(pCache);
    uint64_t uGDIndex, uGTSector, uRGTSector;
    uint64_t uFileOffset;
    PVMDKGRAINALLOCASYNC pGrainAlloc = NULL;
    int rc;

    LogFlowFunc(("pCache=%#p pExtent=%#p pIoCtx=%#p uSector=%llu cbWrite=%llu\n",
                 pCache, pExtent, pIoCtx, uSector, cbWrite));

    pGrainAlloc = (PVMDKGRAINALLOCASYNC)RTMemAllocZ(sizeof(VMDKGRAINALLOCASYNC));
    if (!pGrainAlloc)
        return VERR_NO_MEMORY;

    pGrainAlloc->pExtent = pExtent;
    pGrainAlloc->uSector = uSector;

    uGDIndex = uSector / pExtent->cSectorsPerGDE;
    if (uGDIndex >= pExtent->cGDEntries)
    {
        RTMemFree(pGrainAlloc);
        return VERR_OUT_OF_RANGE;
    }
    uGTSector = pExtent->pGD[uGDIndex];
    if (pExtent->pRGD)
        uRGTSector = pExtent->pRGD[uGDIndex];
    else
        uRGTSector = 0; /**< avoid compiler warning */
    if (!uGTSector)
    {
        LogFlow(("Allocating new grain table\n"));

        /* There is no grain table referenced by this grain directory
         * entry. So there is absolutely no data in this area. Allocate
         * a new grain table and put the reference to it in the GDs. */
        uFileOffset = pExtent->uAppendPosition;
        if (!uFileOffset)
        {
            RTMemFree(pGrainAlloc);
            return VERR_INTERNAL_ERROR;
        }
        Assert(!(uFileOffset % 512));

        uFileOffset = RT_ALIGN_64(uFileOffset, 512);
        uGTSector = VMDK_BYTE2SECTOR(uFileOffset);

        /* Normally the grain table is preallocated for hosted sparse extents
         * that support more than 32 bit sector numbers. So this shouldn't
         * ever happen on a valid extent. */
        if (uGTSector > UINT32_MAX)
        {
            RTMemFree(pGrainAlloc);
            return VERR_VD_VMDK_INVALID_HEADER;
        }

        /* Write grain table by writing the required number of grain table
         * cache chunks. Allocate memory dynamically here or we flood the
         * metadata cache with very small entries. */
        size_t cbGTDataTmp = pExtent->cGTEntries * sizeof(uint32_t);
        uint32_t *paGTDataTmp = (uint32_t *)RTMemTmpAllocZ(cbGTDataTmp);

        if (!paGTDataTmp)
        {
            RTMemFree(pGrainAlloc);
            return VERR_NO_MEMORY;
        }

        memset(paGTDataTmp, '\0', cbGTDataTmp);
        rc = vdIfIoIntFileWriteMeta(pImage->pIfIo, pExtent->pFile->pStorage,
                                    VMDK_SECTOR2BYTE(uGTSector),
                                    paGTDataTmp, cbGTDataTmp, pIoCtx,
                                    vmdkAllocGrainComplete, pGrainAlloc);
        if (rc == VERR_VD_ASYNC_IO_IN_PROGRESS)
            pGrainAlloc->cIoXfersPending++;
        else if (RT_FAILURE(rc))
        {
            RTMemTmpFree(paGTDataTmp);
            RTMemFree(pGrainAlloc);
            return vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: cannot write grain table allocation in '%s'"), pExtent->pszFullname);
        }
        pExtent->uAppendPosition = RT_ALIGN_64(  pExtent->uAppendPosition
                                               + cbGTDataTmp, 512);

        if (pExtent->pRGD)
        {
            /* NOTE(review): the early returns in this branch leak
             * paGTDataTmp and/or pGrainAlloc when no async transfer is
             * pending (the completion callback only frees pGrainAlloc if
             * a transfer was queued above). Worth auditing. */
            AssertReturn(!uRGTSector, VERR_VD_VMDK_INVALID_HEADER);
            uFileOffset = pExtent->uAppendPosition;
            if (!uFileOffset)
                return VERR_INTERNAL_ERROR;
            Assert(!(uFileOffset % 512));
            uRGTSector = VMDK_BYTE2SECTOR(uFileOffset);

            /* Normally the redundant grain table is preallocated for hosted
             * sparse extents that support more than 32 bit sector numbers. So
             * this shouldn't ever happen on a valid extent. */
            if (uRGTSector > UINT32_MAX)
            {
                RTMemTmpFree(paGTDataTmp);
                return VERR_VD_VMDK_INVALID_HEADER;
            }

            /* Write grain table by writing the required number of grain table
             * cache chunks. Allocate memory dynamically here or we flood the
             * metadata cache with very small entries. */
            rc = vdIfIoIntFileWriteMeta(pImage->pIfIo, pExtent->pFile->pStorage,
                                        VMDK_SECTOR2BYTE(uRGTSector),
                                        paGTDataTmp, cbGTDataTmp, pIoCtx,
                                        vmdkAllocGrainComplete, pGrainAlloc);
            if (rc == VERR_VD_ASYNC_IO_IN_PROGRESS)
                pGrainAlloc->cIoXfersPending++;
            else if (RT_FAILURE(rc))
            {
                RTMemTmpFree(paGTDataTmp);
                return vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: cannot write backup grain table allocation in '%s'"), pExtent->pszFullname);
            }

            pExtent->uAppendPosition = pExtent->uAppendPosition + cbGTDataTmp;
        }

        RTMemTmpFree(paGTDataTmp);

        /* Update the grain directory on disk (doing it before writing the
         * grain table will result in a garbled extent if the operation is
         * aborted for some reason. Otherwise the worst that can happen is
         * some unused sectors in the extent. */
        /* NOTE(review): uGTSectorLE is uint32_t but converted with
         * RT_H2LE_U64 — harmless truncation on little endian hosts (value
         * fits 32 bits, checked above), but looks wrong on big endian;
         * RT_H2LE_U32 on the 32-bit value would be the clean form. */
        uint32_t uGTSectorLE = RT_H2LE_U64(uGTSector);
        rc = vdIfIoIntFileWriteMeta(pImage->pIfIo, pExtent->pFile->pStorage,
                                    VMDK_SECTOR2BYTE(pExtent->uSectorGD) + uGDIndex * sizeof(uGTSectorLE),
                                    &uGTSectorLE, sizeof(uGTSectorLE), pIoCtx,
                                    vmdkAllocGrainComplete, pGrainAlloc);
        if (rc == VERR_VD_ASYNC_IO_IN_PROGRESS)
            pGrainAlloc->cIoXfersPending++;
        else if (RT_FAILURE(rc))
            return vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: cannot write grain directory entry in '%s'"), pExtent->pszFullname);
        if (pExtent->pRGD)
        {
            uint32_t uRGTSectorLE = RT_H2LE_U64(uRGTSector);
            rc = vdIfIoIntFileWriteMeta(pImage->pIfIo, pExtent->pFile->pStorage,
                                        VMDK_SECTOR2BYTE(pExtent->uSectorRGD) + uGDIndex * sizeof(uGTSectorLE),
                                        &uRGTSectorLE, sizeof(uRGTSectorLE), pIoCtx,
                                        vmdkAllocGrainComplete, pGrainAlloc);
            if (rc == VERR_VD_ASYNC_IO_IN_PROGRESS)
                pGrainAlloc->cIoXfersPending++;
            else if (RT_FAILURE(rc))
                return vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: cannot write backup grain directory entry in '%s'"), pExtent->pszFullname);
        }

        /* As the final step update the in-memory copy of the GDs. */
        pExtent->pGD[uGDIndex] = uGTSector;
        if (pExtent->pRGD)
            pExtent->pRGD[uGDIndex] = uRGTSector;
    }

    LogFlow(("uGTSector=%llu uRGTSector=%llu\n", uGTSector, uRGTSector));
    pGrainAlloc->uGTSector = uGTSector;
    pGrainAlloc->uRGTSector = uRGTSector;

    uFileOffset = pExtent->uAppendPosition;
    if (!uFileOffset)
        return VERR_INTERNAL_ERROR;
    Assert(!(uFileOffset % 512));

    pGrainAlloc->uGrainOffset = uFileOffset;

    if (pImage->uImageFlags & VD_VMDK_IMAGE_FLAGS_STREAM_OPTIMIZED)
    {
        AssertMsgReturn(vdIfIoIntIoCtxIsSynchronous(pImage->pIfIo, pIoCtx),
                        ("Accesses to stream optimized images must be synchronous\n"),
                        VERR_INVALID_STATE);

        if (cbWrite != VMDK_SECTOR2BYTE(pExtent->cSectorsPerGrain))
            return vdIfError(pImage->pIfError, VERR_INTERNAL_ERROR, RT_SRC_POS, N_("VMDK: not enough data for a compressed data block in '%s'"), pExtent->pszFullname);

        /* Invalidate cache, just in case some code incorrectly allows mixing
         * of reads and writes. Normally shouldn't be needed. */
        pExtent->uGrainSectorAbs = 0;

        /* Write compressed data block and the markers. */
        uint32_t cbGrain = 0;
        size_t cbSeg = 0;
        RTSGSEG Segment;
        unsigned cSegments = 1;

        cbSeg = vdIfIoIntIoCtxSegArrayCreate(pImage->pIfIo, pIoCtx, &Segment,
                                             &cSegments, cbWrite);
        Assert(cbSeg == cbWrite);

        rc = vmdkFileDeflateSync(pImage, pExtent, uFileOffset,
                                 Segment.pvSeg, cbWrite, uSector, &cbGrain);
        if (RT_FAILURE(rc))
        {
            AssertRC(rc);
            return vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: cannot write allocated compressed data block in '%s'"), pExtent->pszFullname);
        }
        pExtent->uLastGrainAccess = uSector / pExtent->cSectorsPerGrain;
        pExtent->uAppendPosition += cbGrain;
    }
    else
    {
        /* Write the data. Always a full grain, or we're in big trouble. */
        rc = vdIfIoIntFileWriteUser(pImage->pIfIo, pExtent->pFile->pStorage,
                                    uFileOffset, pIoCtx, cbWrite,
                                    vmdkAllocGrainComplete, pGrainAlloc);
        if (rc == VERR_VD_ASYNC_IO_IN_PROGRESS)
            pGrainAlloc->cIoXfersPending++;
        else if (RT_FAILURE(rc))
            return vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: cannot write allocated data block in '%s'"), pExtent->pszFullname);

        pExtent->uAppendPosition += cbWrite;
    }

    rc = vmdkAllocGrainGTUpdate(pImage, pExtent, pIoCtx, pGrainAlloc);

    if (!pGrainAlloc->cIoXfersPending)
    {
        /* Grain allocation completed. */
        RTMemFree(pGrainAlloc);
    }

    LogFlowFunc(("leaving rc=%Rrc\n", rc));

    return rc;
}
6874
6875/**
6876 * Internal. Reads the contents by sequentially going over the compressed
6877 * grains (hoping that they are in sequence).
6878 */
6879static int vmdkStreamReadSequential(PVMDKIMAGE pImage, PVMDKEXTENT pExtent,
6880 uint64_t uSector, PVDIOCTX pIoCtx,
6881 uint64_t cbRead)
6882{
6883 int rc;
6884
6885 LogFlowFunc(("pImage=%#p pExtent=%#p uSector=%llu pIoCtx=%#p cbRead=%llu\n",
6886 pImage, pExtent, uSector, pIoCtx, cbRead));
6887
6888 AssertMsgReturn(vdIfIoIntIoCtxIsSynchronous(pImage->pIfIo, pIoCtx),
6889 ("Async I/O not supported for sequential stream optimized images\n"),
6890 VERR_INVALID_STATE);
6891
6892 /* Do not allow to go back. */
6893 uint32_t uGrain = uSector / pExtent->cSectorsPerGrain;
6894 if (uGrain < pExtent->uLastGrainAccess)
6895 return VERR_VD_VMDK_INVALID_STATE;
6896 pExtent->uLastGrainAccess = uGrain;
6897
6898 /* After a previous error do not attempt to recover, as it would need
6899 * seeking (in the general case backwards which is forbidden). */
6900 if (!pExtent->uGrainSectorAbs)
6901 return VERR_VD_VMDK_INVALID_STATE;
6902
6903 /* Check if we need to read something from the image or if what we have
6904 * in the buffer is good to fulfill the request. */
6905 if (!pExtent->cbGrainStreamRead || uGrain > pExtent->uGrain)
6906 {
6907 uint32_t uGrainSectorAbs = pExtent->uGrainSectorAbs
6908 + VMDK_BYTE2SECTOR(pExtent->cbGrainStreamRead);
6909
6910 /* Get the marker from the next data block - and skip everything which
6911 * is not a compressed grain. If it's a compressed grain which is for
6912 * the requested sector (or after), read it. */
6913 VMDKMARKER Marker;
6914 do
6915 {
6916 RT_ZERO(Marker);
6917 rc = vdIfIoIntFileReadSync(pImage->pIfIo, pExtent->pFile->pStorage,
6918 VMDK_SECTOR2BYTE(uGrainSectorAbs),
6919 &Marker, RT_UOFFSETOF(VMDKMARKER, uType));
6920 if (RT_FAILURE(rc))
6921 return rc;
6922 Marker.uSector = RT_LE2H_U64(Marker.uSector);
6923 Marker.cbSize = RT_LE2H_U32(Marker.cbSize);
6924
6925 if (Marker.cbSize == 0)
6926 {
6927 /* A marker for something else than a compressed grain. */
6928 rc = vdIfIoIntFileReadSync(pImage->pIfIo, pExtent->pFile->pStorage,
6929 VMDK_SECTOR2BYTE(uGrainSectorAbs)
6930 + RT_UOFFSETOF(VMDKMARKER, uType),
6931 &Marker.uType, sizeof(Marker.uType));
6932 if (RT_FAILURE(rc))
6933 return rc;
6934 Marker.uType = RT_LE2H_U32(Marker.uType);
6935 switch (Marker.uType)
6936 {
6937 case VMDK_MARKER_EOS:
6938 uGrainSectorAbs++;
6939 /* Read (or mostly skip) to the end of file. Uses the
6940 * Marker (LBA sector) as it is unused anyway. This
6941 * makes sure that really everything is read in the
6942 * success case. If this read fails it means the image
6943 * is truncated, but this is harmless so ignore. */
6944 vdIfIoIntFileReadSync(pImage->pIfIo, pExtent->pFile->pStorage,
6945 VMDK_SECTOR2BYTE(uGrainSectorAbs)
6946 + 511,
6947 &Marker.uSector, 1);
6948 break;
6949 case VMDK_MARKER_GT:
6950 uGrainSectorAbs += 1 + VMDK_BYTE2SECTOR(pExtent->cGTEntries * sizeof(uint32_t));
6951 break;
6952 case VMDK_MARKER_GD:
6953 uGrainSectorAbs += 1 + VMDK_BYTE2SECTOR(RT_ALIGN(pExtent->cGDEntries * sizeof(uint32_t), 512));
6954 break;
6955 case VMDK_MARKER_FOOTER:
6956 uGrainSectorAbs += 2;
6957 break;
6958 case VMDK_MARKER_UNSPECIFIED:
6959 /* Skip over the contents of the unspecified marker
6960 * type 4 which exists in some vSphere created files. */
6961 /** @todo figure out what the payload means. */
6962 uGrainSectorAbs += 1;
6963 break;
6964 default:
6965 AssertMsgFailed(("VMDK: corrupted marker, type=%#x\n", Marker.uType));
6966 pExtent->uGrainSectorAbs = 0;
6967 return VERR_VD_VMDK_INVALID_STATE;
6968 }
6969 pExtent->cbGrainStreamRead = 0;
6970 }
6971 else
6972 {
6973 /* A compressed grain marker. If it is at/after what we're
6974 * interested in read and decompress data. */
6975 if (uSector > Marker.uSector + pExtent->cSectorsPerGrain)
6976 {
6977 uGrainSectorAbs += VMDK_BYTE2SECTOR(RT_ALIGN(Marker.cbSize + RT_UOFFSETOF(VMDKMARKER, uType), 512));
6978 continue;
6979 }
6980 uint64_t uLBA = 0;
6981 uint32_t cbGrainStreamRead = 0;
6982 rc = vmdkFileInflateSync(pImage, pExtent,
6983 VMDK_SECTOR2BYTE(uGrainSectorAbs),
6984 pExtent->pvGrain,
6985 VMDK_SECTOR2BYTE(pExtent->cSectorsPerGrain),
6986 &Marker, &uLBA, &cbGrainStreamRead);
6987 if (RT_FAILURE(rc))
6988 {
6989 pExtent->uGrainSectorAbs = 0;
6990 return rc;
6991 }
6992 if ( pExtent->uGrain
6993 && uLBA / pExtent->cSectorsPerGrain <= pExtent->uGrain)
6994 {
6995 pExtent->uGrainSectorAbs = 0;
6996 return VERR_VD_VMDK_INVALID_STATE;
6997 }
6998 pExtent->uGrain = uLBA / pExtent->cSectorsPerGrain;
6999 pExtent->cbGrainStreamRead = cbGrainStreamRead;
7000 break;
7001 }
7002 } while (Marker.uType != VMDK_MARKER_EOS);
7003
7004 pExtent->uGrainSectorAbs = uGrainSectorAbs;
7005
7006 if (!pExtent->cbGrainStreamRead && Marker.uType == VMDK_MARKER_EOS)
7007 {
7008 pExtent->uGrain = UINT32_MAX;
7009 /* Must set a non-zero value for pExtent->cbGrainStreamRead or
7010 * the next read would try to get more data, and we're at EOF. */
7011 pExtent->cbGrainStreamRead = 1;
7012 }
7013 }
7014
7015 if (pExtent->uGrain > uSector / pExtent->cSectorsPerGrain)
7016 {
7017 /* The next data block we have is not for this area, so just return
7018 * that there is no data. */
7019 LogFlowFunc(("returns VERR_VD_BLOCK_FREE\n"));
7020 return VERR_VD_BLOCK_FREE;
7021 }
7022
7023 uint32_t uSectorInGrain = uSector % pExtent->cSectorsPerGrain;
7024 vdIfIoIntIoCtxCopyTo(pImage->pIfIo, pIoCtx,
7025 (uint8_t *)pExtent->pvGrain + VMDK_SECTOR2BYTE(uSectorInGrain),
7026 cbRead);
7027 LogFlowFunc(("returns VINF_SUCCESS\n"));
7028 return VINF_SUCCESS;
7029}
7030
7031/**
7032 * Replaces a fragment of a string with the specified string.
7033 *
7034 * @returns Pointer to the allocated UTF-8 string.
7035 * @param pszWhere UTF-8 string to search in.
7036 * @param pszWhat UTF-8 string to search for.
7037 * @param pszByWhat UTF-8 string to replace the found string with.
7038 *
7039 * @note r=bird: This is only used by vmdkRenameWorker(). The first use is
7040 * for updating the base name in the descriptor, the second is for
7041 * generating new filenames for extents. This code borked when
7042 * RTPathAbs started correcting the driver letter case on windows,
7043 * when strstr failed because the pExtent->pszFullname was not
7044 * subjected to RTPathAbs but while pExtent->pszFullname was. I fixed
7045 * this by apply RTPathAbs to the places it wasn't applied.
7046 *
7047 * However, this highlights some undocumented ASSUMPTIONS as well as
7048 * terrible short commings of the approach.
7049 *
7050 * Given the right filename, it may also screw up the descriptor. Take
7051 * the descriptor text 'RW 2048 SPARSE "Test0.vmdk"' for instance,
7052 * we'll be asked to replace "Test0" with something, no problem. No,
7053 * imagine 'RW 2048 SPARSE "SPARSE.vmdk"', 'RW 2048 SPARSE "RW.vmdk"'
7054 * or 'RW 2048 SPARSE "2048.vmdk"', and the strstr approach falls on
7055 * its bum. The descriptor string must be parsed and reconstructed,
7056 * the lazy strstr approach doesn't cut it.
7057 *
7058 * I'm also curious as to what would be the correct escaping of '"' in
7059 * the file name and how that is supposed to be handled, because it
7060 * needs to be or such names must be rejected in several places (maybe
7061 * they are, I didn't check).
7062 *
7063 * When this function is used to replace the start of a path, I think
7064 * the assumption from the prep/setup code is that we kind of knows
7065 * what we're working on (I could be wrong). However, using strstr
7066 * instead of strncmp/RTStrNICmp makes no sense and isn't future proof.
7067 * Especially on unix systems, weird stuff could happen if someone
7068 * unwittingly tinkers with the prep/setup code. What should really be
7069 * done here is using a new RTPathStartEx function that (via flags)
7070 * allows matching partial final component and returns the length of
7071 * what it matched up (in case it skipped slashes and '.' components).
7072 *
7073 */
7074static char *vmdkStrReplace(const char *pszWhere, const char *pszWhat,
7075 const char *pszByWhat)
7076{
7077 AssertPtr(pszWhere);
7078 AssertPtr(pszWhat);
7079 AssertPtr(pszByWhat);
7080 const char *pszFoundStr = strstr(pszWhere, pszWhat);
7081 if (!pszFoundStr)
7082 {
7083 LogFlowFunc(("Failed to find '%s' in '%s'!\n", pszWhat, pszWhere));
7084 return NULL;
7085 }
7086 size_t cbFinal = strlen(pszWhere) + 1 + strlen(pszByWhat) - strlen(pszWhat);
7087 char *pszNewStr = RTStrAlloc(cbFinal);
7088 if (pszNewStr)
7089 {
7090 char *pszTmp = pszNewStr;
7091 memcpy(pszTmp, pszWhere, pszFoundStr - pszWhere);
7092 pszTmp += pszFoundStr - pszWhere;
7093 memcpy(pszTmp, pszByWhat, strlen(pszByWhat));
7094 pszTmp += strlen(pszByWhat);
7095 strcpy(pszTmp, pszFoundStr + strlen(pszWhat));
7096 }
7097 return pszNewStr;
7098}
7099
7100
7101/** @copydoc VDIMAGEBACKEND::pfnProbe */
7102static DECLCALLBACK(int) vmdkProbe(const char *pszFilename, PVDINTERFACE pVDIfsDisk,
7103 PVDINTERFACE pVDIfsImage, VDTYPE enmDesiredType, VDTYPE *penmType)
7104{
7105 RT_NOREF(enmDesiredType);
7106 LogFlowFunc(("pszFilename=\"%s\" pVDIfsDisk=%#p pVDIfsImage=%#p penmType=%#p\n",
7107 pszFilename, pVDIfsDisk, pVDIfsImage, penmType));
7108 AssertPtrReturn(pszFilename, VERR_INVALID_POINTER);
7109 AssertReturn(*pszFilename != '\0', VERR_INVALID_PARAMETER);
7110
7111 int rc = VINF_SUCCESS;
7112 PVMDKIMAGE pImage = (PVMDKIMAGE)RTMemAllocZ(RT_UOFFSETOF(VMDKIMAGE, RegionList.aRegions[1]));
7113 if (RT_LIKELY(pImage))
7114 {
7115 pImage->pszFilename = pszFilename;
7116 pImage->pFile = NULL;
7117 pImage->pExtents = NULL;
7118 pImage->pFiles = NULL;
7119 pImage->pGTCache = NULL;
7120 pImage->pDescData = NULL;
7121 pImage->pVDIfsDisk = pVDIfsDisk;
7122 pImage->pVDIfsImage = pVDIfsImage;
7123 /** @todo speed up this test open (VD_OPEN_FLAGS_INFO) by skipping as
7124 * much as possible in vmdkOpenImage. */
7125 rc = vmdkOpenImage(pImage, VD_OPEN_FLAGS_INFO | VD_OPEN_FLAGS_READONLY);
7126 vmdkFreeImage(pImage, false, false /*fFlush*/);
7127 RTMemFree(pImage);
7128
7129 if (RT_SUCCESS(rc))
7130 *penmType = VDTYPE_HDD;
7131 }
7132 else
7133 rc = VERR_NO_MEMORY;
7134
7135 LogFlowFunc(("returns %Rrc\n", rc));
7136 return rc;
7137}
7138
7139/** @copydoc VDIMAGEBACKEND::pfnOpen */
7140static DECLCALLBACK(int) vmdkOpen(const char *pszFilename, unsigned uOpenFlags,
7141 PVDINTERFACE pVDIfsDisk, PVDINTERFACE pVDIfsImage,
7142 VDTYPE enmType, void **ppBackendData)
7143{
7144 RT_NOREF1(enmType); /**< @todo r=klaus make use of the type info. */
7145
7146 LogFlowFunc(("pszFilename=\"%s\" uOpenFlags=%#x pVDIfsDisk=%#p pVDIfsImage=%#p enmType=%u ppBackendData=%#p\n",
7147 pszFilename, uOpenFlags, pVDIfsDisk, pVDIfsImage, enmType, ppBackendData));
7148 int rc;
7149
7150 /* Check open flags. All valid flags are supported. */
7151 AssertReturn(!(uOpenFlags & ~VD_OPEN_FLAGS_MASK), VERR_INVALID_PARAMETER);
7152 AssertPtrReturn(pszFilename, VERR_INVALID_POINTER);
7153 AssertReturn(*pszFilename != '\0', VERR_INVALID_PARAMETER);
7154
7155
7156 PVMDKIMAGE pImage = (PVMDKIMAGE)RTMemAllocZ(RT_UOFFSETOF(VMDKIMAGE, RegionList.aRegions[1]));
7157 if (RT_LIKELY(pImage))
7158 {
7159 pImage->pszFilename = pszFilename;
7160 pImage->pFile = NULL;
7161 pImage->pExtents = NULL;
7162 pImage->pFiles = NULL;
7163 pImage->pGTCache = NULL;
7164 pImage->pDescData = NULL;
7165 pImage->pVDIfsDisk = pVDIfsDisk;
7166 pImage->pVDIfsImage = pVDIfsImage;
7167
7168 rc = vmdkOpenImage(pImage, uOpenFlags);
7169 if (RT_SUCCESS(rc))
7170 *ppBackendData = pImage;
7171 else
7172 RTMemFree(pImage);
7173 }
7174 else
7175 rc = VERR_NO_MEMORY;
7176
7177 LogFlowFunc(("returns %Rrc (pBackendData=%#p)\n", rc, *ppBackendData));
7178 return rc;
7179}
7180
7181/** @copydoc VDIMAGEBACKEND::pfnCreate */
7182static DECLCALLBACK(int) vmdkCreate(const char *pszFilename, uint64_t cbSize,
7183 unsigned uImageFlags, const char *pszComment,
7184 PCVDGEOMETRY pPCHSGeometry, PCVDGEOMETRY pLCHSGeometry,
7185 PCRTUUID pUuid, unsigned uOpenFlags,
7186 unsigned uPercentStart, unsigned uPercentSpan,
7187 PVDINTERFACE pVDIfsDisk, PVDINTERFACE pVDIfsImage,
7188 PVDINTERFACE pVDIfsOperation, VDTYPE enmType,
7189 void **ppBackendData)
7190{
7191 LogFlowFunc(("pszFilename=\"%s\" cbSize=%llu uImageFlags=%#x pszComment=\"%s\" pPCHSGeometry=%#p pLCHSGeometry=%#p Uuid=%RTuuid uOpenFlags=%#x uPercentStart=%u uPercentSpan=%u pVDIfsDisk=%#p pVDIfsImage=%#p pVDIfsOperation=%#p enmType=%u ppBackendData=%#p\n",
7192 pszFilename, cbSize, uImageFlags, pszComment, pPCHSGeometry, pLCHSGeometry, pUuid, uOpenFlags, uPercentStart, uPercentSpan, pVDIfsDisk, pVDIfsImage, pVDIfsOperation, enmType, ppBackendData));
7193 int rc;
7194
7195 /* Check the VD container type and image flags. */
7196 if ( enmType != VDTYPE_HDD
7197 || (uImageFlags & ~VD_VMDK_IMAGE_FLAGS_MASK) != 0)
7198 return VERR_VD_INVALID_TYPE;
7199
7200 /* Check size. Maximum 256TB-64K for sparse images, otherwise unlimited. */
7201 if ( !(uImageFlags & VD_VMDK_IMAGE_FLAGS_RAWDISK)
7202 && ( !cbSize
7203 || (!(uImageFlags & VD_IMAGE_FLAGS_FIXED) && cbSize >= _1T * 256 - _64K)))
7204 return VERR_VD_INVALID_SIZE;
7205
7206 /* Check image flags for invalid combinations. */
7207 if ( (uImageFlags & VD_VMDK_IMAGE_FLAGS_STREAM_OPTIMIZED)
7208 && (uImageFlags & ~(VD_VMDK_IMAGE_FLAGS_STREAM_OPTIMIZED | VD_IMAGE_FLAGS_DIFF)))
7209 return VERR_INVALID_PARAMETER;
7210
7211 /* Check open flags. All valid flags are supported. */
7212 AssertReturn(!(uOpenFlags & ~VD_OPEN_FLAGS_MASK), VERR_INVALID_PARAMETER);
7213 AssertPtrReturn(pszFilename, VERR_INVALID_POINTER);
7214 AssertReturn(*pszFilename != '\0', VERR_INVALID_PARAMETER);
7215 AssertPtrReturn(pPCHSGeometry, VERR_INVALID_POINTER);
7216 AssertPtrReturn(pLCHSGeometry, VERR_INVALID_POINTER);
7217 AssertReturn(!( uImageFlags & VD_VMDK_IMAGE_FLAGS_ESX
7218 && !(uImageFlags & VD_IMAGE_FLAGS_FIXED)),
7219 VERR_INVALID_PARAMETER);
7220
7221 PVMDKIMAGE pImage = (PVMDKIMAGE)RTMemAllocZ(RT_UOFFSETOF(VMDKIMAGE, RegionList.aRegions[1]));
7222 if (RT_LIKELY(pImage))
7223 {
7224 PVDINTERFACEPROGRESS pIfProgress = VDIfProgressGet(pVDIfsOperation);
7225
7226 pImage->pszFilename = pszFilename;
7227 pImage->pFile = NULL;
7228 pImage->pExtents = NULL;
7229 pImage->pFiles = NULL;
7230 pImage->pGTCache = NULL;
7231 pImage->pDescData = NULL;
7232 pImage->pVDIfsDisk = pVDIfsDisk;
7233 pImage->pVDIfsImage = pVDIfsImage;
7234 /* Descriptors for split images can be pretty large, especially if the
7235 * filename is long. So prepare for the worst, and allocate quite some
7236 * memory for the descriptor in this case. */
7237 if (uImageFlags & VD_VMDK_IMAGE_FLAGS_SPLIT_2G)
7238 pImage->cbDescAlloc = VMDK_SECTOR2BYTE(200);
7239 else
7240 pImage->cbDescAlloc = VMDK_SECTOR2BYTE(20);
7241 pImage->pDescData = (char *)RTMemAllocZ(pImage->cbDescAlloc);
7242 if (RT_LIKELY(pImage->pDescData))
7243 {
7244 rc = vmdkCreateImage(pImage, cbSize, uImageFlags, pszComment,
7245 pPCHSGeometry, pLCHSGeometry, pUuid,
7246 pIfProgress, uPercentStart, uPercentSpan);
7247 if (RT_SUCCESS(rc))
7248 {
7249 /* So far the image is opened in read/write mode. Make sure the
7250 * image is opened in read-only mode if the caller requested that. */
7251 if (uOpenFlags & VD_OPEN_FLAGS_READONLY)
7252 {
7253 vmdkFreeImage(pImage, false, true /*fFlush*/);
7254 rc = vmdkOpenImage(pImage, uOpenFlags);
7255 }
7256
7257 if (RT_SUCCESS(rc))
7258 *ppBackendData = pImage;
7259 }
7260
7261 if (RT_FAILURE(rc))
7262 RTMemFree(pImage->pDescData);
7263 }
7264 else
7265 rc = VERR_NO_MEMORY;
7266
7267 if (RT_FAILURE(rc))
7268 RTMemFree(pImage);
7269 }
7270 else
7271 rc = VERR_NO_MEMORY;
7272
7273 LogFlowFunc(("returns %Rrc (pBackendData=%#p)\n", rc, *ppBackendData));
7274 return rc;
7275}
7276
7277/**
7278 * Prepares the state for renaming a VMDK image, setting up the state and allocating
7279 * memory.
7280 *
7281 * @returns VBox status code.
7282 * @param pImage VMDK image instance.
7283 * @param pRenameState The state to initialize.
7284 * @param pszFilename The new filename.
7285 */
7286static int vmdkRenameStatePrepare(PVMDKIMAGE pImage, PVMDKRENAMESTATE pRenameState, const char *pszFilename)
7287{
7288 AssertReturn(RTPathFilename(pszFilename) != NULL, VERR_INVALID_PARAMETER);
7289
7290 int rc = VINF_SUCCESS;
7291
7292 memset(&pRenameState->DescriptorCopy, 0, sizeof(pRenameState->DescriptorCopy));
7293
7294 /*
7295 * Allocate an array to store both old and new names of renamed files
7296 * in case we have to roll back the changes. Arrays are initialized
7297 * with zeros. We actually save stuff when and if we change it.
7298 */
7299 pRenameState->cExtents = pImage->cExtents;
7300 pRenameState->apszOldName = (char **)RTMemTmpAllocZ((pRenameState->cExtents + 1) * sizeof(char *));
7301 pRenameState->apszNewName = (char **)RTMemTmpAllocZ((pRenameState->cExtents + 1) * sizeof(char *));
7302 pRenameState->apszNewLines = (char **)RTMemTmpAllocZ(pRenameState->cExtents * sizeof(char *));
7303 if ( pRenameState->apszOldName
7304 && pRenameState->apszNewName
7305 && pRenameState->apszNewLines)
7306 {
7307 /* Save the descriptor size and position. */
7308 if (pImage->pDescData)
7309 {
7310 /* Separate descriptor file. */
7311 pRenameState->fEmbeddedDesc = false;
7312 }
7313 else
7314 {
7315 /* Embedded descriptor file. */
7316 pRenameState->ExtentCopy = pImage->pExtents[0];
7317 pRenameState->fEmbeddedDesc = true;
7318 }
7319
7320 /* Save the descriptor content. */
7321 pRenameState->DescriptorCopy.cLines = pImage->Descriptor.cLines;
7322 for (unsigned i = 0; i < pRenameState->DescriptorCopy.cLines; i++)
7323 {
7324 pRenameState->DescriptorCopy.aLines[i] = RTStrDup(pImage->Descriptor.aLines[i]);
7325 if (!pRenameState->DescriptorCopy.aLines[i])
7326 {
7327 rc = VERR_NO_MEMORY;
7328 break;
7329 }
7330 }
7331
7332 if (RT_SUCCESS(rc))
7333 {
7334 /* Prepare both old and new base names used for string replacement. */
7335 pRenameState->pszNewBaseName = RTStrDup(RTPathFilename(pszFilename));
7336 AssertReturn(pRenameState->pszNewBaseName, VERR_NO_STR_MEMORY);
7337 RTPathStripSuffix(pRenameState->pszNewBaseName);
7338
7339 pRenameState->pszOldBaseName = RTStrDup(RTPathFilename(pImage->pszFilename));
7340 AssertReturn(pRenameState->pszOldBaseName, VERR_NO_STR_MEMORY);
7341 RTPathStripSuffix(pRenameState->pszOldBaseName);
7342
7343 /* Prepare both old and new full names used for string replacement.
7344 Note! Must abspath the stuff here, so the strstr weirdness later in
7345 the renaming process get a match against abspath'ed extent paths.
7346 See RTPathAbsDup call in vmdkDescriptorReadSparse(). */
7347 pRenameState->pszNewFullName = RTPathAbsDup(pszFilename);
7348 AssertReturn(pRenameState->pszNewFullName, VERR_NO_STR_MEMORY);
7349 RTPathStripSuffix(pRenameState->pszNewFullName);
7350
7351 pRenameState->pszOldFullName = RTPathAbsDup(pImage->pszFilename);
7352 AssertReturn(pRenameState->pszOldFullName, VERR_NO_STR_MEMORY);
7353 RTPathStripSuffix(pRenameState->pszOldFullName);
7354
7355 /* Save the old name for easy access to the old descriptor file. */
7356 pRenameState->pszOldDescName = RTStrDup(pImage->pszFilename);
7357 AssertReturn(pRenameState->pszOldDescName, VERR_NO_STR_MEMORY);
7358
7359 /* Save old image name. */
7360 pRenameState->pszOldImageName = pImage->pszFilename;
7361 }
7362 }
7363 else
7364 rc = VERR_NO_TMP_MEMORY;
7365
7366 return rc;
7367}
7368
7369/**
7370 * Destroys the given rename state, freeing all allocated memory.
7371 *
7372 * @param pRenameState The rename state to destroy.
7373 */
7374static void vmdkRenameStateDestroy(PVMDKRENAMESTATE pRenameState)
7375{
7376 for (unsigned i = 0; i < pRenameState->DescriptorCopy.cLines; i++)
7377 if (pRenameState->DescriptorCopy.aLines[i])
7378 RTStrFree(pRenameState->DescriptorCopy.aLines[i]);
7379 if (pRenameState->apszOldName)
7380 {
7381 for (unsigned i = 0; i <= pRenameState->cExtents; i++)
7382 if (pRenameState->apszOldName[i])
7383 RTStrFree(pRenameState->apszOldName[i]);
7384 RTMemTmpFree(pRenameState->apszOldName);
7385 }
7386 if (pRenameState->apszNewName)
7387 {
7388 for (unsigned i = 0; i <= pRenameState->cExtents; i++)
7389 if (pRenameState->apszNewName[i])
7390 RTStrFree(pRenameState->apszNewName[i]);
7391 RTMemTmpFree(pRenameState->apszNewName);
7392 }
7393 if (pRenameState->apszNewLines)
7394 {
7395 for (unsigned i = 0; i < pRenameState->cExtents; i++)
7396 if (pRenameState->apszNewLines[i])
7397 RTStrFree(pRenameState->apszNewLines[i]);
7398 RTMemTmpFree(pRenameState->apszNewLines);
7399 }
7400 if (pRenameState->pszOldDescName)
7401 RTStrFree(pRenameState->pszOldDescName);
7402 if (pRenameState->pszOldBaseName)
7403 RTStrFree(pRenameState->pszOldBaseName);
7404 if (pRenameState->pszNewBaseName)
7405 RTStrFree(pRenameState->pszNewBaseName);
7406 if (pRenameState->pszOldFullName)
7407 RTStrFree(pRenameState->pszOldFullName);
7408 if (pRenameState->pszNewFullName)
7409 RTStrFree(pRenameState->pszNewFullName);
7410}
7411
7412/**
7413 * Rolls back the rename operation to the original state.
7414 *
7415 * @returns VBox status code.
7416 * @param pImage VMDK image instance.
7417 * @param pRenameState The rename state.
7418 */
7419static int vmdkRenameRollback(PVMDKIMAGE pImage, PVMDKRENAMESTATE pRenameState)
7420{
7421 int rc = VINF_SUCCESS;
7422
7423 if (!pRenameState->fImageFreed)
7424 {
7425 /*
7426 * Some extents may have been closed, close the rest. We will
7427 * re-open the whole thing later.
7428 */
7429 vmdkFreeImage(pImage, false, true /*fFlush*/);
7430 }
7431
7432 /* Rename files back. */
7433 for (unsigned i = 0; i <= pRenameState->cExtents; i++)
7434 {
7435 if (pRenameState->apszOldName[i])
7436 {
7437 rc = vdIfIoIntFileMove(pImage->pIfIo, pRenameState->apszNewName[i], pRenameState->apszOldName[i], 0);
7438 AssertRC(rc);
7439 }
7440 }
7441 /* Restore the old descriptor. */
7442 PVMDKFILE pFile;
7443 rc = vmdkFileOpen(pImage, &pFile, NULL, pRenameState->pszOldDescName,
7444 VDOpenFlagsToFileOpenFlags(VD_OPEN_FLAGS_NORMAL,
7445 false /* fCreate */));
7446 AssertRC(rc);
7447 if (pRenameState->fEmbeddedDesc)
7448 {
7449 pRenameState->ExtentCopy.pFile = pFile;
7450 pImage->pExtents = &pRenameState->ExtentCopy;
7451 }
7452 else
7453 {
7454 /* Shouldn't be null for separate descriptor.
7455 * There will be no access to the actual content.
7456 */
7457 pImage->pDescData = pRenameState->pszOldDescName;
7458 pImage->pFile = pFile;
7459 }
7460 pImage->Descriptor = pRenameState->DescriptorCopy;
7461 vmdkWriteDescriptor(pImage, NULL);
7462 vmdkFileClose(pImage, &pFile, false);
7463 /* Get rid of the stuff we implanted. */
7464 pImage->pExtents = NULL;
7465 pImage->pFile = NULL;
7466 pImage->pDescData = NULL;
7467 /* Re-open the image back. */
7468 pImage->pszFilename = pRenameState->pszOldImageName;
7469 rc = vmdkOpenImage(pImage, pImage->uOpenFlags);
7470
7471 return rc;
7472}
7473
7474/**
7475 * Rename worker doing the real work.
7476 *
7477 * @returns VBox status code.
7478 * @param pImage VMDK image instance.
7479 * @param pRenameState The rename state.
7480 * @param pszFilename The new filename.
7481 */
7482static int vmdkRenameWorker(PVMDKIMAGE pImage, PVMDKRENAMESTATE pRenameState, const char *pszFilename)
7483{
7484 int rc = VINF_SUCCESS;
7485 unsigned i, line;
7486
7487 /* Update the descriptor with modified extent names. */
7488 for (i = 0, line = pImage->Descriptor.uFirstExtent;
7489 i < pRenameState->cExtents;
7490 i++, line = pImage->Descriptor.aNextLines[line])
7491 {
7492 /* Update the descriptor. */
7493 pRenameState->apszNewLines[i] = vmdkStrReplace(pImage->Descriptor.aLines[line],
7494 pRenameState->pszOldBaseName,
7495 pRenameState->pszNewBaseName);
7496 if (!pRenameState->apszNewLines[i])
7497 {
7498 rc = VERR_NO_MEMORY;
7499 break;
7500 }
7501 pImage->Descriptor.aLines[line] = pRenameState->apszNewLines[i];
7502 }
7503
7504 if (RT_SUCCESS(rc))
7505 {
7506 /* Make sure the descriptor gets written back. */
7507 pImage->Descriptor.fDirty = true;
7508 /* Flush the descriptor now, in case it is embedded. */
7509 vmdkFlushImage(pImage, NULL);
7510
7511 /* Close and rename/move extents. */
7512 for (i = 0; i < pRenameState->cExtents; i++)
7513 {
7514 PVMDKEXTENT pExtent = &pImage->pExtents[i];
7515 /* Compose new name for the extent. */
7516 pRenameState->apszNewName[i] = vmdkStrReplace(pExtent->pszFullname,
7517 pRenameState->pszOldFullName,
7518 pRenameState->pszNewFullName);
7519 if (!pRenameState->apszNewName[i])
7520 {
7521 rc = VERR_NO_MEMORY;
7522 break;
7523 }
7524 /* Close the extent file. */
7525 rc = vmdkFileClose(pImage, &pExtent->pFile, false);
7526 if (RT_FAILURE(rc))
7527 break;;
7528
7529 /* Rename the extent file. */
7530 rc = vdIfIoIntFileMove(pImage->pIfIo, pExtent->pszFullname, pRenameState->apszNewName[i], 0);
7531 if (RT_FAILURE(rc))
7532 break;
7533 /* Remember the old name. */
7534 pRenameState->apszOldName[i] = RTStrDup(pExtent->pszFullname);
7535 }
7536
7537 if (RT_SUCCESS(rc))
7538 {
7539 /* Release all old stuff. */
7540 rc = vmdkFreeImage(pImage, false, true /*fFlush*/);
7541 if (RT_SUCCESS(rc))
7542 {
7543 pRenameState->fImageFreed = true;
7544
7545 /* Last elements of new/old name arrays are intended for
7546 * storing descriptor's names.
7547 */
7548 pRenameState->apszNewName[pRenameState->cExtents] = RTStrDup(pszFilename);
7549 /* Rename the descriptor file if it's separate. */
7550 if (!pRenameState->fEmbeddedDesc)
7551 {
7552 rc = vdIfIoIntFileMove(pImage->pIfIo, pImage->pszFilename, pRenameState->apszNewName[pRenameState->cExtents], 0);
7553 if (RT_SUCCESS(rc))
7554 {
7555 /* Save old name only if we may need to change it back. */
7556 pRenameState->apszOldName[pRenameState->cExtents] = RTStrDup(pszFilename);
7557 }
7558 }
7559
7560 /* Update pImage with the new information. */
7561 pImage->pszFilename = pszFilename;
7562
7563 /* Open the new image. */
7564 rc = vmdkOpenImage(pImage, pImage->uOpenFlags);
7565 }
7566 }
7567 }
7568
7569 return rc;
7570}
7571
7572/** @copydoc VDIMAGEBACKEND::pfnRename */
7573static DECLCALLBACK(int) vmdkRename(void *pBackendData, const char *pszFilename)
7574{
7575 LogFlowFunc(("pBackendData=%#p pszFilename=%#p\n", pBackendData, pszFilename));
7576
7577 PVMDKIMAGE pImage = (PVMDKIMAGE)pBackendData;
7578 VMDKRENAMESTATE RenameState;
7579
7580 memset(&RenameState, 0, sizeof(RenameState));
7581
7582 /* Check arguments. */
7583 AssertPtrReturn(pImage, VERR_INVALID_POINTER);
7584 AssertPtrReturn(pszFilename, VERR_INVALID_POINTER);
7585 AssertReturn(*pszFilename != '\0', VERR_INVALID_PARAMETER);
7586 AssertReturn(!(pImage->uImageFlags & VD_VMDK_IMAGE_FLAGS_RAWDISK), VERR_INVALID_PARAMETER);
7587
7588 int rc = vmdkRenameStatePrepare(pImage, &RenameState, pszFilename);
7589 if (RT_SUCCESS(rc))
7590 {
7591 /* --- Up to this point we have not done any damage yet. --- */
7592
7593 rc = vmdkRenameWorker(pImage, &RenameState, pszFilename);
7594 /* Roll back all changes in case of failure. */
7595 if (RT_FAILURE(rc))
7596 {
7597 int rrc = vmdkRenameRollback(pImage, &RenameState);
7598 AssertRC(rrc);
7599 }
7600 }
7601
7602 vmdkRenameStateDestroy(&RenameState);
7603 LogFlowFunc(("returns %Rrc\n", rc));
7604 return rc;
7605}
7606
7607/** @copydoc VDIMAGEBACKEND::pfnClose */
7608static DECLCALLBACK(int) vmdkClose(void *pBackendData, bool fDelete)
7609{
7610 LogFlowFunc(("pBackendData=%#p fDelete=%d\n", pBackendData, fDelete));
7611 PVMDKIMAGE pImage = (PVMDKIMAGE)pBackendData;
7612
7613 int rc = vmdkFreeImage(pImage, fDelete, true /*fFlush*/);
7614 RTMemFree(pImage);
7615
7616 LogFlowFunc(("returns %Rrc\n", rc));
7617 return rc;
7618}
7619
/** @copydoc VDIMAGEBACKEND::pfnRead
 *
 * Locates the extent covering @a uOffset, clips the read to the extent and
 * (for sparse extents) to the grain, then serves the data according to the
 * extent type: sparse (possibly compressed/stream optimized), flat/VMFS, or
 * all-zero.
 */
static DECLCALLBACK(int) vmdkRead(void *pBackendData, uint64_t uOffset, size_t cbToRead,
                                  PVDIOCTX pIoCtx, size_t *pcbActuallyRead)
{
    LogFlowFunc(("pBackendData=%#p uOffset=%llu pIoCtx=%#p cbToRead=%zu pcbActuallyRead=%#p\n",
                 pBackendData, uOffset, pIoCtx, cbToRead, pcbActuallyRead));
    PVMDKIMAGE pImage = (PVMDKIMAGE)pBackendData;

    AssertPtr(pImage);
    Assert(uOffset % 512 == 0);
    Assert(cbToRead % 512 == 0);
    AssertPtrReturn(pIoCtx, VERR_INVALID_POINTER);
    AssertReturn(cbToRead, VERR_INVALID_PARAMETER);
    AssertReturn(uOffset + cbToRead <= pImage->cbSize, VERR_INVALID_PARAMETER);

    /* Find the extent and check access permissions as defined in the extent descriptor. */
    PVMDKEXTENT pExtent;
    uint64_t uSectorExtentRel;
    int rc = vmdkFindExtent(pImage, VMDK_BYTE2SECTOR(uOffset),
                            &pExtent, &uSectorExtentRel);
    if (   RT_SUCCESS(rc)
        && pExtent->enmAccess != VMDKACCESS_NOACCESS)
    {
        /* Clip read range to remain in this extent. */
        cbToRead = RT_MIN(cbToRead, VMDK_SECTOR2BYTE(pExtent->uSectorOffset + pExtent->cNominalSectors - uSectorExtentRel));

        /* Handle the read according to the current extent type. */
        switch (pExtent->enmType)
        {
            case VMDKETYPE_HOSTED_SPARSE:
            {
                uint64_t uSectorExtentAbs;

                /* Translate the extent-relative sector to the absolute file
                 * sector via the grain table; 0 means unallocated grain. */
                rc = vmdkGetSector(pImage, pIoCtx, pExtent, uSectorExtentRel, &uSectorExtentAbs);
                if (RT_FAILURE(rc))
                    break;
                /* Clip read range to at most the rest of the grain. */
                cbToRead = RT_MIN(cbToRead, VMDK_SECTOR2BYTE(pExtent->cSectorsPerGrain - uSectorExtentRel % pExtent->cSectorsPerGrain));
                Assert(!(cbToRead % 512));
                if (uSectorExtentAbs == 0)
                {
                    /* Unallocated grain: report a free block, except for
                     * sequential reads of read-only stream optimized images
                     * where the grains must be scanned in order. */
                    if (   !(pImage->uImageFlags & VD_VMDK_IMAGE_FLAGS_STREAM_OPTIMIZED)
                        || !(pImage->uOpenFlags & VD_OPEN_FLAGS_READONLY)
                        || !(pImage->uOpenFlags & VD_OPEN_FLAGS_SEQUENTIAL))
                        rc = VERR_VD_BLOCK_FREE;
                    else
                        rc = vmdkStreamReadSequential(pImage, pExtent,
                                                      uSectorExtentRel,
                                                      pIoCtx, cbToRead);
                }
                else
                {
                    if (pImage->uImageFlags & VD_VMDK_IMAGE_FLAGS_STREAM_OPTIMIZED)
                    {
                        AssertMsg(vdIfIoIntIoCtxIsSynchronous(pImage->pIfIo, pIoCtx),
                                  ("Async I/O is not supported for stream optimized VMDK's\n"));

                        /* Decompress the whole grain into the per-extent cache
                         * unless it is already cached from a previous read. */
                        uint32_t uSectorInGrain = uSectorExtentRel % pExtent->cSectorsPerGrain;
                        uSectorExtentAbs -= uSectorInGrain;
                        if (pExtent->uGrainSectorAbs != uSectorExtentAbs)
                        {
                            uint64_t uLBA = 0; /* gcc maybe uninitialized */
                            rc = vmdkFileInflateSync(pImage, pExtent,
                                                     VMDK_SECTOR2BYTE(uSectorExtentAbs),
                                                     pExtent->pvGrain,
                                                     VMDK_SECTOR2BYTE(pExtent->cSectorsPerGrain),
                                                     NULL, &uLBA, NULL);
                            if (RT_FAILURE(rc))
                            {
                                /* Poison the cache so a later read retries the inflate. */
                                pExtent->uGrainSectorAbs = 0;
                                break;
                            }
                            pExtent->uGrainSectorAbs = uSectorExtentAbs;
                            pExtent->uGrain = uSectorExtentRel / pExtent->cSectorsPerGrain;
                            Assert(uLBA == uSectorExtentRel);
                        }
                        vdIfIoIntIoCtxCopyTo(pImage->pIfIo, pIoCtx,
                                             (uint8_t *)pExtent->pvGrain
                                             + VMDK_SECTOR2BYTE(uSectorInGrain),
                                             cbToRead);
                    }
                    else
                        /* Plain sparse grain: read straight from the file. */
                        rc = vdIfIoIntFileReadUser(pImage->pIfIo, pExtent->pFile->pStorage,
                                                   VMDK_SECTOR2BYTE(uSectorExtentAbs),
                                                   pIoCtx, cbToRead);
                }
                break;
            }
            case VMDKETYPE_VMFS:
            case VMDKETYPE_FLAT:
                /* Flat extents map 1:1 to the file; no translation needed. */
                rc = vdIfIoIntFileReadUser(pImage->pIfIo, pExtent->pFile->pStorage,
                                           VMDK_SECTOR2BYTE(uSectorExtentRel),
                                           pIoCtx, cbToRead);
                break;
            case VMDKETYPE_ZERO:
            {
                size_t cbSet;

                /* Zero extent: just fill the destination buffers. */
                cbSet = vdIfIoIntIoCtxSet(pImage->pIfIo, pIoCtx, 0, cbToRead);
                Assert(cbSet == cbToRead);
                break;
            }
        }
        if (pcbActuallyRead)
            *pcbActuallyRead = cbToRead;
    }
    else if (RT_SUCCESS(rc))
        /* Extent found but marked NOACCESS in the descriptor. */
        rc = VERR_VD_VMDK_INVALID_STATE;

    LogFlowFunc(("returns %Rrc\n", rc));
    return rc;
}
7732
7733/** @copydoc VDIMAGEBACKEND::pfnWrite */
7734static DECLCALLBACK(int) vmdkWrite(void *pBackendData, uint64_t uOffset, size_t cbToWrite,
7735 PVDIOCTX pIoCtx, size_t *pcbWriteProcess, size_t *pcbPreRead,
7736 size_t *pcbPostRead, unsigned fWrite)
7737{
7738 LogFlowFunc(("pBackendData=%#p uOffset=%llu pIoCtx=%#p cbToWrite=%zu pcbWriteProcess=%#p pcbPreRead=%#p pcbPostRead=%#p\n",
7739 pBackendData, uOffset, pIoCtx, cbToWrite, pcbWriteProcess, pcbPreRead, pcbPostRead));
7740 PVMDKIMAGE pImage = (PVMDKIMAGE)pBackendData;
7741 int rc;
7742
7743 AssertPtr(pImage);
7744 Assert(uOffset % 512 == 0);
7745 Assert(cbToWrite % 512 == 0);
7746 AssertPtrReturn(pIoCtx, VERR_INVALID_POINTER);
7747 AssertReturn(cbToWrite, VERR_INVALID_PARAMETER);
7748
7749 if (!(pImage->uOpenFlags & VD_OPEN_FLAGS_READONLY))
7750 {
7751 PVMDKEXTENT pExtent;
7752 uint64_t uSectorExtentRel;
7753 uint64_t uSectorExtentAbs;
7754
7755 /* No size check here, will do that later when the extent is located.
7756 * There are sparse images out there which according to the spec are
7757 * invalid, because the total size is not a multiple of the grain size.
7758 * Also for sparse images which are stitched together in odd ways (not at
7759 * grain boundaries, and with the nominal size not being a multiple of the
7760 * grain size), this would prevent writing to the last grain. */
7761
7762 rc = vmdkFindExtent(pImage, VMDK_BYTE2SECTOR(uOffset),
7763 &pExtent, &uSectorExtentRel);
7764 if (RT_SUCCESS(rc))
7765 {
7766 if ( pExtent->enmAccess != VMDKACCESS_READWRITE
7767 && ( !(pImage->uImageFlags & VD_VMDK_IMAGE_FLAGS_STREAM_OPTIMIZED)
7768 && !pImage->pExtents[0].uAppendPosition
7769 && pExtent->enmAccess != VMDKACCESS_READONLY))
7770 rc = VERR_VD_VMDK_INVALID_STATE;
7771 else
7772 {
7773 /* Handle the write according to the current extent type. */
7774 switch (pExtent->enmType)
7775 {
7776 case VMDKETYPE_HOSTED_SPARSE:
7777 rc = vmdkGetSector(pImage, pIoCtx, pExtent, uSectorExtentRel, &uSectorExtentAbs);
7778 if (RT_SUCCESS(rc))
7779 {
7780 if ( pImage->uImageFlags & VD_VMDK_IMAGE_FLAGS_STREAM_OPTIMIZED
7781 && uSectorExtentRel < (uint64_t)pExtent->uLastGrainAccess * pExtent->cSectorsPerGrain)
7782 rc = VERR_VD_VMDK_INVALID_WRITE;
7783 else
7784 {
7785 /* Clip write range to at most the rest of the grain. */
7786 cbToWrite = RT_MIN(cbToWrite,
7787 VMDK_SECTOR2BYTE( pExtent->cSectorsPerGrain
7788 - uSectorExtentRel % pExtent->cSectorsPerGrain));
7789 if (uSectorExtentAbs == 0)
7790 {
7791 if (!(pImage->uImageFlags & VD_VMDK_IMAGE_FLAGS_STREAM_OPTIMIZED))
7792 {
7793 if (cbToWrite == VMDK_SECTOR2BYTE(pExtent->cSectorsPerGrain))
7794 {
7795 /* Full block write to a previously unallocated block.
7796 * Check if the caller wants to avoid the automatic alloc. */
7797 if (!(fWrite & VD_WRITE_NO_ALLOC))
7798 {
7799 /* Allocate GT and find out where to store the grain. */
7800 rc = vmdkAllocGrain(pImage, pExtent, pIoCtx,
7801 uSectorExtentRel, cbToWrite);
7802 }
7803 else
7804 rc = VERR_VD_BLOCK_FREE;
7805 *pcbPreRead = 0;
7806 *pcbPostRead = 0;
7807 }
7808 else
7809 {
7810 /* Clip write range to remain in this extent. */
7811 cbToWrite = RT_MIN(cbToWrite,
7812 VMDK_SECTOR2BYTE( pExtent->uSectorOffset
7813 + pExtent->cNominalSectors - uSectorExtentRel));
7814 *pcbPreRead = VMDK_SECTOR2BYTE(uSectorExtentRel % pExtent->cSectorsPerGrain);
7815 *pcbPostRead = VMDK_SECTOR2BYTE(pExtent->cSectorsPerGrain) - cbToWrite - *pcbPreRead;
7816 rc = VERR_VD_BLOCK_FREE;
7817 }
7818 }
7819 else
7820 rc = vmdkStreamAllocGrain(pImage, pExtent, uSectorExtentRel,
7821 pIoCtx, cbToWrite);
7822 }
7823 else
7824 {
7825 if (pImage->uImageFlags & VD_VMDK_IMAGE_FLAGS_STREAM_OPTIMIZED)
7826 {
7827 /* A partial write to a streamOptimized image is simply
7828 * invalid. It requires rewriting already compressed data
7829 * which is somewhere between expensive and impossible. */
7830 rc = VERR_VD_VMDK_INVALID_STATE;
7831 pExtent->uGrainSectorAbs = 0;
7832 AssertRC(rc);
7833 }
7834 else
7835 {
7836 Assert(!(pImage->uImageFlags & VD_VMDK_IMAGE_FLAGS_STREAM_OPTIMIZED));
7837 rc = vdIfIoIntFileWriteUser(pImage->pIfIo, pExtent->pFile->pStorage,
7838 VMDK_SECTOR2BYTE(uSectorExtentAbs),
7839 pIoCtx, cbToWrite, NULL, NULL);
7840 }
7841 }
7842 }
7843 }
7844 break;
7845 case VMDKETYPE_VMFS:
7846 case VMDKETYPE_FLAT:
7847 /* Clip write range to remain in this extent. */
7848 cbToWrite = RT_MIN(cbToWrite, VMDK_SECTOR2BYTE(pExtent->uSectorOffset + pExtent->cNominalSectors - uSectorExtentRel));
7849 rc = vdIfIoIntFileWriteUser(pImage->pIfIo, pExtent->pFile->pStorage,
7850 VMDK_SECTOR2BYTE(uSectorExtentRel),
7851 pIoCtx, cbToWrite, NULL, NULL);
7852 break;
7853 case VMDKETYPE_ZERO:
7854 /* Clip write range to remain in this extent. */
7855 cbToWrite = RT_MIN(cbToWrite, VMDK_SECTOR2BYTE(pExtent->uSectorOffset + pExtent->cNominalSectors - uSectorExtentRel));
7856 break;
7857 }
7858 }
7859
7860 if (pcbWriteProcess)
7861 *pcbWriteProcess = cbToWrite;
7862 }
7863 }
7864 else
7865 rc = VERR_VD_IMAGE_READ_ONLY;
7866
7867 LogFlowFunc(("returns %Rrc\n", rc));
7868 return rc;
7869}
7870
7871/** @copydoc VDIMAGEBACKEND::pfnFlush */
7872static DECLCALLBACK(int) vmdkFlush(void *pBackendData, PVDIOCTX pIoCtx)
7873{
7874 PVMDKIMAGE pImage = (PVMDKIMAGE)pBackendData;
7875
7876 return vmdkFlushImage(pImage, pIoCtx);
7877}
7878
7879/** @copydoc VDIMAGEBACKEND::pfnGetVersion */
7880static DECLCALLBACK(unsigned) vmdkGetVersion(void *pBackendData)
7881{
7882 LogFlowFunc(("pBackendData=%#p\n", pBackendData));
7883 PVMDKIMAGE pImage = (PVMDKIMAGE)pBackendData;
7884
7885 AssertPtrReturn(pImage, 0);
7886
7887 return VMDK_IMAGE_VERSION;
7888}
7889
7890/** @copydoc VDIMAGEBACKEND::pfnGetFileSize */
7891static DECLCALLBACK(uint64_t) vmdkGetFileSize(void *pBackendData)
7892{
7893 LogFlowFunc(("pBackendData=%#p\n", pBackendData));
7894 PVMDKIMAGE pImage = (PVMDKIMAGE)pBackendData;
7895 uint64_t cb = 0;
7896
7897 AssertPtrReturn(pImage, 0);
7898
7899 if (pImage->pFile != NULL)
7900 {
7901 uint64_t cbFile;
7902 int rc = vdIfIoIntFileGetSize(pImage->pIfIo, pImage->pFile->pStorage, &cbFile);
7903 if (RT_SUCCESS(rc))
7904 cb += cbFile;
7905 }
7906 for (unsigned i = 0; i < pImage->cExtents; i++)
7907 {
7908 if (pImage->pExtents[i].pFile != NULL)
7909 {
7910 uint64_t cbFile;
7911 int rc = vdIfIoIntFileGetSize(pImage->pIfIo, pImage->pExtents[i].pFile->pStorage, &cbFile);
7912 if (RT_SUCCESS(rc))
7913 cb += cbFile;
7914 }
7915 }
7916
7917 LogFlowFunc(("returns %lld\n", cb));
7918 return cb;
7919}
7920
7921/** @copydoc VDIMAGEBACKEND::pfnGetPCHSGeometry */
7922static DECLCALLBACK(int) vmdkGetPCHSGeometry(void *pBackendData, PVDGEOMETRY pPCHSGeometry)
7923{
7924 LogFlowFunc(("pBackendData=%#p pPCHSGeometry=%#p\n", pBackendData, pPCHSGeometry));
7925 PVMDKIMAGE pImage = (PVMDKIMAGE)pBackendData;
7926 int rc = VINF_SUCCESS;
7927
7928 AssertPtrReturn(pImage, VERR_VD_NOT_OPENED);
7929
7930 if (pImage->PCHSGeometry.cCylinders)
7931 *pPCHSGeometry = pImage->PCHSGeometry;
7932 else
7933 rc = VERR_VD_GEOMETRY_NOT_SET;
7934
7935 LogFlowFunc(("returns %Rrc (PCHS=%u/%u/%u)\n", rc, pPCHSGeometry->cCylinders, pPCHSGeometry->cHeads, pPCHSGeometry->cSectors));
7936 return rc;
7937}
7938
7939/** @copydoc VDIMAGEBACKEND::pfnSetPCHSGeometry */
7940static DECLCALLBACK(int) vmdkSetPCHSGeometry(void *pBackendData, PCVDGEOMETRY pPCHSGeometry)
7941{
7942 LogFlowFunc(("pBackendData=%#p pPCHSGeometry=%#p PCHS=%u/%u/%u\n",
7943 pBackendData, pPCHSGeometry, pPCHSGeometry->cCylinders, pPCHSGeometry->cHeads, pPCHSGeometry->cSectors));
7944 PVMDKIMAGE pImage = (PVMDKIMAGE)pBackendData;
7945 int rc = VINF_SUCCESS;
7946
7947 AssertPtrReturn(pImage, VERR_VD_NOT_OPENED);
7948
7949 if (!(pImage->uOpenFlags & VD_OPEN_FLAGS_READONLY))
7950 {
7951 if (!(pImage->uOpenFlags & VD_VMDK_IMAGE_FLAGS_STREAM_OPTIMIZED))
7952 {
7953 rc = vmdkDescSetPCHSGeometry(pImage, pPCHSGeometry);
7954 if (RT_SUCCESS(rc))
7955 pImage->PCHSGeometry = *pPCHSGeometry;
7956 }
7957 else
7958 rc = VERR_NOT_SUPPORTED;
7959 }
7960 else
7961 rc = VERR_VD_IMAGE_READ_ONLY;
7962
7963 LogFlowFunc(("returns %Rrc\n", rc));
7964 return rc;
7965}
7966
7967/** @copydoc VDIMAGEBACKEND::pfnGetLCHSGeometry */
7968static DECLCALLBACK(int) vmdkGetLCHSGeometry(void *pBackendData, PVDGEOMETRY pLCHSGeometry)
7969{
7970 LogFlowFunc(("pBackendData=%#p pLCHSGeometry=%#p\n", pBackendData, pLCHSGeometry));
7971 PVMDKIMAGE pImage = (PVMDKIMAGE)pBackendData;
7972 int rc = VINF_SUCCESS;
7973
7974 AssertPtrReturn(pImage, VERR_VD_NOT_OPENED);
7975
7976 if (pImage->LCHSGeometry.cCylinders)
7977 *pLCHSGeometry = pImage->LCHSGeometry;
7978 else
7979 rc = VERR_VD_GEOMETRY_NOT_SET;
7980
7981 LogFlowFunc(("returns %Rrc (LCHS=%u/%u/%u)\n", rc, pLCHSGeometry->cCylinders, pLCHSGeometry->cHeads, pLCHSGeometry->cSectors));
7982 return rc;
7983}
7984
7985/** @copydoc VDIMAGEBACKEND::pfnSetLCHSGeometry */
7986static DECLCALLBACK(int) vmdkSetLCHSGeometry(void *pBackendData, PCVDGEOMETRY pLCHSGeometry)
7987{
7988 LogFlowFunc(("pBackendData=%#p pLCHSGeometry=%#p LCHS=%u/%u/%u\n",
7989 pBackendData, pLCHSGeometry, pLCHSGeometry->cCylinders, pLCHSGeometry->cHeads, pLCHSGeometry->cSectors));
7990 PVMDKIMAGE pImage = (PVMDKIMAGE)pBackendData;
7991 int rc = VINF_SUCCESS;
7992
7993 AssertPtrReturn(pImage, VERR_VD_NOT_OPENED);
7994
7995 if (!(pImage->uOpenFlags & VD_OPEN_FLAGS_READONLY))
7996 {
7997 if (!(pImage->uOpenFlags & VD_VMDK_IMAGE_FLAGS_STREAM_OPTIMIZED))
7998 {
7999 rc = vmdkDescSetLCHSGeometry(pImage, pLCHSGeometry);
8000 if (RT_SUCCESS(rc))
8001 pImage->LCHSGeometry = *pLCHSGeometry;
8002 }
8003 else
8004 rc = VERR_NOT_SUPPORTED;
8005 }
8006 else
8007 rc = VERR_VD_IMAGE_READ_ONLY;
8008
8009 LogFlowFunc(("returns %Rrc\n", rc));
8010 return rc;
8011}
8012
8013/** @copydoc VDIMAGEBACKEND::pfnQueryRegions */
8014static DECLCALLBACK(int) vmdkQueryRegions(void *pBackendData, PCVDREGIONLIST *ppRegionList)
8015{
8016 LogFlowFunc(("pBackendData=%#p ppRegionList=%#p\n", pBackendData, ppRegionList));
8017 PVMDKIMAGE pThis = (PVMDKIMAGE)pBackendData;
8018
8019 AssertPtrReturn(pThis, VERR_VD_NOT_OPENED);
8020
8021 *ppRegionList = &pThis->RegionList;
8022 LogFlowFunc(("returns %Rrc\n", VINF_SUCCESS));
8023 return VINF_SUCCESS;
8024}
8025
8026/** @copydoc VDIMAGEBACKEND::pfnRegionListRelease */
8027static DECLCALLBACK(void) vmdkRegionListRelease(void *pBackendData, PCVDREGIONLIST pRegionList)
8028{
8029 RT_NOREF1(pRegionList);
8030 LogFlowFunc(("pBackendData=%#p pRegionList=%#p\n", pBackendData, pRegionList));
8031 PVMDKIMAGE pThis = (PVMDKIMAGE)pBackendData;
8032 AssertPtr(pThis); RT_NOREF(pThis);
8033
8034 /* Nothing to do here. */
8035}
8036
8037/** @copydoc VDIMAGEBACKEND::pfnGetImageFlags */
8038static DECLCALLBACK(unsigned) vmdkGetImageFlags(void *pBackendData)
8039{
8040 LogFlowFunc(("pBackendData=%#p\n", pBackendData));
8041 PVMDKIMAGE pImage = (PVMDKIMAGE)pBackendData;
8042
8043 AssertPtrReturn(pImage, 0);
8044
8045 LogFlowFunc(("returns %#x\n", pImage->uImageFlags));
8046 return pImage->uImageFlags;
8047}
8048
8049/** @copydoc VDIMAGEBACKEND::pfnGetOpenFlags */
8050static DECLCALLBACK(unsigned) vmdkGetOpenFlags(void *pBackendData)
8051{
8052 LogFlowFunc(("pBackendData=%#p\n", pBackendData));
8053 PVMDKIMAGE pImage = (PVMDKIMAGE)pBackendData;
8054
8055 AssertPtrReturn(pImage, 0);
8056
8057 LogFlowFunc(("returns %#x\n", pImage->uOpenFlags));
8058 return pImage->uOpenFlags;
8059}
8060
8061/** @copydoc VDIMAGEBACKEND::pfnSetOpenFlags */
8062static DECLCALLBACK(int) vmdkSetOpenFlags(void *pBackendData, unsigned uOpenFlags)
8063{
8064 LogFlowFunc(("pBackendData=%#p uOpenFlags=%#x\n", pBackendData, uOpenFlags));
8065 PVMDKIMAGE pImage = (PVMDKIMAGE)pBackendData;
8066 int rc;
8067
8068 /* Image must be opened and the new flags must be valid. */
8069 if (!pImage || (uOpenFlags & ~( VD_OPEN_FLAGS_READONLY | VD_OPEN_FLAGS_INFO
8070 | VD_OPEN_FLAGS_ASYNC_IO | VD_OPEN_FLAGS_SHAREABLE
8071 | VD_OPEN_FLAGS_SEQUENTIAL | VD_OPEN_FLAGS_SKIP_CONSISTENCY_CHECKS)))
8072 rc = VERR_INVALID_PARAMETER;
8073 else
8074 {
8075 /* StreamOptimized images need special treatment: reopen is prohibited. */
8076 if (pImage->uImageFlags & VD_VMDK_IMAGE_FLAGS_STREAM_OPTIMIZED)
8077 {
8078 if (pImage->uOpenFlags == uOpenFlags)
8079 rc = VINF_SUCCESS;
8080 else
8081 rc = VERR_INVALID_PARAMETER;
8082 }
8083 else
8084 {
8085 /* Implement this operation via reopening the image. */
8086 vmdkFreeImage(pImage, false, true /*fFlush*/);
8087 rc = vmdkOpenImage(pImage, uOpenFlags);
8088 }
8089 }
8090
8091 LogFlowFunc(("returns %Rrc\n", rc));
8092 return rc;
8093}
8094
8095/** @copydoc VDIMAGEBACKEND::pfnGetComment */
8096static DECLCALLBACK(int) vmdkGetComment(void *pBackendData, char *pszComment, size_t cbComment)
8097{
8098 LogFlowFunc(("pBackendData=%#p pszComment=%#p cbComment=%zu\n", pBackendData, pszComment, cbComment));
8099 PVMDKIMAGE pImage = (PVMDKIMAGE)pBackendData;
8100
8101 AssertPtrReturn(pImage, VERR_VD_NOT_OPENED);
8102
8103 char *pszCommentEncoded = NULL;
8104 int rc = vmdkDescDDBGetStr(pImage, &pImage->Descriptor,
8105 "ddb.comment", &pszCommentEncoded);
8106 if (rc == VERR_VD_VMDK_VALUE_NOT_FOUND)
8107 {
8108 pszCommentEncoded = NULL;
8109 rc = VINF_SUCCESS;
8110 }
8111
8112 if (RT_SUCCESS(rc))
8113 {
8114 if (pszComment && pszCommentEncoded)
8115 rc = vmdkDecodeString(pszCommentEncoded, pszComment, cbComment);
8116 else if (pszComment)
8117 *pszComment = '\0';
8118
8119 if (pszCommentEncoded)
8120 RTMemTmpFree(pszCommentEncoded);
8121 }
8122
8123 LogFlowFunc(("returns %Rrc comment='%s'\n", rc, pszComment));
8124 return rc;
8125}
8126
8127/** @copydoc VDIMAGEBACKEND::pfnSetComment */
8128static DECLCALLBACK(int) vmdkSetComment(void *pBackendData, const char *pszComment)
8129{
8130 LogFlowFunc(("pBackendData=%#p pszComment=\"%s\"\n", pBackendData, pszComment));
8131 PVMDKIMAGE pImage = (PVMDKIMAGE)pBackendData;
8132 int rc;
8133
8134 AssertPtrReturn(pImage, VERR_VD_NOT_OPENED);
8135
8136 if (!(pImage->uOpenFlags & VD_OPEN_FLAGS_READONLY))
8137 {
8138 if (!(pImage->uOpenFlags & VD_VMDK_IMAGE_FLAGS_STREAM_OPTIMIZED))
8139 rc = vmdkSetImageComment(pImage, pszComment);
8140 else
8141 rc = VERR_NOT_SUPPORTED;
8142 }
8143 else
8144 rc = VERR_VD_IMAGE_READ_ONLY;
8145
8146 LogFlowFunc(("returns %Rrc\n", rc));
8147 return rc;
8148}
8149
8150/** @copydoc VDIMAGEBACKEND::pfnGetUuid */
8151static DECLCALLBACK(int) vmdkGetUuid(void *pBackendData, PRTUUID pUuid)
8152{
8153 LogFlowFunc(("pBackendData=%#p pUuid=%#p\n", pBackendData, pUuid));
8154 PVMDKIMAGE pImage = (PVMDKIMAGE)pBackendData;
8155
8156 AssertPtrReturn(pImage, VERR_VD_NOT_OPENED);
8157
8158 *pUuid = pImage->ImageUuid;
8159
8160 LogFlowFunc(("returns %Rrc (%RTuuid)\n", VINF_SUCCESS, pUuid));
8161 return VINF_SUCCESS;
8162}
8163
8164/** @copydoc VDIMAGEBACKEND::pfnSetUuid */
8165static DECLCALLBACK(int) vmdkSetUuid(void *pBackendData, PCRTUUID pUuid)
8166{
8167 LogFlowFunc(("pBackendData=%#p Uuid=%RTuuid\n", pBackendData, pUuid));
8168 PVMDKIMAGE pImage = (PVMDKIMAGE)pBackendData;
8169 int rc = VINF_SUCCESS;
8170
8171 AssertPtrReturn(pImage, VERR_VD_NOT_OPENED);
8172
8173 if (!(pImage->uOpenFlags & VD_OPEN_FLAGS_READONLY))
8174 {
8175 if (!(pImage->uOpenFlags & VD_VMDK_IMAGE_FLAGS_STREAM_OPTIMIZED))
8176 {
8177 pImage->ImageUuid = *pUuid;
8178 rc = vmdkDescDDBSetUuid(pImage, &pImage->Descriptor,
8179 VMDK_DDB_IMAGE_UUID, pUuid);
8180 if (RT_FAILURE(rc))
8181 rc = vdIfError(pImage->pIfError, rc, RT_SRC_POS,
8182 N_("VMDK: error storing image UUID in descriptor in '%s'"), pImage->pszFilename);
8183 }
8184 else
8185 rc = VERR_NOT_SUPPORTED;
8186 }
8187 else
8188 rc = VERR_VD_IMAGE_READ_ONLY;
8189
8190 LogFlowFunc(("returns %Rrc\n", rc));
8191 return rc;
8192}
8193
8194/** @copydoc VDIMAGEBACKEND::pfnGetModificationUuid */
8195static DECLCALLBACK(int) vmdkGetModificationUuid(void *pBackendData, PRTUUID pUuid)
8196{
8197 LogFlowFunc(("pBackendData=%#p pUuid=%#p\n", pBackendData, pUuid));
8198 PVMDKIMAGE pImage = (PVMDKIMAGE)pBackendData;
8199
8200 AssertPtrReturn(pImage, VERR_VD_NOT_OPENED);
8201
8202 *pUuid = pImage->ModificationUuid;
8203
8204 LogFlowFunc(("returns %Rrc (%RTuuid)\n", VINF_SUCCESS, pUuid));
8205 return VINF_SUCCESS;
8206}
8207
8208/** @copydoc VDIMAGEBACKEND::pfnSetModificationUuid */
8209static DECLCALLBACK(int) vmdkSetModificationUuid(void *pBackendData, PCRTUUID pUuid)
8210{
8211 LogFlowFunc(("pBackendData=%#p Uuid=%RTuuid\n", pBackendData, pUuid));
8212 PVMDKIMAGE pImage = (PVMDKIMAGE)pBackendData;
8213 int rc = VINF_SUCCESS;
8214
8215 AssertPtrReturn(pImage, VERR_VD_NOT_OPENED);
8216
8217 if (!(pImage->uOpenFlags & VD_OPEN_FLAGS_READONLY))
8218 {
8219 if (!(pImage->uOpenFlags & VD_VMDK_IMAGE_FLAGS_STREAM_OPTIMIZED))
8220 {
8221 /* Only touch the modification uuid if it changed. */
8222 if (RTUuidCompare(&pImage->ModificationUuid, pUuid))
8223 {
8224 pImage->ModificationUuid = *pUuid;
8225 rc = vmdkDescDDBSetUuid(pImage, &pImage->Descriptor,
8226 VMDK_DDB_MODIFICATION_UUID, pUuid);
8227 if (RT_FAILURE(rc))
8228 rc = vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: error storing modification UUID in descriptor in '%s'"), pImage->pszFilename);
8229 }
8230 }
8231 else
8232 rc = VERR_NOT_SUPPORTED;
8233 }
8234 else
8235 rc = VERR_VD_IMAGE_READ_ONLY;
8236
8237 LogFlowFunc(("returns %Rrc\n", rc));
8238 return rc;
8239}
8240
8241/** @copydoc VDIMAGEBACKEND::pfnGetParentUuid */
8242static DECLCALLBACK(int) vmdkGetParentUuid(void *pBackendData, PRTUUID pUuid)
8243{
8244 LogFlowFunc(("pBackendData=%#p pUuid=%#p\n", pBackendData, pUuid));
8245 PVMDKIMAGE pImage = (PVMDKIMAGE)pBackendData;
8246
8247 AssertPtrReturn(pImage, VERR_VD_NOT_OPENED);
8248
8249 *pUuid = pImage->ParentUuid;
8250
8251 LogFlowFunc(("returns %Rrc (%RTuuid)\n", VINF_SUCCESS, pUuid));
8252 return VINF_SUCCESS;
8253}
8254
8255/** @copydoc VDIMAGEBACKEND::pfnSetParentUuid */
8256static DECLCALLBACK(int) vmdkSetParentUuid(void *pBackendData, PCRTUUID pUuid)
8257{
8258 LogFlowFunc(("pBackendData=%#p Uuid=%RTuuid\n", pBackendData, pUuid));
8259 PVMDKIMAGE pImage = (PVMDKIMAGE)pBackendData;
8260 int rc = VINF_SUCCESS;
8261
8262 AssertPtrReturn(pImage, VERR_VD_NOT_OPENED);
8263
8264 if (!(pImage->uOpenFlags & VD_OPEN_FLAGS_READONLY))
8265 {
8266 if (!(pImage->uOpenFlags & VD_VMDK_IMAGE_FLAGS_STREAM_OPTIMIZED))
8267 {
8268 pImage->ParentUuid = *pUuid;
8269 rc = vmdkDescDDBSetUuid(pImage, &pImage->Descriptor,
8270 VMDK_DDB_PARENT_UUID, pUuid);
8271 if (RT_FAILURE(rc))
8272 rc = vdIfError(pImage->pIfError, rc, RT_SRC_POS,
8273 N_("VMDK: error storing parent image UUID in descriptor in '%s'"), pImage->pszFilename);
8274 }
8275 else
8276 rc = VERR_NOT_SUPPORTED;
8277 }
8278 else
8279 rc = VERR_VD_IMAGE_READ_ONLY;
8280
8281 LogFlowFunc(("returns %Rrc\n", rc));
8282 return rc;
8283}
8284
8285/** @copydoc VDIMAGEBACKEND::pfnGetParentModificationUuid */
8286static DECLCALLBACK(int) vmdkGetParentModificationUuid(void *pBackendData, PRTUUID pUuid)
8287{
8288 LogFlowFunc(("pBackendData=%#p pUuid=%#p\n", pBackendData, pUuid));
8289 PVMDKIMAGE pImage = (PVMDKIMAGE)pBackendData;
8290
8291 AssertPtrReturn(pImage, VERR_VD_NOT_OPENED);
8292
8293 *pUuid = pImage->ParentModificationUuid;
8294
8295 LogFlowFunc(("returns %Rrc (%RTuuid)\n", VINF_SUCCESS, pUuid));
8296 return VINF_SUCCESS;
8297}
8298
8299/** @copydoc VDIMAGEBACKEND::pfnSetParentModificationUuid */
8300static DECLCALLBACK(int) vmdkSetParentModificationUuid(void *pBackendData, PCRTUUID pUuid)
8301{
8302 LogFlowFunc(("pBackendData=%#p Uuid=%RTuuid\n", pBackendData, pUuid));
8303 PVMDKIMAGE pImage = (PVMDKIMAGE)pBackendData;
8304 int rc = VINF_SUCCESS;
8305
8306 AssertPtrReturn(pImage, VERR_VD_NOT_OPENED);
8307
8308 if (!(pImage->uOpenFlags & VD_OPEN_FLAGS_READONLY))
8309 {
8310 if (!(pImage->uOpenFlags & VD_VMDK_IMAGE_FLAGS_STREAM_OPTIMIZED))
8311 {
8312 pImage->ParentModificationUuid = *pUuid;
8313 rc = vmdkDescDDBSetUuid(pImage, &pImage->Descriptor,
8314 VMDK_DDB_PARENT_MODIFICATION_UUID, pUuid);
8315 if (RT_FAILURE(rc))
8316 rc = vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: error storing parent image UUID in descriptor in '%s'"), pImage->pszFilename);
8317 }
8318 else
8319 rc = VERR_NOT_SUPPORTED;
8320 }
8321 else
8322 rc = VERR_VD_IMAGE_READ_ONLY;
8323
8324 LogFlowFunc(("returns %Rrc\n", rc));
8325 return rc;
8326}
8327
8328/** @copydoc VDIMAGEBACKEND::pfnDump */
8329static DECLCALLBACK(void) vmdkDump(void *pBackendData)
8330{
8331 PVMDKIMAGE pImage = (PVMDKIMAGE)pBackendData;
8332
8333 AssertPtrReturnVoid(pImage);
8334 vdIfErrorMessage(pImage->pIfError, "Header: Geometry PCHS=%u/%u/%u LCHS=%u/%u/%u cbSector=%llu\n",
8335 pImage->PCHSGeometry.cCylinders, pImage->PCHSGeometry.cHeads, pImage->PCHSGeometry.cSectors,
8336 pImage->LCHSGeometry.cCylinders, pImage->LCHSGeometry.cHeads, pImage->LCHSGeometry.cSectors,
8337 VMDK_BYTE2SECTOR(pImage->cbSize));
8338 vdIfErrorMessage(pImage->pIfError, "Header: uuidCreation={%RTuuid}\n", &pImage->ImageUuid);
8339 vdIfErrorMessage(pImage->pIfError, "Header: uuidModification={%RTuuid}\n", &pImage->ModificationUuid);
8340 vdIfErrorMessage(pImage->pIfError, "Header: uuidParent={%RTuuid}\n", &pImage->ParentUuid);
8341 vdIfErrorMessage(pImage->pIfError, "Header: uuidParentModification={%RTuuid}\n", &pImage->ParentModificationUuid);
8342}
8343
8344
8345/**
8346 * Returns the size, in bytes, of the sparse extent overhead for
8347 * the number of desired total sectors and based on the current
8348 * sectors of the extent.
8349 *
8350 * @returns uint64_t size of new overhead in bytes.
8351 * @param pExtent VMDK extent instance.
8352 * @param cSectorsNew Number of desired total sectors.
8353 */
8354static uint64_t vmdkGetNewOverhead(PVMDKEXTENT pExtent, uint64_t cSectorsNew)
8355{
8356 uint64_t cNewDirEntries = cSectorsNew / pExtent->cSectorsPerGDE;
8357 if (cSectorsNew % pExtent->cSectorsPerGDE)
8358 cNewDirEntries++;
8359
8360 size_t cbNewGD = cNewDirEntries * sizeof(uint32_t);
8361 uint64_t cbNewDirSize = RT_ALIGN_64(cbNewGD, 512);
8362 uint64_t cbNewAllTablesSize = RT_ALIGN_64(cNewDirEntries * pExtent->cGTEntries * sizeof(uint32_t), 512);
8363 uint64_t cbNewOverhead = RT_ALIGN_Z(RT_MAX(pExtent->uDescriptorSector
8364 + pExtent->cDescriptorSectors, 1)
8365 + cbNewDirSize + cbNewAllTablesSize, 512);
8366 cbNewOverhead += cbNewDirSize + cbNewAllTablesSize;
8367 cbNewOverhead = RT_ALIGN_64(cbNewOverhead,
8368 VMDK_SECTOR2BYTE(pExtent->cSectorsPerGrain));
8369
8370 return cbNewOverhead;
8371}
8372
8373/**
8374 * Internal: Replaces the size (in sectors) of an extent in the descriptor file.
8375 *
8376 * @returns VBox status code.
8377 * @param pImage VMDK image instance.
8378 * @param pExtent VMDK extent instance.
8379 * @param uLine Line number of descriptor to change.
8380 * @param cSectorsOld Existing number of sectors.
8381 * @param cSectorsNew New number of sectors.
8382 */
8383static int vmdkReplaceExtentSize(PVMDKIMAGE pImage, PVMDKEXTENT pExtent, unsigned uLine, uint64_t cSectorsOld,
8384 uint64_t cSectorsNew)
8385{
8386 char szOldExtentSectors[UINT64_MAX_BUFF_SIZE];
8387 char szNewExtentSectors[UINT64_MAX_BUFF_SIZE];
8388
8389 ssize_t cbWritten = RTStrPrintf2(szOldExtentSectors, sizeof(szOldExtentSectors), "%llu", cSectorsOld);
8390 if (cbWritten <= 0 || cbWritten > (ssize_t)sizeof(szOldExtentSectors))
8391 return VERR_BUFFER_OVERFLOW;
8392
8393 cbWritten = RTStrPrintf2(szNewExtentSectors, sizeof(szNewExtentSectors), "%llu", cSectorsNew);
8394 if (cbWritten <= 0 || cbWritten > (ssize_t)sizeof(szNewExtentSectors))
8395 return VERR_BUFFER_OVERFLOW;
8396
8397 char *pszNewExtentLine = vmdkStrReplace(pImage->Descriptor.aLines[uLine],
8398 szOldExtentSectors,
8399 szNewExtentSectors);
8400
8401 if (RT_UNLIKELY(!pszNewExtentLine))
8402 return VERR_INVALID_PARAMETER;
8403
8404 vmdkDescExtRemoveByLine(pImage, &pImage->Descriptor, uLine);
8405 vmdkDescExtInsert(pImage, &pImage->Descriptor,
8406 pExtent->enmAccess, cSectorsNew,
8407 pExtent->enmType, pExtent->pszBasename, pExtent->uSectorOffset);
8408
8409 RTStrFree(pszNewExtentLine);
8410 pszNewExtentLine = NULL;
8411
8412 pImage->Descriptor.fDirty = true;
8413
8414 return VINF_SUCCESS;
8415}
8416
8417/**
8418 * Moves sectors down to make room for new overhead.
8419 * Used for sparse extent resize.
8420 *
8421 * @returns VBox status code.
8422 * @param pImage VMDK image instance.
8423 * @param pExtent VMDK extent instance.
8424 * @param cSectorsNew Number of sectors after resize.
8425 */
8426static int vmdkRelocateSectorsForSparseResize(PVMDKIMAGE pImage, PVMDKEXTENT pExtent,
8427 uint64_t cSectorsNew)
8428{
8429 int rc = VINF_SUCCESS;
8430
8431 uint64_t cbNewOverhead = vmdkGetNewOverhead(pExtent, cSectorsNew);
8432
8433 uint64_t cNewOverheadSectors = VMDK_BYTE2SECTOR(cbNewOverhead);
8434 uint64_t cOverheadSectorDiff = cNewOverheadSectors - pExtent->cOverheadSectors;
8435
8436 uint64_t cbFile = 0;
8437 rc = vdIfIoIntFileGetSize(pImage->pIfIo, pExtent->pFile->pStorage, &cbFile);
8438
8439 uint64_t uNewAppendPosition;
8440
8441 /* Calculate how many sectors need to be relocated. */
8442 unsigned cSectorsReloc = cOverheadSectorDiff;
8443 if (cbNewOverhead % VMDK_SECTOR_SIZE)
8444 cSectorsReloc++;
8445
8446 if (cSectorsReloc < pExtent->cSectors)
8447 uNewAppendPosition = RT_ALIGN_Z(cbFile + VMDK_SECTOR2BYTE(cOverheadSectorDiff), 512);
8448 else
8449 uNewAppendPosition = cbFile;
8450
8451 /*
8452 * Get the blocks we need to relocate first, they are appended to the end
8453 * of the image.
8454 */
8455 void *pvBuf = NULL, *pvZero = NULL;
8456 do
8457 {
8458 /* Allocate data buffer. */
8459 pvBuf = RTMemAllocZ(VMDK_SECTOR2BYTE(pExtent->cSectorsPerGrain));
8460 if (!pvBuf)
8461 {
8462 rc = VERR_NO_MEMORY;
8463 break;
8464 }
8465
8466 /* Allocate buffer for overwriting with zeroes. */
8467 pvZero = RTMemAllocZ(VMDK_SECTOR2BYTE(pExtent->cSectorsPerGrain));
8468 if (!pvZero)
8469 {
8470 RTMemFree(pvBuf);
8471 pvBuf = NULL;
8472
8473 rc = VERR_NO_MEMORY;
8474 break;
8475 }
8476
8477 uint32_t *aGTDataTmp = (uint32_t *)RTMemAllocZ(sizeof(uint32_t) * pExtent->cGTEntries);
8478 if(!aGTDataTmp)
8479 {
8480 RTMemFree(pvBuf);
8481 pvBuf = NULL;
8482
8483 RTMemFree(pvZero);
8484 pvZero = NULL;
8485
8486 rc = VERR_NO_MEMORY;
8487 break;
8488 }
8489
8490 uint32_t *aRGTDataTmp = (uint32_t *)RTMemAllocZ(sizeof(uint32_t) * pExtent->cGTEntries);
8491 if(!aRGTDataTmp)
8492 {
8493 RTMemFree(pvBuf);
8494 pvBuf = NULL;
8495
8496 RTMemFree(pvZero);
8497 pvZero = NULL;
8498
8499 RTMemFree(aGTDataTmp);
8500 aGTDataTmp = NULL;
8501
8502 rc = VERR_NO_MEMORY;
8503 break;
8504 }
8505
8506 /* Search for overlap sector in the grain table. */
8507 for (uint32_t idxGD = 0; idxGD < pExtent->cGDEntries; idxGD++)
8508 {
8509 uint64_t uGTSector = pExtent->pGD[idxGD];
8510 uint64_t uRGTSector = pExtent->pRGD[idxGD];
8511
8512 rc = vdIfIoIntFileReadSync(pImage->pIfIo, pExtent->pFile->pStorage,
8513 VMDK_SECTOR2BYTE(uGTSector),
8514 aGTDataTmp, sizeof(uint32_t) * pExtent->cGTEntries);
8515
8516 if (RT_FAILURE(rc))
8517 break;
8518
8519 rc = vdIfIoIntFileReadSync(pImage->pIfIo, pExtent->pFile->pStorage,
8520 VMDK_SECTOR2BYTE(uRGTSector),
8521 aRGTDataTmp, sizeof(uint32_t) * pExtent->cGTEntries);
8522
8523 if (RT_FAILURE(rc))
8524 break;
8525
8526 for (uint32_t idxGT = 0; idxGT < pExtent->cGTEntries; idxGT++)
8527 {
8528 uint64_t aGTEntryLE = RT_LE2H_U64(aGTDataTmp[idxGT]);
8529 uint64_t aRGTEntryLE = RT_LE2H_U64(aRGTDataTmp[idxGT]);
8530
8531 /**
8532 * Check if grain table is valid. If not dump out with an error.
8533 * Shoudln't ever get here (given other checks) but good sanity check.
8534 */
8535 if (aGTEntryLE != aRGTEntryLE)
8536 {
8537 rc = vdIfError(pImage->pIfError, VERR_VD_VMDK_INVALID_HEADER, RT_SRC_POS,
8538 N_("VMDK: inconsistent references within grain table in '%s'"), pExtent->pszFullname);
8539 break;
8540 }
8541
8542 if (aGTEntryLE < cNewOverheadSectors
8543 && aGTEntryLE != 0)
8544 {
8545 /* Read data and append grain to the end of the image. */
8546 rc = vdIfIoIntFileReadSync(pImage->pIfIo, pExtent->pFile->pStorage,
8547 VMDK_SECTOR2BYTE(aGTEntryLE), pvBuf,
8548 VMDK_SECTOR2BYTE(pExtent->cSectorsPerGrain));
8549 if (RT_FAILURE(rc))
8550 break;
8551
8552 rc = vdIfIoIntFileWriteSync(pImage->pIfIo, pExtent->pFile->pStorage,
8553 uNewAppendPosition, pvBuf,
8554 VMDK_SECTOR2BYTE(pExtent->cSectorsPerGrain));
8555 if (RT_FAILURE(rc))
8556 break;
8557
8558 /* Zero out the old block area. */
8559 rc = vdIfIoIntFileWriteSync(pImage->pIfIo, pExtent->pFile->pStorage,
8560 VMDK_SECTOR2BYTE(aGTEntryLE), pvZero,
8561 VMDK_SECTOR2BYTE(pExtent->cSectorsPerGrain));
8562 if (RT_FAILURE(rc))
8563 break;
8564
8565 /* Write updated grain tables to file */
8566 aGTDataTmp[idxGT] = VMDK_BYTE2SECTOR(uNewAppendPosition);
8567 aRGTDataTmp[idxGT] = VMDK_BYTE2SECTOR(uNewAppendPosition);
8568
8569 if (memcmp(aGTDataTmp, aRGTDataTmp, sizeof(uint32_t) * pExtent->cGTEntries))
8570 {
8571 rc = vdIfError(pImage->pIfError, VERR_VD_VMDK_INVALID_HEADER, RT_SRC_POS,
8572 N_("VMDK: inconsistency between grain table and backup grain table in '%s'"), pExtent->pszFullname);
8573 break;
8574 }
8575
8576 rc = vdIfIoIntFileWriteSync(pImage->pIfIo, pExtent->pFile->pStorage,
8577 VMDK_SECTOR2BYTE(uGTSector),
8578 aGTDataTmp, sizeof(uint32_t) * pExtent->cGTEntries);
8579
8580 if (RT_FAILURE(rc))
8581 break;
8582
8583 rc = vdIfIoIntFileWriteSync(pImage->pIfIo, pExtent->pFile->pStorage,
8584 VMDK_SECTOR2BYTE(uRGTSector),
8585 aRGTDataTmp, sizeof(uint32_t) * pExtent->cGTEntries);
8586
8587 break;
8588 }
8589 }
8590 }
8591
8592 RTMemFree(aGTDataTmp);
8593 aGTDataTmp = NULL;
8594
8595 RTMemFree(aRGTDataTmp);
8596 aRGTDataTmp = NULL;
8597
8598 if (RT_FAILURE(rc))
8599 break;
8600
8601 uNewAppendPosition += VMDK_SECTOR2BYTE(pExtent->cSectorsPerGrain);
8602 } while (0);
8603
8604 if (pvBuf)
8605 {
8606 RTMemFree(pvBuf);
8607 pvBuf = NULL;
8608 }
8609
8610 if (pvZero)
8611 {
8612 RTMemFree(pvZero);
8613 pvZero = NULL;
8614 }
8615
8616 // Update append position for extent
8617 pExtent->uAppendPosition = uNewAppendPosition;
8618
8619 return rc;
8620}
8621
8622/**
8623 * Resizes meta/overhead for sparse extent resize.
8624 *
8625 * @returns VBox status code.
8626 * @param pImage VMDK image instance.
8627 * @param pExtent VMDK extent instance.
8628 * @param cSectorsNew Number of sectors after resize.
8629 */
8630static int vmdkResizeSparseMeta(PVMDKIMAGE pImage, PVMDKEXTENT pExtent,
8631 uint64_t cSectorsNew)
8632{
8633 int rc = VINF_SUCCESS;
8634 uint32_t cOldGDEntries = pExtent->cGDEntries;
8635
8636 uint64_t cNewDirEntries = cSectorsNew / pExtent->cSectorsPerGDE;
8637 if (cSectorsNew % pExtent->cSectorsPerGDE)
8638 cNewDirEntries++;
8639
8640 size_t cbNewGD = cNewDirEntries * sizeof(uint32_t);
8641
8642 uint64_t cbNewDirSize = RT_ALIGN_64(cbNewGD, 512);
8643 uint64_t cbCurrDirSize = RT_ALIGN_64(pExtent->cGDEntries * VMDK_GRAIN_DIR_ENTRY_SIZE, 512);
8644 uint64_t cDirSectorDiff = VMDK_BYTE2SECTOR(cbNewDirSize - cbCurrDirSize);
8645
8646 uint64_t cbNewAllTablesSize = RT_ALIGN_64(cNewDirEntries * pExtent->cGTEntries * sizeof(uint32_t), 512);
8647 uint64_t cbCurrAllTablesSize = RT_ALIGN_64(pExtent->cGDEntries * VMDK_GRAIN_TABLE_SIZE, 512);
8648 uint64_t cTableSectorDiff = VMDK_BYTE2SECTOR(cbNewAllTablesSize - cbCurrAllTablesSize);
8649
8650 uint64_t cbNewOverhead = vmdkGetNewOverhead(pExtent, cSectorsNew);
8651 uint64_t cNewOverheadSectors = VMDK_BYTE2SECTOR(cbNewOverhead);
8652 uint64_t cOverheadSectorDiff = cNewOverheadSectors - pExtent->cOverheadSectors;
8653
8654 /*
8655 * Get the blocks we need to relocate first, they are appended to the end
8656 * of the image.
8657 */
8658 void *pvBuf = NULL, *pvZero = NULL;
8659
8660 do
8661 {
8662 /* Allocate data buffer. */
8663 pvBuf = RTMemAllocZ(VMDK_GRAIN_TABLE_SIZE);
8664 if (!pvBuf)
8665 {
8666 rc = VERR_NO_MEMORY;
8667 break;
8668 }
8669
8670 /* Allocate buffer for overwriting with zeroes. */
8671 pvZero = RTMemAllocZ(VMDK_GRAIN_TABLE_SIZE);
8672 if (!pvZero)
8673 {
8674 RTMemFree(pvBuf);
8675 pvBuf = NULL;
8676
8677 rc = VERR_NO_MEMORY;
8678 break;
8679 }
8680
8681 uint32_t uGTStart = VMDK_SECTOR2BYTE(pExtent->uSectorGD) + (cOldGDEntries * VMDK_GRAIN_DIR_ENTRY_SIZE);
8682
8683 // points to last element in the grain table
8684 uint32_t uGTTail = uGTStart + (pExtent->cGDEntries * VMDK_GRAIN_TABLE_SIZE) - VMDK_GRAIN_TABLE_SIZE;
8685 uint32_t cbGTOff = RT_ALIGN_Z(VMDK_SECTOR2BYTE(cDirSectorDiff + cTableSectorDiff + cDirSectorDiff), 512);
8686
8687 for (int i = pExtent->cGDEntries - 1; i >= 0; i--)
8688 {
8689 rc = vdIfIoIntFileReadSync(pImage->pIfIo, pExtent->pFile->pStorage,
8690 uGTTail, pvBuf,
8691 VMDK_GRAIN_TABLE_SIZE);
8692 if (RT_FAILURE(rc))
8693 break;
8694
8695 rc = vdIfIoIntFileWriteSync(pImage->pIfIo, pExtent->pFile->pStorage,
8696 RT_ALIGN_Z(uGTTail + cbGTOff, 512), pvBuf,
8697 VMDK_GRAIN_TABLE_SIZE);
8698 if (RT_FAILURE(rc))
8699 break;
8700
8701 // This overshoots when i == 0, but we don't need it anymore.
8702 uGTTail -= VMDK_GRAIN_TABLE_SIZE;
8703 }
8704
8705
8706 /* Find the end of the grain directory and start bumping everything down. Update locations of GT entries. */
8707 rc = vdIfIoIntFileReadSync(pImage->pIfIo, pExtent->pFile->pStorage,
8708 VMDK_SECTOR2BYTE(pExtent->uSectorGD), pvBuf,
8709 pExtent->cGDEntries * VMDK_GRAIN_DIR_ENTRY_SIZE);
8710 if (RT_FAILURE(rc))
8711 break;
8712
8713 int * tmpBuf = (int *)pvBuf;
8714
8715 for (uint32_t i = 0; i < pExtent->cGDEntries; i++)
8716 {
8717 tmpBuf[i] = tmpBuf[i] + VMDK_BYTE2SECTOR(cbGTOff);
8718 pExtent->pGD[i] = pExtent->pGD[i] + VMDK_BYTE2SECTOR(cbGTOff);
8719 }
8720
8721 rc = vdIfIoIntFileWriteSync(pImage->pIfIo, pExtent->pFile->pStorage,
8722 RT_ALIGN_Z(VMDK_SECTOR2BYTE(pExtent->uSectorGD + cTableSectorDiff + cDirSectorDiff), 512), pvBuf,
8723 pExtent->cGDEntries * VMDK_GRAIN_DIR_ENTRY_SIZE);
8724 if (RT_FAILURE(rc))
8725 break;
8726
8727 pExtent->uSectorGD = pExtent->uSectorGD + cDirSectorDiff + cTableSectorDiff;
8728
8729 /* Repeat both steps with the redundant grain table/directory. */
8730
8731 uint32_t uRGTStart = VMDK_SECTOR2BYTE(pExtent->uSectorRGD) + (cOldGDEntries * VMDK_GRAIN_DIR_ENTRY_SIZE);
8732
8733 // points to last element in the grain table
8734 uint32_t uRGTTail = uRGTStart + (pExtent->cGDEntries * VMDK_GRAIN_TABLE_SIZE) - VMDK_GRAIN_TABLE_SIZE;
8735 uint32_t cbRGTOff = RT_ALIGN_Z(VMDK_SECTOR2BYTE(cDirSectorDiff), 512);
8736
8737 for (int i = pExtent->cGDEntries - 1; i >= 0; i--)
8738 {
8739 rc = vdIfIoIntFileReadSync(pImage->pIfIo, pExtent->pFile->pStorage,
8740 uRGTTail, pvBuf,
8741 VMDK_GRAIN_TABLE_SIZE);
8742 if (RT_FAILURE(rc))
8743 break;
8744
8745 rc = vdIfIoIntFileWriteSync(pImage->pIfIo, pExtent->pFile->pStorage,
8746 RT_ALIGN_Z(uRGTTail + cbRGTOff, 512), pvBuf,
8747 VMDK_GRAIN_TABLE_SIZE);
8748 if (RT_FAILURE(rc))
8749 break;
8750
8751 // This overshoots when i == 0, but we don't need it anymore.
8752 uRGTTail -= VMDK_GRAIN_TABLE_SIZE;
8753 }
8754
8755 /* Update locations of GT entries. */
8756 rc = vdIfIoIntFileReadSync(pImage->pIfIo, pExtent->pFile->pStorage,
8757 VMDK_SECTOR2BYTE(pExtent->uSectorRGD), pvBuf,
8758 pExtent->cGDEntries * VMDK_GRAIN_DIR_ENTRY_SIZE);
8759 if (RT_FAILURE(rc))
8760 break;
8761
8762 tmpBuf = (int *)pvBuf;
8763
8764 for (uint32_t i = 0; i < pExtent->cGDEntries; i++)
8765 {
8766 tmpBuf[i] = tmpBuf[i] + cDirSectorDiff;
8767 pExtent->pRGD[i] = pExtent->pRGD[i] + cDirSectorDiff;
8768 }
8769
8770 rc = vdIfIoIntFileWriteSync(pImage->pIfIo, pExtent->pFile->pStorage,
8771 VMDK_SECTOR2BYTE(pExtent->uSectorRGD), pvBuf,
8772 pExtent->cGDEntries * VMDK_GRAIN_DIR_ENTRY_SIZE);
8773 if (RT_FAILURE(rc))
8774 break;
8775
8776 pExtent->uSectorRGD = pExtent->uSectorRGD;
8777 pExtent->cOverheadSectors += cOverheadSectorDiff;
8778
8779 } while (0);
8780
8781 if (pvBuf)
8782 {
8783 RTMemFree(pvBuf);
8784 pvBuf = NULL;
8785 }
8786
8787 if (pvZero)
8788 {
8789 RTMemFree(pvZero);
8790 pvZero = NULL;
8791 }
8792
8793 pExtent->cGDEntries = cNewDirEntries;
8794
8795 /* Allocate buffer for overwriting with zeroes. */
8796 pvZero = RTMemAllocZ(VMDK_GRAIN_TABLE_SIZE);
8797 if (!pvZero)
8798 return VERR_NO_MEMORY;
8799
8800 // Allocate additional grain dir
8801 pExtent->pGD = (uint32_t *) RTMemReallocZ(pExtent->pGD, pExtent->cGDEntries * sizeof(uint32_t), cbNewGD);
8802 if (RT_LIKELY(pExtent->pGD))
8803 {
8804 if (pExtent->uSectorRGD)
8805 {
8806 pExtent->pRGD = (uint32_t *)RTMemReallocZ(pExtent->pRGD, pExtent->cGDEntries * sizeof(uint32_t), cbNewGD);
8807 if (RT_UNLIKELY(!pExtent->pRGD))
8808 rc = VERR_NO_MEMORY;
8809 }
8810 }
8811 else
8812 return VERR_NO_MEMORY;
8813
8814
8815 uint32_t uTmpDirVal = pExtent->pGD[cOldGDEntries - 1] + VMDK_GRAIN_DIR_ENTRY_SIZE;
8816 for (uint32_t i = cOldGDEntries; i < pExtent->cGDEntries; i++)
8817 {
8818 pExtent->pGD[i] = uTmpDirVal;
8819
8820 rc = vdIfIoIntFileWriteSync(pImage->pIfIo, pExtent->pFile->pStorage,
8821 VMDK_SECTOR2BYTE(uTmpDirVal), pvZero,
8822 VMDK_GRAIN_TABLE_SIZE);
8823
8824 if (RT_FAILURE(rc))
8825 return rc;
8826
8827 uTmpDirVal += VMDK_GRAIN_DIR_ENTRY_SIZE;
8828 }
8829
8830 uint32_t uRTmpDirVal = pExtent->pRGD[cOldGDEntries - 1] + VMDK_GRAIN_DIR_ENTRY_SIZE;
8831 for (uint32_t i = cOldGDEntries; i < pExtent->cGDEntries; i++)
8832 {
8833 pExtent->pRGD[i] = uRTmpDirVal;
8834
8835 rc = vdIfIoIntFileWriteSync(pImage->pIfIo, pExtent->pFile->pStorage,
8836 VMDK_SECTOR2BYTE(uRTmpDirVal), pvZero,
8837 VMDK_GRAIN_TABLE_SIZE);
8838
8839 if (RT_FAILURE(rc))
8840 return rc;
8841
8842 uRTmpDirVal += VMDK_GRAIN_DIR_ENTRY_SIZE;
8843 }
8844
8845 RTMemFree(pvZero);
8846 pvZero = NULL;
8847
8848 rc = vdIfIoIntFileWriteSync(pImage->pIfIo, pExtent->pFile->pStorage,
8849 VMDK_SECTOR2BYTE(pExtent->uSectorGD), pExtent->pGD,
8850 pExtent->cGDEntries * VMDK_GRAIN_DIR_ENTRY_SIZE);
8851 if (RT_FAILURE(rc))
8852 return rc;
8853
8854 rc = vdIfIoIntFileWriteSync(pImage->pIfIo, pExtent->pFile->pStorage,
8855 VMDK_SECTOR2BYTE(pExtent->uSectorRGD), pExtent->pRGD,
8856 pExtent->cGDEntries * VMDK_GRAIN_DIR_ENTRY_SIZE);
8857 if (RT_FAILURE(rc))
8858 return rc;
8859
8860 rc = vmdkReplaceExtentSize(pImage, pExtent, pImage->Descriptor.uFirstExtent + pExtent->uExtent,
8861 pExtent->cNominalSectors, cSectorsNew);
8862 if (RT_FAILURE(rc))
8863 return rc;
8864
8865 return rc;
8866}
8867
8868/** @copydoc VDIMAGEBACKEND::pfnResize */
8869static DECLCALLBACK(int) vmdkResize(void *pBackendData, uint64_t cbSize,
8870 PCVDGEOMETRY pPCHSGeometry, PCVDGEOMETRY pLCHSGeometry,
8871 unsigned uPercentStart, unsigned uPercentSpan,
8872 PVDINTERFACE pVDIfsDisk, PVDINTERFACE pVDIfsImage,
8873 PVDINTERFACE pVDIfsOperation)
8874{
8875 RT_NOREF5(uPercentStart, uPercentSpan, pVDIfsDisk, pVDIfsImage, pVDIfsOperation);
8876
8877 // Establish variables and objects needed
8878 int rc = VINF_SUCCESS;
8879 PVMDKIMAGE pImage = (PVMDKIMAGE)pBackendData;
8880 unsigned uImageFlags = pImage->uImageFlags;
8881 PVMDKEXTENT pExtent = &pImage->pExtents[0];
8882 pExtent->fMetaDirty = true;
8883
8884 uint64_t cSectorsNew = cbSize / VMDK_SECTOR_SIZE; /** < New number of sectors in the image after the resize */
8885 if (cbSize % VMDK_SECTOR_SIZE)
8886 cSectorsNew++;
8887
8888 uint64_t cSectorsOld = pImage->cbSize / VMDK_SECTOR_SIZE; /** < Number of sectors before the resize. Only for FLAT images. */
8889 if (pImage->cbSize % VMDK_SECTOR_SIZE)
8890 cSectorsOld++;
8891 unsigned cExtents = pImage->cExtents;
8892
8893 /* Check size is within min/max bounds. */
8894 if ( !(uImageFlags & VD_VMDK_IMAGE_FLAGS_RAWDISK)
8895 && ( !cbSize
8896 || (!(uImageFlags & VD_IMAGE_FLAGS_FIXED) && cbSize >= _1T * 256 - _64K)) )
8897 return VERR_VD_INVALID_SIZE;
8898
8899 /*
8900 * Making the image smaller is not supported at the moment.
8901 */
8902 /** @todo implement making the image smaller, it is the responsibility of
8903 * the user to know what they're doing. */
8904 if (cbSize < pImage->cbSize)
8905 rc = VERR_VD_SHRINK_NOT_SUPPORTED;
8906 else if (cbSize > pImage->cbSize)
8907 {
8908 /**
8909 * monolithicFlat. FIXED flag and not split up into 2 GB parts.
8910 */
8911 if ((uImageFlags & VD_IMAGE_FLAGS_FIXED) && !(uImageFlags & VD_VMDK_IMAGE_FLAGS_SPLIT_2G))
8912 {
8913 /** Required space in bytes for the extent after the resize. */
8914 uint64_t cbSectorSpaceNew = cSectorsNew * VMDK_SECTOR_SIZE;
8915 pExtent = &pImage->pExtents[0];
8916
8917 rc = vdIfIoIntFileSetAllocationSize(pImage->pIfIo, pExtent->pFile->pStorage, cbSectorSpaceNew,
8918 0 /* fFlags */, NULL,
8919 uPercentStart, uPercentSpan);
8920 if (RT_FAILURE(rc))
8921 return vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: could not set size of new file '%s'"), pExtent->pszFullname);
8922
8923 rc = vmdkReplaceExtentSize(pImage, pExtent, pImage->Descriptor.uFirstExtent, cSectorsOld, cSectorsNew);
8924 if (RT_FAILURE(rc))
8925 return rc;
8926 }
8927
8928 /**
8929 * twoGbMaxExtentFlat. FIXED flag and SPLIT into 2 GB parts.
8930 */
8931 if ((uImageFlags & VD_IMAGE_FLAGS_FIXED) && (uImageFlags & VD_VMDK_IMAGE_FLAGS_SPLIT_2G))
8932 {
8933 /* Check to see how much space remains in last extent */
8934 bool fSpaceAvailible = false;
8935 uint64_t cLastExtentRemSectors = cSectorsOld % VMDK_BYTE2SECTOR(VMDK_2G_SPLIT_SIZE);
8936 if (cLastExtentRemSectors)
8937 fSpaceAvailible = true;
8938
8939 uint64_t cSectorsNeeded = cSectorsNew - cSectorsOld;
8940
8941 /** Space remaining in current last extent file that we don't need to create another one. */
8942 if (fSpaceAvailible && cSectorsNeeded + cLastExtentRemSectors <= VMDK_BYTE2SECTOR(VMDK_2G_SPLIT_SIZE))
8943 {
8944 pExtent = &pImage->pExtents[cExtents - 1];
8945 rc = vdIfIoIntFileSetAllocationSize(pImage->pIfIo, pExtent->pFile->pStorage,
8946 VMDK_SECTOR2BYTE(cSectorsNeeded + cLastExtentRemSectors),
8947 0 /* fFlags */, NULL, uPercentStart, uPercentSpan);
8948 if (RT_FAILURE(rc))
8949 return vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: could not set size of new file '%s'"), pExtent->pszFullname);
8950
8951 rc = vmdkReplaceExtentSize(pImage, pExtent, pImage->Descriptor.uFirstExtent + cExtents - 1,
8952 pExtent->cNominalSectors, cSectorsNeeded + cLastExtentRemSectors);
8953 if (RT_FAILURE(rc))
8954 return rc;
8955 }
8956 //** Need more extent files to handle all the requested space. */
8957 else
8958 {
8959 if (fSpaceAvailible)
8960 {
8961 pExtent = &pImage->pExtents[cExtents - 1];
8962 rc = vdIfIoIntFileSetAllocationSize(pImage->pIfIo, pExtent->pFile->pStorage, VMDK_2G_SPLIT_SIZE,
8963 0 /* fFlags */, NULL,
8964 uPercentStart, uPercentSpan);
8965 if (RT_FAILURE(rc))
8966 return vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: could not set size of new file '%s'"), pExtent->pszFullname);
8967
8968 cSectorsNeeded = cSectorsNeeded - VMDK_BYTE2SECTOR(VMDK_2G_SPLIT_SIZE) + cLastExtentRemSectors;
8969
8970 rc = vmdkReplaceExtentSize(pImage, pExtent, pImage->Descriptor.uFirstExtent + cExtents - 1,
8971 pExtent->cNominalSectors, VMDK_BYTE2SECTOR(VMDK_2G_SPLIT_SIZE));
8972 if (RT_FAILURE(rc))
8973 return rc;
8974 }
8975
8976 unsigned cNewExtents = VMDK_SECTOR2BYTE(cSectorsNeeded) / VMDK_2G_SPLIT_SIZE;
8977 if (cNewExtents % VMDK_2G_SPLIT_SIZE || cNewExtents < VMDK_2G_SPLIT_SIZE)
8978 cNewExtents++;
8979
8980 for (unsigned i = cExtents;
8981 i < cExtents + cNewExtents && cSectorsNeeded >= VMDK_BYTE2SECTOR(VMDK_2G_SPLIT_SIZE);
8982 i++)
8983 {
8984 rc = vmdkAddFileBackedExtent(pImage, VMDK_2G_SPLIT_SIZE);
8985 if (RT_FAILURE(rc))
8986 return rc;
8987
8988 pExtent = &pImage->pExtents[i];
8989
8990 pExtent->cSectors = VMDK_BYTE2SECTOR(VMDK_2G_SPLIT_SIZE);
8991 cSectorsNeeded -= VMDK_BYTE2SECTOR(VMDK_2G_SPLIT_SIZE);
8992 }
8993
8994 if (cSectorsNeeded)
8995 {
8996 rc = vmdkAddFileBackedExtent(pImage, VMDK_SECTOR2BYTE(cSectorsNeeded));
8997 if (RT_FAILURE(rc))
8998 return rc;
8999 }
9000 }
9001 }
9002
9003 /**
9004 * monolithicSparse.
9005 */
9006 if (pExtent->enmType == VMDKETYPE_HOSTED_SPARSE && !(uImageFlags & VD_VMDK_IMAGE_FLAGS_SPLIT_2G))
9007 {
9008 // 1. Calculate sectors needed for new overhead.
9009
9010 uint64_t cbNewOverhead = vmdkGetNewOverhead(pExtent, cSectorsNew);
9011 uint64_t cNewOverheadSectors = VMDK_BYTE2SECTOR(cbNewOverhead);
9012 uint64_t cOverheadSectorDiff = cNewOverheadSectors - pExtent->cOverheadSectors;
9013
9014 // 2. Relocate sectors to make room for new GD/GT, update entries in GD/GT
9015 if (cOverheadSectorDiff > 0)
9016 {
9017 if (pExtent->cSectors > 0)
9018 {
9019 /* Do the relocation. */
9020 LogFlow(("Relocating VMDK sectors\n"));
9021 rc = vmdkRelocateSectorsForSparseResize(pImage, pExtent, cSectorsNew);
9022 if (RT_FAILURE(rc))
9023 return rc;
9024
9025 rc = vmdkFlushImage(pImage, NULL);
9026 if (RT_FAILURE(rc))
9027 return rc;
9028 }
9029
9030 rc = vmdkResizeSparseMeta(pImage, pExtent, cSectorsNew);
9031 if (RT_FAILURE(rc))
9032 return rc;
9033 }
9034 }
9035
9036 /**
9037 * twoGbSparseExtent
9038 */
9039 if (pExtent->enmType == VMDKETYPE_HOSTED_SPARSE && (uImageFlags & VD_VMDK_IMAGE_FLAGS_SPLIT_2G))
9040 {
9041 /* Check to see how much space remains in last extent */
9042 bool fSpaceAvailible = false;
9043 uint64_t cLastExtentRemSectors = cSectorsOld % VMDK_BYTE2SECTOR(VMDK_2G_SPLIT_SIZE);
9044 if (cLastExtentRemSectors)
9045 fSpaceAvailible = true;
9046
9047 uint64_t cSectorsNeeded = cSectorsNew - cSectorsOld;
9048
9049 if (fSpaceAvailible && cSectorsNeeded + cLastExtentRemSectors <= VMDK_BYTE2SECTOR(VMDK_2G_SPLIT_SIZE))
9050 {
9051 pExtent = &pImage->pExtents[cExtents - 1];
9052 rc = vmdkRelocateSectorsForSparseResize(pImage, pExtent, cSectorsNeeded + cLastExtentRemSectors);
9053 if (RT_FAILURE(rc))
9054 return rc;
9055
9056 rc = vmdkFlushImage(pImage, NULL);
9057 if (RT_FAILURE(rc))
9058 return rc;
9059
9060 rc = vmdkResizeSparseMeta(pImage, pExtent, cSectorsNeeded + cLastExtentRemSectors);
9061 if (RT_FAILURE(rc))
9062 return rc;
9063 }
9064 else
9065 {
9066 if (fSpaceAvailible)
9067 {
9068 pExtent = &pImage->pExtents[cExtents - 1];
9069 rc = vmdkRelocateSectorsForSparseResize(pImage, pExtent, VMDK_BYTE2SECTOR(VMDK_2G_SPLIT_SIZE));
9070 if (RT_FAILURE(rc))
9071 return rc;
9072
9073 rc = vmdkFlushImage(pImage, NULL);
9074 if (RT_FAILURE(rc))
9075 return rc;
9076
9077 rc = vmdkResizeSparseMeta(pImage, pExtent, VMDK_BYTE2SECTOR(VMDK_2G_SPLIT_SIZE));
9078 if (RT_FAILURE(rc))
9079 return rc;
9080
9081 cSectorsNeeded = cSectorsNeeded - VMDK_BYTE2SECTOR(VMDK_2G_SPLIT_SIZE) + cLastExtentRemSectors;
9082 }
9083
9084 unsigned cNewExtents = VMDK_SECTOR2BYTE(cSectorsNeeded) / VMDK_2G_SPLIT_SIZE;
9085 if (cNewExtents % VMDK_2G_SPLIT_SIZE || cNewExtents < VMDK_2G_SPLIT_SIZE)
9086 cNewExtents++;
9087
9088 for (unsigned i = cExtents;
9089 i < cExtents + cNewExtents && cSectorsNeeded >= VMDK_BYTE2SECTOR(VMDK_2G_SPLIT_SIZE);
9090 i++)
9091 {
9092 rc = vmdkAddFileBackedExtent(pImage, VMDK_2G_SPLIT_SIZE);
9093 if (RT_FAILURE(rc))
9094 return rc;
9095
9096 pExtent = &pImage->pExtents[i];
9097
9098 rc = vmdkFlushImage(pImage, NULL);
9099 if (RT_FAILURE(rc))
9100 return rc;
9101
9102 pExtent->cSectors = VMDK_BYTE2SECTOR(VMDK_2G_SPLIT_SIZE);
9103 cSectorsNeeded -= VMDK_BYTE2SECTOR(VMDK_2G_SPLIT_SIZE);
9104 }
9105
9106 if (cSectorsNeeded)
9107 {
9108 rc = vmdkAddFileBackedExtent(pImage, VMDK_SECTOR2BYTE(cSectorsNeeded));
9109 if (RT_FAILURE(rc))
9110 return rc;
9111
9112 pExtent = &pImage->pExtents[pImage->cExtents];
9113
9114 rc = vmdkFlushImage(pImage, NULL);
9115 if (RT_FAILURE(rc))
9116 return rc;
9117 }
9118 }
9119 }
9120
9121 /* Successful resize. Update metadata */
9122 if (RT_SUCCESS(rc))
9123 {
9124 /* Update size and new block count. */
9125 pImage->cbSize = cbSize;
9126 pExtent->cNominalSectors = cSectorsNew;
9127 pExtent->cSectors = cSectorsNew;
9128
9129 /* Update geometry. */
9130 pImage->PCHSGeometry = *pPCHSGeometry;
9131 pImage->LCHSGeometry = *pLCHSGeometry;
9132 }
9133
9134 /* Update header information in base image file. */
9135 pImage->Descriptor.fDirty = true;
9136 rc = vmdkWriteDescriptor(pImage, NULL);
9137
9138 if (RT_SUCCESS(rc))
9139 rc = vmdkFlushImage(pImage, NULL);
9140 }
9141 /* Same size doesn't change the image at all. */
9142
9143 LogFlowFunc(("returns %Rrc\n", rc));
9144 return rc;
9145}
9146
/**
 * VMDK image backend descriptor.
 *
 * Registered with the VD container framework; positional initializer order
 * must match the VDIMAGEBACKEND structure declaration exactly.  NULL entries
 * mark optional callbacks this backend does not implement (discard, compact,
 * repair, timestamps, parent filename, metadata traversal).
 */
const VDIMAGEBACKEND g_VmdkBackend =
{
    /* u32Version */
    VD_IMGBACKEND_VERSION,
    /* pszBackendName */
    "VMDK",
    /* uBackendCaps */
    VD_CAP_UUID | VD_CAP_CREATE_FIXED | VD_CAP_CREATE_DYNAMIC
    | VD_CAP_CREATE_SPLIT_2G | VD_CAP_DIFF | VD_CAP_FILE | VD_CAP_ASYNC
    | VD_CAP_VFS | VD_CAP_PREFERRED,
    /* paFileExtensions */
    s_aVmdkFileExtensions,
    /* paConfigInfo */
    s_aVmdkConfigInfo,
    /* pfnProbe */
    vmdkProbe,
    /* pfnOpen */
    vmdkOpen,
    /* pfnCreate */
    vmdkCreate,
    /* pfnRename */
    vmdkRename,
    /* pfnClose */
    vmdkClose,
    /* pfnRead */
    vmdkRead,
    /* pfnWrite */
    vmdkWrite,
    /* pfnFlush */
    vmdkFlush,
    /* pfnDiscard */
    NULL,
    /* pfnGetVersion */
    vmdkGetVersion,
    /* pfnGetFileSize */
    vmdkGetFileSize,
    /* pfnGetPCHSGeometry */
    vmdkGetPCHSGeometry,
    /* pfnSetPCHSGeometry */
    vmdkSetPCHSGeometry,
    /* pfnGetLCHSGeometry */
    vmdkGetLCHSGeometry,
    /* pfnSetLCHSGeometry */
    vmdkSetLCHSGeometry,
    /* pfnQueryRegions */
    vmdkQueryRegions,
    /* pfnRegionListRelease */
    vmdkRegionListRelease,
    /* pfnGetImageFlags */
    vmdkGetImageFlags,
    /* pfnGetOpenFlags */
    vmdkGetOpenFlags,
    /* pfnSetOpenFlags */
    vmdkSetOpenFlags,
    /* pfnGetComment */
    vmdkGetComment,
    /* pfnSetComment */
    vmdkSetComment,
    /* pfnGetUuid */
    vmdkGetUuid,
    /* pfnSetUuid */
    vmdkSetUuid,
    /* pfnGetModificationUuid */
    vmdkGetModificationUuid,
    /* pfnSetModificationUuid */
    vmdkSetModificationUuid,
    /* pfnGetParentUuid */
    vmdkGetParentUuid,
    /* pfnSetParentUuid */
    vmdkSetParentUuid,
    /* pfnGetParentModificationUuid */
    vmdkGetParentModificationUuid,
    /* pfnSetParentModificationUuid */
    vmdkSetParentModificationUuid,
    /* pfnDump */
    vmdkDump,
    /* pfnGetTimestamp */
    NULL,
    /* pfnGetParentTimestamp */
    NULL,
    /* pfnSetParentTimestamp */
    NULL,
    /* pfnGetParentFilename */
    NULL,
    /* pfnSetParentFilename */
    NULL,
    /* pfnComposeLocation */
    genericFileComposeLocation,
    /* pfnComposeName */
    genericFileComposeName,
    /* pfnCompact */
    NULL,
    /* pfnResize */
    vmdkResize,
    /* pfnRepair */
    NULL,
    /* pfnTraverseMetadata */
    NULL,
    /* u32VersionEnd */
    VD_IMGBACKEND_VERSION
};
Note: See TracBrowser for help on using the repository browser.

© 2024 Oracle Support Privacy / Do Not Sell My Info Terms of Use Trademark Policy Automated Access Etiquette