VirtualBox

source: vbox/trunk/src/VBox/Storage/VMDK.cpp@ 107861

Last change on this file since 107861 was 107738, checked in by vboxsync, 5 weeks ago

Storage/VMDK.cpp: Fix parfait warning about unused assignment, we should return an error if memory allocation fails + todos, bugref:3409 [scm]

  • Property svn:eol-style set to native
  • Property svn:keywords set to Author Date Id Revision
File size: 378.8 KB
Line 
1/* $Id: VMDK.cpp 107738 2025-01-14 09:28:52Z vboxsync $ */
2/** @file
3 * VMDK disk image, core code.
4 */
5
6/*
7 * Copyright (C) 2006-2024 Oracle and/or its affiliates.
8 *
9 * This file is part of VirtualBox base platform packages, as
10 * available from https://www.virtualbox.org.
11 *
12 * This program is free software; you can redistribute it and/or
13 * modify it under the terms of the GNU General Public License
14 * as published by the Free Software Foundation, in version 3 of the
15 * License.
16 *
17 * This program is distributed in the hope that it will be useful, but
18 * WITHOUT ANY WARRANTY; without even the implied warranty of
19 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
20 * General Public License for more details.
21 *
22 * You should have received a copy of the GNU General Public License
23 * along with this program; if not, see <https://www.gnu.org/licenses>.
24 *
25 * SPDX-License-Identifier: GPL-3.0-only
26 */
27
28
29/*********************************************************************************************************************************
30* Header Files *
31*********************************************************************************************************************************/
32#define LOG_GROUP LOG_GROUP_VD_VMDK
33#include <VBox/log.h> /* before VBox/vd-ifs.h */
34#include <VBox/vd-plugin.h>
35#include <VBox/err.h>
36
37#include <iprt/assert.h>
38#include <iprt/alloc.h>
39#include <iprt/base64.h>
40#include <iprt/ctype.h>
41#include <iprt/crc.h>
42#include <iprt/dvm.h>
43#include <iprt/uuid.h>
44#include <iprt/path.h>
45#include <iprt/rand.h>
46#include <iprt/sg.h>
47#include <iprt/sort.h>
48#include <iprt/string.h>
49#include <iprt/zip.h>
50#include <iprt/asm.h>
51#include <iprt/zero.h>
52#ifdef RT_OS_WINDOWS
53# include <iprt/utf16.h>
54# include <iprt/uni.h>
55# include <iprt/uni.h>
56# include <iprt/nt/nt-and-windows.h>
57# include <winioctl.h>
58#endif
59#ifdef RT_OS_LINUX
60# include <errno.h>
61# include <sys/stat.h>
62# include <iprt/dir.h>
63# include <iprt/symlink.h>
64# include <iprt/linux/sysfs.h>
65#endif
66#ifdef RT_OS_FREEBSD
67#include <libgeom.h>
68#include <sys/stat.h>
69#include <stdlib.h>
70#endif
71#ifdef RT_OS_SOLARIS
72#include <sys/dkio.h>
73#include <sys/vtoc.h>
74#include <sys/efi_partition.h>
75#include <unistd.h>
76#include <errno.h>
77#endif
78#ifdef RT_OS_DARWIN
79# include <sys/stat.h>
80# include <sys/disk.h>
81# include <errno.h>
82/* The following structure and IOCTLs are defined in znu bsd/sys/disk.h but
83 inside KERNEL ifdefs and thus stripped from the SDK edition of the header.
84 While we could try include the header from the Kernel.framework, it's a lot
85 easier to just add the structure and 4 defines here. */
/** Physical extent descriptor used with DKIOCGETPHYSICALEXTENT.
 * Layout copied from the KERNEL-only section of XNU bsd/sys/disk.h (see the
 * note above); must match the kernel's definition exactly. */
typedef struct
{
    /** Byte offset of the extent.  NOTE(review): presumably the logical
     * offset on input and physical on output — confirm against XNU. */
    uint64_t offset;
    /** Length of the extent in bytes. */
    uint64_t length;
    /** Reserved/padding bytes, per the kernel header layout. */
    uint8_t reserved0128[12];
    /** Device the physical extent lives on. */
    dev_t dev;
} dk_physical_extent_t;
93# define DKIOCGETBASE _IOR( 'd', 73, uint64_t)
94# define DKIOCLOCKPHYSICALEXTENTS _IO( 'd', 81)
95# define DKIOCGETPHYSICALEXTENT _IOWR('d', 82, dk_physical_extent_t)
96# define DKIOCUNLOCKPHYSICALEXTENTS _IO( 'd', 83)
97#endif /* RT_OS_DARWIN */
98
99#include "VDBackends.h"
100
101
102/*********************************************************************************************************************************
103* Constants And Macros, Structures and Typedefs *
104*********************************************************************************************************************************/
105
106/** Maximum encoded string size (including NUL) we allow for VMDK images.
107 * Deliberately not set high to avoid running out of descriptor space. */
108#define VMDK_ENCODED_COMMENT_MAX 1024
109
110/** VMDK descriptor DDB entry for PCHS cylinders. */
111#define VMDK_DDB_GEO_PCHS_CYLINDERS "ddb.geometry.cylinders"
112
113/** VMDK descriptor DDB entry for PCHS heads. */
114#define VMDK_DDB_GEO_PCHS_HEADS "ddb.geometry.heads"
115
116/** VMDK descriptor DDB entry for PCHS sectors. */
117#define VMDK_DDB_GEO_PCHS_SECTORS "ddb.geometry.sectors"
118
119/** VMDK descriptor DDB entry for LCHS cylinders. */
120#define VMDK_DDB_GEO_LCHS_CYLINDERS "ddb.geometry.biosCylinders"
121
122/** VMDK descriptor DDB entry for LCHS heads. */
123#define VMDK_DDB_GEO_LCHS_HEADS "ddb.geometry.biosHeads"
124
125/** VMDK descriptor DDB entry for LCHS sectors. */
126#define VMDK_DDB_GEO_LCHS_SECTORS "ddb.geometry.biosSectors"
127
128/** VMDK descriptor DDB entry for image UUID. */
129#define VMDK_DDB_IMAGE_UUID "ddb.uuid.image"
130
131/** VMDK descriptor DDB entry for image modification UUID. */
132#define VMDK_DDB_MODIFICATION_UUID "ddb.uuid.modification"
133
134/** VMDK descriptor DDB entry for parent image UUID. */
135#define VMDK_DDB_PARENT_UUID "ddb.uuid.parent"
136
137/** VMDK descriptor DDB entry for parent image modification UUID. */
138#define VMDK_DDB_PARENT_MODIFICATION_UUID "ddb.uuid.parentmodification"
139
140/** No compression for streamOptimized files. */
141#define VMDK_COMPRESSION_NONE 0
142
143/** Deflate compression for streamOptimized files. */
144#define VMDK_COMPRESSION_DEFLATE 1
145
146/** Marker that the actual GD value is stored in the footer. */
147#define VMDK_GD_AT_END 0xffffffffffffffffULL
148
149/** Marker for end-of-stream in streamOptimized images. */
150#define VMDK_MARKER_EOS 0
151
152/** Marker for grain table block in streamOptimized images. */
153#define VMDK_MARKER_GT 1
154
155/** Marker for grain directory block in streamOptimized images. */
156#define VMDK_MARKER_GD 2
157
158/** Marker for footer in streamOptimized images. */
159#define VMDK_MARKER_FOOTER 3
160
161/** Marker for unknown purpose in streamOptimized images.
162 * Shows up in very recent images created by vSphere, but only sporadically.
163 * They "forgot" to document that one in the VMDK specification. */
164#define VMDK_MARKER_UNSPECIFIED 4
165
166/** Dummy marker for "don't check the marker value". */
167#define VMDK_MARKER_IGNORE 0xffffffffU
168
169/**
170 * Magic number for hosted images created by VMware Workstation 4, VMware
171 * Workstation 5, VMware Server or VMware Player. Not necessarily sparse.
172 */
173#define VMDK_SPARSE_MAGICNUMBER 0x564d444b /* 'V' 'M' 'D' 'K' */
174
175/** VMDK sector size in bytes. */
176#define VMDK_SECTOR_SIZE 512
177/** Max string buffer size for uint64_t with null term */
178#define UINT64_MAX_BUFF_SIZE 21
179/** Grain directory entry size in bytes */
180#define VMDK_GRAIN_DIR_ENTRY_SIZE 4
181/** Grain table size in bytes */
182#define VMDK_GRAIN_TABLE_SIZE 2048
183
/**
 * VMDK hosted binary extent header. The "Sparse" is a total misnomer, as
 * this header is also used for monolithic flat images.
 *
 * Packed to exactly 512 bytes (one VMDK sector): 79 bytes of fields plus
 * 433 bytes of padding.
 */
#pragma pack(1)
typedef struct SparseExtentHeader
{
    /** Magic number, VMDK_SPARSE_MAGICNUMBER for hosted images. */
    uint32_t magicNumber;
    /** Header format version. */
    uint32_t version;
    /** Feature/validity flags. */
    uint32_t flags;
    /** Capacity of the extent in sectors. */
    uint64_t capacity;
    /** Grain size in sectors. */
    uint64_t grainSize;
    /** Sector offset of the embedded descriptor. */
    uint64_t descriptorOffset;
    /** Size of the embedded descriptor in sectors
     * (bounded by VMDK_SPARSE_DESCRIPTOR_SIZE_MAX). */
    uint64_t descriptorSize;
    /** Number of grain table entries per grain table. */
    uint32_t numGTEsPerGT;
    /** Sector offset of the redundant grain directory. */
    uint64_t rgdOffset;
    /** Sector offset of the grain directory; VMDK_GD_AT_END when the actual
     * value is stored in the footer (streamOptimized). */
    uint64_t gdOffset;
    /** Total metadata overhead in sectors. */
    uint64_t overHead;
    /** Unclean shutdown flag (cf. VMDKEXTENT::fUncleanShutdown). */
    bool uncleanShutdown;
    /** Line-ending probe characters.  NOTE(review): per the VMDK format these
     * detect text-mode transfer corruption — not verifiable from this file. */
    char singleEndLineChar;
    char nonEndLineChar;
    char doubleEndLineChar1;
    char doubleEndLineChar2;
    /** Compression algorithm, VMDK_COMPRESSION_NONE or _DEFLATE. */
    uint16_t compressAlgorithm;
    /** Padding up to the full 512 byte sector size. */
    uint8_t pad[433];
} SparseExtentHeader;
#pragma pack()
211
212/** The maximum allowed descriptor size in the extent header in sectors. */
213#define VMDK_SPARSE_DESCRIPTOR_SIZE_MAX UINT64_C(20480) /* 10MB */
214
215/** VMDK capacity for a single chunk when 2G splitting is turned on. Should be
216 * divisible by the default grain size (64K) */
217#define VMDK_2G_SPLIT_SIZE (2047 * 1024 * 1024)
218
/** VMDK streamOptimized file format marker. The type field may or may not
 * be actually valid, but there's always data to read there.
 *
 * Precedes each compressed grain and each metadata block; stored packed and
 * little-endian on disk (callers convert with RT_LE2H_*/RT_H2LE_*). */
#pragma pack(1)
typedef struct VMDKMARKER
{
    /** Starting sector (LBA) of the data this marker describes. */
    uint64_t uSector;
    /** Size of the compressed data following the marker in bytes; a zero
     * value is rejected as corruption when a grain is expected (see
     * vmdkFileInflateSync). */
    uint32_t cbSize;
    /** Marker type, one of the VMDK_MARKER_* values. */
    uint32_t uType;
} VMDKMARKER, *PVMDKMARKER;
#pragma pack()
229
230
231/** Convert sector number/size to byte offset/size. */
232#define VMDK_SECTOR2BYTE(u) ((uint64_t)(u) << 9)
233
234/** Convert byte offset/size to sector number/size. */
235#define VMDK_BYTE2SECTOR(u) ((u) >> 9)
236
/**
 * VMDK extent type.
 *
 * Note: values deliberately start at 1, so a zero-initialized field is
 * presumably detectable as "no type set" — not relied upon visibly here.
 */
typedef enum VMDKETYPE
{
    /** Hosted sparse extent. */
    VMDKETYPE_HOSTED_SPARSE = 1,
    /** Flat extent. */
    VMDKETYPE_FLAT,
    /** Zero extent. */
    VMDKETYPE_ZERO,
    /** VMFS extent, used by ESX. */
    VMDKETYPE_VMFS
} VMDKETYPE, *PVMDKETYPE;
251
/**
 * VMDK access type for a extent, matching the access specification in the
 * extent lines of the descriptor (NOACCESS / RDONLY / RW).
 */
typedef enum VMDKACCESS
{
    /** No access allowed. */
    VMDKACCESS_NOACCESS = 0,
    /** Read-only access. */
    VMDKACCESS_READONLY,
    /** Read-write access. */
    VMDKACCESS_READWRITE
} VMDKACCESS, *PVMDKACCESS;
264
265/** Forward declaration for PVMDKIMAGE. */
266typedef struct VMDKIMAGE *PVMDKIMAGE;
267
/**
 * Extents files entry. Used for opening a particular file only once.
 *
 * Entries form a doubly linked list anchored at VMDKIMAGE::pFiles and are
 * reference counted by vmdkFileOpen()/vmdkFileClose().
 */
typedef struct VMDKFILE
{
    /** Pointer to file path. Local copy. */
    const char *pszFilename;
    /** Pointer to base name. Local copy. May be NULL. */
    const char *pszBasename;
    /** File open flags for consistency checking. */
    unsigned fOpen;
    /** Handle for sync/async file abstraction.*/
    PVDIOSTORAGE pStorage;
    /** Reference counter. */
    unsigned uReferences;
    /** Flag whether the file should be deleted on last close.  Sticky: once
     * set by any close request it stays set (see vmdkFileClose). */
    bool fDelete;
    /** Pointer to the image we belong to (for debugging purposes). */
    PVMDKIMAGE pImage;
    /** Pointer to next file descriptor. */
    struct VMDKFILE *pNext;
    /** Pointer to the previous file descriptor. */
    struct VMDKFILE *pPrev;
} VMDKFILE, *PVMDKFILE;
292
/**
 * VMDK extent data structure.
 *
 * One instance per extent line of the descriptor; all sizes/offsets are in
 * 512 byte sectors unless a cb-prefixed byte count.
 */
typedef struct VMDKEXTENT
{
    /** File handle. */
    PVMDKFILE pFile;
    /** Base name of the image extent. */
    const char *pszBasename;
    /** Full name of the image extent. */
    const char *pszFullname;
    /** Number of sectors in this extent. */
    uint64_t cSectors;
    /** Number of sectors per block (grain in VMDK speak). */
    uint64_t cSectorsPerGrain;
    /** Starting sector number of descriptor. */
    uint64_t uDescriptorSector;
    /** Size of descriptor in sectors. */
    uint64_t cDescriptorSectors;
    /** Starting sector number of grain directory. */
    uint64_t uSectorGD;
    /** Starting sector number of redundant grain directory. */
    uint64_t uSectorRGD;
    /** Total number of metadata sectors. */
    uint64_t cOverheadSectors;
    /** Nominal size (i.e. as described by the descriptor) of this extent. */
    uint64_t cNominalSectors;
    /** Sector offset (i.e. as described by the descriptor) of this extent. */
    uint64_t uSectorOffset;
    /** Number of entries in a grain table. */
    uint32_t cGTEntries;
    /** Number of sectors reachable via a grain directory entry. */
    uint32_t cSectorsPerGDE;
    /** Number of entries in the grain directory. */
    uint32_t cGDEntries;
    /** Pointer to the next free sector. Legacy information. Do not use. */
    uint32_t uFreeSector;
    /** Number of this extent in the list of images. */
    uint32_t uExtent;
    /** Pointer to the descriptor (NULL if no descriptor in this extent). */
    char *pDescData;
    /** Pointer to the grain directory. */
    uint32_t *pGD;
    /** Pointer to the redundant grain directory. */
    uint32_t *pRGD;
    /** VMDK version of this extent. 1=1.0/1.1 */
    uint32_t uVersion;
    /** Type of this extent. */
    VMDKETYPE enmType;
    /** Access to this extent. */
    VMDKACCESS enmAccess;
    /** Flag whether this extent is marked as unclean. */
    bool fUncleanShutdown;
    /** Flag whether the metadata in the extent header needs to be updated. */
    bool fMetaDirty;
    /** Flag whether there is a footer in this extent. */
    bool fFooter;
    /** Compression type for this extent (VMDK_COMPRESSION_*). */
    uint16_t uCompression;
    /** Append position for writing new grain. Only for sparse extents. */
    uint64_t uAppendPosition;
    /** Last grain which was accessed. Only for streamOptimized extents. */
    uint32_t uLastGrainAccess;
    /** Starting sector corresponding to the grain buffer. */
    uint32_t uGrainSectorAbs;
    /** Grain number corresponding to the grain buffer. */
    uint32_t uGrain;
    /** Actual size of the compressed data, only valid for reading. */
    uint32_t cbGrainStreamRead;
    /** Size of compressed grain buffer for streamOptimized extents. */
    size_t cbCompGrain;
    /** Compressed grain buffer for streamOptimized extents, with marker.
     * Also used as scratch space by vmdkFileInflateSync/vmdkFileDeflateSync. */
    void *pvCompGrain;
    /** Decompressed grain buffer for streamOptimized extents. */
    void *pvGrain;
    /** Reference to the image in which this extent is used. Do not use this
     * on a regular basis to avoid passing pImage references to functions
     * explicitly. */
    struct VMDKIMAGE *pImage;
} VMDKEXTENT, *PVMDKEXTENT;
373
374/**
375 * Grain table cache size. Allocated per image.
376 */
377#define VMDK_GT_CACHE_SIZE 256
378
379/**
380 * Grain table block size. Smaller than an actual grain table block to allow
381 * more grain table blocks to be cached without having to allocate excessive
382 * amounts of memory for the cache.
383 */
384#define VMDK_GT_CACHELINE_SIZE 128
385
386
387/**
388 * Maximum number of lines in a descriptor file. Not worth the effort of
389 * making it variable. Descriptor files are generally very short (~20 lines),
390 * with the exception of sparse files split in 2G chunks, which need for the
391 * maximum size (almost 2T) exactly 1025 lines for the disk database.
392 */
393#define VMDK_DESCRIPTOR_LINES_MAX 1100U
394
/**
 * Parsed descriptor information. Allows easy access and update of the
 * descriptor (whether separate file or not). Free form text files suck.
 *
 * Line storage is a fixed array of VMDK_DESCRIPTOR_LINES_MAX entries; the
 * aNextLines index chain allows iterating while skipping comment lines.
 */
typedef struct VMDKDESCRIPTOR
{
    /** Line number of first entry of the disk descriptor. */
    unsigned uFirstDesc;
    /** Line number of first entry in the extent description. */
    unsigned uFirstExtent;
    /** Line number of first disk database entry. */
    unsigned uFirstDDB;
    /** Total number of lines (<= VMDK_DESCRIPTOR_LINES_MAX). */
    unsigned cLines;
    /** Total amount of memory available for the descriptor. */
    size_t cbDescAlloc;
    /** Set if descriptor has been changed and not yet written to disk. */
    bool fDirty;
    /** Array of pointers to the data in the descriptor. */
    char *aLines[VMDK_DESCRIPTOR_LINES_MAX];
    /** Array of line indices pointing to the next non-comment line. */
    unsigned aNextLines[VMDK_DESCRIPTOR_LINES_MAX];
} VMDKDESCRIPTOR, *PVMDKDESCRIPTOR;
418
419
/**
 * Cache entry for translating extent/sector to a sector number in that
 * extent.
 *
 * A cache line covers VMDK_GT_CACHELINE_SIZE grain table entries, i.e. a
 * fraction of one on-disk grain table block.
 */
typedef struct VMDKGTCACHEENTRY
{
    /** Extent number for which this entry is valid. */
    uint32_t uExtent;
    /** GT data block number. */
    uint64_t uGTBlock;
    /** Data part of the cache entry. */
    uint32_t aGTData[VMDK_GT_CACHELINE_SIZE];
} VMDKGTCACHEENTRY, *PVMDKGTCACHEENTRY;
433
/**
 * Cache data structure for blocks of grain table entries. For now this is a
 * fixed size direct mapping cache, but this should be adapted to the size of
 * the sparse image and maybe converted to a set-associative cache. The
 * implementation below implements a write-through cache with write allocate.
 */
typedef struct VMDKGTCACHE
{
    /** Cache entries (VMDK_GT_CACHE_SIZE direct-mapped slots). */
    VMDKGTCACHEENTRY aGTCache[VMDK_GT_CACHE_SIZE];
    /** Number of cache entries (currently unused). */
    unsigned cEntries;
} VMDKGTCACHE, *PVMDKGTCACHE;
447
/**
 * Complete VMDK image data structure. Mainly a collection of extents and a few
 * extra global data fields.
 */
typedef struct VMDKIMAGE
{
    /** Image name. */
    const char *pszFilename;
    /** Descriptor file if applicable. */
    PVMDKFILE pFile;

    /** Pointer to the per-disk VD interface list. */
    PVDINTERFACE pVDIfsDisk;
    /** Pointer to the per-image VD interface list. */
    PVDINTERFACE pVDIfsImage;

    /** Error interface. */
    PVDINTERFACEERROR pIfError;
    /** I/O interface. */
    PVDINTERFACEIOINT pIfIo;


    /** Pointer to the image extents. */
    PVMDKEXTENT pExtents;
    /** Number of image extents. */
    unsigned cExtents;
    /** Pointer to the files list, for opening a file referenced multiple
     * times only once (happens mainly with raw partition access).
     * Managed by vmdkFileOpen()/vmdkFileClose(). */
    PVMDKFILE pFiles;

    /**
     * Pointer to an array of segment entries for async I/O.
     * This is an optimization because the task number to submit is not known
     * and allocating/freeing an array in the read/write functions every time
     * is too expensive.
     */
    PRTSGSEG paSegments;
    /** Entries available in the segments array. */
    unsigned cSegments;

    /** Open flags passed by VBoxHD layer. */
    unsigned uOpenFlags;
    /** Image flags defined during creation or determined during open. */
    unsigned uImageFlags;
    /** Total size of the image in bytes. */
    uint64_t cbSize;
    /** Physical geometry of this image. */
    VDGEOMETRY PCHSGeometry;
    /** Logical geometry of this image. */
    VDGEOMETRY LCHSGeometry;
    /** Image UUID. */
    RTUUID ImageUuid;
    /** Image modification UUID. */
    RTUUID ModificationUuid;
    /** Parent image UUID. */
    RTUUID ParentUuid;
    /** Parent image modification UUID. */
    RTUUID ParentModificationUuid;

    /** Pointer to grain table cache, if this image contains sparse extents. */
    PVMDKGTCACHE pGTCache;
    /** Pointer to the descriptor (NULL if no separate descriptor file). */
    char *pDescData;
    /** Allocation size of the descriptor file. */
    size_t cbDescAlloc;
    /** Parsed descriptor file content. */
    VMDKDESCRIPTOR Descriptor;
    /** The static region list. */
    VDREGIONLIST RegionList;
} VMDKIMAGE;
518
519
/** State for the input/output callout of the inflate reader/deflate writer.
 *
 * Shared by vmdkFileInflateHelper() and vmdkFileDeflateHelper().  iOffset is
 * initialized to -1 so the callbacks can recognize the first invocation and
 * deal with the leading RTZIPTYPE byte; afterwards it is the current
 * position within pvCompGrain. */
typedef struct VMDKCOMPRESSIO
{
    /** Image this operation relates to. */
    PVMDKIMAGE pImage;
    /** Current read/write position; -1 until the first callback ran. */
    ssize_t iOffset;
    /** Size of the compressed grain buffer (available data). */
    size_t cbCompGrain;
    /** Pointer to the compressed grain buffer. */
    void *pvCompGrain;
} VMDKCOMPRESSIO;
532
533
/** Tracks async grain allocation. */
typedef struct VMDKGRAINALLOCASYNC
{
    /** Flag whether the allocation failed. */
    bool fIoErr;
    /** Current number of transfers pending.
     * If reached 0 and there is an error the old state is restored. */
    unsigned cIoXfersPending;
    /** Sector number the allocation is for. */
    uint64_t uSector;
    /** Flag whether the grain table needs to be updated. */
    bool fGTUpdateNeeded;
    /** Extent the allocation happens in. */
    PVMDKEXTENT pExtent;
    /** Position of the new grain, required for the grain table update. */
    uint64_t uGrainOffset;
    /** Grain table sector. */
    uint64_t uGTSector;
    /** Backup (redundant) grain table sector. */
    uint64_t uRGTSector;
} VMDKGRAINALLOCASYNC, *PVMDKGRAINALLOCASYNC;
555
/**
 * State information for vmdkRename() and helpers.
 *
 * Keeps copies of names, descriptor and extent state so a failed rename can
 * be rolled back.
 */
typedef struct VMDKRENAMESTATE
{
    /** Array of old filenames. */
    char **apszOldName;
    /** Array of new filenames. */
    char **apszNewName;
    /** Array of new lines in the extent descriptor. */
    char **apszNewLines;
    /** Name of the old descriptor file if not a sparse image. */
    char *pszOldDescName;
    /** Flag whether we called vmdkFreeImage(). */
    bool fImageFreed;
    /** Flag whether the descriptor is embedded in the image (sparse) or
     * in a separate file. */
    bool fEmbeddedDesc;
    /** Number of extents in the image. */
    unsigned cExtents;
    /** New base filename. */
    char *pszNewBaseName;
    /** The old base filename. */
    char *pszOldBaseName;
    /** New full filename. */
    char *pszNewFullName;
    /** Old full filename. */
    char *pszOldFullName;
    /** The old image name. */
    const char *pszOldImageName;
    /** Copy of the original VMDK descriptor. */
    VMDKDESCRIPTOR DescriptorCopy;
    /** Copy of the extent state for sparse images. */
    VMDKEXTENT ExtentCopy;
} VMDKRENAMESTATE;
/** Pointer to a VMDK rename state. */
typedef VMDKRENAMESTATE *PVMDKRENAMESTATE;
593
594
595/*********************************************************************************************************************************
596* Static Variables *
597*********************************************************************************************************************************/
598
/** NULL-terminated array of supported file extensions. */
static const VDFILEEXTENSION s_aVmdkFileExtensions[] =
{
    {"vmdk", VDTYPE_HDD},
    {NULL, VDTYPE_INVALID}   /* terminator */
};
605
/** NULL-terminated array of configuration options. */
static const VDCONFIGINFO s_aVmdkConfigInfo[] =
{
    /* Options for VMDK raw disks */
    { "RawDrive", NULL, VDCFGVALUETYPE_STRING, 0 },
    { "Partitions", NULL, VDCFGVALUETYPE_STRING, 0 },
    { "BootSector", NULL, VDCFGVALUETYPE_BYTES, 0 },
    { "Relative", NULL, VDCFGVALUETYPE_INTEGER, 0 },

    /* End of options list */
    { NULL, NULL, VDCFGVALUETYPE_INTEGER, 0 }
};
618
619
620/*********************************************************************************************************************************
621* Internal Functions *
622*********************************************************************************************************************************/
623
624static void vmdkFreeStreamBuffers(PVMDKEXTENT pExtent);
625static int vmdkFreeExtentData(PVMDKIMAGE pImage, PVMDKEXTENT pExtent,
626 bool fDelete);
627
628static int vmdkCreateExtents(PVMDKIMAGE pImage, unsigned cExtents);
629static int vmdkFlushImage(PVMDKIMAGE pImage, PVDIOCTX pIoCtx);
630static int vmdkSetImageComment(PVMDKIMAGE pImage, const char *pszComment);
631static int vmdkFreeImage(PVMDKIMAGE pImage, bool fDelete, bool fFlush);
632
633static DECLCALLBACK(int) vmdkAllocGrainComplete(void *pBackendData, PVDIOCTX pIoCtx,
634 void *pvUser, int rcReq);
635
636/**
637 * Internal: open a file (using a file descriptor cache to ensure each file
638 * is only opened once - anything else can cause locking problems).
639 */
640static int vmdkFileOpen(PVMDKIMAGE pImage, PVMDKFILE *ppVmdkFile,
641 const char *pszBasename, const char *pszFilename, uint32_t fOpen)
642{
643 int rc = VINF_SUCCESS;
644 PVMDKFILE pVmdkFile;
645
646 for (pVmdkFile = pImage->pFiles;
647 pVmdkFile != NULL;
648 pVmdkFile = pVmdkFile->pNext)
649 {
650 if (!strcmp(pszFilename, pVmdkFile->pszFilename))
651 {
652 Assert(fOpen == pVmdkFile->fOpen);
653 pVmdkFile->uReferences++;
654
655 *ppVmdkFile = pVmdkFile;
656
657 return rc;
658 }
659 }
660
661 /* If we get here, there's no matching entry in the cache. */
662 pVmdkFile = (PVMDKFILE)RTMemAllocZ(sizeof(VMDKFILE));
663 if (!pVmdkFile)
664 {
665 *ppVmdkFile = NULL;
666 return VERR_NO_MEMORY;
667 }
668
669 pVmdkFile->pszFilename = RTStrDup(pszFilename);
670 if (!pVmdkFile->pszFilename)
671 {
672 RTMemFree(pVmdkFile);
673 *ppVmdkFile = NULL;
674 return VERR_NO_MEMORY;
675 }
676
677 if (pszBasename)
678 {
679 pVmdkFile->pszBasename = RTStrDup(pszBasename);
680 if (!pVmdkFile->pszBasename)
681 {
682 RTStrFree((char *)(void *)pVmdkFile->pszFilename);
683 RTMemFree(pVmdkFile);
684 *ppVmdkFile = NULL;
685 return VERR_NO_MEMORY;
686 }
687 }
688
689 pVmdkFile->fOpen = fOpen;
690
691 rc = vdIfIoIntFileOpen(pImage->pIfIo, pszFilename, fOpen,
692 &pVmdkFile->pStorage);
693 if (RT_SUCCESS(rc))
694 {
695 pVmdkFile->uReferences = 1;
696 pVmdkFile->pImage = pImage;
697 pVmdkFile->pNext = pImage->pFiles;
698 if (pImage->pFiles)
699 pImage->pFiles->pPrev = pVmdkFile;
700 pImage->pFiles = pVmdkFile;
701 *ppVmdkFile = pVmdkFile;
702 }
703 else
704 {
705 RTStrFree((char *)(void *)pVmdkFile->pszFilename);
706 RTMemFree(pVmdkFile);
707 *ppVmdkFile = NULL;
708 }
709
710 return rc;
711}
712
/**
 * Internal: close a file, updating the file descriptor cache.
 *
 * Drops one reference from the cached entry; on the last reference the
 * storage handle is closed, the entry is unlinked from the image's file
 * list and freed.  When a basename is present, deletion is refused unless
 * it is a plain name (no path component) with one of the well-known image
 * extensions — a guard against deleting raw devices or unrelated files.
 *
 * @returns VBox status code (first error encountered wins).
 * @param   pImage      The owning image instance.
 * @param   ppVmdkFile  The file entry to release; always set to NULL.
 * @param   fDelete     Whether the file should be deleted on last close.
 */
static int vmdkFileClose(PVMDKIMAGE pImage, PVMDKFILE *ppVmdkFile, bool fDelete)
{
    int rc = VINF_SUCCESS;
    PVMDKFILE pVmdkFile = *ppVmdkFile;

    AssertPtr(pVmdkFile);

    /* A delete request is sticky: it survives until the last reference. */
    pVmdkFile->fDelete |= fDelete;
    Assert(pVmdkFile->uReferences);
    pVmdkFile->uReferences--;
    if (pVmdkFile->uReferences == 0)
    {
        PVMDKFILE pPrev;
        PVMDKFILE pNext;

        /* Unchain the element from the list. */
        pPrev = pVmdkFile->pPrev;
        pNext = pVmdkFile->pNext;

        if (pNext)
            pNext->pPrev = pPrev;
        if (pPrev)
            pPrev->pNext = pNext;
        else
            pImage->pFiles = pNext;

        rc = vdIfIoIntFileClose(pImage->pIfIo, pVmdkFile->pStorage);

        /* Only honour deletion when the basename (if any) passes the
         * safety checks below; otherwise log the refusal. */
        bool fFileDel = pVmdkFile->fDelete;
        if (   pVmdkFile->pszBasename
            && fFileDel)
        {
            const char *pszSuffix = RTPathSuffix(pVmdkFile->pszBasename);
            if (   RTPathHasPath(pVmdkFile->pszBasename)
                || !pszSuffix
                || (   strcmp(pszSuffix, ".vmdk")
                    && strcmp(pszSuffix, ".bin")
                    && strcmp(pszSuffix, ".img")))
                fFileDel = false;
        }

        if (fFileDel)
        {
            int rc2 = vdIfIoIntFileDelete(pImage->pIfIo, pVmdkFile->pszFilename);
            if (RT_SUCCESS(rc))
                rc = rc2;
        }
        else if (pVmdkFile->fDelete)
            LogRel(("VMDK: Denying deletion of %s\n", pVmdkFile->pszBasename));
        RTStrFree((char *)(void *)pVmdkFile->pszFilename);
        if (pVmdkFile->pszBasename)
            RTStrFree((char *)(void *)pVmdkFile->pszBasename);
        RTMemFree(pVmdkFile);
    }

    *ppVmdkFile = NULL;
    return rc;
}
774
775/*#define VMDK_USE_BLOCK_DECOMP_API - test and enable */
776#ifndef VMDK_USE_BLOCK_DECOMP_API
777static DECLCALLBACK(int) vmdkFileInflateHelper(void *pvUser, void *pvBuf, size_t cbBuf, size_t *pcbBuf)
778{
779 VMDKCOMPRESSIO *pInflateState = (VMDKCOMPRESSIO *)pvUser;
780 size_t cbInjected = 0;
781
782 Assert(cbBuf);
783 if (pInflateState->iOffset < 0)
784 {
785 *(uint8_t *)pvBuf = RTZIPTYPE_ZLIB;
786 pvBuf = (uint8_t *)pvBuf + 1;
787 cbBuf--;
788 cbInjected = 1;
789 pInflateState->iOffset = RT_UOFFSETOF(VMDKMARKER, uType);
790 }
791 if (!cbBuf)
792 {
793 if (pcbBuf)
794 *pcbBuf = cbInjected;
795 return VINF_SUCCESS;
796 }
797 cbBuf = RT_MIN(cbBuf, pInflateState->cbCompGrain - pInflateState->iOffset);
798 memcpy(pvBuf,
799 (uint8_t *)pInflateState->pvCompGrain + pInflateState->iOffset,
800 cbBuf);
801 pInflateState->iOffset += cbBuf;
802 Assert(pcbBuf);
803 *pcbBuf = cbBuf + cbInjected;
804 return VINF_SUCCESS;
805}
806#endif
807
/**
 * Internal: read from a file and inflate the compressed data,
 * distinguishing between async and normal operation
 *
 * Reads a compressed grain (preceded by a VMDKMARKER) from the extent file
 * at @a uOffset and decompresses it into @a pvBuf, using the extent's
 * pvCompGrain buffer as scratch space.
 *
 * @returns VBox status code; VERR_VD_VMDK_INVALID_FORMAT on corrupt or
 *          short grains.
 * @param   pImage          The image (for the I/O and error interfaces).
 * @param   pExtent         The streamOptimized extent to read from.
 * @param   uOffset         Byte offset of the grain marker in the file.
 * @param   pvBuf           Where to store the decompressed data.
 * @param   cbToRead        Expected size of the decompressed grain in bytes.
 * @param   pcvMarker       Optional marker already read by the caller
 *                          (partially endian-converted, see below); NULL to
 *                          read the marker from the file here.
 * @param   puLBA           Where to return the grain's starting LBA, optional.
 * @param   pcbMarkerData   Where to return the sector-aligned on-disk size of
 *                          marker plus compressed data, optional.
 */
DECLINLINE(int) vmdkFileInflateSync(PVMDKIMAGE pImage, PVMDKEXTENT pExtent,
                                    uint64_t uOffset, void *pvBuf,
                                    size_t cbToRead, const void *pcvMarker,
                                    uint64_t *puLBA, uint32_t *pcbMarkerData)
{
    int rc;
#ifndef VMDK_USE_BLOCK_DECOMP_API
    PRTZIPDECOMP pZip = NULL;
#endif
    VMDKMARKER *pMarker = (VMDKMARKER *)pExtent->pvCompGrain;
    size_t cbCompSize, cbActuallyRead;

    if (!pcvMarker)
    {
        /* Read just the marker header (uSector + cbSize) from the file. */
        rc = vdIfIoIntFileReadSync(pImage->pIfIo, pExtent->pFile->pStorage,
                                   uOffset, pMarker, RT_UOFFSETOF(VMDKMARKER, uType));
        if (RT_FAILURE(rc))
            return rc;
    }
    else
    {
        memcpy(pMarker, pcvMarker, RT_UOFFSETOF(VMDKMARKER, uType));
        /* pcvMarker endianness has already been partially transformed, fix it */
        pMarker->uSector = RT_H2LE_U64(pMarker->uSector);
        pMarker->cbSize = RT_H2LE_U32(pMarker->cbSize);
    }

    cbCompSize = RT_LE2H_U32(pMarker->cbSize);
    if (cbCompSize == 0)
    {
        /* cbSize == 0 is not valid for a compressed grain here. */
        AssertMsgFailed(("VMDK: corrupted marker\n"));
        return VERR_VD_VMDK_INVALID_FORMAT;
    }

    /* Sanity check - the expansion ratio should be much less than 2. */
    Assert(cbCompSize < 2 * cbToRead);
    if (cbCompSize >= 2 * cbToRead)
        return VERR_VD_VMDK_INVALID_FORMAT;

    /* Compressed grain marker. Data follows immediately. */
    rc = vdIfIoIntFileReadSync(pImage->pIfIo, pExtent->pFile->pStorage,
                               uOffset + RT_UOFFSETOF(VMDKMARKER, uType),
                               (uint8_t *)pExtent->pvCompGrain
                               + RT_UOFFSETOF(VMDKMARKER, uType),
                               RT_ALIGN_Z(  cbCompSize
                                          + RT_UOFFSETOF(VMDKMARKER, uType),
                                          512)
                               - RT_UOFFSETOF(VMDKMARKER, uType));
    if (RT_FAILURE(rc))
        return vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: Failed to read data from compressed image '%s'"), pExtent->pszFullname);

    if (puLBA)
        *puLBA = RT_LE2H_U64(pMarker->uSector);
    if (pcbMarkerData)
        *pcbMarkerData = RT_ALIGN(  cbCompSize
                                  + RT_UOFFSETOF(VMDKMARKER, uType),
                                  512);

#ifdef VMDK_USE_BLOCK_DECOMP_API
    rc = RTZipBlockDecompress(RTZIPTYPE_ZLIB, 0 /*fFlags*/,
                              pExtent->pvCompGrain, cbCompSize + RT_UOFFSETOF(VMDKMARKER, uType), NULL,
                              pvBuf, cbToRead, &cbActuallyRead);
#else
    VMDKCOMPRESSIO InflateState;
    InflateState.pImage = pImage;
    InflateState.iOffset = -1; /* -1 triggers the RTZIPTYPE byte injection in the helper */
    InflateState.cbCompGrain = cbCompSize + RT_UOFFSETOF(VMDKMARKER, uType);
    InflateState.pvCompGrain = pExtent->pvCompGrain;

    rc = RTZipDecompCreate(&pZip, &InflateState, vmdkFileInflateHelper);
    if (RT_FAILURE(rc))
        return rc;
    rc = RTZipDecompress(pZip, pvBuf, cbToRead, &cbActuallyRead);
    RTZipDecompDestroy(pZip);
#endif /* !VMDK_USE_BLOCK_DECOMP_API */
    if (RT_FAILURE(rc))
    {
        if (rc == VERR_ZIP_CORRUPTED)
            rc = vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: Compressed image is corrupted '%s'"), pExtent->pszFullname);
        return rc;
    }
    /* A short inflate result means the grain does not decompress to the
     * expected size -> treat as invalid format. */
    if (cbActuallyRead != cbToRead)
        rc = VERR_VD_VMDK_INVALID_FORMAT;
    return rc;
}
897
898static DECLCALLBACK(int) vmdkFileDeflateHelper(void *pvUser, const void *pvBuf, size_t cbBuf)
899{
900 VMDKCOMPRESSIO *pDeflateState = (VMDKCOMPRESSIO *)pvUser;
901
902 Assert(cbBuf);
903 if (pDeflateState->iOffset < 0)
904 {
905 pvBuf = (const uint8_t *)pvBuf + 1;
906 cbBuf--;
907 pDeflateState->iOffset = RT_UOFFSETOF(VMDKMARKER, uType);
908 }
909 if (!cbBuf)
910 return VINF_SUCCESS;
911 if (pDeflateState->iOffset + cbBuf > pDeflateState->cbCompGrain)
912 return VERR_BUFFER_OVERFLOW;
913 memcpy((uint8_t *)pDeflateState->pvCompGrain + pDeflateState->iOffset,
914 pvBuf, cbBuf);
915 pDeflateState->iOffset += cbBuf;
916 return VINF_SUCCESS;
917}
918
/**
 * Internal: deflate the uncompressed data and write to a file,
 * distinguishing between async and normal operation
 *
 * Compresses @a cbToWrite bytes from @a pvBuf into the extent's pvCompGrain
 * buffer behind a VMDKMARKER carrying the LBA and compressed size, pads the
 * result with zeroes to a full 512 byte sector and writes it at @a uOffset.
 *
 * @returns VBox status code.
 * @param   pImage          The image (for the I/O interface).
 * @param   pExtent         The streamOptimized extent to write to.
 * @param   uOffset         Byte offset in the file to write at.
 * @param   pvBuf           The uncompressed grain data.
 * @param   cbToWrite       Number of bytes to compress.
 * @param   uLBA            Starting sector of the grain, stored in the marker.
 * @param   pcbMarkerData   Where to return the number of bytes written
 *                          (marker + compressed data, sector aligned), optional.
 */
DECLINLINE(int) vmdkFileDeflateSync(PVMDKIMAGE pImage, PVMDKEXTENT pExtent,
                                    uint64_t uOffset, const void *pvBuf,
                                    size_t cbToWrite, uint64_t uLBA,
                                    uint32_t *pcbMarkerData)
{
    int rc;
    PRTZIPCOMP pZip = NULL;
    VMDKCOMPRESSIO DeflateState;

    DeflateState.pImage = pImage;
    DeflateState.iOffset = -1; /* -1 lets the helper strip the RTZIPTYPE byte */
    DeflateState.cbCompGrain = pExtent->cbCompGrain;
    DeflateState.pvCompGrain = pExtent->pvCompGrain;

    rc = RTZipCompCreate(&pZip, &DeflateState, vmdkFileDeflateHelper,
                         RTZIPTYPE_ZLIB, RTZIPLEVEL_DEFAULT);
    if (RT_FAILURE(rc))
        return rc;
    rc = RTZipCompress(pZip, pvBuf, cbToWrite);
    if (RT_SUCCESS(rc))
        rc = RTZipCompFinish(pZip);
    RTZipCompDestroy(pZip);
    if (RT_SUCCESS(rc))
    {
        /* After compression iOffset is the total size of marker header plus
         * compressed payload inside pvCompGrain. */
        Assert(   DeflateState.iOffset > 0
               && (size_t)DeflateState.iOffset <= DeflateState.cbCompGrain);

        /* pad with zeroes to get to a full sector size */
        uint32_t uSize = DeflateState.iOffset;
        if (uSize % 512)
        {
            uint32_t uSizeAlign = RT_ALIGN(uSize, 512);
            memset((uint8_t *)pExtent->pvCompGrain + uSize, '\0',
                   uSizeAlign - uSize);
            uSize = uSizeAlign;
        }

        if (pcbMarkerData)
            *pcbMarkerData = uSize;

        /* Compressed grain marker. Data follows immediately. */
        VMDKMARKER *pMarker = (VMDKMARKER *)pExtent->pvCompGrain;
        pMarker->uSector = RT_H2LE_U64(uLBA);
        pMarker->cbSize = RT_H2LE_U32(  DeflateState.iOffset
                                      - RT_UOFFSETOF(VMDKMARKER, uType));
        rc = vdIfIoIntFileWriteSync(pImage->pIfIo, pExtent->pFile->pStorage,
                                    uOffset, pMarker, uSize);
        if (RT_FAILURE(rc))
            return rc;
    }
    return rc;
}
975
976
977/**
978 * Internal: check if all files are closed, prevent leaking resources.
979 */
980static int vmdkFileCheckAllClose(PVMDKIMAGE pImage)
981{
982 int rc = VINF_SUCCESS, rc2;
983 PVMDKFILE pVmdkFile;
984
985 Assert(pImage->pFiles == NULL);
986 for (pVmdkFile = pImage->pFiles;
987 pVmdkFile != NULL;
988 pVmdkFile = pVmdkFile->pNext)
989 {
990 LogRel(("VMDK: leaking reference to file \"%s\"\n",
991 pVmdkFile->pszFilename));
992 pImage->pFiles = pVmdkFile->pNext;
993
994 rc2 = vmdkFileClose(pImage, &pVmdkFile, pVmdkFile->fDelete);
995
996 if (RT_SUCCESS(rc))
997 rc = rc2;
998 }
999 return rc;
1000}
1001
1002/**
1003 * Internal: truncate a string (at a UTF8 code point boundary) and encode the
1004 * critical non-ASCII characters.
1005 */
1006static char *vmdkEncodeString(const char *psz)
1007{
1008 char szEnc[VMDK_ENCODED_COMMENT_MAX + 3];
1009 char *pszDst = szEnc;
1010
1011 AssertPtr(psz);
1012
1013 for (; *psz; psz = RTStrNextCp(psz))
1014 {
1015 char *pszDstPrev = pszDst;
1016 RTUNICP Cp = RTStrGetCp(psz);
1017 if (Cp == '\\')
1018 {
1019 pszDst = RTStrPutCp(pszDst, Cp);
1020 pszDst = RTStrPutCp(pszDst, Cp);
1021 }
1022 else if (Cp == '\n')
1023 {
1024 pszDst = RTStrPutCp(pszDst, '\\');
1025 pszDst = RTStrPutCp(pszDst, 'n');
1026 }
1027 else if (Cp == '\r')
1028 {
1029 pszDst = RTStrPutCp(pszDst, '\\');
1030 pszDst = RTStrPutCp(pszDst, 'r');
1031 }
1032 else
1033 pszDst = RTStrPutCp(pszDst, Cp);
1034 if (pszDst - szEnc >= VMDK_ENCODED_COMMENT_MAX - 1)
1035 {
1036 pszDst = pszDstPrev;
1037 break;
1038 }
1039 }
1040 *pszDst = '\0';
1041 return RTStrDup(szEnc);
1042}
1043
1044/**
1045 * Internal: decode a string and store it into the specified string.
1046 */
1047static int vmdkDecodeString(const char *pszEncoded, char *psz, size_t cb)
1048{
1049 int rc = VINF_SUCCESS;
1050 char szBuf[4];
1051
1052 if (!cb)
1053 return VERR_BUFFER_OVERFLOW;
1054
1055 AssertPtr(psz);
1056
1057 for (; *pszEncoded; pszEncoded = RTStrNextCp(pszEncoded))
1058 {
1059 char *pszDst = szBuf;
1060 RTUNICP Cp = RTStrGetCp(pszEncoded);
1061 if (Cp == '\\')
1062 {
1063 pszEncoded = RTStrNextCp(pszEncoded);
1064 RTUNICP CpQ = RTStrGetCp(pszEncoded);
1065 if (CpQ == 'n')
1066 RTStrPutCp(pszDst, '\n');
1067 else if (CpQ == 'r')
1068 RTStrPutCp(pszDst, '\r');
1069 else if (CpQ == '\0')
1070 {
1071 rc = VERR_VD_VMDK_INVALID_HEADER;
1072 break;
1073 }
1074 else
1075 RTStrPutCp(pszDst, CpQ);
1076 }
1077 else
1078 pszDst = RTStrPutCp(pszDst, Cp);
1079
1080 /* Need to leave space for terminating NUL. */
1081 if ((size_t)(pszDst - szBuf) + 1 >= cb)
1082 {
1083 rc = VERR_BUFFER_OVERFLOW;
1084 break;
1085 }
1086 memcpy(psz, szBuf, pszDst - szBuf);
1087 psz += pszDst - szBuf;
1088 }
1089 *psz = '\0';
1090 return rc;
1091}
1092
1093/**
1094 * Internal: free all buffers associated with grain directories.
1095 */
1096static void vmdkFreeGrainDirectory(PVMDKEXTENT pExtent)
1097{
1098 if (pExtent->pGD)
1099 {
1100 RTMemFree(pExtent->pGD);
1101 pExtent->pGD = NULL;
1102 }
1103 if (pExtent->pRGD)
1104 {
1105 RTMemFree(pExtent->pRGD);
1106 pExtent->pRGD = NULL;
1107 }
1108}
1109
1110/**
1111 * Internal: allocate the compressed/uncompressed buffers for streamOptimized
1112 * images.
1113 */
1114static int vmdkAllocStreamBuffers(PVMDKIMAGE pImage, PVMDKEXTENT pExtent)
1115{
1116 int rc = VINF_SUCCESS;
1117
1118 if (pImage->uImageFlags & VD_VMDK_IMAGE_FLAGS_STREAM_OPTIMIZED)
1119 {
1120 /* streamOptimized extents need a compressed grain buffer, which must
1121 * be big enough to hold uncompressible data (which needs ~8 bytes
1122 * more than the uncompressed data), the marker and padding. */
1123 pExtent->cbCompGrain = RT_ALIGN_Z( VMDK_SECTOR2BYTE(pExtent->cSectorsPerGrain)
1124 + 8 + sizeof(VMDKMARKER), 512);
1125 pExtent->pvCompGrain = RTMemAlloc(pExtent->cbCompGrain);
1126 if (RT_LIKELY(pExtent->pvCompGrain))
1127 {
1128 /* streamOptimized extents need a decompressed grain buffer. */
1129 pExtent->pvGrain = RTMemAlloc(VMDK_SECTOR2BYTE(pExtent->cSectorsPerGrain));
1130 if (!pExtent->pvGrain)
1131 rc = VERR_NO_MEMORY;
1132 }
1133 else
1134 rc = VERR_NO_MEMORY;
1135 }
1136
1137 if (RT_FAILURE(rc))
1138 vmdkFreeStreamBuffers(pExtent);
1139 return rc;
1140}
1141
1142/**
1143 * Internal: allocate all buffers associated with grain directories.
1144 */
1145static int vmdkAllocGrainDirectory(PVMDKIMAGE pImage, PVMDKEXTENT pExtent)
1146{
1147 RT_NOREF1(pImage);
1148 int rc = VINF_SUCCESS;
1149 size_t cbGD = pExtent->cGDEntries * sizeof(uint32_t);
1150
1151 pExtent->pGD = (uint32_t *)RTMemAllocZ(cbGD);
1152 if (RT_LIKELY(pExtent->pGD))
1153 {
1154 if (pExtent->uSectorRGD)
1155 {
1156 pExtent->pRGD = (uint32_t *)RTMemAllocZ(cbGD);
1157 if (RT_UNLIKELY(!pExtent->pRGD))
1158 rc = VERR_NO_MEMORY;
1159 }
1160 }
1161 else
1162 rc = VERR_NO_MEMORY;
1163
1164 if (RT_FAILURE(rc))
1165 vmdkFreeGrainDirectory(pExtent);
1166 return rc;
1167}
1168
1169/**
1170 * Converts the grain directory from little to host endianess.
1171 *
1172 * @param pGD The grain directory.
1173 * @param cGDEntries Number of entries in the grain directory to convert.
1174 */
1175DECLINLINE(void) vmdkGrainDirectoryConvToHost(uint32_t *pGD, uint32_t cGDEntries)
1176{
1177 uint32_t *pGDTmp = pGD;
1178
1179 for (uint32_t i = 0; i < cGDEntries; i++, pGDTmp++)
1180 *pGDTmp = RT_LE2H_U32(*pGDTmp);
1181}
1182
/**
 * Read the grain directory and allocated grain tables verifying them against
 * their back up copies if available.
 *
 * @returns VBox status code.
 * @param   pImage   Image instance data.
 * @param   pExtent  The VMDK extent.
 */
static int vmdkReadGrainDirectory(PVMDKIMAGE pImage, PVMDKEXTENT pExtent)
{
    int rc = VINF_SUCCESS;
    size_t cbGD = pExtent->cGDEntries * sizeof(uint32_t);

    /* Only sparse hosted extents with the directories at a fixed location are
     * handled here; layouts with the GD at the end are read elsewhere. */
    AssertReturn((   pExtent->enmType == VMDKETYPE_HOSTED_SPARSE
                  && pExtent->uSectorGD != VMDK_GD_AT_END
                  && pExtent->uSectorRGD != VMDK_GD_AT_END), VERR_INTERNAL_ERROR);

    rc = vmdkAllocGrainDirectory(pImage, pExtent);
    if (RT_SUCCESS(rc))
    {
        /* The VMDK 1.1 spec seems to talk about compressed grain directories,
         * but in reality they are not compressed. */
        rc = vdIfIoIntFileReadSync(pImage->pIfIo, pExtent->pFile->pStorage,
                                   VMDK_SECTOR2BYTE(pExtent->uSectorGD),
                                   pExtent->pGD, cbGD);
        if (RT_SUCCESS(rc))
        {
            vmdkGrainDirectoryConvToHost(pExtent->pGD, pExtent->cGDEntries);

            /* Cross-check against the redundant copy only when one exists and
             * the caller has not disabled consistency checking. */
            if (   pExtent->uSectorRGD
                && !(pImage->uOpenFlags & VD_OPEN_FLAGS_SKIP_CONSISTENCY_CHECKS))
            {
                /* The VMDK 1.1 spec seems to talk about compressed grain directories,
                 * but in reality they are not compressed. */
                rc = vdIfIoIntFileReadSync(pImage->pIfIo, pExtent->pFile->pStorage,
                                           VMDK_SECTOR2BYTE(pExtent->uSectorRGD),
                                           pExtent->pRGD, cbGD);
                if (RT_SUCCESS(rc))
                {
                    vmdkGrainDirectoryConvToHost(pExtent->pRGD, pExtent->cGDEntries);

                    /* Check grain table and redundant grain table for consistency. */
                    size_t cbGT = pExtent->cGTEntries * sizeof(uint32_t);
                    size_t cbGTBuffers = cbGT; /* Start with space for one GT. */
                    size_t cbGTBuffersMax = _1M;

                    uint32_t *pTmpGT1 = (uint32_t *)RTMemAlloc(cbGTBuffers);
                    uint32_t *pTmpGT2 = (uint32_t *)RTMemAlloc(cbGTBuffers);

                    /* Note: no early exit on allocation failure. The loop below
                     * still walks the directory entries, but all reads and the
                     * buffer-growing code are guarded by RT_SUCCESS(rc), so
                     * VERR_NO_MEMORY (or a later consistency error) is what
                     * gets returned after the loop. */
                    if (   !pTmpGT1
                        || !pTmpGT2)
                        rc = VERR_NO_MEMORY;

                    size_t i = 0;
                    uint32_t *pGDTmp = pExtent->pGD;
                    uint32_t *pRGDTmp = pExtent->pRGD;

                    /* Loop through all entries. */
                    while (i < pExtent->cGDEntries)
                    {
                        uint32_t uGTStart = *pGDTmp;
                        uint32_t uRGTStart = *pRGDTmp;
                        size_t cbGTRead = cbGT;

                        /* If no grain table is allocated skip the entry. */
                        if (*pGDTmp == 0 && *pRGDTmp == 0)
                        {
                            i++;
                            continue;
                        }

                        if (*pGDTmp == 0 || *pRGDTmp == 0 || *pGDTmp == *pRGDTmp)
                        {
                            /* Just one grain directory entry refers to a not yet allocated
                             * grain table or both grain directory copies refer to the same
                             * grain table. Not allowed. */
                            rc = vdIfError(pImage->pIfError, VERR_VD_VMDK_INVALID_HEADER, RT_SRC_POS,
                                           N_("VMDK: inconsistent references to grain directory in '%s'"), pExtent->pszFullname);
                            break;
                        }

                        i++;
                        pGDTmp++;
                        pRGDTmp++;

                        /*
                         * Read a few tables at once if adjacent to decrease the number
                         * of I/O requests. Read at maximum 1MB at once.
                         */
                        while (   i < pExtent->cGDEntries
                               && cbGTRead < cbGTBuffersMax)
                        {
                            /* If no grain table is allocated skip the entry. */
                            if (*pGDTmp == 0 && *pRGDTmp == 0)
                            {
                                i++;
                                continue;
                            }

                            if (*pGDTmp == 0 || *pRGDTmp == 0 || *pGDTmp == *pRGDTmp)
                            {
                                /* Just one grain directory entry refers to a not yet allocated
                                 * grain table or both grain directory copies refer to the same
                                 * grain table. Not allowed. */
                                rc = vdIfError(pImage->pIfError, VERR_VD_VMDK_INVALID_HEADER, RT_SRC_POS,
                                               N_("VMDK: inconsistent references to grain directory in '%s'"), pExtent->pszFullname);
                                break;
                            }

                            /* Check that the start offsets are adjacent.*/
                            if (   VMDK_SECTOR2BYTE(uGTStart) + cbGTRead != VMDK_SECTOR2BYTE(*pGDTmp)
                                || VMDK_SECTOR2BYTE(uRGTStart) + cbGTRead != VMDK_SECTOR2BYTE(*pRGDTmp))
                                break;

                            i++;
                            pGDTmp++;
                            pRGDTmp++;
                            cbGTRead += cbGT;
                        }

                        /* Increase buffers if required. */
                        if (   RT_SUCCESS(rc)
                            && cbGTBuffers < cbGTRead)
                        {
                            uint32_t *pTmp;
                            pTmp = (uint32_t *)RTMemRealloc(pTmpGT1, cbGTRead);
                            if (pTmp)
                            {
                                pTmpGT1 = pTmp;
                                pTmp = (uint32_t *)RTMemRealloc(pTmpGT2, cbGTRead);
                                if (pTmp)
                                    pTmpGT2 = pTmp;
                                else
                                    rc = VERR_NO_MEMORY;
                            }
                            else
                                rc = VERR_NO_MEMORY;

                            /* NOTE(review): on a successful realloc cbGTBuffers is
                             * not updated to cbGTRead, so this branch can realloc
                             * again on later iterations and the rollback below may
                             * use a stale size -- looks unintended, verify. */
                            if (rc == VERR_NO_MEMORY)
                            {
                                /* Reset to the old values. */
                                rc = VINF_SUCCESS;
                                i -= cbGTRead / cbGT;
                                cbGTRead = cbGT;

                                /* Don't try to increase the buffer again in the next run. */
                                cbGTBuffersMax = cbGTBuffers;
                            }
                        }

                        if (RT_SUCCESS(rc))
                        {
                            /* The VMDK 1.1 spec seems to talk about compressed grain tables,
                             * but in reality they are not compressed. */
                            rc = vdIfIoIntFileReadSync(pImage->pIfIo, pExtent->pFile->pStorage,
                                                       VMDK_SECTOR2BYTE(uGTStart),
                                                       pTmpGT1, cbGTRead);
                            if (RT_FAILURE(rc))
                            {
                                rc = vdIfError(pImage->pIfError, rc, RT_SRC_POS,
                                               N_("VMDK: error reading grain table in '%s'"), pExtent->pszFullname);
                                break;
                            }
                            /* The VMDK 1.1 spec seems to talk about compressed grain tables,
                             * but in reality they are not compressed. */
                            rc = vdIfIoIntFileReadSync(pImage->pIfIo, pExtent->pFile->pStorage,
                                                       VMDK_SECTOR2BYTE(uRGTStart),
                                                       pTmpGT2, cbGTRead);
                            if (RT_FAILURE(rc))
                            {
                                rc = vdIfError(pImage->pIfError, rc, RT_SRC_POS,
                                               N_("VMDK: error reading backup grain table in '%s'"), pExtent->pszFullname);
                                break;
                            }
                            if (memcmp(pTmpGT1, pTmpGT2, cbGTRead))
                            {
                                rc = vdIfError(pImage->pIfError, VERR_VD_VMDK_INVALID_HEADER, RT_SRC_POS,
                                               N_("VMDK: inconsistency between grain table and backup grain table in '%s'"), pExtent->pszFullname);
                                break;
                            }
                        }
                    } /* while (i < pExtent->cGDEntries) */

                    /** @todo figure out what to do for unclean VMDKs. */
                    if (pTmpGT1)
                        RTMemFree(pTmpGT1);
                    if (pTmpGT2)
                        RTMemFree(pTmpGT2);
                }
                else
                    rc = vdIfError(pImage->pIfError, rc, RT_SRC_POS,
                                   N_("VMDK: could not read redundant grain directory in '%s'"), pExtent->pszFullname);
            }
        }
        else
            rc = vdIfError(pImage->pIfError, rc, RT_SRC_POS,
                           N_("VMDK: could not read grain directory in '%s': %Rrc"), pExtent->pszFullname, rc);
    }

    if (RT_FAILURE(rc))
        vmdkFreeGrainDirectory(pExtent);
    return rc;
}
1386
1387/**
1388 * Creates a new grain directory for the given extent at the given start sector.
1389 *
1390 * @returns VBox status code.
1391 * @param pImage Image instance data.
1392 * @param pExtent The VMDK extent.
1393 * @param uStartSector Where the grain directory should be stored in the image.
1394 * @param fPreAlloc Flag whether to pre allocate the grain tables at this point.
1395 */
1396static int vmdkCreateGrainDirectory(PVMDKIMAGE pImage, PVMDKEXTENT pExtent,
1397 uint64_t uStartSector, bool fPreAlloc)
1398{
1399 int rc = VINF_SUCCESS;
1400 unsigned i;
1401 size_t cbGD = pExtent->cGDEntries * sizeof(uint32_t);
1402 size_t cbGDRounded = RT_ALIGN_64(cbGD, 512);
1403 size_t cbGTRounded;
1404 uint64_t cbOverhead;
1405
1406 if (fPreAlloc)
1407 {
1408 cbGTRounded = RT_ALIGN_64(pExtent->cGDEntries * pExtent->cGTEntries * sizeof(uint32_t), 512);
1409 cbOverhead = VMDK_SECTOR2BYTE(uStartSector) + cbGDRounded + cbGTRounded;
1410 }
1411 else
1412 {
1413 /* Use a dummy start sector for layout computation. */
1414 if (uStartSector == VMDK_GD_AT_END)
1415 uStartSector = 1;
1416 cbGTRounded = 0;
1417 cbOverhead = VMDK_SECTOR2BYTE(uStartSector) + cbGDRounded;
1418 }
1419
1420 /* For streamOptimized extents there is only one grain directory,
1421 * and for all others take redundant grain directory into account. */
1422 if (pImage->uImageFlags & VD_VMDK_IMAGE_FLAGS_STREAM_OPTIMIZED)
1423 {
1424 cbOverhead = RT_ALIGN_64(cbOverhead,
1425 VMDK_SECTOR2BYTE(pExtent->cSectorsPerGrain));
1426 }
1427 else
1428 {
1429 cbOverhead += cbGDRounded + cbGTRounded;
1430 cbOverhead = RT_ALIGN_64(cbOverhead,
1431 VMDK_SECTOR2BYTE(pExtent->cSectorsPerGrain));
1432 rc = vdIfIoIntFileSetSize(pImage->pIfIo, pExtent->pFile->pStorage, cbOverhead);
1433 }
1434
1435 if (RT_SUCCESS(rc))
1436 {
1437 pExtent->uAppendPosition = cbOverhead;
1438 pExtent->cOverheadSectors = VMDK_BYTE2SECTOR(cbOverhead);
1439
1440 if (pImage->uImageFlags & VD_VMDK_IMAGE_FLAGS_STREAM_OPTIMIZED)
1441 {
1442 pExtent->uSectorRGD = 0;
1443 pExtent->uSectorGD = uStartSector;
1444 }
1445 else
1446 {
1447 pExtent->uSectorRGD = uStartSector;
1448 pExtent->uSectorGD = uStartSector + VMDK_BYTE2SECTOR(cbGDRounded + cbGTRounded);
1449 }
1450
1451 rc = vmdkAllocStreamBuffers(pImage, pExtent);
1452 if (RT_SUCCESS(rc))
1453 {
1454 rc = vmdkAllocGrainDirectory(pImage, pExtent);
1455 if ( RT_SUCCESS(rc)
1456 && fPreAlloc)
1457 {
1458 uint32_t uGTSectorLE;
1459 uint64_t uOffsetSectors;
1460
1461 if (pExtent->pRGD)
1462 {
1463 uOffsetSectors = pExtent->uSectorRGD + VMDK_BYTE2SECTOR(cbGDRounded);
1464 for (i = 0; i < pExtent->cGDEntries; i++)
1465 {
1466 pExtent->pRGD[i] = uOffsetSectors;
1467 uGTSectorLE = RT_H2LE_U64(uOffsetSectors);
1468 /* Write the redundant grain directory entry to disk. */
1469 rc = vdIfIoIntFileWriteSync(pImage->pIfIo, pExtent->pFile->pStorage,
1470 VMDK_SECTOR2BYTE(pExtent->uSectorRGD) + i * sizeof(uGTSectorLE),
1471 &uGTSectorLE, sizeof(uGTSectorLE));
1472 if (RT_FAILURE(rc))
1473 {
1474 rc = vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: cannot write new redundant grain directory entry in '%s'"), pExtent->pszFullname);
1475 break;
1476 }
1477 uOffsetSectors += VMDK_BYTE2SECTOR(pExtent->cGTEntries * sizeof(uint32_t));
1478 }
1479 }
1480
1481 if (RT_SUCCESS(rc))
1482 {
1483 uOffsetSectors = pExtent->uSectorGD + VMDK_BYTE2SECTOR(cbGDRounded);
1484 for (i = 0; i < pExtent->cGDEntries; i++)
1485 {
1486 pExtent->pGD[i] = uOffsetSectors;
1487 uGTSectorLE = RT_H2LE_U64(uOffsetSectors);
1488 /* Write the grain directory entry to disk. */
1489 rc = vdIfIoIntFileWriteSync(pImage->pIfIo, pExtent->pFile->pStorage,
1490 VMDK_SECTOR2BYTE(pExtent->uSectorGD) + i * sizeof(uGTSectorLE),
1491 &uGTSectorLE, sizeof(uGTSectorLE));
1492 if (RT_FAILURE(rc))
1493 {
1494 rc = vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: cannot write new grain directory entry in '%s'"), pExtent->pszFullname);
1495 break;
1496 }
1497 uOffsetSectors += VMDK_BYTE2SECTOR(pExtent->cGTEntries * sizeof(uint32_t));
1498 }
1499 }
1500 }
1501 }
1502 }
1503
1504 if (RT_FAILURE(rc))
1505 vmdkFreeGrainDirectory(pExtent);
1506 return rc;
1507}
1508
1509/**
1510 * Unquotes the given string returning the result in a separate buffer.
1511 *
1512 * @returns VBox status code.
1513 * @param pImage The VMDK image state.
1514 * @param pszStr The string to unquote.
1515 * @param ppszUnquoted Where to store the return value, use RTMemTmpFree to
1516 * free.
1517 * @param ppszNext Where to store the pointer to any character following
1518 * the quoted value, optional.
1519 */
1520static int vmdkStringUnquote(PVMDKIMAGE pImage, const char *pszStr,
1521 char **ppszUnquoted, char **ppszNext)
1522{
1523 const char *pszStart = pszStr;
1524 char *pszQ;
1525 char *pszUnquoted;
1526
1527 /* Skip over whitespace. */
1528 while (*pszStr == ' ' || *pszStr == '\t')
1529 pszStr++;
1530
1531 if (*pszStr != '"')
1532 {
1533 pszQ = (char *)pszStr;
1534 while (*pszQ && *pszQ != ' ' && *pszQ != '\t')
1535 pszQ++;
1536 }
1537 else
1538 {
1539 pszStr++;
1540 pszQ = (char *)strchr(pszStr, '"');
1541 if (pszQ == NULL)
1542 return vdIfError(pImage->pIfError, VERR_VD_VMDK_INVALID_HEADER, RT_SRC_POS, N_("VMDK: incorrectly quoted value in descriptor in '%s' (raw value %s)"),
1543 pImage->pszFilename, pszStart);
1544 }
1545
1546 pszUnquoted = (char *)RTMemTmpAlloc(pszQ - pszStr + 1);
1547 if (!pszUnquoted)
1548 return VERR_NO_MEMORY;
1549 memcpy(pszUnquoted, pszStr, pszQ - pszStr);
1550 pszUnquoted[pszQ - pszStr] = '\0';
1551 *ppszUnquoted = pszUnquoted;
1552 if (ppszNext)
1553 *ppszNext = pszQ + 1;
1554 return VINF_SUCCESS;
1555}
1556
1557static int vmdkDescInitStr(PVMDKIMAGE pImage, PVMDKDESCRIPTOR pDescriptor,
1558 const char *pszLine)
1559{
1560 char *pEnd = pDescriptor->aLines[pDescriptor->cLines];
1561 ssize_t cbDiff = strlen(pszLine) + 1;
1562
1563 if ( pDescriptor->cLines >= VMDK_DESCRIPTOR_LINES_MAX - 1
1564 && pEnd - pDescriptor->aLines[0] > (ptrdiff_t)pDescriptor->cbDescAlloc - cbDiff)
1565 return vdIfError(pImage->pIfError, VERR_BUFFER_OVERFLOW, RT_SRC_POS, N_("VMDK: descriptor too big in '%s'"), pImage->pszFilename);
1566
1567 memcpy(pEnd, pszLine, cbDiff);
1568 pDescriptor->cLines++;
1569 pDescriptor->aLines[pDescriptor->cLines] = pEnd + cbDiff;
1570 pDescriptor->fDirty = true;
1571
1572 return VINF_SUCCESS;
1573}
1574
1575static bool vmdkDescGetStr(PVMDKDESCRIPTOR pDescriptor, unsigned uStart,
1576 const char *pszKey, const char **ppszValue)
1577{
1578 size_t cbKey = strlen(pszKey);
1579 const char *pszValue;
1580
1581 while (uStart != 0)
1582 {
1583 if (!strncmp(pDescriptor->aLines[uStart], pszKey, cbKey))
1584 {
1585 /* Key matches, check for a '=' (preceded by whitespace). */
1586 pszValue = pDescriptor->aLines[uStart] + cbKey;
1587 while (*pszValue == ' ' || *pszValue == '\t')
1588 pszValue++;
1589 if (*pszValue == '=')
1590 {
1591 *ppszValue = pszValue + 1;
1592 break;
1593 }
1594 }
1595 uStart = pDescriptor->aNextLines[uStart];
1596 }
1597 return !!uStart;
1598}
1599
/**
 * Internal: Sets, replaces or removes a "key=value" line in the descriptor
 * section whose line chain starts at @a uStart.
 *
 * @returns VBox status code.
 * @retval  VERR_BUFFER_OVERFLOW if the descriptor text buffer or line table
 *          cannot accommodate the change.
 * @param   pImage       The VMDK image state (for error reporting).
 * @param   pDescriptor  The descriptor to modify.
 * @param   uStart       Index of the first line of the target section.
 * @param   pszKey       The key to set.
 * @param   pszValue     The new value, or NULL to remove the key.
 */
static int vmdkDescSetStr(PVMDKIMAGE pImage, PVMDKDESCRIPTOR pDescriptor,
                          unsigned uStart,
                          const char *pszKey, const char *pszValue)
{
    char *pszTmp = NULL; /* (MSC naturally cannot figure this isn't used uninitialized) */
    size_t cbKey = strlen(pszKey);
    unsigned uLast = 0;

    /* Walk the section's line chain looking for the key; remember the last
     * chain member for the append case below. */
    while (uStart != 0)
    {
        if (!strncmp(pDescriptor->aLines[uStart], pszKey, cbKey))
        {
            /* Key matches, check for a '=' (preceded by whitespace). */
            pszTmp = pDescriptor->aLines[uStart] + cbKey;
            while (*pszTmp == ' ' || *pszTmp == '\t')
                pszTmp++;
            if (*pszTmp == '=')
            {
                pszTmp++;
                /** @todo r=bird: Doesn't skipping trailing blanks here just cause unecessary
                 *        bloat and potentially out of space error? */
                while (*pszTmp == ' ' || *pszTmp == '\t')
                    pszTmp++;
                break;  /* pszTmp now points at the existing value text. */
            }
        }
        if (!pDescriptor->aNextLines[uStart])
            uLast = uStart;
        uStart = pDescriptor->aNextLines[uStart];
    }
    if (uStart)
    {
        if (pszValue)
        {
            /* Key already exists, replace existing value. */
            size_t cbOldVal = strlen(pszTmp);
            size_t cbNewVal = strlen(pszValue);
            ssize_t cbDiff = cbNewVal - cbOldVal;
            /* Check for buffer overflow. */
            if (  pDescriptor->aLines[pDescriptor->cLines] - pDescriptor->aLines[0]
                > (ptrdiff_t)pDescriptor->cbDescAlloc - cbDiff)
                return vdIfError(pImage->pIfError, VERR_BUFFER_OVERFLOW, RT_SRC_POS, N_("VMDK: descriptor too big in '%s'"), pImage->pszFilename);

            /* Shift the buffer tail to resize the value slot, then copy the
             * new value in (including its terminator). */
            memmove(pszTmp + cbNewVal, pszTmp + cbOldVal,
                    pDescriptor->aLines[pDescriptor->cLines] - pszTmp - cbOldVal);
            memcpy(pszTmp, pszValue, cbNewVal + 1);
            /* All line pointers behind the edited line moved by cbDiff. */
            for (unsigned i = uStart + 1; i <= pDescriptor->cLines; i++)
                pDescriptor->aLines[i] += cbDiff;
        }
        else
        {
            /* Key exists but shall be removed: drop the whole line and shift
             * the line/next tables down by one entry. */
            memmove(pDescriptor->aLines[uStart], pDescriptor->aLines[uStart+1],
                    pDescriptor->aLines[pDescriptor->cLines] - pDescriptor->aLines[uStart+1] + 1);
            for (unsigned i = uStart + 1; i <= pDescriptor->cLines; i++)
            {
                pDescriptor->aLines[i-1] = pDescriptor->aLines[i];
                if (pDescriptor->aNextLines[i])
                    pDescriptor->aNextLines[i-1] = pDescriptor->aNextLines[i] - 1;
                else
                    pDescriptor->aNextLines[i-1] = 0;   /* End-of-chain marker. */
            }
            pDescriptor->cLines--;
            /* Adjust starting line numbers of following descriptor sections. */
            if (uStart < pDescriptor->uFirstExtent)
                pDescriptor->uFirstExtent--;
            if (uStart < pDescriptor->uFirstDDB)
                pDescriptor->uFirstDDB--;
        }
    }
    else
    {
        /* Key doesn't exist, append after the last entry in this category. */
        if (!pszValue)
        {
            /* Key doesn't exist, and it should be removed. Simply a no-op. */
            return VINF_SUCCESS;
        }
        cbKey = strlen(pszKey);
        size_t cbValue = strlen(pszValue);
        ssize_t cbDiff = cbKey + 1 + cbValue + 1;   /* "key=value\0" */
        /* Check for buffer overflow. */
        if (   (pDescriptor->cLines >= VMDK_DESCRIPTOR_LINES_MAX - 1)
            || (   pDescriptor->aLines[pDescriptor->cLines]
                 - pDescriptor->aLines[0] > (ptrdiff_t)pDescriptor->cbDescAlloc - cbDiff))
            return vdIfError(pImage->pIfError, VERR_BUFFER_OVERFLOW, RT_SRC_POS, N_("VMDK: descriptor too big in '%s'"), pImage->pszFilename);
        /* Shift the line tables up by one to open a slot right after uLast. */
        for (unsigned i = pDescriptor->cLines + 1; i > uLast + 1; i--)
        {
            pDescriptor->aLines[i] = pDescriptor->aLines[i - 1];
            if (pDescriptor->aNextLines[i - 1])
                pDescriptor->aNextLines[i] = pDescriptor->aNextLines[i - 1] + 1;
            else
                pDescriptor->aNextLines[i] = 0;
        }
        uStart = uLast + 1;
        pDescriptor->aNextLines[uLast] = uStart;
        pDescriptor->aNextLines[uStart] = 0;
        pDescriptor->cLines++;
        pszTmp = pDescriptor->aLines[uStart];
        /* Open a byte gap in the text buffer and write "key=value" into it. */
        memmove(pszTmp + cbDiff, pszTmp,
                pDescriptor->aLines[pDescriptor->cLines] - pszTmp);
        memcpy(pDescriptor->aLines[uStart], pszKey, cbKey);
        pDescriptor->aLines[uStart][cbKey] = '=';
        memcpy(pDescriptor->aLines[uStart] + cbKey + 1, pszValue, cbValue + 1);
        for (unsigned i = uStart + 1; i <= pDescriptor->cLines; i++)
            pDescriptor->aLines[i] += cbDiff;

        /* Adjust starting line numbers of following descriptor sections. */
        if (uStart <= pDescriptor->uFirstExtent)
            pDescriptor->uFirstExtent++;
        if (uStart <= pDescriptor->uFirstDDB)
            pDescriptor->uFirstDDB++;
    }
    pDescriptor->fDirty = true;
    return VINF_SUCCESS;
}
1715
1716static int vmdkDescBaseGetU32(PVMDKDESCRIPTOR pDescriptor, const char *pszKey,
1717 uint32_t *puValue)
1718{
1719 const char *pszValue;
1720
1721 if (!vmdkDescGetStr(pDescriptor, pDescriptor->uFirstDesc, pszKey,
1722 &pszValue))
1723 return VERR_VD_VMDK_VALUE_NOT_FOUND;
1724 return RTStrToUInt32Ex(pszValue, NULL, 10, puValue);
1725}
1726
1727/**
1728 * Returns the value of the given key as a string allocating the necessary memory.
1729 *
1730 * @returns VBox status code.
1731 * @retval VERR_VD_VMDK_VALUE_NOT_FOUND if the value could not be found.
1732 * @param pImage The VMDK image state.
1733 * @param pDescriptor The descriptor to fetch the value from.
1734 * @param pszKey The key to get the value from.
1735 * @param ppszValue Where to store the return value, use RTMemTmpFree to
1736 * free.
1737 */
1738static int vmdkDescBaseGetStr(PVMDKIMAGE pImage, PVMDKDESCRIPTOR pDescriptor,
1739 const char *pszKey, char **ppszValue)
1740{
1741 const char *pszValue;
1742 char *pszValueUnquoted;
1743
1744 if (!vmdkDescGetStr(pDescriptor, pDescriptor->uFirstDesc, pszKey,
1745 &pszValue))
1746 return VERR_VD_VMDK_VALUE_NOT_FOUND;
1747 int rc = vmdkStringUnquote(pImage, pszValue, &pszValueUnquoted, NULL);
1748 if (RT_FAILURE(rc))
1749 return rc;
1750 *ppszValue = pszValueUnquoted;
1751 return rc;
1752}
1753
1754static int vmdkDescBaseSetStr(PVMDKIMAGE pImage, PVMDKDESCRIPTOR pDescriptor,
1755 const char *pszKey, const char *pszValue)
1756{
1757 char *pszValueQuoted;
1758
1759 RTStrAPrintf(&pszValueQuoted, "\"%s\"", pszValue);
1760 if (!pszValueQuoted)
1761 return VERR_NO_STR_MEMORY;
1762 int rc = vmdkDescSetStr(pImage, pDescriptor, pDescriptor->uFirstDesc, pszKey,
1763 pszValueQuoted);
1764 RTStrFree(pszValueQuoted);
1765 return rc;
1766}
1767
/**
 * Internal: Removes the line at the start of the extent description section
 * (the dummy placeholder entry), compacting the descriptor text buffer and
 * renumbering the line tables.
 */
static void vmdkDescExtRemoveDummy(PVMDKIMAGE pImage,
                                   PVMDKDESCRIPTOR pDescriptor)
{
    RT_NOREF1(pImage);
    unsigned uEntry = pDescriptor->uFirstExtent;
    ssize_t cbDiff;

    /* No extent section -> nothing to remove. */
    if (!uEntry)
        return;

    cbDiff = strlen(pDescriptor->aLines[uEntry]) + 1;
    /* Move everything including \0 in the entry marking the end of buffer. */
    memmove(pDescriptor->aLines[uEntry], pDescriptor->aLines[uEntry + 1],
            pDescriptor->aLines[pDescriptor->cLines] - pDescriptor->aLines[uEntry + 1] + 1);
    /* Shift the line pointer and next-line tables down by one entry,
     * adjusting the pointers for the removed bytes. */
    for (unsigned i = uEntry + 1; i <= pDescriptor->cLines; i++)
    {
        pDescriptor->aLines[i - 1] = pDescriptor->aLines[i] - cbDiff;
        if (pDescriptor->aNextLines[i])
            pDescriptor->aNextLines[i - 1] = pDescriptor->aNextLines[i] - 1;
        else
            pDescriptor->aNextLines[i - 1] = 0;   /* End-of-chain marker. */
    }
    pDescriptor->cLines--;
    /* The DDB section follows the extent section; its start index moved up. */
    if (pDescriptor->uFirstDDB)
        pDescriptor->uFirstDDB--;

    return;
}
1796
/**
 * Internal: Removes the descriptor line with index @a uLine, compacting the
 * text buffer and shifting the line tables.
 */
static void vmdkDescExtRemoveByLine(PVMDKIMAGE pImage,
                                    PVMDKDESCRIPTOR pDescriptor, unsigned uLine)
{
    RT_NOREF1(pImage);
    unsigned uEntry = uLine;
    ssize_t cbDiff;
    /* Line index 0 -> nothing to remove. */
    if (!uEntry)
        return;
    cbDiff = strlen(pDescriptor->aLines[uEntry]) + 1;
    /* Move everything including \0 in the entry marking the end of buffer. */
    memmove(pDescriptor->aLines[uEntry], pDescriptor->aLines[uEntry + 1],
            pDescriptor->aLines[pDescriptor->cLines] - pDescriptor->aLines[uEntry + 1] + 1);
    /* Shift the tables down by one. Note the loop starts at uEntry itself
     * (unlike vmdkDescExtRemoveDummy) so the aNextLines slot of the entry
     * preceding the removed line gets updated too; the aLines adjustment is
     * skipped for i == uEntry. */
    for (unsigned i = uEntry; i <= pDescriptor->cLines; i++)
    {
        if (i != uEntry)
            pDescriptor->aLines[i - 1] = pDescriptor->aLines[i] - cbDiff;
        if (pDescriptor->aNextLines[i])
            pDescriptor->aNextLines[i - 1] = pDescriptor->aNextLines[i] - 1;
        else
            pDescriptor->aNextLines[i - 1] = 0;   /* End-of-chain marker. */
    }
    pDescriptor->cLines--;
    /* The DDB section follows; its start index moved up by one. */
    if (pDescriptor->uFirstDDB)
        pDescriptor->uFirstDDB--;
    return;
}
1823
/**
 * Internal: Appends a new extent description line ("ACCESS SECTORS TYPE
 * [\"basename\" [offset]]") to the extent section of the descriptor.
 *
 * @returns VBox status code.
 * @retval  VERR_BUFFER_OVERFLOW if the descriptor cannot take another line.
 * @param   pImage           The VMDK image state.
 * @param   pDescriptor      The descriptor to modify.
 * @param   enmAccess        Access mode of the extent (index into apszAccess).
 * @param   cNominalSectors  Nominal size of the extent in sectors.
 * @param   enmType          Extent type (index into apszType).
 * @param   pszBasename      Extent file name (unused for ZERO extents).
 * @param   uSectorOffset    Start offset within the file (FLAT extents only).
 */
static int vmdkDescExtInsert(PVMDKIMAGE pImage, PVMDKDESCRIPTOR pDescriptor,
                             VMDKACCESS enmAccess, uint64_t cNominalSectors,
                             VMDKETYPE enmType, const char *pszBasename,
                             uint64_t uSectorOffset)
{
    static const char *apszAccess[] = { "NOACCESS", "RDONLY", "RW" };
    static const char *apszType[] = { "", "SPARSE", "FLAT", "ZERO", "VMFS" };
    char *pszTmp;
    unsigned uStart = pDescriptor->uFirstExtent, uLast = 0;
    char szExt[1024];
    ssize_t cbDiff;

    Assert((unsigned)enmAccess < RT_ELEMENTS(apszAccess));
    Assert((unsigned)enmType < RT_ELEMENTS(apszType));

    /* Find last entry in extent description. */
    while (uStart)
    {
        if (!pDescriptor->aNextLines[uStart])
            uLast = uStart;
        uStart = pDescriptor->aNextLines[uStart];
    }

    if (enmType == VMDKETYPE_ZERO)
    {
        /* ZERO extents have no backing file. */
        RTStrPrintf(szExt, sizeof(szExt), "%s %llu %s ", apszAccess[enmAccess],
                    cNominalSectors, apszType[enmType]);
    }
    else if (enmType == VMDKETYPE_FLAT)
    {
        /* FLAT extents additionally carry the start offset in the file. */
        RTStrPrintf(szExt, sizeof(szExt), "%s %llu %s \"%s\" %llu",
                    apszAccess[enmAccess], cNominalSectors,
                    apszType[enmType], pszBasename, uSectorOffset);
    }
    else
    {
        RTStrPrintf(szExt, sizeof(szExt), "%s %llu %s \"%s\"",
                    apszAccess[enmAccess], cNominalSectors,
                    apszType[enmType], pszBasename);
    }
    cbDiff = strlen(szExt) + 1;

    /* Check for buffer overflow. */
    if (   (pDescriptor->cLines >= VMDK_DESCRIPTOR_LINES_MAX - 1)
        || (   pDescriptor->aLines[pDescriptor->cLines]
             - pDescriptor->aLines[0] > (ptrdiff_t)pDescriptor->cbDescAlloc - cbDiff))
    {
        if (   (pImage->uImageFlags & VD_VMDK_IMAGE_FLAGS_SPLIT_2G)
            && !(pDescriptor->cLines >= VMDK_DESCRIPTOR_LINES_MAX - 1))
        {
            /* NOTE(review): only the recorded allocation sizes are doubled
             * here; no reallocation of the backing text buffer is visible in
             * this function. Presumably the buffer is grown elsewhere based
             * on cbDescAlloc -- verify before relying on this path. */
            pImage->cbDescAlloc *= 2;
            pDescriptor->cbDescAlloc *= 2;
        }
        else
            return vdIfError(pImage->pIfError, VERR_BUFFER_OVERFLOW, RT_SRC_POS, N_("VMDK: descriptor too big in '%s'"), pImage->pszFilename);
    }

    /* Shift the line tables up by one to open a slot right after uLast. */
    for (unsigned i = pDescriptor->cLines + 1; i > uLast + 1; i--)
    {
        pDescriptor->aLines[i] = pDescriptor->aLines[i - 1];
        if (pDescriptor->aNextLines[i - 1])
            pDescriptor->aNextLines[i] = pDescriptor->aNextLines[i - 1] + 1;
        else
            pDescriptor->aNextLines[i] = 0;   /* End-of-chain marker. */
    }
    uStart = uLast + 1;
    pDescriptor->aNextLines[uLast] = uStart;
    pDescriptor->aNextLines[uStart] = 0;
    pDescriptor->cLines++;
    pszTmp = pDescriptor->aLines[uStart];
    /* Open a byte gap in the text buffer and copy the new line in. */
    memmove(pszTmp + cbDiff, pszTmp,
            pDescriptor->aLines[pDescriptor->cLines] - pszTmp);
    memcpy(pDescriptor->aLines[uStart], szExt, cbDiff);
    for (unsigned i = uStart + 1; i <= pDescriptor->cLines; i++)
        pDescriptor->aLines[i] += cbDiff;

    /* Adjust starting line numbers of following descriptor sections. */
    if (uStart <= pDescriptor->uFirstDDB)
        pDescriptor->uFirstDDB++;

    pDescriptor->fDirty = true;
    return VINF_SUCCESS;
}
1907
1908/**
1909 * Returns the value of the given key from the DDB as a string allocating
1910 * the necessary memory.
1911 *
1912 * @returns VBox status code.
1913 * @retval VERR_VD_VMDK_VALUE_NOT_FOUND if the value could not be found.
1914 * @param pImage The VMDK image state.
1915 * @param pDescriptor The descriptor to fetch the value from.
1916 * @param pszKey The key to get the value from.
1917 * @param ppszValue Where to store the return value, use RTMemTmpFree to
1918 * free.
1919 */
1920static int vmdkDescDDBGetStr(PVMDKIMAGE pImage, PVMDKDESCRIPTOR pDescriptor,
1921 const char *pszKey, char **ppszValue)
1922{
1923 const char *pszValue;
1924 char *pszValueUnquoted;
1925
1926 if (!vmdkDescGetStr(pDescriptor, pDescriptor->uFirstDDB, pszKey,
1927 &pszValue))
1928 return VERR_VD_VMDK_VALUE_NOT_FOUND;
1929 int rc = vmdkStringUnquote(pImage, pszValue, &pszValueUnquoted, NULL);
1930 if (RT_FAILURE(rc))
1931 return rc;
1932 *ppszValue = pszValueUnquoted;
1933 return rc;
1934}
1935
1936static int vmdkDescDDBGetU32(PVMDKIMAGE pImage, PVMDKDESCRIPTOR pDescriptor,
1937 const char *pszKey, uint32_t *puValue)
1938{
1939 const char *pszValue;
1940 char *pszValueUnquoted;
1941
1942 if (!vmdkDescGetStr(pDescriptor, pDescriptor->uFirstDDB, pszKey,
1943 &pszValue))
1944 return VERR_VD_VMDK_VALUE_NOT_FOUND;
1945 int rc = vmdkStringUnquote(pImage, pszValue, &pszValueUnquoted, NULL);
1946 if (RT_FAILURE(rc))
1947 return rc;
1948 rc = RTStrToUInt32Ex(pszValueUnquoted, NULL, 10, puValue);
1949 RTMemTmpFree(pszValueUnquoted);
1950 return rc;
1951}
1952
1953static int vmdkDescDDBGetUuid(PVMDKIMAGE pImage, PVMDKDESCRIPTOR pDescriptor,
1954 const char *pszKey, PRTUUID pUuid)
1955{
1956 const char *pszValue;
1957 char *pszValueUnquoted;
1958
1959 if (!vmdkDescGetStr(pDescriptor, pDescriptor->uFirstDDB, pszKey,
1960 &pszValue))
1961 return VERR_VD_VMDK_VALUE_NOT_FOUND;
1962 int rc = vmdkStringUnquote(pImage, pszValue, &pszValueUnquoted, NULL);
1963 if (RT_FAILURE(rc))
1964 return rc;
1965 rc = RTUuidFromStr(pUuid, pszValueUnquoted);
1966 RTMemTmpFree(pszValueUnquoted);
1967 return rc;
1968}
1969
1970static int vmdkDescDDBSetStr(PVMDKIMAGE pImage, PVMDKDESCRIPTOR pDescriptor,
1971 const char *pszKey, const char *pszVal)
1972{
1973 int rc;
1974 char *pszValQuoted;
1975
1976 if (pszVal)
1977 {
1978 RTStrAPrintf(&pszValQuoted, "\"%s\"", pszVal);
1979 if (!pszValQuoted)
1980 return VERR_NO_STR_MEMORY;
1981 }
1982 else
1983 pszValQuoted = NULL;
1984 rc = vmdkDescSetStr(pImage, pDescriptor, pDescriptor->uFirstDDB, pszKey,
1985 pszValQuoted);
1986 if (pszValQuoted)
1987 RTStrFree(pszValQuoted);
1988 return rc;
1989}
1990
1991static int vmdkDescDDBSetUuid(PVMDKIMAGE pImage, PVMDKDESCRIPTOR pDescriptor,
1992 const char *pszKey, PCRTUUID pUuid)
1993{
1994 char *pszUuid;
1995
1996 RTStrAPrintf(&pszUuid, "\"%RTuuid\"", pUuid);
1997 if (!pszUuid)
1998 return VERR_NO_STR_MEMORY;
1999 int rc = vmdkDescSetStr(pImage, pDescriptor, pDescriptor->uFirstDDB, pszKey,
2000 pszUuid);
2001 RTStrFree(pszUuid);
2002 return rc;
2003}
2004
2005static int vmdkDescDDBSetU32(PVMDKIMAGE pImage, PVMDKDESCRIPTOR pDescriptor,
2006 const char *pszKey, uint32_t uValue)
2007{
2008 char *pszValue;
2009
2010 RTStrAPrintf(&pszValue, "\"%d\"", uValue);
2011 if (!pszValue)
2012 return VERR_NO_STR_MEMORY;
2013 int rc = vmdkDescSetStr(pImage, pDescriptor, pDescriptor->uFirstDDB, pszKey,
2014 pszValue);
2015 RTStrFree(pszValue);
2016 return rc;
2017}
2018
2019/**
2020 * Splits the descriptor data into individual lines checking for correct line
2021 * endings and descriptor size.
2022 *
2023 * @returns VBox status code.
2024 * @param pImage The image instance.
2025 * @param pDesc The descriptor.
2026 * @param pszTmp The raw descriptor data from the image.
2027 */
2028static int vmdkDescSplitLines(PVMDKIMAGE pImage, PVMDKDESCRIPTOR pDesc, char *pszTmp)
2029{
2030 unsigned cLine = 0;
2031 int rc = VINF_SUCCESS;
2032
2033 while ( RT_SUCCESS(rc)
2034 && *pszTmp != '\0')
2035 {
2036 pDesc->aLines[cLine++] = pszTmp;
2037 if (cLine >= VMDK_DESCRIPTOR_LINES_MAX)
2038 {
2039 vdIfError(pImage->pIfError, VERR_VD_VMDK_INVALID_HEADER, RT_SRC_POS, N_("VMDK: descriptor too big in '%s'"), pImage->pszFilename);
2040 rc = VERR_VD_VMDK_INVALID_HEADER;
2041 break;
2042 }
2043
2044 while (*pszTmp != '\0' && *pszTmp != '\n')
2045 {
2046 if (*pszTmp == '\r')
2047 {
2048 if (*(pszTmp + 1) != '\n')
2049 {
2050 rc = vdIfError(pImage->pIfError, VERR_VD_VMDK_INVALID_HEADER, RT_SRC_POS, N_("VMDK: unsupported end of line in descriptor in '%s'"), pImage->pszFilename);
2051 break;
2052 }
2053 else
2054 {
2055 /* Get rid of CR character. */
2056 *pszTmp = '\0';
2057 }
2058 }
2059 pszTmp++;
2060 }
2061
2062 if (RT_FAILURE(rc))
2063 break;
2064
2065 /* Get rid of LF character. */
2066 if (*pszTmp == '\n')
2067 {
2068 *pszTmp = '\0';
2069 pszTmp++;
2070 }
2071 }
2072
2073 if (RT_SUCCESS(rc))
2074 {
2075 pDesc->cLines = cLine;
2076 /* Pointer right after the end of the used part of the buffer. */
2077 pDesc->aLines[cLine] = pszTmp;
2078 }
2079
2080 return rc;
2081}
2082
/**
 * Splits the raw descriptor into lines and locates the three descriptor
 * sections (header/general keys, extent list, disk database), validating
 * both the signature line and the relative ordering of the sections.
 *
 * @returns VBox status code.
 * @param   pImage      The image instance (for error reporting).
 * @param   pDescData   The raw descriptor data; modified in place by
 *                      vmdkDescSplitLines.
 * @param   cbDescData  Size of the descriptor buffer in bytes.
 * @param   pDescriptor The descriptor state to initialize.
 */
static int vmdkPreprocessDescriptor(PVMDKIMAGE pImage, char *pDescData,
                                    size_t cbDescData, PVMDKDESCRIPTOR pDescriptor)
{
    pDescriptor->cbDescAlloc = cbDescData;
    int rc = vmdkDescSplitLines(pImage, pDescriptor, pDescData);
    if (RT_SUCCESS(rc))
    {
        /* The first line must be one of the known signature variants. */
        if (    strcmp(pDescriptor->aLines[0], "# Disk DescriptorFile")
            &&  strcmp(pDescriptor->aLines[0], "# Disk Descriptor File")
            &&  strcmp(pDescriptor->aLines[0], "#Disk Descriptor File")
            &&  strcmp(pDescriptor->aLines[0], "#Disk DescriptorFile"))
            rc = vdIfError(pImage->pIfError, VERR_VD_VMDK_INVALID_HEADER, RT_SRC_POS,
                           N_("VMDK: descriptor does not start as expected in '%s'"), pImage->pszFilename);
        else
        {
            unsigned uLastNonEmptyLine = 0;

            /* Initialize those, because we need to be able to reopen an image. */
            pDescriptor->uFirstDesc = 0;
            pDescriptor->uFirstExtent = 0;
            pDescriptor->uFirstDDB = 0;
            for (unsigned i = 0; i < pDescriptor->cLines; i++)
            {
                /* Comments and empty lines never start or continue a section. */
                if (*pDescriptor->aLines[i] != '#' && *pDescriptor->aLines[i] != '\0')
                {
                    if (    !strncmp(pDescriptor->aLines[i], "RW", 2)
                        ||  !strncmp(pDescriptor->aLines[i], "RDONLY", 6)
                        ||  !strncmp(pDescriptor->aLines[i], "NOACCESS", 8) )
                    {
                        /* An extent descriptor. */
                        if (!pDescriptor->uFirstDesc || pDescriptor->uFirstDDB)
                        {
                            /* Incorrect ordering of entries. */
                            rc = vdIfError(pImage->pIfError, VERR_VD_VMDK_INVALID_HEADER, RT_SRC_POS,
                                           N_("VMDK: incorrect ordering of entries in descriptor in '%s'"), pImage->pszFilename);
                            break;
                        }
                        if (!pDescriptor->uFirstExtent)
                        {
                            pDescriptor->uFirstExtent = i;
                            /* Reset so the per-section chain restarts here. */
                            uLastNonEmptyLine = 0;
                        }
                    }
                    else if (!strncmp(pDescriptor->aLines[i], "ddb.", 4))
                    {
                        /* A disk database entry. */
                        if (!pDescriptor->uFirstDesc || !pDescriptor->uFirstExtent)
                        {
                            /* Incorrect ordering of entries. */
                            rc = vdIfError(pImage->pIfError, VERR_VD_VMDK_INVALID_HEADER, RT_SRC_POS,
                                           N_("VMDK: incorrect ordering of entries in descriptor in '%s'"), pImage->pszFilename);
                            break;
                        }
                        if (!pDescriptor->uFirstDDB)
                        {
                            pDescriptor->uFirstDDB = i;
                            uLastNonEmptyLine = 0;
                        }
                    }
                    else
                    {
                        /* A normal entry. */
                        if (pDescriptor->uFirstExtent || pDescriptor->uFirstDDB)
                        {
                            /* Incorrect ordering of entries. */
                            rc = vdIfError(pImage->pIfError, VERR_VD_VMDK_INVALID_HEADER, RT_SRC_POS,
                                           N_("VMDK: incorrect ordering of entries in descriptor in '%s'"), pImage->pszFilename);
                            break;
                        }
                        if (!pDescriptor->uFirstDesc)
                        {
                            pDescriptor->uFirstDesc = i;
                            uLastNonEmptyLine = 0;
                        }
                    }
                    /* Chain non-empty lines of the current section through
                     * aNextLines so callers can iterate a section quickly. */
                    if (uLastNonEmptyLine)
                        pDescriptor->aNextLines[uLastNonEmptyLine] = i;
                    uLastNonEmptyLine = i;
                }
            }
        }
    }

    return rc;
}
2168
2169static int vmdkDescSetPCHSGeometry(PVMDKIMAGE pImage,
2170 PCVDGEOMETRY pPCHSGeometry)
2171{
2172 int rc = vmdkDescDDBSetU32(pImage, &pImage->Descriptor,
2173 VMDK_DDB_GEO_PCHS_CYLINDERS,
2174 pPCHSGeometry->cCylinders);
2175 if (RT_FAILURE(rc))
2176 return rc;
2177 rc = vmdkDescDDBSetU32(pImage, &pImage->Descriptor,
2178 VMDK_DDB_GEO_PCHS_HEADS,
2179 pPCHSGeometry->cHeads);
2180 if (RT_FAILURE(rc))
2181 return rc;
2182 rc = vmdkDescDDBSetU32(pImage, &pImage->Descriptor,
2183 VMDK_DDB_GEO_PCHS_SECTORS,
2184 pPCHSGeometry->cSectors);
2185 return rc;
2186}
2187
2188static int vmdkDescSetLCHSGeometry(PVMDKIMAGE pImage,
2189 PCVDGEOMETRY pLCHSGeometry)
2190{
2191 int rc = vmdkDescDDBSetU32(pImage, &pImage->Descriptor,
2192 VMDK_DDB_GEO_LCHS_CYLINDERS,
2193 pLCHSGeometry->cCylinders);
2194 if (RT_FAILURE(rc))
2195 return rc;
2196 rc = vmdkDescDDBSetU32(pImage, &pImage->Descriptor,
2197 VMDK_DDB_GEO_LCHS_HEADS,
2198
2199 pLCHSGeometry->cHeads);
2200 if (RT_FAILURE(rc))
2201 return rc;
2202 rc = vmdkDescDDBSetU32(pImage, &pImage->Descriptor,
2203 VMDK_DDB_GEO_LCHS_SECTORS,
2204 pLCHSGeometry->cSectors);
2205 return rc;
2206}
2207
/**
 * Creates a new descriptor from scratch, seeding the three sections
 * (header, extent list, disk database) with the same boilerplate entries
 * VMware writes for a fresh image.
 *
 * @returns VBox status code.
 * @param   pImage      The image instance.
 * @param   pDescData   Buffer backing the descriptor lines.
 * @param   cbDescData  Size of the buffer in bytes.
 * @param   pDescriptor The descriptor to initialize.
 */
static int vmdkCreateDescriptor(PVMDKIMAGE pImage, char *pDescData,
                                size_t cbDescData, PVMDKDESCRIPTOR pDescriptor)
{
    pDescriptor->uFirstDesc = 0;
    pDescriptor->uFirstExtent = 0;
    pDescriptor->uFirstDDB = 0;
    pDescriptor->cLines = 0;
    pDescriptor->cbDescAlloc = cbDescData;
    pDescriptor->fDirty = false;
    pDescriptor->aLines[pDescriptor->cLines] = pDescData;
    memset(pDescriptor->aNextLines, '\0', sizeof(pDescriptor->aNextLines));

    int rc = vmdkDescInitStr(pImage, pDescriptor, "# Disk DescriptorFile");
    if (RT_SUCCESS(rc))
        rc = vmdkDescInitStr(pImage, pDescriptor, "version=1");
    if (RT_SUCCESS(rc))
    {
        /* uFirstDesc points at the "version=1" line just appended. */
        pDescriptor->uFirstDesc = pDescriptor->cLines - 1;
        rc = vmdkDescInitStr(pImage, pDescriptor, "");
    }
    if (RT_SUCCESS(rc))
        rc = vmdkDescInitStr(pImage, pDescriptor, "# Extent description");
    if (RT_SUCCESS(rc))
        rc = vmdkDescInitStr(pImage, pDescriptor, "NOACCESS 0 ZERO ");
    if (RT_SUCCESS(rc))
    {
        /* uFirstExtent points at the placeholder "NOACCESS 0 ZERO " line. */
        pDescriptor->uFirstExtent = pDescriptor->cLines - 1;
        rc = vmdkDescInitStr(pImage, pDescriptor, "");
    }
    if (RT_SUCCESS(rc))
    {
        /* The trailing space is created by VMware, too. */
        rc = vmdkDescInitStr(pImage, pDescriptor, "# The disk Data Base ");
    }
    if (RT_SUCCESS(rc))
        rc = vmdkDescInitStr(pImage, pDescriptor, "#DDB");
    if (RT_SUCCESS(rc))
        rc = vmdkDescInitStr(pImage, pDescriptor, "");
    if (RT_SUCCESS(rc))
        rc = vmdkDescInitStr(pImage, pDescriptor, "ddb.virtualHWVersion = \"4\"");
    if (RT_SUCCESS(rc))
    {
        /* uFirstDDB points at the "ddb.virtualHWVersion" line just appended. */
        pDescriptor->uFirstDDB = pDescriptor->cLines - 1;

        /* Now that the framework is in place, use the normal functions to insert
         * the remaining keys. */
        char szBuf[9];
        RTStrPrintf(szBuf, sizeof(szBuf), "%08x", RTRandU32());
        rc = vmdkDescSetStr(pImage, pDescriptor, pDescriptor->uFirstDesc,
                            "CID", szBuf);
    }
    if (RT_SUCCESS(rc))
        rc = vmdkDescSetStr(pImage, pDescriptor, pDescriptor->uFirstDesc,
                            "parentCID", "ffffffff");
    if (RT_SUCCESS(rc))
        rc = vmdkDescDDBSetStr(pImage, pDescriptor, "ddb.adapterType", "ide");

    return rc;
}
2267
/**
 * Parses a preprocessed descriptor: validates the version and create type,
 * reads the extent lines into pImage->pExtents, and loads geometry and
 * UUID values from the disk database (creating missing UUIDs for images
 * opened read/write).
 *
 * @returns VBox status code.
 * @param   pImage      The image instance.
 * @param   pDescData   The raw descriptor data (split destructively).
 * @param   cbDescData  Size of the descriptor buffer in bytes.
 */
static int vmdkParseDescriptor(PVMDKIMAGE pImage, char *pDescData, size_t cbDescData)
{
    int rc;
    unsigned cExtents;
    unsigned uLine;
    unsigned i;

    rc = vmdkPreprocessDescriptor(pImage, pDescData, cbDescData,
                                  &pImage->Descriptor);
    if (RT_FAILURE(rc))
        return rc;

    /* Check version, must be 1. */
    uint32_t uVersion;
    rc = vmdkDescBaseGetU32(&pImage->Descriptor, "version", &uVersion);
    if (RT_FAILURE(rc))
        return vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: error finding key 'version' in descriptor in '%s'"), pImage->pszFilename);
    if (uVersion != 1)
        return vdIfError(pImage->pIfError, VERR_VD_VMDK_UNSUPPORTED_VERSION, RT_SRC_POS, N_("VMDK: unsupported format version in descriptor in '%s'"), pImage->pszFilename);

    /* Get image creation type and determine image flags. */
    char *pszCreateType = NULL; /* initialized to make gcc shut up */
    rc = vmdkDescBaseGetStr(pImage, &pImage->Descriptor, "createType",
                            &pszCreateType);
    if (RT_FAILURE(rc))
        return vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: cannot get image type from descriptor in '%s'"), pImage->pszFilename);
    if (    !strcmp(pszCreateType, "twoGbMaxExtentSparse")
        ||  !strcmp(pszCreateType, "twoGbMaxExtentFlat"))
        pImage->uImageFlags |= VD_VMDK_IMAGE_FLAGS_SPLIT_2G;
    else if (    !strcmp(pszCreateType, "partitionedDevice")
             ||  !strcmp(pszCreateType, "fullDevice"))
        pImage->uImageFlags |= VD_VMDK_IMAGE_FLAGS_RAWDISK;
    else if (!strcmp(pszCreateType, "streamOptimized"))
        pImage->uImageFlags |= VD_VMDK_IMAGE_FLAGS_STREAM_OPTIMIZED;
    else if (!strcmp(pszCreateType, "vmfs"))
        pImage->uImageFlags |= VD_IMAGE_FLAGS_FIXED | VD_VMDK_IMAGE_FLAGS_ESX;
    RTMemTmpFree(pszCreateType);

    /* Count the number of extent config entries. */
    for (uLine = pImage->Descriptor.uFirstExtent, cExtents = 0;
         uLine != 0;
         uLine = pImage->Descriptor.aNextLines[uLine], cExtents++)
        /* nothing */;

    if (!pImage->pDescData && cExtents != 1)
    {
        /* Monolithic image, must have only one extent (already opened). */
        return vdIfError(pImage->pIfError, VERR_VD_VMDK_INVALID_HEADER, RT_SRC_POS, N_("VMDK: monolithic image may only have one extent in '%s'"), pImage->pszFilename);
    }

    if (pImage->pDescData)
    {
        /* Non-monolithic image, extents need to be allocated. */
        rc = vmdkCreateExtents(pImage, cExtents);
        if (RT_FAILURE(rc))
            return rc;
    }

    /* Parse each extent line: "<ACCESS> <size> <TYPE> [basename [offset]]". */
    for (i = 0, uLine = pImage->Descriptor.uFirstExtent;
         i < cExtents; i++, uLine = pImage->Descriptor.aNextLines[uLine])
    {
        char *pszLine = pImage->Descriptor.aLines[uLine];

        /* Access type of the extent. */
        if (!strncmp(pszLine, "RW", 2))
        {
            pImage->pExtents[i].enmAccess = VMDKACCESS_READWRITE;
            pszLine += 2;
        }
        else if (!strncmp(pszLine, "RDONLY", 6))
        {
            pImage->pExtents[i].enmAccess = VMDKACCESS_READONLY;
            pszLine += 6;
        }
        else if (!strncmp(pszLine, "NOACCESS", 8))
        {
            pImage->pExtents[i].enmAccess = VMDKACCESS_NOACCESS;
            pszLine += 8;
        }
        else
            return vdIfError(pImage->pIfError, VERR_VD_VMDK_INVALID_HEADER, RT_SRC_POS, N_("VMDK: parse error in extent description in '%s'"), pImage->pszFilename);
        if (*pszLine++ != ' ')
            return vdIfError(pImage->pIfError, VERR_VD_VMDK_INVALID_HEADER, RT_SRC_POS, N_("VMDK: parse error in extent description in '%s'"), pImage->pszFilename);

        /* Nominal size of the extent. */
        rc = RTStrToUInt64Ex(pszLine, &pszLine, 10,
                             &pImage->pExtents[i].cNominalSectors);
        if (RT_FAILURE(rc))
            return vdIfError(pImage->pIfError, VERR_VD_VMDK_INVALID_HEADER, RT_SRC_POS, N_("VMDK: parse error in extent description in '%s'"), pImage->pszFilename);
        if (*pszLine++ != ' ')
            return vdIfError(pImage->pIfError, VERR_VD_VMDK_INVALID_HEADER, RT_SRC_POS, N_("VMDK: parse error in extent description in '%s'"), pImage->pszFilename);

        /* Type of the extent. */
        if (!strncmp(pszLine, "SPARSE", 6))
        {
            pImage->pExtents[i].enmType = VMDKETYPE_HOSTED_SPARSE;
            pszLine += 6;
        }
        else if (!strncmp(pszLine, "FLAT", 4))
        {
            pImage->pExtents[i].enmType = VMDKETYPE_FLAT;
            pszLine += 4;
        }
        else if (!strncmp(pszLine, "ZERO", 4))
        {
            pImage->pExtents[i].enmType = VMDKETYPE_ZERO;
            pszLine += 4;
        }
        else if (!strncmp(pszLine, "VMFS", 4))
        {
            pImage->pExtents[i].enmType = VMDKETYPE_VMFS;
            pszLine += 4;
        }
        else
            return vdIfError(pImage->pIfError, VERR_VD_VMDK_INVALID_HEADER, RT_SRC_POS, N_("VMDK: parse error in extent description in '%s'"), pImage->pszFilename);

        if (pImage->pExtents[i].enmType == VMDKETYPE_ZERO)
        {
            /* This one has no basename or offset. */
            if (*pszLine == ' ')
                pszLine++;
            if (*pszLine != '\0')
                return vdIfError(pImage->pIfError, VERR_VD_VMDK_INVALID_HEADER, RT_SRC_POS, N_("VMDK: parse error in extent description in '%s'"), pImage->pszFilename);
            pImage->pExtents[i].pszBasename = NULL;
        }
        else
        {
            /* All other extent types have basename and optional offset. */
            if (*pszLine++ != ' ')
                return vdIfError(pImage->pIfError, VERR_VD_VMDK_INVALID_HEADER, RT_SRC_POS, N_("VMDK: parse error in extent description in '%s'"), pImage->pszFilename);

            /* Basename of the image. Surrounded by quotes. */
            char *pszBasename;
            rc = vmdkStringUnquote(pImage, pszLine, &pszBasename, &pszLine);
            if (RT_FAILURE(rc))
                return rc;
            pImage->pExtents[i].pszBasename = pszBasename;
            if (*pszLine == ' ')
            {
                pszLine++;
                if (*pszLine != '\0')
                {
                    /* Optional offset in extent specified. */
                    rc = RTStrToUInt64Ex(pszLine, &pszLine, 10,
                                         &pImage->pExtents[i].uSectorOffset);
                    if (RT_FAILURE(rc))
                        return vdIfError(pImage->pIfError, VERR_VD_VMDK_INVALID_HEADER, RT_SRC_POS, N_("VMDK: parse error in extent description in '%s'"), pImage->pszFilename);
                }
            }

            if (*pszLine != '\0')
                return vdIfError(pImage->pIfError, VERR_VD_VMDK_INVALID_HEADER, RT_SRC_POS, N_("VMDK: parse error in extent description in '%s'"), pImage->pszFilename);
        }
    }

    /* Determine PCHS geometry (autogenerate if necessary). */
    rc = vmdkDescDDBGetU32(pImage, &pImage->Descriptor,
                           VMDK_DDB_GEO_PCHS_CYLINDERS,
                           &pImage->PCHSGeometry.cCylinders);
    if (rc == VERR_VD_VMDK_VALUE_NOT_FOUND)
        pImage->PCHSGeometry.cCylinders = 0;
    else if (RT_FAILURE(rc))
        return vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: error getting PCHS geometry from extent description in '%s'"), pImage->pszFilename);
    rc = vmdkDescDDBGetU32(pImage, &pImage->Descriptor,
                           VMDK_DDB_GEO_PCHS_HEADS,
                           &pImage->PCHSGeometry.cHeads);
    if (rc == VERR_VD_VMDK_VALUE_NOT_FOUND)
        pImage->PCHSGeometry.cHeads = 0;
    else if (RT_FAILURE(rc))
        return vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: error getting PCHS geometry from extent description in '%s'"), pImage->pszFilename);
    rc = vmdkDescDDBGetU32(pImage, &pImage->Descriptor,
                           VMDK_DDB_GEO_PCHS_SECTORS,
                           &pImage->PCHSGeometry.cSectors);
    if (rc == VERR_VD_VMDK_VALUE_NOT_FOUND)
        pImage->PCHSGeometry.cSectors = 0;
    else if (RT_FAILURE(rc))
        return vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: error getting PCHS geometry from extent description in '%s'"), pImage->pszFilename);
    if (    pImage->PCHSGeometry.cCylinders == 0
        ||  pImage->PCHSGeometry.cHeads == 0
        ||  pImage->PCHSGeometry.cHeads > 16
        ||  pImage->PCHSGeometry.cSectors == 0
        ||  pImage->PCHSGeometry.cSectors > 63)
    {
        /* Mark PCHS geometry as not yet valid (can't do the calculation here
         * as the total image size isn't known yet). */
        pImage->PCHSGeometry.cCylinders = 0;
        pImage->PCHSGeometry.cHeads = 16;
        pImage->PCHSGeometry.cSectors = 63;
    }

    /* Determine LCHS geometry (set to 0 if not specified). */
    rc = vmdkDescDDBGetU32(pImage, &pImage->Descriptor,
                           VMDK_DDB_GEO_LCHS_CYLINDERS,
                           &pImage->LCHSGeometry.cCylinders);
    if (rc == VERR_VD_VMDK_VALUE_NOT_FOUND)
        pImage->LCHSGeometry.cCylinders = 0;
    else if (RT_FAILURE(rc))
        return vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: error getting LCHS geometry from extent description in '%s'"), pImage->pszFilename);
    rc = vmdkDescDDBGetU32(pImage, &pImage->Descriptor,
                           VMDK_DDB_GEO_LCHS_HEADS,
                           &pImage->LCHSGeometry.cHeads);
    if (rc == VERR_VD_VMDK_VALUE_NOT_FOUND)
        pImage->LCHSGeometry.cHeads = 0;
    else if (RT_FAILURE(rc))
        return vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: error getting LCHS geometry from extent description in '%s'"), pImage->pszFilename);
    rc = vmdkDescDDBGetU32(pImage, &pImage->Descriptor,
                           VMDK_DDB_GEO_LCHS_SECTORS,
                           &pImage->LCHSGeometry.cSectors);
    if (rc == VERR_VD_VMDK_VALUE_NOT_FOUND)
        pImage->LCHSGeometry.cSectors = 0;
    else if (RT_FAILURE(rc))
        return vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: error getting LCHS geometry from extent description in '%s'"), pImage->pszFilename);
    /* An incomplete LCHS geometry is treated as "not specified". */
    if (    pImage->LCHSGeometry.cCylinders == 0
        ||  pImage->LCHSGeometry.cHeads == 0
        ||  pImage->LCHSGeometry.cSectors == 0)
    {
        pImage->LCHSGeometry.cCylinders = 0;
        pImage->LCHSGeometry.cHeads = 0;
        pImage->LCHSGeometry.cSectors = 0;
    }

    /* Get image UUID. */
    rc = vmdkDescDDBGetUuid(pImage, &pImage->Descriptor, VMDK_DDB_IMAGE_UUID,
                            &pImage->ImageUuid);
    if (rc == VERR_VD_VMDK_VALUE_NOT_FOUND)
    {
        /* Image without UUID. Probably created by VMware and not yet used
         * by VirtualBox. Can only be added for images opened in read/write
         * mode, so don't bother producing a sensible UUID otherwise. */
        if (pImage->uOpenFlags & VD_OPEN_FLAGS_READONLY)
            RTUuidClear(&pImage->ImageUuid);
        else
        {
            rc = RTUuidCreate(&pImage->ImageUuid);
            if (RT_FAILURE(rc))
                return rc;
            rc = vmdkDescDDBSetUuid(pImage, &pImage->Descriptor,
                                    VMDK_DDB_IMAGE_UUID, &pImage->ImageUuid);
            if (RT_FAILURE(rc))
                return vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: error storing image UUID in descriptor in '%s'"), pImage->pszFilename);
        }
    }
    else if (RT_FAILURE(rc))
        return rc;

    /* Get image modification UUID. */
    rc = vmdkDescDDBGetUuid(pImage, &pImage->Descriptor,
                            VMDK_DDB_MODIFICATION_UUID,
                            &pImage->ModificationUuid);
    if (rc == VERR_VD_VMDK_VALUE_NOT_FOUND)
    {
        /* Image without UUID. Probably created by VMware and not yet used
         * by VirtualBox. Can only be added for images opened in read/write
         * mode, so don't bother producing a sensible UUID otherwise. */
        if (pImage->uOpenFlags & VD_OPEN_FLAGS_READONLY)
            RTUuidClear(&pImage->ModificationUuid);
        else
        {
            rc = RTUuidCreate(&pImage->ModificationUuid);
            if (RT_FAILURE(rc))
                return rc;
            rc = vmdkDescDDBSetUuid(pImage, &pImage->Descriptor,
                                    VMDK_DDB_MODIFICATION_UUID,
                                    &pImage->ModificationUuid);
            if (RT_FAILURE(rc))
                return vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: error storing image modification UUID in descriptor in '%s'"), pImage->pszFilename);
        }
    }
    else if (RT_FAILURE(rc))
        return rc;

    /* Get UUID of parent image. */
    rc = vmdkDescDDBGetUuid(pImage, &pImage->Descriptor, VMDK_DDB_PARENT_UUID,
                            &pImage->ParentUuid);
    if (rc == VERR_VD_VMDK_VALUE_NOT_FOUND)
    {
        /* Image without UUID. Probably created by VMware and not yet used
         * by VirtualBox. Can only be added for images opened in read/write
         * mode, so don't bother producing a sensible UUID otherwise. */
        if (pImage->uOpenFlags & VD_OPEN_FLAGS_READONLY)
            RTUuidClear(&pImage->ParentUuid);
        else
        {
            /* A nil parent UUID is stored, i.e. "no parent". */
            rc = RTUuidClear(&pImage->ParentUuid);
            if (RT_FAILURE(rc))
                return rc;
            rc = vmdkDescDDBSetUuid(pImage, &pImage->Descriptor,
                                    VMDK_DDB_PARENT_UUID, &pImage->ParentUuid);
            if (RT_FAILURE(rc))
                return vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: error storing parent UUID in descriptor in '%s'"), pImage->pszFilename);
        }
    }
    else if (RT_FAILURE(rc))
        return rc;

    /* Get parent image modification UUID. */
    rc = vmdkDescDDBGetUuid(pImage, &pImage->Descriptor,
                            VMDK_DDB_PARENT_MODIFICATION_UUID,
                            &pImage->ParentModificationUuid);
    if (rc == VERR_VD_VMDK_VALUE_NOT_FOUND)
    {
        /* Image without UUID. Probably created by VMware and not yet used
         * by VirtualBox. Can only be added for images opened in read/write
         * mode, so don't bother producing a sensible UUID otherwise. */
        if (pImage->uOpenFlags & VD_OPEN_FLAGS_READONLY)
            RTUuidClear(&pImage->ParentModificationUuid);
        else
        {
            RTUuidClear(&pImage->ParentModificationUuid);
            rc = vmdkDescDDBSetUuid(pImage, &pImage->Descriptor,
                                    VMDK_DDB_PARENT_MODIFICATION_UUID,
                                    &pImage->ParentModificationUuid);
            if (RT_FAILURE(rc))
                return vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: error storing parent modification UUID in descriptor in '%s'"), pImage->pszFilename);
        }
    }
    else if (RT_FAILURE(rc))
        return rc;

    return VINF_SUCCESS;
}
2589
2590/**
2591 * Internal : Prepares the descriptor to write to the image.
2592 */
2593static int vmdkDescriptorPrepare(PVMDKIMAGE pImage, uint64_t cbLimit,
2594 void **ppvData, size_t *pcbData)
2595{
2596 int rc = VINF_SUCCESS;
2597
2598 /*
2599 * Allocate temporary descriptor buffer.
2600 * In case there is no limit allocate a default
2601 * and increase if required.
2602 */
2603 size_t cbDescriptor = cbLimit ? cbLimit : 4 * _1K;
2604 char *pszDescriptor = (char *)RTMemAllocZ(cbDescriptor);
2605 size_t offDescriptor = 0;
2606
2607 if (!pszDescriptor)
2608 return VERR_NO_MEMORY;
2609
2610 for (unsigned i = 0; i < pImage->Descriptor.cLines; i++)
2611 {
2612 const char *psz = pImage->Descriptor.aLines[i];
2613 size_t cb = strlen(psz);
2614
2615 /*
2616 * Increase the descriptor if there is no limit and
2617 * there is not enough room left for this line.
2618 */
2619 if (offDescriptor + cb + 1 > cbDescriptor)
2620 {
2621 if (cbLimit)
2622 {
2623 rc = vdIfError(pImage->pIfError, VERR_BUFFER_OVERFLOW, RT_SRC_POS, N_("VMDK: descriptor too long in '%s'"), pImage->pszFilename);
2624 break;
2625 }
2626 else
2627 {
2628 char *pszDescriptorNew = NULL;
2629 LogFlow(("Increasing descriptor cache\n"));
2630
2631 pszDescriptorNew = (char *)RTMemRealloc(pszDescriptor, cbDescriptor + cb + 4 * _1K);
2632 if (!pszDescriptorNew)
2633 {
2634 rc = VERR_NO_MEMORY;
2635 break;
2636 }
2637 pszDescriptor = pszDescriptorNew;
2638 cbDescriptor += cb + 4 * _1K;
2639 }
2640 }
2641
2642 if (cb > 0)
2643 {
2644 memcpy(pszDescriptor + offDescriptor, psz, cb);
2645 offDescriptor += cb;
2646 }
2647
2648 memcpy(pszDescriptor + offDescriptor, "\n", 1);
2649 offDescriptor++;
2650 }
2651
2652 if (RT_SUCCESS(rc))
2653 {
2654 *ppvData = pszDescriptor;
2655 *pcbData = offDescriptor;
2656 }
2657 else if (pszDescriptor)
2658 RTMemFree(pszDescriptor);
2659
2660 return rc;
2661}
2662
2663/**
2664 * Internal: write/update the descriptor part of the image.
2665 */
2666static int vmdkWriteDescriptor(PVMDKIMAGE pImage, PVDIOCTX pIoCtx)
2667{
2668 int rc = VINF_SUCCESS;
2669 uint64_t cbLimit;
2670 uint64_t uOffset;
2671 PVMDKFILE pDescFile;
2672 void *pvDescriptor = NULL;
2673 size_t cbDescriptor;
2674
2675 if (pImage->pDescData)
2676 {
2677 /* Separate descriptor file. */
2678 uOffset = 0;
2679 cbLimit = 0;
2680 pDescFile = pImage->pFile;
2681 }
2682 else
2683 {
2684 /* Embedded descriptor file. */
2685 uOffset = VMDK_SECTOR2BYTE(pImage->pExtents[0].uDescriptorSector);
2686 cbLimit = VMDK_SECTOR2BYTE(pImage->pExtents[0].cDescriptorSectors);
2687 pDescFile = pImage->pExtents[0].pFile;
2688 }
2689 /* Bail out if there is no file to write to. */
2690 if (pDescFile == NULL)
2691 return VERR_INVALID_PARAMETER;
2692
2693 rc = vmdkDescriptorPrepare(pImage, cbLimit, &pvDescriptor, &cbDescriptor);
2694 if (RT_SUCCESS(rc))
2695 {
2696 rc = vdIfIoIntFileWriteMeta(pImage->pIfIo, pDescFile->pStorage,
2697 uOffset, pvDescriptor,
2698 cbLimit ? cbLimit : cbDescriptor,
2699 pIoCtx, NULL, NULL);
2700 if ( RT_FAILURE(rc)
2701 && rc != VERR_VD_ASYNC_IO_IN_PROGRESS)
2702 rc = vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: error writing descriptor in '%s'"), pImage->pszFilename);
2703 }
2704
2705 if (RT_SUCCESS(rc) && !cbLimit)
2706 {
2707 rc = vdIfIoIntFileSetSize(pImage->pIfIo, pDescFile->pStorage, cbDescriptor);
2708 if (RT_FAILURE(rc))
2709 rc = vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: error truncating descriptor in '%s'"), pImage->pszFilename);
2710 }
2711
2712 if (RT_SUCCESS(rc))
2713 pImage->Descriptor.fDirty = false;
2714
2715 if (pvDescriptor)
2716 RTMemFree(pvDescriptor);
2717 return rc;
2718
2719}
2720
2721/**
2722 * Internal: validate the consistency check values in a binary header.
2723 */
2724static int vmdkValidateHeader(PVMDKIMAGE pImage, PVMDKEXTENT pExtent, const SparseExtentHeader *pHeader)
2725{
2726 int rc = VINF_SUCCESS;
2727 if (RT_LE2H_U32(pHeader->magicNumber) != VMDK_SPARSE_MAGICNUMBER)
2728 {
2729 rc = vdIfError(pImage->pIfError, VERR_VD_VMDK_INVALID_HEADER, RT_SRC_POS, N_("VMDK: incorrect magic in sparse extent header in '%s'"), pExtent->pszFullname);
2730 return rc;
2731 }
2732 if (RT_LE2H_U32(pHeader->version) != 1 && RT_LE2H_U32(pHeader->version) != 3)
2733 {
2734 rc = vdIfError(pImage->pIfError, VERR_VD_VMDK_UNSUPPORTED_VERSION, RT_SRC_POS, N_("VMDK: incorrect version in sparse extent header in '%s', not a VMDK 1.0/1.1 conforming file"), pExtent->pszFullname);
2735 return rc;
2736 }
2737 if ( (RT_LE2H_U32(pHeader->flags) & 1)
2738 && ( pHeader->singleEndLineChar != '\n'
2739 || pHeader->nonEndLineChar != ' '
2740 || pHeader->doubleEndLineChar1 != '\r'
2741 || pHeader->doubleEndLineChar2 != '\n') )
2742 {
2743 rc = vdIfError(pImage->pIfError, VERR_VD_VMDK_INVALID_HEADER, RT_SRC_POS, N_("VMDK: corrupted by CR/LF translation in '%s'"), pExtent->pszFullname);
2744 return rc;
2745 }
2746 if (RT_LE2H_U64(pHeader->descriptorSize) > VMDK_SPARSE_DESCRIPTOR_SIZE_MAX)
2747 {
2748 rc = vdIfError(pImage->pIfError, VERR_VD_VMDK_INVALID_HEADER, RT_SRC_POS, N_("VMDK: descriptor size out of bounds (%llu vs %llu) '%s'"),
2749 pExtent->pszFullname, RT_LE2H_U64(pHeader->descriptorSize), VMDK_SPARSE_DESCRIPTOR_SIZE_MAX);
2750 return rc;
2751 }
2752 return rc;
2753}
2754
2755/**
2756 * Internal: read metadata belonging to an extent with binary header, i.e.
2757 * as found in monolithic files.
2758 */
2759static int vmdkReadBinaryMetaExtent(PVMDKIMAGE pImage, PVMDKEXTENT pExtent,
2760 bool fMagicAlreadyRead)
2761{
2762 SparseExtentHeader Header;
2763 int rc;
2764
2765 if (!fMagicAlreadyRead)
2766 rc = vdIfIoIntFileReadSync(pImage->pIfIo, pExtent->pFile->pStorage, 0,
2767 &Header, sizeof(Header));
2768 else
2769 {
2770 Header.magicNumber = RT_H2LE_U32(VMDK_SPARSE_MAGICNUMBER);
2771 rc = vdIfIoIntFileReadSync(pImage->pIfIo, pExtent->pFile->pStorage,
2772 RT_UOFFSETOF(SparseExtentHeader, version),
2773 &Header.version,
2774 sizeof(Header)
2775 - RT_UOFFSETOF(SparseExtentHeader, version));
2776 }
2777
2778 if (RT_SUCCESS(rc))
2779 {
2780 rc = vmdkValidateHeader(pImage, pExtent, &Header);
2781 if (RT_SUCCESS(rc))
2782 {
2783 uint64_t cbFile = 0;
2784
2785 if ( (RT_LE2H_U32(Header.flags) & RT_BIT(17))
2786 && RT_LE2H_U64(Header.gdOffset) == VMDK_GD_AT_END)
2787 pExtent->fFooter = true;
2788
2789 if ( !(pImage->uOpenFlags & VD_OPEN_FLAGS_READONLY)
2790 || ( pExtent->fFooter
2791 && !(pImage->uOpenFlags & VD_OPEN_FLAGS_SEQUENTIAL)))
2792 {
2793 rc = vdIfIoIntFileGetSize(pImage->pIfIo, pExtent->pFile->pStorage, &cbFile);
2794 if (RT_FAILURE(rc))
2795 rc = vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: cannot get size of '%s'"), pExtent->pszFullname);
2796 }
2797
2798 if (RT_SUCCESS(rc))
2799 {
2800 if (!(pImage->uOpenFlags & VD_OPEN_FLAGS_READONLY))
2801 pExtent->uAppendPosition = RT_ALIGN_64(cbFile, 512);
2802
2803 if ( pExtent->fFooter
2804 && ( !(pImage->uOpenFlags & VD_OPEN_FLAGS_READONLY)
2805 || !(pImage->uOpenFlags & VD_OPEN_FLAGS_SEQUENTIAL)))
2806 {
2807 /* Read the footer, which comes before the end-of-stream marker. */
2808 rc = vdIfIoIntFileReadSync(pImage->pIfIo, pExtent->pFile->pStorage,
2809 cbFile - 2*512, &Header,
2810 sizeof(Header));
2811 if (RT_FAILURE(rc))
2812 {
2813 vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: error reading extent footer in '%s'"), pExtent->pszFullname);
2814 rc = VERR_VD_VMDK_INVALID_HEADER;
2815 }
2816
2817 if (RT_SUCCESS(rc))
2818 rc = vmdkValidateHeader(pImage, pExtent, &Header);
2819 /* Prohibit any writes to this extent. */
2820 pExtent->uAppendPosition = 0;
2821 }
2822
2823 if (RT_SUCCESS(rc))
2824 {
2825 pExtent->uVersion = RT_LE2H_U32(Header.version);
2826 pExtent->enmType = VMDKETYPE_HOSTED_SPARSE; /* Just dummy value, changed later. */
2827 pExtent->cSectors = RT_LE2H_U64(Header.capacity);
2828 pExtent->cSectorsPerGrain = RT_LE2H_U64(Header.grainSize);
2829 pExtent->uDescriptorSector = RT_LE2H_U64(Header.descriptorOffset);
2830 pExtent->cDescriptorSectors = RT_LE2H_U64(Header.descriptorSize);
2831 pExtent->cGTEntries = RT_LE2H_U32(Header.numGTEsPerGT);
2832 pExtent->cOverheadSectors = RT_LE2H_U64(Header.overHead);
2833 pExtent->fUncleanShutdown = !!Header.uncleanShutdown;
2834 pExtent->uCompression = RT_LE2H_U16(Header.compressAlgorithm);
2835 if (RT_LE2H_U32(Header.flags) & RT_BIT(1))
2836 {
2837 pExtent->uSectorRGD = RT_LE2H_U64(Header.rgdOffset);
2838 pExtent->uSectorGD = RT_LE2H_U64(Header.gdOffset);
2839 }
2840 else
2841 {
2842 pExtent->uSectorGD = RT_LE2H_U64(Header.gdOffset);
2843 pExtent->uSectorRGD = 0;
2844 }
2845
2846 if (pExtent->uDescriptorSector && !pExtent->cDescriptorSectors)
2847 rc = vdIfError(pImage->pIfError, VERR_VD_VMDK_INVALID_HEADER, RT_SRC_POS,
2848 N_("VMDK: inconsistent embedded descriptor config in '%s'"), pExtent->pszFullname);
2849
2850 if ( RT_SUCCESS(rc)
2851 && ( pExtent->uSectorGD == VMDK_GD_AT_END
2852 || pExtent->uSectorRGD == VMDK_GD_AT_END)
2853 && ( !(pImage->uOpenFlags & VD_OPEN_FLAGS_READONLY)
2854 || !(pImage->uOpenFlags & VD_OPEN_FLAGS_SEQUENTIAL)))
2855 rc = vdIfError(pImage->pIfError, VERR_VD_VMDK_INVALID_HEADER, RT_SRC_POS,
2856 N_("VMDK: cannot resolve grain directory offset in '%s'"), pExtent->pszFullname);
2857
2858 if (RT_SUCCESS(rc))
2859 {
2860 uint64_t cSectorsPerGDE = pExtent->cGTEntries * pExtent->cSectorsPerGrain;
2861 if (!cSectorsPerGDE || cSectorsPerGDE > UINT32_MAX)
2862 rc = vdIfError(pImage->pIfError, VERR_VD_VMDK_INVALID_HEADER, RT_SRC_POS,
2863 N_("VMDK: incorrect grain directory size in '%s'"), pExtent->pszFullname);
2864 else
2865 {
2866 pExtent->cSectorsPerGDE = cSectorsPerGDE;
2867 pExtent->cGDEntries = (pExtent->cSectors + cSectorsPerGDE - 1) / cSectorsPerGDE;
2868
2869 /* Fix up the number of descriptor sectors, as some flat images have
2870 * really just one, and this causes failures when inserting the UUID
2871 * values and other extra information. */
2872 if (pExtent->cDescriptorSectors != 0 && pExtent->cDescriptorSectors < 4)
2873 {
2874 /* Do it the easy way - just fix it for flat images which have no
2875 * other complicated metadata which needs space too. */
2876 if ( pExtent->uDescriptorSector + 4 < pExtent->cOverheadSectors
2877 && pExtent->cGTEntries * pExtent->cGDEntries == 0)
2878 pExtent->cDescriptorSectors = 4;
2879 }
2880 }
2881 }
2882 }
2883 }
2884 }
2885 }
2886 else
2887 {
2888 vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: error reading extent header in '%s'"), pExtent->pszFullname);
2889 rc = VERR_VD_VMDK_INVALID_HEADER;
2890 }
2891
2892 if (RT_FAILURE(rc))
2893 vmdkFreeExtentData(pImage, pExtent, false);
2894
2895 return rc;
2896}
2897
2898/**
2899 * Internal: read additional metadata belonging to an extent. For those
2900 * extents which have no additional metadata just verify the information.
2901 */
2902static int vmdkReadMetaExtent(PVMDKIMAGE pImage, PVMDKEXTENT pExtent)
2903{
2904 int rc = VINF_SUCCESS;
2905
2906/* disabled the check as there are too many truncated vmdk images out there */
2907#ifdef VBOX_WITH_VMDK_STRICT_SIZE_CHECK
2908 uint64_t cbExtentSize;
2909 /* The image must be a multiple of a sector in size and contain the data
2910 * area (flat images only). If not, it means the image is at least
2911 * truncated, or even seriously garbled. */
2912 rc = vdIfIoIntFileGetSize(pImage->pIfIo, pExtent->pFile->pStorage, &cbExtentSize);
2913 if (RT_FAILURE(rc))
2914 rc = vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: error getting size in '%s'"), pExtent->pszFullname);
2915 else if ( cbExtentSize != RT_ALIGN_64(cbExtentSize, 512)
2916 && (pExtent->enmType != VMDKETYPE_FLAT || pExtent->cNominalSectors + pExtent->uSectorOffset > VMDK_BYTE2SECTOR(cbExtentSize)))
2917 rc = vdIfError(pImage->pIfError, VERR_VD_VMDK_INVALID_HEADER, RT_SRC_POS,
2918 N_("VMDK: file size is not a multiple of 512 in '%s', file is truncated or otherwise garbled"), pExtent->pszFullname);
2919#endif /* VBOX_WITH_VMDK_STRICT_SIZE_CHECK */
2920 if ( RT_SUCCESS(rc)
2921 && pExtent->enmType == VMDKETYPE_HOSTED_SPARSE)
2922 {
2923 /* The spec says that this must be a power of two and greater than 8,
2924 * but probably they meant not less than 8. */
2925 if ( (pExtent->cSectorsPerGrain & (pExtent->cSectorsPerGrain - 1))
2926 || pExtent->cSectorsPerGrain < 8)
2927 rc = vdIfError(pImage->pIfError, VERR_VD_VMDK_INVALID_HEADER, RT_SRC_POS,
2928 N_("VMDK: invalid extent grain size %u in '%s'"), pExtent->cSectorsPerGrain, pExtent->pszFullname);
2929 else
2930 {
2931 /* This code requires that a grain table must hold a power of two multiple
2932 * of the number of entries per GT cache entry. */
2933 if ( (pExtent->cGTEntries & (pExtent->cGTEntries - 1))
2934 || pExtent->cGTEntries < VMDK_GT_CACHELINE_SIZE)
2935 rc = vdIfError(pImage->pIfError, VERR_VD_VMDK_INVALID_HEADER, RT_SRC_POS,
2936 N_("VMDK: grain table cache size problem in '%s'"), pExtent->pszFullname);
2937 else
2938 {
2939 rc = vmdkAllocStreamBuffers(pImage, pExtent);
2940 if (RT_SUCCESS(rc))
2941 {
2942 /* Prohibit any writes to this streamOptimized extent. */
2943 if (pImage->uImageFlags & VD_VMDK_IMAGE_FLAGS_STREAM_OPTIMIZED)
2944 pExtent->uAppendPosition = 0;
2945
2946 if ( !(pImage->uImageFlags & VD_VMDK_IMAGE_FLAGS_STREAM_OPTIMIZED)
2947 || !(pImage->uOpenFlags & VD_OPEN_FLAGS_READONLY)
2948 || !(pImage->uOpenFlags & VD_OPEN_FLAGS_SEQUENTIAL))
2949 rc = vmdkReadGrainDirectory(pImage, pExtent);
2950 else
2951 {
2952 pExtent->uGrainSectorAbs = pExtent->cOverheadSectors;
2953 pExtent->cbGrainStreamRead = 0;
2954 }
2955 }
2956 }
2957 }
2958 }
2959
2960 if (RT_FAILURE(rc))
2961 vmdkFreeExtentData(pImage, pExtent, false);
2962
2963 return rc;
2964}
2965
2966/**
2967 * Internal: write/update the metadata for a sparse extent.
2968 */
2969static int vmdkWriteMetaSparseExtent(PVMDKIMAGE pImage, PVMDKEXTENT pExtent,
2970 uint64_t uOffset, PVDIOCTX pIoCtx)
2971{
2972 SparseExtentHeader Header;
2973
2974 memset(&Header, '\0', sizeof(Header));
2975 Header.magicNumber = RT_H2LE_U32(VMDK_SPARSE_MAGICNUMBER);
2976 Header.version = RT_H2LE_U32(pExtent->uVersion);
2977 Header.flags = RT_H2LE_U32(RT_BIT(0));
2978 if (pExtent->pRGD)
2979 Header.flags |= RT_H2LE_U32(RT_BIT(1));
2980 if (pImage->uImageFlags & VD_VMDK_IMAGE_FLAGS_STREAM_OPTIMIZED)
2981 Header.flags |= RT_H2LE_U32(RT_BIT(16) | RT_BIT(17));
2982 Header.capacity = RT_H2LE_U64(pExtent->cSectors);
2983 Header.grainSize = RT_H2LE_U64(pExtent->cSectorsPerGrain);
2984 Header.descriptorOffset = RT_H2LE_U64(pExtent->uDescriptorSector);
2985 Header.descriptorSize = RT_H2LE_U64(pExtent->cDescriptorSectors);
2986 Header.numGTEsPerGT = RT_H2LE_U32(pExtent->cGTEntries);
2987 if (pExtent->fFooter && uOffset == 0)
2988 {
2989 if (pExtent->pRGD)
2990 {
2991 Assert(pExtent->uSectorRGD);
2992 Header.rgdOffset = RT_H2LE_U64(VMDK_GD_AT_END);
2993 Header.gdOffset = RT_H2LE_U64(VMDK_GD_AT_END);
2994 }
2995 else
2996 Header.gdOffset = RT_H2LE_U64(VMDK_GD_AT_END);
2997 }
2998 else
2999 {
3000 if (pExtent->pRGD)
3001 {
3002 Assert(pExtent->uSectorRGD);
3003 Header.rgdOffset = RT_H2LE_U64(pExtent->uSectorRGD);
3004 Header.gdOffset = RT_H2LE_U64(pExtent->uSectorGD);
3005 }
3006 else
3007 Header.gdOffset = RT_H2LE_U64(pExtent->uSectorGD);
3008 }
3009 Header.overHead = RT_H2LE_U64(pExtent->cOverheadSectors);
3010 Header.uncleanShutdown = pExtent->fUncleanShutdown;
3011 Header.singleEndLineChar = '\n';
3012 Header.nonEndLineChar = ' ';
3013 Header.doubleEndLineChar1 = '\r';
3014 Header.doubleEndLineChar2 = '\n';
3015 Header.compressAlgorithm = RT_H2LE_U16(pExtent->uCompression);
3016
3017 int rc = vdIfIoIntFileWriteMeta(pImage->pIfIo, pExtent->pFile->pStorage,
3018 uOffset, &Header, sizeof(Header),
3019 pIoCtx, NULL, NULL);
3020 if (RT_FAILURE(rc) && (rc != VERR_VD_ASYNC_IO_IN_PROGRESS))
3021 rc = vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: error writing extent header in '%s'"), pExtent->pszFullname);
3022 return rc;
3023}
3024
3025/**
3026 * Internal: free the buffers used for streamOptimized images.
3027 */
3028static void vmdkFreeStreamBuffers(PVMDKEXTENT pExtent)
3029{
3030 if (pExtent->pvCompGrain)
3031 {
3032 RTMemFree(pExtent->pvCompGrain);
3033 pExtent->pvCompGrain = NULL;
3034 }
3035 if (pExtent->pvGrain)
3036 {
3037 RTMemFree(pExtent->pvGrain);
3038 pExtent->pvGrain = NULL;
3039 }
3040}
3041
3042/**
3043 * Internal: free the memory used by the extent data structure, optionally
3044 * deleting the referenced files.
3045 *
3046 * @returns VBox status code.
3047 * @param pImage Pointer to the image instance data.
3048 * @param pExtent The extent to free.
3049 * @param fDelete Flag whether to delete the backing storage.
3050 */
3051static int vmdkFreeExtentData(PVMDKIMAGE pImage, PVMDKEXTENT pExtent,
3052 bool fDelete)
3053{
3054 int rc = VINF_SUCCESS;
3055
3056 vmdkFreeGrainDirectory(pExtent);
3057 if (pExtent->pDescData)
3058 {
3059 RTMemFree(pExtent->pDescData);
3060 pExtent->pDescData = NULL;
3061 }
3062 if (pExtent->pFile != NULL)
3063 {
3064 /* Do not delete raw extents, these have full and base names equal. */
3065 rc = vmdkFileClose(pImage, &pExtent->pFile,
3066 fDelete
3067 && pExtent->pszFullname
3068 && pExtent->pszBasename
3069 && strcmp(pExtent->pszFullname, pExtent->pszBasename));
3070 }
3071 if (pExtent->pszBasename)
3072 {
3073 RTMemTmpFree((void *)pExtent->pszBasename);
3074 pExtent->pszBasename = NULL;
3075 }
3076 if (pExtent->pszFullname)
3077 {
3078 RTStrFree((char *)(void *)pExtent->pszFullname);
3079 pExtent->pszFullname = NULL;
3080 }
3081 vmdkFreeStreamBuffers(pExtent);
3082
3083 return rc;
3084}
3085
3086/**
3087 * Internal: allocate grain table cache if necessary for this image.
3088 */
3089static int vmdkAllocateGrainTableCache(PVMDKIMAGE pImage)
3090{
3091 PVMDKEXTENT pExtent;
3092
3093 /* Allocate grain table cache if any sparse extent is present. */
3094 for (unsigned i = 0; i < pImage->cExtents; i++)
3095 {
3096 pExtent = &pImage->pExtents[i];
3097 if (pExtent->enmType == VMDKETYPE_HOSTED_SPARSE)
3098 {
3099 /* Allocate grain table cache. */
3100 pImage->pGTCache = (PVMDKGTCACHE)RTMemAllocZ(sizeof(VMDKGTCACHE));
3101 if (!pImage->pGTCache)
3102 return VERR_NO_MEMORY;
3103 for (unsigned j = 0; j < VMDK_GT_CACHE_SIZE; j++)
3104 {
3105 PVMDKGTCACHEENTRY pGCE = &pImage->pGTCache->aGTCache[j];
3106 pGCE->uExtent = UINT32_MAX;
3107 }
3108 pImage->pGTCache->cEntries = VMDK_GT_CACHE_SIZE;
3109 break;
3110 }
3111 }
3112
3113 return VINF_SUCCESS;
3114}
3115
3116/**
3117 * Internal: allocate the given number of extents.
3118 */
3119static int vmdkCreateExtents(PVMDKIMAGE pImage, unsigned cExtents)
3120{
3121 int rc = VINF_SUCCESS;
3122 PVMDKEXTENT pExtents = (PVMDKEXTENT)RTMemAllocZ(cExtents * sizeof(VMDKEXTENT));
3123 if (pExtents)
3124 {
3125 for (unsigned i = 0; i < cExtents; i++)
3126 {
3127 pExtents[i].pFile = NULL;
3128 pExtents[i].pszBasename = NULL;
3129 pExtents[i].pszFullname = NULL;
3130 pExtents[i].pGD = NULL;
3131 pExtents[i].pRGD = NULL;
3132 pExtents[i].pDescData = NULL;
3133 pExtents[i].uVersion = 1;
3134 pExtents[i].uCompression = VMDK_COMPRESSION_NONE;
3135 pExtents[i].uExtent = i;
3136 pExtents[i].pImage = pImage;
3137 }
3138 pImage->pExtents = pExtents;
3139 pImage->cExtents = cExtents;
3140 }
3141 else
3142 rc = VERR_NO_MEMORY;
3143
3144 return rc;
3145}
3146
3147/**
3148 * Internal: Create an additional file backed extent in split images.
3149 * Supports split sparse and flat images.
3150 *
3151 * @returns VBox status code.
3152 * @param pImage VMDK image instance.
3153 * @param cbSize Desiried size in bytes of new extent.
3154 */
3155static int vmdkAddFileBackedExtent(PVMDKIMAGE pImage, uint64_t cbSize)
3156{
3157 int rc = VINF_SUCCESS;
3158 unsigned uImageFlags = pImage->uImageFlags;
3159
3160 /* Check for unsupported image type. */
3161 if ((uImageFlags & VD_VMDK_IMAGE_FLAGS_ESX)
3162 || (uImageFlags & VD_VMDK_IMAGE_FLAGS_STREAM_OPTIMIZED)
3163 || (uImageFlags & VD_VMDK_IMAGE_FLAGS_RAWDISK))
3164 {
3165 return VERR_NOT_SUPPORTED;
3166 }
3167
3168 /* Allocate array of extents and copy existing extents to it. */
3169 PVMDKEXTENT pNewExtents = (PVMDKEXTENT)RTMemAllocZ((pImage->cExtents + 1) * sizeof(VMDKEXTENT));
3170 if (!pNewExtents)
3171 {
3172 return VERR_NO_MEMORY;
3173 }
3174
3175 memcpy(pNewExtents, pImage->pExtents, pImage->cExtents * sizeof(VMDKEXTENT));
3176
3177 /* Locate newly created extent and populate default metadata. */
3178 PVMDKEXTENT pExtent = &pNewExtents[pImage->cExtents];
3179
3180 pExtent->pFile = NULL;
3181 pExtent->pszBasename = NULL;
3182 pExtent->pszFullname = NULL;
3183 pExtent->pGD = NULL;
3184 pExtent->pRGD = NULL;
3185 pExtent->pDescData = NULL;
3186 pExtent->uVersion = 1;
3187 pExtent->uCompression = VMDK_COMPRESSION_NONE;
3188 pExtent->uExtent = pImage->cExtents;
3189 pExtent->pImage = pImage;
3190 pExtent->cNominalSectors = VMDK_BYTE2SECTOR(cbSize);
3191 pExtent->enmAccess = VMDKACCESS_READWRITE;
3192 pExtent->uSectorOffset = 0;
3193 pExtent->fMetaDirty = true;
3194
3195 /* Apply image type specific meta data. */
3196 if (uImageFlags & VD_IMAGE_FLAGS_FIXED)
3197 {
3198 pExtent->enmType = VMDKETYPE_FLAT;
3199 }
3200 else
3201 {
3202 pExtent->enmType = VMDKETYPE_HOSTED_SPARSE;
3203 pExtent->cSectors = VMDK_BYTE2SECTOR(RT_ALIGN_64(cbSize, _64K));
3204 pExtent->cSectorsPerGrain = VMDK_BYTE2SECTOR(_64K);
3205 pExtent->cGTEntries = 512;
3206
3207 uint64_t const cSectorsPerGDE = pExtent->cGTEntries * pExtent->cSectorsPerGrain;
3208 pExtent->cSectorsPerGDE = cSectorsPerGDE;
3209 pExtent->cGDEntries = (pExtent->cSectors + cSectorsPerGDE - 1) / cSectorsPerGDE;
3210 }
3211
3212 /* Allocate and set file name for extent. */
3213 char *pszBasenameSubstr = RTPathFilename(pImage->pszFilename);
3214 AssertPtr(pszBasenameSubstr);
3215
3216 char *pszBasenameSuff = RTPathSuffix(pszBasenameSubstr);
3217 char *pszBasenameBase = RTStrDup(pszBasenameSubstr);
3218 RTPathStripSuffix(pszBasenameBase);
3219 char *pszTmp;
3220 size_t cbTmp;
3221
3222 if (pImage->uImageFlags & VD_IMAGE_FLAGS_FIXED)
3223 RTStrAPrintf(&pszTmp, "%s-f%03d%s", pszBasenameBase,
3224 pExtent->uExtent + 1, pszBasenameSuff);
3225 else
3226 RTStrAPrintf(&pszTmp, "%s-s%03d%s", pszBasenameBase, pExtent->uExtent + 1,
3227 pszBasenameSuff);
3228
3229 RTStrFree(pszBasenameBase);
3230 if (!pszTmp)
3231 return VERR_NO_STR_MEMORY;
3232 cbTmp = strlen(pszTmp) + 1;
3233 char *pszBasename = (char *)RTMemTmpAlloc(cbTmp);
3234 if (!pszBasename)
3235 {
3236 RTStrFree(pszTmp);
3237 return VERR_NO_MEMORY;
3238 }
3239
3240 memcpy(pszBasename, pszTmp, cbTmp);
3241 RTStrFree(pszTmp);
3242
3243 pExtent->pszBasename = pszBasename;
3244
3245 char *pszBasedirectory = RTStrDup(pImage->pszFilename);
3246 if (!pszBasedirectory)
3247 return VERR_NO_STR_MEMORY;
3248 RTPathStripFilename(pszBasedirectory);
3249 char *pszFullname = RTPathJoinA(pszBasedirectory, pExtent->pszBasename);
3250 RTStrFree(pszBasedirectory);
3251 if (!pszFullname)
3252 return VERR_NO_STR_MEMORY;
3253 pExtent->pszFullname = pszFullname;
3254
3255 /* Create file for extent. */
3256 rc = vmdkFileOpen(pImage, &pExtent->pFile, pExtent->pszBasename, pExtent->pszFullname,
3257 VDOpenFlagsToFileOpenFlags(pImage->uOpenFlags,
3258 true /* fCreate */));
3259 if (RT_FAILURE(rc))
3260 return vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: could not create new file '%s'"), pExtent->pszFullname);
3261
3262 if (uImageFlags & VD_IMAGE_FLAGS_FIXED)
3263 {
3264 /* For flat images: Pre allocate file space. */
3265 rc = vdIfIoIntFileSetAllocationSize(pImage->pIfIo, pExtent->pFile->pStorage, cbSize,
3266 0 /* fFlags */, NULL, 0, 0);
3267 if (RT_FAILURE(rc))
3268 return vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: could not set size of new file '%s'"), pExtent->pszFullname);
3269 }
3270 else
3271 {
3272 /* For sparse images: Allocate new grain directories/tables. */
3273 /* fPreAlloc should never be false because VMware can't use such images. */
3274 rc = vmdkCreateGrainDirectory(pImage, pExtent,
3275 RT_MAX( pExtent->uDescriptorSector
3276 + pExtent->cDescriptorSectors,
3277 1),
3278 true /* fPreAlloc */);
3279 if (RT_FAILURE(rc))
3280 return vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: could not create new grain directory in '%s'"), pExtent->pszFullname);
3281 }
3282
3283 /* Insert new extent into descriptor file. */
3284 rc = vmdkDescExtInsert(pImage, &pImage->Descriptor, pExtent->enmAccess,
3285 pExtent->cNominalSectors, pExtent->enmType,
3286 pExtent->pszBasename, pExtent->uSectorOffset);
3287 if (RT_FAILURE(rc))
3288 return vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: could not insert the extent list into descriptor in '%s'"), pImage->pszFilename);
3289
3290 pImage->pExtents = pNewExtents;
3291 pImage->cExtents++;
3292
3293 return rc;
3294}
3295
3296/**
3297 * Reads and processes the descriptor embedded in sparse images.
3298 *
3299 * @returns VBox status code.
3300 * @param pImage VMDK image instance.
3301 * @param pFile The sparse file handle.
3302 */
3303static int vmdkDescriptorReadSparse(PVMDKIMAGE pImage, PVMDKFILE pFile)
3304{
3305 /* It's a hosted single-extent image. */
3306 int rc = vmdkCreateExtents(pImage, 1);
3307 if (RT_SUCCESS(rc))
3308 {
3309 /* The opened file is passed to the extent. No separate descriptor
3310 * file, so no need to keep anything open for the image. */
3311 PVMDKEXTENT pExtent = &pImage->pExtents[0];
3312 pExtent->pFile = pFile;
3313 pImage->pFile = NULL;
3314 pExtent->pszFullname = RTPathAbsDup(pImage->pszFilename);
3315 if (RT_LIKELY(pExtent->pszFullname))
3316 {
3317 /* As we're dealing with a monolithic image here, there must
3318 * be a descriptor embedded in the image file. */
3319 rc = vmdkReadBinaryMetaExtent(pImage, pExtent, true /* fMagicAlreadyRead */);
3320 if ( RT_SUCCESS(rc)
3321 && pExtent->uDescriptorSector
3322 && pExtent->cDescriptorSectors)
3323 {
3324 /* HACK: extend the descriptor if it is unusually small and it fits in
3325 * the unused space after the image header. Allows opening VMDK files
3326 * with extremely small descriptor in read/write mode.
3327 *
3328 * The previous version introduced a possible regression for VMDK stream
3329 * optimized images from VMware which tend to have only a single sector sized
3330 * descriptor. Increasing the descriptor size resulted in adding the various uuid
3331 * entries required to make it work with VBox but for stream optimized images
3332 * the updated binary header wasn't written to the disk creating a mismatch
3333 * between advertised and real descriptor size.
3334 *
3335 * The descriptor size will be increased even if opened readonly now if there
3336 * enough room but the new value will not be written back to the image.
3337 */
3338 if ( pExtent->cDescriptorSectors < 3
3339 && (int64_t)pExtent->uSectorGD - pExtent->uDescriptorSector >= 4
3340 && (!pExtent->uSectorRGD || (int64_t)pExtent->uSectorRGD - pExtent->uDescriptorSector >= 4))
3341 {
3342 uint64_t cDescriptorSectorsOld = pExtent->cDescriptorSectors;
3343
3344 pExtent->cDescriptorSectors = 4;
3345 if (!(pImage->uOpenFlags & VD_OPEN_FLAGS_READONLY))
3346 {
3347 /*
3348 * Update the on disk number now to make sure we don't introduce inconsistencies
3349 * in case of stream optimized images from VMware where the descriptor is just
3350 * one sector big (the binary header is not written to disk for complete
3351 * stream optimized images in vmdkFlushImage()).
3352 */
3353 uint64_t u64DescSizeNew = RT_H2LE_U64(pExtent->cDescriptorSectors);
3354 rc = vdIfIoIntFileWriteSync(pImage->pIfIo, pFile->pStorage,
3355 RT_UOFFSETOF(SparseExtentHeader, descriptorSize),
3356 &u64DescSizeNew, sizeof(u64DescSizeNew));
3357 if (RT_FAILURE(rc))
3358 {
3359 LogFlowFunc(("Increasing the descriptor size failed with %Rrc\n", rc));
3360 /* Restore the old size and carry on. */
3361 pExtent->cDescriptorSectors = cDescriptorSectorsOld;
3362 }
3363 }
3364 }
3365 /* Read the descriptor from the extent. */
3366 pExtent->pDescData = (char *)RTMemAllocZ(VMDK_SECTOR2BYTE(pExtent->cDescriptorSectors));
3367 if (RT_LIKELY(pExtent->pDescData))
3368 {
3369 rc = vdIfIoIntFileReadSync(pImage->pIfIo, pExtent->pFile->pStorage,
3370 VMDK_SECTOR2BYTE(pExtent->uDescriptorSector),
3371 pExtent->pDescData,
3372 VMDK_SECTOR2BYTE(pExtent->cDescriptorSectors));
3373 if (RT_SUCCESS(rc))
3374 {
3375 rc = vmdkParseDescriptor(pImage, pExtent->pDescData,
3376 VMDK_SECTOR2BYTE(pExtent->cDescriptorSectors));
3377 if ( RT_SUCCESS(rc)
3378 && ( !(pImage->uImageFlags & VD_VMDK_IMAGE_FLAGS_STREAM_OPTIMIZED)
3379 || !(pImage->uOpenFlags & VD_OPEN_FLAGS_ASYNC_IO)))
3380 {
3381 rc = vmdkReadMetaExtent(pImage, pExtent);
3382 if (RT_SUCCESS(rc))
3383 {
3384 /* Mark the extent as unclean if opened in read-write mode. */
3385 if ( !(pImage->uOpenFlags & VD_OPEN_FLAGS_READONLY)
3386 && !(pImage->uImageFlags & VD_VMDK_IMAGE_FLAGS_STREAM_OPTIMIZED))
3387 {
3388 pExtent->fUncleanShutdown = true;
3389 pExtent->fMetaDirty = true;
3390 }
3391 }
3392 }
3393 else if (RT_SUCCESS(rc))
3394 rc = VERR_NOT_SUPPORTED;
3395 }
3396 else
3397 rc = vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: read error for descriptor in '%s'"), pExtent->pszFullname);
3398 }
3399 else
3400 rc = VERR_NO_MEMORY;
3401 }
3402 else if (RT_SUCCESS(rc))
3403 rc = vdIfError(pImage->pIfError, VERR_VD_VMDK_INVALID_HEADER, RT_SRC_POS, N_("VMDK: monolithic image without descriptor in '%s'"), pImage->pszFilename);
3404 }
3405 else
3406 rc = VERR_NO_MEMORY;
3407 }
3408
3409 return rc;
3410}
3411
3412/**
3413 * Reads the descriptor from a pure text file.
3414 *
3415 * @returns VBox status code.
3416 * @param pImage VMDK image instance.
3417 * @param pFile The descriptor file handle.
3418 */
3419static int vmdkDescriptorReadAscii(PVMDKIMAGE pImage, PVMDKFILE pFile)
3420{
3421 /* Allocate at least 10K, and make sure that there is 5K free space
3422 * in case new entries need to be added to the descriptor. Never
3423 * allocate more than 128K, because that's no valid descriptor file
3424 * and will result in the correct "truncated read" error handling. */
3425 uint64_t cbFileSize;
3426 int rc = vdIfIoIntFileGetSize(pImage->pIfIo, pFile->pStorage, &cbFileSize);
3427 if ( RT_SUCCESS(rc)
3428 && cbFileSize >= 50)
3429 {
3430 uint64_t cbSize = cbFileSize;
3431 if (cbSize % VMDK_SECTOR2BYTE(10))
3432 cbSize += VMDK_SECTOR2BYTE(20) - cbSize % VMDK_SECTOR2BYTE(10);
3433 else
3434 cbSize += VMDK_SECTOR2BYTE(10);
3435 cbSize = RT_MIN(cbSize, _128K);
3436 pImage->cbDescAlloc = RT_MAX(VMDK_SECTOR2BYTE(20), cbSize);
3437 pImage->pDescData = (char *)RTMemAllocZ(pImage->cbDescAlloc);
3438 if (RT_LIKELY(pImage->pDescData))
3439 {
3440 rc = vdIfIoIntFileReadSync(pImage->pIfIo, pFile->pStorage, 0, pImage->pDescData,
3441 RT_MIN(pImage->cbDescAlloc, cbFileSize));
3442 if (RT_SUCCESS(rc))
3443 {
3444#if 0 /** @todo Revisit */
3445 cbRead += sizeof(u32Magic);
3446 if (cbRead == pImage->cbDescAlloc)
3447 {
3448 /* Likely the read is truncated. Better fail a bit too early
3449 * (normally the descriptor is much smaller than our buffer). */
3450 rc = vdIfError(pImage->pIfError, VERR_VD_VMDK_INVALID_HEADER, RT_SRC_POS, N_("VMDK: cannot read descriptor in '%s'"), pImage->pszFilename);
3451 goto out;
3452 }
3453#endif
3454 rc = vmdkParseDescriptor(pImage, pImage->pDescData,
3455 pImage->cbDescAlloc);
3456 if (RT_SUCCESS(rc))
3457 {
3458 for (unsigned i = 0; i < pImage->cExtents && RT_SUCCESS(rc); i++)
3459 {
3460 PVMDKEXTENT pExtent = &pImage->pExtents[i];
3461 if (pExtent->pszBasename)
3462 {
3463 /* Hack to figure out whether the specified name in the
3464 * extent descriptor is absolute. Doesn't always work, but
3465 * should be good enough for now. */
3466 char *pszFullname;
3467 /** @todo implement proper path absolute check. */
3468 if (pExtent->pszBasename[0] == RTPATH_SLASH)
3469 {
3470 pszFullname = RTStrDup(pExtent->pszBasename);
3471 if (!pszFullname)
3472 {
3473 rc = VERR_NO_MEMORY;
3474 break;
3475 }
3476 }
3477 else
3478 {
3479 char *pszDirname = RTStrDup(pImage->pszFilename);
3480 if (!pszDirname)
3481 {
3482 rc = VERR_NO_MEMORY;
3483 break;
3484 }
3485 RTPathStripFilename(pszDirname);
3486 pszFullname = RTPathJoinA(pszDirname, pExtent->pszBasename);
3487 RTStrFree(pszDirname);
3488 if (!pszFullname)
3489 {
3490 rc = VERR_NO_STR_MEMORY;
3491 break;
3492 }
3493 }
3494 pExtent->pszFullname = pszFullname;
3495 }
3496 else
3497 pExtent->pszFullname = NULL;
3498
3499 unsigned uOpenFlags = pImage->uOpenFlags | ((pExtent->enmAccess == VMDKACCESS_READONLY) ? VD_OPEN_FLAGS_READONLY : 0);
3500 switch (pExtent->enmType)
3501 {
3502 case VMDKETYPE_HOSTED_SPARSE:
3503 rc = vmdkFileOpen(pImage, &pExtent->pFile, pExtent->pszBasename, pExtent->pszFullname,
3504 VDOpenFlagsToFileOpenFlags(uOpenFlags, false /* fCreate */));
3505 if (RT_FAILURE(rc))
3506 {
3507 /* Do NOT signal an appropriate error here, as the VD
3508 * layer has the choice of retrying the open if it
3509 * failed. */
3510 break;
3511 }
3512 rc = vmdkReadBinaryMetaExtent(pImage, pExtent,
3513 false /* fMagicAlreadyRead */);
3514 if (RT_FAILURE(rc))
3515 break;
3516 rc = vmdkReadMetaExtent(pImage, pExtent);
3517 if (RT_FAILURE(rc))
3518 break;
3519
3520 /* Mark extent as unclean if opened in read-write mode. */
3521 if (!(pImage->uOpenFlags & VD_OPEN_FLAGS_READONLY))
3522 {
3523 pExtent->fUncleanShutdown = true;
3524 pExtent->fMetaDirty = true;
3525 }
3526 break;
3527 case VMDKETYPE_VMFS:
3528 case VMDKETYPE_FLAT:
3529 rc = vmdkFileOpen(pImage, &pExtent->pFile, pExtent->pszBasename, pExtent->pszFullname,
3530 VDOpenFlagsToFileOpenFlags(uOpenFlags, false /* fCreate */));
3531 if (RT_FAILURE(rc))
3532 {
3533 /* Do NOT signal an appropriate error here, as the VD
3534 * layer has the choice of retrying the open if it
3535 * failed. */
3536 break;
3537 }
3538 break;
3539 case VMDKETYPE_ZERO:
3540 /* Nothing to do. */
3541 break;
3542 default:
3543 AssertMsgFailed(("unknown vmdk extent type %d\n", pExtent->enmType));
3544 }
3545 }
3546 }
3547 }
3548 else
3549 rc = vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: read error for descriptor in '%s'"), pImage->pszFilename);
3550 }
3551 else
3552 rc = VERR_NO_MEMORY;
3553 }
3554 else if (RT_SUCCESS(rc))
3555 rc = vdIfError(pImage->pIfError, VERR_VD_VMDK_INVALID_HEADER, RT_SRC_POS, N_("VMDK: descriptor in '%s' is too short"), pImage->pszFilename);
3556
3557 return rc;
3558}
3559
3560/**
3561 * Read and process the descriptor based on the image type.
3562 *
3563 * @returns VBox status code.
3564 * @param pImage VMDK image instance.
3565 * @param pFile VMDK file handle.
3566 */
3567static int vmdkDescriptorRead(PVMDKIMAGE pImage, PVMDKFILE pFile)
3568{
3569 uint32_t u32Magic;
3570
3571 /* Read magic (if present). */
3572 int rc = vdIfIoIntFileReadSync(pImage->pIfIo, pFile->pStorage, 0,
3573 &u32Magic, sizeof(u32Magic));
3574 if (RT_SUCCESS(rc))
3575 {
3576 /* Handle the file according to its magic number. */
3577 if (RT_LE2H_U32(u32Magic) == VMDK_SPARSE_MAGICNUMBER)
3578 rc = vmdkDescriptorReadSparse(pImage, pFile);
3579 else
3580 rc = vmdkDescriptorReadAscii(pImage, pFile);
3581 }
3582 else
3583 {
3584 vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: error reading the magic number in '%s'"), pImage->pszFilename);
3585 rc = VERR_VD_VMDK_INVALID_HEADER;
3586 }
3587
3588 return rc;
3589}
3590
3591/**
3592 * Internal: Open an image, constructing all necessary data structures.
3593 */
3594static int vmdkOpenImage(PVMDKIMAGE pImage, unsigned uOpenFlags)
3595{
3596 pImage->uOpenFlags = uOpenFlags;
3597 pImage->pIfError = VDIfErrorGet(pImage->pVDIfsDisk);
3598 pImage->pIfIo = VDIfIoIntGet(pImage->pVDIfsImage);
3599 AssertPtrReturn(pImage->pIfIo, VERR_INVALID_PARAMETER);
3600
3601 /*
3602 * Open the image.
3603 * We don't have to check for asynchronous access because
3604 * we only support raw access and the opened file is a description
3605 * file were no data is stored.
3606 */
3607 PVMDKFILE pFile;
3608 int rc = vmdkFileOpen(pImage, &pFile, NULL, pImage->pszFilename,
3609 VDOpenFlagsToFileOpenFlags(uOpenFlags, false /* fCreate */));
3610 if (RT_SUCCESS(rc))
3611 {
3612 pImage->pFile = pFile;
3613
3614 rc = vmdkDescriptorRead(pImage, pFile);
3615 if (RT_SUCCESS(rc))
3616 {
3617 /* Determine PCHS geometry if not set. */
3618 if (pImage->PCHSGeometry.cCylinders == 0)
3619 {
3620 uint64_t cCylinders = VMDK_BYTE2SECTOR(pImage->cbSize)
3621 / pImage->PCHSGeometry.cHeads
3622 / pImage->PCHSGeometry.cSectors;
3623 pImage->PCHSGeometry.cCylinders = (unsigned)RT_MIN(cCylinders, 16383);
3624 if ( !(pImage->uOpenFlags & VD_OPEN_FLAGS_READONLY)
3625 && !(pImage->uImageFlags & VD_VMDK_IMAGE_FLAGS_STREAM_OPTIMIZED))
3626 {
3627 rc = vmdkDescSetPCHSGeometry(pImage, &pImage->PCHSGeometry);
3628 AssertRC(rc);
3629 }
3630 }
3631
3632 /* Update the image metadata now in case has changed. */
3633 rc = vmdkFlushImage(pImage, NULL);
3634 if (RT_SUCCESS(rc))
3635 {
3636 /* Figure out a few per-image constants from the extents. */
3637 pImage->cbSize = 0;
3638 for (unsigned i = 0; i < pImage->cExtents; i++)
3639 {
3640 PVMDKEXTENT pExtent = &pImage->pExtents[i];
3641 if (pExtent->enmType == VMDKETYPE_HOSTED_SPARSE)
3642 {
3643 /* Here used to be a check whether the nominal size of an extent
3644 * is a multiple of the grain size. The spec says that this is
3645 * always the case, but unfortunately some files out there in the
3646 * wild violate the spec (e.g. ReactOS 0.3.1). */
3647 }
3648 else if ( pExtent->enmType == VMDKETYPE_FLAT
3649 || pExtent->enmType == VMDKETYPE_ZERO)
3650 pImage->uImageFlags |= VD_IMAGE_FLAGS_FIXED;
3651
3652 pImage->cbSize += VMDK_SECTOR2BYTE(pExtent->cNominalSectors);
3653 }
3654
3655 if ( !(pImage->uImageFlags & VD_VMDK_IMAGE_FLAGS_STREAM_OPTIMIZED)
3656 || !(pImage->uOpenFlags & VD_OPEN_FLAGS_READONLY)
3657 || !(pImage->uOpenFlags & VD_OPEN_FLAGS_SEQUENTIAL))
3658 rc = vmdkAllocateGrainTableCache(pImage);
3659 }
3660 }
3661 }
3662 /* else: Do NOT signal an appropriate error here, as the VD layer has the
3663 * choice of retrying the open if it failed. */
3664
3665 if (RT_SUCCESS(rc))
3666 {
3667 PVDREGIONDESC pRegion = &pImage->RegionList.aRegions[0];
3668 pImage->RegionList.fFlags = 0;
3669 pImage->RegionList.cRegions = 1;
3670
3671 pRegion->offRegion = 0; /* Disk start. */
3672 pRegion->cbBlock = 512;
3673 pRegion->enmDataForm = VDREGIONDATAFORM_RAW;
3674 pRegion->enmMetadataForm = VDREGIONMETADATAFORM_NONE;
3675 pRegion->cbData = 512;
3676 pRegion->cbMetadata = 0;
3677 pRegion->cRegionBlocksOrBytes = pImage->cbSize;
3678 }
3679 else
3680 vmdkFreeImage(pImage, false, false /*fFlush*/); /* Don't try to flush anything if opening failed. */
3681 return rc;
3682}
3683
3684/**
3685 * Frees a raw descriptor.
3686 * @internal
3687 */
3688static int vmdkRawDescFree(PVDISKRAW pRawDesc)
3689{
3690 if (!pRawDesc)
3691 return VINF_SUCCESS;
3692
3693 RTStrFree(pRawDesc->pszRawDisk);
3694 pRawDesc->pszRawDisk = NULL;
3695
3696 /* Partitions: */
3697 for (unsigned i = 0; i < pRawDesc->cPartDescs; i++)
3698 {
3699 RTStrFree(pRawDesc->pPartDescs[i].pszRawDevice);
3700 pRawDesc->pPartDescs[i].pszRawDevice = NULL;
3701
3702 RTMemFree(pRawDesc->pPartDescs[i].pvPartitionData);
3703 pRawDesc->pPartDescs[i].pvPartitionData = NULL;
3704 }
3705
3706 RTMemFree(pRawDesc->pPartDescs);
3707 pRawDesc->pPartDescs = NULL;
3708
3709 RTMemFree(pRawDesc);
3710 return VINF_SUCCESS;
3711}
3712
3713/**
3714 * Helper that grows the raw partition descriptor table by @a cToAdd entries,
3715 * returning the pointer to the first new entry.
3716 * @internal
3717 */
3718static int vmdkRawDescAppendPartDesc(PVMDKIMAGE pImage, PVDISKRAW pRawDesc, uint32_t cToAdd, PVDISKRAWPARTDESC *ppRet)
3719{
3720 uint32_t const cOld = pRawDesc->cPartDescs;
3721 uint32_t const cNew = cOld + cToAdd;
3722 PVDISKRAWPARTDESC paNew = (PVDISKRAWPARTDESC)RTMemReallocZ(pRawDesc->pPartDescs,
3723 cOld * sizeof(pRawDesc->pPartDescs[0]),
3724 cNew * sizeof(pRawDesc->pPartDescs[0]));
3725 if (paNew)
3726 {
3727 pRawDesc->cPartDescs = cNew;
3728 pRawDesc->pPartDescs = paNew;
3729
3730 *ppRet = &paNew[cOld];
3731 return VINF_SUCCESS;
3732 }
3733 *ppRet = NULL;
3734 return vdIfError(pImage->pIfError, VERR_NO_MEMORY, RT_SRC_POS,
3735 N_("VMDK: Image path: '%s'. Out of memory growing the partition descriptors (%u -> %u)."),
3736 pImage->pszFilename, cOld, cNew);
3737}
3738
3739/**
3740 * @callback_method_impl{FNRTSORTCMP}
3741 */
3742static DECLCALLBACK(int) vmdkRawDescPartComp(void const *pvElement1, void const *pvElement2, void *pvUser)
3743{
3744 RT_NOREF(pvUser);
3745 int64_t const iDelta = ((PVDISKRAWPARTDESC)pvElement1)->offStartInVDisk - ((PVDISKRAWPARTDESC)pvElement2)->offStartInVDisk;
3746 return iDelta < 0 ? -1 : iDelta > 0 ? 1 : 0;
3747}
3748
3749/**
3750 * Post processes the partition descriptors.
3751 *
3752 * Sorts them and check that they don't overlap.
3753 */
3754static int vmdkRawDescPostProcessPartitions(PVMDKIMAGE pImage, PVDISKRAW pRawDesc, uint64_t cbSize)
3755{
3756 /*
3757 * Sort data areas in ascending order of start.
3758 */
3759 RTSortShell(pRawDesc->pPartDescs, pRawDesc->cPartDescs, sizeof(pRawDesc->pPartDescs[0]), vmdkRawDescPartComp, NULL);
3760
3761 /*
3762 * Check that we don't have overlapping descriptors. If we do, that's an
3763 * indication that the drive is corrupt or that the RTDvm code is buggy.
3764 */
3765 VDISKRAWPARTDESC const *paPartDescs = pRawDesc->pPartDescs;
3766 for (uint32_t i = 0; i < pRawDesc->cPartDescs; i++)
3767 {
3768 uint64_t offLast = paPartDescs[i].offStartInVDisk + paPartDescs[i].cbData;
3769 if (offLast <= paPartDescs[i].offStartInVDisk)
3770 return vdIfError(pImage->pIfError, VERR_FILESYSTEM_CORRUPT /*?*/, RT_SRC_POS,
3771 N_("VMDK: Image path: '%s'. Bogus partition descriptor #%u (%#RX64 LB %#RX64%s): Wrap around or zero"),
3772 pImage->pszFilename, i, paPartDescs[i].offStartInVDisk, paPartDescs[i].cbData,
3773 paPartDescs[i].pvPartitionData ? " (data)" : "");
3774 offLast -= 1;
3775
3776 if (i + 1 < pRawDesc->cPartDescs && offLast >= paPartDescs[i + 1].offStartInVDisk)
3777 return vdIfError(pImage->pIfError, VERR_FILESYSTEM_CORRUPT /*?*/, RT_SRC_POS,
3778 N_("VMDK: Image path: '%s'. Partition descriptor #%u (%#RX64 LB %#RX64%s) overlaps with the next (%#RX64 LB %#RX64%s)"),
3779 pImage->pszFilename, i, paPartDescs[i].offStartInVDisk, paPartDescs[i].cbData,
3780 paPartDescs[i].pvPartitionData ? " (data)" : "", paPartDescs[i + 1].offStartInVDisk,
3781 paPartDescs[i + 1].cbData, paPartDescs[i + 1].pvPartitionData ? " (data)" : "");
3782 if (offLast >= cbSize)
3783 return vdIfError(pImage->pIfError, VERR_FILESYSTEM_CORRUPT /*?*/, RT_SRC_POS,
3784 N_("VMDK: Image path: '%s'. Partition descriptor #%u (%#RX64 LB %#RX64%s) goes beyond the end of the drive (%#RX64)"),
3785 pImage->pszFilename, i, paPartDescs[i].offStartInVDisk, paPartDescs[i].cbData,
3786 paPartDescs[i].pvPartitionData ? " (data)" : "", cbSize);
3787 }
3788
3789 return VINF_SUCCESS;
3790}
3791
3792
#ifdef RT_OS_LINUX
/**
 * Searches the dir specified in @a pszBlockDevDir for subdirectories with a
 * 'dev' file matching @a uDevToLocate.
 *
 * This is used both for locating the drive's own directory under /sys/block/
 * and, in a subsequent call, the partition's subdirectory beneath it.
 *
 * @returns IPRT status code, errors have been reported properly.
 * @param   pImage          For error reporting.
 * @param   pszBlockDevDir  Input: Path to the directory search under.
 *                          Output: Path to the directory containing information
 *                          for @a uDevToLocate.
 * @param   cbBlockDevDir   The size of the buffer @a pszBlockDevDir points to.
 * @param   uDevToLocate    The device number of the block device info dir to
 *                          locate.
 * @param   pszDevToLocate  For error reporting.
 */
static int vmdkFindSysBlockDevPath(PVMDKIMAGE pImage, char *pszBlockDevDir, size_t cbBlockDevDir,
                                   dev_t uDevToLocate, const char *pszDevToLocate)
{
    /* Ensure a trailing separator so entry names can be appended at cchDir. */
    size_t const cchDir = RTPathEnsureTrailingSeparator(pszBlockDevDir, cbBlockDevDir);
    AssertReturn(cchDir > 0, VERR_BUFFER_OVERFLOW);

    RTDIR hDir = NIL_RTDIR;
    int rc = RTDirOpen(&hDir, pszBlockDevDir);
    if (RT_SUCCESS(rc))
    {
        for (;;)
        {
            RTDIRENTRY Entry;
            rc = RTDirRead(hDir, &Entry, NULL);
            if (RT_SUCCESS(rc))
            {
                /* We're interested in directories and symlinks (UNKNOWN is
                   included as the entry type may not have been determined). */
                if (   Entry.enmType == RTDIRENTRYTYPE_DIRECTORY
                    || Entry.enmType == RTDIRENTRYTYPE_SYMLINK
                    || Entry.enmType == RTDIRENTRYTYPE_UNKNOWN)
                {
                    /* Append the entry name after the trailing separator. */
                    rc = RTStrCopy(&pszBlockDevDir[cchDir], cbBlockDevDir - cchDir, Entry.szName);
                    AssertContinue(RT_SUCCESS(rc)); /* should not happen! */

                    /* Read '<entry>/dev' and compare device numbers.  On a
                       match, break out leaving pszBlockDevDir pointing at the
                       matching directory (the output contract). */
                    dev_t uThisDevNo = ~uDevToLocate; /* init to a value that cannot match */
                    rc = RTLinuxSysFsReadDevNumFile(&uThisDevNo, "%s/dev", pszBlockDevDir);
                    if (RT_SUCCESS(rc) && uThisDevNo == uDevToLocate)
                        break;
                }
            }
            else
            {
                /* Enumeration ended (or failed): restore the input path before reporting. */
                pszBlockDevDir[cchDir] = '\0';
                if (rc == VERR_NO_MORE_FILES)
                    rc = vdIfError(pImage->pIfError, VERR_NOT_FOUND, RT_SRC_POS,
                                   N_("VMDK: Image path: '%s'. Failed to locate device corresponding to '%s' under '%s'"),
                                   pImage->pszFilename, pszDevToLocate, pszBlockDevDir);
                else
                    rc = vdIfError(pImage->pIfError, rc, RT_SRC_POS,
                                   N_("VMDK: Image path: '%s'. RTDirRead failed enumerating '%s': %Rrc"),
                                   pImage->pszFilename, pszBlockDevDir, rc);
                break;
            }
        }
        RTDirClose(hDir);
    }
    else
        rc = vdIfError(pImage->pIfError, rc, RT_SRC_POS,
                       N_("VMDK: Image path: '%s'. Failed to open dir '%s' for listing: %Rrc"),
                       pImage->pszFilename, pszBlockDevDir, rc);
    return rc;
}
#endif /* RT_OS_LINUX */
3863
3864#ifdef RT_OS_FREEBSD
3865
3866
3867/**
3868 * Reads the config data from the provider and returns offset and size
3869 *
3870 * @return IPRT status code
3871 * @param pProvider GEOM provider representing partition
3872 * @param pcbOffset Placeholder for the offset of the partition
3873 * @param pcbSize Placeholder for the size of the partition
3874 */
3875static int vmdkReadPartitionsParamsFromProvider(gprovider *pProvider, uint64_t *pcbOffset, uint64_t *pcbSize)
3876{
3877 gconfig *pConfEntry;
3878 int rc = VERR_NOT_FOUND;
3879
3880 /*
3881 * Required parameters are located in the list containing key/value pairs.
3882 * Both key and value are in text form. Manuals tells nothing about the fact
3883 * that the both parameters should be present in the list. Thus, there are
3884 * cases when only one parameter is presented. To handle such cases we treat
3885 * absent params as zero allowing the caller decide the case is either correct
3886 * or an error.
3887 */
3888 uint64_t cbOffset = 0;
3889 uint64_t cbSize = 0;
3890 LIST_FOREACH(pConfEntry, &pProvider->lg_config, lg_config)
3891 {
3892 if (RTStrCmp(pConfEntry->lg_name, "offset") == 0)
3893 {
3894 cbOffset = RTStrToUInt64(pConfEntry->lg_val);
3895 rc = VINF_SUCCESS;
3896 }
3897 else if (RTStrCmp(pConfEntry->lg_name, "length") == 0)
3898 {
3899 cbSize = RTStrToUInt64(pConfEntry->lg_val);
3900 rc = VINF_SUCCESS;
3901 }
3902 }
3903 if (RT_SUCCESS(rc))
3904 {
3905 *pcbOffset = cbOffset;
3906 *pcbSize = cbSize;
3907 }
3908 return rc;
3909}
3910
3911
3912/**
3913 * Searches the partition specified by name and calculates its size and absolute offset.
3914 *
3915 * @return IPRT status code.
3916 * @param pParentClass Class containing pParentGeom
3917 * @param pszParentGeomName Name of the parent geom where we are looking for provider
3918 * @param pszProviderName Name of the provider we are looking for
3919 * @param pcbAbsoluteOffset Placeholder for the absolute offset of the partition, i.e. offset from the beginning of the disk
3920 * @param psbSize Placeholder for the size of the partition.
3921 */
3922static int vmdkFindPartitionParamsByName(gclass *pParentClass, const char *pszParentGeomName, const char *pszProviderName,
3923 uint64_t *pcbAbsoluteOffset, uint64_t *pcbSize)
3924{
3925 AssertReturn(pParentClass, VERR_INVALID_PARAMETER);
3926 AssertReturn(pszParentGeomName, VERR_INVALID_PARAMETER);
3927 AssertReturn(pszProviderName, VERR_INVALID_PARAMETER);
3928 AssertReturn(pcbAbsoluteOffset, VERR_INVALID_PARAMETER);
3929 AssertReturn(pcbSize, VERR_INVALID_PARAMETER);
3930
3931 ggeom *pParentGeom;
3932 int rc = VERR_NOT_FOUND;
3933 LIST_FOREACH(pParentGeom, &pParentClass->lg_geom, lg_geom)
3934 {
3935 if (RTStrCmp(pParentGeom->lg_name, pszParentGeomName) == 0)
3936 {
3937 rc = VINF_SUCCESS;
3938 break;
3939 }
3940 }
3941 if (RT_FAILURE(rc))
3942 return rc;
3943
3944 gprovider *pProvider;
3945 /*
3946 * First, go over providers without handling EBR or BSDLabel
3947 * partitions for case when looking provider is child
3948 * of the givng geom, to reduce searching time
3949 */
3950 LIST_FOREACH(pProvider, &pParentGeom->lg_provider, lg_provider)
3951 {
3952 if (RTStrCmp(pProvider->lg_name, pszProviderName) == 0)
3953 return vmdkReadPartitionsParamsFromProvider(pProvider, pcbAbsoluteOffset, pcbSize);
3954 }
3955
3956 /*
3957 * No provider found. Go over the parent geom again
3958 * and make recursions if geom represents EBR or BSDLabel.
3959 * In this case given parent geom contains only EBR or BSDLabel
3960 * partition itself and their own partitions are in the separate
3961 * geoms. Also, partition offsets are relative to geom, so
3962 * we have to add offset from child provider with parent geoms
3963 * provider
3964 */
3965
3966 LIST_FOREACH(pProvider, &pParentGeom->lg_provider, lg_provider)
3967 {
3968 uint64_t cbOffset = 0;
3969 uint64_t cbSize = 0;
3970 rc = vmdkReadPartitionsParamsFromProvider(pProvider, &cbOffset, &cbSize);
3971 if (RT_FAILURE(rc))
3972 return rc;
3973
3974 uint64_t cbProviderOffset = 0;
3975 uint64_t cbProviderSize = 0;
3976 rc = vmdkFindPartitionParamsByName(pParentClass, pProvider->lg_name, pszProviderName, &cbProviderOffset, &cbProviderSize);
3977 if (RT_SUCCESS(rc))
3978 {
3979 *pcbAbsoluteOffset = cbOffset + cbProviderOffset;
3980 *pcbSize = cbProviderSize;
3981 return rc;
3982 }
3983 }
3984
3985 return VERR_NOT_FOUND;
3986}
3987#endif
3988
3989
3990/**
3991 * Attempts to verify the raw partition path.
3992 *
3993 * We don't want to trust RTDvm and the partition device node morphing blindly.
3994 */
3995static int vmdkRawDescVerifyPartitionPath(PVMDKIMAGE pImage, PVDISKRAWPARTDESC pPartDesc, uint32_t idxPartition,
3996 const char *pszRawDrive, RTFILE hRawDrive, uint32_t cbSector, RTDVMVOLUME hVol)
3997{
3998 RT_NOREF(pImage, pPartDesc, idxPartition, pszRawDrive, hRawDrive, cbSector, hVol);
3999
4000 /*
4001 * Try open the raw partition device.
4002 */
4003 RTFILE hRawPart = NIL_RTFILE;
4004 int rc = RTFileOpen(&hRawPart, pPartDesc->pszRawDevice, RTFILE_O_READ | RTFILE_O_OPEN | RTFILE_O_DENY_NONE);
4005 if (RT_FAILURE(rc))
4006 return vdIfError(pImage->pIfError, rc, RT_SRC_POS,
4007 N_("VMDK: Image path: '%s'. Failed to open partition #%u on '%s' via '%s' (%Rrc)"),
4008 pImage->pszFilename, idxPartition, pszRawDrive, pPartDesc->pszRawDevice, rc);
4009
4010 /*
4011 * Compare the partition UUID if we can get it.
4012 */
4013#ifdef RT_OS_WINDOWS
4014 DWORD cbReturned;
4015
4016 /* 1. Get the device numbers for both handles, they should have the same disk. */
4017 STORAGE_DEVICE_NUMBER DevNum1;
4018 RT_ZERO(DevNum1);
4019 if (!DeviceIoControl((HANDLE)RTFileToNative(hRawDrive), IOCTL_STORAGE_GET_DEVICE_NUMBER,
4020 NULL /*pvInBuffer*/, 0 /*cbInBuffer*/, &DevNum1, sizeof(DevNum1), &cbReturned, NULL /*pOverlapped*/))
4021 rc = vdIfError(pImage->pIfError, RTErrConvertFromWin32(GetLastError()), RT_SRC_POS,
4022 N_("VMDK: Image path: '%s'. IOCTL_STORAGE_GET_DEVICE_NUMBER failed on '%s': %u"),
4023 pImage->pszFilename, pszRawDrive, GetLastError());
4024
4025 STORAGE_DEVICE_NUMBER DevNum2;
4026 RT_ZERO(DevNum2);
4027 if (!DeviceIoControl((HANDLE)RTFileToNative(hRawPart), IOCTL_STORAGE_GET_DEVICE_NUMBER,
4028 NULL /*pvInBuffer*/, 0 /*cbInBuffer*/, &DevNum2, sizeof(DevNum2), &cbReturned, NULL /*pOverlapped*/))
4029 rc = vdIfError(pImage->pIfError, RTErrConvertFromWin32(GetLastError()), RT_SRC_POS,
4030 N_("VMDK: Image path: '%s'. IOCTL_STORAGE_GET_DEVICE_NUMBER failed on '%s': %u"),
4031 pImage->pszFilename, pPartDesc->pszRawDevice, GetLastError());
4032 if ( RT_SUCCESS(rc)
4033 && ( DevNum1.DeviceNumber != DevNum2.DeviceNumber
4034 || DevNum1.DeviceType != DevNum2.DeviceType))
4035 rc = vdIfError(pImage->pIfError, VERR_MISMATCH, RT_SRC_POS,
4036 N_("VMDK: Image path: '%s'. Partition #%u path ('%s') verification failed on '%s' (%#x != %#x || %#x != %#x)"),
4037 pImage->pszFilename, idxPartition, pPartDesc->pszRawDevice, pszRawDrive,
4038 DevNum1.DeviceNumber, DevNum2.DeviceNumber, DevNum1.DeviceType, DevNum2.DeviceType);
4039 if (RT_SUCCESS(rc))
4040 {
4041 /* Get the partitions from the raw drive and match up with the volume info
4042 from RTDvm. The partition number is found in DevNum2. */
4043 DWORD cbNeeded = 0;
4044 if ( DeviceIoControl((HANDLE)RTFileToNative(hRawDrive), IOCTL_DISK_GET_DRIVE_LAYOUT_EX,
4045 NULL /*pvInBuffer*/, 0 /*cbInBuffer*/, NULL, 0, &cbNeeded, NULL /*pOverlapped*/)
4046 || cbNeeded < RT_UOFFSETOF_DYN(DRIVE_LAYOUT_INFORMATION_EX, PartitionEntry[1]))
4047 cbNeeded = RT_UOFFSETOF_DYN(DRIVE_LAYOUT_INFORMATION_EX, PartitionEntry[64]);
4048 cbNeeded += sizeof(PARTITION_INFORMATION_EX) * 2; /* just in case */
4049 DRIVE_LAYOUT_INFORMATION_EX *pLayout = (DRIVE_LAYOUT_INFORMATION_EX *)RTMemTmpAllocZ(cbNeeded);
4050 if (pLayout)
4051 {
4052 cbReturned = 0;
4053 if (DeviceIoControl((HANDLE)RTFileToNative(hRawDrive), IOCTL_DISK_GET_DRIVE_LAYOUT_EX,
4054 NULL /*pvInBuffer*/, 0 /*cbInBuffer*/, pLayout, cbNeeded, &cbReturned, NULL /*pOverlapped*/))
4055 {
4056 /* Find the entry with the given partition number (it's not an index, array contains empty MBR entries ++). */
4057 unsigned iEntry = 0;
4058 while ( iEntry < pLayout->PartitionCount
4059 && pLayout->PartitionEntry[iEntry].PartitionNumber != DevNum2.PartitionNumber)
4060 iEntry++;
4061 if (iEntry < pLayout->PartitionCount)
4062 {
4063 /* Compare the basics */
4064 PARTITION_INFORMATION_EX const * const pLayoutEntry = &pLayout->PartitionEntry[iEntry];
4065 if (pLayoutEntry->StartingOffset.QuadPart != (int64_t)pPartDesc->offStartInVDisk)
4066 rc = vdIfError(pImage->pIfError, VERR_MISMATCH, RT_SRC_POS,
4067 N_("VMDK: Image path: '%s'. Partition #%u path ('%s') verification failed on '%s': StartingOffset %RU64, expected %RU64"),
4068 pImage->pszFilename, idxPartition, pPartDesc->pszRawDevice, pszRawDrive,
4069 pLayoutEntry->StartingOffset.QuadPart, pPartDesc->offStartInVDisk);
4070 else if (pLayoutEntry->PartitionLength.QuadPart != (int64_t)pPartDesc->cbData)
4071 rc = vdIfError(pImage->pIfError, VERR_MISMATCH, RT_SRC_POS,
4072 N_("VMDK: Image path: '%s'. Partition #%u path ('%s') verification failed on '%s': PartitionLength %RU64, expected %RU64"),
4073 pImage->pszFilename, idxPartition, pPartDesc->pszRawDevice, pszRawDrive,
4074 pLayoutEntry->PartitionLength.QuadPart, pPartDesc->cbData);
4075 /** @todo We could compare the MBR type, GPT type and ID. */
4076 RT_NOREF(hVol);
4077 }
4078 else
4079 rc = vdIfError(pImage->pIfError, VERR_MISMATCH, RT_SRC_POS,
4080 N_("VMDK: Image path: '%s'. Partition #%u path ('%s') verification failed on '%s': PartitionCount (%#x vs %#x)"),
4081 pImage->pszFilename, idxPartition, pPartDesc->pszRawDevice, pszRawDrive,
4082 DevNum2.PartitionNumber, pLayout->PartitionCount);
4083# ifndef LOG_ENABLED
4084 if (RT_FAILURE(rc))
4085# endif
4086 {
4087 LogRel(("VMDK: Windows reports %u partitions for '%s':\n", pLayout->PartitionCount, pszRawDrive));
4088 PARTITION_INFORMATION_EX const *pEntry = &pLayout->PartitionEntry[0];
4089 for (DWORD i = 0; i < pLayout->PartitionCount; i++, pEntry++)
4090 {
4091 LogRel(("VMDK: #%u/%u: %016RU64 LB %016RU64 style=%d rewrite=%d",
4092 i, pEntry->PartitionNumber, pEntry->StartingOffset.QuadPart, pEntry->PartitionLength.QuadPart,
4093 pEntry->PartitionStyle, pEntry->RewritePartition));
4094 if (pEntry->PartitionStyle == PARTITION_STYLE_MBR)
4095 LogRel((" type=%#x boot=%d rec=%d hidden=%u\n", pEntry->Mbr.PartitionType, pEntry->Mbr.BootIndicator,
4096 pEntry->Mbr.RecognizedPartition, pEntry->Mbr.HiddenSectors));
4097 else if (pEntry->PartitionStyle == PARTITION_STYLE_GPT)
4098 LogRel((" type=%RTuuid id=%RTuuid aatrib=%RX64 name=%.36ls\n", &pEntry->Gpt.PartitionType,
4099 &pEntry->Gpt.PartitionId, pEntry->Gpt.Attributes, &pEntry->Gpt.Name[0]));
4100 else
4101 LogRel(("\n"));
4102 }
4103 LogRel(("VMDK: Looked for partition #%u (%u, '%s') at %RU64 LB %RU64\n", DevNum2.PartitionNumber,
4104 idxPartition, pPartDesc->pszRawDevice, pPartDesc->offStartInVDisk, pPartDesc->cbData));
4105 }
4106 }
4107 else
4108 rc = vdIfError(pImage->pIfError, RTErrConvertFromWin32(GetLastError()), RT_SRC_POS,
4109 N_("VMDK: Image path: '%s'. IOCTL_DISK_GET_DRIVE_LAYOUT_EX failed on '%s': %u (cb %u, cbRet %u)"),
4110 pImage->pszFilename, pPartDesc->pszRawDevice, GetLastError(), cbNeeded, cbReturned);
4111 RTMemTmpFree(pLayout);
4112 }
4113 else
4114 rc = VERR_NO_TMP_MEMORY;
4115 }
4116
4117#elif defined(RT_OS_LINUX)
4118 RT_NOREF(hVol);
4119
4120 /* Stat the two devices first to get their device numbers. (We probably
4121 could make some assumptions here about the major & minor number assignments
4122 for legacy nodes, but it doesn't hold up for nvme, so we'll skip that.) */
4123 struct stat StDrive, StPart;
4124 if (fstat((int)RTFileToNative(hRawDrive), &StDrive) != 0)
4125 rc = vdIfError(pImage->pIfError, RTErrConvertFromErrno(errno), RT_SRC_POS,
4126 N_("VMDK: Image path: '%s'. fstat failed on '%s': %d"), pImage->pszFilename, pszRawDrive, errno);
4127 else if (fstat((int)RTFileToNative(hRawPart), &StPart) != 0)
4128 rc = vdIfError(pImage->pIfError, RTErrConvertFromErrno(errno), RT_SRC_POS,
4129 N_("VMDK: Image path: '%s'. fstat failed on '%s': %d"), pImage->pszFilename, pPartDesc->pszRawDevice, errno);
4130 else
4131 {
4132 /* Scan the directories immediately under /sys/block/ for one with a
4133 'dev' file matching the drive's device number: */
4134 char szSysPath[RTPATH_MAX];
4135 rc = RTLinuxConstructPath(szSysPath, sizeof(szSysPath), "block/");
4136 AssertRCReturn(rc, rc); /* this shall not fail */
4137 if (RTDirExists(szSysPath))
4138 {
4139 rc = vmdkFindSysBlockDevPath(pImage, szSysPath, sizeof(szSysPath), StDrive.st_rdev, pszRawDrive);
4140
4141 /* Now, scan the directories under that again for a partition device
4142 matching the hRawPart device's number: */
4143 if (RT_SUCCESS(rc))
4144 rc = vmdkFindSysBlockDevPath(pImage, szSysPath, sizeof(szSysPath), StPart.st_rdev, pPartDesc->pszRawDevice);
4145
4146 /* Having found the /sys/block/device/partition/ path, we can finally
4147 read the partition attributes and compare with hVol. */
4148 if (RT_SUCCESS(rc))
4149 {
4150 /* partition number: */
4151 int64_t iLnxPartition = 0;
4152 rc = RTLinuxSysFsReadIntFile(10, &iLnxPartition, "%s/partition", szSysPath);
4153 if (RT_SUCCESS(rc) && iLnxPartition != idxPartition)
4154 rc = vdIfError(pImage->pIfError, VERR_MISMATCH, RT_SRC_POS,
4155 N_("VMDK: Image path: '%s'. Partition #%u path ('%s') verification failed on '%s': Partition number %RI64, expected %RU32"),
4156 pImage->pszFilename, idxPartition, pPartDesc->pszRawDevice, pszRawDrive, iLnxPartition, idxPartition);
4157 /* else: ignore failure? */
4158
4159 /* start offset: */
4160 uint32_t const cbLnxSector = 512; /* It's hardcoded in the Linux kernel */
4161 if (RT_SUCCESS(rc))
4162 {
4163 int64_t offLnxStart = -1;
4164 rc = RTLinuxSysFsReadIntFile(10, &offLnxStart, "%s/start", szSysPath);
4165 offLnxStart *= cbLnxSector;
4166 if (RT_SUCCESS(rc) && offLnxStart != (int64_t)pPartDesc->offStartInVDisk)
4167 rc = vdIfError(pImage->pIfError, VERR_MISMATCH, RT_SRC_POS,
4168 N_("VMDK: Image path: '%s'. Partition #%u path ('%s') verification failed on '%s': Start offset %RI64, expected %RU64"),
4169 pImage->pszFilename, idxPartition, pPartDesc->pszRawDevice, pszRawDrive, offLnxStart, pPartDesc->offStartInVDisk);
4170 /* else: ignore failure? */
4171 }
4172
4173 /* the size: */
4174 if (RT_SUCCESS(rc))
4175 {
4176 int64_t cbLnxData = -1;
4177 rc = RTLinuxSysFsReadIntFile(10, &cbLnxData, "%s/size", szSysPath);
4178 cbLnxData *= cbLnxSector;
4179 if (RT_SUCCESS(rc) && cbLnxData != (int64_t)pPartDesc->cbData)
4180 rc = vdIfError(pImage->pIfError, VERR_MISMATCH, RT_SRC_POS,
4181 N_("VMDK: Image path: '%s'. Partition #%u path ('%s') verification failed on '%s': Size %RI64, expected %RU64"),
4182 pImage->pszFilename, idxPartition, pPartDesc->pszRawDevice, pszRawDrive, cbLnxData, pPartDesc->cbData);
4183 /* else: ignore failure? */
4184 }
4185 }
4186 }
4187 /* else: We've got nothing to work on, so only do content comparison. */
4188 }
4189
4190#elif defined(RT_OS_FREEBSD)
4191 char szDriveDevName[256];
4192 char* pszDevName = fdevname_r(RTFileToNative(hRawDrive), szDriveDevName, 256);
4193 if (pszDevName == NULL)
4194 rc = vdIfError(pImage->pIfError, VERR_INVALID_PARAMETER, RT_SRC_POS,
4195 N_("VMDK: Image path: '%s'. '%s' is not a drive path"), pImage->pszFilename, pszRawDrive);
4196 char szPartDevName[256];
4197 if (RT_SUCCESS(rc))
4198 {
4199 pszDevName = fdevname_r(RTFileToNative(hRawPart), szPartDevName, 256);
4200 if (pszDevName == NULL)
4201 rc = vdIfError(pImage->pIfError, VERR_INVALID_PARAMETER, RT_SRC_POS,
4202 N_("VMDK: Image path: '%s'. '%s' is not a partition path"), pImage->pszFilename, pPartDesc->pszRawDevice);
4203 }
4204 if (RT_SUCCESS(rc))
4205 {
4206 gmesh geomMesh;
4207 int err = geom_gettree(&geomMesh);
4208 if (err == 0)
4209 {
4210 /* Find root class containg partitions info */
4211 gclass* pPartClass;
4212 LIST_FOREACH(pPartClass, &geomMesh.lg_class, lg_class)
4213 {
4214 if (RTStrCmp(pPartClass->lg_name, "PART") == 0)
4215 break;
4216 }
4217 if (pPartClass == NULL || RTStrCmp(pPartClass->lg_name, "PART") != 0)
4218 rc = vdIfError(pImage->pIfError, VERR_GENERAL_FAILURE, RT_SRC_POS,
4219 N_("VMDK: Image path: '%s'. 'PART' class not found in the GEOM tree"), pImage->pszFilename);
4220
4221
4222 if (RT_SUCCESS(rc))
4223 {
4224 /* Find provider representing partition device */
4225 uint64_t cbOffset;
4226 uint64_t cbSize;
4227 rc = vmdkFindPartitionParamsByName(pPartClass, szDriveDevName, szPartDevName, &cbOffset, &cbSize);
4228 if (RT_SUCCESS(rc))
4229 {
4230 if (cbOffset != pPartDesc->offStartInVDisk)
4231 rc = vdIfError(pImage->pIfError, VERR_MISMATCH, RT_SRC_POS,
4232 N_("VMDK: Image path: '%s'. Partition #%u path ('%s') verification failed on '%s': Start offset %RU64, expected %RU64"),
4233 pImage->pszFilename, idxPartition, pPartDesc->pszRawDevice, pszRawDrive, cbOffset, pPartDesc->offStartInVDisk);
4234 if (cbSize != pPartDesc->cbData)
4235 rc = vdIfError(pImage->pIfError, VERR_MISMATCH, RT_SRC_POS,
4236 N_("VMDK: Image path: '%s'. Partition #%u path ('%s') verification failed on '%s': Size %RU64, expected %RU64"),
4237 pImage->pszFilename, idxPartition, pPartDesc->pszRawDevice, pszRawDrive, cbSize, pPartDesc->cbData);
4238 }
4239 else
4240 rc = vdIfError(pImage->pIfError, rc, RT_SRC_POS,
4241 N_("VMDK: Image path: '%s'. Error getting geom provider for the partition '%s' of the drive '%s' in the GEOM tree: %Rrc"),
4242 pImage->pszFilename, pPartDesc->pszRawDevice, pszRawDrive, rc);
4243 }
4244
4245 geom_deletetree(&geomMesh);
4246 }
4247 else
4248 rc = vdIfError(pImage->pIfError, RTErrConvertFromErrno(err), RT_SRC_POS,
4249 N_("VMDK: Image path: '%s'. geom_gettree failed: %d"), pImage->pszFilename, err);
4250 }
4251
4252#elif defined(RT_OS_SOLARIS)
4253 RT_NOREF(hVol);
4254
4255 dk_cinfo dkiDriveInfo;
4256 dk_cinfo dkiPartInfo;
4257 if (ioctl(RTFileToNative(hRawDrive), DKIOCINFO, (caddr_t)&dkiDriveInfo) == -1)
4258 rc = vdIfError(pImage->pIfError, RTErrConvertFromErrno(errno), RT_SRC_POS,
4259 N_("VMDK: Image path: '%s'. DKIOCINFO failed on '%s': %d"), pImage->pszFilename, pszRawDrive, errno);
4260 else if (ioctl(RTFileToNative(hRawPart), DKIOCINFO, (caddr_t)&dkiPartInfo) == -1)
4261 rc = vdIfError(pImage->pIfError, RTErrConvertFromErrno(errno), RT_SRC_POS,
4262 N_("VMDK: Image path: '%s'. DKIOCINFO failed on '%s': %d"), pImage->pszFilename, pszRawDrive, errno);
4263 else if ( dkiDriveInfo.dki_ctype != dkiPartInfo.dki_ctype
4264 || dkiDriveInfo.dki_cnum != dkiPartInfo.dki_cnum
4265 || dkiDriveInfo.dki_addr != dkiPartInfo.dki_addr
4266 || dkiDriveInfo.dki_unit != dkiPartInfo.dki_unit
4267 || dkiDriveInfo.dki_slave != dkiPartInfo.dki_slave)
4268 rc = vdIfError(pImage->pIfError, VERR_MISMATCH, RT_SRC_POS,
4269 N_("VMDK: Image path: '%s'. Partition #%u path ('%s') verification failed on '%s' (%#x != %#x || %#x != %#x || %#x != %#x || %#x != %#x || %#x != %#x)"),
4270 pImage->pszFilename, idxPartition, pPartDesc->pszRawDevice, pszRawDrive,
4271 dkiDriveInfo.dki_ctype, dkiPartInfo.dki_ctype, dkiDriveInfo.dki_cnum, dkiPartInfo.dki_cnum,
4272 dkiDriveInfo.dki_addr, dkiPartInfo.dki_addr, dkiDriveInfo.dki_unit, dkiPartInfo.dki_unit,
4273 dkiDriveInfo.dki_slave, dkiPartInfo.dki_slave);
4274 else
4275 {
4276 uint64_t cbOffset = 0;
4277 uint64_t cbSize = 0;
4278 dk_gpt *pEfi = NULL;
4279 int idxEfiPart = efi_alloc_and_read(RTFileToNative(hRawPart), &pEfi);
4280 if (idxEfiPart >= 0)
4281 {
4282 if ((uint32_t)dkiPartInfo.dki_partition + 1 == idxPartition)
4283 {
4284 cbOffset = pEfi->efi_parts[idxEfiPart].p_start * pEfi->efi_lbasize;
4285 cbSize = pEfi->efi_parts[idxEfiPart].p_size * pEfi->efi_lbasize;
4286 }
4287 else
4288 rc = vdIfError(pImage->pIfError, VERR_MISMATCH, RT_SRC_POS,
4289 N_("VMDK: Image path: '%s'. Partition #%u number ('%s') verification failed on '%s' (%#x != %#x)"),
4290 pImage->pszFilename, idxPartition, pPartDesc->pszRawDevice, pszRawDrive,
4291 idxPartition, (uint32_t)dkiPartInfo.dki_partition + 1);
4292 efi_free(pEfi);
4293 }
4294 else
4295 {
4296 /*
4297 * Manual says the efi_alloc_and_read returns VT_EINVAL if no EFI partition table found.
4298 * Actually, the function returns any error, e.g. VT_ERROR. Thus, we are not sure, is it
4299 * real error or just no EFI table found. Therefore, let's try to obtain partition info
4300 * using another way. If there is an error, it returns errno which will be handled below.
4301 */
4302
4303 uint32_t numPartition = (uint32_t)dkiPartInfo.dki_partition;
4304 if (numPartition > NDKMAP)
4305 numPartition -= NDKMAP;
4306 if (numPartition != idxPartition)
4307 rc = vdIfError(pImage->pIfError, VERR_MISMATCH, RT_SRC_POS,
4308 N_("VMDK: Image path: '%s'. Partition #%u number ('%s') verification failed on '%s' (%#x != %#x)"),
4309 pImage->pszFilename, idxPartition, pPartDesc->pszRawDevice, pszRawDrive,
4310 idxPartition, numPartition);
4311 else
4312 {
4313 dk_minfo_ext mediaInfo;
4314 if (ioctl(RTFileToNative(hRawPart), DKIOCGMEDIAINFOEXT, (caddr_t)&mediaInfo) == -1)
4315 rc = vdIfError(pImage->pIfError, RTErrConvertFromErrno(errno), RT_SRC_POS,
4316 N_("VMDK: Image path: '%s'. Partition #%u number ('%s') verification failed on '%s'. Can not obtain partition info: %d"),
4317 pImage->pszFilename, idxPartition, pPartDesc->pszRawDevice, pszRawDrive, errno);
4318 else
4319 {
4320 extpart_info extPartInfo;
4321 if (ioctl(RTFileToNative(hRawPart), DKIOCEXTPARTINFO, (caddr_t)&extPartInfo) != -1)
4322 {
4323 cbOffset = (uint64_t)extPartInfo.p_start * mediaInfo.dki_lbsize;
4324 cbSize = (uint64_t)extPartInfo.p_length * mediaInfo.dki_lbsize;
4325 }
4326 else
4327 rc = vdIfError(pImage->pIfError, RTErrConvertFromErrno(errno), RT_SRC_POS,
4328 N_("VMDK: Image path: '%s'. Partition #%u number ('%s') verification failed on '%s'. Can not obtain partition info: %d"),
4329 pImage->pszFilename, idxPartition, pPartDesc->pszRawDevice, pszRawDrive, errno);
4330 }
4331 }
4332 }
4333 if (RT_SUCCESS(rc) && cbOffset != pPartDesc->offStartInVDisk)
4334 rc = vdIfError(pImage->pIfError, VERR_MISMATCH, RT_SRC_POS,
4335 N_("VMDK: Image path: '%s'. Partition #%u path ('%s') verification failed on '%s': Start offset %RI64, expected %RU64"),
4336 pImage->pszFilename, idxPartition, pPartDesc->pszRawDevice, pszRawDrive, cbOffset, pPartDesc->offStartInVDisk);
4337
4338 if (RT_SUCCESS(rc) && cbSize != pPartDesc->cbData)
4339 rc = vdIfError(pImage->pIfError, VERR_MISMATCH, RT_SRC_POS,
4340 N_("VMDK: Image path: '%s'. Partition #%u path ('%s') verification failed on '%s': Size %RI64, expected %RU64"),
4341 pImage->pszFilename, idxPartition, pPartDesc->pszRawDevice, pszRawDrive, cbSize, pPartDesc->cbData);
4342 }
4343
4344#elif defined(RT_OS_DARWIN)
4345 /* Stat the drive get its device number. */
4346 struct stat StDrive;
4347 if (fstat((int)RTFileToNative(hRawDrive), &StDrive) != 0)
4348 rc = vdIfError(pImage->pIfError, RTErrConvertFromErrno(errno), RT_SRC_POS,
4349 N_("VMDK: Image path: '%s'. fstat failed on '%s' (errno=%d)"), pImage->pszFilename, pszRawDrive, errno);
4350 else
4351 {
4352 if (ioctl(RTFileToNative(hRawPart), DKIOCLOCKPHYSICALEXTENTS, NULL) == -1)
4353 rc = vdIfError(pImage->pIfError, RTErrConvertFromErrno(errno), RT_SRC_POS,
4354 N_("VMDK: Image path: '%s'. Partition #%u number ('%s') verification failed on '%s': Unable to lock the partition (errno=%d)"),
4355 pImage->pszFilename, idxPartition, pPartDesc->pszRawDevice, pszRawDrive, errno);
4356 else
4357 {
4358 uint32_t cbBlockSize = 0;
4359 uint64_t cbOffset = 0;
4360 uint64_t cbSize = 0;
4361 if (ioctl(RTFileToNative(hRawPart), DKIOCGETBLOCKSIZE, (caddr_t)&cbBlockSize) == -1)
4362 rc = vdIfError(pImage->pIfError, RTErrConvertFromErrno(errno), RT_SRC_POS,
4363 N_("VMDK: Image path: '%s'. Partition #%u number ('%s') verification failed on '%s': Unable to obtain the sector size of the partition (errno=%d)"),
4364 pImage->pszFilename, idxPartition, pPartDesc->pszRawDevice, pszRawDrive, errno);
4365 else if (ioctl(RTFileToNative(hRawPart), DKIOCGETBASE, (caddr_t)&cbOffset) == -1)
4366 rc = vdIfError(pImage->pIfError, RTErrConvertFromErrno(errno), RT_SRC_POS,
4367 N_("VMDK: Image path: '%s'. Partition #%u number ('%s') verification failed on '%s': Unable to obtain the start offset of the partition (errno=%d)"),
4368 pImage->pszFilename, idxPartition, pPartDesc->pszRawDevice, pszRawDrive, errno);
4369 else if (ioctl(RTFileToNative(hRawPart), DKIOCGETBLOCKCOUNT, (caddr_t)&cbSize) == -1)
4370 rc = vdIfError(pImage->pIfError, RTErrConvertFromErrno(errno), RT_SRC_POS,
4371 N_("VMDK: Image path: '%s'. Partition #%u number ('%s') verification failed on '%s': Unable to obtain the size of the partition (errno=%d)"),
4372 pImage->pszFilename, idxPartition, pPartDesc->pszRawDevice, pszRawDrive, errno);
4373 else
4374 {
4375 cbSize *= (uint64_t)cbBlockSize;
4376 dk_physical_extent_t dkPartExtent = {0};
4377 dkPartExtent.offset = 0;
4378 dkPartExtent.length = cbSize;
4379 if (ioctl(RTFileToNative(hRawPart), DKIOCGETPHYSICALEXTENT, (caddr_t)&dkPartExtent) == -1)
4380 rc = vdIfError(pImage->pIfError, RTErrConvertFromErrno(errno), RT_SRC_POS,
4381 N_("VMDK: Image path: '%s'. Partition #%u number ('%s') verification failed on '%s': Unable to obtain partition info (errno=%d)"),
4382 pImage->pszFilename, idxPartition, pPartDesc->pszRawDevice, pszRawDrive, errno);
4383 else
4384 {
4385 if (dkPartExtent.dev != StDrive.st_rdev)
4386 rc = vdIfError(pImage->pIfError, VERR_MISMATCH, RT_SRC_POS,
4387 N_("VMDK: Image path: '%s'. Partition #%u path ('%s') verification failed on '%s': Drive does not contain the partition"),
4388 pImage->pszFilename, idxPartition, pPartDesc->pszRawDevice, pszRawDrive);
4389 else if (cbOffset != pPartDesc->offStartInVDisk)
4390 rc = vdIfError(pImage->pIfError, VERR_MISMATCH, RT_SRC_POS,
4391 N_("VMDK: Image path: '%s'. Partition #%u path ('%s') verification failed on '%s': Start offset %RU64, expected %RU64"),
4392 pImage->pszFilename, idxPartition, pPartDesc->pszRawDevice, pszRawDrive, cbOffset, pPartDesc->offStartInVDisk);
4393 else if (cbSize != pPartDesc->cbData)
4394 rc = vdIfError(pImage->pIfError, VERR_MISMATCH, RT_SRC_POS,
4395 N_("VMDK: Image path: '%s'. Partition #%u path ('%s') verification failed on '%s': Size %RU64, expected %RU64"),
4396 pImage->pszFilename, idxPartition, pPartDesc->pszRawDevice, pszRawDrive, cbSize, pPartDesc->cbData);
4397 }
4398 }
4399
4400 if (ioctl(RTFileToNative(hRawPart), DKIOCUNLOCKPHYSICALEXTENTS, NULL) == -1)
4401 {
4402 int rc2 = vdIfError(pImage->pIfError, RTErrConvertFromErrno(errno), RT_SRC_POS,
4403 N_("VMDK: Image path: '%s'. Partition #%u number ('%s') verification failed on '%s': Unable to unlock the partition (errno=%d)"),
4404 pImage->pszFilename, idxPartition, pPartDesc->pszRawDevice, pszRawDrive, errno);
4405 if (RT_SUCCESS(rc))
4406 rc = rc2;
4407 }
4408 }
4409 }
4410
4411#else
4412 RT_NOREF(hVol); /* PORTME */
4413 rc = VERR_NOT_SUPPORTED;
4414#endif
4415 if (RT_SUCCESS(rc))
4416 {
4417 /*
4418 * Compare the first 32 sectors of the partition.
4419 *
4420 * This might not be conclusive, but for partitions formatted with the more
4421 * common file systems it should be as they have a superblock copy at or near
4422 * the start of the partition (fat, fat32, ntfs, and ext4 does at least).
4423 */
4424 size_t const cbToCompare = (size_t)RT_MIN(pPartDesc->cbData / cbSector, 32) * cbSector;
4425 uint8_t *pbSector1 = (uint8_t *)RTMemTmpAlloc(cbToCompare * 2);
4426 if (pbSector1 != NULL)
4427 {
4428 uint8_t *pbSector2 = pbSector1 + cbToCompare;
4429
4430 /* Do the comparing, we repeat if it fails and the data might be volatile. */
4431 uint64_t uPrevCrc1 = 0;
4432 uint64_t uPrevCrc2 = 0;
4433 uint32_t cStable = 0;
4434 for (unsigned iTry = 0; iTry < 256; iTry++)
4435 {
4436 rc = RTFileReadAt(hRawDrive, pPartDesc->offStartInVDisk, pbSector1, cbToCompare, NULL);
4437 if (RT_SUCCESS(rc))
4438 {
4439 rc = RTFileReadAt(hRawPart, pPartDesc->offStartInDevice, pbSector2, cbToCompare, NULL);
4440 if (RT_SUCCESS(rc))
4441 {
4442 if (memcmp(pbSector1, pbSector2, cbToCompare) != 0)
4443 {
4444 rc = VERR_MISMATCH;
4445
4446 /* Do data stability checks before repeating: */
4447 uint64_t const uCrc1 = RTCrc64(pbSector1, cbToCompare);
4448 uint64_t const uCrc2 = RTCrc64(pbSector2, cbToCompare);
4449 if ( uPrevCrc1 != uCrc1
4450 || uPrevCrc2 != uCrc2)
4451 cStable = 0;
4452 else if (++cStable > 4)
4453 break;
4454 uPrevCrc1 = uCrc1;
4455 uPrevCrc2 = uCrc2;
4456 continue;
4457 }
4458 rc = VINF_SUCCESS;
4459 }
4460 else
4461 rc = vdIfError(pImage->pIfError, rc, RT_SRC_POS,
4462 N_("VMDK: Image path: '%s'. Error reading %zu bytes from '%s' at offset %RU64 (%Rrc)"),
4463 pImage->pszFilename, cbToCompare, pPartDesc->pszRawDevice, pPartDesc->offStartInDevice, rc);
4464 }
4465 else
4466 rc = vdIfError(pImage->pIfError, rc, RT_SRC_POS,
4467 N_("VMDK: Image path: '%s'. Error reading %zu bytes from '%s' at offset %RU64 (%Rrc)"),
4468 pImage->pszFilename, cbToCompare, pszRawDrive, pPartDesc->offStartInVDisk, rc);
4469 break;
4470 }
4471 if (rc == VERR_MISMATCH)
4472 {
4473 /* Find the first mismatching bytes: */
4474 size_t offMissmatch = 0;
4475 while (offMissmatch < cbToCompare && pbSector1[offMissmatch] == pbSector2[offMissmatch])
4476 offMissmatch++;
4477 int cbSample = (int)RT_MIN(cbToCompare - offMissmatch, 16);
4478
4479 if (cStable > 0)
4480 rc = vdIfError(pImage->pIfError, rc, RT_SRC_POS,
4481 N_("VMDK: Image path: '%s'. Partition #%u path ('%s') verification failed on '%s' (cStable=%d @%#zx: %.*Rhxs vs %.*Rhxs)"),
4482 pImage->pszFilename, idxPartition, pPartDesc->pszRawDevice, pszRawDrive, cStable,
4483 offMissmatch, cbSample, &pbSector1[offMissmatch], cbSample, &pbSector2[offMissmatch]);
4484 else
4485 {
4486 LogRel(("VMDK: Image path: '%s'. Partition #%u path ('%s') verification undecided on '%s' because of unstable data! (@%#zx: %.*Rhxs vs %.*Rhxs)\n",
4487 pImage->pszFilename, idxPartition, pPartDesc->pszRawDevice, pszRawDrive,
4488 offMissmatch, cbSample, &pbSector1[offMissmatch], cbSample, &pbSector2[offMissmatch]));
4489 rc = -rc;
4490 }
4491 }
4492
4493 RTMemTmpFree(pbSector1);
4494 }
4495 else
4496 rc = vdIfError(pImage->pIfError, VERR_NO_TMP_MEMORY, RT_SRC_POS,
4497 N_("VMDK: Image path: '%s'. Failed to allocate %zu bytes for a temporary read buffer\n"),
4498 pImage->pszFilename, cbToCompare * 2);
4499 }
4500 RTFileClose(hRawPart);
4501 return rc;
4502}
4503
4504#ifdef RT_OS_WINDOWS
4505/**
4506 * Construct the device name for the given partition number.
4507 */
4508static int vmdkRawDescWinMakePartitionName(PVMDKIMAGE pImage, const char *pszRawDrive, RTFILE hRawDrive, uint32_t idxPartition,
4509 char **ppszRawPartition)
4510{
4511 int rc = VINF_SUCCESS;
4512 DWORD cbReturned = 0;
4513 STORAGE_DEVICE_NUMBER DevNum;
4514 RT_ZERO(DevNum);
4515 if (DeviceIoControl((HANDLE)RTFileToNative(hRawDrive), IOCTL_STORAGE_GET_DEVICE_NUMBER,
4516 NULL /*pvInBuffer*/, 0 /*cbInBuffer*/, &DevNum, sizeof(DevNum), &cbReturned, NULL /*pOverlapped*/))
4517 RTStrAPrintf(ppszRawPartition, "\\\\.\\Harddisk%uPartition%u", DevNum.DeviceNumber, idxPartition);
4518 else
4519 rc = vdIfError(pImage->pIfError, RTErrConvertFromWin32(GetLastError()), RT_SRC_POS,
4520 N_("VMDK: Image path: '%s'. IOCTL_STORAGE_GET_DEVICE_NUMBER failed on '%s': %u"),
4521 pImage->pszFilename, pszRawDrive, GetLastError());
4522 return rc;
4523}
4524#endif /* RT_OS_WINDOWS */
4525
4526/**
4527 * Worker for vmdkMakeRawDescriptor that adds partition descriptors when the
4528 * 'Partitions' configuration value is present.
4529 *
4530 * @returns VBox status code, error message has been set on failure.
4531 *
4532 * @note Caller is assumed to clean up @a pRawDesc and release
4533 * @a *phVolToRelease.
4534 * @internal
4535 */
4536static int vmdkRawDescDoPartitions(PVMDKIMAGE pImage, RTDVM hVolMgr, PVDISKRAW pRawDesc,
4537 RTFILE hRawDrive, const char *pszRawDrive, uint32_t cbSector,
4538 uint32_t fPartitions, uint32_t fPartitionsReadOnly, bool fRelative,
4539 PRTDVMVOLUME phVolToRelease)
4540{
4541 *phVolToRelease = NIL_RTDVMVOLUME;
4542
4543 /* Check sanity/understanding. */
4544 Assert(fPartitions);
4545 Assert((fPartitions & fPartitionsReadOnly) == fPartitionsReadOnly); /* RO should be a sub-set */
4546
4547 /*
4548 * Allocate on descriptor for each volume up front.
4549 */
4550 uint32_t const cVolumes = RTDvmMapGetValidVolumes(hVolMgr);
4551
4552 PVDISKRAWPARTDESC paPartDescs = NULL;
4553 int rc = vmdkRawDescAppendPartDesc(pImage, pRawDesc, cVolumes, &paPartDescs);
4554 AssertRCReturn(rc, rc);
4555
4556 /*
4557 * Enumerate the partitions (volumes) on the disk and create descriptors for each of them.
4558 */
4559 uint32_t fPartitionsLeft = fPartitions;
4560 RTDVMVOLUME hVol = NIL_RTDVMVOLUME; /* the current volume, needed for getting the next. */
4561 for (uint32_t i = 0; i < cVolumes; i++)
4562 {
4563 /*
4564 * Get the next/first volume and release the current.
4565 */
4566 RTDVMVOLUME hVolNext = NIL_RTDVMVOLUME;
4567 if (i == 0)
4568 rc = RTDvmMapQueryFirstVolume(hVolMgr, &hVolNext);
4569 else
4570 rc = RTDvmMapQueryNextVolume(hVolMgr, hVol, &hVolNext);
4571 if (RT_FAILURE(rc))
4572 return vdIfError(pImage->pIfError, rc, RT_SRC_POS,
4573 N_("VMDK: Image path: '%s'. Volume enumeration failed at volume #%u on '%s' (%Rrc)"),
4574 pImage->pszFilename, i, pszRawDrive, rc);
4575 uint32_t cRefs = RTDvmVolumeRelease(hVol);
4576 Assert(cRefs != UINT32_MAX); RT_NOREF(cRefs);
4577 *phVolToRelease = hVol = hVolNext;
4578
4579 /*
4580 * Depending on the fPartitions selector and associated read-only mask,
4581 * the guest either gets read-write or read-only access (bits set)
4582 * or no access (selector bit clear, access directed to the VMDK).
4583 */
4584 paPartDescs[i].cbData = RTDvmVolumeGetSize(hVol);
4585
4586 uint64_t offVolumeEndIgnored = 0;
4587 rc = RTDvmVolumeQueryRange(hVol, &paPartDescs[i].offStartInVDisk, &offVolumeEndIgnored);
4588 if (RT_FAILURE(rc))
4589 return vdIfError(pImage->pIfError, rc, RT_SRC_POS,
4590 N_("VMDK: Image path: '%s'. Failed to get location of volume #%u on '%s' (%Rrc)"),
4591 pImage->pszFilename, i, pszRawDrive, rc);
4592 Assert(paPartDescs[i].cbData == offVolumeEndIgnored + 1 - paPartDescs[i].offStartInVDisk);
4593
4594 /* Note! The index must match IHostDrivePartition::number. */
4595 uint32_t idxPartition = RTDvmVolumeGetIndex(hVol, RTDVMVOLIDX_HOST);
4596 if ( idxPartition < 32
4597 && (fPartitions & RT_BIT_32(idxPartition)))
4598 {
4599 fPartitionsLeft &= ~RT_BIT_32(idxPartition);
4600 if (fPartitionsReadOnly & RT_BIT_32(idxPartition))
4601 paPartDescs[i].uFlags |= VDISKRAW_READONLY;
4602
4603 if (!fRelative)
4604 {
4605 /*
4606 * Accessing the drive thru the main device node (pRawDesc->pszRawDisk).
4607 */
4608 paPartDescs[i].offStartInDevice = paPartDescs[i].offStartInVDisk;
4609 paPartDescs[i].pszRawDevice = RTStrDup(pszRawDrive);
4610 AssertPtrReturn(paPartDescs[i].pszRawDevice, VERR_NO_STR_MEMORY);
4611 }
4612 else
4613 {
4614 /*
4615 * Relative means access the partition data via the device node for that
4616 * partition, allowing the sysadmin/OS to allow a user access to individual
4617 * partitions without necessarily being able to compromise the host OS.
4618 * Obviously, the creation of the VMDK requires read access to the main
4619 * device node for the drive, but that's a one-time thing and can be done
4620 * by the sysadmin. Here data starts at offset zero in the device node.
4621 */
4622 paPartDescs[i].offStartInDevice = 0;
4623
4624#if defined(RT_OS_DARWIN) || defined(RT_OS_FREEBSD)
4625 /* /dev/rdisk1 -> /dev/rdisk1s2 (s=slice) */
4626 RTStrAPrintf(&paPartDescs[i].pszRawDevice, "%ss%u", pszRawDrive, idxPartition);
4627#elif defined(RT_OS_LINUX)
4628 /* Two naming schemes here: /dev/nvme0n1 -> /dev/nvme0n1p1; /dev/sda -> /dev/sda1 */
4629 RTStrAPrintf(&paPartDescs[i].pszRawDevice,
4630 RT_C_IS_DIGIT(pszRawDrive[strlen(pszRawDrive) - 1]) ? "%sp%u" : "%s%u", pszRawDrive, idxPartition);
4631#elif defined(RT_OS_WINDOWS)
4632 rc = vmdkRawDescWinMakePartitionName(pImage, pszRawDrive, hRawDrive, idxPartition, &paPartDescs[i].pszRawDevice);
4633 AssertRCReturn(rc, rc);
4634#elif defined(RT_OS_SOLARIS)
4635 if (pRawDesc->enmPartitioningType == VDISKPARTTYPE_MBR)
4636 {
4637 /*
4638 * MBR partitions have device nodes in form /dev/(r)dsk/cXtYdZpK
4639 * where X is the controller,
4640 * Y is target (SCSI device number),
4641 * Z is disk number,
4642 * K is partition number,
4643 * where p0 is the whole disk
4644 * p1-pN are the partitions of the disk
4645 */
4646 const char *pszRawDrivePath = pszRawDrive;
4647 char szDrivePath[RTPATH_MAX];
4648 size_t cbRawDrive = strlen(pszRawDrive);
4649 if ( cbRawDrive > 1 && strcmp(&pszRawDrive[cbRawDrive - 2], "p0") == 0)
4650 {
4651 memcpy(szDrivePath, pszRawDrive, cbRawDrive - 2);
4652 szDrivePath[cbRawDrive - 2] = '\0';
4653 pszRawDrivePath = szDrivePath;
4654 }
4655 RTStrAPrintf(&paPartDescs[i].pszRawDevice, "%sp%u", pszRawDrivePath, idxPartition);
4656 }
4657 else /* GPT */
4658 {
4659 /*
4660 * GPT partitions have device nodes in form /dev/(r)dsk/cXtYdZsK
4661 * where X is the controller,
4662 * Y is target (SCSI device number),
4663 * Z is disk number,
4664 * K is partition number, zero based. Can be only from 0 to 6.
4665 * Thus, only partitions numbered 0 through 6 have device nodes.
4666 */
4667 if (idxPartition > 7)
4668 return vdIfError(pImage->pIfError, VERR_INVALID_PARAMETER, RT_SRC_POS,
4669 N_("VMDK: Image path: '%s'. the partition #%u on '%s' has no device node and can not be specified with 'Relative' property"),
4670 pImage->pszFilename, idxPartition, pszRawDrive);
4671 RTStrAPrintf(&paPartDescs[i].pszRawDevice, "%ss%u", pszRawDrive, idxPartition - 1);
4672 }
4673#else
4674 AssertFailedReturn(VERR_INTERNAL_ERROR_4); /* The option parsing code should have prevented this - PORTME */
4675#endif
4676 AssertPtrReturn(paPartDescs[i].pszRawDevice, VERR_NO_STR_MEMORY);
4677
4678 rc = vmdkRawDescVerifyPartitionPath(pImage, &paPartDescs[i], idxPartition, pszRawDrive, hRawDrive, cbSector, hVol);
4679 AssertRCReturn(rc, rc);
4680 }
4681 }
4682 else
4683 {
4684 /* Not accessible to the guest. */
4685 paPartDescs[i].offStartInDevice = 0;
4686 paPartDescs[i].pszRawDevice = NULL;
4687 }
4688 } /* for each volume */
4689
4690 RTDvmVolumeRelease(hVol);
4691 *phVolToRelease = NIL_RTDVMVOLUME;
4692
4693 /*
4694 * Check that we found all the partitions the user selected.
4695 */
4696 if (fPartitionsLeft)
4697 {
4698 char szLeft[3 * sizeof(fPartitions) * 8];
4699 size_t cchLeft = 0;
4700 for (unsigned i = 0; i < sizeof(fPartitions) * 8; i++)
4701 if (fPartitionsLeft & RT_BIT_32(i))
4702 cchLeft += RTStrPrintf(&szLeft[cchLeft], sizeof(szLeft) - cchLeft, cchLeft ? "%u" : ",%u", i);
4703 return vdIfError(pImage->pIfError, VERR_INVALID_PARAMETER, RT_SRC_POS,
4704 N_("VMDK: Image path: '%s'. Not all the specified partitions for drive '%s' was found: %s"),
4705 pImage->pszFilename, pszRawDrive, szLeft);
4706 }
4707
4708 return VINF_SUCCESS;
4709}
4710
4711/**
4712 * Worker for vmdkMakeRawDescriptor that adds partition descriptors with copies
4713 * of the partition tables and associated padding areas when the 'Partitions'
4714 * configuration value is present.
4715 *
4716 * The guest is not allowed access to the partition tables, however it needs
4717 * them to be able to access the drive. So, create descriptors for each of the
4718 * tables and attach the current disk content. vmdkCreateRawImage() will later
4719 * write the content to the VMDK. Any changes the guest later makes to the
4720 * partition tables will then go to the VMDK copy, rather than the host drive.
4721 *
4722 * @returns VBox status code, error message has been set on failure.
4723 *
4724 * @note Caller is assumed to clean up @a pRawDesc
4725 * @internal
4726 */
4727static int vmdkRawDescDoCopyPartitionTables(PVMDKIMAGE pImage, RTDVM hVolMgr, PVDISKRAW pRawDesc,
4728 const char *pszRawDrive, RTFILE hRawDrive, void *pvBootSector, size_t cbBootSector)
4729{
4730 /*
4731 * Query the locations.
4732 */
4733 /* Determin how many locations there are: */
4734 size_t cLocations = 0;
4735 int rc = RTDvmMapQueryTableLocations(hVolMgr, RTDVMMAPQTABLOC_F_INCLUDE_LEGACY, NULL, 0, &cLocations);
4736 if (rc != VERR_BUFFER_OVERFLOW)
4737 return vdIfError(pImage->pIfError, rc, RT_SRC_POS,
4738 N_("VMDK: Image path: '%s'. RTDvmMapQueryTableLocations failed on '%s' (%Rrc)"),
4739 pImage->pszFilename, pszRawDrive, rc);
4740 AssertReturn(cLocations > 0 && cLocations < _16M, VERR_INTERNAL_ERROR_5);
4741
4742 /* We can allocate the partition descriptors here to save an intentation level. */
4743 PVDISKRAWPARTDESC paPartDescs = NULL;
4744 rc = vmdkRawDescAppendPartDesc(pImage, pRawDesc, (uint32_t)cLocations, &paPartDescs);
4745 AssertRCReturn(rc, rc);
4746
4747 /* Allocate the result table and repeat the location table query: */
4748 PRTDVMTABLELOCATION paLocations = (PRTDVMTABLELOCATION)RTMemAllocZ(sizeof(paLocations[0]) * cLocations);
4749 if (!paLocations)
4750 return vdIfError(pImage->pIfError, VERR_NO_MEMORY, RT_SRC_POS, N_("VMDK: Image path: '%s'. Failed to allocate %zu bytes"),
4751 pImage->pszFilename, sizeof(paLocations[0]) * cLocations);
4752 rc = RTDvmMapQueryTableLocations(hVolMgr, RTDVMMAPQTABLOC_F_INCLUDE_LEGACY, paLocations, cLocations, NULL);
4753 if (RT_SUCCESS(rc))
4754 {
4755 /*
4756 * Translate them into descriptors.
4757 *
4758 * We restrict the amount of partition alignment padding to 4MiB as more
4759 * will just be a waste of space. The use case for including the padding
4760 * are older boot loaders and boot manager (including one by a team member)
4761 * that put data and code in the 62 sectors between the MBR and the first
4762 * partition (total of 63). Later CHS was abandond and partition started
4763 * being aligned on power of two sector boundraries (typically 64KiB or
4764 * 1MiB depending on the media size).
4765 */
4766 for (size_t i = 0; i < cLocations && RT_SUCCESS(rc); i++)
4767 {
4768 Assert(paLocations[i].cb > 0);
4769 if (paLocations[i].cb <= _64M)
4770 {
4771 /* Create the partition descriptor entry: */
4772 //paPartDescs[i].pszRawDevice = NULL;
4773 //paPartDescs[i].offStartInDevice = 0;
4774 //paPartDescs[i].uFlags = 0;
4775 paPartDescs[i].offStartInVDisk = paLocations[i].off;
4776 paPartDescs[i].cbData = paLocations[i].cb;
4777 if (paPartDescs[i].cbData < _4M)
4778 paPartDescs[i].cbData = RT_MIN(paPartDescs[i].cbData + paLocations[i].cbPadding, _4M);
4779 paPartDescs[i].pvPartitionData = RTMemAllocZ((size_t)paPartDescs[i].cbData);
4780 if (paPartDescs[i].pvPartitionData)
4781 {
4782 /* Read the content from the drive: */
4783 rc = RTFileReadAt(hRawDrive, paPartDescs[i].offStartInVDisk, paPartDescs[i].pvPartitionData,
4784 (size_t)paPartDescs[i].cbData, NULL);
4785 if (RT_SUCCESS(rc))
4786 {
4787 /* Do we have custom boot sector code? */
4788 if (pvBootSector && cbBootSector && paPartDescs[i].offStartInVDisk == 0)
4789 {
4790 /* Note! Old code used to quietly drop the bootsector if it was considered too big.
4791 Instead we fail as we weren't able to do what the user requested us to do.
4792 Better if the user knows than starts questioning why the guest isn't
4793 booting as expected. */
4794 if (cbBootSector <= paPartDescs[i].cbData)
4795 memcpy(paPartDescs[i].pvPartitionData, pvBootSector, cbBootSector);
4796 else
4797 rc = vdIfError(pImage->pIfError, VERR_TOO_MUCH_DATA, RT_SRC_POS,
4798 N_("VMDK: Image path: '%s'. The custom boot sector is too big: %zu bytes, %RU64 bytes available"),
4799 pImage->pszFilename, cbBootSector, paPartDescs[i].cbData);
4800 }
4801 }
4802 else
4803 rc = vdIfError(pImage->pIfError, rc, RT_SRC_POS,
4804 N_("VMDK: Image path: '%s'. Failed to read partition at off %RU64 length %zu from '%s' (%Rrc)"),
4805 pImage->pszFilename, paPartDescs[i].offStartInVDisk,
4806 (size_t)paPartDescs[i].cbData, pszRawDrive, rc);
4807 }
4808 else
4809 rc = vdIfError(pImage->pIfError, rc, RT_SRC_POS,
4810 N_("VMDK: Image path: '%s'. Failed to allocate %zu bytes for copying the partition table at off %RU64"),
4811 pImage->pszFilename, (size_t)paPartDescs[i].cbData, paPartDescs[i].offStartInVDisk);
4812 }
4813 else
4814 rc = vdIfError(pImage->pIfError, VERR_TOO_MUCH_DATA, RT_SRC_POS,
4815 N_("VMDK: Image path: '%s'. Partition table #%u at offset %RU64 in '%s' is to big: %RU64 bytes"),
4816 pImage->pszFilename, i, paLocations[i].off, pszRawDrive, paLocations[i].cb);
4817 }
4818 }
4819 else
4820 rc = vdIfError(pImage->pIfError, rc, RT_SRC_POS,
4821 N_("VMDK: Image path: '%s'. RTDvmMapQueryTableLocations failed on '%s' (%Rrc)"),
4822 pImage->pszFilename, pszRawDrive, rc);
4823 RTMemFree(paLocations);
4824 return rc;
4825}
4826
4827/**
4828 * Opens the volume manager for the raw drive when in selected-partition mode.
4829 *
4830 * @param pImage The VMDK image (for errors).
4831 * @param hRawDrive The raw drive handle.
4832 * @param pszRawDrive The raw drive device path (for errors).
4833 * @param cbSector The sector size.
4834 * @param phVolMgr Where to return the handle to the volume manager on
4835 * success.
4836 * @returns VBox status code, errors have been reported.
4837 * @internal
4838 */
4839static int vmdkRawDescOpenVolMgr(PVMDKIMAGE pImage, RTFILE hRawDrive, const char *pszRawDrive, uint32_t cbSector, PRTDVM phVolMgr)
4840{
4841 *phVolMgr = NIL_RTDVM;
4842
4843 RTVFSFILE hVfsFile = NIL_RTVFSFILE;
4844 int rc = RTVfsFileFromRTFile(hRawDrive, RTFILE_O_READ | RTFILE_O_OPEN | RTFILE_O_DENY_NONE, true /*fLeaveOpen*/, &hVfsFile);
4845 if (RT_FAILURE(rc))
4846 return vdIfError(pImage->pIfError, rc, RT_SRC_POS,
4847 N_("VMDK: Image path: '%s'. RTVfsFileFromRTFile failed for '%s' handle (%Rrc)"),
4848 pImage->pszFilename, pszRawDrive, rc);
4849
4850 RTDVM hVolMgr = NIL_RTDVM;
4851 rc = RTDvmCreate(&hVolMgr, hVfsFile, cbSector, 0 /*fFlags*/);
4852
4853 RTVfsFileRelease(hVfsFile);
4854
4855 if (RT_FAILURE(rc))
4856 return vdIfError(pImage->pIfError, rc, RT_SRC_POS,
4857 N_("VMDK: Image path: '%s'. Failed to create volume manager instance for '%s' (%Rrc)"),
4858 pImage->pszFilename, pszRawDrive, rc);
4859
4860 rc = RTDvmMapOpen(hVolMgr);
4861 if (RT_SUCCESS(rc))
4862 {
4863 *phVolMgr = hVolMgr;
4864 return VINF_SUCCESS;
4865 }
4866 RTDvmRelease(hVolMgr);
4867 return vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: Image path: '%s'. RTDvmMapOpen failed for '%s' (%Rrc)"),
4868 pImage->pszFilename, pszRawDrive, rc);
4869}
4870
4871/**
4872 * Opens the raw drive device and get the sizes for it.
4873 *
4874 * @param pImage The image (for error reporting).
4875 * @param pszRawDrive The device/whatever to open.
4876 * @param phRawDrive Where to return the file handle.
4877 * @param pcbRawDrive Where to return the size.
4878 * @param pcbSector Where to return the sector size.
4879 * @returns IPRT status code, errors have been reported.
4880 * @internal
4881 */
4882static int vmkdRawDescOpenDevice(PVMDKIMAGE pImage, const char *pszRawDrive,
4883 PRTFILE phRawDrive, uint64_t *pcbRawDrive, uint32_t *pcbSector)
4884{
4885 /*
4886 * Open the device for the raw drive.
4887 */
4888 RTFILE hRawDrive = NIL_RTFILE;
4889 int rc = RTFileOpen(&hRawDrive, pszRawDrive, RTFILE_O_READ | RTFILE_O_OPEN | RTFILE_O_DENY_NONE);
4890 if (RT_FAILURE(rc))
4891 return vdIfError(pImage->pIfError, rc, RT_SRC_POS,
4892 N_("VMDK: Image path: '%s'. Failed to open the raw drive '%s' for reading (%Rrc)"),
4893 pImage->pszFilename, pszRawDrive, rc);
4894
4895 /*
4896 * Get the sector size.
4897 */
4898 uint32_t cbSector = 0;
4899 rc = RTFileQuerySectorSize(hRawDrive, &cbSector);
4900 if (RT_SUCCESS(rc))
4901 {
4902 /* sanity checks */
4903 if ( cbSector >= 512
4904 && cbSector <= _64K
4905 && RT_IS_POWER_OF_TWO(cbSector))
4906 {
4907 /*
4908 * Get the size.
4909 */
4910 uint64_t cbRawDrive = 0;
4911 rc = RTFileQuerySize(hRawDrive, &cbRawDrive);
4912 if (RT_SUCCESS(rc))
4913 {
4914 /* Check whether cbSize is actually sensible. */
4915 if (cbRawDrive > cbSector && (cbRawDrive % cbSector) == 0)
4916 {
4917 *phRawDrive = hRawDrive;
4918 *pcbRawDrive = cbRawDrive;
4919 *pcbSector = cbSector;
4920 return VINF_SUCCESS;
4921 }
4922 rc = vdIfError(pImage->pIfError, VERR_INVALID_PARAMETER, RT_SRC_POS,
4923 N_("VMDK: Image path: '%s'. Got a bogus size for the raw drive '%s': %RU64 (sector size %u)"),
4924 pImage->pszFilename, pszRawDrive, cbRawDrive, cbSector);
4925 }
4926 else
4927 rc = vdIfError(pImage->pIfError, rc, RT_SRC_POS,
4928 N_("VMDK: Image path: '%s'. Failed to query size of the drive '%s' (%Rrc)"),
4929 pImage->pszFilename, pszRawDrive, rc);
4930 }
4931 else
4932 rc = vdIfError(pImage->pIfError, VERR_OUT_OF_RANGE, RT_SRC_POS,
4933 N_("VMDK: Image path: '%s'. Unsupported sector size for '%s': %u (%#x)"),
4934 pImage->pszFilename, pszRawDrive, cbSector, cbSector);
4935 }
4936 else
4937 rc = vdIfError(pImage->pIfError, rc, RT_SRC_POS,
4938 N_("VMDK: Image path: '%s'. Failed to get the sector size for '%s' (%Rrc)"),
4939 pImage->pszFilename, pszRawDrive, rc);
4940 RTFileClose(hRawDrive);
4941 return rc;
4942}
4943
4944/**
4945 * Reads the raw disk configuration, leaving initalization and cleanup to the
4946 * caller (regardless of return status).
4947 *
4948 * @returns VBox status code, errors properly reported.
4949 * @internal
4950 */
4951static int vmdkRawDescParseConfig(PVMDKIMAGE pImage, char **ppszRawDrive,
4952 uint32_t *pfPartitions, uint32_t *pfPartitionsReadOnly,
4953 void **ppvBootSector, size_t *pcbBootSector, bool *pfRelative,
4954 char **ppszFreeMe)
4955{
4956 PVDINTERFACECONFIG pImgCfg = VDIfConfigGet(pImage->pVDIfsImage);
4957 if (!pImgCfg)
4958 return vdIfError(pImage->pIfError, VERR_INVALID_PARAMETER, RT_SRC_POS,
4959 N_("VMDK: Image path: '%s'. Getting config interface failed"), pImage->pszFilename);
4960
4961 /*
4962 * RawDrive = path
4963 */
4964 int rc = VDCFGQueryStringAlloc(pImgCfg, "RawDrive", ppszRawDrive);
4965 if (RT_FAILURE(rc))
4966 return vdIfError(pImage->pIfError, rc, RT_SRC_POS,
4967 N_("VMDK: Image path: '%s'. Getting 'RawDrive' configuration failed (%Rrc)"), pImage->pszFilename, rc);
4968 AssertPtrReturn(*ppszRawDrive, VERR_INTERNAL_ERROR_3);
4969
4970 /*
4971 * Partitions=n[r][,...]
4972 */
4973 uint32_t const cMaxPartitionBits = sizeof(*pfPartitions) * 8 /* ASSUMES 8 bits per char */;
4974 *pfPartitions = *pfPartitionsReadOnly = 0;
4975
4976 rc = VDCFGQueryStringAlloc(pImgCfg, "Partitions", ppszFreeMe);
4977 if (RT_SUCCESS(rc))
4978 {
4979 char *psz = *ppszFreeMe;
4980 while (*psz != '\0')
4981 {
4982 char *pszNext;
4983 uint32_t u32;
4984 rc = RTStrToUInt32Ex(psz, &pszNext, 0, &u32);
4985 if (rc == VWRN_NUMBER_TOO_BIG || rc == VWRN_NEGATIVE_UNSIGNED)
4986 rc = -rc;
4987 if (RT_FAILURE(rc))
4988 return vdIfError(pImage->pIfError, VERR_INVALID_PARAMETER, RT_SRC_POS,
4989 N_("VMDK: Image path: '%s'. Parsing 'Partitions' config value failed. Incorrect value (%Rrc): %s"),
4990 pImage->pszFilename, rc, psz);
4991 if (u32 >= cMaxPartitionBits)
4992 return vdIfError(pImage->pIfError, VERR_INVALID_PARAMETER, RT_SRC_POS,
4993 N_("VMDK: Image path: '%s'. 'Partitions' config sub-value out of range: %RU32, max %RU32"),
4994 pImage->pszFilename, u32, cMaxPartitionBits);
4995 *pfPartitions |= RT_BIT_32(u32);
4996 psz = pszNext;
4997 if (*psz == 'r')
4998 {
4999 *pfPartitionsReadOnly |= RT_BIT_32(u32);
5000 psz++;
5001 }
5002 if (*psz == ',')
5003 psz++;
5004 else if (*psz != '\0')
5005 return vdIfError(pImage->pIfError, VERR_INVALID_PARAMETER, RT_SRC_POS,
5006 N_("VMDK: Image path: '%s'. Malformed 'Partitions' config value, expected separator: %s"),
5007 pImage->pszFilename, psz);
5008 }
5009
5010 RTStrFree(*ppszFreeMe);
5011 *ppszFreeMe = NULL;
5012 }
5013 else if (rc != VERR_CFGM_VALUE_NOT_FOUND)
5014 return vdIfError(pImage->pIfError, rc, RT_SRC_POS,
5015 N_("VMDK: Image path: '%s'. Getting 'Partitions' configuration failed (%Rrc)"), pImage->pszFilename, rc);
5016
5017 /*
5018 * BootSector=base64
5019 */
5020 rc = VDCFGQueryStringAlloc(pImgCfg, "BootSector", ppszFreeMe);
5021 if (RT_SUCCESS(rc))
5022 {
5023 ssize_t cbBootSector = RTBase64DecodedSize(*ppszFreeMe, NULL);
5024 if (cbBootSector < 0)
5025 return vdIfError(pImage->pIfError, VERR_INVALID_BASE64_ENCODING, RT_SRC_POS,
5026 N_("VMDK: Image path: '%s'. BASE64 decoding failed on the custom bootsector for '%s'"),
5027 pImage->pszFilename, *ppszRawDrive);
5028 if (cbBootSector == 0)
5029 return vdIfError(pImage->pIfError, VERR_INVALID_PARAMETER, RT_SRC_POS,
5030 N_("VMDK: Image path: '%s'. Custom bootsector for '%s' is zero bytes big"),
5031 pImage->pszFilename, *ppszRawDrive);
5032 if (cbBootSector > _4M) /* this is just a preliminary max */
5033 return vdIfError(pImage->pIfError, VERR_INVALID_PARAMETER, RT_SRC_POS,
5034 N_("VMDK: Image path: '%s'. Custom bootsector for '%s' is way too big: %zu bytes, max 4MB"),
5035 pImage->pszFilename, *ppszRawDrive, cbBootSector);
5036
5037 /* Refuse the boot sector if whole-drive. This used to be done quietly,
5038 however, bird disagrees and thinks the user should be told that what
5039 he/she/it tries to do isn't possible. There should be less head
5040 scratching this way when the guest doesn't do the expected thing. */
5041 if (!*pfPartitions)
5042 return vdIfError(pImage->pIfError, VERR_INVALID_PARAMETER, RT_SRC_POS,
5043 N_("VMDK: Image path: '%s'. Custom bootsector for '%s' is not supported for whole-drive configurations, only when selecting partitions"),
5044 pImage->pszFilename, *ppszRawDrive);
5045
5046 *pcbBootSector = (size_t)cbBootSector;
5047 *ppvBootSector = RTMemAlloc((size_t)cbBootSector);
5048 if (!*ppvBootSector)
5049 return vdIfError(pImage->pIfError, VERR_NO_MEMORY, RT_SRC_POS,
5050 N_("VMDK: Image path: '%s'. Failed to allocate %zd bytes for the custom bootsector for '%s'"),
5051 pImage->pszFilename, cbBootSector, *ppszRawDrive);
5052
5053 rc = RTBase64Decode(*ppszFreeMe, *ppvBootSector, cbBootSector, NULL /*pcbActual*/, NULL /*ppszEnd*/);
5054 if (RT_FAILURE(rc))
5055 return vdIfError(pImage->pIfError, VERR_NO_MEMORY, RT_SRC_POS,
5056 N_("VMDK: Image path: '%s'. Base64 decoding of the custom boot sector for '%s' failed (%Rrc)"),
5057 pImage->pszFilename, *ppszRawDrive, rc);
5058
5059 RTStrFree(*ppszFreeMe);
5060 *ppszFreeMe = NULL;
5061 }
5062 else if (rc != VERR_CFGM_VALUE_NOT_FOUND)
5063 return vdIfError(pImage->pIfError, rc, RT_SRC_POS,
5064 N_("VMDK: Image path: '%s'. Getting 'BootSector' configuration failed (%Rrc)"), pImage->pszFilename, rc);
5065
5066 /*
5067 * Relative=0/1
5068 */
5069 *pfRelative = false;
5070 rc = VDCFGQueryBool(pImgCfg, "Relative", pfRelative);
5071 if (RT_SUCCESS(rc))
5072 {
5073 if (!*pfPartitions && *pfRelative != false)
5074 return vdIfError(pImage->pIfError, VERR_INVALID_PARAMETER, RT_SRC_POS,
5075 N_("VMDK: Image path: '%s'. The 'Relative' option is not supported for whole-drive configurations, only when selecting partitions"),
5076 pImage->pszFilename);
5077#if !defined(RT_OS_DARWIN) && !defined(RT_OS_LINUX) && !defined(RT_OS_FREEBSD) && !defined(RT_OS_WINDOWS) && !defined(RT_OS_SOLARIS) /* PORTME */
5078 if (*pfRelative == true)
5079 return vdIfError(pImage->pIfError, VERR_INVALID_PARAMETER, RT_SRC_POS,
5080 N_("VMDK: Image path: '%s'. The 'Relative' option is not supported on this host OS"),
5081 pImage->pszFilename);
5082#endif
5083 }
5084 else if (rc != VERR_CFGM_VALUE_NOT_FOUND)
5085 return vdIfError(pImage->pIfError, rc, RT_SRC_POS,
5086 N_("VMDK: Image path: '%s'. Getting 'Relative' configuration failed (%Rrc)"), pImage->pszFilename, rc);
5087 else
5088#ifdef RT_OS_DARWIN /* different default on macOS, see ticketref:1461 (comment 20). */
5089 *pfRelative = true;
5090#else
5091 *pfRelative = false;
5092#endif
5093
5094 return VINF_SUCCESS;
5095}
5096
5097/**
5098 * Creates a raw drive (nee disk) descriptor.
5099 *
5100 * This was originally done in VBoxInternalManage.cpp, but was copied (not move)
5101 * here much later. That's one of the reasons why we produce a descriptor just
5102 * like it does, rather than mixing directly into the vmdkCreateRawImage code.
5103 *
5104 * @returns VBox status code.
5105 * @param pImage The image.
5106 * @param ppRaw Where to return the raw drive descriptor. Caller must
5107 * free it using vmdkRawDescFree regardless of the status
5108 * code.
5109 * @internal
5110 */
5111static int vmdkMakeRawDescriptor(PVMDKIMAGE pImage, PVDISKRAW *ppRaw)
5112{
5113 /* Make sure it's NULL. */
5114 *ppRaw = NULL;
5115
5116 /*
5117 * Read the configuration.
5118 */
5119 char *pszRawDrive = NULL;
5120 uint32_t fPartitions = 0; /* zero if whole-drive */
5121 uint32_t fPartitionsReadOnly = 0; /* (subset of fPartitions) */
5122 void *pvBootSector = NULL;
5123 size_t cbBootSector = 0;
5124 bool fRelative = false;
5125 char *pszFreeMe = NULL; /* lazy bird cleanup. */
5126 int rc = vmdkRawDescParseConfig(pImage, &pszRawDrive, &fPartitions, &fPartitionsReadOnly,
5127 &pvBootSector, &cbBootSector, &fRelative, &pszFreeMe);
5128 RTStrFree(pszFreeMe);
5129 if (RT_SUCCESS(rc))
5130 {
5131 /*
5132 * Open the device, getting the sector size and drive size.
5133 */
5134 uint64_t cbSize = 0;
5135 uint32_t cbSector = 0;
5136 RTFILE hRawDrive = NIL_RTFILE;
5137 rc = vmkdRawDescOpenDevice(pImage, pszRawDrive, &hRawDrive, &cbSize, &cbSector);
5138 if (RT_SUCCESS(rc))
5139 {
5140 pImage->cbSize = cbSize;
5141 /*
5142 * Create the raw-drive descriptor
5143 */
5144 PVDISKRAW pRawDesc = (PVDISKRAW)RTMemAllocZ(sizeof(*pRawDesc));
5145 if (pRawDesc)
5146 {
5147 pRawDesc->szSignature[0] = 'R';
5148 pRawDesc->szSignature[1] = 'A';
5149 pRawDesc->szSignature[2] = 'W';
5150 //pRawDesc->szSignature[3] = '\0';
5151 if (!fPartitions)
5152 {
5153 /*
5154 * It's simple for when doing the whole drive.
5155 */
5156 pRawDesc->uFlags = VDISKRAW_DISK;
5157 rc = RTStrDupEx(&pRawDesc->pszRawDisk, pszRawDrive);
5158 }
5159 else
5160 {
5161 /*
5162 * In selected partitions mode we've got a lot more work ahead of us.
5163 */
5164 pRawDesc->uFlags = VDISKRAW_NORMAL;
5165 //pRawDesc->pszRawDisk = NULL;
5166 //pRawDesc->cPartDescs = 0;
5167 //pRawDesc->pPartDescs = NULL;
5168
5169 /* We need to parse the partition map to complete the descriptor: */
5170 RTDVM hVolMgr = NIL_RTDVM;
5171 rc = vmdkRawDescOpenVolMgr(pImage, hRawDrive, pszRawDrive, cbSector, &hVolMgr);
5172 if (RT_SUCCESS(rc))
5173 {
5174 RTDVMFORMATTYPE enmFormatType = RTDvmMapGetFormatType(hVolMgr);
5175 if ( enmFormatType == RTDVMFORMATTYPE_MBR
5176 || enmFormatType == RTDVMFORMATTYPE_GPT)
5177 {
5178 pRawDesc->enmPartitioningType = enmFormatType == RTDVMFORMATTYPE_MBR
5179 ? VDISKPARTTYPE_MBR : VDISKPARTTYPE_GPT;
5180
5181 /* Add copies of the partition tables: */
5182 rc = vmdkRawDescDoCopyPartitionTables(pImage, hVolMgr, pRawDesc, pszRawDrive, hRawDrive,
5183 pvBootSector, cbBootSector);
5184 if (RT_SUCCESS(rc))
5185 {
5186 /* Add descriptors for the partitions/volumes, indicating which
5187 should be accessible and how to access them: */
5188 RTDVMVOLUME hVolRelease = NIL_RTDVMVOLUME;
5189 rc = vmdkRawDescDoPartitions(pImage, hVolMgr, pRawDesc, hRawDrive, pszRawDrive, cbSector,
5190 fPartitions, fPartitionsReadOnly, fRelative, &hVolRelease);
5191 RTDvmVolumeRelease(hVolRelease);
5192
5193 /* Finally, sort the partition and check consistency (overlaps, etc): */
5194 if (RT_SUCCESS(rc))
5195 rc = vmdkRawDescPostProcessPartitions(pImage, pRawDesc, cbSize);
5196 }
5197 }
5198 else
5199 rc = vdIfError(pImage->pIfError, VERR_NOT_SUPPORTED, RT_SRC_POS,
5200 N_("VMDK: Image path: '%s'. Unsupported partitioning for the disk '%s': %s"),
5201 pImage->pszFilename, pszRawDrive, RTDvmMapGetFormatType(hVolMgr));
5202 RTDvmRelease(hVolMgr);
5203 }
5204 }
5205 if (RT_SUCCESS(rc))
5206 {
5207 /*
5208 * We succeeded.
5209 */
5210 *ppRaw = pRawDesc;
5211 Log(("vmdkMakeRawDescriptor: fFlags=%#x enmPartitioningType=%d cPartDescs=%u pszRawDisk=%s\n",
5212 pRawDesc->uFlags, pRawDesc->enmPartitioningType, pRawDesc->cPartDescs, pRawDesc->pszRawDisk));
5213 if (pRawDesc->cPartDescs)
5214 {
5215 Log(("# VMDK offset Length Device offset PartDataPtr Device\n"));
5216 for (uint32_t i = 0; i < pRawDesc->cPartDescs; i++)
5217 Log(("%2u %14RU64 %14RU64 %14RU64 %#18p %s\n", i, pRawDesc->pPartDescs[i].offStartInVDisk,
5218 pRawDesc->pPartDescs[i].cbData, pRawDesc->pPartDescs[i].offStartInDevice,
5219 pRawDesc->pPartDescs[i].pvPartitionData, pRawDesc->pPartDescs[i].pszRawDevice));
5220 }
5221 }
5222 else
5223 vmdkRawDescFree(pRawDesc);
5224 }
5225 else
5226 rc = vdIfError(pImage->pIfError, VERR_NOT_SUPPORTED, RT_SRC_POS,
5227 N_("VMDK: Image path: '%s'. Failed to allocate %u bytes for the raw drive descriptor"),
5228 pImage->pszFilename, sizeof(*pRawDesc));
5229 RTFileClose(hRawDrive);
5230 }
5231 }
5232 RTStrFree(pszRawDrive);
5233 RTMemFree(pvBootSector);
5234 return rc;
5235}
5236
5237/**
5238 * Internal: create VMDK images for raw disk/partition access.
5239 */
5240static int vmdkCreateRawImage(PVMDKIMAGE pImage, const PVDISKRAW pRaw,
5241 uint64_t cbSize)
5242{
5243 int rc = VINF_SUCCESS;
5244 PVMDKEXTENT pExtent;
5245
5246 if (pRaw->uFlags & VDISKRAW_DISK)
5247 {
5248 /* Full raw disk access. This requires setting up a descriptor
5249 * file and open the (flat) raw disk. */
5250 rc = vmdkCreateExtents(pImage, 1);
5251 if (RT_FAILURE(rc))
5252 return vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: could not create new extent list in '%s'"), pImage->pszFilename);
5253 pExtent = &pImage->pExtents[0];
5254 /* Create raw disk descriptor file. */
5255 rc = vmdkFileOpen(pImage, &pImage->pFile, NULL, pImage->pszFilename,
5256 VDOpenFlagsToFileOpenFlags(pImage->uOpenFlags,
5257 true /* fCreate */));
5258 if (RT_FAILURE(rc))
5259 return vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: could not create new file '%s'"), pImage->pszFilename);
5260
5261 /* Set up basename for extent description. Cannot use StrDup. */
5262 size_t cbBasename = strlen(pRaw->pszRawDisk) + 1;
5263 char *pszBasename = (char *)RTMemTmpAlloc(cbBasename);
5264 if (!pszBasename)
5265 return VERR_NO_MEMORY;
5266 memcpy(pszBasename, pRaw->pszRawDisk, cbBasename);
5267 pExtent->pszBasename = pszBasename;
5268 /* For raw disks the full name is identical to the base name. */
5269 pExtent->pszFullname = RTStrDup(pszBasename);
5270 if (!pExtent->pszFullname)
5271 return VERR_NO_MEMORY;
5272 pExtent->enmType = VMDKETYPE_FLAT;
5273 pExtent->cNominalSectors = VMDK_BYTE2SECTOR(cbSize);
5274 pExtent->uSectorOffset = 0;
5275 pExtent->enmAccess = (pRaw->uFlags & VDISKRAW_READONLY) ? VMDKACCESS_READONLY : VMDKACCESS_READWRITE;
5276 pExtent->fMetaDirty = false;
5277
5278 /* Open flat image, the raw disk. */
5279 rc = vmdkFileOpen(pImage, &pExtent->pFile, pExtent->pszBasename, pExtent->pszFullname,
5280 VDOpenFlagsToFileOpenFlags(pImage->uOpenFlags | ((pExtent->enmAccess == VMDKACCESS_READONLY) ? VD_OPEN_FLAGS_READONLY : 0),
5281 false /* fCreate */));
5282 if (RT_FAILURE(rc))
5283 return vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: could not open raw disk file '%s'"), pExtent->pszFullname);
5284 }
5285 else
5286 {
5287 /* Raw partition access. This requires setting up a descriptor
5288 * file, write the partition information to a flat extent and
5289 * open all the (flat) raw disk partitions. */
5290
5291 /* First pass over the partition data areas to determine how many
5292 * extents we need. One data area can require up to 2 extents, as
5293 * it might be necessary to skip over unpartitioned space. */
5294 unsigned cExtents = 0;
5295 uint64_t uStart = 0;
5296 for (unsigned i = 0; i < pRaw->cPartDescs; i++)
5297 {
5298 PVDISKRAWPARTDESC pPart = &pRaw->pPartDescs[i];
5299 if (uStart > pPart->offStartInVDisk)
5300 return vdIfError(pImage->pIfError, VERR_INVALID_PARAMETER, RT_SRC_POS,
5301 N_("VMDK: incorrect partition data area ordering set up by the caller in '%s'"), pImage->pszFilename);
5302
5303 if (uStart < pPart->offStartInVDisk)
5304 cExtents++;
5305 uStart = pPart->offStartInVDisk + pPart->cbData;
5306 cExtents++;
5307 }
5308 /* Another extent for filling up the rest of the image. */
5309 if (uStart != cbSize)
5310 cExtents++;
5311
5312 rc = vmdkCreateExtents(pImage, cExtents);
5313 if (RT_FAILURE(rc))
5314 return vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: could not create new extent list in '%s'"), pImage->pszFilename);
5315
5316 /* Create raw partition descriptor file. */
5317 rc = vmdkFileOpen(pImage, &pImage->pFile, NULL, pImage->pszFilename,
5318 VDOpenFlagsToFileOpenFlags(pImage->uOpenFlags,
5319 true /* fCreate */));
5320 if (RT_FAILURE(rc))
5321 return vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: could not create new file '%s'"), pImage->pszFilename);
5322
5323 /* Create base filename for the partition table extent. */
5324 /** @todo remove fixed buffer without creating memory leaks. */
5325 char pszPartition[1024];
5326 const char *pszBase = RTPathFilename(pImage->pszFilename);
5327 const char *pszSuff = RTPathSuffix(pszBase);
5328 if (pszSuff == NULL)
5329 return vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: invalid filename '%s'"), pImage->pszFilename);
5330 char *pszBaseBase = RTStrDup(pszBase);
5331 if (!pszBaseBase)
5332 return VERR_NO_MEMORY;
5333 RTPathStripSuffix(pszBaseBase);
5334 RTStrPrintf(pszPartition, sizeof(pszPartition), "%s-pt%s",
5335 pszBaseBase, pszSuff);
5336 RTStrFree(pszBaseBase);
5337
5338 /* Second pass over the partitions, now define all extents. */
5339 uint64_t uPartOffset = 0;
5340 cExtents = 0;
5341 uStart = 0;
5342 for (unsigned i = 0; i < pRaw->cPartDescs; i++)
5343 {
5344 PVDISKRAWPARTDESC pPart = &pRaw->pPartDescs[i];
5345 pExtent = &pImage->pExtents[cExtents++];
5346
5347 if (uStart < pPart->offStartInVDisk)
5348 {
5349 pExtent->pszBasename = NULL;
5350 pExtent->pszFullname = NULL;
5351 pExtent->enmType = VMDKETYPE_ZERO;
5352 pExtent->cNominalSectors = VMDK_BYTE2SECTOR(pPart->offStartInVDisk - uStart);
5353 pExtent->uSectorOffset = 0;
5354 pExtent->enmAccess = VMDKACCESS_READWRITE;
5355 pExtent->fMetaDirty = false;
5356 /* go to next extent */
5357 pExtent = &pImage->pExtents[cExtents++];
5358 }
5359 uStart = pPart->offStartInVDisk + pPart->cbData;
5360
5361 if (pPart->pvPartitionData)
5362 {
5363 /* Set up basename for extent description. Can't use StrDup. */
5364 size_t cbBasename = strlen(pszPartition) + 1;
5365 char *pszBasename = (char *)RTMemTmpAlloc(cbBasename);
5366 if (!pszBasename)
5367 return VERR_NO_MEMORY;
5368 memcpy(pszBasename, pszPartition, cbBasename);
5369 pExtent->pszBasename = pszBasename;
5370
5371 /* Set up full name for partition extent. */
5372 char *pszDirname = RTStrDup(pImage->pszFilename);
5373 if (!pszDirname)
5374 return VERR_NO_STR_MEMORY;
5375 RTPathStripFilename(pszDirname);
5376 char *pszFullname = RTPathJoinA(pszDirname, pExtent->pszBasename);
5377 RTStrFree(pszDirname);
5378 if (!pszFullname)
5379 return VERR_NO_STR_MEMORY;
5380 pExtent->pszFullname = pszFullname;
5381 pExtent->enmType = VMDKETYPE_FLAT;
5382 pExtent->cNominalSectors = VMDK_BYTE2SECTOR(pPart->cbData);
5383 pExtent->uSectorOffset = uPartOffset;
5384 pExtent->enmAccess = VMDKACCESS_READWRITE;
5385 pExtent->fMetaDirty = false;
5386
5387 /* Create partition table flat image. */
5388 rc = vmdkFileOpen(pImage, &pExtent->pFile, pExtent->pszBasename, pExtent->pszFullname,
5389 VDOpenFlagsToFileOpenFlags(pImage->uOpenFlags | ((pExtent->enmAccess == VMDKACCESS_READONLY) ? VD_OPEN_FLAGS_READONLY : 0),
5390 true /* fCreate */));
5391 if (RT_FAILURE(rc))
5392 return vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: could not create new partition data file '%s'"), pExtent->pszFullname);
5393 rc = vdIfIoIntFileWriteSync(pImage->pIfIo, pExtent->pFile->pStorage,
5394 VMDK_SECTOR2BYTE(uPartOffset),
5395 pPart->pvPartitionData,
5396 pPart->cbData);
5397 if (RT_FAILURE(rc))
5398 return vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: could not write partition data to '%s'"), pExtent->pszFullname);
5399 uPartOffset += VMDK_BYTE2SECTOR(pPart->cbData);
5400 }
5401 else
5402 {
5403 if (pPart->pszRawDevice)
5404 {
5405 /* Set up basename for extent descr. Can't use StrDup. */
5406 size_t cbBasename = strlen(pPart->pszRawDevice) + 1;
5407 char *pszBasename = (char *)RTMemTmpAlloc(cbBasename);
5408 if (!pszBasename)
5409 return VERR_NO_MEMORY;
5410 memcpy(pszBasename, pPart->pszRawDevice, cbBasename);
5411 pExtent->pszBasename = pszBasename;
5412 /* For raw disks full name is identical to base name. */
5413 pExtent->pszFullname = RTStrDup(pszBasename);
5414 if (!pExtent->pszFullname)
5415 return VERR_NO_MEMORY;
5416 pExtent->enmType = VMDKETYPE_FLAT;
5417 pExtent->cNominalSectors = VMDK_BYTE2SECTOR(pPart->cbData);
5418 pExtent->uSectorOffset = VMDK_BYTE2SECTOR(pPart->offStartInDevice);
5419 pExtent->enmAccess = (pPart->uFlags & VDISKRAW_READONLY) ? VMDKACCESS_READONLY : VMDKACCESS_READWRITE;
5420 pExtent->fMetaDirty = false;
5421
5422 /* Open flat image, the raw partition. */
5423 rc = vmdkFileOpen(pImage, &pExtent->pFile, pExtent->pszBasename, pExtent->pszFullname,
5424 VDOpenFlagsToFileOpenFlags(pImage->uOpenFlags | ((pExtent->enmAccess == VMDKACCESS_READONLY) ? VD_OPEN_FLAGS_READONLY : 0),
5425 false /* fCreate */));
5426 if (RT_FAILURE(rc))
5427 return vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: could not open raw partition file '%s'"), pExtent->pszFullname);
5428 }
5429 else
5430 {
5431 pExtent->pszBasename = NULL;
5432 pExtent->pszFullname = NULL;
5433 pExtent->enmType = VMDKETYPE_ZERO;
5434 pExtent->cNominalSectors = VMDK_BYTE2SECTOR(pPart->cbData);
5435 pExtent->uSectorOffset = 0;
5436 pExtent->enmAccess = VMDKACCESS_READWRITE;
5437 pExtent->fMetaDirty = false;
5438 }
5439 }
5440 }
5441 /* Another extent for filling up the rest of the image. */
5442 if (uStart != cbSize)
5443 {
5444 pExtent = &pImage->pExtents[cExtents++];
5445 pExtent->pszBasename = NULL;
5446 pExtent->pszFullname = NULL;
5447 pExtent->enmType = VMDKETYPE_ZERO;
5448 pExtent->cNominalSectors = VMDK_BYTE2SECTOR(cbSize - uStart);
5449 pExtent->uSectorOffset = 0;
5450 pExtent->enmAccess = VMDKACCESS_READWRITE;
5451 pExtent->fMetaDirty = false;
5452 }
5453 }
5454
5455 rc = vmdkDescBaseSetStr(pImage, &pImage->Descriptor, "createType",
5456 (pRaw->uFlags & VDISKRAW_DISK) ?
5457 "fullDevice" : "partitionedDevice");
5458 if (RT_FAILURE(rc))
5459 return vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: could not set the image type in '%s'"), pImage->pszFilename);
5460 return rc;
5461}
5462
5463/**
5464 * Internal: create a regular (i.e. file-backed) VMDK image.
5465 */
5466static int vmdkCreateRegularImage(PVMDKIMAGE pImage, uint64_t cbSize,
5467 unsigned uImageFlags, PVDINTERFACEPROGRESS pIfProgress,
5468 unsigned uPercentStart, unsigned uPercentSpan)
5469{
5470 int rc = VINF_SUCCESS;
5471 unsigned cExtents = 1;
5472 uint64_t cbOffset = 0;
5473 uint64_t cbRemaining = cbSize;
5474
5475 if (uImageFlags & VD_VMDK_IMAGE_FLAGS_SPLIT_2G)
5476 {
5477 cExtents = cbSize / VMDK_2G_SPLIT_SIZE;
5478 /* Do proper extent computation: need one smaller extent if the total
5479 * size isn't evenly divisible by the split size. */
5480 if (cbSize % VMDK_2G_SPLIT_SIZE)
5481 cExtents++;
5482 }
5483 rc = vmdkCreateExtents(pImage, cExtents);
5484 if (RT_FAILURE(rc))
5485 return vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: could not create new extent list in '%s'"), pImage->pszFilename);
5486
5487 /* Basename strings needed for constructing the extent names. */
5488 char *pszBasenameSubstr = RTPathFilename(pImage->pszFilename);
5489 AssertPtr(pszBasenameSubstr);
5490 size_t cbBasenameSubstr = strlen(pszBasenameSubstr) + 1;
5491
5492 /* Create separate descriptor file if necessary. */
5493 if (cExtents != 1 || (uImageFlags & VD_IMAGE_FLAGS_FIXED))
5494 {
5495 rc = vmdkFileOpen(pImage, &pImage->pFile, NULL, pImage->pszFilename,
5496 VDOpenFlagsToFileOpenFlags(pImage->uOpenFlags,
5497 true /* fCreate */));
5498 if (RT_FAILURE(rc))
5499 return vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: could not create new sparse descriptor file '%s'"), pImage->pszFilename);
5500 }
5501 else
5502 pImage->pFile = NULL;
5503
5504 /* Set up all extents. */
5505 for (unsigned i = 0; i < cExtents; i++)
5506 {
5507 PVMDKEXTENT pExtent = &pImage->pExtents[i];
5508 uint64_t cbExtent = cbRemaining;
5509
5510 /* Set up fullname/basename for extent description. Cannot use StrDup
5511 * for basename, as it is not guaranteed that the memory can be freed
5512 * with RTMemTmpFree, which must be used as in other code paths
5513 * StrDup is not usable. */
5514 if (cExtents == 1 && !(uImageFlags & VD_IMAGE_FLAGS_FIXED))
5515 {
5516 char *pszBasename = (char *)RTMemTmpAlloc(cbBasenameSubstr);
5517 if (!pszBasename)
5518 return VERR_NO_MEMORY;
5519 memcpy(pszBasename, pszBasenameSubstr, cbBasenameSubstr);
5520 pExtent->pszBasename = pszBasename;
5521 }
5522 else
5523 {
5524 char *pszBasenameSuff = RTPathSuffix(pszBasenameSubstr);
5525 char *pszBasenameBase = RTStrDup(pszBasenameSubstr);
5526 RTPathStripSuffix(pszBasenameBase);
5527 char *pszTmp;
5528 size_t cbTmp;
5529 if (uImageFlags & VD_IMAGE_FLAGS_FIXED)
5530 {
5531 if (cExtents == 1)
5532 RTStrAPrintf(&pszTmp, "%s-flat%s", pszBasenameBase,
5533 pszBasenameSuff);
5534 else
5535 RTStrAPrintf(&pszTmp, "%s-f%03d%s", pszBasenameBase,
5536 i+1, pszBasenameSuff);
5537 }
5538 else
5539 RTStrAPrintf(&pszTmp, "%s-s%03d%s", pszBasenameBase, i+1,
5540 pszBasenameSuff);
5541 RTStrFree(pszBasenameBase);
5542 if (!pszTmp)
5543 return VERR_NO_STR_MEMORY;
5544 cbTmp = strlen(pszTmp) + 1;
5545 char *pszBasename = (char *)RTMemTmpAlloc(cbTmp);
5546 if (!pszBasename)
5547 {
5548 RTStrFree(pszTmp);
5549 return VERR_NO_MEMORY;
5550 }
5551 memcpy(pszBasename, pszTmp, cbTmp);
5552 RTStrFree(pszTmp);
5553 pExtent->pszBasename = pszBasename;
5554 if (uImageFlags & VD_VMDK_IMAGE_FLAGS_SPLIT_2G)
5555 cbExtent = RT_MIN(cbRemaining, VMDK_2G_SPLIT_SIZE);
5556 }
5557 char *pszBasedirectory = RTStrDup(pImage->pszFilename);
5558 if (!pszBasedirectory)
5559 return VERR_NO_STR_MEMORY;
5560 RTPathStripFilename(pszBasedirectory);
5561 char *pszFullname = RTPathJoinA(pszBasedirectory, pExtent->pszBasename);
5562 RTStrFree(pszBasedirectory);
5563 if (!pszFullname)
5564 return VERR_NO_STR_MEMORY;
5565 pExtent->pszFullname = pszFullname;
5566
5567 /* Create file for extent. */
5568 rc = vmdkFileOpen(pImage, &pExtent->pFile, pExtent->pszBasename, pExtent->pszFullname,
5569 VDOpenFlagsToFileOpenFlags(pImage->uOpenFlags,
5570 true /* fCreate */));
5571 if (RT_FAILURE(rc))
5572 return vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: could not create new file '%s'"), pExtent->pszFullname);
5573 if (uImageFlags & VD_IMAGE_FLAGS_FIXED)
5574 {
5575 rc = vdIfIoIntFileSetAllocationSize(pImage->pIfIo, pExtent->pFile->pStorage, cbExtent,
5576 0 /* fFlags */, pIfProgress,
5577 uPercentStart + cbOffset * uPercentSpan / cbSize,
5578 cbExtent * uPercentSpan / cbSize);
5579 if (RT_FAILURE(rc))
5580 return vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: could not set size of new file '%s'"), pExtent->pszFullname);
5581 }
5582
5583 /* Place descriptor file information (where integrated). */
5584 if (cExtents == 1 && !(uImageFlags & VD_IMAGE_FLAGS_FIXED))
5585 {
5586 pExtent->uDescriptorSector = 1;
5587 pExtent->cDescriptorSectors = VMDK_BYTE2SECTOR(pImage->cbDescAlloc);
5588 /* The descriptor is part of the (only) extent. */
5589 pExtent->pDescData = pImage->pDescData;
5590 pImage->pDescData = NULL;
5591 }
5592
5593 if (!(uImageFlags & VD_IMAGE_FLAGS_FIXED))
5594 {
5595 pExtent->enmType = VMDKETYPE_HOSTED_SPARSE;
5596 pExtent->cSectors = VMDK_BYTE2SECTOR(RT_ALIGN_64(cbExtent, _64K));
5597 pExtent->cSectorsPerGrain = VMDK_BYTE2SECTOR(_64K);
5598 pExtent->cGTEntries = 512;
5599
5600 uint64_t const cSectorsPerGDE = pExtent->cGTEntries * pExtent->cSectorsPerGrain;
5601 pExtent->cSectorsPerGDE = cSectorsPerGDE;
5602 pExtent->cGDEntries = (pExtent->cSectors + cSectorsPerGDE - 1) / cSectorsPerGDE;
5603 if (pImage->uImageFlags & VD_VMDK_IMAGE_FLAGS_STREAM_OPTIMIZED)
5604 {
5605 /* The spec says version is 1 for all VMDKs, but the vast
5606 * majority of streamOptimized VMDKs actually contain
5607 * version 3 - so go with the majority. Both are accepted. */
5608 pExtent->uVersion = 3;
5609 pExtent->uCompression = VMDK_COMPRESSION_DEFLATE;
5610 }
5611 }
5612 else
5613 {
5614 if (uImageFlags & VD_VMDK_IMAGE_FLAGS_ESX)
5615 pExtent->enmType = VMDKETYPE_VMFS;
5616 else
5617 pExtent->enmType = VMDKETYPE_FLAT;
5618 }
5619
5620 pExtent->enmAccess = VMDKACCESS_READWRITE;
5621 pExtent->fUncleanShutdown = true;
5622 pExtent->cNominalSectors = VMDK_BYTE2SECTOR(cbExtent);
5623 pExtent->uSectorOffset = 0;
5624 pExtent->fMetaDirty = true;
5625
5626 if (!(uImageFlags & VD_IMAGE_FLAGS_FIXED))
5627 {
5628 /* fPreAlloc should never be false because VMware can't use such images. */
5629 rc = vmdkCreateGrainDirectory(pImage, pExtent,
5630 RT_MAX( pExtent->uDescriptorSector
5631 + pExtent->cDescriptorSectors,
5632 1),
5633 true /* fPreAlloc */);
5634 if (RT_FAILURE(rc))
5635 return vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: could not create new grain directory in '%s'"), pExtent->pszFullname);
5636 }
5637
5638 cbOffset += cbExtent;
5639
5640 if (RT_SUCCESS(rc))
5641 vdIfProgress(pIfProgress, uPercentStart + cbOffset * uPercentSpan / cbSize);
5642
5643 cbRemaining -= cbExtent;
5644 }
5645
5646 if (pImage->uImageFlags & VD_VMDK_IMAGE_FLAGS_ESX)
5647 {
5648 /* VirtualBox doesn't care, but VMWare ESX freaks out if the wrong
5649 * controller type is set in an image. */
5650 rc = vmdkDescDDBSetStr(pImage, &pImage->Descriptor, "ddb.adapterType", "lsilogic");
5651 if (RT_FAILURE(rc))
5652 return vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: could not set controller type to lsilogic in '%s'"), pImage->pszFilename);
5653 }
5654
5655 const char *pszDescType = NULL;
5656 if (uImageFlags & VD_IMAGE_FLAGS_FIXED)
5657 {
5658 if (pImage->uImageFlags & VD_VMDK_IMAGE_FLAGS_ESX)
5659 pszDescType = "vmfs";
5660 else
5661 pszDescType = (cExtents == 1)
5662 ? "monolithicFlat" : "twoGbMaxExtentFlat";
5663 }
5664 else
5665 {
5666 if (pImage->uImageFlags & VD_VMDK_IMAGE_FLAGS_STREAM_OPTIMIZED)
5667 pszDescType = "streamOptimized";
5668 else
5669 {
5670 pszDescType = (cExtents == 1)
5671 ? "monolithicSparse" : "twoGbMaxExtentSparse";
5672 }
5673 }
5674 rc = vmdkDescBaseSetStr(pImage, &pImage->Descriptor, "createType",
5675 pszDescType);
5676 if (RT_FAILURE(rc))
5677 return vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: could not set the image type in '%s'"), pImage->pszFilename);
5678 return rc;
5679}
5680
5681/**
5682 * Internal: Create a real stream optimized VMDK using only linear writes.
5683 */
5684static int vmdkCreateStreamImage(PVMDKIMAGE pImage, uint64_t cbSize)
5685{
5686 int rc = vmdkCreateExtents(pImage, 1);
5687 if (RT_FAILURE(rc))
5688 return vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: could not create new extent list in '%s'"), pImage->pszFilename);
5689
5690 /* Basename strings needed for constructing the extent names. */
5691 const char *pszBasenameSubstr = RTPathFilename(pImage->pszFilename);
5692 AssertPtr(pszBasenameSubstr);
5693 size_t cbBasenameSubstr = strlen(pszBasenameSubstr) + 1;
5694
5695 /* No separate descriptor file. */
5696 pImage->pFile = NULL;
5697
5698 /* Set up all extents. */
5699 PVMDKEXTENT pExtent = &pImage->pExtents[0];
5700
5701 /* Set up fullname/basename for extent description. Cannot use StrDup
5702 * for basename, as it is not guaranteed that the memory can be freed
5703 * with RTMemTmpFree, which must be used as in other code paths
5704 * StrDup is not usable. */
5705 char *pszBasename = (char *)RTMemTmpAlloc(cbBasenameSubstr);
5706 if (!pszBasename)
5707 return VERR_NO_MEMORY;
5708 memcpy(pszBasename, pszBasenameSubstr, cbBasenameSubstr);
5709 pExtent->pszBasename = pszBasename;
5710
5711 char *pszBasedirectory = RTStrDup(pImage->pszFilename);
5712 RTPathStripFilename(pszBasedirectory);
5713 char *pszFullname = RTPathJoinA(pszBasedirectory, pExtent->pszBasename);
5714 RTStrFree(pszBasedirectory);
5715 if (!pszFullname)
5716 return VERR_NO_STR_MEMORY;
5717 pExtent->pszFullname = pszFullname;
5718
5719 /* Create file for extent. Make it write only, no reading allowed. */
5720 rc = vmdkFileOpen(pImage, &pExtent->pFile, pExtent->pszBasename, pExtent->pszFullname,
5721 VDOpenFlagsToFileOpenFlags(pImage->uOpenFlags,
5722 true /* fCreate */)
5723 & ~RTFILE_O_READ);
5724 if (RT_FAILURE(rc))
5725 return vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: could not create new file '%s'"), pExtent->pszFullname);
5726
5727 /* Place descriptor file information. */
5728 pExtent->uDescriptorSector = 1;
5729 pExtent->cDescriptorSectors = VMDK_BYTE2SECTOR(pImage->cbDescAlloc);
5730 /* The descriptor is part of the (only) extent. */
5731 pExtent->pDescData = pImage->pDescData;
5732 pImage->pDescData = NULL;
5733
5734 pExtent->enmType = VMDKETYPE_HOSTED_SPARSE;
5735 pExtent->cSectors = VMDK_BYTE2SECTOR(RT_ALIGN_64(cbSize, _64K));
5736 pExtent->cSectorsPerGrain = VMDK_BYTE2SECTOR(_64K);
5737 pExtent->cGTEntries = 512;
5738
5739 uint64_t const cSectorsPerGDE = pExtent->cGTEntries * pExtent->cSectorsPerGrain;
5740 pExtent->cSectorsPerGDE = cSectorsPerGDE;
5741 pExtent->cGDEntries = (pExtent->cSectors + cSectorsPerGDE - 1) / cSectorsPerGDE;
5742
5743 /* The spec says version is 1 for all VMDKs, but the vast
5744 * majority of streamOptimized VMDKs actually contain
5745 * version 3 - so go with the majority. Both are accepted. */
5746 pExtent->uVersion = 3;
5747 pExtent->uCompression = VMDK_COMPRESSION_DEFLATE;
5748 pExtent->fFooter = true;
5749
5750 pExtent->enmAccess = VMDKACCESS_READONLY;
5751 pExtent->fUncleanShutdown = false;
5752 pExtent->cNominalSectors = VMDK_BYTE2SECTOR(cbSize);
5753 pExtent->uSectorOffset = 0;
5754 pExtent->fMetaDirty = true;
5755
5756 /* Create grain directory, without preallocating it straight away. It will
5757 * be constructed on the fly when writing out the data and written when
5758 * closing the image. The end effect is that the full grain directory is
5759 * allocated, which is a requirement of the VMDK specs. */
5760 rc = vmdkCreateGrainDirectory(pImage, pExtent, VMDK_GD_AT_END,
5761 false /* fPreAlloc */);
5762 if (RT_FAILURE(rc))
5763 return vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: could not create new grain directory in '%s'"), pExtent->pszFullname);
5764
5765 rc = vmdkDescBaseSetStr(pImage, &pImage->Descriptor, "createType",
5766 "streamOptimized");
5767 if (RT_FAILURE(rc))
5768 rc = vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: could not set the image type in '%s'"), pImage->pszFilename);
5769
5770 return rc;
5771}
5772
5773/**
5774 * Initializes the UUID fields in the DDB.
5775 *
5776 * @returns VBox status code.
5777 * @param pImage The VMDK image instance.
5778 */
5779static int vmdkCreateImageDdbUuidsInit(PVMDKIMAGE pImage)
5780{
5781 int rc = vmdkDescDDBSetUuid(pImage, &pImage->Descriptor, VMDK_DDB_IMAGE_UUID, &pImage->ImageUuid);
5782 if (RT_SUCCESS(rc))
5783 {
5784 rc = vmdkDescDDBSetUuid(pImage, &pImage->Descriptor, VMDK_DDB_PARENT_UUID, &pImage->ParentUuid);
5785 if (RT_SUCCESS(rc))
5786 {
5787 rc = vmdkDescDDBSetUuid(pImage, &pImage->Descriptor, VMDK_DDB_MODIFICATION_UUID,
5788 &pImage->ModificationUuid);
5789 if (RT_SUCCESS(rc))
5790 {
5791 rc = vmdkDescDDBSetUuid(pImage, &pImage->Descriptor, VMDK_DDB_PARENT_MODIFICATION_UUID,
5792 &pImage->ParentModificationUuid);
5793 if (RT_FAILURE(rc))
5794 rc = vdIfError(pImage->pIfError, rc, RT_SRC_POS,
5795 N_("VMDK: error storing parent modification UUID in new descriptor in '%s'"), pImage->pszFilename);
5796 }
5797 else
5798 rc = vdIfError(pImage->pIfError, rc, RT_SRC_POS,
5799 N_("VMDK: error storing modification UUID in new descriptor in '%s'"), pImage->pszFilename);
5800 }
5801 else
5802 rc = vdIfError(pImage->pIfError, rc, RT_SRC_POS,
5803 N_("VMDK: error storing parent image UUID in new descriptor in '%s'"), pImage->pszFilename);
5804 }
5805 else
5806 rc = vdIfError(pImage->pIfError, rc, RT_SRC_POS,
5807 N_("VMDK: error storing image UUID in new descriptor in '%s'"), pImage->pszFilename);
5808
5809 return rc;
5810}
5811
5812/**
5813 * Internal: The actual code for creating any VMDK variant currently in
5814 * existence on hosted environments.
5815 */
5816static int vmdkCreateImage(PVMDKIMAGE pImage, uint64_t cbSize,
5817 unsigned uImageFlags, const char *pszComment,
5818 PCVDGEOMETRY pPCHSGeometry,
5819 PCVDGEOMETRY pLCHSGeometry, PCRTUUID pUuid,
5820 PVDINTERFACEPROGRESS pIfProgress,
5821 unsigned uPercentStart, unsigned uPercentSpan)
5822{
5823 pImage->uImageFlags = uImageFlags;
5824
5825 pImage->pIfError = VDIfErrorGet(pImage->pVDIfsDisk);
5826 pImage->pIfIo = VDIfIoIntGet(pImage->pVDIfsImage);
5827 AssertPtrReturn(pImage->pIfIo, VERR_INVALID_PARAMETER);
5828
5829 int rc = vmdkCreateDescriptor(pImage, pImage->pDescData, pImage->cbDescAlloc,
5830 &pImage->Descriptor);
5831 if (RT_SUCCESS(rc))
5832 {
5833 if (uImageFlags & VD_VMDK_IMAGE_FLAGS_RAWDISK)
5834 {
5835 /* Raw disk image (includes raw partition). */
5836 PVDISKRAW pRaw = NULL;
5837 rc = vmdkMakeRawDescriptor(pImage, &pRaw);
5838 if (RT_FAILURE(rc))
5839 return vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: could not create raw descriptor for '%s'"),
5840 pImage->pszFilename);
5841 if (!cbSize)
5842 cbSize = pImage->cbSize;
5843
5844 rc = vmdkCreateRawImage(pImage, pRaw, cbSize);
5845 vmdkRawDescFree(pRaw);
5846 }
5847 else if (uImageFlags & VD_VMDK_IMAGE_FLAGS_STREAM_OPTIMIZED)
5848 {
5849 /* Stream optimized sparse image (monolithic). */
5850 rc = vmdkCreateStreamImage(pImage, cbSize);
5851 }
5852 else
5853 {
5854 /* Regular fixed or sparse image (monolithic or split). */
5855 rc = vmdkCreateRegularImage(pImage, cbSize, uImageFlags,
5856 pIfProgress, uPercentStart,
5857 uPercentSpan * 95 / 100);
5858 }
5859
5860 if (RT_SUCCESS(rc))
5861 {
5862 vdIfProgress(pIfProgress, uPercentStart + uPercentSpan * 98 / 100);
5863
5864 pImage->cbSize = cbSize;
5865
5866 for (unsigned i = 0; i < pImage->cExtents; i++)
5867 {
5868 PVMDKEXTENT pExtent = &pImage->pExtents[i];
5869
5870 rc = vmdkDescExtInsert(pImage, &pImage->Descriptor, pExtent->enmAccess,
5871 pExtent->cNominalSectors, pExtent->enmType,
5872 pExtent->pszBasename, pExtent->uSectorOffset);
5873 if (RT_FAILURE(rc))
5874 {
5875 rc = vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: could not insert the extent list into descriptor in '%s'"), pImage->pszFilename);
5876 break;
5877 }
5878 }
5879
5880 if (RT_SUCCESS(rc))
5881 vmdkDescExtRemoveDummy(pImage, &pImage->Descriptor);
5882
5883 pImage->LCHSGeometry = *pLCHSGeometry;
5884 pImage->PCHSGeometry = *pPCHSGeometry;
5885
5886 if (RT_SUCCESS(rc))
5887 {
5888 if ( pPCHSGeometry->cCylinders != 0
5889 && pPCHSGeometry->cHeads != 0
5890 && pPCHSGeometry->cSectors != 0)
5891 rc = vmdkDescSetPCHSGeometry(pImage, pPCHSGeometry);
5892 else if (uImageFlags & VD_VMDK_IMAGE_FLAGS_RAWDISK)
5893 {
5894 VDGEOMETRY RawDiskPCHSGeometry;
5895 RawDiskPCHSGeometry.cCylinders = (uint32_t)RT_MIN(pImage->cbSize / 512 / 16 / 63, 16383);
5896 RawDiskPCHSGeometry.cHeads = 16;
5897 RawDiskPCHSGeometry.cSectors = 63;
5898 rc = vmdkDescSetPCHSGeometry(pImage, &RawDiskPCHSGeometry);
5899 }
5900 }
5901
5902 if ( RT_SUCCESS(rc)
5903 && pLCHSGeometry->cCylinders != 0
5904 && pLCHSGeometry->cHeads != 0
5905 && pLCHSGeometry->cSectors != 0)
5906 rc = vmdkDescSetLCHSGeometry(pImage, pLCHSGeometry);
5907
5908 pImage->ImageUuid = *pUuid;
5909 RTUuidClear(&pImage->ParentUuid);
5910 RTUuidClear(&pImage->ModificationUuid);
5911 RTUuidClear(&pImage->ParentModificationUuid);
5912
5913 if (RT_SUCCESS(rc))
5914 rc = vmdkCreateImageDdbUuidsInit(pImage);
5915
5916 if (RT_SUCCESS(rc))
5917 rc = vmdkAllocateGrainTableCache(pImage);
5918
5919 if (RT_SUCCESS(rc))
5920 {
5921 rc = vmdkSetImageComment(pImage, pszComment);
5922 if (RT_FAILURE(rc))
5923 rc = vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: cannot set image comment in '%s'"), pImage->pszFilename);
5924 }
5925
5926 if (RT_SUCCESS(rc))
5927 {
5928 vdIfProgress(pIfProgress, uPercentStart + uPercentSpan * 99 / 100);
5929
5930 if (pImage->uImageFlags & VD_VMDK_IMAGE_FLAGS_STREAM_OPTIMIZED)
5931 {
5932 /* streamOptimized is a bit special, we cannot trigger the flush
5933 * until all data has been written. So we write the necessary
5934 * information explicitly. */
5935 pImage->pExtents[0].cDescriptorSectors = VMDK_BYTE2SECTOR(RT_ALIGN_64( pImage->Descriptor.aLines[pImage->Descriptor.cLines]
5936 - pImage->Descriptor.aLines[0], 512));
5937 rc = vmdkWriteMetaSparseExtent(pImage, &pImage->pExtents[0], 0, NULL);
5938 if (RT_SUCCESS(rc))
5939 {
5940 rc = vmdkWriteDescriptor(pImage, NULL);
5941 if (RT_FAILURE(rc))
5942 rc = vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: cannot write VMDK descriptor in '%s'"), pImage->pszFilename);
5943 }
5944 else
5945 rc = vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: cannot write VMDK header in '%s'"), pImage->pszFilename);
5946 }
5947 else
5948 rc = vmdkFlushImage(pImage, NULL);
5949 }
5950 }
5951 }
5952 else
5953 rc = vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: could not create new descriptor in '%s'"), pImage->pszFilename);
5954
5955
5956 if (RT_SUCCESS(rc))
5957 {
5958 PVDREGIONDESC pRegion = &pImage->RegionList.aRegions[0];
5959 pImage->RegionList.fFlags = 0;
5960 pImage->RegionList.cRegions = 1;
5961
5962 pRegion->offRegion = 0; /* Disk start. */
5963 pRegion->cbBlock = 512;
5964 pRegion->enmDataForm = VDREGIONDATAFORM_RAW;
5965 pRegion->enmMetadataForm = VDREGIONMETADATAFORM_NONE;
5966 pRegion->cbData = 512;
5967 pRegion->cbMetadata = 0;
5968 pRegion->cRegionBlocksOrBytes = pImage->cbSize;
5969
5970 vdIfProgress(pIfProgress, uPercentStart + uPercentSpan);
5971 }
5972 else
5973 vmdkFreeImage(pImage, rc != VERR_ALREADY_EXISTS, false /*fFlush*/);
5974 return rc;
5975}
5976
5977/**
5978 * Internal: Update image comment.
5979 */
5980static int vmdkSetImageComment(PVMDKIMAGE pImage, const char *pszComment)
5981{
5982 char *pszCommentEncoded = NULL;
5983 if (pszComment)
5984 {
5985 pszCommentEncoded = vmdkEncodeString(pszComment);
5986 if (!pszCommentEncoded)
5987 return VERR_NO_MEMORY;
5988 }
5989
5990 int rc = vmdkDescDDBSetStr(pImage, &pImage->Descriptor,
5991 "ddb.comment", pszCommentEncoded);
5992 if (pszCommentEncoded)
5993 RTStrFree(pszCommentEncoded);
5994 if (RT_FAILURE(rc))
5995 return vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: error storing image comment in descriptor in '%s'"), pImage->pszFilename);
5996 return VINF_SUCCESS;
5997}
5998
5999/**
6000 * Internal. Clear the grain table buffer for real stream optimized writing.
6001 */
6002static void vmdkStreamClearGT(PVMDKIMAGE pImage, PVMDKEXTENT pExtent)
6003{
6004 uint32_t cCacheLines = RT_ALIGN(pExtent->cGTEntries, VMDK_GT_CACHELINE_SIZE) / VMDK_GT_CACHELINE_SIZE;
6005 for (uint32_t i = 0; i < cCacheLines; i++)
6006 memset(&pImage->pGTCache->aGTCache[i].aGTData[0], '\0',
6007 VMDK_GT_CACHELINE_SIZE * sizeof(uint32_t));
6008}
6009
6010/**
6011 * Internal. Flush the grain table buffer for real stream optimized writing.
6012 */
6013static int vmdkStreamFlushGT(PVMDKIMAGE pImage, PVMDKEXTENT pExtent,
6014 uint32_t uGDEntry)
6015{
6016 int rc = VINF_SUCCESS;
6017 uint32_t cCacheLines = RT_ALIGN(pExtent->cGTEntries, VMDK_GT_CACHELINE_SIZE) / VMDK_GT_CACHELINE_SIZE;
6018
6019 /* VMware does not write out completely empty grain tables in the case
6020 * of streamOptimized images, which according to my interpretation of
6021 * the VMDK 1.1 spec is bending the rules. Since they do it and we can
6022 * handle it without problems do it the same way and save some bytes. */
6023 bool fAllZero = true;
6024 for (uint32_t i = 0; i < cCacheLines; i++)
6025 {
6026 /* Convert the grain table to little endian in place, as it will not
6027 * be used at all after this function has been called. */
6028 uint32_t *pGTTmp = &pImage->pGTCache->aGTCache[i].aGTData[0];
6029 for (uint32_t j = 0; j < VMDK_GT_CACHELINE_SIZE; j++, pGTTmp++)
6030 if (*pGTTmp)
6031 {
6032 fAllZero = false;
6033 break;
6034 }
6035 if (!fAllZero)
6036 break;
6037 }
6038 if (fAllZero)
6039 return VINF_SUCCESS;
6040
6041 uint64_t uFileOffset = pExtent->uAppendPosition;
6042 if (!uFileOffset)
6043 return VERR_INTERNAL_ERROR;
6044 /* Align to sector, as the previous write could have been any size. */
6045 uFileOffset = RT_ALIGN_64(uFileOffset, 512);
6046
6047 /* Grain table marker. */
6048 uint8_t aMarker[512];
6049 PVMDKMARKER pMarker = (PVMDKMARKER)&aMarker[0];
6050 memset(pMarker, '\0', sizeof(aMarker));
6051 pMarker->uSector = RT_H2LE_U64(VMDK_BYTE2SECTOR((uint64_t)pExtent->cGTEntries * sizeof(uint32_t)));
6052 pMarker->uType = RT_H2LE_U32(VMDK_MARKER_GT);
6053 rc = vdIfIoIntFileWriteSync(pImage->pIfIo, pExtent->pFile->pStorage, uFileOffset,
6054 aMarker, sizeof(aMarker));
6055 AssertRC(rc);
6056 uFileOffset += 512;
6057
6058 if (!pExtent->pGD || pExtent->pGD[uGDEntry])
6059 return VERR_INTERNAL_ERROR;
6060
6061 pExtent->pGD[uGDEntry] = VMDK_BYTE2SECTOR(uFileOffset);
6062
6063 for (uint32_t i = 0; i < cCacheLines; i++)
6064 {
6065 /* Convert the grain table to little endian in place, as it will not
6066 * be used at all after this function has been called. */
6067 uint32_t *pGTTmp = &pImage->pGTCache->aGTCache[i].aGTData[0];
6068 for (uint32_t j = 0; j < VMDK_GT_CACHELINE_SIZE; j++, pGTTmp++)
6069 *pGTTmp = RT_H2LE_U32(*pGTTmp);
6070
6071 rc = vdIfIoIntFileWriteSync(pImage->pIfIo, pExtent->pFile->pStorage, uFileOffset,
6072 &pImage->pGTCache->aGTCache[i].aGTData[0],
6073 VMDK_GT_CACHELINE_SIZE * sizeof(uint32_t));
6074 uFileOffset += VMDK_GT_CACHELINE_SIZE * sizeof(uint32_t);
6075 if (RT_FAILURE(rc))
6076 break;
6077 }
6078 Assert(!(uFileOffset % 512));
6079 pExtent->uAppendPosition = RT_ALIGN_64(uFileOffset, 512);
6080 return rc;
6081}
6082
6083/**
6084 * Internal. Free all allocated space for representing an image, and optionally
6085 * delete the image from disk.
6086 */
6087static int vmdkFreeImage(PVMDKIMAGE pImage, bool fDelete, bool fFlush)
6088{
6089 int rc = VINF_SUCCESS;
6090
6091 /* Freeing a never allocated image (e.g. because the open failed) is
6092 * not signalled as an error. After all nothing bad happens. */
6093 if (pImage)
6094 {
6095 if (!(pImage->uOpenFlags & VD_OPEN_FLAGS_READONLY))
6096 {
6097 if (pImage->uImageFlags & VD_VMDK_IMAGE_FLAGS_STREAM_OPTIMIZED)
6098 {
6099 /* Check if all extents are clean. */
6100 for (unsigned i = 0; i < pImage->cExtents; i++)
6101 {
6102 Assert(!pImage->pExtents[i].fUncleanShutdown);
6103 }
6104 }
6105 else
6106 {
6107 /* Mark all extents as clean. */
6108 for (unsigned i = 0; i < pImage->cExtents; i++)
6109 {
6110 if ( pImage->pExtents[i].enmType == VMDKETYPE_HOSTED_SPARSE
6111 && pImage->pExtents[i].fUncleanShutdown)
6112 {
6113 pImage->pExtents[i].fUncleanShutdown = false;
6114 pImage->pExtents[i].fMetaDirty = true;
6115 }
6116
6117 /* From now on it's not safe to append any more data. */
6118 pImage->pExtents[i].uAppendPosition = 0;
6119 }
6120 }
6121 }
6122
6123 if (pImage->uImageFlags & VD_VMDK_IMAGE_FLAGS_STREAM_OPTIMIZED)
6124 {
6125 /* No need to write any pending data if the file will be deleted
6126 * or if the new file wasn't successfully created. */
6127 if ( !fDelete && pImage->pExtents
6128 && pImage->pExtents[0].cGTEntries
6129 && pImage->pExtents[0].uAppendPosition)
6130 {
6131 PVMDKEXTENT pExtent = &pImage->pExtents[0];
6132 uint32_t uLastGDEntry = pExtent->uLastGrainAccess / pExtent->cGTEntries;
6133 rc = vmdkStreamFlushGT(pImage, pExtent, uLastGDEntry);
6134 AssertRC(rc);
6135 vmdkStreamClearGT(pImage, pExtent);
6136 for (uint32_t i = uLastGDEntry + 1; i < pExtent->cGDEntries; i++)
6137 {
6138 rc = vmdkStreamFlushGT(pImage, pExtent, i);
6139 AssertRC(rc);
6140 }
6141
6142 uint64_t uFileOffset = pExtent->uAppendPosition;
6143 if (!uFileOffset)
6144 return VERR_INTERNAL_ERROR;
6145 uFileOffset = RT_ALIGN_64(uFileOffset, 512);
6146
6147 /* From now on it's not safe to append any more data. */
6148 pExtent->uAppendPosition = 0;
6149
6150 /* Grain directory marker. */
6151 uint8_t aMarker[512];
6152 PVMDKMARKER pMarker = (PVMDKMARKER)&aMarker[0];
6153 memset(pMarker, '\0', sizeof(aMarker));
6154 pMarker->uSector = VMDK_BYTE2SECTOR(RT_ALIGN_64(RT_H2LE_U64((uint64_t)pExtent->cGDEntries * sizeof(uint32_t)), 512));
6155 pMarker->uType = RT_H2LE_U32(VMDK_MARKER_GD);
6156 rc = vdIfIoIntFileWriteSync(pImage->pIfIo, pExtent->pFile->pStorage, uFileOffset,
6157 aMarker, sizeof(aMarker));
6158 AssertRC(rc);
6159 uFileOffset += 512;
6160
6161 /* Write grain directory in little endian style. The array will
6162 * not be used after this, so convert in place. */
6163 uint32_t *pGDTmp = pExtent->pGD;
6164 for (uint32_t i = 0; i < pExtent->cGDEntries; i++, pGDTmp++)
6165 *pGDTmp = RT_H2LE_U32(*pGDTmp);
6166 rc = vdIfIoIntFileWriteSync(pImage->pIfIo, pExtent->pFile->pStorage,
6167 uFileOffset, pExtent->pGD,
6168 pExtent->cGDEntries * sizeof(uint32_t));
6169 AssertRC(rc);
6170
6171 pExtent->uSectorGD = VMDK_BYTE2SECTOR(uFileOffset);
6172 pExtent->uSectorRGD = VMDK_BYTE2SECTOR(uFileOffset);
6173 uFileOffset = RT_ALIGN_64( uFileOffset
6174 + pExtent->cGDEntries * sizeof(uint32_t),
6175 512);
6176
6177 /* Footer marker. */
6178 memset(pMarker, '\0', sizeof(aMarker));
6179 pMarker->uSector = VMDK_BYTE2SECTOR(512);
6180 pMarker->uType = RT_H2LE_U32(VMDK_MARKER_FOOTER);
6181 rc = vdIfIoIntFileWriteSync(pImage->pIfIo, pExtent->pFile->pStorage,
6182 uFileOffset, aMarker, sizeof(aMarker));
6183 AssertRC(rc);
6184
6185 uFileOffset += 512;
6186 rc = vmdkWriteMetaSparseExtent(pImage, pExtent, uFileOffset, NULL);
6187 AssertRC(rc);
6188
6189 uFileOffset += 512;
6190 /* End-of-stream marker. */
6191 memset(pMarker, '\0', sizeof(aMarker));
6192 rc = vdIfIoIntFileWriteSync(pImage->pIfIo, pExtent->pFile->pStorage,
6193 uFileOffset, aMarker, sizeof(aMarker));
6194 AssertRC(rc);
6195 }
6196 }
6197 else if (!fDelete && fFlush)
6198 vmdkFlushImage(pImage, NULL);
6199
6200 if (pImage->pExtents != NULL)
6201 {
6202 for (unsigned i = 0 ; i < pImage->cExtents; i++)
6203 {
6204 int rc2 = vmdkFreeExtentData(pImage, &pImage->pExtents[i], fDelete);
6205 if (RT_SUCCESS(rc))
6206 rc = rc2; /* Propogate any error when closing the file. */
6207 }
6208 RTMemFree(pImage->pExtents);
6209 pImage->pExtents = NULL;
6210 }
6211 pImage->cExtents = 0;
6212 if (pImage->pFile != NULL)
6213 {
6214 int rc2 = vmdkFileClose(pImage, &pImage->pFile, fDelete);
6215 if (RT_SUCCESS(rc))
6216 rc = rc2; /* Propogate any error when closing the file. */
6217 }
6218 int rc2 = vmdkFileCheckAllClose(pImage);
6219 if (RT_SUCCESS(rc))
6220 rc = rc2; /* Propogate any error when closing the file. */
6221
6222 if (pImage->pGTCache)
6223 {
6224 RTMemFree(pImage->pGTCache);
6225 pImage->pGTCache = NULL;
6226 }
6227 if (pImage->pDescData)
6228 {
6229 RTMemFree(pImage->pDescData);
6230 pImage->pDescData = NULL;
6231 }
6232 }
6233
6234 LogFlowFunc(("returns %Rrc\n", rc));
6235 return rc;
6236}
6237
/**
 * Internal. Flush image data (and metadata) to disk.
 *
 * Writes the descriptor if dirty, then the metadata of every dirty extent
 * (sparse header at offset 0, or footer at the append position for
 * streamOptimized extents), and finally flushes the underlying file of
 * each writable extent.  Stops at the first metadata write failure.
 *
 * @returns VBox status code.
 * @param   pImage  The image instance data.
 * @param   pIoCtx  I/O context to use; passed straight through to the
 *                  write/flush helpers (may be NULL for synchronous callers).
 */
static int vmdkFlushImage(PVMDKIMAGE pImage, PVDIOCTX pIoCtx)
{
    PVMDKEXTENT pExtent;
    int rc = VINF_SUCCESS;

    /* Update descriptor if changed. */
    if (pImage->Descriptor.fDirty)
        rc = vmdkWriteDescriptor(pImage, pIoCtx);

    if (RT_SUCCESS(rc))
    {
        for (unsigned i = 0; i < pImage->cExtents; i++)
        {
            pExtent = &pImage->pExtents[i];
            if (pExtent->pFile != NULL && pExtent->fMetaDirty)
            {
                switch (pExtent->enmType)
                {
                    case VMDKETYPE_HOSTED_SPARSE:
                        if (!pExtent->fFooter)
                            rc = vmdkWriteMetaSparseExtent(pImage, pExtent, 0, pIoCtx);
                        else
                        {
                            uint64_t uFileOffset = pExtent->uAppendPosition;
                            /* Simply skip writing anything if the streamOptimized
                             * image hasn't been just created. */
                            if (!uFileOffset)
                                break;
                            uFileOffset = RT_ALIGN_64(uFileOffset, 512);
                            rc = vmdkWriteMetaSparseExtent(pImage, pExtent,
                                                           uFileOffset, pIoCtx);
                        }
                        break;
                    case VMDKETYPE_VMFS:
                    case VMDKETYPE_FLAT:
                        /* Nothing to do. */
                        break;
                    case VMDKETYPE_ZERO:
                    default:
                        AssertMsgFailed(("extent with type %d marked as dirty\n",
                                         pExtent->enmType));
                        break;
                }
            }

            if (RT_FAILURE(rc))
                break;

            /* Flush the underlying file of writable, locally stored extents. */
            switch (pExtent->enmType)
            {
                case VMDKETYPE_HOSTED_SPARSE:
                case VMDKETYPE_VMFS:
                case VMDKETYPE_FLAT:
                    /** @todo implement proper path absolute check. */
                    if (   pExtent->pFile != NULL
                        && !(pImage->uOpenFlags & VD_OPEN_FLAGS_READONLY)
                        && !(pExtent->pszBasename[0] == RTPATH_SLASH))
                        rc = vdIfIoIntFileFlush(pImage->pIfIo, pExtent->pFile->pStorage, pIoCtx,
                                                NULL, NULL);
                    break;
                case VMDKETYPE_ZERO:
                    /* No need to do anything for this extent. */
                    break;
                default:
                    AssertMsgFailed(("unknown extent type %d\n", pExtent->enmType));
                    break;
            }
        }
    }

    return rc;
}
6313
6314/**
6315 * Internal. Find extent corresponding to the sector number in the disk.
6316 */
6317static int vmdkFindExtent(PVMDKIMAGE pImage, uint64_t offSector,
6318 PVMDKEXTENT *ppExtent, uint64_t *puSectorInExtent)
6319{
6320 PVMDKEXTENT pExtent = NULL;
6321 int rc = VINF_SUCCESS;
6322
6323 for (unsigned i = 0; i < pImage->cExtents; i++)
6324 {
6325 if (offSector < pImage->pExtents[i].cNominalSectors)
6326 {
6327 pExtent = &pImage->pExtents[i];
6328 *puSectorInExtent = offSector + pImage->pExtents[i].uSectorOffset;
6329 break;
6330 }
6331 offSector -= pImage->pExtents[i].cNominalSectors;
6332 }
6333
6334 if (pExtent)
6335 *ppExtent = pExtent;
6336 else
6337 rc = VERR_IO_SECTOR_NOT_FOUND;
6338
6339 return rc;
6340}
6341
6342/**
6343 * Internal. Hash function for placing the grain table hash entries.
6344 */
6345static uint32_t vmdkGTCacheHash(PVMDKGTCACHE pCache, uint64_t uSector,
6346 unsigned uExtent)
6347{
6348 /** @todo this hash function is quite simple, maybe use a better one which
6349 * scrambles the bits better. */
6350 return (uSector + uExtent) % pCache->cEntries;
6351}
6352
/**
 * Internal. Get sector number in the extent file from the relative sector
 * number in the extent.
 *
 * Looks the grain up via the grain directory and the (cached) grain table.
 *
 * @returns VBox status code.
 * @param   pImage          The image instance data.
 * @param   pIoCtx          I/O context used for reading grain table metadata.
 * @param   pExtent         The extent to resolve the sector in.
 * @param   uSector         Sector number relative to the start of the extent.
 * @param   puExtentSector  Where to store the sector number in the extent
 *                          file; 0 if the grain is not allocated.
 */
static int vmdkGetSector(PVMDKIMAGE pImage, PVDIOCTX pIoCtx,
                         PVMDKEXTENT pExtent, uint64_t uSector,
                         uint64_t *puExtentSector)
{
    PVMDKGTCACHE pCache = pImage->pGTCache;
    uint64_t uGDIndex, uGTSector, uGTBlock;
    uint32_t uGTHash, uGTBlockIndex;
    PVMDKGTCACHEENTRY pGTCacheEntry;
    uint32_t aGTDataTmp[VMDK_GT_CACHELINE_SIZE];
    int rc;

    /* For newly created and readonly/sequentially opened streamOptimized
     * images this must be a no-op, as the grain directory is not there. */
    if (   (   pImage->uImageFlags & VD_VMDK_IMAGE_FLAGS_STREAM_OPTIMIZED
            && pExtent->uAppendPosition)
        || (   pImage->uImageFlags & VD_VMDK_IMAGE_FLAGS_STREAM_OPTIMIZED
            && pImage->uOpenFlags & VD_OPEN_FLAGS_READONLY
            && pImage->uOpenFlags & VD_OPEN_FLAGS_SEQUENTIAL))
    {
        *puExtentSector = 0;
        return VINF_SUCCESS;
    }

    uGDIndex = uSector / pExtent->cSectorsPerGDE;
    if (uGDIndex >= pExtent->cGDEntries)
        return VERR_OUT_OF_RANGE;
    uGTSector = pExtent->pGD[uGDIndex];
    if (!uGTSector)
    {
        /* There is no grain table referenced by this grain directory
         * entry. So there is absolutely no data in this area. */
        *puExtentSector = 0;
        return VINF_SUCCESS;
    }

    /* One cache entry covers VMDK_GT_CACHELINE_SIZE consecutive GT entries. */
    uGTBlock = uSector / (pExtent->cSectorsPerGrain * VMDK_GT_CACHELINE_SIZE);
    uGTHash = vmdkGTCacheHash(pCache, uGTBlock, pExtent->uExtent);
    pGTCacheEntry = &pCache->aGTCache[uGTHash];
    if (   pGTCacheEntry->uExtent != pExtent->uExtent
        || pGTCacheEntry->uGTBlock != uGTBlock)
    {
        /* Cache miss, fetch data from disk. */
        PVDMETAXFER pMetaXfer;
        rc = vdIfIoIntFileReadMeta(pImage->pIfIo, pExtent->pFile->pStorage,
                                   VMDK_SECTOR2BYTE(uGTSector) + (uGTBlock % (pExtent->cGTEntries / VMDK_GT_CACHELINE_SIZE)) * sizeof(aGTDataTmp),
                                   aGTDataTmp, sizeof(aGTDataTmp), pIoCtx, &pMetaXfer, NULL, NULL);
        if (RT_FAILURE(rc))
            return rc;
        /* We can release the metadata transfer immediately. */
        vdIfIoIntMetaXferRelease(pImage->pIfIo, pMetaXfer);
        pGTCacheEntry->uExtent = pExtent->uExtent;
        pGTCacheEntry->uGTBlock = uGTBlock;
        /* Grain table entries are stored little endian on disk. */
        for (unsigned i = 0; i < VMDK_GT_CACHELINE_SIZE; i++)
            pGTCacheEntry->aGTData[i] = RT_LE2H_U32(aGTDataTmp[i]);
    }
    uGTBlockIndex = (uSector / pExtent->cSectorsPerGrain) % VMDK_GT_CACHELINE_SIZE;
    uint32_t uGrainSector = pGTCacheEntry->aGTData[uGTBlockIndex];
    if (uGrainSector)
        *puExtentSector = uGrainSector + uSector % pExtent->cSectorsPerGrain;
    else
        *puExtentSector = 0;
    return VINF_SUCCESS;
}
6420
/**
 * Internal. Writes the grain and also if necessary the grain tables.
 * Uses the grain table cache as a true grain table.
 *
 * Only valid for streamOptimized extents written strictly sequentially:
 * writes must never go back to an earlier grain.  Flushes completed grain
 * tables as soon as a write crosses into a new grain directory entry.
 *
 * @returns VBox status code.
 * @param   pImage   The image instance data.
 * @param   pExtent  The (hosted sparse, streamOptimized) extent to write to.
 * @param   uSector  Starting sector of the write, relative to the extent.
 * @param   pIoCtx   I/O context holding the data to write.
 * @param   cbWrite  Number of bytes to write.
 */
static int vmdkStreamAllocGrain(PVMDKIMAGE pImage, PVMDKEXTENT pExtent,
                                uint64_t uSector, PVDIOCTX pIoCtx,
                                uint64_t cbWrite)
{
    uint32_t uGrain;
    uint32_t uGDEntry, uLastGDEntry;
    uint32_t cbGrain = 0;
    uint32_t uCacheLine, uCacheEntry;
    const void *pData;
    int rc;

    /* Very strict requirements: always write at least one full grain, with
     * proper alignment. Everything else would require reading of already
     * written data, which we don't support for obvious reasons. The only
     * exception is the last grain, and only if the image size specifies
     * that only some portion holds data. In any case the write must be
     * within the image limits, no "overshoot" allowed. */
    if (   cbWrite == 0
        || (   cbWrite < VMDK_SECTOR2BYTE(pExtent->cSectorsPerGrain)
            && pExtent->cNominalSectors - uSector >= pExtent->cSectorsPerGrain)
        || uSector % pExtent->cSectorsPerGrain
        || uSector + VMDK_BYTE2SECTOR(cbWrite) > pExtent->cNominalSectors)
        return VERR_INVALID_PARAMETER;

    /* Clip write range to at most the rest of the grain. */
    cbWrite = RT_MIN(cbWrite, VMDK_SECTOR2BYTE(pExtent->cSectorsPerGrain - uSector % pExtent->cSectorsPerGrain));

    /* Do not allow to go back. */
    uGrain = uSector / pExtent->cSectorsPerGrain;
    uCacheLine = uGrain % pExtent->cGTEntries / VMDK_GT_CACHELINE_SIZE;
    uCacheEntry = uGrain % VMDK_GT_CACHELINE_SIZE;
    uGDEntry = uGrain / pExtent->cGTEntries;
    uLastGDEntry = pExtent->uLastGrainAccess / pExtent->cGTEntries;
    if (uGrain < pExtent->uLastGrainAccess)
        return VERR_VD_VMDK_INVALID_WRITE;

    /* Zero byte write optimization. Since we don't tell VBoxHDD that we need
     * to allocate something, we also need to detect the situation ourself. */
    if (   !(pImage->uOpenFlags & VD_OPEN_FLAGS_HONOR_ZEROES)
        && vdIfIoIntIoCtxIsZero(pImage->pIfIo, pIoCtx, cbWrite, true /* fAdvance */))
        return VINF_SUCCESS;

    /* Crossed into a new grain directory entry: flush the finished grain
     * table, then write empty tables for any skipped GD entries. */
    if (uGDEntry != uLastGDEntry)
    {
        rc = vmdkStreamFlushGT(pImage, pExtent, uLastGDEntry);
        if (RT_FAILURE(rc))
            return rc;
        vmdkStreamClearGT(pImage, pExtent);
        for (uint32_t i = uLastGDEntry + 1; i < uGDEntry; i++)
        {
            rc = vmdkStreamFlushGT(pImage, pExtent, i);
            if (RT_FAILURE(rc))
                return rc;
        }
    }

    uint64_t uFileOffset;
    uFileOffset = pExtent->uAppendPosition;
    if (!uFileOffset)
        return VERR_INTERNAL_ERROR;
    /* Align to sector, as the previous write could have been any size. */
    uFileOffset = RT_ALIGN_64(uFileOffset, 512);

    /* Paranoia check: extent type, grain table buffer presence and
     * grain table buffer space. Also grain table entry must be clear. */
    if (   pExtent->enmType != VMDKETYPE_HOSTED_SPARSE
        || !pImage->pGTCache
        || pExtent->cGTEntries > VMDK_GT_CACHE_SIZE * VMDK_GT_CACHELINE_SIZE
        || pImage->pGTCache->aGTCache[uCacheLine].aGTData[uCacheEntry])
        return VERR_INTERNAL_ERROR;

    /* Update grain table entry. */
    pImage->pGTCache->aGTCache[uCacheLine].aGTData[uCacheEntry] = VMDK_BYTE2SECTOR(uFileOffset);

    if (cbWrite != VMDK_SECTOR2BYTE(pExtent->cSectorsPerGrain))
    {
        /* Partial (last) grain: copy into the grain buffer and zero-pad. */
        vdIfIoIntIoCtxCopyFrom(pImage->pIfIo, pIoCtx, pExtent->pvGrain, cbWrite);
        memset((char *)pExtent->pvGrain + cbWrite, '\0',
               VMDK_SECTOR2BYTE(pExtent->cSectorsPerGrain) - cbWrite);
        pData = pExtent->pvGrain;
    }
    else
    {
        /* Full grain: compress straight from the I/O context buffer. */
        RTSGSEG Segment;
        unsigned cSegments = 1;

        size_t cbSeg = vdIfIoIntIoCtxSegArrayCreate(pImage->pIfIo, pIoCtx, &Segment,
                                                    &cSegments, VMDK_SECTOR2BYTE(pExtent->cSectorsPerGrain));
        Assert(cbSeg == VMDK_SECTOR2BYTE(pExtent->cSectorsPerGrain)); RT_NOREF(cbSeg);
        pData = Segment.pvSeg;
    }
    rc = vmdkFileDeflateSync(pImage, pExtent, uFileOffset, pData,
                             VMDK_SECTOR2BYTE(pExtent->cSectorsPerGrain),
                             uSector, &cbGrain);
    if (RT_FAILURE(rc))
    {
        /* Invalidate the sequential-read position so later reads fail fast. */
        pExtent->uGrainSectorAbs = 0;
        AssertRC(rc);
        return vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: cannot write compressed data block in '%s'"), pExtent->pszFullname);
    }
    pExtent->uLastGrainAccess = uGrain;
    pExtent->uAppendPosition += cbGrain;

    return rc;
}
6530
/**
 * Internal: Updates the grain table during grain allocation.
 *
 * Reads the affected grain table block (unless cached), patches in the new
 * grain offset and writes the block back to the grain table and, if present,
 * to the redundant grain table.  May be called again from the completion
 * callback (vmdkAllocGrainComplete) when the metadata read went asynchronous.
 *
 * @returns VBox status code, VERR_VD_ASYNC_IO_IN_PROGRESS when a metadata
 *          transfer was queued and the update will be resumed by the
 *          completion callback.
 * @param   pImage      The image instance data.
 * @param   pExtent     The extent the grain was allocated in.
 * @param   pIoCtx      I/O context for the metadata transfers.
 * @param   pGrainAlloc Grain allocation tracking structure (carries the
 *                      sector, GT/RGT sectors and the new grain offset).
 */
static int vmdkAllocGrainGTUpdate(PVMDKIMAGE pImage, PVMDKEXTENT pExtent, PVDIOCTX pIoCtx,
                                  PVMDKGRAINALLOCASYNC pGrainAlloc)
{
    int rc = VINF_SUCCESS;
    PVMDKGTCACHE pCache = pImage->pGTCache;
    uint32_t aGTDataTmp[VMDK_GT_CACHELINE_SIZE];
    uint32_t uGTHash, uGTBlockIndex;
    uint64_t uGTSector, uRGTSector, uGTBlock;
    uint64_t uSector = pGrainAlloc->uSector;
    PVMDKGTCACHEENTRY pGTCacheEntry;

    LogFlowFunc(("pImage=%#p pExtent=%#p pCache=%#p pIoCtx=%#p pGrainAlloc=%#p\n",
                 pImage, pExtent, pCache, pIoCtx, pGrainAlloc));

    uGTSector = pGrainAlloc->uGTSector;
    uRGTSector = pGrainAlloc->uRGTSector;
    LogFlow(("uGTSector=%llu uRGTSector=%llu\n", uGTSector, uRGTSector));

    /* Update the grain table (and the cache). */
    uGTBlock = uSector / (pExtent->cSectorsPerGrain * VMDK_GT_CACHELINE_SIZE);
    uGTHash = vmdkGTCacheHash(pCache, uGTBlock, pExtent->uExtent);
    pGTCacheEntry = &pCache->aGTCache[uGTHash];
    if (   pGTCacheEntry->uExtent != pExtent->uExtent
        || pGTCacheEntry->uGTBlock != uGTBlock)
    {
        /* Cache miss, fetch data from disk. */
        LogFlow(("Cache miss, fetch data from disk\n"));
        PVDMETAXFER pMetaXfer = NULL;
        rc = vdIfIoIntFileReadMeta(pImage->pIfIo, pExtent->pFile->pStorage,
                                   VMDK_SECTOR2BYTE(uGTSector) + (uGTBlock % (pExtent->cGTEntries / VMDK_GT_CACHELINE_SIZE)) * sizeof(aGTDataTmp),
                                   aGTDataTmp, sizeof(aGTDataTmp), pIoCtx,
                                   &pMetaXfer, vmdkAllocGrainComplete, pGrainAlloc);
        if (rc == VERR_VD_ASYNC_IO_IN_PROGRESS)
        {
            pGrainAlloc->cIoXfersPending++;
            pGrainAlloc->fGTUpdateNeeded = true;
            /* Leave early, we will be called again after the read completed. */
            LogFlowFunc(("Metadata read in progress, leaving\n"));
            return rc;
        }
        else if (RT_FAILURE(rc))
            return vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: cannot read allocated grain table entry in '%s'"), pExtent->pszFullname);
        vdIfIoIntMetaXferRelease(pImage->pIfIo, pMetaXfer);
        pGTCacheEntry->uExtent = pExtent->uExtent;
        pGTCacheEntry->uGTBlock = uGTBlock;
        /* Grain table entries are stored little endian on disk. */
        for (unsigned i = 0; i < VMDK_GT_CACHELINE_SIZE; i++)
            pGTCacheEntry->aGTData[i] = RT_LE2H_U32(aGTDataTmp[i]);
    }
    else
    {
        /* Cache hit. Convert grain table block back to disk format, otherwise
         * the code below will write garbage for all but the updated entry. */
        for (unsigned i = 0; i < VMDK_GT_CACHELINE_SIZE; i++)
            aGTDataTmp[i] = RT_H2LE_U32(pGTCacheEntry->aGTData[i]);
    }
    pGrainAlloc->fGTUpdateNeeded = false;
    /* Patch the new grain offset into both the on-disk buffer and the cache. */
    uGTBlockIndex = (uSector / pExtent->cSectorsPerGrain) % VMDK_GT_CACHELINE_SIZE;
    aGTDataTmp[uGTBlockIndex] = RT_H2LE_U32(VMDK_BYTE2SECTOR(pGrainAlloc->uGrainOffset));
    pGTCacheEntry->aGTData[uGTBlockIndex] = VMDK_BYTE2SECTOR(pGrainAlloc->uGrainOffset);
    /* Update grain table on disk. */
    rc = vdIfIoIntFileWriteMeta(pImage->pIfIo, pExtent->pFile->pStorage,
                                VMDK_SECTOR2BYTE(uGTSector) + (uGTBlock % (pExtent->cGTEntries / VMDK_GT_CACHELINE_SIZE)) * sizeof(aGTDataTmp),
                                aGTDataTmp, sizeof(aGTDataTmp), pIoCtx,
                                vmdkAllocGrainComplete, pGrainAlloc);
    if (rc == VERR_VD_ASYNC_IO_IN_PROGRESS)
        pGrainAlloc->cIoXfersPending++;
    else if (RT_FAILURE(rc))
        return vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: cannot write updated grain table in '%s'"), pExtent->pszFullname);
    if (pExtent->pRGD)
    {
        /* Update backup grain table on disk. */
        rc = vdIfIoIntFileWriteMeta(pImage->pIfIo, pExtent->pFile->pStorage,
                                    VMDK_SECTOR2BYTE(uRGTSector) + (uGTBlock % (pExtent->cGTEntries / VMDK_GT_CACHELINE_SIZE)) * sizeof(aGTDataTmp),
                                    aGTDataTmp, sizeof(aGTDataTmp), pIoCtx,
                                    vmdkAllocGrainComplete, pGrainAlloc);
        if (rc == VERR_VD_ASYNC_IO_IN_PROGRESS)
            pGrainAlloc->cIoXfersPending++;
        else if (RT_FAILURE(rc))
            return vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: cannot write updated backup grain table in '%s'"), pExtent->pszFullname);
    }

    LogFlowFunc(("leaving rc=%Rrc\n", rc));
    return rc;
}
6618
6619/**
6620 * Internal - complete the grain allocation by updating disk grain table if required.
6621 */
6622static DECLCALLBACK(int) vmdkAllocGrainComplete(void *pBackendData, PVDIOCTX pIoCtx, void *pvUser, int rcReq)
6623{
6624 RT_NOREF1(rcReq);
6625 int rc = VINF_SUCCESS;
6626 PVMDKIMAGE pImage = (PVMDKIMAGE)pBackendData;
6627 PVMDKGRAINALLOCASYNC pGrainAlloc = (PVMDKGRAINALLOCASYNC)pvUser;
6628
6629 LogFlowFunc(("pBackendData=%#p pIoCtx=%#p pvUser=%#p rcReq=%Rrc\n",
6630 pBackendData, pIoCtx, pvUser, rcReq));
6631
6632 pGrainAlloc->cIoXfersPending--;
6633 if (!pGrainAlloc->cIoXfersPending && pGrainAlloc->fGTUpdateNeeded)
6634 rc = vmdkAllocGrainGTUpdate(pImage, pGrainAlloc->pExtent, pIoCtx, pGrainAlloc);
6635
6636 if (!pGrainAlloc->cIoXfersPending)
6637 {
6638 /* Grain allocation completed. */
6639 RTMemFree(pGrainAlloc);
6640 }
6641
6642 LogFlowFunc(("Leaving rc=%Rrc\n", rc));
6643 return rc;
6644}
6645
/**
 * Internal. Allocates a new grain table (if necessary).
 *
 * Writes the grain data for @a uSector and, when the covering grain
 * directory entry has no grain table yet, first allocates and writes a new
 * (zeroed) grain table plus the optional redundant copy and updates both
 * grain directories.  Finally the grain table entry itself is updated via
 * vmdkAllocGrainGTUpdate().  Transfers may complete asynchronously; the
 * pGrainAlloc tracking structure is freed by vmdkAllocGrainComplete() once
 * all pending transfers are done, or here when none are pending.
 *
 * @returns VBox status code.
 * @param   pImage   The image instance data.
 * @param   pExtent  The extent to allocate the grain in.
 * @param   pIoCtx   I/O context holding the data to write.
 * @param   uSector  Starting sector of the write, relative to the extent.
 * @param   cbWrite  Number of bytes to write (a full grain for sparse
 *                   extents, enforced for streamOptimized).
 */
static int vmdkAllocGrain(PVMDKIMAGE pImage, PVMDKEXTENT pExtent, PVDIOCTX pIoCtx,
                          uint64_t uSector, uint64_t cbWrite)
{
    PVMDKGTCACHE pCache = pImage->pGTCache; NOREF(pCache);
    uint64_t uGDIndex, uGTSector, uRGTSector;
    uint64_t uFileOffset;
    PVMDKGRAINALLOCASYNC pGrainAlloc = NULL;
    int rc;

    LogFlowFunc(("pCache=%#p pExtent=%#p pIoCtx=%#p uSector=%llu cbWrite=%llu\n",
                 pCache, pExtent, pIoCtx, uSector, cbWrite));

    pGrainAlloc = (PVMDKGRAINALLOCASYNC)RTMemAllocZ(sizeof(VMDKGRAINALLOCASYNC));
    if (!pGrainAlloc)
        return VERR_NO_MEMORY;

    pGrainAlloc->pExtent = pExtent;
    pGrainAlloc->uSector = uSector;

    uGDIndex = uSector / pExtent->cSectorsPerGDE;
    if (uGDIndex >= pExtent->cGDEntries)
    {
        RTMemFree(pGrainAlloc);
        return VERR_OUT_OF_RANGE;
    }
    uGTSector = pExtent->pGD[uGDIndex];
    if (pExtent->pRGD)
        uRGTSector = pExtent->pRGD[uGDIndex];
    else
        uRGTSector = 0; /**< avoid compiler warning */
    if (!uGTSector)
    {
        LogFlow(("Allocating new grain table\n"));

        /* There is no grain table referenced by this grain directory
         * entry. So there is absolutely no data in this area. Allocate
         * a new grain table and put the reference to it in the GDs. */
        uFileOffset = pExtent->uAppendPosition;
        if (!uFileOffset)
        {
            RTMemFree(pGrainAlloc);
            return VERR_INTERNAL_ERROR;
        }
        Assert(!(uFileOffset % 512));

        uFileOffset = RT_ALIGN_64(uFileOffset, 512);
        uGTSector = VMDK_BYTE2SECTOR(uFileOffset);

        /* Normally the grain table is preallocated for hosted sparse extents
         * that support more than 32 bit sector numbers. So this shouldn't
         * ever happen on a valid extent. */
        if (uGTSector > UINT32_MAX)
        {
            RTMemFree(pGrainAlloc);
            return VERR_VD_VMDK_INVALID_HEADER;
        }

        /* Write grain table by writing the required number of grain table
         * cache chunks. Allocate memory dynamically here or we flood the
         * metadata cache with very small entries. */
        size_t cbGTDataTmp = pExtent->cGTEntries * sizeof(uint32_t);
        uint32_t *paGTDataTmp = (uint32_t *)RTMemTmpAllocZ(cbGTDataTmp);

        if (!paGTDataTmp)
        {
            RTMemFree(pGrainAlloc);
            return VERR_NO_MEMORY;
        }

        memset(paGTDataTmp, '\0', cbGTDataTmp);
        rc = vdIfIoIntFileWriteMeta(pImage->pIfIo, pExtent->pFile->pStorage,
                                    VMDK_SECTOR2BYTE(uGTSector),
                                    paGTDataTmp, cbGTDataTmp, pIoCtx,
                                    vmdkAllocGrainComplete, pGrainAlloc);
        if (rc == VERR_VD_ASYNC_IO_IN_PROGRESS)
            pGrainAlloc->cIoXfersPending++;
        else if (RT_FAILURE(rc))
        {
            RTMemTmpFree(paGTDataTmp);
            RTMemFree(pGrainAlloc);
            return vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: cannot write grain table allocation in '%s'"), pExtent->pszFullname);
        }
        pExtent->uAppendPosition = RT_ALIGN_64(  pExtent->uAppendPosition
                                               + cbGTDataTmp, 512);

        if (pExtent->pRGD)
        {
            /* NOTE(review): on this assertion failure paGTDataTmp leaks, and
             * pGrainAlloc leaks too unless a transfer is already pending --
             * same pattern on the two error returns below.  TODO verify and
             * clean up like the earlier error paths in this function. */
            AssertReturn(!uRGTSector, VERR_VD_VMDK_INVALID_HEADER);
            uFileOffset = pExtent->uAppendPosition;
            if (!uFileOffset)
                return VERR_INTERNAL_ERROR;
            Assert(!(uFileOffset % 512));
            uRGTSector = VMDK_BYTE2SECTOR(uFileOffset);

            /* Normally the redundant grain table is preallocated for hosted
             * sparse extents that support more than 32 bit sector numbers. So
             * this shouldn't ever happen on a valid extent. */
            if (uRGTSector > UINT32_MAX)
            {
                RTMemTmpFree(paGTDataTmp);
                return VERR_VD_VMDK_INVALID_HEADER;
            }

            /* Write grain table by writing the required number of grain table
             * cache chunks. Allocate memory dynamically here or we flood the
             * metadata cache with very small entries. */
            rc = vdIfIoIntFileWriteMeta(pImage->pIfIo, pExtent->pFile->pStorage,
                                        VMDK_SECTOR2BYTE(uRGTSector),
                                        paGTDataTmp, cbGTDataTmp, pIoCtx,
                                        vmdkAllocGrainComplete, pGrainAlloc);
            if (rc == VERR_VD_ASYNC_IO_IN_PROGRESS)
                pGrainAlloc->cIoXfersPending++;
            else if (RT_FAILURE(rc))
            {
                RTMemTmpFree(paGTDataTmp);
                return vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: cannot write backup grain table allocation in '%s'"), pExtent->pszFullname);
            }

            pExtent->uAppendPosition = pExtent->uAppendPosition + cbGTDataTmp;
        }

        RTMemTmpFree(paGTDataTmp);

        /* Update the grain directory on disk (doing it before writing the
         * grain table will result in a garbled extent if the operation is
         * aborted for some reason. Otherwise the worst that can happen is
         * some unused sectors in the extent. */
        /* NOTE(review): RT_H2LE_U64 result is truncated into a uint32_t here
         * (and for uRGTSectorLE below); harmless on little endian hosts but
         * looks broken on big endian -- should this be RT_H2LE_U32?  Verify. */
        uint32_t uGTSectorLE = RT_H2LE_U64(uGTSector);
        rc = vdIfIoIntFileWriteMeta(pImage->pIfIo, pExtent->pFile->pStorage,
                                    VMDK_SECTOR2BYTE(pExtent->uSectorGD) + uGDIndex * sizeof(uGTSectorLE),
                                    &uGTSectorLE, sizeof(uGTSectorLE), pIoCtx,
                                    vmdkAllocGrainComplete, pGrainAlloc);
        if (rc == VERR_VD_ASYNC_IO_IN_PROGRESS)
            pGrainAlloc->cIoXfersPending++;
        else if (RT_FAILURE(rc))
            return vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: cannot write grain directory entry in '%s'"), pExtent->pszFullname);
        if (pExtent->pRGD)
        {
            uint32_t uRGTSectorLE = RT_H2LE_U64(uRGTSector);
            rc = vdIfIoIntFileWriteMeta(pImage->pIfIo, pExtent->pFile->pStorage,
                                        VMDK_SECTOR2BYTE(pExtent->uSectorRGD) + uGDIndex * sizeof(uGTSectorLE),
                                        &uRGTSectorLE, sizeof(uRGTSectorLE), pIoCtx,
                                        vmdkAllocGrainComplete, pGrainAlloc);
            if (rc == VERR_VD_ASYNC_IO_IN_PROGRESS)
                pGrainAlloc->cIoXfersPending++;
            else if (RT_FAILURE(rc))
                return vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: cannot write backup grain directory entry in '%s'"), pExtent->pszFullname);
        }

        /* As the final step update the in-memory copy of the GDs. */
        pExtent->pGD[uGDIndex] = uGTSector;
        if (pExtent->pRGD)
            pExtent->pRGD[uGDIndex] = uRGTSector;
    }

    LogFlow(("uGTSector=%llu uRGTSector=%llu\n", uGTSector, uRGTSector));
    pGrainAlloc->uGTSector = uGTSector;
    pGrainAlloc->uRGTSector = uRGTSector;

    uFileOffset = pExtent->uAppendPosition;
    if (!uFileOffset)
        return VERR_INTERNAL_ERROR;
    Assert(!(uFileOffset % 512));

    pGrainAlloc->uGrainOffset = uFileOffset;

    if (pImage->uImageFlags & VD_VMDK_IMAGE_FLAGS_STREAM_OPTIMIZED)
    {
        AssertMsgReturn(vdIfIoIntIoCtxIsSynchronous(pImage->pIfIo, pIoCtx),
                        ("Accesses to stream optimized images must be synchronous\n"),
                        VERR_INVALID_STATE);

        if (cbWrite != VMDK_SECTOR2BYTE(pExtent->cSectorsPerGrain))
            return vdIfError(pImage->pIfError, VERR_INTERNAL_ERROR, RT_SRC_POS, N_("VMDK: not enough data for a compressed data block in '%s'"), pExtent->pszFullname);

        /* Invalidate cache, just in case some code incorrectly allows mixing
         * of reads and writes. Normally shouldn't be needed. */
        pExtent->uGrainSectorAbs = 0;

        /* Write compressed data block and the markers. */
        uint32_t cbGrain = 0;
        RTSGSEG Segment;
        unsigned cSegments = 1;

        size_t cbSeg = vdIfIoIntIoCtxSegArrayCreate(pImage->pIfIo, pIoCtx, &Segment,
                                                    &cSegments, cbWrite);
        Assert(cbSeg == cbWrite); RT_NOREF(cbSeg);

        rc = vmdkFileDeflateSync(pImage, pExtent, uFileOffset,
                                 Segment.pvSeg, cbWrite, uSector, &cbGrain);
        if (RT_FAILURE(rc))
        {
            AssertRC(rc);
            return vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: cannot write allocated compressed data block in '%s'"), pExtent->pszFullname);
        }
        pExtent->uLastGrainAccess = uSector / pExtent->cSectorsPerGrain;
        pExtent->uAppendPosition += cbGrain;
    }
    else
    {
        /* Write the data. Always a full grain, or we're in big trouble. */
        rc = vdIfIoIntFileWriteUser(pImage->pIfIo, pExtent->pFile->pStorage,
                                    uFileOffset, pIoCtx, cbWrite,
                                    vmdkAllocGrainComplete, pGrainAlloc);
        if (rc == VERR_VD_ASYNC_IO_IN_PROGRESS)
            pGrainAlloc->cIoXfersPending++;
        else if (RT_FAILURE(rc))
            return vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: cannot write allocated data block in '%s'"), pExtent->pszFullname);

        pExtent->uAppendPosition += cbWrite;
    }

    rc = vmdkAllocGrainGTUpdate(pImage, pExtent, pIoCtx, pGrainAlloc);

    if (!pGrainAlloc->cIoXfersPending)
    {
        /* Grain allocation completed. */
        RTMemFree(pGrainAlloc);
    }

    LogFlowFunc(("leaving rc=%Rrc\n", rc));

    return rc;
}
6873
6874/**
6875 * Internal. Reads the contents by sequentially going over the compressed
6876 * grains (hoping that they are in sequence).
6877 */
6878static int vmdkStreamReadSequential(PVMDKIMAGE pImage, PVMDKEXTENT pExtent,
6879 uint64_t uSector, PVDIOCTX pIoCtx,
6880 uint64_t cbRead)
6881{
6882 int rc;
6883
6884 LogFlowFunc(("pImage=%#p pExtent=%#p uSector=%llu pIoCtx=%#p cbRead=%llu\n",
6885 pImage, pExtent, uSector, pIoCtx, cbRead));
6886
6887 AssertMsgReturn(vdIfIoIntIoCtxIsSynchronous(pImage->pIfIo, pIoCtx),
6888 ("Async I/O not supported for sequential stream optimized images\n"),
6889 VERR_INVALID_STATE);
6890
6891 /* Do not allow to go back. */
6892 uint32_t uGrain = uSector / pExtent->cSectorsPerGrain;
6893 if (uGrain < pExtent->uLastGrainAccess)
6894 return VERR_VD_VMDK_INVALID_STATE;
6895 pExtent->uLastGrainAccess = uGrain;
6896
6897 /* After a previous error do not attempt to recover, as it would need
6898 * seeking (in the general case backwards which is forbidden). */
6899 if (!pExtent->uGrainSectorAbs)
6900 return VERR_VD_VMDK_INVALID_STATE;
6901
6902 /* Check if we need to read something from the image or if what we have
6903 * in the buffer is good to fulfill the request. */
6904 if (!pExtent->cbGrainStreamRead || uGrain > pExtent->uGrain)
6905 {
6906 uint32_t uGrainSectorAbs = pExtent->uGrainSectorAbs
6907 + VMDK_BYTE2SECTOR(pExtent->cbGrainStreamRead);
6908
6909 /* Get the marker from the next data block - and skip everything which
6910 * is not a compressed grain. If it's a compressed grain which is for
6911 * the requested sector (or after), read it. */
6912 VMDKMARKER Marker;
6913 do
6914 {
6915 RT_ZERO(Marker);
6916 rc = vdIfIoIntFileReadSync(pImage->pIfIo, pExtent->pFile->pStorage,
6917 VMDK_SECTOR2BYTE(uGrainSectorAbs),
6918 &Marker, RT_UOFFSETOF(VMDKMARKER, uType));
6919 if (RT_FAILURE(rc))
6920 return rc;
6921 Marker.uSector = RT_LE2H_U64(Marker.uSector);
6922 Marker.cbSize = RT_LE2H_U32(Marker.cbSize);
6923
6924 if (Marker.cbSize == 0)
6925 {
6926 /* A marker for something else than a compressed grain. */
6927 rc = vdIfIoIntFileReadSync(pImage->pIfIo, pExtent->pFile->pStorage,
6928 VMDK_SECTOR2BYTE(uGrainSectorAbs)
6929 + RT_UOFFSETOF(VMDKMARKER, uType),
6930 &Marker.uType, sizeof(Marker.uType));
6931 if (RT_FAILURE(rc))
6932 return rc;
6933 Marker.uType = RT_LE2H_U32(Marker.uType);
6934 switch (Marker.uType)
6935 {
6936 case VMDK_MARKER_EOS:
6937 uGrainSectorAbs++;
6938 /* Read (or mostly skip) to the end of file. Uses the
6939 * Marker (LBA sector) as it is unused anyway. This
6940 * makes sure that really everything is read in the
6941 * success case. If this read fails it means the image
6942 * is truncated, but this is harmless so ignore. */
6943 vdIfIoIntFileReadSync(pImage->pIfIo, pExtent->pFile->pStorage,
6944 VMDK_SECTOR2BYTE(uGrainSectorAbs)
6945 + 511,
6946 &Marker.uSector, 1);
6947 break;
6948 case VMDK_MARKER_GT:
6949 uGrainSectorAbs += 1 + VMDK_BYTE2SECTOR(pExtent->cGTEntries * sizeof(uint32_t));
6950 break;
6951 case VMDK_MARKER_GD:
6952 uGrainSectorAbs += 1 + VMDK_BYTE2SECTOR(RT_ALIGN(pExtent->cGDEntries * sizeof(uint32_t), 512));
6953 break;
6954 case VMDK_MARKER_FOOTER:
6955 uGrainSectorAbs += 2;
6956 break;
6957 case VMDK_MARKER_UNSPECIFIED:
6958 /* Skip over the contents of the unspecified marker
6959 * type 4 which exists in some vSphere created files. */
6960 /** @todo figure out what the payload means. */
6961 uGrainSectorAbs += 1;
6962 break;
6963 default:
6964 AssertMsgFailed(("VMDK: corrupted marker, type=%#x\n", Marker.uType));
6965 pExtent->uGrainSectorAbs = 0;
6966 return VERR_VD_VMDK_INVALID_STATE;
6967 }
6968 pExtent->cbGrainStreamRead = 0;
6969 }
6970 else
6971 {
6972 /* A compressed grain marker. If it is at/after what we're
6973 * interested in read and decompress data. */
6974 if (uSector > Marker.uSector + pExtent->cSectorsPerGrain)
6975 {
6976 uGrainSectorAbs += VMDK_BYTE2SECTOR(RT_ALIGN(Marker.cbSize + RT_UOFFSETOF(VMDKMARKER, uType), 512));
6977 continue;
6978 }
6979 uint64_t uLBA = 0;
6980 uint32_t cbGrainStreamRead = 0;
6981 rc = vmdkFileInflateSync(pImage, pExtent,
6982 VMDK_SECTOR2BYTE(uGrainSectorAbs),
6983 pExtent->pvGrain,
6984 VMDK_SECTOR2BYTE(pExtent->cSectorsPerGrain),
6985 &Marker, &uLBA, &cbGrainStreamRead);
6986 if (RT_FAILURE(rc))
6987 {
6988 pExtent->uGrainSectorAbs = 0;
6989 return rc;
6990 }
6991 if ( pExtent->uGrain
6992 && uLBA / pExtent->cSectorsPerGrain <= pExtent->uGrain)
6993 {
6994 pExtent->uGrainSectorAbs = 0;
6995 return VERR_VD_VMDK_INVALID_STATE;
6996 }
6997 pExtent->uGrain = uLBA / pExtent->cSectorsPerGrain;
6998 pExtent->cbGrainStreamRead = cbGrainStreamRead;
6999 break;
7000 }
7001 } while (Marker.uType != VMDK_MARKER_EOS);
7002
7003 pExtent->uGrainSectorAbs = uGrainSectorAbs;
7004
7005 if (!pExtent->cbGrainStreamRead && Marker.uType == VMDK_MARKER_EOS)
7006 {
7007 pExtent->uGrain = UINT32_MAX;
7008 /* Must set a non-zero value for pExtent->cbGrainStreamRead or
7009 * the next read would try to get more data, and we're at EOF. */
7010 pExtent->cbGrainStreamRead = 1;
7011 }
7012 }
7013
7014 if (pExtent->uGrain > uSector / pExtent->cSectorsPerGrain)
7015 {
7016 /* The next data block we have is not for this area, so just return
7017 * that there is no data. */
7018 LogFlowFunc(("returns VERR_VD_BLOCK_FREE\n"));
7019 return VERR_VD_BLOCK_FREE;
7020 }
7021
7022 uint32_t uSectorInGrain = uSector % pExtent->cSectorsPerGrain;
7023 vdIfIoIntIoCtxCopyTo(pImage->pIfIo, pIoCtx,
7024 (uint8_t *)pExtent->pvGrain + VMDK_SECTOR2BYTE(uSectorInGrain),
7025 cbRead);
7026 LogFlowFunc(("returns VINF_SUCCESS\n"));
7027 return VINF_SUCCESS;
7028}
7029
7030/**
7031 * Replaces a fragment of a string with the specified string.
7032 *
7033 * @returns Pointer to the allocated UTF-8 string.
7034 * @param pszWhere UTF-8 string to search in.
7035 * @param pszWhat UTF-8 string to search for.
7036 * @param pszByWhat UTF-8 string to replace the found string with.
7037 *
7038 * @note r=bird: This is only used by vmdkRenameWorker(). The first use is
7039 * for updating the base name in the descriptor, the second is for
7040 * generating new filenames for extents. This code borked when
7041 * RTPathAbs started correcting the driver letter case on windows,
7042 * when strstr failed because the pExtent->pszFullname was not
7043 * subjected to RTPathAbs but while pExtent->pszFullname was. I fixed
7044 * this by apply RTPathAbs to the places it wasn't applied.
7045 *
7046 * However, this highlights some undocumented ASSUMPTIONS as well as
7047 * terrible short commings of the approach.
7048 *
7049 * Given the right filename, it may also screw up the descriptor. Take
7050 * the descriptor text 'RW 2048 SPARSE "Test0.vmdk"' for instance,
7051 * we'll be asked to replace "Test0" with something, no problem. No,
7052 * imagine 'RW 2048 SPARSE "SPARSE.vmdk"', 'RW 2048 SPARSE "RW.vmdk"'
7053 * or 'RW 2048 SPARSE "2048.vmdk"', and the strstr approach falls on
7054 * its bum. The descriptor string must be parsed and reconstructed,
7055 * the lazy strstr approach doesn't cut it.
7056 *
7057 * I'm also curious as to what would be the correct escaping of '"' in
7058 * the file name and how that is supposed to be handled, because it
7059 * needs to be or such names must be rejected in several places (maybe
7060 * they are, I didn't check).
7061 *
7062 * When this function is used to replace the start of a path, I think
7063 * the assumption from the prep/setup code is that we kind of knows
7064 * what we're working on (I could be wrong). However, using strstr
7065 * instead of strncmp/RTStrNICmp makes no sense and isn't future proof.
7066 * Especially on unix systems, weird stuff could happen if someone
7067 * unwittingly tinkers with the prep/setup code. What should really be
7068 * done here is using a new RTPathStartEx function that (via flags)
7069 * allows matching partial final component and returns the length of
7070 * what it matched up (in case it skipped slashes and '.' components).
7071 *
7072 */
7073static char *vmdkStrReplace(const char *pszWhere, const char *pszWhat,
7074 const char *pszByWhat)
7075{
7076 AssertPtr(pszWhere);
7077 AssertPtr(pszWhat);
7078 AssertPtr(pszByWhat);
7079 const char *pszFoundStr = strstr(pszWhere, pszWhat);
7080 if (!pszFoundStr)
7081 {
7082 LogFlowFunc(("Failed to find '%s' in '%s'!\n", pszWhat, pszWhere));
7083 return NULL;
7084 }
7085 size_t cbFinal = strlen(pszWhere) + 1 + strlen(pszByWhat) - strlen(pszWhat);
7086 char *pszNewStr = RTStrAlloc(cbFinal);
7087 if (pszNewStr)
7088 {
7089 char *pszTmp = pszNewStr;
7090 memcpy(pszTmp, pszWhere, pszFoundStr - pszWhere);
7091 pszTmp += pszFoundStr - pszWhere;
7092 memcpy(pszTmp, pszByWhat, strlen(pszByWhat));
7093 pszTmp += strlen(pszByWhat);
7094 strcpy(pszTmp, pszFoundStr + strlen(pszWhat));
7095 }
7096 return pszNewStr;
7097}
7098
7099
7100/** @copydoc VDIMAGEBACKEND::pfnProbe */
7101static DECLCALLBACK(int) vmdkProbe(const char *pszFilename, PVDINTERFACE pVDIfsDisk,
7102 PVDINTERFACE pVDIfsImage, VDTYPE enmDesiredType, VDTYPE *penmType)
7103{
7104 RT_NOREF(enmDesiredType);
7105 LogFlowFunc(("pszFilename=\"%s\" pVDIfsDisk=%#p pVDIfsImage=%#p penmType=%#p\n",
7106 pszFilename, pVDIfsDisk, pVDIfsImage, penmType));
7107 AssertPtrReturn(pszFilename, VERR_INVALID_POINTER);
7108 AssertReturn(*pszFilename != '\0', VERR_INVALID_PARAMETER);
7109
7110 int rc = VINF_SUCCESS;
7111 PVMDKIMAGE pImage = (PVMDKIMAGE)RTMemAllocZ(RT_UOFFSETOF(VMDKIMAGE, RegionList.aRegions[1]));
7112 if (RT_LIKELY(pImage))
7113 {
7114 pImage->pszFilename = pszFilename;
7115 pImage->pFile = NULL;
7116 pImage->pExtents = NULL;
7117 pImage->pFiles = NULL;
7118 pImage->pGTCache = NULL;
7119 pImage->pDescData = NULL;
7120 pImage->pVDIfsDisk = pVDIfsDisk;
7121 pImage->pVDIfsImage = pVDIfsImage;
7122 /** @todo speed up this test open (VD_OPEN_FLAGS_INFO) by skipping as
7123 * much as possible in vmdkOpenImage. */
7124 rc = vmdkOpenImage(pImage, VD_OPEN_FLAGS_INFO | VD_OPEN_FLAGS_READONLY);
7125 vmdkFreeImage(pImage, false, false /*fFlush*/);
7126 RTMemFree(pImage);
7127
7128 if (RT_SUCCESS(rc))
7129 *penmType = VDTYPE_HDD;
7130 }
7131 else
7132 rc = VERR_NO_MEMORY;
7133
7134 LogFlowFunc(("returns %Rrc\n", rc));
7135 return rc;
7136}
7137
7138/** @copydoc VDIMAGEBACKEND::pfnOpen */
7139static DECLCALLBACK(int) vmdkOpen(const char *pszFilename, unsigned uOpenFlags,
7140 PVDINTERFACE pVDIfsDisk, PVDINTERFACE pVDIfsImage,
7141 VDTYPE enmType, void **ppBackendData)
7142{
7143 RT_NOREF1(enmType); /**< @todo r=klaus make use of the type info. */
7144
7145 LogFlowFunc(("pszFilename=\"%s\" uOpenFlags=%#x pVDIfsDisk=%#p pVDIfsImage=%#p enmType=%u ppBackendData=%#p\n",
7146 pszFilename, uOpenFlags, pVDIfsDisk, pVDIfsImage, enmType, ppBackendData));
7147 int rc;
7148
7149 /* Check open flags. All valid flags are supported. */
7150 AssertReturn(!(uOpenFlags & ~VD_OPEN_FLAGS_MASK), VERR_INVALID_PARAMETER);
7151 AssertPtrReturn(pszFilename, VERR_INVALID_POINTER);
7152 AssertReturn(*pszFilename != '\0', VERR_INVALID_PARAMETER);
7153
7154
7155 PVMDKIMAGE pImage = (PVMDKIMAGE)RTMemAllocZ(RT_UOFFSETOF(VMDKIMAGE, RegionList.aRegions[1]));
7156 if (RT_LIKELY(pImage))
7157 {
7158 pImage->pszFilename = pszFilename;
7159 pImage->pFile = NULL;
7160 pImage->pExtents = NULL;
7161 pImage->pFiles = NULL;
7162 pImage->pGTCache = NULL;
7163 pImage->pDescData = NULL;
7164 pImage->pVDIfsDisk = pVDIfsDisk;
7165 pImage->pVDIfsImage = pVDIfsImage;
7166
7167 rc = vmdkOpenImage(pImage, uOpenFlags);
7168 if (RT_SUCCESS(rc))
7169 *ppBackendData = pImage;
7170 else
7171 RTMemFree(pImage);
7172 }
7173 else
7174 rc = VERR_NO_MEMORY;
7175
7176 LogFlowFunc(("returns %Rrc (pBackendData=%#p)\n", rc, *ppBackendData));
7177 return rc;
7178}
7179
7180/** @copydoc VDIMAGEBACKEND::pfnCreate */
7181static DECLCALLBACK(int) vmdkCreate(const char *pszFilename, uint64_t cbSize,
7182 unsigned uImageFlags, const char *pszComment,
7183 PCVDGEOMETRY pPCHSGeometry, PCVDGEOMETRY pLCHSGeometry,
7184 PCRTUUID pUuid, unsigned uOpenFlags,
7185 unsigned uPercentStart, unsigned uPercentSpan,
7186 PVDINTERFACE pVDIfsDisk, PVDINTERFACE pVDIfsImage,
7187 PVDINTERFACE pVDIfsOperation, VDTYPE enmType,
7188 void **ppBackendData)
7189{
7190 LogFlowFunc(("pszFilename=\"%s\" cbSize=%llu uImageFlags=%#x pszComment=\"%s\" pPCHSGeometry=%#p pLCHSGeometry=%#p Uuid=%RTuuid uOpenFlags=%#x uPercentStart=%u uPercentSpan=%u pVDIfsDisk=%#p pVDIfsImage=%#p pVDIfsOperation=%#p enmType=%u ppBackendData=%#p\n",
7191 pszFilename, cbSize, uImageFlags, pszComment, pPCHSGeometry, pLCHSGeometry, pUuid, uOpenFlags, uPercentStart, uPercentSpan, pVDIfsDisk, pVDIfsImage, pVDIfsOperation, enmType, ppBackendData));
7192 int rc;
7193
7194 /* Check the VD container type and image flags. */
7195 if ( enmType != VDTYPE_HDD
7196 || (uImageFlags & ~VD_VMDK_IMAGE_FLAGS_MASK) != 0)
7197 return VERR_VD_INVALID_TYPE;
7198
7199 /* Check size. Maximum 256TB-64K for sparse images, otherwise unlimited. */
7200 if ( !(uImageFlags & VD_VMDK_IMAGE_FLAGS_RAWDISK)
7201 && ( !cbSize
7202 || (!(uImageFlags & VD_IMAGE_FLAGS_FIXED) && cbSize >= _1T * 256 - _64K)))
7203 return VERR_VD_INVALID_SIZE;
7204
7205 /* Check image flags for invalid combinations. */
7206 if ( (uImageFlags & VD_VMDK_IMAGE_FLAGS_STREAM_OPTIMIZED)
7207 && (uImageFlags & ~(VD_VMDK_IMAGE_FLAGS_STREAM_OPTIMIZED | VD_IMAGE_FLAGS_DIFF)))
7208 return VERR_INVALID_PARAMETER;
7209
7210 /* Check open flags. All valid flags are supported. */
7211 AssertReturn(!(uOpenFlags & ~VD_OPEN_FLAGS_MASK), VERR_INVALID_PARAMETER);
7212 AssertPtrReturn(pszFilename, VERR_INVALID_POINTER);
7213 AssertReturn(*pszFilename != '\0', VERR_INVALID_PARAMETER);
7214 AssertPtrReturn(pPCHSGeometry, VERR_INVALID_POINTER);
7215 AssertPtrReturn(pLCHSGeometry, VERR_INVALID_POINTER);
7216 AssertReturn(!( uImageFlags & VD_VMDK_IMAGE_FLAGS_ESX
7217 && !(uImageFlags & VD_IMAGE_FLAGS_FIXED)),
7218 VERR_INVALID_PARAMETER);
7219
7220 PVMDKIMAGE pImage = (PVMDKIMAGE)RTMemAllocZ(RT_UOFFSETOF(VMDKIMAGE, RegionList.aRegions[1]));
7221 if (RT_LIKELY(pImage))
7222 {
7223 PVDINTERFACEPROGRESS pIfProgress = VDIfProgressGet(pVDIfsOperation);
7224
7225 pImage->pszFilename = pszFilename;
7226 pImage->pFile = NULL;
7227 pImage->pExtents = NULL;
7228 pImage->pFiles = NULL;
7229 pImage->pGTCache = NULL;
7230 pImage->pDescData = NULL;
7231 pImage->pVDIfsDisk = pVDIfsDisk;
7232 pImage->pVDIfsImage = pVDIfsImage;
7233 /* Descriptors for split images can be pretty large, especially if the
7234 * filename is long. So prepare for the worst, and allocate quite some
7235 * memory for the descriptor in this case. */
7236 if (uImageFlags & VD_VMDK_IMAGE_FLAGS_SPLIT_2G)
7237 pImage->cbDescAlloc = VMDK_SECTOR2BYTE(200);
7238 else
7239 pImage->cbDescAlloc = VMDK_SECTOR2BYTE(20);
7240 pImage->pDescData = (char *)RTMemAllocZ(pImage->cbDescAlloc);
7241 if (RT_LIKELY(pImage->pDescData))
7242 {
7243 rc = vmdkCreateImage(pImage, cbSize, uImageFlags, pszComment,
7244 pPCHSGeometry, pLCHSGeometry, pUuid,
7245 pIfProgress, uPercentStart, uPercentSpan);
7246 if (RT_SUCCESS(rc))
7247 {
7248 /* So far the image is opened in read/write mode. Make sure the
7249 * image is opened in read-only mode if the caller requested that. */
7250 if (uOpenFlags & VD_OPEN_FLAGS_READONLY)
7251 {
7252 vmdkFreeImage(pImage, false, true /*fFlush*/);
7253 rc = vmdkOpenImage(pImage, uOpenFlags);
7254 }
7255
7256 if (RT_SUCCESS(rc))
7257 *ppBackendData = pImage;
7258 }
7259
7260 if (RT_FAILURE(rc))
7261 RTMemFree(pImage->pDescData);
7262 }
7263 else
7264 rc = VERR_NO_MEMORY;
7265
7266 if (RT_FAILURE(rc))
7267 RTMemFree(pImage);
7268 }
7269 else
7270 rc = VERR_NO_MEMORY;
7271
7272 LogFlowFunc(("returns %Rrc (pBackendData=%#p)\n", rc, *ppBackendData));
7273 return rc;
7274}
7275
7276/**
7277 * Prepares the state for renaming a VMDK image, setting up the state and allocating
7278 * memory.
7279 *
7280 * @returns VBox status code.
7281 * @param pImage VMDK image instance.
7282 * @param pRenameState The state to initialize.
7283 * @param pszFilename The new filename.
7284 */
7285static int vmdkRenameStatePrepare(PVMDKIMAGE pImage, PVMDKRENAMESTATE pRenameState, const char *pszFilename)
7286{
7287 AssertReturn(RTPathFilename(pszFilename) != NULL, VERR_INVALID_PARAMETER);
7288
7289 int rc = VINF_SUCCESS;
7290
7291 memset(&pRenameState->DescriptorCopy, 0, sizeof(pRenameState->DescriptorCopy));
7292
7293 /*
7294 * Allocate an array to store both old and new names of renamed files
7295 * in case we have to roll back the changes. Arrays are initialized
7296 * with zeros. We actually save stuff when and if we change it.
7297 */
7298 pRenameState->cExtents = pImage->cExtents;
7299 pRenameState->apszOldName = (char **)RTMemTmpAllocZ((pRenameState->cExtents + 1) * sizeof(char *));
7300 pRenameState->apszNewName = (char **)RTMemTmpAllocZ((pRenameState->cExtents + 1) * sizeof(char *));
7301 pRenameState->apszNewLines = (char **)RTMemTmpAllocZ(pRenameState->cExtents * sizeof(char *));
7302 if ( pRenameState->apszOldName
7303 && pRenameState->apszNewName
7304 && pRenameState->apszNewLines)
7305 {
7306 /* Save the descriptor size and position. */
7307 if (pImage->pDescData)
7308 {
7309 /* Separate descriptor file. */
7310 pRenameState->fEmbeddedDesc = false;
7311 }
7312 else
7313 {
7314 /* Embedded descriptor file. */
7315 pRenameState->ExtentCopy = pImage->pExtents[0];
7316 pRenameState->fEmbeddedDesc = true;
7317 }
7318
7319 /* Save the descriptor content. */
7320 pRenameState->DescriptorCopy.cLines = pImage->Descriptor.cLines;
7321 for (unsigned i = 0; i < pRenameState->DescriptorCopy.cLines; i++)
7322 {
7323 pRenameState->DescriptorCopy.aLines[i] = RTStrDup(pImage->Descriptor.aLines[i]);
7324 if (!pRenameState->DescriptorCopy.aLines[i])
7325 {
7326 rc = VERR_NO_MEMORY;
7327 break;
7328 }
7329 }
7330
7331 if (RT_SUCCESS(rc))
7332 {
7333 /* Prepare both old and new base names used for string replacement. */
7334 pRenameState->pszNewBaseName = RTStrDup(RTPathFilename(pszFilename));
7335 AssertReturn(pRenameState->pszNewBaseName, VERR_NO_STR_MEMORY);
7336 RTPathStripSuffix(pRenameState->pszNewBaseName);
7337
7338 pRenameState->pszOldBaseName = RTStrDup(RTPathFilename(pImage->pszFilename));
7339 AssertReturn(pRenameState->pszOldBaseName, VERR_NO_STR_MEMORY);
7340 RTPathStripSuffix(pRenameState->pszOldBaseName);
7341
7342 /* Prepare both old and new full names used for string replacement.
7343 Note! Must abspath the stuff here, so the strstr weirdness later in
7344 the renaming process get a match against abspath'ed extent paths.
7345 See RTPathAbsDup call in vmdkDescriptorReadSparse(). */
7346 pRenameState->pszNewFullName = RTPathAbsDup(pszFilename);
7347 AssertReturn(pRenameState->pszNewFullName, VERR_NO_STR_MEMORY);
7348 RTPathStripSuffix(pRenameState->pszNewFullName);
7349
7350 pRenameState->pszOldFullName = RTPathAbsDup(pImage->pszFilename);
7351 AssertReturn(pRenameState->pszOldFullName, VERR_NO_STR_MEMORY);
7352 RTPathStripSuffix(pRenameState->pszOldFullName);
7353
7354 /* Save the old name for easy access to the old descriptor file. */
7355 pRenameState->pszOldDescName = RTStrDup(pImage->pszFilename);
7356 AssertReturn(pRenameState->pszOldDescName, VERR_NO_STR_MEMORY);
7357
7358 /* Save old image name. */
7359 pRenameState->pszOldImageName = pImage->pszFilename;
7360 }
7361 }
7362 else
7363 rc = VERR_NO_TMP_MEMORY;
7364
7365 return rc;
7366}
7367
7368/**
7369 * Destroys the given rename state, freeing all allocated memory.
7370 *
7371 * @param pRenameState The rename state to destroy.
7372 */
7373static void vmdkRenameStateDestroy(PVMDKRENAMESTATE pRenameState)
7374{
7375 for (unsigned i = 0; i < pRenameState->DescriptorCopy.cLines; i++)
7376 if (pRenameState->DescriptorCopy.aLines[i])
7377 RTStrFree(pRenameState->DescriptorCopy.aLines[i]);
7378 if (pRenameState->apszOldName)
7379 {
7380 for (unsigned i = 0; i <= pRenameState->cExtents; i++)
7381 if (pRenameState->apszOldName[i])
7382 RTStrFree(pRenameState->apszOldName[i]);
7383 RTMemTmpFree(pRenameState->apszOldName);
7384 }
7385 if (pRenameState->apszNewName)
7386 {
7387 for (unsigned i = 0; i <= pRenameState->cExtents; i++)
7388 if (pRenameState->apszNewName[i])
7389 RTStrFree(pRenameState->apszNewName[i]);
7390 RTMemTmpFree(pRenameState->apszNewName);
7391 }
7392 if (pRenameState->apszNewLines)
7393 {
7394 for (unsigned i = 0; i < pRenameState->cExtents; i++)
7395 if (pRenameState->apszNewLines[i])
7396 RTStrFree(pRenameState->apszNewLines[i]);
7397 RTMemTmpFree(pRenameState->apszNewLines);
7398 }
7399 if (pRenameState->pszOldDescName)
7400 RTStrFree(pRenameState->pszOldDescName);
7401 if (pRenameState->pszOldBaseName)
7402 RTStrFree(pRenameState->pszOldBaseName);
7403 if (pRenameState->pszNewBaseName)
7404 RTStrFree(pRenameState->pszNewBaseName);
7405 if (pRenameState->pszOldFullName)
7406 RTStrFree(pRenameState->pszOldFullName);
7407 if (pRenameState->pszNewFullName)
7408 RTStrFree(pRenameState->pszNewFullName);
7409}
7410
7411/**
7412 * Rolls back the rename operation to the original state.
7413 *
7414 * @returns VBox status code.
7415 * @param pImage VMDK image instance.
7416 * @param pRenameState The rename state.
7417 */
7418static int vmdkRenameRollback(PVMDKIMAGE pImage, PVMDKRENAMESTATE pRenameState)
7419{
7420 int rc = VINF_SUCCESS;
7421
7422 if (!pRenameState->fImageFreed)
7423 {
7424 /*
7425 * Some extents may have been closed, close the rest. We will
7426 * re-open the whole thing later.
7427 */
7428 vmdkFreeImage(pImage, false, true /*fFlush*/);
7429 }
7430
7431 /* Rename files back. */
7432 for (unsigned i = 0; i <= pRenameState->cExtents; i++)
7433 {
7434 if (pRenameState->apszOldName[i])
7435 {
7436 rc = vdIfIoIntFileMove(pImage->pIfIo, pRenameState->apszNewName[i], pRenameState->apszOldName[i], 0);
7437 AssertRC(rc);
7438 }
7439 }
7440 /* Restore the old descriptor. */
7441 PVMDKFILE pFile;
7442 rc = vmdkFileOpen(pImage, &pFile, NULL, pRenameState->pszOldDescName,
7443 VDOpenFlagsToFileOpenFlags(VD_OPEN_FLAGS_NORMAL,
7444 false /* fCreate */));
7445 AssertRC(rc);
7446 if (pRenameState->fEmbeddedDesc)
7447 {
7448 pRenameState->ExtentCopy.pFile = pFile;
7449 pImage->pExtents = &pRenameState->ExtentCopy;
7450 }
7451 else
7452 {
7453 /* Shouldn't be null for separate descriptor.
7454 * There will be no access to the actual content.
7455 */
7456 pImage->pDescData = pRenameState->pszOldDescName;
7457 pImage->pFile = pFile;
7458 }
7459 pImage->Descriptor = pRenameState->DescriptorCopy;
7460 vmdkWriteDescriptor(pImage, NULL);
7461 vmdkFileClose(pImage, &pFile, false);
7462 /* Get rid of the stuff we implanted. */
7463 pImage->pExtents = NULL;
7464 pImage->pFile = NULL;
7465 pImage->pDescData = NULL;
7466 /* Re-open the image back. */
7467 pImage->pszFilename = pRenameState->pszOldImageName;
7468 rc = vmdkOpenImage(pImage, pImage->uOpenFlags);
7469
7470 return rc;
7471}
7472
7473/**
7474 * Rename worker doing the real work.
7475 *
7476 * @returns VBox status code.
7477 * @param pImage VMDK image instance.
7478 * @param pRenameState The rename state.
7479 * @param pszFilename The new filename.
7480 */
7481static int vmdkRenameWorker(PVMDKIMAGE pImage, PVMDKRENAMESTATE pRenameState, const char *pszFilename)
7482{
7483 int rc = VINF_SUCCESS;
7484 unsigned i, line;
7485
7486 /* Update the descriptor with modified extent names. */
7487 for (i = 0, line = pImage->Descriptor.uFirstExtent;
7488 i < pRenameState->cExtents;
7489 i++, line = pImage->Descriptor.aNextLines[line])
7490 {
7491 /* Update the descriptor. */
7492 pRenameState->apszNewLines[i] = vmdkStrReplace(pImage->Descriptor.aLines[line],
7493 pRenameState->pszOldBaseName,
7494 pRenameState->pszNewBaseName);
7495 if (!pRenameState->apszNewLines[i])
7496 {
7497 rc = VERR_NO_MEMORY;
7498 break;
7499 }
7500 pImage->Descriptor.aLines[line] = pRenameState->apszNewLines[i];
7501 }
7502
7503 if (RT_SUCCESS(rc))
7504 {
7505 /* Make sure the descriptor gets written back. */
7506 pImage->Descriptor.fDirty = true;
7507 /* Flush the descriptor now, in case it is embedded. */
7508 vmdkFlushImage(pImage, NULL);
7509
7510 /* Close and rename/move extents. */
7511 for (i = 0; i < pRenameState->cExtents; i++)
7512 {
7513 PVMDKEXTENT pExtent = &pImage->pExtents[i];
7514 /* Compose new name for the extent. */
7515 pRenameState->apszNewName[i] = vmdkStrReplace(pExtent->pszFullname,
7516 pRenameState->pszOldFullName,
7517 pRenameState->pszNewFullName);
7518 if (!pRenameState->apszNewName[i])
7519 {
7520 rc = VERR_NO_MEMORY;
7521 break;
7522 }
7523 /* Close the extent file. */
7524 rc = vmdkFileClose(pImage, &pExtent->pFile, false);
7525 if (RT_FAILURE(rc))
7526 break;;
7527
7528 /* Rename the extent file. */
7529 rc = vdIfIoIntFileMove(pImage->pIfIo, pExtent->pszFullname, pRenameState->apszNewName[i], 0);
7530 if (RT_FAILURE(rc))
7531 break;
7532 /* Remember the old name. */
7533 pRenameState->apszOldName[i] = RTStrDup(pExtent->pszFullname);
7534 }
7535
7536 if (RT_SUCCESS(rc))
7537 {
7538 /* Release all old stuff. */
7539 rc = vmdkFreeImage(pImage, false, true /*fFlush*/);
7540 if (RT_SUCCESS(rc))
7541 {
7542 pRenameState->fImageFreed = true;
7543
7544 /* Last elements of new/old name arrays are intended for
7545 * storing descriptor's names.
7546 */
7547 pRenameState->apszNewName[pRenameState->cExtents] = RTStrDup(pszFilename);
7548 /* Rename the descriptor file if it's separate. */
7549 if (!pRenameState->fEmbeddedDesc)
7550 {
7551 rc = vdIfIoIntFileMove(pImage->pIfIo, pImage->pszFilename, pRenameState->apszNewName[pRenameState->cExtents], 0);
7552 if (RT_SUCCESS(rc))
7553 {
7554 /* Save old name only if we may need to change it back. */
7555 pRenameState->apszOldName[pRenameState->cExtents] = RTStrDup(pszFilename);
7556 }
7557 }
7558
7559 /* Update pImage with the new information. */
7560 pImage->pszFilename = pszFilename;
7561
7562 /* Open the new image. */
7563 rc = vmdkOpenImage(pImage, pImage->uOpenFlags);
7564 }
7565 }
7566 }
7567
7568 return rc;
7569}
7570
7571/** @copydoc VDIMAGEBACKEND::pfnRename */
7572static DECLCALLBACK(int) vmdkRename(void *pBackendData, const char *pszFilename)
7573{
7574 LogFlowFunc(("pBackendData=%#p pszFilename=%#p\n", pBackendData, pszFilename));
7575
7576 PVMDKIMAGE pImage = (PVMDKIMAGE)pBackendData;
7577 VMDKRENAMESTATE RenameState;
7578
7579 memset(&RenameState, 0, sizeof(RenameState));
7580
7581 /* Check arguments. */
7582 AssertPtrReturn(pImage, VERR_INVALID_POINTER);
7583 AssertPtrReturn(pszFilename, VERR_INVALID_POINTER);
7584 AssertReturn(*pszFilename != '\0', VERR_INVALID_PARAMETER);
7585 AssertReturn(!(pImage->uImageFlags & VD_VMDK_IMAGE_FLAGS_RAWDISK), VERR_INVALID_PARAMETER);
7586
7587 int rc = vmdkRenameStatePrepare(pImage, &RenameState, pszFilename);
7588 if (RT_SUCCESS(rc))
7589 {
7590 /* --- Up to this point we have not done any damage yet. --- */
7591
7592 rc = vmdkRenameWorker(pImage, &RenameState, pszFilename);
7593 /* Roll back all changes in case of failure. */
7594 if (RT_FAILURE(rc))
7595 {
7596 int rrc = vmdkRenameRollback(pImage, &RenameState);
7597 AssertRC(rrc);
7598 }
7599 }
7600
7601 vmdkRenameStateDestroy(&RenameState);
7602 LogFlowFunc(("returns %Rrc\n", rc));
7603 return rc;
7604}
7605
7606/** @copydoc VDIMAGEBACKEND::pfnClose */
7607static DECLCALLBACK(int) vmdkClose(void *pBackendData, bool fDelete)
7608{
7609 LogFlowFunc(("pBackendData=%#p fDelete=%d\n", pBackendData, fDelete));
7610 PVMDKIMAGE pImage = (PVMDKIMAGE)pBackendData;
7611
7612 int rc = vmdkFreeImage(pImage, fDelete, true /*fFlush*/);
7613 RTMemFree(pImage);
7614
7615 LogFlowFunc(("returns %Rrc\n", rc));
7616 return rc;
7617}
7618
/** @copydoc VDIMAGEBACKEND::pfnRead */
static DECLCALLBACK(int) vmdkRead(void *pBackendData, uint64_t uOffset, size_t cbToRead,
                                  PVDIOCTX pIoCtx, size_t *pcbActuallyRead)
{
    LogFlowFunc(("pBackendData=%#p uOffset=%llu pIoCtx=%#p cbToRead=%zu pcbActuallyRead=%#p\n",
                 pBackendData, uOffset, pIoCtx, cbToRead, pcbActuallyRead));
    PVMDKIMAGE pImage = (PVMDKIMAGE)pBackendData;

    AssertPtr(pImage);
    Assert(uOffset % 512 == 0);
    Assert(cbToRead % 512 == 0);
    AssertPtrReturn(pIoCtx, VERR_INVALID_POINTER);
    AssertReturn(cbToRead, VERR_INVALID_PARAMETER);
    AssertReturn(uOffset + cbToRead <= pImage->cbSize, VERR_INVALID_PARAMETER);

    /* Find the extent and check access permissions as defined in the extent descriptor. */
    PVMDKEXTENT pExtent;
    uint64_t uSectorExtentRel;
    int rc = vmdkFindExtent(pImage, VMDK_BYTE2SECTOR(uOffset),
                            &pExtent, &uSectorExtentRel);
    if (   RT_SUCCESS(rc)
        && pExtent->enmAccess != VMDKACCESS_NOACCESS)
    {
        /* Clip read range to remain in this extent. */
        cbToRead = RT_MIN(cbToRead, VMDK_SECTOR2BYTE(pExtent->uSectorOffset + pExtent->cNominalSectors - uSectorExtentRel));

        /* Handle the read according to the current extent type. */
        switch (pExtent->enmType)
        {
            case VMDKETYPE_HOSTED_SPARSE:
            {
                uint64_t uSectorExtentAbs;

                /* Translate the extent-relative sector to the absolute
                 * position in the extent file; 0 means unallocated grain. */
                rc = vmdkGetSector(pImage, pIoCtx, pExtent, uSectorExtentRel, &uSectorExtentAbs);
                if (RT_FAILURE(rc))
                    break;
                /* Clip read range to at most the rest of the grain. */
                cbToRead = RT_MIN(cbToRead, VMDK_SECTOR2BYTE(pExtent->cSectorsPerGrain - uSectorExtentRel % pExtent->cSectorsPerGrain));
                Assert(!(cbToRead % 512));
                if (uSectorExtentAbs == 0)
                {
                    /* Unallocated grain: report VERR_VD_BLOCK_FREE so the
                     * caller can fall back to a parent image, except for
                     * read-only sequential access to stream-optimized images,
                     * which must scan the compressed grain stream instead. */
                    if (   !(pImage->uImageFlags & VD_VMDK_IMAGE_FLAGS_STREAM_OPTIMIZED)
                        || !(pImage->uOpenFlags & VD_OPEN_FLAGS_READONLY)
                        || !(pImage->uOpenFlags & VD_OPEN_FLAGS_SEQUENTIAL))
                        rc = VERR_VD_BLOCK_FREE;
                    else
                        rc = vmdkStreamReadSequential(pImage, pExtent,
                                                      uSectorExtentRel,
                                                      pIoCtx, cbToRead);
                }
                else
                {
                    if (pImage->uImageFlags & VD_VMDK_IMAGE_FLAGS_STREAM_OPTIMIZED)
                    {
                        AssertMsg(vdIfIoIntIoCtxIsSynchronous(pImage->pIfIo, pIoCtx),
                                  ("Async I/O is not supported for stream optimized VMDK's\n"));

                        /* Round down to the grain start; the extent caches one
                         * decompressed grain (pvGrain / uGrainSectorAbs), so
                         * re-inflate only when a different grain is needed. */
                        uint32_t uSectorInGrain = uSectorExtentRel % pExtent->cSectorsPerGrain;
                        uSectorExtentAbs -= uSectorInGrain;
                        if (pExtent->uGrainSectorAbs != uSectorExtentAbs)
                        {
                            uint64_t uLBA = 0; /* gcc maybe uninitialized */
                            rc = vmdkFileInflateSync(pImage, pExtent,
                                                     VMDK_SECTOR2BYTE(uSectorExtentAbs),
                                                     pExtent->pvGrain,
                                                     VMDK_SECTOR2BYTE(pExtent->cSectorsPerGrain),
                                                     NULL, &uLBA, NULL);
                            if (RT_FAILURE(rc))
                            {
                                /* Invalidate the grain cache on error. */
                                pExtent->uGrainSectorAbs = 0;
                                break;
                            }
                            pExtent->uGrainSectorAbs = uSectorExtentAbs;
                            pExtent->uGrain = uSectorExtentRel / pExtent->cSectorsPerGrain;
                            Assert(uLBA == uSectorExtentRel);
                        }
                        vdIfIoIntIoCtxCopyTo(pImage->pIfIo, pIoCtx,
                                             (uint8_t *)pExtent->pvGrain
                                             + VMDK_SECTOR2BYTE(uSectorInGrain),
                                             cbToRead);
                    }
                    else
                        rc = vdIfIoIntFileReadUser(pImage->pIfIo, pExtent->pFile->pStorage,
                                                   VMDK_SECTOR2BYTE(uSectorExtentAbs),
                                                   pIoCtx, cbToRead);
                }
                break;
            }
            case VMDKETYPE_VMFS:
            case VMDKETYPE_FLAT:
                /* Flat extents map 1:1 to the file, read directly. */
                rc = vdIfIoIntFileReadUser(pImage->pIfIo, pExtent->pFile->pStorage,
                                           VMDK_SECTOR2BYTE(uSectorExtentRel),
                                           pIoCtx, cbToRead);
                break;
            case VMDKETYPE_ZERO:
            {
                /* Zero extents have no backing storage; synthesize zeros. */
                size_t cbSet = vdIfIoIntIoCtxSet(pImage->pIfIo, pIoCtx, 0, cbToRead);
                Assert(cbSet == cbToRead); RT_NOREF(cbSet);
                break;
            }
        }
        if (pcbActuallyRead)
            *pcbActuallyRead = cbToRead;
    }
    else if (RT_SUCCESS(rc))
        rc = VERR_VD_VMDK_INVALID_STATE;

    LogFlowFunc(("returns %Rrc\n", rc));
    return rc;
}
7729
/** @copydoc VDIMAGEBACKEND::pfnWrite */
static DECLCALLBACK(int) vmdkWrite(void *pBackendData, uint64_t uOffset, size_t cbToWrite,
                                   PVDIOCTX pIoCtx, size_t *pcbWriteProcess, size_t *pcbPreRead,
                                   size_t *pcbPostRead, unsigned fWrite)
{
    LogFlowFunc(("pBackendData=%#p uOffset=%llu pIoCtx=%#p cbToWrite=%zu pcbWriteProcess=%#p pcbPreRead=%#p pcbPostRead=%#p\n",
                 pBackendData, uOffset, pIoCtx, cbToWrite, pcbWriteProcess, pcbPreRead, pcbPostRead));
    PVMDKIMAGE pImage = (PVMDKIMAGE)pBackendData;
    int rc;

    /* Offset and length must be sector (512 byte) aligned; a zero-length
     * write is rejected outright. */
    AssertPtr(pImage);
    Assert(uOffset % 512 == 0);
    Assert(cbToWrite % 512 == 0);
    AssertPtrReturn(pIoCtx, VERR_INVALID_POINTER);
    AssertReturn(cbToWrite, VERR_INVALID_PARAMETER);

    if (!(pImage->uOpenFlags & VD_OPEN_FLAGS_READONLY))
    {
        PVMDKEXTENT pExtent;
        uint64_t uSectorExtentRel;
        uint64_t uSectorExtentAbs;

        /* No size check here, will do that later when the extent is located.
         * There are sparse images out there which according to the spec are
         * invalid, because the total size is not a multiple of the grain size.
         * Also for sparse images which are stitched together in odd ways (not at
         * grain boundaries, and with the nominal size not being a multiple of the
         * grain size), this would prevent writing to the last grain. */

        rc = vmdkFindExtent(pImage, VMDK_BYTE2SECTOR(uOffset),
                            &pExtent, &uSectorExtentRel);
        if (RT_SUCCESS(rc))
        {
            /* NOTE(review): this access-mode gate is unusual -- it only fails a
             * non-READWRITE extent when the image is NOT streamOptimized, has no
             * pending append position on extent 0, and the extent is not READONLY
             * either. Confirm the intended precedence before touching it. */
            if (   pExtent->enmAccess != VMDKACCESS_READWRITE
                && (   !(pImage->uImageFlags & VD_VMDK_IMAGE_FLAGS_STREAM_OPTIMIZED)
                    && !pImage->pExtents[0].uAppendPosition
                    && pExtent->enmAccess != VMDKACCESS_READONLY))
                rc = VERR_VD_VMDK_INVALID_STATE;
            else
            {
                /* Handle the write according to the current extent type. */
                switch (pExtent->enmType)
                {
                    case VMDKETYPE_HOSTED_SPARSE:
                        /* Look up the absolute sector of the grain containing the
                         * write target; 0 means "grain not allocated yet". */
                        rc = vmdkGetSector(pImage, pIoCtx, pExtent, uSectorExtentRel, &uSectorExtentAbs);
                        if (RT_SUCCESS(rc))
                        {
                            /* streamOptimized images are append-only: writing before
                             * the last accessed grain would require rewriting
                             * already-compressed data. */
                            if (    pImage->uImageFlags & VD_VMDK_IMAGE_FLAGS_STREAM_OPTIMIZED
                                &&  uSectorExtentRel < (uint64_t)pExtent->uLastGrainAccess * pExtent->cSectorsPerGrain)
                                rc = VERR_VD_VMDK_INVALID_WRITE;
                            else
                            {
                                /* Clip write range to at most the rest of the grain. */
                                cbToWrite = RT_MIN(cbToWrite,
                                                   VMDK_SECTOR2BYTE(  pExtent->cSectorsPerGrain
                                                                    - uSectorExtentRel % pExtent->cSectorsPerGrain));
                                if (uSectorExtentAbs == 0)
                                {
                                    if (!(pImage->uImageFlags & VD_VMDK_IMAGE_FLAGS_STREAM_OPTIMIZED))
                                    {
                                        if (cbToWrite == VMDK_SECTOR2BYTE(pExtent->cSectorsPerGrain))
                                        {
                                            /* Full block write to a previously unallocated block.
                                             * Check if the caller wants to avoid the automatic alloc. */
                                            if (!(fWrite & VD_WRITE_NO_ALLOC))
                                            {
                                                /* Allocate GT and find out where to store the grain. */
                                                rc = vmdkAllocGrain(pImage, pExtent, pIoCtx,
                                                                    uSectorExtentRel, cbToWrite);
                                            }
                                            else
                                                rc = VERR_VD_BLOCK_FREE;
                                            *pcbPreRead = 0;
                                            *pcbPostRead = 0;
                                        }
                                        else
                                        {
                                            /* Partial grain write: report the pre/post read
                                             * ranges so the caller can do a read-modify-write. */
                                            /* Clip write range to remain in this extent. */
                                            cbToWrite = RT_MIN(cbToWrite,
                                                               VMDK_SECTOR2BYTE(  pExtent->uSectorOffset
                                                                                + pExtent->cNominalSectors - uSectorExtentRel));
                                            *pcbPreRead = VMDK_SECTOR2BYTE(uSectorExtentRel % pExtent->cSectorsPerGrain);
                                            *pcbPostRead = VMDK_SECTOR2BYTE(pExtent->cSectorsPerGrain) - cbToWrite - *pcbPreRead;
                                            rc = VERR_VD_BLOCK_FREE;
                                        }
                                    }
                                    else
                                        rc = vmdkStreamAllocGrain(pImage, pExtent, uSectorExtentRel,
                                                                  pIoCtx, cbToWrite);
                                }
                                else
                                {
                                    if (pImage->uImageFlags & VD_VMDK_IMAGE_FLAGS_STREAM_OPTIMIZED)
                                    {
                                        /* A partial write to a streamOptimized image is simply
                                         * invalid. It requires rewriting already compressed data
                                         * which is somewhere between expensive and impossible. */
                                        rc = VERR_VD_VMDK_INVALID_STATE;
                                        pExtent->uGrainSectorAbs = 0;
                                        AssertRC(rc);
                                    }
                                    else
                                    {
                                        /* Grain already allocated: plain in-place write. */
                                        Assert(!(pImage->uImageFlags & VD_VMDK_IMAGE_FLAGS_STREAM_OPTIMIZED));
                                        rc = vdIfIoIntFileWriteUser(pImage->pIfIo, pExtent->pFile->pStorage,
                                                                    VMDK_SECTOR2BYTE(uSectorExtentAbs),
                                                                    pIoCtx, cbToWrite, NULL, NULL);
                                    }
                                }
                            }
                        }
                        break;
                    case VMDKETYPE_VMFS:
                    case VMDKETYPE_FLAT:
                        /* Clip write range to remain in this extent. */
                        cbToWrite = RT_MIN(cbToWrite, VMDK_SECTOR2BYTE(pExtent->uSectorOffset + pExtent->cNominalSectors - uSectorExtentRel));
                        rc = vdIfIoIntFileWriteUser(pImage->pIfIo, pExtent->pFile->pStorage,
                                                    VMDK_SECTOR2BYTE(uSectorExtentRel),
                                                    pIoCtx, cbToWrite, NULL, NULL);
                        break;
                    case VMDKETYPE_ZERO:
                        /* Writes to zero extents are silently dropped;
                         * only the range is clipped. */
                        /* Clip write range to remain in this extent. */
                        cbToWrite = RT_MIN(cbToWrite, VMDK_SECTOR2BYTE(pExtent->uSectorOffset + pExtent->cNominalSectors - uSectorExtentRel));
                        break;
                }
            }

            /* Report how many bytes of the request this call covered. */
            if (pcbWriteProcess)
                *pcbWriteProcess = cbToWrite;
        }
    }
    else
        rc = VERR_VD_IMAGE_READ_ONLY;

    LogFlowFunc(("returns %Rrc\n", rc));
    return rc;
}
7867
7868/** @copydoc VDIMAGEBACKEND::pfnFlush */
7869static DECLCALLBACK(int) vmdkFlush(void *pBackendData, PVDIOCTX pIoCtx)
7870{
7871 PVMDKIMAGE pImage = (PVMDKIMAGE)pBackendData;
7872
7873 return vmdkFlushImage(pImage, pIoCtx);
7874}
7875
7876/** @copydoc VDIMAGEBACKEND::pfnGetVersion */
7877static DECLCALLBACK(unsigned) vmdkGetVersion(void *pBackendData)
7878{
7879 LogFlowFunc(("pBackendData=%#p\n", pBackendData));
7880 PVMDKIMAGE pImage = (PVMDKIMAGE)pBackendData;
7881
7882 AssertPtrReturn(pImage, 0);
7883
7884 return VMDK_IMAGE_VERSION;
7885}
7886
7887/** @copydoc VDIMAGEBACKEND::pfnGetFileSize */
7888static DECLCALLBACK(uint64_t) vmdkGetFileSize(void *pBackendData)
7889{
7890 LogFlowFunc(("pBackendData=%#p\n", pBackendData));
7891 PVMDKIMAGE pImage = (PVMDKIMAGE)pBackendData;
7892 uint64_t cb = 0;
7893
7894 AssertPtrReturn(pImage, 0);
7895
7896 if (pImage->pFile != NULL)
7897 {
7898 uint64_t cbFile;
7899 int rc = vdIfIoIntFileGetSize(pImage->pIfIo, pImage->pFile->pStorage, &cbFile);
7900 if (RT_SUCCESS(rc))
7901 cb += cbFile;
7902 }
7903 for (unsigned i = 0; i < pImage->cExtents; i++)
7904 {
7905 if (pImage->pExtents[i].pFile != NULL)
7906 {
7907 uint64_t cbFile;
7908 int rc = vdIfIoIntFileGetSize(pImage->pIfIo, pImage->pExtents[i].pFile->pStorage, &cbFile);
7909 if (RT_SUCCESS(rc))
7910 cb += cbFile;
7911 }
7912 }
7913
7914 LogFlowFunc(("returns %lld\n", cb));
7915 return cb;
7916}
7917
7918/** @copydoc VDIMAGEBACKEND::pfnGetPCHSGeometry */
7919static DECLCALLBACK(int) vmdkGetPCHSGeometry(void *pBackendData, PVDGEOMETRY pPCHSGeometry)
7920{
7921 LogFlowFunc(("pBackendData=%#p pPCHSGeometry=%#p\n", pBackendData, pPCHSGeometry));
7922 PVMDKIMAGE pImage = (PVMDKIMAGE)pBackendData;
7923 int rc = VINF_SUCCESS;
7924
7925 AssertPtrReturn(pImage, VERR_VD_NOT_OPENED);
7926
7927 if (pImage->PCHSGeometry.cCylinders)
7928 *pPCHSGeometry = pImage->PCHSGeometry;
7929 else
7930 rc = VERR_VD_GEOMETRY_NOT_SET;
7931
7932 LogFlowFunc(("returns %Rrc (PCHS=%u/%u/%u)\n", rc, pPCHSGeometry->cCylinders, pPCHSGeometry->cHeads, pPCHSGeometry->cSectors));
7933 return rc;
7934}
7935
7936/** @copydoc VDIMAGEBACKEND::pfnSetPCHSGeometry */
7937static DECLCALLBACK(int) vmdkSetPCHSGeometry(void *pBackendData, PCVDGEOMETRY pPCHSGeometry)
7938{
7939 LogFlowFunc(("pBackendData=%#p pPCHSGeometry=%#p PCHS=%u/%u/%u\n",
7940 pBackendData, pPCHSGeometry, pPCHSGeometry->cCylinders, pPCHSGeometry->cHeads, pPCHSGeometry->cSectors));
7941 PVMDKIMAGE pImage = (PVMDKIMAGE)pBackendData;
7942 int rc = VINF_SUCCESS;
7943
7944 AssertPtrReturn(pImage, VERR_VD_NOT_OPENED);
7945
7946 if (!(pImage->uOpenFlags & VD_OPEN_FLAGS_READONLY))
7947 {
7948 if (!(pImage->uOpenFlags & VD_VMDK_IMAGE_FLAGS_STREAM_OPTIMIZED))
7949 {
7950 rc = vmdkDescSetPCHSGeometry(pImage, pPCHSGeometry);
7951 if (RT_SUCCESS(rc))
7952 pImage->PCHSGeometry = *pPCHSGeometry;
7953 }
7954 else
7955 rc = VERR_NOT_SUPPORTED;
7956 }
7957 else
7958 rc = VERR_VD_IMAGE_READ_ONLY;
7959
7960 LogFlowFunc(("returns %Rrc\n", rc));
7961 return rc;
7962}
7963
7964/** @copydoc VDIMAGEBACKEND::pfnGetLCHSGeometry */
7965static DECLCALLBACK(int) vmdkGetLCHSGeometry(void *pBackendData, PVDGEOMETRY pLCHSGeometry)
7966{
7967 LogFlowFunc(("pBackendData=%#p pLCHSGeometry=%#p\n", pBackendData, pLCHSGeometry));
7968 PVMDKIMAGE pImage = (PVMDKIMAGE)pBackendData;
7969 int rc = VINF_SUCCESS;
7970
7971 AssertPtrReturn(pImage, VERR_VD_NOT_OPENED);
7972
7973 if (pImage->LCHSGeometry.cCylinders)
7974 *pLCHSGeometry = pImage->LCHSGeometry;
7975 else
7976 rc = VERR_VD_GEOMETRY_NOT_SET;
7977
7978 LogFlowFunc(("returns %Rrc (LCHS=%u/%u/%u)\n", rc, pLCHSGeometry->cCylinders, pLCHSGeometry->cHeads, pLCHSGeometry->cSectors));
7979 return rc;
7980}
7981
7982/** @copydoc VDIMAGEBACKEND::pfnSetLCHSGeometry */
7983static DECLCALLBACK(int) vmdkSetLCHSGeometry(void *pBackendData, PCVDGEOMETRY pLCHSGeometry)
7984{
7985 LogFlowFunc(("pBackendData=%#p pLCHSGeometry=%#p LCHS=%u/%u/%u\n",
7986 pBackendData, pLCHSGeometry, pLCHSGeometry->cCylinders, pLCHSGeometry->cHeads, pLCHSGeometry->cSectors));
7987 PVMDKIMAGE pImage = (PVMDKIMAGE)pBackendData;
7988 int rc = VINF_SUCCESS;
7989
7990 AssertPtrReturn(pImage, VERR_VD_NOT_OPENED);
7991
7992 if (!(pImage->uOpenFlags & VD_OPEN_FLAGS_READONLY))
7993 {
7994 if (!(pImage->uOpenFlags & VD_VMDK_IMAGE_FLAGS_STREAM_OPTIMIZED))
7995 {
7996 rc = vmdkDescSetLCHSGeometry(pImage, pLCHSGeometry);
7997 if (RT_SUCCESS(rc))
7998 pImage->LCHSGeometry = *pLCHSGeometry;
7999 }
8000 else
8001 rc = VERR_NOT_SUPPORTED;
8002 }
8003 else
8004 rc = VERR_VD_IMAGE_READ_ONLY;
8005
8006 LogFlowFunc(("returns %Rrc\n", rc));
8007 return rc;
8008}
8009
8010/** @copydoc VDIMAGEBACKEND::pfnQueryRegions */
8011static DECLCALLBACK(int) vmdkQueryRegions(void *pBackendData, PCVDREGIONLIST *ppRegionList)
8012{
8013 LogFlowFunc(("pBackendData=%#p ppRegionList=%#p\n", pBackendData, ppRegionList));
8014 PVMDKIMAGE pThis = (PVMDKIMAGE)pBackendData;
8015
8016 AssertPtrReturn(pThis, VERR_VD_NOT_OPENED);
8017
8018 *ppRegionList = &pThis->RegionList;
8019 LogFlowFunc(("returns %Rrc\n", VINF_SUCCESS));
8020 return VINF_SUCCESS;
8021}
8022
8023/** @copydoc VDIMAGEBACKEND::pfnRegionListRelease */
8024static DECLCALLBACK(void) vmdkRegionListRelease(void *pBackendData, PCVDREGIONLIST pRegionList)
8025{
8026 RT_NOREF1(pRegionList);
8027 LogFlowFunc(("pBackendData=%#p pRegionList=%#p\n", pBackendData, pRegionList));
8028 PVMDKIMAGE pThis = (PVMDKIMAGE)pBackendData;
8029 AssertPtr(pThis); RT_NOREF(pThis);
8030
8031 /* Nothing to do here. */
8032}
8033
8034/** @copydoc VDIMAGEBACKEND::pfnGetImageFlags */
8035static DECLCALLBACK(unsigned) vmdkGetImageFlags(void *pBackendData)
8036{
8037 LogFlowFunc(("pBackendData=%#p\n", pBackendData));
8038 PVMDKIMAGE pImage = (PVMDKIMAGE)pBackendData;
8039
8040 AssertPtrReturn(pImage, 0);
8041
8042 LogFlowFunc(("returns %#x\n", pImage->uImageFlags));
8043 return pImage->uImageFlags;
8044}
8045
8046/** @copydoc VDIMAGEBACKEND::pfnGetOpenFlags */
8047static DECLCALLBACK(unsigned) vmdkGetOpenFlags(void *pBackendData)
8048{
8049 LogFlowFunc(("pBackendData=%#p\n", pBackendData));
8050 PVMDKIMAGE pImage = (PVMDKIMAGE)pBackendData;
8051
8052 AssertPtrReturn(pImage, 0);
8053
8054 LogFlowFunc(("returns %#x\n", pImage->uOpenFlags));
8055 return pImage->uOpenFlags;
8056}
8057
8058/** @copydoc VDIMAGEBACKEND::pfnSetOpenFlags */
8059static DECLCALLBACK(int) vmdkSetOpenFlags(void *pBackendData, unsigned uOpenFlags)
8060{
8061 LogFlowFunc(("pBackendData=%#p uOpenFlags=%#x\n", pBackendData, uOpenFlags));
8062 PVMDKIMAGE pImage = (PVMDKIMAGE)pBackendData;
8063 int rc;
8064
8065 /* Image must be opened and the new flags must be valid. */
8066 if (!pImage || (uOpenFlags & ~( VD_OPEN_FLAGS_READONLY | VD_OPEN_FLAGS_INFO
8067 | VD_OPEN_FLAGS_ASYNC_IO | VD_OPEN_FLAGS_SHAREABLE
8068 | VD_OPEN_FLAGS_SEQUENTIAL | VD_OPEN_FLAGS_SKIP_CONSISTENCY_CHECKS)))
8069 rc = VERR_INVALID_PARAMETER;
8070 else
8071 {
8072 /* StreamOptimized images need special treatment: reopen is prohibited. */
8073 if (pImage->uImageFlags & VD_VMDK_IMAGE_FLAGS_STREAM_OPTIMIZED)
8074 {
8075 if (pImage->uOpenFlags == uOpenFlags)
8076 rc = VINF_SUCCESS;
8077 else
8078 rc = VERR_INVALID_PARAMETER;
8079 }
8080 else
8081 {
8082 /* Implement this operation via reopening the image. */
8083 vmdkFreeImage(pImage, false, true /*fFlush*/);
8084 rc = vmdkOpenImage(pImage, uOpenFlags);
8085 }
8086 }
8087
8088 LogFlowFunc(("returns %Rrc\n", rc));
8089 return rc;
8090}
8091
8092/** @copydoc VDIMAGEBACKEND::pfnGetComment */
8093static DECLCALLBACK(int) vmdkGetComment(void *pBackendData, char *pszComment, size_t cbComment)
8094{
8095 LogFlowFunc(("pBackendData=%#p pszComment=%#p cbComment=%zu\n", pBackendData, pszComment, cbComment));
8096 PVMDKIMAGE pImage = (PVMDKIMAGE)pBackendData;
8097
8098 AssertPtrReturn(pImage, VERR_VD_NOT_OPENED);
8099
8100 char *pszCommentEncoded = NULL;
8101 int rc = vmdkDescDDBGetStr(pImage, &pImage->Descriptor,
8102 "ddb.comment", &pszCommentEncoded);
8103 if (rc == VERR_VD_VMDK_VALUE_NOT_FOUND)
8104 {
8105 pszCommentEncoded = NULL;
8106 rc = VINF_SUCCESS;
8107 }
8108
8109 if (RT_SUCCESS(rc))
8110 {
8111 if (pszComment && pszCommentEncoded)
8112 rc = vmdkDecodeString(pszCommentEncoded, pszComment, cbComment);
8113 else if (pszComment)
8114 *pszComment = '\0';
8115
8116 if (pszCommentEncoded)
8117 RTMemTmpFree(pszCommentEncoded);
8118 }
8119
8120 LogFlowFunc(("returns %Rrc comment='%s'\n", rc, pszComment));
8121 return rc;
8122}
8123
8124/** @copydoc VDIMAGEBACKEND::pfnSetComment */
8125static DECLCALLBACK(int) vmdkSetComment(void *pBackendData, const char *pszComment)
8126{
8127 LogFlowFunc(("pBackendData=%#p pszComment=\"%s\"\n", pBackendData, pszComment));
8128 PVMDKIMAGE pImage = (PVMDKIMAGE)pBackendData;
8129 int rc;
8130
8131 AssertPtrReturn(pImage, VERR_VD_NOT_OPENED);
8132
8133 if (!(pImage->uOpenFlags & VD_OPEN_FLAGS_READONLY))
8134 {
8135 if (!(pImage->uOpenFlags & VD_VMDK_IMAGE_FLAGS_STREAM_OPTIMIZED))
8136 rc = vmdkSetImageComment(pImage, pszComment);
8137 else
8138 rc = VERR_NOT_SUPPORTED;
8139 }
8140 else
8141 rc = VERR_VD_IMAGE_READ_ONLY;
8142
8143 LogFlowFunc(("returns %Rrc\n", rc));
8144 return rc;
8145}
8146
8147/** @copydoc VDIMAGEBACKEND::pfnGetUuid */
8148static DECLCALLBACK(int) vmdkGetUuid(void *pBackendData, PRTUUID pUuid)
8149{
8150 LogFlowFunc(("pBackendData=%#p pUuid=%#p\n", pBackendData, pUuid));
8151 PVMDKIMAGE pImage = (PVMDKIMAGE)pBackendData;
8152
8153 AssertPtrReturn(pImage, VERR_VD_NOT_OPENED);
8154
8155 *pUuid = pImage->ImageUuid;
8156
8157 LogFlowFunc(("returns %Rrc (%RTuuid)\n", VINF_SUCCESS, pUuid));
8158 return VINF_SUCCESS;
8159}
8160
8161/** @copydoc VDIMAGEBACKEND::pfnSetUuid */
8162static DECLCALLBACK(int) vmdkSetUuid(void *pBackendData, PCRTUUID pUuid)
8163{
8164 LogFlowFunc(("pBackendData=%#p Uuid=%RTuuid\n", pBackendData, pUuid));
8165 PVMDKIMAGE pImage = (PVMDKIMAGE)pBackendData;
8166 int rc = VINF_SUCCESS;
8167
8168 AssertPtrReturn(pImage, VERR_VD_NOT_OPENED);
8169
8170 if (!(pImage->uOpenFlags & VD_OPEN_FLAGS_READONLY))
8171 {
8172 if (!(pImage->uOpenFlags & VD_VMDK_IMAGE_FLAGS_STREAM_OPTIMIZED))
8173 {
8174 pImage->ImageUuid = *pUuid;
8175 rc = vmdkDescDDBSetUuid(pImage, &pImage->Descriptor,
8176 VMDK_DDB_IMAGE_UUID, pUuid);
8177 if (RT_FAILURE(rc))
8178 rc = vdIfError(pImage->pIfError, rc, RT_SRC_POS,
8179 N_("VMDK: error storing image UUID in descriptor in '%s'"), pImage->pszFilename);
8180 }
8181 else
8182 rc = VERR_NOT_SUPPORTED;
8183 }
8184 else
8185 rc = VERR_VD_IMAGE_READ_ONLY;
8186
8187 LogFlowFunc(("returns %Rrc\n", rc));
8188 return rc;
8189}
8190
8191/** @copydoc VDIMAGEBACKEND::pfnGetModificationUuid */
8192static DECLCALLBACK(int) vmdkGetModificationUuid(void *pBackendData, PRTUUID pUuid)
8193{
8194 LogFlowFunc(("pBackendData=%#p pUuid=%#p\n", pBackendData, pUuid));
8195 PVMDKIMAGE pImage = (PVMDKIMAGE)pBackendData;
8196
8197 AssertPtrReturn(pImage, VERR_VD_NOT_OPENED);
8198
8199 *pUuid = pImage->ModificationUuid;
8200
8201 LogFlowFunc(("returns %Rrc (%RTuuid)\n", VINF_SUCCESS, pUuid));
8202 return VINF_SUCCESS;
8203}
8204
8205/** @copydoc VDIMAGEBACKEND::pfnSetModificationUuid */
8206static DECLCALLBACK(int) vmdkSetModificationUuid(void *pBackendData, PCRTUUID pUuid)
8207{
8208 LogFlowFunc(("pBackendData=%#p Uuid=%RTuuid\n", pBackendData, pUuid));
8209 PVMDKIMAGE pImage = (PVMDKIMAGE)pBackendData;
8210 int rc = VINF_SUCCESS;
8211
8212 AssertPtrReturn(pImage, VERR_VD_NOT_OPENED);
8213
8214 if (!(pImage->uOpenFlags & VD_OPEN_FLAGS_READONLY))
8215 {
8216 if (!(pImage->uOpenFlags & VD_VMDK_IMAGE_FLAGS_STREAM_OPTIMIZED))
8217 {
8218 /* Only touch the modification uuid if it changed. */
8219 if (RTUuidCompare(&pImage->ModificationUuid, pUuid))
8220 {
8221 pImage->ModificationUuid = *pUuid;
8222 rc = vmdkDescDDBSetUuid(pImage, &pImage->Descriptor,
8223 VMDK_DDB_MODIFICATION_UUID, pUuid);
8224 if (RT_FAILURE(rc))
8225 rc = vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: error storing modification UUID in descriptor in '%s'"), pImage->pszFilename);
8226 }
8227 }
8228 else
8229 rc = VERR_NOT_SUPPORTED;
8230 }
8231 else
8232 rc = VERR_VD_IMAGE_READ_ONLY;
8233
8234 LogFlowFunc(("returns %Rrc\n", rc));
8235 return rc;
8236}
8237
8238/** @copydoc VDIMAGEBACKEND::pfnGetParentUuid */
8239static DECLCALLBACK(int) vmdkGetParentUuid(void *pBackendData, PRTUUID pUuid)
8240{
8241 LogFlowFunc(("pBackendData=%#p pUuid=%#p\n", pBackendData, pUuid));
8242 PVMDKIMAGE pImage = (PVMDKIMAGE)pBackendData;
8243
8244 AssertPtrReturn(pImage, VERR_VD_NOT_OPENED);
8245
8246 *pUuid = pImage->ParentUuid;
8247
8248 LogFlowFunc(("returns %Rrc (%RTuuid)\n", VINF_SUCCESS, pUuid));
8249 return VINF_SUCCESS;
8250}
8251
8252/** @copydoc VDIMAGEBACKEND::pfnSetParentUuid */
8253static DECLCALLBACK(int) vmdkSetParentUuid(void *pBackendData, PCRTUUID pUuid)
8254{
8255 LogFlowFunc(("pBackendData=%#p Uuid=%RTuuid\n", pBackendData, pUuid));
8256 PVMDKIMAGE pImage = (PVMDKIMAGE)pBackendData;
8257 int rc = VINF_SUCCESS;
8258
8259 AssertPtrReturn(pImage, VERR_VD_NOT_OPENED);
8260
8261 if (!(pImage->uOpenFlags & VD_OPEN_FLAGS_READONLY))
8262 {
8263 if (!(pImage->uOpenFlags & VD_VMDK_IMAGE_FLAGS_STREAM_OPTIMIZED))
8264 {
8265 pImage->ParentUuid = *pUuid;
8266 rc = vmdkDescDDBSetUuid(pImage, &pImage->Descriptor,
8267 VMDK_DDB_PARENT_UUID, pUuid);
8268 if (RT_FAILURE(rc))
8269 rc = vdIfError(pImage->pIfError, rc, RT_SRC_POS,
8270 N_("VMDK: error storing parent image UUID in descriptor in '%s'"), pImage->pszFilename);
8271 }
8272 else
8273 rc = VERR_NOT_SUPPORTED;
8274 }
8275 else
8276 rc = VERR_VD_IMAGE_READ_ONLY;
8277
8278 LogFlowFunc(("returns %Rrc\n", rc));
8279 return rc;
8280}
8281
8282/** @copydoc VDIMAGEBACKEND::pfnGetParentModificationUuid */
8283static DECLCALLBACK(int) vmdkGetParentModificationUuid(void *pBackendData, PRTUUID pUuid)
8284{
8285 LogFlowFunc(("pBackendData=%#p pUuid=%#p\n", pBackendData, pUuid));
8286 PVMDKIMAGE pImage = (PVMDKIMAGE)pBackendData;
8287
8288 AssertPtrReturn(pImage, VERR_VD_NOT_OPENED);
8289
8290 *pUuid = pImage->ParentModificationUuid;
8291
8292 LogFlowFunc(("returns %Rrc (%RTuuid)\n", VINF_SUCCESS, pUuid));
8293 return VINF_SUCCESS;
8294}
8295
8296/** @copydoc VDIMAGEBACKEND::pfnSetParentModificationUuid */
8297static DECLCALLBACK(int) vmdkSetParentModificationUuid(void *pBackendData, PCRTUUID pUuid)
8298{
8299 LogFlowFunc(("pBackendData=%#p Uuid=%RTuuid\n", pBackendData, pUuid));
8300 PVMDKIMAGE pImage = (PVMDKIMAGE)pBackendData;
8301 int rc = VINF_SUCCESS;
8302
8303 AssertPtrReturn(pImage, VERR_VD_NOT_OPENED);
8304
8305 if (!(pImage->uOpenFlags & VD_OPEN_FLAGS_READONLY))
8306 {
8307 if (!(pImage->uOpenFlags & VD_VMDK_IMAGE_FLAGS_STREAM_OPTIMIZED))
8308 {
8309 pImage->ParentModificationUuid = *pUuid;
8310 rc = vmdkDescDDBSetUuid(pImage, &pImage->Descriptor,
8311 VMDK_DDB_PARENT_MODIFICATION_UUID, pUuid);
8312 if (RT_FAILURE(rc))
8313 rc = vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: error storing parent image UUID in descriptor in '%s'"), pImage->pszFilename);
8314 }
8315 else
8316 rc = VERR_NOT_SUPPORTED;
8317 }
8318 else
8319 rc = VERR_VD_IMAGE_READ_ONLY;
8320
8321 LogFlowFunc(("returns %Rrc\n", rc));
8322 return rc;
8323}
8324
8325/** @copydoc VDIMAGEBACKEND::pfnDump */
8326static DECLCALLBACK(void) vmdkDump(void *pBackendData)
8327{
8328 PVMDKIMAGE pImage = (PVMDKIMAGE)pBackendData;
8329
8330 AssertPtrReturnVoid(pImage);
8331 vdIfErrorMessage(pImage->pIfError, "Header: Geometry PCHS=%u/%u/%u LCHS=%u/%u/%u cbSector=%llu\n",
8332 pImage->PCHSGeometry.cCylinders, pImage->PCHSGeometry.cHeads, pImage->PCHSGeometry.cSectors,
8333 pImage->LCHSGeometry.cCylinders, pImage->LCHSGeometry.cHeads, pImage->LCHSGeometry.cSectors,
8334 VMDK_BYTE2SECTOR(pImage->cbSize));
8335 vdIfErrorMessage(pImage->pIfError, "Header: uuidCreation={%RTuuid}\n", &pImage->ImageUuid);
8336 vdIfErrorMessage(pImage->pIfError, "Header: uuidModification={%RTuuid}\n", &pImage->ModificationUuid);
8337 vdIfErrorMessage(pImage->pIfError, "Header: uuidParent={%RTuuid}\n", &pImage->ParentUuid);
8338 vdIfErrorMessage(pImage->pIfError, "Header: uuidParentModification={%RTuuid}\n", &pImage->ParentModificationUuid);
8339}
8340
8341
/**
 * Returns the size, in bytes, of the sparse extent overhead for
 * the number of desired total sectors and based on the current
 * sectors of the extent.
 *
 * @returns uint64_t size of new overhead in bytes.
 * @param   pExtent         VMDK extent instance.
 * @param   cSectorsNew     Number of desired total sectors.
 */
static uint64_t vmdkGetNewOverhead(PVMDKEXTENT pExtent, uint64_t cSectorsNew)
{
    /* One grain directory entry covers cSectorsPerGDE sectors; round up. */
    uint64_t cNewDirEntries = cSectorsNew / pExtent->cSectorsPerGDE;
    if (cSectorsNew % pExtent->cSectorsPerGDE)
        cNewDirEntries++;

    /* Directory and grain-table sizes, each padded to a full 512-byte sector. */
    size_t cbNewGD = cNewDirEntries * sizeof(uint32_t);
    uint64_t cbNewDirSize = RT_ALIGN_64(cbNewGD, 512);
    uint64_t cbNewAllTablesSize = RT_ALIGN_64(cNewDirEntries * pExtent->cGTEntries * sizeof(uint32_t), 512);
    /* Overhead = descriptor area + directory + tables, then a second
     * directory + tables copy is added below (redundant GD/GT), and the
     * total is rounded up to a whole grain.
     * NOTE(review): the RT_MAX() term adds a sector count
     * (uDescriptorSector + cDescriptorSectors) to byte sizes -- the units
     * look inconsistent; confirm against the on-disk layout before relying
     * on this value being exact. */
    uint64_t cbNewOverhead = RT_ALIGN_Z(RT_MAX(pExtent->uDescriptorSector
                                               + pExtent->cDescriptorSectors, 1)
                                        + cbNewDirSize + cbNewAllTablesSize, 512);
    cbNewOverhead += cbNewDirSize + cbNewAllTablesSize;
    cbNewOverhead = RT_ALIGN_64(cbNewOverhead,
                                VMDK_SECTOR2BYTE(pExtent->cSectorsPerGrain));

    return cbNewOverhead;
}
8369
/**
 * Internal: Replaces the size (in sectors) of an extent in the descriptor file.
 *
 * @returns VBox status code.
 * @param   pImage          VMDK image instance.
 * @param   pExtent         VMDK extent instance.
 * @param   uLine           Line number of descriptor to change.
 * @param   cSectorsOld     Existing number of sectors.
 * @param   cSectorsNew     New number of sectors.
 */
static int vmdkReplaceExtentSize(PVMDKIMAGE pImage, PVMDKEXTENT pExtent, unsigned uLine, uint64_t cSectorsOld,
                                 uint64_t cSectorsNew)
{
    char szOldExtentSectors[UINT64_MAX_BUFF_SIZE];
    char szNewExtentSectors[UINT64_MAX_BUFF_SIZE];

    /* Render both sector counts as decimal strings; RTStrPrintf2 returns a
     * negative value on overflow, which the <= 0 test catches. */
    ssize_t cbWritten = RTStrPrintf2(szOldExtentSectors, sizeof(szOldExtentSectors), "%llu", cSectorsOld);
    if (cbWritten <= 0 || cbWritten > (ssize_t)sizeof(szOldExtentSectors))
        return VERR_BUFFER_OVERFLOW;

    cbWritten = RTStrPrintf2(szNewExtentSectors, sizeof(szNewExtentSectors), "%llu", cSectorsNew);
    if (cbWritten <= 0 || cbWritten > (ssize_t)sizeof(szNewExtentSectors))
        return VERR_BUFFER_OVERFLOW;

    /* NOTE(review): the replaced line is only used as a sanity check that the
     * old size string occurs in the descriptor line; the actual update happens
     * via remove + insert below and the string is freed unused. */
    char *pszNewExtentLine = vmdkStrReplace(pImage->Descriptor.aLines[uLine],
                                            szOldExtentSectors,
                                            szNewExtentSectors);

    if (RT_UNLIKELY(!pszNewExtentLine))
        return VERR_INVALID_PARAMETER;

    /* Drop the old extent line and append a freshly formatted one with the
     * new sector count; access mode, type, basename and offset are kept. */
    vmdkDescExtRemoveByLine(pImage, &pImage->Descriptor, uLine);
    vmdkDescExtInsert(pImage, &pImage->Descriptor,
                      pExtent->enmAccess, cSectorsNew,
                      pExtent->enmType, pExtent->pszBasename, pExtent->uSectorOffset);

    RTStrFree(pszNewExtentLine);
    pszNewExtentLine = NULL;

    /* Mark the descriptor dirty so it gets flushed to disk. */
    pImage->Descriptor.fDirty = true;

    return VINF_SUCCESS;
}
8413
/**
 * Moves sectors down to make room for new overhead.
 * Used for sparse extent resize.
 *
 * Grains that currently live inside the region the grown metadata will
 * occupy are copied to the end of the file, their old location is zeroed,
 * and the (redundant and primary) grain tables are updated to point at the
 * new location.
 *
 * @returns VBox status code.
 * @param   pImage          VMDK image instance.
 * @param   pExtent         VMDK extent instance.
 * @param   cSectorsNew     Number of sectors after resize.
 */
static int vmdkRelocateSectorsForSparseResize(PVMDKIMAGE pImage, PVMDKEXTENT pExtent,
                                              uint64_t cSectorsNew)
{
    int rc = VINF_SUCCESS;

    uint64_t cbNewOverhead = vmdkGetNewOverhead(pExtent, cSectorsNew);

    uint64_t cNewOverheadSectors = VMDK_BYTE2SECTOR(cbNewOverhead);
    uint64_t cOverheadSectorDiff = cNewOverheadSectors - pExtent->cOverheadSectors;

    /* NOTE(review): the status of this size query is overwritten below without
     * being checked; on failure cbFile stays 0 -- confirm that is intended. */
    uint64_t cbFile = 0;
    rc = vdIfIoIntFileGetSize(pImage->pIfIo, pExtent->pFile->pStorage, &cbFile);

    uint64_t uNewAppendPosition;

    /* Calculate how many sectors need to be relocated. */
    /* NOTE(review): narrowing assignment from uint64_t to unsigned -- confirm
     * the overhead growth can never exceed 32 bits. */
    unsigned cSectorsReloc = cOverheadSectorDiff;
    if (cbNewOverhead % VMDK_SECTOR_SIZE)
        cSectorsReloc++;

    /* Relocated grains are appended past the current end of file, padded to
     * a 512-byte boundary. */
    if (cSectorsReloc < pExtent->cSectors)
        uNewAppendPosition = RT_ALIGN_Z(cbFile + VMDK_SECTOR2BYTE(cOverheadSectorDiff), 512);
    else
        uNewAppendPosition = cbFile;

    /*
     * Get the blocks we need to relocate first, they are appended to the end
     * of the image.
     */
    void *pvBuf = NULL, *pvZero = NULL;
    do
    {
        /* Allocate data buffer. */
        pvBuf = RTMemAllocZ(VMDK_SECTOR2BYTE(pExtent->cSectorsPerGrain));
        if (!pvBuf)
        {
            rc = VERR_NO_MEMORY;
            break;
        }

        /* Allocate buffer for overwriting with zeroes. */
        pvZero = RTMemAllocZ(VMDK_SECTOR2BYTE(pExtent->cSectorsPerGrain));
        if (!pvZero)
        {
            RTMemFree(pvBuf);
            pvBuf = NULL;

            rc = VERR_NO_MEMORY;
            break;
        }

        /* Scratch copies of one grain table (primary and redundant). */
        uint32_t *aGTDataTmp = (uint32_t *)RTMemAllocZ(sizeof(uint32_t) * pExtent->cGTEntries);
        if(!aGTDataTmp)
        {
            RTMemFree(pvBuf);
            pvBuf = NULL;

            RTMemFree(pvZero);
            pvZero = NULL;

            rc = VERR_NO_MEMORY;
            break;
        }

        uint32_t *aRGTDataTmp = (uint32_t *)RTMemAllocZ(sizeof(uint32_t) * pExtent->cGTEntries);
        if(!aRGTDataTmp)
        {
            RTMemFree(pvBuf);
            pvBuf = NULL;

            RTMemFree(pvZero);
            pvZero = NULL;

            RTMemFree(aGTDataTmp);
            aGTDataTmp = NULL;

            rc = VERR_NO_MEMORY;
            break;
        }

        /* Search for overlap sector in the grain table. */
        for (uint32_t idxGD = 0; idxGD < pExtent->cGDEntries; idxGD++)
        {
            uint64_t uGTSector = pExtent->pGD[idxGD];
            uint64_t uRGTSector = pExtent->pRGD[idxGD];

            /* Load this directory entry's grain table and its redundant copy. */
            rc = vdIfIoIntFileReadSync(pImage->pIfIo, pExtent->pFile->pStorage,
                                       VMDK_SECTOR2BYTE(uGTSector),
                                       aGTDataTmp, sizeof(uint32_t) * pExtent->cGTEntries);

            if (RT_FAILURE(rc))
                break;

            rc = vdIfIoIntFileReadSync(pImage->pIfIo, pExtent->pFile->pStorage,
                                       VMDK_SECTOR2BYTE(uRGTSector),
                                       aRGTDataTmp, sizeof(uint32_t) * pExtent->cGTEntries);

            if (RT_FAILURE(rc))
                break;

            for (uint32_t idxGT = 0; idxGT < pExtent->cGTEntries; idxGT++)
            {
                /* NOTE(review): grain table entries are 32-bit on disk, but a
                 * 64-bit little-endian conversion is applied to the 32-bit
                 * value here -- verify this is correct on big-endian hosts. */
                uint64_t aGTEntryLE = RT_LE2H_U64(aGTDataTmp[idxGT]);
                uint64_t aRGTEntryLE = RT_LE2H_U64(aRGTDataTmp[idxGT]);

                /**
                 * Check if grain table is valid. If not dump out with an error.
                 * Shoudln't ever get here (given other checks) but good sanity check.
                 */
                if (aGTEntryLE != aRGTEntryLE)
                {
                    rc = vdIfError(pImage->pIfError, VERR_VD_VMDK_INVALID_HEADER, RT_SRC_POS,
                                   N_("VMDK: inconsistent references within grain table in '%s'"), pExtent->pszFullname);
                    break;
                }

                /* A non-zero entry below the new overhead boundary is a grain
                 * that would be overwritten by the grown metadata: move it. */
                if (aGTEntryLE < cNewOverheadSectors
                    && aGTEntryLE != 0)
                {
                    /* Read data and append grain to the end of the image. */
                    rc = vdIfIoIntFileReadSync(pImage->pIfIo, pExtent->pFile->pStorage,
                                               VMDK_SECTOR2BYTE(aGTEntryLE), pvBuf,
                                               VMDK_SECTOR2BYTE(pExtent->cSectorsPerGrain));
                    if (RT_FAILURE(rc))
                        break;

                    rc = vdIfIoIntFileWriteSync(pImage->pIfIo, pExtent->pFile->pStorage,
                                                uNewAppendPosition, pvBuf,
                                                VMDK_SECTOR2BYTE(pExtent->cSectorsPerGrain));
                    if (RT_FAILURE(rc))
                        break;

                    /* Zero out the old block area. */
                    rc = vdIfIoIntFileWriteSync(pImage->pIfIo, pExtent->pFile->pStorage,
                                                VMDK_SECTOR2BYTE(aGTEntryLE), pvZero,
                                                VMDK_SECTOR2BYTE(pExtent->cSectorsPerGrain));
                    if (RT_FAILURE(rc))
                        break;

                    /* Write updated grain tables to file */
                    aGTDataTmp[idxGT] = VMDK_BYTE2SECTOR(uNewAppendPosition);
                    aRGTDataTmp[idxGT] = VMDK_BYTE2SECTOR(uNewAppendPosition);

                    if (memcmp(aGTDataTmp, aRGTDataTmp, sizeof(uint32_t) * pExtent->cGTEntries))
                    {
                        rc = vdIfError(pImage->pIfError, VERR_VD_VMDK_INVALID_HEADER, RT_SRC_POS,
                                       N_("VMDK: inconsistency between grain table and backup grain table in '%s'"), pExtent->pszFullname);
                        break;
                    }

                    rc = vdIfIoIntFileWriteSync(pImage->pIfIo, pExtent->pFile->pStorage,
                                                VMDK_SECTOR2BYTE(uGTSector),
                                                aGTDataTmp, sizeof(uint32_t) * pExtent->cGTEntries);

                    if (RT_FAILURE(rc))
                        break;

                    rc = vdIfIoIntFileWriteSync(pImage->pIfIo, pExtent->pFile->pStorage,
                                                VMDK_SECTOR2BYTE(uRGTSector),
                                                aRGTDataTmp, sizeof(uint32_t) * pExtent->cGTEntries);

                    /* NOTE(review): this break stops after the first relocated
                     * grain per grain table, and uNewAppendPosition is only
                     * advanced once after all loops -- multiple relocations
                     * would target the same append position. Verify the
                     * caller guarantees at most one overlapping grain. */
                    break;
                }
            }
        }

        RTMemFree(aGTDataTmp);
        aGTDataTmp = NULL;

        RTMemFree(aRGTDataTmp);
        aRGTDataTmp = NULL;

        if (RT_FAILURE(rc))
            break;

        uNewAppendPosition += VMDK_SECTOR2BYTE(pExtent->cSectorsPerGrain);
    } while (0);

    if (pvBuf)
    {
        RTMemFree(pvBuf);
        pvBuf = NULL;
    }

    if (pvZero)
    {
        RTMemFree(pvZero);
        pvZero = NULL;
    }

    // Update append position for extent
    pExtent->uAppendPosition = uNewAppendPosition;

    return rc;
}
8618
8619/**
8620 * Resizes meta/overhead for sparse extent resize.
8621 *
8622 * @returns VBox status code.
8623 * @param pImage VMDK image instance.
8624 * @param pExtent VMDK extent instance.
8625 * @param cSectorsNew Number of sectors after resize.
8626 */
8627static int vmdkResizeSparseMeta(PVMDKIMAGE pImage, PVMDKEXTENT pExtent,
8628 uint64_t cSectorsNew)
8629{
8630 uint32_t cOldGDEntries = pExtent->cGDEntries;
8631
8632 uint64_t cNewDirEntries = cSectorsNew / pExtent->cSectorsPerGDE;
8633 if (cSectorsNew % pExtent->cSectorsPerGDE)
8634 cNewDirEntries++;
8635
8636 size_t cbNewGD = cNewDirEntries * sizeof(uint32_t);
8637
8638 uint64_t cbNewDirSize = RT_ALIGN_64(cbNewGD, 512);
8639 uint64_t cbCurrDirSize = RT_ALIGN_64(pExtent->cGDEntries * VMDK_GRAIN_DIR_ENTRY_SIZE, 512);
8640 uint64_t cDirSectorDiff = VMDK_BYTE2SECTOR(cbNewDirSize - cbCurrDirSize);
8641
8642 uint64_t cbNewAllTablesSize = RT_ALIGN_64(cNewDirEntries * pExtent->cGTEntries * sizeof(uint32_t), 512);
8643 uint64_t cbCurrAllTablesSize = RT_ALIGN_64(pExtent->cGDEntries * VMDK_GRAIN_TABLE_SIZE, 512);
8644 uint64_t cTableSectorDiff = VMDK_BYTE2SECTOR(cbNewAllTablesSize - cbCurrAllTablesSize);
8645
8646 uint64_t cbNewOverhead = vmdkGetNewOverhead(pExtent, cSectorsNew);
8647 uint64_t cNewOverheadSectors = VMDK_BYTE2SECTOR(cbNewOverhead);
8648 uint64_t cOverheadSectorDiff = cNewOverheadSectors - pExtent->cOverheadSectors;
8649
8650 /*
8651 * Get the blocks we need to relocate first, they are appended to the end
8652 * of the image.
8653 */
8654 void *pvBuf = NULL;
8655 AssertCompile(sizeof(g_abRTZero4K) >= VMDK_GRAIN_TABLE_SIZE);
8656
8657 /* Allocate data buffer. */
8658 pvBuf = RTMemAllocZ(VMDK_GRAIN_TABLE_SIZE);
8659 if (!pvBuf)
8660 return VERR_NO_MEMORY;
8661
8662 /** @todo r=aeichner Carefully review the error handling logic, we mustn't leave the image in an inconsistent
8663 * state if something fails, apart from hard underlying I/O errors of course.
8664 * Memory allocation failures should not cause any corruptions.
8665 */
8666 int rc;
8667 do
8668 {
8669 uint32_t uGTStart = VMDK_SECTOR2BYTE(pExtent->uSectorGD) + (cOldGDEntries * VMDK_GRAIN_DIR_ENTRY_SIZE);
8670
8671 // points to last element in the grain table
8672 uint32_t uGTTail = uGTStart + (pExtent->cGDEntries * VMDK_GRAIN_TABLE_SIZE) - VMDK_GRAIN_TABLE_SIZE;
8673 uint32_t cbGTOff = RT_ALIGN_Z(VMDK_SECTOR2BYTE(cDirSectorDiff + cTableSectorDiff + cDirSectorDiff), 512);
8674
8675 /** @todo r=aeichner An error here doesn't make the whole operation fail, it just breaks out of the loop. */
8676 for (int i = pExtent->cGDEntries - 1; i >= 0; i--)
8677 {
8678 rc = vdIfIoIntFileReadSync(pImage->pIfIo, pExtent->pFile->pStorage,
8679 uGTTail, pvBuf,
8680 VMDK_GRAIN_TABLE_SIZE);
8681 if (RT_FAILURE(rc))
8682 break;
8683
8684 rc = vdIfIoIntFileWriteSync(pImage->pIfIo, pExtent->pFile->pStorage,
8685 RT_ALIGN_Z(uGTTail + cbGTOff, 512), pvBuf,
8686 VMDK_GRAIN_TABLE_SIZE);
8687 if (RT_FAILURE(rc))
8688 break;
8689
8690 // This overshoots when i == 0, but we don't need it anymore.
8691 uGTTail -= VMDK_GRAIN_TABLE_SIZE;
8692 }
8693
8694
8695 /* Find the end of the grain directory and start bumping everything down. Update locations of GT entries. */
8696 rc = vdIfIoIntFileReadSync(pImage->pIfIo, pExtent->pFile->pStorage,
8697 VMDK_SECTOR2BYTE(pExtent->uSectorGD), pvBuf,
8698 pExtent->cGDEntries * VMDK_GRAIN_DIR_ENTRY_SIZE);
8699 if (RT_FAILURE(rc))
8700 break;
8701
8702 int * tmpBuf = (int *)pvBuf;
8703
8704 for (uint32_t i = 0; i < pExtent->cGDEntries; i++)
8705 {
8706 tmpBuf[i] = tmpBuf[i] + VMDK_BYTE2SECTOR(cbGTOff);
8707 pExtent->pGD[i] = pExtent->pGD[i] + VMDK_BYTE2SECTOR(cbGTOff);
8708 }
8709
8710 rc = vdIfIoIntFileWriteSync(pImage->pIfIo, pExtent->pFile->pStorage,
8711 RT_ALIGN_Z(VMDK_SECTOR2BYTE(pExtent->uSectorGD + cTableSectorDiff + cDirSectorDiff), 512), pvBuf,
8712 pExtent->cGDEntries * VMDK_GRAIN_DIR_ENTRY_SIZE);
8713 if (RT_FAILURE(rc))
8714 break;
8715
8716 pExtent->uSectorGD = pExtent->uSectorGD + cDirSectorDiff + cTableSectorDiff;
8717
8718 /* Repeat both steps with the redundant grain table/directory. */
8719
8720 uint32_t uRGTStart = VMDK_SECTOR2BYTE(pExtent->uSectorRGD) + (cOldGDEntries * VMDK_GRAIN_DIR_ENTRY_SIZE);
8721
8722 // points to last element in the grain table
8723 uint32_t uRGTTail = uRGTStart + (pExtent->cGDEntries * VMDK_GRAIN_TABLE_SIZE) - VMDK_GRAIN_TABLE_SIZE;
8724 uint32_t cbRGTOff = RT_ALIGN_Z(VMDK_SECTOR2BYTE(cDirSectorDiff), 512);
8725
8726 for (int i = pExtent->cGDEntries - 1; i >= 0; i--)
8727 {
8728 rc = vdIfIoIntFileReadSync(pImage->pIfIo, pExtent->pFile->pStorage,
8729 uRGTTail, pvBuf,
8730 VMDK_GRAIN_TABLE_SIZE);
8731 if (RT_FAILURE(rc))
8732 break;
8733
8734 rc = vdIfIoIntFileWriteSync(pImage->pIfIo, pExtent->pFile->pStorage,
8735 RT_ALIGN_Z(uRGTTail + cbRGTOff, 512), pvBuf,
8736 VMDK_GRAIN_TABLE_SIZE);
8737 if (RT_FAILURE(rc))
8738 break;
8739
8740 // This overshoots when i == 0, but we don't need it anymore.
8741 uRGTTail -= VMDK_GRAIN_TABLE_SIZE;
8742 }
8743
8744 /* Update locations of GT entries. */
8745 rc = vdIfIoIntFileReadSync(pImage->pIfIo, pExtent->pFile->pStorage,
8746 VMDK_SECTOR2BYTE(pExtent->uSectorRGD), pvBuf,
8747 pExtent->cGDEntries * VMDK_GRAIN_DIR_ENTRY_SIZE);
8748 if (RT_FAILURE(rc))
8749 break;
8750
8751 tmpBuf = (int *)pvBuf;
8752
8753 for (uint32_t i = 0; i < pExtent->cGDEntries; i++)
8754 {
8755 tmpBuf[i] = tmpBuf[i] + cDirSectorDiff;
8756 pExtent->pRGD[i] = pExtent->pRGD[i] + cDirSectorDiff;
8757 }
8758
8759 rc = vdIfIoIntFileWriteSync(pImage->pIfIo, pExtent->pFile->pStorage,
8760 VMDK_SECTOR2BYTE(pExtent->uSectorRGD), pvBuf,
8761 pExtent->cGDEntries * VMDK_GRAIN_DIR_ENTRY_SIZE);
8762 if (RT_FAILURE(rc))
8763 break;
8764
8765 pExtent->uSectorRGD = pExtent->uSectorRGD;
8766 pExtent->cOverheadSectors += cOverheadSectorDiff;
8767
8768 } while (0);
8769
8770 RTMemFree(pvBuf);
8771 pvBuf = NULL;
8772
8773 pExtent->cGDEntries = cNewDirEntries;
8774
8775 // Allocate additional grain dir
8776 pExtent->pGD = (uint32_t *) RTMemReallocZ(pExtent->pGD, pExtent->cGDEntries * sizeof(uint32_t), cbNewGD);
8777 if (RT_LIKELY(pExtent->pGD))
8778 {
8779 if (pExtent->uSectorRGD)
8780 {
8781 pExtent->pRGD = (uint32_t *)RTMemReallocZ(pExtent->pRGD, pExtent->cGDEntries * sizeof(uint32_t), cbNewGD);
8782 if (RT_UNLIKELY(!pExtent->pRGD))
8783 return VERR_NO_MEMORY;
8784 }
8785 }
8786 else
8787 return VERR_NO_MEMORY;
8788
8789
8790 uint32_t uTmpDirVal = pExtent->pGD[cOldGDEntries - 1] + VMDK_GRAIN_DIR_ENTRY_SIZE;
8791 for (uint32_t i = cOldGDEntries; i < pExtent->cGDEntries; i++)
8792 {
8793 pExtent->pGD[i] = uTmpDirVal;
8794
8795 rc = vdIfIoIntFileWriteSync(pImage->pIfIo, pExtent->pFile->pStorage,
8796 VMDK_SECTOR2BYTE(uTmpDirVal), &g_abRTZero4K[0],
8797 VMDK_GRAIN_TABLE_SIZE);
8798
8799 if (RT_FAILURE(rc))
8800 return rc;
8801
8802 uTmpDirVal += VMDK_GRAIN_DIR_ENTRY_SIZE;
8803 }
8804
8805 uint32_t uRTmpDirVal = pExtent->pRGD[cOldGDEntries - 1] + VMDK_GRAIN_DIR_ENTRY_SIZE;
8806 for (uint32_t i = cOldGDEntries; i < pExtent->cGDEntries; i++)
8807 {
8808 pExtent->pRGD[i] = uRTmpDirVal;
8809
8810 rc = vdIfIoIntFileWriteSync(pImage->pIfIo, pExtent->pFile->pStorage,
8811 VMDK_SECTOR2BYTE(uRTmpDirVal), &g_abRTZero4K[0],
8812 VMDK_GRAIN_TABLE_SIZE);
8813
8814 if (RT_FAILURE(rc))
8815 return rc;
8816
8817 uRTmpDirVal += VMDK_GRAIN_DIR_ENTRY_SIZE;
8818 }
8819
8820 rc = vdIfIoIntFileWriteSync(pImage->pIfIo, pExtent->pFile->pStorage,
8821 VMDK_SECTOR2BYTE(pExtent->uSectorGD), pExtent->pGD,
8822 pExtent->cGDEntries * VMDK_GRAIN_DIR_ENTRY_SIZE);
8823 if (RT_FAILURE(rc))
8824 return rc;
8825
8826 rc = vdIfIoIntFileWriteSync(pImage->pIfIo, pExtent->pFile->pStorage,
8827 VMDK_SECTOR2BYTE(pExtent->uSectorRGD), pExtent->pRGD,
8828 pExtent->cGDEntries * VMDK_GRAIN_DIR_ENTRY_SIZE);
8829 if (RT_FAILURE(rc))
8830 return rc;
8831
8832 rc = vmdkReplaceExtentSize(pImage, pExtent, pImage->Descriptor.uFirstExtent + pExtent->uExtent,
8833 pExtent->cNominalSectors, cSectorsNew);
8834 if (RT_FAILURE(rc))
8835 return rc;
8836
8837 return rc;
8838}
8839
8840/** @copydoc VDIMAGEBACKEND::pfnResize */
8841static DECLCALLBACK(int) vmdkResize(void *pBackendData, uint64_t cbSize,
8842 PCVDGEOMETRY pPCHSGeometry, PCVDGEOMETRY pLCHSGeometry,
8843 unsigned uPercentStart, unsigned uPercentSpan,
8844 PVDINTERFACE pVDIfsDisk, PVDINTERFACE pVDIfsImage,
8845 PVDINTERFACE pVDIfsOperation)
8846{
8847 RT_NOREF5(uPercentStart, uPercentSpan, pVDIfsDisk, pVDIfsImage, pVDIfsOperation);
8848
8849 // Establish variables and objects needed
8850 int rc = VINF_SUCCESS;
8851 PVMDKIMAGE pImage = (PVMDKIMAGE)pBackendData;
8852 unsigned uImageFlags = pImage->uImageFlags;
8853 PVMDKEXTENT pExtent = &pImage->pExtents[0];
8854 pExtent->fMetaDirty = true;
8855
8856 uint64_t cSectorsNew = cbSize / VMDK_SECTOR_SIZE; /** < New number of sectors in the image after the resize */
8857 if (cbSize % VMDK_SECTOR_SIZE)
8858 cSectorsNew++;
8859
8860 uint64_t cSectorsOld = pImage->cbSize / VMDK_SECTOR_SIZE; /** < Number of sectors before the resize. Only for FLAT images. */
8861 if (pImage->cbSize % VMDK_SECTOR_SIZE)
8862 cSectorsOld++;
8863 unsigned cExtents = pImage->cExtents;
8864
8865 /* Check size is within min/max bounds. */
8866 if ( !(uImageFlags & VD_VMDK_IMAGE_FLAGS_RAWDISK)
8867 && ( !cbSize
8868 || (!(uImageFlags & VD_IMAGE_FLAGS_FIXED) && cbSize >= _1T * 256 - _64K)) )
8869 return VERR_VD_INVALID_SIZE;
8870
8871 /*
8872 * Making the image smaller is not supported at the moment.
8873 */
8874 /** @todo implement making the image smaller, it is the responsibility of
8875 * the user to know what they're doing. */
8876 if (cbSize < pImage->cbSize)
8877 rc = VERR_VD_SHRINK_NOT_SUPPORTED;
8878 else if (cbSize > pImage->cbSize)
8879 {
8880 /**
8881 * monolithicFlat. FIXED flag and not split up into 2 GB parts.
8882 */
8883 if ((uImageFlags & VD_IMAGE_FLAGS_FIXED) && !(uImageFlags & VD_VMDK_IMAGE_FLAGS_SPLIT_2G))
8884 {
8885 /** Required space in bytes for the extent after the resize. */
8886 uint64_t cbSectorSpaceNew = cSectorsNew * VMDK_SECTOR_SIZE;
8887 pExtent = &pImage->pExtents[0];
8888
8889 rc = vdIfIoIntFileSetAllocationSize(pImage->pIfIo, pExtent->pFile->pStorage, cbSectorSpaceNew,
8890 0 /* fFlags */, NULL,
8891 uPercentStart, uPercentSpan);
8892 if (RT_FAILURE(rc))
8893 return vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: could not set size of new file '%s'"), pExtent->pszFullname);
8894
8895 rc = vmdkReplaceExtentSize(pImage, pExtent, pImage->Descriptor.uFirstExtent, cSectorsOld, cSectorsNew);
8896 if (RT_FAILURE(rc))
8897 return rc;
8898 }
8899
8900 /**
8901 * twoGbMaxExtentFlat. FIXED flag and SPLIT into 2 GB parts.
8902 */
8903 if ((uImageFlags & VD_IMAGE_FLAGS_FIXED) && (uImageFlags & VD_VMDK_IMAGE_FLAGS_SPLIT_2G))
8904 {
8905 /* Check to see how much space remains in last extent */
8906 bool fSpaceAvailible = false;
8907 uint64_t cLastExtentRemSectors = cSectorsOld % VMDK_BYTE2SECTOR(VMDK_2G_SPLIT_SIZE);
8908 if (cLastExtentRemSectors)
8909 fSpaceAvailible = true;
8910
8911 uint64_t cSectorsNeeded = cSectorsNew - cSectorsOld;
8912
8913 /** Space remaining in current last extent file that we don't need to create another one. */
8914 if (fSpaceAvailible && cSectorsNeeded + cLastExtentRemSectors <= VMDK_BYTE2SECTOR(VMDK_2G_SPLIT_SIZE))
8915 {
8916 pExtent = &pImage->pExtents[cExtents - 1];
8917 rc = vdIfIoIntFileSetAllocationSize(pImage->pIfIo, pExtent->pFile->pStorage,
8918 VMDK_SECTOR2BYTE(cSectorsNeeded + cLastExtentRemSectors),
8919 0 /* fFlags */, NULL, uPercentStart, uPercentSpan);
8920 if (RT_FAILURE(rc))
8921 return vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: could not set size of new file '%s'"), pExtent->pszFullname);
8922
8923 rc = vmdkReplaceExtentSize(pImage, pExtent, pImage->Descriptor.uFirstExtent + cExtents - 1,
8924 pExtent->cNominalSectors, cSectorsNeeded + cLastExtentRemSectors);
8925 if (RT_FAILURE(rc))
8926 return rc;
8927 }
8928 //** Need more extent files to handle all the requested space. */
8929 else
8930 {
8931 if (fSpaceAvailible)
8932 {
8933 pExtent = &pImage->pExtents[cExtents - 1];
8934 rc = vdIfIoIntFileSetAllocationSize(pImage->pIfIo, pExtent->pFile->pStorage, VMDK_2G_SPLIT_SIZE,
8935 0 /* fFlags */, NULL,
8936 uPercentStart, uPercentSpan);
8937 if (RT_FAILURE(rc))
8938 return vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: could not set size of new file '%s'"), pExtent->pszFullname);
8939
8940 cSectorsNeeded = cSectorsNeeded - VMDK_BYTE2SECTOR(VMDK_2G_SPLIT_SIZE) + cLastExtentRemSectors;
8941
8942 rc = vmdkReplaceExtentSize(pImage, pExtent, pImage->Descriptor.uFirstExtent + cExtents - 1,
8943 pExtent->cNominalSectors, VMDK_BYTE2SECTOR(VMDK_2G_SPLIT_SIZE));
8944 if (RT_FAILURE(rc))
8945 return rc;
8946 }
8947
8948 unsigned cNewExtents = VMDK_SECTOR2BYTE(cSectorsNeeded) / VMDK_2G_SPLIT_SIZE;
8949 if (cNewExtents % VMDK_2G_SPLIT_SIZE || cNewExtents < VMDK_2G_SPLIT_SIZE)
8950 cNewExtents++;
8951
8952 for (unsigned i = cExtents;
8953 i < cExtents + cNewExtents && cSectorsNeeded >= VMDK_BYTE2SECTOR(VMDK_2G_SPLIT_SIZE);
8954 i++)
8955 {
8956 rc = vmdkAddFileBackedExtent(pImage, VMDK_2G_SPLIT_SIZE);
8957 if (RT_FAILURE(rc))
8958 return rc;
8959
8960 pExtent = &pImage->pExtents[i];
8961
8962 pExtent->cSectors = VMDK_BYTE2SECTOR(VMDK_2G_SPLIT_SIZE);
8963 cSectorsNeeded -= VMDK_BYTE2SECTOR(VMDK_2G_SPLIT_SIZE);
8964 }
8965
8966 if (cSectorsNeeded)
8967 {
8968 rc = vmdkAddFileBackedExtent(pImage, VMDK_SECTOR2BYTE(cSectorsNeeded));
8969 if (RT_FAILURE(rc))
8970 return rc;
8971 }
8972 }
8973 }
8974
8975 /**
8976 * monolithicSparse.
8977 */
8978 if (pExtent->enmType == VMDKETYPE_HOSTED_SPARSE && !(uImageFlags & VD_VMDK_IMAGE_FLAGS_SPLIT_2G))
8979 {
8980 // 1. Calculate sectors needed for new overhead.
8981
8982 uint64_t cbNewOverhead = vmdkGetNewOverhead(pExtent, cSectorsNew);
8983 uint64_t cNewOverheadSectors = VMDK_BYTE2SECTOR(cbNewOverhead);
8984 uint64_t cOverheadSectorDiff = cNewOverheadSectors - pExtent->cOverheadSectors;
8985
8986 // 2. Relocate sectors to make room for new GD/GT, update entries in GD/GT
8987 if (cOverheadSectorDiff > 0)
8988 {
8989 if (pExtent->cSectors > 0)
8990 {
8991 /* Do the relocation. */
8992 LogFlow(("Relocating VMDK sectors\n"));
8993 rc = vmdkRelocateSectorsForSparseResize(pImage, pExtent, cSectorsNew);
8994 if (RT_FAILURE(rc))
8995 return rc;
8996
8997 rc = vmdkFlushImage(pImage, NULL);
8998 if (RT_FAILURE(rc))
8999 return rc;
9000 }
9001
9002 rc = vmdkResizeSparseMeta(pImage, pExtent, cSectorsNew);
9003 if (RT_FAILURE(rc))
9004 return rc;
9005 }
9006 }
9007
9008 /**
9009 * twoGbSparseExtent
9010 */
9011 if (pExtent->enmType == VMDKETYPE_HOSTED_SPARSE && (uImageFlags & VD_VMDK_IMAGE_FLAGS_SPLIT_2G))
9012 {
9013 /* Check to see how much space remains in last extent */
9014 bool fSpaceAvailible = false;
9015 uint64_t cLastExtentRemSectors = cSectorsOld % VMDK_BYTE2SECTOR(VMDK_2G_SPLIT_SIZE);
9016 if (cLastExtentRemSectors)
9017 fSpaceAvailible = true;
9018
9019 uint64_t cSectorsNeeded = cSectorsNew - cSectorsOld;
9020
9021 if (fSpaceAvailible && cSectorsNeeded + cLastExtentRemSectors <= VMDK_BYTE2SECTOR(VMDK_2G_SPLIT_SIZE))
9022 {
9023 pExtent = &pImage->pExtents[cExtents - 1];
9024 rc = vmdkRelocateSectorsForSparseResize(pImage, pExtent, cSectorsNeeded + cLastExtentRemSectors);
9025 if (RT_FAILURE(rc))
9026 return rc;
9027
9028 rc = vmdkFlushImage(pImage, NULL);
9029 if (RT_FAILURE(rc))
9030 return rc;
9031
9032 rc = vmdkResizeSparseMeta(pImage, pExtent, cSectorsNeeded + cLastExtentRemSectors);
9033 if (RT_FAILURE(rc))
9034 return rc;
9035 }
9036 else
9037 {
9038 if (fSpaceAvailible)
9039 {
9040 pExtent = &pImage->pExtents[cExtents - 1];
9041 rc = vmdkRelocateSectorsForSparseResize(pImage, pExtent, VMDK_BYTE2SECTOR(VMDK_2G_SPLIT_SIZE));
9042 if (RT_FAILURE(rc))
9043 return rc;
9044
9045 rc = vmdkFlushImage(pImage, NULL);
9046 if (RT_FAILURE(rc))
9047 return rc;
9048
9049 rc = vmdkResizeSparseMeta(pImage, pExtent, VMDK_BYTE2SECTOR(VMDK_2G_SPLIT_SIZE));
9050 if (RT_FAILURE(rc))
9051 return rc;
9052
9053 cSectorsNeeded = cSectorsNeeded - VMDK_BYTE2SECTOR(VMDK_2G_SPLIT_SIZE) + cLastExtentRemSectors;
9054 }
9055
9056 unsigned cNewExtents = VMDK_SECTOR2BYTE(cSectorsNeeded) / VMDK_2G_SPLIT_SIZE;
9057 if (cNewExtents % VMDK_2G_SPLIT_SIZE || cNewExtents < VMDK_2G_SPLIT_SIZE)
9058 cNewExtents++;
9059
9060 for (unsigned i = cExtents;
9061 i < cExtents + cNewExtents && cSectorsNeeded >= VMDK_BYTE2SECTOR(VMDK_2G_SPLIT_SIZE);
9062 i++)
9063 {
9064 rc = vmdkAddFileBackedExtent(pImage, VMDK_2G_SPLIT_SIZE);
9065 if (RT_FAILURE(rc))
9066 return rc;
9067
9068 pExtent = &pImage->pExtents[i];
9069
9070 rc = vmdkFlushImage(pImage, NULL);
9071 if (RT_FAILURE(rc))
9072 return rc;
9073
9074 pExtent->cSectors = VMDK_BYTE2SECTOR(VMDK_2G_SPLIT_SIZE);
9075 cSectorsNeeded -= VMDK_BYTE2SECTOR(VMDK_2G_SPLIT_SIZE);
9076 }
9077
9078 if (cSectorsNeeded)
9079 {
9080 rc = vmdkAddFileBackedExtent(pImage, VMDK_SECTOR2BYTE(cSectorsNeeded));
9081 if (RT_FAILURE(rc))
9082 return rc;
9083
9084 pExtent = &pImage->pExtents[pImage->cExtents];
9085
9086 rc = vmdkFlushImage(pImage, NULL);
9087 if (RT_FAILURE(rc))
9088 return rc;
9089 }
9090 }
9091 }
9092
9093 /* Successful resize. Update metadata */
9094 if (RT_SUCCESS(rc))
9095 {
9096 /* Update size and new block count. */
9097 pImage->cbSize = cbSize;
9098 pExtent->cNominalSectors = cSectorsNew;
9099 pExtent->cSectors = cSectorsNew;
9100
9101 /* Update geometry. */
9102 pImage->PCHSGeometry = *pPCHSGeometry;
9103 pImage->LCHSGeometry = *pLCHSGeometry;
9104 }
9105
9106 /* Update header information in base image file. */
9107 pImage->Descriptor.fDirty = true;
9108 rc = vmdkWriteDescriptor(pImage, NULL);
9109
9110 if (RT_SUCCESS(rc))
9111 rc = vmdkFlushImage(pImage, NULL);
9112 }
9113 /* Same size doesn't change the image at all. */
9114
9115 LogFlowFunc(("returns %Rrc\n", rc));
9116 return rc;
9117}
9118
/**
 * VMDK image backend registration record.
 *
 * The table of entry points through which the generic VD (virtual disk) layer
 * drives this format backend.  Members are filled positionally in declaration
 * order of VDIMAGEBACKEND; a NULL member means the operation is not supported
 * by the VMDK backend (e.g. discard, compact, repair, timestamps).
 */
const VDIMAGEBACKEND g_VmdkBackend =
{
    /* u32Version */
    VD_IMGBACKEND_VERSION,
    /* pszBackendName */
    "VMDK",
    /* uBackendCaps */
    /* Capability mask: supports UUIDs, fixed/dynamic/2G-split creation,
     * diff images, file storage, async I/O, VFS access; marked preferred. */
      VD_CAP_UUID | VD_CAP_CREATE_FIXED | VD_CAP_CREATE_DYNAMIC
    | VD_CAP_CREATE_SPLIT_2G | VD_CAP_DIFF | VD_CAP_FILE | VD_CAP_ASYNC
    | VD_CAP_VFS | VD_CAP_PREFERRED,
    /* paFileExtensions */
    s_aVmdkFileExtensions,
    /* paConfigInfo */
    s_aVmdkConfigInfo,
    /* pfnProbe */
    vmdkProbe,
    /* pfnOpen */
    vmdkOpen,
    /* pfnCreate */
    vmdkCreate,
    /* pfnRename */
    vmdkRename,
    /* pfnClose */
    vmdkClose,
    /* pfnRead */
    vmdkRead,
    /* pfnWrite */
    vmdkWrite,
    /* pfnFlush */
    vmdkFlush,
    /* pfnDiscard */
    NULL,
    /* pfnGetVersion */
    vmdkGetVersion,
    /* pfnGetFileSize */
    vmdkGetFileSize,
    /* pfnGetPCHSGeometry */
    vmdkGetPCHSGeometry,
    /* pfnSetPCHSGeometry */
    vmdkSetPCHSGeometry,
    /* pfnGetLCHSGeometry */
    vmdkGetLCHSGeometry,
    /* pfnSetLCHSGeometry */
    vmdkSetLCHSGeometry,
    /* pfnQueryRegions */
    vmdkQueryRegions,
    /* pfnRegionListRelease */
    vmdkRegionListRelease,
    /* pfnGetImageFlags */
    vmdkGetImageFlags,
    /* pfnGetOpenFlags */
    vmdkGetOpenFlags,
    /* pfnSetOpenFlags */
    vmdkSetOpenFlags,
    /* pfnGetComment */
    vmdkGetComment,
    /* pfnSetComment */
    vmdkSetComment,
    /* pfnGetUuid */
    vmdkGetUuid,
    /* pfnSetUuid */
    vmdkSetUuid,
    /* pfnGetModificationUuid */
    vmdkGetModificationUuid,
    /* pfnSetModificationUuid */
    vmdkSetModificationUuid,
    /* pfnGetParentUuid */
    vmdkGetParentUuid,
    /* pfnSetParentUuid */
    vmdkSetParentUuid,
    /* pfnGetParentModificationUuid */
    vmdkGetParentModificationUuid,
    /* pfnSetParentModificationUuid */
    vmdkSetParentModificationUuid,
    /* pfnDump */
    vmdkDump,
    /* pfnGetTimestamp */
    NULL,
    /* pfnGetParentTimestamp */
    NULL,
    /* pfnSetParentTimestamp */
    NULL,
    /* pfnGetParentFilename */
    NULL,
    /* pfnSetParentFilename */
    NULL,
    /* pfnComposeLocation */
    genericFileComposeLocation,
    /* pfnComposeName */
    genericFileComposeName,
    /* pfnCompact */
    NULL,
    /* pfnResize */
    vmdkResize,
    /* pfnRepair */
    NULL,
    /* pfnTraverseMetadata */
    NULL,
    /* u32VersionEnd */
    VD_IMGBACKEND_VERSION
};
Note: See TracBrowser for help on using the repository browser.

© 2024 Oracle Support Privacy / Do Not Sell My Info Terms of Use Trademark Policy Automated Access Etiquette