VirtualBox

source: vbox/trunk/src/VBox/Devices/Storage/VmdkHDDCore.cpp@ 18565

Last change on this file since 18565 was 18505, checked in by vboxsync, 16 years ago

VmdkHDDCore.cpp: a bunch of size_t/unsigned mixups.

  • Property svn:eol-style set to native
  • Property svn:keywords set to Author Date Id Revision
File size: 218.1 KB
Line 
1/* $Id: VmdkHDDCore.cpp 18505 2009-03-29 02:39:11Z vboxsync $ */
2/** @file
3 * VMDK Disk image, Core Code.
4 */
5
6/*
7 * Copyright (C) 2006-2007 Sun Microsystems, Inc.
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 *
17 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa
18 * Clara, CA 95054 USA or visit http://www.sun.com if you need
19 * additional information or have any questions.
20 */
21
22/*******************************************************************************
23* Header Files *
24*******************************************************************************/
25#define LOG_GROUP LOG_GROUP_VD_VMDK
26#include "VBoxHDD-Internal.h"
27#include <VBox/err.h>
28
29#include <VBox/log.h>
30#include <iprt/assert.h>
31#include <iprt/alloc.h>
32#include <iprt/uuid.h>
33#include <iprt/file.h>
34#include <iprt/path.h>
35#include <iprt/string.h>
36#include <iprt/rand.h>
37#include <iprt/zip.h>
38
39
40/*******************************************************************************
41* Constants And Macros, Structures and Typedefs *
42*******************************************************************************/
43
44/** Maximum encoded string size (including NUL) we allow for VMDK images.
45 * Deliberately not set high to avoid running out of descriptor space. */
46#define VMDK_ENCODED_COMMENT_MAX 1024
47
48/** VMDK descriptor DDB entry for PCHS cylinders. */
49#define VMDK_DDB_GEO_PCHS_CYLINDERS "ddb.geometry.cylinders"
50
51/** VMDK descriptor DDB entry for PCHS heads. */
52#define VMDK_DDB_GEO_PCHS_HEADS "ddb.geometry.heads"
53
54/** VMDK descriptor DDB entry for PCHS sectors. */
55#define VMDK_DDB_GEO_PCHS_SECTORS "ddb.geometry.sectors"
56
57/** VMDK descriptor DDB entry for LCHS cylinders. */
58#define VMDK_DDB_GEO_LCHS_CYLINDERS "ddb.geometry.biosCylinders"
59
60/** VMDK descriptor DDB entry for LCHS heads. */
61#define VMDK_DDB_GEO_LCHS_HEADS "ddb.geometry.biosHeads"
62
63/** VMDK descriptor DDB entry for LCHS sectors. */
64#define VMDK_DDB_GEO_LCHS_SECTORS "ddb.geometry.biosSectors"
65
66/** VMDK descriptor DDB entry for image UUID. */
67#define VMDK_DDB_IMAGE_UUID "ddb.uuid.image"
68
69/** VMDK descriptor DDB entry for image modification UUID. */
70#define VMDK_DDB_MODIFICATION_UUID "ddb.uuid.modification"
71
72/** VMDK descriptor DDB entry for parent image UUID. */
73#define VMDK_DDB_PARENT_UUID "ddb.uuid.parent"
74
75/** VMDK descriptor DDB entry for parent image modification UUID. */
76#define VMDK_DDB_PARENT_MODIFICATION_UUID "ddb.uuid.parentmodification"
77
78/** No compression for streamOptimized files. */
79#define VMDK_COMPRESSION_NONE 0
80
81/** Deflate compression for streamOptimized files. */
82#define VMDK_COMPRESSION_DEFLATE 1
83
84/** Marker that the actual GD value is stored in the footer. */
85#define VMDK_GD_AT_END 0xffffffffffffffffULL
86
87/** Marker for end-of-stream in streamOptimized images. */
88#define VMDK_MARKER_EOS 0
89
90/** Marker for grain table block in streamOptimized images. */
91#define VMDK_MARKER_GT 1
92
93/** Marker for grain directory block in streamOptimized images. */
94#define VMDK_MARKER_GD 2
95
96/** Marker for footer in streamOptimized images. */
97#define VMDK_MARKER_FOOTER 3
98
99/** Dummy marker for "don't check the marker value". */
100#define VMDK_MARKER_IGNORE 0xffffffffU
101
102/**
103 * Magic number for hosted images created by VMware Workstation 4, VMware
104 * Workstation 5, VMware Server or VMware Player. Not necessarily sparse.
105 */
106#define VMDK_SPARSE_MAGICNUMBER 0x564d444b /* 'V' 'M' 'D' 'K' */
107
108/**
109 * VMDK hosted binary extent header. The "Sparse" is a total misnomer, as
110 * this header is also used for monolithic flat images.
111 */
112#pragma pack(1)
113typedef struct SparseExtentHeader
114{
115 uint32_t magicNumber;
116 uint32_t version;
117 uint32_t flags;
118 uint64_t capacity;
119 uint64_t grainSize;
120 uint64_t descriptorOffset;
121 uint64_t descriptorSize;
122 uint32_t numGTEsPerGT;
123 uint64_t rgdOffset;
124 uint64_t gdOffset;
125 uint64_t overHead;
126 bool uncleanShutdown;
127 char singleEndLineChar;
128 char nonEndLineChar;
129 char doubleEndLineChar1;
130 char doubleEndLineChar2;
131 uint16_t compressAlgorithm;
132 uint8_t pad[433];
133} SparseExtentHeader;
134#pragma pack()
135
136/** VMDK capacity for a single chunk when 2G splitting is turned on. Should be
137 * divisible by the default grain size (64K) */
138#define VMDK_2G_SPLIT_SIZE (2047 * 1024 * 1024)
139
140/** VMDK streamOptimized file format marker. The type field may or may not
141 * be actually valid, but there's always data to read there. */
142#pragma pack(1)
143typedef struct VMDKMARKER
144{
145 uint64_t uSector;
146 uint32_t cbSize;
147 uint32_t uType;
148} VMDKMARKER;
149#pragma pack()
150
151
152#ifdef VBOX_WITH_VMDK_ESX
153
154/** @todo the ESX code is not tested, not used, and lacks error messages. */
155
156/**
157 * Magic number for images created by VMware GSX Server 3 or ESX Server 3.
158 */
159#define VMDK_ESX_SPARSE_MAGICNUMBER 0x44574f43 /* 'C' 'O' 'W' 'D' */
160
161#pragma pack(1)
162typedef struct COWDisk_Header
163{
164 uint32_t magicNumber;
165 uint32_t version;
166 uint32_t flags;
167 uint32_t numSectors;
168 uint32_t grainSize;
169 uint32_t gdOffset;
170 uint32_t numGDEntries;
171 uint32_t freeSector;
172 /* The spec incompletely documents quite a few further fields, but states
173 * that they are unused by the current format. Replace them by padding. */
174 char reserved1[1604];
175 uint32_t savedGeneration;
176 char reserved2[8];
177 uint32_t uncleanShutdown;
178 char padding[396];
179} COWDisk_Header;
180#pragma pack()
181#endif /* VBOX_WITH_VMDK_ESX */
182
183
184/** Convert sector number/size to byte offset/size. */
185#define VMDK_SECTOR2BYTE(u) ((uint64_t)(u) << 9)
186
187/** Convert byte offset/size to sector number/size. */
188#define VMDK_BYTE2SECTOR(u) ((u) >> 9)
189
190/**
191 * VMDK extent type.
192 */
193typedef enum VMDKETYPE
194{
195 /** Hosted sparse extent. */
196 VMDKETYPE_HOSTED_SPARSE = 1,
197 /** Flat extent. */
198 VMDKETYPE_FLAT,
199 /** Zero extent. */
200 VMDKETYPE_ZERO
201#ifdef VBOX_WITH_VMDK_ESX
202 ,
203 /** ESX sparse extent. */
204 VMDKETYPE_ESX_SPARSE
205#endif /* VBOX_WITH_VMDK_ESX */
206} VMDKETYPE, *PVMDKETYPE;
207
208/**
209 * VMDK access type for a extent.
210 */
211typedef enum VMDKACCESS
212{
213 /** No access allowed. */
214 VMDKACCESS_NOACCESS = 0,
215 /** Read-only access. */
216 VMDKACCESS_READONLY,
217 /** Read-write access. */
218 VMDKACCESS_READWRITE
219} VMDKACCESS, *PVMDKACCESS;
220
221/** Forward declaration for PVMDKIMAGE. */
222typedef struct VMDKIMAGE *PVMDKIMAGE;
223
224/**
225 * Extents files entry. Used for opening a particular file only once.
226 */
227typedef struct VMDKFILE
228{
229 /** Pointer to filename. Local copy. */
230 const char *pszFilename;
231 /** File open flags for consistency checking. */
232 unsigned fOpen;
233 /** File handle. */
234 RTFILE File;
235 /** Handle for asnychronous access if requested.*/
236 void *pStorage;
237 /** Flag whether to use File or pStorage. */
238 bool fAsyncIO;
239 /** Reference counter. */
240 unsigned uReferences;
241 /** Flag whether the file should be deleted on last close. */
242 bool fDelete;
243 /** Pointer to the image we belong to. */
244 PVMDKIMAGE pImage;
245 /** Pointer to next file descriptor. */
246 struct VMDKFILE *pNext;
247 /** Pointer to the previous file descriptor. */
248 struct VMDKFILE *pPrev;
249} VMDKFILE, *PVMDKFILE;
250
251/**
252 * VMDK extent data structure.
253 */
254typedef struct VMDKEXTENT
255{
256 /** File handle. */
257 PVMDKFILE pFile;
258 /** Base name of the image extent. */
259 const char *pszBasename;
260 /** Full name of the image extent. */
261 const char *pszFullname;
262 /** Number of sectors in this extent. */
263 uint64_t cSectors;
264 /** Number of sectors per block (grain in VMDK speak). */
265 uint64_t cSectorsPerGrain;
266 /** Starting sector number of descriptor. */
267 uint64_t uDescriptorSector;
268 /** Size of descriptor in sectors. */
269 uint64_t cDescriptorSectors;
270 /** Starting sector number of grain directory. */
271 uint64_t uSectorGD;
272 /** Starting sector number of redundant grain directory. */
273 uint64_t uSectorRGD;
274 /** Total number of metadata sectors. */
275 uint64_t cOverheadSectors;
276 /** Nominal size (i.e. as described by the descriptor) of this extent. */
277 uint64_t cNominalSectors;
278 /** Sector offset (i.e. as described by the descriptor) of this extent. */
279 uint64_t uSectorOffset;
280 /** Number of entries in a grain table. */
281 uint32_t cGTEntries;
282 /** Number of sectors reachable via a grain directory entry. */
283 uint32_t cSectorsPerGDE;
284 /** Number of entries in the grain directory. */
285 uint32_t cGDEntries;
286 /** Pointer to the next free sector. Legacy information. Do not use. */
287 uint32_t uFreeSector;
288 /** Number of this extent in the list of images. */
289 uint32_t uExtent;
290 /** Pointer to the descriptor (NULL if no descriptor in this extent). */
291 char *pDescData;
292 /** Pointer to the grain directory. */
293 uint32_t *pGD;
294 /** Pointer to the redundant grain directory. */
295 uint32_t *pRGD;
296 /** VMDK version of this extent. 1=1.0/1.1 */
297 uint32_t uVersion;
298 /** Type of this extent. */
299 VMDKETYPE enmType;
300 /** Access to this extent. */
301 VMDKACCESS enmAccess;
302 /** Flag whether this extent is marked as unclean. */
303 bool fUncleanShutdown;
304 /** Flag whether the metadata in the extent header needs to be updated. */
305 bool fMetaDirty;
306 /** Flag whether there is a footer in this extent. */
307 bool fFooter;
308 /** Compression type for this extent. */
309 uint16_t uCompression;
310 /** Last grain which has been written to. Only for streamOptimized extents. */
311 uint32_t uLastGrainWritten;
312 /** Sector number of last grain which has been written to. Only for
313 * streamOptimized extents. */
314 uint32_t uLastGrainSector;
315 /** Data size of last grain which has been written to. Only for
316 * streamOptimized extents. */
317 uint32_t cbLastGrainWritten;
318 /** Starting sector of the decompressed grain buffer. */
319 uint32_t uGrainSector;
320 /** Decompressed grain buffer for streamOptimized extents. */
321 void *pvGrain;
322 /** Reference to the image in which this extent is used. Do not use this
323 * on a regular basis to avoid passing pImage references to functions
324 * explicitly. */
325 struct VMDKIMAGE *pImage;
326} VMDKEXTENT, *PVMDKEXTENT;
327
328/**
329 * Grain table cache size. Allocated per image.
330 */
331#define VMDK_GT_CACHE_SIZE 256
332
333/**
334 * Grain table block size. Smaller than an actual grain table block to allow
335 * more grain table blocks to be cached without having to allocate excessive
336 * amounts of memory for the cache.
337 */
338#define VMDK_GT_CACHELINE_SIZE 128
339
340
341/**
342 * Maximum number of lines in a descriptor file. Not worth the effort of
343 * making it variable. Descriptor files are generally very short (~20 lines).
344 */
345#define VMDK_DESCRIPTOR_LINES_MAX 100U
346
347/**
348 * Parsed descriptor information. Allows easy access and update of the
349 * descriptor (whether separate file or not). Free form text files suck.
350 */
351typedef struct VMDKDESCRIPTOR
352{
353 /** Line number of first entry of the disk descriptor. */
354 unsigned uFirstDesc;
355 /** Line number of first entry in the extent description. */
356 unsigned uFirstExtent;
357 /** Line number of first disk database entry. */
358 unsigned uFirstDDB;
359 /** Total number of lines. */
360 unsigned cLines;
361 /** Total amount of memory available for the descriptor. */
362 size_t cbDescAlloc;
363 /** Set if descriptor has been changed and not yet written to disk. */
364 bool fDirty;
365 /** Array of pointers to the data in the descriptor. */
366 char *aLines[VMDK_DESCRIPTOR_LINES_MAX];
367 /** Array of line indices pointing to the next non-comment line. */
368 unsigned aNextLines[VMDK_DESCRIPTOR_LINES_MAX];
369} VMDKDESCRIPTOR, *PVMDKDESCRIPTOR;
370
371
372/**
373 * Cache entry for translating extent/sector to a sector number in that
374 * extent.
375 */
376typedef struct VMDKGTCACHEENTRY
377{
378 /** Extent number for which this entry is valid. */
379 uint32_t uExtent;
380 /** GT data block number. */
381 uint64_t uGTBlock;
382 /** Data part of the cache entry. */
383 uint32_t aGTData[VMDK_GT_CACHELINE_SIZE];
384} VMDKGTCACHEENTRY, *PVMDKGTCACHEENTRY;
385
386/**
387 * Cache data structure for blocks of grain table entries. For now this is a
388 * fixed size direct mapping cache, but this should be adapted to the size of
389 * the sparse image and maybe converted to a set-associative cache. The
390 * implementation below implements a write-through cache with write allocate.
391 */
392typedef struct VMDKGTCACHE
393{
394 /** Cache entries. */
395 VMDKGTCACHEENTRY aGTCache[VMDK_GT_CACHE_SIZE];
396 /** Number of cache entries (currently unused). */
397 unsigned cEntries;
398} VMDKGTCACHE, *PVMDKGTCACHE;
399
400/**
401 * Complete VMDK image data structure. Mainly a collection of extents and a few
402 * extra global data fields.
403 */
404typedef struct VMDKIMAGE
405{
406 /** Pointer to the image extents. */
407 PVMDKEXTENT pExtents;
408 /** Number of image extents. */
409 unsigned cExtents;
410 /** Pointer to the files list, for opening a file referenced multiple
411 * times only once (happens mainly with raw partition access). */
412 PVMDKFILE pFiles;
413
414 /** Base image name. */
415 const char *pszFilename;
416 /** Descriptor file if applicable. */
417 PVMDKFILE pFile;
418
419 /** Pointer to the per-disk VD interface list. */
420 PVDINTERFACE pVDIfsDisk;
421
422 /** Error interface. */
423 PVDINTERFACE pInterfaceError;
424 /** Error interface callbacks. */
425 PVDINTERFACEERROR pInterfaceErrorCallbacks;
426
427 /** Async I/O interface. */
428 PVDINTERFACE pInterfaceAsyncIO;
429 /** Async I/O interface callbacks. */
430 PVDINTERFACEASYNCIO pInterfaceAsyncIOCallbacks;
431 /**
432 * Pointer to an array of task handles for task submission.
433 * This is an optimization because the task number to submit is not known
434 * and allocating/freeing an array in the read/write functions every time
435 * is too expensive.
436 */
437 void **apTask;
438 /** Entries available in the task handle array. */
439 unsigned cTask;
440
441 /** Open flags passed by VBoxHD layer. */
442 unsigned uOpenFlags;
443 /** Image flags defined during creation or determined during open. */
444 unsigned uImageFlags;
445 /** Total size of the image. */
446 uint64_t cbSize;
447 /** Physical geometry of this image. */
448 PDMMEDIAGEOMETRY PCHSGeometry;
449 /** Logical geometry of this image. */
450 PDMMEDIAGEOMETRY LCHSGeometry;
451 /** Image UUID. */
452 RTUUID ImageUuid;
453 /** Image modification UUID. */
454 RTUUID ModificationUuid;
455 /** Parent image UUID. */
456 RTUUID ParentUuid;
457 /** Parent image modification UUID. */
458 RTUUID ParentModificationUuid;
459
460 /** Pointer to grain table cache, if this image contains sparse extents. */
461 PVMDKGTCACHE pGTCache;
462 /** Pointer to the descriptor (NULL if no separate descriptor file). */
463 char *pDescData;
464 /** Allocation size of the descriptor file. */
465 size_t cbDescAlloc;
466 /** Parsed descriptor file content. */
467 VMDKDESCRIPTOR Descriptor;
468} VMDKIMAGE;
469
470
471/** State for the input callout of the inflate reader. */
472typedef struct VMDKINFLATESTATE
473{
474 /* File where the data is stored. */
475 RTFILE File;
476 /* Total size of the data to read. */
477 size_t cbSize;
478 /* Offset in the file to read. */
479 uint64_t uFileOffset;
480 /* Current read position. */
481 ssize_t iOffset;
482} VMDKINFLATESTATE;
483
484/** State for the output callout of the deflate writer. */
485typedef struct VMDKDEFLATESTATE
486{
487 /* File where the data is to be stored. */
488 RTFILE File;
489 /* Offset in the file to write at. */
490 uint64_t uFileOffset;
491 /* Current write position. */
492 ssize_t iOffset;
493} VMDKDEFLATESTATE;
494
495/*******************************************************************************
496 * Static Variables *
497 *******************************************************************************/
498
499/** NULL-terminated array of supported file extensions. */
500static const char *const s_apszVmdkFileExtensions[] =
501{
502 "vmdk",
503 NULL
504};
505
506/*******************************************************************************
507* Internal Functions *
508*******************************************************************************/
509
510static void vmdkFreeGrainDirectory(PVMDKEXTENT pExtent);
511
512static void vmdkFreeExtentData(PVMDKIMAGE pImage, PVMDKEXTENT pExtent,
513 bool fDelete);
514
515static int vmdkCreateExtents(PVMDKIMAGE pImage, unsigned cExtents);
516static int vmdkFlushImage(PVMDKIMAGE pImage);
517static int vmdkSetImageComment(PVMDKIMAGE pImage, const char *pszComment);
518static void vmdkFreeImage(PVMDKIMAGE pImage, bool fDelete);
519
520
521/**
522 * Internal: signal an error to the frontend.
523 */
524DECLINLINE(int) vmdkError(PVMDKIMAGE pImage, int rc, RT_SRC_POS_DECL,
525 const char *pszFormat, ...)
526{
527 va_list va;
528 va_start(va, pszFormat);
529 if (pImage->pInterfaceError && pImage->pInterfaceErrorCallbacks)
530 pImage->pInterfaceErrorCallbacks->pfnError(pImage->pInterfaceError->pvUser, rc, RT_SRC_POS_ARGS,
531 pszFormat, va);
532 va_end(va);
533 return rc;
534}
535
536/**
537 * Internal: open a file (using a file descriptor cache to ensure each file
538 * is only opened once - anything else can cause locking problems).
539 */
540static int vmdkFileOpen(PVMDKIMAGE pImage, PVMDKFILE *ppVmdkFile,
541 const char *pszFilename, unsigned fOpen, bool fAsyncIO)
542{
543 int rc = VINF_SUCCESS;
544 PVMDKFILE pVmdkFile;
545
546 for (pVmdkFile = pImage->pFiles;
547 pVmdkFile != NULL;
548 pVmdkFile = pVmdkFile->pNext)
549 {
550 if (!strcmp(pszFilename, pVmdkFile->pszFilename))
551 {
552 Assert(fOpen == pVmdkFile->fOpen);
553 pVmdkFile->uReferences++;
554
555 *ppVmdkFile = pVmdkFile;
556
557 return rc;
558 }
559 }
560
561 /* If we get here, there's no matching entry in the cache. */
562 pVmdkFile = (PVMDKFILE)RTMemAllocZ(sizeof(VMDKFILE));
563 if (!VALID_PTR(pVmdkFile))
564 {
565 *ppVmdkFile = NULL;
566 return VERR_NO_MEMORY;
567 }
568
569 pVmdkFile->pszFilename = RTStrDup(pszFilename);
570 if (!VALID_PTR(pVmdkFile->pszFilename))
571 {
572 RTMemFree(pVmdkFile);
573 *ppVmdkFile = NULL;
574 return VERR_NO_MEMORY;
575 }
576 pVmdkFile->fOpen = fOpen;
577 if ((pImage->uOpenFlags & VD_OPEN_FLAGS_ASYNC_IO) && (fAsyncIO))
578 {
579 rc = pImage->pInterfaceAsyncIOCallbacks->pfnOpen(pImage->pInterfaceAsyncIO->pvUser,
580 pszFilename,
581 pImage->uOpenFlags & VD_OPEN_FLAGS_READONLY
582 ? true
583 : false,
584 &pVmdkFile->pStorage);
585 pVmdkFile->fAsyncIO = true;
586 }
587 else
588 {
589 rc = RTFileOpen(&pVmdkFile->File, pszFilename, fOpen);
590 pVmdkFile->fAsyncIO = false;
591 }
592 if (RT_SUCCESS(rc))
593 {
594 pVmdkFile->uReferences = 1;
595 pVmdkFile->pImage = pImage;
596 pVmdkFile->pNext = pImage->pFiles;
597 if (pImage->pFiles)
598 pImage->pFiles->pPrev = pVmdkFile;
599 pImage->pFiles = pVmdkFile;
600 *ppVmdkFile = pVmdkFile;
601 }
602 else
603 {
604 RTStrFree((char *)(void *)pVmdkFile->pszFilename);
605 RTMemFree(pVmdkFile);
606 *ppVmdkFile = NULL;
607 }
608
609 return rc;
610}
611
612/**
613 * Internal: close a file, updating the file descriptor cache.
614 */
615static int vmdkFileClose(PVMDKIMAGE pImage, PVMDKFILE *ppVmdkFile, bool fDelete)
616{
617 int rc = VINF_SUCCESS;
618 PVMDKFILE pVmdkFile = *ppVmdkFile;
619
620 AssertPtr(pVmdkFile);
621
622 pVmdkFile->fDelete |= fDelete;
623 Assert(pVmdkFile->uReferences);
624 pVmdkFile->uReferences--;
625 if (pVmdkFile->uReferences == 0)
626 {
627 PVMDKFILE pPrev;
628 PVMDKFILE pNext;
629
630 /* Unchain the element from the list. */
631 pPrev = pVmdkFile->pPrev;
632 pNext = pVmdkFile->pNext;
633
634 if (pNext)
635 pNext->pPrev = pPrev;
636 if (pPrev)
637 pPrev->pNext = pNext;
638 else
639 pImage->pFiles = pNext;
640
641 if (pVmdkFile->fAsyncIO)
642 {
643 rc = pImage->pInterfaceAsyncIOCallbacks->pfnClose(pImage->pInterfaceAsyncIO->pvUser,
644 pVmdkFile->pStorage);
645 }
646 else
647 {
648 rc = RTFileClose(pVmdkFile->File);
649 }
650 if (RT_SUCCESS(rc) && pVmdkFile->fDelete)
651 rc = RTFileDelete(pVmdkFile->pszFilename);
652 RTStrFree((char *)(void *)pVmdkFile->pszFilename);
653 RTMemFree(pVmdkFile);
654 }
655
656 *ppVmdkFile = NULL;
657 return rc;
658}
659
660/**
661 * Internal: read from a file distinguishing between async and normal operation
662 */
663DECLINLINE(int) vmdkFileReadAt(PVMDKFILE pVmdkFile,
664 uint64_t uOffset, void *pvBuf,
665 size_t cbToRead, size_t *pcbRead)
666{
667 PVMDKIMAGE pImage = pVmdkFile->pImage;
668
669 if (pVmdkFile->fAsyncIO)
670 return pImage->pInterfaceAsyncIOCallbacks->pfnRead(pImage->pInterfaceAsyncIO->pvUser,
671 pVmdkFile->pStorage, uOffset,
672 cbToRead, pvBuf, pcbRead);
673 else
674 return RTFileReadAt(pVmdkFile->File, uOffset, pvBuf, cbToRead, pcbRead);
675}
676
677/**
678 * Internal: write to a file distinguishing between async and normal operation
679 */
680DECLINLINE(int) vmdkFileWriteAt(PVMDKFILE pVmdkFile,
681 uint64_t uOffset, const void *pvBuf,
682 size_t cbToWrite, size_t *pcbWritten)
683{
684 PVMDKIMAGE pImage = pVmdkFile->pImage;
685
686 if (pVmdkFile->fAsyncIO)
687 return pImage->pInterfaceAsyncIOCallbacks->pfnWrite(pImage->pInterfaceAsyncIO->pvUser,
688 pVmdkFile->pStorage, uOffset,
689 cbToWrite, pvBuf, pcbWritten);
690 else
691 return RTFileWriteAt(pVmdkFile->File, uOffset, pvBuf, cbToWrite, pcbWritten);
692}
693
694/**
695 * Internal: get the size of a file distinguishing beween async and normal operation
696 */
697DECLINLINE(int) vmdkFileGetSize(PVMDKFILE pVmdkFile, uint64_t *pcbSize)
698{
699 if (pVmdkFile->fAsyncIO)
700 {
701 AssertMsgFailed(("TODO\n"));
702 return 0;
703 }
704 else
705 return RTFileGetSize(pVmdkFile->File, pcbSize);
706}
707
708/**
709 * Internal: set the size of a file distinguishing beween async and normal operation
710 */
711DECLINLINE(int) vmdkFileSetSize(PVMDKFILE pVmdkFile, uint64_t cbSize)
712{
713 if (pVmdkFile->fAsyncIO)
714 {
715 AssertMsgFailed(("TODO\n"));
716 return VERR_NOT_SUPPORTED;
717 }
718 else
719 return RTFileSetSize(pVmdkFile->File, cbSize);
720}
721
722/**
723 * Internal: flush a file distinguishing between async and normal operation
724 */
725DECLINLINE(int) vmdkFileFlush(PVMDKFILE pVmdkFile)
726{
727 PVMDKIMAGE pImage = pVmdkFile->pImage;
728
729 if (pVmdkFile->fAsyncIO)
730 return pImage->pInterfaceAsyncIOCallbacks->pfnFlush(pImage->pInterfaceAsyncIO->pvUser,
731 pVmdkFile->pStorage);
732 else
733 return RTFileFlush(pVmdkFile->File);
734}
735
736
737static DECLCALLBACK(int) vmdkFileInflateHelper(void *pvUser, void *pvBuf, size_t cbBuf, size_t *pcbBuf)
738{
739 VMDKINFLATESTATE *pInflateState = (VMDKINFLATESTATE *)pvUser;
740
741 Assert(cbBuf);
742 if (pInflateState->iOffset < 0)
743 {
744 *(uint8_t *)pvBuf = RTZIPTYPE_ZLIB;
745 if (pcbBuf)
746 *pcbBuf = 1;
747 pInflateState->iOffset = 0;
748 return VINF_SUCCESS;
749 }
750 cbBuf = RT_MIN(cbBuf, pInflateState->cbSize);
751 int rc = RTFileReadAt(pInflateState->File, pInflateState->uFileOffset, pvBuf, cbBuf, NULL);
752 if (RT_FAILURE(rc))
753 return rc;
754 pInflateState->uFileOffset += cbBuf;
755 pInflateState->iOffset += cbBuf;
756 pInflateState->cbSize -= cbBuf;
757 Assert(pcbBuf);
758 *pcbBuf = cbBuf;
759 return VINF_SUCCESS;
760}
761
762/**
763 * Internal: read from a file and inflate the compressed data,
764 * distinguishing between async and normal operation
765 */
766DECLINLINE(int) vmdkFileInflateAt(PVMDKFILE pVmdkFile,
767 uint64_t uOffset, void *pvBuf,
768 size_t cbToRead, unsigned uMarker,
769 uint64_t *puLBA, uint32_t *pcbMarkerData)
770{
771 if (pVmdkFile->fAsyncIO)
772 {
773 AssertMsgFailed(("TODO\n"));
774 return VERR_NOT_SUPPORTED;
775 }
776 else
777 {
778 int rc;
779 PRTZIPDECOMP pZip = NULL;
780 VMDKMARKER Marker;
781 uint64_t uCompOffset, cbComp;
782 VMDKINFLATESTATE InflateState;
783 size_t cbActuallyRead;
784
785 rc = RTFileReadAt(pVmdkFile->File, uOffset, &Marker, sizeof(Marker), NULL);
786 if (RT_FAILURE(rc))
787 return rc;
788 Marker.uSector = RT_LE2H_U64(Marker.uSector);
789 Marker.cbSize = RT_LE2H_U32(Marker.cbSize);
790 if ( uMarker != VMDK_MARKER_IGNORE
791 && ( RT_LE2H_U32(Marker.uType) != uMarker
792 || Marker.cbSize != 0))
793 return VERR_VD_VMDK_INVALID_FORMAT;
794 if (Marker.cbSize != 0)
795 {
796 /* Compressed grain marker. Data follows immediately. */
797 uCompOffset = uOffset + 12;
798 cbComp = Marker.cbSize;
799 if (puLBA)
800 *puLBA = Marker.uSector;
801 if (pcbMarkerData)
802 *pcbMarkerData = cbComp + 12;
803 }
804 else
805 {
806 Marker.uType = RT_LE2H_U32(Marker.uType);
807 if (Marker.uType == VMDK_MARKER_EOS)
808 {
809 Assert(uMarker != VMDK_MARKER_EOS);
810 return VERR_VD_VMDK_INVALID_FORMAT;
811 }
812 else if ( Marker.uType == VMDK_MARKER_GT
813 || Marker.uType == VMDK_MARKER_GD
814 || Marker.uType == VMDK_MARKER_FOOTER)
815 {
816 uCompOffset = uOffset + 512;
817 cbComp = VMDK_SECTOR2BYTE(Marker.uSector);
818 if (pcbMarkerData)
819 *pcbMarkerData = cbComp + 512;
820 }
821 else
822 {
823 AssertMsgFailed(("VMDK: unknown marker type %u\n", Marker.uType));
824 return VERR_VD_VMDK_INVALID_FORMAT;
825 }
826 }
827 InflateState.File = pVmdkFile->File;
828 InflateState.cbSize = cbComp;
829 InflateState.uFileOffset = uCompOffset;
830 InflateState.iOffset = -1;
831 /* Sanity check - the expansion ratio should be much less than 2. */
832 Assert(cbComp < 2 * cbToRead);
833 if (cbComp >= 2 * cbToRead)
834 return VERR_VD_VMDK_INVALID_FORMAT;
835
836 rc = RTZipDecompCreate(&pZip, &InflateState, vmdkFileInflateHelper);
837 if (RT_FAILURE(rc))
838 return rc;
839 rc = RTZipDecompress(pZip, pvBuf, cbToRead, &cbActuallyRead);
840 RTZipDecompDestroy(pZip);
841 if (RT_FAILURE(rc))
842 return rc;
843 if (cbActuallyRead != cbToRead)
844 rc = VERR_VD_VMDK_INVALID_FORMAT;
845 return rc;
846 }
847}
848
849static DECLCALLBACK(int) vmdkFileDeflateHelper(void *pvUser, const void *pvBuf, size_t cbBuf)
850{
851 VMDKDEFLATESTATE *pDeflateState = (VMDKDEFLATESTATE *)pvUser;
852
853 Assert(cbBuf);
854 if (pDeflateState->iOffset < 0)
855 {
856 pvBuf = (const uint8_t *)pvBuf + 1;
857 cbBuf--;
858 pDeflateState->iOffset = 0;
859 }
860 if (!cbBuf)
861 return VINF_SUCCESS;
862 int rc = RTFileWriteAt(pDeflateState->File, pDeflateState->uFileOffset, pvBuf, cbBuf, NULL);
863 if (RT_FAILURE(rc))
864 return rc;
865 pDeflateState->uFileOffset += cbBuf;
866 pDeflateState->iOffset += cbBuf;
867 return VINF_SUCCESS;
868}
869
870/**
871 * Internal: deflate the uncompressed data and write to a file,
872 * distinguishing between async and normal operation
873 */
874DECLINLINE(int) vmdkFileDeflateAt(PVMDKFILE pVmdkFile,
875 uint64_t uOffset, const void *pvBuf,
876 size_t cbToWrite, unsigned uMarker,
877 uint64_t uLBA, uint32_t *pcbMarkerData)
878{
879 if (pVmdkFile->fAsyncIO)
880 {
881 AssertMsgFailed(("TODO\n"));
882 return VERR_NOT_SUPPORTED;
883 }
884 else
885 {
886 int rc;
887 PRTZIPCOMP pZip = NULL;
888 VMDKMARKER Marker;
889 uint64_t uCompOffset, cbDecomp;
890 VMDKDEFLATESTATE DeflateState;
891
892 Marker.uSector = RT_H2LE_U64(uLBA);
893 Marker.cbSize = RT_H2LE_U32((uint32_t)cbToWrite);
894 if (uMarker == VMDK_MARKER_IGNORE)
895 {
896 /* Compressed grain marker. Data follows immediately. */
897 uCompOffset = uOffset + 12;
898 cbDecomp = cbToWrite;
899 }
900 else
901 {
902 /** @todo implement creating the other marker types */
903 return VERR_NOT_IMPLEMENTED;
904 }
905 DeflateState.File = pVmdkFile->File;
906 DeflateState.uFileOffset = uCompOffset;
907 DeflateState.iOffset = -1;
908
909 rc = RTZipCompCreate(&pZip, &DeflateState, vmdkFileDeflateHelper, RTZIPTYPE_ZLIB, RTZIPLEVEL_DEFAULT);
910 if (RT_FAILURE(rc))
911 return rc;
912 rc = RTZipCompress(pZip, pvBuf, cbDecomp);
913 if (RT_SUCCESS(rc))
914 rc = RTZipCompFinish(pZip);
915 RTZipCompDestroy(pZip);
916 if (RT_SUCCESS(rc))
917 {
918 if (pcbMarkerData)
919 *pcbMarkerData = 12 + DeflateState.iOffset;
920 /* Set the file size to remove old garbage in case the block is
921 * rewritten. Cannot cause data loss as the code calling this
922 * guarantees that data gets only appended. */
923 Assert(DeflateState.uFileOffset > uCompOffset);
924 rc = RTFileSetSize(pVmdkFile->File, DeflateState.uFileOffset);
925
926 if (uMarker == VMDK_MARKER_IGNORE)
927 {
928 /* Compressed grain marker. */
929 Marker.cbSize = RT_H2LE_U32(DeflateState.iOffset);
930 rc = RTFileWriteAt(pVmdkFile->File, uOffset, &Marker, 12, NULL);
931 if (RT_FAILURE(rc))
932 return rc;
933 }
934 else
935 {
936 /** @todo implement creating the other marker types */
937 return VERR_NOT_IMPLEMENTED;
938 }
939 }
940 return rc;
941 }
942}
943
944/**
945 * Internal: check if all files are closed, prevent leaking resources.
946 */
947static int vmdkFileCheckAllClose(PVMDKIMAGE pImage)
948{
949 int rc = VINF_SUCCESS, rc2;
950 PVMDKFILE pVmdkFile;
951
952 Assert(pImage->pFiles == NULL);
953 for (pVmdkFile = pImage->pFiles;
954 pVmdkFile != NULL;
955 pVmdkFile = pVmdkFile->pNext)
956 {
957 LogRel(("VMDK: leaking reference to file \"%s\"\n",
958 pVmdkFile->pszFilename));
959 pImage->pFiles = pVmdkFile->pNext;
960
961 if (pImage->uOpenFlags & VD_OPEN_FLAGS_ASYNC_IO)
962 rc2 = pImage->pInterfaceAsyncIOCallbacks->pfnClose(pImage->pInterfaceAsyncIO->pvUser,
963 pVmdkFile->pStorage);
964 else
965 rc2 = RTFileClose(pVmdkFile->File);
966
967 if (RT_SUCCESS(rc) && pVmdkFile->fDelete)
968 rc2 = RTFileDelete(pVmdkFile->pszFilename);
969 RTStrFree((char *)(void *)pVmdkFile->pszFilename);
970 RTMemFree(pVmdkFile);
971 if (RT_SUCCESS(rc))
972 rc = rc2;
973 }
974 return rc;
975}
976
977/**
978 * Internal: truncate a string (at a UTF8 code point boundary) and encode the
979 * critical non-ASCII characters.
980 */
981static char *vmdkEncodeString(const char *psz)
982{
983 char szEnc[VMDK_ENCODED_COMMENT_MAX + 3];
984 char *pszDst = szEnc;
985
986 AssertPtr(psz);
987
988 for (; *psz; psz = RTStrNextCp(psz))
989 {
990 char *pszDstPrev = pszDst;
991 RTUNICP Cp = RTStrGetCp(psz);
992 if (Cp == '\\')
993 {
994 pszDst = RTStrPutCp(pszDst, Cp);
995 pszDst = RTStrPutCp(pszDst, Cp);
996 }
997 else if (Cp == '\n')
998 {
999 pszDst = RTStrPutCp(pszDst, '\\');
1000 pszDst = RTStrPutCp(pszDst, 'n');
1001 }
1002 else if (Cp == '\r')
1003 {
1004 pszDst = RTStrPutCp(pszDst, '\\');
1005 pszDst = RTStrPutCp(pszDst, 'r');
1006 }
1007 else
1008 pszDst = RTStrPutCp(pszDst, Cp);
1009 if (pszDst - szEnc >= VMDK_ENCODED_COMMENT_MAX - 1)
1010 {
1011 pszDst = pszDstPrev;
1012 break;
1013 }
1014 }
1015 *pszDst = '\0';
1016 return RTStrDup(szEnc);
1017}
1018
1019/**
1020 * Internal: decode a string and store it into the specified string.
1021 */
1022static int vmdkDecodeString(const char *pszEncoded, char *psz, size_t cb)
1023{
1024 int rc = VINF_SUCCESS;
1025 char szBuf[4];
1026
1027 if (!cb)
1028 return VERR_BUFFER_OVERFLOW;
1029
1030 AssertPtr(psz);
1031
1032 for (; *pszEncoded; pszEncoded = RTStrNextCp(pszEncoded))
1033 {
1034 char *pszDst = szBuf;
1035 RTUNICP Cp = RTStrGetCp(pszEncoded);
1036 if (Cp == '\\')
1037 {
1038 pszEncoded = RTStrNextCp(pszEncoded);
1039 RTUNICP CpQ = RTStrGetCp(pszEncoded);
1040 if (CpQ == 'n')
1041 RTStrPutCp(pszDst, '\n');
1042 else if (CpQ == 'r')
1043 RTStrPutCp(pszDst, '\r');
1044 else if (CpQ == '\0')
1045 {
1046 rc = VERR_VD_VMDK_INVALID_HEADER;
1047 break;
1048 }
1049 else
1050 RTStrPutCp(pszDst, CpQ);
1051 }
1052 else
1053 pszDst = RTStrPutCp(pszDst, Cp);
1054
1055 /* Need to leave space for terminating NUL. */
1056 if ((size_t)(pszDst - szBuf) + 1 >= cb)
1057 {
1058 rc = VERR_BUFFER_OVERFLOW;
1059 break;
1060 }
1061 memcpy(psz, szBuf, pszDst - szBuf);
1062 psz += pszDst - szBuf;
1063 }
1064 *psz = '\0';
1065 return rc;
1066}
1067
1068static int vmdkReadGrainDirectory(PVMDKEXTENT pExtent)
1069{
1070 int rc = VINF_SUCCESS;
1071 unsigned i;
1072 uint32_t *pGD = NULL, *pRGD = NULL, *pGDTmp, *pRGDTmp;
1073 size_t cbGD = pExtent->cGDEntries * sizeof(uint32_t);
1074
1075 if (pExtent->enmType != VMDKETYPE_HOSTED_SPARSE)
1076 goto out;
1077
1078 pGD = (uint32_t *)RTMemAllocZ(cbGD);
1079 if (!pGD)
1080 {
1081 rc = VERR_NO_MEMORY;
1082 goto out;
1083 }
1084 pExtent->pGD = pGD;
1085 /* The VMDK 1.1 spec talks about compressed grain directories, but real
1086 * life files don't have them. The spec is wrong in creative ways. */
1087 rc = vmdkFileReadAt(pExtent->pFile, VMDK_SECTOR2BYTE(pExtent->uSectorGD),
1088 pGD, cbGD, NULL);
1089 AssertRC(rc);
1090 if (RT_FAILURE(rc))
1091 {
1092 rc = vmdkError(pExtent->pImage, rc, RT_SRC_POS, N_("VMDK: could not read grain directory in '%s': %Rrc"), pExtent->pszFullname);
1093 goto out;
1094 }
1095 for (i = 0, pGDTmp = pGD; i < pExtent->cGDEntries; i++, pGDTmp++)
1096 *pGDTmp = RT_LE2H_U32(*pGDTmp);
1097
1098 if (pExtent->uSectorRGD)
1099 {
1100 pRGD = (uint32_t *)RTMemAllocZ(cbGD);
1101 if (!pRGD)
1102 {
1103 rc = VERR_NO_MEMORY;
1104 goto out;
1105 }
1106 pExtent->pRGD = pRGD;
1107 /* The VMDK 1.1 spec talks about compressed grain directories, but real
1108 * life files don't have them. The spec is wrong in creative ways. */
1109 rc = vmdkFileReadAt(pExtent->pFile, VMDK_SECTOR2BYTE(pExtent->uSectorRGD),
1110 pRGD, cbGD, NULL);
1111 AssertRC(rc);
1112 if (RT_FAILURE(rc))
1113 {
1114 rc = vmdkError(pExtent->pImage, rc, RT_SRC_POS, N_("VMDK: could not read redundant grain directory in '%s'"), pExtent->pszFullname);
1115 goto out;
1116 }
1117 for (i = 0, pRGDTmp = pRGD; i < pExtent->cGDEntries; i++, pRGDTmp++)
1118 *pRGDTmp = RT_LE2H_U32(*pRGDTmp);
1119
1120 /* Check grain table and redundant grain table for consistency. */
1121 size_t cbGT = pExtent->cGTEntries * sizeof(uint32_t);
1122 uint32_t *pTmpGT1 = (uint32_t *)RTMemTmpAlloc(cbGT);
1123 if (!pTmpGT1)
1124 {
1125 rc = VERR_NO_MEMORY;
1126 goto out;
1127 }
1128 uint32_t *pTmpGT2 = (uint32_t *)RTMemTmpAlloc(cbGT);
1129 if (!pTmpGT2)
1130 {
1131 RTMemTmpFree(pTmpGT1);
1132 rc = VERR_NO_MEMORY;
1133 goto out;
1134 }
1135
1136 for (i = 0, pGDTmp = pGD, pRGDTmp = pRGD;
1137 i < pExtent->cGDEntries;
1138 i++, pGDTmp++, pRGDTmp++)
1139 {
1140 /* If no grain table is allocated skip the entry. */
1141 if (*pGDTmp == 0 && *pRGDTmp == 0)
1142 continue;
1143
1144 if (*pGDTmp == 0 || *pRGDTmp == 0 || *pGDTmp == *pRGDTmp)
1145 {
1146 /* Just one grain directory entry refers to a not yet allocated
1147 * grain table or both grain directory copies refer to the same
1148 * grain table. Not allowed. */
1149 RTMemTmpFree(pTmpGT1);
1150 RTMemTmpFree(pTmpGT2);
1151 rc = vmdkError(pExtent->pImage, VERR_VD_VMDK_INVALID_HEADER, RT_SRC_POS, N_("VMDK: inconsistent references to grain directory in '%s'"), pExtent->pszFullname);
1152 goto out;
1153 }
1154 /* The VMDK 1.1 spec talks about compressed grain tables, but real
1155 * life files don't have them. The spec is wrong in creative ways. */
1156 rc = vmdkFileReadAt(pExtent->pFile, VMDK_SECTOR2BYTE(*pGDTmp),
1157 pTmpGT1, cbGT, NULL);
1158 if (RT_FAILURE(rc))
1159 {
1160 rc = vmdkError(pExtent->pImage, rc, RT_SRC_POS, N_("VMDK: error reading grain table in '%s'"), pExtent->pszFullname);
1161 RTMemTmpFree(pTmpGT1);
1162 RTMemTmpFree(pTmpGT2);
1163 goto out;
1164 }
1165 /* The VMDK 1.1 spec talks about compressed grain tables, but real
1166 * life files don't have them. The spec is wrong in creative ways. */
1167 rc = vmdkFileReadAt(pExtent->pFile, VMDK_SECTOR2BYTE(*pRGDTmp),
1168 pTmpGT2, cbGT, NULL);
1169 if (RT_FAILURE(rc))
1170 {
1171 rc = vmdkError(pExtent->pImage, rc, RT_SRC_POS, N_("VMDK: error reading backup grain table in '%s'"), pExtent->pszFullname);
1172 RTMemTmpFree(pTmpGT1);
1173 RTMemTmpFree(pTmpGT2);
1174 goto out;
1175 }
1176 if (memcmp(pTmpGT1, pTmpGT2, cbGT))
1177 {
1178 RTMemTmpFree(pTmpGT1);
1179 RTMemTmpFree(pTmpGT2);
1180 rc = vmdkError(pExtent->pImage, VERR_VD_VMDK_INVALID_HEADER, RT_SRC_POS, N_("VMDK: inconsistency between grain table and backup grain table in '%s'"), pExtent->pszFullname);
1181 goto out;
1182 }
1183 }
1184
1185 /** @todo figure out what to do for unclean VMDKs. */
1186 RTMemTmpFree(pTmpGT1);
1187 RTMemTmpFree(pTmpGT2);
1188 }
1189
1190 if (pExtent->pImage->uImageFlags & VD_VMDK_IMAGE_FLAGS_STREAM_OPTIMIZED)
1191 {
1192 uint32_t uLastGrainWritten = 0;
1193 uint32_t uLastGrainSector = 0;
1194 size_t cbGT = pExtent->cGTEntries * sizeof(uint32_t);
1195 uint32_t *pTmpGT = (uint32_t *)RTMemTmpAlloc(cbGT);
1196 if (!pTmpGT)
1197 {
1198 rc = VERR_NO_MEMORY;
1199 goto out;
1200 }
1201 for (i = 0, pGDTmp = pGD; i < pExtent->cGDEntries; i++, pGDTmp++)
1202 {
1203 /* If no grain table is allocated skip the entry. */
1204 if (*pGDTmp == 0)
1205 continue;
1206
1207 /* The VMDK 1.1 spec talks about compressed grain tables, but real
1208 * life files don't have them. The spec is wrong in creative ways. */
1209 rc = vmdkFileReadAt(pExtent->pFile, VMDK_SECTOR2BYTE(*pGDTmp),
1210 pTmpGT, cbGT, NULL);
1211 if (RT_FAILURE(rc))
1212 {
1213 rc = vmdkError(pExtent->pImage, rc, RT_SRC_POS, N_("VMDK: error reading grain table in '%s'"), pExtent->pszFullname);
1214 RTMemTmpFree(pTmpGT);
1215 goto out;
1216 }
1217 uint32_t j;
1218 uint32_t *pGTTmp;
1219 for (j = 0, pGTTmp = pTmpGT; j < pExtent->cGTEntries; j++, pGTTmp++)
1220 {
1221 uint32_t uGTTmp = RT_LE2H_U32(*pGTTmp);
1222
1223 /* If no grain is allocated skip the entry. */
1224 if (uGTTmp == 0)
1225 continue;
1226
1227 if (uLastGrainSector && uLastGrainSector >= uGTTmp)
1228 {
1229 rc = vmdkError(pExtent->pImage, rc, RT_SRC_POS, N_("VMDK: grain table in '%s' contains a violation of the ordering assumptions"), pExtent->pszFullname);
1230 RTMemTmpFree(pTmpGT);
1231 goto out;
1232 }
1233 uLastGrainSector = uGTTmp;
1234 uLastGrainWritten = i * pExtent->cGTEntries + j;
1235 }
1236 }
1237 RTMemTmpFree(pTmpGT);
1238
1239 /* streamOptimized extents need a grain decompress buffer. */
1240 pExtent->pvGrain = RTMemAlloc(VMDK_SECTOR2BYTE(pExtent->cSectorsPerGrain));
1241 if (!pExtent->pvGrain)
1242 {
1243 rc = VERR_NO_MEMORY;
1244 goto out;
1245 }
1246
1247 if (uLastGrainSector)
1248 {
1249 uint64_t uLBA = 0;
1250 uint32_t cbMarker = 0;
1251 rc = vmdkFileInflateAt(pExtent->pFile, VMDK_SECTOR2BYTE(uLastGrainSector),
1252 pExtent->pvGrain, VMDK_SECTOR2BYTE(pExtent->cSectorsPerGrain), VMDK_MARKER_IGNORE, &uLBA, &cbMarker);
1253 if (RT_FAILURE(rc))
1254 goto out;
1255
1256 Assert(uLBA == uLastGrainWritten * pExtent->cSectorsPerGrain);
1257 pExtent->uGrainSector = uLastGrainSector;
1258 pExtent->cbLastGrainWritten = RT_ALIGN(cbMarker, 512);
1259 }
1260 pExtent->uLastGrainWritten = uLastGrainWritten;
1261 pExtent->uLastGrainSector = uLastGrainSector;
1262 }
1263
1264out:
1265 if (RT_FAILURE(rc))
1266 vmdkFreeGrainDirectory(pExtent);
1267 return rc;
1268}
1269
1270static int vmdkCreateGrainDirectory(PVMDKEXTENT pExtent, uint64_t uStartSector,
1271 bool fPreAlloc)
1272{
1273 int rc = VINF_SUCCESS;
1274 unsigned i;
1275 uint32_t *pGD = NULL, *pRGD = NULL;
1276 size_t cbGD = pExtent->cGDEntries * sizeof(uint32_t);
1277 size_t cbGDRounded = RT_ALIGN_64(pExtent->cGDEntries * sizeof(uint32_t), 512);
1278 size_t cbGTRounded;
1279 uint64_t cbOverhead;
1280
1281 if (fPreAlloc)
1282 cbGTRounded = RT_ALIGN_64(pExtent->cGDEntries * pExtent->cGTEntries * sizeof(uint32_t), 512);
1283 else
1284 cbGTRounded = 0;
1285
1286 pGD = (uint32_t *)RTMemAllocZ(cbGD);
1287 if (!pGD)
1288 {
1289 rc = VERR_NO_MEMORY;
1290 goto out;
1291 }
1292 pExtent->pGD = pGD;
1293 pRGD = (uint32_t *)RTMemAllocZ(cbGD);
1294 if (!pRGD)
1295 {
1296 rc = VERR_NO_MEMORY;
1297 goto out;
1298 }
1299 pExtent->pRGD = pRGD;
1300
1301 cbOverhead = RT_ALIGN_64(VMDK_SECTOR2BYTE(uStartSector) + 2 * (cbGDRounded + cbGTRounded), VMDK_SECTOR2BYTE(pExtent->cSectorsPerGrain));
1302 /* For streamOptimized extents put the end-of-stream marker at the end. */
1303 if (pExtent->pImage->uImageFlags & VD_VMDK_IMAGE_FLAGS_STREAM_OPTIMIZED)
1304 rc = vmdkFileSetSize(pExtent->pFile, cbOverhead + 512);
1305 else
1306 rc = vmdkFileSetSize(pExtent->pFile, cbOverhead);
1307 if (RT_FAILURE(rc))
1308 goto out;
1309 pExtent->uSectorRGD = uStartSector;
1310 pExtent->uSectorGD = uStartSector + VMDK_BYTE2SECTOR(cbGDRounded + cbGTRounded);
1311
1312 if (fPreAlloc)
1313 {
1314 uint32_t uGTSectorLE;
1315 uint64_t uOffsetSectors;
1316
1317 uOffsetSectors = pExtent->uSectorRGD + VMDK_BYTE2SECTOR(cbGDRounded);
1318 for (i = 0; i < pExtent->cGDEntries; i++)
1319 {
1320 pRGD[i] = uOffsetSectors;
1321 uGTSectorLE = RT_H2LE_U64(uOffsetSectors);
1322 /* Write the redundant grain directory entry to disk. */
1323 rc = vmdkFileWriteAt(pExtent->pFile,
1324 VMDK_SECTOR2BYTE(pExtent->uSectorRGD) + i * sizeof(uGTSectorLE),
1325 &uGTSectorLE, sizeof(uGTSectorLE), NULL);
1326 if (RT_FAILURE(rc))
1327 {
1328 rc = vmdkError(pExtent->pImage, rc, RT_SRC_POS, N_("VMDK: cannot write new redundant grain directory entry in '%s'"), pExtent->pszFullname);
1329 goto out;
1330 }
1331 uOffsetSectors += VMDK_BYTE2SECTOR(pExtent->cGTEntries * sizeof(uint32_t));
1332 }
1333
1334 uOffsetSectors = pExtent->uSectorGD + VMDK_BYTE2SECTOR(cbGDRounded);
1335 for (i = 0; i < pExtent->cGDEntries; i++)
1336 {
1337 pGD[i] = uOffsetSectors;
1338 uGTSectorLE = RT_H2LE_U64(uOffsetSectors);
1339 /* Write the grain directory entry to disk. */
1340 rc = vmdkFileWriteAt(pExtent->pFile,
1341 VMDK_SECTOR2BYTE(pExtent->uSectorGD) + i * sizeof(uGTSectorLE),
1342 &uGTSectorLE, sizeof(uGTSectorLE), NULL);
1343 if (RT_FAILURE(rc))
1344 {
1345 rc = vmdkError(pExtent->pImage, rc, RT_SRC_POS, N_("VMDK: cannot write new grain directory entry in '%s'"), pExtent->pszFullname);
1346 goto out;
1347 }
1348 uOffsetSectors += VMDK_BYTE2SECTOR(pExtent->cGTEntries * sizeof(uint32_t));
1349 }
1350 }
1351 pExtent->cOverheadSectors = VMDK_BYTE2SECTOR(cbOverhead);
1352
1353 /* streamOptimized extents need a grain decompress buffer. */
1354 if (pExtent->pImage->uImageFlags & VD_VMDK_IMAGE_FLAGS_STREAM_OPTIMIZED)
1355 {
1356 pExtent->pvGrain = RTMemAlloc(VMDK_SECTOR2BYTE(pExtent->cSectorsPerGrain));
1357 if (!pExtent->pvGrain)
1358 {
1359 rc = VERR_NO_MEMORY;
1360 goto out;
1361 }
1362 }
1363
1364out:
1365 if (RT_FAILURE(rc))
1366 vmdkFreeGrainDirectory(pExtent);
1367 return rc;
1368}
1369
1370static void vmdkFreeGrainDirectory(PVMDKEXTENT pExtent)
1371{
1372 if (pExtent->pGD)
1373 {
1374 RTMemFree(pExtent->pGD);
1375 pExtent->pGD = NULL;
1376 }
1377 if (pExtent->pRGD)
1378 {
1379 RTMemFree(pExtent->pRGD);
1380 pExtent->pRGD = NULL;
1381 }
1382}
1383
1384static int vmdkStringUnquote(PVMDKIMAGE pImage, const char *pszStr,
1385 char **ppszUnquoted, char **ppszNext)
1386{
1387 char *pszQ;
1388 char *pszUnquoted;
1389
1390 /* Skip over whitespace. */
1391 while (*pszStr == ' ' || *pszStr == '\t')
1392 pszStr++;
1393
1394 if (*pszStr != '"')
1395 {
1396 pszQ = (char *)pszStr;
1397 while (*pszQ && *pszQ != ' ' && *pszQ != '\t')
1398 pszQ++;
1399 }
1400 else
1401 {
1402 pszStr++;
1403 pszQ = (char *)strchr(pszStr, '"');
1404 if (pszQ == NULL)
1405 return vmdkError(pImage, VERR_VD_VMDK_INVALID_HEADER, RT_SRC_POS, N_("VMDK: incorrectly quoted value in descriptor in '%s'"), pImage->pszFilename);
1406 }
1407
1408 pszUnquoted = (char *)RTMemTmpAlloc(pszQ - pszStr + 1);
1409 if (!pszUnquoted)
1410 return VERR_NO_MEMORY;
1411 memcpy(pszUnquoted, pszStr, pszQ - pszStr);
1412 pszUnquoted[pszQ - pszStr] = '\0';
1413 *ppszUnquoted = pszUnquoted;
1414 if (ppszNext)
1415 *ppszNext = pszQ + 1;
1416 return VINF_SUCCESS;
1417}
1418
1419static int vmdkDescInitStr(PVMDKIMAGE pImage, PVMDKDESCRIPTOR pDescriptor,
1420 const char *pszLine)
1421{
1422 char *pEnd = pDescriptor->aLines[pDescriptor->cLines];
1423 ssize_t cbDiff = strlen(pszLine) + 1;
1424
1425 if ( pDescriptor->cLines >= VMDK_DESCRIPTOR_LINES_MAX - 1
1426 && pEnd - pDescriptor->aLines[0] > (ptrdiff_t)pDescriptor->cbDescAlloc - cbDiff)
1427 return vmdkError(pImage, VERR_BUFFER_OVERFLOW, RT_SRC_POS, N_("VMDK: descriptor too big in '%s'"), pImage->pszFilename);
1428
1429 memcpy(pEnd, pszLine, cbDiff);
1430 pDescriptor->cLines++;
1431 pDescriptor->aLines[pDescriptor->cLines] = pEnd + cbDiff;
1432 pDescriptor->fDirty = true;
1433
1434 return VINF_SUCCESS;
1435}
1436
1437static bool vmdkDescGetStr(PVMDKDESCRIPTOR pDescriptor, unsigned uStart,
1438 const char *pszKey, const char **ppszValue)
1439{
1440 size_t cbKey = strlen(pszKey);
1441 const char *pszValue;
1442
1443 while (uStart != 0)
1444 {
1445 if (!strncmp(pDescriptor->aLines[uStart], pszKey, cbKey))
1446 {
1447 /* Key matches, check for a '=' (preceded by whitespace). */
1448 pszValue = pDescriptor->aLines[uStart] + cbKey;
1449 while (*pszValue == ' ' || *pszValue == '\t')
1450 pszValue++;
1451 if (*pszValue == '=')
1452 {
1453 *ppszValue = pszValue + 1;
1454 break;
1455 }
1456 }
1457 uStart = pDescriptor->aNextLines[uStart];
1458 }
1459 return !!uStart;
1460}
1461
1462static int vmdkDescSetStr(PVMDKIMAGE pImage, PVMDKDESCRIPTOR pDescriptor,
1463 unsigned uStart,
1464 const char *pszKey, const char *pszValue)
1465{
1466 char *pszTmp;
1467 size_t cbKey = strlen(pszKey);
1468 unsigned uLast = 0;
1469
1470 while (uStart != 0)
1471 {
1472 if (!strncmp(pDescriptor->aLines[uStart], pszKey, cbKey))
1473 {
1474 /* Key matches, check for a '=' (preceded by whitespace). */
1475 pszTmp = pDescriptor->aLines[uStart] + cbKey;
1476 while (*pszTmp == ' ' || *pszTmp == '\t')
1477 pszTmp++;
1478 if (*pszTmp == '=')
1479 {
1480 pszTmp++;
1481 while (*pszTmp == ' ' || *pszTmp == '\t')
1482 pszTmp++;
1483 break;
1484 }
1485 }
1486 if (!pDescriptor->aNextLines[uStart])
1487 uLast = uStart;
1488 uStart = pDescriptor->aNextLines[uStart];
1489 }
1490 if (uStart)
1491 {
1492 if (pszValue)
1493 {
1494 /* Key already exists, replace existing value. */
1495 size_t cbOldVal = strlen(pszTmp);
1496 size_t cbNewVal = strlen(pszValue);
1497 ssize_t cbDiff = cbNewVal - cbOldVal;
1498 /* Check for buffer overflow. */
1499 if ( pDescriptor->aLines[pDescriptor->cLines]
1500 - pDescriptor->aLines[0] > (ptrdiff_t)pDescriptor->cbDescAlloc - cbDiff)
1501 return vmdkError(pImage, VERR_BUFFER_OVERFLOW, RT_SRC_POS, N_("VMDK: descriptor too big in '%s'"), pImage->pszFilename);
1502
1503 memmove(pszTmp + cbNewVal, pszTmp + cbOldVal,
1504 pDescriptor->aLines[pDescriptor->cLines] - pszTmp - cbOldVal);
1505 memcpy(pszTmp, pszValue, cbNewVal + 1);
1506 for (unsigned i = uStart + 1; i <= pDescriptor->cLines; i++)
1507 pDescriptor->aLines[i] += cbDiff;
1508 }
1509 else
1510 {
1511 memmove(pDescriptor->aLines[uStart], pDescriptor->aLines[uStart+1],
1512 pDescriptor->aLines[pDescriptor->cLines] - pDescriptor->aLines[uStart+1] + 1);
1513 for (unsigned i = uStart + 1; i <= pDescriptor->cLines; i++)
1514 {
1515 pDescriptor->aLines[i-1] = pDescriptor->aLines[i];
1516 if (pDescriptor->aNextLines[i])
1517 pDescriptor->aNextLines[i-1] = pDescriptor->aNextLines[i] - 1;
1518 else
1519 pDescriptor->aNextLines[i-1] = 0;
1520 }
1521 pDescriptor->cLines--;
1522 /* Adjust starting line numbers of following descriptor sections. */
1523 if (uStart < pDescriptor->uFirstExtent)
1524 pDescriptor->uFirstExtent--;
1525 if (uStart < pDescriptor->uFirstDDB)
1526 pDescriptor->uFirstDDB--;
1527 }
1528 }
1529 else
1530 {
1531 /* Key doesn't exist, append after the last entry in this category. */
1532 if (!pszValue)
1533 {
1534 /* Key doesn't exist, and it should be removed. Simply a no-op. */
1535 return VINF_SUCCESS;
1536 }
1537 size_t cbKey = strlen(pszKey);
1538 size_t cbValue = strlen(pszValue);
1539 ssize_t cbDiff = cbKey + 1 + cbValue + 1;
1540 /* Check for buffer overflow. */
1541 if ( (pDescriptor->cLines >= VMDK_DESCRIPTOR_LINES_MAX - 1)
1542 || ( pDescriptor->aLines[pDescriptor->cLines]
1543 - pDescriptor->aLines[0] > (ptrdiff_t)pDescriptor->cbDescAlloc - cbDiff))
1544 return vmdkError(pImage, VERR_BUFFER_OVERFLOW, RT_SRC_POS, N_("VMDK: descriptor too big in '%s'"), pImage->pszFilename);
1545 for (unsigned i = pDescriptor->cLines + 1; i > uLast + 1; i--)
1546 {
1547 pDescriptor->aLines[i] = pDescriptor->aLines[i - 1];
1548 if (pDescriptor->aNextLines[i - 1])
1549 pDescriptor->aNextLines[i] = pDescriptor->aNextLines[i - 1] + 1;
1550 else
1551 pDescriptor->aNextLines[i] = 0;
1552 }
1553 uStart = uLast + 1;
1554 pDescriptor->aNextLines[uLast] = uStart;
1555 pDescriptor->aNextLines[uStart] = 0;
1556 pDescriptor->cLines++;
1557 pszTmp = pDescriptor->aLines[uStart];
1558 memmove(pszTmp + cbDiff, pszTmp,
1559 pDescriptor->aLines[pDescriptor->cLines] - pszTmp);
1560 memcpy(pDescriptor->aLines[uStart], pszKey, cbKey);
1561 pDescriptor->aLines[uStart][cbKey] = '=';
1562 memcpy(pDescriptor->aLines[uStart] + cbKey + 1, pszValue, cbValue + 1);
1563 for (unsigned i = uStart + 1; i <= pDescriptor->cLines; i++)
1564 pDescriptor->aLines[i] += cbDiff;
1565
1566 /* Adjust starting line numbers of following descriptor sections. */
1567 if (uStart <= pDescriptor->uFirstExtent)
1568 pDescriptor->uFirstExtent++;
1569 if (uStart <= pDescriptor->uFirstDDB)
1570 pDescriptor->uFirstDDB++;
1571 }
1572 pDescriptor->fDirty = true;
1573 return VINF_SUCCESS;
1574}
1575
1576static int vmdkDescBaseGetU32(PVMDKDESCRIPTOR pDescriptor, const char *pszKey,
1577 uint32_t *puValue)
1578{
1579 const char *pszValue;
1580
1581 if (!vmdkDescGetStr(pDescriptor, pDescriptor->uFirstDesc, pszKey,
1582 &pszValue))
1583 return VERR_VD_VMDK_VALUE_NOT_FOUND;
1584 return RTStrToUInt32Ex(pszValue, NULL, 10, puValue);
1585}
1586
1587static int vmdkDescBaseGetStr(PVMDKIMAGE pImage, PVMDKDESCRIPTOR pDescriptor,
1588 const char *pszKey, const char **ppszValue)
1589{
1590 const char *pszValue;
1591 char *pszValueUnquoted;
1592
1593 if (!vmdkDescGetStr(pDescriptor, pDescriptor->uFirstDesc, pszKey,
1594 &pszValue))
1595 return VERR_VD_VMDK_VALUE_NOT_FOUND;
1596 int rc = vmdkStringUnquote(pImage, pszValue, &pszValueUnquoted, NULL);
1597 if (RT_FAILURE(rc))
1598 return rc;
1599 *ppszValue = pszValueUnquoted;
1600 return rc;
1601}
1602
1603static int vmdkDescBaseSetStr(PVMDKIMAGE pImage, PVMDKDESCRIPTOR pDescriptor,
1604 const char *pszKey, const char *pszValue)
1605{
1606 char *pszValueQuoted;
1607
1608 int rc = RTStrAPrintf(&pszValueQuoted, "\"%s\"", pszValue);
1609 if (RT_FAILURE(rc))
1610 return rc;
1611 rc = vmdkDescSetStr(pImage, pDescriptor, pDescriptor->uFirstDesc, pszKey,
1612 pszValueQuoted);
1613 RTStrFree(pszValueQuoted);
1614 return rc;
1615}
1616
1617static void vmdkDescExtRemoveDummy(PVMDKIMAGE pImage,
1618 PVMDKDESCRIPTOR pDescriptor)
1619{
1620 unsigned uEntry = pDescriptor->uFirstExtent;
1621 ssize_t cbDiff;
1622
1623 if (!uEntry)
1624 return;
1625
1626 cbDiff = strlen(pDescriptor->aLines[uEntry]) + 1;
1627 /* Move everything including \0 in the entry marking the end of buffer. */
1628 memmove(pDescriptor->aLines[uEntry], pDescriptor->aLines[uEntry + 1],
1629 pDescriptor->aLines[pDescriptor->cLines] - pDescriptor->aLines[uEntry + 1] + 1);
1630 for (unsigned i = uEntry + 1; i <= pDescriptor->cLines; i++)
1631 {
1632 pDescriptor->aLines[i - 1] = pDescriptor->aLines[i] - cbDiff;
1633 if (pDescriptor->aNextLines[i])
1634 pDescriptor->aNextLines[i - 1] = pDescriptor->aNextLines[i] - 1;
1635 else
1636 pDescriptor->aNextLines[i - 1] = 0;
1637 }
1638 pDescriptor->cLines--;
1639 if (pDescriptor->uFirstDDB)
1640 pDescriptor->uFirstDDB--;
1641
1642 return;
1643}
1644
1645static int vmdkDescExtInsert(PVMDKIMAGE pImage, PVMDKDESCRIPTOR pDescriptor,
1646 VMDKACCESS enmAccess, uint64_t cNominalSectors,
1647 VMDKETYPE enmType, const char *pszBasename,
1648 uint64_t uSectorOffset)
1649{
1650 static const char *apszAccess[] = { "NOACCESS", "RDONLY", "RW" };
1651 static const char *apszType[] = { "", "SPARSE", "FLAT", "ZERO" };
1652 char *pszTmp;
1653 unsigned uStart = pDescriptor->uFirstExtent, uLast = 0;
1654 char szExt[1024];
1655 ssize_t cbDiff;
1656
1657 /* Find last entry in extent description. */
1658 while (uStart)
1659 {
1660 if (!pDescriptor->aNextLines[uStart])
1661 uLast = uStart;
1662 uStart = pDescriptor->aNextLines[uStart];
1663 }
1664
1665 if (enmType == VMDKETYPE_ZERO)
1666 {
1667 RTStrPrintf(szExt, sizeof(szExt), "%s %llu %s ", apszAccess[enmAccess],
1668 cNominalSectors, apszType[enmType]);
1669 }
1670 else
1671 {
1672 if (!uSectorOffset)
1673 RTStrPrintf(szExt, sizeof(szExt), "%s %llu %s \"%s\"",
1674 apszAccess[enmAccess], cNominalSectors,
1675 apszType[enmType], pszBasename);
1676 else
1677 RTStrPrintf(szExt, sizeof(szExt), "%s %llu %s \"%s\" %llu",
1678 apszAccess[enmAccess], cNominalSectors,
1679 apszType[enmType], pszBasename, uSectorOffset);
1680 }
1681 cbDiff = strlen(szExt) + 1;
1682
1683 /* Check for buffer overflow. */
1684 if ( (pDescriptor->cLines >= VMDK_DESCRIPTOR_LINES_MAX - 1)
1685 || ( pDescriptor->aLines[pDescriptor->cLines]
1686 - pDescriptor->aLines[0] > (ptrdiff_t)pDescriptor->cbDescAlloc - cbDiff))
1687 return vmdkError(pImage, VERR_BUFFER_OVERFLOW, RT_SRC_POS, N_("VMDK: descriptor too big in '%s'"), pImage->pszFilename);
1688
1689 for (unsigned i = pDescriptor->cLines + 1; i > uLast + 1; i--)
1690 {
1691 pDescriptor->aLines[i] = pDescriptor->aLines[i - 1];
1692 if (pDescriptor->aNextLines[i - 1])
1693 pDescriptor->aNextLines[i] = pDescriptor->aNextLines[i - 1] + 1;
1694 else
1695 pDescriptor->aNextLines[i] = 0;
1696 }
1697 uStart = uLast + 1;
1698 pDescriptor->aNextLines[uLast] = uStart;
1699 pDescriptor->aNextLines[uStart] = 0;
1700 pDescriptor->cLines++;
1701 pszTmp = pDescriptor->aLines[uStart];
1702 memmove(pszTmp + cbDiff, pszTmp,
1703 pDescriptor->aLines[pDescriptor->cLines] - pszTmp);
1704 memcpy(pDescriptor->aLines[uStart], szExt, cbDiff);
1705 for (unsigned i = uStart + 1; i <= pDescriptor->cLines; i++)
1706 pDescriptor->aLines[i] += cbDiff;
1707
1708 /* Adjust starting line numbers of following descriptor sections. */
1709 if (uStart <= pDescriptor->uFirstDDB)
1710 pDescriptor->uFirstDDB++;
1711
1712 pDescriptor->fDirty = true;
1713 return VINF_SUCCESS;
1714}
1715
1716static int vmdkDescDDBGetStr(PVMDKIMAGE pImage, PVMDKDESCRIPTOR pDescriptor,
1717 const char *pszKey, const char **ppszValue)
1718{
1719 const char *pszValue;
1720 char *pszValueUnquoted;
1721
1722 if (!vmdkDescGetStr(pDescriptor, pDescriptor->uFirstDDB, pszKey,
1723 &pszValue))
1724 return VERR_VD_VMDK_VALUE_NOT_FOUND;
1725 int rc = vmdkStringUnquote(pImage, pszValue, &pszValueUnquoted, NULL);
1726 if (RT_FAILURE(rc))
1727 return rc;
1728 *ppszValue = pszValueUnquoted;
1729 return rc;
1730}
1731
1732static int vmdkDescDDBGetU32(PVMDKIMAGE pImage, PVMDKDESCRIPTOR pDescriptor,
1733 const char *pszKey, uint32_t *puValue)
1734{
1735 const char *pszValue;
1736 char *pszValueUnquoted;
1737
1738 if (!vmdkDescGetStr(pDescriptor, pDescriptor->uFirstDDB, pszKey,
1739 &pszValue))
1740 return VERR_VD_VMDK_VALUE_NOT_FOUND;
1741 int rc = vmdkStringUnquote(pImage, pszValue, &pszValueUnquoted, NULL);
1742 if (RT_FAILURE(rc))
1743 return rc;
1744 rc = RTStrToUInt32Ex(pszValueUnquoted, NULL, 10, puValue);
1745 RTMemTmpFree(pszValueUnquoted);
1746 return rc;
1747}
1748
1749static int vmdkDescDDBGetUuid(PVMDKIMAGE pImage, PVMDKDESCRIPTOR pDescriptor,
1750 const char *pszKey, PRTUUID pUuid)
1751{
1752 const char *pszValue;
1753 char *pszValueUnquoted;
1754
1755 if (!vmdkDescGetStr(pDescriptor, pDescriptor->uFirstDDB, pszKey,
1756 &pszValue))
1757 return VERR_VD_VMDK_VALUE_NOT_FOUND;
1758 int rc = vmdkStringUnquote(pImage, pszValue, &pszValueUnquoted, NULL);
1759 if (RT_FAILURE(rc))
1760 return rc;
1761 rc = RTUuidFromStr(pUuid, pszValueUnquoted);
1762 RTMemTmpFree(pszValueUnquoted);
1763 return rc;
1764}
1765
1766static int vmdkDescDDBSetStr(PVMDKIMAGE pImage, PVMDKDESCRIPTOR pDescriptor,
1767 const char *pszKey, const char *pszVal)
1768{
1769 int rc;
1770 char *pszValQuoted;
1771
1772 if (pszVal)
1773 {
1774 rc = RTStrAPrintf(&pszValQuoted, "\"%s\"", pszVal);
1775 if (RT_FAILURE(rc))
1776 return rc;
1777 }
1778 else
1779 pszValQuoted = NULL;
1780 rc = vmdkDescSetStr(pImage, pDescriptor, pDescriptor->uFirstDDB, pszKey,
1781 pszValQuoted);
1782 if (pszValQuoted)
1783 RTStrFree(pszValQuoted);
1784 return rc;
1785}
1786
1787static int vmdkDescDDBSetUuid(PVMDKIMAGE pImage, PVMDKDESCRIPTOR pDescriptor,
1788 const char *pszKey, PCRTUUID pUuid)
1789{
1790 char *pszUuid;
1791
1792 int rc = RTStrAPrintf(&pszUuid, "\"%RTuuid\"", pUuid);
1793 if (RT_FAILURE(rc))
1794 return rc;
1795 rc = vmdkDescSetStr(pImage, pDescriptor, pDescriptor->uFirstDDB, pszKey,
1796 pszUuid);
1797 RTStrFree(pszUuid);
1798 return rc;
1799}
1800
1801static int vmdkDescDDBSetU32(PVMDKIMAGE pImage, PVMDKDESCRIPTOR pDescriptor,
1802 const char *pszKey, uint32_t uValue)
1803{
1804 char *pszValue;
1805
1806 int rc = RTStrAPrintf(&pszValue, "\"%d\"", uValue);
1807 if (RT_FAILURE(rc))
1808 return rc;
1809 rc = vmdkDescSetStr(pImage, pDescriptor, pDescriptor->uFirstDDB, pszKey,
1810 pszValue);
1811 RTStrFree(pszValue);
1812 return rc;
1813}
1814
1815static int vmdkPreprocessDescriptor(PVMDKIMAGE pImage, char *pDescData,
1816 size_t cbDescData,
1817 PVMDKDESCRIPTOR pDescriptor)
1818{
1819 int rc = VINF_SUCCESS;
1820 unsigned cLine = 0, uLastNonEmptyLine = 0;
1821 char *pTmp = pDescData;
1822
1823 pDescriptor->cbDescAlloc = cbDescData;
1824 while (*pTmp != '\0')
1825 {
1826 pDescriptor->aLines[cLine++] = pTmp;
1827 if (cLine >= VMDK_DESCRIPTOR_LINES_MAX)
1828 {
1829 rc = vmdkError(pImage, VERR_VD_VMDK_INVALID_HEADER, RT_SRC_POS, N_("VMDK: descriptor too big in '%s'"), pImage->pszFilename);
1830 goto out;
1831 }
1832
1833 while (*pTmp != '\0' && *pTmp != '\n')
1834 {
1835 if (*pTmp == '\r')
1836 {
1837 if (*(pTmp + 1) != '\n')
1838 {
1839 rc = vmdkError(pImage, VERR_VD_VMDK_INVALID_HEADER, RT_SRC_POS, N_("VMDK: unsupported end of line in descriptor in '%s'"), pImage->pszFilename);
1840 goto out;
1841 }
1842 else
1843 {
1844 /* Get rid of CR character. */
1845 *pTmp = '\0';
1846 }
1847 }
1848 pTmp++;
1849 }
1850 /* Get rid of LF character. */
1851 if (*pTmp == '\n')
1852 {
1853 *pTmp = '\0';
1854 pTmp++;
1855 }
1856 }
1857 pDescriptor->cLines = cLine;
1858 /* Pointer right after the end of the used part of the buffer. */
1859 pDescriptor->aLines[cLine] = pTmp;
1860
1861 if ( strcmp(pDescriptor->aLines[0], "# Disk DescriptorFile")
1862 && strcmp(pDescriptor->aLines[0], "# Disk Descriptor File"))
1863 {
1864 rc = vmdkError(pImage, VERR_VD_VMDK_INVALID_HEADER, RT_SRC_POS, N_("VMDK: descriptor does not start as expected in '%s'"), pImage->pszFilename);
1865 goto out;
1866 }
1867
1868 /* Initialize those, because we need to be able to reopen an image. */
1869 pDescriptor->uFirstDesc = 0;
1870 pDescriptor->uFirstExtent = 0;
1871 pDescriptor->uFirstDDB = 0;
1872 for (unsigned i = 0; i < cLine; i++)
1873 {
1874 if (*pDescriptor->aLines[i] != '#' && *pDescriptor->aLines[i] != '\0')
1875 {
1876 if ( !strncmp(pDescriptor->aLines[i], "RW", 2)
1877 || !strncmp(pDescriptor->aLines[i], "RDONLY", 6)
1878 || !strncmp(pDescriptor->aLines[i], "NOACCESS", 8) )
1879 {
1880 /* An extent descriptor. */
1881 if (!pDescriptor->uFirstDesc || pDescriptor->uFirstDDB)
1882 {
1883 /* Incorrect ordering of entries. */
1884 rc = vmdkError(pImage, VERR_VD_VMDK_INVALID_HEADER, RT_SRC_POS, N_("VMDK: incorrect ordering of entries in descriptor in '%s'"), pImage->pszFilename);
1885 goto out;
1886 }
1887 if (!pDescriptor->uFirstExtent)
1888 {
1889 pDescriptor->uFirstExtent = i;
1890 uLastNonEmptyLine = 0;
1891 }
1892 }
1893 else if (!strncmp(pDescriptor->aLines[i], "ddb.", 4))
1894 {
1895 /* A disk database entry. */
1896 if (!pDescriptor->uFirstDesc || !pDescriptor->uFirstExtent)
1897 {
1898 /* Incorrect ordering of entries. */
1899 rc = vmdkError(pImage, VERR_VD_VMDK_INVALID_HEADER, RT_SRC_POS, N_("VMDK: incorrect ordering of entries in descriptor in '%s'"), pImage->pszFilename);
1900 goto out;
1901 }
1902 if (!pDescriptor->uFirstDDB)
1903 {
1904 pDescriptor->uFirstDDB = i;
1905 uLastNonEmptyLine = 0;
1906 }
1907 }
1908 else
1909 {
1910 /* A normal entry. */
1911 if (pDescriptor->uFirstExtent || pDescriptor->uFirstDDB)
1912 {
1913 /* Incorrect ordering of entries. */
1914 rc = vmdkError(pImage, VERR_VD_VMDK_INVALID_HEADER, RT_SRC_POS, N_("VMDK: incorrect ordering of entries in descriptor in '%s'"), pImage->pszFilename);
1915 goto out;
1916 }
1917 if (!pDescriptor->uFirstDesc)
1918 {
1919 pDescriptor->uFirstDesc = i;
1920 uLastNonEmptyLine = 0;
1921 }
1922 }
1923 if (uLastNonEmptyLine)
1924 pDescriptor->aNextLines[uLastNonEmptyLine] = i;
1925 uLastNonEmptyLine = i;
1926 }
1927 }
1928
1929out:
1930 return rc;
1931}
1932
1933static int vmdkDescSetPCHSGeometry(PVMDKIMAGE pImage,
1934 PCPDMMEDIAGEOMETRY pPCHSGeometry)
1935{
1936 int rc = vmdkDescDDBSetU32(pImage, &pImage->Descriptor,
1937 VMDK_DDB_GEO_PCHS_CYLINDERS,
1938 pPCHSGeometry->cCylinders);
1939 if (RT_FAILURE(rc))
1940 return rc;
1941 rc = vmdkDescDDBSetU32(pImage, &pImage->Descriptor,
1942 VMDK_DDB_GEO_PCHS_HEADS,
1943 pPCHSGeometry->cHeads);
1944 if (RT_FAILURE(rc))
1945 return rc;
1946 rc = vmdkDescDDBSetU32(pImage, &pImage->Descriptor,
1947 VMDK_DDB_GEO_PCHS_SECTORS,
1948 pPCHSGeometry->cSectors);
1949 return rc;
1950}
1951
1952static int vmdkDescSetLCHSGeometry(PVMDKIMAGE pImage,
1953 PCPDMMEDIAGEOMETRY pLCHSGeometry)
1954{
1955 int rc = vmdkDescDDBSetU32(pImage, &pImage->Descriptor,
1956 VMDK_DDB_GEO_LCHS_CYLINDERS,
1957 pLCHSGeometry->cCylinders);
1958 if (RT_FAILURE(rc))
1959 return rc;
1960 rc = vmdkDescDDBSetU32(pImage, &pImage->Descriptor,
1961 VMDK_DDB_GEO_LCHS_HEADS,
1962 pLCHSGeometry->cHeads);
1963 if (RT_FAILURE(rc))
1964 return rc;
1965 rc = vmdkDescDDBSetU32(pImage, &pImage->Descriptor,
1966 VMDK_DDB_GEO_LCHS_SECTORS,
1967 pLCHSGeometry->cSectors);
1968 return rc;
1969}
1970
1971static int vmdkCreateDescriptor(PVMDKIMAGE pImage, char *pDescData,
1972 size_t cbDescData, PVMDKDESCRIPTOR pDescriptor)
1973{
1974 int rc;
1975
1976 pDescriptor->uFirstDesc = 0;
1977 pDescriptor->uFirstExtent = 0;
1978 pDescriptor->uFirstDDB = 0;
1979 pDescriptor->cLines = 0;
1980 pDescriptor->cbDescAlloc = cbDescData;
1981 pDescriptor->fDirty = false;
1982 pDescriptor->aLines[pDescriptor->cLines] = pDescData;
1983 memset(pDescriptor->aNextLines, '\0', sizeof(pDescriptor->aNextLines));
1984
1985 rc = vmdkDescInitStr(pImage, pDescriptor, "# Disk DescriptorFile");
1986 if (RT_FAILURE(rc))
1987 goto out;
1988 rc = vmdkDescInitStr(pImage, pDescriptor, "version=1");
1989 if (RT_FAILURE(rc))
1990 goto out;
1991 pDescriptor->uFirstDesc = pDescriptor->cLines - 1;
1992 rc = vmdkDescInitStr(pImage, pDescriptor, "");
1993 if (RT_FAILURE(rc))
1994 goto out;
1995 rc = vmdkDescInitStr(pImage, pDescriptor, "# Extent description");
1996 if (RT_FAILURE(rc))
1997 goto out;
1998 rc = vmdkDescInitStr(pImage, pDescriptor, "NOACCESS 0 ZERO ");
1999 if (RT_FAILURE(rc))
2000 goto out;
2001 pDescriptor->uFirstExtent = pDescriptor->cLines - 1;
2002 rc = vmdkDescInitStr(pImage, pDescriptor, "");
2003 if (RT_FAILURE(rc))
2004 goto out;
2005 /* The trailing space is created by VMware, too. */
2006 rc = vmdkDescInitStr(pImage, pDescriptor, "# The disk Data Base ");
2007 if (RT_FAILURE(rc))
2008 goto out;
2009 rc = vmdkDescInitStr(pImage, pDescriptor, "#DDB");
2010 if (RT_FAILURE(rc))
2011 goto out;
2012 rc = vmdkDescInitStr(pImage, pDescriptor, "");
2013 if (RT_FAILURE(rc))
2014 goto out;
2015 rc = vmdkDescInitStr(pImage, pDescriptor, "ddb.virtualHWVersion = \"4\"");
2016 if (RT_FAILURE(rc))
2017 goto out;
2018 pDescriptor->uFirstDDB = pDescriptor->cLines - 1;
2019
2020 /* Now that the framework is in place, use the normal functions to insert
2021 * the remaining keys. */
2022 char szBuf[9];
2023 RTStrPrintf(szBuf, sizeof(szBuf), "%08x", RTRandU32());
2024 rc = vmdkDescSetStr(pImage, pDescriptor, pDescriptor->uFirstDesc,
2025 "CID", szBuf);
2026 if (RT_FAILURE(rc))
2027 goto out;
2028 rc = vmdkDescSetStr(pImage, pDescriptor, pDescriptor->uFirstDesc,
2029 "parentCID", "ffffffff");
2030 if (RT_FAILURE(rc))
2031 goto out;
2032
2033 rc = vmdkDescDDBSetStr(pImage, pDescriptor, "ddb.adapterType", "ide");
2034 if (RT_FAILURE(rc))
2035 goto out;
2036
2037out:
2038 return rc;
2039}
2040
2041static int vmdkParseDescriptor(PVMDKIMAGE pImage, char *pDescData,
2042 size_t cbDescData)
2043{
2044 int rc;
2045 unsigned cExtents;
2046 unsigned uLine;
2047
2048 rc = vmdkPreprocessDescriptor(pImage, pDescData, cbDescData,
2049 &pImage->Descriptor);
2050 if (RT_FAILURE(rc))
2051 return rc;
2052
2053 /* Check version, must be 1. */
2054 uint32_t uVersion;
2055 rc = vmdkDescBaseGetU32(&pImage->Descriptor, "version", &uVersion);
2056 if (RT_FAILURE(rc))
2057 return vmdkError(pImage, rc, RT_SRC_POS, N_("VMDK: error finding key 'version' in descriptor in '%s'"), pImage->pszFilename);
2058 if (uVersion != 1)
2059 return vmdkError(pImage, VERR_VD_VMDK_UNSUPPORTED_VERSION, RT_SRC_POS, N_("VMDK: unsupported format version in descriptor in '%s'"), pImage->pszFilename);
2060
2061 /* Get image creation type and determine image flags. */
2062 const char *pszCreateType;
2063 rc = vmdkDescBaseGetStr(pImage, &pImage->Descriptor, "createType",
2064 &pszCreateType);
2065 if (RT_FAILURE(rc))
2066 return vmdkError(pImage, rc, RT_SRC_POS, N_("VMDK: cannot get image type from descriptor in '%s'"), pImage->pszFilename);
2067 if ( !strcmp(pszCreateType, "twoGbMaxExtentSparse")
2068 || !strcmp(pszCreateType, "twoGbMaxExtentFlat"))
2069 pImage->uImageFlags |= VD_VMDK_IMAGE_FLAGS_SPLIT_2G;
2070 else if ( !strcmp(pszCreateType, "partitionedDevice")
2071 || !strcmp(pszCreateType, "fullDevice"))
2072 pImage->uImageFlags |= VD_VMDK_IMAGE_FLAGS_RAWDISK;
2073 else if (!strcmp(pszCreateType, "streamOptimized"))
2074 pImage->uImageFlags |= VD_VMDK_IMAGE_FLAGS_STREAM_OPTIMIZED;
2075 RTStrFree((char *)(void *)pszCreateType);
2076
2077 /* Count the number of extent config entries. */
2078 for (uLine = pImage->Descriptor.uFirstExtent, cExtents = 0;
2079 uLine != 0;
2080 uLine = pImage->Descriptor.aNextLines[uLine], cExtents++)
2081 /* nothing */;
2082
2083 if (!pImage->pDescData && cExtents != 1)
2084 {
2085 /* Monolithic image, must have only one extent (already opened). */
2086 return vmdkError(pImage, VERR_VD_VMDK_INVALID_HEADER, RT_SRC_POS, N_("VMDK: monolithic image may only have one extent in '%s'"), pImage->pszFilename);
2087 }
2088
2089 if (pImage->pDescData)
2090 {
2091 /* Non-monolithic image, extents need to be allocated. */
2092 rc = vmdkCreateExtents(pImage, cExtents);
2093 if (RT_FAILURE(rc))
2094 return rc;
2095 }
2096
2097 for (unsigned i = 0, uLine = pImage->Descriptor.uFirstExtent;
2098 i < cExtents; i++, uLine = pImage->Descriptor.aNextLines[uLine])
2099 {
2100 char *pszLine = pImage->Descriptor.aLines[uLine];
2101
2102 /* Access type of the extent. */
2103 if (!strncmp(pszLine, "RW", 2))
2104 {
2105 pImage->pExtents[i].enmAccess = VMDKACCESS_READWRITE;
2106 pszLine += 2;
2107 }
2108 else if (!strncmp(pszLine, "RDONLY", 6))
2109 {
2110 pImage->pExtents[i].enmAccess = VMDKACCESS_READONLY;
2111 pszLine += 6;
2112 }
2113 else if (!strncmp(pszLine, "NOACCESS", 8))
2114 {
2115 pImage->pExtents[i].enmAccess = VMDKACCESS_NOACCESS;
2116 pszLine += 8;
2117 }
2118 else
2119 return vmdkError(pImage, VERR_VD_VMDK_INVALID_HEADER, RT_SRC_POS, N_("VMDK: parse error in extent description in '%s'"), pImage->pszFilename);
2120 if (*pszLine++ != ' ')
2121 return vmdkError(pImage, VERR_VD_VMDK_INVALID_HEADER, RT_SRC_POS, N_("VMDK: parse error in extent description in '%s'"), pImage->pszFilename);
2122
2123 /* Nominal size of the extent. */
2124 rc = RTStrToUInt64Ex(pszLine, &pszLine, 10,
2125 &pImage->pExtents[i].cNominalSectors);
2126 if (RT_FAILURE(rc))
2127 return vmdkError(pImage, VERR_VD_VMDK_INVALID_HEADER, RT_SRC_POS, N_("VMDK: parse error in extent description in '%s'"), pImage->pszFilename);
2128 if (*pszLine++ != ' ')
2129 return vmdkError(pImage, VERR_VD_VMDK_INVALID_HEADER, RT_SRC_POS, N_("VMDK: parse error in extent description in '%s'"), pImage->pszFilename);
2130
2131 /* Type of the extent. */
2132#ifdef VBOX_WITH_VMDK_ESX
2133 /** @todo Add the ESX extent types. Not necessary for now because
2134 * the ESX extent types are only used inside an ESX server. They are
2135 * automatically converted if the VMDK is exported. */
2136#endif /* VBOX_WITH_VMDK_ESX */
2137 if (!strncmp(pszLine, "SPARSE", 6))
2138 {
2139 pImage->pExtents[i].enmType = VMDKETYPE_HOSTED_SPARSE;
2140 pszLine += 6;
2141 }
2142 else if (!strncmp(pszLine, "FLAT", 4))
2143 {
2144 pImage->pExtents[i].enmType = VMDKETYPE_FLAT;
2145 pszLine += 4;
2146 }
2147 else if (!strncmp(pszLine, "ZERO", 4))
2148 {
2149 pImage->pExtents[i].enmType = VMDKETYPE_ZERO;
2150 pszLine += 4;
2151 }
2152 else
2153 return vmdkError(pImage, VERR_VD_VMDK_INVALID_HEADER, RT_SRC_POS, N_("VMDK: parse error in extent description in '%s'"), pImage->pszFilename);
2154 if (pImage->pExtents[i].enmType == VMDKETYPE_ZERO)
2155 {
2156 /* This one has no basename or offset. */
2157 if (*pszLine == ' ')
2158 pszLine++;
2159 if (*pszLine != '\0')
2160 return vmdkError(pImage, VERR_VD_VMDK_INVALID_HEADER, RT_SRC_POS, N_("VMDK: parse error in extent description in '%s'"), pImage->pszFilename);
2161 pImage->pExtents[i].pszBasename = NULL;
2162 }
2163 else
2164 {
2165 /* All other extent types have basename and optional offset. */
2166 if (*pszLine++ != ' ')
2167 return vmdkError(pImage, VERR_VD_VMDK_INVALID_HEADER, RT_SRC_POS, N_("VMDK: parse error in extent description in '%s'"), pImage->pszFilename);
2168
2169 /* Basename of the image. Surrounded by quotes. */
2170 char *pszBasename;
2171 rc = vmdkStringUnquote(pImage, pszLine, &pszBasename, &pszLine);
2172 if (RT_FAILURE(rc))
2173 return rc;
2174 pImage->pExtents[i].pszBasename = pszBasename;
2175 if (*pszLine == ' ')
2176 {
2177 pszLine++;
2178 if (*pszLine != '\0')
2179 {
2180 /* Optional offset in extent specified. */
2181 rc = RTStrToUInt64Ex(pszLine, &pszLine, 10,
2182 &pImage->pExtents[i].uSectorOffset);
2183 if (RT_FAILURE(rc))
2184 return vmdkError(pImage, VERR_VD_VMDK_INVALID_HEADER, RT_SRC_POS, N_("VMDK: parse error in extent description in '%s'"), pImage->pszFilename);
2185 }
2186 }
2187
2188 if (*pszLine != '\0')
2189 return vmdkError(pImage, VERR_VD_VMDK_INVALID_HEADER, RT_SRC_POS, N_("VMDK: parse error in extent description in '%s'"), pImage->pszFilename);
2190 }
2191 }
2192
2193 /* Determine PCHS geometry (autogenerate if necessary). */
2194 rc = vmdkDescDDBGetU32(pImage, &pImage->Descriptor,
2195 VMDK_DDB_GEO_PCHS_CYLINDERS,
2196 &pImage->PCHSGeometry.cCylinders);
2197 if (rc == VERR_VD_VMDK_VALUE_NOT_FOUND)
2198 pImage->PCHSGeometry.cCylinders = 0;
2199 else if (RT_FAILURE(rc))
2200 return vmdkError(pImage, rc, RT_SRC_POS, N_("VMDK: error getting PCHS geometry from extent description in '%s'"), pImage->pszFilename);
2201 rc = vmdkDescDDBGetU32(pImage, &pImage->Descriptor,
2202 VMDK_DDB_GEO_PCHS_HEADS,
2203 &pImage->PCHSGeometry.cHeads);
2204 if (rc == VERR_VD_VMDK_VALUE_NOT_FOUND)
2205 pImage->PCHSGeometry.cHeads = 0;
2206 else if (RT_FAILURE(rc))
2207 return vmdkError(pImage, rc, RT_SRC_POS, N_("VMDK: error getting PCHS geometry from extent description in '%s'"), pImage->pszFilename);
2208 rc = vmdkDescDDBGetU32(pImage, &pImage->Descriptor,
2209 VMDK_DDB_GEO_PCHS_SECTORS,
2210 &pImage->PCHSGeometry.cSectors);
2211 if (rc == VERR_VD_VMDK_VALUE_NOT_FOUND)
2212 pImage->PCHSGeometry.cSectors = 0;
2213 else if (RT_FAILURE(rc))
2214 return vmdkError(pImage, rc, RT_SRC_POS, N_("VMDK: error getting PCHS geometry from extent description in '%s'"), pImage->pszFilename);
2215 if ( pImage->PCHSGeometry.cCylinders == 0
2216 || pImage->PCHSGeometry.cHeads == 0
2217 || pImage->PCHSGeometry.cHeads > 16
2218 || pImage->PCHSGeometry.cSectors == 0
2219 || pImage->PCHSGeometry.cSectors > 63)
2220 {
2221 /* Mark PCHS geometry as not yet valid (can't do the calculation here
2222 * as the total image size isn't known yet). */
2223 pImage->PCHSGeometry.cCylinders = 0;
2224 pImage->PCHSGeometry.cHeads = 16;
2225 pImage->PCHSGeometry.cSectors = 63;
2226 }
2227
2228 /* Determine LCHS geometry (set to 0 if not specified). */
2229 rc = vmdkDescDDBGetU32(pImage, &pImage->Descriptor,
2230 VMDK_DDB_GEO_LCHS_CYLINDERS,
2231 &pImage->LCHSGeometry.cCylinders);
2232 if (rc == VERR_VD_VMDK_VALUE_NOT_FOUND)
2233 pImage->LCHSGeometry.cCylinders = 0;
2234 else if (RT_FAILURE(rc))
2235 return vmdkError(pImage, rc, RT_SRC_POS, N_("VMDK: error getting LCHS geometry from extent description in '%s'"), pImage->pszFilename);
2236 rc = vmdkDescDDBGetU32(pImage, &pImage->Descriptor,
2237 VMDK_DDB_GEO_LCHS_HEADS,
2238 &pImage->LCHSGeometry.cHeads);
2239 if (rc == VERR_VD_VMDK_VALUE_NOT_FOUND)
2240 pImage->LCHSGeometry.cHeads = 0;
2241 else if (RT_FAILURE(rc))
2242 return vmdkError(pImage, rc, RT_SRC_POS, N_("VMDK: error getting LCHS geometry from extent description in '%s'"), pImage->pszFilename);
2243 rc = vmdkDescDDBGetU32(pImage, &pImage->Descriptor,
2244 VMDK_DDB_GEO_LCHS_SECTORS,
2245 &pImage->LCHSGeometry.cSectors);
2246 if (rc == VERR_VD_VMDK_VALUE_NOT_FOUND)
2247 pImage->LCHSGeometry.cSectors = 0;
2248 else if (RT_FAILURE(rc))
2249 return vmdkError(pImage, rc, RT_SRC_POS, N_("VMDK: error getting LCHS geometry from extent description in '%s'"), pImage->pszFilename);
2250 if ( pImage->LCHSGeometry.cCylinders == 0
2251 || pImage->LCHSGeometry.cHeads == 0
2252 || pImage->LCHSGeometry.cSectors == 0)
2253 {
2254 pImage->LCHSGeometry.cCylinders = 0;
2255 pImage->LCHSGeometry.cHeads = 0;
2256 pImage->LCHSGeometry.cSectors = 0;
2257 }
2258
2259 /* Get image UUID. */
2260 rc = vmdkDescDDBGetUuid(pImage, &pImage->Descriptor, VMDK_DDB_IMAGE_UUID,
2261 &pImage->ImageUuid);
2262 if (rc == VERR_VD_VMDK_VALUE_NOT_FOUND)
2263 {
2264 /* Image without UUID. Probably created by VMware and not yet used
2265 * by VirtualBox. Can only be added for images opened in read/write
2266 * mode, so don't bother producing a sensible UUID otherwise. */
2267 if (pImage->uOpenFlags & VD_OPEN_FLAGS_READONLY)
2268 RTUuidClear(&pImage->ImageUuid);
2269 else
2270 {
2271 rc = RTUuidCreate(&pImage->ImageUuid);
2272 if (RT_FAILURE(rc))
2273 return rc;
2274 rc = vmdkDescDDBSetUuid(pImage, &pImage->Descriptor,
2275 VMDK_DDB_IMAGE_UUID, &pImage->ImageUuid);
2276 if (RT_FAILURE(rc))
2277 return vmdkError(pImage, rc, RT_SRC_POS, N_("VMDK: error storing image UUID in descriptor in '%s'"), pImage->pszFilename);
2278 }
2279 }
2280 else if (RT_FAILURE(rc))
2281 return rc;
2282
2283 /* Get image modification UUID. */
2284 rc = vmdkDescDDBGetUuid(pImage, &pImage->Descriptor,
2285 VMDK_DDB_MODIFICATION_UUID,
2286 &pImage->ModificationUuid);
2287 if (rc == VERR_VD_VMDK_VALUE_NOT_FOUND)
2288 {
2289 /* Image without UUID. Probably created by VMware and not yet used
2290 * by VirtualBox. Can only be added for images opened in read/write
2291 * mode, so don't bother producing a sensible UUID otherwise. */
2292 if (pImage->uOpenFlags & VD_OPEN_FLAGS_READONLY)
2293 RTUuidClear(&pImage->ModificationUuid);
2294 else
2295 {
2296 rc = RTUuidCreate(&pImage->ModificationUuid);
2297 if (RT_FAILURE(rc))
2298 return rc;
2299 rc = vmdkDescDDBSetUuid(pImage, &pImage->Descriptor,
2300 VMDK_DDB_MODIFICATION_UUID,
2301 &pImage->ModificationUuid);
2302 if (RT_FAILURE(rc))
2303 return vmdkError(pImage, rc, RT_SRC_POS, N_("VMDK: error storing image modification UUID in descriptor in '%s'"), pImage->pszFilename);
2304 }
2305 }
2306 else if (RT_FAILURE(rc))
2307 return rc;
2308
2309 /* Get UUID of parent image. */
2310 rc = vmdkDescDDBGetUuid(pImage, &pImage->Descriptor, VMDK_DDB_PARENT_UUID,
2311 &pImage->ParentUuid);
2312 if (rc == VERR_VD_VMDK_VALUE_NOT_FOUND)
2313 {
2314 /* Image without UUID. Probably created by VMware and not yet used
2315 * by VirtualBox. Can only be added for images opened in read/write
2316 * mode, so don't bother producing a sensible UUID otherwise. */
2317 if (pImage->uOpenFlags & VD_OPEN_FLAGS_READONLY)
2318 RTUuidClear(&pImage->ParentUuid);
2319 else
2320 {
2321 rc = RTUuidClear(&pImage->ParentUuid);
2322 if (RT_FAILURE(rc))
2323 return rc;
2324 rc = vmdkDescDDBSetUuid(pImage, &pImage->Descriptor,
2325 VMDK_DDB_PARENT_UUID, &pImage->ParentUuid);
2326 if (RT_FAILURE(rc))
2327 return vmdkError(pImage, rc, RT_SRC_POS, N_("VMDK: error storing parent UUID in descriptor in '%s'"), pImage->pszFilename);
2328 }
2329 }
2330 else if (RT_FAILURE(rc))
2331 return rc;
2332
2333 /* Get parent image modification UUID. */
2334 rc = vmdkDescDDBGetUuid(pImage, &pImage->Descriptor,
2335 VMDK_DDB_PARENT_MODIFICATION_UUID,
2336 &pImage->ParentModificationUuid);
2337 if (rc == VERR_VD_VMDK_VALUE_NOT_FOUND)
2338 {
2339 /* Image without UUID. Probably created by VMware and not yet used
2340 * by VirtualBox. Can only be added for images opened in read/write
2341 * mode, so don't bother producing a sensible UUID otherwise. */
2342 if (pImage->uOpenFlags & VD_OPEN_FLAGS_READONLY)
2343 RTUuidClear(&pImage->ParentModificationUuid);
2344 else
2345 {
2346 rc = RTUuidCreate(&pImage->ParentModificationUuid);
2347 if (RT_FAILURE(rc))
2348 return rc;
2349 rc = vmdkDescDDBSetUuid(pImage, &pImage->Descriptor,
2350 VMDK_DDB_PARENT_MODIFICATION_UUID,
2351 &pImage->ParentModificationUuid);
2352 if (RT_FAILURE(rc))
2353 return vmdkError(pImage, rc, RT_SRC_POS, N_("VMDK: error storing parent modification UUID in descriptor in '%s'"), pImage->pszFilename);
2354 }
2355 }
2356 else if (RT_FAILURE(rc))
2357 return rc;
2358
2359 return VINF_SUCCESS;
2360}
2361
2362/**
2363 * Internal: write/update the descriptor part of the image.
2364 */
2365static int vmdkWriteDescriptor(PVMDKIMAGE pImage)
2366{
2367 int rc = VINF_SUCCESS;
2368 uint64_t cbLimit;
2369 uint64_t uOffset;
2370 PVMDKFILE pDescFile;
2371
2372 if (pImage->pDescData)
2373 {
2374 /* Separate descriptor file. */
2375 uOffset = 0;
2376 cbLimit = 0;
2377 pDescFile = pImage->pFile;
2378 }
2379 else
2380 {
2381 /* Embedded descriptor file. */
2382 uOffset = VMDK_SECTOR2BYTE(pImage->pExtents[0].uDescriptorSector);
2383 cbLimit = VMDK_SECTOR2BYTE(pImage->pExtents[0].cDescriptorSectors);
2384 cbLimit += uOffset;
2385 pDescFile = pImage->pExtents[0].pFile;
2386 }
2387 /* Bail out if there is no file to write to. */
2388 if (pDescFile == NULL)
2389 return VERR_INVALID_PARAMETER;
2390 for (unsigned i = 0; i < pImage->Descriptor.cLines; i++)
2391 {
2392 const char *psz = pImage->Descriptor.aLines[i];
2393 size_t cb = strlen(psz);
2394
2395 if (cbLimit && uOffset + cb + 1 > cbLimit)
2396 return vmdkError(pImage, VERR_BUFFER_OVERFLOW, RT_SRC_POS, N_("VMDK: descriptor too long in '%s'"), pImage->pszFilename);
2397 rc = vmdkFileWriteAt(pDescFile, uOffset, psz, cb, NULL);
2398 if (RT_FAILURE(rc))
2399 return vmdkError(pImage, rc, RT_SRC_POS, N_("VMDK: error writing descriptor in '%s'"), pImage->pszFilename);
2400 uOffset += cb;
2401 rc = vmdkFileWriteAt(pDescFile, uOffset, "\n", 1, NULL);
2402 if (RT_FAILURE(rc))
2403 return vmdkError(pImage, rc, RT_SRC_POS, N_("VMDK: error writing descriptor in '%s'"), pImage->pszFilename);
2404 uOffset++;
2405 }
2406 if (cbLimit)
2407 {
2408 /* Inefficient, but simple. */
2409 while (uOffset < cbLimit)
2410 {
2411 rc = vmdkFileWriteAt(pDescFile, uOffset, "", 1, NULL);
2412 if (RT_FAILURE(rc))
2413 return vmdkError(pImage, rc, RT_SRC_POS, N_("VMDK: error writing descriptor in '%s'"), pImage->pszFilename);
2414 uOffset++;
2415 }
2416 }
2417 else
2418 {
2419 rc = vmdkFileSetSize(pDescFile, uOffset);
2420 if (RT_FAILURE(rc))
2421 return vmdkError(pImage, rc, RT_SRC_POS, N_("VMDK: error truncating descriptor in '%s'"), pImage->pszFilename);
2422 }
2423 pImage->Descriptor.fDirty = false;
2424 return rc;
2425}
2426
2427/**
2428 * Internal: validate the consistency check values in a binary header.
2429 */
2430static int vmdkValidateHeader(PVMDKIMAGE pImage, PVMDKEXTENT pExtent, const SparseExtentHeader *pHeader)
2431{
2432 int rc = VINF_SUCCESS;
2433 if (RT_LE2H_U32(pHeader->magicNumber) != VMDK_SPARSE_MAGICNUMBER)
2434 {
2435 rc = vmdkError(pImage, VERR_VD_VMDK_INVALID_HEADER, RT_SRC_POS, N_("VMDK: incorrect magic in sparse extent header in '%s'"), pExtent->pszFullname);
2436 return rc;
2437 }
2438 if (RT_LE2H_U32(pHeader->version) != 1 && RT_LE2H_U32(pHeader->version) != 3)
2439 {
2440 rc = vmdkError(pImage, VERR_VD_VMDK_UNSUPPORTED_VERSION, RT_SRC_POS, N_("VMDK: incorrect version in sparse extent header in '%s', not a VMDK 1.0/1.1 conforming file"), pExtent->pszFullname);
2441 return rc;
2442 }
2443 if ( (RT_LE2H_U32(pHeader->flags) & 1)
2444 && ( pHeader->singleEndLineChar != '\n'
2445 || pHeader->nonEndLineChar != ' '
2446 || pHeader->doubleEndLineChar1 != '\r'
2447 || pHeader->doubleEndLineChar2 != '\n') )
2448 {
2449 rc = vmdkError(pImage, VERR_VD_VMDK_INVALID_HEADER, RT_SRC_POS, N_("VMDK: corrupted by CR/LF translation in '%s'"), pExtent->pszFullname);
2450 return rc;
2451 }
2452 return rc;
2453}
2454
2455/**
2456 * Internal: read metadata belonging to an extent with binary header, i.e.
2457 * as found in monolithic files.
2458 */
2459static int vmdkReadBinaryMetaExtent(PVMDKIMAGE pImage, PVMDKEXTENT pExtent)
2460{
2461 SparseExtentHeader Header;
2462 uint64_t cSectorsPerGDE;
2463
2464 int rc = vmdkFileReadAt(pExtent->pFile, 0, &Header, sizeof(Header), NULL);
2465 AssertRC(rc);
2466 if (RT_FAILURE(rc))
2467 {
2468 rc = vmdkError(pImage, rc, RT_SRC_POS, N_("VMDK: error reading extent header in '%s'"), pExtent->pszFullname);
2469 goto out;
2470 }
2471 rc = vmdkValidateHeader(pImage, pExtent, &Header);
2472 if (RT_FAILURE(rc))
2473 goto out;
2474 if ( RT_LE2H_U32(Header.flags & RT_BIT(17))
2475 && RT_LE2H_U64(Header.gdOffset) == VMDK_GD_AT_END)
2476 {
2477 /* Read the footer, which isn't compressed and comes before the
2478 * end-of-stream marker. This is bending the VMDK 1.1 spec, but that's
2479 * VMware reality. Theory and practice have very little in common. */
2480 uint64_t cbSize;
2481 rc = vmdkFileGetSize(pExtent->pFile, &cbSize);
2482 AssertRC(rc);
2483 if (RT_FAILURE(rc))
2484 {
2485 rc = vmdkError(pImage, rc, RT_SRC_POS, N_("VMDK: cannot get size of '%s'"), pExtent->pszFullname);
2486 goto out;
2487 }
2488 cbSize = RT_ALIGN_64(cbSize, 512);
2489 rc = vmdkFileReadAt(pExtent->pFile, cbSize - 2*512, &Header, sizeof(Header), NULL);
2490 AssertRC(rc);
2491 if (RT_FAILURE(rc))
2492 {
2493 rc = vmdkError(pImage, rc, RT_SRC_POS, N_("VMDK: error reading extent footer in '%s'"), pExtent->pszFullname);
2494 goto out;
2495 }
2496 rc = vmdkValidateHeader(pImage, pExtent, &Header);
2497 if (RT_FAILURE(rc))
2498 goto out;
2499 pExtent->fFooter = true;
2500 }
2501 pExtent->uVersion = RT_LE2H_U32(Header.version);
2502 pExtent->enmType = VMDKETYPE_HOSTED_SPARSE; /* Just dummy value, changed later. */
2503 pExtent->cSectors = RT_LE2H_U64(Header.capacity);
2504 pExtent->cSectorsPerGrain = RT_LE2H_U64(Header.grainSize);
2505 pExtent->uDescriptorSector = RT_LE2H_U64(Header.descriptorOffset);
2506 pExtent->cDescriptorSectors = RT_LE2H_U64(Header.descriptorSize);
2507 if (pExtent->uDescriptorSector && !pExtent->cDescriptorSectors)
2508 {
2509 rc = vmdkError(pImage, VERR_VD_VMDK_INVALID_HEADER, RT_SRC_POS, N_("VMDK: inconsistent embedded descriptor config in '%s'"), pExtent->pszFullname);
2510 goto out;
2511 }
2512 pExtent->cGTEntries = RT_LE2H_U32(Header.numGTEsPerGT);
2513 if (RT_LE2H_U32(Header.flags) & RT_BIT(1))
2514 {
2515 pExtent->uSectorRGD = RT_LE2H_U64(Header.rgdOffset);
2516 pExtent->uSectorGD = RT_LE2H_U64(Header.gdOffset);
2517 }
2518 else
2519 {
2520 pExtent->uSectorGD = RT_LE2H_U64(Header.gdOffset);
2521 pExtent->uSectorRGD = 0;
2522 }
2523 if (pExtent->uSectorGD == VMDK_GD_AT_END || pExtent->uSectorRGD == VMDK_GD_AT_END)
2524 {
2525 rc = vmdkError(pImage, VERR_VD_VMDK_INVALID_HEADER, RT_SRC_POS, N_("VMDK: cannot resolve grain directory offset in '%s'"), pExtent->pszFullname);
2526 goto out;
2527 }
2528 pExtent->cOverheadSectors = RT_LE2H_U64(Header.overHead);
2529 pExtent->fUncleanShutdown = !!Header.uncleanShutdown;
2530 pExtent->uCompression = RT_LE2H_U16(Header.compressAlgorithm);
2531 cSectorsPerGDE = pExtent->cGTEntries * pExtent->cSectorsPerGrain;
2532 if (!cSectorsPerGDE || cSectorsPerGDE > UINT32_MAX)
2533 {
2534 rc = vmdkError(pImage, VERR_VD_VMDK_INVALID_HEADER, RT_SRC_POS, N_("VMDK: incorrect grain directory size in '%s'"), pExtent->pszFullname);
2535 goto out;
2536 }
2537 pExtent->cSectorsPerGDE = cSectorsPerGDE;
2538 pExtent->cGDEntries = (pExtent->cSectors + cSectorsPerGDE - 1) / cSectorsPerGDE;
2539
2540 /* Fix up the number of descriptor sectors, as some flat images have
2541 * really just one, and this causes failures when inserting the UUID
2542 * values and other extra information. */
2543 if (pExtent->cDescriptorSectors != 0 && pExtent->cDescriptorSectors < 4)
2544 {
2545 /* Do it the easy way - just fix it for flat images which have no
2546 * other complicated metadata which needs space too. */
2547 if ( pExtent->uDescriptorSector + 4 < pExtent->cOverheadSectors
2548 && pExtent->cGTEntries * pExtent->cGDEntries == 0)
2549 pExtent->cDescriptorSectors = 4;
2550 }
2551
2552out:
2553 if (RT_FAILURE(rc))
2554 vmdkFreeExtentData(pImage, pExtent, false);
2555
2556 return rc;
2557}
2558
2559/**
2560 * Internal: read additional metadata belonging to an extent. For those
2561 * extents which have no additional metadata just verify the information.
2562 */
2563static int vmdkReadMetaExtent(PVMDKIMAGE pImage, PVMDKEXTENT pExtent)
2564{
2565 int rc = VINF_SUCCESS;
2566 uint64_t cbExtentSize;
2567
2568 /* The image must be a multiple of a sector in size and contain the data
2569 * area (flat images only). If not, it means the image is at least
2570 * truncated, or even seriously garbled. */
2571 rc = vmdkFileGetSize(pExtent->pFile, &cbExtentSize);
2572 if (RT_FAILURE(rc))
2573 {
2574 rc = vmdkError(pImage, rc, RT_SRC_POS, N_("VMDK: error getting size in '%s'"), pExtent->pszFullname);
2575 goto out;
2576 }
2577/* disabled the size check again as there are too many too short vmdks out there */
2578#ifdef VBOX_WITH_VMDK_STRICT_SIZE_CHECK
2579 if ( cbExtentSize != RT_ALIGN_64(cbExtentSize, 512)
2580 && (pExtent->enmType != VMDKETYPE_FLAT || pExtent->cNominalSectors + pExtent->uSectorOffset > VMDK_BYTE2SECTOR(cbExtentSize)))
2581 {
2582 rc = vmdkError(pImage, VERR_VD_VMDK_INVALID_HEADER, RT_SRC_POS, N_("VMDK: file size is not a multiple of 512 in '%s', file is truncated or otherwise garbled"), pExtent->pszFullname);
2583 goto out;
2584 }
2585#endif /* VBOX_WITH_VMDK_STRICT_SIZE_CHECK */
2586 if (pExtent->enmType != VMDKETYPE_HOSTED_SPARSE)
2587 goto out;
2588
2589 /* The spec says that this must be a power of two and greater than 8,
2590 * but probably they meant not less than 8. */
2591 if ( (pExtent->cSectorsPerGrain & (pExtent->cSectorsPerGrain - 1))
2592 || pExtent->cSectorsPerGrain < 8)
2593 {
2594 rc = vmdkError(pImage, VERR_VD_VMDK_INVALID_HEADER, RT_SRC_POS, N_("VMDK: invalid extent grain size %u in '%s'"), pExtent->cSectorsPerGrain, pExtent->pszFullname);
2595 goto out;
2596 }
2597
2598 /* This code requires that a grain table must hold a power of two multiple
2599 * of the number of entries per GT cache entry. */
2600 if ( (pExtent->cGTEntries & (pExtent->cGTEntries - 1))
2601 || pExtent->cGTEntries < VMDK_GT_CACHELINE_SIZE)
2602 {
2603 rc = vmdkError(pImage, VERR_VD_VMDK_INVALID_HEADER, RT_SRC_POS, N_("VMDK: grain table cache size problem in '%s'"), pExtent->pszFullname);
2604 goto out;
2605 }
2606
2607 rc = vmdkReadGrainDirectory(pExtent);
2608
2609out:
2610 if (RT_FAILURE(rc))
2611 vmdkFreeExtentData(pImage, pExtent, false);
2612
2613 return rc;
2614}
2615
2616/**
2617 * Internal: write/update the metadata for a sparse extent.
2618 */
2619static int vmdkWriteMetaSparseExtent(PVMDKEXTENT pExtent, uint64_t uOffset)
2620{
2621 SparseExtentHeader Header;
2622
2623 memset(&Header, '\0', sizeof(Header));
2624 Header.magicNumber = RT_H2LE_U32(VMDK_SPARSE_MAGICNUMBER);
2625 Header.version = RT_H2LE_U32(pExtent->uVersion);
2626 Header.flags = RT_H2LE_U32(RT_BIT(0));
2627 if (pExtent->pRGD)
2628 Header.flags |= RT_H2LE_U32(RT_BIT(1));
2629 if (pExtent->pImage->uImageFlags & VD_VMDK_IMAGE_FLAGS_STREAM_OPTIMIZED)
2630 Header.flags |= RT_H2LE_U32(RT_BIT(16) | RT_BIT(17));
2631 Header.capacity = RT_H2LE_U64(pExtent->cSectors);
2632 Header.grainSize = RT_H2LE_U64(pExtent->cSectorsPerGrain);
2633 Header.descriptorOffset = RT_H2LE_U64(pExtent->uDescriptorSector);
2634 Header.descriptorSize = RT_H2LE_U64(pExtent->cDescriptorSectors);
2635 Header.numGTEsPerGT = RT_H2LE_U32(pExtent->cGTEntries);
2636 if (pExtent->fFooter && uOffset == 0)
2637 {
2638 if (pExtent->pRGD)
2639 {
2640 Assert(pExtent->uSectorRGD);
2641 Header.rgdOffset = RT_H2LE_U64(VMDK_GD_AT_END);
2642 Header.gdOffset = RT_H2LE_U64(VMDK_GD_AT_END);
2643 }
2644 else
2645 {
2646 Header.gdOffset = RT_H2LE_U64(VMDK_GD_AT_END);
2647 }
2648 }
2649 else
2650 {
2651 if (pExtent->pRGD)
2652 {
2653 Assert(pExtent->uSectorRGD);
2654 Header.rgdOffset = RT_H2LE_U64(pExtent->uSectorRGD);
2655 Header.gdOffset = RT_H2LE_U64(pExtent->uSectorGD);
2656 }
2657 else
2658 {
2659 Header.gdOffset = RT_H2LE_U64(pExtent->uSectorGD);
2660 }
2661 }
2662 Header.overHead = RT_H2LE_U64(pExtent->cOverheadSectors);
2663 Header.uncleanShutdown = pExtent->fUncleanShutdown;
2664 Header.singleEndLineChar = '\n';
2665 Header.nonEndLineChar = ' ';
2666 Header.doubleEndLineChar1 = '\r';
2667 Header.doubleEndLineChar2 = '\n';
2668 Header.compressAlgorithm = RT_H2LE_U16(pExtent->uCompression);
2669
2670 int rc = vmdkFileWriteAt(pExtent->pFile, uOffset, &Header, sizeof(Header), NULL);
2671 AssertRC(rc);
2672 if (RT_FAILURE(rc))
2673 rc = vmdkError(pExtent->pImage, rc, RT_SRC_POS, N_("VMDK: error writing extent header in '%s'"), pExtent->pszFullname);
2674 return rc;
2675}
2676
2677#ifdef VBOX_WITH_VMDK_ESX
2678/**
2679 * Internal: unused code to read the metadata of a sparse ESX extent.
2680 *
2681 * Such extents never leave ESX server, so this isn't ever used.
2682 */
2683static int vmdkReadMetaESXSparseExtent(PVMDKEXTENT pExtent)
2684{
2685 COWDisk_Header Header;
2686 uint64_t cSectorsPerGDE;
2687
2688 int rc = vmdkFileReadAt(pExtent->pFile, 0, &Header, sizeof(Header), NULL);
2689 AssertRC(rc);
2690 if (RT_FAILURE(rc))
2691 goto out;
2692 if ( RT_LE2H_U32(Header.magicNumber) != VMDK_ESX_SPARSE_MAGICNUMBER
2693 || RT_LE2H_U32(Header.version) != 1
2694 || RT_LE2H_U32(Header.flags) != 3)
2695 {
2696 rc = VERR_VD_VMDK_INVALID_HEADER;
2697 goto out;
2698 }
2699 pExtent->enmType = VMDKETYPE_ESX_SPARSE;
2700 pExtent->cSectors = RT_LE2H_U32(Header.numSectors);
2701 pExtent->cSectorsPerGrain = RT_LE2H_U32(Header.grainSize);
2702 /* The spec says that this must be between 1 sector and 1MB. This code
2703 * assumes it's a power of two, so check that requirement, too. */
2704 if ( (pExtent->cSectorsPerGrain & (pExtent->cSectorsPerGrain - 1))
2705 || pExtent->cSectorsPerGrain == 0
2706 || pExtent->cSectorsPerGrain > 2048)
2707 {
2708 rc = VERR_VD_VMDK_INVALID_HEADER;
2709 goto out;
2710 }
2711 pExtent->uDescriptorSector = 0;
2712 pExtent->cDescriptorSectors = 0;
2713 pExtent->uSectorGD = RT_LE2H_U32(Header.gdOffset);
2714 pExtent->uSectorRGD = 0;
2715 pExtent->cOverheadSectors = 0;
2716 pExtent->cGTEntries = 4096;
2717 cSectorsPerGDE = pExtent->cGTEntries * pExtent->cSectorsPerGrain;
2718 if (!cSectorsPerGDE || cSectorsPerGDE > UINT32_MAX)
2719 {
2720 rc = VERR_VD_VMDK_INVALID_HEADER;
2721 goto out;
2722 }
2723 pExtent->cSectorsPerGDE = cSectorsPerGDE;
2724 pExtent->cGDEntries = (pExtent->cSectors + cSectorsPerGDE - 1) / cSectorsPerGDE;
2725 if (pExtent->cGDEntries != RT_LE2H_U32(Header.numGDEntries))
2726 {
2727 /* Inconsistency detected. Computed number of GD entries doesn't match
2728 * stored value. Better be safe than sorry. */
2729 rc = VERR_VD_VMDK_INVALID_HEADER;
2730 goto out;
2731 }
2732 pExtent->uFreeSector = RT_LE2H_U32(Header.freeSector);
2733 pExtent->fUncleanShutdown = !!Header.uncleanShutdown;
2734
2735 rc = vmdkReadGrainDirectory(pExtent);
2736
2737out:
2738 if (RT_FAILURE(rc))
2739 vmdkFreeExtentData(pImage, pExtent, false);
2740
2741 return rc;
2742}
2743#endif /* VBOX_WITH_VMDK_ESX */
2744
2745/**
2746 * Internal: free the memory used by the extent data structure, optionally
2747 * deleting the referenced files.
2748 */
2749static void vmdkFreeExtentData(PVMDKIMAGE pImage, PVMDKEXTENT pExtent,
2750 bool fDelete)
2751{
2752 vmdkFreeGrainDirectory(pExtent);
2753 if (pExtent->pDescData)
2754 {
2755 RTMemFree(pExtent->pDescData);
2756 pExtent->pDescData = NULL;
2757 }
2758 if (pExtent->pFile != NULL)
2759 {
2760 /* Do not delete raw extents, these have full and base names equal. */
2761 vmdkFileClose(pImage, &pExtent->pFile,
2762 fDelete
2763 && pExtent->pszFullname
2764 && strcmp(pExtent->pszFullname, pExtent->pszBasename));
2765 }
2766 if (pExtent->pszBasename)
2767 {
2768 RTMemTmpFree((void *)pExtent->pszBasename);
2769 pExtent->pszBasename = NULL;
2770 }
2771 if (pExtent->pszFullname)
2772 {
2773 RTStrFree((char *)(void *)pExtent->pszFullname);
2774 pExtent->pszFullname = NULL;
2775 }
2776 if (pExtent->pvGrain)
2777 {
2778 RTMemFree(pExtent->pvGrain);
2779 pExtent->pvGrain = NULL;
2780 }
2781}
2782
2783/**
2784 * Internal: allocate grain table cache if necessary for this image.
2785 */
2786static int vmdkAllocateGrainTableCache(PVMDKIMAGE pImage)
2787{
2788 PVMDKEXTENT pExtent;
2789
2790 /* Allocate grain table cache if any sparse extent is present. */
2791 for (unsigned i = 0; i < pImage->cExtents; i++)
2792 {
2793 pExtent = &pImage->pExtents[i];
2794 if ( pExtent->enmType == VMDKETYPE_HOSTED_SPARSE
2795#ifdef VBOX_WITH_VMDK_ESX
2796 || pExtent->enmType == VMDKETYPE_ESX_SPARSE
2797#endif /* VBOX_WITH_VMDK_ESX */
2798 )
2799 {
2800 /* Allocate grain table cache. */
2801 pImage->pGTCache = (PVMDKGTCACHE)RTMemAllocZ(sizeof(VMDKGTCACHE));
2802 if (!pImage->pGTCache)
2803 return VERR_NO_MEMORY;
2804 for (unsigned i = 0; i < VMDK_GT_CACHE_SIZE; i++)
2805 {
2806 PVMDKGTCACHEENTRY pGCE = &pImage->pGTCache->aGTCache[i];
2807 pGCE->uExtent = UINT32_MAX;
2808 }
2809 pImage->pGTCache->cEntries = VMDK_GT_CACHE_SIZE;
2810 break;
2811 }
2812 }
2813
2814 return VINF_SUCCESS;
2815}
2816
2817/**
2818 * Internal: allocate the given number of extents.
2819 */
2820static int vmdkCreateExtents(PVMDKIMAGE pImage, unsigned cExtents)
2821{
2822 int rc = VINF_SUCCESS;
2823 PVMDKEXTENT pExtents = (PVMDKEXTENT)RTMemAllocZ(cExtents * sizeof(VMDKEXTENT));
2824 if (pImage)
2825 {
2826 for (unsigned i = 0; i < cExtents; i++)
2827 {
2828 pExtents[i].pFile = NULL;
2829 pExtents[i].pszBasename = NULL;
2830 pExtents[i].pszFullname = NULL;
2831 pExtents[i].pGD = NULL;
2832 pExtents[i].pRGD = NULL;
2833 pExtents[i].pDescData = NULL;
2834 pExtents[i].uVersion = 1;
2835 pExtents[i].uCompression = VMDK_COMPRESSION_NONE;
2836 pExtents[i].uExtent = i;
2837 pExtents[i].pImage = pImage;
2838 }
2839 pImage->pExtents = pExtents;
2840 pImage->cExtents = cExtents;
2841 }
2842 else
2843 rc = VERR_NO_MEMORY;
2844
2845 return rc;
2846}
2847
2848/**
2849 * Internal: Open an image, constructing all necessary data structures.
2850 */
2851static int vmdkOpenImage(PVMDKIMAGE pImage, unsigned uOpenFlags)
2852{
2853 int rc;
2854 uint32_t u32Magic;
2855 PVMDKFILE pFile;
2856 PVMDKEXTENT pExtent;
2857
2858 pImage->uOpenFlags = uOpenFlags;
2859
2860 /* Try to get error interface. */
2861 pImage->pInterfaceError = VDInterfaceGet(pImage->pVDIfsDisk, VDINTERFACETYPE_ERROR);
2862 if (pImage->pInterfaceError)
2863 pImage->pInterfaceErrorCallbacks = VDGetInterfaceError(pImage->pInterfaceError);
2864
2865 /* Try to get async I/O interface. */
2866 pImage->pInterfaceAsyncIO = VDInterfaceGet(pImage->pVDIfsDisk, VDINTERFACETYPE_ASYNCIO);
2867 if (pImage->pInterfaceAsyncIO)
2868 pImage->pInterfaceAsyncIOCallbacks = VDGetInterfaceAsyncIO(pImage->pInterfaceAsyncIO);
2869
2870 /*
2871 * Open the image.
2872 * We don't have to check for asynchronous access because
2873 * we only support raw access and the opened file is a description
2874 * file were no data is stored.
2875 */
2876 rc = vmdkFileOpen(pImage, &pFile, pImage->pszFilename,
2877 uOpenFlags & VD_OPEN_FLAGS_READONLY
2878 ? RTFILE_O_READ | RTFILE_O_OPEN | RTFILE_O_DENY_NONE
2879 : RTFILE_O_READWRITE | RTFILE_O_OPEN | RTFILE_O_DENY_WRITE, false);
2880 if (RT_FAILURE(rc))
2881 {
2882 /* Do NOT signal an appropriate error here, as the VD layer has the
2883 * choice of retrying the open if it failed. */
2884 goto out;
2885 }
2886 pImage->pFile = pFile;
2887
2888 /* Read magic (if present). */
2889 rc = vmdkFileReadAt(pFile, 0, &u32Magic, sizeof(u32Magic), NULL);
2890 if (RT_FAILURE(rc))
2891 {
2892 rc = vmdkError(pImage, rc, RT_SRC_POS, N_("VMDK: error reading the magic number in '%s'"), pImage->pszFilename);
2893 goto out;
2894 }
2895
2896 /* Handle the file according to its magic number. */
2897 if (RT_LE2H_U32(u32Magic) == VMDK_SPARSE_MAGICNUMBER)
2898 {
2899 /* It's a hosted single-extent image. */
2900 rc = vmdkCreateExtents(pImage, 1);
2901 if (RT_FAILURE(rc))
2902 goto out;
2903 /* The opened file is passed to the extent. No separate descriptor
2904 * file, so no need to keep anything open for the image. */
2905 pExtent = &pImage->pExtents[0];
2906 pExtent->pFile = pFile;
2907 pImage->pFile = NULL;
2908 pExtent->pszFullname = RTPathAbsDup(pImage->pszFilename);
2909 if (!pExtent->pszFullname)
2910 {
2911 rc = VERR_NO_MEMORY;
2912 goto out;
2913 }
2914 rc = vmdkReadBinaryMetaExtent(pImage, pExtent);
2915 if (RT_FAILURE(rc))
2916 goto out;
2917
2918 /* As we're dealing with a monolithic image here, there must
2919 * be a descriptor embedded in the image file. */
2920 if (!pExtent->uDescriptorSector || !pExtent->cDescriptorSectors)
2921 {
2922 rc = vmdkError(pImage, VERR_VD_VMDK_INVALID_HEADER, RT_SRC_POS, N_("VMDK: monolithic image without descriptor in '%s'"), pImage->pszFilename);
2923 goto out;
2924 }
2925 /* HACK: extend the descriptor if it is unusually small and it fits in
2926 * the unused space after the image header. Allows opening VMDK files
2927 * with extremely small descriptor in read/write mode. */
2928 if ( !(pImage->uOpenFlags & VD_OPEN_FLAGS_READONLY)
2929 && pExtent->cDescriptorSectors < 3
2930 && (int64_t)pExtent->uSectorGD - pExtent->uDescriptorSector >= 4
2931 && (!pExtent->uSectorRGD || (int64_t)pExtent->uSectorRGD - pExtent->uDescriptorSector >= 4))
2932 {
2933 pExtent->cDescriptorSectors = 4;
2934 pExtent->fMetaDirty = true;
2935 }
2936 /* Read the descriptor from the extent. */
2937 pExtent->pDescData = (char *)RTMemAllocZ(VMDK_SECTOR2BYTE(pExtent->cDescriptorSectors));
2938 if (!pExtent->pDescData)
2939 {
2940 rc = VERR_NO_MEMORY;
2941 goto out;
2942 }
2943 rc = vmdkFileReadAt(pExtent->pFile,
2944 VMDK_SECTOR2BYTE(pExtent->uDescriptorSector),
2945 pExtent->pDescData,
2946 VMDK_SECTOR2BYTE(pExtent->cDescriptorSectors), NULL);
2947 AssertRC(rc);
2948 if (RT_FAILURE(rc))
2949 {
2950 rc = vmdkError(pImage, rc, RT_SRC_POS, N_("VMDK: read error for descriptor in '%s'"), pExtent->pszFullname);
2951 goto out;
2952 }
2953
2954 rc = vmdkParseDescriptor(pImage, pExtent->pDescData,
2955 VMDK_SECTOR2BYTE(pExtent->cDescriptorSectors));
2956 if (RT_FAILURE(rc))
2957 goto out;
2958
2959 rc = vmdkReadMetaExtent(pImage, pExtent);
2960 if (RT_FAILURE(rc))
2961 goto out;
2962
2963 /* Mark the extent as unclean if opened in read-write mode. */
2964 if (!(uOpenFlags & VD_OPEN_FLAGS_READONLY))
2965 {
2966 pExtent->fUncleanShutdown = true;
2967 pExtent->fMetaDirty = true;
2968 }
2969 }
2970 else
2971 {
2972 pImage->cbDescAlloc = VMDK_SECTOR2BYTE(20);
2973 pImage->pDescData = (char *)RTMemAllocZ(pImage->cbDescAlloc);
2974 if (!pImage->pDescData)
2975 {
2976 rc = VERR_NO_MEMORY;
2977 goto out;
2978 }
2979
2980 size_t cbRead;
2981 rc = vmdkFileReadAt(pImage->pFile, 0, pImage->pDescData,
2982 pImage->cbDescAlloc, &cbRead);
2983 if (RT_FAILURE(rc))
2984 {
2985 rc = vmdkError(pImage, rc, RT_SRC_POS, N_("VMDK: read error for descriptor in '%s'"), pImage->pszFilename);
2986 goto out;
2987 }
2988 if (cbRead == pImage->cbDescAlloc)
2989 {
2990 /* Likely the read is truncated. Better fail a bit too early
2991 * (normally the descriptor is much smaller than our buffer). */
2992 rc = vmdkError(pImage, VERR_VD_VMDK_INVALID_HEADER, RT_SRC_POS, N_("VMDK: cannot read descriptor in '%s'"), pImage->pszFilename);
2993 goto out;
2994 }
2995
2996 rc = vmdkParseDescriptor(pImage, pImage->pDescData,
2997 pImage->cbDescAlloc);
2998 if (RT_FAILURE(rc))
2999 goto out;
3000
3001 /*
3002 * We have to check for the asynchronous open flag. The
3003 * extents are parsed and the type of all are known now.
3004 * Check if every extent is either FLAT or ZERO.
3005 */
3006 if (uOpenFlags & VD_OPEN_FLAGS_ASYNC_IO)
3007 {
3008 for (unsigned i = 0; i < pImage->cExtents; i++)
3009 {
3010 PVMDKEXTENT pExtent = &pImage->pExtents[i];
3011
3012 if ( (pExtent->enmType != VMDKETYPE_FLAT)
3013 && (pExtent->enmType != VMDKETYPE_ZERO))
3014 {
3015 /*
3016 * Opened image contains at least one none flat or zero extent.
3017 * Return error but don't set error message as the caller
3018 * has the chance to open in non async I/O mode.
3019 */
3020 rc = VERR_NOT_SUPPORTED;
3021 goto out;
3022 }
3023 }
3024 }
3025
3026 for (unsigned i = 0; i < pImage->cExtents; i++)
3027 {
3028 PVMDKEXTENT pExtent = &pImage->pExtents[i];
3029
3030 if (pExtent->pszBasename)
3031 {
3032 /* Hack to figure out whether the specified name in the
3033 * extent descriptor is absolute. Doesn't always work, but
3034 * should be good enough for now. */
3035 char *pszFullname;
3036 /** @todo implement proper path absolute check. */
3037 if (pExtent->pszBasename[0] == RTPATH_SLASH)
3038 {
3039 pszFullname = RTStrDup(pExtent->pszBasename);
3040 if (!pszFullname)
3041 {
3042 rc = VERR_NO_MEMORY;
3043 goto out;
3044 }
3045 }
3046 else
3047 {
3048 size_t cbDirname;
3049 char *pszDirname = RTStrDup(pImage->pszFilename);
3050 if (!pszDirname)
3051 {
3052 rc = VERR_NO_MEMORY;
3053 goto out;
3054 }
3055 RTPathStripFilename(pszDirname);
3056 cbDirname = strlen(pszDirname);
3057 rc = RTStrAPrintf(&pszFullname, "%s%c%s", pszDirname,
3058 RTPATH_SLASH, pExtent->pszBasename);
3059 RTStrFree(pszDirname);
3060 if (RT_FAILURE(rc))
3061 goto out;
3062 }
3063 pExtent->pszFullname = pszFullname;
3064 }
3065 else
3066 pExtent->pszFullname = NULL;
3067
3068 switch (pExtent->enmType)
3069 {
3070 case VMDKETYPE_HOSTED_SPARSE:
3071 rc = vmdkFileOpen(pImage, &pExtent->pFile, pExtent->pszFullname,
3072 uOpenFlags & VD_OPEN_FLAGS_READONLY
3073 ? RTFILE_O_READ | RTFILE_O_OPEN | RTFILE_O_DENY_NONE
3074 : RTFILE_O_READWRITE | RTFILE_O_OPEN | RTFILE_O_DENY_WRITE, false);
3075 if (RT_FAILURE(rc))
3076 {
3077 /* Do NOT signal an appropriate error here, as the VD
3078 * layer has the choice of retrying the open if it
3079 * failed. */
3080 goto out;
3081 }
3082 rc = vmdkReadBinaryMetaExtent(pImage, pExtent);
3083 if (RT_FAILURE(rc))
3084 goto out;
3085 rc = vmdkReadMetaExtent(pImage, pExtent);
3086 if (RT_FAILURE(rc))
3087 goto out;
3088
3089 /* Mark extent as unclean if opened in read-write mode. */
3090 if (!(uOpenFlags & VD_OPEN_FLAGS_READONLY))
3091 {
3092 pExtent->fUncleanShutdown = true;
3093 pExtent->fMetaDirty = true;
3094 }
3095 break;
3096 case VMDKETYPE_FLAT:
3097 rc = vmdkFileOpen(pImage, &pExtent->pFile, pExtent->pszFullname,
3098 uOpenFlags & VD_OPEN_FLAGS_READONLY
3099 ? RTFILE_O_READ | RTFILE_O_OPEN | RTFILE_O_DENY_NONE
3100 : RTFILE_O_READWRITE | RTFILE_O_OPEN | RTFILE_O_DENY_WRITE, true);
3101 if (RT_FAILURE(rc))
3102 {
3103 /* Do NOT signal an appropriate error here, as the VD
3104 * layer has the choice of retrying the open if it
3105 * failed. */
3106 goto out;
3107 }
3108 break;
3109 case VMDKETYPE_ZERO:
3110 /* Nothing to do. */
3111 break;
3112 default:
3113 AssertMsgFailed(("unknown vmdk extent type %d\n", pExtent->enmType));
3114 }
3115 }
3116 }
3117
3118 /* Make sure this is not reached accidentally with an error status. */
3119 AssertRC(rc);
3120
3121 /* Determine PCHS geometry if not set. */
3122 if (pImage->PCHSGeometry.cCylinders == 0)
3123 {
3124 uint64_t cCylinders = VMDK_BYTE2SECTOR(pImage->cbSize)
3125 / pImage->PCHSGeometry.cHeads
3126 / pImage->PCHSGeometry.cSectors;
3127 pImage->PCHSGeometry.cCylinders = (unsigned)RT_MIN(cCylinders, 16383);
3128 if (!(pImage->uOpenFlags & VD_OPEN_FLAGS_READONLY))
3129 {
3130 rc = vmdkDescSetPCHSGeometry(pImage, &pImage->PCHSGeometry);
3131 AssertRC(rc);
3132 }
3133 }
3134
3135 /* Update the image metadata now in case has changed. */
3136 rc = vmdkFlushImage(pImage);
3137 if (RT_FAILURE(rc))
3138 goto out;
3139
3140 /* Figure out a few per-image constants from the extents. */
3141 pImage->cbSize = 0;
3142 for (unsigned i = 0; i < pImage->cExtents; i++)
3143 {
3144 pExtent = &pImage->pExtents[i];
3145 if ( pExtent->enmType == VMDKETYPE_HOSTED_SPARSE
3146#ifdef VBOX_WITH_VMDK_ESX
3147 || pExtent->enmType == VMDKETYPE_ESX_SPARSE
3148#endif /* VBOX_WITH_VMDK_ESX */
3149 )
3150 {
3151 /* Here used to be a check whether the nominal size of an extent
3152 * is a multiple of the grain size. The spec says that this is
3153 * always the case, but unfortunately some files out there in the
3154 * wild violate the spec (e.g. ReactOS 0.3.1). */
3155 }
3156 pImage->cbSize += VMDK_SECTOR2BYTE(pExtent->cNominalSectors);
3157 }
3158
3159 for (unsigned i = 0; i < pImage->cExtents; i++)
3160 {
3161 pExtent = &pImage->pExtents[i];
3162 if ( pImage->pExtents[i].enmType == VMDKETYPE_FLAT
3163 || pImage->pExtents[i].enmType == VMDKETYPE_ZERO)
3164 {
3165 pImage->uImageFlags |= VD_IMAGE_FLAGS_FIXED;
3166 break;
3167 }
3168 }
3169
3170 rc = vmdkAllocateGrainTableCache(pImage);
3171 if (RT_FAILURE(rc))
3172 goto out;
3173
3174out:
3175 if (RT_FAILURE(rc))
3176 vmdkFreeImage(pImage, false);
3177 return rc;
3178}
3179
3180/**
3181 * Internal: create VMDK images for raw disk/partition access.
3182 */
3183static int vmdkCreateRawImage(PVMDKIMAGE pImage, const PVBOXHDDRAW pRaw,
3184 uint64_t cbSize)
3185{
3186 int rc = VINF_SUCCESS;
3187 PVMDKEXTENT pExtent;
3188
3189 if (pRaw->fRawDisk)
3190 {
3191 /* Full raw disk access. This requires setting up a descriptor
3192 * file and open the (flat) raw disk. */
3193 rc = vmdkCreateExtents(pImage, 1);
3194 if (RT_FAILURE(rc))
3195 return vmdkError(pImage, rc, RT_SRC_POS, N_("VMDK: could not create new extent list in '%s'"), pImage->pszFilename);
3196 pExtent = &pImage->pExtents[0];
3197 /* Create raw disk descriptor file. */
3198 rc = vmdkFileOpen(pImage, &pImage->pFile, pImage->pszFilename,
3199 RTFILE_O_READWRITE | RTFILE_O_CREATE | RTFILE_O_DENY_WRITE | RTFILE_O_NOT_CONTENT_INDEXED,
3200 false);
3201 if (RT_FAILURE(rc))
3202 return vmdkError(pImage, rc, RT_SRC_POS, N_("VMDK: could not create new file '%s'"), pImage->pszFilename);
3203
3204 /* Set up basename for extent description. Cannot use StrDup. */
3205 size_t cbBasename = strlen(pRaw->pszRawDisk) + 1;
3206 char *pszBasename = (char *)RTMemTmpAlloc(cbBasename);
3207 if (!pszBasename)
3208 return VERR_NO_MEMORY;
3209 memcpy(pszBasename, pRaw->pszRawDisk, cbBasename);
3210 pExtent->pszBasename = pszBasename;
3211 /* For raw disks the full name is identical to the base name. */
3212 pExtent->pszFullname = RTStrDup(pszBasename);
3213 if (!pExtent->pszFullname)
3214 return VERR_NO_MEMORY;
3215 pExtent->enmType = VMDKETYPE_FLAT;
3216 pExtent->cNominalSectors = VMDK_BYTE2SECTOR(cbSize);
3217 pExtent->uSectorOffset = 0;
3218 pExtent->enmAccess = VMDKACCESS_READWRITE;
3219 pExtent->fMetaDirty = false;
3220
3221 /* Open flat image, the raw disk. */
3222 rc = vmdkFileOpen(pImage, &pExtent->pFile, pExtent->pszFullname,
3223 RTFILE_O_READWRITE | RTFILE_O_OPEN | RTFILE_O_DENY_WRITE, false);
3224 if (RT_FAILURE(rc))
3225 return vmdkError(pImage, rc, RT_SRC_POS, N_("VMDK: could not open raw disk file '%s'"), pExtent->pszFullname);
3226 }
3227 else
3228 {
3229 /* Raw partition access. This requires setting up a descriptor
3230 * file, write the partition information to a flat extent and
3231 * open all the (flat) raw disk partitions. */
3232
3233 /* First pass over the partitions to determine how many
3234 * extents we need. One partition can require up to 4 extents.
3235 * One to skip over unpartitioned space, one for the
3236 * partitioning data, one to skip over unpartitioned space
3237 * and one for the partition data. */
3238 unsigned cExtents = 0;
3239 uint64_t uStart = 0;
3240 for (unsigned i = 0; i < pRaw->cPartitions; i++)
3241 {
3242 PVBOXHDDRAWPART pPart = &pRaw->pPartitions[i];
3243 if (pPart->cbPartitionData)
3244 {
3245 if (uStart > pPart->uPartitionDataStart)
3246 return vmdkError(pImage, VERR_INVALID_PARAMETER, RT_SRC_POS, N_("VMDK: cannot go backwards for partitioning information in '%s'"), pImage->pszFilename);
3247 else if (uStart != pPart->uPartitionDataStart)
3248 cExtents++;
3249 uStart = pPart->uPartitionDataStart + pPart->cbPartitionData;
3250 cExtents++;
3251 }
3252 if (pPart->cbPartition)
3253 {
3254 if (uStart > pPart->uPartitionStart)
3255 return vmdkError(pImage, VERR_INVALID_PARAMETER, RT_SRC_POS, N_("VMDK: cannot go backwards for partition data in '%s'"), pImage->pszFilename);
3256 else if (uStart != pPart->uPartitionStart)
3257 cExtents++;
3258 uStart = pPart->uPartitionStart + pPart->cbPartition;
3259 cExtents++;
3260 }
3261 }
3262 /* Another extent for filling up the rest of the image. */
3263 if (uStart != cbSize)
3264 cExtents++;
3265
3266 rc = vmdkCreateExtents(pImage, cExtents);
3267 if (RT_FAILURE(rc))
3268 return vmdkError(pImage, rc, RT_SRC_POS, N_("VMDK: could not create new extent list in '%s'"), pImage->pszFilename);
3269
3270 /* Create raw partition descriptor file. */
3271 rc = vmdkFileOpen(pImage, &pImage->pFile, pImage->pszFilename,
3272 RTFILE_O_READWRITE | RTFILE_O_CREATE | RTFILE_O_DENY_WRITE | RTFILE_O_NOT_CONTENT_INDEXED,
3273 false);
3274 if (RT_FAILURE(rc))
3275 return vmdkError(pImage, rc, RT_SRC_POS, N_("VMDK: could not create new file '%s'"), pImage->pszFilename);
3276
3277 /* Create base filename for the partition table extent. */
3278 /** @todo remove fixed buffer without creating memory leaks. */
3279 char pszPartition[1024];
3280 const char *pszBase = RTPathFilename(pImage->pszFilename);
3281 const char *pszExt = RTPathExt(pszBase);
3282 if (pszExt == NULL)
3283 return vmdkError(pImage, rc, RT_SRC_POS, N_("VMDK: invalid filename '%s'"), pImage->pszFilename);
3284 char *pszBaseBase = RTStrDup(pszBase);
3285 if (!pszBaseBase)
3286 return VERR_NO_MEMORY;
3287 RTPathStripExt(pszBaseBase);
3288 RTStrPrintf(pszPartition, sizeof(pszPartition), "%s-pt%s",
3289 pszBaseBase, pszExt);
3290 RTStrFree(pszBaseBase);
3291
3292 /* Second pass over the partitions, now define all extents. */
3293 uint64_t uPartOffset = 0;
3294 cExtents = 0;
3295 uStart = 0;
3296 for (unsigned i = 0; i < pRaw->cPartitions; i++)
3297 {
3298 PVBOXHDDRAWPART pPart = &pRaw->pPartitions[i];
3299 if (pPart->cbPartitionData)
3300 {
3301 if (uStart != pPart->uPartitionDataStart)
3302 {
3303 pExtent = &pImage->pExtents[cExtents++];
3304 pExtent->pszBasename = NULL;
3305 pExtent->pszFullname = NULL;
3306 pExtent->enmType = VMDKETYPE_ZERO;
3307 pExtent->cNominalSectors = VMDK_BYTE2SECTOR(pPart->uPartitionDataStart - uStart);
3308 pExtent->uSectorOffset = 0;
3309 pExtent->enmAccess = VMDKACCESS_READWRITE;
3310 pExtent->fMetaDirty = false;
3311 }
3312 uStart = pPart->uPartitionDataStart + pPart->cbPartitionData;
3313 pExtent = &pImage->pExtents[cExtents++];
3314 /* Set up basename for extent description. Can't use StrDup. */
3315 size_t cbBasename = strlen(pszPartition) + 1;
3316 char *pszBasename = (char *)RTMemTmpAlloc(cbBasename);
3317 if (!pszBasename)
3318 return VERR_NO_MEMORY;
3319 memcpy(pszBasename, pszPartition, cbBasename);
3320 pExtent->pszBasename = pszBasename;
3321
3322 /* Set up full name for partition extent. */
3323 size_t cbDirname;
3324 char *pszDirname = RTStrDup(pImage->pszFilename);
3325 if (!pszDirname)
3326 return VERR_NO_MEMORY;
3327 RTPathStripFilename(pszDirname);
3328 cbDirname = strlen(pszDirname);
3329 char *pszFullname;
3330 rc = RTStrAPrintf(&pszFullname, "%s%c%s", pszDirname,
3331 RTPATH_SLASH, pExtent->pszBasename);
3332 RTStrFree(pszDirname);
3333 if (RT_FAILURE(rc))
3334 return rc;
3335 pExtent->pszFullname = pszFullname;
3336 pExtent->enmType = VMDKETYPE_FLAT;
3337 pExtent->cNominalSectors = VMDK_BYTE2SECTOR(pPart->cbPartitionData);
3338 pExtent->uSectorOffset = uPartOffset;
3339 pExtent->enmAccess = VMDKACCESS_READWRITE;
3340 pExtent->fMetaDirty = false;
3341
3342 /* Create partition table flat image. */
3343 rc = vmdkFileOpen(pImage, &pExtent->pFile, pExtent->pszFullname,
3344 RTFILE_O_READWRITE | RTFILE_O_CREATE | RTFILE_O_DENY_WRITE | RTFILE_O_NOT_CONTENT_INDEXED,
3345 false);
3346 if (RT_FAILURE(rc))
3347 return vmdkError(pImage, rc, RT_SRC_POS, N_("VMDK: could not create new partition data file '%s'"), pExtent->pszFullname);
3348 rc = vmdkFileWriteAt(pExtent->pFile,
3349 VMDK_SECTOR2BYTE(uPartOffset),
3350 pPart->pvPartitionData,
3351 pPart->cbPartitionData, NULL);
3352 if (RT_FAILURE(rc))
3353 return vmdkError(pImage, rc, RT_SRC_POS, N_("VMDK: could not write partition data to '%s'"), pExtent->pszFullname);
3354 uPartOffset += VMDK_BYTE2SECTOR(pPart->cbPartitionData);
3355 }
3356 if (pPart->cbPartition)
3357 {
3358 if (uStart != pPart->uPartitionStart)
3359 {
3360 pExtent = &pImage->pExtents[cExtents++];
3361 pExtent->pszBasename = NULL;
3362 pExtent->pszFullname = NULL;
3363 pExtent->enmType = VMDKETYPE_ZERO;
3364 pExtent->cNominalSectors = VMDK_BYTE2SECTOR(pPart->uPartitionStart - uStart);
3365 pExtent->uSectorOffset = 0;
3366 pExtent->enmAccess = VMDKACCESS_READWRITE;
3367 pExtent->fMetaDirty = false;
3368 }
3369 uStart = pPart->uPartitionStart + pPart->cbPartition;
3370 pExtent = &pImage->pExtents[cExtents++];
3371 if (pPart->pszRawDevice)
3372 {
3373 /* Set up basename for extent descr. Can't use StrDup. */
3374 size_t cbBasename = strlen(pPart->pszRawDevice) + 1;
3375 char *pszBasename = (char *)RTMemTmpAlloc(cbBasename);
3376 if (!pszBasename)
3377 return VERR_NO_MEMORY;
3378 memcpy(pszBasename, pPart->pszRawDevice, cbBasename);
3379 pExtent->pszBasename = pszBasename;
3380 /* For raw disks full name is identical to base name. */
3381 pExtent->pszFullname = RTStrDup(pszBasename);
3382 if (!pExtent->pszFullname)
3383 return VERR_NO_MEMORY;
3384 pExtent->enmType = VMDKETYPE_FLAT;
3385 pExtent->cNominalSectors = VMDK_BYTE2SECTOR(pPart->cbPartition);
3386 pExtent->uSectorOffset = VMDK_BYTE2SECTOR(pPart->uPartitionStartOffset);
3387 pExtent->enmAccess = VMDKACCESS_READWRITE;
3388 pExtent->fMetaDirty = false;
3389
3390 /* Open flat image, the raw partition. */
3391 rc = vmdkFileOpen(pImage, &pExtent->pFile, pExtent->pszFullname,
3392 RTFILE_O_READWRITE | RTFILE_O_OPEN | RTFILE_O_DENY_WRITE,
3393 false);
3394 if (RT_FAILURE(rc))
3395 return vmdkError(pImage, rc, RT_SRC_POS, N_("VMDK: could not open raw partition file '%s'"), pExtent->pszFullname);
3396 }
3397 else
3398 {
3399 pExtent->pszBasename = NULL;
3400 pExtent->pszFullname = NULL;
3401 pExtent->enmType = VMDKETYPE_ZERO;
3402 pExtent->cNominalSectors = VMDK_BYTE2SECTOR(pPart->cbPartition);
3403 pExtent->uSectorOffset = 0;
3404 pExtent->enmAccess = VMDKACCESS_READWRITE;
3405 pExtent->fMetaDirty = false;
3406 }
3407 }
3408 }
3409 /* Another extent for filling up the rest of the image. */
3410 if (uStart != cbSize)
3411 {
3412 pExtent = &pImage->pExtents[cExtents++];
3413 pExtent->pszBasename = NULL;
3414 pExtent->pszFullname = NULL;
3415 pExtent->enmType = VMDKETYPE_ZERO;
3416 pExtent->cNominalSectors = VMDK_BYTE2SECTOR(cbSize - uStart);
3417 pExtent->uSectorOffset = 0;
3418 pExtent->enmAccess = VMDKACCESS_READWRITE;
3419 pExtent->fMetaDirty = false;
3420 }
3421 }
3422
3423 rc = vmdkDescBaseSetStr(pImage, &pImage->Descriptor, "createType",
3424 pRaw->fRawDisk ?
3425 "fullDevice" : "partitionedDevice");
3426 if (RT_FAILURE(rc))
3427 return vmdkError(pImage, rc, RT_SRC_POS, N_("VMDK: could not set the image type in '%s'"), pImage->pszFilename);
3428 return rc;
3429}
3430
3431/**
3432 * Internal: create a regular (i.e. file-backed) VMDK image.
3433 */
3434static int vmdkCreateRegularImage(PVMDKIMAGE pImage, uint64_t cbSize,
3435 unsigned uImageFlags,
3436 PFNVMPROGRESS pfnProgress, void *pvUser,
3437 unsigned uPercentStart, unsigned uPercentSpan)
3438{
3439 int rc = VINF_SUCCESS;
3440 unsigned cExtents = 1;
3441 uint64_t cbOffset = 0;
3442 uint64_t cbRemaining = cbSize;
3443
3444 if (uImageFlags & VD_VMDK_IMAGE_FLAGS_SPLIT_2G)
3445 {
3446 cExtents = cbSize / VMDK_2G_SPLIT_SIZE;
3447 /* Do proper extent computation: need one smaller extent if the total
3448 * size isn't evenly divisible by the split size. */
3449 if (cbSize % VMDK_2G_SPLIT_SIZE)
3450 cExtents++;
3451 }
3452 rc = vmdkCreateExtents(pImage, cExtents);
3453 if (RT_FAILURE(rc))
3454 return vmdkError(pImage, rc, RT_SRC_POS, N_("VMDK: could not create new extent list in '%s'"), pImage->pszFilename);
3455
3456 /* Basename strings needed for constructing the extent names. */
3457 char *pszBasenameSubstr = RTPathFilename(pImage->pszFilename);
3458 AssertPtr(pszBasenameSubstr);
3459 size_t cbBasenameSubstr = strlen(pszBasenameSubstr) + 1;
3460
3461 /* Create searate descriptor file if necessary. */
3462 if (cExtents != 1 || (uImageFlags & VD_IMAGE_FLAGS_FIXED))
3463 {
3464 rc = vmdkFileOpen(pImage, &pImage->pFile, pImage->pszFilename,
3465 RTFILE_O_READWRITE | RTFILE_O_CREATE | RTFILE_O_DENY_WRITE | RTFILE_O_NOT_CONTENT_INDEXED,
3466 false);
3467 if (RT_FAILURE(rc))
3468 return vmdkError(pImage, rc, RT_SRC_POS, N_("VMDK: could not create new sparse descriptor file '%s'"), pImage->pszFilename);
3469 }
3470 else
3471 pImage->pFile = NULL;
3472
3473 /* Set up all extents. */
3474 for (unsigned i = 0; i < cExtents; i++)
3475 {
3476 PVMDKEXTENT pExtent = &pImage->pExtents[i];
3477 uint64_t cbExtent = cbRemaining;
3478
3479 /* Set up fullname/basename for extent description. Cannot use StrDup
3480 * for basename, as it is not guaranteed that the memory can be freed
3481 * with RTMemTmpFree, which must be used as in other code paths
3482 * StrDup is not usable. */
3483 if (cExtents == 1 && !(uImageFlags & VD_IMAGE_FLAGS_FIXED))
3484 {
3485 char *pszBasename = (char *)RTMemTmpAlloc(cbBasenameSubstr);
3486 if (!pszBasename)
3487 return VERR_NO_MEMORY;
3488 memcpy(pszBasename, pszBasenameSubstr, cbBasenameSubstr);
3489 pExtent->pszBasename = pszBasename;
3490 }
3491 else
3492 {
3493 char *pszBasenameExt = RTPathExt(pszBasenameSubstr);
3494 char *pszBasenameBase = RTStrDup(pszBasenameSubstr);
3495 RTPathStripExt(pszBasenameBase);
3496 char *pszTmp;
3497 size_t cbTmp;
3498 if (uImageFlags & VD_IMAGE_FLAGS_FIXED)
3499 {
3500 if (cExtents == 1)
3501 rc = RTStrAPrintf(&pszTmp, "%s-flat%s", pszBasenameBase,
3502 pszBasenameExt);
3503 else
3504 rc = RTStrAPrintf(&pszTmp, "%s-f%03d%s", pszBasenameBase,
3505 i+1, pszBasenameExt);
3506 }
3507 else
3508 rc = RTStrAPrintf(&pszTmp, "%s-s%03d%s", pszBasenameBase, i+1,
3509 pszBasenameExt);
3510 RTStrFree(pszBasenameBase);
3511 if (RT_FAILURE(rc))
3512 return rc;
3513 cbTmp = strlen(pszTmp) + 1;
3514 char *pszBasename = (char *)RTMemTmpAlloc(cbTmp);
3515 if (!pszBasename)
3516 return VERR_NO_MEMORY;
3517 memcpy(pszBasename, pszTmp, cbTmp);
3518 RTStrFree(pszTmp);
3519 pExtent->pszBasename = pszBasename;
3520 if (uImageFlags & VD_VMDK_IMAGE_FLAGS_SPLIT_2G)
3521 cbExtent = RT_MIN(cbRemaining, VMDK_2G_SPLIT_SIZE);
3522 }
3523 char *pszBasedirectory = RTStrDup(pImage->pszFilename);
3524 RTPathStripFilename(pszBasedirectory);
3525 char *pszFullname;
3526 rc = RTStrAPrintf(&pszFullname, "%s%c%s", pszBasedirectory,
3527 RTPATH_SLASH, pExtent->pszBasename);
3528 RTStrFree(pszBasedirectory);
3529 if (RT_FAILURE(rc))
3530 return rc;
3531 pExtent->pszFullname = pszFullname;
3532
3533 /* Create file for extent. */
3534 rc = vmdkFileOpen(pImage, &pExtent->pFile, pExtent->pszFullname,
3535 RTFILE_O_READWRITE | RTFILE_O_CREATE | RTFILE_O_DENY_WRITE | RTFILE_O_NOT_CONTENT_INDEXED,
3536 false);
3537 if (RT_FAILURE(rc))
3538 return vmdkError(pImage, rc, RT_SRC_POS, N_("VMDK: could not create new file '%s'"), pExtent->pszFullname);
3539 if (uImageFlags & VD_IMAGE_FLAGS_FIXED)
3540 {
3541 rc = vmdkFileSetSize(pExtent->pFile, cbExtent);
3542 if (RT_FAILURE(rc))
3543 return vmdkError(pImage, rc, RT_SRC_POS, N_("VMDK: could not set size of new file '%s'"), pExtent->pszFullname);
3544
3545 /* Fill image with zeroes. We do this for every fixed-size image since on some systems
3546 * (for example Windows Vista), it takes ages to write a block near the end of a sparse
3547 * file and the guest could complain about an ATA timeout. */
3548
3549 /** @todo Starting with Linux 2.6.23, there is an fallocate() system call.
3550 * Currently supported file systems are ext4 and ocfs2. */
3551
3552 /* Allocate a temporary zero-filled buffer. Use a bigger block size to optimize writing */
3553 const size_t cbBuf = 128 * _1K;
3554 void *pvBuf = RTMemTmpAllocZ(cbBuf);
3555 if (!pvBuf)
3556 return VERR_NO_MEMORY;
3557
3558 uint64_t uOff = 0;
3559 /* Write data to all image blocks. */
3560 while (uOff < cbExtent)
3561 {
3562 unsigned cbChunk = (unsigned)RT_MIN(cbExtent, cbBuf);
3563
3564 rc = vmdkFileWriteAt(pExtent->pFile, uOff, pvBuf, cbChunk, NULL);
3565 if (RT_FAILURE(rc))
3566 {
3567 RTMemFree(pvBuf);
3568 return vmdkError(pImage, rc, RT_SRC_POS, N_("VMDK: writing block failed for '%s'"), pImage->pszFilename);
3569 }
3570
3571 uOff += cbChunk;
3572
3573 if (pfnProgress)
3574 {
3575 rc = pfnProgress(NULL /* WARNING! pVM=NULL */,
3576 uPercentStart + uOff * uPercentSpan / cbExtent,
3577 pvUser);
3578 if (RT_FAILURE(rc))
3579 {
3580 RTMemFree(pvBuf);
3581 return rc;
3582 }
3583 }
3584 }
3585 RTMemTmpFree(pvBuf);
3586 }
3587
3588 /* Place descriptor file information (where integrated). */
3589 if (cExtents == 1 && !(uImageFlags & VD_IMAGE_FLAGS_FIXED))
3590 {
3591 pExtent->uDescriptorSector = 1;
3592 pExtent->cDescriptorSectors = VMDK_BYTE2SECTOR(pImage->cbDescAlloc);
3593 /* The descriptor is part of the (only) extent. */
3594 pExtent->pDescData = pImage->pDescData;
3595 pImage->pDescData = NULL;
3596 }
3597
3598 if (!(uImageFlags & VD_IMAGE_FLAGS_FIXED))
3599 {
3600 uint64_t cSectorsPerGDE, cSectorsPerGD;
3601 pExtent->enmType = VMDKETYPE_HOSTED_SPARSE;
3602 pExtent->cSectors = VMDK_BYTE2SECTOR(RT_ALIGN_64(cbExtent, 65536));
3603 pExtent->cSectorsPerGrain = VMDK_BYTE2SECTOR(65536);
3604 pExtent->cGTEntries = 512;
3605 cSectorsPerGDE = pExtent->cGTEntries * pExtent->cSectorsPerGrain;
3606 pExtent->cSectorsPerGDE = cSectorsPerGDE;
3607 pExtent->cGDEntries = (pExtent->cSectors + cSectorsPerGDE - 1) / cSectorsPerGDE;
3608 cSectorsPerGD = (pExtent->cGDEntries + (512 / sizeof(uint32_t) - 1)) / (512 / sizeof(uint32_t));
3609 if (pImage->uImageFlags & VD_VMDK_IMAGE_FLAGS_STREAM_OPTIMIZED)
3610 {
3611 /* The spec says version is 1 for all VMDKs, but the vast
3612 * majority of streamOptimized VMDKs actually contain
3613 * version 3 - so go with the majority. Both are acepted. */
3614 pExtent->uVersion = 3;
3615 pExtent->uCompression = VMDK_COMPRESSION_DEFLATE;
3616 }
3617 }
3618 else
3619 pExtent->enmType = VMDKETYPE_FLAT;
3620
3621 pExtent->enmAccess = VMDKACCESS_READWRITE;
3622 pExtent->fUncleanShutdown = true;
3623 pExtent->cNominalSectors = VMDK_BYTE2SECTOR(cbExtent);
3624 pExtent->uSectorOffset = VMDK_BYTE2SECTOR(cbOffset);
3625 pExtent->fMetaDirty = true;
3626
3627 if (!(uImageFlags & VD_IMAGE_FLAGS_FIXED))
3628 {
3629 rc = vmdkCreateGrainDirectory(pExtent,
3630 RT_MAX( pExtent->uDescriptorSector
3631 + pExtent->cDescriptorSectors,
3632 1),
3633 true);
3634 if (RT_FAILURE(rc))
3635 return vmdkError(pImage, rc, RT_SRC_POS, N_("VMDK: could not create new grain directory in '%s'"), pExtent->pszFullname);
3636 }
3637
3638 if (RT_SUCCESS(rc) && pfnProgress)
3639 pfnProgress(NULL /* WARNING! pVM=NULL */,
3640 uPercentStart + i * uPercentSpan / cExtents,
3641 pvUser);
3642
3643 cbRemaining -= cbExtent;
3644 cbOffset += cbExtent;
3645 }
3646
3647 const char *pszDescType = NULL;
3648 if (uImageFlags & VD_IMAGE_FLAGS_FIXED)
3649 {
3650 pszDescType = (cExtents == 1)
3651 ? "monolithicFlat" : "twoGbMaxExtentFlat";
3652 }
3653 else
3654 {
3655 if (pImage->uImageFlags & VD_VMDK_IMAGE_FLAGS_STREAM_OPTIMIZED)
3656 pszDescType = "streamOptimized";
3657 else
3658 {
3659 pszDescType = (cExtents == 1)
3660 ? "monolithicSparse" : "twoGbMaxExtentSparse";
3661 }
3662 }
3663 rc = vmdkDescBaseSetStr(pImage, &pImage->Descriptor, "createType",
3664 pszDescType);
3665 if (RT_FAILURE(rc))
3666 return vmdkError(pImage, rc, RT_SRC_POS, N_("VMDK: could not set the image type in '%s'"), pImage->pszFilename);
3667 return rc;
3668}
3669
3670/**
3671 * Internal: The actual code for creating any VMDK variant currently in
3672 * existence on hosted environments.
3673 */
3674static int vmdkCreateImage(PVMDKIMAGE pImage, uint64_t cbSize,
3675 unsigned uImageFlags, const char *pszComment,
3676 PCPDMMEDIAGEOMETRY pPCHSGeometry,
3677 PCPDMMEDIAGEOMETRY pLCHSGeometry, PCRTUUID pUuid,
3678 PFNVMPROGRESS pfnProgress, void *pvUser,
3679 unsigned uPercentStart, unsigned uPercentSpan)
3680{
3681 int rc;
3682
3683 pImage->uImageFlags = uImageFlags;
3684
3685 /* Try to get error interface. */
3686 pImage->pInterfaceError = VDInterfaceGet(pImage->pVDIfsDisk, VDINTERFACETYPE_ERROR);
3687 if (pImage->pInterfaceError)
3688 pImage->pInterfaceErrorCallbacks = VDGetInterfaceError(pImage->pInterfaceError);
3689
3690 /* Try to get async I/O interface. */
3691 pImage->pInterfaceAsyncIO = VDInterfaceGet(pImage->pVDIfsDisk, VDINTERFACETYPE_ASYNCIO);
3692 if (pImage->pInterfaceAsyncIO)
3693 pImage->pInterfaceAsyncIOCallbacks = VDGetInterfaceAsyncIO(pImage->pInterfaceAsyncIO);
3694
3695 rc = vmdkCreateDescriptor(pImage, pImage->pDescData, pImage->cbDescAlloc,
3696 &pImage->Descriptor);
3697 if (RT_FAILURE(rc))
3698 {
3699 rc = vmdkError(pImage, rc, RT_SRC_POS, N_("VMDK: could not create new descriptor in '%s'"), pImage->pszFilename);
3700 goto out;
3701 }
3702
3703 if ( (uImageFlags & VD_IMAGE_FLAGS_FIXED)
3704 && (uImageFlags & VD_VMDK_IMAGE_FLAGS_RAWDISK))
3705 {
3706 /* Raw disk image (includes raw partition). */
3707 const PVBOXHDDRAW pRaw = (const PVBOXHDDRAW)pszComment;
3708 /* As the comment is misused, zap it so that no garbage comment
3709 * is set below. */
3710 pszComment = NULL;
3711 rc = vmdkCreateRawImage(pImage, pRaw, cbSize);
3712 }
3713 else
3714 {
3715 /* Regular fixed or sparse image (monolithic or split). */
3716 rc = vmdkCreateRegularImage(pImage, cbSize, uImageFlags,
3717 pfnProgress, pvUser, uPercentStart,
3718 uPercentSpan * 95 / 100);
3719 }
3720
3721 if (RT_FAILURE(rc))
3722 goto out;
3723
3724 if (RT_SUCCESS(rc) && pfnProgress)
3725 pfnProgress(NULL /* WARNING! pVM=NULL */,
3726 uPercentStart + uPercentSpan * 98 / 100, pvUser);
3727
3728 pImage->cbSize = cbSize;
3729
3730 for (unsigned i = 0; i < pImage->cExtents; i++)
3731 {
3732 PVMDKEXTENT pExtent = &pImage->pExtents[i];
3733
3734 rc = vmdkDescExtInsert(pImage, &pImage->Descriptor, pExtent->enmAccess,
3735 pExtent->cNominalSectors, pExtent->enmType,
3736 pExtent->pszBasename, pExtent->uSectorOffset);
3737 if (RT_FAILURE(rc))
3738 {
3739 rc = vmdkError(pImage, rc, RT_SRC_POS, N_("VMDK: could not insert the extent list into descriptor in '%s'"), pImage->pszFilename);
3740 goto out;
3741 }
3742 }
3743 vmdkDescExtRemoveDummy(pImage, &pImage->Descriptor);
3744
3745 if ( pPCHSGeometry->cCylinders != 0
3746 && pPCHSGeometry->cHeads != 0
3747 && pPCHSGeometry->cSectors != 0)
3748 {
3749 rc = vmdkDescSetPCHSGeometry(pImage, pPCHSGeometry);
3750 if (RT_FAILURE(rc))
3751 goto out;
3752 }
3753 if ( pLCHSGeometry->cCylinders != 0
3754 && pLCHSGeometry->cHeads != 0
3755 && pLCHSGeometry->cSectors != 0)
3756 {
3757 rc = vmdkDescSetLCHSGeometry(pImage, pLCHSGeometry);
3758 if (RT_FAILURE(rc))
3759 goto out;
3760 }
3761
3762 pImage->LCHSGeometry = *pLCHSGeometry;
3763 pImage->PCHSGeometry = *pPCHSGeometry;
3764
3765 pImage->ImageUuid = *pUuid;
3766 rc = vmdkDescDDBSetUuid(pImage, &pImage->Descriptor,
3767 VMDK_DDB_IMAGE_UUID, &pImage->ImageUuid);
3768 if (RT_FAILURE(rc))
3769 {
3770 rc = vmdkError(pImage, rc, RT_SRC_POS, N_("VMDK: error storing image UUID in new descriptor in '%s'"), pImage->pszFilename);
3771 goto out;
3772 }
3773 RTUuidClear(&pImage->ParentUuid);
3774 rc = vmdkDescDDBSetUuid(pImage, &pImage->Descriptor,
3775 VMDK_DDB_PARENT_UUID, &pImage->ParentUuid);
3776 if (RT_FAILURE(rc))
3777 {
3778 rc = vmdkError(pImage, rc, RT_SRC_POS, N_("VMDK: error storing parent image UUID in new descriptor in '%s'"), pImage->pszFilename);
3779 goto out;
3780 }
3781 RTUuidClear(&pImage->ModificationUuid);
3782 rc = vmdkDescDDBSetUuid(pImage, &pImage->Descriptor,
3783 VMDK_DDB_MODIFICATION_UUID,
3784 &pImage->ModificationUuid);
3785 if (RT_FAILURE(rc))
3786 {
3787 rc = vmdkError(pImage, rc, RT_SRC_POS, N_("VMDK: error storing modification UUID in new descriptor in '%s'"), pImage->pszFilename);
3788 goto out;
3789 }
3790 RTUuidClear(&pImage->ParentModificationUuid);
3791 rc = vmdkDescDDBSetUuid(pImage, &pImage->Descriptor,
3792 VMDK_DDB_PARENT_MODIFICATION_UUID,
3793 &pImage->ParentModificationUuid);
3794 if (RT_FAILURE(rc))
3795 {
3796 rc = vmdkError(pImage, rc, RT_SRC_POS, N_("VMDK: error storing parent modification UUID in new descriptor in '%s'"), pImage->pszFilename);
3797 goto out;
3798 }
3799
3800 rc = vmdkAllocateGrainTableCache(pImage);
3801 if (RT_FAILURE(rc))
3802 goto out;
3803
3804 rc = vmdkSetImageComment(pImage, pszComment);
3805 if (RT_FAILURE(rc))
3806 {
3807 rc = vmdkError(pImage, rc, RT_SRC_POS, N_("VMDK: cannot set image comment in '%s'"), pImage->pszFilename);
3808 goto out;
3809 }
3810
3811 if (RT_SUCCESS(rc) && pfnProgress)
3812 pfnProgress(NULL /* WARNING! pVM=NULL */,
3813 uPercentStart + uPercentSpan * 99 / 100, pvUser);
3814
3815 rc = vmdkFlushImage(pImage);
3816
3817out:
3818 if (RT_SUCCESS(rc) && pfnProgress)
3819 pfnProgress(NULL /* WARNING! pVM=NULL */,
3820 uPercentStart + uPercentSpan, pvUser);
3821
3822 if (RT_FAILURE(rc))
3823 vmdkFreeImage(pImage, rc != VERR_ALREADY_EXISTS);
3824 return rc;
3825}
3826
3827/**
3828 * Internal: Update image comment.
3829 */
3830static int vmdkSetImageComment(PVMDKIMAGE pImage, const char *pszComment)
3831{
3832 char *pszCommentEncoded;
3833 if (pszComment)
3834 {
3835 pszCommentEncoded = vmdkEncodeString(pszComment);
3836 if (!pszCommentEncoded)
3837 return VERR_NO_MEMORY;
3838 }
3839 else
3840 pszCommentEncoded = NULL;
3841 int rc = vmdkDescDDBSetStr(pImage, &pImage->Descriptor,
3842 "ddb.comment", pszCommentEncoded);
3843 if (pszComment)
3844 RTStrFree(pszCommentEncoded);
3845 if (RT_FAILURE(rc))
3846 return vmdkError(pImage, rc, RT_SRC_POS, N_("VMDK: error storing image comment in descriptor in '%s'"), pImage->pszFilename);
3847 return VINF_SUCCESS;
3848}
3849
3850/**
3851 * Internal. Free all allocated space for representing an image, and optionally
3852 * delete the image from disk.
3853 */
3854static void vmdkFreeImage(PVMDKIMAGE pImage, bool fDelete)
3855{
3856 AssertPtr(pImage);
3857
3858 if (!(pImage->uOpenFlags & VD_OPEN_FLAGS_READONLY))
3859 {
3860 /* Mark all extents as clean. */
3861 for (unsigned i = 0; i < pImage->cExtents; i++)
3862 {
3863 if (( pImage->pExtents[i].enmType == VMDKETYPE_HOSTED_SPARSE
3864#ifdef VBOX_WITH_VMDK_ESX
3865 || pImage->pExtents[i].enmType == VMDKETYPE_ESX_SPARSE
3866#endif /* VBOX_WITH_VMDK_ESX */
3867 )
3868 && pImage->pExtents[i].fUncleanShutdown)
3869 {
3870 pImage->pExtents[i].fUncleanShutdown = false;
3871 pImage->pExtents[i].fMetaDirty = true;
3872 }
3873 }
3874 }
3875 (void)vmdkFlushImage(pImage);
3876
3877 if (pImage->pExtents != NULL)
3878 {
3879 for (unsigned i = 0 ; i < pImage->cExtents; i++)
3880 vmdkFreeExtentData(pImage, &pImage->pExtents[i], fDelete);
3881 RTMemFree(pImage->pExtents);
3882 pImage->pExtents = NULL;
3883 }
3884 pImage->cExtents = 0;
3885 if (pImage->pFile != NULL)
3886 vmdkFileClose(pImage, &pImage->pFile, fDelete);
3887 vmdkFileCheckAllClose(pImage);
3888 if (pImage->pGTCache)
3889 {
3890 RTMemFree(pImage->pGTCache);
3891 pImage->pGTCache = NULL;
3892 }
3893 if (pImage->pDescData)
3894 {
3895 RTMemFree(pImage->pDescData);
3896 pImage->pDescData = NULL;
3897 }
3898}
3899
3900/**
3901 * Internal. Flush image data (and metadata) to disk.
3902 */
3903static int vmdkFlushImage(PVMDKIMAGE pImage)
3904{
3905 PVMDKEXTENT pExtent;
3906 int rc = VINF_SUCCESS;
3907
3908 /* Update descriptor if changed. */
3909 if (pImage->Descriptor.fDirty)
3910 {
3911 rc = vmdkWriteDescriptor(pImage);
3912 if (RT_FAILURE(rc))
3913 goto out;
3914 }
3915
3916 for (unsigned i = 0; i < pImage->cExtents; i++)
3917 {
3918 pExtent = &pImage->pExtents[i];
3919 if (pExtent->pFile != NULL && pExtent->fMetaDirty)
3920 {
3921 switch (pExtent->enmType)
3922 {
3923 case VMDKETYPE_HOSTED_SPARSE:
3924 rc = vmdkWriteMetaSparseExtent(pExtent, 0);
3925 if (RT_FAILURE(rc))
3926 goto out;
3927 if (pExtent->fFooter)
3928 {
3929 uint64_t cbSize;
3930 rc = vmdkFileGetSize(pExtent->pFile, &cbSize);
3931 if (RT_FAILURE(rc))
3932 goto out;
3933 cbSize = RT_ALIGN_64(cbSize, 512);
3934 rc = vmdkWriteMetaSparseExtent(pExtent, cbSize - 2*512);
3935 if (RT_FAILURE(rc))
3936 goto out;
3937 }
3938 break;
3939#ifdef VBOX_WITH_VMDK_ESX
3940 case VMDKETYPE_ESX_SPARSE:
3941 /** @todo update the header. */
3942 break;
3943#endif /* VBOX_WITH_VMDK_ESX */
3944 case VMDKETYPE_FLAT:
3945 /* Nothing to do. */
3946 break;
3947 case VMDKETYPE_ZERO:
3948 default:
3949 AssertMsgFailed(("extent with type %d marked as dirty\n",
3950 pExtent->enmType));
3951 break;
3952 }
3953 }
3954 switch (pExtent->enmType)
3955 {
3956 case VMDKETYPE_HOSTED_SPARSE:
3957#ifdef VBOX_WITH_VMDK_ESX
3958 case VMDKETYPE_ESX_SPARSE:
3959#endif /* VBOX_WITH_VMDK_ESX */
3960 case VMDKETYPE_FLAT:
3961 /** @todo implement proper path absolute check. */
3962 if ( pExtent->pFile != NULL
3963 && !(pImage->uOpenFlags & VD_OPEN_FLAGS_READONLY)
3964 && !(pExtent->pszBasename[0] == RTPATH_SLASH))
3965 rc = vmdkFileFlush(pExtent->pFile);
3966 break;
3967 case VMDKETYPE_ZERO:
3968 /* No need to do anything for this extent. */
3969 break;
3970 default:
3971 AssertMsgFailed(("unknown extent type %d\n", pExtent->enmType));
3972 break;
3973 }
3974 }
3975
3976out:
3977 return rc;
3978}
3979
3980/**
3981 * Internal. Find extent corresponding to the sector number in the disk.
3982 */
3983static int vmdkFindExtent(PVMDKIMAGE pImage, uint64_t offSector,
3984 PVMDKEXTENT *ppExtent, uint64_t *puSectorInExtent)
3985{
3986 PVMDKEXTENT pExtent = NULL;
3987 int rc = VINF_SUCCESS;
3988
3989 for (unsigned i = 0; i < pImage->cExtents; i++)
3990 {
3991 if (offSector < pImage->pExtents[i].cNominalSectors)
3992 {
3993 pExtent = &pImage->pExtents[i];
3994 *puSectorInExtent = offSector + pImage->pExtents[i].uSectorOffset;
3995 break;
3996 }
3997 offSector -= pImage->pExtents[i].cNominalSectors;
3998 }
3999
4000 if (pExtent)
4001 *ppExtent = pExtent;
4002 else
4003 rc = VERR_IO_SECTOR_NOT_FOUND;
4004
4005 return rc;
4006}
4007
4008/**
4009 * Internal. Hash function for placing the grain table hash entries.
4010 */
4011static uint32_t vmdkGTCacheHash(PVMDKGTCACHE pCache, uint64_t uSector,
4012 unsigned uExtent)
4013{
4014 /** @todo this hash function is quite simple, maybe use a better one which
4015 * scrambles the bits better. */
4016 return (uSector + uExtent) % pCache->cEntries;
4017}
4018
4019/**
4020 * Internal. Get sector number in the extent file from the relative sector
4021 * number in the extent.
4022 */
4023static int vmdkGetSector(PVMDKGTCACHE pCache, PVMDKEXTENT pExtent,
4024 uint64_t uSector, uint64_t *puExtentSector)
4025{
4026 uint64_t uGDIndex, uGTSector, uGTBlock;
4027 uint32_t uGTHash, uGTBlockIndex;
4028 PVMDKGTCACHEENTRY pGTCacheEntry;
4029 uint32_t aGTDataTmp[VMDK_GT_CACHELINE_SIZE];
4030 int rc;
4031
4032 uGDIndex = uSector / pExtent->cSectorsPerGDE;
4033 if (uGDIndex >= pExtent->cGDEntries)
4034 return VERR_OUT_OF_RANGE;
4035 uGTSector = pExtent->pGD[uGDIndex];
4036 if (!uGTSector)
4037 {
4038 /* There is no grain table referenced by this grain directory
4039 * entry. So there is absolutely no data in this area. */
4040 *puExtentSector = 0;
4041 return VINF_SUCCESS;
4042 }
4043
4044 uGTBlock = uSector / (pExtent->cSectorsPerGrain * VMDK_GT_CACHELINE_SIZE);
4045 uGTHash = vmdkGTCacheHash(pCache, uGTBlock, pExtent->uExtent);
4046 pGTCacheEntry = &pCache->aGTCache[uGTHash];
4047 if ( pGTCacheEntry->uExtent != pExtent->uExtent
4048 || pGTCacheEntry->uGTBlock != uGTBlock)
4049 {
4050 /* Cache miss, fetch data from disk. */
4051 rc = vmdkFileReadAt(pExtent->pFile,
4052 VMDK_SECTOR2BYTE(uGTSector) + (uGTBlock % (pExtent->cGTEntries / VMDK_GT_CACHELINE_SIZE)) * sizeof(aGTDataTmp),
4053 aGTDataTmp, sizeof(aGTDataTmp), NULL);
4054 if (RT_FAILURE(rc))
4055 return vmdkError(pExtent->pImage, rc, RT_SRC_POS, N_("VMDK: cannot read grain table entry in '%s'"), pExtent->pszFullname);
4056 pGTCacheEntry->uExtent = pExtent->uExtent;
4057 pGTCacheEntry->uGTBlock = uGTBlock;
4058 for (unsigned i = 0; i < VMDK_GT_CACHELINE_SIZE; i++)
4059 pGTCacheEntry->aGTData[i] = RT_LE2H_U32(aGTDataTmp[i]);
4060 }
4061 uGTBlockIndex = (uSector / pExtent->cSectorsPerGrain) % VMDK_GT_CACHELINE_SIZE;
4062 uint32_t uGrainSector = pGTCacheEntry->aGTData[uGTBlockIndex];
4063 if (uGrainSector)
4064 *puExtentSector = uGrainSector + uSector % pExtent->cSectorsPerGrain;
4065 else
4066 *puExtentSector = 0;
4067 return VINF_SUCCESS;
4068}
4069
4070/**
4071 * Internal. Allocates a new grain table (if necessary), writes the grain
4072 * and updates the grain table. The cache is also updated by this operation.
4073 * This is separate from vmdkGetSector, because that should be as fast as
4074 * possible. Most code from vmdkGetSector also appears here.
4075 */
4076static int vmdkAllocGrain(PVMDKGTCACHE pCache, PVMDKEXTENT pExtent,
4077 uint64_t uSector, const void *pvBuf,
4078 uint64_t cbWrite)
4079{
4080 uint64_t uGDIndex, uGTSector, uRGTSector, uGTBlock;
4081 uint64_t cbExtentSize;
4082 uint32_t uGTHash, uGTBlockIndex;
4083 PVMDKGTCACHEENTRY pGTCacheEntry;
4084 uint32_t aGTDataTmp[VMDK_GT_CACHELINE_SIZE];
4085 int rc;
4086
4087 uGDIndex = uSector / pExtent->cSectorsPerGDE;
4088 if (uGDIndex >= pExtent->cGDEntries)
4089 return VERR_OUT_OF_RANGE;
4090 uGTSector = pExtent->pGD[uGDIndex];
4091 if (pExtent->pRGD)
4092 uRGTSector = pExtent->pRGD[uGDIndex];
4093 else
4094 uRGTSector = 0; /**< avoid compiler warning */
4095 if (!uGTSector)
4096 {
4097 /* There is no grain table referenced by this grain directory
4098 * entry. So there is absolutely no data in this area. Allocate
4099 * a new grain table and put the reference to it in the GDs. */
4100 rc = vmdkFileGetSize(pExtent->pFile, &cbExtentSize);
4101 if (RT_FAILURE(rc))
4102 return vmdkError(pExtent->pImage, rc, RT_SRC_POS, N_("VMDK: error getting size in '%s'"), pExtent->pszFullname);
4103 Assert(!(cbExtentSize % 512));
4104 cbExtentSize = RT_ALIGN_64(cbExtentSize, 512);
4105 uGTSector = VMDK_BYTE2SECTOR(cbExtentSize);
4106 /* For writable streamOptimized extents the final sector is the
4107 * end-of-stream marker. Will be re-added after the grain table.
4108 * If the file has a footer it also will be re-added before EOS. */
4109 if (pExtent->pImage->uImageFlags & VD_VMDK_IMAGE_FLAGS_STREAM_OPTIMIZED)
4110 {
4111 uint64_t uEOSOff = 0;
4112 uGTSector--;
4113 if (pExtent->fFooter)
4114 {
4115 uGTSector--;
4116 uEOSOff = 512;
4117 rc = vmdkWriteMetaSparseExtent(pExtent, VMDK_SECTOR2BYTE(uGTSector) + pExtent->cGTEntries * sizeof(uint32_t));
4118 if (RT_FAILURE(rc))
4119 return vmdkError(pExtent->pImage, rc, RT_SRC_POS, N_("VMDK: cannot write footer after grain table in '%s'"), pExtent->pszFullname);
4120 }
4121 pExtent->uLastGrainSector = 0;
4122 uint8_t aEOS[512];
4123 memset(aEOS, '\0', sizeof(aEOS));
4124 rc = vmdkFileWriteAt(pExtent->pFile,
4125 VMDK_SECTOR2BYTE(uGTSector) + pExtent->cGTEntries * sizeof(uint32_t) + uEOSOff,
4126 aEOS, sizeof(aEOS), NULL);
4127 if (RT_FAILURE(rc))
4128 return vmdkError(pExtent->pImage, rc, RT_SRC_POS, N_("VMDK: cannot write end-of stream marker after grain table in '%s'"), pExtent->pszFullname);
4129 }
4130 /* Normally the grain table is preallocated for hosted sparse extents
4131 * that support more than 32 bit sector numbers. So this shouldn't
4132 * ever happen on a valid extent. */
4133 if (uGTSector > UINT32_MAX)
4134 return VERR_VD_VMDK_INVALID_HEADER;
4135 /* Write grain table by writing the required number of grain table
4136 * cache chunks. Avoids dynamic memory allocation, but is a bit
4137 * slower. But as this is a pretty infrequently occurring case it
4138 * should be acceptable. */
4139 memset(aGTDataTmp, '\0', sizeof(aGTDataTmp));
4140 for (unsigned i = 0;
4141 i < pExtent->cGTEntries / VMDK_GT_CACHELINE_SIZE;
4142 i++)
4143 {
4144 rc = vmdkFileWriteAt(pExtent->pFile,
4145 VMDK_SECTOR2BYTE(uGTSector) + i * sizeof(aGTDataTmp),
4146 aGTDataTmp, sizeof(aGTDataTmp), NULL);
4147 if (RT_FAILURE(rc))
4148 return vmdkError(pExtent->pImage, rc, RT_SRC_POS, N_("VMDK: cannot write grain table allocation in '%s'"), pExtent->pszFullname);
4149 }
4150 if (pExtent->pRGD)
4151 {
4152 AssertReturn(!uRGTSector, VERR_VD_VMDK_INVALID_HEADER);
4153 rc = vmdkFileGetSize(pExtent->pFile, &cbExtentSize);
4154 if (RT_FAILURE(rc))
4155 return vmdkError(pExtent->pImage, rc, RT_SRC_POS, N_("VMDK: error getting size in '%s'"), pExtent->pszFullname);
4156 Assert(!(cbExtentSize % 512));
4157 uRGTSector = VMDK_BYTE2SECTOR(cbExtentSize);
4158 /* For writable streamOptimized extents the final sector is the
4159 * end-of-stream marker. Will be re-added after the grain table.
4160 * If the file has a footer it also will be re-added before EOS. */
4161 if (pExtent->pImage->uImageFlags & VD_VMDK_IMAGE_FLAGS_STREAM_OPTIMIZED)
4162 {
4163 uint64_t uEOSOff = 0;
4164 uRGTSector--;
4165 if (pExtent->fFooter)
4166 {
4167 uRGTSector--;
4168 uEOSOff = 512;
4169 rc = vmdkWriteMetaSparseExtent(pExtent, VMDK_SECTOR2BYTE(uRGTSector) + pExtent->cGTEntries * sizeof(uint32_t));
4170 if (RT_FAILURE(rc))
4171 return vmdkError(pExtent->pImage, rc, RT_SRC_POS, N_("VMDK: cannot write footer after redundant grain table in '%s'"), pExtent->pszFullname);
4172 }
4173 pExtent->uLastGrainSector = 0;
4174 uint8_t aEOS[512];
4175 memset(aEOS, '\0', sizeof(aEOS));
4176 rc = vmdkFileWriteAt(pExtent->pFile,
4177 VMDK_SECTOR2BYTE(uRGTSector) + pExtent->cGTEntries * sizeof(uint32_t) + uEOSOff,
4178 aEOS, sizeof(aEOS), NULL);
4179 if (RT_FAILURE(rc))
4180 return vmdkError(pExtent->pImage, rc, RT_SRC_POS, N_("VMDK: cannot write end-of stream marker after redundant grain table in '%s'"), pExtent->pszFullname);
4181 }
4182 /* Normally the redundant grain table is preallocated for hosted
4183 * sparse extents that support more than 32 bit sector numbers. So
4184 * this shouldn't ever happen on a valid extent. */
4185 if (uRGTSector > UINT32_MAX)
4186 return VERR_VD_VMDK_INVALID_HEADER;
4187 /* Write backup grain table by writing the required number of grain
4188 * table cache chunks. Avoids dynamic memory allocation, but is a
4189 * bit slower. But as this is a pretty infrequently occurring case
4190 * it should be acceptable. */
4191 for (unsigned i = 0;
4192 i < pExtent->cGTEntries / VMDK_GT_CACHELINE_SIZE;
4193 i++)
4194 {
4195 rc = vmdkFileWriteAt(pExtent->pFile,
4196 VMDK_SECTOR2BYTE(uRGTSector) + i * sizeof(aGTDataTmp),
4197 aGTDataTmp, sizeof(aGTDataTmp), NULL);
4198 if (RT_FAILURE(rc))
4199 return vmdkError(pExtent->pImage, rc, RT_SRC_POS, N_("VMDK: cannot write backup grain table allocation in '%s'"), pExtent->pszFullname);
4200 }
4201 }
4202
4203 /* Update the grain directory on disk (doing it before writing the
4204 * grain table will result in a garbled extent if the operation is
4205 * aborted for some reason. Otherwise the worst that can happen is
4206 * some unused sectors in the extent. */
4207 uint32_t uGTSectorLE = RT_H2LE_U64(uGTSector);
4208 rc = vmdkFileWriteAt(pExtent->pFile,
4209 VMDK_SECTOR2BYTE(pExtent->uSectorGD) + uGDIndex * sizeof(uGTSectorLE),
4210 &uGTSectorLE, sizeof(uGTSectorLE), NULL);
4211 if (RT_FAILURE(rc))
4212 return vmdkError(pExtent->pImage, rc, RT_SRC_POS, N_("VMDK: cannot write grain directory entry in '%s'"), pExtent->pszFullname);
4213 if (pExtent->pRGD)
4214 {
4215 uint32_t uRGTSectorLE = RT_H2LE_U64(uRGTSector);
4216 rc = vmdkFileWriteAt(pExtent->pFile,
4217 VMDK_SECTOR2BYTE(pExtent->uSectorRGD) + uGDIndex * sizeof(uRGTSectorLE),
4218 &uRGTSectorLE, sizeof(uRGTSectorLE), NULL);
4219 if (RT_FAILURE(rc))
4220 return vmdkError(pExtent->pImage, rc, RT_SRC_POS, N_("VMDK: cannot write backup grain directory entry in '%s'"), pExtent->pszFullname);
4221 }
4222
4223 /* As the final step update the in-memory copy of the GDs. */
4224 pExtent->pGD[uGDIndex] = uGTSector;
4225 if (pExtent->pRGD)
4226 pExtent->pRGD[uGDIndex] = uRGTSector;
4227 }
4228
4229 rc = vmdkFileGetSize(pExtent->pFile, &cbExtentSize);
4230 if (RT_FAILURE(rc))
4231 return vmdkError(pExtent->pImage, rc, RT_SRC_POS, N_("VMDK: error getting size in '%s'"), pExtent->pszFullname);
4232 Assert(!(cbExtentSize % 512));
4233
4234 /* Write the data. Always a full grain, or we're in big trouble. */
4235 if (pExtent->pImage->uImageFlags & VD_VMDK_IMAGE_FLAGS_STREAM_OPTIMIZED)
4236 {
4237 /* For streamOptimized extents this is a little more difficult, as the
4238 * cached data also needs to be updated, to handle updating the last
4239 * written block properly. Also we're trying to avoid unnecessary gaps.
4240 * Additionally the end-of-stream marker needs to be written. */
4241 if (!pExtent->uLastGrainSector)
4242 {
4243 cbExtentSize -= 512;
4244 if (pExtent->fFooter)
4245 cbExtentSize -= 512;
4246 }
4247 else
4248 cbExtentSize = VMDK_SECTOR2BYTE(pExtent->uLastGrainSector) + pExtent->cbLastGrainWritten;
4249 Assert(cbWrite == VMDK_SECTOR2BYTE(pExtent->cSectorsPerGrain));
4250 uint32_t cbGrain = 0;
4251 rc = vmdkFileDeflateAt(pExtent->pFile, cbExtentSize,
4252 pvBuf, cbWrite, VMDK_MARKER_IGNORE, uSector, &cbGrain);
4253 if (RT_FAILURE(rc))
4254 {
4255 pExtent->uGrainSector = 0;
4256 pExtent->uLastGrainSector = 0;
4257 AssertRC(rc);
4258 return vmdkError(pExtent->pImage, rc, RT_SRC_POS, N_("VMDK: cannot write allocated compressed data block in '%s'"), pExtent->pszFullname);
4259 }
4260 cbGrain = RT_ALIGN(cbGrain, 512);
4261 pExtent->uLastGrainSector = VMDK_BYTE2SECTOR(cbExtentSize);
4262 pExtent->uLastGrainWritten = uSector / pExtent->cSectorsPerGrain;
4263 pExtent->cbLastGrainWritten = cbGrain;
4264 memcpy(pExtent->pvGrain, pvBuf, cbWrite);
4265 pExtent->uGrainSector = uSector;
4266
4267 uint64_t uEOSOff = 0;
4268 if (pExtent->fFooter)
4269 {
4270 uEOSOff = 512;
4271 rc = vmdkWriteMetaSparseExtent(pExtent, cbExtentSize + RT_ALIGN(cbGrain, 512));
4272 if (RT_FAILURE(rc))
4273 return vmdkError(pExtent->pImage, rc, RT_SRC_POS, N_("VMDK: cannot write footer after allocated data block in '%s'"), pExtent->pszFullname);
4274 }
4275 uint8_t aEOS[512];
4276 memset(aEOS, '\0', sizeof(aEOS));
4277 rc = vmdkFileWriteAt(pExtent->pFile, cbExtentSize + RT_ALIGN(cbGrain, 512) + uEOSOff,
4278 aEOS, sizeof(aEOS), NULL);
4279 if (RT_FAILURE(rc))
4280 return vmdkError(pExtent->pImage, rc, RT_SRC_POS, N_("VMDK: cannot write end-of stream marker after allocated data block in '%s'"), pExtent->pszFullname);
4281 }
4282 else
4283 {
4284 rc = vmdkFileWriteAt(pExtent->pFile, cbExtentSize, pvBuf, cbWrite, NULL);
4285 if (RT_FAILURE(rc))
4286 return vmdkError(pExtent->pImage, rc, RT_SRC_POS, N_("VMDK: cannot write allocated data block in '%s'"), pExtent->pszFullname);
4287 }
4288
4289 /* Update the grain table (and the cache). */
4290 uGTBlock = uSector / (pExtent->cSectorsPerGrain * VMDK_GT_CACHELINE_SIZE);
4291 uGTHash = vmdkGTCacheHash(pCache, uGTBlock, pExtent->uExtent);
4292 pGTCacheEntry = &pCache->aGTCache[uGTHash];
4293 if ( pGTCacheEntry->uExtent != pExtent->uExtent
4294 || pGTCacheEntry->uGTBlock != uGTBlock)
4295 {
4296 /* Cache miss, fetch data from disk. */
4297 rc = vmdkFileReadAt(pExtent->pFile,
4298 VMDK_SECTOR2BYTE(uGTSector) + (uGTBlock % (pExtent->cGTEntries / VMDK_GT_CACHELINE_SIZE)) * sizeof(aGTDataTmp),
4299 aGTDataTmp, sizeof(aGTDataTmp), NULL);
4300 if (RT_FAILURE(rc))
4301 return vmdkError(pExtent->pImage, rc, RT_SRC_POS, N_("VMDK: cannot read allocated grain table entry in '%s'"), pExtent->pszFullname);
4302 pGTCacheEntry->uExtent = pExtent->uExtent;
4303 pGTCacheEntry->uGTBlock = uGTBlock;
4304 for (unsigned i = 0; i < VMDK_GT_CACHELINE_SIZE; i++)
4305 pGTCacheEntry->aGTData[i] = RT_LE2H_U32(aGTDataTmp[i]);
4306 }
4307 else
4308 {
4309 /* Cache hit. Convert grain table block back to disk format, otherwise
4310 * the code below will write garbage for all but the updated entry. */
4311 for (unsigned i = 0; i < VMDK_GT_CACHELINE_SIZE; i++)
4312 aGTDataTmp[i] = RT_H2LE_U32(pGTCacheEntry->aGTData[i]);
4313 }
4314 uGTBlockIndex = (uSector / pExtent->cSectorsPerGrain) % VMDK_GT_CACHELINE_SIZE;
4315 aGTDataTmp[uGTBlockIndex] = RT_H2LE_U32(VMDK_BYTE2SECTOR(cbExtentSize));
4316 pGTCacheEntry->aGTData[uGTBlockIndex] = VMDK_BYTE2SECTOR(cbExtentSize);
4317 /* Update grain table on disk. */
4318 rc = vmdkFileWriteAt(pExtent->pFile,
4319 VMDK_SECTOR2BYTE(uGTSector) + (uGTBlock % (pExtent->cGTEntries / VMDK_GT_CACHELINE_SIZE)) * sizeof(aGTDataTmp),
4320 aGTDataTmp, sizeof(aGTDataTmp), NULL);
4321 if (RT_FAILURE(rc))
4322 return vmdkError(pExtent->pImage, rc, RT_SRC_POS, N_("VMDK: cannot write updated grain table in '%s'"), pExtent->pszFullname);
4323 if (pExtent->pRGD)
4324 {
4325 /* Update backup grain table on disk. */
4326 rc = vmdkFileWriteAt(pExtent->pFile,
4327 VMDK_SECTOR2BYTE(uRGTSector) + (uGTBlock % (pExtent->cGTEntries / VMDK_GT_CACHELINE_SIZE)) * sizeof(aGTDataTmp),
4328 aGTDataTmp, sizeof(aGTDataTmp), NULL);
4329 if (RT_FAILURE(rc))
4330 return vmdkError(pExtent->pImage, rc, RT_SRC_POS, N_("VMDK: cannot write updated backup grain table in '%s'"), pExtent->pszFullname);
4331 }
4332#ifdef VBOX_WITH_VMDK_ESX
4333 if (RT_SUCCESS(rc) && pExtent->enmType == VMDKETYPE_ESX_SPARSE)
4334 {
4335 pExtent->uFreeSector = uGTSector + VMDK_BYTE2SECTOR(cbWrite);
4336 pExtent->fMetaDirty = true;
4337 }
4338#endif /* VBOX_WITH_VMDK_ESX */
4339 return rc;
4340}
4341
4342
4343/** @copydoc VBOXHDDBACKEND::pfnCheckIfValid */
4344static int vmdkCheckIfValid(const char *pszFilename)
4345{
4346 LogFlowFunc(("pszFilename=\"%s\"\n", pszFilename));
4347 int rc = VINF_SUCCESS;
4348 PVMDKIMAGE pImage;
4349
4350 if ( !pszFilename
4351 || !*pszFilename
4352 || strchr(pszFilename, '"'))
4353 {
4354 rc = VERR_INVALID_PARAMETER;
4355 goto out;
4356 }
4357
4358 pImage = (PVMDKIMAGE)RTMemAllocZ(sizeof(VMDKIMAGE));
4359 if (!pImage)
4360 {
4361 rc = VERR_NO_MEMORY;
4362 goto out;
4363 }
4364 pImage->pszFilename = pszFilename;
4365 pImage->pFile = NULL;
4366 pImage->pExtents = NULL;
4367 pImage->pFiles = NULL;
4368 pImage->pGTCache = NULL;
4369 pImage->pDescData = NULL;
4370 pImage->pVDIfsDisk = NULL;
4371 /** @todo speed up this test open (VD_OPEN_FLAGS_INFO) by skipping as
4372 * much as possible in vmdkOpenImage. */
4373 rc = vmdkOpenImage(pImage, VD_OPEN_FLAGS_INFO | VD_OPEN_FLAGS_READONLY);
4374 vmdkFreeImage(pImage, false);
4375 RTMemFree(pImage);
4376
4377out:
4378 LogFlowFunc(("returns %Rrc\n", rc));
4379 return rc;
4380}
4381
4382/** @copydoc VBOXHDDBACKEND::pfnOpen */
4383static int vmdkOpen(const char *pszFilename, unsigned uOpenFlags,
4384 PVDINTERFACE pVDIfsDisk, PVDINTERFACE pVDIfsImage,
4385 void **ppBackendData)
4386{
4387 LogFlowFunc(("pszFilename=\"%s\" uOpenFlags=%#x pVDIfsDisk=%#p pVDIfsImage=%#p ppBackendData=%#p\n", pszFilename, uOpenFlags, pVDIfsDisk, pVDIfsImage, ppBackendData));
4388 int rc;
4389 PVMDKIMAGE pImage;
4390
4391 /* Check open flags. All valid flags are supported. */
4392 if (uOpenFlags & ~VD_OPEN_FLAGS_MASK)
4393 {
4394 rc = VERR_INVALID_PARAMETER;
4395 goto out;
4396 }
4397
4398 /* Check remaining arguments. */
4399 if ( !VALID_PTR(pszFilename)
4400 || !*pszFilename
4401 || strchr(pszFilename, '"'))
4402 {
4403 rc = VERR_INVALID_PARAMETER;
4404 goto out;
4405 }
4406
4407
4408 pImage = (PVMDKIMAGE)RTMemAllocZ(sizeof(VMDKIMAGE));
4409 if (!pImage)
4410 {
4411 rc = VERR_NO_MEMORY;
4412 goto out;
4413 }
4414 pImage->pszFilename = pszFilename;
4415 pImage->pFile = NULL;
4416 pImage->pExtents = NULL;
4417 pImage->pFiles = NULL;
4418 pImage->pGTCache = NULL;
4419 pImage->pDescData = NULL;
4420 pImage->pVDIfsDisk = pVDIfsDisk;
4421
4422 rc = vmdkOpenImage(pImage, uOpenFlags);
4423 if (RT_SUCCESS(rc))
4424 *ppBackendData = pImage;
4425
4426out:
4427 LogFlowFunc(("returns %Rrc (pBackendData=%#p)\n", rc, *ppBackendData));
4428 return rc;
4429}
4430
4431/** @copydoc VBOXHDDBACKEND::pfnCreate */
4432static int vmdkCreate(const char *pszFilename, uint64_t cbSize,
4433 unsigned uImageFlags, const char *pszComment,
4434 PCPDMMEDIAGEOMETRY pPCHSGeometry,
4435 PCPDMMEDIAGEOMETRY pLCHSGeometry, PCRTUUID pUuid,
4436 unsigned uOpenFlags, unsigned uPercentStart,
4437 unsigned uPercentSpan, PVDINTERFACE pVDIfsDisk,
4438 PVDINTERFACE pVDIfsImage, PVDINTERFACE pVDIfsOperation,
4439 void **ppBackendData)
4440{
4441 LogFlowFunc(("pszFilename=\"%s\" cbSize=%llu uImageFlags=%#x pszComment=\"%s\" pPCHSGeometry=%#p pLCHSGeometry=%#p Uuid=%RTuuid uOpenFlags=%#x uPercentStart=%u uPercentSpan=%u pVDIfsDisk=%#p pVDIfsImage=%#p pVDIfsOperation=%#p ppBackendData=%#p", pszFilename, cbSize, uImageFlags, pszComment, pPCHSGeometry, pLCHSGeometry, pUuid, uOpenFlags, uPercentStart, uPercentSpan, pVDIfsDisk, pVDIfsImage, pVDIfsOperation, ppBackendData));
4442 int rc;
4443 PVMDKIMAGE pImage;
4444
4445 PFNVMPROGRESS pfnProgress = NULL;
4446 void *pvUser = NULL;
4447 PVDINTERFACE pIfProgress = VDInterfaceGet(pVDIfsOperation,
4448 VDINTERFACETYPE_PROGRESS);
4449 PVDINTERFACEPROGRESS pCbProgress = NULL;
4450 if (pIfProgress)
4451 {
4452 pCbProgress = VDGetInterfaceProgress(pIfProgress);
4453 pfnProgress = pCbProgress->pfnProgress;
4454 pvUser = pIfProgress->pvUser;
4455 }
4456
4457 /* Check open flags. All valid flags are supported. */
4458 if (uOpenFlags & ~VD_OPEN_FLAGS_MASK)
4459 {
4460 rc = VERR_INVALID_PARAMETER;
4461 goto out;
4462 }
4463
4464 /* Check remaining arguments. */
4465 if ( !VALID_PTR(pszFilename)
4466 || !*pszFilename
4467 || strchr(pszFilename, '"')
4468 || !VALID_PTR(pPCHSGeometry)
4469 || !VALID_PTR(pLCHSGeometry)
4470 || ( (uImageFlags & VD_VMDK_IMAGE_FLAGS_STREAM_OPTIMIZED)
4471 && (uImageFlags & ~(VD_VMDK_IMAGE_FLAGS_STREAM_OPTIMIZED | VD_IMAGE_FLAGS_DIFF))))
4472 {
4473 rc = VERR_INVALID_PARAMETER;
4474 goto out;
4475 }
4476
4477 pImage = (PVMDKIMAGE)RTMemAllocZ(sizeof(VMDKIMAGE));
4478 if (!pImage)
4479 {
4480 rc = VERR_NO_MEMORY;
4481 goto out;
4482 }
4483 pImage->pszFilename = pszFilename;
4484 pImage->pFile = NULL;
4485 pImage->pExtents = NULL;
4486 pImage->pFiles = NULL;
4487 pImage->pGTCache = NULL;
4488 pImage->pDescData = NULL;
4489 pImage->pVDIfsDisk = NULL;
4490 pImage->cbDescAlloc = VMDK_SECTOR2BYTE(20);
4491 pImage->pDescData = (char *)RTMemAllocZ(pImage->cbDescAlloc);
4492 if (!pImage->pDescData)
4493 {
4494 rc = VERR_NO_MEMORY;
4495 goto out;
4496 }
4497
4498 rc = vmdkCreateImage(pImage, cbSize, uImageFlags, pszComment,
4499 pPCHSGeometry, pLCHSGeometry, pUuid,
4500 pfnProgress, pvUser, uPercentStart, uPercentSpan);
4501 if (RT_SUCCESS(rc))
4502 {
4503 /* So far the image is opened in read/write mode. Make sure the
4504 * image is opened in read-only mode if the caller requested that. */
4505 if (uOpenFlags & VD_OPEN_FLAGS_READONLY)
4506 {
4507 vmdkFreeImage(pImage, false);
4508 rc = vmdkOpenImage(pImage, uOpenFlags);
4509 if (RT_FAILURE(rc))
4510 goto out;
4511 }
4512 *ppBackendData = pImage;
4513 }
4514 else
4515 {
4516 RTMemFree(pImage->pDescData);
4517 RTMemFree(pImage);
4518 }
4519
4520out:
4521 LogFlowFunc(("returns %Rrc (pBackendData=%#p)\n", rc, *ppBackendData));
4522 return rc;
4523}
4524
4525/**
4526 * Replaces a fragment of a string with the specified string.
4527 *
4528 * @returns Pointer to the allocated UTF-8 string.
4529 * @param pszWhere UTF-8 string to search in.
4530 * @param pszWhat UTF-8 string to search for.
4531 * @param pszByWhat UTF-8 string to replace the found string with.
4532 */
4533static char * vmdkStrReplace(const char *pszWhere, const char *pszWhat, const char *pszByWhat)
4534{
4535 AssertPtr(pszWhere);
4536 AssertPtr(pszWhat);
4537 AssertPtr(pszByWhat);
4538 const char *pszFoundStr = strstr(pszWhere, pszWhat);
4539 if (!pszFoundStr)
4540 return NULL;
4541 size_t cFinal = strlen(pszWhere) + 1 + strlen(pszByWhat) - strlen(pszWhat);
4542 char *pszNewStr = (char *)RTMemAlloc(cFinal);
4543 if (pszNewStr)
4544 {
4545 char *pszTmp = pszNewStr;
4546 memcpy(pszTmp, pszWhere, pszFoundStr - pszWhere);
4547 pszTmp += pszFoundStr - pszWhere;
4548 memcpy(pszTmp, pszByWhat, strlen(pszByWhat));
4549 pszTmp += strlen(pszByWhat);
4550 strcpy(pszTmp, pszFoundStr + strlen(pszWhat));
4551 }
4552 return pszNewStr;
4553}
4554
4555/** @copydoc VBOXHDDBACKEND::pfnRename */
4556static int vmdkRename(void *pBackendData, const char *pszFilename)
4557{
4558 LogFlowFunc(("pBackendData=%#p pszFilename=%#p\n", pBackendData, pszFilename));
4559
4560 PVMDKIMAGE pImage = (PVMDKIMAGE)pBackendData;
4561 int rc = VINF_SUCCESS;
4562 char **apszOldName = NULL;
4563 char **apszNewName = NULL;
4564 char **apszNewLines = NULL;
4565 char *pszOldDescName = NULL;
4566 bool fImageFreed = false;
4567 bool fEmbeddedDesc = false;
4568 unsigned cExtents = pImage->cExtents;
4569 char *pszNewBaseName = NULL;
4570 char *pszOldBaseName = NULL;
4571 char *pszNewFullName = NULL;
4572 char *pszOldFullName = NULL;
4573 const char *pszOldImageName;
4574 unsigned i, line;
4575 VMDKDESCRIPTOR DescriptorCopy;
4576 VMDKEXTENT ExtentCopy;
4577
4578 memset(&DescriptorCopy, 0, sizeof(DescriptorCopy));
4579
4580 /* Check arguments. */
4581 if ( !pImage
4582 || (pImage->uImageFlags & VD_VMDK_IMAGE_FLAGS_RAWDISK)
4583 || !VALID_PTR(pszFilename)
4584 || !*pszFilename)
4585 {
4586 rc = VERR_INVALID_PARAMETER;
4587 goto out;
4588 }
4589
4590 /*
4591 * Allocate an array to store both old and new names of renamed files
4592 * in case we have to roll back the changes. Arrays are initialized
4593 * with zeros. We actually save stuff when and if we change it.
4594 */
4595 apszOldName = (char **)RTMemTmpAllocZ((cExtents + 1) * sizeof(char*));
4596 apszNewName = (char **)RTMemTmpAllocZ((cExtents + 1) * sizeof(char*));
4597 apszNewLines = (char **)RTMemTmpAllocZ((cExtents) * sizeof(char*));
4598 if (!apszOldName || !apszNewName || !apszNewLines)
4599 {
4600 rc = VERR_NO_MEMORY;
4601 goto out;
4602 }
4603
4604 /* Save the descriptor size and position. */
4605 if (pImage->pDescData)
4606 {
4607 /* Separate descriptor file. */
4608 fEmbeddedDesc = false;
4609 }
4610 else
4611 {
4612 /* Embedded descriptor file. */
4613 ExtentCopy = pImage->pExtents[0];
4614 fEmbeddedDesc = true;
4615 }
4616 /* Save the descriptor content. */
4617 DescriptorCopy.cLines = pImage->Descriptor.cLines;
4618 for (i = 0; i < DescriptorCopy.cLines; i++)
4619 {
4620 DescriptorCopy.aLines[i] = RTStrDup(pImage->Descriptor.aLines[i]);
4621 if (!DescriptorCopy.aLines[i])
4622 {
4623 rc = VERR_NO_MEMORY;
4624 goto out;
4625 }
4626 }
4627
4628 /* Prepare both old and new base names used for string replacement. */
4629 pszNewBaseName = RTStrDup(RTPathFilename(pszFilename));
4630 RTPathStripExt(pszNewBaseName);
4631 pszOldBaseName = RTStrDup(RTPathFilename(pImage->pszFilename));
4632 RTPathStripExt(pszOldBaseName);
4633 /* Prepare both old and new full names used for string replacement. */
4634 pszNewFullName = RTStrDup(pszFilename);
4635 RTPathStripExt(pszNewFullName);
4636 pszOldFullName = RTStrDup(pImage->pszFilename);
4637 RTPathStripExt(pszOldFullName);
4638
4639 /* --- Up to this point we have not done any damage yet. --- */
4640
4641 /* Save the old name for easy access to the old descriptor file. */
4642 pszOldDescName = RTStrDup(pImage->pszFilename);
4643 /* Save old image name. */
4644 pszOldImageName = pImage->pszFilename;
4645
4646 /* Update the descriptor with modified extent names. */
4647 for (i = 0, line = pImage->Descriptor.uFirstExtent;
4648 i < cExtents;
4649 i++, line = pImage->Descriptor.aNextLines[line])
4650 {
4651 /* Assume that vmdkStrReplace will fail. */
4652 rc = VERR_NO_MEMORY;
4653 /* Update the descriptor. */
4654 apszNewLines[i] = vmdkStrReplace(pImage->Descriptor.aLines[line],
4655 pszOldBaseName, pszNewBaseName);
4656 if (!apszNewLines[i])
4657 goto rollback;
4658 pImage->Descriptor.aLines[line] = apszNewLines[i];
4659 }
4660 /* Make sure the descriptor gets written back. */
4661 pImage->Descriptor.fDirty = true;
4662 /* Flush the descriptor now, in case it is embedded. */
4663 (void)vmdkFlushImage(pImage);
4664
4665 /* Close and rename/move extents. */
4666 for (i = 0; i < cExtents; i++)
4667 {
4668 PVMDKEXTENT pExtent = &pImage->pExtents[i];
4669 /* Compose new name for the extent. */
4670 apszNewName[i] = vmdkStrReplace(pExtent->pszFullname,
4671 pszOldFullName, pszNewFullName);
4672 if (!apszNewName[i])
4673 goto rollback;
4674 /* Close the extent file. */
4675 vmdkFileClose(pImage, &pExtent->pFile, false);
4676 /* Rename the extent file. */
4677 rc = RTFileMove(pExtent->pszFullname, apszNewName[i], 0);
4678 if (RT_FAILURE(rc))
4679 goto rollback;
4680 /* Remember the old name. */
4681 apszOldName[i] = RTStrDup(pExtent->pszFullname);
4682 }
4683 /* Release all old stuff. */
4684 vmdkFreeImage(pImage, false);
4685
4686 fImageFreed = true;
4687
4688 /* Last elements of new/old name arrays are intended for
4689 * storing descriptor's names.
4690 */
4691 apszNewName[cExtents] = RTStrDup(pszFilename);
4692 /* Rename the descriptor file if it's separate. */
4693 if (!fEmbeddedDesc)
4694 {
4695 rc = RTFileMove(pImage->pszFilename, apszNewName[cExtents], 0);
4696 if (RT_FAILURE(rc))
4697 goto rollback;
4698 /* Save old name only if we may need to change it back. */
4699 apszOldName[cExtents] = RTStrDup(pszFilename);
4700 }
4701
4702 /* Update pImage with the new information. */
4703 pImage->pszFilename = pszFilename;
4704
4705 /* Open the new image. */
4706 rc = vmdkOpenImage(pImage, pImage->uOpenFlags);
4707 if (RT_SUCCESS(rc))
4708 goto out;
4709
4710rollback:
4711 /* Roll back all changes in case of failure. */
4712 if (RT_FAILURE(rc))
4713 {
4714 int rrc;
4715 if (!fImageFreed)
4716 {
4717 /*
4718 * Some extents may have been closed, close the rest. We will
4719 * re-open the whole thing later.
4720 */
4721 vmdkFreeImage(pImage, false);
4722 }
4723 /* Rename files back. */
4724 for (i = 0; i <= cExtents; i++)
4725 {
4726 if (apszOldName[i])
4727 {
4728 rrc = RTFileMove(apszNewName[i], apszOldName[i], 0);
4729 AssertRC(rrc);
4730 }
4731 }
4732 /* Restore the old descriptor. */
4733 PVMDKFILE pFile;
4734 rrc = vmdkFileOpen(pImage, &pFile, pszOldDescName,
4735 RTFILE_O_READWRITE | RTFILE_O_OPEN | RTFILE_O_DENY_WRITE, false);
4736 AssertRC(rrc);
4737 if (fEmbeddedDesc)
4738 {
4739 ExtentCopy.pFile = pFile;
4740 pImage->pExtents = &ExtentCopy;
4741 }
4742 else
4743 {
4744 /* Shouldn't be null for separate descriptor.
4745 * There will be no access to the actual content.
4746 */
4747 pImage->pDescData = pszOldDescName;
4748 pImage->pFile = pFile;
4749 }
4750 pImage->Descriptor = DescriptorCopy;
4751 vmdkWriteDescriptor(pImage);
4752 vmdkFileClose(pImage, &pFile, false);
4753 /* Get rid of the stuff we implanted. */
4754 pImage->pExtents = NULL;
4755 pImage->pFile = NULL;
4756 pImage->pDescData = NULL;
4757 /* Re-open the image back. */
4758 pImage->pszFilename = pszOldImageName;
4759 rrc = vmdkOpenImage(pImage, pImage->uOpenFlags);
4760 AssertRC(rrc);
4761 }
4762
4763out:
4764 for (i = 0; i < DescriptorCopy.cLines; i++)
4765 if (DescriptorCopy.aLines[i])
4766 RTStrFree(DescriptorCopy.aLines[i]);
4767 if (apszOldName)
4768 {
4769 for (i = 0; i <= cExtents; i++)
4770 if (apszOldName[i])
4771 RTStrFree(apszOldName[i]);
4772 RTMemTmpFree(apszOldName);
4773 }
4774 if (apszNewName)
4775 {
4776 for (i = 0; i <= cExtents; i++)
4777 if (apszNewName[i])
4778 RTStrFree(apszNewName[i]);
4779 RTMemTmpFree(apszNewName);
4780 }
4781 if (apszNewLines)
4782 {
4783 for (i = 0; i < cExtents; i++)
4784 if (apszNewLines[i])
4785 RTStrFree(apszNewLines[i]);
4786 RTMemTmpFree(apszNewLines);
4787 }
4788 if (pszOldDescName)
4789 RTStrFree(pszOldDescName);
4790 if (pszOldBaseName)
4791 RTStrFree(pszOldBaseName);
4792 if (pszNewBaseName)
4793 RTStrFree(pszNewBaseName);
4794 if (pszOldFullName)
4795 RTStrFree(pszOldFullName);
4796 if (pszNewFullName)
4797 RTStrFree(pszNewFullName);
4798 LogFlowFunc(("returns %Rrc\n", rc));
4799 return rc;
4800}
4801
4802/** @copydoc VBOXHDDBACKEND::pfnClose */
4803static int vmdkClose(void *pBackendData, bool fDelete)
4804{
4805 LogFlowFunc(("pBackendData=%#p fDelete=%d\n", pBackendData, fDelete));
4806 PVMDKIMAGE pImage = (PVMDKIMAGE)pBackendData;
4807 int rc = VINF_SUCCESS;
4808
4809 /* Freeing a never allocated image (e.g. because the open failed) is
4810 * not signalled as an error. After all nothing bad happens. */
4811 if (pImage)
4812 {
4813 vmdkFreeImage(pImage, fDelete);
4814 RTMemFree(pImage);
4815 }
4816
4817 LogFlowFunc(("returns %Rrc\n", rc));
4818 return rc;
4819}
4820
4821/** @copydoc VBOXHDDBACKEND::pfnRead */
4822static int vmdkRead(void *pBackendData, uint64_t uOffset, void *pvBuf,
4823 size_t cbToRead, size_t *pcbActuallyRead)
4824{
4825 LogFlowFunc(("pBackendData=%#p uOffset=%llu pvBuf=%#p cbToRead=%zu pcbActuallyRead=%#p\n", pBackendData, uOffset, pvBuf, cbToRead, pcbActuallyRead));
4826 PVMDKIMAGE pImage = (PVMDKIMAGE)pBackendData;
4827 PVMDKEXTENT pExtent;
4828 uint64_t uSectorExtentRel;
4829 uint64_t uSectorExtentAbs;
4830 int rc;
4831
4832 AssertPtr(pImage);
4833 Assert(uOffset % 512 == 0);
4834 Assert(cbToRead % 512 == 0);
4835
4836 if ( uOffset + cbToRead > pImage->cbSize
4837 || cbToRead == 0)
4838 {
4839 rc = VERR_INVALID_PARAMETER;
4840 goto out;
4841 }
4842
4843 rc = vmdkFindExtent(pImage, VMDK_BYTE2SECTOR(uOffset),
4844 &pExtent, &uSectorExtentRel);
4845 if (RT_FAILURE(rc))
4846 goto out;
4847
4848 /* Check access permissions as defined in the extent descriptor. */
4849 if (pExtent->enmAccess == VMDKACCESS_NOACCESS)
4850 {
4851 rc = VERR_VD_VMDK_INVALID_STATE;
4852 goto out;
4853 }
4854
4855 /* Clip read range to remain in this extent. */
4856 cbToRead = RT_MIN(cbToRead, VMDK_SECTOR2BYTE(pExtent->uSectorOffset + pExtent->cNominalSectors - uSectorExtentRel));
4857
4858 /* Handle the read according to the current extent type. */
4859 switch (pExtent->enmType)
4860 {
4861 case VMDKETYPE_HOSTED_SPARSE:
4862#ifdef VBOX_WITH_VMDK_ESX
4863 case VMDKETYPE_ESX_SPARSE:
4864#endif /* VBOX_WITH_VMDK_ESX */
4865 rc = vmdkGetSector(pImage->pGTCache, pExtent, uSectorExtentRel,
4866 &uSectorExtentAbs);
4867 if (RT_FAILURE(rc))
4868 goto out;
4869 /* Clip read range to at most the rest of the grain. */
4870 cbToRead = RT_MIN(cbToRead, VMDK_SECTOR2BYTE(pExtent->cSectorsPerGrain - uSectorExtentRel % pExtent->cSectorsPerGrain));
4871 Assert(!(cbToRead % 512));
4872 if (uSectorExtentAbs == 0)
4873 rc = VERR_VD_BLOCK_FREE;
4874 else
4875 {
4876 if (pImage->uImageFlags & VD_VMDK_IMAGE_FLAGS_STREAM_OPTIMIZED)
4877 {
4878 uint32_t uSectorInGrain = uSectorExtentRel % pExtent->cSectorsPerGrain;
4879 uSectorExtentAbs -= uSectorInGrain;
4880 uint64_t uLBA;
4881 if (pExtent->uGrainSector != uSectorExtentAbs)
4882 {
4883 rc = vmdkFileInflateAt(pExtent->pFile, VMDK_SECTOR2BYTE(uSectorExtentAbs),
4884 pExtent->pvGrain, VMDK_SECTOR2BYTE(pExtent->cSectorsPerGrain), VMDK_MARKER_IGNORE, &uLBA, NULL);
4885 if (RT_FAILURE(rc))
4886 {
4887 pExtent->uGrainSector = 0;
4888 AssertRC(rc);
4889 goto out;
4890 }
4891 pExtent->uGrainSector = uSectorExtentAbs;
4892 Assert(uLBA == uSectorExtentRel);
4893 }
4894 memcpy(pvBuf, (uint8_t *)pExtent->pvGrain + VMDK_SECTOR2BYTE(uSectorInGrain), cbToRead);
4895 }
4896 else
4897 {
4898 rc = vmdkFileReadAt(pExtent->pFile,
4899 VMDK_SECTOR2BYTE(uSectorExtentAbs),
4900 pvBuf, cbToRead, NULL);
4901 }
4902 }
4903 break;
4904 case VMDKETYPE_FLAT:
4905 rc = vmdkFileReadAt(pExtent->pFile,
4906 VMDK_SECTOR2BYTE(uSectorExtentRel),
4907 pvBuf, cbToRead, NULL);
4908 break;
4909 case VMDKETYPE_ZERO:
4910 memset(pvBuf, '\0', cbToRead);
4911 break;
4912 }
4913 if (pcbActuallyRead)
4914 *pcbActuallyRead = cbToRead;
4915
4916out:
4917 LogFlowFunc(("returns %Rrc\n", rc));
4918 return rc;
4919}
4920
4921/** @copydoc VBOXHDDBACKEND::pfnWrite */
4922static int vmdkWrite(void *pBackendData, uint64_t uOffset, const void *pvBuf,
4923 size_t cbToWrite, size_t *pcbWriteProcess,
4924 size_t *pcbPreRead, size_t *pcbPostRead, unsigned fWrite)
4925{
4926 LogFlowFunc(("pBackendData=%#p uOffset=%llu pvBuf=%#p cbToWrite=%zu pcbWriteProcess=%#p pcbPreRead=%#p pcbPostRead=%#p\n", pBackendData, uOffset, pvBuf, cbToWrite, pcbWriteProcess, pcbPreRead, pcbPostRead));
4927 PVMDKIMAGE pImage = (PVMDKIMAGE)pBackendData;
4928 PVMDKEXTENT pExtent;
4929 uint64_t uSectorExtentRel;
4930 uint64_t uSectorExtentAbs;
4931 int rc;
4932
4933 AssertPtr(pImage);
4934 Assert(uOffset % 512 == 0);
4935 Assert(cbToWrite % 512 == 0);
4936
4937 if (pImage->uOpenFlags & VD_OPEN_FLAGS_READONLY)
4938 {
4939 rc = VERR_VD_IMAGE_READ_ONLY;
4940 goto out;
4941 }
4942
4943 if (cbToWrite == 0)
4944 {
4945 rc = VERR_INVALID_PARAMETER;
4946 goto out;
4947 }
4948
4949 /* No size check here, will do that later when the extent is located.
4950 * There are sparse images out there which according to the spec are
4951 * invalid, because the total size is not a multiple of the grain size.
4952 * Also for sparse images which are stitched together in odd ways (not at
4953 * grain boundaries, and with the nominal size not being a multiple of the
4954 * grain size), this would prevent writing to the last grain. */
4955
4956 rc = vmdkFindExtent(pImage, VMDK_BYTE2SECTOR(uOffset),
4957 &pExtent, &uSectorExtentRel);
4958 if (RT_FAILURE(rc))
4959 goto out;
4960
4961 /* Check access permissions as defined in the extent descriptor. */
4962 if (pExtent->enmAccess != VMDKACCESS_READWRITE)
4963 {
4964 rc = VERR_VD_VMDK_INVALID_STATE;
4965 goto out;
4966 }
4967
4968 /* Handle the write according to the current extent type. */
4969 switch (pExtent->enmType)
4970 {
4971 case VMDKETYPE_HOSTED_SPARSE:
4972#ifdef VBOX_WITH_VMDK_ESX
4973 case VMDKETYPE_ESX_SPARSE:
4974#endif /* VBOX_WITH_VMDK_ESX */
4975 rc = vmdkGetSector(pImage->pGTCache, pExtent, uSectorExtentRel,
4976 &uSectorExtentAbs);
4977 if (RT_FAILURE(rc))
4978 goto out;
4979 /* Clip write range to at most the rest of the grain. */
4980 cbToWrite = RT_MIN(cbToWrite, VMDK_SECTOR2BYTE(pExtent->cSectorsPerGrain - uSectorExtentRel % pExtent->cSectorsPerGrain));
4981 if ( pImage->uImageFlags & VD_VMDK_IMAGE_FLAGS_STREAM_OPTIMIZED
4982 && uSectorExtentRel < (uint64_t)pExtent->uLastGrainWritten * pExtent->cSectorsPerGrain)
4983 {
4984 rc = VERR_VD_VMDK_INVALID_WRITE;
4985 goto out;
4986 }
4987 if (uSectorExtentAbs == 0)
4988 {
4989 if (cbToWrite == VMDK_SECTOR2BYTE(pExtent->cSectorsPerGrain))
4990 {
4991 /* Full block write to a previously unallocated block.
4992 * Check if the caller wants to avoid the automatic alloc. */
4993 if (!(fWrite & VD_WRITE_NO_ALLOC))
4994 {
4995 /* Allocate GT and find out where to store the grain. */
4996 rc = vmdkAllocGrain(pImage->pGTCache, pExtent,
4997 uSectorExtentRel, pvBuf, cbToWrite);
4998 }
4999 else
5000 rc = VERR_VD_BLOCK_FREE;
5001 *pcbPreRead = 0;
5002 *pcbPostRead = 0;
5003 }
5004 else
5005 {
5006 /* Clip write range to remain in this extent. */
5007 cbToWrite = RT_MIN(cbToWrite, VMDK_SECTOR2BYTE(pExtent->uSectorOffset + pExtent->cNominalSectors - uSectorExtentRel));
5008 *pcbPreRead = VMDK_SECTOR2BYTE(uSectorExtentRel % pExtent->cSectorsPerGrain);
5009 *pcbPostRead = VMDK_SECTOR2BYTE(pExtent->cSectorsPerGrain) - cbToWrite - *pcbPreRead;
5010 rc = VERR_VD_BLOCK_FREE;
5011 }
5012 }
5013 else
5014 {
5015 if (pImage->uImageFlags & VD_VMDK_IMAGE_FLAGS_STREAM_OPTIMIZED)
5016 {
5017 uint32_t uSectorInGrain = uSectorExtentRel % pExtent->cSectorsPerGrain;
5018 uSectorExtentAbs -= uSectorInGrain;
5019 uint64_t uLBA;
5020 if ( pExtent->uGrainSector != uSectorExtentAbs
5021 || pExtent->uGrainSector != pExtent->uLastGrainSector)
5022 {
5023 rc = vmdkFileInflateAt(pExtent->pFile, VMDK_SECTOR2BYTE(uSectorExtentAbs),
5024 pExtent->pvGrain, VMDK_SECTOR2BYTE(pExtent->cSectorsPerGrain), VMDK_MARKER_IGNORE, &uLBA, NULL);
5025 if (RT_FAILURE(rc))
5026 {
5027 pExtent->uGrainSector = 0;
5028 pExtent->uLastGrainSector = 0;
5029 AssertRC(rc);
5030 goto out;
5031 }
5032 pExtent->uGrainSector = uSectorExtentAbs;
5033 pExtent->uLastGrainSector = uSectorExtentAbs;
5034 Assert(uLBA == uSectorExtentRel);
5035 }
5036 memcpy((uint8_t *)pExtent->pvGrain + VMDK_SECTOR2BYTE(uSectorInGrain), pvBuf, cbToWrite);
5037 uint32_t cbGrain = 0;
5038 rc = vmdkFileDeflateAt(pExtent->pFile,
5039 VMDK_SECTOR2BYTE(uSectorExtentAbs),
5040 pExtent->pvGrain, VMDK_SECTOR2BYTE(pExtent->cSectorsPerGrain),
5041 VMDK_MARKER_IGNORE, uLBA, &cbGrain);
5042 if (RT_FAILURE(rc))
5043 {
5044 pExtent->uGrainSector = 0;
5045 pExtent->uLastGrainSector = 0;
5046 AssertRC(rc);
5047 return vmdkError(pExtent->pImage, rc, RT_SRC_POS, N_("VMDK: cannot write compressed data block in '%s'"), pExtent->pszFullname);
5048 }
5049 cbGrain = RT_ALIGN(cbGrain, 512);
5050 pExtent->uLastGrainSector = uSectorExtentAbs;
5051 pExtent->uLastGrainWritten = uSectorExtentRel / pExtent->cSectorsPerGrain;
5052 pExtent->cbLastGrainWritten = cbGrain;
5053
5054 uint64_t uEOSOff = 0;
5055 if (pExtent->fFooter)
5056 {
5057 uEOSOff = 512;
5058 rc = vmdkWriteMetaSparseExtent(pExtent, VMDK_SECTOR2BYTE(uSectorExtentAbs) + RT_ALIGN(cbGrain, 512));
5059 if (RT_FAILURE(rc))
5060 return vmdkError(pExtent->pImage, rc, RT_SRC_POS, N_("VMDK: cannot write footer after data block in '%s'"), pExtent->pszFullname);
5061 }
5062 uint8_t aEOS[512];
5063 memset(aEOS, '\0', sizeof(aEOS));
5064 rc = vmdkFileWriteAt(pExtent->pFile,
5065 VMDK_SECTOR2BYTE(uSectorExtentAbs) + RT_ALIGN(cbGrain, 512) + uEOSOff,
5066 aEOS, sizeof(aEOS), NULL);
5067 if (RT_FAILURE(rc))
5068 return vmdkError(pExtent->pImage, rc, RT_SRC_POS, N_("VMDK: cannot write end-of stream marker after data block in '%s'"), pExtent->pszFullname);
5069 }
5070 else
5071 {
5072 rc = vmdkFileWriteAt(pExtent->pFile,
5073 VMDK_SECTOR2BYTE(uSectorExtentAbs),
5074 pvBuf, cbToWrite, NULL);
5075 }
5076 }
5077 break;
5078 case VMDKETYPE_FLAT:
5079 /* Clip write range to remain in this extent. */
5080 cbToWrite = RT_MIN(cbToWrite, VMDK_SECTOR2BYTE(pExtent->uSectorOffset + pExtent->cNominalSectors - uSectorExtentRel));
5081 rc = vmdkFileWriteAt(pExtent->pFile,
5082 VMDK_SECTOR2BYTE(uSectorExtentRel),
5083 pvBuf, cbToWrite, NULL);
5084 break;
5085 case VMDKETYPE_ZERO:
5086 /* Clip write range to remain in this extent. */
5087 cbToWrite = RT_MIN(cbToWrite, VMDK_SECTOR2BYTE(pExtent->uSectorOffset + pExtent->cNominalSectors - uSectorExtentRel));
5088 break;
5089 }
5090 if (pcbWriteProcess)
5091 *pcbWriteProcess = cbToWrite;
5092
5093out:
5094 LogFlowFunc(("returns %Rrc\n", rc));
5095 return rc;
5096}
5097
5098/** @copydoc VBOXHDDBACKEND::pfnFlush */
5099static int vmdkFlush(void *pBackendData)
5100{
5101 LogFlowFunc(("pBackendData=%#p\n", pBackendData));
5102 PVMDKIMAGE pImage = (PVMDKIMAGE)pBackendData;
5103 int rc;
5104
5105 AssertPtr(pImage);
5106
5107 rc = vmdkFlushImage(pImage);
5108 LogFlowFunc(("returns %Rrc\n", rc));
5109 return rc;
5110}
5111
5112/** @copydoc VBOXHDDBACKEND::pfnGetVersion */
5113static unsigned vmdkGetVersion(void *pBackendData)
5114{
5115 LogFlowFunc(("pBackendData=%#p\n", pBackendData));
5116 PVMDKIMAGE pImage = (PVMDKIMAGE)pBackendData;
5117
5118 AssertPtr(pImage);
5119
5120 if (pImage)
5121 return VMDK_IMAGE_VERSION;
5122 else
5123 return 0;
5124}
5125
5126/** @copydoc VBOXHDDBACKEND::pfnGetSize */
5127static uint64_t vmdkGetSize(void *pBackendData)
5128{
5129 LogFlowFunc(("pBackendData=%#p\n", pBackendData));
5130 PVMDKIMAGE pImage = (PVMDKIMAGE)pBackendData;
5131
5132 AssertPtr(pImage);
5133
5134 if (pImage)
5135 return pImage->cbSize;
5136 else
5137 return 0;
5138}
5139
5140/** @copydoc VBOXHDDBACKEND::pfnGetFileSize */
5141static uint64_t vmdkGetFileSize(void *pBackendData)
5142{
5143 LogFlowFunc(("pBackendData=%#p\n", pBackendData));
5144 PVMDKIMAGE pImage = (PVMDKIMAGE)pBackendData;
5145 uint64_t cb = 0;
5146
5147 AssertPtr(pImage);
5148
5149 if (pImage)
5150 {
5151 uint64_t cbFile;
5152 if (pImage->pFile != NULL)
5153 {
5154 int rc = vmdkFileGetSize(pImage->pFile, &cbFile);
5155 if (RT_SUCCESS(rc))
5156 cb += cbFile;
5157 }
5158 for (unsigned i = 0; i < pImage->cExtents; i++)
5159 {
5160 if (pImage->pExtents[i].pFile != NULL)
5161 {
5162 int rc = vmdkFileGetSize(pImage->pExtents[i].pFile, &cbFile);
5163 if (RT_SUCCESS(rc))
5164 cb += cbFile;
5165 }
5166 }
5167 }
5168
5169 LogFlowFunc(("returns %lld\n", cb));
5170 return cb;
5171}
5172
5173/** @copydoc VBOXHDDBACKEND::pfnGetPCHSGeometry */
5174static int vmdkGetPCHSGeometry(void *pBackendData,
5175 PPDMMEDIAGEOMETRY pPCHSGeometry)
5176{
5177 LogFlowFunc(("pBackendData=%#p pPCHSGeometry=%#p\n", pBackendData, pPCHSGeometry));
5178 PVMDKIMAGE pImage = (PVMDKIMAGE)pBackendData;
5179 int rc;
5180
5181 AssertPtr(pImage);
5182
5183 if (pImage)
5184 {
5185 if (pImage->PCHSGeometry.cCylinders)
5186 {
5187 *pPCHSGeometry = pImage->PCHSGeometry;
5188 rc = VINF_SUCCESS;
5189 }
5190 else
5191 rc = VERR_VD_GEOMETRY_NOT_SET;
5192 }
5193 else
5194 rc = VERR_VD_NOT_OPENED;
5195
5196 LogFlowFunc(("returns %Rrc (PCHS=%u/%u/%u)\n", rc, pPCHSGeometry->cCylinders, pPCHSGeometry->cHeads, pPCHSGeometry->cSectors));
5197 return rc;
5198}
5199
5200/** @copydoc VBOXHDDBACKEND::pfnSetPCHSGeometry */
5201static int vmdkSetPCHSGeometry(void *pBackendData,
5202 PCPDMMEDIAGEOMETRY pPCHSGeometry)
5203{
5204 LogFlowFunc(("pBackendData=%#p pPCHSGeometry=%#p PCHS=%u/%u/%u\n", pBackendData, pPCHSGeometry, pPCHSGeometry->cCylinders, pPCHSGeometry->cHeads, pPCHSGeometry->cSectors));
5205 PVMDKIMAGE pImage = (PVMDKIMAGE)pBackendData;
5206 int rc;
5207
5208 AssertPtr(pImage);
5209
5210 if (pImage)
5211 {
5212 if (pImage->uOpenFlags & VD_OPEN_FLAGS_READONLY)
5213 {
5214 rc = VERR_VD_IMAGE_READ_ONLY;
5215 goto out;
5216 }
5217 rc = vmdkDescSetPCHSGeometry(pImage, pPCHSGeometry);
5218 if (RT_FAILURE(rc))
5219 goto out;
5220
5221 pImage->PCHSGeometry = *pPCHSGeometry;
5222 rc = VINF_SUCCESS;
5223 }
5224 else
5225 rc = VERR_VD_NOT_OPENED;
5226
5227out:
5228 LogFlowFunc(("returns %Rrc\n", rc));
5229 return rc;
5230}
5231
5232/** @copydoc VBOXHDDBACKEND::pfnGetLCHSGeometry */
5233static int vmdkGetLCHSGeometry(void *pBackendData,
5234 PPDMMEDIAGEOMETRY pLCHSGeometry)
5235{
5236 LogFlowFunc(("pBackendData=%#p pLCHSGeometry=%#p\n", pBackendData, pLCHSGeometry));
5237 PVMDKIMAGE pImage = (PVMDKIMAGE)pBackendData;
5238 int rc;
5239
5240 AssertPtr(pImage);
5241
5242 if (pImage)
5243 {
5244 if (pImage->LCHSGeometry.cCylinders)
5245 {
5246 *pLCHSGeometry = pImage->LCHSGeometry;
5247 rc = VINF_SUCCESS;
5248 }
5249 else
5250 rc = VERR_VD_GEOMETRY_NOT_SET;
5251 }
5252 else
5253 rc = VERR_VD_NOT_OPENED;
5254
5255 LogFlowFunc(("returns %Rrc (LCHS=%u/%u/%u)\n", rc, pLCHSGeometry->cCylinders, pLCHSGeometry->cHeads, pLCHSGeometry->cSectors));
5256 return rc;
5257}
5258
5259/** @copydoc VBOXHDDBACKEND::pfnSetLCHSGeometry */
5260static int vmdkSetLCHSGeometry(void *pBackendData,
5261 PCPDMMEDIAGEOMETRY pLCHSGeometry)
5262{
5263 LogFlowFunc(("pBackendData=%#p pLCHSGeometry=%#p LCHS=%u/%u/%u\n", pBackendData, pLCHSGeometry, pLCHSGeometry->cCylinders, pLCHSGeometry->cHeads, pLCHSGeometry->cSectors));
5264 PVMDKIMAGE pImage = (PVMDKIMAGE)pBackendData;
5265 int rc;
5266
5267 AssertPtr(pImage);
5268
5269 if (pImage)
5270 {
5271 if (pImage->uOpenFlags & VD_OPEN_FLAGS_READONLY)
5272 {
5273 rc = VERR_VD_IMAGE_READ_ONLY;
5274 goto out;
5275 }
5276 rc = vmdkDescSetLCHSGeometry(pImage, pLCHSGeometry);
5277 if (RT_FAILURE(rc))
5278 goto out;
5279
5280 pImage->LCHSGeometry = *pLCHSGeometry;
5281 rc = VINF_SUCCESS;
5282 }
5283 else
5284 rc = VERR_VD_NOT_OPENED;
5285
5286out:
5287 LogFlowFunc(("returns %Rrc\n", rc));
5288 return rc;
5289}
5290
5291/** @copydoc VBOXHDDBACKEND::pfnGetImageFlags */
5292static unsigned vmdkGetImageFlags(void *pBackendData)
5293{
5294 LogFlowFunc(("pBackendData=%#p\n", pBackendData));
5295 PVMDKIMAGE pImage = (PVMDKIMAGE)pBackendData;
5296 unsigned uImageFlags;
5297
5298 AssertPtr(pImage);
5299
5300 if (pImage)
5301 uImageFlags = pImage->uImageFlags;
5302 else
5303 uImageFlags = 0;
5304
5305 LogFlowFunc(("returns %#x\n", uImageFlags));
5306 return uImageFlags;
5307}
5308
5309/** @copydoc VBOXHDDBACKEND::pfnGetOpenFlags */
5310static unsigned vmdkGetOpenFlags(void *pBackendData)
5311{
5312 LogFlowFunc(("pBackendData=%#p\n", pBackendData));
5313 PVMDKIMAGE pImage = (PVMDKIMAGE)pBackendData;
5314 unsigned uOpenFlags;
5315
5316 AssertPtr(pImage);
5317
5318 if (pImage)
5319 uOpenFlags = pImage->uOpenFlags;
5320 else
5321 uOpenFlags = 0;
5322
5323 LogFlowFunc(("returns %#x\n", uOpenFlags));
5324 return uOpenFlags;
5325}
5326
5327/** @copydoc VBOXHDDBACKEND::pfnSetOpenFlags */
5328static int vmdkSetOpenFlags(void *pBackendData, unsigned uOpenFlags)
5329{
5330 LogFlowFunc(("pBackendData=%#p\n uOpenFlags=%#x", pBackendData, uOpenFlags));
5331 PVMDKIMAGE pImage = (PVMDKIMAGE)pBackendData;
5332 int rc;
5333
5334 /* Image must be opened and the new flags must be valid. Just readonly and
5335 * info flags are supported. */
5336 if (!pImage || (uOpenFlags & ~(VD_OPEN_FLAGS_READONLY | VD_OPEN_FLAGS_INFO | VD_OPEN_FLAGS_ASYNC_IO)))
5337 {
5338 rc = VERR_INVALID_PARAMETER;
5339 goto out;
5340 }
5341
5342 /* Implement this operation via reopening the image. */
5343 vmdkFreeImage(pImage, false);
5344 rc = vmdkOpenImage(pImage, uOpenFlags);
5345
5346out:
5347 LogFlowFunc(("returns %Rrc\n", rc));
5348 return rc;
5349}
5350
5351/** @copydoc VBOXHDDBACKEND::pfnGetComment */
5352static int vmdkGetComment(void *pBackendData, char *pszComment,
5353 size_t cbComment)
5354{
5355 LogFlowFunc(("pBackendData=%#p pszComment=%#p cbComment=%zu\n", pBackendData, pszComment, cbComment));
5356 PVMDKIMAGE pImage = (PVMDKIMAGE)pBackendData;
5357 int rc;
5358
5359 AssertPtr(pImage);
5360
5361 if (pImage)
5362 {
5363 const char *pszCommentEncoded = NULL;
5364 rc = vmdkDescDDBGetStr(pImage, &pImage->Descriptor,
5365 "ddb.comment", &pszCommentEncoded);
5366 if (rc == VERR_VD_VMDK_VALUE_NOT_FOUND)
5367 pszCommentEncoded = NULL;
5368 else if (RT_FAILURE(rc))
5369 goto out;
5370
5371 if (pszComment && pszCommentEncoded)
5372 rc = vmdkDecodeString(pszCommentEncoded, pszComment, cbComment);
5373 else
5374 {
5375 if (pszComment)
5376 *pszComment = '\0';
5377 rc = VINF_SUCCESS;
5378 }
5379 if (pszCommentEncoded)
5380 RTStrFree((char *)(void *)pszCommentEncoded);
5381 }
5382 else
5383 rc = VERR_VD_NOT_OPENED;
5384
5385out:
5386 LogFlowFunc(("returns %Rrc comment='%s'\n", rc, pszComment));
5387 return rc;
5388}
5389
5390/** @copydoc VBOXHDDBACKEND::pfnSetComment */
5391static int vmdkSetComment(void *pBackendData, const char *pszComment)
5392{
5393 LogFlowFunc(("pBackendData=%#p pszComment=\"%s\"\n", pBackendData, pszComment));
5394 PVMDKIMAGE pImage = (PVMDKIMAGE)pBackendData;
5395 int rc;
5396
5397 AssertPtr(pImage);
5398
5399 if (pImage->uOpenFlags & VD_OPEN_FLAGS_READONLY)
5400 {
5401 rc = VERR_VD_IMAGE_READ_ONLY;
5402 goto out;
5403 }
5404
5405 if (pImage)
5406 rc = vmdkSetImageComment(pImage, pszComment);
5407 else
5408 rc = VERR_VD_NOT_OPENED;
5409
5410out:
5411 LogFlowFunc(("returns %Rrc\n", rc));
5412 return rc;
5413}
5414
5415/** @copydoc VBOXHDDBACKEND::pfnGetUuid */
5416static int vmdkGetUuid(void *pBackendData, PRTUUID pUuid)
5417{
5418 LogFlowFunc(("pBackendData=%#p pUuid=%#p\n", pBackendData, pUuid));
5419 PVMDKIMAGE pImage = (PVMDKIMAGE)pBackendData;
5420 int rc;
5421
5422 AssertPtr(pImage);
5423
5424 if (pImage)
5425 {
5426 *pUuid = pImage->ImageUuid;
5427 rc = VINF_SUCCESS;
5428 }
5429 else
5430 rc = VERR_VD_NOT_OPENED;
5431
5432 LogFlowFunc(("returns %Rrc (%RTuuid)\n", rc, pUuid));
5433 return rc;
5434}
5435
5436/** @copydoc VBOXHDDBACKEND::pfnSetUuid */
5437static int vmdkSetUuid(void *pBackendData, PCRTUUID pUuid)
5438{
5439 LogFlowFunc(("pBackendData=%#p Uuid=%RTuuid\n", pBackendData, pUuid));
5440 PVMDKIMAGE pImage = (PVMDKIMAGE)pBackendData;
5441 int rc;
5442
5443 LogFlowFunc(("%RTuuid\n", pUuid));
5444 AssertPtr(pImage);
5445
5446 if (pImage)
5447 {
5448 if (!(pImage->uOpenFlags & VD_OPEN_FLAGS_READONLY))
5449 {
5450 pImage->ImageUuid = *pUuid;
5451 rc = vmdkDescDDBSetUuid(pImage, &pImage->Descriptor,
5452 VMDK_DDB_IMAGE_UUID, pUuid);
5453 if (RT_FAILURE(rc))
5454 return vmdkError(pImage, rc, RT_SRC_POS, N_("VMDK: error storing image UUID in descriptor in '%s'"), pImage->pszFilename);
5455 rc = VINF_SUCCESS;
5456 }
5457 else
5458 rc = VERR_VD_IMAGE_READ_ONLY;
5459 }
5460 else
5461 rc = VERR_VD_NOT_OPENED;
5462
5463 LogFlowFunc(("returns %Rrc\n", rc));
5464 return rc;
5465}
5466
5467/** @copydoc VBOXHDDBACKEND::pfnGetModificationUuid */
5468static int vmdkGetModificationUuid(void *pBackendData, PRTUUID pUuid)
5469{
5470 LogFlowFunc(("pBackendData=%#p pUuid=%#p\n", pBackendData, pUuid));
5471 PVMDKIMAGE pImage = (PVMDKIMAGE)pBackendData;
5472 int rc;
5473
5474 AssertPtr(pImage);
5475
5476 if (pImage)
5477 {
5478 *pUuid = pImage->ModificationUuid;
5479 rc = VINF_SUCCESS;
5480 }
5481 else
5482 rc = VERR_VD_NOT_OPENED;
5483
5484 LogFlowFunc(("returns %Rrc (%RTuuid)\n", rc, pUuid));
5485 return rc;
5486}
5487
5488/** @copydoc VBOXHDDBACKEND::pfnSetModificationUuid */
5489static int vmdkSetModificationUuid(void *pBackendData, PCRTUUID pUuid)
5490{
5491 LogFlowFunc(("pBackendData=%#p Uuid=%RTuuid\n", pBackendData, pUuid));
5492 PVMDKIMAGE pImage = (PVMDKIMAGE)pBackendData;
5493 int rc;
5494
5495 AssertPtr(pImage);
5496
5497 if (pImage)
5498 {
5499 if (!(pImage->uOpenFlags & VD_OPEN_FLAGS_READONLY))
5500 {
5501 pImage->ModificationUuid = *pUuid;
5502 rc = vmdkDescDDBSetUuid(pImage, &pImage->Descriptor,
5503 VMDK_DDB_MODIFICATION_UUID, pUuid);
5504 if (RT_FAILURE(rc))
5505 return vmdkError(pImage, rc, RT_SRC_POS, N_("VMDK: error storing modification UUID in descriptor in '%s'"), pImage->pszFilename);
5506 rc = VINF_SUCCESS;
5507 }
5508 else
5509 rc = VERR_VD_IMAGE_READ_ONLY;
5510 }
5511 else
5512 rc = VERR_VD_NOT_OPENED;
5513
5514 LogFlowFunc(("returns %Rrc\n", rc));
5515 return rc;
5516}
5517
5518/** @copydoc VBOXHDDBACKEND::pfnGetParentUuid */
5519static int vmdkGetParentUuid(void *pBackendData, PRTUUID pUuid)
5520{
5521 LogFlowFunc(("pBackendData=%#p pUuid=%#p\n", pBackendData, pUuid));
5522 PVMDKIMAGE pImage = (PVMDKIMAGE)pBackendData;
5523 int rc;
5524
5525 AssertPtr(pImage);
5526
5527 if (pImage)
5528 {
5529 *pUuid = pImage->ParentUuid;
5530 rc = VINF_SUCCESS;
5531 }
5532 else
5533 rc = VERR_VD_NOT_OPENED;
5534
5535 LogFlowFunc(("returns %Rrc (%RTuuid)\n", rc, pUuid));
5536 return rc;
5537}
5538
5539/** @copydoc VBOXHDDBACKEND::pfnSetParentUuid */
5540static int vmdkSetParentUuid(void *pBackendData, PCRTUUID pUuid)
5541{
5542 LogFlowFunc(("pBackendData=%#p Uuid=%RTuuid\n", pBackendData, pUuid));
5543 PVMDKIMAGE pImage = (PVMDKIMAGE)pBackendData;
5544 int rc;
5545
5546 AssertPtr(pImage);
5547
5548 if (pImage)
5549 {
5550 if (!(pImage->uOpenFlags & VD_OPEN_FLAGS_READONLY))
5551 {
5552 pImage->ParentUuid = *pUuid;
5553 rc = vmdkDescDDBSetUuid(pImage, &pImage->Descriptor,
5554 VMDK_DDB_PARENT_UUID, pUuid);
5555 if (RT_FAILURE(rc))
5556 return vmdkError(pImage, rc, RT_SRC_POS, N_("VMDK: error storing parent image UUID in descriptor in '%s'"), pImage->pszFilename);
5557 rc = VINF_SUCCESS;
5558 }
5559 else
5560 rc = VERR_VD_IMAGE_READ_ONLY;
5561 }
5562 else
5563 rc = VERR_VD_NOT_OPENED;
5564
5565 LogFlowFunc(("returns %Rrc\n", rc));
5566 return rc;
5567}
5568
5569/** @copydoc VBOXHDDBACKEND::pfnGetParentModificationUuid */
5570static int vmdkGetParentModificationUuid(void *pBackendData, PRTUUID pUuid)
5571{
5572 LogFlowFunc(("pBackendData=%#p pUuid=%#p\n", pBackendData, pUuid));
5573 PVMDKIMAGE pImage = (PVMDKIMAGE)pBackendData;
5574 int rc;
5575
5576 AssertPtr(pImage);
5577
5578 if (pImage)
5579 {
5580 *pUuid = pImage->ParentModificationUuid;
5581 rc = VINF_SUCCESS;
5582 }
5583 else
5584 rc = VERR_VD_NOT_OPENED;
5585
5586 LogFlowFunc(("returns %Rrc (%RTuuid)\n", rc, pUuid));
5587 return rc;
5588}
5589
5590/** @copydoc VBOXHDDBACKEND::pfnSetParentModificationUuid */
5591static int vmdkSetParentModificationUuid(void *pBackendData, PCRTUUID pUuid)
5592{
5593 LogFlowFunc(("pBackendData=%#p Uuid=%RTuuid\n", pBackendData, pUuid));
5594 PVMDKIMAGE pImage = (PVMDKIMAGE)pBackendData;
5595 int rc;
5596
5597 AssertPtr(pImage);
5598
5599 if (pImage)
5600 {
5601 if (!(pImage->uOpenFlags & VD_OPEN_FLAGS_READONLY))
5602 {
5603 pImage->ParentModificationUuid = *pUuid;
5604 rc = vmdkDescDDBSetUuid(pImage, &pImage->Descriptor,
5605 VMDK_DDB_PARENT_MODIFICATION_UUID, pUuid);
5606 if (RT_FAILURE(rc))
5607 return vmdkError(pImage, rc, RT_SRC_POS, N_("VMDK: error storing parent image UUID in descriptor in '%s'"), pImage->pszFilename);
5608 rc = VINF_SUCCESS;
5609 }
5610 else
5611 rc = VERR_VD_IMAGE_READ_ONLY;
5612 }
5613 else
5614 rc = VERR_VD_NOT_OPENED;
5615
5616 LogFlowFunc(("returns %Rrc\n", rc));
5617 return rc;
5618}
5619
5620/** @copydoc VBOXHDDBACKEND::pfnDump */
5621static void vmdkDump(void *pBackendData)
5622{
5623 PVMDKIMAGE pImage = (PVMDKIMAGE)pBackendData;
5624
5625 AssertPtr(pImage);
5626 if (pImage)
5627 {
5628 RTLogPrintf("Header: Geometry PCHS=%u/%u/%u LCHS=%u/%u/%u cbSector=%llu\n",
5629 pImage->PCHSGeometry.cCylinders, pImage->PCHSGeometry.cHeads, pImage->PCHSGeometry.cSectors,
5630 pImage->LCHSGeometry.cCylinders, pImage->LCHSGeometry.cHeads, pImage->LCHSGeometry.cSectors,
5631 VMDK_BYTE2SECTOR(pImage->cbSize));
5632 RTLogPrintf("Header: uuidCreation={%RTuuid}\n", &pImage->ImageUuid);
5633 RTLogPrintf("Header: uuidModification={%RTuuid}\n", &pImage->ModificationUuid);
5634 RTLogPrintf("Header: uuidParent={%RTuuid}\n", &pImage->ParentUuid);
5635 RTLogPrintf("Header: uuidParentModification={%RTuuid}\n", &pImage->ParentModificationUuid);
5636 }
5637}
5638
5639
5640static int vmdkGetTimeStamp(void *pvBackendData, PRTTIMESPEC pTimeStamp)
5641{
5642 int rc = VERR_NOT_IMPLEMENTED;
5643 LogFlow(("%s: returned %Rrc\n", __FUNCTION__, rc));
5644 return rc;
5645}
5646
5647static int vmdkGetParentTimeStamp(void *pvBackendData, PRTTIMESPEC pTimeStamp)
5648{
5649 int rc = VERR_NOT_IMPLEMENTED;
5650 LogFlow(("%s: returned %Rrc\n", __FUNCTION__, rc));
5651 return rc;
5652}
5653
5654static int vmdkSetParentTimeStamp(void *pvBackendData, PCRTTIMESPEC pTimeStamp)
5655{
5656 int rc = VERR_NOT_IMPLEMENTED;
5657 LogFlow(("%s: returned %Rrc\n", __FUNCTION__, rc));
5658 return rc;
5659}
5660
5661static int vmdkGetParentFilename(void *pvBackendData, char **ppszParentFilename)
5662{
5663 int rc = VERR_NOT_IMPLEMENTED;
5664 LogFlow(("%s: returned %Rrc\n", __FUNCTION__, rc));
5665 return rc;
5666}
5667
5668static int vmdkSetParentFilename(void *pvBackendData, const char *pszParentFilename)
5669{
5670 int rc = VERR_NOT_IMPLEMENTED;
5671 LogFlow(("%s: returned %Rrc\n", __FUNCTION__, rc));
5672 return rc;
5673}
5674
5675static bool vmdkIsAsyncIOSupported(void *pvBackendData)
5676{
5677 PVMDKIMAGE pImage = (PVMDKIMAGE)pvBackendData;
5678 bool fAsyncIOSupported = false;
5679
5680 if (pImage)
5681 {
5682 /* We only support async I/O support if the image only consists of FLAT or ZERO extents. */
5683 fAsyncIOSupported = true;
5684 for (unsigned i = 0; i < pImage->cExtents; i++)
5685 {
5686 if ( (pImage->pExtents[i].enmType != VMDKETYPE_FLAT)
5687 && (pImage->pExtents[i].enmType != VMDKETYPE_ZERO))
5688 {
5689 fAsyncIOSupported = false;
5690 break; /* Stop search */
5691 }
5692 }
5693 }
5694
5695 return fAsyncIOSupported;
5696}
5697
5698static int vmdkAsyncRead(void *pvBackendData, uint64_t uOffset, size_t cbRead,
5699 PPDMDATASEG paSeg, unsigned cSeg, void *pvUser)
5700{
5701 PVMDKIMAGE pImage = (PVMDKIMAGE)pvBackendData;
5702 PVMDKEXTENT pExtent;
5703 int rc = VINF_SUCCESS;
5704 unsigned cTasksToSubmit = 0;
5705 PPDMDATASEG paSegCurrent = paSeg;
5706 size_t cbLeftInCurrentSegment = paSegCurrent->cbSeg;
5707 size_t uOffsetInCurrentSegment = 0;
5708
5709 AssertPtr(pImage);
5710 Assert(uOffset % 512 == 0);
5711 Assert(cbRead % 512 == 0);
5712
5713 if ( uOffset + cbRead > pImage->cbSize
5714 || cbRead == 0)
5715 {
5716 rc = VERR_INVALID_PARAMETER;
5717 goto out;
5718 }
5719
5720 while (cbRead && cSeg)
5721 {
5722 size_t cbToRead;
5723 uint64_t uSectorExtentRel;
5724
5725 rc = vmdkFindExtent(pImage, VMDK_BYTE2SECTOR(uOffset),
5726 &pExtent, &uSectorExtentRel);
5727 if (RT_FAILURE(rc))
5728 goto out;
5729
5730 /* Check access permissions as defined in the extent descriptor. */
5731 if (pExtent->enmAccess == VMDKACCESS_NOACCESS)
5732 {
5733 rc = VERR_VD_VMDK_INVALID_STATE;
5734 goto out;
5735 }
5736
5737 /* Clip read range to remain in this extent. */
5738 cbToRead = RT_MIN(cbRead, VMDK_SECTOR2BYTE(pExtent->uSectorOffset + pExtent->cNominalSectors - uSectorExtentRel));
5739 /* Clip read range to remain into current data segment. */
5740 cbToRead = RT_MIN(cbToRead, cbLeftInCurrentSegment);
5741
5742 switch (pExtent->enmType)
5743 {
5744 case VMDKETYPE_FLAT:
5745 {
5746 /* Setup new task. */
5747 void *pTask;
5748 rc = pImage->pInterfaceAsyncIOCallbacks->pfnPrepareRead(pImage->pInterfaceAsyncIO->pvUser, pExtent->pFile->pStorage,
5749 VMDK_SECTOR2BYTE(uSectorExtentRel),
5750 (uint8_t *)paSegCurrent->pvSeg + uOffsetInCurrentSegment,
5751 cbToRead, &pTask);
5752 if (RT_FAILURE(rc))
5753 {
5754 AssertMsgFailed(("Preparing read failed rc=%Rrc\n", rc));
5755 goto out;
5756 }
5757
5758 /* Check for enough room first. */
5759 if (cTasksToSubmit >= pImage->cTask)
5760 {
5761 /* We reached maximum, resize array. Try to realloc memory first. */
5762 void **apTaskNew = (void **)RTMemRealloc(pImage->apTask, (cTasksToSubmit + 10)*sizeof(void *));
5763
5764 if (!apTaskNew)
5765 {
5766 /* We failed. Allocate completely new. */
5767 apTaskNew = (void **)RTMemAllocZ((cTasksToSubmit + 10)* sizeof(void *));
5768 if (!apTaskNew)
5769 {
5770 /* Damn, we are out of memory. */
5771 rc = VERR_NO_MEMORY;
5772 goto out;
5773 }
5774
5775 /* Copy task handles over. */
5776 for (unsigned i = 0; i < cTasksToSubmit; i++)
5777 apTaskNew[i] = pImage->apTask[i];
5778
5779 /* Free old memory. */
5780 RTMemFree(pImage->apTask);
5781 }
5782
5783 pImage->cTask = cTasksToSubmit + 10;
5784 pImage->apTask = apTaskNew;
5785 }
5786
5787 pImage->apTask[cTasksToSubmit] = pTask;
5788 cTasksToSubmit++;
5789 break;
5790 }
5791 case VMDKETYPE_ZERO:
5792 memset((uint8_t *)paSegCurrent->pvSeg + uOffsetInCurrentSegment, 0, cbToRead);
5793 break;
5794 default:
5795 AssertMsgFailed(("Unsupported extent type %u\n", pExtent->enmType));
5796 }
5797
5798 cbRead -= cbToRead;
5799 uOffset += cbToRead;
5800 cbLeftInCurrentSegment -= cbToRead;
5801 uOffsetInCurrentSegment += cbToRead;
5802 /* Go to next extent if there is no space left in current one. */
5803 if (!cbLeftInCurrentSegment)
5804 {
5805 uOffsetInCurrentSegment = 0;
5806 paSegCurrent++;
5807 cSeg--;
5808 cbLeftInCurrentSegment = paSegCurrent->cbSeg;
5809 }
5810 }
5811
5812 AssertMsg(cbRead == 0, ("No segment left but there is still data to read\n"));
5813
5814 if (cTasksToSubmit == 0)
5815 {
5816 /* The request was completely in a ZERO extent nothing to do. */
5817 rc = VINF_VD_ASYNC_IO_FINISHED;
5818 }
5819 else
5820 {
5821 /* Submit tasks. */
5822 rc = pImage->pInterfaceAsyncIOCallbacks->pfnTasksSubmit(pImage->pInterfaceAsyncIO->pvUser,
5823 pImage->apTask, cTasksToSubmit,
5824 NULL, pvUser,
5825 NULL /* Nothing required after read. */);
5826 AssertMsgRC(rc, ("Failed to enqueue tasks rc=%Rrc\n", rc));
5827 }
5828
5829out:
5830 LogFlowFunc(("returns %Rrc\n", rc));
5831 return rc;
5832}
5833
5834static int vmdkAsyncWrite(void *pvBackendData, uint64_t uOffset, size_t cbWrite,
5835 PPDMDATASEG paSeg, unsigned cSeg, void *pvUser)
5836{
5837 PVMDKIMAGE pImage = (PVMDKIMAGE)pvBackendData;
5838 PVMDKEXTENT pExtent;
5839 int rc = VINF_SUCCESS;
5840 unsigned cTasksToSubmit = 0;
5841 PPDMDATASEG paSegCurrent = paSeg;
5842 size_t cbLeftInCurrentSegment = paSegCurrent->cbSeg;
5843 size_t uOffsetInCurrentSegment = 0;
5844
5845 AssertPtr(pImage);
5846 Assert(uOffset % 512 == 0);
5847 Assert(cbWrite % 512 == 0);
5848
5849 if ( uOffset + cbWrite > pImage->cbSize
5850 || cbWrite == 0)
5851 {
5852 rc = VERR_INVALID_PARAMETER;
5853 goto out;
5854 }
5855
5856 while (cbWrite && cSeg)
5857 {
5858 size_t cbToWrite;
5859 uint64_t uSectorExtentRel;
5860
5861 rc = vmdkFindExtent(pImage, VMDK_BYTE2SECTOR(uOffset),
5862 &pExtent, &uSectorExtentRel);
5863 if (RT_FAILURE(rc))
5864 goto out;
5865
5866 /* Check access permissions as defined in the extent descriptor. */
5867 if (pExtent->enmAccess == VMDKACCESS_NOACCESS)
5868 {
5869 rc = VERR_VD_VMDK_INVALID_STATE;
5870 goto out;
5871 }
5872
5873 /* Clip write range to remain in this extent. */
5874 cbToWrite = RT_MIN(cbWrite, VMDK_SECTOR2BYTE(pExtent->uSectorOffset + pExtent->cNominalSectors - uSectorExtentRel));
5875 /* Clip write range to remain into current data segment. */
5876 cbToWrite = RT_MIN(cbToWrite, cbLeftInCurrentSegment);
5877
5878 switch (pExtent->enmType)
5879 {
5880 case VMDKETYPE_FLAT:
5881 {
5882 /* Setup new task. */
5883 void *pTask;
5884 rc = pImage->pInterfaceAsyncIOCallbacks->pfnPrepareWrite(pImage->pInterfaceAsyncIO->pvUser, pExtent->pFile->pStorage,
5885 VMDK_SECTOR2BYTE(uSectorExtentRel),
5886 (uint8_t *)paSegCurrent->pvSeg + uOffsetInCurrentSegment,
5887 cbToWrite, &pTask);
5888 if (RT_FAILURE(rc))
5889 {
5890 AssertMsgFailed(("Preparing read failed rc=%Rrc\n", rc));
5891 goto out;
5892 }
5893
5894 /* Check for enough room first. */
5895 if (cTasksToSubmit >= pImage->cTask)
5896 {
5897 /* We reached maximum, resize array. Try to realloc memory first. */
5898 void **apTaskNew = (void **)RTMemRealloc(pImage->apTask, (cTasksToSubmit + 10)*sizeof(void *));
5899
5900 if (!apTaskNew)
5901 {
5902 /* We failed. Allocate completely new. */
5903 apTaskNew = (void **)RTMemAllocZ((cTasksToSubmit + 10)* sizeof(void *));
5904 if (!apTaskNew)
5905 {
5906 /* Damn, we are out of memory. */
5907 rc = VERR_NO_MEMORY;
5908 goto out;
5909 }
5910
5911 /* Copy task handles over. */
5912 for (unsigned i = 0; i < cTasksToSubmit; i++)
5913 apTaskNew[i] = pImage->apTask[i];
5914
5915 /* Free old memory. */
5916 RTMemFree(pImage->apTask);
5917 }
5918
5919 pImage->cTask = cTasksToSubmit + 10;
5920 pImage->apTask = apTaskNew;
5921 }
5922
5923 pImage->apTask[cTasksToSubmit] = pTask;
5924 cTasksToSubmit++;
5925 break;
5926 }
5927 case VMDKETYPE_ZERO:
5928 /* Nothing left to do. */
5929 break;
5930 default:
5931 AssertMsgFailed(("Unsupported extent type %u\n", pExtent->enmType));
5932 }
5933
5934 cbWrite -= cbToWrite;
5935 uOffset += cbToWrite;
5936 cbLeftInCurrentSegment -= cbToWrite;
5937 uOffsetInCurrentSegment += cbToWrite;
5938 /* Go to next extent if there is no space left in current one. */
5939 if (!cbLeftInCurrentSegment)
5940 {
5941 uOffsetInCurrentSegment = 0;
5942 paSegCurrent++;
5943 cSeg--;
5944 cbLeftInCurrentSegment = paSegCurrent->cbSeg;
5945 }
5946 }
5947
5948 AssertMsg(cbWrite == 0, ("No segment left but there is still data to read\n"));
5949
5950 if (cTasksToSubmit == 0)
5951 {
5952 /* The request was completely in a ZERO extent nothing to do. */
5953 rc = VINF_VD_ASYNC_IO_FINISHED;
5954 }
5955 else
5956 {
5957 /* Submit tasks. */
5958 rc = pImage->pInterfaceAsyncIOCallbacks->pfnTasksSubmit(pImage->pInterfaceAsyncIO->pvUser,
5959 pImage->apTask, cTasksToSubmit,
5960 NULL, pvUser,
5961 NULL /* Nothing required after read. */);
5962 AssertMsgRC(rc, ("Failed to enqueue tasks rc=%Rrc\n", rc));
5963 }
5964
5965out:
5966 LogFlowFunc(("returns %Rrc\n", rc));
5967 return rc;
5968
5969}
5970
5971
5972VBOXHDDBACKEND g_VmdkBackend =
5973{
5974 /* pszBackendName */
5975 "VMDK",
5976 /* cbSize */
5977 sizeof(VBOXHDDBACKEND),
5978 /* uBackendCaps */
5979 VD_CAP_UUID | VD_CAP_CREATE_FIXED | VD_CAP_CREATE_DYNAMIC
5980 | VD_CAP_CREATE_SPLIT_2G | VD_CAP_DIFF | VD_CAP_FILE |VD_CAP_ASYNC,
5981 /* papszFileExtensions */
5982 s_apszVmdkFileExtensions,
5983 /* paConfigInfo */
5984 NULL,
5985 /* hPlugin */
5986 NIL_RTLDRMOD,
5987 /* pfnCheckIfValid */
5988 vmdkCheckIfValid,
5989 /* pfnOpen */
5990 vmdkOpen,
5991 /* pfnCreate */
5992 vmdkCreate,
5993 /* pfnRename */
5994 vmdkRename,
5995 /* pfnClose */
5996 vmdkClose,
5997 /* pfnRead */
5998 vmdkRead,
5999 /* pfnWrite */
6000 vmdkWrite,
6001 /* pfnFlush */
6002 vmdkFlush,
6003 /* pfnGetVersion */
6004 vmdkGetVersion,
6005 /* pfnGetSize */
6006 vmdkGetSize,
6007 /* pfnGetFileSize */
6008 vmdkGetFileSize,
6009 /* pfnGetPCHSGeometry */
6010 vmdkGetPCHSGeometry,
6011 /* pfnSetPCHSGeometry */
6012 vmdkSetPCHSGeometry,
6013 /* pfnGetLCHSGeometry */
6014 vmdkGetLCHSGeometry,
6015 /* pfnSetLCHSGeometry */
6016 vmdkSetLCHSGeometry,
6017 /* pfnGetImageFlags */
6018 vmdkGetImageFlags,
6019 /* pfnGetOpenFlags */
6020 vmdkGetOpenFlags,
6021 /* pfnSetOpenFlags */
6022 vmdkSetOpenFlags,
6023 /* pfnGetComment */
6024 vmdkGetComment,
6025 /* pfnSetComment */
6026 vmdkSetComment,
6027 /* pfnGetUuid */
6028 vmdkGetUuid,
6029 /* pfnSetUuid */
6030 vmdkSetUuid,
6031 /* pfnGetModificationUuid */
6032 vmdkGetModificationUuid,
6033 /* pfnSetModificationUuid */
6034 vmdkSetModificationUuid,
6035 /* pfnGetParentUuid */
6036 vmdkGetParentUuid,
6037 /* pfnSetParentUuid */
6038 vmdkSetParentUuid,
6039 /* pfnGetParentModificationUuid */
6040 vmdkGetParentModificationUuid,
6041 /* pfnSetParentModificationUuid */
6042 vmdkSetParentModificationUuid,
6043 /* pfnDump */
6044 vmdkDump,
6045 /* pfnGetTimeStamp */
6046 vmdkGetTimeStamp,
6047 /* pfnGetParentTimeStamp */
6048 vmdkGetParentTimeStamp,
6049 /* pfnSetParentTimeStamp */
6050 vmdkSetParentTimeStamp,
6051 /* pfnGetParentFilename */
6052 vmdkGetParentFilename,
6053 /* pfnSetParentFilename */
6054 vmdkSetParentFilename,
6055 /* pfnIsAsyncIOSupported */
6056 vmdkIsAsyncIOSupported,
6057 /* pfnAsyncRead */
6058 vmdkAsyncRead,
6059 /* pfnAsyncWrite */
6060 vmdkAsyncWrite,
6061 /* pfnComposeLocation */
6062 genericFileComposeLocation,
6063 /* pfnComposeName */
6064 genericFileComposeName
6065};
Note: See TracBrowser for help on using the repository browser.

© 2024 Oracle Support Privacy / Do Not Sell My Info Terms of Use Trademark Policy Automated Access Etiquette