Changeset 97255 in vbox for trunk/src/VBox/Storage
- Timestamp: Oct 20, 2022, 2:56:36 PM
- svn:sync-xref-src-repo-rev: 154235
- File: 1 edited
trunk/src/VBox/Storage/VMDK.cpp
Diff r96842 → r97255. The hunks run through the entire file; as far as the captured hunks can be read, every change except one (noted at the end, in the raw-partition verification code) reinserts a blank line separating the file's comment blocks, declarations and functions.

The first group of hunks covers the file header and the declarations: the "VMDK disk image, core code" header comment and the Oracle copyright block, the include section (VBox/vd-plugin.h, VBox/err.h, the iprt headers, the Darwin DKIOC(UN)LOCKPHYSICALEXTENTS defines and VDBackends.h), and the "Constants And Macros, Structures and Typedefs" block: VMDK_ENCODED_COMMENT_MAX, the ddb.geometry.* and ddb.uuid.* descriptor keys, the compression and marker constants, VMDK_SPARSE_MAGICNUMBER, VMDK_SECTOR_SIZE, VMDK_GRAIN_TABLE_SIZE, the SparseExtentHeader and VMDKMARKER on-disk structures, VMDK_SPARSE_DESCRIPTOR_SIZE_MAX, VMDK_2G_SPLIT_SIZE, the VMDK_SECTOR2BYTE/VMDK_BYTE2SECTOR conversion macros, and the VMDKETYPE, VMDKACCESS, VMDKFILE, VMDKEXTENT, VMDKDESCRIPTOR, VMDKGTCACHEENTRY, VMDKGTCACHE and VMDKIMAGE definitions. A separating blank line is added between each of these items.
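For orientation while reading these hunks: the two conversion macros shift by 9 bits because a VMDK sector is 512 bytes. A minimal standalone sketch; the three #defines are copied from the diff context, the main() harness is illustrative only:

    #include <stdint.h>
    #include <stdio.h>

    /* Copied from the diff context. */
    #define VMDK_SECTOR_SIZE    512
    #define VMDK_SECTOR2BYTE(u) ((uint64_t)(u) << 9)
    #define VMDK_BYTE2SECTOR(u) ((u) >> 9)

    int main(void)
    {
        uint64_t cSectors = 2048; /* 1 MiB worth of 512-byte sectors */
        uint64_t cbBytes  = VMDK_SECTOR2BYTE(cSectors);
        printf("%llu sectors = %llu bytes\n",
               (unsigned long long)cSectors, (unsigned long long)cbBytes);
        printf("%llu bytes = %llu sectors\n",
               (unsigned long long)cbBytes, (unsigned long long)VMDK_BYTE2SECTOR(cbBytes));
        return 0;
    }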
The same pattern continues through the remaining declarations and the first group of internal helpers: the rest of the VMDKIMAGE structure (open flags, geometries, UUIDs, grain table cache pointer, region list), the VMDKCOMPRESSIO and VMDKGRAINALLOCASYNC state structures, the state used by vmdkRename(), the static tables s_aVmdkFileExtensions and s_aVmdkConfigInfo, the forward declarations, the per-image file descriptor cache (vmdkFileOpen and the matching close routine), the streamOptimized compression path (the vmdkFileInflateHelper/vmdkFileDeflateHelper RTZip callbacks and the read-and-inflate/deflate-and-write functions built on them), the open-file leak check, the comment string encode/decode helpers, and the grain directory code (vmdkFreeGrainDirectory, vmdkAllocStreamBuffers, vmdkAllocGrainDirectory, vmdkGrainDirectoryConvToHost, the grain directory reader with its grain-table/redundant-grain-table consistency check, and vmdkCreateGrainDirectory). Each hunk only restores the blank lines between these functions and between logical blocks inside them.
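The grain directory reader and creator in these hunks implement the usual two-level VMDK lookup: each grain directory entry covers cSectorsPerGDE = cGTEntries × cSectorsPerGrain sectors, and each grain table entry holds the file sector of one grain. The function below is only a sketch of that arithmetic under assumed defaults (64 KB grains, 512 grain-table entries); it is not code from VMDK.cpp:

    #include <stdint.h>

    #define VMDK_SECTOR2BYTE(u) ((uint64_t)(u) << 9)

    /* Assumed defaults for illustration: 64 KB grains, 512 entries per grain table. */
    #define CSECTORS_PER_GRAIN  128u   /* 64 KB / 512-byte sectors */
    #define CGT_ENTRIES         512u

    /* Locate the grain-table entry describing a virtual sector.
     * pGD is the in-memory grain directory: the file sectors of the grain tables.
     * Returns the byte offset of the grain-table entry, or 0 if the table is unallocated. */
    static uint64_t vmdkGteFileOffset(const uint32_t *pGD, uint64_t uVirtSector)
    {
        uint64_t cSectorsPerGDE = (uint64_t)CGT_ENTRIES * CSECTORS_PER_GRAIN;
        uint64_t iGD = uVirtSector / cSectorsPerGDE;                            /* which grain table */
        uint64_t iGT = (uVirtSector % cSectorsPerGDE) / CSECTORS_PER_GRAIN;     /* entry inside it */
        if (!pGD[iGD])
            return 0; /* grain table not allocated -> the sector reads back as zero */
        return VMDK_SECTOR2BYTE(pGD[iGD]) + iGT * sizeof(uint32_t);
    }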
Further hunks of the same kind run through the descriptor handling: the string unquoting helper, the descriptor line primitives (vmdkDescInitStr, vmdkDescGetStr, vmdkDescSetStr), the base-section and extent-section helpers (vmdkDescBaseGetU32 and its string siblings, vmdkDescExtRemoveDummy, vmdkDescExtInsert), the disk database accessors (vmdkDescDDBGetU32/GetUuid/SetStr/SetUuid/SetU32 and the string getter), the line splitter and vmdkPreprocessDescriptor, the PCHS/LCHS geometry setters, vmdkCreateDescriptor, and vmdkParseDescriptor with its version and createType checks, the per-extent parsing of access mode, nominal size, extent type and quoted basename, and the geometry and image/parent UUID handling. Again, only separating blank lines are inserted.
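The extent lines that vmdkDescExtInsert() emits and vmdkParseDescriptor() reads back have the general shape <access> <sectors> <type> "<basename>", with flat extents additionally carrying a start offset. The formatter below only illustrates that layout using plain libc rather than the VirtualBox helpers; the reduced enums, the string tables and the trailing offset field for FLAT extents are assumptions, not copied from VMDK.cpp:

    #include <stdio.h>
    #include <stdint.h>

    /* Reduced, illustrative copies; the real enums in VMDK.cpp carry more context. */
    typedef enum { ACCESS_NOACCESS, ACCESS_READONLY, ACCESS_READWRITE } EXTACCESS;
    typedef enum { TYPE_SPARSE, TYPE_FLAT, TYPE_ZERO, TYPE_VMFS } EXTTYPE;

    static void formatExtentLine(char *psz, size_t cb, EXTACCESS enmAccess,
                                 uint64_t cNominalSectors, EXTTYPE enmType,
                                 const char *pszBasename, uint64_t uSectorOffset)
    {
        static const char *apszAccess[] = { "NOACCESS", "RDONLY", "RW" };
        static const char *apszType[]   = { "SPARSE", "FLAT", "ZERO", "VMFS" };

        if (enmType == TYPE_ZERO)               /* ZERO extents have no backing file */
            snprintf(psz, cb, "%s %llu %s", apszAccess[enmAccess],
                     (unsigned long long)cNominalSectors, apszType[enmType]);
        else if (enmType == TYPE_FLAT)          /* flat extents carry a start offset (assumed) */
            snprintf(psz, cb, "%s %llu %s \"%s\" %llu", apszAccess[enmAccess],
                     (unsigned long long)cNominalSectors, apszType[enmType],
                     pszBasename, (unsigned long long)uSectorOffset);
        else
            snprintf(psz, cb, "%s %llu %s \"%s\"", apszAccess[enmAccess],
                     (unsigned long long)cNominalSectors, apszType[enmType], pszBasename);
    }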
The next hunks cover the descriptor writer and the extent metadata code: the routine that prepares the descriptor buffer (growing it when there is no size limit), the write/update of the descriptor part of the image, the binary header validation, the readers for extent metadata with a binary sparse header and for the additional per-extent metadata, the sparse extent metadata writer, vmdkFreeStreamBuffers and vmdkFreeExtentData, the grain table cache allocation and vmdkCreateExtents, the helper that allocates and describes an additional file-backed extent, the descriptor readers (embedded and plain text file, vmdkDescriptorRead) and the image open path with its region list setup, plus the first raw-disk helpers: freeing a raw descriptor, growing the partition descriptor array, the sort-and-overlap check of the partition descriptors, and the start of the Linux /sys/block device lookup. As before, the visible additions are blank lines.
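The partition post-processing visible here sorts the descriptors by offStartInVDisk and then refuses overlapping entries. A compact sketch of that check, with the structure reduced to the two fields the check needs (everything else, including the names, is assumed):

    #include <stdint.h>
    #include <stdlib.h>
    #include <stdbool.h>

    typedef struct { uint64_t offStartInVDisk; uint64_t cbData; } PARTDESC;

    static int partComp(const void *pv1, const void *pv2)
    {
        const PARTDESC *p1 = (const PARTDESC *)pv1, *p2 = (const PARTDESC *)pv2;
        return p1->offStartInVDisk < p2->offStartInVDisk ? -1
             : p1->offStartInVDisk > p2->offStartInVDisk ? 1 : 0;
    }

    /* Returns true if, once sorted, no descriptor runs into its successor. */
    static bool partDescsAreConsistent(PARTDESC *paDescs, size_t cDescs)
    {
        qsort(paDescs, cDescs, sizeof(paDescs[0]), partComp);
        for (size_t i = 0; i + 1 < cDescs; i++)
            if (paDescs[i].offStartInVDisk + paDescs[i].cbData > paDescs[i + 1].offStartInVDisk)
                return false; /* descriptor i overlaps descriptor i+1 */
        return true;
    }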
*/ 3741 3421 3742 dev_t uThisDevNo = ~uDevToLocate; 3422 3743 rc = RTLinuxSysFsReadDevNumFile(&uThisDevNo, "%s/dev", pszBlockDevDir); … … 3448 3769 } 3449 3770 #endif /* RT_OS_LINUX */ 3771 3450 3772 #ifdef RT_OS_FREEBSD 3773 3774 3451 3775 /** 3452 3776 * Reads the config data from the provider and returns offset and size … … 3461 3785 gconfig *pConfEntry; 3462 3786 int rc = VERR_NOT_FOUND; 3787 3463 3788 /* 3464 3789 * Required parameters are located in the list containing key/value pairs. … … 3491 3816 return rc; 3492 3817 } 3818 3819 3493 3820 /** 3494 3821 * Searches the partition specified by name and calculates its size and absolute offset. … … 3509 3836 AssertReturn(pcbAbsoluteOffset, VERR_INVALID_PARAMETER); 3510 3837 AssertReturn(pcbSize, VERR_INVALID_PARAMETER); 3838 3511 3839 ggeom *pParentGeom; 3512 3840 int rc = VERR_NOT_FOUND; … … 3521 3849 if (RT_FAILURE(rc)) 3522 3850 return rc; 3851 3523 3852 gprovider *pProvider; 3524 3853 /* … … 3532 3861 return vmdkReadPartitionsParamsFromProvider(pProvider, pcbAbsoluteOffset, pcbSize); 3533 3862 } 3863 3534 3864 /* 3535 3865 * No provider found. Go over the parent geom again … … 3541 3871 * provider 3542 3872 */ 3873 3543 3874 LIST_FOREACH(pProvider, &pParentGeom->lg_provider, lg_provider) 3544 3875 { … … 3548 3879 if (RT_FAILURE(rc)) 3549 3880 return rc; 3881 3550 3882 uint64_t cbProviderOffset = 0; 3551 3883 uint64_t cbProviderSize = 0; … … 3558 3890 } 3559 3891 } 3892 3560 3893 return VERR_NOT_FOUND; 3561 3894 } 3562 3895 #endif 3896 3897 3563 3898 /** 3564 3899 * Attempts to verify the raw partition path. … … 3570 3905 { 3571 3906 RT_NOREF(pImage, pPartDesc, idxPartition, pszRawDrive, hRawDrive, cbSector, hVol); 3907 3572 3908 /* 3573 3909 * Try open the raw partition device. … … 3579 3915 N_("VMDK: Image path: '%s'. Failed to open partition #%u on '%s' via '%s' (%Rrc)"), 3580 3916 pImage->pszFilename, idxPartition, pszRawDrive, pPartDesc->pszRawDevice, rc); 3917 3581 3918 /* 3582 3919 * Compare the partition UUID if we can get it. … … 3584 3921 #ifdef RT_OS_WINDOWS 3585 3922 DWORD cbReturned; 3923 3586 3924 /* 1. Get the device numbers for both handles, they should have the same disk. */ 3587 3925 STORAGE_DEVICE_NUMBER DevNum1; … … 3592 3930 N_("VMDK: Image path: '%s'. IOCTL_STORAGE_GET_DEVICE_NUMBER failed on '%s': %u"), 3593 3931 pImage->pszFilename, pszRawDrive, GetLastError()); 3932 3594 3933 STORAGE_DEVICE_NUMBER DevNum2; 3595 3934 RT_ZERO(DevNum2); … … 3683 4022 rc = VERR_NO_TMP_MEMORY; 3684 4023 } 4024 3685 4025 #elif defined(RT_OS_LINUX) 3686 4026 RT_NOREF(hVol); 4027 3687 4028 /* Stat the two devices first to get their device numbers. (We probably 3688 4029 could make some assumptions here about the major & minor number assignments … … 3705 4046 { 3706 4047 rc = vmdkFindSysBlockDevPath(pImage, szSysPath, sizeof(szSysPath), StDrive.st_rdev, pszRawDrive); 4048 3707 4049 /* Now, scan the directories under that again for a partition device 3708 4050 matching the hRawPart device's number: */ 3709 4051 if (RT_SUCCESS(rc)) 3710 4052 rc = vmdkFindSysBlockDevPath(pImage, szSysPath, sizeof(szSysPath), StPart.st_rdev, pPartDesc->pszRawDevice); 4053 3711 4054 /* Having found the /sys/block/device/partition/ path, we can finally 3712 4055 read the partition attributes and compare with hVol. */ … … 3721 4064 pImage->pszFilename, idxPartition, pPartDesc->pszRawDevice, pszRawDrive, iLnxPartition, idxPartition); 3722 4065 /* else: ignore failure? 
*/ 4066 3723 4067 /* start offset: */ 3724 4068 uint32_t const cbLnxSector = 512; /* It's hardcoded in the Linux kernel */ … … 3734 4078 /* else: ignore failure? */ 3735 4079 } 4080 3736 4081 /* the size: */ 3737 4082 if (RT_SUCCESS(rc)) … … 3750 4095 /* else: We've got nothing to work on, so only do content comparison. */ 3751 4096 } 4097 3752 4098 #elif defined(RT_OS_FREEBSD) 3753 4099 char szDriveDevName[256]; … … 3780 4126 rc = vdIfError(pImage->pIfError, VERR_GENERAL_FAILURE, RT_SRC_POS, 3781 4127 N_("VMDK: Image path: '%s'. 'PART' class not found in the GEOM tree"), pImage->pszFilename); 4128 4129 3782 4130 if (RT_SUCCESS(rc)) 3783 4131 { … … 3802 4150 pImage->pszFilename, pPartDesc->pszRawDevice, pszRawDrive, rc); 3803 4151 } 4152 3804 4153 geom_deletetree(&geomMesh); 3805 4154 } … … 3808 4157 N_("VMDK: Image path: '%s'. geom_gettree failed: %d"), pImage->pszFilename, err); 3809 4158 } 4159 3810 4160 #elif defined(RT_OS_SOLARIS) 3811 4161 RT_NOREF(hVol); 4162 3812 4163 dk_cinfo dkiDriveInfo; 3813 4164 dk_cinfo dkiPartInfo; … … 3857 4208 * using another way. If there is an error, it returns errno which will be handled below. 3858 4209 */ 4210 3859 4211 uint32_t numPartition = (uint32_t)dkiPartInfo.dki_partition; 3860 4212 if (numPartition > NDKMAP) … … 3891 4243 N_("VMDK: Image path: '%s'. Partition #%u path ('%s') verification failed on '%s': Start offset %RI64, expected %RU64"), 3892 4244 pImage->pszFilename, idxPartition, pPartDesc->pszRawDevice, pszRawDrive, cbOffset, pPartDesc->offStartInVDisk); 4245 3893 4246 if (RT_SUCCESS(rc) && cbSize != pPartDesc->cbData) 3894 4247 rc = vdIfError(pImage->pIfError, VERR_MISMATCH, RT_SRC_POS, … … 3966 4319 #else 3967 4320 RT_NOREF(hVol); /* PORTME */ 4321 rc = VERR_NOT_SUPPORTED; 3968 4322 #endif 3969 4323 if (RT_SUCCESS(rc)) … … 3981 4335 { 3982 4336 uint8_t *pbSector2 = pbSector1 + cbToCompare; 4337 3983 4338 /* Do the comparing, we repeat if it fails and the data might be volatile. */ 3984 4339 uint64_t uPrevCrc1 = 0; … … 3996 4351 { 3997 4352 rc = VERR_MISMATCH; 4353 3998 4354 /* Do data stability checks before repeating: */ 3999 4355 uint64_t const uCrc1 = RTCrc64(pbSector1, cbToCompare); … … 4028 4384 offMissmatch++; 4029 4385 int cbSample = (int)RT_MIN(cbToCompare - offMissmatch, 16); 4386 4030 4387 if (cStable > 0) 4031 4388 rc = vdIfError(pImage->pIfError, rc, RT_SRC_POS, … … 4041 4398 } 4042 4399 } 4400 4043 4401 RTMemTmpFree(pbSector1); 4044 4402 } … … 4051 4409 return rc; 4052 4410 } 4411 4053 4412 #ifdef RT_OS_WINDOWS 4054 4413 /** … … 4072 4431 } 4073 4432 #endif /* RT_OS_WINDOWS */ 4433 4074 4434 /** 4075 4435 * Worker for vmdkMakeRawDescriptor that adds partition descriptors when the … … 4088 4448 { 4089 4449 *phVolToRelease = NIL_RTDVMVOLUME; 4450 4090 4451 /* Check sanity/understanding. */ 4091 4452 Assert(fPartitions); 4092 4453 Assert((fPartitions & fPartitionsReadOnly) == fPartitionsReadOnly); /* RO should be a sub-set */ 4454 4093 4455 /* 4094 4456 * Allocate on descriptor for each volume up front. 4095 4457 */ 4096 4458 uint32_t const cVolumes = RTDvmMapGetValidVolumes(hVolMgr); 4459 4097 4460 PVDISKRAWPARTDESC paPartDescs = NULL; 4098 4461 int rc = vmdkRawDescAppendPartDesc(pImage, pRawDesc, cVolumes, &paPartDescs); 4099 4462 AssertRCReturn(rc, rc); 4463 4100 4464 /* 4101 4465 * Enumerate the partitions (volumes) on the disk and create descriptors for each of them. 
… … 4120 4484 Assert(cRefs != UINT32_MAX); RT_NOREF(cRefs); 4121 4485 *phVolToRelease = hVol = hVolNext; 4486 4122 4487 /* 4123 4488 * Depending on the fPartitions selector and associated read-only mask, … … 4126 4491 */ 4127 4492 paPartDescs[i].cbData = RTDvmVolumeGetSize(hVol); 4493 4128 4494 uint64_t offVolumeEndIgnored = 0; 4129 4495 rc = RTDvmVolumeQueryRange(hVol, &paPartDescs[i].offStartInVDisk, &offVolumeEndIgnored); … … 4133 4499 pImage->pszFilename, i, pszRawDrive, rc); 4134 4500 Assert(paPartDescs[i].cbData == offVolumeEndIgnored + 1 - paPartDescs[i].offStartInVDisk); 4501 4135 4502 /* Note! The index must match IHostDrivePartition::number. */ 4136 4503 uint32_t idxPartition = RTDvmVolumeGetIndex(hVol, RTDVMVOLIDX_HOST); … … 4141 4508 if (fPartitionsReadOnly & RT_BIT_32(idxPartition)) 4142 4509 paPartDescs[i].uFlags |= VDISKRAW_READONLY; 4510 4143 4511 if (!fRelative) 4144 4512 { … … 4161 4529 */ 4162 4530 paPartDescs[i].offStartInDevice = 0; 4531 4163 4532 #if defined(RT_OS_DARWIN) || defined(RT_OS_FREEBSD) 4164 4533 /* /dev/rdisk1 -> /dev/rdisk1s2 (s=slice) */ … … 4214 4583 #endif 4215 4584 AssertPtrReturn(paPartDescs[i].pszRawDevice, VERR_NO_STR_MEMORY); 4585 4216 4586 rc = vmdkRawDescVerifyPartitionPath(pImage, &paPartDescs[i], idxPartition, pszRawDrive, hRawDrive, cbSector, hVol); 4217 4587 AssertRCReturn(rc, rc); … … 4225 4595 } 4226 4596 } /* for each volume */ 4597 4227 4598 RTDvmVolumeRelease(hVol); 4228 4599 *phVolToRelease = NIL_RTDVMVOLUME; 4600 4229 4601 /* 4230 4602 * Check that we found all the partitions the user selected. … … 4241 4613 pImage->pszFilename, pszRawDrive, szLeft); 4242 4614 } 4615 4243 4616 return VINF_SUCCESS; 4244 4617 } 4618 4245 4619 /** 4246 4620 * Worker for vmdkMakeRawDescriptor that adds partition descriptors with copies … … 4273 4647 pImage->pszFilename, pszRawDrive, rc); 4274 4648 AssertReturn(cLocations > 0 && cLocations < _16M, VERR_INTERNAL_ERROR_5); 4649 4275 4650 /* We can allocate the partition descriptors here to save an intentation level. */ 4276 4651 PVDISKRAWPARTDESC paPartDescs = NULL; 4277 4652 rc = vmdkRawDescAppendPartDesc(pImage, pRawDesc, (uint32_t)cLocations, &paPartDescs); 4278 4653 AssertRCReturn(rc, rc); 4654 4279 4655 /* Allocate the result table and repeat the location table query: */ 4280 4656 PRTDVMTABLELOCATION paLocations = (PRTDVMTABLELOCATION)RTMemAllocZ(sizeof(paLocations[0]) * cLocations); … … 4356 4732 return rc; 4357 4733 } 4734 4358 4735 /** 4359 4736 * Opens the volume manager for the raw drive when in selected-partition mode. … … 4371 4748 { 4372 4749 *phVolMgr = NIL_RTDVM; 4750 4373 4751 RTVFSFILE hVfsFile = NIL_RTVFSFILE; 4374 4752 int rc = RTVfsFileFromRTFile(hRawDrive, RTFILE_O_READ | RTFILE_O_OPEN | RTFILE_O_DENY_NONE, true /*fLeaveOpen*/, &hVfsFile); … … 4377 4755 N_("VMDK: Image path: '%s'. RTVfsFileFromRTFile failed for '%s' handle (%Rrc)"), 4378 4756 pImage->pszFilename, pszRawDrive, rc); 4757 4379 4758 RTDVM hVolMgr = NIL_RTDVM; 4380 4759 rc = RTDvmCreate(&hVolMgr, hVfsFile, cbSector, 0 /*fFlags*/); 4760 4381 4761 RTVfsFileRelease(hVfsFile); 4762 4382 4763 if (RT_FAILURE(rc)) 4383 4764 return vdIfError(pImage->pIfError, rc, RT_SRC_POS, 4384 4765 N_("VMDK: Image path: '%s'. 
Failed to create volume manager instance for '%s' (%Rrc)"), 4385 4766 pImage->pszFilename, pszRawDrive, rc); 4767 4386 4768 rc = RTDvmMapOpen(hVolMgr); 4387 4769 if (RT_SUCCESS(rc)) … … 4394 4776 pImage->pszFilename, pszRawDrive, rc); 4395 4777 } 4778 4396 4779 /** 4397 4780 * Opens the raw drive device and get the sizes for it. … … 4417 4800 N_("VMDK: Image path: '%s'. Failed to open the raw drive '%s' for reading (%Rrc)"), 4418 4801 pImage->pszFilename, pszRawDrive, rc); 4802 4419 4803 /* 4420 4804 * Get the sector size. … … 4465 4849 return rc; 4466 4850 } 4851 4467 4852 /** 4468 4853 * Reads the raw disk configuration, leaving initalization and cleanup to the … … 4481 4866 return vdIfError(pImage->pIfError, VERR_INVALID_PARAMETER, RT_SRC_POS, 4482 4867 N_("VMDK: Image path: '%s'. Getting config interface failed"), pImage->pszFilename); 4868 4483 4869 /* 4484 4870 * RawDrive = path … … 4489 4875 N_("VMDK: Image path: '%s'. Getting 'RawDrive' configuration failed (%Rrc)"), pImage->pszFilename, rc); 4490 4876 AssertPtrReturn(*ppszRawDrive, VERR_INTERNAL_ERROR_3); 4877 4491 4878 /* 4492 4879 * Partitions=n[r][,...] … … 4494 4881 uint32_t const cMaxPartitionBits = sizeof(*pfPartitions) * 8 /* ASSUMES 8 bits per char */; 4495 4882 *pfPartitions = *pfPartitionsReadOnly = 0; 4883 4496 4884 rc = VDCFGQueryStringAlloc(pImgCfg, "Partitions", ppszFreeMe); 4497 4885 if (RT_SUCCESS(rc)) … … 4527 4915 pImage->pszFilename, psz); 4528 4916 } 4917 4529 4918 RTStrFree(*ppszFreeMe); 4530 4919 *ppszFreeMe = NULL; … … 4533 4922 return vdIfError(pImage->pIfError, rc, RT_SRC_POS, 4534 4923 N_("VMDK: Image path: '%s'. Getting 'Partitions' configuration failed (%Rrc)"), pImage->pszFilename, rc); 4924 4535 4925 /* 4536 4926 * BootSector=base64 … … 4552 4942 N_("VMDK: Image path: '%s'. Custom bootsector for '%s' is way too big: %zu bytes, max 4MB"), 4553 4943 pImage->pszFilename, *ppszRawDrive, cbBootSector); 4944 4554 4945 /* Refuse the boot sector if whole-drive. This used to be done quietly, 4555 4946 however, bird disagrees and thinks the user should be told that what … … 4560 4951 N_("VMDK: Image path: '%s'. Custom bootsector for '%s' is not supported for whole-drive configurations, only when selecting partitions"), 4561 4952 pImage->pszFilename, *ppszRawDrive); 4953 4562 4954 *pcbBootSector = (size_t)cbBootSector; 4563 4955 *ppvBootSector = RTMemAlloc((size_t)cbBootSector); … … 4566 4958 N_("VMDK: Image path: '%s'. Failed to allocate %zd bytes for the custom bootsector for '%s'"), 4567 4959 pImage->pszFilename, cbBootSector, *ppszRawDrive); 4960 4568 4961 rc = RTBase64Decode(*ppszFreeMe, *ppvBootSector, cbBootSector, NULL /*pcbActual*/, NULL /*ppszEnd*/); 4569 4962 if (RT_FAILURE(rc)) … … 4571 4964 N_("VMDK: Image path: '%s'. Base64 decoding of the custom boot sector for '%s' failed (%Rrc)"), 4572 4965 pImage->pszFilename, *ppszRawDrive, rc); 4966 4573 4967 RTStrFree(*ppszFreeMe); 4574 4968 *ppszFreeMe = NULL; … … 4577 4971 return vdIfError(pImage->pIfError, rc, RT_SRC_POS, 4578 4972 N_("VMDK: Image path: '%s'. Getting 'BootSector' configuration failed (%Rrc)"), pImage->pszFilename, rc); 4973 4579 4974 /* 4580 4975 * Relative=0/1 … … 4604 4999 *pfRelative = false; 4605 5000 #endif 5001 4606 5002 return VINF_SUCCESS; 4607 5003 } 5004 4608 5005 /** 4609 5006 * Creates a raw drive (nee disk) descriptor. … … 4624 5021 /* Make sure it's NULL. */ 4625 5022 *ppRaw = NULL; 5023 4626 5024 /* 4627 5025 * Read the configuration. 
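The "Partitions=n[r][,...]" value handled in the hunk above is reduced to two bitmasks: one bit per selected partition number, plus a second mask for entries suffixed with 'r' (read-only). A minimal standalone sketch of that parsing, assuming a 32-bit mask; the function name and error handling are illustrative, not the backend's config interface:

    #include <stdint.h>
    #include <stdlib.h>
    #include <errno.h>

    /* Parse "n[r][,...]" into a selected-partitions mask and a read-only mask.
     * Returns 0 on success, -1 on syntax error or out-of-range partition number. */
    static int parsePartitions(const char *psz, uint32_t *pfPartitions, uint32_t *pfReadOnly)
    {
        uint32_t fPartitions = 0;
        uint32_t fReadOnly   = 0;

        while (*psz != '\0')
        {
            char *pszNext = NULL;
            errno = 0;
            unsigned long uPart = strtoul(psz, &pszNext, 10);
            if (pszNext == psz || errno != 0 || uPart >= 32 /* bits in the mask */)
                return -1;

            fPartitions |= (uint32_t)1 << uPart;
            if (*pszNext == 'r')                 /* trailing 'r' marks it read-only */
            {
                fReadOnly |= (uint32_t)1 << uPart;
                pszNext++;
            }

            if (*pszNext == ',')
                pszNext++;
            else if (*pszNext != '\0')
                return -1;                       /* unexpected character */
            psz = pszNext;
        }

        *pfPartitions = fPartitions;
        *pfReadOnly   = fReadOnly;
        return 0;
    }

For example, "Partitions=1,5r" yields a selection mask of 0x22 and a read-only mask of 0x20.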
… … 4675 5073 //pRawDesc->cPartDescs = 0; 4676 5074 //pRawDesc->pPartDescs = NULL; 5075 4677 5076 /* We need to parse the partition map to complete the descriptor: */ 4678 5077 RTDVM hVolMgr = NIL_RTDVM; … … 4686 5085 pRawDesc->enmPartitioningType = enmFormatType == RTDVMFORMATTYPE_MBR 4687 5086 ? VDISKPARTTYPE_MBR : VDISKPARTTYPE_GPT; 5087 4688 5088 /* Add copies of the partition tables: */ 4689 5089 rc = vmdkRawDescDoCopyPartitionTables(pImage, hVolMgr, pRawDesc, pszRawDrive, hRawDrive, … … 4697 5097 fPartitions, fPartitionsReadOnly, fRelative, &hVolRelease); 4698 5098 RTDvmVolumeRelease(hVolRelease); 5099 4699 5100 /* Finally, sort the partition and check consistency (overlaps, etc): */ 4700 5101 if (RT_SUCCESS(rc)) … … 4740 5141 return rc; 4741 5142 } 5143 4742 5144 /** 4743 5145 * Internal: create VMDK images for raw disk/partition access. … … 4748 5150 int rc = VINF_SUCCESS; 4749 5151 PVMDKEXTENT pExtent; 5152 4750 5153 if (pRaw->uFlags & VDISKRAW_DISK) 4751 5154 { … … 4762 5165 if (RT_FAILURE(rc)) 4763 5166 return vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: could not create new file '%s'"), pImage->pszFilename); 5167 4764 5168 /* Set up basename for extent description. Cannot use StrDup. */ 4765 5169 size_t cbBasename = strlen(pRaw->pszRawDisk) + 1; … … 4778 5182 pExtent->enmAccess = (pRaw->uFlags & VDISKRAW_READONLY) ? VMDKACCESS_READONLY : VMDKACCESS_READWRITE; 4779 5183 pExtent->fMetaDirty = false; 5184 4780 5185 /* Open flat image, the raw disk. */ 4781 5186 rc = vmdkFileOpen(pImage, &pExtent->pFile, pExtent->pszBasename, pExtent->pszFullname, … … 4790 5195 * file, write the partition information to a flat extent and 4791 5196 * open all the (flat) raw disk partitions. */ 5197 4792 5198 /* First pass over the partition data areas to determine how many 4793 5199 * extents we need. One data area can require up to 2 extents, as … … 4801 5207 return vdIfError(pImage->pIfError, VERR_INVALID_PARAMETER, RT_SRC_POS, 4802 5208 N_("VMDK: incorrect partition data area ordering set up by the caller in '%s'"), pImage->pszFilename); 5209 4803 5210 if (uStart < pPart->offStartInVDisk) 4804 5211 cExtents++; … … 4809 5216 if (uStart != cbSize) 4810 5217 cExtents++; 5218 4811 5219 rc = vmdkCreateExtents(pImage, cExtents); 4812 5220 if (RT_FAILURE(rc)) 4813 5221 return vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: could not create new extent list in '%s'"), pImage->pszFilename); 5222 4814 5223 /* Create raw partition descriptor file. */ 4815 5224 rc = vmdkFileOpen(pImage, &pImage->pFile, NULL, pImage->pszFilename, … … 4818 5227 if (RT_FAILURE(rc)) 4819 5228 return vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: could not create new file '%s'"), pImage->pszFilename); 5229 4820 5230 /* Create base filename for the partition table extent. */ 4821 5231 /** @todo remove fixed buffer without creating memory leaks. */ … … 4832 5242 pszBaseBase, pszSuff); 4833 5243 RTStrFree(pszBaseBase); 5244 4834 5245 /* Second pass over the partitions, now define all extents. */ 4835 5246 uint64_t uPartOffset = 0; … … 4840 5251 PVDISKRAWPARTDESC pPart = &pRaw->pPartDescs[i]; 4841 5252 pExtent = &pImage->pExtents[cExtents++]; 5253 4842 5254 if (uStart < pPart->offStartInVDisk) 4843 5255 { … … 4853 5265 } 4854 5266 uStart = pPart->offStartInVDisk + pPart->cbData; 5267 4855 5268 if (pPart->pvPartitionData) 4856 5269 { … … 4862 5275 memcpy(pszBasename, pszPartition, cbBasename); 4863 5276 pExtent->pszBasename = pszBasename; 5277 4864 5278 /* Set up full name for partition extent. 
*/ 4865 5279 char *pszDirname = RTStrDup(pImage->pszFilename); … … 4877 5291 pExtent->enmAccess = VMDKACCESS_READWRITE; 4878 5292 pExtent->fMetaDirty = false; 5293 4879 5294 /* Create partition table flat image. */ 4880 5295 rc = vmdkFileOpen(pImage, &pExtent->pFile, pExtent->pszBasename, pExtent->pszFullname, … … 4911 5326 pExtent->enmAccess = (pPart->uFlags & VDISKRAW_READONLY) ? VMDKACCESS_READONLY : VMDKACCESS_READWRITE; 4912 5327 pExtent->fMetaDirty = false; 5328 4913 5329 /* Open flat image, the raw partition. */ 4914 5330 rc = vmdkFileOpen(pImage, &pExtent->pFile, pExtent->pszBasename, pExtent->pszFullname, … … 4943 5359 } 4944 5360 } 5361 4945 5362 rc = vmdkDescBaseSetStr(pImage, &pImage->Descriptor, "createType", 4946 5363 (pRaw->uFlags & VDISKRAW_DISK) ? … … 4950 5367 return rc; 4951 5368 } 5369 4952 5370 /** 4953 5371 * Internal: create a regular (i.e. file-backed) VMDK image. … … 4961 5379 uint64_t cbOffset = 0; 4962 5380 uint64_t cbRemaining = cbSize; 5381 4963 5382 if (uImageFlags & VD_VMDK_IMAGE_FLAGS_SPLIT_2G) 4964 5383 { … … 4972 5391 if (RT_FAILURE(rc)) 4973 5392 return vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: could not create new extent list in '%s'"), pImage->pszFilename); 5393 4974 5394 /* Basename strings needed for constructing the extent names. */ 4975 5395 char *pszBasenameSubstr = RTPathFilename(pImage->pszFilename); 4976 5396 AssertPtr(pszBasenameSubstr); 4977 5397 size_t cbBasenameSubstr = strlen(pszBasenameSubstr) + 1; 5398 4978 5399 /* Create separate descriptor file if necessary. */ 4979 5400 if (cExtents != 1 || (uImageFlags & VD_IMAGE_FLAGS_FIXED)) … … 4987 5408 else 4988 5409 pImage->pFile = NULL; 5410 4989 5411 /* Set up all extents. */ 4990 5412 for (unsigned i = 0; i < cExtents; i++) … … 4992 5414 PVMDKEXTENT pExtent = &pImage->pExtents[i]; 4993 5415 uint64_t cbExtent = cbRemaining; 5416 4994 5417 /* Set up fullname/basename for extent description. Cannot use StrDup 4995 5418 * for basename, as it is not guaranteed that the memory can be freed … … 5048 5471 return VERR_NO_STR_MEMORY; 5049 5472 pExtent->pszFullname = pszFullname; 5473 5050 5474 /* Create file for extent. */ 5051 5475 rc = vmdkFileOpen(pImage, &pExtent->pFile, pExtent->pszBasename, pExtent->pszFullname, … … 5063 5487 return vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: could not set size of new file '%s'"), pExtent->pszFullname); 5064 5488 } 5489 5065 5490 /* Place descriptor file information (where integrated). 
*/ 5066 5491 if (cExtents == 1 && !(uImageFlags & VD_IMAGE_FLAGS_FIXED)) … … 5072 5497 pImage->pDescData = NULL; 5073 5498 } 5499 5074 5500 if (!(uImageFlags & VD_IMAGE_FLAGS_FIXED)) 5075 5501 { … … 5099 5525 pExtent->enmType = VMDKETYPE_FLAT; 5100 5526 } 5527 5101 5528 pExtent->enmAccess = VMDKACCESS_READWRITE; 5102 5529 pExtent->fUncleanShutdown = true; … … 5104 5531 pExtent->uSectorOffset = 0; 5105 5532 pExtent->fMetaDirty = true; 5533 5106 5534 if (!(uImageFlags & VD_IMAGE_FLAGS_FIXED)) 5107 5535 { … … 5115 5543 return vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: could not create new grain directory in '%s'"), pExtent->pszFullname); 5116 5544 } 5545 5117 5546 cbOffset += cbExtent; 5547 5118 5548 if (RT_SUCCESS(rc)) 5119 5549 vdIfProgress(pIfProgress, uPercentStart + cbOffset * uPercentSpan / cbSize); 5550 5120 5551 cbRemaining -= cbExtent; 5121 5552 } 5553 5122 5554 if (pImage->uImageFlags & VD_VMDK_IMAGE_FLAGS_ESX) 5123 5555 { … … 5128 5560 return vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: could not set controller type to lsilogic in '%s'"), pImage->pszFilename); 5129 5561 } 5562 5130 5563 const char *pszDescType = NULL; 5131 5564 if (uImageFlags & VD_IMAGE_FLAGS_FIXED) … … 5153 5586 return rc; 5154 5587 } 5588 5155 5589 /** 5156 5590 * Internal: Create a real stream optimized VMDK using only linear writes. … … 5161 5595 if (RT_FAILURE(rc)) 5162 5596 return vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: could not create new extent list in '%s'"), pImage->pszFilename); 5597 5163 5598 /* Basename strings needed for constructing the extent names. */ 5164 5599 const char *pszBasenameSubstr = RTPathFilename(pImage->pszFilename); 5165 5600 AssertPtr(pszBasenameSubstr); 5166 5601 size_t cbBasenameSubstr = strlen(pszBasenameSubstr) + 1; 5602 5167 5603 /* No separate descriptor file. */ 5168 5604 pImage->pFile = NULL; 5605 5169 5606 /* Set up all extents. */ 5170 5607 PVMDKEXTENT pExtent = &pImage->pExtents[0]; 5608 5171 5609 /* Set up fullname/basename for extent description. Cannot use StrDup 5172 5610 * for basename, as it is not guaranteed that the memory can be freed … … 5178 5616 memcpy(pszBasename, pszBasenameSubstr, cbBasenameSubstr); 5179 5617 pExtent->pszBasename = pszBasename; 5618 5180 5619 char *pszBasedirectory = RTStrDup(pImage->pszFilename); 5181 5620 RTPathStripFilename(pszBasedirectory); … … 5185 5624 return VERR_NO_STR_MEMORY; 5186 5625 pExtent->pszFullname = pszFullname; 5626 5187 5627 /* Create file for extent. Make it write only, no reading allowed. */ 5188 5628 rc = vmdkFileOpen(pImage, &pExtent->pFile, pExtent->pszBasename, pExtent->pszFullname, … … 5192 5632 if (RT_FAILURE(rc)) 5193 5633 return vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: could not create new file '%s'"), pExtent->pszFullname); 5634 5194 5635 /* Place descriptor file information. 
*/ 5195 5636 pExtent->uDescriptorSector = 1; … … 5198 5639 pExtent->pDescData = pImage->pDescData; 5199 5640 pImage->pDescData = NULL; 5641 5200 5642 uint64_t cSectorsPerGDE, cSectorsPerGD; 5201 5643 pExtent->enmType = VMDKETYPE_HOSTED_SPARSE; … … 5207 5649 pExtent->cGDEntries = (pExtent->cSectors + cSectorsPerGDE - 1) / cSectorsPerGDE; 5208 5650 cSectorsPerGD = (pExtent->cGDEntries + (512 / sizeof(uint32_t) - 1)) / (512 / sizeof(uint32_t)); 5651 5209 5652 /* The spec says version is 1 for all VMDKs, but the vast 5210 5653 * majority of streamOptimized VMDKs actually contain … … 5213 5656 pExtent->uCompression = VMDK_COMPRESSION_DEFLATE; 5214 5657 pExtent->fFooter = true; 5658 5215 5659 pExtent->enmAccess = VMDKACCESS_READONLY; 5216 5660 pExtent->fUncleanShutdown = false; … … 5218 5662 pExtent->uSectorOffset = 0; 5219 5663 pExtent->fMetaDirty = true; 5664 5220 5665 /* Create grain directory, without preallocating it straight away. It will 5221 5666 * be constructed on the fly when writing out the data and written when … … 5226 5671 if (RT_FAILURE(rc)) 5227 5672 return vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: could not create new grain directory in '%s'"), pExtent->pszFullname); 5673 5228 5674 rc = vmdkDescBaseSetStr(pImage, &pImage->Descriptor, "createType", 5229 5675 "streamOptimized"); 5230 5676 if (RT_FAILURE(rc)) 5231 5677 rc = vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: could not set the image type in '%s'"), pImage->pszFilename); 5678 5232 5679 return rc; 5233 5680 } 5681 5234 5682 /** 5235 5683 * Initializes the UUID fields in the DDB. … … 5267 5715 rc = vdIfError(pImage->pIfError, rc, RT_SRC_POS, 5268 5716 N_("VMDK: error storing image UUID in new descriptor in '%s'"), pImage->pszFilename); 5717 5269 5718 return rc; 5270 5719 } 5720 5271 5721 /** 5272 5722 * Internal: The actual code for creating any VMDK variant currently in … … 5281 5731 { 5282 5732 pImage->uImageFlags = uImageFlags; 5733 5283 5734 pImage->pIfError = VDIfErrorGet(pImage->pVDIfsDisk); 5284 5735 pImage->pIfIo = VDIfIoIntGet(pImage->pVDIfsImage); 5285 5736 AssertPtrReturn(pImage->pIfIo, VERR_INVALID_PARAMETER); 5737 5286 5738 int rc = vmdkCreateDescriptor(pImage, pImage->pDescData, pImage->cbDescAlloc, 5287 5739 &pImage->Descriptor); … … 5295 5747 if (RT_FAILURE(rc)) 5296 5748 return vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: could get raw descriptor for '%s'"), pImage->pszFilename); 5749 5297 5750 rc = vmdkCreateRawImage(pImage, pRaw, cbSize); 5298 5751 vmdkRawDescFree(pRaw); … … 5310 5763 uPercentSpan * 95 / 100); 5311 5764 } 5765 5312 5766 if (RT_SUCCESS(rc)) 5313 5767 { 5314 5768 vdIfProgress(pIfProgress, uPercentStart + uPercentSpan * 98 / 100); 5769 5315 5770 pImage->cbSize = cbSize; 5771 5316 5772 for (unsigned i = 0; i < pImage->cExtents; i++) 5317 5773 { 5318 5774 PVMDKEXTENT pExtent = &pImage->pExtents[i]; 5775 5319 5776 rc = vmdkDescExtInsert(pImage, &pImage->Descriptor, pExtent->enmAccess, 5320 5777 pExtent->cNominalSectors, pExtent->enmType, … … 5326 5783 } 5327 5784 } 5785 5328 5786 if (RT_SUCCESS(rc)) 5329 5787 vmdkDescExtRemoveDummy(pImage, &pImage->Descriptor); 5788 5330 5789 if ( RT_SUCCESS(rc) 5331 5790 && pPCHSGeometry->cCylinders != 0 … … 5333 5792 && pPCHSGeometry->cSectors != 0) 5334 5793 rc = vmdkDescSetPCHSGeometry(pImage, pPCHSGeometry); 5794 5335 5795 if ( RT_SUCCESS(rc) 5336 5796 && pLCHSGeometry->cCylinders != 0 … … 5338 5798 && pLCHSGeometry->cSectors != 0) 5339 5799 rc = vmdkDescSetLCHSGeometry(pImage, pLCHSGeometry); 5800 5340 5801 
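The stream-optimized creation path above sizes the grain directory with two ceiling divisions: the number of GD entries is the extent's sector count divided by the sectors covered by one GD entry, rounded up, and the directory itself occupies enough 512-byte sectors to hold those 32-bit entries. A small standalone sketch mirroring those visible formulas (names are illustrative):

    #include <stdint.h>

    /* Round-up division helper. */
    static uint64_t divRoundUp(uint64_t a, uint64_t b) { return (a + b - 1) / b; }

    /* Grain directory sizing as done in the creation path:
     *   cGDEntries    = ceil(cSectors / cSectorsPerGDE)
     *   cSectorsPerGD = ceil(cGDEntries / (512 / sizeof(uint32_t)))            */
    static void sizeGrainDirectory(uint64_t cSectors, uint64_t cSectorsPerGDE,
                                   uint64_t *pcGDEntries, uint64_t *pcSectorsPerGD)
    {
        uint64_t const cEntriesPerSector = 512 / sizeof(uint32_t);  /* 128 entries per 512-byte sector */
        *pcGDEntries    = divRoundUp(cSectors, cSectorsPerGDE);
        *pcSectorsPerGD = divRoundUp(*pcGDEntries, cEntriesPerSector);
    }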
pImage->LCHSGeometry = *pLCHSGeometry; 5341 5802 pImage->PCHSGeometry = *pPCHSGeometry; 5803 5342 5804 pImage->ImageUuid = *pUuid; 5343 5805 RTUuidClear(&pImage->ParentUuid); 5344 5806 RTUuidClear(&pImage->ModificationUuid); 5345 5807 RTUuidClear(&pImage->ParentModificationUuid); 5808 5346 5809 if (RT_SUCCESS(rc)) 5347 5810 rc = vmdkCreateImageDdbUuidsInit(pImage); 5811 5348 5812 if (RT_SUCCESS(rc)) 5349 5813 rc = vmdkAllocateGrainTableCache(pImage); 5814 5350 5815 if (RT_SUCCESS(rc)) 5351 5816 { … … 5354 5819 rc = vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: cannot set image comment in '%s'"), pImage->pszFilename); 5355 5820 } 5821 5356 5822 if (RT_SUCCESS(rc)) 5357 5823 { 5358 5824 vdIfProgress(pIfProgress, uPercentStart + uPercentSpan * 99 / 100); 5825 5359 5826 if (pImage->uImageFlags & VD_VMDK_IMAGE_FLAGS_STREAM_OPTIMIZED) 5360 5827 { … … 5381 5848 else 5382 5849 rc = vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: could not create new descriptor in '%s'"), pImage->pszFilename); 5850 5851 5383 5852 if (RT_SUCCESS(rc)) 5384 5853 { … … 5386 5855 pImage->RegionList.fFlags = 0; 5387 5856 pImage->RegionList.cRegions = 1; 5857 5388 5858 pRegion->offRegion = 0; /* Disk start. */ 5389 5859 pRegion->cbBlock = 512; … … 5393 5863 pRegion->cbMetadata = 0; 5394 5864 pRegion->cRegionBlocksOrBytes = pImage->cbSize; 5865 5395 5866 vdIfProgress(pIfProgress, uPercentStart + uPercentSpan); 5396 5867 } … … 5399 5870 return rc; 5400 5871 } 5872 5401 5873 /** 5402 5874 * Internal: Update image comment. … … 5411 5883 return VERR_NO_MEMORY; 5412 5884 } 5885 5413 5886 int rc = vmdkDescDDBSetStr(pImage, &pImage->Descriptor, 5414 5887 "ddb.comment", pszCommentEncoded); … … 5419 5892 return VINF_SUCCESS; 5420 5893 } 5894 5421 5895 /** 5422 5896 * Internal. Clear the grain table buffer for real stream optimized writing. … … 5429 5903 VMDK_GT_CACHELINE_SIZE * sizeof(uint32_t)); 5430 5904 } 5905 5431 5906 /** 5432 5907 * Internal. Flush the grain table buffer for real stream optimized writing. … … 5437 5912 int rc = VINF_SUCCESS; 5438 5913 uint32_t cCacheLines = RT_ALIGN(pExtent->cGTEntries, VMDK_GT_CACHELINE_SIZE) / VMDK_GT_CACHELINE_SIZE; 5914 5439 5915 /* VMware does not write out completely empty grain tables in the case 5440 5916 * of streamOptimized images, which according to my interpretation of … … 5458 5934 if (fAllZero) 5459 5935 return VINF_SUCCESS; 5936 5460 5937 uint64_t uFileOffset = pExtent->uAppendPosition; 5461 5938 if (!uFileOffset) … … 5463 5940 /* Align to sector, as the previous write could have been any size. */ 5464 5941 uFileOffset = RT_ALIGN_64(uFileOffset, 512); 5942 5465 5943 /* Grain table marker. */ 5466 5944 uint8_t aMarker[512]; … … 5473 5951 AssertRC(rc); 5474 5952 uFileOffset += 512; 5953 5475 5954 if (!pExtent->pGD || pExtent->pGD[uGDEntry]) 5476 5955 return VERR_INTERNAL_ERROR; 5956 5477 5957 pExtent->pGD[uGDEntry] = VMDK_BYTE2SECTOR(uFileOffset); 5958 5478 5959 for (uint32_t i = 0; i < cCacheLines; i++) 5479 5960 { … … 5483 5964 for (uint32_t j = 0; j < VMDK_GT_CACHELINE_SIZE; j++, pGTTmp++) 5484 5965 *pGTTmp = RT_H2LE_U32(*pGTTmp); 5966 5485 5967 rc = vdIfIoIntFileWriteSync(pImage->pIfIo, pExtent->pFile->pStorage, uFileOffset, 5486 5968 &pImage->pGTCache->aGTCache[i].aGTData[0], … … 5494 5976 return rc; 5495 5977 } 5978 5496 5979 /** 5497 5980 * Internal. Free all allocated space for representing an image, and optionally … … 5501 5984 { 5502 5985 int rc = VINF_SUCCESS; 5986 5503 5987 /* Freeing a never allocated image (e.g. 
because the open failed) is 5504 5988 * not signalled as an error. After all nothing bad happens. */ … … 5526 6010 pImage->pExtents[i].fMetaDirty = true; 5527 6011 } 6012 5528 6013 /* From now on it's not safe to append any more data. */ 5529 6014 pImage->pExtents[i].uAppendPosition = 0; … … 5531 6016 } 5532 6017 } 6018 5533 6019 if (pImage->uImageFlags & VD_VMDK_IMAGE_FLAGS_STREAM_OPTIMIZED) 5534 6020 { … … 5549 6035 AssertRC(rc); 5550 6036 } 6037 5551 6038 uint64_t uFileOffset = pExtent->uAppendPosition; 5552 6039 if (!uFileOffset) 5553 6040 return VERR_INTERNAL_ERROR; 5554 6041 uFileOffset = RT_ALIGN_64(uFileOffset, 512); 6042 5555 6043 /* From now on it's not safe to append any more data. */ 5556 6044 pExtent->uAppendPosition = 0; 6045 5557 6046 /* Grain directory marker. */ 5558 6047 uint8_t aMarker[512]; … … 5565 6054 AssertRC(rc); 5566 6055 uFileOffset += 512; 6056 5567 6057 /* Write grain directory in little endian style. The array will 5568 6058 * not be used after this, so convert in place. */ … … 5574 6064 pExtent->cGDEntries * sizeof(uint32_t)); 5575 6065 AssertRC(rc); 6066 5576 6067 pExtent->uSectorGD = VMDK_BYTE2SECTOR(uFileOffset); 5577 6068 pExtent->uSectorRGD = VMDK_BYTE2SECTOR(uFileOffset); … … 5579 6070 + pExtent->cGDEntries * sizeof(uint32_t), 5580 6071 512); 6072 5581 6073 /* Footer marker. */ 5582 6074 memset(pMarker, '\0', sizeof(aMarker)); … … 5586 6078 uFileOffset, aMarker, sizeof(aMarker)); 5587 6079 AssertRC(rc); 6080 5588 6081 uFileOffset += 512; 5589 6082 rc = vmdkWriteMetaSparseExtent(pImage, pExtent, uFileOffset, NULL); 5590 6083 AssertRC(rc); 6084 5591 6085 uFileOffset += 512; 5592 6086 /* End-of-stream marker. */ … … 5599 6093 else if (!fDelete && fFlush) 5600 6094 vmdkFlushImage(pImage, NULL); 6095 5601 6096 if (pImage->pExtents != NULL) 5602 6097 { … … 5620 6115 if (RT_SUCCESS(rc)) 5621 6116 rc = rc2; /* Propogate any error when closing the file. */ 6117 5622 6118 if (pImage->pGTCache) 5623 6119 { … … 5631 6127 } 5632 6128 } 6129 5633 6130 LogFlowFunc(("returns %Rrc\n", rc)); 5634 6131 return rc; 5635 6132 } 6133 5636 6134 /** 5637 6135 * Internal. Flush image data (and metadata) to disk. … … 5641 6139 PVMDKEXTENT pExtent; 5642 6140 int rc = VINF_SUCCESS; 6141 5643 6142 /* Update descriptor if changed. */ 5644 6143 if (pImage->Descriptor.fDirty) 5645 6144 rc = vmdkWriteDescriptor(pImage, pIoCtx); 6145 5646 6146 if (RT_SUCCESS(rc)) 5647 6147 { … … 5679 6179 } 5680 6180 } 6181 5681 6182 if (RT_FAILURE(rc)) 5682 6183 break; 6184 5683 6185 switch (pExtent->enmType) 5684 6186 { … … 5702 6204 } 5703 6205 } 6206 5704 6207 return rc; 5705 6208 } 6209 5706 6210 /** 5707 6211 * Internal. Find extent corresponding to the sector number in the disk. … … 5712 6216 PVMDKEXTENT pExtent = NULL; 5713 6217 int rc = VINF_SUCCESS; 6218 5714 6219 for (unsigned i = 0; i < pImage->cExtents; i++) 5715 6220 { … … 5722 6227 offSector -= pImage->pExtents[i].cNominalSectors; 5723 6228 } 6229 5724 6230 if (pExtent) 5725 6231 *ppExtent = pExtent; 5726 6232 else 5727 6233 rc = VERR_IO_SECTOR_NOT_FOUND; 6234 5728 6235 return rc; 5729 6236 } 6237 5730 6238 /** 5731 6239 * Internal. Hash function for placing the grain table hash entries. … … 5738 6246 return (uSector + uExtent) % pCache->cEntries; 5739 6247 } 6248 5740 6249 /** 5741 6250 * Internal. 
Get sector number in the extent file from the relative sector … … 5752 6261 uint32_t aGTDataTmp[VMDK_GT_CACHELINE_SIZE]; 5753 6262 int rc; 6263 5754 6264 /* For newly created and readonly/sequentially opened streamOptimized 5755 6265 * images this must be a no-op, as the grain directory is not there. */ … … 5763 6273 return VINF_SUCCESS; 5764 6274 } 6275 5765 6276 uGDIndex = uSector / pExtent->cSectorsPerGDE; 5766 6277 if (uGDIndex >= pExtent->cGDEntries) … … 5774 6285 return VINF_SUCCESS; 5775 6286 } 6287 5776 6288 uGTBlock = uSector / (pExtent->cSectorsPerGrain * VMDK_GT_CACHELINE_SIZE); 5777 6289 uGTHash = vmdkGTCacheHash(pCache, uGTBlock, pExtent->uExtent); … … 5802 6314 return VINF_SUCCESS; 5803 6315 } 6316 5804 6317 /** 5805 6318 * Internal. Writes the grain and also if necessary the grain tables. … … 5816 6329 const void *pData; 5817 6330 int rc; 6331 5818 6332 /* Very strict requirements: always write at least one full grain, with 5819 6333 * proper alignment. Everything else would require reading of already … … 5828 6342 || uSector + VMDK_BYTE2SECTOR(cbWrite) > pExtent->cNominalSectors) 5829 6343 return VERR_INVALID_PARAMETER; 6344 5830 6345 /* Clip write range to at most the rest of the grain. */ 5831 6346 cbWrite = RT_MIN(cbWrite, VMDK_SECTOR2BYTE(pExtent->cSectorsPerGrain - uSector % pExtent->cSectorsPerGrain)); 6347 5832 6348 /* Do not allow to go back. */ 5833 6349 uGrain = uSector / pExtent->cSectorsPerGrain; … … 5838 6354 if (uGrain < pExtent->uLastGrainAccess) 5839 6355 return VERR_VD_VMDK_INVALID_WRITE; 6356 5840 6357 /* Zero byte write optimization. Since we don't tell VBoxHDD that we need 5841 6358 * to allocate something, we also need to detect the situation ourself. */ … … 5843 6360 && vdIfIoIntIoCtxIsZero(pImage->pIfIo, pIoCtx, cbWrite, true /* fAdvance */)) 5844 6361 return VINF_SUCCESS; 6362 5845 6363 if (uGDEntry != uLastGDEntry) 5846 6364 { … … 5856 6374 } 5857 6375 } 6376 5858 6377 uint64_t uFileOffset; 5859 6378 uFileOffset = pExtent->uAppendPosition; … … 5862 6381 /* Align to sector, as the previous write could have been any size. */ 5863 6382 uFileOffset = RT_ALIGN_64(uFileOffset, 512); 6383 5864 6384 /* Paranoia check: extent type, grain table buffer presence and 5865 6385 * grain table buffer space. Also grain table entry must be clear. */ … … 5869 6389 || pImage->pGTCache->aGTCache[uCacheLine].aGTData[uCacheEntry]) 5870 6390 return VERR_INTERNAL_ERROR; 6391 5871 6392 /* Update grain table entry. */ 5872 6393 pImage->pGTCache->aGTCache[uCacheLine].aGTData[uCacheEntry] = VMDK_BYTE2SECTOR(uFileOffset); 6394 5873 6395 if (cbWrite != VMDK_SECTOR2BYTE(pExtent->cSectorsPerGrain)) 5874 6396 { … … 5883 6405 unsigned cSegments = 1; 5884 6406 size_t cbSeg = 0; 6407 5885 6408 cbSeg = vdIfIoIntIoCtxSegArrayCreate(pImage->pIfIo, pIoCtx, &Segment, 5886 6409 &cSegments, VMDK_SECTOR2BYTE(pExtent->cSectorsPerGrain)); … … 5899 6422 pExtent->uLastGrainAccess = uGrain; 5900 6423 pExtent->uAppendPosition += cbGrain; 6424 5901 6425 return rc; 5902 6426 } 6427 5903 6428 /** 5904 6429 * Internal: Updates the grain table during grain allocation. 
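vmdkGetSector in the hunk above maps an extent-relative sector to a grain-directory index and then to a grain-table entry. A minimal standalone sketch of that address arithmetic, assuming the usual layout in which one grain-directory entry covers cGTEntries consecutive grains; the structure and names are illustrative, not the backend's:

    #include <stdint.h>

    typedef struct GRAINADDR
    {
        uint64_t uGDIndex;    /* which grain-directory entry (i.e. which grain table) */
        uint64_t uGTIndex;    /* entry within that grain table */
        uint64_t offInGrain;  /* sector offset inside the grain */
    } GRAINADDR;

    /* Map an extent-relative sector to grain-directory/grain-table coordinates. */
    static GRAINADDR grainAddrFromSector(uint64_t uSector,
                                         uint64_t cSectorsPerGrain,
                                         uint64_t cGTEntries)
    {
        /* One GD entry (one grain table) spans this many sectors. */
        uint64_t const cSectorsPerGDE = cGTEntries * cSectorsPerGrain;

        GRAINADDR Addr;
        Addr.uGDIndex   = uSector / cSectorsPerGDE;
        Addr.uGTIndex   = (uSector % cSectorsPerGDE) / cSectorsPerGrain;
        Addr.offInGrain = uSector % cSectorsPerGrain;
        return Addr;
    }

With hypothetical values of 128 sectors per grain and 512 grain-table entries, sector 70000 maps to grain-directory entry 1, grain-table entry 34, at sector offset 112 inside the grain.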
… … 5914 6439 uint64_t uSector = pGrainAlloc->uSector; 5915 6440 PVMDKGTCACHEENTRY pGTCacheEntry; 6441 5916 6442 LogFlowFunc(("pImage=%#p pExtent=%#p pCache=%#p pIoCtx=%#p pGrainAlloc=%#p\n", 5917 6443 pImage, pExtent, pCache, pIoCtx, pGrainAlloc)); 6444 5918 6445 uGTSector = pGrainAlloc->uGTSector; 5919 6446 uRGTSector = pGrainAlloc->uRGTSector; 5920 6447 LogFlow(("uGTSector=%llu uRGTSector=%llu\n", uGTSector, uRGTSector)); 6448 5921 6449 /* Update the grain table (and the cache). */ 5922 6450 uGTBlock = uSector / (pExtent->cSectorsPerGrain * VMDK_GT_CACHELINE_SIZE); … … 5981 6509 return vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: cannot write updated backup grain table in '%s'"), pExtent->pszFullname); 5982 6510 } 6511 5983 6512 LogFlowFunc(("leaving rc=%Rrc\n", rc)); 5984 6513 return rc; 5985 6514 } 6515 5986 6516 /** 5987 6517 * Internal - complete the grain allocation by updating disk grain table if required. … … 5993 6523 PVMDKIMAGE pImage = (PVMDKIMAGE)pBackendData; 5994 6524 PVMDKGRAINALLOCASYNC pGrainAlloc = (PVMDKGRAINALLOCASYNC)pvUser; 6525 5995 6526 LogFlowFunc(("pBackendData=%#p pIoCtx=%#p pvUser=%#p rcReq=%Rrc\n", 5996 6527 pBackendData, pIoCtx, pvUser, rcReq)); 6528 5997 6529 pGrainAlloc->cIoXfersPending--; 5998 6530 if (!pGrainAlloc->cIoXfersPending && pGrainAlloc->fGTUpdateNeeded) 5999 6531 rc = vmdkAllocGrainGTUpdate(pImage, pGrainAlloc->pExtent, pIoCtx, pGrainAlloc); 6532 6000 6533 if (!pGrainAlloc->cIoXfersPending) 6001 6534 { … … 6003 6536 RTMemFree(pGrainAlloc); 6004 6537 } 6538 6005 6539 LogFlowFunc(("Leaving rc=%Rrc\n", rc)); 6006 6540 return rc; 6007 6541 } 6542 6008 6543 /** 6009 6544 * Internal. Allocates a new grain table (if necessary). … … 6017 6552 PVMDKGRAINALLOCASYNC pGrainAlloc = NULL; 6018 6553 int rc; 6554 6019 6555 LogFlowFunc(("pCache=%#p pExtent=%#p pIoCtx=%#p uSector=%llu cbWrite=%llu\n", 6020 6556 pCache, pExtent, pIoCtx, uSector, cbWrite)); 6557 6021 6558 pGrainAlloc = (PVMDKGRAINALLOCASYNC)RTMemAllocZ(sizeof(VMDKGRAINALLOCASYNC)); 6022 6559 if (!pGrainAlloc) 6023 6560 return VERR_NO_MEMORY; 6561 6024 6562 pGrainAlloc->pExtent = pExtent; 6025 6563 pGrainAlloc->uSector = uSector; 6564 6026 6565 uGDIndex = uSector / pExtent->cSectorsPerGDE; 6027 6566 if (uGDIndex >= pExtent->cGDEntries) … … 6038 6577 { 6039 6578 LogFlow(("Allocating new grain table\n")); 6579 6040 6580 /* There is no grain table referenced by this grain directory 6041 6581 * entry. So there is absolutely no data in this area. Allocate … … 6048 6588 } 6049 6589 Assert(!(uFileOffset % 512)); 6590 6050 6591 uFileOffset = RT_ALIGN_64(uFileOffset, 512); 6051 6592 uGTSector = VMDK_BYTE2SECTOR(uFileOffset); 6593 6052 6594 /* Normally the grain table is preallocated for hosted sparse extents 6053 6595 * that support more than 32 bit sector numbers. So this shouldn't … … 6058 6600 return VERR_VD_VMDK_INVALID_HEADER; 6059 6601 } 6602 6060 6603 /* Write grain table by writing the required number of grain table 6061 6604 * cache chunks. 
Allocate memory dynamically here or we flood the … … 6063 6606 size_t cbGTDataTmp = pExtent->cGTEntries * sizeof(uint32_t); 6064 6607 uint32_t *paGTDataTmp = (uint32_t *)RTMemTmpAllocZ(cbGTDataTmp); 6608 6065 6609 if (!paGTDataTmp) 6066 6610 { … … 6068 6612 return VERR_NO_MEMORY; 6069 6613 } 6614 6070 6615 memset(paGTDataTmp, '\0', cbGTDataTmp); 6071 6616 rc = vdIfIoIntFileWriteMeta(pImage->pIfIo, pExtent->pFile->pStorage, … … 6083 6628 pExtent->uAppendPosition = RT_ALIGN_64( pExtent->uAppendPosition 6084 6629 + cbGTDataTmp, 512); 6630 6085 6631 if (pExtent->pRGD) 6086 6632 { … … 6091 6637 Assert(!(uFileOffset % 512)); 6092 6638 uRGTSector = VMDK_BYTE2SECTOR(uFileOffset); 6639 6093 6640 /* Normally the redundant grain table is preallocated for hosted 6094 6641 * sparse extents that support more than 32 bit sector numbers. So … … 6099 6646 return VERR_VD_VMDK_INVALID_HEADER; 6100 6647 } 6648 6101 6649 /* Write grain table by writing the required number of grain table 6102 6650 * cache chunks. Allocate memory dynamically here or we flood the … … 6113 6661 return vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: cannot write backup grain table allocation in '%s'"), pExtent->pszFullname); 6114 6662 } 6663 6115 6664 pExtent->uAppendPosition = pExtent->uAppendPosition + cbGTDataTmp; 6116 6665 } 6666 6117 6667 RTMemTmpFree(paGTDataTmp); 6668 6118 6669 /* Update the grain directory on disk (doing it before writing the 6119 6670 * grain table will result in a garbled extent if the operation is … … 6141 6692 return vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: cannot write backup grain directory entry in '%s'"), pExtent->pszFullname); 6142 6693 } 6694 6143 6695 /* As the final step update the in-memory copy of the GDs. */ 6144 6696 pExtent->pGD[uGDIndex] = uGTSector; … … 6146 6698 pExtent->pRGD[uGDIndex] = uRGTSector; 6147 6699 } 6700 6148 6701 LogFlow(("uGTSector=%llu uRGTSector=%llu\n", uGTSector, uRGTSector)); 6149 6702 pGrainAlloc->uGTSector = uGTSector; 6150 6703 pGrainAlloc->uRGTSector = uRGTSector; 6704 6151 6705 uFileOffset = pExtent->uAppendPosition; 6152 6706 if (!uFileOffset) 6153 6707 return VERR_INTERNAL_ERROR; 6154 6708 Assert(!(uFileOffset % 512)); 6709 6155 6710 pGrainAlloc->uGrainOffset = uFileOffset; 6711 6156 6712 if (pImage->uImageFlags & VD_VMDK_IMAGE_FLAGS_STREAM_OPTIMIZED) 6157 6713 { … … 6159 6715 ("Accesses to stream optimized images must be synchronous\n"), 6160 6716 VERR_INVALID_STATE); 6717 6161 6718 if (cbWrite != VMDK_SECTOR2BYTE(pExtent->cSectorsPerGrain)) 6162 6719 return vdIfError(pImage->pIfError, VERR_INTERNAL_ERROR, RT_SRC_POS, N_("VMDK: not enough data for a compressed data block in '%s'"), pExtent->pszFullname); 6720 6163 6721 /* Invalidate cache, just in case some code incorrectly allows mixing 6164 6722 * of reads and writes. Normally shouldn't be needed. */ 6165 6723 pExtent->uGrainSectorAbs = 0; 6724 6166 6725 /* Write compressed data block and the markers. 
*/ 6167 6726 uint32_t cbGrain = 0; … … 6169 6728 RTSGSEG Segment; 6170 6729 unsigned cSegments = 1; 6730 6171 6731 cbSeg = vdIfIoIntIoCtxSegArrayCreate(pImage->pIfIo, pIoCtx, &Segment, 6172 6732 &cSegments, cbWrite); 6173 6733 Assert(cbSeg == cbWrite); 6734 6174 6735 rc = vmdkFileDeflateSync(pImage, pExtent, uFileOffset, 6175 6736 Segment.pvSeg, cbWrite, uSector, &cbGrain); … … 6192 6753 else if (RT_FAILURE(rc)) 6193 6754 return vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: cannot write allocated data block in '%s'"), pExtent->pszFullname); 6755 6194 6756 pExtent->uAppendPosition += cbWrite; 6195 6757 } 6758 6196 6759 rc = vmdkAllocGrainGTUpdate(pImage, pExtent, pIoCtx, pGrainAlloc); 6760 6197 6761 if (!pGrainAlloc->cIoXfersPending) 6198 6762 { … … 6200 6764 RTMemFree(pGrainAlloc); 6201 6765 } 6766 6202 6767 LogFlowFunc(("leaving rc=%Rrc\n", rc)); 6768 6203 6769 return rc; 6204 6770 } 6771 6205 6772 /** 6206 6773 * Internal. Reads the contents by sequentially going over the compressed … … 6212 6779 { 6213 6780 int rc; 6781 6214 6782 LogFlowFunc(("pImage=%#p pExtent=%#p uSector=%llu pIoCtx=%#p cbRead=%llu\n", 6215 6783 pImage, pExtent, uSector, pIoCtx, cbRead)); 6784 6216 6785 AssertMsgReturn(vdIfIoIntIoCtxIsSynchronous(pImage->pIfIo, pIoCtx), 6217 6786 ("Async I/O not supported for sequential stream optimized images\n"), 6218 6787 VERR_INVALID_STATE); 6788 6219 6789 /* Do not allow to go back. */ 6220 6790 uint32_t uGrain = uSector / pExtent->cSectorsPerGrain; … … 6222 6792 return VERR_VD_VMDK_INVALID_STATE; 6223 6793 pExtent->uLastGrainAccess = uGrain; 6794 6224 6795 /* After a previous error do not attempt to recover, as it would need 6225 6796 * seeking (in the general case backwards which is forbidden). */ 6226 6797 if (!pExtent->uGrainSectorAbs) 6227 6798 return VERR_VD_VMDK_INVALID_STATE; 6799 6228 6800 /* Check if we need to read something from the image or if what we have 6229 6801 * in the buffer is good to fulfill the request. */ … … 6232 6804 uint32_t uGrainSectorAbs = pExtent->uGrainSectorAbs 6233 6805 + VMDK_BYTE2SECTOR(pExtent->cbGrainStreamRead); 6806 6234 6807 /* Get the marker from the next data block - and skip everything which 6235 6808 * is not a compressed grain. If it's a compressed grain which is for … … 6246 6819 Marker.uSector = RT_LE2H_U64(Marker.uSector); 6247 6820 Marker.cbSize = RT_LE2H_U32(Marker.cbSize); 6821 6248 6822 if (Marker.cbSize == 0) 6249 6823 { … … 6324 6898 } 6325 6899 } while (Marker.uType != VMDK_MARKER_EOS); 6900 6326 6901 pExtent->uGrainSectorAbs = uGrainSectorAbs; 6902 6327 6903 if (!pExtent->cbGrainStreamRead && Marker.uType == VMDK_MARKER_EOS) 6328 6904 { … … 6333 6909 } 6334 6910 } 6911 6335 6912 if (pExtent->uGrain > uSector / pExtent->cSectorsPerGrain) 6336 6913 { … … 6340 6917 return VERR_VD_BLOCK_FREE; 6341 6918 } 6919 6342 6920 uint32_t uSectorInGrain = uSector % pExtent->cSectorsPerGrain; 6343 6921 vdIfIoIntIoCtxCopyTo(pImage->pIfIo, pIoCtx, … … 6347 6925 return VINF_SUCCESS; 6348 6926 } 6927 6349 6928 /** 6350 6929 * Replaces a fragment of a string with the specified string. 
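The sequential read path above walks a streamOptimized extent marker by marker: a 512-byte block whose size field is non-zero is a compressed grain, anything else is a metadata marker whose type (grain table, grain directory, footer, end-of-stream) decides how much to skip. A minimal standalone sketch of decoding one such block, assuming the usual field order of value, size, then type; the struct and helpers are illustrative:

    #include <stdint.h>

    /* Little-endian helpers so the sketch is self-contained. */
    static uint64_t getU64LE(const uint8_t *pb)
    {
        uint64_t u = 0;
        for (int i = 7; i >= 0; i--)
            u = (u << 8) | pb[i];
        return u;
    }
    static uint32_t getU32LE(const uint8_t *pb)
    {
        return (uint32_t)pb[0] | ((uint32_t)pb[1] << 8) | ((uint32_t)pb[2] << 16) | ((uint32_t)pb[3] << 24);
    }

    /* Classify one 512-byte block at the current stream position:
     *   cbSize != 0 -> compressed grain (value = start sector, size = compressed bytes)
     *   cbSize == 0 -> metadata marker, the type field says what follows         */
    typedef struct STREAMMARKER
    {
        uint64_t uSector;   /* grain start sector, or type-specific value */
        uint32_t cbSize;    /* compressed payload size, 0 for metadata markers */
        uint32_t uType;     /* only meaningful when cbSize == 0 */
        int      fGrain;    /* non-zero if this is a compressed grain */
    } STREAMMARKER;

    static void parseStreamMarker(const uint8_t *pbBlock /* 512 bytes */, STREAMMARKER *pMarker)
    {
        pMarker->uSector = getU64LE(pbBlock);
        pMarker->cbSize  = getU32LE(pbBlock + 8);
        pMarker->fGrain  = pMarker->cbSize != 0;
        pMarker->uType   = pMarker->fGrain ? UINT32_MAX : getU32LE(pbBlock + 12);
    }

The loop in the hunk keeps consuming markers until it reaches the grain covering the requested sector or hits the end-of-stream marker, at which point the block is reported as free.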
… … 6415 6994 return pszNewStr; 6416 6995 } 6996 6997 6417 6998 /** @copydoc VDIMAGEBACKEND::pfnProbe */ 6418 6999 static DECLCALLBACK(int) vmdkProbe(const char *pszFilename, PVDINTERFACE pVDIfsDisk, … … 6442 7023 vmdkFreeImage(pImage, false, false /*fFlush*/); 6443 7024 RTMemFree(pImage); 7025 6444 7026 if (RT_SUCCESS(rc)) 6445 7027 *penmType = VDTYPE_HDD; … … 6447 7029 else 6448 7030 rc = VERR_NO_MEMORY; 7031 6449 7032 LogFlowFunc(("returns %Rrc\n", rc)); 6450 7033 return rc; 6451 7034 } 7035 6452 7036 /** @copydoc VDIMAGEBACKEND::pfnOpen */ 6453 7037 static DECLCALLBACK(int) vmdkOpen(const char *pszFilename, unsigned uOpenFlags, … … 6456 7040 { 6457 7041 RT_NOREF1(enmType); /**< @todo r=klaus make use of the type info. */ 7042 6458 7043 LogFlowFunc(("pszFilename=\"%s\" uOpenFlags=%#x pVDIfsDisk=%#p pVDIfsImage=%#p enmType=%u ppBackendData=%#p\n", 6459 7044 pszFilename, uOpenFlags, pVDIfsDisk, pVDIfsImage, enmType, ppBackendData)); 6460 7045 int rc; 7046 6461 7047 /* Check open flags. All valid flags are supported. */ 6462 7048 AssertReturn(!(uOpenFlags & ~VD_OPEN_FLAGS_MASK), VERR_INVALID_PARAMETER); 6463 7049 AssertPtrReturn(pszFilename, VERR_INVALID_POINTER); 6464 7050 AssertReturn(*pszFilename != '\0', VERR_INVALID_PARAMETER); 7051 6465 7052 6466 7053 PVMDKIMAGE pImage = (PVMDKIMAGE)RTMemAllocZ(RT_UOFFSETOF(VMDKIMAGE, RegionList.aRegions[1])); … … 6475 7062 pImage->pVDIfsDisk = pVDIfsDisk; 6476 7063 pImage->pVDIfsImage = pVDIfsImage; 7064 6477 7065 rc = vmdkOpenImage(pImage, uOpenFlags); 6478 7066 if (RT_SUCCESS(rc)) … … 6483 7071 else 6484 7072 rc = VERR_NO_MEMORY; 7073 6485 7074 LogFlowFunc(("returns %Rrc (pBackendData=%#p)\n", rc, *ppBackendData)); 6486 7075 return rc; 6487 7076 } 7077 6488 7078 /** @copydoc VDIMAGEBACKEND::pfnCreate */ 6489 7079 static DECLCALLBACK(int) vmdkCreate(const char *pszFilename, uint64_t cbSize, … … 6499 7089 pszFilename, cbSize, uImageFlags, pszComment, pPCHSGeometry, pLCHSGeometry, pUuid, uOpenFlags, uPercentStart, uPercentSpan, pVDIfsDisk, pVDIfsImage, pVDIfsOperation, enmType, ppBackendData)); 6500 7090 int rc; 7091 6501 7092 /* Check the VD container type and image flags. */ 6502 7093 if ( enmType != VDTYPE_HDD 6503 7094 || (uImageFlags & ~VD_VMDK_IMAGE_FLAGS_MASK) != 0) 6504 7095 return VERR_VD_INVALID_TYPE; 7096 6505 7097 /* Check size. Maximum 256TB-64K for sparse images, otherwise unlimited. */ 6506 7098 if ( !(uImageFlags & VD_VMDK_IMAGE_FLAGS_RAWDISK) … … 6508 7100 || (!(uImageFlags & VD_IMAGE_FLAGS_FIXED) && cbSize >= _1T * 256 - _64K))) 6509 7101 return VERR_VD_INVALID_SIZE; 7102 6510 7103 /* Check image flags for invalid combinations. */ 6511 7104 if ( (uImageFlags & VD_VMDK_IMAGE_FLAGS_STREAM_OPTIMIZED) 6512 7105 && (uImageFlags & ~(VD_VMDK_IMAGE_FLAGS_STREAM_OPTIMIZED | VD_IMAGE_FLAGS_DIFF))) 6513 7106 return VERR_INVALID_PARAMETER; 7107 6514 7108 /* Check open flags. All valid flags are supported. 
*/ 6515 7109 AssertReturn(!(uOpenFlags & ~VD_OPEN_FLAGS_MASK), VERR_INVALID_PARAMETER); … … 6521 7115 && !(uImageFlags & VD_IMAGE_FLAGS_FIXED)), 6522 7116 VERR_INVALID_PARAMETER); 7117 6523 7118 PVMDKIMAGE pImage = (PVMDKIMAGE)RTMemAllocZ(RT_UOFFSETOF(VMDKIMAGE, RegionList.aRegions[1])); 6524 7119 if (RT_LIKELY(pImage)) 6525 7120 { 6526 7121 PVDINTERFACEPROGRESS pIfProgress = VDIfProgressGet(pVDIfsOperation); 7122 6527 7123 pImage->pszFilename = pszFilename; 6528 7124 pImage->pFile = NULL; … … 6555 7151 rc = vmdkOpenImage(pImage, uOpenFlags); 6556 7152 } 7153 6557 7154 if (RT_SUCCESS(rc)) 6558 7155 *ppBackendData = pImage; 6559 7156 } 7157 6560 7158 if (RT_FAILURE(rc)) 6561 7159 RTMemFree(pImage->pDescData); … … 6563 7161 else 6564 7162 rc = VERR_NO_MEMORY; 7163 6565 7164 if (RT_FAILURE(rc)) 6566 7165 RTMemFree(pImage); … … 6568 7167 else 6569 7168 rc = VERR_NO_MEMORY; 7169 6570 7170 LogFlowFunc(("returns %Rrc (pBackendData=%#p)\n", rc, *ppBackendData)); 6571 7171 return rc; 6572 7172 } 7173 6573 7174 /** 6574 7175 * Prepares the state for renaming a VMDK image, setting up the state and allocating … … 6583 7184 { 6584 7185 AssertReturn(RTPathFilename(pszFilename) != NULL, VERR_INVALID_PARAMETER); 7186 6585 7187 int rc = VINF_SUCCESS; 7188 6586 7189 memset(&pRenameState->DescriptorCopy, 0, sizeof(pRenameState->DescriptorCopy)); 7190 6587 7191 /* 6588 7192 * Allocate an array to store both old and new names of renamed files … … 6610 7214 pRenameState->fEmbeddedDesc = true; 6611 7215 } 7216 6612 7217 /* Save the descriptor content. */ 6613 7218 pRenameState->DescriptorCopy.cLines = pImage->Descriptor.cLines; … … 6621 7226 } 6622 7227 } 7228 6623 7229 if (RT_SUCCESS(rc)) 6624 7230 { … … 6627 7233 AssertReturn(pRenameState->pszNewBaseName, VERR_NO_STR_MEMORY); 6628 7234 RTPathStripSuffix(pRenameState->pszNewBaseName); 7235 6629 7236 pRenameState->pszOldBaseName = RTStrDup(RTPathFilename(pImage->pszFilename)); 6630 7237 AssertReturn(pRenameState->pszOldBaseName, VERR_NO_STR_MEMORY); 6631 7238 RTPathStripSuffix(pRenameState->pszOldBaseName); 7239 6632 7240 /* Prepare both old and new full names used for string replacement. 6633 7241 Note! Must abspath the stuff here, so the strstr weirdness later in … … 6637 7245 AssertReturn(pRenameState->pszNewFullName, VERR_NO_STR_MEMORY); 6638 7246 RTPathStripSuffix(pRenameState->pszNewFullName); 7247 6639 7248 pRenameState->pszOldFullName = RTPathAbsDup(pImage->pszFilename); 6640 7249 AssertReturn(pRenameState->pszOldFullName, VERR_NO_STR_MEMORY); 6641 7250 RTPathStripSuffix(pRenameState->pszOldFullName); 7251 6642 7252 /* Save the old name for easy access to the old descriptor file. */ 6643 7253 pRenameState->pszOldDescName = RTStrDup(pImage->pszFilename); 6644 7254 AssertReturn(pRenameState->pszOldDescName, VERR_NO_STR_MEMORY); 7255 6645 7256 /* Save old image name. */ 6646 7257 pRenameState->pszOldImageName = pImage->pszFilename; … … 6649 7260 else 6650 7261 rc = VERR_NO_TMP_MEMORY; 7262 6651 7263 return rc; 6652 7264 } 7265 6653 7266 /** 6654 7267 * Destroys the given rename state, freeing all allocated memory. … … 6694 7307 RTStrFree(pRenameState->pszNewFullName); 6695 7308 } 7309 6696 7310 /** 6697 7311 * Rolls back the rename operation to the original state. … … 6704 7318 { 6705 7319 int rc = VINF_SUCCESS; 7320 6706 7321 if (!pRenameState->fImageFreed) 6707 7322 { … … 6712 7327 vmdkFreeImage(pImage, false, true /*fFlush*/); 6713 7328 } 7329 6714 7330 /* Rename files back. 
*/ 6715 7331 for (unsigned i = 0; i <= pRenameState->cExtents; i++) … … 6750 7366 pImage->pszFilename = pRenameState->pszOldImageName; 6751 7367 rc = vmdkOpenImage(pImage, pImage->uOpenFlags); 7368 6752 7369 return rc; 6753 7370 } 7371 6754 7372 /** 6755 7373 * Rename worker doing the real work. … … 6764 7382 int rc = VINF_SUCCESS; 6765 7383 unsigned i, line; 7384 6766 7385 /* Update the descriptor with modified extent names. */ 6767 7386 for (i = 0, line = pImage->Descriptor.uFirstExtent; … … 6780 7399 pImage->Descriptor.aLines[line] = pRenameState->apszNewLines[i]; 6781 7400 } 7401 6782 7402 if (RT_SUCCESS(rc)) 6783 7403 { … … 6786 7406 /* Flush the descriptor now, in case it is embedded. */ 6787 7407 vmdkFlushImage(pImage, NULL); 7408 6788 7409 /* Close and rename/move extents. */ 6789 7410 for (i = 0; i < pRenameState->cExtents; i++) … … 6803 7424 if (RT_FAILURE(rc)) 6804 7425 break;; 7426 6805 7427 /* Rename the extent file. */ 6806 7428 rc = vdIfIoIntFileMove(pImage->pIfIo, pExtent->pszFullname, pRenameState->apszNewName[i], 0); … … 6810 7432 pRenameState->apszOldName[i] = RTStrDup(pExtent->pszFullname); 6811 7433 } 7434 6812 7435 if (RT_SUCCESS(rc)) 6813 7436 { … … 6817 7440 { 6818 7441 pRenameState->fImageFreed = true; 7442 6819 7443 /* Last elements of new/old name arrays are intended for 6820 7444 * storing descriptor's names. … … 6831 7455 } 6832 7456 } 7457 6833 7458 /* Update pImage with the new information. */ 6834 7459 pImage->pszFilename = pszFilename; 7460 6835 7461 /* Open the new image. */ 6836 7462 rc = vmdkOpenImage(pImage, pImage->uOpenFlags); … … 6838 7464 } 6839 7465 } 7466 6840 7467 return rc; 6841 7468 } 7469 6842 7470 /** @copydoc VDIMAGEBACKEND::pfnRename */ 6843 7471 static DECLCALLBACK(int) vmdkRename(void *pBackendData, const char *pszFilename) 6844 7472 { 6845 7473 LogFlowFunc(("pBackendData=%#p pszFilename=%#p\n", pBackendData, pszFilename)); 7474 6846 7475 PVMDKIMAGE pImage = (PVMDKIMAGE)pBackendData; 6847 7476 VMDKRENAMESTATE RenameState; 7477 6848 7478 memset(&RenameState, 0, sizeof(RenameState)); 7479 6849 7480 /* Check arguments. */ 6850 7481 AssertPtrReturn(pImage, VERR_INVALID_POINTER); … … 6852 7483 AssertReturn(*pszFilename != '\0', VERR_INVALID_PARAMETER); 6853 7484 AssertReturn(!(pImage->uImageFlags & VD_VMDK_IMAGE_FLAGS_RAWDISK), VERR_INVALID_PARAMETER); 7485 6854 7486 int rc = vmdkRenameStatePrepare(pImage, &RenameState, pszFilename); 6855 7487 if (RT_SUCCESS(rc)) 6856 7488 { 6857 7489 /* --- Up to this point we have not done any damage yet. --- */ 7490 6858 7491 rc = vmdkRenameWorker(pImage, &RenameState, pszFilename); 6859 7492 /* Roll back all changes in case of failure. 
*/ … … 6864 7497 } 6865 7498 } 7499 6866 7500 vmdkRenameStateDestroy(&RenameState); 6867 7501 LogFlowFunc(("returns %Rrc\n", rc)); 6868 7502 return rc; 6869 7503 } 7504 6870 7505 /** @copydoc VDIMAGEBACKEND::pfnClose */ 6871 7506 static DECLCALLBACK(int) vmdkClose(void *pBackendData, bool fDelete) … … 6873 7508 LogFlowFunc(("pBackendData=%#p fDelete=%d\n", pBackendData, fDelete)); 6874 7509 PVMDKIMAGE pImage = (PVMDKIMAGE)pBackendData; 7510 6875 7511 int rc = vmdkFreeImage(pImage, fDelete, true /*fFlush*/); 6876 7512 RTMemFree(pImage); 7513 6877 7514 LogFlowFunc(("returns %Rrc\n", rc)); 6878 7515 return rc; 6879 7516 } 7517 6880 7518 /** @copydoc VDIMAGEBACKEND::pfnRead */ 6881 7519 static DECLCALLBACK(int) vmdkRead(void *pBackendData, uint64_t uOffset, size_t cbToRead, … … 6885 7523 pBackendData, uOffset, pIoCtx, cbToRead, pcbActuallyRead)); 6886 7524 PVMDKIMAGE pImage = (PVMDKIMAGE)pBackendData; 7525 6887 7526 AssertPtr(pImage); 6888 7527 Assert(uOffset % 512 == 0); … … 6891 7530 AssertReturn(cbToRead, VERR_INVALID_PARAMETER); 6892 7531 AssertReturn(uOffset + cbToRead <= pImage->cbSize, VERR_INVALID_PARAMETER); 7532 6893 7533 /* Find the extent and check access permissions as defined in the extent descriptor. */ 6894 7534 PVMDKEXTENT pExtent; … … 6901 7541 /* Clip read range to remain in this extent. */ 6902 7542 cbToRead = RT_MIN(cbToRead, VMDK_SECTOR2BYTE(pExtent->uSectorOffset + pExtent->cNominalSectors - uSectorExtentRel)); 7543 6903 7544 /* Handle the read according to the current extent type. */ 6904 7545 switch (pExtent->enmType) … … 6907 7548 { 6908 7549 uint64_t uSectorExtentAbs; 7550 6909 7551 rc = vmdkGetSector(pImage, pIoCtx, pExtent, uSectorExtentRel, &uSectorExtentAbs); 6910 7552 if (RT_FAILURE(rc)) … … 6930 7572 AssertMsg(vdIfIoIntIoCtxIsSynchronous(pImage->pIfIo, pIoCtx), 6931 7573 ("Async I/O is not supported for stream optimized VMDK's\n")); 7574 6932 7575 uint32_t uSectorInGrain = uSectorExtentRel % pExtent->cSectorsPerGrain; 6933 7576 uSectorExtentAbs -= uSectorInGrain; … … 6970 7613 { 6971 7614 size_t cbSet; 7615 6972 7616 cbSet = vdIfIoIntIoCtxSet(pImage->pIfIo, pIoCtx, 0, cbToRead); 6973 7617 Assert(cbSet == cbToRead); … … 6980 7624 else if (RT_SUCCESS(rc)) 6981 7625 rc = VERR_VD_VMDK_INVALID_STATE; 7626 6982 7627 LogFlowFunc(("returns %Rrc\n", rc)); 6983 7628 return rc; 6984 7629 } 7630 6985 7631 /** @copydoc VDIMAGEBACKEND::pfnWrite */ 6986 7632 static DECLCALLBACK(int) vmdkWrite(void *pBackendData, uint64_t uOffset, size_t cbToWrite, … … 6992 7638 PVMDKIMAGE pImage = (PVMDKIMAGE)pBackendData; 6993 7639 int rc; 7640 6994 7641 AssertPtr(pImage); 6995 7642 Assert(uOffset % 512 == 0); … … 6997 7644 AssertPtrReturn(pIoCtx, VERR_INVALID_POINTER); 6998 7645 AssertReturn(cbToWrite, VERR_INVALID_PARAMETER); 7646 6999 7647 if (!(pImage->uOpenFlags & VD_OPEN_FLAGS_READONLY)) 7000 7648 { … … 7002 7650 uint64_t uSectorExtentRel; 7003 7651 uint64_t uSectorExtentAbs; 7652 7004 7653 /* No size check here, will do that later when the extent is located. 7005 7654 * There are sparse images out there which according to the spec are … … 7008 7657 * grain boundaries, and with the nominal size not being a multiple of the 7009 7658 * grain size), this would prevent writing to the last grain. 
*/ 7659 7010 7660 rc = vmdkFindExtent(pImage, VMDK_BYTE2SECTOR(uOffset), 7011 7661 &pExtent, &uSectorExtentRel); … … 7105 7755 } 7106 7756 } 7757 7107 7758 if (pcbWriteProcess) 7108 7759 *pcbWriteProcess = cbToWrite; … … 7111 7762 else 7112 7763 rc = VERR_VD_IMAGE_READ_ONLY; 7764 7113 7765 LogFlowFunc(("returns %Rrc\n", rc)); 7114 7766 return rc; 7115 7767 } 7768 7116 7769 /** @copydoc VDIMAGEBACKEND::pfnFlush */ 7117 7770 static DECLCALLBACK(int) vmdkFlush(void *pBackendData, PVDIOCTX pIoCtx) 7118 7771 { 7119 7772 PVMDKIMAGE pImage = (PVMDKIMAGE)pBackendData; 7773 7120 7774 return vmdkFlushImage(pImage, pIoCtx); 7121 7775 } 7776 7122 7777 /** @copydoc VDIMAGEBACKEND::pfnGetVersion */ 7123 7778 static DECLCALLBACK(unsigned) vmdkGetVersion(void *pBackendData) … … 7125 7780 LogFlowFunc(("pBackendData=%#p\n", pBackendData)); 7126 7781 PVMDKIMAGE pImage = (PVMDKIMAGE)pBackendData; 7782 7127 7783 AssertPtrReturn(pImage, 0); 7784 7128 7785 return VMDK_IMAGE_VERSION; 7129 7786 } 7787 7130 7788 /** @copydoc VDIMAGEBACKEND::pfnGetFileSize */ 7131 7789 static DECLCALLBACK(uint64_t) vmdkGetFileSize(void *pBackendData) … … 7134 7792 PVMDKIMAGE pImage = (PVMDKIMAGE)pBackendData; 7135 7793 uint64_t cb = 0; 7794 7136 7795 AssertPtrReturn(pImage, 0); 7796 7137 7797 if (pImage->pFile != NULL) 7138 7798 { … … 7152 7812 } 7153 7813 } 7814 7154 7815 LogFlowFunc(("returns %lld\n", cb)); 7155 7816 return cb; 7156 7817 } 7818 7157 7819 /** @copydoc VDIMAGEBACKEND::pfnGetPCHSGeometry */ 7158 7820 static DECLCALLBACK(int) vmdkGetPCHSGeometry(void *pBackendData, PVDGEOMETRY pPCHSGeometry) … … 7161 7823 PVMDKIMAGE pImage = (PVMDKIMAGE)pBackendData; 7162 7824 int rc = VINF_SUCCESS; 7825 7163 7826 AssertPtrReturn(pImage, VERR_VD_NOT_OPENED); 7827 7164 7828 if (pImage->PCHSGeometry.cCylinders) 7165 7829 *pPCHSGeometry = pImage->PCHSGeometry; 7166 7830 else 7167 7831 rc = VERR_VD_GEOMETRY_NOT_SET; 7832 7168 7833 LogFlowFunc(("returns %Rrc (PCHS=%u/%u/%u)\n", rc, pPCHSGeometry->cCylinders, pPCHSGeometry->cHeads, pPCHSGeometry->cSectors)); 7169 7834 return rc; 7170 7835 } 7836 7171 7837 /** @copydoc VDIMAGEBACKEND::pfnSetPCHSGeometry */ 7172 7838 static DECLCALLBACK(int) vmdkSetPCHSGeometry(void *pBackendData, PCVDGEOMETRY pPCHSGeometry) … … 7176 7842 PVMDKIMAGE pImage = (PVMDKIMAGE)pBackendData; 7177 7843 int rc = VINF_SUCCESS; 7844 7178 7845 AssertPtrReturn(pImage, VERR_VD_NOT_OPENED); 7846 7179 7847 if (!(pImage->uOpenFlags & VD_OPEN_FLAGS_READONLY)) 7180 7848 { … … 7190 7858 else 7191 7859 rc = VERR_VD_IMAGE_READ_ONLY; 7860 7192 7861 LogFlowFunc(("returns %Rrc\n", rc)); 7193 7862 return rc; 7194 7863 } 7864 7195 7865 /** @copydoc VDIMAGEBACKEND::pfnGetLCHSGeometry */ 7196 7866 static DECLCALLBACK(int) vmdkGetLCHSGeometry(void *pBackendData, PVDGEOMETRY pLCHSGeometry) … … 7199 7869 PVMDKIMAGE pImage = (PVMDKIMAGE)pBackendData; 7200 7870 int rc = VINF_SUCCESS; 7871 7201 7872 AssertPtrReturn(pImage, VERR_VD_NOT_OPENED); 7873 7202 7874 if (pImage->LCHSGeometry.cCylinders) 7203 7875 *pLCHSGeometry = pImage->LCHSGeometry; 7204 7876 else 7205 7877 rc = VERR_VD_GEOMETRY_NOT_SET; 7878 7206 7879 LogFlowFunc(("returns %Rrc (LCHS=%u/%u/%u)\n", rc, pLCHSGeometry->cCylinders, pLCHSGeometry->cHeads, pLCHSGeometry->cSectors)); 7207 7880 return rc; 7208 7881 } 7882 7209 7883 /** @copydoc VDIMAGEBACKEND::pfnSetLCHSGeometry */ 7210 7884 static DECLCALLBACK(int) vmdkSetLCHSGeometry(void *pBackendData, PCVDGEOMETRY pLCHSGeometry) … … 7214 7888 PVMDKIMAGE pImage = (PVMDKIMAGE)pBackendData; 7215 7889 int rc = VINF_SUCCESS; 7890 7216 7891 
AssertPtrReturn(pImage, VERR_VD_NOT_OPENED); 7892 7217 7893 if (!(pImage->uOpenFlags & VD_OPEN_FLAGS_READONLY)) 7218 7894 { … … 7228 7904 else 7229 7905 rc = VERR_VD_IMAGE_READ_ONLY; 7906 7230 7907 LogFlowFunc(("returns %Rrc\n", rc)); 7231 7908 return rc; 7232 7909 } 7910 7233 7911 /** @copydoc VDIMAGEBACKEND::pfnQueryRegions */ 7234 7912 static DECLCALLBACK(int) vmdkQueryRegions(void *pBackendData, PCVDREGIONLIST *ppRegionList) … … 7236 7914 LogFlowFunc(("pBackendData=%#p ppRegionList=%#p\n", pBackendData, ppRegionList)); 7237 7915 PVMDKIMAGE pThis = (PVMDKIMAGE)pBackendData; 7916 7238 7917 AssertPtrReturn(pThis, VERR_VD_NOT_OPENED); 7918 7239 7919 *ppRegionList = &pThis->RegionList; 7240 7920 LogFlowFunc(("returns %Rrc\n", VINF_SUCCESS)); 7241 7921 return VINF_SUCCESS; 7242 7922 } 7923 7243 7924 /** @copydoc VDIMAGEBACKEND::pfnRegionListRelease */ 7244 7925 static DECLCALLBACK(void) vmdkRegionListRelease(void *pBackendData, PCVDREGIONLIST pRegionList) … … 7248 7929 PVMDKIMAGE pThis = (PVMDKIMAGE)pBackendData; 7249 7930 AssertPtr(pThis); RT_NOREF(pThis); 7931 7250 7932 /* Nothing to do here. */ 7251 7933 } 7934 7252 7935 /** @copydoc VDIMAGEBACKEND::pfnGetImageFlags */ 7253 7936 static DECLCALLBACK(unsigned) vmdkGetImageFlags(void *pBackendData) … … 7255 7938 LogFlowFunc(("pBackendData=%#p\n", pBackendData)); 7256 7939 PVMDKIMAGE pImage = (PVMDKIMAGE)pBackendData; 7940 7257 7941 AssertPtrReturn(pImage, 0); 7942 7258 7943 LogFlowFunc(("returns %#x\n", pImage->uImageFlags)); 7259 7944 return pImage->uImageFlags; 7260 7945 } 7946 7261 7947 /** @copydoc VDIMAGEBACKEND::pfnGetOpenFlags */ 7262 7948 static DECLCALLBACK(unsigned) vmdkGetOpenFlags(void *pBackendData) … … 7264 7950 LogFlowFunc(("pBackendData=%#p\n", pBackendData)); 7265 7951 PVMDKIMAGE pImage = (PVMDKIMAGE)pBackendData; 7952 7266 7953 AssertPtrReturn(pImage, 0); 7954 7267 7955 LogFlowFunc(("returns %#x\n", pImage->uOpenFlags)); 7268 7956 return pImage->uOpenFlags; 7269 7957 } 7958 7270 7959 /** @copydoc VDIMAGEBACKEND::pfnSetOpenFlags */ 7271 7960 static DECLCALLBACK(int) vmdkSetOpenFlags(void *pBackendData, unsigned uOpenFlags) … … 7274 7963 PVMDKIMAGE pImage = (PVMDKIMAGE)pBackendData; 7275 7964 int rc; 7965 7276 7966 /* Image must be opened and the new flags must be valid. 
*/ 7277 7967 if (!pImage || (uOpenFlags & ~( VD_OPEN_FLAGS_READONLY | VD_OPEN_FLAGS_INFO … … 7296 7986 } 7297 7987 } 7988 7298 7989 LogFlowFunc(("returns %Rrc\n", rc)); 7299 7990 return rc; 7300 7991 } 7992 7301 7993 /** @copydoc VDIMAGEBACKEND::pfnGetComment */ 7302 7994 static DECLCALLBACK(int) vmdkGetComment(void *pBackendData, char *pszComment, size_t cbComment) … … 7304 7996 LogFlowFunc(("pBackendData=%#p pszComment=%#p cbComment=%zu\n", pBackendData, pszComment, cbComment)); 7305 7997 PVMDKIMAGE pImage = (PVMDKIMAGE)pBackendData; 7998 7306 7999 AssertPtrReturn(pImage, VERR_VD_NOT_OPENED); 8000 7307 8001 char *pszCommentEncoded = NULL; 7308 8002 int rc = vmdkDescDDBGetStr(pImage, &pImage->Descriptor, … … 7313 8007 rc = VINF_SUCCESS; 7314 8008 } 8009 7315 8010 if (RT_SUCCESS(rc)) 7316 8011 { … … 7319 8014 else if (pszComment) 7320 8015 *pszComment = '\0'; 8016 7321 8017 if (pszCommentEncoded) 7322 8018 RTMemTmpFree(pszCommentEncoded); 7323 8019 } 8020 7324 8021 LogFlowFunc(("returns %Rrc comment='%s'\n", rc, pszComment)); 7325 8022 return rc; 7326 8023 } 8024 7327 8025 /** @copydoc VDIMAGEBACKEND::pfnSetComment */ 7328 8026 static DECLCALLBACK(int) vmdkSetComment(void *pBackendData, const char *pszComment) … … 7331 8029 PVMDKIMAGE pImage = (PVMDKIMAGE)pBackendData; 7332 8030 int rc; 8031 7333 8032 AssertPtrReturn(pImage, VERR_VD_NOT_OPENED); 8033 7334 8034 if (!(pImage->uOpenFlags & VD_OPEN_FLAGS_READONLY)) 7335 8035 { … … 7341 8041 else 7342 8042 rc = VERR_VD_IMAGE_READ_ONLY; 8043 7343 8044 LogFlowFunc(("returns %Rrc\n", rc)); 7344 8045 return rc; 7345 8046 } 8047 7346 8048 /** @copydoc VDIMAGEBACKEND::pfnGetUuid */ 7347 8049 static DECLCALLBACK(int) vmdkGetUuid(void *pBackendData, PRTUUID pUuid) … … 7349 8051 LogFlowFunc(("pBackendData=%#p pUuid=%#p\n", pBackendData, pUuid)); 7350 8052 PVMDKIMAGE pImage = (PVMDKIMAGE)pBackendData; 8053 7351 8054 AssertPtrReturn(pImage, VERR_VD_NOT_OPENED); 8055 7352 8056 *pUuid = pImage->ImageUuid; 8057 7353 8058 LogFlowFunc(("returns %Rrc (%RTuuid)\n", VINF_SUCCESS, pUuid)); 7354 8059 return VINF_SUCCESS; 7355 8060 } 8061 7356 8062 /** @copydoc VDIMAGEBACKEND::pfnSetUuid */ 7357 8063 static DECLCALLBACK(int) vmdkSetUuid(void *pBackendData, PCRTUUID pUuid) … … 7360 8066 PVMDKIMAGE pImage = (PVMDKIMAGE)pBackendData; 7361 8067 int rc = VINF_SUCCESS; 8068 7362 8069 AssertPtrReturn(pImage, VERR_VD_NOT_OPENED); 8070 7363 8071 if (!(pImage->uOpenFlags & VD_OPEN_FLAGS_READONLY)) 7364 8072 { … … 7377 8085 else 7378 8086 rc = VERR_VD_IMAGE_READ_ONLY; 8087 7379 8088 LogFlowFunc(("returns %Rrc\n", rc)); 7380 8089 return rc; 7381 8090 } 8091 7382 8092 /** @copydoc VDIMAGEBACKEND::pfnGetModificationUuid */ 7383 8093 static DECLCALLBACK(int) vmdkGetModificationUuid(void *pBackendData, PRTUUID pUuid) … … 7385 8095 LogFlowFunc(("pBackendData=%#p pUuid=%#p\n", pBackendData, pUuid)); 7386 8096 PVMDKIMAGE pImage = (PVMDKIMAGE)pBackendData; 8097 7387 8098 AssertPtrReturn(pImage, VERR_VD_NOT_OPENED); 8099 7388 8100 *pUuid = pImage->ModificationUuid; 8101 7389 8102 LogFlowFunc(("returns %Rrc (%RTuuid)\n", VINF_SUCCESS, pUuid)); 7390 8103 return VINF_SUCCESS; 7391 8104 } 8105 7392 8106 /** @copydoc VDIMAGEBACKEND::pfnSetModificationUuid */ 7393 8107 static DECLCALLBACK(int) vmdkSetModificationUuid(void *pBackendData, PCRTUUID pUuid) … … 7396 8110 PVMDKIMAGE pImage = (PVMDKIMAGE)pBackendData; 7397 8111 int rc = VINF_SUCCESS; 8112 7398 8113 AssertPtrReturn(pImage, VERR_VD_NOT_OPENED); 8114 7399 8115 if (!(pImage->uOpenFlags & VD_OPEN_FLAGS_READONLY)) 7400 8116 { … … 7416 
8132 else 7417 8133 rc = VERR_VD_IMAGE_READ_ONLY; 8134 7418 8135 LogFlowFunc(("returns %Rrc\n", rc)); 7419 8136 return rc; 7420 8137 } 8138 7421 8139 /** @copydoc VDIMAGEBACKEND::pfnGetParentUuid */ 7422 8140 static DECLCALLBACK(int) vmdkGetParentUuid(void *pBackendData, PRTUUID pUuid) … … 7424 8142 LogFlowFunc(("pBackendData=%#p pUuid=%#p\n", pBackendData, pUuid)); 7425 8143 PVMDKIMAGE pImage = (PVMDKIMAGE)pBackendData; 8144 7426 8145 AssertPtrReturn(pImage, VERR_VD_NOT_OPENED); 8146 7427 8147 *pUuid = pImage->ParentUuid; 8148 7428 8149 LogFlowFunc(("returns %Rrc (%RTuuid)\n", VINF_SUCCESS, pUuid)); 7429 8150 return VINF_SUCCESS; 7430 8151 } 8152 7431 8153 /** @copydoc VDIMAGEBACKEND::pfnSetParentUuid */ 7432 8154 static DECLCALLBACK(int) vmdkSetParentUuid(void *pBackendData, PCRTUUID pUuid) … … 7435 8157 PVMDKIMAGE pImage = (PVMDKIMAGE)pBackendData; 7436 8158 int rc = VINF_SUCCESS; 8159 7437 8160 AssertPtrReturn(pImage, VERR_VD_NOT_OPENED); 8161 7438 8162 if (!(pImage->uOpenFlags & VD_OPEN_FLAGS_READONLY)) 7439 8163 { … … 7452 8176 else 7453 8177 rc = VERR_VD_IMAGE_READ_ONLY; 8178 7454 8179 LogFlowFunc(("returns %Rrc\n", rc)); 7455 8180 return rc; 7456 8181 } 8182 7457 8183 /** @copydoc VDIMAGEBACKEND::pfnGetParentModificationUuid */ 7458 8184 static DECLCALLBACK(int) vmdkGetParentModificationUuid(void *pBackendData, PRTUUID pUuid) … … 7460 8186 LogFlowFunc(("pBackendData=%#p pUuid=%#p\n", pBackendData, pUuid)); 7461 8187 PVMDKIMAGE pImage = (PVMDKIMAGE)pBackendData; 8188 7462 8189 AssertPtrReturn(pImage, VERR_VD_NOT_OPENED); 8190 7463 8191 *pUuid = pImage->ParentModificationUuid; 8192 7464 8193 LogFlowFunc(("returns %Rrc (%RTuuid)\n", VINF_SUCCESS, pUuid)); 7465 8194 return VINF_SUCCESS; 7466 8195 } 8196 7467 8197 /** @copydoc VDIMAGEBACKEND::pfnSetParentModificationUuid */ 7468 8198 static DECLCALLBACK(int) vmdkSetParentModificationUuid(void *pBackendData, PCRTUUID pUuid) … … 7471 8201 PVMDKIMAGE pImage = (PVMDKIMAGE)pBackendData; 7472 8202 int rc = VINF_SUCCESS; 8203 7473 8204 AssertPtrReturn(pImage, VERR_VD_NOT_OPENED); 8205 7474 8206 if (!(pImage->uOpenFlags & VD_OPEN_FLAGS_READONLY)) 7475 8207 { … … 7487 8219 else 7488 8220 rc = VERR_VD_IMAGE_READ_ONLY; 8221 7489 8222 LogFlowFunc(("returns %Rrc\n", rc)); 7490 8223 return rc; 7491 8224 } 8225 7492 8226 /** @copydoc VDIMAGEBACKEND::pfnDump */ 7493 8227 static DECLCALLBACK(void) vmdkDump(void *pBackendData) 7494 8228 { 7495 8229 PVMDKIMAGE pImage = (PVMDKIMAGE)pBackendData; 8230 7496 8231 AssertPtrReturnVoid(pImage); 7497 8232 vdIfErrorMessage(pImage->pIfError, "Header: Geometry PCHS=%u/%u/%u LCHS=%u/%u/%u cbSector=%llu\n", … … 7715 8450 return rc; 7716 8451 } 8452 7717 8453 7718 8454 const VDIMAGEBACKEND g_VmdkBackend =