Changeset 97836 in vbox for trunk/src/VBox/Storage
- Timestamp: Dec 20, 2022, 1:58:14 AM (2 years ago)
- svn:sync-xref-src-repo-rev: 154922
- Location: trunk/src/VBox/Storage
- Files: 3 edited
trunk/src/VBox/Storage/VMDK.cpp
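Note: the hunks below touch the sparse-extent layout and split-extent code, so a quick orientation on the addressing math may help while reading them. The snippet is only an illustrative sketch, not code from this changeset; it reuses the constants and formulas visible in VMDK.cpp (512-byte sectors, 64 KB grains, 512 grain-table entries per table), and every name prefixed with "demo" is hypothetical.

/* Illustrative sketch: map a byte offset inside a sparse extent to its
 * grain directory (GD) entry and grain table (GT) entry, using the same
 * constants the file defines (VMDK_SECTOR_SIZE = 512, 64 KB grains,
 * 512 GT entries per table). The real code additionally accounts for the
 * extent's overhead sectors; this sketch skips that. */
#include <stdint.h>
#include <stdio.h>

#define DEMO_BYTE2SECTOR(u) ((u) >> 9)   /* mirrors VMDK_BYTE2SECTOR */

int main(void)
{
    uint64_t const cSectorsPerGrain = DEMO_BYTE2SECTOR(64 * 1024);   /* 128 sectors per grain */
    uint64_t const cGTEntries       = 512;                           /* entries per grain table */
    uint64_t const cSectorsPerGDE   = cGTEntries * cSectorsPerGrain; /* sectors covered by one GD entry */

    uint64_t const offByte  = UINT64_C(5) * 1024 * 1024 * 1024;      /* example: 5 GiB into the extent */
    uint64_t const uSector  = DEMO_BYTE2SECTOR(offByte);
    uint64_t const iGDEntry = uSector / cSectorsPerGDE;                          /* which grain table */
    uint64_t const iGTEntry = (uSector % cSectorsPerGDE) / cSectorsPerGrain;     /* entry inside it */

    printf("sector %llu -> GD entry %llu, GT entry %llu\n",
           (unsigned long long)uSector, (unsigned long long)iGDEntry,
           (unsigned long long)iGTEntry);
    return 0;
}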
r97260 r97836 3 3 * VMDK disk image, core code. 4 4 */ 5 6 5 /* 7 6 * Copyright (C) 2006-2022 Oracle and/or its affiliates. … … 34 33 #include <VBox/vd-plugin.h> 35 34 #include <VBox/err.h> 36 37 35 #include <iprt/assert.h> 38 36 #include <iprt/alloc.h> … … 94 92 # define DKIOCUNLOCKPHYSICALEXTENTS _IO( 'd', 83) 95 93 #endif /* RT_OS_DARWIN */ 96 97 94 #include "VDBackends.h" 98 95 … … 101 98 * Constants And Macros, Structures and Typedefs * 102 99 *********************************************************************************************************************************/ 103 104 100 /** Maximum encoded string size (including NUL) we allow for VMDK images. 105 101 * Deliberately not set high to avoid running out of descriptor space. */ 106 102 #define VMDK_ENCODED_COMMENT_MAX 1024 107 108 103 /** VMDK descriptor DDB entry for PCHS cylinders. */ 109 104 #define VMDK_DDB_GEO_PCHS_CYLINDERS "ddb.geometry.cylinders" 110 111 105 /** VMDK descriptor DDB entry for PCHS heads. */ 112 106 #define VMDK_DDB_GEO_PCHS_HEADS "ddb.geometry.heads" 113 114 107 /** VMDK descriptor DDB entry for PCHS sectors. */ 115 108 #define VMDK_DDB_GEO_PCHS_SECTORS "ddb.geometry.sectors" 116 117 109 /** VMDK descriptor DDB entry for LCHS cylinders. */ 118 110 #define VMDK_DDB_GEO_LCHS_CYLINDERS "ddb.geometry.biosCylinders" 119 120 111 /** VMDK descriptor DDB entry for LCHS heads. */ 121 112 #define VMDK_DDB_GEO_LCHS_HEADS "ddb.geometry.biosHeads" 122 123 113 /** VMDK descriptor DDB entry for LCHS sectors. */ 124 114 #define VMDK_DDB_GEO_LCHS_SECTORS "ddb.geometry.biosSectors" 125 126 115 /** VMDK descriptor DDB entry for image UUID. */ 127 116 #define VMDK_DDB_IMAGE_UUID "ddb.uuid.image" 128 129 117 /** VMDK descriptor DDB entry for image modification UUID. */ 130 118 #define VMDK_DDB_MODIFICATION_UUID "ddb.uuid.modification" 131 132 119 /** VMDK descriptor DDB entry for parent image UUID. */ 133 120 #define VMDK_DDB_PARENT_UUID "ddb.uuid.parent" 134 135 121 /** VMDK descriptor DDB entry for parent image modification UUID. */ 136 122 #define VMDK_DDB_PARENT_MODIFICATION_UUID "ddb.uuid.parentmodification" 137 138 123 /** No compression for streamOptimized files. */ 139 124 #define VMDK_COMPRESSION_NONE 0 140 141 125 /** Deflate compression for streamOptimized files. */ 142 126 #define VMDK_COMPRESSION_DEFLATE 1 143 144 127 /** Marker that the actual GD value is stored in the footer. */ 145 128 #define VMDK_GD_AT_END 0xffffffffffffffffULL 146 147 129 /** Marker for end-of-stream in streamOptimized images. */ 148 130 #define VMDK_MARKER_EOS 0 149 150 131 /** Marker for grain table block in streamOptimized images. */ 151 132 #define VMDK_MARKER_GT 1 152 153 133 /** Marker for grain directory block in streamOptimized images. */ 154 134 #define VMDK_MARKER_GD 2 155 156 135 /** Marker for footer in streamOptimized images. */ 157 136 #define VMDK_MARKER_FOOTER 3 158 159 137 /** Marker for unknown purpose in streamOptimized images. 160 138 * Shows up in very recent images created by vSphere, but only sporadically. 161 139 * They "forgot" to document that one in the VMDK specification. */ 162 140 #define VMDK_MARKER_UNSPECIFIED 4 163 164 141 /** Dummy marker for "don't check the marker value". */ 165 142 #define VMDK_MARKER_IGNORE 0xffffffffU 166 167 143 /** 168 144 * Magic number for hosted images created by VMware Workstation 4, VMware … … 170 146 */ 171 147 #define VMDK_SPARSE_MAGICNUMBER 0x564d444b /* 'V' 'M' 'D' 'K' */ 172 173 148 /** VMDK sector size in bytes. 
*/ 174 149 #define VMDK_SECTOR_SIZE 512 … … 179 154 /** Grain table size in bytes */ 180 155 #define VMDK_GRAIN_TABLE_SIZE 2048 181 182 156 /** 183 157 * VMDK hosted binary extent header. The "Sparse" is a total misnomer, as … … 207 181 } SparseExtentHeader; 208 182 #pragma pack() 209 210 183 /** The maximum allowed descriptor size in the extent header in sectors. */ 211 184 #define VMDK_SPARSE_DESCRIPTOR_SIZE_MAX UINT64_C(20480) /* 10MB */ 212 213 185 /** VMDK capacity for a single chunk when 2G splitting is turned on. Should be 214 186 * divisible by the default grain size (64K) */ 215 187 #define VMDK_2G_SPLIT_SIZE (2047 * 1024 * 1024) 216 217 188 /** VMDK streamOptimized file format marker. The type field may or may not 218 189 * be actually valid, but there's always data to read there. */ … … 225 196 } VMDKMARKER, *PVMDKMARKER; 226 197 #pragma pack() 227 228 229 198 /** Convert sector number/size to byte offset/size. */ 230 199 #define VMDK_SECTOR2BYTE(u) ((uint64_t)(u) << 9) 231 232 200 /** Convert byte offset/size to sector number/size. */ 233 201 #define VMDK_BYTE2SECTOR(u) ((u) >> 9) 234 235 202 /** 236 203 * VMDK extent type. … … 247 214 VMDKETYPE_VMFS 248 215 } VMDKETYPE, *PVMDKETYPE; 249 250 216 /** 251 217 * VMDK access type for a extent. … … 260 226 VMDKACCESS_READWRITE 261 227 } VMDKACCESS, *PVMDKACCESS; 262 263 228 /** Forward declaration for PVMDKIMAGE. */ 264 229 typedef struct VMDKIMAGE *PVMDKIMAGE; 265 266 230 /** 267 231 * Extents files entry. Used for opening a particular file only once. … … 288 252 struct VMDKFILE *pPrev; 289 253 } VMDKFILE, *PVMDKFILE; 290 291 254 /** 292 255 * VMDK extent data structure. … … 369 332 struct VMDKIMAGE *pImage; 370 333 } VMDKEXTENT, *PVMDKEXTENT; 371 372 334 /** 373 335 * Grain table cache size. Allocated per image. 374 336 */ 375 337 #define VMDK_GT_CACHE_SIZE 256 376 377 338 /** 378 339 * Grain table block size. Smaller than an actual grain table block to allow … … 381 342 */ 382 343 #define VMDK_GT_CACHELINE_SIZE 128 383 384 385 344 /** 386 345 * Maximum number of lines in a descriptor file. Not worth the effort of … … 390 349 */ 391 350 #define VMDK_DESCRIPTOR_LINES_MAX 1100U 392 393 351 /** 394 352 * Parsed descriptor information. Allows easy access and update of the … … 414 372 unsigned aNextLines[VMDK_DESCRIPTOR_LINES_MAX]; 415 373 } VMDKDESCRIPTOR, *PVMDKDESCRIPTOR; 416 417 418 374 /** 419 375 * Cache entry for translating extent/sector to a sector number in that … … 429 385 uint32_t aGTData[VMDK_GT_CACHELINE_SIZE]; 430 386 } VMDKGTCACHEENTRY, *PVMDKGTCACHEENTRY; 431 432 387 /** 433 388 * Cache data structure for blocks of grain table entries. For now this is a … … 443 398 unsigned cEntries; 444 399 } VMDKGTCACHE, *PVMDKGTCACHE; 445 446 400 /** 447 401 * Complete VMDK image data structure. Mainly a collection of extents and a few … … 454 408 /** Descriptor file if applicable. */ 455 409 PVMDKFILE pFile; 456 457 410 /** Pointer to the per-disk VD interface list. */ 458 411 PVDINTERFACE pVDIfsDisk; 459 412 /** Pointer to the per-image VD interface list. */ 460 413 PVDINTERFACE pVDIfsImage; 461 462 414 /** Error interface. */ 463 415 PVDINTERFACEERROR pIfError; 464 416 /** I/O interface. */ 465 417 PVDINTERFACEIOINT pIfIo; 466 467 468 418 /** Pointer to the image extents. */ 469 419 PVMDKEXTENT pExtents; … … 473 423 * times only once (happens mainly with raw partition access). */ 474 424 PVMDKFILE pFiles; 475 476 425 /** 477 426 * Pointer to an array of segment entries for async I/O. 
… … 483 432 /** Entries available in the segments array. */ 484 433 unsigned cSegments; 485 486 434 /** Open flags passed by VBoxHD layer. */ 487 435 unsigned uOpenFlags; … … 502 450 /** Parent image modification UUID. */ 503 451 RTUUID ParentModificationUuid; 504 505 452 /** Pointer to grain table cache, if this image contains sparse extents. */ 506 453 PVMDKGTCACHE pGTCache; … … 514 461 VDREGIONLIST RegionList; 515 462 } VMDKIMAGE; 516 517 518 463 /** State for the input/output callout of the inflate reader/deflate writer. */ 519 464 typedef struct VMDKCOMPRESSIO … … 528 473 void *pvCompGrain; 529 474 } VMDKCOMPRESSIO; 530 531 532 475 /** Tracks async grain allocation. */ 533 476 typedef struct VMDKGRAINALLOCASYNC … … 551 494 uint64_t uRGTSector; 552 495 } VMDKGRAINALLOCASYNC, *PVMDKGRAINALLOCASYNC; 553 554 496 /** 555 497 * State information for vmdkRename() and helpers. … … 594 536 * Static Variables * 595 537 *********************************************************************************************************************************/ 596 597 538 /** NULL-terminated array of supported file extensions. */ 598 539 static const VDFILEEXTENSION s_aVmdkFileExtensions[] = … … 601 542 {NULL, VDTYPE_INVALID} 602 543 }; 603 604 544 /** NULL-terminated array of configuration option. */ 605 545 static const VDCONFIGINFO s_aVmdkConfigInfo[] = … … 610 550 { "BootSector", NULL, VDCFGVALUETYPE_BYTES, 0 }, 611 551 { "Relative", NULL, VDCFGVALUETYPE_INTEGER, 0 }, 612 613 552 /* End of options list */ 614 553 { NULL, NULL, VDCFGVALUETYPE_INTEGER, 0 } … … 619 558 * Internal Functions * 620 559 *********************************************************************************************************************************/ 621 622 560 static void vmdkFreeStreamBuffers(PVMDKEXTENT pExtent); 623 561 static int vmdkFreeExtentData(PVMDKIMAGE pImage, PVMDKEXTENT pExtent, 624 562 bool fDelete); 625 626 563 static int vmdkCreateExtents(PVMDKIMAGE pImage, unsigned cExtents); 627 564 static int vmdkFlushImage(PVMDKIMAGE pImage, PVDIOCTX pIoCtx); 628 565 static int vmdkSetImageComment(PVMDKIMAGE pImage, const char *pszComment); 629 566 static int vmdkFreeImage(PVMDKIMAGE pImage, bool fDelete, bool fFlush); 630 631 567 static DECLCALLBACK(int) vmdkAllocGrainComplete(void *pBackendData, PVDIOCTX pIoCtx, 632 568 void *pvUser, int rcReq); 633 634 569 /** 635 570 * Internal: open a file (using a file descriptor cache to ensure each file … … 641 576 int rc = VINF_SUCCESS; 642 577 PVMDKFILE pVmdkFile; 643 644 578 for (pVmdkFile = pImage->pFiles; 645 579 pVmdkFile != NULL; … … 650 584 Assert(fOpen == pVmdkFile->fOpen); 651 585 pVmdkFile->uReferences++; 652 653 586 *ppVmdkFile = pVmdkFile; 654 655 587 return rc; 656 588 } 657 589 } 658 659 590 /* If we get here, there's no matching entry in the cache. */ 660 591 pVmdkFile = (PVMDKFILE)RTMemAllocZ(sizeof(VMDKFILE)); … … 664 595 return VERR_NO_MEMORY; 665 596 } 666 667 597 pVmdkFile->pszFilename = RTStrDup(pszFilename); 668 598 if (!pVmdkFile->pszFilename) … … 672 602 return VERR_NO_MEMORY; 673 603 } 674 675 604 if (pszBasename) 676 605 { … … 684 613 } 685 614 } 686 687 615 pVmdkFile->fOpen = fOpen; 688 689 616 rc = vdIfIoIntFileOpen(pImage->pIfIo, pszFilename, fOpen, 690 617 &pVmdkFile->pStorage); … … 705 632 *ppVmdkFile = NULL; 706 633 } 707 708 634 return rc; 709 635 } 710 711 636 /** 712 637 * Internal: close a file, updating the file descriptor cache. 
… … 716 641 int rc = VINF_SUCCESS; 717 642 PVMDKFILE pVmdkFile = *ppVmdkFile; 718 719 643 AssertPtr(pVmdkFile); 720 721 644 pVmdkFile->fDelete |= fDelete; 722 645 Assert(pVmdkFile->uReferences); … … 726 649 PVMDKFILE pPrev; 727 650 PVMDKFILE pNext; 728 729 651 /* Unchain the element from the list. */ 730 652 pPrev = pVmdkFile->pPrev; 731 653 pNext = pVmdkFile->pNext; 732 733 654 if (pNext) 734 655 pNext->pPrev = pPrev; … … 737 658 else 738 659 pImage->pFiles = pNext; 739 740 660 rc = vdIfIoIntFileClose(pImage->pIfIo, pVmdkFile->pStorage); 741 742 661 bool fFileDel = pVmdkFile->fDelete; 743 662 if ( pVmdkFile->pszBasename … … 752 671 fFileDel = false; 753 672 } 754 755 673 if (fFileDel) 756 674 { … … 766 684 RTMemFree(pVmdkFile); 767 685 } 768 769 686 *ppVmdkFile = NULL; 770 687 return rc; 771 688 } 772 773 689 /*#define VMDK_USE_BLOCK_DECOMP_API - test and enable */ 774 690 #ifndef VMDK_USE_BLOCK_DECOMP_API … … 777 693 VMDKCOMPRESSIO *pInflateState = (VMDKCOMPRESSIO *)pvUser; 778 694 size_t cbInjected = 0; 779 780 695 Assert(cbBuf); 781 696 if (pInflateState->iOffset < 0) … … 803 718 } 804 719 #endif 805 806 720 /** 807 721 * Internal: read from a file and inflate the compressed data, … … 819 733 VMDKMARKER *pMarker = (VMDKMARKER *)pExtent->pvCompGrain; 820 734 size_t cbCompSize, cbActuallyRead; 821 822 735 if (!pcvMarker) 823 736 { … … 834 747 pMarker->cbSize = RT_H2LE_U32(pMarker->cbSize); 835 748 } 836 837 749 cbCompSize = RT_LE2H_U32(pMarker->cbSize); 838 750 if (cbCompSize == 0) … … 841 753 return VERR_VD_VMDK_INVALID_FORMAT; 842 754 } 843 844 755 /* Sanity check - the expansion ratio should be much less than 2. */ 845 756 Assert(cbCompSize < 2 * cbToRead); 846 757 if (cbCompSize >= 2 * cbToRead) 847 758 return VERR_VD_VMDK_INVALID_FORMAT; 848 849 759 /* Compressed grain marker. Data follows immediately. 
*/ 850 760 rc = vdIfIoIntFileReadSync(pImage->pIfIo, pExtent->pFile->pStorage, … … 856 766 512) 857 767 - RT_UOFFSETOF(VMDKMARKER, uType)); 858 859 768 if (puLBA) 860 769 *puLBA = RT_LE2H_U64(pMarker->uSector); … … 863 772 + RT_UOFFSETOF(VMDKMARKER, uType), 864 773 512); 865 866 774 #ifdef VMDK_USE_BLOCK_DECOMP_API 867 775 rc = RTZipBlockDecompress(RTZIPTYPE_ZLIB, 0 /*fFlags*/, … … 874 782 InflateState.cbCompGrain = cbCompSize + RT_UOFFSETOF(VMDKMARKER, uType); 875 783 InflateState.pvCompGrain = pExtent->pvCompGrain; 876 877 784 rc = RTZipDecompCreate(&pZip, &InflateState, vmdkFileInflateHelper); 878 785 if (RT_FAILURE(rc)) … … 891 798 return rc; 892 799 } 893 894 800 static DECLCALLBACK(int) vmdkFileDeflateHelper(void *pvUser, const void *pvBuf, size_t cbBuf) 895 801 { 896 802 VMDKCOMPRESSIO *pDeflateState = (VMDKCOMPRESSIO *)pvUser; 897 898 803 Assert(cbBuf); 899 804 if (pDeflateState->iOffset < 0) … … 912 817 return VINF_SUCCESS; 913 818 } 914 915 819 /** 916 820 * Internal: deflate the uncompressed data and write to a file, … … 925 829 PRTZIPCOMP pZip = NULL; 926 830 VMDKCOMPRESSIO DeflateState; 927 928 831 DeflateState.pImage = pImage; 929 832 DeflateState.iOffset = -1; 930 833 DeflateState.cbCompGrain = pExtent->cbCompGrain; 931 834 DeflateState.pvCompGrain = pExtent->pvCompGrain; 932 933 835 rc = RTZipCompCreate(&pZip, &DeflateState, vmdkFileDeflateHelper, 934 836 RTZIPTYPE_ZLIB, RTZIPLEVEL_DEFAULT); … … 943 845 Assert( DeflateState.iOffset > 0 944 846 && (size_t)DeflateState.iOffset <= DeflateState.cbCompGrain); 945 946 847 /* pad with zeroes to get to a full sector size */ 947 848 uint32_t uSize = DeflateState.iOffset; … … 953 854 uSize = uSizeAlign; 954 855 } 955 956 856 if (pcbMarkerData) 957 857 *pcbMarkerData = uSize; 958 959 858 /* Compressed grain marker. Data follows immediately. */ 960 859 VMDKMARKER *pMarker = (VMDKMARKER *)pExtent->pvCompGrain; … … 969 868 return rc; 970 869 } 971 972 973 870 /** 974 871 * Internal: check if all files are closed, prevent leaking resources. … … 978 875 int rc = VINF_SUCCESS, rc2; 979 876 PVMDKFILE pVmdkFile; 980 981 877 Assert(pImage->pFiles == NULL); 982 878 for (pVmdkFile = pImage->pFiles; … … 987 883 pVmdkFile->pszFilename)); 988 884 pImage->pFiles = pVmdkFile->pNext; 989 990 885 rc2 = vmdkFileClose(pImage, &pVmdkFile, pVmdkFile->fDelete); 991 992 886 if (RT_SUCCESS(rc)) 993 887 rc = rc2; … … 995 889 return rc; 996 890 } 997 998 891 /** 999 892 * Internal: truncate a string (at a UTF8 code point boundary) and encode the … … 1004 897 char szEnc[VMDK_ENCODED_COMMENT_MAX + 3]; 1005 898 char *pszDst = szEnc; 1006 1007 899 AssertPtr(psz); 1008 1009 900 for (; *psz; psz = RTStrNextCp(psz)) 1010 901 { … … 1037 928 return RTStrDup(szEnc); 1038 929 } 1039 1040 930 /** 1041 931 * Internal: decode a string and store it into the specified string. … … 1045 935 int rc = VINF_SUCCESS; 1046 936 char szBuf[4]; 1047 1048 937 if (!cb) 1049 938 return VERR_BUFFER_OVERFLOW; 1050 1051 939 AssertPtr(psz); 1052 1053 940 for (; *pszEncoded; pszEncoded = RTStrNextCp(pszEncoded)) 1054 941 { … … 1073 960 else 1074 961 pszDst = RTStrPutCp(pszDst, Cp); 1075 1076 962 /* Need to leave space for terminating NUL. */ 1077 963 if ((size_t)(pszDst - szBuf) + 1 >= cb) … … 1086 972 return rc; 1087 973 } 1088 1089 974 /** 1090 975 * Internal: free all buffers associated with grain directories. 
… … 1103 988 } 1104 989 } 1105 1106 990 /** 1107 991 * Internal: allocate the compressed/uncompressed buffers for streamOptimized … … 1111 995 { 1112 996 int rc = VINF_SUCCESS; 1113 1114 997 if (pImage->uImageFlags & VD_VMDK_IMAGE_FLAGS_STREAM_OPTIMIZED) 1115 998 { … … 1130 1013 rc = VERR_NO_MEMORY; 1131 1014 } 1132 1133 1015 if (RT_FAILURE(rc)) 1134 1016 vmdkFreeStreamBuffers(pExtent); 1135 1017 return rc; 1136 1018 } 1137 1138 1019 /** 1139 1020 * Internal: allocate all buffers associated with grain directories. … … 1144 1025 int rc = VINF_SUCCESS; 1145 1026 size_t cbGD = pExtent->cGDEntries * sizeof(uint32_t); 1146 1147 1027 pExtent->pGD = (uint32_t *)RTMemAllocZ(cbGD); 1148 1028 if (RT_LIKELY(pExtent->pGD)) … … 1157 1037 else 1158 1038 rc = VERR_NO_MEMORY; 1159 1160 1039 if (RT_FAILURE(rc)) 1161 1040 vmdkFreeGrainDirectory(pExtent); 1162 1041 return rc; 1163 1042 } 1164 1165 1043 /** 1166 1044 * Converts the grain directory from little to host endianess. … … 1173 1051 { 1174 1052 uint32_t *pGDTmp = pGD; 1175 1176 1053 for (uint32_t i = 0; i < cGDEntries; i++, pGDTmp++) 1177 1054 *pGDTmp = RT_LE2H_U32(*pGDTmp); 1178 1055 } 1179 1180 1056 /** 1181 1057 * Read the grain directory and allocated grain tables verifying them against … … 1190 1066 int rc = VINF_SUCCESS; 1191 1067 size_t cbGD = pExtent->cGDEntries * sizeof(uint32_t); 1192 1193 1068 AssertReturn(( pExtent->enmType == VMDKETYPE_HOSTED_SPARSE 1194 1069 && pExtent->uSectorGD != VMDK_GD_AT_END 1195 1070 && pExtent->uSectorRGD != VMDK_GD_AT_END), VERR_INTERNAL_ERROR); 1196 1197 1071 rc = vmdkAllocGrainDirectory(pImage, pExtent); 1198 1072 if (RT_SUCCESS(rc)) … … 1206 1080 { 1207 1081 vmdkGrainDirectoryConvToHost(pExtent->pGD, pExtent->cGDEntries); 1208 1209 1082 if ( pExtent->uSectorRGD 1210 1083 && !(pImage->uOpenFlags & VD_OPEN_FLAGS_SKIP_CONSISTENCY_CHECKS)) … … 1218 1091 { 1219 1092 vmdkGrainDirectoryConvToHost(pExtent->pRGD, pExtent->cGDEntries); 1220 1221 1093 /* Check grain table and redundant grain table for consistency. */ 1222 1094 size_t cbGT = pExtent->cGTEntries * sizeof(uint32_t); 1223 1095 size_t cbGTBuffers = cbGT; /* Start with space for one GT. */ 1224 1096 size_t cbGTBuffersMax = _1M; 1225 1226 1097 uint32_t *pTmpGT1 = (uint32_t *)RTMemAlloc(cbGTBuffers); 1227 1098 uint32_t *pTmpGT2 = (uint32_t *)RTMemAlloc(cbGTBuffers); 1228 1229 1099 if ( !pTmpGT1 1230 1100 || !pTmpGT2) 1231 1101 rc = VERR_NO_MEMORY; 1232 1233 1102 size_t i = 0; 1234 1103 uint32_t *pGDTmp = pExtent->pGD; 1235 1104 uint32_t *pRGDTmp = pExtent->pRGD; 1236 1237 1105 /* Loop through all entries. */ 1238 1106 while (i < pExtent->cGDEntries) … … 1241 1109 uint32_t uRGTStart = *pRGDTmp; 1242 1110 size_t cbGTRead = cbGT; 1243 1244 1111 /* If no grain table is allocated skip the entry. 
*/ 1245 1112 if (*pGDTmp == 0 && *pRGDTmp == 0) … … 1248 1115 continue; 1249 1116 } 1250 1251 1117 if (*pGDTmp == 0 || *pRGDTmp == 0 || *pGDTmp == *pRGDTmp) 1252 1118 { … … 1258 1124 break; 1259 1125 } 1260 1261 1126 i++; 1262 1127 pGDTmp++; 1263 1128 pRGDTmp++; 1264 1265 1129 /* 1266 1130 * Read a few tables at once if adjacent to decrease the number … … 1276 1140 continue; 1277 1141 } 1278 1279 1142 if (*pGDTmp == 0 || *pRGDTmp == 0 || *pGDTmp == *pRGDTmp) 1280 1143 { … … 1286 1149 break; 1287 1150 } 1288 1289 1151 /* Check that the start offsets are adjacent.*/ 1290 1152 if ( VMDK_SECTOR2BYTE(uGTStart) + cbGTRead != VMDK_SECTOR2BYTE(*pGDTmp) 1291 1153 || VMDK_SECTOR2BYTE(uRGTStart) + cbGTRead != VMDK_SECTOR2BYTE(*pRGDTmp)) 1292 1154 break; 1293 1294 1155 i++; 1295 1156 pGDTmp++; … … 1297 1158 cbGTRead += cbGT; 1298 1159 } 1299 1300 1160 /* Increase buffers if required. */ 1301 1161 if ( RT_SUCCESS(rc) … … 1315 1175 else 1316 1176 rc = VERR_NO_MEMORY; 1317 1318 1177 if (rc == VERR_NO_MEMORY) 1319 1178 { … … 1322 1181 i -= cbGTRead / cbGT; 1323 1182 cbGTRead = cbGT; 1324 1325 1183 /* Don't try to increase the buffer again in the next run. */ 1326 1184 cbGTBuffersMax = cbGTBuffers; 1327 1185 } 1328 1186 } 1329 1330 1187 if (RT_SUCCESS(rc)) 1331 1188 { … … 1360 1217 } 1361 1218 } /* while (i < pExtent->cGDEntries) */ 1362 1363 1219 /** @todo figure out what to do for unclean VMDKs. */ 1364 1220 if (pTmpGT1) … … 1376 1232 N_("VMDK: could not read grain directory in '%s': %Rrc"), pExtent->pszFullname, rc); 1377 1233 } 1378 1379 1234 if (RT_FAILURE(rc)) 1380 1235 vmdkFreeGrainDirectory(pExtent); 1381 1236 return rc; 1382 1237 } 1383 1384 1238 /** 1385 1239 * Creates a new grain directory for the given extent at the given start sector. … … 1400 1254 size_t cbGTRounded; 1401 1255 uint64_t cbOverhead; 1402 1403 1256 if (fPreAlloc) 1404 1257 { … … 1414 1267 cbOverhead = VMDK_SECTOR2BYTE(uStartSector) + cbGDRounded; 1415 1268 } 1416 1417 1269 /* For streamOptimized extents there is only one grain directory, 1418 1270 * and for all others take redundant grain directory into account. */ … … 1429 1281 rc = vdIfIoIntFileSetSize(pImage->pIfIo, pExtent->pFile->pStorage, cbOverhead); 1430 1282 } 1431 1432 1283 if (RT_SUCCESS(rc)) 1433 1284 { 1434 1285 pExtent->uAppendPosition = cbOverhead; 1435 1286 pExtent->cOverheadSectors = VMDK_BYTE2SECTOR(cbOverhead); 1436 1437 1287 if (pImage->uImageFlags & VD_VMDK_IMAGE_FLAGS_STREAM_OPTIMIZED) 1438 1288 { … … 1445 1295 pExtent->uSectorGD = uStartSector + VMDK_BYTE2SECTOR(cbGDRounded + cbGTRounded); 1446 1296 } 1447 1448 1297 rc = vmdkAllocStreamBuffers(pImage, pExtent); 1449 1298 if (RT_SUCCESS(rc)) … … 1455 1304 uint32_t uGTSectorLE; 1456 1305 uint64_t uOffsetSectors; 1457 1458 1306 if (pExtent->pRGD) 1459 1307 { … … 1475 1323 } 1476 1324 } 1477 1478 1325 if (RT_SUCCESS(rc)) 1479 1326 { … … 1498 1345 } 1499 1346 } 1500 1501 1347 if (RT_FAILURE(rc)) 1502 1348 vmdkFreeGrainDirectory(pExtent); 1503 1349 return rc; 1504 1350 } 1505 1506 1351 /** 1507 1352 * Unquotes the given string returning the result in a separate buffer. … … 1521 1366 char *pszQ; 1522 1367 char *pszUnquoted; 1523 1524 1368 /* Skip over whitespace. 
*/ 1525 1369 while (*pszStr == ' ' || *pszStr == '\t') 1526 1370 pszStr++; 1527 1528 1371 if (*pszStr != '"') 1529 1372 { … … 1540 1383 pImage->pszFilename, pszStart); 1541 1384 } 1542 1543 1385 pszUnquoted = (char *)RTMemTmpAlloc(pszQ - pszStr + 1); 1544 1386 if (!pszUnquoted) … … 1551 1393 return VINF_SUCCESS; 1552 1394 } 1553 1554 1395 static int vmdkDescInitStr(PVMDKIMAGE pImage, PVMDKDESCRIPTOR pDescriptor, 1555 1396 const char *pszLine) … … 1557 1398 char *pEnd = pDescriptor->aLines[pDescriptor->cLines]; 1558 1399 ssize_t cbDiff = strlen(pszLine) + 1; 1559 1560 1400 if ( pDescriptor->cLines >= VMDK_DESCRIPTOR_LINES_MAX - 1 1561 1401 && pEnd - pDescriptor->aLines[0] > (ptrdiff_t)pDescriptor->cbDescAlloc - cbDiff) 1562 1402 return vdIfError(pImage->pIfError, VERR_BUFFER_OVERFLOW, RT_SRC_POS, N_("VMDK: descriptor too big in '%s'"), pImage->pszFilename); 1563 1564 1403 memcpy(pEnd, pszLine, cbDiff); 1565 1404 pDescriptor->cLines++; 1566 1405 pDescriptor->aLines[pDescriptor->cLines] = pEnd + cbDiff; 1567 1406 pDescriptor->fDirty = true; 1568 1569 1407 return VINF_SUCCESS; 1570 1408 } 1571 1572 1409 static bool vmdkDescGetStr(PVMDKDESCRIPTOR pDescriptor, unsigned uStart, 1573 1410 const char *pszKey, const char **ppszValue) … … 1575 1412 size_t cbKey = strlen(pszKey); 1576 1413 const char *pszValue; 1577 1578 1414 while (uStart != 0) 1579 1415 { … … 1594 1430 return !!uStart; 1595 1431 } 1596 1597 1432 static int vmdkDescSetStr(PVMDKIMAGE pImage, PVMDKDESCRIPTOR pDescriptor, 1598 1433 unsigned uStart, … … 1602 1437 size_t cbKey = strlen(pszKey); 1603 1438 unsigned uLast = 0; 1604 1605 1439 while (uStart != 0) 1606 1440 { … … 1637 1471 > (ptrdiff_t)pDescriptor->cbDescAlloc - cbDiff) 1638 1472 return vdIfError(pImage->pIfError, VERR_BUFFER_OVERFLOW, RT_SRC_POS, N_("VMDK: descriptor too big in '%s'"), pImage->pszFilename); 1639 1640 1473 memmove(pszTmp + cbNewVal, pszTmp + cbOldVal, 1641 1474 pDescriptor->aLines[pDescriptor->cLines] - pszTmp - cbOldVal); … … 1700 1533 for (unsigned i = uStart + 1; i <= pDescriptor->cLines; i++) 1701 1534 pDescriptor->aLines[i] += cbDiff; 1702 1703 1535 /* Adjust starting line numbers of following descriptor sections. */ 1704 1536 if (uStart <= pDescriptor->uFirstExtent) … … 1710 1542 return VINF_SUCCESS; 1711 1543 } 1712 1713 1544 static int vmdkDescBaseGetU32(PVMDKDESCRIPTOR pDescriptor, const char *pszKey, 1714 1545 uint32_t *puValue) 1715 1546 { 1716 1547 const char *pszValue; 1717 1718 1548 if (!vmdkDescGetStr(pDescriptor, pDescriptor->uFirstDesc, pszKey, 1719 1549 &pszValue)) … … 1721 1551 return RTStrToUInt32Ex(pszValue, NULL, 10, puValue); 1722 1552 } 1723 1724 1553 /** 1725 1554 * Returns the value of the given key as a string allocating the necessary memory. 
… … 1738 1567 const char *pszValue; 1739 1568 char *pszValueUnquoted; 1740 1741 1569 if (!vmdkDescGetStr(pDescriptor, pDescriptor->uFirstDesc, pszKey, 1742 1570 &pszValue)) … … 1748 1576 return rc; 1749 1577 } 1750 1751 1578 static int vmdkDescBaseSetStr(PVMDKIMAGE pImage, PVMDKDESCRIPTOR pDescriptor, 1752 1579 const char *pszKey, const char *pszValue) 1753 1580 { 1754 1581 char *pszValueQuoted; 1755 1756 1582 RTStrAPrintf(&pszValueQuoted, "\"%s\"", pszValue); 1757 1583 if (!pszValueQuoted) … … 1762 1588 return rc; 1763 1589 } 1764 1765 1590 static void vmdkDescExtRemoveDummy(PVMDKIMAGE pImage, 1766 1591 PVMDKDESCRIPTOR pDescriptor) … … 1769 1594 unsigned uEntry = pDescriptor->uFirstExtent; 1770 1595 ssize_t cbDiff; 1771 1772 1596 if (!uEntry) 1773 1597 return; 1774 1775 1598 cbDiff = strlen(pDescriptor->aLines[uEntry]) + 1; 1776 1599 /* Move everything including \0 in the entry marking the end of buffer. */ … … 1788 1611 if (pDescriptor->uFirstDDB) 1789 1612 pDescriptor->uFirstDDB--; 1790 1791 1613 return; 1792 1614 } 1793 1615 static void vmdkDescExtRemoveByLine(PVMDKIMAGE pImage, 1616 PVMDKDESCRIPTOR pDescriptor, unsigned uLine) 1617 { 1618 RT_NOREF1(pImage); 1619 unsigned uEntry = uLine; 1620 ssize_t cbDiff; 1621 if (!uEntry) 1622 return; 1623 cbDiff = strlen(pDescriptor->aLines[uEntry]) + 1; 1624 /* Move everything including \0 in the entry marking the end of buffer. */ 1625 memmove(pDescriptor->aLines[uEntry], pDescriptor->aLines[uEntry + 1], 1626 pDescriptor->aLines[pDescriptor->cLines] - pDescriptor->aLines[uEntry + 1] + 1); 1627 for (unsigned i = uEntry; i <= pDescriptor->cLines; i++) 1628 { 1629 if (i != uEntry) 1630 pDescriptor->aLines[i - 1] = pDescriptor->aLines[i] - cbDiff; 1631 if (pDescriptor->aNextLines[i]) 1632 pDescriptor->aNextLines[i - 1] = pDescriptor->aNextLines[i] - 1; 1633 else 1634 pDescriptor->aNextLines[i - 1] = 0; 1635 } 1636 pDescriptor->cLines--; 1637 if (pDescriptor->uFirstDDB) 1638 pDescriptor->uFirstDDB--; 1639 return; 1640 } 1794 1641 static int vmdkDescExtInsert(PVMDKIMAGE pImage, PVMDKDESCRIPTOR pDescriptor, 1795 1642 VMDKACCESS enmAccess, uint64_t cNominalSectors, … … 1803 1650 char szExt[1024]; 1804 1651 ssize_t cbDiff; 1805 1806 1652 Assert((unsigned)enmAccess < RT_ELEMENTS(apszAccess)); 1807 1653 Assert((unsigned)enmType < RT_ELEMENTS(apszType)); 1808 1809 1654 /* Find last entry in extent description. */ 1810 1655 while (uStart) … … 1814 1659 uStart = pDescriptor->aNextLines[uStart]; 1815 1660 } 1816 1817 1661 if (enmType == VMDKETYPE_ZERO) 1818 1662 { … … 1833 1677 } 1834 1678 cbDiff = strlen(szExt) + 1; 1835 1836 1679 /* Check for buffer overflow. 
*/ 1837 1680 if ( (pDescriptor->cLines >= VMDK_DESCRIPTOR_LINES_MAX - 1) 1838 1681 || ( pDescriptor->aLines[pDescriptor->cLines] 1839 1682 - pDescriptor->aLines[0] > (ptrdiff_t)pDescriptor->cbDescAlloc - cbDiff)) 1840 return vdIfError(pImage->pIfError, VERR_BUFFER_OVERFLOW, RT_SRC_POS, N_("VMDK: descriptor too big in '%s'"), pImage->pszFilename); 1683 { 1684 if ((pImage->uImageFlags & VD_VMDK_IMAGE_FLAGS_SPLIT_2G) 1685 && !(pDescriptor->cLines >= VMDK_DESCRIPTOR_LINES_MAX - 1)) 1686 { 1687 pImage->cbDescAlloc *= 2; 1688 pDescriptor->cbDescAlloc *= 2; 1689 } 1690 else 1691 return vdIfError(pImage->pIfError, VERR_BUFFER_OVERFLOW, RT_SRC_POS, N_("VMDK: descriptor too big in '%s'"), pImage->pszFilename); 1692 } 1841 1693 1842 1694 for (unsigned i = pDescriptor->cLines + 1; i > uLast + 1; i--) … … 1858 1710 for (unsigned i = uStart + 1; i <= pDescriptor->cLines; i++) 1859 1711 pDescriptor->aLines[i] += cbDiff; 1860 1861 1712 /* Adjust starting line numbers of following descriptor sections. */ 1862 1713 if (uStart <= pDescriptor->uFirstDDB) 1863 1714 pDescriptor->uFirstDDB++; 1864 1865 1715 pDescriptor->fDirty = true; 1866 1716 return VINF_SUCCESS; 1867 1717 } 1868 1869 1718 /** 1870 1719 * Returns the value of the given key from the DDB as a string allocating … … 1884 1733 const char *pszValue; 1885 1734 char *pszValueUnquoted; 1886 1887 1735 if (!vmdkDescGetStr(pDescriptor, pDescriptor->uFirstDDB, pszKey, 1888 1736 &pszValue)) … … 1894 1742 return rc; 1895 1743 } 1896 1897 1744 static int vmdkDescDDBGetU32(PVMDKIMAGE pImage, PVMDKDESCRIPTOR pDescriptor, 1898 1745 const char *pszKey, uint32_t *puValue) … … 1900 1747 const char *pszValue; 1901 1748 char *pszValueUnquoted; 1902 1903 1749 if (!vmdkDescGetStr(pDescriptor, pDescriptor->uFirstDDB, pszKey, 1904 1750 &pszValue)) … … 1911 1757 return rc; 1912 1758 } 1913 1914 1759 static int vmdkDescDDBGetUuid(PVMDKIMAGE pImage, PVMDKDESCRIPTOR pDescriptor, 1915 1760 const char *pszKey, PRTUUID pUuid) … … 1917 1762 const char *pszValue; 1918 1763 char *pszValueUnquoted; 1919 1920 1764 if (!vmdkDescGetStr(pDescriptor, pDescriptor->uFirstDDB, pszKey, 1921 1765 &pszValue)) … … 1928 1772 return rc; 1929 1773 } 1930 1931 1774 static int vmdkDescDDBSetStr(PVMDKIMAGE pImage, PVMDKDESCRIPTOR pDescriptor, 1932 1775 const char *pszKey, const char *pszVal) … … 1934 1777 int rc; 1935 1778 char *pszValQuoted; 1936 1937 1779 if (pszVal) 1938 1780 { … … 1949 1791 return rc; 1950 1792 } 1951 1952 1793 static int vmdkDescDDBSetUuid(PVMDKIMAGE pImage, PVMDKDESCRIPTOR pDescriptor, 1953 1794 const char *pszKey, PCRTUUID pUuid) 1954 1795 { 1955 1796 char *pszUuid; 1956 1957 1797 RTStrAPrintf(&pszUuid, "\"%RTuuid\"", pUuid); 1958 1798 if (!pszUuid) … … 1963 1803 return rc; 1964 1804 } 1965 1966 1805 static int vmdkDescDDBSetU32(PVMDKIMAGE pImage, PVMDKDESCRIPTOR pDescriptor, 1967 1806 const char *pszKey, uint32_t uValue) 1968 1807 { 1969 1808 char *pszValue; 1970 1971 1809 RTStrAPrintf(&pszValue, "\"%d\"", uValue); 1972 1810 if (!pszValue) … … 1977 1815 return rc; 1978 1816 } 1979 1980 1817 /** 1981 1818 * Splits the descriptor data into individual lines checking for correct line … … 1991 1828 unsigned cLine = 0; 1992 1829 int rc = VINF_SUCCESS; 1993 1994 1830 while ( RT_SUCCESS(rc) 1995 1831 && *pszTmp != '\0') … … 2002 1838 break; 2003 1839 } 2004 2005 1840 while (*pszTmp != '\0' && *pszTmp != '\n') 2006 1841 { … … 2020 1855 pszTmp++; 2021 1856 } 2022 2023 1857 if (RT_FAILURE(rc)) 2024 1858 break; 2025 2026 1859 /* Get rid of LF character. 
*/ 2027 1860 if (*pszTmp == '\n') … … 2031 1864 } 2032 1865 } 2033 2034 1866 if (RT_SUCCESS(rc)) 2035 1867 { … … 2038 1870 pDesc->aLines[cLine] = pszTmp; 2039 1871 } 2040 2041 1872 return rc; 2042 1873 } 2043 2044 1874 static int vmdkPreprocessDescriptor(PVMDKIMAGE pImage, char *pDescData, 2045 1875 size_t cbDescData, PVMDKDESCRIPTOR pDescriptor) … … 2058 1888 { 2059 1889 unsigned uLastNonEmptyLine = 0; 2060 2061 1890 /* Initialize those, because we need to be able to reopen an image. */ 2062 1891 pDescriptor->uFirstDesc = 0; … … 2124 1953 } 2125 1954 } 2126 2127 1955 return rc; 2128 1956 } 2129 2130 1957 static int vmdkDescSetPCHSGeometry(PVMDKIMAGE pImage, 2131 1958 PCVDGEOMETRY pPCHSGeometry) … … 2146 1973 return rc; 2147 1974 } 2148 2149 1975 static int vmdkDescSetLCHSGeometry(PVMDKIMAGE pImage, 2150 1976 PCVDGEOMETRY pLCHSGeometry) … … 2157 1983 rc = vmdkDescDDBSetU32(pImage, &pImage->Descriptor, 2158 1984 VMDK_DDB_GEO_LCHS_HEADS, 2159 2160 1985 pLCHSGeometry->cHeads); 2161 1986 if (RT_FAILURE(rc)) … … 2166 1991 return rc; 2167 1992 } 2168 2169 1993 static int vmdkCreateDescriptor(PVMDKIMAGE pImage, char *pDescData, 2170 1994 size_t cbDescData, PVMDKDESCRIPTOR pDescriptor) … … 2178 2002 pDescriptor->aLines[pDescriptor->cLines] = pDescData; 2179 2003 memset(pDescriptor->aNextLines, '\0', sizeof(pDescriptor->aNextLines)); 2180 2181 2004 int rc = vmdkDescInitStr(pImage, pDescriptor, "# Disk DescriptorFile"); 2182 2005 if (RT_SUCCESS(rc)) … … 2210 2033 { 2211 2034 pDescriptor->uFirstDDB = pDescriptor->cLines - 1; 2212 2213 2035 /* Now that the framework is in place, use the normal functions to insert 2214 2036 * the remaining keys. */ … … 2223 2045 if (RT_SUCCESS(rc)) 2224 2046 rc = vmdkDescDDBSetStr(pImage, pDescriptor, "ddb.adapterType", "ide"); 2225 2226 2047 return rc; 2227 2048 } 2228 2229 2049 static int vmdkParseDescriptor(PVMDKIMAGE pImage, char *pDescData, size_t cbDescData) 2230 2050 { … … 2233 2053 unsigned uLine; 2234 2054 unsigned i; 2235 2236 2055 rc = vmdkPreprocessDescriptor(pImage, pDescData, cbDescData, 2237 2056 &pImage->Descriptor); 2238 2057 if (RT_FAILURE(rc)) 2239 2058 return rc; 2240 2241 2059 /* Check version, must be 1. */ 2242 2060 uint32_t uVersion; … … 2246 2064 if (uVersion != 1) 2247 2065 return vdIfError(pImage->pIfError, VERR_VD_VMDK_UNSUPPORTED_VERSION, RT_SRC_POS, N_("VMDK: unsupported format version in descriptor in '%s'"), pImage->pszFilename); 2248 2249 2066 /* Get image creation type and determine image flags. */ 2250 2067 char *pszCreateType = NULL; /* initialized to make gcc shut up */ … … 2264 2081 pImage->uImageFlags |= VD_IMAGE_FLAGS_FIXED | VD_VMDK_IMAGE_FLAGS_ESX; 2265 2082 RTMemTmpFree(pszCreateType); 2266 2267 2083 /* Count the number of extent config entries. */ 2268 2084 for (uLine = pImage->Descriptor.uFirstExtent, cExtents = 0; … … 2270 2086 uLine = pImage->Descriptor.aNextLines[uLine], cExtents++) 2271 2087 /* nothing */; 2272 2273 2088 if (!pImage->pDescData && cExtents != 1) 2274 2089 { … … 2276 2091 return vdIfError(pImage->pIfError, VERR_VD_VMDK_INVALID_HEADER, RT_SRC_POS, N_("VMDK: monolithic image may only have one extent in '%s'"), pImage->pszFilename); 2277 2092 } 2278 2279 2093 if (pImage->pDescData) 2280 2094 { … … 2284 2098 return rc; 2285 2099 } 2286 2287 2100 for (i = 0, uLine = pImage->Descriptor.uFirstExtent; 2288 2101 i < cExtents; i++, uLine = pImage->Descriptor.aNextLines[uLine]) 2289 2102 { 2290 2103 char *pszLine = pImage->Descriptor.aLines[uLine]; 2291 2292 2104 /* Access type of the extent. 
*/ 2293 2105 if (!strncmp(pszLine, "RW", 2)) … … 2310 2122 if (*pszLine++ != ' ') 2311 2123 return vdIfError(pImage->pIfError, VERR_VD_VMDK_INVALID_HEADER, RT_SRC_POS, N_("VMDK: parse error in extent description in '%s'"), pImage->pszFilename); 2312 2313 2124 /* Nominal size of the extent. */ 2314 2125 rc = RTStrToUInt64Ex(pszLine, &pszLine, 10, … … 2318 2129 if (*pszLine++ != ' ') 2319 2130 return vdIfError(pImage->pIfError, VERR_VD_VMDK_INVALID_HEADER, RT_SRC_POS, N_("VMDK: parse error in extent description in '%s'"), pImage->pszFilename); 2320 2321 2131 /* Type of the extent. */ 2322 2132 if (!strncmp(pszLine, "SPARSE", 6)) … … 2342 2152 else 2343 2153 return vdIfError(pImage->pIfError, VERR_VD_VMDK_INVALID_HEADER, RT_SRC_POS, N_("VMDK: parse error in extent description in '%s'"), pImage->pszFilename); 2344 2345 2154 if (pImage->pExtents[i].enmType == VMDKETYPE_ZERO) 2346 2155 { … … 2357 2166 if (*pszLine++ != ' ') 2358 2167 return vdIfError(pImage->pIfError, VERR_VD_VMDK_INVALID_HEADER, RT_SRC_POS, N_("VMDK: parse error in extent description in '%s'"), pImage->pszFilename); 2359 2360 2168 /* Basename of the image. Surrounded by quotes. */ 2361 2169 char *pszBasename; … … 2376 2184 } 2377 2185 } 2378 2379 2186 if (*pszLine != '\0') 2380 2187 return vdIfError(pImage->pIfError, VERR_VD_VMDK_INVALID_HEADER, RT_SRC_POS, N_("VMDK: parse error in extent description in '%s'"), pImage->pszFilename); 2381 2188 } 2382 2189 } 2383 2384 2190 /* Determine PCHS geometry (autogenerate if necessary). */ 2385 2191 rc = vmdkDescDDBGetU32(pImage, &pImage->Descriptor, … … 2416 2222 pImage->PCHSGeometry.cSectors = 63; 2417 2223 } 2418 2419 2224 /* Determine LCHS geometry (set to 0 if not specified). */ 2420 2225 rc = vmdkDescDDBGetU32(pImage, &pImage->Descriptor, … … 2447 2252 pImage->LCHSGeometry.cSectors = 0; 2448 2253 } 2449 2450 2254 /* Get image UUID. */ 2451 2255 rc = vmdkDescDDBGetUuid(pImage, &pImage->Descriptor, VMDK_DDB_IMAGE_UUID, … … 2471 2275 else if (RT_FAILURE(rc)) 2472 2276 return rc; 2473 2474 2277 /* Get image modification UUID. */ 2475 2278 rc = vmdkDescDDBGetUuid(pImage, &pImage->Descriptor, … … 2497 2300 else if (RT_FAILURE(rc)) 2498 2301 return rc; 2499 2500 2302 /* Get UUID of parent image. */ 2501 2303 rc = vmdkDescDDBGetUuid(pImage, &pImage->Descriptor, VMDK_DDB_PARENT_UUID, … … 2521 2323 else if (RT_FAILURE(rc)) 2522 2324 return rc; 2523 2524 2325 /* Get parent image modification UUID. */ 2525 2326 rc = vmdkDescDDBGetUuid(pImage, &pImage->Descriptor, … … 2545 2346 else if (RT_FAILURE(rc)) 2546 2347 return rc; 2547 2548 2348 return VINF_SUCCESS; 2549 2349 } 2550 2551 2350 /** 2552 2351 * Internal : Prepares the descriptor to write to the image. … … 2556 2355 { 2557 2356 int rc = VINF_SUCCESS; 2558 2559 2357 /* 2560 2358 * Allocate temporary descriptor buffer. 
… … 2565 2363 char *pszDescriptor = (char *)RTMemAllocZ(cbDescriptor); 2566 2364 size_t offDescriptor = 0; 2567 2568 2365 if (!pszDescriptor) 2569 2366 return VERR_NO_MEMORY; 2570 2571 2367 for (unsigned i = 0; i < pImage->Descriptor.cLines; i++) 2572 2368 { 2573 2369 const char *psz = pImage->Descriptor.aLines[i]; 2574 2370 size_t cb = strlen(psz); 2575 2576 2371 /* 2577 2372 * Increase the descriptor if there is no limit and … … 2589 2384 char *pszDescriptorNew = NULL; 2590 2385 LogFlow(("Increasing descriptor cache\n")); 2591 2592 2386 pszDescriptorNew = (char *)RTMemRealloc(pszDescriptor, cbDescriptor + cb + 4 * _1K); 2593 2387 if (!pszDescriptorNew) … … 2600 2394 } 2601 2395 } 2602 2603 2396 if (cb > 0) 2604 2397 { … … 2606 2399 offDescriptor += cb; 2607 2400 } 2608 2609 2401 memcpy(pszDescriptor + offDescriptor, "\n", 1); 2610 2402 offDescriptor++; 2611 2403 } 2612 2613 2404 if (RT_SUCCESS(rc)) 2614 2405 { … … 2618 2409 else if (pszDescriptor) 2619 2410 RTMemFree(pszDescriptor); 2620 2621 2411 return rc; 2622 2412 } 2623 2624 2413 /** 2625 2414 * Internal: write/update the descriptor part of the image. … … 2633 2422 void *pvDescriptor = NULL; 2634 2423 size_t cbDescriptor; 2635 2636 2424 if (pImage->pDescData) 2637 2425 { … … 2651 2439 if (pDescFile == NULL) 2652 2440 return VERR_INVALID_PARAMETER; 2653 2654 2441 rc = vmdkDescriptorPrepare(pImage, cbLimit, &pvDescriptor, &cbDescriptor); 2655 2442 if (RT_SUCCESS(rc)) … … 2663 2450 rc = vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: error writing descriptor in '%s'"), pImage->pszFilename); 2664 2451 } 2665 2666 2452 if (RT_SUCCESS(rc) && !cbLimit) 2667 2453 { … … 2670 2456 rc = vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: error truncating descriptor in '%s'"), pImage->pszFilename); 2671 2457 } 2672 2673 2458 if (RT_SUCCESS(rc)) 2674 2459 pImage->Descriptor.fDirty = false; 2675 2676 2460 if (pvDescriptor) 2677 2461 RTMemFree(pvDescriptor); 2678 2462 return rc; 2679 2680 } 2681 2463 } 2682 2464 /** 2683 2465 * Internal: validate the consistency check values in a binary header. … … 2713 2495 return rc; 2714 2496 } 2715 2716 2497 /** 2717 2498 * Internal: read metadata belonging to an extent with binary header, i.e. 
… … 2723 2504 SparseExtentHeader Header; 2724 2505 int rc; 2725 2726 2506 if (!fMagicAlreadyRead) 2727 2507 rc = vdIfIoIntFileReadSync(pImage->pIfIo, pExtent->pFile->pStorage, 0, … … 2736 2516 - RT_UOFFSETOF(SparseExtentHeader, version)); 2737 2517 } 2738 2739 2518 if (RT_SUCCESS(rc)) 2740 2519 { … … 2743 2522 { 2744 2523 uint64_t cbFile = 0; 2745 2746 2524 if ( (RT_LE2H_U32(Header.flags) & RT_BIT(17)) 2747 2525 && RT_LE2H_U64(Header.gdOffset) == VMDK_GD_AT_END) 2748 2526 pExtent->fFooter = true; 2749 2750 2527 if ( !(pImage->uOpenFlags & VD_OPEN_FLAGS_READONLY) 2751 2528 || ( pExtent->fFooter … … 2756 2533 rc = vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: cannot get size of '%s'"), pExtent->pszFullname); 2757 2534 } 2758 2759 2535 if (RT_SUCCESS(rc)) 2760 2536 { 2761 2537 if (!(pImage->uOpenFlags & VD_OPEN_FLAGS_READONLY)) 2762 2538 pExtent->uAppendPosition = RT_ALIGN_64(cbFile, 512); 2763 2764 2539 if ( pExtent->fFooter 2765 2540 && ( !(pImage->uOpenFlags & VD_OPEN_FLAGS_READONLY) … … 2775 2550 rc = VERR_VD_VMDK_INVALID_HEADER; 2776 2551 } 2777 2778 2552 if (RT_SUCCESS(rc)) 2779 2553 rc = vmdkValidateHeader(pImage, pExtent, &Header); … … 2781 2555 pExtent->uAppendPosition = 0; 2782 2556 } 2783 2784 2557 if (RT_SUCCESS(rc)) 2785 2558 { … … 2804 2577 pExtent->uSectorRGD = 0; 2805 2578 } 2806 2807 2579 if (pExtent->uDescriptorSector && !pExtent->cDescriptorSectors) 2808 2580 rc = vdIfError(pImage->pIfError, VERR_VD_VMDK_INVALID_HEADER, RT_SRC_POS, 2809 2581 N_("VMDK: inconsistent embedded descriptor config in '%s'"), pExtent->pszFullname); 2810 2811 2582 if ( RT_SUCCESS(rc) 2812 2583 && ( pExtent->uSectorGD == VMDK_GD_AT_END … … 2816 2587 rc = vdIfError(pImage->pIfError, VERR_VD_VMDK_INVALID_HEADER, RT_SRC_POS, 2817 2588 N_("VMDK: cannot resolve grain directory offset in '%s'"), pExtent->pszFullname); 2818 2819 2589 if (RT_SUCCESS(rc)) 2820 2590 { … … 2827 2597 pExtent->cSectorsPerGDE = cSectorsPerGDE; 2828 2598 pExtent->cGDEntries = (pExtent->cSectors + cSectorsPerGDE - 1) / cSectorsPerGDE; 2829 2830 2599 /* Fix up the number of descriptor sectors, as some flat images have 2831 2600 * really just one, and this causes failures when inserting the UUID … … 2850 2619 rc = VERR_VD_VMDK_INVALID_HEADER; 2851 2620 } 2852 2853 2621 if (RT_FAILURE(rc)) 2854 2622 vmdkFreeExtentData(pImage, pExtent, false); 2855 2856 2623 return rc; 2857 2624 } 2858 2859 2625 /** 2860 2626 * Internal: read additional metadata belonging to an extent. For those … … 2864 2630 { 2865 2631 int rc = VINF_SUCCESS; 2866 2867 2632 /* disabled the check as there are too many truncated vmdk images out there */ 2868 2633 #ifdef VBOX_WITH_VMDK_STRICT_SIZE_CHECK … … 2904 2669 if (pImage->uImageFlags & VD_VMDK_IMAGE_FLAGS_STREAM_OPTIMIZED) 2905 2670 pExtent->uAppendPosition = 0; 2906 2907 2671 if ( !(pImage->uImageFlags & VD_VMDK_IMAGE_FLAGS_STREAM_OPTIMIZED) 2908 2672 || !(pImage->uOpenFlags & VD_OPEN_FLAGS_READONLY) … … 2918 2682 } 2919 2683 } 2920 2921 2684 if (RT_FAILURE(rc)) 2922 2685 vmdkFreeExtentData(pImage, pExtent, false); 2923 2924 2686 return rc; 2925 2687 } 2926 2927 2688 /** 2928 2689 * Internal: write/update the metadata for a sparse extent. 
… … 2932 2693 { 2933 2694 SparseExtentHeader Header; 2934 2935 2695 memset(&Header, '\0', sizeof(Header)); 2936 2696 Header.magicNumber = RT_H2LE_U32(VMDK_SPARSE_MAGICNUMBER); … … 2975 2735 Header.doubleEndLineChar2 = '\n'; 2976 2736 Header.compressAlgorithm = RT_H2LE_U16(pExtent->uCompression); 2977 2978 2737 int rc = vdIfIoIntFileWriteMeta(pImage->pIfIo, pExtent->pFile->pStorage, 2979 2738 uOffset, &Header, sizeof(Header), … … 2983 2742 return rc; 2984 2743 } 2985 2986 2744 /** 2987 2745 * Internal: free the buffers used for streamOptimized images. … … 3000 2758 } 3001 2759 } 3002 3003 2760 /** 3004 2761 * Internal: free the memory used by the extent data structure, optionally … … 3014 2771 { 3015 2772 int rc = VINF_SUCCESS; 3016 3017 2773 vmdkFreeGrainDirectory(pExtent); 3018 2774 if (pExtent->pDescData) … … 3041 2797 } 3042 2798 vmdkFreeStreamBuffers(pExtent); 3043 3044 2799 return rc; 3045 2800 } 3046 3047 2801 /** 3048 2802 * Internal: allocate grain table cache if necessary for this image. … … 3051 2805 { 3052 2806 PVMDKEXTENT pExtent; 3053 3054 2807 /* Allocate grain table cache if any sparse extent is present. */ 3055 2808 for (unsigned i = 0; i < pImage->cExtents; i++) … … 3071 2824 } 3072 2825 } 3073 3074 2826 return VINF_SUCCESS; 3075 2827 } 3076 3077 2828 /** 3078 2829 * Internal: allocate the given number of extents. … … 3102 2853 else 3103 2854 rc = VERR_NO_MEMORY; 3104 3105 2855 return rc; 3106 2856 } 3107 2857 3108 2858 /** 3109 * Internal: allocate and describes an additional, file-backed extent 3110 * for the given size. Preserves original extents. 2859 * Internal: Create an additional file backed extent in split images. 2860 * Supports split sparse and flat images. 2861 * 2862 * @returns VBox status code. 2863 * @param pImage VMDK image instance. 2864 * @param cbSize Desiried size in bytes of new extent. 3111 2865 */ 3112 2866 static int vmdkAddFileBackedExtent(PVMDKIMAGE pImage, uint64_t cbSize) 3113 2867 { 3114 2868 int rc = VINF_SUCCESS; 2869 unsigned uImageFlags = pImage->uImageFlags; 2870 2871 /* Check for unsupported image type. */ 2872 if ((uImageFlags & VD_VMDK_IMAGE_FLAGS_ESX) 2873 || (uImageFlags & VD_VMDK_IMAGE_FLAGS_STREAM_OPTIMIZED) 2874 || (uImageFlags & VD_VMDK_IMAGE_FLAGS_RAWDISK)) 2875 { 2876 return VERR_NOT_SUPPORTED; 2877 } 2878 2879 /* Allocate array of extents and copy existing extents to it. */ 3115 2880 PVMDKEXTENT pNewExtents = (PVMDKEXTENT)RTMemAllocZ((pImage->cExtents + 1) * sizeof(VMDKEXTENT)); 3116 if (pNewExtents) 3117 { 3118 memcpy(pNewExtents, pImage->pExtents, pImage->cExtents * sizeof(VMDKEXTENT)); 3119 PVMDKEXTENT pExtent = &pNewExtents[pImage->cExtents]; 3120 3121 pExtent->pFile = NULL; 3122 pExtent->pszBasename = NULL; 3123 pExtent->pszFullname = NULL; 3124 pExtent->pGD = NULL; 3125 pExtent->pRGD = NULL; 3126 pExtent->pDescData = NULL; 3127 pExtent->uVersion = 1; 3128 pExtent->uCompression = VMDK_COMPRESSION_NONE; 3129 pExtent->uExtent = pImage->cExtents; 3130 pExtent->pImage = pImage; 3131 pExtent->cNominalSectors = VMDK_BYTE2SECTOR(cbSize); 2881 if (!pNewExtents) 2882 { 2883 return VERR_NO_MEMORY; 2884 } 2885 2886 memcpy(pNewExtents, pImage->pExtents, pImage->cExtents * sizeof(VMDKEXTENT)); 2887 /** @todo r=jack - free old extent pointer */ 2888 2889 /* Locate newly created extent and populate default metadata. 
*/ 2890 PVMDKEXTENT pExtent = &pNewExtents[pImage->cExtents]; 2891 2892 pExtent->pFile = NULL; 2893 pExtent->pszBasename = NULL; 2894 pExtent->pszFullname = NULL; 2895 pExtent->pGD = NULL; 2896 pExtent->pRGD = NULL; 2897 pExtent->pDescData = NULL; 2898 pExtent->uVersion = 1; 2899 pExtent->uCompression = VMDK_COMPRESSION_NONE; 2900 pExtent->uExtent = pImage->cExtents; 2901 pExtent->pImage = pImage; 2902 pExtent->cNominalSectors = VMDK_BYTE2SECTOR(cbSize); 2903 pExtent->enmAccess = VMDKACCESS_READWRITE; 2904 pExtent->uSectorOffset = 0; 2905 pExtent->fMetaDirty = true; 2906 2907 /* Apply image type specific meta data. */ 2908 if (uImageFlags & VD_IMAGE_FLAGS_FIXED) 2909 { 3132 2910 pExtent->enmType = VMDKETYPE_FLAT; 3133 pExtent->enmAccess = VMDKACCESS_READWRITE; 3134 pExtent->uSectorOffset = 0; 3135 3136 char *pszBasenameSubstr = RTPathFilename(pImage->pszFilename); 3137 AssertPtr(pszBasenameSubstr); 3138 3139 char *pszBasenameSuff = RTPathSuffix(pszBasenameSubstr); 3140 char *pszBasenameBase = RTStrDup(pszBasenameSubstr); 3141 RTPathStripSuffix(pszBasenameBase); 3142 char *pszTmp; 3143 size_t cbTmp; 3144 3145 if (pImage->uImageFlags & VD_IMAGE_FLAGS_FIXED) 3146 RTStrAPrintf(&pszTmp, "%s-f%03d%s", pszBasenameBase, 3147 pExtent->uExtent + 1, pszBasenameSuff); 3148 else 3149 RTStrAPrintf(&pszTmp, "%s-s%03d%s", pszBasenameBase, pExtent->uExtent + 1, 3150 pszBasenameSuff); 3151 3152 RTStrFree(pszBasenameBase); 3153 if (!pszTmp) 3154 return VERR_NO_STR_MEMORY; 3155 cbTmp = strlen(pszTmp) + 1; 3156 char *pszBasename = (char *)RTMemTmpAlloc(cbTmp); 3157 if (!pszBasename) 3158 { 3159 RTStrFree(pszTmp); 3160 return VERR_NO_MEMORY; 3161 } 3162 3163 memcpy(pszBasename, pszTmp, cbTmp); 2911 } 2912 else 2913 { 2914 uint64_t cSectorsPerGDE, cSectorsPerGD; 2915 pExtent->enmType = VMDKETYPE_HOSTED_SPARSE; 2916 pExtent->cSectors = VMDK_BYTE2SECTOR(RT_ALIGN_64(cbSize, _64K)); 2917 pExtent->cSectorsPerGrain = VMDK_BYTE2SECTOR(_64K); 2918 pExtent->cGTEntries = 512; 2919 cSectorsPerGDE = pExtent->cGTEntries * pExtent->cSectorsPerGrain; 2920 pExtent->cSectorsPerGDE = cSectorsPerGDE; 2921 pExtent->cGDEntries = (pExtent->cSectors + cSectorsPerGDE - 1) / cSectorsPerGDE; 2922 cSectorsPerGD = (pExtent->cGDEntries + (512 / sizeof(uint32_t) - 1)) / (512 / sizeof(uint32_t)); 2923 } 2924 2925 /* Allocate and set file name for extent. 
*/ 2926 char *pszBasenameSubstr = RTPathFilename(pImage->pszFilename); 2927 AssertPtr(pszBasenameSubstr); 2928 2929 char *pszBasenameSuff = RTPathSuffix(pszBasenameSubstr); 2930 char *pszBasenameBase = RTStrDup(pszBasenameSubstr); 2931 RTPathStripSuffix(pszBasenameBase); 2932 char *pszTmp; 2933 size_t cbTmp; 2934 2935 if (pImage->uImageFlags & VD_IMAGE_FLAGS_FIXED) 2936 RTStrAPrintf(&pszTmp, "%s-f%03d%s", pszBasenameBase, 2937 pExtent->uExtent + 1, pszBasenameSuff); 2938 else 2939 RTStrAPrintf(&pszTmp, "%s-s%03d%s", pszBasenameBase, pExtent->uExtent + 1, 2940 pszBasenameSuff); 2941 2942 RTStrFree(pszBasenameBase); 2943 if (!pszTmp) 2944 return VERR_NO_STR_MEMORY; 2945 cbTmp = strlen(pszTmp) + 1; 2946 char *pszBasename = (char *)RTMemTmpAlloc(cbTmp); 2947 if (!pszBasename) 2948 { 3164 2949 RTStrFree(pszTmp); 3165 3166 pExtent->pszBasename = pszBasename; 3167 3168 char *pszBasedirectory = RTStrDup(pImage->pszFilename); 3169 if (!pszBasedirectory) 3170 return VERR_NO_STR_MEMORY; 3171 RTPathStripFilename(pszBasedirectory); 3172 char *pszFullname = RTPathJoinA(pszBasedirectory, pExtent->pszBasename); 3173 RTStrFree(pszBasedirectory); 3174 if (!pszFullname) 3175 return VERR_NO_STR_MEMORY; 3176 pExtent->pszFullname = pszFullname; 3177 3178 /* Create file for extent. */ 3179 rc = vmdkFileOpen(pImage, &pExtent->pFile, pExtent->pszBasename, pExtent->pszFullname, 3180 VDOpenFlagsToFileOpenFlags(pImage->uOpenFlags, 3181 true /* fCreate */)); 3182 if (RT_FAILURE(rc)) 3183 return vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: could not create new file '%s'"), pExtent->pszFullname); 3184 3185 rc = vmdkDescExtInsert(pImage, &pImage->Descriptor, pExtent->enmAccess, 3186 pExtent->cNominalSectors, pExtent->enmType, 3187 pExtent->pszBasename, pExtent->uSectorOffset); 3188 if (RT_FAILURE(rc)) 3189 return vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: could not insert the extent list into descriptor in '%s'"), pImage->pszFilename); 3190 2950 return VERR_NO_MEMORY; 2951 } 2952 2953 memcpy(pszBasename, pszTmp, cbTmp); 2954 RTStrFree(pszTmp); 2955 2956 pExtent->pszBasename = pszBasename; 2957 2958 char *pszBasedirectory = RTStrDup(pImage->pszFilename); 2959 if (!pszBasedirectory) 2960 return VERR_NO_STR_MEMORY; 2961 RTPathStripFilename(pszBasedirectory); 2962 char *pszFullname = RTPathJoinA(pszBasedirectory, pExtent->pszBasename); 2963 RTStrFree(pszBasedirectory); 2964 if (!pszFullname) 2965 return VERR_NO_STR_MEMORY; 2966 pExtent->pszFullname = pszFullname; 2967 2968 /* Create file for extent. */ 2969 rc = vmdkFileOpen(pImage, &pExtent->pFile, pExtent->pszBasename, pExtent->pszFullname, 2970 VDOpenFlagsToFileOpenFlags(pImage->uOpenFlags, 2971 true /* fCreate */)); 2972 if (RT_FAILURE(rc)) 2973 return vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: could not create new file '%s'"), pExtent->pszFullname); 2974 2975 if (uImageFlags & VD_IMAGE_FLAGS_FIXED) 2976 { 2977 /* For flat images: Pre allocate file space. */ 3191 2978 rc = vdIfIoIntFileSetAllocationSize(pImage->pIfIo, pExtent->pFile->pStorage, cbSize, 3192 2979 0 /* fFlags */, NULL, 0, 0); 3193 3194 2980 if (RT_FAILURE(rc)) 3195 2981 return vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: could not set size of new file '%s'"), pExtent->pszFullname); 3196 3197 pImage->pExtents = pNewExtents;3198 pImage->cExtents++;3199 2982 } 3200 2983 else 3201 rc = VERR_NO_MEMORY; 2984 { 2985 /* For sparse images: Allocate new grain directories/tables. */ 2986 /* fPreAlloc should never be false because VMware can't use such images. 
*/ 2987 rc = vmdkCreateGrainDirectory(pImage, pExtent, 2988 RT_MAX( pExtent->uDescriptorSector 2989 + pExtent->cDescriptorSectors, 2990 1), 2991 true /* fPreAlloc */); 2992 if (RT_FAILURE(rc)) 2993 return vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: could not create new grain directory in '%s'"), pExtent->pszFullname); 2994 } 2995 2996 /* Insert new extent into descriptor file. */ 2997 rc = vmdkDescExtInsert(pImage, &pImage->Descriptor, pExtent->enmAccess, 2998 pExtent->cNominalSectors, pExtent->enmType, 2999 pExtent->pszBasename, pExtent->uSectorOffset); 3000 if (RT_FAILURE(rc)) 3001 return vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: could not insert the extent list into descriptor in '%s'"), pImage->pszFilename); 3002 3003 pImage->pExtents = pNewExtents; 3004 pImage->cExtents++; 3005 3202 3006 return rc; 3203 3007 } 3008 3204 3009 /** 3205 3010 * Reads and processes the descriptor embedded in sparse images. … … 3249 3054 { 3250 3055 uint64_t cDescriptorSectorsOld = pExtent->cDescriptorSectors; 3251 3252 3056 pExtent->cDescriptorSectors = 4; 3253 3057 if (!(pImage->uOpenFlags & VD_OPEN_FLAGS_READONLY)) … … 3314 3118 rc = VERR_NO_MEMORY; 3315 3119 } 3316 3317 3120 return rc; 3318 3121 } 3319 3320 3122 /** 3321 3123 * Reads the descriptor from a pure text file. … … 3404 3206 else 3405 3207 pExtent->pszFullname = NULL; 3406 3407 3208 unsigned uOpenFlags = pImage->uOpenFlags | ((pExtent->enmAccess == VMDKACCESS_READONLY) ? VD_OPEN_FLAGS_READONLY : 0); 3408 3209 switch (pExtent->enmType) … … 3425 3226 if (RT_FAILURE(rc)) 3426 3227 break; 3427 3428 3228 /* Mark extent as unclean if opened in read-write mode. */ 3429 3229 if (!(pImage->uOpenFlags & VD_OPEN_FLAGS_READONLY)) … … 3462 3262 else if (RT_SUCCESS(rc)) 3463 3263 rc = vdIfError(pImage->pIfError, VERR_VD_VMDK_INVALID_HEADER, RT_SRC_POS, N_("VMDK: descriptor in '%s' is too short"), pImage->pszFilename); 3464 3465 3264 return rc; 3466 3265 } 3467 3468 3266 /** 3469 3267 * Read and process the descriptor based on the image type. … … 3476 3274 { 3477 3275 uint32_t u32Magic; 3478 3479 3276 /* Read magic (if present). */ 3480 3277 int rc = vdIfIoIntFileReadSync(pImage->pIfIo, pFile->pStorage, 0, … … 3493 3290 rc = VERR_VD_VMDK_INVALID_HEADER; 3494 3291 } 3495 3496 3292 return rc; 3497 3293 } 3498 3499 3294 /** 3500 3295 * Internal: Open an image, constructing all necessary data structures. … … 3506 3301 pImage->pIfIo = VDIfIoIntGet(pImage->pVDIfsImage); 3507 3302 AssertPtrReturn(pImage->pIfIo, VERR_INVALID_PARAMETER); 3508 3509 3303 /* 3510 3304 * Open the image. … … 3519 3313 { 3520 3314 pImage->pFile = pFile; 3521 3522 3315 rc = vmdkDescriptorRead(pImage, pFile); 3523 3316 if (RT_SUCCESS(rc)) … … 3537 3330 } 3538 3331 } 3539 3540 3332 /* Update the image metadata now in case has changed. */ 3541 3333 rc = vmdkFlushImage(pImage, NULL); … … 3557 3349 || pExtent->enmType == VMDKETYPE_ZERO) 3558 3350 pImage->uImageFlags |= VD_IMAGE_FLAGS_FIXED; 3559 3560 3351 pImage->cbSize += VMDK_SECTOR2BYTE(pExtent->cNominalSectors); 3561 3352 } 3562 3563 3353 if ( !(pImage->uImageFlags & VD_VMDK_IMAGE_FLAGS_STREAM_OPTIMIZED) 3564 3354 || !(pImage->uOpenFlags & VD_OPEN_FLAGS_READONLY) … … 3570 3360 /* else: Do NOT signal an appropriate error here, as the VD layer has the 3571 3361 * choice of retrying the open if it failed. */ 3572 3573 3362 if (RT_SUCCESS(rc)) 3574 3363 { … … 3576 3365 pImage->RegionList.fFlags = 0; 3577 3366 pImage->RegionList.cRegions = 1; 3578 3579 3367 pRegion->offRegion = 0; /* Disk start. 
*/ 3580 3368 pRegion->cbBlock = 512; … … 3589 3377 return rc; 3590 3378 } 3591 3592 3379 /** 3593 3380 * Frees a raw descriptor. … … 3598 3385 if (!pRawDesc) 3599 3386 return VINF_SUCCESS; 3600 3601 3387 RTStrFree(pRawDesc->pszRawDisk); 3602 3388 pRawDesc->pszRawDisk = NULL; 3603 3604 3389 /* Partitions: */ 3605 3390 for (unsigned i = 0; i < pRawDesc->cPartDescs; i++) … … 3607 3392 RTStrFree(pRawDesc->pPartDescs[i].pszRawDevice); 3608 3393 pRawDesc->pPartDescs[i].pszRawDevice = NULL; 3609 3610 3394 RTMemFree(pRawDesc->pPartDescs[i].pvPartitionData); 3611 3395 pRawDesc->pPartDescs[i].pvPartitionData = NULL; 3612 3396 } 3613 3614 3397 RTMemFree(pRawDesc->pPartDescs); 3615 3398 pRawDesc->pPartDescs = NULL; 3616 3617 3399 RTMemFree(pRawDesc); 3618 3400 return VINF_SUCCESS; 3619 3401 } 3620 3621 3402 /** 3622 3403 * Helper that grows the raw partition descriptor table by @a cToAdd entries, … … 3635 3416 pRawDesc->cPartDescs = cNew; 3636 3417 pRawDesc->pPartDescs = paNew; 3637 3638 3418 *ppRet = &paNew[cOld]; 3639 3419 return VINF_SUCCESS; … … 3644 3424 pImage->pszFilename, cOld, cNew); 3645 3425 } 3646 3647 3426 /** 3648 3427 * @callback_method_impl{FNRTSORTCMP} … … 3654 3433 return iDelta < 0 ? -1 : iDelta > 0 ? 1 : 0; 3655 3434 } 3656 3657 3435 /** 3658 3436 * Post processes the partition descriptors. … … 3666 3444 */ 3667 3445 RTSortShell(pRawDesc->pPartDescs, pRawDesc->cPartDescs, sizeof(pRawDesc->pPartDescs[0]), vmdkRawDescPartComp, NULL); 3668 3669 3446 /* 3670 3447 * Check that we don't have overlapping descriptors. If we do, that's an … … 3681 3458 paPartDescs[i].pvPartitionData ? " (data)" : ""); 3682 3459 offLast -= 1; 3683 3684 3460 if (i + 1 < pRawDesc->cPartDescs && offLast >= paPartDescs[i + 1].offStartInVDisk) 3685 3461 return vdIfError(pImage->pIfError, VERR_FILESYSTEM_CORRUPT /*?*/, RT_SRC_POS, … … 3694 3470 paPartDescs[i].pvPartitionData ? " (data)" : "", cbSize); 3695 3471 } 3696 3697 3472 return VINF_SUCCESS; 3698 3473 } 3699 3700 3701 3474 #ifdef RT_OS_LINUX 3702 3475 /** … … 3721 3494 size_t const cchDir = RTPathEnsureTrailingSeparator(pszBlockDevDir, cbBlockDevDir); 3722 3495 AssertReturn(cchDir > 0, VERR_BUFFER_OVERFLOW); 3723 3724 3496 RTDIR hDir = NIL_RTDIR; 3725 3497 int rc = RTDirOpen(&hDir, pszBlockDevDir); … … 3739 3511 rc = RTStrCopy(&pszBlockDevDir[cchDir], cbBlockDevDir - cchDir, Entry.szName); 3740 3512 AssertContinue(RT_SUCCESS(rc)); /* should not happen! */ 3741 3742 3513 dev_t uThisDevNo = ~uDevToLocate; 3743 3514 rc = RTLinuxSysFsReadDevNumFile(&uThisDevNo, "%s/dev", pszBlockDevDir); … … 3769 3540 } 3770 3541 #endif /* RT_OS_LINUX */ 3771 3772 3542 #ifdef RT_OS_FREEBSD 3773 3774 3775 3543 /** 3776 3544 * Reads the config data from the provider and returns offset and size … … 3785 3553 gconfig *pConfEntry; 3786 3554 int rc = VERR_NOT_FOUND; 3787 3788 3555 /* 3789 3556 * Required parameters are located in the list containing key/value pairs. … … 3816 3583 return rc; 3817 3584 } 3818 3819 3820 3585 /** 3821 3586 * Searches the partition specified by name and calculates its size and absolute offset. … … 3836 3601 AssertReturn(pcbAbsoluteOffset, VERR_INVALID_PARAMETER); 3837 3602 AssertReturn(pcbSize, VERR_INVALID_PARAMETER); 3838 3839 3603 ggeom *pParentGeom; 3840 3604 int rc = VERR_NOT_FOUND; … … 3849 3613 if (RT_FAILURE(rc)) 3850 3614 return rc; 3851 3852 3615 gprovider *pProvider; 3853 3616 /* … … 3861 3624 return vmdkReadPartitionsParamsFromProvider(pProvider, pcbAbsoluteOffset, pcbSize); 3862 3625 } 3863 3864 3626 /* 3865 3627 * No provider found. 
Go over the parent geom again … … 3871 3633 * provider 3872 3634 */ 3873 3874 3635 LIST_FOREACH(pProvider, &pParentGeom->lg_provider, lg_provider) 3875 3636 { … … 3879 3640 if (RT_FAILURE(rc)) 3880 3641 return rc; 3881 3882 3642 uint64_t cbProviderOffset = 0; 3883 3643 uint64_t cbProviderSize = 0; … … 3890 3650 } 3891 3651 } 3892 3893 3652 return VERR_NOT_FOUND; 3894 3653 } 3895 3654 #endif 3896 3897 3898 3655 /** 3899 3656 * Attempts to verify the raw partition path. … … 3905 3662 { 3906 3663 RT_NOREF(pImage, pPartDesc, idxPartition, pszRawDrive, hRawDrive, cbSector, hVol); 3907 3908 3664 /* 3909 3665 * Try open the raw partition device. … … 3915 3671 N_("VMDK: Image path: '%s'. Failed to open partition #%u on '%s' via '%s' (%Rrc)"), 3916 3672 pImage->pszFilename, idxPartition, pszRawDrive, pPartDesc->pszRawDevice, rc); 3917 3918 3673 /* 3919 3674 * Compare the partition UUID if we can get it. … … 3921 3676 #ifdef RT_OS_WINDOWS 3922 3677 DWORD cbReturned; 3923 3924 3678 /* 1. Get the device numbers for both handles, they should have the same disk. */ 3925 3679 STORAGE_DEVICE_NUMBER DevNum1; … … 3930 3684 N_("VMDK: Image path: '%s'. IOCTL_STORAGE_GET_DEVICE_NUMBER failed on '%s': %u"), 3931 3685 pImage->pszFilename, pszRawDrive, GetLastError()); 3932 3933 3686 STORAGE_DEVICE_NUMBER DevNum2; 3934 3687 RT_ZERO(DevNum2); … … 4022 3775 rc = VERR_NO_TMP_MEMORY; 4023 3776 } 4024 4025 3777 #elif defined(RT_OS_LINUX) 4026 3778 RT_NOREF(hVol); 4027 4028 3779 /* Stat the two devices first to get their device numbers. (We probably 4029 3780 could make some assumptions here about the major & minor number assignments … … 4046 3797 { 4047 3798 rc = vmdkFindSysBlockDevPath(pImage, szSysPath, sizeof(szSysPath), StDrive.st_rdev, pszRawDrive); 4048 4049 3799 /* Now, scan the directories under that again for a partition device 4050 3800 matching the hRawPart device's number: */ 4051 3801 if (RT_SUCCESS(rc)) 4052 3802 rc = vmdkFindSysBlockDevPath(pImage, szSysPath, sizeof(szSysPath), StPart.st_rdev, pPartDesc->pszRawDevice); 4053 4054 3803 /* Having found the /sys/block/device/partition/ path, we can finally 4055 3804 read the partition attributes and compare with hVol. */ … … 4064 3813 pImage->pszFilename, idxPartition, pPartDesc->pszRawDevice, pszRawDrive, iLnxPartition, idxPartition); 4065 3814 /* else: ignore failure? */ 4066 4067 3815 /* start offset: */ 4068 3816 uint32_t const cbLnxSector = 512; /* It's hardcoded in the Linux kernel */ … … 4078 3826 /* else: ignore failure? */ 4079 3827 } 4080 4081 3828 /* the size: */ 4082 3829 if (RT_SUCCESS(rc)) … … 4095 3842 /* else: We've got nothing to work on, so only do content comparison. */ 4096 3843 } 4097 4098 3844 #elif defined(RT_OS_FREEBSD) 4099 3845 char szDriveDevName[256]; … … 4126 3872 rc = vdIfError(pImage->pIfError, VERR_GENERAL_FAILURE, RT_SRC_POS, 4127 3873 N_("VMDK: Image path: '%s'. 'PART' class not found in the GEOM tree"), pImage->pszFilename); 4128 4129 4130 3874 if (RT_SUCCESS(rc)) 4131 3875 { … … 4150 3894 pImage->pszFilename, pPartDesc->pszRawDevice, pszRawDrive, rc); 4151 3895 } 4152 4153 3896 geom_deletetree(&geomMesh); 4154 3897 } … … 4157 3900 N_("VMDK: Image path: '%s'. geom_gettree failed: %d"), pImage->pszFilename, err); 4158 3901 } 4159 4160 3902 #elif defined(RT_OS_SOLARIS) 4161 3903 RT_NOREF(hVol); 4162 4163 3904 dk_cinfo dkiDriveInfo; 4164 3905 dk_cinfo dkiPartInfo; … … 4208 3949 * using another way. If there is an error, it returns errno which will be handled below. 
4209 3950 */ 4210 4211 3951 uint32_t numPartition = (uint32_t)dkiPartInfo.dki_partition; 4212 3952 if (numPartition > NDKMAP) … … 4243 3983 N_("VMDK: Image path: '%s'. Partition #%u path ('%s') verification failed on '%s': Start offset %RI64, expected %RU64"), 4244 3984 pImage->pszFilename, idxPartition, pPartDesc->pszRawDevice, pszRawDrive, cbOffset, pPartDesc->offStartInVDisk); 4245 4246 3985 if (RT_SUCCESS(rc) && cbSize != pPartDesc->cbData) 4247 3986 rc = vdIfError(pImage->pIfError, VERR_MISMATCH, RT_SRC_POS, … … 4319 4058 #else 4320 4059 RT_NOREF(hVol); /* PORTME */ 4321 rc = VERR_NOT_SUPPORTED;4322 4060 #endif 4323 4061 if (RT_SUCCESS(rc)) … … 4335 4073 { 4336 4074 uint8_t *pbSector2 = pbSector1 + cbToCompare; 4337 4338 4075 /* Do the comparing, we repeat if it fails and the data might be volatile. */ 4339 4076 uint64_t uPrevCrc1 = 0; … … 4351 4088 { 4352 4089 rc = VERR_MISMATCH; 4353 4354 4090 /* Do data stability checks before repeating: */ 4355 4091 uint64_t const uCrc1 = RTCrc64(pbSector1, cbToCompare); … … 4384 4120 offMissmatch++; 4385 4121 int cbSample = (int)RT_MIN(cbToCompare - offMissmatch, 16); 4386 4387 4122 if (cStable > 0) 4388 4123 rc = vdIfError(pImage->pIfError, rc, RT_SRC_POS, … … 4398 4133 } 4399 4134 } 4400 4401 4135 RTMemTmpFree(pbSector1); 4402 4136 } … … 4409 4143 return rc; 4410 4144 } 4411 4412 4145 #ifdef RT_OS_WINDOWS 4413 4146 /** … … 4431 4164 } 4432 4165 #endif /* RT_OS_WINDOWS */ 4433 4434 4166 /** 4435 4167 * Worker for vmdkMakeRawDescriptor that adds partition descriptors when the … … 4448 4180 { 4449 4181 *phVolToRelease = NIL_RTDVMVOLUME; 4450 4451 4182 /* Check sanity/understanding. */ 4452 4183 Assert(fPartitions); 4453 4184 Assert((fPartitions & fPartitionsReadOnly) == fPartitionsReadOnly); /* RO should be a sub-set */ 4454 4455 4185 /* 4456 4186 * Allocate on descriptor for each volume up front. 4457 4187 */ 4458 4188 uint32_t const cVolumes = RTDvmMapGetValidVolumes(hVolMgr); 4459 4460 4189 PVDISKRAWPARTDESC paPartDescs = NULL; 4461 4190 int rc = vmdkRawDescAppendPartDesc(pImage, pRawDesc, cVolumes, &paPartDescs); 4462 4191 AssertRCReturn(rc, rc); 4463 4464 4192 /* 4465 4193 * Enumerate the partitions (volumes) on the disk and create descriptors for each of them. … … 4484 4212 Assert(cRefs != UINT32_MAX); RT_NOREF(cRefs); 4485 4213 *phVolToRelease = hVol = hVolNext; 4486 4487 4214 /* 4488 4215 * Depending on the fPartitions selector and associated read-only mask, … … 4491 4218 */ 4492 4219 paPartDescs[i].cbData = RTDvmVolumeGetSize(hVol); 4493 4494 4220 uint64_t offVolumeEndIgnored = 0; 4495 4221 rc = RTDvmVolumeQueryRange(hVol, &paPartDescs[i].offStartInVDisk, &offVolumeEndIgnored); … … 4499 4225 pImage->pszFilename, i, pszRawDrive, rc); 4500 4226 Assert(paPartDescs[i].cbData == offVolumeEndIgnored + 1 - paPartDescs[i].offStartInVDisk); 4501 4502 4227 /* Note! The index must match IHostDrivePartition::number. 
*/ 4503 4228 uint32_t idxPartition = RTDvmVolumeGetIndex(hVol, RTDVMVOLIDX_HOST); … … 4508 4233 if (fPartitionsReadOnly & RT_BIT_32(idxPartition)) 4509 4234 paPartDescs[i].uFlags |= VDISKRAW_READONLY; 4510 4511 4235 if (!fRelative) 4512 4236 { … … 4529 4253 */ 4530 4254 paPartDescs[i].offStartInDevice = 0; 4531 4532 4255 #if defined(RT_OS_DARWIN) || defined(RT_OS_FREEBSD) 4533 4256 /* /dev/rdisk1 -> /dev/rdisk1s2 (s=slice) */ … … 4583 4306 #endif 4584 4307 AssertPtrReturn(paPartDescs[i].pszRawDevice, VERR_NO_STR_MEMORY); 4585 4586 4308 rc = vmdkRawDescVerifyPartitionPath(pImage, &paPartDescs[i], idxPartition, pszRawDrive, hRawDrive, cbSector, hVol); 4587 4309 AssertRCReturn(rc, rc); … … 4595 4317 } 4596 4318 } /* for each volume */ 4597 4598 4319 RTDvmVolumeRelease(hVol); 4599 4320 *phVolToRelease = NIL_RTDVMVOLUME; 4600 4601 4321 /* 4602 4322 * Check that we found all the partitions the user selected. … … 4613 4333 pImage->pszFilename, pszRawDrive, szLeft); 4614 4334 } 4615 4616 4335 return VINF_SUCCESS; 4617 4336 } 4618 4619 4337 /** 4620 4338 * Worker for vmdkMakeRawDescriptor that adds partition descriptors with copies … … 4647 4365 pImage->pszFilename, pszRawDrive, rc); 4648 4366 AssertReturn(cLocations > 0 && cLocations < _16M, VERR_INTERNAL_ERROR_5); 4649 4650 4367 /* We can allocate the partition descriptors here to save an intentation level. */ 4651 4368 PVDISKRAWPARTDESC paPartDescs = NULL; 4652 4369 rc = vmdkRawDescAppendPartDesc(pImage, pRawDesc, (uint32_t)cLocations, &paPartDescs); 4653 4370 AssertRCReturn(rc, rc); 4654 4655 4371 /* Allocate the result table and repeat the location table query: */ 4656 4372 PRTDVMTABLELOCATION paLocations = (PRTDVMTABLELOCATION)RTMemAllocZ(sizeof(paLocations[0]) * cLocations); … … 4732 4448 return rc; 4733 4449 } 4734 4735 4450 /** 4736 4451 * Opens the volume manager for the raw drive when in selected-partition mode. … … 4748 4463 { 4749 4464 *phVolMgr = NIL_RTDVM; 4750 4751 4465 RTVFSFILE hVfsFile = NIL_RTVFSFILE; 4752 4466 int rc = RTVfsFileFromRTFile(hRawDrive, RTFILE_O_READ | RTFILE_O_OPEN | RTFILE_O_DENY_NONE, true /*fLeaveOpen*/, &hVfsFile); … … 4755 4469 N_("VMDK: Image path: '%s'. RTVfsFileFromRTFile failed for '%s' handle (%Rrc)"), 4756 4470 pImage->pszFilename, pszRawDrive, rc); 4757 4758 4471 RTDVM hVolMgr = NIL_RTDVM; 4759 4472 rc = RTDvmCreate(&hVolMgr, hVfsFile, cbSector, 0 /*fFlags*/); 4760 4761 4473 RTVfsFileRelease(hVfsFile); 4762 4763 4474 if (RT_FAILURE(rc)) 4764 4475 return vdIfError(pImage->pIfError, rc, RT_SRC_POS, 4765 4476 N_("VMDK: Image path: '%s'. Failed to create volume manager instance for '%s' (%Rrc)"), 4766 4477 pImage->pszFilename, pszRawDrive, rc); 4767 4768 4478 rc = RTDvmMapOpen(hVolMgr); 4769 4479 if (RT_SUCCESS(rc)) … … 4776 4486 pImage->pszFilename, pszRawDrive, rc); 4777 4487 } 4778 4779 4488 /** 4780 4489 * Opens the raw drive device and get the sizes for it. … … 4800 4509 N_("VMDK: Image path: '%s'. Failed to open the raw drive '%s' for reading (%Rrc)"), 4801 4510 pImage->pszFilename, pszRawDrive, rc); 4802 4803 4511 /* 4804 4512 * Get the sector size. … … 4849 4557 return rc; 4850 4558 } 4851 4852 4559 /** 4853 4560 * Reads the raw disk configuration, leaving initalization and cleanup to the … … 4866 4573 return vdIfError(pImage->pIfError, VERR_INVALID_PARAMETER, RT_SRC_POS, 4867 4574 N_("VMDK: Image path: '%s'. Getting config interface failed"), pImage->pszFilename); 4868 4869 4575 /* 4870 4576 * RawDrive = path … … 4875 4581 N_("VMDK: Image path: '%s'. 
Getting 'RawDrive' configuration failed (%Rrc)"), pImage->pszFilename, rc); 4876 4582 AssertPtrReturn(*ppszRawDrive, VERR_INTERNAL_ERROR_3); 4877 4878 4583 /* 4879 4584 * Partitions=n[r][,...] … … 4881 4586 uint32_t const cMaxPartitionBits = sizeof(*pfPartitions) * 8 /* ASSUMES 8 bits per char */; 4882 4587 *pfPartitions = *pfPartitionsReadOnly = 0; 4883 4884 4588 rc = VDCFGQueryStringAlloc(pImgCfg, "Partitions", ppszFreeMe); 4885 4589 if (RT_SUCCESS(rc)) … … 4915 4619 pImage->pszFilename, psz); 4916 4620 } 4917 4918 4621 RTStrFree(*ppszFreeMe); 4919 4622 *ppszFreeMe = NULL; … … 4922 4625 return vdIfError(pImage->pIfError, rc, RT_SRC_POS, 4923 4626 N_("VMDK: Image path: '%s'. Getting 'Partitions' configuration failed (%Rrc)"), pImage->pszFilename, rc); 4924 4925 4627 /* 4926 4628 * BootSector=base64 … … 4942 4644 N_("VMDK: Image path: '%s'. Custom bootsector for '%s' is way too big: %zu bytes, max 4MB"), 4943 4645 pImage->pszFilename, *ppszRawDrive, cbBootSector); 4944 4945 4646 /* Refuse the boot sector if whole-drive. This used to be done quietly, 4946 4647 however, bird disagrees and thinks the user should be told that what … … 4951 4652 N_("VMDK: Image path: '%s'. Custom bootsector for '%s' is not supported for whole-drive configurations, only when selecting partitions"), 4952 4653 pImage->pszFilename, *ppszRawDrive); 4953 4954 4654 *pcbBootSector = (size_t)cbBootSector; 4955 4655 *ppvBootSector = RTMemAlloc((size_t)cbBootSector); … … 4958 4658 N_("VMDK: Image path: '%s'. Failed to allocate %zd bytes for the custom bootsector for '%s'"), 4959 4659 pImage->pszFilename, cbBootSector, *ppszRawDrive); 4960 4961 4660 rc = RTBase64Decode(*ppszFreeMe, *ppvBootSector, cbBootSector, NULL /*pcbActual*/, NULL /*ppszEnd*/); 4962 4661 if (RT_FAILURE(rc)) … … 4964 4663 N_("VMDK: Image path: '%s'. Base64 decoding of the custom boot sector for '%s' failed (%Rrc)"), 4965 4664 pImage->pszFilename, *ppszRawDrive, rc); 4966 4967 4665 RTStrFree(*ppszFreeMe); 4968 4666 *ppszFreeMe = NULL; … … 4971 4669 return vdIfError(pImage->pIfError, rc, RT_SRC_POS, 4972 4670 N_("VMDK: Image path: '%s'. Getting 'BootSector' configuration failed (%Rrc)"), pImage->pszFilename, rc); 4973 4974 4671 /* 4975 4672 * Relative=0/1 … … 4999 4696 *pfRelative = false; 5000 4697 #endif 5001 5002 4698 return VINF_SUCCESS; 5003 4699 } 5004 5005 4700 /** 5006 4701 * Creates a raw drive (nee disk) descriptor. … … 5021 4716 /* Make sure it's NULL. */ 5022 4717 *ppRaw = NULL; 5023 5024 4718 /* 5025 4719 * Read the configuration. … … 5046 4740 if (RT_SUCCESS(rc)) 5047 4741 { 5048 pImage->cbSize = cbSize;5049 4742 /* 5050 4743 * Create the raw-drive descriptor … … 5074 4767 //pRawDesc->cPartDescs = 0; 5075 4768 //pRawDesc->pPartDescs = NULL; 5076 5077 4769 /* We need to parse the partition map to complete the descriptor: */ 5078 4770 RTDVM hVolMgr = NIL_RTDVM; … … 5086 4778 pRawDesc->enmPartitioningType = enmFormatType == RTDVMFORMATTYPE_MBR 5087 4779 ? VDISKPARTTYPE_MBR : VDISKPARTTYPE_GPT; 5088 5089 4780 /* Add copies of the partition tables: */ 5090 4781 rc = vmdkRawDescDoCopyPartitionTables(pImage, hVolMgr, pRawDesc, pszRawDrive, hRawDrive, … … 5098 4789 fPartitions, fPartitionsReadOnly, fRelative, &hVolRelease); 5099 4790 RTDvmVolumeRelease(hVolRelease); 5100 5101 4791 /* Finally, sort the partition and check consistency (overlaps, etc): */ 5102 4792 if (RT_SUCCESS(rc)) … … 5142 4832 return rc; 5143 4833 } 5144 5145 4834 /** 5146 4835 * Internal: create VMDK images for raw disk/partition access. 
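 * For illustration only; the device name below is a made-up example,
 * not taken from the code. A whole-disk mapping typically ends up
 * with a descriptor of the shape
 *     createType="fullDevice"
 *     RW <disk size in sectors> FLAT "/dev/sda" 0
 * while the selected-partitions case ("partitionedDevice") emits one
 * extent per covered range, typically mixing FLAT extents for the
 * accessible partitions and the saved partition-table data with ZERO
 * extents for the gaps in between.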
… … 5151 4840 int rc = VINF_SUCCESS; 5152 4841 PVMDKEXTENT pExtent; 5153 5154 4842 if (pRaw->uFlags & VDISKRAW_DISK) 5155 4843 { … … 5166 4854 if (RT_FAILURE(rc)) 5167 4855 return vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: could not create new file '%s'"), pImage->pszFilename); 5168 5169 4856 /* Set up basename for extent description. Cannot use StrDup. */ 5170 4857 size_t cbBasename = strlen(pRaw->pszRawDisk) + 1; … … 5183 4870 pExtent->enmAccess = (pRaw->uFlags & VDISKRAW_READONLY) ? VMDKACCESS_READONLY : VMDKACCESS_READWRITE; 5184 4871 pExtent->fMetaDirty = false; 5185 5186 4872 /* Open flat image, the raw disk. */ 5187 4873 rc = vmdkFileOpen(pImage, &pExtent->pFile, pExtent->pszBasename, pExtent->pszFullname, … … 5196 4882 * file, write the partition information to a flat extent and 5197 4883 * open all the (flat) raw disk partitions. */ 5198 5199 4884 /* First pass over the partition data areas to determine how many 5200 4885 * extents we need. One data area can require up to 2 extents, as … … 5208 4893 return vdIfError(pImage->pIfError, VERR_INVALID_PARAMETER, RT_SRC_POS, 5209 4894 N_("VMDK: incorrect partition data area ordering set up by the caller in '%s'"), pImage->pszFilename); 5210 5211 4895 if (uStart < pPart->offStartInVDisk) 5212 4896 cExtents++; … … 5217 4901 if (uStart != cbSize) 5218 4902 cExtents++; 5219 5220 4903 rc = vmdkCreateExtents(pImage, cExtents); 5221 4904 if (RT_FAILURE(rc)) 5222 4905 return vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: could not create new extent list in '%s'"), pImage->pszFilename); 5223 5224 4906 /* Create raw partition descriptor file. */ 5225 4907 rc = vmdkFileOpen(pImage, &pImage->pFile, NULL, pImage->pszFilename, … … 5228 4910 if (RT_FAILURE(rc)) 5229 4911 return vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: could not create new file '%s'"), pImage->pszFilename); 5230 5231 4912 /* Create base filename for the partition table extent. */ 5232 4913 /** @todo remove fixed buffer without creating memory leaks. */ … … 5243 4924 pszBaseBase, pszSuff); 5244 4925 RTStrFree(pszBaseBase); 5245 5246 4926 /* Second pass over the partitions, now define all extents. */ 5247 4927 uint64_t uPartOffset = 0; … … 5252 4932 PVDISKRAWPARTDESC pPart = &pRaw->pPartDescs[i]; 5253 4933 pExtent = &pImage->pExtents[cExtents++]; 5254 5255 4934 if (uStart < pPart->offStartInVDisk) 5256 4935 { … … 5266 4945 } 5267 4946 uStart = pPart->offStartInVDisk + pPart->cbData; 5268 5269 4947 if (pPart->pvPartitionData) 5270 4948 { … … 5276 4954 memcpy(pszBasename, pszPartition, cbBasename); 5277 4955 pExtent->pszBasename = pszBasename; 5278 5279 4956 /* Set up full name for partition extent. */ 5280 4957 char *pszDirname = RTStrDup(pImage->pszFilename); … … 5292 4969 pExtent->enmAccess = VMDKACCESS_READWRITE; 5293 4970 pExtent->fMetaDirty = false; 5294 5295 4971 /* Create partition table flat image. */ 5296 4972 rc = vmdkFileOpen(pImage, &pExtent->pFile, pExtent->pszBasename, pExtent->pszFullname, … … 5327 5003 pExtent->enmAccess = (pPart->uFlags & VDISKRAW_READONLY) ? VMDKACCESS_READONLY : VMDKACCESS_READWRITE; 5328 5004 pExtent->fMetaDirty = false; 5329 5330 5005 /* Open flat image, the raw partition. */ 5331 5006 rc = vmdkFileOpen(pImage, &pExtent->pFile, pExtent->pszBasename, pExtent->pszFullname, … … 5360 5035 } 5361 5036 } 5362 5363 5037 rc = vmdkDescBaseSetStr(pImage, &pImage->Descriptor, "createType", 5364 5038 (pRaw->uFlags & VDISKRAW_DISK) ? … … 5368 5042 return rc; 5369 5043 } 5370 5371 5044 /** 5372 5045 * Internal: create a regular (i.e. 
file-backed) VMDK image. … … 5380 5053 uint64_t cbOffset = 0; 5381 5054 uint64_t cbRemaining = cbSize; 5382 5383 5055 if (uImageFlags & VD_VMDK_IMAGE_FLAGS_SPLIT_2G) 5384 5056 { … … 5392 5064 if (RT_FAILURE(rc)) 5393 5065 return vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: could not create new extent list in '%s'"), pImage->pszFilename); 5394 5395 5066 /* Basename strings needed for constructing the extent names. */ 5396 5067 char *pszBasenameSubstr = RTPathFilename(pImage->pszFilename); 5397 5068 AssertPtr(pszBasenameSubstr); 5398 5069 size_t cbBasenameSubstr = strlen(pszBasenameSubstr) + 1; 5399 5400 5070 /* Create separate descriptor file if necessary. */ 5401 5071 if (cExtents != 1 || (uImageFlags & VD_IMAGE_FLAGS_FIXED)) … … 5409 5079 else 5410 5080 pImage->pFile = NULL; 5411 5412 5081 /* Set up all extents. */ 5413 5082 for (unsigned i = 0; i < cExtents; i++) … … 5415 5084 PVMDKEXTENT pExtent = &pImage->pExtents[i]; 5416 5085 uint64_t cbExtent = cbRemaining; 5417 5418 5086 /* Set up fullname/basename for extent description. Cannot use StrDup 5419 5087 * for basename, as it is not guaranteed that the memory can be freed … … 5472 5140 return VERR_NO_STR_MEMORY; 5473 5141 pExtent->pszFullname = pszFullname; 5474 5475 5142 /* Create file for extent. */ 5476 5143 rc = vmdkFileOpen(pImage, &pExtent->pFile, pExtent->pszBasename, pExtent->pszFullname, … … 5488 5155 return vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: could not set size of new file '%s'"), pExtent->pszFullname); 5489 5156 } 5490 5491 5157 /* Place descriptor file information (where integrated). */ 5492 5158 if (cExtents == 1 && !(uImageFlags & VD_IMAGE_FLAGS_FIXED)) … … 5498 5164 pImage->pDescData = NULL; 5499 5165 } 5500 5501 5166 if (!(uImageFlags & VD_IMAGE_FLAGS_FIXED)) 5502 5167 { … … 5526 5191 pExtent->enmType = VMDKETYPE_FLAT; 5527 5192 } 5528 5529 5193 pExtent->enmAccess = VMDKACCESS_READWRITE; 5530 5194 pExtent->fUncleanShutdown = true; … … 5532 5196 pExtent->uSectorOffset = 0; 5533 5197 pExtent->fMetaDirty = true; 5534 5535 5198 if (!(uImageFlags & VD_IMAGE_FLAGS_FIXED)) 5536 5199 { … … 5544 5207 return vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: could not create new grain directory in '%s'"), pExtent->pszFullname); 5545 5208 } 5546 5547 5209 cbOffset += cbExtent; 5548 5549 5210 if (RT_SUCCESS(rc)) 5550 5211 vdIfProgress(pIfProgress, uPercentStart + cbOffset * uPercentSpan / cbSize); 5551 5552 5212 cbRemaining -= cbExtent; 5553 5213 } 5554 5555 5214 if (pImage->uImageFlags & VD_VMDK_IMAGE_FLAGS_ESX) 5556 5215 { … … 5561 5220 return vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: could not set controller type to lsilogic in '%s'"), pImage->pszFilename); 5562 5221 } 5563 5564 5222 const char *pszDescType = NULL; 5565 5223 if (uImageFlags & VD_IMAGE_FLAGS_FIXED) … … 5587 5245 return rc; 5588 5246 } 5589 5590 5247 /** 5591 5248 * Internal: Create a real stream optimized VMDK using only linear writes. … … 5596 5253 if (RT_FAILURE(rc)) 5597 5254 return vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: could not create new extent list in '%s'"), pImage->pszFilename); 5598 5599 5255 /* Basename strings needed for constructing the extent names. */ 5600 5256 const char *pszBasenameSubstr = RTPathFilename(pImage->pszFilename); 5601 5257 AssertPtr(pszBasenameSubstr); 5602 5258 size_t cbBasenameSubstr = strlen(pszBasenameSubstr) + 1; 5603 5604 5259 /* No separate descriptor file. */ 5605 5260 pImage->pFile = NULL; 5606 5607 5261 /* Set up all extents. 
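 * A streamOptimized image always consists of exactly this one sparse
 * extent: it is filled strictly append-only with deflate-compressed
 * grains, and the grain directory is not preallocated here but built
 * up while writing and emitted together with the footer when the
 * image is closed (see the non-preallocating vmdkCreateGrainDirectory
 * call below and the finalization in vmdkFreeImage).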
*/ 5608 5262 PVMDKEXTENT pExtent = &pImage->pExtents[0]; 5609 5610 5263 /* Set up fullname/basename for extent description. Cannot use StrDup 5611 5264 * for basename, as it is not guaranteed that the memory can be freed … … 5617 5270 memcpy(pszBasename, pszBasenameSubstr, cbBasenameSubstr); 5618 5271 pExtent->pszBasename = pszBasename; 5619 5620 5272 char *pszBasedirectory = RTStrDup(pImage->pszFilename); 5621 5273 RTPathStripFilename(pszBasedirectory); … … 5625 5277 return VERR_NO_STR_MEMORY; 5626 5278 pExtent->pszFullname = pszFullname; 5627 5628 5279 /* Create file for extent. Make it write only, no reading allowed. */ 5629 5280 rc = vmdkFileOpen(pImage, &pExtent->pFile, pExtent->pszBasename, pExtent->pszFullname, … … 5633 5284 if (RT_FAILURE(rc)) 5634 5285 return vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: could not create new file '%s'"), pExtent->pszFullname); 5635 5636 5286 /* Place descriptor file information. */ 5637 5287 pExtent->uDescriptorSector = 1; … … 5640 5290 pExtent->pDescData = pImage->pDescData; 5641 5291 pImage->pDescData = NULL; 5642 5643 5292 uint64_t cSectorsPerGDE, cSectorsPerGD; 5644 5293 pExtent->enmType = VMDKETYPE_HOSTED_SPARSE; … … 5650 5299 pExtent->cGDEntries = (pExtent->cSectors + cSectorsPerGDE - 1) / cSectorsPerGDE; 5651 5300 cSectorsPerGD = (pExtent->cGDEntries + (512 / sizeof(uint32_t) - 1)) / (512 / sizeof(uint32_t)); 5652 5653 5301 /* The spec says version is 1 for all VMDKs, but the vast 5654 5302 * majority of streamOptimized VMDKs actually contain … … 5657 5305 pExtent->uCompression = VMDK_COMPRESSION_DEFLATE; 5658 5306 pExtent->fFooter = true; 5659 5660 5307 pExtent->enmAccess = VMDKACCESS_READONLY; 5661 5308 pExtent->fUncleanShutdown = false; … … 5663 5310 pExtent->uSectorOffset = 0; 5664 5311 pExtent->fMetaDirty = true; 5665 5666 5312 /* Create grain directory, without preallocating it straight away. It will 5667 5313 * be constructed on the fly when writing out the data and written when … … 5672 5318 if (RT_FAILURE(rc)) 5673 5319 return vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: could not create new grain directory in '%s'"), pExtent->pszFullname); 5674 5675 5320 rc = vmdkDescBaseSetStr(pImage, &pImage->Descriptor, "createType", 5676 5321 "streamOptimized"); 5677 5322 if (RT_FAILURE(rc)) 5678 5323 rc = vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: could not set the image type in '%s'"), pImage->pszFilename); 5679 5680 5324 return rc; 5681 5325 } 5682 5683 5326 /** 5684 5327 * Initializes the UUID fields in the DDB. 
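 * Persists the freshly set UUIDs into the descriptor database so they
 * survive a close/reopen cycle; a failure is reported as "error
 * storing image UUID in new descriptor" as seen below.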
… … 5716 5359 rc = vdIfError(pImage->pIfError, rc, RT_SRC_POS, 5717 5360 N_("VMDK: error storing image UUID in new descriptor in '%s'"), pImage->pszFilename); 5718 5719 5361 return rc; 5720 5362 } 5721 5722 5363 /** 5723 5364 * Internal: The actual code for creating any VMDK variant currently in … … 5732 5373 { 5733 5374 pImage->uImageFlags = uImageFlags; 5734 5735 5375 pImage->pIfError = VDIfErrorGet(pImage->pVDIfsDisk); 5736 5376 pImage->pIfIo = VDIfIoIntGet(pImage->pVDIfsImage); 5737 5377 AssertPtrReturn(pImage->pIfIo, VERR_INVALID_PARAMETER); 5738 5739 5378 int rc = vmdkCreateDescriptor(pImage, pImage->pDescData, pImage->cbDescAlloc, 5740 5379 &pImage->Descriptor); … … 5747 5386 rc = vmdkMakeRawDescriptor(pImage, &pRaw); 5748 5387 if (RT_FAILURE(rc)) 5749 return vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: could not create raw descriptor for '%s'"), 5750 pImage->pszFilename); 5751 if (!cbSize) 5752 cbSize = pImage->cbSize; 5753 5388 return vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: could get raw descriptor for '%s'"), pImage->pszFilename); 5754 5389 rc = vmdkCreateRawImage(pImage, pRaw, cbSize); 5755 5390 vmdkRawDescFree(pRaw); … … 5767 5402 uPercentSpan * 95 / 100); 5768 5403 } 5769 5770 5404 if (RT_SUCCESS(rc)) 5771 5405 { 5772 5406 vdIfProgress(pIfProgress, uPercentStart + uPercentSpan * 98 / 100); 5773 5774 5407 pImage->cbSize = cbSize; 5775 5776 5408 for (unsigned i = 0; i < pImage->cExtents; i++) 5777 5409 { 5778 5410 PVMDKEXTENT pExtent = &pImage->pExtents[i]; 5779 5780 5411 rc = vmdkDescExtInsert(pImage, &pImage->Descriptor, pExtent->enmAccess, 5781 5412 pExtent->cNominalSectors, pExtent->enmType, … … 5787 5418 } 5788 5419 } 5789 5790 5420 if (RT_SUCCESS(rc)) 5791 5421 vmdkDescExtRemoveDummy(pImage, &pImage->Descriptor); 5792 5793 pImage->LCHSGeometry = *pLCHSGeometry; 5794 pImage->PCHSGeometry = *pPCHSGeometry; 5795 5796 if (RT_SUCCESS(rc)) 5797 { 5798 if ( pPCHSGeometry->cCylinders != 0 5799 && pPCHSGeometry->cHeads != 0 5800 && pPCHSGeometry->cSectors != 0) 5801 rc = vmdkDescSetPCHSGeometry(pImage, pPCHSGeometry); 5802 else if (uImageFlags & VD_VMDK_IMAGE_FLAGS_RAWDISK) 5803 { 5804 VDGEOMETRY RawDiskPCHSGeometry; 5805 RawDiskPCHSGeometry.cCylinders = (uint32_t)RT_MIN(pImage->cbSize / 512 / 16 / 63, 16383); 5806 RawDiskPCHSGeometry.cHeads = 16; 5807 RawDiskPCHSGeometry.cSectors = 63; 5808 rc = vmdkDescSetPCHSGeometry(pImage, &RawDiskPCHSGeometry); 5809 } 5810 } 5811 5422 if ( RT_SUCCESS(rc) 5423 && pPCHSGeometry->cCylinders != 0 5424 && pPCHSGeometry->cHeads != 0 5425 && pPCHSGeometry->cSectors != 0) 5426 rc = vmdkDescSetPCHSGeometry(pImage, pPCHSGeometry); 5812 5427 if ( RT_SUCCESS(rc) 5813 5428 && pLCHSGeometry->cCylinders != 0 … … 5815 5430 && pLCHSGeometry->cSectors != 0) 5816 5431 rc = vmdkDescSetLCHSGeometry(pImage, pLCHSGeometry); 5817 5432 pImage->LCHSGeometry = *pLCHSGeometry; 5433 pImage->PCHSGeometry = *pPCHSGeometry; 5818 5434 pImage->ImageUuid = *pUuid; 5819 5435 RTUuidClear(&pImage->ParentUuid); 5820 5436 RTUuidClear(&pImage->ModificationUuid); 5821 5437 RTUuidClear(&pImage->ParentModificationUuid); 5822 5823 5438 if (RT_SUCCESS(rc)) 5824 5439 rc = vmdkCreateImageDdbUuidsInit(pImage); 5825 5826 5440 if (RT_SUCCESS(rc)) 5827 5441 rc = vmdkAllocateGrainTableCache(pImage); 5828 5829 5442 if (RT_SUCCESS(rc)) 5830 5443 { … … 5833 5446 rc = vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: cannot set image comment in '%s'"), pImage->pszFilename); 5834 5447 } 5835 5836 5448 if (RT_SUCCESS(rc)) 5837 5449 { 5838 5450 vdIfProgress(pIfProgress, 
uPercentStart + uPercentSpan * 99 / 100); 5839 5840 5451 if (pImage->uImageFlags & VD_VMDK_IMAGE_FLAGS_STREAM_OPTIMIZED) 5841 5452 { … … 5862 5473 else 5863 5474 rc = vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: could not create new descriptor in '%s'"), pImage->pszFilename); 5864 5865 5866 5475 if (RT_SUCCESS(rc)) 5867 5476 { … … 5869 5478 pImage->RegionList.fFlags = 0; 5870 5479 pImage->RegionList.cRegions = 1; 5871 5872 5480 pRegion->offRegion = 0; /* Disk start. */ 5873 5481 pRegion->cbBlock = 512; … … 5877 5485 pRegion->cbMetadata = 0; 5878 5486 pRegion->cRegionBlocksOrBytes = pImage->cbSize; 5879 5880 5487 vdIfProgress(pIfProgress, uPercentStart + uPercentSpan); 5881 5488 } … … 5884 5491 return rc; 5885 5492 } 5886 5887 5493 /** 5888 5494 * Internal: Update image comment. … … 5897 5503 return VERR_NO_MEMORY; 5898 5504 } 5899 5900 5505 int rc = vmdkDescDDBSetStr(pImage, &pImage->Descriptor, 5901 5506 "ddb.comment", pszCommentEncoded); … … 5906 5511 return VINF_SUCCESS; 5907 5512 } 5908 5909 5513 /** 5910 5514 * Internal. Clear the grain table buffer for real stream optimized writing. … … 5917 5521 VMDK_GT_CACHELINE_SIZE * sizeof(uint32_t)); 5918 5522 } 5919 5920 5523 /** 5921 5524 * Internal. Flush the grain table buffer for real stream optimized writing. … … 5926 5529 int rc = VINF_SUCCESS; 5927 5530 uint32_t cCacheLines = RT_ALIGN(pExtent->cGTEntries, VMDK_GT_CACHELINE_SIZE) / VMDK_GT_CACHELINE_SIZE; 5928 5929 5531 /* VMware does not write out completely empty grain tables in the case 5930 5532 * of streamOptimized images, which according to my interpretation of … … 5948 5550 if (fAllZero) 5949 5551 return VINF_SUCCESS; 5950 5951 5552 uint64_t uFileOffset = pExtent->uAppendPosition; 5952 5553 if (!uFileOffset) … … 5954 5555 /* Align to sector, as the previous write could have been any size. */ 5955 5556 uFileOffset = RT_ALIGN_64(uFileOffset, 512); 5956 5957 5557 /* Grain table marker. */ 5958 5558 uint8_t aMarker[512]; … … 5965 5565 AssertRC(rc); 5966 5566 uFileOffset += 512; 5967 5968 5567 if (!pExtent->pGD || pExtent->pGD[uGDEntry]) 5969 5568 return VERR_INTERNAL_ERROR; 5970 5971 5569 pExtent->pGD[uGDEntry] = VMDK_BYTE2SECTOR(uFileOffset); 5972 5973 5570 for (uint32_t i = 0; i < cCacheLines; i++) 5974 5571 { … … 5978 5575 for (uint32_t j = 0; j < VMDK_GT_CACHELINE_SIZE; j++, pGTTmp++) 5979 5576 *pGTTmp = RT_H2LE_U32(*pGTTmp); 5980 5981 5577 rc = vdIfIoIntFileWriteSync(pImage->pIfIo, pExtent->pFile->pStorage, uFileOffset, 5982 5578 &pImage->pGTCache->aGTCache[i].aGTData[0], … … 5990 5586 return rc; 5991 5587 } 5992 5993 5588 /** 5994 5589 * Internal. Free all allocated space for representing an image, and optionally … … 5998 5593 { 5999 5594 int rc = VINF_SUCCESS; 6000 6001 5595 /* Freeing a never allocated image (e.g. because the open failed) is 6002 5596 * not signalled as an error. After all nothing bad happens. */ … … 6024 5618 pImage->pExtents[i].fMetaDirty = true; 6025 5619 } 6026 6027 5620 /* From now on it's not safe to append any more data. */ 6028 5621 pImage->pExtents[i].uAppendPosition = 0; … … 6030 5623 } 6031 5624 } 6032 6033 5625 if (pImage->uImageFlags & VD_VMDK_IMAGE_FLAGS_STREAM_OPTIMIZED) 6034 5626 { … … 6049 5641 AssertRC(rc); 6050 5642 } 6051 6052 5643 uint64_t uFileOffset = pExtent->uAppendPosition; 6053 5644 if (!uFileOffset) 6054 5645 return VERR_INTERNAL_ERROR; 6055 5646 uFileOffset = RT_ALIGN_64(uFileOffset, 512); 6056 6057 5647 /* From now on it's not safe to append any more data. 
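 * The stream is finalized in a fixed order from here: a grain
 * directory marker plus the little-endian grain directory, then a
 * footer marker plus the footer (written via
 * vmdkWriteMetaSparseExtent), and finally the end-of-stream marker,
 * with everything kept 512-byte aligned.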
*/ 6058 5648 pExtent->uAppendPosition = 0; 6059 6060 5649 /* Grain directory marker. */ 6061 5650 uint8_t aMarker[512]; … … 6068 5657 AssertRC(rc); 6069 5658 uFileOffset += 512; 6070 6071 5659 /* Write grain directory in little endian style. The array will 6072 5660 * not be used after this, so convert in place. */ … … 6078 5666 pExtent->cGDEntries * sizeof(uint32_t)); 6079 5667 AssertRC(rc); 6080 6081 5668 pExtent->uSectorGD = VMDK_BYTE2SECTOR(uFileOffset); 6082 5669 pExtent->uSectorRGD = VMDK_BYTE2SECTOR(uFileOffset); … … 6084 5671 + pExtent->cGDEntries * sizeof(uint32_t), 6085 5672 512); 6086 6087 5673 /* Footer marker. */ 6088 5674 memset(pMarker, '\0', sizeof(aMarker)); … … 6092 5678 uFileOffset, aMarker, sizeof(aMarker)); 6093 5679 AssertRC(rc); 6094 6095 5680 uFileOffset += 512; 6096 5681 rc = vmdkWriteMetaSparseExtent(pImage, pExtent, uFileOffset, NULL); 6097 5682 AssertRC(rc); 6098 6099 5683 uFileOffset += 512; 6100 5684 /* End-of-stream marker. */ … … 6107 5691 else if (!fDelete && fFlush) 6108 5692 vmdkFlushImage(pImage, NULL); 6109 6110 5693 if (pImage->pExtents != NULL) 6111 5694 { … … 6129 5712 if (RT_SUCCESS(rc)) 6130 5713 rc = rc2; /* Propogate any error when closing the file. */ 6131 6132 5714 if (pImage->pGTCache) 6133 5715 { … … 6141 5723 } 6142 5724 } 6143 6144 5725 LogFlowFunc(("returns %Rrc\n", rc)); 6145 5726 return rc; 6146 5727 } 6147 6148 5728 /** 6149 5729 * Internal. Flush image data (and metadata) to disk. … … 6153 5733 PVMDKEXTENT pExtent; 6154 5734 int rc = VINF_SUCCESS; 6155 6156 5735 /* Update descriptor if changed. */ 6157 5736 if (pImage->Descriptor.fDirty) 6158 5737 rc = vmdkWriteDescriptor(pImage, pIoCtx); 6159 6160 5738 if (RT_SUCCESS(rc)) 6161 5739 { … … 6193 5771 } 6194 5772 } 6195 6196 5773 if (RT_FAILURE(rc)) 6197 5774 break; 6198 6199 5775 switch (pExtent->enmType) 6200 5776 { … … 6218 5794 } 6219 5795 } 6220 6221 5796 return rc; 6222 5797 } 6223 6224 5798 /** 6225 5799 * Internal. Find extent corresponding to the sector number in the disk. … … 6230 5804 PVMDKEXTENT pExtent = NULL; 6231 5805 int rc = VINF_SUCCESS; 6232 6233 5806 for (unsigned i = 0; i < pImage->cExtents; i++) 6234 5807 { … … 6241 5814 offSector -= pImage->pExtents[i].cNominalSectors; 6242 5815 } 6243 6244 5816 if (pExtent) 6245 5817 *ppExtent = pExtent; 6246 5818 else 6247 5819 rc = VERR_IO_SECTOR_NOT_FOUND; 6248 6249 5820 return rc; 6250 5821 } 6251 6252 5822 /** 6253 5823 * Internal. Hash function for placing the grain table hash entries. … … 6260 5830 return (uSector + uExtent) % pCache->cEntries; 6261 5831 } 6262 6263 5832 /** 6264 5833 * Internal. Get sector number in the extent file from the relative sector … … 6275 5844 uint32_t aGTDataTmp[VMDK_GT_CACHELINE_SIZE]; 6276 5845 int rc; 6277 6278 5846 /* For newly created and readonly/sequentially opened streamOptimized 6279 5847 * images this must be a no-op, as the grain directory is not there. */ … … 6287 5855 return VINF_SUCCESS; 6288 5856 } 6289 6290 5857 uGDIndex = uSector / pExtent->cSectorsPerGDE; 6291 5858 if (uGDIndex >= pExtent->cGDEntries) … … 6299 5866 return VINF_SUCCESS; 6300 5867 } 6301 6302 5868 uGTBlock = uSector / (pExtent->cSectorsPerGrain * VMDK_GT_CACHELINE_SIZE); 6303 5869 uGTHash = vmdkGTCacheHash(pCache, uGTBlock, pExtent->uExtent); … … 6328 5894 return VINF_SUCCESS; 6329 5895 } 6330 6331 5896 /** 6332 5897 * Internal. Writes the grain and also if necessary the grain tables. 
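 * Writes are only accepted if they cover a whole grain and start on a
 * grain boundary, and grains must arrive in ascending order because
 * the compressed stream can never seek backwards. As a rough example,
 * assuming the usual 64 KiB grains (cSectorsPerGrain = 128), a write
 * for sector 1024 targets grain 1024 / 128 = 8 and must supply all
 * 128 sectors of that grain; all-zero grains are simply skipped.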
… … 6343 5908 const void *pData; 6344 5909 int rc; 6345 6346 5910 /* Very strict requirements: always write at least one full grain, with 6347 5911 * proper alignment. Everything else would require reading of already … … 6356 5920 || uSector + VMDK_BYTE2SECTOR(cbWrite) > pExtent->cNominalSectors) 6357 5921 return VERR_INVALID_PARAMETER; 6358 6359 5922 /* Clip write range to at most the rest of the grain. */ 6360 5923 cbWrite = RT_MIN(cbWrite, VMDK_SECTOR2BYTE(pExtent->cSectorsPerGrain - uSector % pExtent->cSectorsPerGrain)); 6361 6362 5924 /* Do not allow to go back. */ 6363 5925 uGrain = uSector / pExtent->cSectorsPerGrain; … … 6368 5930 if (uGrain < pExtent->uLastGrainAccess) 6369 5931 return VERR_VD_VMDK_INVALID_WRITE; 6370 6371 5932 /* Zero byte write optimization. Since we don't tell VBoxHDD that we need 6372 5933 * to allocate something, we also need to detect the situation ourself. */ … … 6374 5935 && vdIfIoIntIoCtxIsZero(pImage->pIfIo, pIoCtx, cbWrite, true /* fAdvance */)) 6375 5936 return VINF_SUCCESS; 6376 6377 5937 if (uGDEntry != uLastGDEntry) 6378 5938 { … … 6388 5948 } 6389 5949 } 6390 6391 5950 uint64_t uFileOffset; 6392 5951 uFileOffset = pExtent->uAppendPosition; … … 6395 5954 /* Align to sector, as the previous write could have been any size. */ 6396 5955 uFileOffset = RT_ALIGN_64(uFileOffset, 512); 6397 6398 5956 /* Paranoia check: extent type, grain table buffer presence and 6399 5957 * grain table buffer space. Also grain table entry must be clear. */ … … 6403 5961 || pImage->pGTCache->aGTCache[uCacheLine].aGTData[uCacheEntry]) 6404 5962 return VERR_INTERNAL_ERROR; 6405 6406 5963 /* Update grain table entry. */ 6407 5964 pImage->pGTCache->aGTCache[uCacheLine].aGTData[uCacheEntry] = VMDK_BYTE2SECTOR(uFileOffset); 6408 6409 5965 if (cbWrite != VMDK_SECTOR2BYTE(pExtent->cSectorsPerGrain)) 6410 5966 { … … 6419 5975 unsigned cSegments = 1; 6420 5976 size_t cbSeg = 0; 6421 6422 5977 cbSeg = vdIfIoIntIoCtxSegArrayCreate(pImage->pIfIo, pIoCtx, &Segment, 6423 5978 &cSegments, VMDK_SECTOR2BYTE(pExtent->cSectorsPerGrain)); … … 6436 5991 pExtent->uLastGrainAccess = uGrain; 6437 5992 pExtent->uAppendPosition += cbGrain; 6438 6439 5993 return rc; 6440 5994 } 6441 6442 5995 /** 6443 5996 * Internal: Updates the grain table during grain allocation. … … 6453 6006 uint64_t uSector = pGrainAlloc->uSector; 6454 6007 PVMDKGTCACHEENTRY pGTCacheEntry; 6455 6456 6008 LogFlowFunc(("pImage=%#p pExtent=%#p pCache=%#p pIoCtx=%#p pGrainAlloc=%#p\n", 6457 6009 pImage, pExtent, pCache, pIoCtx, pGrainAlloc)); 6458 6459 6010 uGTSector = pGrainAlloc->uGTSector; 6460 6011 uRGTSector = pGrainAlloc->uRGTSector; 6461 6012 LogFlow(("uGTSector=%llu uRGTSector=%llu\n", uGTSector, uRGTSector)); 6462 6463 6013 /* Update the grain table (and the cache). */ 6464 6014 uGTBlock = uSector / (pExtent->cSectorsPerGrain * VMDK_GT_CACHELINE_SIZE); … … 6523 6073 return vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: cannot write updated backup grain table in '%s'"), pExtent->pszFullname); 6524 6074 } 6525 6526 6075 LogFlowFunc(("leaving rc=%Rrc\n", rc)); 6527 6076 return rc; 6528 6077 } 6529 6530 6078 /** 6531 6079 * Internal - complete the grain allocation by updating disk grain table if required. 
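 * Called once per completed async transfer: it decrements the
 * cIoXfersPending counter of the VMDKGRAINALLOCASYNC tracking
 * structure, performs the deferred grain table update when the last
 * transfer finishes and an update is still needed, and then frees the
 * tracking structure.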
… … 6537 6085 PVMDKIMAGE pImage = (PVMDKIMAGE)pBackendData; 6538 6086 PVMDKGRAINALLOCASYNC pGrainAlloc = (PVMDKGRAINALLOCASYNC)pvUser; 6539 6540 6087 LogFlowFunc(("pBackendData=%#p pIoCtx=%#p pvUser=%#p rcReq=%Rrc\n", 6541 6088 pBackendData, pIoCtx, pvUser, rcReq)); 6542 6543 6089 pGrainAlloc->cIoXfersPending--; 6544 6090 if (!pGrainAlloc->cIoXfersPending && pGrainAlloc->fGTUpdateNeeded) 6545 6091 rc = vmdkAllocGrainGTUpdate(pImage, pGrainAlloc->pExtent, pIoCtx, pGrainAlloc); 6546 6547 6092 if (!pGrainAlloc->cIoXfersPending) 6548 6093 { … … 6550 6095 RTMemFree(pGrainAlloc); 6551 6096 } 6552 6553 6097 LogFlowFunc(("Leaving rc=%Rrc\n", rc)); 6554 6098 return rc; 6555 6099 } 6556 6557 6100 /** 6558 6101 * Internal. Allocates a new grain table (if necessary). … … 6566 6109 PVMDKGRAINALLOCASYNC pGrainAlloc = NULL; 6567 6110 int rc; 6568 6569 6111 LogFlowFunc(("pCache=%#p pExtent=%#p pIoCtx=%#p uSector=%llu cbWrite=%llu\n", 6570 6112 pCache, pExtent, pIoCtx, uSector, cbWrite)); 6571 6572 6113 pGrainAlloc = (PVMDKGRAINALLOCASYNC)RTMemAllocZ(sizeof(VMDKGRAINALLOCASYNC)); 6573 6114 if (!pGrainAlloc) 6574 6115 return VERR_NO_MEMORY; 6575 6576 6116 pGrainAlloc->pExtent = pExtent; 6577 6117 pGrainAlloc->uSector = uSector; 6578 6579 6118 uGDIndex = uSector / pExtent->cSectorsPerGDE; 6580 6119 if (uGDIndex >= pExtent->cGDEntries) … … 6591 6130 { 6592 6131 LogFlow(("Allocating new grain table\n")); 6593 6594 6132 /* There is no grain table referenced by this grain directory 6595 6133 * entry. So there is absolutely no data in this area. Allocate … … 6602 6140 } 6603 6141 Assert(!(uFileOffset % 512)); 6604 6605 6142 uFileOffset = RT_ALIGN_64(uFileOffset, 512); 6606 6143 uGTSector = VMDK_BYTE2SECTOR(uFileOffset); 6607 6608 6144 /* Normally the grain table is preallocated for hosted sparse extents 6609 6145 * that support more than 32 bit sector numbers. So this shouldn't … … 6614 6150 return VERR_VD_VMDK_INVALID_HEADER; 6615 6151 } 6616 6617 6152 /* Write grain table by writing the required number of grain table 6618 6153 * cache chunks. Allocate memory dynamically here or we flood the … … 6620 6155 size_t cbGTDataTmp = pExtent->cGTEntries * sizeof(uint32_t); 6621 6156 uint32_t *paGTDataTmp = (uint32_t *)RTMemTmpAllocZ(cbGTDataTmp); 6622 6623 6157 if (!paGTDataTmp) 6624 6158 { … … 6626 6160 return VERR_NO_MEMORY; 6627 6161 } 6628 6629 6162 memset(paGTDataTmp, '\0', cbGTDataTmp); 6630 6163 rc = vdIfIoIntFileWriteMeta(pImage->pIfIo, pExtent->pFile->pStorage, … … 6642 6175 pExtent->uAppendPosition = RT_ALIGN_64( pExtent->uAppendPosition 6643 6176 + cbGTDataTmp, 512); 6644 6645 6177 if (pExtent->pRGD) 6646 6178 { … … 6651 6183 Assert(!(uFileOffset % 512)); 6652 6184 uRGTSector = VMDK_BYTE2SECTOR(uFileOffset); 6653 6654 6185 /* Normally the redundant grain table is preallocated for hosted 6655 6186 * sparse extents that support more than 32 bit sector numbers. So … … 6660 6191 return VERR_VD_VMDK_INVALID_HEADER; 6661 6192 } 6662 6663 6193 /* Write grain table by writing the required number of grain table 6664 6194 * cache chunks. 
Allocate memory dynamically here or we flood the … … 6675 6205 return vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: cannot write backup grain table allocation in '%s'"), pExtent->pszFullname); 6676 6206 } 6677 6678 6207 pExtent->uAppendPosition = pExtent->uAppendPosition + cbGTDataTmp; 6679 6208 } 6680 6681 6209 RTMemTmpFree(paGTDataTmp); 6682 6683 6210 /* Update the grain directory on disk (doing it before writing the 6684 6211 * grain table will result in a garbled extent if the operation is … … 6706 6233 return vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: cannot write backup grain directory entry in '%s'"), pExtent->pszFullname); 6707 6234 } 6708 6709 6235 /* As the final step update the in-memory copy of the GDs. */ 6710 6236 pExtent->pGD[uGDIndex] = uGTSector; … … 6712 6238 pExtent->pRGD[uGDIndex] = uRGTSector; 6713 6239 } 6714 6715 6240 LogFlow(("uGTSector=%llu uRGTSector=%llu\n", uGTSector, uRGTSector)); 6716 6241 pGrainAlloc->uGTSector = uGTSector; 6717 6242 pGrainAlloc->uRGTSector = uRGTSector; 6718 6719 6243 uFileOffset = pExtent->uAppendPosition; 6720 6244 if (!uFileOffset) 6721 6245 return VERR_INTERNAL_ERROR; 6722 6246 Assert(!(uFileOffset % 512)); 6723 6724 6247 pGrainAlloc->uGrainOffset = uFileOffset; 6725 6726 6248 if (pImage->uImageFlags & VD_VMDK_IMAGE_FLAGS_STREAM_OPTIMIZED) 6727 6249 { … … 6729 6251 ("Accesses to stream optimized images must be synchronous\n"), 6730 6252 VERR_INVALID_STATE); 6731 6732 6253 if (cbWrite != VMDK_SECTOR2BYTE(pExtent->cSectorsPerGrain)) 6733 6254 return vdIfError(pImage->pIfError, VERR_INTERNAL_ERROR, RT_SRC_POS, N_("VMDK: not enough data for a compressed data block in '%s'"), pExtent->pszFullname); 6734 6735 6255 /* Invalidate cache, just in case some code incorrectly allows mixing 6736 6256 * of reads and writes. Normally shouldn't be needed. */ 6737 6257 pExtent->uGrainSectorAbs = 0; 6738 6739 6258 /* Write compressed data block and the markers. */ 6740 6259 uint32_t cbGrain = 0; … … 6742 6261 RTSGSEG Segment; 6743 6262 unsigned cSegments = 1; 6744 6745 6263 cbSeg = vdIfIoIntIoCtxSegArrayCreate(pImage->pIfIo, pIoCtx, &Segment, 6746 6264 &cSegments, cbWrite); 6747 6265 Assert(cbSeg == cbWrite); 6748 6749 6266 rc = vmdkFileDeflateSync(pImage, pExtent, uFileOffset, 6750 6267 Segment.pvSeg, cbWrite, uSector, &cbGrain); … … 6767 6284 else if (RT_FAILURE(rc)) 6768 6285 return vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: cannot write allocated data block in '%s'"), pExtent->pszFullname); 6769 6770 6286 pExtent->uAppendPosition += cbWrite; 6771 6287 } 6772 6773 6288 rc = vmdkAllocGrainGTUpdate(pImage, pExtent, pIoCtx, pGrainAlloc); 6774 6775 6289 if (!pGrainAlloc->cIoXfersPending) 6776 6290 { … … 6778 6292 RTMemFree(pGrainAlloc); 6779 6293 } 6780 6781 6294 LogFlowFunc(("leaving rc=%Rrc\n", rc)); 6782 6783 6295 return rc; 6784 6296 } 6785 6786 6297 /** 6787 6298 * Internal. Reads the contents by sequentially going over the compressed … … 6793 6304 { 6794 6305 int rc; 6795 6796 6306 LogFlowFunc(("pImage=%#p pExtent=%#p uSector=%llu pIoCtx=%#p cbRead=%llu\n", 6797 6307 pImage, pExtent, uSector, pIoCtx, cbRead)); 6798 6799 6308 AssertMsgReturn(vdIfIoIntIoCtxIsSynchronous(pImage->pIfIo, pIoCtx), 6800 6309 ("Async I/O not supported for sequential stream optimized images\n"), 6801 6310 VERR_INVALID_STATE); 6802 6803 6311 /* Do not allow to go back. 
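 * Sequential reads over a streamOptimized image are strictly
 * monotonic: the requested grain may never be lower than the last one
 * accessed, and after any decoding error uGrainSectorAbs stays 0 so
 * no recovery by seeking backwards is attempted.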
*/ 6804 6312 uint32_t uGrain = uSector / pExtent->cSectorsPerGrain; … … 6806 6314 return VERR_VD_VMDK_INVALID_STATE; 6807 6315 pExtent->uLastGrainAccess = uGrain; 6808 6809 6316 /* After a previous error do not attempt to recover, as it would need 6810 6317 * seeking (in the general case backwards which is forbidden). */ 6811 6318 if (!pExtent->uGrainSectorAbs) 6812 6319 return VERR_VD_VMDK_INVALID_STATE; 6813 6814 6320 /* Check if we need to read something from the image or if what we have 6815 6321 * in the buffer is good to fulfill the request. */ … … 6818 6324 uint32_t uGrainSectorAbs = pExtent->uGrainSectorAbs 6819 6325 + VMDK_BYTE2SECTOR(pExtent->cbGrainStreamRead); 6820 6821 6326 /* Get the marker from the next data block - and skip everything which 6822 6327 * is not a compressed grain. If it's a compressed grain which is for … … 6833 6338 Marker.uSector = RT_LE2H_U64(Marker.uSector); 6834 6339 Marker.cbSize = RT_LE2H_U32(Marker.cbSize); 6835 6836 6340 if (Marker.cbSize == 0) 6837 6341 { … … 6912 6416 } 6913 6417 } while (Marker.uType != VMDK_MARKER_EOS); 6914 6915 6418 pExtent->uGrainSectorAbs = uGrainSectorAbs; 6916 6917 6419 if (!pExtent->cbGrainStreamRead && Marker.uType == VMDK_MARKER_EOS) 6918 6420 { … … 6923 6425 } 6924 6426 } 6925 6926 6427 if (pExtent->uGrain > uSector / pExtent->cSectorsPerGrain) 6927 6428 { … … 6931 6432 return VERR_VD_BLOCK_FREE; 6932 6433 } 6933 6934 6434 uint32_t uSectorInGrain = uSector % pExtent->cSectorsPerGrain; 6935 6435 vdIfIoIntIoCtxCopyTo(pImage->pIfIo, pIoCtx, … … 6939 6439 return VINF_SUCCESS; 6940 6440 } 6941 6942 6441 /** 6943 6442 * Replaces a fragment of a string with the specified string. … … 7008 6507 return pszNewStr; 7009 6508 } 7010 7011 7012 6509 /** @copydoc VDIMAGEBACKEND::pfnProbe */ 7013 6510 static DECLCALLBACK(int) vmdkProbe(const char *pszFilename, PVDINTERFACE pVDIfsDisk, … … 7037 6534 vmdkFreeImage(pImage, false, false /*fFlush*/); 7038 6535 RTMemFree(pImage); 7039 7040 6536 if (RT_SUCCESS(rc)) 7041 6537 *penmType = VDTYPE_HDD; … … 7043 6539 else 7044 6540 rc = VERR_NO_MEMORY; 7045 7046 6541 LogFlowFunc(("returns %Rrc\n", rc)); 7047 6542 return rc; 7048 6543 } 7049 7050 6544 /** @copydoc VDIMAGEBACKEND::pfnOpen */ 7051 6545 static DECLCALLBACK(int) vmdkOpen(const char *pszFilename, unsigned uOpenFlags, … … 7054 6548 { 7055 6549 RT_NOREF1(enmType); /**< @todo r=klaus make use of the type info. */ 7056 7057 6550 LogFlowFunc(("pszFilename=\"%s\" uOpenFlags=%#x pVDIfsDisk=%#p pVDIfsImage=%#p enmType=%u ppBackendData=%#p\n", 7058 6551 pszFilename, uOpenFlags, pVDIfsDisk, pVDIfsImage, enmType, ppBackendData)); 7059 6552 int rc; 7060 7061 6553 /* Check open flags. All valid flags are supported. 
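 * Only the generic flag mask is validated at this point; the
 * streamOptimized-specific constraints (such images are treated as
 * read-only respectively sequential-only once written) are enforced
 * later, during vmdkOpenImage and in the read/write paths.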
*/ 7062 6554 AssertReturn(!(uOpenFlags & ~VD_OPEN_FLAGS_MASK), VERR_INVALID_PARAMETER); 7063 6555 AssertPtrReturn(pszFilename, VERR_INVALID_POINTER); 7064 6556 AssertReturn(*pszFilename != '\0', VERR_INVALID_PARAMETER); 7065 7066 6557 7067 6558 PVMDKIMAGE pImage = (PVMDKIMAGE)RTMemAllocZ(RT_UOFFSETOF(VMDKIMAGE, RegionList.aRegions[1])); … … 7076 6567 pImage->pVDIfsDisk = pVDIfsDisk; 7077 6568 pImage->pVDIfsImage = pVDIfsImage; 7078 7079 6569 rc = vmdkOpenImage(pImage, uOpenFlags); 7080 6570 if (RT_SUCCESS(rc)) … … 7085 6575 else 7086 6576 rc = VERR_NO_MEMORY; 7087 7088 6577 LogFlowFunc(("returns %Rrc (pBackendData=%#p)\n", rc, *ppBackendData)); 7089 6578 return rc; 7090 6579 } 7091 7092 6580 /** @copydoc VDIMAGEBACKEND::pfnCreate */ 7093 6581 static DECLCALLBACK(int) vmdkCreate(const char *pszFilename, uint64_t cbSize, … … 7103 6591 pszFilename, cbSize, uImageFlags, pszComment, pPCHSGeometry, pLCHSGeometry, pUuid, uOpenFlags, uPercentStart, uPercentSpan, pVDIfsDisk, pVDIfsImage, pVDIfsOperation, enmType, ppBackendData)); 7104 6592 int rc; 7105 7106 6593 /* Check the VD container type and image flags. */ 7107 6594 if ( enmType != VDTYPE_HDD 7108 6595 || (uImageFlags & ~VD_VMDK_IMAGE_FLAGS_MASK) != 0) 7109 6596 return VERR_VD_INVALID_TYPE; 7110 7111 6597 /* Check size. Maximum 256TB-64K for sparse images, otherwise unlimited. */ 7112 6598 if ( !(uImageFlags & VD_VMDK_IMAGE_FLAGS_RAWDISK) … … 7114 6600 || (!(uImageFlags & VD_IMAGE_FLAGS_FIXED) && cbSize >= _1T * 256 - _64K))) 7115 6601 return VERR_VD_INVALID_SIZE; 7116 7117 6602 /* Check image flags for invalid combinations. */ 7118 6603 if ( (uImageFlags & VD_VMDK_IMAGE_FLAGS_STREAM_OPTIMIZED) 7119 6604 && (uImageFlags & ~(VD_VMDK_IMAGE_FLAGS_STREAM_OPTIMIZED | VD_IMAGE_FLAGS_DIFF))) 7120 6605 return VERR_INVALID_PARAMETER; 7121 7122 6606 /* Check open flags. All valid flags are supported. */ 7123 6607 AssertReturn(!(uOpenFlags & ~VD_OPEN_FLAGS_MASK), VERR_INVALID_PARAMETER); … … 7129 6613 && !(uImageFlags & VD_IMAGE_FLAGS_FIXED)), 7130 6614 VERR_INVALID_PARAMETER); 7131 7132 6615 PVMDKIMAGE pImage = (PVMDKIMAGE)RTMemAllocZ(RT_UOFFSETOF(VMDKIMAGE, RegionList.aRegions[1])); 7133 6616 if (RT_LIKELY(pImage)) 7134 6617 { 7135 6618 PVDINTERFACEPROGRESS pIfProgress = VDIfProgressGet(pVDIfsOperation); 7136 7137 6619 pImage->pszFilename = pszFilename; 7138 6620 pImage->pFile = NULL; … … 7165 6647 rc = vmdkOpenImage(pImage, uOpenFlags); 7166 6648 } 7167 7168 6649 if (RT_SUCCESS(rc)) 7169 6650 *ppBackendData = pImage; 7170 6651 } 7171 7172 6652 if (RT_FAILURE(rc)) 7173 6653 RTMemFree(pImage->pDescData); … … 7175 6655 else 7176 6656 rc = VERR_NO_MEMORY; 7177 7178 6657 if (RT_FAILURE(rc)) 7179 6658 RTMemFree(pImage); … … 7181 6660 else 7182 6661 rc = VERR_NO_MEMORY; 7183 7184 6662 LogFlowFunc(("returns %Rrc (pBackendData=%#p)\n", rc, *ppBackendData)); 7185 6663 return rc; 7186 6664 } 7187 7188 6665 /** 7189 6666 * Prepares the state for renaming a VMDK image, setting up the state and allocating … … 7198 6675 { 7199 6676 AssertReturn(RTPathFilename(pszFilename) != NULL, VERR_INVALID_PARAMETER); 7200 7201 6677 int rc = VINF_SUCCESS; 7202 7203 6678 memset(&pRenameState->DescriptorCopy, 0, sizeof(pRenameState->DescriptorCopy)); 7204 7205 6679 /* 7206 6680 * Allocate an array to store both old and new names of renamed files … … 7228 6702 pRenameState->fEmbeddedDesc = true; 7229 6703 } 7230 7231 6704 /* Save the descriptor content. 
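 * A full copy of the descriptor lines is kept so that the rollback
 * path can put the original state back if the rename fails after
 * files have already been moved.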
*/ 7232 6705 pRenameState->DescriptorCopy.cLines = pImage->Descriptor.cLines; … … 7240 6713 } 7241 6714 } 7242 7243 6715 if (RT_SUCCESS(rc)) 7244 6716 { … … 7247 6719 AssertReturn(pRenameState->pszNewBaseName, VERR_NO_STR_MEMORY); 7248 6720 RTPathStripSuffix(pRenameState->pszNewBaseName); 7249 7250 6721 pRenameState->pszOldBaseName = RTStrDup(RTPathFilename(pImage->pszFilename)); 7251 6722 AssertReturn(pRenameState->pszOldBaseName, VERR_NO_STR_MEMORY); 7252 6723 RTPathStripSuffix(pRenameState->pszOldBaseName); 7253 7254 6724 /* Prepare both old and new full names used for string replacement. 7255 6725 Note! Must abspath the stuff here, so the strstr weirdness later in … … 7259 6729 AssertReturn(pRenameState->pszNewFullName, VERR_NO_STR_MEMORY); 7260 6730 RTPathStripSuffix(pRenameState->pszNewFullName); 7261 7262 6731 pRenameState->pszOldFullName = RTPathAbsDup(pImage->pszFilename); 7263 6732 AssertReturn(pRenameState->pszOldFullName, VERR_NO_STR_MEMORY); 7264 6733 RTPathStripSuffix(pRenameState->pszOldFullName); 7265 7266 6734 /* Save the old name for easy access to the old descriptor file. */ 7267 6735 pRenameState->pszOldDescName = RTStrDup(pImage->pszFilename); 7268 6736 AssertReturn(pRenameState->pszOldDescName, VERR_NO_STR_MEMORY); 7269 7270 6737 /* Save old image name. */ 7271 6738 pRenameState->pszOldImageName = pImage->pszFilename; … … 7274 6741 else 7275 6742 rc = VERR_NO_TMP_MEMORY; 7276 7277 6743 return rc; 7278 6744 } 7279 7280 6745 /** 7281 6746 * Destroys the given rename state, freeing all allocated memory. … … 7321 6786 RTStrFree(pRenameState->pszNewFullName); 7322 6787 } 7323 7324 6788 /** 7325 6789 * Rolls back the rename operation to the original state. … … 7332 6796 { 7333 6797 int rc = VINF_SUCCESS; 7334 7335 6798 if (!pRenameState->fImageFreed) 7336 6799 { … … 7341 6804 vmdkFreeImage(pImage, false, true /*fFlush*/); 7342 6805 } 7343 7344 6806 /* Rename files back. */ 7345 6807 for (unsigned i = 0; i <= pRenameState->cExtents; i++) … … 7380 6842 pImage->pszFilename = pRenameState->pszOldImageName; 7381 6843 rc = vmdkOpenImage(pImage, pImage->uOpenFlags); 7382 7383 6844 return rc; 7384 6845 } 7385 7386 6846 /** 7387 6847 * Rename worker doing the real work. … … 7396 6856 int rc = VINF_SUCCESS; 7397 6857 unsigned i, line; 7398 7399 6858 /* Update the descriptor with modified extent names. */ 7400 6859 for (i = 0, line = pImage->Descriptor.uFirstExtent; … … 7413 6872 pImage->Descriptor.aLines[line] = pRenameState->apszNewLines[i]; 7414 6873 } 7415 7416 6874 if (RT_SUCCESS(rc)) 7417 6875 { … … 7420 6878 /* Flush the descriptor now, in case it is embedded. */ 7421 6879 vmdkFlushImage(pImage, NULL); 7422 7423 6880 /* Close and rename/move extents. */ 7424 6881 for (i = 0; i < pRenameState->cExtents; i++) … … 7438 6895 if (RT_FAILURE(rc)) 7439 6896 break;; 7440 7441 6897 /* Rename the extent file. */ 7442 6898 rc = vdIfIoIntFileMove(pImage->pIfIo, pExtent->pszFullname, pRenameState->apszNewName[i], 0); … … 7446 6902 pRenameState->apszOldName[i] = RTStrDup(pExtent->pszFullname); 7447 6903 } 7448 7449 6904 if (RT_SUCCESS(rc)) 7450 6905 { … … 7454 6909 { 7455 6910 pRenameState->fImageFreed = true; 7456 7457 6911 /* Last elements of new/old name arrays are intended for 7458 6912 * storing descriptor's names. … … 7469 6923 } 7470 6924 } 7471 7472 6925 /* Update pImage with the new information. */ 7473 6926 pImage->pszFilename = pszFilename; 7474 7475 6927 /* Open the new image. 
*/ 7476 6928 rc = vmdkOpenImage(pImage, pImage->uOpenFlags); … … 7478 6930 } 7479 6931 } 7480 7481 6932 return rc; 7482 6933 } 7483 7484 6934 /** @copydoc VDIMAGEBACKEND::pfnRename */ 7485 6935 static DECLCALLBACK(int) vmdkRename(void *pBackendData, const char *pszFilename) 7486 6936 { 7487 6937 LogFlowFunc(("pBackendData=%#p pszFilename=%#p\n", pBackendData, pszFilename)); 7488 7489 6938 PVMDKIMAGE pImage = (PVMDKIMAGE)pBackendData; 7490 6939 VMDKRENAMESTATE RenameState; 7491 7492 6940 memset(&RenameState, 0, sizeof(RenameState)); 7493 7494 6941 /* Check arguments. */ 7495 6942 AssertPtrReturn(pImage, VERR_INVALID_POINTER); … … 7497 6944 AssertReturn(*pszFilename != '\0', VERR_INVALID_PARAMETER); 7498 6945 AssertReturn(!(pImage->uImageFlags & VD_VMDK_IMAGE_FLAGS_RAWDISK), VERR_INVALID_PARAMETER); 7499 7500 6946 int rc = vmdkRenameStatePrepare(pImage, &RenameState, pszFilename); 7501 6947 if (RT_SUCCESS(rc)) 7502 6948 { 7503 6949 /* --- Up to this point we have not done any damage yet. --- */ 7504 7505 6950 rc = vmdkRenameWorker(pImage, &RenameState, pszFilename); 7506 6951 /* Roll back all changes in case of failure. */ … … 7511 6956 } 7512 6957 } 7513 7514 6958 vmdkRenameStateDestroy(&RenameState); 7515 6959 LogFlowFunc(("returns %Rrc\n", rc)); 7516 6960 return rc; 7517 6961 } 7518 7519 6962 /** @copydoc VDIMAGEBACKEND::pfnClose */ 7520 6963 static DECLCALLBACK(int) vmdkClose(void *pBackendData, bool fDelete) … … 7522 6965 LogFlowFunc(("pBackendData=%#p fDelete=%d\n", pBackendData, fDelete)); 7523 6966 PVMDKIMAGE pImage = (PVMDKIMAGE)pBackendData; 7524 7525 6967 int rc = vmdkFreeImage(pImage, fDelete, true /*fFlush*/); 7526 6968 RTMemFree(pImage); 7527 7528 6969 LogFlowFunc(("returns %Rrc\n", rc)); 7529 6970 return rc; 7530 6971 } 7531 7532 6972 /** @copydoc VDIMAGEBACKEND::pfnRead */ 7533 6973 static DECLCALLBACK(int) vmdkRead(void *pBackendData, uint64_t uOffset, size_t cbToRead, … … 7537 6977 pBackendData, uOffset, pIoCtx, cbToRead, pcbActuallyRead)); 7538 6978 PVMDKIMAGE pImage = (PVMDKIMAGE)pBackendData; 7539 7540 6979 AssertPtr(pImage); 7541 6980 Assert(uOffset % 512 == 0); … … 7544 6983 AssertReturn(cbToRead, VERR_INVALID_PARAMETER); 7545 6984 AssertReturn(uOffset + cbToRead <= pImage->cbSize, VERR_INVALID_PARAMETER); 7546 7547 6985 /* Find the extent and check access permissions as defined in the extent descriptor. */ 7548 6986 PVMDKEXTENT pExtent; … … 7555 6993 /* Clip read range to remain in this extent. */ 7556 6994 cbToRead = RT_MIN(cbToRead, VMDK_SECTOR2BYTE(pExtent->uSectorOffset + pExtent->cNominalSectors - uSectorExtentRel)); 7557 7558 6995 /* Handle the read according to the current extent type. 
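 * For sparse extents the extent-relative sector is first translated
 * to an absolute file sector through the grain table (vmdkGetSector);
 * a grain that was never allocated is not read from disk at all but
 * reported back to the caller as a free, zero-filled block.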
*/ 7559 6996 switch (pExtent->enmType) … … 7562 6999 { 7563 7000 uint64_t uSectorExtentAbs; 7564 7565 7001 rc = vmdkGetSector(pImage, pIoCtx, pExtent, uSectorExtentRel, &uSectorExtentAbs); 7566 7002 if (RT_FAILURE(rc)) … … 7586 7022 AssertMsg(vdIfIoIntIoCtxIsSynchronous(pImage->pIfIo, pIoCtx), 7587 7023 ("Async I/O is not supported for stream optimized VMDK's\n")); 7588 7589 7024 uint32_t uSectorInGrain = uSectorExtentRel % pExtent->cSectorsPerGrain; 7590 7025 uSectorExtentAbs -= uSectorInGrain; … … 7627 7062 { 7628 7063 size_t cbSet; 7629 7630 7064 cbSet = vdIfIoIntIoCtxSet(pImage->pIfIo, pIoCtx, 0, cbToRead); 7631 7065 Assert(cbSet == cbToRead); … … 7638 7072 else if (RT_SUCCESS(rc)) 7639 7073 rc = VERR_VD_VMDK_INVALID_STATE; 7640 7641 7074 LogFlowFunc(("returns %Rrc\n", rc)); 7642 7075 return rc; 7643 7076 } 7644 7645 7077 /** @copydoc VDIMAGEBACKEND::pfnWrite */ 7646 7078 static DECLCALLBACK(int) vmdkWrite(void *pBackendData, uint64_t uOffset, size_t cbToWrite, … … 7652 7084 PVMDKIMAGE pImage = (PVMDKIMAGE)pBackendData; 7653 7085 int rc; 7654 7655 7086 AssertPtr(pImage); 7656 7087 Assert(uOffset % 512 == 0); … … 7658 7089 AssertPtrReturn(pIoCtx, VERR_INVALID_POINTER); 7659 7090 AssertReturn(cbToWrite, VERR_INVALID_PARAMETER); 7660 7661 7091 if (!(pImage->uOpenFlags & VD_OPEN_FLAGS_READONLY)) 7662 7092 { … … 7664 7094 uint64_t uSectorExtentRel; 7665 7095 uint64_t uSectorExtentAbs; 7666 7667 7096 /* No size check here, will do that later when the extent is located. 7668 7097 * There are sparse images out there which according to the spec are … … 7671 7100 * grain boundaries, and with the nominal size not being a multiple of the 7672 7101 * grain size), this would prevent writing to the last grain. */ 7673 7674 7102 rc = vmdkFindExtent(pImage, VMDK_BYTE2SECTOR(uOffset), 7675 7103 &pExtent, &uSectorExtentRel); … … 7769 7197 } 7770 7198 } 7771 7772 7199 if (pcbWriteProcess) 7773 7200 *pcbWriteProcess = cbToWrite; … … 7776 7203 else 7777 7204 rc = VERR_VD_IMAGE_READ_ONLY; 7778 7779 7205 LogFlowFunc(("returns %Rrc\n", rc)); 7780 7206 return rc; 7781 7207 } 7782 7783 7208 /** @copydoc VDIMAGEBACKEND::pfnFlush */ 7784 7209 static DECLCALLBACK(int) vmdkFlush(void *pBackendData, PVDIOCTX pIoCtx) 7785 7210 { 7786 7211 PVMDKIMAGE pImage = (PVMDKIMAGE)pBackendData; 7787 7788 7212 return vmdkFlushImage(pImage, pIoCtx); 7789 7213 } 7790 7791 7214 /** @copydoc VDIMAGEBACKEND::pfnGetVersion */ 7792 7215 static DECLCALLBACK(unsigned) vmdkGetVersion(void *pBackendData) … … 7794 7217 LogFlowFunc(("pBackendData=%#p\n", pBackendData)); 7795 7218 PVMDKIMAGE pImage = (PVMDKIMAGE)pBackendData; 7796 7797 7219 AssertPtrReturn(pImage, 0); 7798 7799 7220 return VMDK_IMAGE_VERSION; 7800 7221 } 7801 7802 7222 /** @copydoc VDIMAGEBACKEND::pfnGetFileSize */ 7803 7223 static DECLCALLBACK(uint64_t) vmdkGetFileSize(void *pBackendData) … … 7806 7226 PVMDKIMAGE pImage = (PVMDKIMAGE)pBackendData; 7807 7227 uint64_t cb = 0; 7808 7809 7228 AssertPtrReturn(pImage, 0); 7810 7811 7229 if (pImage->pFile != NULL) 7812 7230 { … … 7826 7244 } 7827 7245 } 7828 7829 7246 LogFlowFunc(("returns %lld\n", cb)); 7830 7247 return cb; 7831 7248 } 7832 7833 7249 /** @copydoc VDIMAGEBACKEND::pfnGetPCHSGeometry */ 7834 7250 static DECLCALLBACK(int) vmdkGetPCHSGeometry(void *pBackendData, PVDGEOMETRY pPCHSGeometry) … … 7837 7253 PVMDKIMAGE pImage = (PVMDKIMAGE)pBackendData; 7838 7254 int rc = VINF_SUCCESS; 7839 7840 7255 AssertPtrReturn(pImage, VERR_VD_NOT_OPENED); 7841 7842 7256 if (pImage->PCHSGeometry.cCylinders) 7843 7257 *pPCHSGeometry = 
pImage->PCHSGeometry; 7844 7258 else 7845 7259 rc = VERR_VD_GEOMETRY_NOT_SET; 7846 7847 7260 LogFlowFunc(("returns %Rrc (PCHS=%u/%u/%u)\n", rc, pPCHSGeometry->cCylinders, pPCHSGeometry->cHeads, pPCHSGeometry->cSectors)); 7848 7261 return rc; 7849 7262 } 7850 7851 7263 /** @copydoc VDIMAGEBACKEND::pfnSetPCHSGeometry */ 7852 7264 static DECLCALLBACK(int) vmdkSetPCHSGeometry(void *pBackendData, PCVDGEOMETRY pPCHSGeometry) … … 7856 7268 PVMDKIMAGE pImage = (PVMDKIMAGE)pBackendData; 7857 7269 int rc = VINF_SUCCESS; 7858 7859 7270 AssertPtrReturn(pImage, VERR_VD_NOT_OPENED); 7860 7861 7271 if (!(pImage->uOpenFlags & VD_OPEN_FLAGS_READONLY)) 7862 7272 { … … 7872 7282 else 7873 7283 rc = VERR_VD_IMAGE_READ_ONLY; 7874 7875 7284 LogFlowFunc(("returns %Rrc\n", rc)); 7876 7285 return rc; 7877 7286 } 7878 7879 7287 /** @copydoc VDIMAGEBACKEND::pfnGetLCHSGeometry */ 7880 7288 static DECLCALLBACK(int) vmdkGetLCHSGeometry(void *pBackendData, PVDGEOMETRY pLCHSGeometry) … … 7883 7291 PVMDKIMAGE pImage = (PVMDKIMAGE)pBackendData; 7884 7292 int rc = VINF_SUCCESS; 7885 7886 7293 AssertPtrReturn(pImage, VERR_VD_NOT_OPENED); 7887 7888 7294 if (pImage->LCHSGeometry.cCylinders) 7889 7295 *pLCHSGeometry = pImage->LCHSGeometry; 7890 7296 else 7891 7297 rc = VERR_VD_GEOMETRY_NOT_SET; 7892 7893 7298 LogFlowFunc(("returns %Rrc (LCHS=%u/%u/%u)\n", rc, pLCHSGeometry->cCylinders, pLCHSGeometry->cHeads, pLCHSGeometry->cSectors)); 7894 7299 return rc; 7895 7300 } 7896 7897 7301 /** @copydoc VDIMAGEBACKEND::pfnSetLCHSGeometry */ 7898 7302 static DECLCALLBACK(int) vmdkSetLCHSGeometry(void *pBackendData, PCVDGEOMETRY pLCHSGeometry) … … 7902 7306 PVMDKIMAGE pImage = (PVMDKIMAGE)pBackendData; 7903 7307 int rc = VINF_SUCCESS; 7904 7905 7308 AssertPtrReturn(pImage, VERR_VD_NOT_OPENED); 7906 7907 7309 if (!(pImage->uOpenFlags & VD_OPEN_FLAGS_READONLY)) 7908 7310 { … … 7918 7320 else 7919 7321 rc = VERR_VD_IMAGE_READ_ONLY; 7920 7921 7322 LogFlowFunc(("returns %Rrc\n", rc)); 7922 7323 return rc; 7923 7324 } 7924 7925 7325 /** @copydoc VDIMAGEBACKEND::pfnQueryRegions */ 7926 7326 static DECLCALLBACK(int) vmdkQueryRegions(void *pBackendData, PCVDREGIONLIST *ppRegionList) … … 7928 7328 LogFlowFunc(("pBackendData=%#p ppRegionList=%#p\n", pBackendData, ppRegionList)); 7929 7329 PVMDKIMAGE pThis = (PVMDKIMAGE)pBackendData; 7930 7931 7330 AssertPtrReturn(pThis, VERR_VD_NOT_OPENED); 7932 7933 7331 *ppRegionList = &pThis->RegionList; 7934 7332 LogFlowFunc(("returns %Rrc\n", VINF_SUCCESS)); 7935 7333 return VINF_SUCCESS; 7936 7334 } 7937 7938 7335 /** @copydoc VDIMAGEBACKEND::pfnRegionListRelease */ 7939 7336 static DECLCALLBACK(void) vmdkRegionListRelease(void *pBackendData, PCVDREGIONLIST pRegionList) … … 7943 7340 PVMDKIMAGE pThis = (PVMDKIMAGE)pBackendData; 7944 7341 AssertPtr(pThis); RT_NOREF(pThis); 7945 7946 7342 /* Nothing to do here. 
*/ 7947 7343 } 7948 7949 7344 /** @copydoc VDIMAGEBACKEND::pfnGetImageFlags */ 7950 7345 static DECLCALLBACK(unsigned) vmdkGetImageFlags(void *pBackendData) … … 7952 7347 LogFlowFunc(("pBackendData=%#p\n", pBackendData)); 7953 7348 PVMDKIMAGE pImage = (PVMDKIMAGE)pBackendData; 7954 7955 7349 AssertPtrReturn(pImage, 0); 7956 7957 7350 LogFlowFunc(("returns %#x\n", pImage->uImageFlags)); 7958 7351 return pImage->uImageFlags; 7959 7352 } 7960 7961 7353 /** @copydoc VDIMAGEBACKEND::pfnGetOpenFlags */ 7962 7354 static DECLCALLBACK(unsigned) vmdkGetOpenFlags(void *pBackendData) … … 7964 7356 LogFlowFunc(("pBackendData=%#p\n", pBackendData)); 7965 7357 PVMDKIMAGE pImage = (PVMDKIMAGE)pBackendData; 7966 7967 7358 AssertPtrReturn(pImage, 0); 7968 7969 7359 LogFlowFunc(("returns %#x\n", pImage->uOpenFlags)); 7970 7360 return pImage->uOpenFlags; 7971 7361 } 7972 7973 7362 /** @copydoc VDIMAGEBACKEND::pfnSetOpenFlags */ 7974 7363 static DECLCALLBACK(int) vmdkSetOpenFlags(void *pBackendData, unsigned uOpenFlags) … … 7977 7366 PVMDKIMAGE pImage = (PVMDKIMAGE)pBackendData; 7978 7367 int rc; 7979 7980 7368 /* Image must be opened and the new flags must be valid. */ 7981 7369 if (!pImage || (uOpenFlags & ~( VD_OPEN_FLAGS_READONLY | VD_OPEN_FLAGS_INFO … … 8000 7388 } 8001 7389 } 8002 8003 7390 LogFlowFunc(("returns %Rrc\n", rc)); 8004 7391 return rc; 8005 7392 } 8006 8007 7393 /** @copydoc VDIMAGEBACKEND::pfnGetComment */ 8008 7394 static DECLCALLBACK(int) vmdkGetComment(void *pBackendData, char *pszComment, size_t cbComment) … … 8010 7396 LogFlowFunc(("pBackendData=%#p pszComment=%#p cbComment=%zu\n", pBackendData, pszComment, cbComment)); 8011 7397 PVMDKIMAGE pImage = (PVMDKIMAGE)pBackendData; 8012 8013 7398 AssertPtrReturn(pImage, VERR_VD_NOT_OPENED); 8014 8015 7399 char *pszCommentEncoded = NULL; 8016 7400 int rc = vmdkDescDDBGetStr(pImage, &pImage->Descriptor, … … 8021 7405 rc = VINF_SUCCESS; 8022 7406 } 8023 8024 7407 if (RT_SUCCESS(rc)) 8025 7408 { … … 8028 7411 else if (pszComment) 8029 7412 *pszComment = '\0'; 8030 8031 7413 if (pszCommentEncoded) 8032 7414 RTMemTmpFree(pszCommentEncoded); 8033 7415 } 8034 8035 7416 LogFlowFunc(("returns %Rrc comment='%s'\n", rc, pszComment)); 8036 7417 return rc; 8037 7418 } 8038 8039 7419 /** @copydoc VDIMAGEBACKEND::pfnSetComment */ 8040 7420 static DECLCALLBACK(int) vmdkSetComment(void *pBackendData, const char *pszComment) … … 8043 7423 PVMDKIMAGE pImage = (PVMDKIMAGE)pBackendData; 8044 7424 int rc; 8045 8046 7425 AssertPtrReturn(pImage, VERR_VD_NOT_OPENED); 8047 8048 7426 if (!(pImage->uOpenFlags & VD_OPEN_FLAGS_READONLY)) 8049 7427 { … … 8055 7433 else 8056 7434 rc = VERR_VD_IMAGE_READ_ONLY; 8057 8058 7435 LogFlowFunc(("returns %Rrc\n", rc)); 8059 7436 return rc; 8060 7437 } 8061 8062 7438 /** @copydoc VDIMAGEBACKEND::pfnGetUuid */ 8063 7439 static DECLCALLBACK(int) vmdkGetUuid(void *pBackendData, PRTUUID pUuid) … … 8065 7441 LogFlowFunc(("pBackendData=%#p pUuid=%#p\n", pBackendData, pUuid)); 8066 7442 PVMDKIMAGE pImage = (PVMDKIMAGE)pBackendData; 8067 8068 7443 AssertPtrReturn(pImage, VERR_VD_NOT_OPENED); 8069 8070 7444 *pUuid = pImage->ImageUuid; 8071 8072 7445 LogFlowFunc(("returns %Rrc (%RTuuid)\n", VINF_SUCCESS, pUuid)); 8073 7446 return VINF_SUCCESS; 8074 7447 } 8075 8076 7448 /** @copydoc VDIMAGEBACKEND::pfnSetUuid */ 8077 7449 static DECLCALLBACK(int) vmdkSetUuid(void *pBackendData, PCRTUUID pUuid) … … 8080 7452 PVMDKIMAGE pImage = (PVMDKIMAGE)pBackendData; 8081 7453 int rc = VINF_SUCCESS; 8082 8083 7454 AssertPtrReturn(pImage, VERR_VD_NOT_OPENED); 
8084 8085 7455 if (!(pImage->uOpenFlags & VD_OPEN_FLAGS_READONLY)) 8086 7456 { … … 8099 7469 else 8100 7470 rc = VERR_VD_IMAGE_READ_ONLY; 8101 8102 7471 LogFlowFunc(("returns %Rrc\n", rc)); 8103 7472 return rc; 8104 7473 } 8105 8106 7474 /** @copydoc VDIMAGEBACKEND::pfnGetModificationUuid */ 8107 7475 static DECLCALLBACK(int) vmdkGetModificationUuid(void *pBackendData, PRTUUID pUuid) … … 8109 7477 LogFlowFunc(("pBackendData=%#p pUuid=%#p\n", pBackendData, pUuid)); 8110 7478 PVMDKIMAGE pImage = (PVMDKIMAGE)pBackendData; 8111 8112 7479 AssertPtrReturn(pImage, VERR_VD_NOT_OPENED); 8113 8114 7480 *pUuid = pImage->ModificationUuid; 8115 8116 7481 LogFlowFunc(("returns %Rrc (%RTuuid)\n", VINF_SUCCESS, pUuid)); 8117 7482 return VINF_SUCCESS; 8118 7483 } 8119 8120 7484 /** @copydoc VDIMAGEBACKEND::pfnSetModificationUuid */ 8121 7485 static DECLCALLBACK(int) vmdkSetModificationUuid(void *pBackendData, PCRTUUID pUuid) … … 8124 7488 PVMDKIMAGE pImage = (PVMDKIMAGE)pBackendData; 8125 7489 int rc = VINF_SUCCESS; 8126 8127 7490 AssertPtrReturn(pImage, VERR_VD_NOT_OPENED); 8128 8129 7491 if (!(pImage->uOpenFlags & VD_OPEN_FLAGS_READONLY)) 8130 7492 { … … 8146 7508 else 8147 7509 rc = VERR_VD_IMAGE_READ_ONLY; 8148 8149 7510 LogFlowFunc(("returns %Rrc\n", rc)); 8150 7511 return rc; 8151 7512 } 8152 8153 7513 /** @copydoc VDIMAGEBACKEND::pfnGetParentUuid */ 8154 7514 static DECLCALLBACK(int) vmdkGetParentUuid(void *pBackendData, PRTUUID pUuid) … … 8156 7516 LogFlowFunc(("pBackendData=%#p pUuid=%#p\n", pBackendData, pUuid)); 8157 7517 PVMDKIMAGE pImage = (PVMDKIMAGE)pBackendData; 8158 8159 7518 AssertPtrReturn(pImage, VERR_VD_NOT_OPENED); 8160 8161 7519 *pUuid = pImage->ParentUuid; 8162 8163 7520 LogFlowFunc(("returns %Rrc (%RTuuid)\n", VINF_SUCCESS, pUuid)); 8164 7521 return VINF_SUCCESS; 8165 7522 } 8166 8167 7523 /** @copydoc VDIMAGEBACKEND::pfnSetParentUuid */ 8168 7524 static DECLCALLBACK(int) vmdkSetParentUuid(void *pBackendData, PCRTUUID pUuid) … … 8171 7527 PVMDKIMAGE pImage = (PVMDKIMAGE)pBackendData; 8172 7528 int rc = VINF_SUCCESS; 8173 8174 7529 AssertPtrReturn(pImage, VERR_VD_NOT_OPENED); 8175 8176 7530 if (!(pImage->uOpenFlags & VD_OPEN_FLAGS_READONLY)) 8177 7531 { … … 8190 7544 else 8191 7545 rc = VERR_VD_IMAGE_READ_ONLY; 8192 8193 7546 LogFlowFunc(("returns %Rrc\n", rc)); 8194 7547 return rc; 8195 7548 } 8196 8197 7549 /** @copydoc VDIMAGEBACKEND::pfnGetParentModificationUuid */ 8198 7550 static DECLCALLBACK(int) vmdkGetParentModificationUuid(void *pBackendData, PRTUUID pUuid) … … 8200 7552 LogFlowFunc(("pBackendData=%#p pUuid=%#p\n", pBackendData, pUuid)); 8201 7553 PVMDKIMAGE pImage = (PVMDKIMAGE)pBackendData; 8202 8203 7554 AssertPtrReturn(pImage, VERR_VD_NOT_OPENED); 8204 8205 7555 *pUuid = pImage->ParentModificationUuid; 8206 8207 7556 LogFlowFunc(("returns %Rrc (%RTuuid)\n", VINF_SUCCESS, pUuid)); 8208 7557 return VINF_SUCCESS; 8209 7558 } 8210 8211 7559 /** @copydoc VDIMAGEBACKEND::pfnSetParentModificationUuid */ 8212 7560 static DECLCALLBACK(int) vmdkSetParentModificationUuid(void *pBackendData, PCRTUUID pUuid) … … 8215 7563 PVMDKIMAGE pImage = (PVMDKIMAGE)pBackendData; 8216 7564 int rc = VINF_SUCCESS; 8217 8218 7565 AssertPtrReturn(pImage, VERR_VD_NOT_OPENED); 8219 8220 7566 if (!(pImage->uOpenFlags & VD_OPEN_FLAGS_READONLY)) 8221 7567 { … … 8233 7579 else 8234 7580 rc = VERR_VD_IMAGE_READ_ONLY; 8235 8236 7581 LogFlowFunc(("returns %Rrc\n", rc)); 8237 7582 return rc; 8238 7583 } 8239 8240 7584 /** @copydoc VDIMAGEBACKEND::pfnDump */ 8241 7585 static DECLCALLBACK(void) vmdkDump(void 
*pBackendData) 8242 7586 { 8243 7587 PVMDKIMAGE pImage = (PVMDKIMAGE)pBackendData; 8244 8245 7588 AssertPtrReturnVoid(pImage); 8246 7589 vdIfErrorMessage(pImage->pIfError, "Header: Geometry PCHS=%u/%u/%u LCHS=%u/%u/%u cbSector=%llu\n", … … 8254 7597 } 8255 7598 8256 static int vmdkRepaceExtentSize(PVMDKIMAGE pImage, unsigned line, uint64_t cSectorsOld, 8257 uint64_t cSectorsNew) 8258 { 8259 char * szOldExtentSectors = (char *)RTMemAlloc(UINT64_MAX_BUFF_SIZE); 8260 if (!szOldExtentSectors) 8261 return VERR_NO_MEMORY; 8262 8263 int cbWritten = RTStrPrintf2(szOldExtentSectors, UINT64_MAX_BUFF_SIZE, "%llu", cSectorsOld); 8264 if (cbWritten <= 0 || cbWritten > UINT64_MAX_BUFF_SIZE) 8265 { 8266 RTMemFree(szOldExtentSectors); 8267 szOldExtentSectors = NULL; 8268 7599 /** 7600 * Returns the size, in bytes, of the sparse extent overhead for 7601 * the number of desired total sectors and based on the current 7602 * sectors of the extent. 7603 * 7604 * @returns uint64_t size of new overhead in bytes. 7605 * @param pExtent VMDK extent instance. 7606 * @param cSectorsNew Number of desired total sectors. 7607 */ 7608 static uint64_t vmdkGetNewOverhead(PVMDKEXTENT pExtent, uint64_t cSectorsNew) 7609 { 7610 uint64_t cNewDirEntries = cSectorsNew / pExtent->cSectorsPerGDE; 7611 if (cSectorsNew % pExtent->cSectorsPerGDE) 7612 cNewDirEntries++; 7613 7614 size_t cbNewGD = cNewDirEntries * sizeof(uint32_t); 7615 uint64_t cbNewDirSize = RT_ALIGN_64(cbNewGD, 512); 7616 uint64_t cbNewAllTablesSize = RT_ALIGN_64(cNewDirEntries * pExtent->cGTEntries * sizeof(uint32_t), 512); 7617 uint64_t cbNewOverhead = RT_ALIGN_Z(RT_MAX(pExtent->uDescriptorSector 7618 + pExtent->cDescriptorSectors, 1) 7619 + cbNewDirSize + cbNewAllTablesSize, 512); 7620 cbNewOverhead += cbNewDirSize + cbNewAllTablesSize; 7621 cbNewOverhead = RT_ALIGN_64(cbNewOverhead, 7622 VMDK_SECTOR2BYTE(pExtent->cSectorsPerGrain)); 7623 7624 return cbNewOverhead; 7625 } 7626 7627 /** 7628 * Internal: Replaces the size (in sectors) of an extent in the descriptor file. 7629 * 7630 * @returns VBox status code. 7631 * @param pImage VMDK image instance. 7632 * @param uLine Line number of descriptor to change. 7633 * @param cSectorsOld Existing number of sectors. 7634 * @param cSectorsNew New number of sectors. 
7635 */ 7636 static int vmdkReplaceExtentSize(PVMDKIMAGE pImage, PVMDKEXTENT pExtent, unsigned uLine, uint64_t cSectorsOld, 7637 uint64_t cSectorsNew) 7638 { 7639 char szOldExtentSectors[UINT64_MAX_BUFF_SIZE]; 7640 char szNewExtentSectors[UINT64_MAX_BUFF_SIZE]; 7641 7642 ssize_t cbWritten = RTStrPrintf2(szOldExtentSectors, sizeof(szOldExtentSectors), "%llu", cSectorsOld); 7643 if (cbWritten <= 0 || cbWritten > (ssize_t)sizeof(szOldExtentSectors)) 8269 7644 return VERR_BUFFER_OVERFLOW; 8270 } 8271 8272 char * szNewExtentSectors = (char *)RTMemAlloc(UINT64_MAX_BUFF_SIZE); 8273 if (!szNewExtentSectors) 8274 return VERR_NO_MEMORY; 8275 8276 cbWritten = RTStrPrintf2(szNewExtentSectors, UINT64_MAX_BUFF_SIZE, "%llu", cSectorsNew); 8277 if (cbWritten <= 0 || cbWritten > UINT64_MAX_BUFF_SIZE) 8278 { 8279 RTMemFree(szOldExtentSectors); 8280 szOldExtentSectors = NULL; 8281 8282 RTMemFree(szNewExtentSectors); 8283 szNewExtentSectors = NULL; 8284 7645 7646 cbWritten = RTStrPrintf2(szNewExtentSectors, sizeof(szNewExtentSectors), "%llu", cSectorsNew); 7647 if (cbWritten <= 0 || cbWritten > (ssize_t)sizeof(szNewExtentSectors)) 8285 7648 return VERR_BUFFER_OVERFLOW; 8286 } 8287 8288 char * szNewExtentLine = vmdkStrReplace(pImage->Descriptor.aLines[line], 7649 7650 char *pszNewExtentLine = vmdkStrReplace(pImage->Descriptor.aLines[uLine], 8289 7651 szOldExtentSectors, 8290 7652 szNewExtentSectors); 8291 7653 8292 RTMemFree(szOldExtentSectors); 8293 szOldExtentSectors = NULL; 8294 8295 RTMemFree(szNewExtentSectors); 8296 szNewExtentSectors = NULL; 8297 8298 if (!szNewExtentLine) 7654 if (RT_UNLIKELY(!pszNewExtentLine)) 8299 7655 return VERR_INVALID_PARAMETER; 8300 7656 8301 pImage->Descriptor.aLines[line] = szNewExtentLine; 7657 vmdkDescExtRemoveByLine(pImage, &pImage->Descriptor, uLine); 7658 vmdkDescExtInsert(pImage, &pImage->Descriptor, 7659 pExtent->enmAccess, cSectorsNew, 7660 pExtent->enmType, pExtent->pszBasename, pExtent->uSectorOffset); 7661 7662 RTStrFree(pszNewExtentLine); 7663 pszNewExtentLine = NULL; 7664 7665 pImage->Descriptor.fDirty = true; 8302 7666 8303 7667 return VINF_SUCCESS; 7668 } 7669 7670 /** 7671 * Moves sectors down to make room for new overhead. 7672 * Used for sparse extent resize. 7673 * 7674 * @returns VBox status code. 7675 * @param pImage VMDK image instance. 7676 * @param pExtent VMDK extent instance. 7677 * @param cSectorsNew Number of sectors after resize. 7678 */ 7679 static int vmdkRelocateSectorsForSparseResize(PVMDKIMAGE pImage, PVMDKEXTENT pExtent, 7680 uint64_t cSectorsNew) 7681 { 7682 int rc = VINF_SUCCESS; 7683 7684 uint64_t cbNewOverhead = vmdkGetNewOverhead(pExtent, cSectorsNew); 7685 7686 uint64_t cNewOverheadSectors = VMDK_BYTE2SECTOR(cbNewOverhead); 7687 uint64_t cOverheadSectorDiff = cNewOverheadSectors - pExtent->cOverheadSectors; 7688 7689 uint64_t cbFile = 0; 7690 rc = vdIfIoIntFileGetSize(pImage->pIfIo, pExtent->pFile->pStorage, &cbFile); 7691 7692 uint64_t uNewAppendPosition; 7693 7694 /* Calculate how many sectors need to be relocated. */ 7695 unsigned cSectorsReloc = cOverheadSectorDiff; 7696 if (cbNewOverhead % VMDK_SECTOR_SIZE) 7697 cSectorsReloc++; 7698 7699 if (cSectorsReloc < pExtent->cSectors) 7700 uNewAppendPosition = RT_ALIGN_Z(cbFile + VMDK_SECTOR2BYTE(cOverheadSectorDiff), 512); 7701 else 7702 uNewAppendPosition = cbFile; 7703 7704 /* 7705 * Get the blocks we need to relocate first, they are appended to the end 7706 * of the image. 7707 */ 7708 void *pvBuf = NULL, *pvZero = NULL; 7709 do 7710 { 7711 /* Allocate data buffer. 
*/ 7712 pvBuf = RTMemAllocZ(VMDK_SECTOR2BYTE(pExtent->cSectorsPerGrain)); 7713 if (!pvBuf) 7714 { 7715 rc = VERR_NO_MEMORY; 7716 break; 7717 } 7718 7719 /* Allocate buffer for overwriting with zeroes. */ 7720 pvZero = RTMemAllocZ(VMDK_SECTOR2BYTE(pExtent->cSectorsPerGrain)); 7721 if (!pvZero) 7722 { 7723 RTMemFree(pvBuf); 7724 pvBuf = NULL; 7725 7726 rc = VERR_NO_MEMORY; 7727 break; 7728 } 7729 7730 uint32_t *aGTDataTmp = (uint32_t *)RTMemAllocZ(sizeof(uint32_t) * pExtent->cGTEntries); 7731 if(!aGTDataTmp) 7732 { 7733 RTMemFree(pvBuf); 7734 pvBuf = NULL; 7735 7736 RTMemFree(pvZero); 7737 pvZero = NULL; 7738 7739 rc = VERR_NO_MEMORY; 7740 break; 7741 } 7742 7743 uint32_t *aRGTDataTmp = (uint32_t *)RTMemAllocZ(sizeof(uint32_t) * pExtent->cGTEntries); 7744 if(!aRGTDataTmp) 7745 { 7746 RTMemFree(pvBuf); 7747 pvBuf = NULL; 7748 7749 RTMemFree(pvZero); 7750 pvZero = NULL; 7751 7752 RTMemFree(aGTDataTmp); 7753 aGTDataTmp = NULL; 7754 7755 rc = VERR_NO_MEMORY; 7756 break; 7757 } 7758 7759 /* Search for overlap sector in the grain table. */ 7760 for (uint32_t idxGD = 0; idxGD < pExtent->cGDEntries; idxGD++) 7761 { 7762 uint64_t uGTSector = pExtent->pGD[idxGD]; 7763 uint64_t uRGTSector = pExtent->pRGD[idxGD]; 7764 7765 rc = vdIfIoIntFileReadSync(pImage->pIfIo, pExtent->pFile->pStorage, 7766 VMDK_SECTOR2BYTE(uGTSector), 7767 aGTDataTmp, sizeof(uint32_t) * pExtent->cGTEntries); 7768 7769 if (RT_FAILURE(rc)) 7770 break; 7771 7772 rc = vdIfIoIntFileReadSync(pImage->pIfIo, pExtent->pFile->pStorage, 7773 VMDK_SECTOR2BYTE(uRGTSector), 7774 aRGTDataTmp, sizeof(uint32_t) * pExtent->cGTEntries); 7775 7776 if (RT_FAILURE(rc)) 7777 break; 7778 7779 for (uint32_t idxGT = 0; idxGT < pExtent->cGTEntries; idxGT++) 7780 { 7781 uint64_t aGTEntryLE = RT_LE2H_U64(aGTDataTmp[idxGT]); 7782 uint64_t aRGTEntryLE = RT_LE2H_U64(aRGTDataTmp[idxGT]); 7783 7784 /** 7785 * Check if grain table is valid. If not dump out with an error. 7786 * Shoudln't ever get here (given other checks) but good sanity check. 7787 */ 7788 if (aGTEntryLE != aRGTEntryLE) 7789 { 7790 rc = vdIfError(pImage->pIfError, VERR_VD_VMDK_INVALID_HEADER, RT_SRC_POS, 7791 N_("VMDK: inconsistent references within grain table in '%s'"), pExtent->pszFullname); 7792 break; 7793 } 7794 7795 if (aGTEntryLE < cNewOverheadSectors 7796 && aGTEntryLE != 0) 7797 { 7798 /* Read data and append grain to the end of the image. */ 7799 rc = vdIfIoIntFileReadSync(pImage->pIfIo, pExtent->pFile->pStorage, 7800 VMDK_SECTOR2BYTE(aGTEntryLE), pvBuf, 7801 VMDK_SECTOR2BYTE(pExtent->cSectorsPerGrain)); 7802 if (RT_FAILURE(rc)) 7803 break; 7804 7805 rc = vdIfIoIntFileWriteSync(pImage->pIfIo, pExtent->pFile->pStorage, 7806 uNewAppendPosition, pvBuf, 7807 VMDK_SECTOR2BYTE(pExtent->cSectorsPerGrain)); 7808 if (RT_FAILURE(rc)) 7809 break; 7810 7811 /* Zero out the old block area. 
*/ 7812 rc = vdIfIoIntFileWriteSync(pImage->pIfIo, pExtent->pFile->pStorage, 7813 VMDK_SECTOR2BYTE(aGTEntryLE), pvZero, 7814 VMDK_SECTOR2BYTE(pExtent->cSectorsPerGrain)); 7815 if (RT_FAILURE(rc)) 7816 break; 7817 7818 /* Write updated grain tables to file */ 7819 aGTDataTmp[idxGT] = VMDK_BYTE2SECTOR(uNewAppendPosition); 7820 aRGTDataTmp[idxGT] = VMDK_BYTE2SECTOR(uNewAppendPosition); 7821 7822 if (memcmp(aGTDataTmp, aRGTDataTmp, sizeof(uint32_t) * pExtent->cGTEntries)) 7823 { 7824 rc = vdIfError(pImage->pIfError, VERR_VD_VMDK_INVALID_HEADER, RT_SRC_POS, 7825 N_("VMDK: inconsistency between grain table and backup grain table in '%s'"), pExtent->pszFullname); 7826 break; 7827 } 7828 7829 rc = vdIfIoIntFileWriteSync(pImage->pIfIo, pExtent->pFile->pStorage, 7830 VMDK_SECTOR2BYTE(uGTSector), 7831 aGTDataTmp, sizeof(uint32_t) * pExtent->cGTEntries); 7832 7833 if (RT_FAILURE(rc)) 7834 break; 7835 7836 rc = vdIfIoIntFileWriteSync(pImage->pIfIo, pExtent->pFile->pStorage, 7837 VMDK_SECTOR2BYTE(uRGTSector), 7838 aRGTDataTmp, sizeof(uint32_t) * pExtent->cGTEntries); 7839 7840 break; 7841 } 7842 } 7843 } 7844 7845 RTMemFree(aGTDataTmp); 7846 aGTDataTmp = NULL; 7847 7848 RTMemFree(aRGTDataTmp); 7849 aRGTDataTmp = NULL; 7850 7851 if (RT_FAILURE(rc)) 7852 break; 7853 7854 uNewAppendPosition += VMDK_SECTOR2BYTE(pExtent->cSectorsPerGrain); 7855 } while (0); 7856 7857 if (pvBuf) 7858 { 7859 RTMemFree(pvBuf); 7860 pvBuf = NULL; 7861 } 7862 7863 if (pvZero) 7864 { 7865 RTMemFree(pvZero); 7866 pvZero = NULL; 7867 } 7868 7869 // Update append position for extent 7870 pExtent->uAppendPosition = uNewAppendPosition; 7871 7872 return rc; 7873 } 7874 7875 /** 7876 * Resizes meta/overhead for sparse extent resize. 7877 * 7878 * @returns VBox status code. 7879 * @param pImage VMDK image instance. 7880 * @param pExtent VMDK extent instance. 7881 * @param cSectorsNew Number of sectors after resize. 7882 */ 7883 static int vmdkResizeSparseMeta(PVMDKIMAGE pImage, PVMDKEXTENT pExtent, 7884 uint64_t cSectorsNew) 7885 { 7886 int rc = VINF_SUCCESS; 7887 uint32_t cOldGDEntries = pExtent->cGDEntries; 7888 7889 uint64_t cNewDirEntries = cSectorsNew / pExtent->cSectorsPerGDE; 7890 if (cSectorsNew % pExtent->cSectorsPerGDE) 7891 cNewDirEntries++; 7892 7893 size_t cbNewGD = cNewDirEntries * sizeof(uint32_t); 7894 7895 uint64_t cbNewDirSize = RT_ALIGN_64(cbNewGD, 512); 7896 uint64_t cbCurrDirSize = RT_ALIGN_64(pExtent->cGDEntries * VMDK_GRAIN_DIR_ENTRY_SIZE, 512); 7897 uint64_t cDirSectorDiff = VMDK_BYTE2SECTOR(cbNewDirSize - cbCurrDirSize); 7898 7899 uint64_t cbNewAllTablesSize = RT_ALIGN_64(cNewDirEntries * pExtent->cGTEntries * sizeof(uint32_t), 512); 7900 uint64_t cbCurrAllTablesSize = RT_ALIGN_64(pExtent->cGDEntries * VMDK_GRAIN_TABLE_SIZE, 512); 7901 uint64_t cTableSectorDiff = VMDK_BYTE2SECTOR(cbNewAllTablesSize - cbCurrAllTablesSize); 7902 7903 uint64_t cbNewOverhead = vmdkGetNewOverhead(pExtent, cSectorsNew); 7904 uint64_t cNewOverheadSectors = VMDK_BYTE2SECTOR(cbNewOverhead); 7905 uint64_t cOverheadSectorDiff = cNewOverheadSectors - pExtent->cOverheadSectors; 7906 7907 /* 7908 * Get the blocks we need to relocate first, they are appended to the end 7909 * of the image. 7910 */ 7911 void *pvBuf = NULL, *pvZero = NULL; 7912 7913 do 7914 { 7915 /* Allocate data buffer. */ 7916 pvBuf = RTMemAllocZ(VMDK_GRAIN_TABLE_SIZE); 7917 if (!pvBuf) 7918 { 7919 rc = VERR_NO_MEMORY; 7920 break; 7921 } 7922 7923 /* Allocate buffer for overwriting with zeroes. 
*/ 7924 pvZero = RTMemAllocZ(VMDK_GRAIN_TABLE_SIZE); 7925 if (!pvZero) 7926 { 7927 RTMemFree(pvBuf); 7928 pvBuf = NULL; 7929 7930 rc = VERR_NO_MEMORY; 7931 break; 7932 } 7933 7934 uint32_t uGTStart = VMDK_SECTOR2BYTE(pExtent->uSectorGD) + (cOldGDEntries * VMDK_GRAIN_DIR_ENTRY_SIZE); 7935 7936 // points to last element in the grain table 7937 uint32_t uGTTail = uGTStart + (pExtent->cGDEntries * VMDK_GRAIN_TABLE_SIZE) - VMDK_GRAIN_TABLE_SIZE; 7938 uint32_t cbGTOff = RT_ALIGN_Z(VMDK_SECTOR2BYTE(cDirSectorDiff + cTableSectorDiff + cDirSectorDiff), 512); 7939 7940 for (int i = pExtent->cGDEntries - 1; i >= 0; i--) 7941 { 7942 rc = vdIfIoIntFileReadSync(pImage->pIfIo, pExtent->pFile->pStorage, 7943 uGTTail, pvBuf, 7944 VMDK_GRAIN_TABLE_SIZE); 7945 if (RT_FAILURE(rc)) 7946 break; 7947 7948 rc = vdIfIoIntFileWriteSync(pImage->pIfIo, pExtent->pFile->pStorage, 7949 RT_ALIGN_Z(uGTTail + cbGTOff, 512), pvBuf, 7950 VMDK_GRAIN_TABLE_SIZE); 7951 if (RT_FAILURE(rc)) 7952 break; 7953 7954 // This overshoots when i == 0, but we don't need it anymore. 7955 uGTTail -= VMDK_GRAIN_TABLE_SIZE; 7956 } 7957 7958 7959 /* Find the end of the grain directory and start bumping everything down. Update locations of GT entries. */ 7960 rc = vdIfIoIntFileReadSync(pImage->pIfIo, pExtent->pFile->pStorage, 7961 VMDK_SECTOR2BYTE(pExtent->uSectorGD), pvBuf, 7962 pExtent->cGDEntries * VMDK_GRAIN_DIR_ENTRY_SIZE); 7963 if (RT_FAILURE(rc)) 7964 break; 7965 7966 int * tmpBuf = (int *)pvBuf; 7967 7968 for (uint32_t i = 0; i < pExtent->cGDEntries; i++) 7969 { 7970 tmpBuf[i] = tmpBuf[i] + VMDK_BYTE2SECTOR(cbGTOff); 7971 pExtent->pGD[i] = pExtent->pGD[i] + VMDK_BYTE2SECTOR(cbGTOff); 7972 } 7973 7974 rc = vdIfIoIntFileWriteSync(pImage->pIfIo, pExtent->pFile->pStorage, 7975 RT_ALIGN_Z(VMDK_SECTOR2BYTE(pExtent->uSectorGD + cTableSectorDiff + cDirSectorDiff), 512), pvBuf, 7976 pExtent->cGDEntries * VMDK_GRAIN_DIR_ENTRY_SIZE); 7977 if (RT_FAILURE(rc)) 7978 break; 7979 7980 pExtent->uSectorGD = pExtent->uSectorGD + cDirSectorDiff + cTableSectorDiff; 7981 7982 /* Repeat both steps with the redundant grain table/directory. */ 7983 7984 uint32_t uRGTStart = VMDK_SECTOR2BYTE(pExtent->uSectorRGD) + (cOldGDEntries * VMDK_GRAIN_DIR_ENTRY_SIZE); 7985 7986 // points to last element in the grain table 7987 uint32_t uRGTTail = uRGTStart + (pExtent->cGDEntries * VMDK_GRAIN_TABLE_SIZE) - VMDK_GRAIN_TABLE_SIZE; 7988 uint32_t cbRGTOff = RT_ALIGN_Z(VMDK_SECTOR2BYTE(cDirSectorDiff), 512); 7989 7990 for (int i = pExtent->cGDEntries - 1; i >= 0; i--) 7991 { 7992 rc = vdIfIoIntFileReadSync(pImage->pIfIo, pExtent->pFile->pStorage, 7993 uRGTTail, pvBuf, 7994 VMDK_GRAIN_TABLE_SIZE); 7995 if (RT_FAILURE(rc)) 7996 break; 7997 7998 rc = vdIfIoIntFileWriteSync(pImage->pIfIo, pExtent->pFile->pStorage, 7999 RT_ALIGN_Z(uRGTTail + cbRGTOff, 512), pvBuf, 8000 VMDK_GRAIN_TABLE_SIZE); 8001 if (RT_FAILURE(rc)) 8002 break; 8003 8004 // This overshoots when i == 0, but we don't need it anymore. 8005 uRGTTail -= VMDK_GRAIN_TABLE_SIZE; 8006 } 8007 8008 /* Update locations of GT entries. 
*/ 8009 rc = vdIfIoIntFileReadSync(pImage->pIfIo, pExtent->pFile->pStorage, 8010 VMDK_SECTOR2BYTE(pExtent->uSectorRGD), pvBuf, 8011 pExtent->cGDEntries * VMDK_GRAIN_DIR_ENTRY_SIZE); 8012 if (RT_FAILURE(rc)) 8013 break; 8014 8015 tmpBuf = (int *)pvBuf; 8016 8017 for (uint32_t i = 0; i < pExtent->cGDEntries; i++) 8018 { 8019 tmpBuf[i] = tmpBuf[i] + cDirSectorDiff; 8020 pExtent->pRGD[i] = pExtent->pRGD[i] + cDirSectorDiff; 8021 } 8022 8023 rc = vdIfIoIntFileWriteSync(pImage->pIfIo, pExtent->pFile->pStorage, 8024 VMDK_SECTOR2BYTE(pExtent->uSectorRGD), pvBuf, 8025 pExtent->cGDEntries * VMDK_GRAIN_DIR_ENTRY_SIZE); 8026 if (RT_FAILURE(rc)) 8027 break; 8028 8029 pExtent->uSectorRGD = pExtent->uSectorRGD; 8030 pExtent->cOverheadSectors += cOverheadSectorDiff; 8031 8032 } while (0); 8033 8034 if (pvBuf) 8035 { 8036 RTMemFree(pvBuf); 8037 pvBuf = NULL; 8038 } 8039 8040 if (pvZero) 8041 { 8042 RTMemFree(pvZero); 8043 pvZero = NULL; 8044 } 8045 8046 pExtent->cGDEntries = cNewDirEntries; 8047 8048 /* Allocate buffer for overwriting with zeroes. */ 8049 pvZero = RTMemAllocZ(VMDK_GRAIN_TABLE_SIZE); 8050 if (!pvZero) 8051 return VERR_NO_MEMORY; 8052 8053 // Allocate additional grain dir 8054 pExtent->pGD = (uint32_t *) RTMemReallocZ(pExtent->pGD, pExtent->cGDEntries * sizeof(uint32_t), cbNewGD); 8055 if (RT_LIKELY(pExtent->pGD)) 8056 { 8057 if (pExtent->uSectorRGD) 8058 { 8059 pExtent->pRGD = (uint32_t *)RTMemReallocZ(pExtent->pRGD, pExtent->cGDEntries * sizeof(uint32_t), cbNewGD); 8060 if (RT_UNLIKELY(!pExtent->pRGD)) 8061 rc = VERR_NO_MEMORY; 8062 } 8063 } 8064 else 8065 return VERR_NO_MEMORY; 8066 8067 8068 uint32_t uTmpDirVal = pExtent->pGD[cOldGDEntries - 1] + VMDK_GRAIN_DIR_ENTRY_SIZE; 8069 for (uint32_t i = cOldGDEntries; i < pExtent->cGDEntries; i++) 8070 { 8071 pExtent->pGD[i] = uTmpDirVal; 8072 8073 rc = vdIfIoIntFileWriteSync(pImage->pIfIo, pExtent->pFile->pStorage, 8074 VMDK_SECTOR2BYTE(uTmpDirVal), pvZero, 8075 VMDK_GRAIN_TABLE_SIZE); 8076 8077 if (RT_FAILURE(rc)) 8078 return rc; 8079 8080 uTmpDirVal += VMDK_GRAIN_DIR_ENTRY_SIZE; 8081 } 8082 8083 uint32_t uRTmpDirVal = pExtent->pRGD[cOldGDEntries - 1] + VMDK_GRAIN_DIR_ENTRY_SIZE; 8084 for (uint32_t i = cOldGDEntries; i < pExtent->cGDEntries; i++) 8085 { 8086 pExtent->pRGD[i] = uRTmpDirVal; 8087 8088 rc = vdIfIoIntFileWriteSync(pImage->pIfIo, pExtent->pFile->pStorage, 8089 VMDK_SECTOR2BYTE(uRTmpDirVal), pvZero, 8090 VMDK_GRAIN_TABLE_SIZE); 8091 8092 if (RT_FAILURE(rc)) 8093 return rc; 8094 8095 uRTmpDirVal += VMDK_GRAIN_DIR_ENTRY_SIZE; 8096 } 8097 8098 RTMemFree(pvZero); 8099 pvZero = NULL; 8100 8101 rc = vdIfIoIntFileWriteSync(pImage->pIfIo, pExtent->pFile->pStorage, 8102 VMDK_SECTOR2BYTE(pExtent->uSectorGD), pExtent->pGD, 8103 pExtent->cGDEntries * VMDK_GRAIN_DIR_ENTRY_SIZE); 8104 if (RT_FAILURE(rc)) 8105 return rc; 8106 8107 rc = vdIfIoIntFileWriteSync(pImage->pIfIo, pExtent->pFile->pStorage, 8108 VMDK_SECTOR2BYTE(pExtent->uSectorRGD), pExtent->pRGD, 8109 pExtent->cGDEntries * VMDK_GRAIN_DIR_ENTRY_SIZE); 8110 if (RT_FAILURE(rc)) 8111 return rc; 8112 8113 rc = vmdkReplaceExtentSize(pImage, pExtent, pImage->Descriptor.uFirstExtent + pExtent->uExtent, 8114 pExtent->cNominalSectors, cSectorsNew); 8115 if (RT_FAILURE(rc)) 8116 return rc; 8117 8118 return rc; 8304 8119 } 8305 8120 … … 8318 8133 unsigned uImageFlags = pImage->uImageFlags; 8319 8134 PVMDKEXTENT pExtent = &pImage->pExtents[0]; 8135 pExtent->fMetaDirty = true; 8320 8136 8321 8137 uint64_t cSectorsNew = cbSize / VMDK_SECTOR_SIZE; /** < New number of sectors in the image after the 
resize */ … … 8338 8154 */ 8339 8155 /** @todo implement making the image smaller, it is the responsibility of 8340 * the user to know what he'sdoing. */8156 * the user to know what they're doing. */ 8341 8157 if (cbSize < pImage->cbSize) 8342 8158 rc = VERR_VD_SHRINK_NOT_SUPPORTED; … … 8358 8174 return vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: could not set size of new file '%s'"), pExtent->pszFullname); 8359 8175 8360 rc = vmdkRep aceExtentSize(pImage, pImage->Descriptor.uFirstExtent, cSectorsOld, cSectorsNew);8176 rc = vmdkReplaceExtentSize(pImage, pExtent, pImage->Descriptor.uFirstExtent, cSectorsOld, cSectorsNew); 8361 8177 if (RT_FAILURE(rc)) 8362 8178 return rc; … … 8375 8191 8376 8192 uint64_t cSectorsNeeded = cSectorsNew - cSectorsOld; 8193 8194 /** Space remaining in current last extent file that we don't need to create another one. */ 8377 8195 if (fSpaceAvailible && cSectorsNeeded + cLastExtentRemSectors <= VMDK_BYTE2SECTOR(VMDK_2G_SPLIT_SIZE)) 8378 8196 { … … 8384 8202 return vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: could not set size of new file '%s'"), pExtent->pszFullname); 8385 8203 8386 rc = vmdkRep aceExtentSize(pImage, pImage->Descriptor.uFirstExtent + cExtents - 1,8387 pExtent->cNominalSectors, cSectorsNeeded + cLastExtentRemSectors);8204 rc = vmdkReplaceExtentSize(pImage, pExtent, pImage->Descriptor.uFirstExtent + cExtents - 1, 8205 pExtent->cNominalSectors, cSectorsNeeded + cLastExtentRemSectors); 8388 8206 if (RT_FAILURE(rc)) 8389 8207 return rc; 8390 8208 } 8209 //** Need more extent files to handle all the requested space. */ 8391 8210 else 8392 8211 { … … 8402 8221 cSectorsNeeded = cSectorsNeeded - VMDK_BYTE2SECTOR(VMDK_2G_SPLIT_SIZE) + cLastExtentRemSectors; 8403 8222 8404 rc = vmdkRep aceExtentSize(pImage, pImage->Descriptor.uFirstExtent + cExtents - 1,8405 pExtent->cNominalSectors, VMDK_BYTE2SECTOR(VMDK_2G_SPLIT_SIZE));8223 rc = vmdkReplaceExtentSize(pImage, pExtent, pImage->Descriptor.uFirstExtent + cExtents - 1, 8224 pExtent->cNominalSectors, VMDK_BYTE2SECTOR(VMDK_2G_SPLIT_SIZE)); 8406 8225 if (RT_FAILURE(rc)) 8407 8226 return rc; … … 8435 8254 } 8436 8255 8256 /** 8257 * monolithicSparse. 8258 */ 8259 if (pExtent->enmType == VMDKETYPE_HOSTED_SPARSE && !(uImageFlags & VD_VMDK_IMAGE_FLAGS_SPLIT_2G)) 8260 { 8261 // 1. Calculate sectors needed for new overhead. 8262 8263 uint64_t cbNewOverhead = vmdkGetNewOverhead(pExtent, cSectorsNew); 8264 uint64_t cNewOverheadSectors = VMDK_BYTE2SECTOR(cbNewOverhead); 8265 uint64_t cOverheadSectorDiff = cNewOverheadSectors - pExtent->cOverheadSectors; 8266 8267 // 2. Relocate sectors to make room for new GD/GT, update entries in GD/GT 8268 if (cOverheadSectorDiff > 0) 8269 { 8270 if (pExtent->cSectors > 0) 8271 { 8272 /* Do the relocation. 
*/ 8273 LogFlow(("Relocating VMDK sectors\n")); 8274 rc = vmdkRelocateSectorsForSparseResize(pImage, pExtent, cSectorsNew); 8275 if (RT_FAILURE(rc)) 8276 return rc; 8277 8278 rc = vmdkFlushImage(pImage, NULL); 8279 if (RT_FAILURE(rc)) 8280 return rc; 8281 } 8282 8283 rc = vmdkResizeSparseMeta(pImage, pExtent, cSectorsNew); 8284 if (RT_FAILURE(rc)) 8285 return rc; 8286 } 8287 } 8288 8289 /** 8290 * twoGbSparseExtent 8291 */ 8292 if (pExtent->enmType == VMDKETYPE_HOSTED_SPARSE && (uImageFlags & VD_VMDK_IMAGE_FLAGS_SPLIT_2G)) 8293 { 8294 /* Check to see how much space remains in last extent */ 8295 bool fSpaceAvailible = false; 8296 uint64_t cLastExtentRemSectors = cSectorsOld % VMDK_BYTE2SECTOR(VMDK_2G_SPLIT_SIZE); 8297 if (cLastExtentRemSectors) 8298 fSpaceAvailible = true; 8299 8300 uint64_t cSectorsNeeded = cSectorsNew - cSectorsOld; 8301 8302 if (fSpaceAvailible && cSectorsNeeded + cLastExtentRemSectors <= VMDK_BYTE2SECTOR(VMDK_2G_SPLIT_SIZE)) 8303 { 8304 pExtent = &pImage->pExtents[cExtents - 1]; 8305 rc = vmdkRelocateSectorsForSparseResize(pImage, pExtent, cSectorsNeeded + cLastExtentRemSectors); 8306 if (RT_FAILURE(rc)) 8307 return rc; 8308 8309 rc = vmdkFlushImage(pImage, NULL); 8310 if (RT_FAILURE(rc)) 8311 return rc; 8312 8313 rc = vmdkResizeSparseMeta(pImage, pExtent, cSectorsNeeded + cLastExtentRemSectors); 8314 if (RT_FAILURE(rc)) 8315 return rc; 8316 } 8317 else 8318 { 8319 if (fSpaceAvailible) 8320 { 8321 pExtent = &pImage->pExtents[cExtents - 1]; 8322 rc = vmdkRelocateSectorsForSparseResize(pImage, pExtent, VMDK_BYTE2SECTOR(VMDK_2G_SPLIT_SIZE)); 8323 if (RT_FAILURE(rc)) 8324 return rc; 8325 8326 rc = vmdkFlushImage(pImage, NULL); 8327 if (RT_FAILURE(rc)) 8328 return rc; 8329 8330 rc = vmdkResizeSparseMeta(pImage, pExtent, VMDK_BYTE2SECTOR(VMDK_2G_SPLIT_SIZE)); 8331 if (RT_FAILURE(rc)) 8332 return rc; 8333 8334 cSectorsNeeded = cSectorsNeeded - VMDK_BYTE2SECTOR(VMDK_2G_SPLIT_SIZE) + cLastExtentRemSectors; 8335 } 8336 8337 unsigned cNewExtents = VMDK_SECTOR2BYTE(cSectorsNeeded) / VMDK_2G_SPLIT_SIZE; 8338 if (cNewExtents % VMDK_2G_SPLIT_SIZE || cNewExtents < VMDK_2G_SPLIT_SIZE) 8339 cNewExtents++; 8340 8341 for (unsigned i = cExtents; 8342 i < cExtents + cNewExtents && cSectorsNeeded >= VMDK_BYTE2SECTOR(VMDK_2G_SPLIT_SIZE); 8343 i++) 8344 { 8345 rc = vmdkAddFileBackedExtent(pImage, VMDK_2G_SPLIT_SIZE); 8346 if (RT_FAILURE(rc)) 8347 return rc; 8348 8349 pExtent = &pImage->pExtents[i]; 8350 8351 rc = vmdkFlushImage(pImage, NULL); 8352 if (RT_FAILURE(rc)) 8353 return rc; 8354 8355 pExtent->cSectors = VMDK_BYTE2SECTOR(VMDK_2G_SPLIT_SIZE); 8356 cSectorsNeeded -= VMDK_BYTE2SECTOR(VMDK_2G_SPLIT_SIZE); 8357 } 8358 8359 if (cSectorsNeeded) 8360 { 8361 rc = vmdkAddFileBackedExtent(pImage, VMDK_SECTOR2BYTE(cSectorsNeeded)); 8362 if (RT_FAILURE(rc)) 8363 return rc; 8364 8365 pExtent = &pImage->pExtents[pImage->cExtents]; 8366 8367 rc = vmdkFlushImage(pImage, NULL); 8368 if (RT_FAILURE(rc)) 8369 return rc; 8370 } 8371 } 8372 } 8373 8437 8374 /* Successful resize. Update metadata */ 8438 8375 if (RT_SUCCESS(rc)) … … 8440 8377 /* Update size and new block count. */ 8441 8378 pImage->cbSize = cbSize; 8442 /** @todo r=jack: update cExtents if needed */8443 pExtent->c NominalSectors = VMDK_BYTE2SECTOR(cbSize);8379 pExtent->cNominalSectors = cSectorsNew; 8380 pExtent->cSectors = cSectorsNew; 8444 8381 8445 8382 /* Update geometry. */ … … 8449 8386 8450 8387 /* Update header information in base image file. 
*/ 8388 pImage->Descriptor.fDirty = true; 8451 8389 rc = vmdkWriteDescriptor(pImage, NULL); 8452 8390 8453 if (RT_FAILURE(rc)) 8454 return rc; 8455 8456 rc = vmdkFlushImage(pImage, NULL); 8457 8458 if (RT_FAILURE(rc)) 8459 return rc; 8391 if (RT_SUCCESS(rc)) 8392 rc = vmdkFlushImage(pImage, NULL); 8460 8393 } 8461 8394 /* Same size doesn't change the image at all. */ … … 8464 8397 return rc; 8465 8398 } 8466 8467 8399 8468 8400 const VDIMAGEBACKEND g_VmdkBackend = -
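
For readers following the VMDK.cpp hunks above: the sparse-resize path introduced here first computes how large the extent's metadata area (the "overhead": descriptor space plus grain directory and grain tables, kept in a redundant and a primary copy) becomes at the requested size. The standalone sketch below restates the arithmetic of the vmdkGetNewOverhead() helper added in this changeset; alignUp(), SECTOR2BYTE() and newOverheadBytes() are illustrative stand-ins for the IPRT/VMDK macros, not identifiers from the source.

#include <stdint.h>

/* Stand-ins for RT_ALIGN_64 / RT_ALIGN_Z and VMDK_SECTOR2BYTE (names are illustrative only). */
static uint64_t alignUp(uint64_t v, uint64_t cbAlign) { return (v + cbAlign - 1) / cbAlign * cbAlign; }
#define SECTOR2BYTE(s) ((uint64_t)(s) << 9)

/* Mirrors the arithmetic of vmdkGetNewOverhead(): bytes of metadata a sparse
 * extent needs once it spans cSectorsNew sectors. */
static uint64_t newOverheadBytes(uint64_t uDescriptorSector, uint64_t cDescriptorSectors,
                                 uint64_t cSectorsPerGDE, uint32_t cGTEntries,
                                 uint64_t cSectorsPerGrain, uint64_t cSectorsNew)
{
    /* One grain directory entry per cSectorsPerGDE sectors, rounded up. */
    uint64_t cNewDirEntries = cSectorsNew / cSectorsPerGDE;
    if (cSectorsNew % cSectorsPerGDE)
        cNewDirEntries++;

    uint64_t cbNewGD            = cNewDirEntries * sizeof(uint32_t);
    uint64_t cbNewDirSize       = alignUp(cbNewGD, 512);
    uint64_t cbNewAllTablesSize = alignUp(cNewDirEntries * cGTEntries * sizeof(uint32_t), 512);

    /* Descriptor term exactly as in the changeset: RT_MAX(uDescriptorSector + cDescriptorSectors, 1). */
    uint64_t uDescrTerm = uDescriptorSector + cDescriptorSectors;
    if (uDescrTerm < 1)
        uDescrTerm = 1;

    uint64_t cbNewOverhead = alignUp(uDescrTerm + cbNewDirSize + cbNewAllTablesSize, 512);
    /* Added a second time, presumably to cover the redundant GD/GT copy. */
    cbNewOverhead += cbNewDirSize + cbNewAllTablesSize;
    /* Round the whole overhead up to a grain boundary. */
    return alignUp(cbNewOverhead, SECTOR2BYTE(cSectorsPerGrain));
}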
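
The relocation step, vmdkRelocateSectorsForSparseResize(), then walks every grain table (and its redundant copy) and moves each allocated grain whose data currently sits inside the region the enlarged metadata will occupy: the grain is appended to the end of the file, its old location is zeroed, and both grain-table copies are updated. Below is a minimal sketch of just the relocation test, assuming a grain table already loaded into memory; relocateNeeded() and countGrainsToRelocate() are hypothetical names, and the real code streams the tables through vdIfIoIntFileReadSync/vdIfIoIntFileWriteSync rather than holding them like this.

#include <stdint.h>
#include <stddef.h>

/* A grain must move if it is allocated (entry != 0) and its current sector
 * offset falls below the start of the grains area after the resize, i.e.
 * inside the space the grown GD/GT metadata will take over. */
static int relocateNeeded(uint32_t uGrainSector, uint64_t cNewOverheadSectors)
{
    return uGrainSector != 0 && uGrainSector < cNewOverheadSectors;
}

/* Count how many grains of one in-memory grain table would have to be
 * appended to the end of the file and zeroed at their old location. */
static size_t countGrainsToRelocate(const uint32_t *paGT, size_t cGTEntries,
                                    uint64_t cNewOverheadSectors)
{
    size_t cReloc = 0;
    for (size_t i = 0; i < cGTEntries; i++)
        if (relocateNeeded(paGT[i], cNewOverheadSectors))
            cReloc++;
    return cReloc;
}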
trunk/src/VBox/Storage/testcase/tstVDIo.cpp
r96407 r97836 567 567 bool fBase = false; 568 568 bool fDynamic = true; 569 bool fSplit = false; 569 570 570 571 const char *pcszDisk = paScriptArgs[0].psz; … … 583 584 else if (!RTStrICmp(paScriptArgs[3].psz, "dynamic")) 584 585 fDynamic = true; 586 else if (!RTStrICmp(paScriptArgs[3].psz, "vmdk-dynamic-split")) 587 fSplit = true; 588 else if (!RTStrICmp(paScriptArgs[3].psz, "vmdk-fixed-split")) 589 { 590 fDynamic = false; 591 fSplit = true; 592 } 585 593 else 586 594 { … … 609 617 if (fHonorSame) 610 618 fOpenFlags |= VD_OPEN_FLAGS_HONOR_SAME; 619 620 if (fSplit) 621 fImageFlags |= VD_VMDK_IMAGE_FLAGS_SPLIT_2G; 611 622 612 623 if (fBase) … … 3012 3023 return RTEXITCODE_SUCCESS; 3013 3024 } 3014 -
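
In the tstVDIo.cpp hunk above, the create script command learns two new variant keywords, vmdk-dynamic-split and vmdk-fixed-split, which request the 2G-split VMDK layouts the resize code now handles. A hedged sketch of how the parsed variant is meant to translate into image-creation flags: only the VD_VMDK_IMAGE_FLAGS_SPLIT_2G line is taken from the hunk; mapping !fDynamic to VD_IMAGE_FLAGS_FIXED happens in tstVDIo code outside this hunk and is assumed here, and variantToImageFlags() is an illustrative name.

#include <stdbool.h>
#include <VBox/vd.h>

/* Illustrative helper (not from the changeset): combine the parsed variant
 * into VD image flags for image creation. */
static unsigned variantToImageFlags(bool fDynamic, bool fSplit)
{
    unsigned fImageFlags = VD_IMAGE_FLAGS_NONE;
    if (!fDynamic)      /* "fixed", "vmdk-fixed-split": assumed mapping */
        fImageFlags |= VD_IMAGE_FLAGS_FIXED;
    if (fSplit)         /* "vmdk-dynamic-split", "vmdk-fixed-split" */
        fImageFlags |= VD_VMDK_IMAGE_FLAGS_SPLIT_2G;
    return fImageFlags;
}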
trunk/src/VBox/Storage/testcase/tstVDResize.vd
r96832 r97836 40 40 destroydisk("test"); 41 41 42 print("Testing VMDK Flat"); 43 createdisk("test-vmdk-flat", true); 44 create("test-vmdk-flat", "base", "test-vmdk-flat.vmdk", "fixed", "VMDK", 10G, false, false); 45 io("test-vmdk-flat", false, 1, "seq", 64K, 1G, 2G, 10G, 100, "none"); 46 resize("test-vmdk-flat", 20000M); 47 close("test-vmdk-flat", "single", true /* fDelete */); 48 destroydisk("test-vmdk-flat"); 42 print("Testing VMDK Monolithic Flat"); 43 createdisk("test-vmdk-mflat", true); 44 create("test-vmdk-mflat", "base", "test-vmdk-mflat.vmdk", "Fixed", "VMDK", 4G, false, false); 45 io("test-vmdk-mflat", false, 1, "seq", 64K, 1G, 2G, 1G, 100, "none"); 46 resize("test-vmdk-mflat", 6000M); 47 io("test-vmdk-mflat", false, 1, "seq", 64K, 4G, 5G, 1G, 100, "none"); 48 close("test-vmdk-mflat", "single", true /* fDelete */); 49 destroydisk("test-vmdk-mflat"); 50 51 print("Testing VMDK Split Flat"); 52 createdisk("test-vmdk-sflat", true); 53 create("test-vmdk-sflat", "base", "test-vmdk-sflat.vmdk", "vmdk-fixed-split", "VMDK", 4G, false, false); 54 io("test-vmdk-sflat", false, 1, "seq", 64K, 1G, 2G, 1G, 100, "none"); 55 resize("test-vmdk-sflat", 6000M); 56 io("test-vmdk-sflat", false, 1, "seq", 64K, 4G, 5G, 1G, 100, "none"); 57 close("test-vmdk-sflat", "single", true /* fDelete */); 58 destroydisk("test-vmdk-sflat"); 59 60 print("Testing VMDK Sparse"); 61 createdisk("test-vmdk-sparse", true); 62 create("test-vmdk-sparse", "base", "test-vmdk-sparse.vmdk", "Dynamic", "VMDK", 4G, false, false); 63 io("test-vmdk-sparse", false, 1, "seq", 64K, 1G, 2G, 1G, 100, "none"); 64 resize("test-vmdk-sparse", 6000M); 65 io("test-vmdk-sparse", false, 1, "seq", 64K, 4G, 5G, 1G, 100, "none"); 66 close("test-vmdk-sparse", "single", true /* fDelete */); 67 destroydisk("test-vmdk-sparse"); 68 69 print("Testing VMDK Sparse Split"); 70 createdisk("test-vmdk-sparse-split", true); 71 create("test-vmdk-sparse-split", "base", "test-vmdk-sparse-split.vmdk", "vmdk-dynamic-split", "VMDK", 4G, false, false); 72 io("test-vmdk-sparse-split", false, 1, "seq", 64K, 1G, 2G, 1G, 100, "none"); 73 resize("test-vmdk-sparse-split", 6000M); 74 io("test-vmdk-sparse-split", false, 1, "seq", 64K, 4G, 5G, 1G, 100, "none"); 75 close("test-vmdk-sparse-split", "single", true /* fDelete */); 76 destroydisk("test-vmdk-sparse-split"); 49 77 50 78 iorngdestroy(); 51 79 } 52
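
The new tstVDResize.vd cases above grow a 4G image to 6000M and then issue I/O in the 4G-5G range, i.e. entirely inside the space added by the resize. For the split variants that growth is spread over 2G extent files; the sketch below is a simplified model of that bookkeeping (it mirrors the shape of the twoGbSparseExtent path, not its exact code, and SPLITGROWTH/splitGrowth are illustrative names).

#include <stdint.h>

#define SPLIT_SIZE (2048ULL * 1024 * 1024)   /* corresponds to VMDK_2G_SPLIT_SIZE */

typedef struct SPLITGROWTH
{
    uint64_t cbIntoLastExtent;   /* growth absorbed by the partially filled last extent */
    unsigned cFullNewExtents;    /* new extents of exactly SPLIT_SIZE */
    uint64_t cbTailExtent;       /* size of the final, partially filled new extent (0 if none) */
} SPLITGROWTH;

/* Distribute a resize from cbOld to cbNew bytes over 2G split extents. */
static SPLITGROWTH splitGrowth(uint64_t cbOld, uint64_t cbNew)
{
    SPLITGROWTH g = {0, 0, 0};
    uint64_t cbNeeded  = cbNew - cbOld;
    uint64_t cbLastRem = cbOld % SPLIT_SIZE;   /* space already used in the last extent */
    if (cbLastRem)
    {
        uint64_t cbRoom = SPLIT_SIZE - cbLastRem;
        g.cbIntoLastExtent = cbNeeded < cbRoom ? cbNeeded : cbRoom;
        cbNeeded -= g.cbIntoLastExtent;
    }
    g.cFullNewExtents = (unsigned)(cbNeeded / SPLIT_SIZE);
    g.cbTailExtent    = cbNeeded % SPLIT_SIZE;
    return g;
}

/* Example from the test: 4G -> 6000M. 4G is a multiple of 2G, so nothing fits
 * into the last extent, no full 2G extents are added, and a 1904M tail extent
 * covers the growth. */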