Changeset 97839 in vbox for trunk/src/VBox/Storage
- Timestamp: Dec 20, 2022 9:48:25 AM (2 years ago)
- svn:sync-xref-src-repo-rev: 154925
- Location: trunk/src/VBox/Storage
- Files: 3 edited
trunk/src/VBox/Storage/VMDK.cpp
r97836 r97839 3 3 * VMDK disk image, core code. 4 4 */ 5 5 6 /* 6 7 * Copyright (C) 2006-2022 Oracle and/or its affiliates. … … 33 34 #include <VBox/vd-plugin.h> 34 35 #include <VBox/err.h> 36 35 37 #include <iprt/assert.h> 36 38 #include <iprt/alloc.h> … … 92 94 # define DKIOCUNLOCKPHYSICALEXTENTS _IO( 'd', 83) 93 95 #endif /* RT_OS_DARWIN */ 96 94 97 #include "VDBackends.h" 95 98 … … 98 101 * Constants And Macros, Structures and Typedefs * 99 102 *********************************************************************************************************************************/ 103 100 104 /** Maximum encoded string size (including NUL) we allow for VMDK images. 101 105 * Deliberately not set high to avoid running out of descriptor space. */ 102 106 #define VMDK_ENCODED_COMMENT_MAX 1024 107 103 108 /** VMDK descriptor DDB entry for PCHS cylinders. */ 104 109 #define VMDK_DDB_GEO_PCHS_CYLINDERS "ddb.geometry.cylinders" 110 105 111 /** VMDK descriptor DDB entry for PCHS heads. */ 106 112 #define VMDK_DDB_GEO_PCHS_HEADS "ddb.geometry.heads" 113 107 114 /** VMDK descriptor DDB entry for PCHS sectors. */ 108 115 #define VMDK_DDB_GEO_PCHS_SECTORS "ddb.geometry.sectors" 116 109 117 /** VMDK descriptor DDB entry for LCHS cylinders. */ 110 118 #define VMDK_DDB_GEO_LCHS_CYLINDERS "ddb.geometry.biosCylinders" 119 111 120 /** VMDK descriptor DDB entry for LCHS heads. */ 112 121 #define VMDK_DDB_GEO_LCHS_HEADS "ddb.geometry.biosHeads" 122 113 123 /** VMDK descriptor DDB entry for LCHS sectors. */ 114 124 #define VMDK_DDB_GEO_LCHS_SECTORS "ddb.geometry.biosSectors" 125 115 126 /** VMDK descriptor DDB entry for image UUID. */ 116 127 #define VMDK_DDB_IMAGE_UUID "ddb.uuid.image" 128 117 129 /** VMDK descriptor DDB entry for image modification UUID. */ 118 130 #define VMDK_DDB_MODIFICATION_UUID "ddb.uuid.modification" 131 119 132 /** VMDK descriptor DDB entry for parent image UUID. */ 120 133 #define VMDK_DDB_PARENT_UUID "ddb.uuid.parent" 134 121 135 /** VMDK descriptor DDB entry for parent image modification UUID. */ 122 136 #define VMDK_DDB_PARENT_MODIFICATION_UUID "ddb.uuid.parentmodification" 137 123 138 /** No compression for streamOptimized files. */ 124 139 #define VMDK_COMPRESSION_NONE 0 140 125 141 /** Deflate compression for streamOptimized files. */ 126 142 #define VMDK_COMPRESSION_DEFLATE 1 143 127 144 /** Marker that the actual GD value is stored in the footer. */ 128 145 #define VMDK_GD_AT_END 0xffffffffffffffffULL 146 129 147 /** Marker for end-of-stream in streamOptimized images. */ 130 148 #define VMDK_MARKER_EOS 0 149 131 150 /** Marker for grain table block in streamOptimized images. */ 132 151 #define VMDK_MARKER_GT 1 152 133 153 /** Marker for grain directory block in streamOptimized images. */ 134 154 #define VMDK_MARKER_GD 2 155 135 156 /** Marker for footer in streamOptimized images. */ 136 157 #define VMDK_MARKER_FOOTER 3 158 137 159 /** Marker for unknown purpose in streamOptimized images. 138 160 * Shows up in very recent images created by vSphere, but only sporadically. 139 161 * They "forgot" to document that one in the VMDK specification. */ 140 162 #define VMDK_MARKER_UNSPECIFIED 4 163 141 164 /** Dummy marker for "don't check the marker value". */ 142 165 #define VMDK_MARKER_IGNORE 0xffffffffU 166 143 167 /** 144 168 * Magic number for hosted images created by VMware Workstation 4, VMware … … 146 170 */ 147 171 #define VMDK_SPARSE_MAGICNUMBER 0x564d444b /* 'V' 'M' 'D' 'K' */ 172 148 173 /** VMDK sector size in bytes. 
*/ 149 174 #define VMDK_SECTOR_SIZE 512 … … 154 179 /** Grain table size in bytes */ 155 180 #define VMDK_GRAIN_TABLE_SIZE 2048 181 156 182 /** 157 183 * VMDK hosted binary extent header. The "Sparse" is a total misnomer, as … … 181 207 } SparseExtentHeader; 182 208 #pragma pack() 209 183 210 /** The maximum allowed descriptor size in the extent header in sectors. */ 184 211 #define VMDK_SPARSE_DESCRIPTOR_SIZE_MAX UINT64_C(20480) /* 10MB */ 212 185 213 /** VMDK capacity for a single chunk when 2G splitting is turned on. Should be 186 214 * divisible by the default grain size (64K) */ 187 215 #define VMDK_2G_SPLIT_SIZE (2047 * 1024 * 1024) 216 188 217 /** VMDK streamOptimized file format marker. The type field may or may not 189 218 * be actually valid, but there's always data to read there. */ … … 196 225 } VMDKMARKER, *PVMDKMARKER; 197 226 #pragma pack() 227 228 198 229 /** Convert sector number/size to byte offset/size. */ 199 230 #define VMDK_SECTOR2BYTE(u) ((uint64_t)(u) << 9) 231 200 232 /** Convert byte offset/size to sector number/size. */ 201 233 #define VMDK_BYTE2SECTOR(u) ((u) >> 9) 234 202 235 /** 203 236 * VMDK extent type. … … 214 247 VMDKETYPE_VMFS 215 248 } VMDKETYPE, *PVMDKETYPE; 249 216 250 /** 217 251 * VMDK access type for a extent. … … 226 260 VMDKACCESS_READWRITE 227 261 } VMDKACCESS, *PVMDKACCESS; 262 228 263 /** Forward declaration for PVMDKIMAGE. */ 229 264 typedef struct VMDKIMAGE *PVMDKIMAGE; 265 230 266 /** 231 267 * Extents files entry. Used for opening a particular file only once. … … 252 288 struct VMDKFILE *pPrev; 253 289 } VMDKFILE, *PVMDKFILE; 290 254 291 /** 255 292 * VMDK extent data structure. … … 332 369 struct VMDKIMAGE *pImage; 333 370 } VMDKEXTENT, *PVMDKEXTENT; 371 334 372 /** 335 373 * Grain table cache size. Allocated per image. 336 374 */ 337 375 #define VMDK_GT_CACHE_SIZE 256 376 338 377 /** 339 378 * Grain table block size. Smaller than an actual grain table block to allow … … 342 381 */ 343 382 #define VMDK_GT_CACHELINE_SIZE 128 383 384 344 385 /** 345 386 * Maximum number of lines in a descriptor file. Not worth the effort of … … 349 390 */ 350 391 #define VMDK_DESCRIPTOR_LINES_MAX 1100U 392 351 393 /** 352 394 * Parsed descriptor information. Allows easy access and update of the … … 372 414 unsigned aNextLines[VMDK_DESCRIPTOR_LINES_MAX]; 373 415 } VMDKDESCRIPTOR, *PVMDKDESCRIPTOR; 416 417 374 418 /** 375 419 * Cache entry for translating extent/sector to a sector number in that … … 385 429 uint32_t aGTData[VMDK_GT_CACHELINE_SIZE]; 386 430 } VMDKGTCACHEENTRY, *PVMDKGTCACHEENTRY; 431 387 432 /** 388 433 * Cache data structure for blocks of grain table entries. For now this is a … … 398 443 unsigned cEntries; 399 444 } VMDKGTCACHE, *PVMDKGTCACHE; 445 400 446 /** 401 447 * Complete VMDK image data structure. Mainly a collection of extents and a few … … 408 454 /** Descriptor file if applicable. */ 409 455 PVMDKFILE pFile; 456 410 457 /** Pointer to the per-disk VD interface list. */ 411 458 PVDINTERFACE pVDIfsDisk; 412 459 /** Pointer to the per-image VD interface list. */ 413 460 PVDINTERFACE pVDIfsImage; 461 414 462 /** Error interface. */ 415 463 PVDINTERFACEERROR pIfError; 416 464 /** I/O interface. */ 417 465 PVDINTERFACEIOINT pIfIo; 466 467 418 468 /** Pointer to the image extents. */ 419 469 PVMDKEXTENT pExtents; … … 423 473 * times only once (happens mainly with raw partition access). */ 424 474 PVMDKFILE pFiles; 475 425 476 /** 426 477 * Pointer to an array of segment entries for async I/O. 
… … 432 483 /** Entries available in the segments array. */ 433 484 unsigned cSegments; 485 434 486 /** Open flags passed by VBoxHD layer. */ 435 487 unsigned uOpenFlags; … … 450 502 /** Parent image modification UUID. */ 451 503 RTUUID ParentModificationUuid; 504 452 505 /** Pointer to grain table cache, if this image contains sparse extents. */ 453 506 PVMDKGTCACHE pGTCache; … … 461 514 VDREGIONLIST RegionList; 462 515 } VMDKIMAGE; 516 517 463 518 /** State for the input/output callout of the inflate reader/deflate writer. */ 464 519 typedef struct VMDKCOMPRESSIO … … 473 528 void *pvCompGrain; 474 529 } VMDKCOMPRESSIO; 530 531 475 532 /** Tracks async grain allocation. */ 476 533 typedef struct VMDKGRAINALLOCASYNC … … 494 551 uint64_t uRGTSector; 495 552 } VMDKGRAINALLOCASYNC, *PVMDKGRAINALLOCASYNC; 553 496 554 /** 497 555 * State information for vmdkRename() and helpers. … … 536 594 * Static Variables * 537 595 *********************************************************************************************************************************/ 596 538 597 /** NULL-terminated array of supported file extensions. */ 539 598 static const VDFILEEXTENSION s_aVmdkFileExtensions[] = … … 542 601 {NULL, VDTYPE_INVALID} 543 602 }; 603 544 604 /** NULL-terminated array of configuration option. */ 545 605 static const VDCONFIGINFO s_aVmdkConfigInfo[] = … … 550 610 { "BootSector", NULL, VDCFGVALUETYPE_BYTES, 0 }, 551 611 { "Relative", NULL, VDCFGVALUETYPE_INTEGER, 0 }, 612 552 613 /* End of options list */ 553 614 { NULL, NULL, VDCFGVALUETYPE_INTEGER, 0 } … … 558 619 * Internal Functions * 559 620 *********************************************************************************************************************************/ 621 560 622 static void vmdkFreeStreamBuffers(PVMDKEXTENT pExtent); 561 623 static int vmdkFreeExtentData(PVMDKIMAGE pImage, PVMDKEXTENT pExtent, 562 624 bool fDelete); 625 563 626 static int vmdkCreateExtents(PVMDKIMAGE pImage, unsigned cExtents); 564 627 static int vmdkFlushImage(PVMDKIMAGE pImage, PVDIOCTX pIoCtx); 565 628 static int vmdkSetImageComment(PVMDKIMAGE pImage, const char *pszComment); 566 629 static int vmdkFreeImage(PVMDKIMAGE pImage, bool fDelete, bool fFlush); 630 567 631 static DECLCALLBACK(int) vmdkAllocGrainComplete(void *pBackendData, PVDIOCTX pIoCtx, 568 632 void *pvUser, int rcReq); 633 569 634 /** 570 635 * Internal: open a file (using a file descriptor cache to ensure each file … … 576 641 int rc = VINF_SUCCESS; 577 642 PVMDKFILE pVmdkFile; 643 578 644 for (pVmdkFile = pImage->pFiles; 579 645 pVmdkFile != NULL; … … 584 650 Assert(fOpen == pVmdkFile->fOpen); 585 651 pVmdkFile->uReferences++; 652 586 653 *ppVmdkFile = pVmdkFile; 654 587 655 return rc; 588 656 } 589 657 } 658 590 659 /* If we get here, there's no matching entry in the cache. */ 591 660 pVmdkFile = (PVMDKFILE)RTMemAllocZ(sizeof(VMDKFILE)); … … 595 664 return VERR_NO_MEMORY; 596 665 } 666 597 667 pVmdkFile->pszFilename = RTStrDup(pszFilename); 598 668 if (!pVmdkFile->pszFilename) … … 602 672 return VERR_NO_MEMORY; 603 673 } 674 604 675 if (pszBasename) 605 676 { … … 613 684 } 614 685 } 686 615 687 pVmdkFile->fOpen = fOpen; 688 616 689 rc = vdIfIoIntFileOpen(pImage->pIfIo, pszFilename, fOpen, 617 690 &pVmdkFile->pStorage); … … 632 705 *ppVmdkFile = NULL; 633 706 } 707 634 708 return rc; 635 709 } 710 636 711 /** 637 712 * Internal: close a file, updating the file descriptor cache. 
… … 641 716 int rc = VINF_SUCCESS; 642 717 PVMDKFILE pVmdkFile = *ppVmdkFile; 718 643 719 AssertPtr(pVmdkFile); 720 644 721 pVmdkFile->fDelete |= fDelete; 645 722 Assert(pVmdkFile->uReferences); … … 649 726 PVMDKFILE pPrev; 650 727 PVMDKFILE pNext; 728 651 729 /* Unchain the element from the list. */ 652 730 pPrev = pVmdkFile->pPrev; 653 731 pNext = pVmdkFile->pNext; 732 654 733 if (pNext) 655 734 pNext->pPrev = pPrev; … … 658 737 else 659 738 pImage->pFiles = pNext; 739 660 740 rc = vdIfIoIntFileClose(pImage->pIfIo, pVmdkFile->pStorage); 741 661 742 bool fFileDel = pVmdkFile->fDelete; 662 743 if ( pVmdkFile->pszBasename … … 671 752 fFileDel = false; 672 753 } 754 673 755 if (fFileDel) 674 756 { … … 684 766 RTMemFree(pVmdkFile); 685 767 } 768 686 769 *ppVmdkFile = NULL; 687 770 return rc; 688 771 } 772 689 773 /*#define VMDK_USE_BLOCK_DECOMP_API - test and enable */ 690 774 #ifndef VMDK_USE_BLOCK_DECOMP_API … … 693 777 VMDKCOMPRESSIO *pInflateState = (VMDKCOMPRESSIO *)pvUser; 694 778 size_t cbInjected = 0; 779 695 780 Assert(cbBuf); 696 781 if (pInflateState->iOffset < 0) … … 718 803 } 719 804 #endif 805 720 806 /** 721 807 * Internal: read from a file and inflate the compressed data, … … 733 819 VMDKMARKER *pMarker = (VMDKMARKER *)pExtent->pvCompGrain; 734 820 size_t cbCompSize, cbActuallyRead; 821 735 822 if (!pcvMarker) 736 823 { … … 747 834 pMarker->cbSize = RT_H2LE_U32(pMarker->cbSize); 748 835 } 836 749 837 cbCompSize = RT_LE2H_U32(pMarker->cbSize); 750 838 if (cbCompSize == 0) … … 753 841 return VERR_VD_VMDK_INVALID_FORMAT; 754 842 } 843 755 844 /* Sanity check - the expansion ratio should be much less than 2. */ 756 845 Assert(cbCompSize < 2 * cbToRead); 757 846 if (cbCompSize >= 2 * cbToRead) 758 847 return VERR_VD_VMDK_INVALID_FORMAT; 848 759 849 /* Compressed grain marker. Data follows immediately. 
*/ 760 850 rc = vdIfIoIntFileReadSync(pImage->pIfIo, pExtent->pFile->pStorage, … … 766 856 512) 767 857 - RT_UOFFSETOF(VMDKMARKER, uType)); 858 768 859 if (puLBA) 769 860 *puLBA = RT_LE2H_U64(pMarker->uSector); … … 772 863 + RT_UOFFSETOF(VMDKMARKER, uType), 773 864 512); 865 774 866 #ifdef VMDK_USE_BLOCK_DECOMP_API 775 867 rc = RTZipBlockDecompress(RTZIPTYPE_ZLIB, 0 /*fFlags*/, … … 782 874 InflateState.cbCompGrain = cbCompSize + RT_UOFFSETOF(VMDKMARKER, uType); 783 875 InflateState.pvCompGrain = pExtent->pvCompGrain; 876 784 877 rc = RTZipDecompCreate(&pZip, &InflateState, vmdkFileInflateHelper); 785 878 if (RT_FAILURE(rc)) … … 798 891 return rc; 799 892 } 893 800 894 static DECLCALLBACK(int) vmdkFileDeflateHelper(void *pvUser, const void *pvBuf, size_t cbBuf) 801 895 { 802 896 VMDKCOMPRESSIO *pDeflateState = (VMDKCOMPRESSIO *)pvUser; 897 803 898 Assert(cbBuf); 804 899 if (pDeflateState->iOffset < 0) … … 817 912 return VINF_SUCCESS; 818 913 } 914 819 915 /** 820 916 * Internal: deflate the uncompressed data and write to a file, … … 829 925 PRTZIPCOMP pZip = NULL; 830 926 VMDKCOMPRESSIO DeflateState; 927 831 928 DeflateState.pImage = pImage; 832 929 DeflateState.iOffset = -1; 833 930 DeflateState.cbCompGrain = pExtent->cbCompGrain; 834 931 DeflateState.pvCompGrain = pExtent->pvCompGrain; 932 835 933 rc = RTZipCompCreate(&pZip, &DeflateState, vmdkFileDeflateHelper, 836 934 RTZIPTYPE_ZLIB, RTZIPLEVEL_DEFAULT); … … 845 943 Assert( DeflateState.iOffset > 0 846 944 && (size_t)DeflateState.iOffset <= DeflateState.cbCompGrain); 945 847 946 /* pad with zeroes to get to a full sector size */ 848 947 uint32_t uSize = DeflateState.iOffset; … … 854 953 uSize = uSizeAlign; 855 954 } 955 856 956 if (pcbMarkerData) 857 957 *pcbMarkerData = uSize; 958 858 959 /* Compressed grain marker. Data follows immediately. */ 859 960 VMDKMARKER *pMarker = (VMDKMARKER *)pExtent->pvCompGrain; … … 868 969 return rc; 869 970 } 971 972 870 973 /** 871 974 * Internal: check if all files are closed, prevent leaking resources. … … 875 978 int rc = VINF_SUCCESS, rc2; 876 979 PVMDKFILE pVmdkFile; 980 877 981 Assert(pImage->pFiles == NULL); 878 982 for (pVmdkFile = pImage->pFiles; … … 883 987 pVmdkFile->pszFilename)); 884 988 pImage->pFiles = pVmdkFile->pNext; 989 885 990 rc2 = vmdkFileClose(pImage, &pVmdkFile, pVmdkFile->fDelete); 991 886 992 if (RT_SUCCESS(rc)) 887 993 rc = rc2; … … 889 995 return rc; 890 996 } 997 891 998 /** 892 999 * Internal: truncate a string (at a UTF8 code point boundary) and encode the … … 897 1004 char szEnc[VMDK_ENCODED_COMMENT_MAX + 3]; 898 1005 char *pszDst = szEnc; 1006 899 1007 AssertPtr(psz); 1008 900 1009 for (; *psz; psz = RTStrNextCp(psz)) 901 1010 { … … 928 1037 return RTStrDup(szEnc); 929 1038 } 1039 930 1040 /** 931 1041 * Internal: decode a string and store it into the specified string. … … 935 1045 int rc = VINF_SUCCESS; 936 1046 char szBuf[4]; 1047 937 1048 if (!cb) 938 1049 return VERR_BUFFER_OVERFLOW; 1050 939 1051 AssertPtr(psz); 1052 940 1053 for (; *pszEncoded; pszEncoded = RTStrNextCp(pszEncoded)) 941 1054 { … … 960 1073 else 961 1074 pszDst = RTStrPutCp(pszDst, Cp); 1075 962 1076 /* Need to leave space for terminating NUL. */ 963 1077 if ((size_t)(pszDst - szBuf) + 1 >= cb) … … 972 1086 return rc; 973 1087 } 1088 974 1089 /** 975 1090 * Internal: free all buffers associated with grain directories. 
… … 988 1103 } 989 1104 } 1105 990 1106 /** 991 1107 * Internal: allocate the compressed/uncompressed buffers for streamOptimized … … 995 1111 { 996 1112 int rc = VINF_SUCCESS; 1113 997 1114 if (pImage->uImageFlags & VD_VMDK_IMAGE_FLAGS_STREAM_OPTIMIZED) 998 1115 { … … 1013 1130 rc = VERR_NO_MEMORY; 1014 1131 } 1132 1015 1133 if (RT_FAILURE(rc)) 1016 1134 vmdkFreeStreamBuffers(pExtent); 1017 1135 return rc; 1018 1136 } 1137 1019 1138 /** 1020 1139 * Internal: allocate all buffers associated with grain directories. … … 1025 1144 int rc = VINF_SUCCESS; 1026 1145 size_t cbGD = pExtent->cGDEntries * sizeof(uint32_t); 1146 1027 1147 pExtent->pGD = (uint32_t *)RTMemAllocZ(cbGD); 1028 1148 if (RT_LIKELY(pExtent->pGD)) … … 1037 1157 else 1038 1158 rc = VERR_NO_MEMORY; 1159 1039 1160 if (RT_FAILURE(rc)) 1040 1161 vmdkFreeGrainDirectory(pExtent); 1041 1162 return rc; 1042 1163 } 1164 1043 1165 /** 1044 1166 * Converts the grain directory from little to host endianess. … … 1051 1173 { 1052 1174 uint32_t *pGDTmp = pGD; 1175 1053 1176 for (uint32_t i = 0; i < cGDEntries; i++, pGDTmp++) 1054 1177 *pGDTmp = RT_LE2H_U32(*pGDTmp); 1055 1178 } 1179 1056 1180 /** 1057 1181 * Read the grain directory and allocated grain tables verifying them against … … 1066 1190 int rc = VINF_SUCCESS; 1067 1191 size_t cbGD = pExtent->cGDEntries * sizeof(uint32_t); 1192 1068 1193 AssertReturn(( pExtent->enmType == VMDKETYPE_HOSTED_SPARSE 1069 1194 && pExtent->uSectorGD != VMDK_GD_AT_END 1070 1195 && pExtent->uSectorRGD != VMDK_GD_AT_END), VERR_INTERNAL_ERROR); 1196 1071 1197 rc = vmdkAllocGrainDirectory(pImage, pExtent); 1072 1198 if (RT_SUCCESS(rc)) … … 1080 1206 { 1081 1207 vmdkGrainDirectoryConvToHost(pExtent->pGD, pExtent->cGDEntries); 1208 1082 1209 if ( pExtent->uSectorRGD 1083 1210 && !(pImage->uOpenFlags & VD_OPEN_FLAGS_SKIP_CONSISTENCY_CHECKS)) … … 1091 1218 { 1092 1219 vmdkGrainDirectoryConvToHost(pExtent->pRGD, pExtent->cGDEntries); 1220 1093 1221 /* Check grain table and redundant grain table for consistency. */ 1094 1222 size_t cbGT = pExtent->cGTEntries * sizeof(uint32_t); 1095 1223 size_t cbGTBuffers = cbGT; /* Start with space for one GT. */ 1096 1224 size_t cbGTBuffersMax = _1M; 1225 1097 1226 uint32_t *pTmpGT1 = (uint32_t *)RTMemAlloc(cbGTBuffers); 1098 1227 uint32_t *pTmpGT2 = (uint32_t *)RTMemAlloc(cbGTBuffers); 1228 1099 1229 if ( !pTmpGT1 1100 1230 || !pTmpGT2) 1101 1231 rc = VERR_NO_MEMORY; 1232 1102 1233 size_t i = 0; 1103 1234 uint32_t *pGDTmp = pExtent->pGD; 1104 1235 uint32_t *pRGDTmp = pExtent->pRGD; 1236 1105 1237 /* Loop through all entries. */ 1106 1238 while (i < pExtent->cGDEntries) … … 1109 1241 uint32_t uRGTStart = *pRGDTmp; 1110 1242 size_t cbGTRead = cbGT; 1243 1111 1244 /* If no grain table is allocated skip the entry. 
*/ 1112 1245 if (*pGDTmp == 0 && *pRGDTmp == 0) … … 1115 1248 continue; 1116 1249 } 1250 1117 1251 if (*pGDTmp == 0 || *pRGDTmp == 0 || *pGDTmp == *pRGDTmp) 1118 1252 { … … 1124 1258 break; 1125 1259 } 1260 1126 1261 i++; 1127 1262 pGDTmp++; 1128 1263 pRGDTmp++; 1264 1129 1265 /* 1130 1266 * Read a few tables at once if adjacent to decrease the number … … 1140 1276 continue; 1141 1277 } 1278 1142 1279 if (*pGDTmp == 0 || *pRGDTmp == 0 || *pGDTmp == *pRGDTmp) 1143 1280 { … … 1149 1286 break; 1150 1287 } 1288 1151 1289 /* Check that the start offsets are adjacent.*/ 1152 1290 if ( VMDK_SECTOR2BYTE(uGTStart) + cbGTRead != VMDK_SECTOR2BYTE(*pGDTmp) 1153 1291 || VMDK_SECTOR2BYTE(uRGTStart) + cbGTRead != VMDK_SECTOR2BYTE(*pRGDTmp)) 1154 1292 break; 1293 1155 1294 i++; 1156 1295 pGDTmp++; … … 1158 1297 cbGTRead += cbGT; 1159 1298 } 1299 1160 1300 /* Increase buffers if required. */ 1161 1301 if ( RT_SUCCESS(rc) … … 1175 1315 else 1176 1316 rc = VERR_NO_MEMORY; 1317 1177 1318 if (rc == VERR_NO_MEMORY) 1178 1319 { … … 1181 1322 i -= cbGTRead / cbGT; 1182 1323 cbGTRead = cbGT; 1324 1183 1325 /* Don't try to increase the buffer again in the next run. */ 1184 1326 cbGTBuffersMax = cbGTBuffers; 1185 1327 } 1186 1328 } 1329 1187 1330 if (RT_SUCCESS(rc)) 1188 1331 { … … 1217 1360 } 1218 1361 } /* while (i < pExtent->cGDEntries) */ 1362 1219 1363 /** @todo figure out what to do for unclean VMDKs. */ 1220 1364 if (pTmpGT1) … … 1232 1376 N_("VMDK: could not read grain directory in '%s': %Rrc"), pExtent->pszFullname, rc); 1233 1377 } 1378 1234 1379 if (RT_FAILURE(rc)) 1235 1380 vmdkFreeGrainDirectory(pExtent); 1236 1381 return rc; 1237 1382 } 1383 1238 1384 /** 1239 1385 * Creates a new grain directory for the given extent at the given start sector. … … 1254 1400 size_t cbGTRounded; 1255 1401 uint64_t cbOverhead; 1402 1256 1403 if (fPreAlloc) 1257 1404 { … … 1267 1414 cbOverhead = VMDK_SECTOR2BYTE(uStartSector) + cbGDRounded; 1268 1415 } 1416 1269 1417 /* For streamOptimized extents there is only one grain directory, 1270 1418 * and for all others take redundant grain directory into account. */ … … 1281 1429 rc = vdIfIoIntFileSetSize(pImage->pIfIo, pExtent->pFile->pStorage, cbOverhead); 1282 1430 } 1431 1283 1432 if (RT_SUCCESS(rc)) 1284 1433 { 1285 1434 pExtent->uAppendPosition = cbOverhead; 1286 1435 pExtent->cOverheadSectors = VMDK_BYTE2SECTOR(cbOverhead); 1436 1287 1437 if (pImage->uImageFlags & VD_VMDK_IMAGE_FLAGS_STREAM_OPTIMIZED) 1288 1438 { … … 1295 1445 pExtent->uSectorGD = uStartSector + VMDK_BYTE2SECTOR(cbGDRounded + cbGTRounded); 1296 1446 } 1447 1297 1448 rc = vmdkAllocStreamBuffers(pImage, pExtent); 1298 1449 if (RT_SUCCESS(rc)) … … 1304 1455 uint32_t uGTSectorLE; 1305 1456 uint64_t uOffsetSectors; 1457 1306 1458 if (pExtent->pRGD) 1307 1459 { … … 1323 1475 } 1324 1476 } 1477 1325 1478 if (RT_SUCCESS(rc)) 1326 1479 { … … 1345 1498 } 1346 1499 } 1500 1347 1501 if (RT_FAILURE(rc)) 1348 1502 vmdkFreeGrainDirectory(pExtent); 1349 1503 return rc; 1350 1504 } 1505 1351 1506 /** 1352 1507 * Unquotes the given string returning the result in a separate buffer. … … 1366 1521 char *pszQ; 1367 1522 char *pszUnquoted; 1523 1368 1524 /* Skip over whitespace. 
*/ 1369 1525 while (*pszStr == ' ' || *pszStr == '\t') 1370 1526 pszStr++; 1527 1371 1528 if (*pszStr != '"') 1372 1529 { … … 1383 1540 pImage->pszFilename, pszStart); 1384 1541 } 1542 1385 1543 pszUnquoted = (char *)RTMemTmpAlloc(pszQ - pszStr + 1); 1386 1544 if (!pszUnquoted) … … 1393 1551 return VINF_SUCCESS; 1394 1552 } 1553 1395 1554 static int vmdkDescInitStr(PVMDKIMAGE pImage, PVMDKDESCRIPTOR pDescriptor, 1396 1555 const char *pszLine) … … 1398 1557 char *pEnd = pDescriptor->aLines[pDescriptor->cLines]; 1399 1558 ssize_t cbDiff = strlen(pszLine) + 1; 1559 1400 1560 if ( pDescriptor->cLines >= VMDK_DESCRIPTOR_LINES_MAX - 1 1401 1561 && pEnd - pDescriptor->aLines[0] > (ptrdiff_t)pDescriptor->cbDescAlloc - cbDiff) 1402 1562 return vdIfError(pImage->pIfError, VERR_BUFFER_OVERFLOW, RT_SRC_POS, N_("VMDK: descriptor too big in '%s'"), pImage->pszFilename); 1563 1403 1564 memcpy(pEnd, pszLine, cbDiff); 1404 1565 pDescriptor->cLines++; 1405 1566 pDescriptor->aLines[pDescriptor->cLines] = pEnd + cbDiff; 1406 1567 pDescriptor->fDirty = true; 1568 1407 1569 return VINF_SUCCESS; 1408 1570 } 1571 1409 1572 static bool vmdkDescGetStr(PVMDKDESCRIPTOR pDescriptor, unsigned uStart, 1410 1573 const char *pszKey, const char **ppszValue) … … 1412 1575 size_t cbKey = strlen(pszKey); 1413 1576 const char *pszValue; 1577 1414 1578 while (uStart != 0) 1415 1579 { … … 1430 1594 return !!uStart; 1431 1595 } 1596 1432 1597 static int vmdkDescSetStr(PVMDKIMAGE pImage, PVMDKDESCRIPTOR pDescriptor, 1433 1598 unsigned uStart, … … 1437 1602 size_t cbKey = strlen(pszKey); 1438 1603 unsigned uLast = 0; 1604 1439 1605 while (uStart != 0) 1440 1606 { … … 1471 1637 > (ptrdiff_t)pDescriptor->cbDescAlloc - cbDiff) 1472 1638 return vdIfError(pImage->pIfError, VERR_BUFFER_OVERFLOW, RT_SRC_POS, N_("VMDK: descriptor too big in '%s'"), pImage->pszFilename); 1639 1473 1640 memmove(pszTmp + cbNewVal, pszTmp + cbOldVal, 1474 1641 pDescriptor->aLines[pDescriptor->cLines] - pszTmp - cbOldVal); … … 1533 1700 for (unsigned i = uStart + 1; i <= pDescriptor->cLines; i++) 1534 1701 pDescriptor->aLines[i] += cbDiff; 1702 1535 1703 /* Adjust starting line numbers of following descriptor sections. */ 1536 1704 if (uStart <= pDescriptor->uFirstExtent) … … 1542 1710 return VINF_SUCCESS; 1543 1711 } 1712 1544 1713 static int vmdkDescBaseGetU32(PVMDKDESCRIPTOR pDescriptor, const char *pszKey, 1545 1714 uint32_t *puValue) 1546 1715 { 1547 1716 const char *pszValue; 1717 1548 1718 if (!vmdkDescGetStr(pDescriptor, pDescriptor->uFirstDesc, pszKey, 1549 1719 &pszValue)) … … 1551 1721 return RTStrToUInt32Ex(pszValue, NULL, 10, puValue); 1552 1722 } 1723 1553 1724 /** 1554 1725 * Returns the value of the given key as a string allocating the necessary memory. 
… … 1567 1738 const char *pszValue; 1568 1739 char *pszValueUnquoted; 1740 1569 1741 if (!vmdkDescGetStr(pDescriptor, pDescriptor->uFirstDesc, pszKey, 1570 1742 &pszValue)) … … 1576 1748 return rc; 1577 1749 } 1750 1578 1751 static int vmdkDescBaseSetStr(PVMDKIMAGE pImage, PVMDKDESCRIPTOR pDescriptor, 1579 1752 const char *pszKey, const char *pszValue) 1580 1753 { 1581 1754 char *pszValueQuoted; 1755 1582 1756 RTStrAPrintf(&pszValueQuoted, "\"%s\"", pszValue); 1583 1757 if (!pszValueQuoted) … … 1588 1762 return rc; 1589 1763 } 1764 1590 1765 static void vmdkDescExtRemoveDummy(PVMDKIMAGE pImage, 1591 1766 PVMDKDESCRIPTOR pDescriptor) … … 1594 1769 unsigned uEntry = pDescriptor->uFirstExtent; 1595 1770 ssize_t cbDiff; 1771 1596 1772 if (!uEntry) 1597 1773 return; 1774 1598 1775 cbDiff = strlen(pDescriptor->aLines[uEntry]) + 1; 1599 1776 /* Move everything including \0 in the entry marking the end of buffer. */ … … 1611 1788 if (pDescriptor->uFirstDDB) 1612 1789 pDescriptor->uFirstDDB--; 1790 1613 1791 return; 1614 1792 } 1615 static void vmdkDescExtRemoveByLine(PVMDKIMAGE pImage, 1616 PVMDKDESCRIPTOR pDescriptor, unsigned uLine) 1617 { 1618 RT_NOREF1(pImage); 1619 unsigned uEntry = uLine; 1620 ssize_t cbDiff; 1621 if (!uEntry) 1622 return; 1623 cbDiff = strlen(pDescriptor->aLines[uEntry]) + 1; 1624 /* Move everything including \0 in the entry marking the end of buffer. */ 1625 memmove(pDescriptor->aLines[uEntry], pDescriptor->aLines[uEntry + 1], 1626 pDescriptor->aLines[pDescriptor->cLines] - pDescriptor->aLines[uEntry + 1] + 1); 1627 for (unsigned i = uEntry; i <= pDescriptor->cLines; i++) 1628 { 1629 if (i != uEntry) 1630 pDescriptor->aLines[i - 1] = pDescriptor->aLines[i] - cbDiff; 1631 if (pDescriptor->aNextLines[i]) 1632 pDescriptor->aNextLines[i - 1] = pDescriptor->aNextLines[i] - 1; 1633 else 1634 pDescriptor->aNextLines[i - 1] = 0; 1635 } 1636 pDescriptor->cLines--; 1637 if (pDescriptor->uFirstDDB) 1638 pDescriptor->uFirstDDB--; 1639 return; 1640 } 1793 1641 1794 static int vmdkDescExtInsert(PVMDKIMAGE pImage, PVMDKDESCRIPTOR pDescriptor, 1642 1795 VMDKACCESS enmAccess, uint64_t cNominalSectors, … … 1650 1803 char szExt[1024]; 1651 1804 ssize_t cbDiff; 1805 1652 1806 Assert((unsigned)enmAccess < RT_ELEMENTS(apszAccess)); 1653 1807 Assert((unsigned)enmType < RT_ELEMENTS(apszType)); 1808 1654 1809 /* Find last entry in extent description. */ 1655 1810 while (uStart) … … 1659 1814 uStart = pDescriptor->aNextLines[uStart]; 1660 1815 } 1816 1661 1817 if (enmType == VMDKETYPE_ZERO) 1662 1818 { … … 1677 1833 } 1678 1834 cbDiff = strlen(szExt) + 1; 1835 1679 1836 /* Check for buffer overflow. 
*/ 1680 1837 if ( (pDescriptor->cLines >= VMDK_DESCRIPTOR_LINES_MAX - 1) 1681 1838 || ( pDescriptor->aLines[pDescriptor->cLines] 1682 1839 - pDescriptor->aLines[0] > (ptrdiff_t)pDescriptor->cbDescAlloc - cbDiff)) 1683 { 1684 if ((pImage->uImageFlags & VD_VMDK_IMAGE_FLAGS_SPLIT_2G) 1685 && !(pDescriptor->cLines >= VMDK_DESCRIPTOR_LINES_MAX - 1)) 1686 { 1687 pImage->cbDescAlloc *= 2; 1688 pDescriptor->cbDescAlloc *= 2; 1689 } 1690 else 1691 return vdIfError(pImage->pIfError, VERR_BUFFER_OVERFLOW, RT_SRC_POS, N_("VMDK: descriptor too big in '%s'"), pImage->pszFilename); 1692 } 1840 return vdIfError(pImage->pIfError, VERR_BUFFER_OVERFLOW, RT_SRC_POS, N_("VMDK: descriptor too big in '%s'"), pImage->pszFilename); 1693 1841 1694 1842 for (unsigned i = pDescriptor->cLines + 1; i > uLast + 1; i--) … … 1710 1858 for (unsigned i = uStart + 1; i <= pDescriptor->cLines; i++) 1711 1859 pDescriptor->aLines[i] += cbDiff; 1860 1712 1861 /* Adjust starting line numbers of following descriptor sections. */ 1713 1862 if (uStart <= pDescriptor->uFirstDDB) 1714 1863 pDescriptor->uFirstDDB++; 1864 1715 1865 pDescriptor->fDirty = true; 1716 1866 return VINF_SUCCESS; 1717 1867 } 1868 1718 1869 /** 1719 1870 * Returns the value of the given key from the DDB as a string allocating … … 1733 1884 const char *pszValue; 1734 1885 char *pszValueUnquoted; 1886 1735 1887 if (!vmdkDescGetStr(pDescriptor, pDescriptor->uFirstDDB, pszKey, 1736 1888 &pszValue)) … … 1742 1894 return rc; 1743 1895 } 1896 1744 1897 static int vmdkDescDDBGetU32(PVMDKIMAGE pImage, PVMDKDESCRIPTOR pDescriptor, 1745 1898 const char *pszKey, uint32_t *puValue) … … 1747 1900 const char *pszValue; 1748 1901 char *pszValueUnquoted; 1902 1749 1903 if (!vmdkDescGetStr(pDescriptor, pDescriptor->uFirstDDB, pszKey, 1750 1904 &pszValue)) … … 1757 1911 return rc; 1758 1912 } 1913 1759 1914 static int vmdkDescDDBGetUuid(PVMDKIMAGE pImage, PVMDKDESCRIPTOR pDescriptor, 1760 1915 const char *pszKey, PRTUUID pUuid) … … 1762 1917 const char *pszValue; 1763 1918 char *pszValueUnquoted; 1919 1764 1920 if (!vmdkDescGetStr(pDescriptor, pDescriptor->uFirstDDB, pszKey, 1765 1921 &pszValue)) … … 1772 1928 return rc; 1773 1929 } 1930 1774 1931 static int vmdkDescDDBSetStr(PVMDKIMAGE pImage, PVMDKDESCRIPTOR pDescriptor, 1775 1932 const char *pszKey, const char *pszVal) … … 1777 1934 int rc; 1778 1935 char *pszValQuoted; 1936 1779 1937 if (pszVal) 1780 1938 { … … 1791 1949 return rc; 1792 1950 } 1951 1793 1952 static int vmdkDescDDBSetUuid(PVMDKIMAGE pImage, PVMDKDESCRIPTOR pDescriptor, 1794 1953 const char *pszKey, PCRTUUID pUuid) 1795 1954 { 1796 1955 char *pszUuid; 1956 1797 1957 RTStrAPrintf(&pszUuid, "\"%RTuuid\"", pUuid); 1798 1958 if (!pszUuid) … … 1803 1963 return rc; 1804 1964 } 1965 1805 1966 static int vmdkDescDDBSetU32(PVMDKIMAGE pImage, PVMDKDESCRIPTOR pDescriptor, 1806 1967 const char *pszKey, uint32_t uValue) 1807 1968 { 1808 1969 char *pszValue; 1970 1809 1971 RTStrAPrintf(&pszValue, "\"%d\"", uValue); 1810 1972 if (!pszValue) … … 1815 1977 return rc; 1816 1978 } 1979 1817 1980 /** 1818 1981 * Splits the descriptor data into individual lines checking for correct line … … 1828 1991 unsigned cLine = 0; 1829 1992 int rc = VINF_SUCCESS; 1993 1830 1994 while ( RT_SUCCESS(rc) 1831 1995 && *pszTmp != '\0') … … 1838 2002 break; 1839 2003 } 2004 1840 2005 while (*pszTmp != '\0' && *pszTmp != '\n') 1841 2006 { … … 1855 2020 pszTmp++; 1856 2021 } 2022 1857 2023 if (RT_FAILURE(rc)) 1858 2024 break; 2025 1859 2026 /* Get rid of LF character. 
*/ 1860 2027 if (*pszTmp == '\n') … … 1864 2031 } 1865 2032 } 2033 1866 2034 if (RT_SUCCESS(rc)) 1867 2035 { … … 1870 2038 pDesc->aLines[cLine] = pszTmp; 1871 2039 } 2040 1872 2041 return rc; 1873 2042 } 2043 1874 2044 static int vmdkPreprocessDescriptor(PVMDKIMAGE pImage, char *pDescData, 1875 2045 size_t cbDescData, PVMDKDESCRIPTOR pDescriptor) … … 1888 2058 { 1889 2059 unsigned uLastNonEmptyLine = 0; 2060 1890 2061 /* Initialize those, because we need to be able to reopen an image. */ 1891 2062 pDescriptor->uFirstDesc = 0; … … 1953 2124 } 1954 2125 } 2126 1955 2127 return rc; 1956 2128 } 2129 1957 2130 static int vmdkDescSetPCHSGeometry(PVMDKIMAGE pImage, 1958 2131 PCVDGEOMETRY pPCHSGeometry) … … 1973 2146 return rc; 1974 2147 } 2148 1975 2149 static int vmdkDescSetLCHSGeometry(PVMDKIMAGE pImage, 1976 2150 PCVDGEOMETRY pLCHSGeometry) … … 1983 2157 rc = vmdkDescDDBSetU32(pImage, &pImage->Descriptor, 1984 2158 VMDK_DDB_GEO_LCHS_HEADS, 2159 1985 2160 pLCHSGeometry->cHeads); 1986 2161 if (RT_FAILURE(rc)) … … 1991 2166 return rc; 1992 2167 } 2168 1993 2169 static int vmdkCreateDescriptor(PVMDKIMAGE pImage, char *pDescData, 1994 2170 size_t cbDescData, PVMDKDESCRIPTOR pDescriptor) … … 2002 2178 pDescriptor->aLines[pDescriptor->cLines] = pDescData; 2003 2179 memset(pDescriptor->aNextLines, '\0', sizeof(pDescriptor->aNextLines)); 2180 2004 2181 int rc = vmdkDescInitStr(pImage, pDescriptor, "# Disk DescriptorFile"); 2005 2182 if (RT_SUCCESS(rc)) … … 2033 2210 { 2034 2211 pDescriptor->uFirstDDB = pDescriptor->cLines - 1; 2212 2035 2213 /* Now that the framework is in place, use the normal functions to insert 2036 2214 * the remaining keys. */ … … 2045 2223 if (RT_SUCCESS(rc)) 2046 2224 rc = vmdkDescDDBSetStr(pImage, pDescriptor, "ddb.adapterType", "ide"); 2225 2047 2226 return rc; 2048 2227 } 2228 2049 2229 static int vmdkParseDescriptor(PVMDKIMAGE pImage, char *pDescData, size_t cbDescData) 2050 2230 { … … 2053 2233 unsigned uLine; 2054 2234 unsigned i; 2235 2055 2236 rc = vmdkPreprocessDescriptor(pImage, pDescData, cbDescData, 2056 2237 &pImage->Descriptor); 2057 2238 if (RT_FAILURE(rc)) 2058 2239 return rc; 2240 2059 2241 /* Check version, must be 1. */ 2060 2242 uint32_t uVersion; … … 2064 2246 if (uVersion != 1) 2065 2247 return vdIfError(pImage->pIfError, VERR_VD_VMDK_UNSUPPORTED_VERSION, RT_SRC_POS, N_("VMDK: unsupported format version in descriptor in '%s'"), pImage->pszFilename); 2248 2066 2249 /* Get image creation type and determine image flags. */ 2067 2250 char *pszCreateType = NULL; /* initialized to make gcc shut up */ … … 2081 2264 pImage->uImageFlags |= VD_IMAGE_FLAGS_FIXED | VD_VMDK_IMAGE_FLAGS_ESX; 2082 2265 RTMemTmpFree(pszCreateType); 2266 2083 2267 /* Count the number of extent config entries. */ 2084 2268 for (uLine = pImage->Descriptor.uFirstExtent, cExtents = 0; … … 2086 2270 uLine = pImage->Descriptor.aNextLines[uLine], cExtents++) 2087 2271 /* nothing */; 2272 2088 2273 if (!pImage->pDescData && cExtents != 1) 2089 2274 { … … 2091 2276 return vdIfError(pImage->pIfError, VERR_VD_VMDK_INVALID_HEADER, RT_SRC_POS, N_("VMDK: monolithic image may only have one extent in '%s'"), pImage->pszFilename); 2092 2277 } 2278 2093 2279 if (pImage->pDescData) 2094 2280 { … … 2098 2284 return rc; 2099 2285 } 2286 2100 2287 for (i = 0, uLine = pImage->Descriptor.uFirstExtent; 2101 2288 i < cExtents; i++, uLine = pImage->Descriptor.aNextLines[uLine]) 2102 2289 { 2103 2290 char *pszLine = pImage->Descriptor.aLines[uLine]; 2291 2104 2292 /* Access type of the extent. 
*/ 2105 2293 if (!strncmp(pszLine, "RW", 2)) … … 2122 2310 if (*pszLine++ != ' ') 2123 2311 return vdIfError(pImage->pIfError, VERR_VD_VMDK_INVALID_HEADER, RT_SRC_POS, N_("VMDK: parse error in extent description in '%s'"), pImage->pszFilename); 2312 2124 2313 /* Nominal size of the extent. */ 2125 2314 rc = RTStrToUInt64Ex(pszLine, &pszLine, 10, … … 2129 2318 if (*pszLine++ != ' ') 2130 2319 return vdIfError(pImage->pIfError, VERR_VD_VMDK_INVALID_HEADER, RT_SRC_POS, N_("VMDK: parse error in extent description in '%s'"), pImage->pszFilename); 2320 2131 2321 /* Type of the extent. */ 2132 2322 if (!strncmp(pszLine, "SPARSE", 6)) … … 2152 2342 else 2153 2343 return vdIfError(pImage->pIfError, VERR_VD_VMDK_INVALID_HEADER, RT_SRC_POS, N_("VMDK: parse error in extent description in '%s'"), pImage->pszFilename); 2344 2154 2345 if (pImage->pExtents[i].enmType == VMDKETYPE_ZERO) 2155 2346 { … … 2166 2357 if (*pszLine++ != ' ') 2167 2358 return vdIfError(pImage->pIfError, VERR_VD_VMDK_INVALID_HEADER, RT_SRC_POS, N_("VMDK: parse error in extent description in '%s'"), pImage->pszFilename); 2359 2168 2360 /* Basename of the image. Surrounded by quotes. */ 2169 2361 char *pszBasename; … … 2184 2376 } 2185 2377 } 2378 2186 2379 if (*pszLine != '\0') 2187 2380 return vdIfError(pImage->pIfError, VERR_VD_VMDK_INVALID_HEADER, RT_SRC_POS, N_("VMDK: parse error in extent description in '%s'"), pImage->pszFilename); 2188 2381 } 2189 2382 } 2383 2190 2384 /* Determine PCHS geometry (autogenerate if necessary). */ 2191 2385 rc = vmdkDescDDBGetU32(pImage, &pImage->Descriptor, … … 2222 2416 pImage->PCHSGeometry.cSectors = 63; 2223 2417 } 2418 2224 2419 /* Determine LCHS geometry (set to 0 if not specified). */ 2225 2420 rc = vmdkDescDDBGetU32(pImage, &pImage->Descriptor, … … 2252 2447 pImage->LCHSGeometry.cSectors = 0; 2253 2448 } 2449 2254 2450 /* Get image UUID. */ 2255 2451 rc = vmdkDescDDBGetUuid(pImage, &pImage->Descriptor, VMDK_DDB_IMAGE_UUID, … … 2275 2471 else if (RT_FAILURE(rc)) 2276 2472 return rc; 2473 2277 2474 /* Get image modification UUID. */ 2278 2475 rc = vmdkDescDDBGetUuid(pImage, &pImage->Descriptor, … … 2300 2497 else if (RT_FAILURE(rc)) 2301 2498 return rc; 2499 2302 2500 /* Get UUID of parent image. */ 2303 2501 rc = vmdkDescDDBGetUuid(pImage, &pImage->Descriptor, VMDK_DDB_PARENT_UUID, … … 2323 2521 else if (RT_FAILURE(rc)) 2324 2522 return rc; 2523 2325 2524 /* Get parent image modification UUID. */ 2326 2525 rc = vmdkDescDDBGetUuid(pImage, &pImage->Descriptor, … … 2346 2545 else if (RT_FAILURE(rc)) 2347 2546 return rc; 2547 2348 2548 return VINF_SUCCESS; 2349 2549 } 2550 2350 2551 /** 2351 2552 * Internal : Prepares the descriptor to write to the image. … … 2355 2556 { 2356 2557 int rc = VINF_SUCCESS; 2558 2357 2559 /* 2358 2560 * Allocate temporary descriptor buffer. 
… … 2363 2565 char *pszDescriptor = (char *)RTMemAllocZ(cbDescriptor); 2364 2566 size_t offDescriptor = 0; 2567 2365 2568 if (!pszDescriptor) 2366 2569 return VERR_NO_MEMORY; 2570 2367 2571 for (unsigned i = 0; i < pImage->Descriptor.cLines; i++) 2368 2572 { 2369 2573 const char *psz = pImage->Descriptor.aLines[i]; 2370 2574 size_t cb = strlen(psz); 2575 2371 2576 /* 2372 2577 * Increase the descriptor if there is no limit and … … 2384 2589 char *pszDescriptorNew = NULL; 2385 2590 LogFlow(("Increasing descriptor cache\n")); 2591 2386 2592 pszDescriptorNew = (char *)RTMemRealloc(pszDescriptor, cbDescriptor + cb + 4 * _1K); 2387 2593 if (!pszDescriptorNew) … … 2394 2600 } 2395 2601 } 2602 2396 2603 if (cb > 0) 2397 2604 { … … 2399 2606 offDescriptor += cb; 2400 2607 } 2608 2401 2609 memcpy(pszDescriptor + offDescriptor, "\n", 1); 2402 2610 offDescriptor++; 2403 2611 } 2612 2404 2613 if (RT_SUCCESS(rc)) 2405 2614 { … … 2409 2618 else if (pszDescriptor) 2410 2619 RTMemFree(pszDescriptor); 2620 2411 2621 return rc; 2412 2622 } 2623 2413 2624 /** 2414 2625 * Internal: write/update the descriptor part of the image. … … 2422 2633 void *pvDescriptor = NULL; 2423 2634 size_t cbDescriptor; 2635 2424 2636 if (pImage->pDescData) 2425 2637 { … … 2439 2651 if (pDescFile == NULL) 2440 2652 return VERR_INVALID_PARAMETER; 2653 2441 2654 rc = vmdkDescriptorPrepare(pImage, cbLimit, &pvDescriptor, &cbDescriptor); 2442 2655 if (RT_SUCCESS(rc)) … … 2450 2663 rc = vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: error writing descriptor in '%s'"), pImage->pszFilename); 2451 2664 } 2665 2452 2666 if (RT_SUCCESS(rc) && !cbLimit) 2453 2667 { … … 2456 2670 rc = vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: error truncating descriptor in '%s'"), pImage->pszFilename); 2457 2671 } 2672 2458 2673 if (RT_SUCCESS(rc)) 2459 2674 pImage->Descriptor.fDirty = false; 2675 2460 2676 if (pvDescriptor) 2461 2677 RTMemFree(pvDescriptor); 2462 2678 return rc; 2463 } 2679 2680 } 2681 2464 2682 /** 2465 2683 * Internal: validate the consistency check values in a binary header. … … 2495 2713 return rc; 2496 2714 } 2715 2497 2716 /** 2498 2717 * Internal: read metadata belonging to an extent with binary header, i.e. 
… … 2504 2723 SparseExtentHeader Header; 2505 2724 int rc; 2725 2506 2726 if (!fMagicAlreadyRead) 2507 2727 rc = vdIfIoIntFileReadSync(pImage->pIfIo, pExtent->pFile->pStorage, 0, … … 2516 2736 - RT_UOFFSETOF(SparseExtentHeader, version)); 2517 2737 } 2738 2518 2739 if (RT_SUCCESS(rc)) 2519 2740 { … … 2522 2743 { 2523 2744 uint64_t cbFile = 0; 2745 2524 2746 if ( (RT_LE2H_U32(Header.flags) & RT_BIT(17)) 2525 2747 && RT_LE2H_U64(Header.gdOffset) == VMDK_GD_AT_END) 2526 2748 pExtent->fFooter = true; 2749 2527 2750 if ( !(pImage->uOpenFlags & VD_OPEN_FLAGS_READONLY) 2528 2751 || ( pExtent->fFooter … … 2533 2756 rc = vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: cannot get size of '%s'"), pExtent->pszFullname); 2534 2757 } 2758 2535 2759 if (RT_SUCCESS(rc)) 2536 2760 { 2537 2761 if (!(pImage->uOpenFlags & VD_OPEN_FLAGS_READONLY)) 2538 2762 pExtent->uAppendPosition = RT_ALIGN_64(cbFile, 512); 2763 2539 2764 if ( pExtent->fFooter 2540 2765 && ( !(pImage->uOpenFlags & VD_OPEN_FLAGS_READONLY) … … 2550 2775 rc = VERR_VD_VMDK_INVALID_HEADER; 2551 2776 } 2777 2552 2778 if (RT_SUCCESS(rc)) 2553 2779 rc = vmdkValidateHeader(pImage, pExtent, &Header); … … 2555 2781 pExtent->uAppendPosition = 0; 2556 2782 } 2783 2557 2784 if (RT_SUCCESS(rc)) 2558 2785 { … … 2577 2804 pExtent->uSectorRGD = 0; 2578 2805 } 2806 2579 2807 if (pExtent->uDescriptorSector && !pExtent->cDescriptorSectors) 2580 2808 rc = vdIfError(pImage->pIfError, VERR_VD_VMDK_INVALID_HEADER, RT_SRC_POS, 2581 2809 N_("VMDK: inconsistent embedded descriptor config in '%s'"), pExtent->pszFullname); 2810 2582 2811 if ( RT_SUCCESS(rc) 2583 2812 && ( pExtent->uSectorGD == VMDK_GD_AT_END … … 2587 2816 rc = vdIfError(pImage->pIfError, VERR_VD_VMDK_INVALID_HEADER, RT_SRC_POS, 2588 2817 N_("VMDK: cannot resolve grain directory offset in '%s'"), pExtent->pszFullname); 2818 2589 2819 if (RT_SUCCESS(rc)) 2590 2820 { … … 2597 2827 pExtent->cSectorsPerGDE = cSectorsPerGDE; 2598 2828 pExtent->cGDEntries = (pExtent->cSectors + cSectorsPerGDE - 1) / cSectorsPerGDE; 2829 2599 2830 /* Fix up the number of descriptor sectors, as some flat images have 2600 2831 * really just one, and this causes failures when inserting the UUID … … 2619 2850 rc = VERR_VD_VMDK_INVALID_HEADER; 2620 2851 } 2852 2621 2853 if (RT_FAILURE(rc)) 2622 2854 vmdkFreeExtentData(pImage, pExtent, false); 2855 2623 2856 return rc; 2624 2857 } 2858 2625 2859 /** 2626 2860 * Internal: read additional metadata belonging to an extent. For those … … 2630 2864 { 2631 2865 int rc = VINF_SUCCESS; 2866 2632 2867 /* disabled the check as there are too many truncated vmdk images out there */ 2633 2868 #ifdef VBOX_WITH_VMDK_STRICT_SIZE_CHECK … … 2669 2904 if (pImage->uImageFlags & VD_VMDK_IMAGE_FLAGS_STREAM_OPTIMIZED) 2670 2905 pExtent->uAppendPosition = 0; 2906 2671 2907 if ( !(pImage->uImageFlags & VD_VMDK_IMAGE_FLAGS_STREAM_OPTIMIZED) 2672 2908 || !(pImage->uOpenFlags & VD_OPEN_FLAGS_READONLY) … … 2682 2918 } 2683 2919 } 2920 2684 2921 if (RT_FAILURE(rc)) 2685 2922 vmdkFreeExtentData(pImage, pExtent, false); 2923 2686 2924 return rc; 2687 2925 } 2926 2688 2927 /** 2689 2928 * Internal: write/update the metadata for a sparse extent. 
… … 2693 2932 { 2694 2933 SparseExtentHeader Header; 2934 2695 2935 memset(&Header, '\0', sizeof(Header)); 2696 2936 Header.magicNumber = RT_H2LE_U32(VMDK_SPARSE_MAGICNUMBER); … … 2735 2975 Header.doubleEndLineChar2 = '\n'; 2736 2976 Header.compressAlgorithm = RT_H2LE_U16(pExtent->uCompression); 2977 2737 2978 int rc = vdIfIoIntFileWriteMeta(pImage->pIfIo, pExtent->pFile->pStorage, 2738 2979 uOffset, &Header, sizeof(Header), … … 2742 2983 return rc; 2743 2984 } 2985 2744 2986 /** 2745 2987 * Internal: free the buffers used for streamOptimized images. … … 2758 3000 } 2759 3001 } 3002 2760 3003 /** 2761 3004 * Internal: free the memory used by the extent data structure, optionally … … 2771 3014 { 2772 3015 int rc = VINF_SUCCESS; 3016 2773 3017 vmdkFreeGrainDirectory(pExtent); 2774 3018 if (pExtent->pDescData) … … 2797 3041 } 2798 3042 vmdkFreeStreamBuffers(pExtent); 3043 2799 3044 return rc; 2800 3045 } 3046 2801 3047 /** 2802 3048 * Internal: allocate grain table cache if necessary for this image. … … 2805 3051 { 2806 3052 PVMDKEXTENT pExtent; 3053 2807 3054 /* Allocate grain table cache if any sparse extent is present. */ 2808 3055 for (unsigned i = 0; i < pImage->cExtents; i++) … … 2824 3071 } 2825 3072 } 3073 2826 3074 return VINF_SUCCESS; 2827 3075 } 3076 2828 3077 /** 2829 3078 * Internal: allocate the given number of extents. … … 2853 3102 else 2854 3103 rc = VERR_NO_MEMORY; 3104 2855 3105 return rc; 2856 3106 } 2857 3107 2858 3108 /** 2859 * Internal: Create an additional file backed extent in split images. 2860 * Supports split sparse and flat images. 2861 * 2862 * @returns VBox status code. 2863 * @param pImage VMDK image instance. 2864 * @param cbSize Desiried size in bytes of new extent. 3109 * Internal: allocate and describes an additional, file-backed extent 3110 * for the given size. Preserves original extents. 2865 3111 */ 2866 3112 static int vmdkAddFileBackedExtent(PVMDKIMAGE pImage, uint64_t cbSize) 2867 3113 { 2868 3114 int rc = VINF_SUCCESS; 2869 unsigned uImageFlags = pImage->uImageFlags;2870 2871 /* Check for unsupported image type. */2872 if ((uImageFlags & VD_VMDK_IMAGE_FLAGS_ESX)2873 || (uImageFlags & VD_VMDK_IMAGE_FLAGS_STREAM_OPTIMIZED)2874 || (uImageFlags & VD_VMDK_IMAGE_FLAGS_RAWDISK))2875 {2876 return VERR_NOT_SUPPORTED;2877 }2878 2879 /* Allocate array of extents and copy existing extents to it. */2880 3115 PVMDKEXTENT pNewExtents = (PVMDKEXTENT)RTMemAllocZ((pImage->cExtents + 1) * sizeof(VMDKEXTENT)); 2881 if (!pNewExtents) 2882 { 2883 return VERR_NO_MEMORY; 2884 } 2885 2886 memcpy(pNewExtents, pImage->pExtents, pImage->cExtents * sizeof(VMDKEXTENT)); 2887 /** @todo r=jack - free old extent pointer */ 2888 2889 /* Locate newly created extent and populate default metadata. */ 2890 PVMDKEXTENT pExtent = &pNewExtents[pImage->cExtents]; 2891 2892 pExtent->pFile = NULL; 2893 pExtent->pszBasename = NULL; 2894 pExtent->pszFullname = NULL; 2895 pExtent->pGD = NULL; 2896 pExtent->pRGD = NULL; 2897 pExtent->pDescData = NULL; 2898 pExtent->uVersion = 1; 2899 pExtent->uCompression = VMDK_COMPRESSION_NONE; 2900 pExtent->uExtent = pImage->cExtents; 2901 pExtent->pImage = pImage; 2902 pExtent->cNominalSectors = VMDK_BYTE2SECTOR(cbSize); 2903 pExtent->enmAccess = VMDKACCESS_READWRITE; 2904 pExtent->uSectorOffset = 0; 2905 pExtent->fMetaDirty = true; 2906 2907 /* Apply image type specific meta data. 
*/ 2908 if (uImageFlags & VD_IMAGE_FLAGS_FIXED) 2909 { 3116 if (pNewExtents) 3117 { 3118 memcpy(pNewExtents, pImage->pExtents, pImage->cExtents * sizeof(VMDKEXTENT)); 3119 PVMDKEXTENT pExtent = &pNewExtents[pImage->cExtents]; 3120 3121 pExtent->pFile = NULL; 3122 pExtent->pszBasename = NULL; 3123 pExtent->pszFullname = NULL; 3124 pExtent->pGD = NULL; 3125 pExtent->pRGD = NULL; 3126 pExtent->pDescData = NULL; 3127 pExtent->uVersion = 1; 3128 pExtent->uCompression = VMDK_COMPRESSION_NONE; 3129 pExtent->uExtent = pImage->cExtents; 3130 pExtent->pImage = pImage; 3131 pExtent->cNominalSectors = VMDK_BYTE2SECTOR(cbSize); 2910 3132 pExtent->enmType = VMDKETYPE_FLAT; 2911 } 2912 else 2913 { 2914 uint64_t cSectorsPerGDE, cSectorsPerGD; 2915 pExtent->enmType = VMDKETYPE_HOSTED_SPARSE; 2916 pExtent->cSectors = VMDK_BYTE2SECTOR(RT_ALIGN_64(cbSize, _64K)); 2917 pExtent->cSectorsPerGrain = VMDK_BYTE2SECTOR(_64K); 2918 pExtent->cGTEntries = 512; 2919 cSectorsPerGDE = pExtent->cGTEntries * pExtent->cSectorsPerGrain; 2920 pExtent->cSectorsPerGDE = cSectorsPerGDE; 2921 pExtent->cGDEntries = (pExtent->cSectors + cSectorsPerGDE - 1) / cSectorsPerGDE; 2922 cSectorsPerGD = (pExtent->cGDEntries + (512 / sizeof(uint32_t) - 1)) / (512 / sizeof(uint32_t)); 2923 } 2924 2925 /* Allocate and set file name for extent. */ 2926 char *pszBasenameSubstr = RTPathFilename(pImage->pszFilename); 2927 AssertPtr(pszBasenameSubstr); 2928 2929 char *pszBasenameSuff = RTPathSuffix(pszBasenameSubstr); 2930 char *pszBasenameBase = RTStrDup(pszBasenameSubstr); 2931 RTPathStripSuffix(pszBasenameBase); 2932 char *pszTmp; 2933 size_t cbTmp; 2934 2935 if (pImage->uImageFlags & VD_IMAGE_FLAGS_FIXED) 2936 RTStrAPrintf(&pszTmp, "%s-f%03d%s", pszBasenameBase, 2937 pExtent->uExtent + 1, pszBasenameSuff); 2938 else 2939 RTStrAPrintf(&pszTmp, "%s-s%03d%s", pszBasenameBase, pExtent->uExtent + 1, 2940 pszBasenameSuff); 2941 2942 RTStrFree(pszBasenameBase); 2943 if (!pszTmp) 2944 return VERR_NO_STR_MEMORY; 2945 cbTmp = strlen(pszTmp) + 1; 2946 char *pszBasename = (char *)RTMemTmpAlloc(cbTmp); 2947 if (!pszBasename) 2948 { 3133 pExtent->enmAccess = VMDKACCESS_READWRITE; 3134 pExtent->uSectorOffset = 0; 3135 3136 char *pszBasenameSubstr = RTPathFilename(pImage->pszFilename); 3137 AssertPtr(pszBasenameSubstr); 3138 3139 char *pszBasenameSuff = RTPathSuffix(pszBasenameSubstr); 3140 char *pszBasenameBase = RTStrDup(pszBasenameSubstr); 3141 RTPathStripSuffix(pszBasenameBase); 3142 char *pszTmp; 3143 size_t cbTmp; 3144 3145 if (pImage->uImageFlags & VD_IMAGE_FLAGS_FIXED) 3146 RTStrAPrintf(&pszTmp, "%s-f%03d%s", pszBasenameBase, 3147 pExtent->uExtent + 1, pszBasenameSuff); 3148 else 3149 RTStrAPrintf(&pszTmp, "%s-s%03d%s", pszBasenameBase, pExtent->uExtent + 1, 3150 pszBasenameSuff); 3151 3152 RTStrFree(pszBasenameBase); 3153 if (!pszTmp) 3154 return VERR_NO_STR_MEMORY; 3155 cbTmp = strlen(pszTmp) + 1; 3156 char *pszBasename = (char *)RTMemTmpAlloc(cbTmp); 3157 if (!pszBasename) 3158 { 3159 RTStrFree(pszTmp); 3160 return VERR_NO_MEMORY; 3161 } 3162 3163 memcpy(pszBasename, pszTmp, cbTmp); 2949 3164 RTStrFree(pszTmp); 2950 return VERR_NO_MEMORY; 2951 } 2952 2953 memcpy(pszBasename, pszTmp, cbTmp); 2954 RTStrFree(pszTmp); 2955 2956 pExtent->pszBasename = pszBasename; 2957 2958 char *pszBasedirectory = RTStrDup(pImage->pszFilename); 2959 if (!pszBasedirectory) 2960 return VERR_NO_STR_MEMORY; 2961 RTPathStripFilename(pszBasedirectory); 2962 char *pszFullname = RTPathJoinA(pszBasedirectory, pExtent->pszBasename); 2963 RTStrFree(pszBasedirectory); 2964 if 
(!pszFullname) 2965 return VERR_NO_STR_MEMORY; 2966 pExtent->pszFullname = pszFullname; 2967 2968 /* Create file for extent. */ 2969 rc = vmdkFileOpen(pImage, &pExtent->pFile, pExtent->pszBasename, pExtent->pszFullname, 2970 VDOpenFlagsToFileOpenFlags(pImage->uOpenFlags, 2971 true /* fCreate */)); 2972 if (RT_FAILURE(rc)) 2973 return vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: could not create new file '%s'"), pExtent->pszFullname); 2974 2975 if (uImageFlags & VD_IMAGE_FLAGS_FIXED) 2976 { 2977 /* For flat images: Pre allocate file space. */ 3165 3166 pExtent->pszBasename = pszBasename; 3167 3168 char *pszBasedirectory = RTStrDup(pImage->pszFilename); 3169 if (!pszBasedirectory) 3170 return VERR_NO_STR_MEMORY; 3171 RTPathStripFilename(pszBasedirectory); 3172 char *pszFullname = RTPathJoinA(pszBasedirectory, pExtent->pszBasename); 3173 RTStrFree(pszBasedirectory); 3174 if (!pszFullname) 3175 return VERR_NO_STR_MEMORY; 3176 pExtent->pszFullname = pszFullname; 3177 3178 /* Create file for extent. */ 3179 rc = vmdkFileOpen(pImage, &pExtent->pFile, pExtent->pszBasename, pExtent->pszFullname, 3180 VDOpenFlagsToFileOpenFlags(pImage->uOpenFlags, 3181 true /* fCreate */)); 3182 if (RT_FAILURE(rc)) 3183 return vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: could not create new file '%s'"), pExtent->pszFullname); 3184 3185 rc = vmdkDescExtInsert(pImage, &pImage->Descriptor, pExtent->enmAccess, 3186 pExtent->cNominalSectors, pExtent->enmType, 3187 pExtent->pszBasename, pExtent->uSectorOffset); 3188 if (RT_FAILURE(rc)) 3189 return vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: could not insert the extent list into descriptor in '%s'"), pImage->pszFilename); 3190 2978 3191 rc = vdIfIoIntFileSetAllocationSize(pImage->pIfIo, pExtent->pFile->pStorage, cbSize, 2979 3192 0 /* fFlags */, NULL, 0, 0); 3193 2980 3194 if (RT_FAILURE(rc)) 2981 3195 return vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: could not set size of new file '%s'"), pExtent->pszFullname); 3196 3197 pImage->pExtents = pNewExtents; 3198 pImage->cExtents++; 2982 3199 } 2983 3200 else 2984 { 2985 /* For sparse images: Allocate new grain directories/tables. */ 2986 /* fPreAlloc should never be false because VMware can't use such images. */ 2987 rc = vmdkCreateGrainDirectory(pImage, pExtent, 2988 RT_MAX( pExtent->uDescriptorSector 2989 + pExtent->cDescriptorSectors, 2990 1), 2991 true /* fPreAlloc */); 2992 if (RT_FAILURE(rc)) 2993 return vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: could not create new grain directory in '%s'"), pExtent->pszFullname); 2994 } 2995 2996 /* Insert new extent into descriptor file. */ 2997 rc = vmdkDescExtInsert(pImage, &pImage->Descriptor, pExtent->enmAccess, 2998 pExtent->cNominalSectors, pExtent->enmType, 2999 pExtent->pszBasename, pExtent->uSectorOffset); 3000 if (RT_FAILURE(rc)) 3001 return vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: could not insert the extent list into descriptor in '%s'"), pImage->pszFilename); 3002 3003 pImage->pExtents = pNewExtents; 3004 pImage->cExtents++; 3005 3201 rc = VERR_NO_MEMORY; 3006 3202 return rc; 3007 3203 } 3008 3009 3204 /** 3010 3205 * Reads and processes the descriptor embedded in sparse images. 
… … 3054 3249 { 3055 3250 uint64_t cDescriptorSectorsOld = pExtent->cDescriptorSectors; 3251 3056 3252 pExtent->cDescriptorSectors = 4; 3057 3253 if (!(pImage->uOpenFlags & VD_OPEN_FLAGS_READONLY)) … … 3118 3314 rc = VERR_NO_MEMORY; 3119 3315 } 3316 3120 3317 return rc; 3121 3318 } 3319 3122 3320 /** 3123 3321 * Reads the descriptor from a pure text file. … … 3206 3404 else 3207 3405 pExtent->pszFullname = NULL; 3406 3208 3407 unsigned uOpenFlags = pImage->uOpenFlags | ((pExtent->enmAccess == VMDKACCESS_READONLY) ? VD_OPEN_FLAGS_READONLY : 0); 3209 3408 switch (pExtent->enmType) … … 3226 3425 if (RT_FAILURE(rc)) 3227 3426 break; 3427 3228 3428 /* Mark extent as unclean if opened in read-write mode. */ 3229 3429 if (!(pImage->uOpenFlags & VD_OPEN_FLAGS_READONLY)) … … 3262 3462 else if (RT_SUCCESS(rc)) 3263 3463 rc = vdIfError(pImage->pIfError, VERR_VD_VMDK_INVALID_HEADER, RT_SRC_POS, N_("VMDK: descriptor in '%s' is too short"), pImage->pszFilename); 3464 3264 3465 return rc; 3265 3466 } 3467 3266 3468 /** 3267 3469 * Read and process the descriptor based on the image type. … … 3274 3476 { 3275 3477 uint32_t u32Magic; 3478 3276 3479 /* Read magic (if present). */ 3277 3480 int rc = vdIfIoIntFileReadSync(pImage->pIfIo, pFile->pStorage, 0, … … 3290 3493 rc = VERR_VD_VMDK_INVALID_HEADER; 3291 3494 } 3495 3292 3496 return rc; 3293 3497 } 3498 3294 3499 /** 3295 3500 * Internal: Open an image, constructing all necessary data structures. … … 3301 3506 pImage->pIfIo = VDIfIoIntGet(pImage->pVDIfsImage); 3302 3507 AssertPtrReturn(pImage->pIfIo, VERR_INVALID_PARAMETER); 3508 3303 3509 /* 3304 3510 * Open the image. … … 3313 3519 { 3314 3520 pImage->pFile = pFile; 3521 3315 3522 rc = vmdkDescriptorRead(pImage, pFile); 3316 3523 if (RT_SUCCESS(rc)) … … 3330 3537 } 3331 3538 } 3539 3332 3540 /* Update the image metadata now in case has changed. */ 3333 3541 rc = vmdkFlushImage(pImage, NULL); … … 3349 3557 || pExtent->enmType == VMDKETYPE_ZERO) 3350 3558 pImage->uImageFlags |= VD_IMAGE_FLAGS_FIXED; 3559 3351 3560 pImage->cbSize += VMDK_SECTOR2BYTE(pExtent->cNominalSectors); 3352 3561 } 3562 3353 3563 if ( !(pImage->uImageFlags & VD_VMDK_IMAGE_FLAGS_STREAM_OPTIMIZED) 3354 3564 || !(pImage->uOpenFlags & VD_OPEN_FLAGS_READONLY) … … 3360 3570 /* else: Do NOT signal an appropriate error here, as the VD layer has the 3361 3571 * choice of retrying the open if it failed. */ 3572 3362 3573 if (RT_SUCCESS(rc)) 3363 3574 { … … 3365 3576 pImage->RegionList.fFlags = 0; 3366 3577 pImage->RegionList.cRegions = 1; 3578 3367 3579 pRegion->offRegion = 0; /* Disk start. */ 3368 3580 pRegion->cbBlock = 512; … … 3377 3589 return rc; 3378 3590 } 3591 3379 3592 /** 3380 3593 * Frees a raw descriptor. 
… … 3385 3598 if (!pRawDesc) 3386 3599 return VINF_SUCCESS; 3600 3387 3601 RTStrFree(pRawDesc->pszRawDisk); 3388 3602 pRawDesc->pszRawDisk = NULL; 3603 3389 3604 /* Partitions: */ 3390 3605 for (unsigned i = 0; i < pRawDesc->cPartDescs; i++) … … 3392 3607 RTStrFree(pRawDesc->pPartDescs[i].pszRawDevice); 3393 3608 pRawDesc->pPartDescs[i].pszRawDevice = NULL; 3609 3394 3610 RTMemFree(pRawDesc->pPartDescs[i].pvPartitionData); 3395 3611 pRawDesc->pPartDescs[i].pvPartitionData = NULL; 3396 3612 } 3613 3397 3614 RTMemFree(pRawDesc->pPartDescs); 3398 3615 pRawDesc->pPartDescs = NULL; 3616 3399 3617 RTMemFree(pRawDesc); 3400 3618 return VINF_SUCCESS; 3401 3619 } 3620 3402 3621 /** 3403 3622 * Helper that grows the raw partition descriptor table by @a cToAdd entries, … … 3416 3635 pRawDesc->cPartDescs = cNew; 3417 3636 pRawDesc->pPartDescs = paNew; 3637 3418 3638 *ppRet = &paNew[cOld]; 3419 3639 return VINF_SUCCESS; … … 3424 3644 pImage->pszFilename, cOld, cNew); 3425 3645 } 3646 3426 3647 /** 3427 3648 * @callback_method_impl{FNRTSORTCMP} … … 3433 3654 return iDelta < 0 ? -1 : iDelta > 0 ? 1 : 0; 3434 3655 } 3656 3435 3657 /** 3436 3658 * Post processes the partition descriptors. … … 3444 3666 */ 3445 3667 RTSortShell(pRawDesc->pPartDescs, pRawDesc->cPartDescs, sizeof(pRawDesc->pPartDescs[0]), vmdkRawDescPartComp, NULL); 3668 3446 3669 /* 3447 3670 * Check that we don't have overlapping descriptors. If we do, that's an … … 3458 3681 paPartDescs[i].pvPartitionData ? " (data)" : ""); 3459 3682 offLast -= 1; 3683 3460 3684 if (i + 1 < pRawDesc->cPartDescs && offLast >= paPartDescs[i + 1].offStartInVDisk) 3461 3685 return vdIfError(pImage->pIfError, VERR_FILESYSTEM_CORRUPT /*?*/, RT_SRC_POS, … … 3470 3694 paPartDescs[i].pvPartitionData ? " (data)" : "", cbSize); 3471 3695 } 3696 3472 3697 return VINF_SUCCESS; 3473 3698 } 3699 3700 3474 3701 #ifdef RT_OS_LINUX 3475 3702 /** … … 3494 3721 size_t const cchDir = RTPathEnsureTrailingSeparator(pszBlockDevDir, cbBlockDevDir); 3495 3722 AssertReturn(cchDir > 0, VERR_BUFFER_OVERFLOW); 3723 3496 3724 RTDIR hDir = NIL_RTDIR; 3497 3725 int rc = RTDirOpen(&hDir, pszBlockDevDir); … … 3511 3739 rc = RTStrCopy(&pszBlockDevDir[cchDir], cbBlockDevDir - cchDir, Entry.szName); 3512 3740 AssertContinue(RT_SUCCESS(rc)); /* should not happen! */ 3741 3513 3742 dev_t uThisDevNo = ~uDevToLocate; 3514 3743 rc = RTLinuxSysFsReadDevNumFile(&uThisDevNo, "%s/dev", pszBlockDevDir); … … 3540 3769 } 3541 3770 #endif /* RT_OS_LINUX */ 3771 3542 3772 #ifdef RT_OS_FREEBSD 3773 3774 3543 3775 /** 3544 3776 * Reads the config data from the provider and returns offset and size … … 3553 3785 gconfig *pConfEntry; 3554 3786 int rc = VERR_NOT_FOUND; 3787 3555 3788 /* 3556 3789 * Required parameters are located in the list containing key/value pairs. … … 3583 3816 return rc; 3584 3817 } 3818 3819 3585 3820 /** 3586 3821 * Searches the partition specified by name and calculates its size and absolute offset. … … 3601 3836 AssertReturn(pcbAbsoluteOffset, VERR_INVALID_PARAMETER); 3602 3837 AssertReturn(pcbSize, VERR_INVALID_PARAMETER); 3838 3603 3839 ggeom *pParentGeom; 3604 3840 int rc = VERR_NOT_FOUND; … … 3613 3849 if (RT_FAILURE(rc)) 3614 3850 return rc; 3851 3615 3852 gprovider *pProvider; 3616 3853 /* … … 3624 3861 return vmdkReadPartitionsParamsFromProvider(pProvider, pcbAbsoluteOffset, pcbSize); 3625 3862 } 3863 3626 3864 /* 3627 3865 * No provider found. 
Go over the parent geom again … … 3633 3871 * provider 3634 3872 */ 3873 3635 3874 LIST_FOREACH(pProvider, &pParentGeom->lg_provider, lg_provider) 3636 3875 { … … 3640 3879 if (RT_FAILURE(rc)) 3641 3880 return rc; 3881 3642 3882 uint64_t cbProviderOffset = 0; 3643 3883 uint64_t cbProviderSize = 0; … … 3650 3890 } 3651 3891 } 3892 3652 3893 return VERR_NOT_FOUND; 3653 3894 } 3654 3895 #endif 3896 3897 3655 3898 /** 3656 3899 * Attempts to verify the raw partition path. … … 3662 3905 { 3663 3906 RT_NOREF(pImage, pPartDesc, idxPartition, pszRawDrive, hRawDrive, cbSector, hVol); 3907 3664 3908 /* 3665 3909 * Try open the raw partition device. … … 3671 3915 N_("VMDK: Image path: '%s'. Failed to open partition #%u on '%s' via '%s' (%Rrc)"), 3672 3916 pImage->pszFilename, idxPartition, pszRawDrive, pPartDesc->pszRawDevice, rc); 3917 3673 3918 /* 3674 3919 * Compare the partition UUID if we can get it. … … 3676 3921 #ifdef RT_OS_WINDOWS 3677 3922 DWORD cbReturned; 3923 3678 3924 /* 1. Get the device numbers for both handles, they should have the same disk. */ 3679 3925 STORAGE_DEVICE_NUMBER DevNum1; … … 3684 3930 N_("VMDK: Image path: '%s'. IOCTL_STORAGE_GET_DEVICE_NUMBER failed on '%s': %u"), 3685 3931 pImage->pszFilename, pszRawDrive, GetLastError()); 3932 3686 3933 STORAGE_DEVICE_NUMBER DevNum2; 3687 3934 RT_ZERO(DevNum2); … … 3775 4022 rc = VERR_NO_TMP_MEMORY; 3776 4023 } 4024 3777 4025 #elif defined(RT_OS_LINUX) 3778 4026 RT_NOREF(hVol); 4027 3779 4028 /* Stat the two devices first to get their device numbers. (We probably 3780 4029 could make some assumptions here about the major & minor number assignments … … 3797 4046 { 3798 4047 rc = vmdkFindSysBlockDevPath(pImage, szSysPath, sizeof(szSysPath), StDrive.st_rdev, pszRawDrive); 4048 3799 4049 /* Now, scan the directories under that again for a partition device 3800 4050 matching the hRawPart device's number: */ 3801 4051 if (RT_SUCCESS(rc)) 3802 4052 rc = vmdkFindSysBlockDevPath(pImage, szSysPath, sizeof(szSysPath), StPart.st_rdev, pPartDesc->pszRawDevice); 4053 3803 4054 /* Having found the /sys/block/device/partition/ path, we can finally 3804 4055 read the partition attributes and compare with hVol. */ … … 3813 4064 pImage->pszFilename, idxPartition, pPartDesc->pszRawDevice, pszRawDrive, iLnxPartition, idxPartition); 3814 4065 /* else: ignore failure? */ 4066 3815 4067 /* start offset: */ 3816 4068 uint32_t const cbLnxSector = 512; /* It's hardcoded in the Linux kernel */ … … 3826 4078 /* else: ignore failure? */ 3827 4079 } 4080 3828 4081 /* the size: */ 3829 4082 if (RT_SUCCESS(rc)) … … 3842 4095 /* else: We've got nothing to work on, so only do content comparison. */ 3843 4096 } 4097 3844 4098 #elif defined(RT_OS_FREEBSD) 3845 4099 char szDriveDevName[256]; … … 3872 4126 rc = vdIfError(pImage->pIfError, VERR_GENERAL_FAILURE, RT_SRC_POS, 3873 4127 N_("VMDK: Image path: '%s'. 'PART' class not found in the GEOM tree"), pImage->pszFilename); 4128 4129 3874 4130 if (RT_SUCCESS(rc)) 3875 4131 { … … 3894 4150 pImage->pszFilename, pPartDesc->pszRawDevice, pszRawDrive, rc); 3895 4151 } 4152 3896 4153 geom_deletetree(&geomMesh); 3897 4154 } … … 3900 4157 N_("VMDK: Image path: '%s'. geom_gettree failed: %d"), pImage->pszFilename, err); 3901 4158 } 4159 3902 4160 #elif defined(RT_OS_SOLARIS) 3903 4161 RT_NOREF(hVol); 4162 3904 4163 dk_cinfo dkiDriveInfo; 3905 4164 dk_cinfo dkiPartInfo; … … 3949 4208 * using another way. If there is an error, it returns errno which will be handled below. 
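Several of the per-OS verification branches above start from the same fact: both the whole drive and the partition device can be resolved to kernel device numbers, which is what ties the user-supplied partition path back to the selected drive (the Linux branch stats both nodes and then looks the numbers up under /sys/block). A tiny POSIX sketch of that first step, with hypothetical names:

#include <sys/stat.h>
#include <sys/types.h>

/* Sketch of the first step of the Linux verification branch: obtain the
 * device numbers of the whole drive and of the partition device node. */
static int sketchQueryDevNumbers(const char *pszDrive, const char *pszPart,
                                 dev_t *pDevDrive, dev_t *pDevPart)
{
    struct stat StDrive, StPart;
    if (stat(pszDrive, &StDrive) != 0 || stat(pszPart, &StPart) != 0)
        return -1;
    if (!S_ISBLK(StDrive.st_mode) || !S_ISBLK(StPart.st_mode))
        return -1;                        /* both must be block device nodes */
    *pDevDrive = StDrive.st_rdev;
    *pDevPart  = StPart.st_rdev;
    return 0;
}

What happens with the numbers afterwards is OS specific; on Linux the matching /sys/block/<drive>/<partition>/ directory is then used to read the partition index, start offset and size attributes, as shown above.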
3950 4209 */ 4210 3951 4211 uint32_t numPartition = (uint32_t)dkiPartInfo.dki_partition; 3952 4212 if (numPartition > NDKMAP) … … 3983 4243 N_("VMDK: Image path: '%s'. Partition #%u path ('%s') verification failed on '%s': Start offset %RI64, expected %RU64"), 3984 4244 pImage->pszFilename, idxPartition, pPartDesc->pszRawDevice, pszRawDrive, cbOffset, pPartDesc->offStartInVDisk); 4245 3985 4246 if (RT_SUCCESS(rc) && cbSize != pPartDesc->cbData) 3986 4247 rc = vdIfError(pImage->pIfError, VERR_MISMATCH, RT_SRC_POS, … … 4058 4319 #else 4059 4320 RT_NOREF(hVol); /* PORTME */ 4321 rc = VERR_NOT_SUPPORTED; 4060 4322 #endif 4061 4323 if (RT_SUCCESS(rc)) … … 4073 4335 { 4074 4336 uint8_t *pbSector2 = pbSector1 + cbToCompare; 4337 4075 4338 /* Do the comparing, we repeat if it fails and the data might be volatile. */ 4076 4339 uint64_t uPrevCrc1 = 0; … … 4088 4351 { 4089 4352 rc = VERR_MISMATCH; 4353 4090 4354 /* Do data stability checks before repeating: */ 4091 4355 uint64_t const uCrc1 = RTCrc64(pbSector1, cbToCompare); … … 4120 4384 offMissmatch++; 4121 4385 int cbSample = (int)RT_MIN(cbToCompare - offMissmatch, 16); 4386 4122 4387 if (cStable > 0) 4123 4388 rc = vdIfError(pImage->pIfError, rc, RT_SRC_POS, … … 4133 4398 } 4134 4399 } 4400 4135 4401 RTMemTmpFree(pbSector1); 4136 4402 } … … 4143 4409 return rc; 4144 4410 } 4411 4145 4412 #ifdef RT_OS_WINDOWS 4146 4413 /** … … 4164 4431 } 4165 4432 #endif /* RT_OS_WINDOWS */ 4433 4166 4434 /** 4167 4435 * Worker for vmdkMakeRawDescriptor that adds partition descriptors when the … … 4180 4448 { 4181 4449 *phVolToRelease = NIL_RTDVMVOLUME; 4450 4182 4451 /* Check sanity/understanding. */ 4183 4452 Assert(fPartitions); 4184 4453 Assert((fPartitions & fPartitionsReadOnly) == fPartitionsReadOnly); /* RO should be a sub-set */ 4454 4185 4455 /* 4186 4456 * Allocate on descriptor for each volume up front. 4187 4457 */ 4188 4458 uint32_t const cVolumes = RTDvmMapGetValidVolumes(hVolMgr); 4459 4189 4460 PVDISKRAWPARTDESC paPartDescs = NULL; 4190 4461 int rc = vmdkRawDescAppendPartDesc(pImage, pRawDesc, cVolumes, &paPartDescs); 4191 4462 AssertRCReturn(rc, rc); 4463 4192 4464 /* 4193 4465 * Enumerate the partitions (volumes) on the disk and create descriptors for each of them. … … 4212 4484 Assert(cRefs != UINT32_MAX); RT_NOREF(cRefs); 4213 4485 *phVolToRelease = hVol = hVolNext; 4486 4214 4487 /* 4215 4488 * Depending on the fPartitions selector and associated read-only mask, … … 4218 4491 */ 4219 4492 paPartDescs[i].cbData = RTDvmVolumeGetSize(hVol); 4493 4220 4494 uint64_t offVolumeEndIgnored = 0; 4221 4495 rc = RTDvmVolumeQueryRange(hVol, &paPartDescs[i].offStartInVDisk, &offVolumeEndIgnored); … … 4225 4499 pImage->pszFilename, i, pszRawDrive, rc); 4226 4500 Assert(paPartDescs[i].cbData == offVolumeEndIgnored + 1 - paPartDescs[i].offStartInVDisk); 4501 4227 4502 /* Note! The index must match IHostDrivePartition::number. 
*/ 4228 4503 uint32_t idxPartition = RTDvmVolumeGetIndex(hVol, RTDVMVOLIDX_HOST); … … 4233 4508 if (fPartitionsReadOnly & RT_BIT_32(idxPartition)) 4234 4509 paPartDescs[i].uFlags |= VDISKRAW_READONLY; 4510 4235 4511 if (!fRelative) 4236 4512 { … … 4253 4529 */ 4254 4530 paPartDescs[i].offStartInDevice = 0; 4531 4255 4532 #if defined(RT_OS_DARWIN) || defined(RT_OS_FREEBSD) 4256 4533 /* /dev/rdisk1 -> /dev/rdisk1s2 (s=slice) */ … … 4306 4583 #endif 4307 4584 AssertPtrReturn(paPartDescs[i].pszRawDevice, VERR_NO_STR_MEMORY); 4585 4308 4586 rc = vmdkRawDescVerifyPartitionPath(pImage, &paPartDescs[i], idxPartition, pszRawDrive, hRawDrive, cbSector, hVol); 4309 4587 AssertRCReturn(rc, rc); … … 4317 4595 } 4318 4596 } /* for each volume */ 4597 4319 4598 RTDvmVolumeRelease(hVol); 4320 4599 *phVolToRelease = NIL_RTDVMVOLUME; 4600 4321 4601 /* 4322 4602 * Check that we found all the partitions the user selected. … … 4333 4613 pImage->pszFilename, pszRawDrive, szLeft); 4334 4614 } 4615 4335 4616 return VINF_SUCCESS; 4336 4617 } 4618 4337 4619 /** 4338 4620 * Worker for vmdkMakeRawDescriptor that adds partition descriptors with copies … … 4365 4647 pImage->pszFilename, pszRawDrive, rc); 4366 4648 AssertReturn(cLocations > 0 && cLocations < _16M, VERR_INTERNAL_ERROR_5); 4649 4367 4650 /* We can allocate the partition descriptors here to save an intentation level. */ 4368 4651 PVDISKRAWPARTDESC paPartDescs = NULL; 4369 4652 rc = vmdkRawDescAppendPartDesc(pImage, pRawDesc, (uint32_t)cLocations, &paPartDescs); 4370 4653 AssertRCReturn(rc, rc); 4654 4371 4655 /* Allocate the result table and repeat the location table query: */ 4372 4656 PRTDVMTABLELOCATION paLocations = (PRTDVMTABLELOCATION)RTMemAllocZ(sizeof(paLocations[0]) * cLocations); … … 4448 4732 return rc; 4449 4733 } 4734 4450 4735 /** 4451 4736 * Opens the volume manager for the raw drive when in selected-partition mode. … … 4463 4748 { 4464 4749 *phVolMgr = NIL_RTDVM; 4750 4465 4751 RTVFSFILE hVfsFile = NIL_RTVFSFILE; 4466 4752 int rc = RTVfsFileFromRTFile(hRawDrive, RTFILE_O_READ | RTFILE_O_OPEN | RTFILE_O_DENY_NONE, true /*fLeaveOpen*/, &hVfsFile); … … 4469 4755 N_("VMDK: Image path: '%s'. RTVfsFileFromRTFile failed for '%s' handle (%Rrc)"), 4470 4756 pImage->pszFilename, pszRawDrive, rc); 4757 4471 4758 RTDVM hVolMgr = NIL_RTDVM; 4472 4759 rc = RTDvmCreate(&hVolMgr, hVfsFile, cbSector, 0 /*fFlags*/); 4760 4473 4761 RTVfsFileRelease(hVfsFile); 4762 4474 4763 if (RT_FAILURE(rc)) 4475 4764 return vdIfError(pImage->pIfError, rc, RT_SRC_POS, 4476 4765 N_("VMDK: Image path: '%s'. Failed to create volume manager instance for '%s' (%Rrc)"), 4477 4766 pImage->pszFilename, pszRawDrive, rc); 4767 4478 4768 rc = RTDvmMapOpen(hVolMgr); 4479 4769 if (RT_SUCCESS(rc)) … … 4486 4776 pImage->pszFilename, pszRawDrive, rc); 4487 4777 } 4778 4488 4779 /** 4489 4780 * Opens the raw drive device and get the sizes for it. … … 4509 4800 N_("VMDK: Image path: '%s'. Failed to open the raw drive '%s' for reading (%Rrc)"), 4510 4801 pImage->pszFilename, pszRawDrive, rc); 4802 4511 4803 /* 4512 4804 * Get the sector size. … … 4557 4849 return rc; 4558 4850 } 4851 4559 4852 /** 4560 4853 * Reads the raw disk configuration, leaving initalization and cleanup to the … … 4573 4866 return vdIfError(pImage->pIfError, VERR_INVALID_PARAMETER, RT_SRC_POS, 4574 4867 N_("VMDK: Image path: '%s'. Getting config interface failed"), pImage->pszFilename); 4868 4575 4869 /* 4576 4870 * RawDrive = path … … 4581 4875 N_("VMDK: Image path: '%s'. 
Getting 'RawDrive' configuration failed (%Rrc)"), pImage->pszFilename, rc); 4582 4876 AssertPtrReturn(*ppszRawDrive, VERR_INTERNAL_ERROR_3); 4877 4583 4878 /* 4584 4879 * Partitions=n[r][,...] … … 4586 4881 uint32_t const cMaxPartitionBits = sizeof(*pfPartitions) * 8 /* ASSUMES 8 bits per char */; 4587 4882 *pfPartitions = *pfPartitionsReadOnly = 0; 4883 4588 4884 rc = VDCFGQueryStringAlloc(pImgCfg, "Partitions", ppszFreeMe); 4589 4885 if (RT_SUCCESS(rc)) … … 4619 4915 pImage->pszFilename, psz); 4620 4916 } 4917 4621 4918 RTStrFree(*ppszFreeMe); 4622 4919 *ppszFreeMe = NULL; … … 4625 4922 return vdIfError(pImage->pIfError, rc, RT_SRC_POS, 4626 4923 N_("VMDK: Image path: '%s'. Getting 'Partitions' configuration failed (%Rrc)"), pImage->pszFilename, rc); 4924 4627 4925 /* 4628 4926 * BootSector=base64 … … 4644 4942 N_("VMDK: Image path: '%s'. Custom bootsector for '%s' is way too big: %zu bytes, max 4MB"), 4645 4943 pImage->pszFilename, *ppszRawDrive, cbBootSector); 4944 4646 4945 /* Refuse the boot sector if whole-drive. This used to be done quietly, 4647 4946 however, bird disagrees and thinks the user should be told that what … … 4652 4951 N_("VMDK: Image path: '%s'. Custom bootsector for '%s' is not supported for whole-drive configurations, only when selecting partitions"), 4653 4952 pImage->pszFilename, *ppszRawDrive); 4953 4654 4954 *pcbBootSector = (size_t)cbBootSector; 4655 4955 *ppvBootSector = RTMemAlloc((size_t)cbBootSector); … … 4658 4958 N_("VMDK: Image path: '%s'. Failed to allocate %zd bytes for the custom bootsector for '%s'"), 4659 4959 pImage->pszFilename, cbBootSector, *ppszRawDrive); 4960 4660 4961 rc = RTBase64Decode(*ppszFreeMe, *ppvBootSector, cbBootSector, NULL /*pcbActual*/, NULL /*ppszEnd*/); 4661 4962 if (RT_FAILURE(rc)) … … 4663 4964 N_("VMDK: Image path: '%s'. Base64 decoding of the custom boot sector for '%s' failed (%Rrc)"), 4664 4965 pImage->pszFilename, *ppszRawDrive, rc); 4966 4665 4967 RTStrFree(*ppszFreeMe); 4666 4968 *ppszFreeMe = NULL; … … 4669 4971 return vdIfError(pImage->pIfError, rc, RT_SRC_POS, 4670 4972 N_("VMDK: Image path: '%s'. Getting 'BootSector' configuration failed (%Rrc)"), pImage->pszFilename, rc); 4973 4671 4974 /* 4672 4975 * Relative=0/1 … … 4696 4999 *pfRelative = false; 4697 5000 #endif 5001 4698 5002 return VINF_SUCCESS; 4699 5003 } 5004 4700 5005 /** 4701 5006 * Creates a raw drive (nee disk) descriptor. … … 4716 5021 /* Make sure it's NULL. */ 4717 5022 *ppRaw = NULL; 5023 4718 5024 /* 4719 5025 * Read the configuration. … … 4740 5046 if (RT_SUCCESS(rc)) 4741 5047 { 5048 pImage->cbSize = cbSize; 4742 5049 /* 4743 5050 * Create the raw-drive descriptor … … 4767 5074 //pRawDesc->cPartDescs = 0; 4768 5075 //pRawDesc->pPartDescs = NULL; 5076 4769 5077 /* We need to parse the partition map to complete the descriptor: */ 4770 5078 RTDVM hVolMgr = NIL_RTDVM; … … 4778 5086 pRawDesc->enmPartitioningType = enmFormatType == RTDVMFORMATTYPE_MBR 4779 5087 ? VDISKPARTTYPE_MBR : VDISKPARTTYPE_GPT; 5088 4780 5089 /* Add copies of the partition tables: */ 4781 5090 rc = vmdkRawDescDoCopyPartitionTables(pImage, hVolMgr, pRawDesc, pszRawDrive, hRawDrive, … … 4789 5098 fPartitions, fPartitionsReadOnly, fRelative, &hVolRelease); 4790 5099 RTDvmVolumeRelease(hVolRelease); 5100 4791 5101 /* Finally, sort the partition and check consistency (overlaps, etc): */ 4792 5102 if (RT_SUCCESS(rc)) … … 4832 5142 return rc; 4833 5143 } 5144 4834 5145 /** 4835 5146 * Internal: create VMDK images for raw disk/partition access. 
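The 'Partitions=n[r][,...]' string handled in the configuration reader above ends up as two bitmasks: one bit per selected partition number and a second mask for the partitions that should be attached read-only (the trailing 'r'). A standalone parser sketch for the same format, assuming 32-bit masks and reducing error reporting to a boolean; the names are illustrative only:

#include <stdint.h>
#include <stdbool.h>
#include <stdlib.h>

/* Parse "n[r][,n[r]...]" into a selection mask and a read-only mask. */
static bool sketchParsePartitions(const char *psz, uint32_t *pfPartitions, uint32_t *pfReadOnly)
{
    *pfPartitions = 0;
    *pfReadOnly   = 0;
    while (*psz != '\0')
    {
        char *pszNext = NULL;
        unsigned long uPart = strtoul(psz, &pszNext, 10);
        if (pszNext == psz || uPart >= 32)
            return false;                        /* not a number or out of mask range */
        *pfPartitions |= UINT32_C(1) << uPart;
        psz = pszNext;
        if (*psz == 'r')                         /* optional read-only marker */
        {
            *pfReadOnly |= UINT32_C(1) << uPart;
            psz++;
        }
        if (*psz == ',')
            psz++;
        else if (*psz != '\0')
            return false;                        /* unexpected character */
    }
    return true;
}

For example, "Partitions=1,2r,5" selects partitions 1, 2 and 5 and attaches partition 2 read-only.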
… … 4840 5151 int rc = VINF_SUCCESS; 4841 5152 PVMDKEXTENT pExtent; 5153 4842 5154 if (pRaw->uFlags & VDISKRAW_DISK) 4843 5155 { … … 4854 5166 if (RT_FAILURE(rc)) 4855 5167 return vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: could not create new file '%s'"), pImage->pszFilename); 5168 4856 5169 /* Set up basename for extent description. Cannot use StrDup. */ 4857 5170 size_t cbBasename = strlen(pRaw->pszRawDisk) + 1; … … 4870 5183 pExtent->enmAccess = (pRaw->uFlags & VDISKRAW_READONLY) ? VMDKACCESS_READONLY : VMDKACCESS_READWRITE; 4871 5184 pExtent->fMetaDirty = false; 5185 4872 5186 /* Open flat image, the raw disk. */ 4873 5187 rc = vmdkFileOpen(pImage, &pExtent->pFile, pExtent->pszBasename, pExtent->pszFullname, … … 4882 5196 * file, write the partition information to a flat extent and 4883 5197 * open all the (flat) raw disk partitions. */ 5198 4884 5199 /* First pass over the partition data areas to determine how many 4885 5200 * extents we need. One data area can require up to 2 extents, as … … 4893 5208 return vdIfError(pImage->pIfError, VERR_INVALID_PARAMETER, RT_SRC_POS, 4894 5209 N_("VMDK: incorrect partition data area ordering set up by the caller in '%s'"), pImage->pszFilename); 5210 4895 5211 if (uStart < pPart->offStartInVDisk) 4896 5212 cExtents++; … … 4901 5217 if (uStart != cbSize) 4902 5218 cExtents++; 5219 4903 5220 rc = vmdkCreateExtents(pImage, cExtents); 4904 5221 if (RT_FAILURE(rc)) 4905 5222 return vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: could not create new extent list in '%s'"), pImage->pszFilename); 5223 4906 5224 /* Create raw partition descriptor file. */ 4907 5225 rc = vmdkFileOpen(pImage, &pImage->pFile, NULL, pImage->pszFilename, … … 4910 5228 if (RT_FAILURE(rc)) 4911 5229 return vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: could not create new file '%s'"), pImage->pszFilename); 5230 4912 5231 /* Create base filename for the partition table extent. */ 4913 5232 /** @todo remove fixed buffer without creating memory leaks. */ … … 4924 5243 pszBaseBase, pszSuff); 4925 5244 RTStrFree(pszBaseBase); 5245 4926 5246 /* Second pass over the partitions, now define all extents. */ 4927 5247 uint64_t uPartOffset = 0; … … 4932 5252 PVDISKRAWPARTDESC pPart = &pRaw->pPartDescs[i]; 4933 5253 pExtent = &pImage->pExtents[cExtents++]; 5254 4934 5255 if (uStart < pPart->offStartInVDisk) 4935 5256 { … … 4945 5266 } 4946 5267 uStart = pPart->offStartInVDisk + pPart->cbData; 5268 4947 5269 if (pPart->pvPartitionData) 4948 5270 { … … 4954 5276 memcpy(pszBasename, pszPartition, cbBasename); 4955 5277 pExtent->pszBasename = pszBasename; 5278 4956 5279 /* Set up full name for partition extent. */ 4957 5280 char *pszDirname = RTStrDup(pImage->pszFilename); … … 4969 5292 pExtent->enmAccess = VMDKACCESS_READWRITE; 4970 5293 pExtent->fMetaDirty = false; 5294 4971 5295 /* Create partition table flat image. */ 4972 5296 rc = vmdkFileOpen(pImage, &pExtent->pFile, pExtent->pszBasename, pExtent->pszFullname, … … 5003 5327 pExtent->enmAccess = (pPart->uFlags & VDISKRAW_READONLY) ? VMDKACCESS_READONLY : VMDKACCESS_READWRITE; 5004 5328 pExtent->fMetaDirty = false; 5329 5005 5330 /* Open flat image, the raw partition. */ 5006 5331 rc = vmdkFileOpen(pImage, &pExtent->pFile, pExtent->pszBasename, pExtent->pszFullname, … … 5035 5360 } 5036 5361 } 5362 5037 5363 rc = vmdkDescBaseSetStr(pImage, &pImage->Descriptor, "createType", 5038 5364 (pRaw->uFlags & VDISKRAW_DISK) ? … … 5042 5368 return rc; 5043 5369 } 5370 5044 5371 /** 5045 5372 * Internal: create a regular (i.e. 
file-backed) VMDK image. … … 5053 5380 uint64_t cbOffset = 0; 5054 5381 uint64_t cbRemaining = cbSize; 5382 5055 5383 if (uImageFlags & VD_VMDK_IMAGE_FLAGS_SPLIT_2G) 5056 5384 { … … 5064 5392 if (RT_FAILURE(rc)) 5065 5393 return vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: could not create new extent list in '%s'"), pImage->pszFilename); 5394 5066 5395 /* Basename strings needed for constructing the extent names. */ 5067 5396 char *pszBasenameSubstr = RTPathFilename(pImage->pszFilename); 5068 5397 AssertPtr(pszBasenameSubstr); 5069 5398 size_t cbBasenameSubstr = strlen(pszBasenameSubstr) + 1; 5399 5070 5400 /* Create separate descriptor file if necessary. */ 5071 5401 if (cExtents != 1 || (uImageFlags & VD_IMAGE_FLAGS_FIXED)) … … 5079 5409 else 5080 5410 pImage->pFile = NULL; 5411 5081 5412 /* Set up all extents. */ 5082 5413 for (unsigned i = 0; i < cExtents; i++) … … 5084 5415 PVMDKEXTENT pExtent = &pImage->pExtents[i]; 5085 5416 uint64_t cbExtent = cbRemaining; 5417 5086 5418 /* Set up fullname/basename for extent description. Cannot use StrDup 5087 5419 * for basename, as it is not guaranteed that the memory can be freed … … 5140 5472 return VERR_NO_STR_MEMORY; 5141 5473 pExtent->pszFullname = pszFullname; 5474 5142 5475 /* Create file for extent. */ 5143 5476 rc = vmdkFileOpen(pImage, &pExtent->pFile, pExtent->pszBasename, pExtent->pszFullname, … … 5155 5488 return vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: could not set size of new file '%s'"), pExtent->pszFullname); 5156 5489 } 5490 5157 5491 /* Place descriptor file information (where integrated). */ 5158 5492 if (cExtents == 1 && !(uImageFlags & VD_IMAGE_FLAGS_FIXED)) … … 5164 5498 pImage->pDescData = NULL; 5165 5499 } 5500 5166 5501 if (!(uImageFlags & VD_IMAGE_FLAGS_FIXED)) 5167 5502 { … … 5191 5526 pExtent->enmType = VMDKETYPE_FLAT; 5192 5527 } 5528 5193 5529 pExtent->enmAccess = VMDKACCESS_READWRITE; 5194 5530 pExtent->fUncleanShutdown = true; … … 5196 5532 pExtent->uSectorOffset = 0; 5197 5533 pExtent->fMetaDirty = true; 5534 5198 5535 if (!(uImageFlags & VD_IMAGE_FLAGS_FIXED)) 5199 5536 { … … 5207 5544 return vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: could not create new grain directory in '%s'"), pExtent->pszFullname); 5208 5545 } 5546 5209 5547 cbOffset += cbExtent; 5548 5210 5549 if (RT_SUCCESS(rc)) 5211 5550 vdIfProgress(pIfProgress, uPercentStart + cbOffset * uPercentSpan / cbSize); 5551 5212 5552 cbRemaining -= cbExtent; 5213 5553 } 5554 5214 5555 if (pImage->uImageFlags & VD_VMDK_IMAGE_FLAGS_ESX) 5215 5556 { … … 5220 5561 return vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: could not set controller type to lsilogic in '%s'"), pImage->pszFilename); 5221 5562 } 5563 5222 5564 const char *pszDescType = NULL; 5223 5565 if (uImageFlags & VD_IMAGE_FLAGS_FIXED) … … 5245 5587 return rc; 5246 5588 } 5589 5247 5590 /** 5248 5591 * Internal: Create a real stream optimized VMDK using only linear writes. … … 5253 5596 if (RT_FAILURE(rc)) 5254 5597 return vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: could not create new extent list in '%s'"), pImage->pszFilename); 5598 5255 5599 /* Basename strings needed for constructing the extent names. */ 5256 5600 const char *pszBasenameSubstr = RTPathFilename(pImage->pszFilename); 5257 5601 AssertPtr(pszBasenameSubstr); 5258 5602 size_t cbBasenameSubstr = strlen(pszBasenameSubstr) + 1; 5603 5259 5604 /* No separate descriptor file. */ 5260 5605 pImage->pFile = NULL; 5606 5261 5607 /* Set up all extents. 
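For the VD_VMDK_IMAGE_FLAGS_SPLIT_2G path above, the number of extent files and the size of the last one follow directly from the requested disk size. The helper below illustrates the arithmetic under the assumption of a 2 GiB split size; the constant is a stand-in, not the backend's own define:

#include <stdint.h>

#define SKETCH_SPLIT_SIZE   (UINT64_C(2) * 1024 * 1024 * 1024)  /* assumed 2 GiB split */

/* How many extent files a split image needs and how large the last one is. */
static void sketchSplitLayout(uint64_t cbSize, unsigned *pcExtents, uint64_t *pcbLastExtent)
{
    unsigned cExtents = (unsigned)((cbSize + SKETCH_SPLIT_SIZE - 1) / SKETCH_SPLIT_SIZE);
    if (cExtents == 0)
        cExtents = 1;                             /* even an empty disk gets one extent */
    *pcExtents     = cExtents;
    *pcbLastExtent = cbSize - (uint64_t)(cExtents - 1) * SKETCH_SPLIT_SIZE;
}

A 5 GiB image, for instance, ends up as two 2 GiB extents plus a 1 GiB tail extent.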
*/ 5262 5608 PVMDKEXTENT pExtent = &pImage->pExtents[0]; 5609 5263 5610 /* Set up fullname/basename for extent description. Cannot use StrDup 5264 5611 * for basename, as it is not guaranteed that the memory can be freed … … 5270 5617 memcpy(pszBasename, pszBasenameSubstr, cbBasenameSubstr); 5271 5618 pExtent->pszBasename = pszBasename; 5619 5272 5620 char *pszBasedirectory = RTStrDup(pImage->pszFilename); 5273 5621 RTPathStripFilename(pszBasedirectory); … … 5277 5625 return VERR_NO_STR_MEMORY; 5278 5626 pExtent->pszFullname = pszFullname; 5627 5279 5628 /* Create file for extent. Make it write only, no reading allowed. */ 5280 5629 rc = vmdkFileOpen(pImage, &pExtent->pFile, pExtent->pszBasename, pExtent->pszFullname, … … 5284 5633 if (RT_FAILURE(rc)) 5285 5634 return vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: could not create new file '%s'"), pExtent->pszFullname); 5635 5286 5636 /* Place descriptor file information. */ 5287 5637 pExtent->uDescriptorSector = 1; … … 5290 5640 pExtent->pDescData = pImage->pDescData; 5291 5641 pImage->pDescData = NULL; 5642 5292 5643 uint64_t cSectorsPerGDE, cSectorsPerGD; 5293 5644 pExtent->enmType = VMDKETYPE_HOSTED_SPARSE; … … 5299 5650 pExtent->cGDEntries = (pExtent->cSectors + cSectorsPerGDE - 1) / cSectorsPerGDE; 5300 5651 cSectorsPerGD = (pExtent->cGDEntries + (512 / sizeof(uint32_t) - 1)) / (512 / sizeof(uint32_t)); 5652 5301 5653 /* The spec says version is 1 for all VMDKs, but the vast 5302 5654 * majority of streamOptimized VMDKs actually contain … … 5305 5657 pExtent->uCompression = VMDK_COMPRESSION_DEFLATE; 5306 5658 pExtent->fFooter = true; 5659 5307 5660 pExtent->enmAccess = VMDKACCESS_READONLY; 5308 5661 pExtent->fUncleanShutdown = false; … … 5310 5663 pExtent->uSectorOffset = 0; 5311 5664 pExtent->fMetaDirty = true; 5665 5312 5666 /* Create grain directory, without preallocating it straight away. It will 5313 5667 * be constructed on the fly when writing out the data and written when … … 5318 5672 if (RT_FAILURE(rc)) 5319 5673 return vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: could not create new grain directory in '%s'"), pExtent->pszFullname); 5674 5320 5675 rc = vmdkDescBaseSetStr(pImage, &pImage->Descriptor, "createType", 5321 5676 "streamOptimized"); 5322 5677 if (RT_FAILURE(rc)) 5323 5678 rc = vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: could not set the image type in '%s'"), pImage->pszFilename); 5679 5324 5680 return rc; 5325 5681 } 5682 5326 5683 /** 5327 5684 * Initializes the UUID fields in the DDB. 
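Everything the streamOptimized writer emits besides grain data (grain tables, the grain directory, the footer and the end-of-stream mark) travels in 512-byte markers, and the sequential reader further down classifies blocks the same way in reverse. A sketch of the marker fields as they are used in this file; the struct name is made up, and the field meanings follow the VMDK_MARKER_* constants at the top of the file and the reader code below:

#include <stdint.h>

/* On-disk marker of a streamOptimized extent (all fields little endian). */
typedef struct SKETCHMARKER
{
    uint64_t uSector;   /* grain: virtual sector of the grain; metadata: size of the block that follows, in sectors */
    uint32_t cbSize;    /* 0 for metadata markers, compressed payload size for grains */
    uint32_t uType;     /* only meaningful when cbSize == 0 */
} SKETCHMARKER;

#define SKETCH_MARKER_EOS    0   /* end of stream */
#define SKETCH_MARKER_GT     1   /* grain table block follows */
#define SKETCH_MARKER_GD     2   /* grain directory block follows */
#define SKETCH_MARKER_FOOTER 3   /* copy of the sparse header follows */

/* Classify a marker the same way the sequential reader does: a non-zero size
 * means a compressed grain, otherwise the type field decides. */
static int sketchClassifyMarker(const SKETCHMARKER *pMarker)
{
    if (pMarker->cbSize != 0)
        return -1;                /* compressed grain data follows the marker */
    return (int)pMarker->uType;   /* EOS / GT / GD / FOOTER / ... */
}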
… … 5359 5716 rc = vdIfError(pImage->pIfError, rc, RT_SRC_POS, 5360 5717 N_("VMDK: error storing image UUID in new descriptor in '%s'"), pImage->pszFilename); 5718 5361 5719 return rc; 5362 5720 } 5721 5363 5722 /** 5364 5723 * Internal: The actual code for creating any VMDK variant currently in … … 5373 5732 { 5374 5733 pImage->uImageFlags = uImageFlags; 5734 5375 5735 pImage->pIfError = VDIfErrorGet(pImage->pVDIfsDisk); 5376 5736 pImage->pIfIo = VDIfIoIntGet(pImage->pVDIfsImage); 5377 5737 AssertPtrReturn(pImage->pIfIo, VERR_INVALID_PARAMETER); 5738 5378 5739 int rc = vmdkCreateDescriptor(pImage, pImage->pDescData, pImage->cbDescAlloc, 5379 5740 &pImage->Descriptor); … … 5386 5747 rc = vmdkMakeRawDescriptor(pImage, &pRaw); 5387 5748 if (RT_FAILURE(rc)) 5388 return vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: could get raw descriptor for '%s'"), pImage->pszFilename); 5749 return vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: could not create raw descriptor for '%s'"), 5750 pImage->pszFilename); 5751 if (!cbSize) 5752 cbSize = pImage->cbSize; 5753 5389 5754 rc = vmdkCreateRawImage(pImage, pRaw, cbSize); 5390 5755 vmdkRawDescFree(pRaw); … … 5402 5767 uPercentSpan * 95 / 100); 5403 5768 } 5769 5404 5770 if (RT_SUCCESS(rc)) 5405 5771 { 5406 5772 vdIfProgress(pIfProgress, uPercentStart + uPercentSpan * 98 / 100); 5773 5407 5774 pImage->cbSize = cbSize; 5775 5408 5776 for (unsigned i = 0; i < pImage->cExtents; i++) 5409 5777 { 5410 5778 PVMDKEXTENT pExtent = &pImage->pExtents[i]; 5779 5411 5780 rc = vmdkDescExtInsert(pImage, &pImage->Descriptor, pExtent->enmAccess, 5412 5781 pExtent->cNominalSectors, pExtent->enmType, … … 5418 5787 } 5419 5788 } 5789 5420 5790 if (RT_SUCCESS(rc)) 5421 5791 vmdkDescExtRemoveDummy(pImage, &pImage->Descriptor); 5422 if ( RT_SUCCESS(rc) 5423 && pPCHSGeometry->cCylinders != 0 5424 && pPCHSGeometry->cHeads != 0 5425 && pPCHSGeometry->cSectors != 0) 5426 rc = vmdkDescSetPCHSGeometry(pImage, pPCHSGeometry); 5792 5793 pImage->LCHSGeometry = *pLCHSGeometry; 5794 pImage->PCHSGeometry = *pPCHSGeometry; 5795 5796 if (RT_SUCCESS(rc)) 5797 { 5798 if ( pPCHSGeometry->cCylinders != 0 5799 && pPCHSGeometry->cHeads != 0 5800 && pPCHSGeometry->cSectors != 0) 5801 rc = vmdkDescSetPCHSGeometry(pImage, pPCHSGeometry); 5802 else if (uImageFlags & VD_VMDK_IMAGE_FLAGS_RAWDISK) 5803 { 5804 VDGEOMETRY RawDiskPCHSGeometry; 5805 RawDiskPCHSGeometry.cCylinders = (uint32_t)RT_MIN(pImage->cbSize / 512 / 16 / 63, 16383); 5806 RawDiskPCHSGeometry.cHeads = 16; 5807 RawDiskPCHSGeometry.cSectors = 63; 5808 rc = vmdkDescSetPCHSGeometry(pImage, &RawDiskPCHSGeometry); 5809 } 5810 } 5811 5427 5812 if ( RT_SUCCESS(rc) 5428 5813 && pLCHSGeometry->cCylinders != 0 … … 5430 5815 && pLCHSGeometry->cSectors != 0) 5431 5816 rc = vmdkDescSetLCHSGeometry(pImage, pLCHSGeometry); 5432 pImage->LCHSGeometry = *pLCHSGeometry; 5433 pImage->PCHSGeometry = *pPCHSGeometry; 5817 5434 5818 pImage->ImageUuid = *pUuid; 5435 5819 RTUuidClear(&pImage->ParentUuid); 5436 5820 RTUuidClear(&pImage->ModificationUuid); 5437 5821 RTUuidClear(&pImage->ParentModificationUuid); 5822 5438 5823 if (RT_SUCCESS(rc)) 5439 5824 rc = vmdkCreateImageDdbUuidsInit(pImage); 5825 5440 5826 if (RT_SUCCESS(rc)) 5441 5827 rc = vmdkAllocateGrainTableCache(pImage); 5828 5442 5829 if (RT_SUCCESS(rc)) 5443 5830 { … … 5446 5833 rc = vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: cannot set image comment in '%s'"), pImage->pszFilename); 5447 5834 } 5835 5448 5836 if (RT_SUCCESS(rc)) 5449 5837 { 5450 5838 vdIfProgress(pIfProgress, 
uPercentStart + uPercentSpan * 99 / 100); 5839 5451 5840 if (pImage->uImageFlags & VD_VMDK_IMAGE_FLAGS_STREAM_OPTIMIZED) 5452 5841 { … … 5473 5862 else 5474 5863 rc = vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: could not create new descriptor in '%s'"), pImage->pszFilename); 5864 5865 5475 5866 if (RT_SUCCESS(rc)) 5476 5867 { … … 5478 5869 pImage->RegionList.fFlags = 0; 5479 5870 pImage->RegionList.cRegions = 1; 5871 5480 5872 pRegion->offRegion = 0; /* Disk start. */ 5481 5873 pRegion->cbBlock = 512; … … 5485 5877 pRegion->cbMetadata = 0; 5486 5878 pRegion->cRegionBlocksOrBytes = pImage->cbSize; 5879 5487 5880 vdIfProgress(pIfProgress, uPercentStart + uPercentSpan); 5488 5881 } … … 5491 5884 return rc; 5492 5885 } 5886 5493 5887 /** 5494 5888 * Internal: Update image comment. … … 5503 5897 return VERR_NO_MEMORY; 5504 5898 } 5899 5505 5900 int rc = vmdkDescDDBSetStr(pImage, &pImage->Descriptor, 5506 5901 "ddb.comment", pszCommentEncoded); … … 5511 5906 return VINF_SUCCESS; 5512 5907 } 5908 5513 5909 /** 5514 5910 * Internal. Clear the grain table buffer for real stream optimized writing. … … 5521 5917 VMDK_GT_CACHELINE_SIZE * sizeof(uint32_t)); 5522 5918 } 5919 5523 5920 /** 5524 5921 * Internal. Flush the grain table buffer for real stream optimized writing. … … 5529 5926 int rc = VINF_SUCCESS; 5530 5927 uint32_t cCacheLines = RT_ALIGN(pExtent->cGTEntries, VMDK_GT_CACHELINE_SIZE) / VMDK_GT_CACHELINE_SIZE; 5928 5531 5929 /* VMware does not write out completely empty grain tables in the case 5532 5930 * of streamOptimized images, which according to my interpretation of … … 5550 5948 if (fAllZero) 5551 5949 return VINF_SUCCESS; 5950 5552 5951 uint64_t uFileOffset = pExtent->uAppendPosition; 5553 5952 if (!uFileOffset) … … 5555 5954 /* Align to sector, as the previous write could have been any size. */ 5556 5955 uFileOffset = RT_ALIGN_64(uFileOffset, 512); 5956 5557 5957 /* Grain table marker. */ 5558 5958 uint8_t aMarker[512]; … … 5565 5965 AssertRC(rc); 5566 5966 uFileOffset += 512; 5967 5567 5968 if (!pExtent->pGD || pExtent->pGD[uGDEntry]) 5568 5969 return VERR_INTERNAL_ERROR; 5970 5569 5971 pExtent->pGD[uGDEntry] = VMDK_BYTE2SECTOR(uFileOffset); 5972 5570 5973 for (uint32_t i = 0; i < cCacheLines; i++) 5571 5974 { … … 5575 5978 for (uint32_t j = 0; j < VMDK_GT_CACHELINE_SIZE; j++, pGTTmp++) 5576 5979 *pGTTmp = RT_H2LE_U32(*pGTTmp); 5980 5577 5981 rc = vdIfIoIntFileWriteSync(pImage->pIfIo, pExtent->pFile->pStorage, uFileOffset, 5578 5982 &pImage->pGTCache->aGTCache[i].aGTData[0], … … 5586 5990 return rc; 5587 5991 } 5992 5588 5993 /** 5589 5994 * Internal. Free all allocated space for representing an image, and optionally … … 5593 5998 { 5594 5999 int rc = VINF_SUCCESS; 6000 5595 6001 /* Freeing a never allocated image (e.g. because the open failed) is 5596 6002 * not signalled as an error. After all nothing bad happens. */ … … 5618 6024 pImage->pExtents[i].fMetaDirty = true; 5619 6025 } 6026 5620 6027 /* From now on it's not safe to append any more data. */ 5621 6028 pImage->pExtents[i].uAppendPosition = 0; … … 5623 6030 } 5624 6031 } 6032 5625 6033 if (pImage->uImageFlags & VD_VMDK_IMAGE_FLAGS_STREAM_OPTIMIZED) 5626 6034 { … … 5641 6049 AssertRC(rc); 5642 6050 } 6051 5643 6052 uint64_t uFileOffset = pExtent->uAppendPosition; 5644 6053 if (!uFileOffset) 5645 6054 return VERR_INTERNAL_ERROR; 5646 6055 uFileOffset = RT_ALIGN_64(uFileOffset, 512); 6056 5647 6057 /* From now on it's not safe to append any more data. 
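Back in vmdkCreateImage, the functional change of this revision is the geometry fallback quoted above: a raw-disk image whose caller supplies no PCHS geometry now gets one synthesised from the disk size, using the classic 16 heads and 63 sectors per track with the cylinder count clipped to 16383. Rendered as a standalone helper:

#include <stdint.h>

typedef struct SKETCHGEOMETRY
{
    uint32_t cCylinders;
    uint32_t cHeads;
    uint32_t cSectors;
} SKETCHGEOMETRY;

/* Mirrors the fallback added in this changeset: cylinders derived from the
 * disk size with 16 heads and 63 sectors per track, capped at 16383. */
static SKETCHGEOMETRY sketchRawDiskGeometry(uint64_t cbDisk)
{
    SKETCHGEOMETRY Geo;
    uint64_t cCylinders = cbDisk / 512 / 16 / 63;
    Geo.cCylinders = cCylinders > 16383 ? 16383 : (uint32_t)cCylinders;
    Geo.cHeads     = 16;
    Geo.cSectors   = 63;
    return Geo;
}

An 8 GiB raw disk, for example, works out to 16644 cylinders and is therefore stored as 16383/16/63; disks of up to 16383*16*63 sectors (a little under 8 GiB) keep their exact cylinder count.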
*/ 5648 6058 pExtent->uAppendPosition = 0; 6059 5649 6060 /* Grain directory marker. */ 5650 6061 uint8_t aMarker[512]; … … 5657 6068 AssertRC(rc); 5658 6069 uFileOffset += 512; 6070 5659 6071 /* Write grain directory in little endian style. The array will 5660 6072 * not be used after this, so convert in place. */ … … 5666 6078 pExtent->cGDEntries * sizeof(uint32_t)); 5667 6079 AssertRC(rc); 6080 5668 6081 pExtent->uSectorGD = VMDK_BYTE2SECTOR(uFileOffset); 5669 6082 pExtent->uSectorRGD = VMDK_BYTE2SECTOR(uFileOffset); … … 5671 6084 + pExtent->cGDEntries * sizeof(uint32_t), 5672 6085 512); 6086 5673 6087 /* Footer marker. */ 5674 6088 memset(pMarker, '\0', sizeof(aMarker)); … … 5678 6092 uFileOffset, aMarker, sizeof(aMarker)); 5679 6093 AssertRC(rc); 6094 5680 6095 uFileOffset += 512; 5681 6096 rc = vmdkWriteMetaSparseExtent(pImage, pExtent, uFileOffset, NULL); 5682 6097 AssertRC(rc); 6098 5683 6099 uFileOffset += 512; 5684 6100 /* End-of-stream marker. */ … … 5691 6107 else if (!fDelete && fFlush) 5692 6108 vmdkFlushImage(pImage, NULL); 6109 5693 6110 if (pImage->pExtents != NULL) 5694 6111 { … … 5712 6129 if (RT_SUCCESS(rc)) 5713 6130 rc = rc2; /* Propogate any error when closing the file. */ 6131 5714 6132 if (pImage->pGTCache) 5715 6133 { … … 5723 6141 } 5724 6142 } 6143 5725 6144 LogFlowFunc(("returns %Rrc\n", rc)); 5726 6145 return rc; 5727 6146 } 6147 5728 6148 /** 5729 6149 * Internal. Flush image data (and metadata) to disk. … … 5733 6153 PVMDKEXTENT pExtent; 5734 6154 int rc = VINF_SUCCESS; 6155 5735 6156 /* Update descriptor if changed. */ 5736 6157 if (pImage->Descriptor.fDirty) 5737 6158 rc = vmdkWriteDescriptor(pImage, pIoCtx); 6159 5738 6160 if (RT_SUCCESS(rc)) 5739 6161 { … … 5771 6193 } 5772 6194 } 6195 5773 6196 if (RT_FAILURE(rc)) 5774 6197 break; 6198 5775 6199 switch (pExtent->enmType) 5776 6200 { … … 5794 6218 } 5795 6219 } 6220 5796 6221 return rc; 5797 6222 } 6223 5798 6224 /** 5799 6225 * Internal. Find extent corresponding to the sector number in the disk. … … 5804 6230 PVMDKEXTENT pExtent = NULL; 5805 6231 int rc = VINF_SUCCESS; 6232 5806 6233 for (unsigned i = 0; i < pImage->cExtents; i++) 5807 6234 { … … 5814 6241 offSector -= pImage->pExtents[i].cNominalSectors; 5815 6242 } 6243 5816 6244 if (pExtent) 5817 6245 *ppExtent = pExtent; 5818 6246 else 5819 6247 rc = VERR_IO_SECTOR_NOT_FOUND; 6248 5820 6249 return rc; 5821 6250 } 6251 5822 6252 /** 5823 6253 * Internal. Hash function for placing the grain table hash entries. … … 5830 6260 return (uSector + uExtent) % pCache->cEntries; 5831 6261 } 6262 5832 6263 /** 5833 6264 * Internal. Get sector number in the extent file from the relative sector … … 5844 6275 uint32_t aGTDataTmp[VMDK_GT_CACHELINE_SIZE]; 5845 6276 int rc; 6277 5846 6278 /* For newly created and readonly/sequentially opened streamOptimized 5847 6279 * images this must be a no-op, as the grain directory is not there. */ … … 5855 6287 return VINF_SUCCESS; 5856 6288 } 6289 5857 6290 uGDIndex = uSector / pExtent->cSectorsPerGDE; 5858 6291 if (uGDIndex >= pExtent->cGDEntries) … … 5866 6299 return VINF_SUCCESS; 5867 6300 } 6301 5868 6302 uGTBlock = uSector / (pExtent->cSectorsPerGrain * VMDK_GT_CACHELINE_SIZE); 5869 6303 uGTHash = vmdkGTCacheHash(pCache, uGTBlock, pExtent->uExtent); … … 5894 6328 return VINF_SUCCESS; 5895 6329 } 6330 5896 6331 /** 5897 6332 * Internal. Writes the grain and also if necessary the grain tables. 
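The sector lookup in vmdkGetSector above is a two-level translation: the grain directory entry is selected by dividing by the number of sectors covered per GD entry, and the grain table entry by the grain number within that range. The sketch below keeps only the address arithmetic, with the grain table cache and the metadata reads replaced by in-memory arrays:

#include <stdint.h>

/* Resolve a sector offset inside the extent to the file sector that stores the
 * corresponding grain.  Returns 0 if the grain was never allocated (sparse hole). */
static uint64_t sketchResolveGrain(const uint32_t *paGD,          /* grain directory */
                                   uint64_t cGDEntries,
                                   const uint32_t * const *papGT, /* one grain table per GD entry */
                                   uint64_t cSectorsPerGDE,
                                   uint64_t cSectorsPerGrain,
                                   uint64_t uSector)
{
    uint64_t uGDIndex = uSector / cSectorsPerGDE;
    if (uGDIndex >= cGDEntries || paGD[uGDIndex] == 0)
        return 0;                                   /* no grain table -> hole */
    uint64_t uGTIndex = (uSector % cSectorsPerGDE) / cSectorsPerGrain;
    return papGT[uGDIndex][uGTIndex];               /* 0 means grain not allocated */
}

A zero entry at either level means the grain was never written and the sector reads back as free.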
… … 5908 6343 const void *pData; 5909 6344 int rc; 6345 5910 6346 /* Very strict requirements: always write at least one full grain, with 5911 6347 * proper alignment. Everything else would require reading of already … … 5920 6356 || uSector + VMDK_BYTE2SECTOR(cbWrite) > pExtent->cNominalSectors) 5921 6357 return VERR_INVALID_PARAMETER; 6358 5922 6359 /* Clip write range to at most the rest of the grain. */ 5923 6360 cbWrite = RT_MIN(cbWrite, VMDK_SECTOR2BYTE(pExtent->cSectorsPerGrain - uSector % pExtent->cSectorsPerGrain)); 6361 5924 6362 /* Do not allow to go back. */ 5925 6363 uGrain = uSector / pExtent->cSectorsPerGrain; … … 5930 6368 if (uGrain < pExtent->uLastGrainAccess) 5931 6369 return VERR_VD_VMDK_INVALID_WRITE; 6370 5932 6371 /* Zero byte write optimization. Since we don't tell VBoxHDD that we need 5933 6372 * to allocate something, we also need to detect the situation ourself. */ … … 5935 6374 && vdIfIoIntIoCtxIsZero(pImage->pIfIo, pIoCtx, cbWrite, true /* fAdvance */)) 5936 6375 return VINF_SUCCESS; 6376 5937 6377 if (uGDEntry != uLastGDEntry) 5938 6378 { … … 5948 6388 } 5949 6389 } 6390 5950 6391 uint64_t uFileOffset; 5951 6392 uFileOffset = pExtent->uAppendPosition; … … 5954 6395 /* Align to sector, as the previous write could have been any size. */ 5955 6396 uFileOffset = RT_ALIGN_64(uFileOffset, 512); 6397 5956 6398 /* Paranoia check: extent type, grain table buffer presence and 5957 6399 * grain table buffer space. Also grain table entry must be clear. */ … … 5961 6403 || pImage->pGTCache->aGTCache[uCacheLine].aGTData[uCacheEntry]) 5962 6404 return VERR_INTERNAL_ERROR; 6405 5963 6406 /* Update grain table entry. */ 5964 6407 pImage->pGTCache->aGTCache[uCacheLine].aGTData[uCacheEntry] = VMDK_BYTE2SECTOR(uFileOffset); 6408 5965 6409 if (cbWrite != VMDK_SECTOR2BYTE(pExtent->cSectorsPerGrain)) 5966 6410 { … … 5975 6419 unsigned cSegments = 1; 5976 6420 size_t cbSeg = 0; 6421 5977 6422 cbSeg = vdIfIoIntIoCtxSegArrayCreate(pImage->pIfIo, pIoCtx, &Segment, 5978 6423 &cSegments, VMDK_SECTOR2BYTE(pExtent->cSectorsPerGrain)); … … 5991 6436 pExtent->uLastGrainAccess = uGrain; 5992 6437 pExtent->uAppendPosition += cbGrain; 6438 5993 6439 return rc; 5994 6440 } 6441 5995 6442 /** 5996 6443 * Internal: Updates the grain table during grain allocation. … … 6006 6453 uint64_t uSector = pGrainAlloc->uSector; 6007 6454 PVMDKGTCACHEENTRY pGTCacheEntry; 6455 6008 6456 LogFlowFunc(("pImage=%#p pExtent=%#p pCache=%#p pIoCtx=%#p pGrainAlloc=%#p\n", 6009 6457 pImage, pExtent, pCache, pIoCtx, pGrainAlloc)); 6458 6010 6459 uGTSector = pGrainAlloc->uGTSector; 6011 6460 uRGTSector = pGrainAlloc->uRGTSector; 6012 6461 LogFlow(("uGTSector=%llu uRGTSector=%llu\n", uGTSector, uRGTSector)); 6462 6013 6463 /* Update the grain table (and the cache). */ 6014 6464 uGTBlock = uSector / (pExtent->cSectorsPerGrain * VMDK_GT_CACHELINE_SIZE); … … 6073 6523 return vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: cannot write updated backup grain table in '%s'"), pExtent->pszFullname); 6074 6524 } 6525 6075 6526 LogFlowFunc(("leaving rc=%Rrc\n", rc)); 6076 6527 return rc; 6077 6528 } 6529 6078 6530 /** 6079 6531 * Internal - complete the grain allocation by updating disk grain table if required. 
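Both the streamOptimized path and the regular allocation path above finish a grain write with the same bookkeeping: the new grain's file offset, expressed in sectors, is stored in the grain table entry covering the affected virtual sector and mirrored into the redundant grain table when one exists. A stripped-down sketch of that update on plain in-memory tables (the real code goes through the GT cache and writes the touched table block back to the file):

#include <stdint.h>
#include <stdbool.h>

#define SKETCH_SECTOR_SIZE  512u

/* Record where a grain was written; paRGT may be NULL if no redundant table exists. */
static bool sketchRecordGrain(uint32_t *paGT, uint32_t *paRGT,
                              uint64_t cGTEntries,
                              uint64_t cSectorsPerGrain,
                              uint64_t uSectorInExtent,   /* virtual sector of the write */
                              uint64_t offGrainInFile)    /* byte offset of the new grain */
{
    if (offGrainInFile % SKETCH_SECTOR_SIZE != 0)
        return false;                          /* grains are written sector aligned */
    uint64_t iGTEntry = uSectorInExtent / cSectorsPerGrain;
    if (iGTEntry >= cGTEntries)
        return false;
    uint32_t uGrainSector = (uint32_t)(offGrainInFile / SKETCH_SECTOR_SIZE);
    paGT[iGTEntry] = uGrainSector;
    if (paRGT)                                 /* redundant grain table, if present */
        paRGT[iGTEntry] = uGrainSector;
    return true;
}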
… … 6085 6537 PVMDKIMAGE pImage = (PVMDKIMAGE)pBackendData; 6086 6538 PVMDKGRAINALLOCASYNC pGrainAlloc = (PVMDKGRAINALLOCASYNC)pvUser; 6539 6087 6540 LogFlowFunc(("pBackendData=%#p pIoCtx=%#p pvUser=%#p rcReq=%Rrc\n", 6088 6541 pBackendData, pIoCtx, pvUser, rcReq)); 6542 6089 6543 pGrainAlloc->cIoXfersPending--; 6090 6544 if (!pGrainAlloc->cIoXfersPending && pGrainAlloc->fGTUpdateNeeded) 6091 6545 rc = vmdkAllocGrainGTUpdate(pImage, pGrainAlloc->pExtent, pIoCtx, pGrainAlloc); 6546 6092 6547 if (!pGrainAlloc->cIoXfersPending) 6093 6548 { … … 6095 6550 RTMemFree(pGrainAlloc); 6096 6551 } 6552 6097 6553 LogFlowFunc(("Leaving rc=%Rrc\n", rc)); 6098 6554 return rc; 6099 6555 } 6556 6100 6557 /** 6101 6558 * Internal. Allocates a new grain table (if necessary). … … 6109 6566 PVMDKGRAINALLOCASYNC pGrainAlloc = NULL; 6110 6567 int rc; 6568 6111 6569 LogFlowFunc(("pCache=%#p pExtent=%#p pIoCtx=%#p uSector=%llu cbWrite=%llu\n", 6112 6570 pCache, pExtent, pIoCtx, uSector, cbWrite)); 6571 6113 6572 pGrainAlloc = (PVMDKGRAINALLOCASYNC)RTMemAllocZ(sizeof(VMDKGRAINALLOCASYNC)); 6114 6573 if (!pGrainAlloc) 6115 6574 return VERR_NO_MEMORY; 6575 6116 6576 pGrainAlloc->pExtent = pExtent; 6117 6577 pGrainAlloc->uSector = uSector; 6578 6118 6579 uGDIndex = uSector / pExtent->cSectorsPerGDE; 6119 6580 if (uGDIndex >= pExtent->cGDEntries) … … 6130 6591 { 6131 6592 LogFlow(("Allocating new grain table\n")); 6593 6132 6594 /* There is no grain table referenced by this grain directory 6133 6595 * entry. So there is absolutely no data in this area. Allocate … … 6140 6602 } 6141 6603 Assert(!(uFileOffset % 512)); 6604 6142 6605 uFileOffset = RT_ALIGN_64(uFileOffset, 512); 6143 6606 uGTSector = VMDK_BYTE2SECTOR(uFileOffset); 6607 6144 6608 /* Normally the grain table is preallocated for hosted sparse extents 6145 6609 * that support more than 32 bit sector numbers. So this shouldn't … … 6150 6614 return VERR_VD_VMDK_INVALID_HEADER; 6151 6615 } 6616 6152 6617 /* Write grain table by writing the required number of grain table 6153 6618 * cache chunks. Allocate memory dynamically here or we flood the … … 6155 6620 size_t cbGTDataTmp = pExtent->cGTEntries * sizeof(uint32_t); 6156 6621 uint32_t *paGTDataTmp = (uint32_t *)RTMemTmpAllocZ(cbGTDataTmp); 6622 6157 6623 if (!paGTDataTmp) 6158 6624 { … … 6160 6626 return VERR_NO_MEMORY; 6161 6627 } 6628 6162 6629 memset(paGTDataTmp, '\0', cbGTDataTmp); 6163 6630 rc = vdIfIoIntFileWriteMeta(pImage->pIfIo, pExtent->pFile->pStorage, … … 6175 6642 pExtent->uAppendPosition = RT_ALIGN_64( pExtent->uAppendPosition 6176 6643 + cbGTDataTmp, 512); 6644 6177 6645 if (pExtent->pRGD) 6178 6646 { … … 6183 6651 Assert(!(uFileOffset % 512)); 6184 6652 uRGTSector = VMDK_BYTE2SECTOR(uFileOffset); 6653 6185 6654 /* Normally the redundant grain table is preallocated for hosted 6186 6655 * sparse extents that support more than 32 bit sector numbers. So … … 6191 6660 return VERR_VD_VMDK_INVALID_HEADER; 6192 6661 } 6662 6193 6663 /* Write grain table by writing the required number of grain table 6194 6664 * cache chunks. 
Allocate memory dynamically here or we flood the … … 6205 6675 return vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: cannot write backup grain table allocation in '%s'"), pExtent->pszFullname); 6206 6676 } 6677 6207 6678 pExtent->uAppendPosition = pExtent->uAppendPosition + cbGTDataTmp; 6208 6679 } 6680 6209 6681 RTMemTmpFree(paGTDataTmp); 6682 6210 6683 /* Update the grain directory on disk (doing it before writing the 6211 6684 * grain table will result in a garbled extent if the operation is … … 6233 6706 return vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: cannot write backup grain directory entry in '%s'"), pExtent->pszFullname); 6234 6707 } 6708 6235 6709 /* As the final step update the in-memory copy of the GDs. */ 6236 6710 pExtent->pGD[uGDIndex] = uGTSector; … … 6238 6712 pExtent->pRGD[uGDIndex] = uRGTSector; 6239 6713 } 6714 6240 6715 LogFlow(("uGTSector=%llu uRGTSector=%llu\n", uGTSector, uRGTSector)); 6241 6716 pGrainAlloc->uGTSector = uGTSector; 6242 6717 pGrainAlloc->uRGTSector = uRGTSector; 6718 6243 6719 uFileOffset = pExtent->uAppendPosition; 6244 6720 if (!uFileOffset) 6245 6721 return VERR_INTERNAL_ERROR; 6246 6722 Assert(!(uFileOffset % 512)); 6723 6247 6724 pGrainAlloc->uGrainOffset = uFileOffset; 6725 6248 6726 if (pImage->uImageFlags & VD_VMDK_IMAGE_FLAGS_STREAM_OPTIMIZED) 6249 6727 { … … 6251 6729 ("Accesses to stream optimized images must be synchronous\n"), 6252 6730 VERR_INVALID_STATE); 6731 6253 6732 if (cbWrite != VMDK_SECTOR2BYTE(pExtent->cSectorsPerGrain)) 6254 6733 return vdIfError(pImage->pIfError, VERR_INTERNAL_ERROR, RT_SRC_POS, N_("VMDK: not enough data for a compressed data block in '%s'"), pExtent->pszFullname); 6734 6255 6735 /* Invalidate cache, just in case some code incorrectly allows mixing 6256 6736 * of reads and writes. Normally shouldn't be needed. */ 6257 6737 pExtent->uGrainSectorAbs = 0; 6738 6258 6739 /* Write compressed data block and the markers. */ 6259 6740 uint32_t cbGrain = 0; … … 6261 6742 RTSGSEG Segment; 6262 6743 unsigned cSegments = 1; 6744 6263 6745 cbSeg = vdIfIoIntIoCtxSegArrayCreate(pImage->pIfIo, pIoCtx, &Segment, 6264 6746 &cSegments, cbWrite); 6265 6747 Assert(cbSeg == cbWrite); 6748 6266 6749 rc = vmdkFileDeflateSync(pImage, pExtent, uFileOffset, 6267 6750 Segment.pvSeg, cbWrite, uSector, &cbGrain); … … 6284 6767 else if (RT_FAILURE(rc)) 6285 6768 return vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: cannot write allocated data block in '%s'"), pExtent->pszFullname); 6769 6286 6770 pExtent->uAppendPosition += cbWrite; 6287 6771 } 6772 6288 6773 rc = vmdkAllocGrainGTUpdate(pImage, pExtent, pIoCtx, pGrainAlloc); 6774 6289 6775 if (!pGrainAlloc->cIoXfersPending) 6290 6776 { … … 6292 6778 RTMemFree(pGrainAlloc); 6293 6779 } 6780 6294 6781 LogFlowFunc(("leaving rc=%Rrc\n", rc)); 6782 6295 6783 return rc; 6296 6784 } 6785 6297 6786 /** 6298 6787 * Internal. Reads the contents by sequentially going over the compressed … … 6304 6793 { 6305 6794 int rc; 6795 6306 6796 LogFlowFunc(("pImage=%#p pExtent=%#p uSector=%llu pIoCtx=%#p cbRead=%llu\n", 6307 6797 pImage, pExtent, uSector, pIoCtx, cbRead)); 6798 6308 6799 AssertMsgReturn(vdIfIoIntIoCtxIsSynchronous(pImage->pIfIo, pIoCtx), 6309 6800 ("Async I/O not supported for sequential stream optimized images\n"), 6310 6801 VERR_INVALID_STATE); 6802 6311 6803 /* Do not allow to go back. 
*/ 6312 6804 uint32_t uGrain = uSector / pExtent->cSectorsPerGrain; … … 6314 6806 return VERR_VD_VMDK_INVALID_STATE; 6315 6807 pExtent->uLastGrainAccess = uGrain; 6808 6316 6809 /* After a previous error do not attempt to recover, as it would need 6317 6810 * seeking (in the general case backwards which is forbidden). */ 6318 6811 if (!pExtent->uGrainSectorAbs) 6319 6812 return VERR_VD_VMDK_INVALID_STATE; 6813 6320 6814 /* Check if we need to read something from the image or if what we have 6321 6815 * in the buffer is good to fulfill the request. */ … … 6324 6818 uint32_t uGrainSectorAbs = pExtent->uGrainSectorAbs 6325 6819 + VMDK_BYTE2SECTOR(pExtent->cbGrainStreamRead); 6820 6326 6821 /* Get the marker from the next data block - and skip everything which 6327 6822 * is not a compressed grain. If it's a compressed grain which is for … … 6338 6833 Marker.uSector = RT_LE2H_U64(Marker.uSector); 6339 6834 Marker.cbSize = RT_LE2H_U32(Marker.cbSize); 6835 6340 6836 if (Marker.cbSize == 0) 6341 6837 { … … 6416 6912 } 6417 6913 } while (Marker.uType != VMDK_MARKER_EOS); 6914 6418 6915 pExtent->uGrainSectorAbs = uGrainSectorAbs; 6916 6419 6917 if (!pExtent->cbGrainStreamRead && Marker.uType == VMDK_MARKER_EOS) 6420 6918 { … … 6425 6923 } 6426 6924 } 6925 6427 6926 if (pExtent->uGrain > uSector / pExtent->cSectorsPerGrain) 6428 6927 { … … 6432 6931 return VERR_VD_BLOCK_FREE; 6433 6932 } 6933 6434 6934 uint32_t uSectorInGrain = uSector % pExtent->cSectorsPerGrain; 6435 6935 vdIfIoIntIoCtxCopyTo(pImage->pIfIo, pIoCtx, … … 6439 6939 return VINF_SUCCESS; 6440 6940 } 6941 6441 6942 /** 6442 6943 * Replaces a fragment of a string with the specified string. … … 6507 7008 return pszNewStr; 6508 7009 } 7010 7011 6509 7012 /** @copydoc VDIMAGEBACKEND::pfnProbe */ 6510 7013 static DECLCALLBACK(int) vmdkProbe(const char *pszFilename, PVDINTERFACE pVDIfsDisk, … … 6534 7037 vmdkFreeImage(pImage, false, false /*fFlush*/); 6535 7038 RTMemFree(pImage); 7039 6536 7040 if (RT_SUCCESS(rc)) 6537 7041 *penmType = VDTYPE_HDD; … … 6539 7043 else 6540 7044 rc = VERR_NO_MEMORY; 7045 6541 7046 LogFlowFunc(("returns %Rrc\n", rc)); 6542 7047 return rc; 6543 7048 } 7049 6544 7050 /** @copydoc VDIMAGEBACKEND::pfnOpen */ 6545 7051 static DECLCALLBACK(int) vmdkOpen(const char *pszFilename, unsigned uOpenFlags, … … 6548 7054 { 6549 7055 RT_NOREF1(enmType); /**< @todo r=klaus make use of the type info. */ 7056 6550 7057 LogFlowFunc(("pszFilename=\"%s\" uOpenFlags=%#x pVDIfsDisk=%#p pVDIfsImage=%#p enmType=%u ppBackendData=%#p\n", 6551 7058 pszFilename, uOpenFlags, pVDIfsDisk, pVDIfsImage, enmType, ppBackendData)); 6552 7059 int rc; 7060 6553 7061 /* Check open flags. All valid flags are supported. 
*/ 6554 7062 AssertReturn(!(uOpenFlags & ~VD_OPEN_FLAGS_MASK), VERR_INVALID_PARAMETER); 6555 7063 AssertPtrReturn(pszFilename, VERR_INVALID_POINTER); 6556 7064 AssertReturn(*pszFilename != '\0', VERR_INVALID_PARAMETER); 7065 6557 7066 6558 7067 PVMDKIMAGE pImage = (PVMDKIMAGE)RTMemAllocZ(RT_UOFFSETOF(VMDKIMAGE, RegionList.aRegions[1])); … … 6567 7076 pImage->pVDIfsDisk = pVDIfsDisk; 6568 7077 pImage->pVDIfsImage = pVDIfsImage; 7078 6569 7079 rc = vmdkOpenImage(pImage, uOpenFlags); 6570 7080 if (RT_SUCCESS(rc)) … … 6575 7085 else 6576 7086 rc = VERR_NO_MEMORY; 7087 6577 7088 LogFlowFunc(("returns %Rrc (pBackendData=%#p)\n", rc, *ppBackendData)); 6578 7089 return rc; 6579 7090 } 7091 6580 7092 /** @copydoc VDIMAGEBACKEND::pfnCreate */ 6581 7093 static DECLCALLBACK(int) vmdkCreate(const char *pszFilename, uint64_t cbSize, … … 6591 7103 pszFilename, cbSize, uImageFlags, pszComment, pPCHSGeometry, pLCHSGeometry, pUuid, uOpenFlags, uPercentStart, uPercentSpan, pVDIfsDisk, pVDIfsImage, pVDIfsOperation, enmType, ppBackendData)); 6592 7104 int rc; 7105 6593 7106 /* Check the VD container type and image flags. */ 6594 7107 if ( enmType != VDTYPE_HDD 6595 7108 || (uImageFlags & ~VD_VMDK_IMAGE_FLAGS_MASK) != 0) 6596 7109 return VERR_VD_INVALID_TYPE; 7110 6597 7111 /* Check size. Maximum 256TB-64K for sparse images, otherwise unlimited. */ 6598 7112 if ( !(uImageFlags & VD_VMDK_IMAGE_FLAGS_RAWDISK) … … 6600 7114 || (!(uImageFlags & VD_IMAGE_FLAGS_FIXED) && cbSize >= _1T * 256 - _64K))) 6601 7115 return VERR_VD_INVALID_SIZE; 7116 6602 7117 /* Check image flags for invalid combinations. */ 6603 7118 if ( (uImageFlags & VD_VMDK_IMAGE_FLAGS_STREAM_OPTIMIZED) 6604 7119 && (uImageFlags & ~(VD_VMDK_IMAGE_FLAGS_STREAM_OPTIMIZED | VD_IMAGE_FLAGS_DIFF))) 6605 7120 return VERR_INVALID_PARAMETER; 7121 6606 7122 /* Check open flags. All valid flags are supported. */ 6607 7123 AssertReturn(!(uOpenFlags & ~VD_OPEN_FLAGS_MASK), VERR_INVALID_PARAMETER); … … 6613 7129 && !(uImageFlags & VD_IMAGE_FLAGS_FIXED)), 6614 7130 VERR_INVALID_PARAMETER); 7131 6615 7132 PVMDKIMAGE pImage = (PVMDKIMAGE)RTMemAllocZ(RT_UOFFSETOF(VMDKIMAGE, RegionList.aRegions[1])); 6616 7133 if (RT_LIKELY(pImage)) 6617 7134 { 6618 7135 PVDINTERFACEPROGRESS pIfProgress = VDIfProgressGet(pVDIfsOperation); 7136 6619 7137 pImage->pszFilename = pszFilename; 6620 7138 pImage->pFile = NULL; … … 6647 7165 rc = vmdkOpenImage(pImage, uOpenFlags); 6648 7166 } 7167 6649 7168 if (RT_SUCCESS(rc)) 6650 7169 *ppBackendData = pImage; 6651 7170 } 7171 6652 7172 if (RT_FAILURE(rc)) 6653 7173 RTMemFree(pImage->pDescData); … … 6655 7175 else 6656 7176 rc = VERR_NO_MEMORY; 7177 6657 7178 if (RT_FAILURE(rc)) 6658 7179 RTMemFree(pImage); … … 6660 7181 else 6661 7182 rc = VERR_NO_MEMORY; 7183 6662 7184 LogFlowFunc(("returns %Rrc (pBackendData=%#p)\n", rc, *ppBackendData)); 6663 7185 return rc; 6664 7186 } 7187 6665 7188 /** 6666 7189 * Prepares the state for renaming a VMDK image, setting up the state and allocating … … 6675 7198 { 6676 7199 AssertReturn(RTPathFilename(pszFilename) != NULL, VERR_INVALID_PARAMETER); 7200 6677 7201 int rc = VINF_SUCCESS; 7202 6678 7203 memset(&pRenameState->DescriptorCopy, 0, sizeof(pRenameState->DescriptorCopy)); 7204 6679 7205 /* 6680 7206 * Allocate an array to store both old and new names of renamed files … … 6702 7228 pRenameState->fEmbeddedDesc = true; 6703 7229 } 7230 6704 7231 /* Save the descriptor content. 
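The size validation in vmdkCreate above distils to a few cases: raw-disk mappings may pass zero because the size is taken from the device itself (which is exactly what the new pImage->cbSize plumbing in this changeset feeds back), other images need an explicit non-zero size, and sparse images must stay below 256 TiB minus 64 KiB. As a small predicate, with the flags reduced to two booleans:

#include <stdint.h>
#include <stdbool.h>

/* Sketch of the creation-time size rule visible above. */
static bool sketchIsValidCreateSize(bool fRawDisk, bool fFixed, uint64_t cbSize)
{
    if (fRawDisk)
        return true;                                        /* size is derived from the device */
    if (cbSize == 0)
        return false;                                       /* regular images need a size */
    if (!fFixed && cbSize >= UINT64_C(256) * (UINT64_C(1) << 40) - UINT64_C(64) * 1024)
        return false;                                       /* sparse format limit: 256 TiB - 64 KiB */
    return true;
}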
*/ 6705 7232 pRenameState->DescriptorCopy.cLines = pImage->Descriptor.cLines; … … 6713 7240 } 6714 7241 } 7242 6715 7243 if (RT_SUCCESS(rc)) 6716 7244 { … … 6719 7247 AssertReturn(pRenameState->pszNewBaseName, VERR_NO_STR_MEMORY); 6720 7248 RTPathStripSuffix(pRenameState->pszNewBaseName); 7249 6721 7250 pRenameState->pszOldBaseName = RTStrDup(RTPathFilename(pImage->pszFilename)); 6722 7251 AssertReturn(pRenameState->pszOldBaseName, VERR_NO_STR_MEMORY); 6723 7252 RTPathStripSuffix(pRenameState->pszOldBaseName); 7253 6724 7254 /* Prepare both old and new full names used for string replacement. 6725 7255 Note! Must abspath the stuff here, so the strstr weirdness later in … … 6729 7259 AssertReturn(pRenameState->pszNewFullName, VERR_NO_STR_MEMORY); 6730 7260 RTPathStripSuffix(pRenameState->pszNewFullName); 7261 6731 7262 pRenameState->pszOldFullName = RTPathAbsDup(pImage->pszFilename); 6732 7263 AssertReturn(pRenameState->pszOldFullName, VERR_NO_STR_MEMORY); 6733 7264 RTPathStripSuffix(pRenameState->pszOldFullName); 7265 6734 7266 /* Save the old name for easy access to the old descriptor file. */ 6735 7267 pRenameState->pszOldDescName = RTStrDup(pImage->pszFilename); 6736 7268 AssertReturn(pRenameState->pszOldDescName, VERR_NO_STR_MEMORY); 7269 6737 7270 /* Save old image name. */ 6738 7271 pRenameState->pszOldImageName = pImage->pszFilename; … … 6741 7274 else 6742 7275 rc = VERR_NO_TMP_MEMORY; 7276 6743 7277 return rc; 6744 7278 } 7279 6745 7280 /** 6746 7281 * Destroys the given rename state, freeing all allocated memory. … … 6786 7321 RTStrFree(pRenameState->pszNewFullName); 6787 7322 } 7323 6788 7324 /** 6789 7325 * Rolls back the rename operation to the original state. … … 6796 7332 { 6797 7333 int rc = VINF_SUCCESS; 7334 6798 7335 if (!pRenameState->fImageFreed) 6799 7336 { … … 6804 7341 vmdkFreeImage(pImage, false, true /*fFlush*/); 6805 7342 } 7343 6806 7344 /* Rename files back. */ 6807 7345 for (unsigned i = 0; i <= pRenameState->cExtents; i++) … … 6842 7380 pImage->pszFilename = pRenameState->pszOldImageName; 6843 7381 rc = vmdkOpenImage(pImage, pImage->uOpenFlags); 7382 6844 7383 return rc; 6845 7384 } 7385 6846 7386 /** 6847 7387 * Rename worker doing the real work. … … 6856 7396 int rc = VINF_SUCCESS; 6857 7397 unsigned i, line; 7398 6858 7399 /* Update the descriptor with modified extent names. */ 6859 7400 for (i = 0, line = pImage->Descriptor.uFirstExtent; … … 6872 7413 pImage->Descriptor.aLines[line] = pRenameState->apszNewLines[i]; 6873 7414 } 7415 6874 7416 if (RT_SUCCESS(rc)) 6875 7417 { … … 6878 7420 /* Flush the descriptor now, in case it is embedded. */ 6879 7421 vmdkFlushImage(pImage, NULL); 7422 6880 7423 /* Close and rename/move extents. */ 6881 7424 for (i = 0; i < pRenameState->cExtents; i++) … … 6895 7438 if (RT_FAILURE(rc)) 6896 7439 break;; 7440 6897 7441 /* Rename the extent file. */ 6898 7442 rc = vdIfIoIntFileMove(pImage->pIfIo, pExtent->pszFullname, pRenameState->apszNewName[i], 0); … … 6902 7446 pRenameState->apszOldName[i] = RTStrDup(pExtent->pszFullname); 6903 7447 } 7448 6904 7449 if (RT_SUCCESS(rc)) 6905 7450 { … … 6909 7454 { 6910 7455 pRenameState->fImageFreed = true; 7456 6911 7457 /* Last elements of new/old name arrays are intended for 6912 7458 * storing descriptor's names. … … 6923 7469 } 6924 7470 } 7471 6925 7472 /* Update pImage with the new information. */ 6926 7473 pImage->pszFilename = pszFilename; 7474 6927 7475 /* Open the new image. 
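Renaming above boils down to rewriting each descriptor extent line so that the old base name is swapped for the new one, then renaming the extent files to match while keeping the old names around for the rollback path. A minimal first-occurrence substitution helper of the kind used for the line rewrite, purely for illustration:

#include <stdlib.h>
#include <string.h>

/* Return a newly allocated copy of pszLine with the first occurrence of
 * pszOld replaced by pszNew, or NULL if pszOld is absent or allocation fails. */
static char *sketchReplaceOnce(const char *pszLine, const char *pszOld, const char *pszNew)
{
    const char *pszHit = strstr(pszLine, pszOld);
    if (!pszHit)
        return NULL;
    size_t cchPrefix = (size_t)(pszHit - pszLine);
    size_t cchOld    = strlen(pszOld);
    size_t cchNew    = strlen(pszNew);
    size_t cchTail   = strlen(pszHit + cchOld);
    char *pszResult  = (char *)malloc(cchPrefix + cchNew + cchTail + 1);
    if (!pszResult)
        return NULL;
    memcpy(pszResult, pszLine, cchPrefix);
    memcpy(pszResult + cchPrefix, pszNew, cchNew);
    memcpy(pszResult + cchPrefix + cchNew, pszHit + cchOld, cchTail + 1); /* includes NUL */
    return pszResult;
}

For instance, sketchReplaceOnce("RW 4192256 SPARSE \"old-s001.vmdk\"", "old", "new") yields the same extent line pointing at "new-s001.vmdk".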
*/ 6928 7476 rc = vmdkOpenImage(pImage, pImage->uOpenFlags); … … 6930 7478 } 6931 7479 } 7480 6932 7481 return rc; 6933 7482 } 7483 6934 7484 /** @copydoc VDIMAGEBACKEND::pfnRename */ 6935 7485 static DECLCALLBACK(int) vmdkRename(void *pBackendData, const char *pszFilename) 6936 7486 { 6937 7487 LogFlowFunc(("pBackendData=%#p pszFilename=%#p\n", pBackendData, pszFilename)); 7488 6938 7489 PVMDKIMAGE pImage = (PVMDKIMAGE)pBackendData; 6939 7490 VMDKRENAMESTATE RenameState; 7491 6940 7492 memset(&RenameState, 0, sizeof(RenameState)); 7493 6941 7494 /* Check arguments. */ 6942 7495 AssertPtrReturn(pImage, VERR_INVALID_POINTER); … … 6944 7497 AssertReturn(*pszFilename != '\0', VERR_INVALID_PARAMETER); 6945 7498 AssertReturn(!(pImage->uImageFlags & VD_VMDK_IMAGE_FLAGS_RAWDISK), VERR_INVALID_PARAMETER); 7499 6946 7500 int rc = vmdkRenameStatePrepare(pImage, &RenameState, pszFilename); 6947 7501 if (RT_SUCCESS(rc)) 6948 7502 { 6949 7503 /* --- Up to this point we have not done any damage yet. --- */ 7504 6950 7505 rc = vmdkRenameWorker(pImage, &RenameState, pszFilename); 6951 7506 /* Roll back all changes in case of failure. */ … … 6956 7511 } 6957 7512 } 7513 6958 7514 vmdkRenameStateDestroy(&RenameState); 6959 7515 LogFlowFunc(("returns %Rrc\n", rc)); 6960 7516 return rc; 6961 7517 } 7518 6962 7519 /** @copydoc VDIMAGEBACKEND::pfnClose */ 6963 7520 static DECLCALLBACK(int) vmdkClose(void *pBackendData, bool fDelete) … … 6965 7522 LogFlowFunc(("pBackendData=%#p fDelete=%d\n", pBackendData, fDelete)); 6966 7523 PVMDKIMAGE pImage = (PVMDKIMAGE)pBackendData; 7524 6967 7525 int rc = vmdkFreeImage(pImage, fDelete, true /*fFlush*/); 6968 7526 RTMemFree(pImage); 7527 6969 7528 LogFlowFunc(("returns %Rrc\n", rc)); 6970 7529 return rc; 6971 7530 } 7531 6972 7532 /** @copydoc VDIMAGEBACKEND::pfnRead */ 6973 7533 static DECLCALLBACK(int) vmdkRead(void *pBackendData, uint64_t uOffset, size_t cbToRead, … … 6977 7537 pBackendData, uOffset, pIoCtx, cbToRead, pcbActuallyRead)); 6978 7538 PVMDKIMAGE pImage = (PVMDKIMAGE)pBackendData; 7539 6979 7540 AssertPtr(pImage); 6980 7541 Assert(uOffset % 512 == 0); … … 6983 7544 AssertReturn(cbToRead, VERR_INVALID_PARAMETER); 6984 7545 AssertReturn(uOffset + cbToRead <= pImage->cbSize, VERR_INVALID_PARAMETER); 7546 6985 7547 /* Find the extent and check access permissions as defined in the extent descriptor. */ 6986 7548 PVMDKEXTENT pExtent; … … 6993 7555 /* Clip read range to remain in this extent. */ 6994 7556 cbToRead = RT_MIN(cbToRead, VMDK_SECTOR2BYTE(pExtent->uSectorOffset + pExtent->cNominalSectors - uSectorExtentRel)); 7557 6995 7558 /* Handle the read according to the current extent type. 
*/ 6996 7559 switch (pExtent->enmType) … … 6999 7562 { 7000 7563 uint64_t uSectorExtentAbs; 7564 7001 7565 rc = vmdkGetSector(pImage, pIoCtx, pExtent, uSectorExtentRel, &uSectorExtentAbs); 7002 7566 if (RT_FAILURE(rc)) … … 7022 7586 AssertMsg(vdIfIoIntIoCtxIsSynchronous(pImage->pIfIo, pIoCtx), 7023 7587 ("Async I/O is not supported for stream optimized VMDK's\n")); 7588 7024 7589 uint32_t uSectorInGrain = uSectorExtentRel % pExtent->cSectorsPerGrain; 7025 7590 uSectorExtentAbs -= uSectorInGrain; … … 7062 7627 { 7063 7628 size_t cbSet; 7629 7064 7630 cbSet = vdIfIoIntIoCtxSet(pImage->pIfIo, pIoCtx, 0, cbToRead); 7065 7631 Assert(cbSet == cbToRead); … … 7072 7638 else if (RT_SUCCESS(rc)) 7073 7639 rc = VERR_VD_VMDK_INVALID_STATE; 7640 7074 7641 LogFlowFunc(("returns %Rrc\n", rc)); 7075 7642 return rc; 7076 7643 } 7644 7077 7645 /** @copydoc VDIMAGEBACKEND::pfnWrite */ 7078 7646 static DECLCALLBACK(int) vmdkWrite(void *pBackendData, uint64_t uOffset, size_t cbToWrite, … … 7084 7652 PVMDKIMAGE pImage = (PVMDKIMAGE)pBackendData; 7085 7653 int rc; 7654 7086 7655 AssertPtr(pImage); 7087 7656 Assert(uOffset % 512 == 0); … … 7089 7658 AssertPtrReturn(pIoCtx, VERR_INVALID_POINTER); 7090 7659 AssertReturn(cbToWrite, VERR_INVALID_PARAMETER); 7660 7091 7661 if (!(pImage->uOpenFlags & VD_OPEN_FLAGS_READONLY)) 7092 7662 { … … 7094 7664 uint64_t uSectorExtentRel; 7095 7665 uint64_t uSectorExtentAbs; 7666 7096 7667 /* No size check here, will do that later when the extent is located. 7097 7668 * There are sparse images out there which according to the spec are … … 7100 7671 * grain boundaries, and with the nominal size not being a multiple of the 7101 7672 * grain size), this would prevent writing to the last grain. */ 7673 7102 7674 rc = vmdkFindExtent(pImage, VMDK_BYTE2SECTOR(uOffset), 7103 7675 &pExtent, &uSectorExtentRel); … … 7197 7769 } 7198 7770 } 7771 7199 7772 if (pcbWriteProcess) 7200 7773 *pcbWriteProcess = cbToWrite; … … 7203 7776 else 7204 7777 rc = VERR_VD_IMAGE_READ_ONLY; 7778 7205 7779 LogFlowFunc(("returns %Rrc\n", rc)); 7206 7780 return rc; 7207 7781 } 7782 7208 7783 /** @copydoc VDIMAGEBACKEND::pfnFlush */ 7209 7784 static DECLCALLBACK(int) vmdkFlush(void *pBackendData, PVDIOCTX pIoCtx) 7210 7785 { 7211 7786 PVMDKIMAGE pImage = (PVMDKIMAGE)pBackendData; 7787 7212 7788 return vmdkFlushImage(pImage, pIoCtx); 7213 7789 } 7790 7214 7791 /** @copydoc VDIMAGEBACKEND::pfnGetVersion */ 7215 7792 static DECLCALLBACK(unsigned) vmdkGetVersion(void *pBackendData) … … 7217 7794 LogFlowFunc(("pBackendData=%#p\n", pBackendData)); 7218 7795 PVMDKIMAGE pImage = (PVMDKIMAGE)pBackendData; 7796 7219 7797 AssertPtrReturn(pImage, 0); 7798 7220 7799 return VMDK_IMAGE_VERSION; 7221 7800 } 7801 7222 7802 /** @copydoc VDIMAGEBACKEND::pfnGetFileSize */ 7223 7803 static DECLCALLBACK(uint64_t) vmdkGetFileSize(void *pBackendData) … … 7226 7806 PVMDKIMAGE pImage = (PVMDKIMAGE)pBackendData; 7227 7807 uint64_t cb = 0; 7808 7228 7809 AssertPtrReturn(pImage, 0); 7810 7229 7811 if (pImage->pFile != NULL) 7230 7812 { … … 7244 7826 } 7245 7827 } 7828 7246 7829 LogFlowFunc(("returns %lld\n", cb)); 7247 7830 return cb; 7248 7831 } 7832 7249 7833 /** @copydoc VDIMAGEBACKEND::pfnGetPCHSGeometry */ 7250 7834 static DECLCALLBACK(int) vmdkGetPCHSGeometry(void *pBackendData, PVDGEOMETRY pPCHSGeometry) … … 7253 7837 PVMDKIMAGE pImage = (PVMDKIMAGE)pBackendData; 7254 7838 int rc = VINF_SUCCESS; 7839 7255 7840 AssertPtrReturn(pImage, VERR_VD_NOT_OPENED); 7841 7256 7842 if (pImage->PCHSGeometry.cCylinders) 7257 7843 *pPCHSGeometry = 
pImage->PCHSGeometry; 7258 7844 else 7259 7845 rc = VERR_VD_GEOMETRY_NOT_SET; 7846 7260 7847 LogFlowFunc(("returns %Rrc (PCHS=%u/%u/%u)\n", rc, pPCHSGeometry->cCylinders, pPCHSGeometry->cHeads, pPCHSGeometry->cSectors)); 7261 7848 return rc; 7262 7849 } 7850 7263 7851 /** @copydoc VDIMAGEBACKEND::pfnSetPCHSGeometry */ 7264 7852 static DECLCALLBACK(int) vmdkSetPCHSGeometry(void *pBackendData, PCVDGEOMETRY pPCHSGeometry) … … 7268 7856 PVMDKIMAGE pImage = (PVMDKIMAGE)pBackendData; 7269 7857 int rc = VINF_SUCCESS; 7858 7270 7859 AssertPtrReturn(pImage, VERR_VD_NOT_OPENED); 7860 7271 7861 if (!(pImage->uOpenFlags & VD_OPEN_FLAGS_READONLY)) 7272 7862 { … … 7282 7872 else 7283 7873 rc = VERR_VD_IMAGE_READ_ONLY; 7874 7284 7875 LogFlowFunc(("returns %Rrc\n", rc)); 7285 7876 return rc; 7286 7877 } 7878 7287 7879 /** @copydoc VDIMAGEBACKEND::pfnGetLCHSGeometry */ 7288 7880 static DECLCALLBACK(int) vmdkGetLCHSGeometry(void *pBackendData, PVDGEOMETRY pLCHSGeometry) … … 7291 7883 PVMDKIMAGE pImage = (PVMDKIMAGE)pBackendData; 7292 7884 int rc = VINF_SUCCESS; 7885 7293 7886 AssertPtrReturn(pImage, VERR_VD_NOT_OPENED); 7887 7294 7888 if (pImage->LCHSGeometry.cCylinders) 7295 7889 *pLCHSGeometry = pImage->LCHSGeometry; 7296 7890 else 7297 7891 rc = VERR_VD_GEOMETRY_NOT_SET; 7892 7298 7893 LogFlowFunc(("returns %Rrc (LCHS=%u/%u/%u)\n", rc, pLCHSGeometry->cCylinders, pLCHSGeometry->cHeads, pLCHSGeometry->cSectors)); 7299 7894 return rc; 7300 7895 } 7896 7301 7897 /** @copydoc VDIMAGEBACKEND::pfnSetLCHSGeometry */ 7302 7898 static DECLCALLBACK(int) vmdkSetLCHSGeometry(void *pBackendData, PCVDGEOMETRY pLCHSGeometry) … … 7306 7902 PVMDKIMAGE pImage = (PVMDKIMAGE)pBackendData; 7307 7903 int rc = VINF_SUCCESS; 7904 7308 7905 AssertPtrReturn(pImage, VERR_VD_NOT_OPENED); 7906 7309 7907 if (!(pImage->uOpenFlags & VD_OPEN_FLAGS_READONLY)) 7310 7908 { … … 7320 7918 else 7321 7919 rc = VERR_VD_IMAGE_READ_ONLY; 7920 7322 7921 LogFlowFunc(("returns %Rrc\n", rc)); 7323 7922 return rc; 7324 7923 } 7924 7325 7925 /** @copydoc VDIMAGEBACKEND::pfnQueryRegions */ 7326 7926 static DECLCALLBACK(int) vmdkQueryRegions(void *pBackendData, PCVDREGIONLIST *ppRegionList) … … 7328 7928 LogFlowFunc(("pBackendData=%#p ppRegionList=%#p\n", pBackendData, ppRegionList)); 7329 7929 PVMDKIMAGE pThis = (PVMDKIMAGE)pBackendData; 7930 7330 7931 AssertPtrReturn(pThis, VERR_VD_NOT_OPENED); 7932 7331 7933 *ppRegionList = &pThis->RegionList; 7332 7934 LogFlowFunc(("returns %Rrc\n", VINF_SUCCESS)); 7333 7935 return VINF_SUCCESS; 7334 7936 } 7937 7335 7938 /** @copydoc VDIMAGEBACKEND::pfnRegionListRelease */ 7336 7939 static DECLCALLBACK(void) vmdkRegionListRelease(void *pBackendData, PCVDREGIONLIST pRegionList) … … 7340 7943 PVMDKIMAGE pThis = (PVMDKIMAGE)pBackendData; 7341 7944 AssertPtr(pThis); RT_NOREF(pThis); 7945 7342 7946 /* Nothing to do here. 
*/ 7343 7947 } 7948 7344 7949 /** @copydoc VDIMAGEBACKEND::pfnGetImageFlags */ 7345 7950 static DECLCALLBACK(unsigned) vmdkGetImageFlags(void *pBackendData) … … 7347 7952 LogFlowFunc(("pBackendData=%#p\n", pBackendData)); 7348 7953 PVMDKIMAGE pImage = (PVMDKIMAGE)pBackendData; 7954 7349 7955 AssertPtrReturn(pImage, 0); 7956 7350 7957 LogFlowFunc(("returns %#x\n", pImage->uImageFlags)); 7351 7958 return pImage->uImageFlags; 7352 7959 } 7960 7353 7961 /** @copydoc VDIMAGEBACKEND::pfnGetOpenFlags */ 7354 7962 static DECLCALLBACK(unsigned) vmdkGetOpenFlags(void *pBackendData) … … 7356 7964 LogFlowFunc(("pBackendData=%#p\n", pBackendData)); 7357 7965 PVMDKIMAGE pImage = (PVMDKIMAGE)pBackendData; 7966 7358 7967 AssertPtrReturn(pImage, 0); 7968 7359 7969 LogFlowFunc(("returns %#x\n", pImage->uOpenFlags)); 7360 7970 return pImage->uOpenFlags; 7361 7971 } 7972 7362 7973 /** @copydoc VDIMAGEBACKEND::pfnSetOpenFlags */ 7363 7974 static DECLCALLBACK(int) vmdkSetOpenFlags(void *pBackendData, unsigned uOpenFlags) … … 7366 7977 PVMDKIMAGE pImage = (PVMDKIMAGE)pBackendData; 7367 7978 int rc; 7979 7368 7980 /* Image must be opened and the new flags must be valid. */ 7369 7981 if (!pImage || (uOpenFlags & ~( VD_OPEN_FLAGS_READONLY | VD_OPEN_FLAGS_INFO … … 7388 8000 } 7389 8001 } 8002 7390 8003 LogFlowFunc(("returns %Rrc\n", rc)); 7391 8004 return rc; 7392 8005 } 8006 7393 8007 /** @copydoc VDIMAGEBACKEND::pfnGetComment */ 7394 8008 static DECLCALLBACK(int) vmdkGetComment(void *pBackendData, char *pszComment, size_t cbComment) … … 7396 8010 LogFlowFunc(("pBackendData=%#p pszComment=%#p cbComment=%zu\n", pBackendData, pszComment, cbComment)); 7397 8011 PVMDKIMAGE pImage = (PVMDKIMAGE)pBackendData; 8012 7398 8013 AssertPtrReturn(pImage, VERR_VD_NOT_OPENED); 8014 7399 8015 char *pszCommentEncoded = NULL; 7400 8016 int rc = vmdkDescDDBGetStr(pImage, &pImage->Descriptor, … … 7405 8021 rc = VINF_SUCCESS; 7406 8022 } 8023 7407 8024 if (RT_SUCCESS(rc)) 7408 8025 { … … 7411 8028 else if (pszComment) 7412 8029 *pszComment = '\0'; 8030 7413 8031 if (pszCommentEncoded) 7414 8032 RTMemTmpFree(pszCommentEncoded); 7415 8033 } 8034 7416 8035 LogFlowFunc(("returns %Rrc comment='%s'\n", rc, pszComment)); 7417 8036 return rc; 7418 8037 } 8038 7419 8039 /** @copydoc VDIMAGEBACKEND::pfnSetComment */ 7420 8040 static DECLCALLBACK(int) vmdkSetComment(void *pBackendData, const char *pszComment) … … 7423 8043 PVMDKIMAGE pImage = (PVMDKIMAGE)pBackendData; 7424 8044 int rc; 8045 7425 8046 AssertPtrReturn(pImage, VERR_VD_NOT_OPENED); 8047 7426 8048 if (!(pImage->uOpenFlags & VD_OPEN_FLAGS_READONLY)) 7427 8049 { … … 7433 8055 else 7434 8056 rc = VERR_VD_IMAGE_READ_ONLY; 8057 7435 8058 LogFlowFunc(("returns %Rrc\n", rc)); 7436 8059 return rc; 7437 8060 } 8061 7438 8062 /** @copydoc VDIMAGEBACKEND::pfnGetUuid */ 7439 8063 static DECLCALLBACK(int) vmdkGetUuid(void *pBackendData, PRTUUID pUuid) … … 7441 8065 LogFlowFunc(("pBackendData=%#p pUuid=%#p\n", pBackendData, pUuid)); 7442 8066 PVMDKIMAGE pImage = (PVMDKIMAGE)pBackendData; 8067 7443 8068 AssertPtrReturn(pImage, VERR_VD_NOT_OPENED); 8069 7444 8070 *pUuid = pImage->ImageUuid; 8071 7445 8072 LogFlowFunc(("returns %Rrc (%RTuuid)\n", VINF_SUCCESS, pUuid)); 7446 8073 return VINF_SUCCESS; 7447 8074 } 8075 7448 8076 /** @copydoc VDIMAGEBACKEND::pfnSetUuid */ 7449 8077 static DECLCALLBACK(int) vmdkSetUuid(void *pBackendData, PCRTUUID pUuid) … … 7452 8080 PVMDKIMAGE pImage = (PVMDKIMAGE)pBackendData; 7453 8081 int rc = VINF_SUCCESS; 8082 7454 8083 AssertPtrReturn(pImage, VERR_VD_NOT_OPENED); 
8084 7455 8085 if (!(pImage->uOpenFlags & VD_OPEN_FLAGS_READONLY)) 7456 8086 { … … 7469 8099 else 7470 8100 rc = VERR_VD_IMAGE_READ_ONLY; 8101 7471 8102 LogFlowFunc(("returns %Rrc\n", rc)); 7472 8103 return rc; 7473 8104 } 8105 7474 8106 /** @copydoc VDIMAGEBACKEND::pfnGetModificationUuid */ 7475 8107 static DECLCALLBACK(int) vmdkGetModificationUuid(void *pBackendData, PRTUUID pUuid) … … 7477 8109 LogFlowFunc(("pBackendData=%#p pUuid=%#p\n", pBackendData, pUuid)); 7478 8110 PVMDKIMAGE pImage = (PVMDKIMAGE)pBackendData; 8111 7479 8112 AssertPtrReturn(pImage, VERR_VD_NOT_OPENED); 8113 7480 8114 *pUuid = pImage->ModificationUuid; 8115 7481 8116 LogFlowFunc(("returns %Rrc (%RTuuid)\n", VINF_SUCCESS, pUuid)); 7482 8117 return VINF_SUCCESS; 7483 8118 } 8119 7484 8120 /** @copydoc VDIMAGEBACKEND::pfnSetModificationUuid */ 7485 8121 static DECLCALLBACK(int) vmdkSetModificationUuid(void *pBackendData, PCRTUUID pUuid) … … 7488 8124 PVMDKIMAGE pImage = (PVMDKIMAGE)pBackendData; 7489 8125 int rc = VINF_SUCCESS; 8126 7490 8127 AssertPtrReturn(pImage, VERR_VD_NOT_OPENED); 8128 7491 8129 if (!(pImage->uOpenFlags & VD_OPEN_FLAGS_READONLY)) 7492 8130 { … … 7508 8146 else 7509 8147 rc = VERR_VD_IMAGE_READ_ONLY; 8148 7510 8149 LogFlowFunc(("returns %Rrc\n", rc)); 7511 8150 return rc; 7512 8151 } 8152 7513 8153 /** @copydoc VDIMAGEBACKEND::pfnGetParentUuid */ 7514 8154 static DECLCALLBACK(int) vmdkGetParentUuid(void *pBackendData, PRTUUID pUuid) … … 7516 8156 LogFlowFunc(("pBackendData=%#p pUuid=%#p\n", pBackendData, pUuid)); 7517 8157 PVMDKIMAGE pImage = (PVMDKIMAGE)pBackendData; 8158 7518 8159 AssertPtrReturn(pImage, VERR_VD_NOT_OPENED); 8160 7519 8161 *pUuid = pImage->ParentUuid; 8162 7520 8163 LogFlowFunc(("returns %Rrc (%RTuuid)\n", VINF_SUCCESS, pUuid)); 7521 8164 return VINF_SUCCESS; 7522 8165 } 8166 7523 8167 /** @copydoc VDIMAGEBACKEND::pfnSetParentUuid */ 7524 8168 static DECLCALLBACK(int) vmdkSetParentUuid(void *pBackendData, PCRTUUID pUuid) … … 7527 8171 PVMDKIMAGE pImage = (PVMDKIMAGE)pBackendData; 7528 8172 int rc = VINF_SUCCESS; 8173 7529 8174 AssertPtrReturn(pImage, VERR_VD_NOT_OPENED); 8175 7530 8176 if (!(pImage->uOpenFlags & VD_OPEN_FLAGS_READONLY)) 7531 8177 { … … 7544 8190 else 7545 8191 rc = VERR_VD_IMAGE_READ_ONLY; 8192 7546 8193 LogFlowFunc(("returns %Rrc\n", rc)); 7547 8194 return rc; 7548 8195 } 8196 7549 8197 /** @copydoc VDIMAGEBACKEND::pfnGetParentModificationUuid */ 7550 8198 static DECLCALLBACK(int) vmdkGetParentModificationUuid(void *pBackendData, PRTUUID pUuid) … … 7552 8200 LogFlowFunc(("pBackendData=%#p pUuid=%#p\n", pBackendData, pUuid)); 7553 8201 PVMDKIMAGE pImage = (PVMDKIMAGE)pBackendData; 8202 7554 8203 AssertPtrReturn(pImage, VERR_VD_NOT_OPENED); 8204 7555 8205 *pUuid = pImage->ParentModificationUuid; 8206 7556 8207 LogFlowFunc(("returns %Rrc (%RTuuid)\n", VINF_SUCCESS, pUuid)); 7557 8208 return VINF_SUCCESS; 7558 8209 } 8210 7559 8211 /** @copydoc VDIMAGEBACKEND::pfnSetParentModificationUuid */ 7560 8212 static DECLCALLBACK(int) vmdkSetParentModificationUuid(void *pBackendData, PCRTUUID pUuid) … … 7563 8215 PVMDKIMAGE pImage = (PVMDKIMAGE)pBackendData; 7564 8216 int rc = VINF_SUCCESS; 8217 7565 8218 AssertPtrReturn(pImage, VERR_VD_NOT_OPENED); 8219 7566 8220 if (!(pImage->uOpenFlags & VD_OPEN_FLAGS_READONLY)) 7567 8221 { … … 7579 8233 else 7580 8234 rc = VERR_VD_IMAGE_READ_ONLY; 8235 7581 8236 LogFlowFunc(("returns %Rrc\n", rc)); 7582 8237 return rc; 7583 8238 } 8239 7584 8240 /** @copydoc VDIMAGEBACKEND::pfnDump */ 7585 8241 static DECLCALLBACK(void) vmdkDump(void 
*pBackendData) 7586 8242 { 7587 8243 PVMDKIMAGE pImage = (PVMDKIMAGE)pBackendData; 8244 7588 8245 AssertPtrReturnVoid(pImage); 7589 8246 vdIfErrorMessage(pImage->pIfError, "Header: Geometry PCHS=%u/%u/%u LCHS=%u/%u/%u cbSector=%llu\n", … … 7597 8254 } 7598 8255 7599 /** 7600 * Returns the size, in bytes, of the sparse extent overhead for 7601 * the number of desired total sectors and based on the current 7602 * sectors of the extent. 7603 * 7604 * @returns uint64_t size of new overhead in bytes. 7605 * @param pExtent VMDK extent instance. 7606 * @param cSectorsNew Number of desired total sectors. 7607 */ 7608 static uint64_t vmdkGetNewOverhead(PVMDKEXTENT pExtent, uint64_t cSectorsNew) 7609 { 7610 uint64_t cNewDirEntries = cSectorsNew / pExtent->cSectorsPerGDE; 7611 if (cSectorsNew % pExtent->cSectorsPerGDE) 7612 cNewDirEntries++; 7613 7614 size_t cbNewGD = cNewDirEntries * sizeof(uint32_t); 7615 uint64_t cbNewDirSize = RT_ALIGN_64(cbNewGD, 512); 7616 uint64_t cbNewAllTablesSize = RT_ALIGN_64(cNewDirEntries * pExtent->cGTEntries * sizeof(uint32_t), 512); 7617 uint64_t cbNewOverhead = RT_ALIGN_Z(RT_MAX(pExtent->uDescriptorSector 7618 + pExtent->cDescriptorSectors, 1) 7619 + cbNewDirSize + cbNewAllTablesSize, 512); 7620 cbNewOverhead += cbNewDirSize + cbNewAllTablesSize; 7621 cbNewOverhead = RT_ALIGN_64(cbNewOverhead, 7622 VMDK_SECTOR2BYTE(pExtent->cSectorsPerGrain)); 7623 7624 return cbNewOverhead; 7625 } 7626 7627 /** 7628 * Internal: Replaces the size (in sectors) of an extent in the descriptor file. 7629 * 7630 * @returns VBox status code. 7631 * @param pImage VMDK image instance. 7632 * @param uLine Line number of descriptor to change. 7633 * @param cSectorsOld Existing number of sectors. 7634 * @param cSectorsNew New number of sectors. 7635 */ 7636 static int vmdkReplaceExtentSize(PVMDKIMAGE pImage, PVMDKEXTENT pExtent, unsigned uLine, uint64_t cSectorsOld, 7637 uint64_t cSectorsNew) 7638 { 7639 char szOldExtentSectors[UINT64_MAX_BUFF_SIZE]; 7640 char szNewExtentSectors[UINT64_MAX_BUFF_SIZE]; 7641 7642 ssize_t cbWritten = RTStrPrintf2(szOldExtentSectors, sizeof(szOldExtentSectors), "%llu", cSectorsOld); 7643 if (cbWritten <= 0 || cbWritten > (ssize_t)sizeof(szOldExtentSectors)) 8256 static int vmdkRepaceExtentSize(PVMDKIMAGE pImage, unsigned line, uint64_t cSectorsOld, 8257 uint64_t cSectorsNew) 8258 { 8259 char * szOldExtentSectors = (char *)RTMemAlloc(UINT64_MAX_BUFF_SIZE); 8260 if (!szOldExtentSectors) 8261 return VERR_NO_MEMORY; 8262 8263 int cbWritten = RTStrPrintf2(szOldExtentSectors, UINT64_MAX_BUFF_SIZE, "%llu", cSectorsOld); 8264 if (cbWritten <= 0 || cbWritten > UINT64_MAX_BUFF_SIZE) 8265 { 8266 RTMemFree(szOldExtentSectors); 8267 szOldExtentSectors = NULL; 8268 7644 8269 return VERR_BUFFER_OVERFLOW; 7645 7646 cbWritten = RTStrPrintf2(szNewExtentSectors, sizeof(szNewExtentSectors), "%llu", cSectorsNew); 7647 if (cbWritten <= 0 || cbWritten > (ssize_t)sizeof(szNewExtentSectors)) 8270 } 8271 8272 char * szNewExtentSectors = (char *)RTMemAlloc(UINT64_MAX_BUFF_SIZE); 8273 if (!szNewExtentSectors) 8274 return VERR_NO_MEMORY; 8275 8276 cbWritten = RTStrPrintf2(szNewExtentSectors, UINT64_MAX_BUFF_SIZE, "%llu", cSectorsNew); 8277 if (cbWritten <= 0 || cbWritten > UINT64_MAX_BUFF_SIZE) 8278 { 8279 RTMemFree(szOldExtentSectors); 8280 szOldExtentSectors = NULL; 8281 8282 RTMemFree(szNewExtentSectors); 8283 szNewExtentSectors = NULL; 8284 7648 8285 return VERR_BUFFER_OVERFLOW; 7649 7650 char *pszNewExtentLine = vmdkStrReplace(pImage->Descriptor.aLines[uLine], 8286 } 8287 8288 char * 
szNewExtentLine = vmdkStrReplace(pImage->Descriptor.aLines[line], 7651 8289 szOldExtentSectors, 7652 8290 szNewExtentSectors); 7653 8291 7654 if (RT_UNLIKELY(!pszNewExtentLine)) 8292 RTMemFree(szOldExtentSectors); 8293 szOldExtentSectors = NULL; 8294 8295 RTMemFree(szNewExtentSectors); 8296 szNewExtentSectors = NULL; 8297 8298 if (!szNewExtentLine) 7655 8299 return VERR_INVALID_PARAMETER; 7656 8300 7657 vmdkDescExtRemoveByLine(pImage, &pImage->Descriptor, uLine); 7658 vmdkDescExtInsert(pImage, &pImage->Descriptor, 7659 pExtent->enmAccess, cSectorsNew, 7660 pExtent->enmType, pExtent->pszBasename, pExtent->uSectorOffset); 7661 7662 RTStrFree(pszNewExtentLine); 7663 pszNewExtentLine = NULL; 7664 7665 pImage->Descriptor.fDirty = true; 8301 pImage->Descriptor.aLines[line] = szNewExtentLine; 7666 8302 7667 8303 return VINF_SUCCESS; 7668 }7669 7670 /**7671 * Moves sectors down to make room for new overhead.7672 * Used for sparse extent resize.7673 *7674 * @returns VBox status code.7675 * @param pImage VMDK image instance.7676 * @param pExtent VMDK extent instance.7677 * @param cSectorsNew Number of sectors after resize.7678 */7679 static int vmdkRelocateSectorsForSparseResize(PVMDKIMAGE pImage, PVMDKEXTENT pExtent,7680 uint64_t cSectorsNew)7681 {7682 int rc = VINF_SUCCESS;7683 7684 uint64_t cbNewOverhead = vmdkGetNewOverhead(pExtent, cSectorsNew);7685 7686 uint64_t cNewOverheadSectors = VMDK_BYTE2SECTOR(cbNewOverhead);7687 uint64_t cOverheadSectorDiff = cNewOverheadSectors - pExtent->cOverheadSectors;7688 7689 uint64_t cbFile = 0;7690 rc = vdIfIoIntFileGetSize(pImage->pIfIo, pExtent->pFile->pStorage, &cbFile);7691 7692 uint64_t uNewAppendPosition;7693 7694 /* Calculate how many sectors need to be relocated. */7695 unsigned cSectorsReloc = cOverheadSectorDiff;7696 if (cbNewOverhead % VMDK_SECTOR_SIZE)7697 cSectorsReloc++;7698 7699 if (cSectorsReloc < pExtent->cSectors)7700 uNewAppendPosition = RT_ALIGN_Z(cbFile + VMDK_SECTOR2BYTE(cOverheadSectorDiff), 512);7701 else7702 uNewAppendPosition = cbFile;7703 7704 /*7705 * Get the blocks we need to relocate first, they are appended to the end7706 * of the image.7707 */7708 void *pvBuf = NULL, *pvZero = NULL;7709 do7710 {7711 /* Allocate data buffer. */7712 pvBuf = RTMemAllocZ(VMDK_SECTOR2BYTE(pExtent->cSectorsPerGrain));7713 if (!pvBuf)7714 {7715 rc = VERR_NO_MEMORY;7716 break;7717 }7718 7719 /* Allocate buffer for overwriting with zeroes. */7720 pvZero = RTMemAllocZ(VMDK_SECTOR2BYTE(pExtent->cSectorsPerGrain));7721 if (!pvZero)7722 {7723 RTMemFree(pvBuf);7724 pvBuf = NULL;7725 7726 rc = VERR_NO_MEMORY;7727 break;7728 }7729 7730 uint32_t *aGTDataTmp = (uint32_t *)RTMemAllocZ(sizeof(uint32_t) * pExtent->cGTEntries);7731 if(!aGTDataTmp)7732 {7733 RTMemFree(pvBuf);7734 pvBuf = NULL;7735 7736 RTMemFree(pvZero);7737 pvZero = NULL;7738 7739 rc = VERR_NO_MEMORY;7740 break;7741 }7742 7743 uint32_t *aRGTDataTmp = (uint32_t *)RTMemAllocZ(sizeof(uint32_t) * pExtent->cGTEntries);7744 if(!aRGTDataTmp)7745 {7746 RTMemFree(pvBuf);7747 pvBuf = NULL;7748 7749 RTMemFree(pvZero);7750 pvZero = NULL;7751 7752 RTMemFree(aGTDataTmp);7753 aGTDataTmp = NULL;7754 7755 rc = VERR_NO_MEMORY;7756 break;7757 }7758 7759 /* Search for overlap sector in the grain table. 
*/7760 for (uint32_t idxGD = 0; idxGD < pExtent->cGDEntries; idxGD++)7761 {7762 uint64_t uGTSector = pExtent->pGD[idxGD];7763 uint64_t uRGTSector = pExtent->pRGD[idxGD];7764 7765 rc = vdIfIoIntFileReadSync(pImage->pIfIo, pExtent->pFile->pStorage,7766 VMDK_SECTOR2BYTE(uGTSector),7767 aGTDataTmp, sizeof(uint32_t) * pExtent->cGTEntries);7768 7769 if (RT_FAILURE(rc))7770 break;7771 7772 rc = vdIfIoIntFileReadSync(pImage->pIfIo, pExtent->pFile->pStorage,7773 VMDK_SECTOR2BYTE(uRGTSector),7774 aRGTDataTmp, sizeof(uint32_t) * pExtent->cGTEntries);7775 7776 if (RT_FAILURE(rc))7777 break;7778 7779 for (uint32_t idxGT = 0; idxGT < pExtent->cGTEntries; idxGT++)7780 {7781 uint64_t aGTEntryLE = RT_LE2H_U64(aGTDataTmp[idxGT]);7782 uint64_t aRGTEntryLE = RT_LE2H_U64(aRGTDataTmp[idxGT]);7783 7784 /**7785 * Check if grain table is valid. If not dump out with an error.7786 * Shoudln't ever get here (given other checks) but good sanity check.7787 */7788 if (aGTEntryLE != aRGTEntryLE)7789 {7790 rc = vdIfError(pImage->pIfError, VERR_VD_VMDK_INVALID_HEADER, RT_SRC_POS,7791 N_("VMDK: inconsistent references within grain table in '%s'"), pExtent->pszFullname);7792 break;7793 }7794 7795 if (aGTEntryLE < cNewOverheadSectors7796 && aGTEntryLE != 0)7797 {7798 /* Read data and append grain to the end of the image. */7799 rc = vdIfIoIntFileReadSync(pImage->pIfIo, pExtent->pFile->pStorage,7800 VMDK_SECTOR2BYTE(aGTEntryLE), pvBuf,7801 VMDK_SECTOR2BYTE(pExtent->cSectorsPerGrain));7802 if (RT_FAILURE(rc))7803 break;7804 7805 rc = vdIfIoIntFileWriteSync(pImage->pIfIo, pExtent->pFile->pStorage,7806 uNewAppendPosition, pvBuf,7807 VMDK_SECTOR2BYTE(pExtent->cSectorsPerGrain));7808 if (RT_FAILURE(rc))7809 break;7810 7811 /* Zero out the old block area. */7812 rc = vdIfIoIntFileWriteSync(pImage->pIfIo, pExtent->pFile->pStorage,7813 VMDK_SECTOR2BYTE(aGTEntryLE), pvZero,7814 VMDK_SECTOR2BYTE(pExtent->cSectorsPerGrain));7815 if (RT_FAILURE(rc))7816 break;7817 7818 /* Write updated grain tables to file */7819 aGTDataTmp[idxGT] = VMDK_BYTE2SECTOR(uNewAppendPosition);7820 aRGTDataTmp[idxGT] = VMDK_BYTE2SECTOR(uNewAppendPosition);7821 7822 if (memcmp(aGTDataTmp, aRGTDataTmp, sizeof(uint32_t) * pExtent->cGTEntries))7823 {7824 rc = vdIfError(pImage->pIfError, VERR_VD_VMDK_INVALID_HEADER, RT_SRC_POS,7825 N_("VMDK: inconsistency between grain table and backup grain table in '%s'"), pExtent->pszFullname);7826 break;7827 }7828 7829 rc = vdIfIoIntFileWriteSync(pImage->pIfIo, pExtent->pFile->pStorage,7830 VMDK_SECTOR2BYTE(uGTSector),7831 aGTDataTmp, sizeof(uint32_t) * pExtent->cGTEntries);7832 7833 if (RT_FAILURE(rc))7834 break;7835 7836 rc = vdIfIoIntFileWriteSync(pImage->pIfIo, pExtent->pFile->pStorage,7837 VMDK_SECTOR2BYTE(uRGTSector),7838 aRGTDataTmp, sizeof(uint32_t) * pExtent->cGTEntries);7839 7840 break;7841 }7842 }7843 }7844 7845 RTMemFree(aGTDataTmp);7846 aGTDataTmp = NULL;7847 7848 RTMemFree(aRGTDataTmp);7849 aRGTDataTmp = NULL;7850 7851 if (RT_FAILURE(rc))7852 break;7853 7854 uNewAppendPosition += VMDK_SECTOR2BYTE(pExtent->cSectorsPerGrain);7855 } while (0);7856 7857 if (pvBuf)7858 {7859 RTMemFree(pvBuf);7860 pvBuf = NULL;7861 }7862 7863 if (pvZero)7864 {7865 RTMemFree(pvZero);7866 pvZero = NULL;7867 }7868 7869 // Update append position for extent7870 pExtent->uAppendPosition = uNewAppendPosition;7871 7872 return rc;7873 }7874 7875 /**7876 * Resizes meta/overhead for sparse extent resize.7877 *7878 * @returns VBox status code.7879 * @param pImage VMDK image instance.7880 * @param pExtent VMDK extent instance.7881 * @param cSectorsNew 
Number of sectors after resize.7882 */7883 static int vmdkResizeSparseMeta(PVMDKIMAGE pImage, PVMDKEXTENT pExtent,7884 uint64_t cSectorsNew)7885 {7886 int rc = VINF_SUCCESS;7887 uint32_t cOldGDEntries = pExtent->cGDEntries;7888 7889 uint64_t cNewDirEntries = cSectorsNew / pExtent->cSectorsPerGDE;7890 if (cSectorsNew % pExtent->cSectorsPerGDE)7891 cNewDirEntries++;7892 7893 size_t cbNewGD = cNewDirEntries * sizeof(uint32_t);7894 7895 uint64_t cbNewDirSize = RT_ALIGN_64(cbNewGD, 512);7896 uint64_t cbCurrDirSize = RT_ALIGN_64(pExtent->cGDEntries * VMDK_GRAIN_DIR_ENTRY_SIZE, 512);7897 uint64_t cDirSectorDiff = VMDK_BYTE2SECTOR(cbNewDirSize - cbCurrDirSize);7898 7899 uint64_t cbNewAllTablesSize = RT_ALIGN_64(cNewDirEntries * pExtent->cGTEntries * sizeof(uint32_t), 512);7900 uint64_t cbCurrAllTablesSize = RT_ALIGN_64(pExtent->cGDEntries * VMDK_GRAIN_TABLE_SIZE, 512);7901 uint64_t cTableSectorDiff = VMDK_BYTE2SECTOR(cbNewAllTablesSize - cbCurrAllTablesSize);7902 7903 uint64_t cbNewOverhead = vmdkGetNewOverhead(pExtent, cSectorsNew);7904 uint64_t cNewOverheadSectors = VMDK_BYTE2SECTOR(cbNewOverhead);7905 uint64_t cOverheadSectorDiff = cNewOverheadSectors - pExtent->cOverheadSectors;7906 7907 /*7908 * Get the blocks we need to relocate first, they are appended to the end7909 * of the image.7910 */7911 void *pvBuf = NULL, *pvZero = NULL;7912 7913 do7914 {7915 /* Allocate data buffer. */7916 pvBuf = RTMemAllocZ(VMDK_GRAIN_TABLE_SIZE);7917 if (!pvBuf)7918 {7919 rc = VERR_NO_MEMORY;7920 break;7921 }7922 7923 /* Allocate buffer for overwriting with zeroes. */7924 pvZero = RTMemAllocZ(VMDK_GRAIN_TABLE_SIZE);7925 if (!pvZero)7926 {7927 RTMemFree(pvBuf);7928 pvBuf = NULL;7929 7930 rc = VERR_NO_MEMORY;7931 break;7932 }7933 7934 uint32_t uGTStart = VMDK_SECTOR2BYTE(pExtent->uSectorGD) + (cOldGDEntries * VMDK_GRAIN_DIR_ENTRY_SIZE);7935 7936 // points to last element in the grain table7937 uint32_t uGTTail = uGTStart + (pExtent->cGDEntries * VMDK_GRAIN_TABLE_SIZE) - VMDK_GRAIN_TABLE_SIZE;7938 uint32_t cbGTOff = RT_ALIGN_Z(VMDK_SECTOR2BYTE(cDirSectorDiff + cTableSectorDiff + cDirSectorDiff), 512);7939 7940 for (int i = pExtent->cGDEntries - 1; i >= 0; i--)7941 {7942 rc = vdIfIoIntFileReadSync(pImage->pIfIo, pExtent->pFile->pStorage,7943 uGTTail, pvBuf,7944 VMDK_GRAIN_TABLE_SIZE);7945 if (RT_FAILURE(rc))7946 break;7947 7948 rc = vdIfIoIntFileWriteSync(pImage->pIfIo, pExtent->pFile->pStorage,7949 RT_ALIGN_Z(uGTTail + cbGTOff, 512), pvBuf,7950 VMDK_GRAIN_TABLE_SIZE);7951 if (RT_FAILURE(rc))7952 break;7953 7954 // This overshoots when i == 0, but we don't need it anymore.7955 uGTTail -= VMDK_GRAIN_TABLE_SIZE;7956 }7957 7958 7959 /* Find the end of the grain directory and start bumping everything down. Update locations of GT entries. 
*/7960 rc = vdIfIoIntFileReadSync(pImage->pIfIo, pExtent->pFile->pStorage,7961 VMDK_SECTOR2BYTE(pExtent->uSectorGD), pvBuf,7962 pExtent->cGDEntries * VMDK_GRAIN_DIR_ENTRY_SIZE);7963 if (RT_FAILURE(rc))7964 break;7965 7966 int * tmpBuf = (int *)pvBuf;7967 7968 for (uint32_t i = 0; i < pExtent->cGDEntries; i++)7969 {7970 tmpBuf[i] = tmpBuf[i] + VMDK_BYTE2SECTOR(cbGTOff);7971 pExtent->pGD[i] = pExtent->pGD[i] + VMDK_BYTE2SECTOR(cbGTOff);7972 }7973 7974 rc = vdIfIoIntFileWriteSync(pImage->pIfIo, pExtent->pFile->pStorage,7975 RT_ALIGN_Z(VMDK_SECTOR2BYTE(pExtent->uSectorGD + cTableSectorDiff + cDirSectorDiff), 512), pvBuf,7976 pExtent->cGDEntries * VMDK_GRAIN_DIR_ENTRY_SIZE);7977 if (RT_FAILURE(rc))7978 break;7979 7980 pExtent->uSectorGD = pExtent->uSectorGD + cDirSectorDiff + cTableSectorDiff;7981 7982 /* Repeat both steps with the redundant grain table/directory. */7983 7984 uint32_t uRGTStart = VMDK_SECTOR2BYTE(pExtent->uSectorRGD) + (cOldGDEntries * VMDK_GRAIN_DIR_ENTRY_SIZE);7985 7986 // points to last element in the grain table7987 uint32_t uRGTTail = uRGTStart + (pExtent->cGDEntries * VMDK_GRAIN_TABLE_SIZE) - VMDK_GRAIN_TABLE_SIZE;7988 uint32_t cbRGTOff = RT_ALIGN_Z(VMDK_SECTOR2BYTE(cDirSectorDiff), 512);7989 7990 for (int i = pExtent->cGDEntries - 1; i >= 0; i--)7991 {7992 rc = vdIfIoIntFileReadSync(pImage->pIfIo, pExtent->pFile->pStorage,7993 uRGTTail, pvBuf,7994 VMDK_GRAIN_TABLE_SIZE);7995 if (RT_FAILURE(rc))7996 break;7997 7998 rc = vdIfIoIntFileWriteSync(pImage->pIfIo, pExtent->pFile->pStorage,7999 RT_ALIGN_Z(uRGTTail + cbRGTOff, 512), pvBuf,8000 VMDK_GRAIN_TABLE_SIZE);8001 if (RT_FAILURE(rc))8002 break;8003 8004 // This overshoots when i == 0, but we don't need it anymore.8005 uRGTTail -= VMDK_GRAIN_TABLE_SIZE;8006 }8007 8008 /* Update locations of GT entries. */8009 rc = vdIfIoIntFileReadSync(pImage->pIfIo, pExtent->pFile->pStorage,8010 VMDK_SECTOR2BYTE(pExtent->uSectorRGD), pvBuf,8011 pExtent->cGDEntries * VMDK_GRAIN_DIR_ENTRY_SIZE);8012 if (RT_FAILURE(rc))8013 break;8014 8015 tmpBuf = (int *)pvBuf;8016 8017 for (uint32_t i = 0; i < pExtent->cGDEntries; i++)8018 {8019 tmpBuf[i] = tmpBuf[i] + cDirSectorDiff;8020 pExtent->pRGD[i] = pExtent->pRGD[i] + cDirSectorDiff;8021 }8022 8023 rc = vdIfIoIntFileWriteSync(pImage->pIfIo, pExtent->pFile->pStorage,8024 VMDK_SECTOR2BYTE(pExtent->uSectorRGD), pvBuf,8025 pExtent->cGDEntries * VMDK_GRAIN_DIR_ENTRY_SIZE);8026 if (RT_FAILURE(rc))8027 break;8028 8029 pExtent->uSectorRGD = pExtent->uSectorRGD;8030 pExtent->cOverheadSectors += cOverheadSectorDiff;8031 8032 } while (0);8033 8034 if (pvBuf)8035 {8036 RTMemFree(pvBuf);8037 pvBuf = NULL;8038 }8039 8040 if (pvZero)8041 {8042 RTMemFree(pvZero);8043 pvZero = NULL;8044 }8045 8046 pExtent->cGDEntries = cNewDirEntries;8047 8048 /* Allocate buffer for overwriting with zeroes. 
*/8049 pvZero = RTMemAllocZ(VMDK_GRAIN_TABLE_SIZE);8050 if (!pvZero)8051 return VERR_NO_MEMORY;8052 8053 // Allocate additional grain dir8054 pExtent->pGD = (uint32_t *) RTMemReallocZ(pExtent->pGD, pExtent->cGDEntries * sizeof(uint32_t), cbNewGD);8055 if (RT_LIKELY(pExtent->pGD))8056 {8057 if (pExtent->uSectorRGD)8058 {8059 pExtent->pRGD = (uint32_t *)RTMemReallocZ(pExtent->pRGD, pExtent->cGDEntries * sizeof(uint32_t), cbNewGD);8060 if (RT_UNLIKELY(!pExtent->pRGD))8061 rc = VERR_NO_MEMORY;8062 }8063 }8064 else8065 return VERR_NO_MEMORY;8066 8067 8068 uint32_t uTmpDirVal = pExtent->pGD[cOldGDEntries - 1] + VMDK_GRAIN_DIR_ENTRY_SIZE;8069 for (uint32_t i = cOldGDEntries; i < pExtent->cGDEntries; i++)8070 {8071 pExtent->pGD[i] = uTmpDirVal;8072 8073 rc = vdIfIoIntFileWriteSync(pImage->pIfIo, pExtent->pFile->pStorage,8074 VMDK_SECTOR2BYTE(uTmpDirVal), pvZero,8075 VMDK_GRAIN_TABLE_SIZE);8076 8077 if (RT_FAILURE(rc))8078 return rc;8079 8080 uTmpDirVal += VMDK_GRAIN_DIR_ENTRY_SIZE;8081 }8082 8083 uint32_t uRTmpDirVal = pExtent->pRGD[cOldGDEntries - 1] + VMDK_GRAIN_DIR_ENTRY_SIZE;8084 for (uint32_t i = cOldGDEntries; i < pExtent->cGDEntries; i++)8085 {8086 pExtent->pRGD[i] = uRTmpDirVal;8087 8088 rc = vdIfIoIntFileWriteSync(pImage->pIfIo, pExtent->pFile->pStorage,8089 VMDK_SECTOR2BYTE(uRTmpDirVal), pvZero,8090 VMDK_GRAIN_TABLE_SIZE);8091 8092 if (RT_FAILURE(rc))8093 return rc;8094 8095 uRTmpDirVal += VMDK_GRAIN_DIR_ENTRY_SIZE;8096 }8097 8098 RTMemFree(pvZero);8099 pvZero = NULL;8100 8101 rc = vdIfIoIntFileWriteSync(pImage->pIfIo, pExtent->pFile->pStorage,8102 VMDK_SECTOR2BYTE(pExtent->uSectorGD), pExtent->pGD,8103 pExtent->cGDEntries * VMDK_GRAIN_DIR_ENTRY_SIZE);8104 if (RT_FAILURE(rc))8105 return rc;8106 8107 rc = vdIfIoIntFileWriteSync(pImage->pIfIo, pExtent->pFile->pStorage,8108 VMDK_SECTOR2BYTE(pExtent->uSectorRGD), pExtent->pRGD,8109 pExtent->cGDEntries * VMDK_GRAIN_DIR_ENTRY_SIZE);8110 if (RT_FAILURE(rc))8111 return rc;8112 8113 rc = vmdkReplaceExtentSize(pImage, pExtent, pImage->Descriptor.uFirstExtent + pExtent->uExtent,8114 pExtent->cNominalSectors, cSectorsNew);8115 if (RT_FAILURE(rc))8116 return rc;8117 8118 return rc;8119 8304 } 8120 8305 … … 8133 8318 unsigned uImageFlags = pImage->uImageFlags; 8134 8319 PVMDKEXTENT pExtent = &pImage->pExtents[0]; 8135 pExtent->fMetaDirty = true;8136 8320 8137 8321 uint64_t cSectorsNew = cbSize / VMDK_SECTOR_SIZE; /** < New number of sectors in the image after the resize */ … … 8154 8338 */ 8155 8339 /** @todo implement making the image smaller, it is the responsibility of 8156 * the user to know what they'redoing. */8340 * the user to know what he's doing. */ 8157 8341 if (cbSize < pImage->cbSize) 8158 8342 rc = VERR_VD_SHRINK_NOT_SUPPORTED; … … 8174 8358 return vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: could not set size of new file '%s'"), pExtent->pszFullname); 8175 8359 8176 rc = vmdkRep laceExtentSize(pImage, pExtent, pImage->Descriptor.uFirstExtent, cSectorsOld, cSectorsNew);8360 rc = vmdkRepaceExtentSize(pImage, pImage->Descriptor.uFirstExtent, cSectorsOld, cSectorsNew); 8177 8361 if (RT_FAILURE(rc)) 8178 8362 return rc; … … 8191 8375 8192 8376 uint64_t cSectorsNeeded = cSectorsNew - cSectorsOld; 8193 8194 /** Space remaining in current last extent file that we don't need to create another one. 
*/8195 8377 if (fSpaceAvailible && cSectorsNeeded + cLastExtentRemSectors <= VMDK_BYTE2SECTOR(VMDK_2G_SPLIT_SIZE)) 8196 8378 { … … 8202 8384 return vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: could not set size of new file '%s'"), pExtent->pszFullname); 8203 8385 8204 rc = vmdkRep laceExtentSize(pImage, pExtent, pImage->Descriptor.uFirstExtent + cExtents - 1,8205 8386 rc = vmdkRepaceExtentSize(pImage, pImage->Descriptor.uFirstExtent + cExtents - 1, 8387 pExtent->cNominalSectors, cSectorsNeeded + cLastExtentRemSectors); 8206 8388 if (RT_FAILURE(rc)) 8207 8389 return rc; 8208 8390 } 8209 //** Need more extent files to handle all the requested space. */8210 8391 else 8211 8392 { … … 8221 8402 cSectorsNeeded = cSectorsNeeded - VMDK_BYTE2SECTOR(VMDK_2G_SPLIT_SIZE) + cLastExtentRemSectors; 8222 8403 8223 rc = vmdkRep laceExtentSize(pImage, pExtent, pImage->Descriptor.uFirstExtent + cExtents - 1,8224 8404 rc = vmdkRepaceExtentSize(pImage, pImage->Descriptor.uFirstExtent + cExtents - 1, 8405 pExtent->cNominalSectors, VMDK_BYTE2SECTOR(VMDK_2G_SPLIT_SIZE)); 8225 8406 if (RT_FAILURE(rc)) 8226 8407 return rc; … … 8254 8435 } 8255 8436 8256 /**8257 * monolithicSparse.8258 */8259 if (pExtent->enmType == VMDKETYPE_HOSTED_SPARSE && !(uImageFlags & VD_VMDK_IMAGE_FLAGS_SPLIT_2G))8260 {8261 // 1. Calculate sectors needed for new overhead.8262 8263 uint64_t cbNewOverhead = vmdkGetNewOverhead(pExtent, cSectorsNew);8264 uint64_t cNewOverheadSectors = VMDK_BYTE2SECTOR(cbNewOverhead);8265 uint64_t cOverheadSectorDiff = cNewOverheadSectors - pExtent->cOverheadSectors;8266 8267 // 2. Relocate sectors to make room for new GD/GT, update entries in GD/GT8268 if (cOverheadSectorDiff > 0)8269 {8270 if (pExtent->cSectors > 0)8271 {8272 /* Do the relocation. */8273 LogFlow(("Relocating VMDK sectors\n"));8274 rc = vmdkRelocateSectorsForSparseResize(pImage, pExtent, cSectorsNew);8275 if (RT_FAILURE(rc))8276 return rc;8277 8278 rc = vmdkFlushImage(pImage, NULL);8279 if (RT_FAILURE(rc))8280 return rc;8281 }8282 8283 rc = vmdkResizeSparseMeta(pImage, pExtent, cSectorsNew);8284 if (RT_FAILURE(rc))8285 return rc;8286 }8287 }8288 8289 /**8290 * twoGbSparseExtent8291 */8292 if (pExtent->enmType == VMDKETYPE_HOSTED_SPARSE && (uImageFlags & VD_VMDK_IMAGE_FLAGS_SPLIT_2G))8293 {8294 /* Check to see how much space remains in last extent */8295 bool fSpaceAvailible = false;8296 uint64_t cLastExtentRemSectors = cSectorsOld % VMDK_BYTE2SECTOR(VMDK_2G_SPLIT_SIZE);8297 if (cLastExtentRemSectors)8298 fSpaceAvailible = true;8299 8300 uint64_t cSectorsNeeded = cSectorsNew - cSectorsOld;8301 8302 if (fSpaceAvailible && cSectorsNeeded + cLastExtentRemSectors <= VMDK_BYTE2SECTOR(VMDK_2G_SPLIT_SIZE))8303 {8304 pExtent = &pImage->pExtents[cExtents - 1];8305 rc = vmdkRelocateSectorsForSparseResize(pImage, pExtent, cSectorsNeeded + cLastExtentRemSectors);8306 if (RT_FAILURE(rc))8307 return rc;8308 8309 rc = vmdkFlushImage(pImage, NULL);8310 if (RT_FAILURE(rc))8311 return rc;8312 8313 rc = vmdkResizeSparseMeta(pImage, pExtent, cSectorsNeeded + cLastExtentRemSectors);8314 if (RT_FAILURE(rc))8315 return rc;8316 }8317 else8318 {8319 if (fSpaceAvailible)8320 {8321 pExtent = &pImage->pExtents[cExtents - 1];8322 rc = vmdkRelocateSectorsForSparseResize(pImage, pExtent, VMDK_BYTE2SECTOR(VMDK_2G_SPLIT_SIZE));8323 if (RT_FAILURE(rc))8324 return rc;8325 8326 rc = vmdkFlushImage(pImage, NULL);8327 if (RT_FAILURE(rc))8328 return rc;8329 8330 rc = vmdkResizeSparseMeta(pImage, pExtent, VMDK_BYTE2SECTOR(VMDK_2G_SPLIT_SIZE));8331 if (RT_FAILURE(rc))8332 
return rc;8333 8334 cSectorsNeeded = cSectorsNeeded - VMDK_BYTE2SECTOR(VMDK_2G_SPLIT_SIZE) + cLastExtentRemSectors;8335 }8336 8337 unsigned cNewExtents = VMDK_SECTOR2BYTE(cSectorsNeeded) / VMDK_2G_SPLIT_SIZE;8338 if (cNewExtents % VMDK_2G_SPLIT_SIZE || cNewExtents < VMDK_2G_SPLIT_SIZE)8339 cNewExtents++;8340 8341 for (unsigned i = cExtents;8342 i < cExtents + cNewExtents && cSectorsNeeded >= VMDK_BYTE2SECTOR(VMDK_2G_SPLIT_SIZE);8343 i++)8344 {8345 rc = vmdkAddFileBackedExtent(pImage, VMDK_2G_SPLIT_SIZE);8346 if (RT_FAILURE(rc))8347 return rc;8348 8349 pExtent = &pImage->pExtents[i];8350 8351 rc = vmdkFlushImage(pImage, NULL);8352 if (RT_FAILURE(rc))8353 return rc;8354 8355 pExtent->cSectors = VMDK_BYTE2SECTOR(VMDK_2G_SPLIT_SIZE);8356 cSectorsNeeded -= VMDK_BYTE2SECTOR(VMDK_2G_SPLIT_SIZE);8357 }8358 8359 if (cSectorsNeeded)8360 {8361 rc = vmdkAddFileBackedExtent(pImage, VMDK_SECTOR2BYTE(cSectorsNeeded));8362 if (RT_FAILURE(rc))8363 return rc;8364 8365 pExtent = &pImage->pExtents[pImage->cExtents];8366 8367 rc = vmdkFlushImage(pImage, NULL);8368 if (RT_FAILURE(rc))8369 return rc;8370 }8371 }8372 }8373 8374 8437 /* Successful resize. Update metadata */ 8375 8438 if (RT_SUCCESS(rc)) … … 8377 8440 /* Update size and new block count. */ 8378 8441 pImage->cbSize = cbSize; 8379 pExtent->cNominalSectors = cSectorsNew;8380 pExtent->c Sectors = cSectorsNew;8442 /** @todo r=jack: update cExtents if needed */ 8443 pExtent->cNominalSectors = VMDK_BYTE2SECTOR(cbSize); 8381 8444 8382 8445 /* Update geometry. */ … … 8386 8449 8387 8450 /* Update header information in base image file. */ 8388 pImage->Descriptor.fDirty = true;8389 8451 rc = vmdkWriteDescriptor(pImage, NULL); 8390 8452 8391 if (RT_SUCCESS(rc)) 8392 rc = vmdkFlushImage(pImage, NULL); 8453 if (RT_FAILURE(rc)) 8454 return rc; 8455 8456 rc = vmdkFlushImage(pImage, NULL); 8457 8458 if (RT_FAILURE(rc)) 8459 return rc; 8393 8460 } 8394 8461 /* Same size doesn't change the image at all. */ … … 8397 8464 return rc; 8398 8465 } 8466 8399 8467 8400 8468 const VDIMAGEBACKEND g_VmdkBackend = -
trunk/src/VBox/Storage/testcase/tstVDIo.cpp
r97836 r97839 567 567 bool fBase = false; 568 568 bool fDynamic = true; 569 bool fSplit = false;570 569 571 570 const char *pcszDisk = paScriptArgs[0].psz; … … 584 583 else if (!RTStrICmp(paScriptArgs[3].psz, "dynamic")) 585 584 fDynamic = true; 586 else if (!RTStrICmp(paScriptArgs[3].psz, "vmdk-dynamic-split"))587 fSplit = true;588 else if (!RTStrICmp(paScriptArgs[3].psz, "vmdk-fixed-split"))589 {590 fDynamic = false;591 fSplit = true;592 }593 585 else 594 586 { … … 617 609 if (fHonorSame) 618 610 fOpenFlags |= VD_OPEN_FLAGS_HONOR_SAME; 619 620 if (fSplit)621 fImageFlags |= VD_VMDK_IMAGE_FLAGS_SPLIT_2G;622 611 623 612 if (fBase) … … 3023 3012 return RTEXITCODE_SUCCESS; 3024 3013 } 3014 -
trunk/src/VBox/Storage/testcase/tstVDResize.vd
r97836 r97839 40 40 destroydisk("test"); 41 41 42 print("Testing VMDK Monolithic Flat"); 43 createdisk("test-vmdk-mflat", true); 44 create("test-vmdk-mflat", "base", "test-vmdk-mflat.vmdk", "Fixed", "VMDK", 4G, false, false); 45 io("test-vmdk-mflat", false, 1, "seq", 64K, 1G, 2G, 1G, 100, "none"); 46 resize("test-vmdk-mflat", 6000M); 47 io("test-vmdk-mflat", false, 1, "seq", 64K, 4G, 5G, 1G, 100, "none"); 48 close("test-vmdk-mflat", "single", true /* fDelete */); 49 destroydisk("test-vmdk-mflat"); 50 51 print("Testing VMDK Split Flat"); 52 createdisk("test-vmdk-sflat", true); 53 create("test-vmdk-sflat", "base", "test-vmdk-sflat.vmdk", "vmdk-fixed-split", "VMDK", 4G, false, false); 54 io("test-vmdk-sflat", false, 1, "seq", 64K, 1G, 2G, 1G, 100, "none"); 55 resize("test-vmdk-sflat", 6000M); 56 io("test-vmdk-sflat", false, 1, "seq", 64K, 4G, 5G, 1G, 100, "none"); 57 close("test-vmdk-sflat", "single", true /* fDelete */); 58 destroydisk("test-vmdk-sflat"); 59 60 print("Testing VMDK Sparse"); 61 createdisk("test-vmdk-sparse", true); 62 create("test-vmdk-sparse", "base", "test-vmdk-sparse.vmdk", "Dynamic", "VMDK", 4G, false, false); 63 io("test-vmdk-sparse", false, 1, "seq", 64K, 1G, 2G, 1G, 100, "none"); 64 resize("test-vmdk-sparse", 6000M); 65 io("test-vmdk-sparse", false, 1, "seq", 64K, 4G, 5G, 1G, 100, "none"); 66 close("test-vmdk-sparse", "single", true /* fDelete */); 67 destroydisk("test-vmdk-sparse"); 68 69 print("Testing VMDK Sparse Split"); 70 createdisk("test-vmdk-sparse-split", true); 71 create("test-vmdk-sparse-split", "base", "test-vmdk-sparse-split.vmdk", "vmdk-dynamic-split", "VMDK", 4G, false, false); 72 io("test-vmdk-sparse-split", false, 1, "seq", 64K, 1G, 2G, 1G, 100, "none"); 73 resize("test-vmdk-sparse-split", 6000M); 74 io("test-vmdk-sparse-split", false, 1, "seq", 64K, 4G, 5G, 1G, 100, "none"); 75 close("test-vmdk-sparse-split", "single", true /* fDelete */); 76 destroydisk("test-vmdk-sparse-split"); 42 print("Testing VMDK Flat"); 43 createdisk("test-vmdk-flat", true); 44 create("test-vmdk-flat", "base", "test-vmdk-flat.vmdk", "fixed", "VMDK", 10G, false, false); 45 io("test-vmdk-flat", false, 1, "seq", 64K, 1G, 2G, 10G, 100, "none"); 46 resize("test-vmdk-flat", 20000M); 47 close("test-vmdk-flat", "single", true /* fDelete */); 48 destroydisk("test-vmdk-flat"); 77 49 78 50 iorngdestroy(); 79 51 } 52
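The resize paths touched by this changeset hinge on how much metadata overhead a sparse extent needs for a given size: one grain-directory entry per cSectorsPerGDE sectors, one grain table of cGTEntries 32-bit entries per directory entry, everything padded to 512-byte sectors, and the data area aligned to a grain boundary. The following standalone C sketch restates that arithmetic from the removed vmdkGetNewOverhead() helper in simplified form; the function and parameter names are illustrative only, the embedded-descriptor handling is an assumption rather than the exact VMDK.cpp logic, and it assumes 512-byte sectors plus a redundant grain directory/table copy.

#include <stdint.h>

/* Round cb up to the next multiple of cbAlign (cbAlign must be a power of two). */
static uint64_t vmdkExampleAlignUp(uint64_t cb, uint64_t cbAlign)
{
    return (cb + cbAlign - 1) & ~(cbAlign - 1);
}

/*
 * Illustrative-only restatement of the overhead arithmetic in the removed
 * vmdkGetNewOverhead() helper.  For an extent of cSectorsNew 512-byte sectors
 * it returns the bytes occupied at the start of the extent by the embedded
 * descriptor, the primary grain directory/grain tables and a redundant copy,
 * with the data area aligned to a grain boundary.  Names and the descriptor
 * placement are simplified assumptions, not the VMDK.cpp originals.
 */
static uint64_t vmdkExampleSparseOverhead(uint64_t cSectorsNew,
                                          uint64_t cSectorsPerGDE,     /* sectors covered per GD entry */
                                          uint64_t cGTEntries,         /* entries per grain table */
                                          uint64_t cSectorsPerGrain,   /* grain size in sectors */
                                          uint64_t cDescriptorSectors) /* embedded descriptor size */
{
    /* One grain directory entry per cSectorsPerGDE sectors, rounded up. */
    uint64_t cDirEntries  = (cSectorsNew + cSectorsPerGDE - 1) / cSectorsPerGDE;

    /* Grain directory and all grain tables, each padded out to full sectors. */
    uint64_t cbDir        = vmdkExampleAlignUp(cDirEntries * sizeof(uint32_t), 512);
    uint64_t cbAllTables  = vmdkExampleAlignUp(cDirEntries * cGTEntries * sizeof(uint32_t), 512);

    /* Descriptor plus one GD/GT set, then the redundant GD/GT copy on top. */
    uint64_t cbOverhead   = vmdkExampleAlignUp(cDescriptorSectors * 512 + cbDir + cbAllTables, 512);
    cbOverhead           += cbDir + cbAllTables;

    /* The first data grain must start on a grain boundary. */
    return vmdkExampleAlignUp(cbOverhead, cSectorsPerGrain * 512);
}

With the usual VMDK defaults of 128 sectors per grain (64 KiB grains) and 512 entries per grain table (so 65536 sectors per directory entry), a 4 GiB extent works out to 128 directory entries and roughly 256 KiB of grain-table data per copy, which is the scale of metadata the relocation code above has to make room for when an extent grows.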