Changeset 87052 in vbox for trunk/src/VBox/Storage
- Timestamp: Dec 8, 2020 1:21:08 PM (4 years ago)
- svn:sync-xref-src-repo-rev: 141811
- File: 1 edited
trunk/src/VBox/Storage/VMDK.cpp
r87051 r87052 3 3 * VMDK disk image, core code. 4 4 */ 5 6 5 /* 7 6 * Copyright (C) 2006-2020 Oracle Corporation … … 15 14 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind. 16 15 */ 17 18 19 16 /********************************************************************************************************************************* 20 17 * Header Files * … … 24 21 #include <VBox/vd-plugin.h> 25 22 #include <VBox/err.h> 26 27 23 #include <iprt/assert.h> 28 24 #include <iprt/alloc.h> … … 64 60 #include <errno.h> 65 61 #endif 66 67 62 #include "VDBackends.h" 68 69 70 63 /********************************************************************************************************************************* 71 64 * Constants And Macros, Structures and Typedefs * 72 65 *********************************************************************************************************************************/ 73 74 66 /** Maximum encoded string size (including NUL) we allow for VMDK images. 75 67 * Deliberately not set high to avoid running out of descriptor space. */ 76 68 #define VMDK_ENCODED_COMMENT_MAX 1024 77 78 69 /** VMDK descriptor DDB entry for PCHS cylinders. */ 79 70 #define VMDK_DDB_GEO_PCHS_CYLINDERS "ddb.geometry.cylinders" 80 81 71 /** VMDK descriptor DDB entry for PCHS heads. */ 82 72 #define VMDK_DDB_GEO_PCHS_HEADS "ddb.geometry.heads" 83 84 73 /** VMDK descriptor DDB entry for PCHS sectors. */ 85 74 #define VMDK_DDB_GEO_PCHS_SECTORS "ddb.geometry.sectors" 86 87 75 /** VMDK descriptor DDB entry for LCHS cylinders. */ 88 76 #define VMDK_DDB_GEO_LCHS_CYLINDERS "ddb.geometry.biosCylinders" 89 90 77 /** VMDK descriptor DDB entry for LCHS heads. */ 91 78 #define VMDK_DDB_GEO_LCHS_HEADS "ddb.geometry.biosHeads" 92 93 79 /** VMDK descriptor DDB entry for LCHS sectors. */ 94 80 #define VMDK_DDB_GEO_LCHS_SECTORS "ddb.geometry.biosSectors" 95 96 81 /** VMDK descriptor DDB entry for image UUID. */ 97 82 #define VMDK_DDB_IMAGE_UUID "ddb.uuid.image" 98 99 83 /** VMDK descriptor DDB entry for image modification UUID. */ 100 84 #define VMDK_DDB_MODIFICATION_UUID "ddb.uuid.modification" 101 102 85 /** VMDK descriptor DDB entry for parent image UUID. */ 103 86 #define VMDK_DDB_PARENT_UUID "ddb.uuid.parent" 104 105 87 /** VMDK descriptor DDB entry for parent image modification UUID. */ 106 88 #define VMDK_DDB_PARENT_MODIFICATION_UUID "ddb.uuid.parentmodification" 107 108 89 /** No compression for streamOptimized files. */ 109 90 #define VMDK_COMPRESSION_NONE 0 110 111 91 /** Deflate compression for streamOptimized files. */ 112 92 #define VMDK_COMPRESSION_DEFLATE 1 113 114 93 /** Marker that the actual GD value is stored in the footer. */ 115 94 #define VMDK_GD_AT_END 0xffffffffffffffffULL 116 117 95 /** Marker for end-of-stream in streamOptimized images. */ 118 96 #define VMDK_MARKER_EOS 0 119 120 97 /** Marker for grain table block in streamOptimized images. */ 121 98 #define VMDK_MARKER_GT 1 122 123 99 /** Marker for grain directory block in streamOptimized images. */ 124 100 #define VMDK_MARKER_GD 2 125 126 101 /** Marker for footer in streamOptimized images. */ 127 102 #define VMDK_MARKER_FOOTER 3 128 129 103 /** Marker for unknown purpose in streamOptimized images. 130 104 * Shows up in very recent images created by vSphere, but only sporadically. 131 105 * They "forgot" to document that one in the VMDK specification. */ 132 106 #define VMDK_MARKER_UNSPECIFIED 4 133 134 107 /** Dummy marker for "don't check the marker value". 
*/ 135 108 #define VMDK_MARKER_IGNORE 0xffffffffU 136 137 109 /** 138 110 * Magic number for hosted images created by VMware Workstation 4, VMware … … 140 112 */ 141 113 #define VMDK_SPARSE_MAGICNUMBER 0x564d444b /* 'V' 'M' 'D' 'K' */ 142 143 114 /** 144 115 * VMDK hosted binary extent header. The "Sparse" is a total misnomer, as … … 168 139 } SparseExtentHeader; 169 140 #pragma pack() 170 171 141 /** The maximum allowed descriptor size in the extent header in sectors. */ 172 142 #define VMDK_SPARSE_DESCRIPTOR_SIZE_MAX UINT64_C(20480) /* 10MB */ 173 174 143 /** VMDK capacity for a single chunk when 2G splitting is turned on. Should be 175 144 * divisible by the default grain size (64K) */ 176 145 #define VMDK_2G_SPLIT_SIZE (2047 * 1024 * 1024) 177 178 146 /** VMDK streamOptimized file format marker. The type field may or may not 179 147 * be actually valid, but there's always data to read there. */ … … 186 154 } VMDKMARKER, *PVMDKMARKER; 187 155 #pragma pack() 188 189 190 156 /** Convert sector number/size to byte offset/size. */ 191 157 #define VMDK_SECTOR2BYTE(u) ((uint64_t)(u) << 9) 192 193 158 /** Convert byte offset/size to sector number/size. */ 194 159 #define VMDK_BYTE2SECTOR(u) ((u) >> 9) 195 196 160 /** 197 161 * VMDK extent type. … … 208 172 VMDKETYPE_VMFS 209 173 } VMDKETYPE, *PVMDKETYPE; 210 211 174 /** 212 175 * VMDK access type for a extent. … … 221 184 VMDKACCESS_READWRITE 222 185 } VMDKACCESS, *PVMDKACCESS; 223 224 186 /** Forward declaration for PVMDKIMAGE. */ 225 187 typedef struct VMDKIMAGE *PVMDKIMAGE; 226 227 188 /** 228 189 * Extents files entry. Used for opening a particular file only once. … … 249 210 struct VMDKFILE *pPrev; 250 211 } VMDKFILE, *PVMDKFILE; 251 252 212 /** 253 213 * VMDK extent data structure. … … 330 290 struct VMDKIMAGE *pImage; 331 291 } VMDKEXTENT, *PVMDKEXTENT; 332 333 292 /** 334 293 * Grain table cache size. Allocated per image. 335 294 */ 336 295 #define VMDK_GT_CACHE_SIZE 256 337 338 296 /** 339 297 * Grain table block size. Smaller than an actual grain table block to allow … … 342 300 */ 343 301 #define VMDK_GT_CACHELINE_SIZE 128 344 345 346 302 /** 347 303 * Maximum number of lines in a descriptor file. Not worth the effort of … … 351 307 */ 352 308 #define VMDK_DESCRIPTOR_LINES_MAX 1100U 353 354 309 /** 355 310 * Parsed descriptor information. Allows easy access and update of the … … 375 330 unsigned aNextLines[VMDK_DESCRIPTOR_LINES_MAX]; 376 331 } VMDKDESCRIPTOR, *PVMDKDESCRIPTOR; 377 378 379 332 /** 380 333 * Cache entry for translating extent/sector to a sector number in that … … 390 343 uint32_t aGTData[VMDK_GT_CACHELINE_SIZE]; 391 344 } VMDKGTCACHEENTRY, *PVMDKGTCACHEENTRY; 392 393 345 /** 394 346 * Cache data structure for blocks of grain table entries. For now this is a … … 404 356 unsigned cEntries; 405 357 } VMDKGTCACHE, *PVMDKGTCACHE; 406 407 358 /** 408 359 * Complete VMDK image data structure. Mainly a collection of extents and a few … … 415 366 /** Descriptor file if applicable. */ 416 367 PVMDKFILE pFile; 417 418 368 /** Pointer to the per-disk VD interface list. */ 419 369 PVDINTERFACE pVDIfsDisk; 420 370 /** Pointer to the per-image VD interface list. */ 421 371 PVDINTERFACE pVDIfsImage; 422 423 372 /** Error interface. */ 424 373 PVDINTERFACEERROR pIfError; 425 374 /** I/O interface. */ 426 375 PVDINTERFACEIOINT pIfIo; 427 428 429 376 /** Pointer to the image extents. */ 430 377 PVMDKEXTENT pExtents; … … 434 381 * times only once (happens mainly with raw partition access). 
*/ 435 382 PVMDKFILE pFiles; 436 437 383 /** 438 384 * Pointer to an array of segment entries for async I/O. … … 444 390 /** Entries available in the segments array. */ 445 391 unsigned cSegments; 446 447 392 /** Open flags passed by VBoxHD layer. */ 448 393 unsigned uOpenFlags; … … 463 408 /** Parent image modification UUID. */ 464 409 RTUUID ParentModificationUuid; 465 466 410 /** Pointer to grain table cache, if this image contains sparse extents. */ 467 411 PVMDKGTCACHE pGTCache; … … 475 419 VDREGIONLIST RegionList; 476 420 } VMDKIMAGE; 477 478 479 421 /** State for the input/output callout of the inflate reader/deflate writer. */ 480 422 typedef struct VMDKCOMPRESSIO … … 489 431 void *pvCompGrain; 490 432 } VMDKCOMPRESSIO; 491 492 493 433 /** Tracks async grain allocation. */ 494 434 typedef struct VMDKGRAINALLOCASYNC … … 512 452 uint64_t uRGTSector; 513 453 } VMDKGRAINALLOCASYNC, *PVMDKGRAINALLOCASYNC; 514 515 454 /** 516 455 * State information for vmdkRename() and helpers. … … 550 489 /** Pointer to a VMDK rename state. */ 551 490 typedef VMDKRENAMESTATE *PVMDKRENAMESTATE; 552 553 554 491 /********************************************************************************************************************************* 555 492 * Static Variables * 556 493 *********************************************************************************************************************************/ 557 558 494 /** NULL-terminated array of supported file extensions. */ 559 495 static const VDFILEEXTENSION s_aVmdkFileExtensions[] = … … 562 498 {NULL, VDTYPE_INVALID} 563 499 }; 564 565 500 /** NULL-terminated array of configuration option. */ 566 501 static const VDCONFIGINFO s_aVmdkConfigInfo[] = … … 571 506 { "BootSector", NULL, VDCFGVALUETYPE_BYTES, 0 }, 572 507 { "Relative", NULL, VDCFGVALUETYPE_INTEGER, 0 }, 573 574 508 /* End of options list */ 575 509 { NULL, NULL, VDCFGVALUETYPE_INTEGER, 0 } 576 510 }; 577 578 579 511 /********************************************************************************************************************************* 580 512 * Internal Functions * 581 513 *********************************************************************************************************************************/ 582 583 514 static void vmdkFreeStreamBuffers(PVMDKEXTENT pExtent); 584 515 static int vmdkFreeExtentData(PVMDKIMAGE pImage, PVMDKEXTENT pExtent, 585 516 bool fDelete); 586 587 517 static int vmdkCreateExtents(PVMDKIMAGE pImage, unsigned cExtents); 588 518 static int vmdkFlushImage(PVMDKIMAGE pImage, PVDIOCTX pIoCtx); 589 519 static int vmdkSetImageComment(PVMDKIMAGE pImage, const char *pszComment); 590 520 static int vmdkFreeImage(PVMDKIMAGE pImage, bool fDelete, bool fFlush); 591 592 521 static DECLCALLBACK(int) vmdkAllocGrainComplete(void *pBackendData, PVDIOCTX pIoCtx, 593 522 void *pvUser, int rcReq); 594 595 523 /** 596 524 * Internal: open a file (using a file descriptor cache to ensure each file … … 602 530 int rc = VINF_SUCCESS; 603 531 PVMDKFILE pVmdkFile; 604 605 532 for (pVmdkFile = pImage->pFiles; 606 533 pVmdkFile != NULL; … … 611 538 Assert(fOpen == pVmdkFile->fOpen); 612 539 pVmdkFile->uReferences++; 613 614 540 *ppVmdkFile = pVmdkFile; 615 616 541 return rc; 617 542 } 618 543 } 619 620 544 /* If we get here, there's no matching entry in the cache. 
*/ 621 545 pVmdkFile = (PVMDKFILE)RTMemAllocZ(sizeof(VMDKFILE)); … … 625 549 return VERR_NO_MEMORY; 626 550 } 627 628 551 pVmdkFile->pszFilename = RTStrDup(pszFilename); 629 552 if (!pVmdkFile->pszFilename) … … 633 556 return VERR_NO_MEMORY; 634 557 } 635 636 558 if (pszBasename) 637 559 { … … 645 567 } 646 568 } 647 648 569 pVmdkFile->fOpen = fOpen; 649 650 570 rc = vdIfIoIntFileOpen(pImage->pIfIo, pszFilename, fOpen, 651 571 &pVmdkFile->pStorage); … … 666 586 *ppVmdkFile = NULL; 667 587 } 668 669 return rc; 670 } 671 588 return rc; 589 } 672 590 /** 673 591 * Internal: close a file, updating the file descriptor cache. … … 677 595 int rc = VINF_SUCCESS; 678 596 PVMDKFILE pVmdkFile = *ppVmdkFile; 679 680 597 AssertPtr(pVmdkFile); 681 682 598 pVmdkFile->fDelete |= fDelete; 683 599 Assert(pVmdkFile->uReferences); … … 687 603 PVMDKFILE pPrev; 688 604 PVMDKFILE pNext; 689 690 605 /* Unchain the element from the list. */ 691 606 pPrev = pVmdkFile->pPrev; 692 607 pNext = pVmdkFile->pNext; 693 694 608 if (pNext) 695 609 pNext->pPrev = pPrev; … … 698 612 else 699 613 pImage->pFiles = pNext; 700 701 614 rc = vdIfIoIntFileClose(pImage->pIfIo, pVmdkFile->pStorage); 702 703 615 bool fFileDel = pVmdkFile->fDelete; 704 616 if ( pVmdkFile->pszBasename … … 713 625 fFileDel = false; 714 626 } 715 716 627 if (fFileDel) 717 628 { … … 727 638 RTMemFree(pVmdkFile); 728 639 } 729 730 640 *ppVmdkFile = NULL; 731 641 return rc; 732 642 } 733 734 643 /*#define VMDK_USE_BLOCK_DECOMP_API - test and enable */ 735 644 #ifndef VMDK_USE_BLOCK_DECOMP_API … … 738 647 VMDKCOMPRESSIO *pInflateState = (VMDKCOMPRESSIO *)pvUser; 739 648 size_t cbInjected = 0; 740 741 649 Assert(cbBuf); 742 650 if (pInflateState->iOffset < 0) … … 764 672 } 765 673 #endif 766 767 674 /** 768 675 * Internal: read from a file and inflate the compressed data, … … 780 687 VMDKMARKER *pMarker = (VMDKMARKER *)pExtent->pvCompGrain; 781 688 size_t cbCompSize, cbActuallyRead; 782 783 689 if (!pcvMarker) 784 690 { … … 795 701 pMarker->cbSize = RT_H2LE_U32(pMarker->cbSize); 796 702 } 797 798 703 cbCompSize = RT_LE2H_U32(pMarker->cbSize); 799 704 if (cbCompSize == 0) … … 802 707 return VERR_VD_VMDK_INVALID_FORMAT; 803 708 } 804 805 709 /* Sanity check - the expansion ratio should be much less than 2. */ 806 710 Assert(cbCompSize < 2 * cbToRead); 807 711 if (cbCompSize >= 2 * cbToRead) 808 712 return VERR_VD_VMDK_INVALID_FORMAT; 809 810 713 /* Compressed grain marker. Data follows immediately. 
*/ 811 714 rc = vdIfIoIntFileReadSync(pImage->pIfIo, pExtent->pFile->pStorage, … … 817 720 512) 818 721 - RT_UOFFSETOF(VMDKMARKER, uType)); 819 820 722 if (puLBA) 821 723 *puLBA = RT_LE2H_U64(pMarker->uSector); … … 824 726 + RT_UOFFSETOF(VMDKMARKER, uType), 825 727 512); 826 827 728 #ifdef VMDK_USE_BLOCK_DECOMP_API 828 729 rc = RTZipBlockDecompress(RTZIPTYPE_ZLIB, 0 /*fFlags*/, … … 835 736 InflateState.cbCompGrain = cbCompSize + RT_UOFFSETOF(VMDKMARKER, uType); 836 737 InflateState.pvCompGrain = pExtent->pvCompGrain; 837 838 738 rc = RTZipDecompCreate(&pZip, &InflateState, vmdkFileInflateHelper); 839 739 if (RT_FAILURE(rc)) … … 852 752 return rc; 853 753 } 854 855 754 static DECLCALLBACK(int) vmdkFileDeflateHelper(void *pvUser, const void *pvBuf, size_t cbBuf) 856 755 { 857 756 VMDKCOMPRESSIO *pDeflateState = (VMDKCOMPRESSIO *)pvUser; 858 859 757 Assert(cbBuf); 860 758 if (pDeflateState->iOffset < 0) … … 873 771 return VINF_SUCCESS; 874 772 } 875 876 773 /** 877 774 * Internal: deflate the uncompressed data and write to a file, … … 886 783 PRTZIPCOMP pZip = NULL; 887 784 VMDKCOMPRESSIO DeflateState; 888 889 785 DeflateState.pImage = pImage; 890 786 DeflateState.iOffset = -1; 891 787 DeflateState.cbCompGrain = pExtent->cbCompGrain; 892 788 DeflateState.pvCompGrain = pExtent->pvCompGrain; 893 894 789 rc = RTZipCompCreate(&pZip, &DeflateState, vmdkFileDeflateHelper, 895 790 RTZIPTYPE_ZLIB, RTZIPLEVEL_DEFAULT); … … 904 799 Assert( DeflateState.iOffset > 0 905 800 && (size_t)DeflateState.iOffset <= DeflateState.cbCompGrain); 906 907 801 /* pad with zeroes to get to a full sector size */ 908 802 uint32_t uSize = DeflateState.iOffset; … … 914 808 uSize = uSizeAlign; 915 809 } 916 917 810 if (pcbMarkerData) 918 811 *pcbMarkerData = uSize; 919 920 812 /* Compressed grain marker. Data follows immediately. */ 921 813 VMDKMARKER *pMarker = (VMDKMARKER *)pExtent->pvCompGrain; … … 930 822 return rc; 931 823 } 932 933 934 824 /** 935 825 * Internal: check if all files are closed, prevent leaking resources. … … 939 829 int rc = VINF_SUCCESS, rc2; 940 830 PVMDKFILE pVmdkFile; 941 942 831 Assert(pImage->pFiles == NULL); 943 832 for (pVmdkFile = pImage->pFiles; … … 948 837 pVmdkFile->pszFilename)); 949 838 pImage->pFiles = pVmdkFile->pNext; 950 951 839 rc2 = vmdkFileClose(pImage, &pVmdkFile, pVmdkFile->fDelete); 952 953 840 if (RT_SUCCESS(rc)) 954 841 rc = rc2; … … 956 843 return rc; 957 844 } 958 959 845 /** 960 846 * Internal: truncate a string (at a UTF8 code point boundary) and encode the … … 965 851 char szEnc[VMDK_ENCODED_COMMENT_MAX + 3]; 966 852 char *pszDst = szEnc; 967 968 853 AssertPtr(psz); 969 970 854 for (; *psz; psz = RTStrNextCp(psz)) 971 855 { … … 998 882 return RTStrDup(szEnc); 999 883 } 1000 1001 884 /** 1002 885 * Internal: decode a string and store it into the specified string. … … 1006 889 int rc = VINF_SUCCESS; 1007 890 char szBuf[4]; 1008 1009 891 if (!cb) 1010 892 return VERR_BUFFER_OVERFLOW; 1011 1012 893 AssertPtr(psz); 1013 1014 894 for (; *pszEncoded; pszEncoded = RTStrNextCp(pszEncoded)) 1015 895 { … … 1034 914 else 1035 915 pszDst = RTStrPutCp(pszDst, Cp); 1036 1037 916 /* Need to leave space for terminating NUL. */ 1038 917 if ((size_t)(pszDst - szBuf) + 1 >= cb) … … 1047 926 return rc; 1048 927 } 1049 1050 928 /** 1051 929 * Internal: free all buffers associated with grain directories. 
… … 1064 942 } 1065 943 } 1066 1067 944 /** 1068 945 * Internal: allocate the compressed/uncompressed buffers for streamOptimized … … 1072 949 { 1073 950 int rc = VINF_SUCCESS; 1074 1075 951 if (pImage->uImageFlags & VD_VMDK_IMAGE_FLAGS_STREAM_OPTIMIZED) 1076 952 { … … 1091 967 rc = VERR_NO_MEMORY; 1092 968 } 1093 1094 969 if (RT_FAILURE(rc)) 1095 970 vmdkFreeStreamBuffers(pExtent); 1096 971 return rc; 1097 972 } 1098 1099 973 /** 1100 974 * Internal: allocate all buffers associated with grain directories. … … 1105 979 int rc = VINF_SUCCESS; 1106 980 size_t cbGD = pExtent->cGDEntries * sizeof(uint32_t); 1107 1108 981 pExtent->pGD = (uint32_t *)RTMemAllocZ(cbGD); 1109 982 if (RT_LIKELY(pExtent->pGD)) … … 1118 991 else 1119 992 rc = VERR_NO_MEMORY; 1120 1121 993 if (RT_FAILURE(rc)) 1122 994 vmdkFreeGrainDirectory(pExtent); 1123 995 return rc; 1124 996 } 1125 1126 997 /** 1127 998 * Converts the grain directory from little to host endianess. … … 1134 1005 { 1135 1006 uint32_t *pGDTmp = pGD; 1136 1137 1007 for (uint32_t i = 0; i < cGDEntries; i++, pGDTmp++) 1138 1008 *pGDTmp = RT_LE2H_U32(*pGDTmp); 1139 1009 } 1140 1141 1010 /** 1142 1011 * Read the grain directory and allocated grain tables verifying them against … … 1151 1020 int rc = VINF_SUCCESS; 1152 1021 size_t cbGD = pExtent->cGDEntries * sizeof(uint32_t); 1153 1154 1022 AssertReturn(( pExtent->enmType == VMDKETYPE_HOSTED_SPARSE 1155 1023 && pExtent->uSectorGD != VMDK_GD_AT_END 1156 1024 && pExtent->uSectorRGD != VMDK_GD_AT_END), VERR_INTERNAL_ERROR); 1157 1158 1025 rc = vmdkAllocGrainDirectory(pImage, pExtent); 1159 1026 if (RT_SUCCESS(rc)) … … 1167 1034 { 1168 1035 vmdkGrainDirectoryConvToHost(pExtent->pGD, pExtent->cGDEntries); 1169 1170 1036 if ( pExtent->uSectorRGD 1171 1037 && !(pImage->uOpenFlags & VD_OPEN_FLAGS_SKIP_CONSISTENCY_CHECKS)) … … 1179 1045 { 1180 1046 vmdkGrainDirectoryConvToHost(pExtent->pRGD, pExtent->cGDEntries); 1181 1182 1047 /* Check grain table and redundant grain table for consistency. */ 1183 1048 size_t cbGT = pExtent->cGTEntries * sizeof(uint32_t); 1184 1049 size_t cbGTBuffers = cbGT; /* Start with space for one GT. */ 1185 1050 size_t cbGTBuffersMax = _1M; 1186 1187 1051 uint32_t *pTmpGT1 = (uint32_t *)RTMemAlloc(cbGTBuffers); 1188 1052 uint32_t *pTmpGT2 = (uint32_t *)RTMemAlloc(cbGTBuffers); 1189 1190 1053 if ( !pTmpGT1 1191 1054 || !pTmpGT2) 1192 1055 rc = VERR_NO_MEMORY; 1193 1194 1056 size_t i = 0; 1195 1057 uint32_t *pGDTmp = pExtent->pGD; 1196 1058 uint32_t *pRGDTmp = pExtent->pRGD; 1197 1198 1059 /* Loop through all entries. */ 1199 1060 while (i < pExtent->cGDEntries) … … 1202 1063 uint32_t uRGTStart = *pRGDTmp; 1203 1064 size_t cbGTRead = cbGT; 1204 1205 1065 /* If no grain table is allocated skip the entry. 
*/ 1206 1066 if (*pGDTmp == 0 && *pRGDTmp == 0) … … 1209 1069 continue; 1210 1070 } 1211 1212 1071 if (*pGDTmp == 0 || *pRGDTmp == 0 || *pGDTmp == *pRGDTmp) 1213 1072 { … … 1219 1078 break; 1220 1079 } 1221 1222 1080 i++; 1223 1081 pGDTmp++; 1224 1082 pRGDTmp++; 1225 1226 1083 /* 1227 1084 * Read a few tables at once if adjacent to decrease the number … … 1237 1094 continue; 1238 1095 } 1239 1240 1096 if (*pGDTmp == 0 || *pRGDTmp == 0 || *pGDTmp == *pRGDTmp) 1241 1097 { … … 1247 1103 break; 1248 1104 } 1249 1250 1105 /* Check that the start offsets are adjacent.*/ 1251 1106 if ( VMDK_SECTOR2BYTE(uGTStart) + cbGTRead != VMDK_SECTOR2BYTE(*pGDTmp) 1252 1107 || VMDK_SECTOR2BYTE(uRGTStart) + cbGTRead != VMDK_SECTOR2BYTE(*pRGDTmp)) 1253 1108 break; 1254 1255 1109 i++; 1256 1110 pGDTmp++; … … 1258 1112 cbGTRead += cbGT; 1259 1113 } 1260 1261 1114 /* Increase buffers if required. */ 1262 1115 if ( RT_SUCCESS(rc) … … 1276 1129 else 1277 1130 rc = VERR_NO_MEMORY; 1278 1279 1131 if (rc == VERR_NO_MEMORY) 1280 1132 { … … 1283 1135 i -= cbGTRead / cbGT; 1284 1136 cbGTRead = cbGT; 1285 1286 1137 /* Don't try to increase the buffer again in the next run. */ 1287 1138 cbGTBuffersMax = cbGTBuffers; 1288 1139 } 1289 1140 } 1290 1291 1141 if (RT_SUCCESS(rc)) 1292 1142 { … … 1321 1171 } 1322 1172 } /* while (i < pExtent->cGDEntries) */ 1323 1324 1173 /** @todo figure out what to do for unclean VMDKs. */ 1325 1174 if (pTmpGT1) … … 1337 1186 N_("VMDK: could not read grain directory in '%s': %Rrc"), pExtent->pszFullname, rc); 1338 1187 } 1339 1340 1188 if (RT_FAILURE(rc)) 1341 1189 vmdkFreeGrainDirectory(pExtent); 1342 1190 return rc; 1343 1191 } 1344 1345 1192 /** 1346 1193 * Creates a new grain directory for the given extent at the given start sector. … … 1361 1208 size_t cbGTRounded; 1362 1209 uint64_t cbOverhead; 1363 1364 1210 if (fPreAlloc) 1365 1211 { … … 1375 1221 cbOverhead = VMDK_SECTOR2BYTE(uStartSector) + cbGDRounded; 1376 1222 } 1377 1378 1223 /* For streamOptimized extents there is only one grain directory, 1379 1224 * and for all others take redundant grain directory into account. */ … … 1390 1235 rc = vdIfIoIntFileSetSize(pImage->pIfIo, pExtent->pFile->pStorage, cbOverhead); 1391 1236 } 1392 1393 1237 if (RT_SUCCESS(rc)) 1394 1238 { 1395 1239 pExtent->uAppendPosition = cbOverhead; 1396 1240 pExtent->cOverheadSectors = VMDK_BYTE2SECTOR(cbOverhead); 1397 1398 1241 if (pImage->uImageFlags & VD_VMDK_IMAGE_FLAGS_STREAM_OPTIMIZED) 1399 1242 { … … 1406 1249 pExtent->uSectorGD = uStartSector + VMDK_BYTE2SECTOR(cbGDRounded + cbGTRounded); 1407 1250 } 1408 1409 1251 rc = vmdkAllocStreamBuffers(pImage, pExtent); 1410 1252 if (RT_SUCCESS(rc)) … … 1416 1258 uint32_t uGTSectorLE; 1417 1259 uint64_t uOffsetSectors; 1418 1419 1260 if (pExtent->pRGD) 1420 1261 { … … 1436 1277 } 1437 1278 } 1438 1439 1279 if (RT_SUCCESS(rc)) 1440 1280 { … … 1459 1299 } 1460 1300 } 1461 1462 1301 if (RT_FAILURE(rc)) 1463 1302 vmdkFreeGrainDirectory(pExtent); 1464 1303 return rc; 1465 1304 } 1466 1467 1305 /** 1468 1306 * Unquotes the given string returning the result in a separate buffer. … … 1482 1320 char *pszQ; 1483 1321 char *pszUnquoted; 1484 1485 1322 /* Skip over whitespace. 
*/ 1486 1323 while (*pszStr == ' ' || *pszStr == '\t') 1487 1324 pszStr++; 1488 1489 1325 if (*pszStr != '"') 1490 1326 { … … 1501 1337 pImage->pszFilename, pszStart); 1502 1338 } 1503 1504 1339 pszUnquoted = (char *)RTMemTmpAlloc(pszQ - pszStr + 1); 1505 1340 if (!pszUnquoted) … … 1512 1347 return VINF_SUCCESS; 1513 1348 } 1514 1515 1349 static int vmdkDescInitStr(PVMDKIMAGE pImage, PVMDKDESCRIPTOR pDescriptor, 1516 1350 const char *pszLine) … … 1518 1352 char *pEnd = pDescriptor->aLines[pDescriptor->cLines]; 1519 1353 ssize_t cbDiff = strlen(pszLine) + 1; 1520 1521 1354 if ( pDescriptor->cLines >= VMDK_DESCRIPTOR_LINES_MAX - 1 1522 1355 && pEnd - pDescriptor->aLines[0] > (ptrdiff_t)pDescriptor->cbDescAlloc - cbDiff) 1523 1356 return vdIfError(pImage->pIfError, VERR_BUFFER_OVERFLOW, RT_SRC_POS, N_("VMDK: descriptor too big in '%s'"), pImage->pszFilename); 1524 1525 1357 memcpy(pEnd, pszLine, cbDiff); 1526 1358 pDescriptor->cLines++; 1527 1359 pDescriptor->aLines[pDescriptor->cLines] = pEnd + cbDiff; 1528 1360 pDescriptor->fDirty = true; 1529 1530 1361 return VINF_SUCCESS; 1531 1362 } 1532 1533 1363 static bool vmdkDescGetStr(PVMDKDESCRIPTOR pDescriptor, unsigned uStart, 1534 1364 const char *pszKey, const char **ppszValue) … … 1536 1366 size_t cbKey = strlen(pszKey); 1537 1367 const char *pszValue; 1538 1539 1368 while (uStart != 0) 1540 1369 { … … 1555 1384 return !!uStart; 1556 1385 } 1557 1558 1386 static int vmdkDescSetStr(PVMDKIMAGE pImage, PVMDKDESCRIPTOR pDescriptor, 1559 1387 unsigned uStart, … … 1563 1391 size_t cbKey = strlen(pszKey); 1564 1392 unsigned uLast = 0; 1565 1566 1393 while (uStart != 0) 1567 1394 { … … 1598 1425 > (ptrdiff_t)pDescriptor->cbDescAlloc - cbDiff) 1599 1426 return vdIfError(pImage->pIfError, VERR_BUFFER_OVERFLOW, RT_SRC_POS, N_("VMDK: descriptor too big in '%s'"), pImage->pszFilename); 1600 1601 1427 memmove(pszTmp + cbNewVal, pszTmp + cbOldVal, 1602 1428 pDescriptor->aLines[pDescriptor->cLines] - pszTmp - cbOldVal); … … 1661 1487 for (unsigned i = uStart + 1; i <= pDescriptor->cLines; i++) 1662 1488 pDescriptor->aLines[i] += cbDiff; 1663 1664 1489 /* Adjust starting line numbers of following descriptor sections. */ 1665 1490 if (uStart <= pDescriptor->uFirstExtent) … … 1671 1496 return VINF_SUCCESS; 1672 1497 } 1673 1674 1498 static int vmdkDescBaseGetU32(PVMDKDESCRIPTOR pDescriptor, const char *pszKey, 1675 1499 uint32_t *puValue) 1676 1500 { 1677 1501 const char *pszValue; 1678 1679 1502 if (!vmdkDescGetStr(pDescriptor, pDescriptor->uFirstDesc, pszKey, 1680 1503 &pszValue)) … … 1682 1505 return RTStrToUInt32Ex(pszValue, NULL, 10, puValue); 1683 1506 } 1684 1685 1507 /** 1686 1508 * Returns the value of the given key as a string allocating the necessary memory. 
… … 1699 1521 const char *pszValue; 1700 1522 char *pszValueUnquoted; 1701 1702 1523 if (!vmdkDescGetStr(pDescriptor, pDescriptor->uFirstDesc, pszKey, 1703 1524 &pszValue)) … … 1709 1530 return rc; 1710 1531 } 1711 1712 1532 static int vmdkDescBaseSetStr(PVMDKIMAGE pImage, PVMDKDESCRIPTOR pDescriptor, 1713 1533 const char *pszKey, const char *pszValue) 1714 1534 { 1715 1535 char *pszValueQuoted; 1716 1717 1536 RTStrAPrintf(&pszValueQuoted, "\"%s\"", pszValue); 1718 1537 if (!pszValueQuoted) … … 1723 1542 return rc; 1724 1543 } 1725 1726 1544 static void vmdkDescExtRemoveDummy(PVMDKIMAGE pImage, 1727 1545 PVMDKDESCRIPTOR pDescriptor) … … 1730 1548 unsigned uEntry = pDescriptor->uFirstExtent; 1731 1549 ssize_t cbDiff; 1732 1733 1550 if (!uEntry) 1734 1551 return; 1735 1736 1552 cbDiff = strlen(pDescriptor->aLines[uEntry]) + 1; 1737 1553 /* Move everything including \0 in the entry marking the end of buffer. */ … … 1749 1565 if (pDescriptor->uFirstDDB) 1750 1566 pDescriptor->uFirstDDB--; 1751 1752 1567 return; 1753 1568 } 1754 1755 1569 static int vmdkDescExtInsert(PVMDKIMAGE pImage, PVMDKDESCRIPTOR pDescriptor, 1756 1570 VMDKACCESS enmAccess, uint64_t cNominalSectors, … … 1764 1578 char szExt[1024]; 1765 1579 ssize_t cbDiff; 1766 1767 1580 Assert((unsigned)enmAccess < RT_ELEMENTS(apszAccess)); 1768 1581 Assert((unsigned)enmType < RT_ELEMENTS(apszType)); 1769 1770 1582 /* Find last entry in extent description. */ 1771 1583 while (uStart) … … 1775 1587 uStart = pDescriptor->aNextLines[uStart]; 1776 1588 } 1777 1778 1589 if (enmType == VMDKETYPE_ZERO) 1779 1590 { … … 1794 1605 } 1795 1606 cbDiff = strlen(szExt) + 1; 1796 1797 1607 /* Check for buffer overflow. */ 1798 1608 if ( (pDescriptor->cLines >= VMDK_DESCRIPTOR_LINES_MAX - 1) … … 1800 1610 - pDescriptor->aLines[0] > (ptrdiff_t)pDescriptor->cbDescAlloc - cbDiff)) 1801 1611 return vdIfError(pImage->pIfError, VERR_BUFFER_OVERFLOW, RT_SRC_POS, N_("VMDK: descriptor too big in '%s'"), pImage->pszFilename); 1802 1803 1612 for (unsigned i = pDescriptor->cLines + 1; i > uLast + 1; i--) 1804 1613 { … … 1819 1628 for (unsigned i = uStart + 1; i <= pDescriptor->cLines; i++) 1820 1629 pDescriptor->aLines[i] += cbDiff; 1821 1822 1630 /* Adjust starting line numbers of following descriptor sections. 
*/ 1823 1631 if (uStart <= pDescriptor->uFirstDDB) 1824 1632 pDescriptor->uFirstDDB++; 1825 1826 1633 pDescriptor->fDirty = true; 1827 1634 return VINF_SUCCESS; 1828 1635 } 1829 1830 1636 /** 1831 1637 * Returns the value of the given key from the DDB as a string allocating … … 1845 1651 const char *pszValue; 1846 1652 char *pszValueUnquoted; 1847 1848 1653 if (!vmdkDescGetStr(pDescriptor, pDescriptor->uFirstDDB, pszKey, 1849 1654 &pszValue)) … … 1855 1660 return rc; 1856 1661 } 1857 1858 1662 static int vmdkDescDDBGetU32(PVMDKIMAGE pImage, PVMDKDESCRIPTOR pDescriptor, 1859 1663 const char *pszKey, uint32_t *puValue) … … 1861 1665 const char *pszValue; 1862 1666 char *pszValueUnquoted; 1863 1864 1667 if (!vmdkDescGetStr(pDescriptor, pDescriptor->uFirstDDB, pszKey, 1865 1668 &pszValue)) … … 1872 1675 return rc; 1873 1676 } 1874 1875 1677 static int vmdkDescDDBGetUuid(PVMDKIMAGE pImage, PVMDKDESCRIPTOR pDescriptor, 1876 1678 const char *pszKey, PRTUUID pUuid) … … 1878 1680 const char *pszValue; 1879 1681 char *pszValueUnquoted; 1880 1881 1682 if (!vmdkDescGetStr(pDescriptor, pDescriptor->uFirstDDB, pszKey, 1882 1683 &pszValue)) … … 1889 1690 return rc; 1890 1691 } 1891 1892 1692 static int vmdkDescDDBSetStr(PVMDKIMAGE pImage, PVMDKDESCRIPTOR pDescriptor, 1893 1693 const char *pszKey, const char *pszVal) … … 1895 1695 int rc; 1896 1696 char *pszValQuoted; 1897 1898 1697 if (pszVal) 1899 1698 { … … 1910 1709 return rc; 1911 1710 } 1912 1913 1711 static int vmdkDescDDBSetUuid(PVMDKIMAGE pImage, PVMDKDESCRIPTOR pDescriptor, 1914 1712 const char *pszKey, PCRTUUID pUuid) 1915 1713 { 1916 1714 char *pszUuid; 1917 1918 1715 RTStrAPrintf(&pszUuid, "\"%RTuuid\"", pUuid); 1919 1716 if (!pszUuid) … … 1924 1721 return rc; 1925 1722 } 1926 1927 1723 static int vmdkDescDDBSetU32(PVMDKIMAGE pImage, PVMDKDESCRIPTOR pDescriptor, 1928 1724 const char *pszKey, uint32_t uValue) 1929 1725 { 1930 1726 char *pszValue; 1931 1932 1727 RTStrAPrintf(&pszValue, "\"%d\"", uValue); 1933 1728 if (!pszValue) … … 1938 1733 return rc; 1939 1734 } 1940 1941 1735 /** 1942 1736 * Splits the descriptor data into individual lines checking for correct line … … 1952 1746 unsigned cLine = 0; 1953 1747 int rc = VINF_SUCCESS; 1954 1955 1748 while ( RT_SUCCESS(rc) 1956 1749 && *pszTmp != '\0') … … 1963 1756 break; 1964 1757 } 1965 1966 1758 while (*pszTmp != '\0' && *pszTmp != '\n') 1967 1759 { … … 1981 1773 pszTmp++; 1982 1774 } 1983 1984 1775 if (RT_FAILURE(rc)) 1985 1776 break; 1986 1987 1777 /* Get rid of LF character. */ 1988 1778 if (*pszTmp == '\n') … … 1992 1782 } 1993 1783 } 1994 1995 1784 if (RT_SUCCESS(rc)) 1996 1785 { … … 1999 1788 pDesc->aLines[cLine] = pszTmp; 2000 1789 } 2001 2002 return rc; 2003 } 2004 1790 return rc; 1791 } 2005 1792 static int vmdkPreprocessDescriptor(PVMDKIMAGE pImage, char *pDescData, 2006 1793 size_t cbDescData, PVMDKDESCRIPTOR pDescriptor) … … 2019 1806 { 2020 1807 unsigned uLastNonEmptyLine = 0; 2021 2022 1808 /* Initialize those, because we need to be able to reopen an image. 
*/ 2023 1809 pDescriptor->uFirstDesc = 0; … … 2085 1871 } 2086 1872 } 2087 2088 return rc; 2089 } 2090 1873 return rc; 1874 } 2091 1875 static int vmdkDescSetPCHSGeometry(PVMDKIMAGE pImage, 2092 1876 PCVDGEOMETRY pPCHSGeometry) … … 2107 1891 return rc; 2108 1892 } 2109 2110 1893 static int vmdkDescSetLCHSGeometry(PVMDKIMAGE pImage, 2111 1894 PCVDGEOMETRY pLCHSGeometry) … … 2118 1901 rc = vmdkDescDDBSetU32(pImage, &pImage->Descriptor, 2119 1902 VMDK_DDB_GEO_LCHS_HEADS, 2120 2121 1903 pLCHSGeometry->cHeads); 2122 1904 if (RT_FAILURE(rc)) … … 2127 1909 return rc; 2128 1910 } 2129 2130 1911 static int vmdkCreateDescriptor(PVMDKIMAGE pImage, char *pDescData, 2131 1912 size_t cbDescData, PVMDKDESCRIPTOR pDescriptor) … … 2139 1920 pDescriptor->aLines[pDescriptor->cLines] = pDescData; 2140 1921 memset(pDescriptor->aNextLines, '\0', sizeof(pDescriptor->aNextLines)); 2141 2142 1922 int rc = vmdkDescInitStr(pImage, pDescriptor, "# Disk DescriptorFile"); 2143 1923 if (RT_SUCCESS(rc)) … … 2171 1951 { 2172 1952 pDescriptor->uFirstDDB = pDescriptor->cLines - 1; 2173 2174 1953 /* Now that the framework is in place, use the normal functions to insert 2175 1954 * the remaining keys. */ … … 2184 1963 if (RT_SUCCESS(rc)) 2185 1964 rc = vmdkDescDDBSetStr(pImage, pDescriptor, "ddb.adapterType", "ide"); 2186 2187 return rc; 2188 } 2189 1965 return rc; 1966 } 2190 1967 static int vmdkParseDescriptor(PVMDKIMAGE pImage, char *pDescData, size_t cbDescData) 2191 1968 { … … 2194 1971 unsigned uLine; 2195 1972 unsigned i; 2196 2197 1973 rc = vmdkPreprocessDescriptor(pImage, pDescData, cbDescData, 2198 1974 &pImage->Descriptor); 2199 1975 if (RT_FAILURE(rc)) 2200 1976 return rc; 2201 2202 1977 /* Check version, must be 1. */ 2203 1978 uint32_t uVersion; … … 2207 1982 if (uVersion != 1) 2208 1983 return vdIfError(pImage->pIfError, VERR_VD_VMDK_UNSUPPORTED_VERSION, RT_SRC_POS, N_("VMDK: unsupported format version in descriptor in '%s'"), pImage->pszFilename); 2209 2210 1984 /* Get image creation type and determine image flags. */ 2211 1985 char *pszCreateType = NULL; /* initialized to make gcc shut up */ … … 2225 1999 pImage->uImageFlags |= VD_IMAGE_FLAGS_FIXED | VD_VMDK_IMAGE_FLAGS_ESX; 2226 2000 RTMemTmpFree(pszCreateType); 2227 2228 2001 /* Count the number of extent config entries. */ 2229 2002 for (uLine = pImage->Descriptor.uFirstExtent, cExtents = 0; … … 2231 2004 uLine = pImage->Descriptor.aNextLines[uLine], cExtents++) 2232 2005 /* nothing */; 2233 2234 2006 if (!pImage->pDescData && cExtents != 1) 2235 2007 { … … 2237 2009 return vdIfError(pImage->pIfError, VERR_VD_VMDK_INVALID_HEADER, RT_SRC_POS, N_("VMDK: monolithic image may only have one extent in '%s'"), pImage->pszFilename); 2238 2010 } 2239 2240 2011 if (pImage->pDescData) 2241 2012 { … … 2245 2016 return rc; 2246 2017 } 2247 2248 2018 for (i = 0, uLine = pImage->Descriptor.uFirstExtent; 2249 2019 i < cExtents; i++, uLine = pImage->Descriptor.aNextLines[uLine]) 2250 2020 { 2251 2021 char *pszLine = pImage->Descriptor.aLines[uLine]; 2252 2253 2022 /* Access type of the extent. */ 2254 2023 if (!strncmp(pszLine, "RW", 2)) … … 2271 2040 if (*pszLine++ != ' ') 2272 2041 return vdIfError(pImage->pIfError, VERR_VD_VMDK_INVALID_HEADER, RT_SRC_POS, N_("VMDK: parse error in extent description in '%s'"), pImage->pszFilename); 2273 2274 2042 /* Nominal size of the extent. 
*/ 2275 2043 rc = RTStrToUInt64Ex(pszLine, &pszLine, 10, … … 2279 2047 if (*pszLine++ != ' ') 2280 2048 return vdIfError(pImage->pIfError, VERR_VD_VMDK_INVALID_HEADER, RT_SRC_POS, N_("VMDK: parse error in extent description in '%s'"), pImage->pszFilename); 2281 2282 2049 /* Type of the extent. */ 2283 2050 if (!strncmp(pszLine, "SPARSE", 6)) … … 2303 2070 else 2304 2071 return vdIfError(pImage->pIfError, VERR_VD_VMDK_INVALID_HEADER, RT_SRC_POS, N_("VMDK: parse error in extent description in '%s'"), pImage->pszFilename); 2305 2306 2072 if (pImage->pExtents[i].enmType == VMDKETYPE_ZERO) 2307 2073 { … … 2318 2084 if (*pszLine++ != ' ') 2319 2085 return vdIfError(pImage->pIfError, VERR_VD_VMDK_INVALID_HEADER, RT_SRC_POS, N_("VMDK: parse error in extent description in '%s'"), pImage->pszFilename); 2320 2321 2086 /* Basename of the image. Surrounded by quotes. */ 2322 2087 char *pszBasename; … … 2337 2102 } 2338 2103 } 2339 2340 2104 if (*pszLine != '\0') 2341 2105 return vdIfError(pImage->pIfError, VERR_VD_VMDK_INVALID_HEADER, RT_SRC_POS, N_("VMDK: parse error in extent description in '%s'"), pImage->pszFilename); 2342 2106 } 2343 2107 } 2344 2345 2108 /* Determine PCHS geometry (autogenerate if necessary). */ 2346 2109 rc = vmdkDescDDBGetU32(pImage, &pImage->Descriptor, … … 2377 2140 pImage->PCHSGeometry.cSectors = 63; 2378 2141 } 2379 2380 2142 /* Determine LCHS geometry (set to 0 if not specified). */ 2381 2143 rc = vmdkDescDDBGetU32(pImage, &pImage->Descriptor, … … 2408 2170 pImage->LCHSGeometry.cSectors = 0; 2409 2171 } 2410 2411 2172 /* Get image UUID. */ 2412 2173 rc = vmdkDescDDBGetUuid(pImage, &pImage->Descriptor, VMDK_DDB_IMAGE_UUID, … … 2432 2193 else if (RT_FAILURE(rc)) 2433 2194 return rc; 2434 2435 2195 /* Get image modification UUID. */ 2436 2196 rc = vmdkDescDDBGetUuid(pImage, &pImage->Descriptor, … … 2458 2218 else if (RT_FAILURE(rc)) 2459 2219 return rc; 2460 2461 2220 /* Get UUID of parent image. */ 2462 2221 rc = vmdkDescDDBGetUuid(pImage, &pImage->Descriptor, VMDK_DDB_PARENT_UUID, … … 2482 2241 else if (RT_FAILURE(rc)) 2483 2242 return rc; 2484 2485 2243 /* Get parent image modification UUID. */ 2486 2244 rc = vmdkDescDDBGetUuid(pImage, &pImage->Descriptor, … … 2506 2264 else if (RT_FAILURE(rc)) 2507 2265 return rc; 2508 2509 2266 return VINF_SUCCESS; 2510 2267 } 2511 2512 2268 /** 2513 2269 * Internal : Prepares the descriptor to write to the image. … … 2517 2273 { 2518 2274 int rc = VINF_SUCCESS; 2519 2520 2275 /* 2521 2276 * Allocate temporary descriptor buffer. 
… … 2526 2281 char *pszDescriptor = (char *)RTMemAllocZ(cbDescriptor); 2527 2282 size_t offDescriptor = 0; 2528 2529 2283 if (!pszDescriptor) 2530 2284 return VERR_NO_MEMORY; 2531 2532 2285 for (unsigned i = 0; i < pImage->Descriptor.cLines; i++) 2533 2286 { 2534 2287 const char *psz = pImage->Descriptor.aLines[i]; 2535 2288 size_t cb = strlen(psz); 2536 2537 2289 /* 2538 2290 * Increase the descriptor if there is no limit and … … 2550 2302 char *pszDescriptorNew = NULL; 2551 2303 LogFlow(("Increasing descriptor cache\n")); 2552 2553 2304 pszDescriptorNew = (char *)RTMemRealloc(pszDescriptor, cbDescriptor + cb + 4 * _1K); 2554 2305 if (!pszDescriptorNew) … … 2561 2312 } 2562 2313 } 2563 2564 2314 if (cb > 0) 2565 2315 { … … 2567 2317 offDescriptor += cb; 2568 2318 } 2569 2570 2319 memcpy(pszDescriptor + offDescriptor, "\n", 1); 2571 2320 offDescriptor++; 2572 2321 } 2573 2574 2322 if (RT_SUCCESS(rc)) 2575 2323 { … … 2579 2327 else if (pszDescriptor) 2580 2328 RTMemFree(pszDescriptor); 2581 2582 return rc; 2583 } 2584 2329 return rc; 2330 } 2585 2331 /** 2586 2332 * Internal: write/update the descriptor part of the image. … … 2594 2340 void *pvDescriptor = NULL; 2595 2341 size_t cbDescriptor; 2596 2597 2342 if (pImage->pDescData) 2598 2343 { … … 2612 2357 if (pDescFile == NULL) 2613 2358 return VERR_INVALID_PARAMETER; 2614 2615 2359 rc = vmdkDescriptorPrepare(pImage, cbLimit, &pvDescriptor, &cbDescriptor); 2616 2360 if (RT_SUCCESS(rc)) … … 2624 2368 rc = vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: error writing descriptor in '%s'"), pImage->pszFilename); 2625 2369 } 2626 2627 2370 if (RT_SUCCESS(rc) && !cbLimit) 2628 2371 { … … 2631 2374 rc = vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: error truncating descriptor in '%s'"), pImage->pszFilename); 2632 2375 } 2633 2634 2376 if (RT_SUCCESS(rc)) 2635 2377 pImage->Descriptor.fDirty = false; 2636 2637 2378 if (pvDescriptor) 2638 2379 RTMemFree(pvDescriptor); 2639 2380 return rc; 2640 2641 } 2642 2381 } 2643 2382 /** 2644 2383 * Internal: validate the consistency check values in a binary header. … … 2674 2413 return rc; 2675 2414 } 2676 2677 2415 /** 2678 2416 * Internal: read metadata belonging to an extent with binary header, i.e. 
… … 2684 2422 SparseExtentHeader Header; 2685 2423 int rc; 2686 2687 2424 if (!fMagicAlreadyRead) 2688 2425 rc = vdIfIoIntFileReadSync(pImage->pIfIo, pExtent->pFile->pStorage, 0, … … 2697 2434 - RT_UOFFSETOF(SparseExtentHeader, version)); 2698 2435 } 2699 2700 2436 if (RT_SUCCESS(rc)) 2701 2437 { … … 2704 2440 { 2705 2441 uint64_t cbFile = 0; 2706 2707 2442 if ( (RT_LE2H_U32(Header.flags) & RT_BIT(17)) 2708 2443 && RT_LE2H_U64(Header.gdOffset) == VMDK_GD_AT_END) 2709 2444 pExtent->fFooter = true; 2710 2711 2445 if ( !(pImage->uOpenFlags & VD_OPEN_FLAGS_READONLY) 2712 2446 || ( pExtent->fFooter … … 2717 2451 rc = vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: cannot get size of '%s'"), pExtent->pszFullname); 2718 2452 } 2719 2720 2453 if (RT_SUCCESS(rc)) 2721 2454 { 2722 2455 if (!(pImage->uOpenFlags & VD_OPEN_FLAGS_READONLY)) 2723 2456 pExtent->uAppendPosition = RT_ALIGN_64(cbFile, 512); 2724 2725 2457 if ( pExtent->fFooter 2726 2458 && ( !(pImage->uOpenFlags & VD_OPEN_FLAGS_READONLY) … … 2736 2468 rc = VERR_VD_VMDK_INVALID_HEADER; 2737 2469 } 2738 2739 2470 if (RT_SUCCESS(rc)) 2740 2471 rc = vmdkValidateHeader(pImage, pExtent, &Header); … … 2742 2473 pExtent->uAppendPosition = 0; 2743 2474 } 2744 2745 2475 if (RT_SUCCESS(rc)) 2746 2476 { … … 2765 2495 pExtent->uSectorRGD = 0; 2766 2496 } 2767 2768 2497 if (pExtent->uDescriptorSector && !pExtent->cDescriptorSectors) 2769 2498 rc = vdIfError(pImage->pIfError, VERR_VD_VMDK_INVALID_HEADER, RT_SRC_POS, 2770 2499 N_("VMDK: inconsistent embedded descriptor config in '%s'"), pExtent->pszFullname); 2771 2772 2500 if ( RT_SUCCESS(rc) 2773 2501 && ( pExtent->uSectorGD == VMDK_GD_AT_END … … 2777 2505 rc = vdIfError(pImage->pIfError, VERR_VD_VMDK_INVALID_HEADER, RT_SRC_POS, 2778 2506 N_("VMDK: cannot resolve grain directory offset in '%s'"), pExtent->pszFullname); 2779 2780 2507 if (RT_SUCCESS(rc)) 2781 2508 { … … 2788 2515 pExtent->cSectorsPerGDE = cSectorsPerGDE; 2789 2516 pExtent->cGDEntries = (pExtent->cSectors + cSectorsPerGDE - 1) / cSectorsPerGDE; 2790 2791 2517 /* Fix up the number of descriptor sectors, as some flat images have 2792 2518 * really just one, and this causes failures when inserting the UUID … … 2811 2537 rc = VERR_VD_VMDK_INVALID_HEADER; 2812 2538 } 2813 2814 2539 if (RT_FAILURE(rc)) 2815 2540 vmdkFreeExtentData(pImage, pExtent, false); 2816 2817 return rc; 2818 } 2819 2541 return rc; 2542 } 2820 2543 /** 2821 2544 * Internal: read additional metadata belonging to an extent. For those … … 2825 2548 { 2826 2549 int rc = VINF_SUCCESS; 2827 2828 2550 /* disabled the check as there are too many truncated vmdk images out there */ 2829 2551 #ifdef VBOX_WITH_VMDK_STRICT_SIZE_CHECK … … 2865 2587 if (pImage->uImageFlags & VD_VMDK_IMAGE_FLAGS_STREAM_OPTIMIZED) 2866 2588 pExtent->uAppendPosition = 0; 2867 2868 2589 if ( !(pImage->uImageFlags & VD_VMDK_IMAGE_FLAGS_STREAM_OPTIMIZED) 2869 2590 || !(pImage->uOpenFlags & VD_OPEN_FLAGS_READONLY) … … 2879 2600 } 2880 2601 } 2881 2882 2602 if (RT_FAILURE(rc)) 2883 2603 vmdkFreeExtentData(pImage, pExtent, false); 2884 2885 return rc; 2886 } 2887 2604 return rc; 2605 } 2888 2606 /** 2889 2607 * Internal: write/update the metadata for a sparse extent. 
… … 2893 2611 { 2894 2612 SparseExtentHeader Header; 2895 2896 2613 memset(&Header, '\0', sizeof(Header)); 2897 2614 Header.magicNumber = RT_H2LE_U32(VMDK_SPARSE_MAGICNUMBER); … … 2936 2653 Header.doubleEndLineChar2 = '\n'; 2937 2654 Header.compressAlgorithm = RT_H2LE_U16(pExtent->uCompression); 2938 2939 2655 int rc = vdIfIoIntFileWriteMeta(pImage->pIfIo, pExtent->pFile->pStorage, 2940 2656 uOffset, &Header, sizeof(Header), … … 2944 2660 return rc; 2945 2661 } 2946 2947 2662 /** 2948 2663 * Internal: free the buffers used for streamOptimized images. … … 2961 2676 } 2962 2677 } 2963 2964 2678 /** 2965 2679 * Internal: free the memory used by the extent data structure, optionally … … 2975 2689 { 2976 2690 int rc = VINF_SUCCESS; 2977 2978 2691 vmdkFreeGrainDirectory(pExtent); 2979 2692 if (pExtent->pDescData) … … 3002 2715 } 3003 2716 vmdkFreeStreamBuffers(pExtent); 3004 3005 return rc; 3006 } 3007 2717 return rc; 2718 } 3008 2719 /** 3009 2720 * Internal: allocate grain table cache if necessary for this image. … … 3012 2723 { 3013 2724 PVMDKEXTENT pExtent; 3014 3015 2725 /* Allocate grain table cache if any sparse extent is present. */ 3016 2726 for (unsigned i = 0; i < pImage->cExtents; i++) … … 3032 2742 } 3033 2743 } 3034 3035 2744 return VINF_SUCCESS; 3036 2745 } 3037 3038 2746 /** 3039 2747 * Internal: allocate the given number of extents. … … 3063 2771 else 3064 2772 rc = VERR_NO_MEMORY; 3065 3066 return rc; 3067 } 3068 2773 return rc; 2774 } 3069 2775 /** 3070 2776 * Reads and processes the descriptor embedded in sparse images. … … 3114 2820 { 3115 2821 uint64_t cDescriptorSectorsOld = pExtent->cDescriptorSectors; 3116 3117 2822 pExtent->cDescriptorSectors = 4; 3118 2823 if (!(pImage->uOpenFlags & VD_OPEN_FLAGS_READONLY)) … … 3179 2884 rc = VERR_NO_MEMORY; 3180 2885 } 3181 3182 return rc; 3183 } 3184 2886 return rc; 2887 } 3185 2888 /** 3186 2889 * Reads the descriptor from a pure text file. … … 3269 2972 else 3270 2973 pExtent->pszFullname = NULL; 3271 3272 2974 unsigned uOpenFlags = pImage->uOpenFlags | ((pExtent->enmAccess == VMDKACCESS_READONLY) ? VD_OPEN_FLAGS_READONLY : 0); 3273 2975 switch (pExtent->enmType) … … 3290 2992 if (RT_FAILURE(rc)) 3291 2993 break; 3292 3293 2994 /* Mark extent as unclean if opened in read-write mode. */ 3294 2995 if (!(pImage->uOpenFlags & VD_OPEN_FLAGS_READONLY)) … … 3327 3028 else if (RT_SUCCESS(rc)) 3328 3029 rc = vdIfError(pImage->pIfError, VERR_VD_VMDK_INVALID_HEADER, RT_SRC_POS, N_("VMDK: descriptor in '%s' is too short"), pImage->pszFilename); 3329 3330 return rc; 3331 } 3332 3030 return rc; 3031 } 3333 3032 /** 3334 3033 * Read and process the descriptor based on the image type. … … 3341 3040 { 3342 3041 uint32_t u32Magic; 3343 3344 3042 /* Read magic (if present). */ 3345 3043 int rc = vdIfIoIntFileReadSync(pImage->pIfIo, pFile->pStorage, 0, … … 3358 3056 rc = VERR_VD_VMDK_INVALID_HEADER; 3359 3057 } 3360 3361 return rc; 3362 } 3363 3058 return rc; 3059 } 3364 3060 /** 3365 3061 * Internal: Open an image, constructing all necessary data structures. … … 3371 3067 pImage->pIfIo = VDIfIoIntGet(pImage->pVDIfsImage); 3372 3068 AssertPtrReturn(pImage->pIfIo, VERR_INVALID_PARAMETER); 3373 3374 3069 /* 3375 3070 * Open the image. … … 3384 3079 { 3385 3080 pImage->pFile = pFile; 3386 3387 3081 rc = vmdkDescriptorRead(pImage, pFile); 3388 3082 if (RT_SUCCESS(rc)) … … 3402 3096 } 3403 3097 } 3404 3405 3098 /* Update the image metadata now in case has changed. 
*/ 3406 3099 rc = vmdkFlushImage(pImage, NULL); … … 3422 3115 || pExtent->enmType == VMDKETYPE_ZERO) 3423 3116 pImage->uImageFlags |= VD_IMAGE_FLAGS_FIXED; 3424 3425 3117 pImage->cbSize += VMDK_SECTOR2BYTE(pExtent->cNominalSectors); 3426 3118 } 3427 3428 3119 if ( !(pImage->uImageFlags & VD_VMDK_IMAGE_FLAGS_STREAM_OPTIMIZED) 3429 3120 || !(pImage->uOpenFlags & VD_OPEN_FLAGS_READONLY) … … 3435 3126 /* else: Do NOT signal an appropriate error here, as the VD layer has the 3436 3127 * choice of retrying the open if it failed. */ 3437 3438 3128 if (RT_SUCCESS(rc)) 3439 3129 { … … 3441 3131 pImage->RegionList.fFlags = 0; 3442 3132 pImage->RegionList.cRegions = 1; 3443 3444 3133 pRegion->offRegion = 0; /* Disk start. */ 3445 3134 pRegion->cbBlock = 512; … … 3454 3143 return rc; 3455 3144 } 3456 3457 3145 /** 3458 3146 * Frees a raw descriptor. … … 3463 3151 if (!pRawDesc) 3464 3152 return VINF_SUCCESS; 3465 3466 3153 RTStrFree(pRawDesc->pszRawDisk); 3467 3154 pRawDesc->pszRawDisk = NULL; 3468 3469 3155 /* Partitions: */ 3470 3156 for (unsigned i = 0; i < pRawDesc->cPartDescs; i++) … … 3472 3158 RTStrFree(pRawDesc->pPartDescs[i].pszRawDevice); 3473 3159 pRawDesc->pPartDescs[i].pszRawDevice = NULL; 3474 3475 3160 RTMemFree(pRawDesc->pPartDescs[i].pvPartitionData); 3476 3161 pRawDesc->pPartDescs[i].pvPartitionData = NULL; 3477 3162 } 3478 3479 3163 RTMemFree(pRawDesc->pPartDescs); 3480 3164 pRawDesc->pPartDescs = NULL; 3481 3482 3165 RTMemFree(pRawDesc); 3483 3166 return VINF_SUCCESS; 3484 3167 } 3485 3486 3168 /** 3487 3169 * Helper that grows the raw partition descriptor table by @a cToAdd entries, … … 3500 3182 pRawDesc->cPartDescs = cNew; 3501 3183 pRawDesc->pPartDescs = paNew; 3502 3503 3184 *ppRet = &paNew[cOld]; 3504 3185 return VINF_SUCCESS; … … 3509 3190 pImage->pszFilename, cOld, cNew); 3510 3191 } 3511 3512 3192 /** 3513 3193 * @callback_method_impl{FNRTSORTCMP} … … 3519 3199 return iDelta < 0 ? -1 : iDelta > 0 ? 1 : 0; 3520 3200 } 3521 3522 3201 /** 3523 3202 * Post processes the partition descriptors. … … 3531 3210 */ 3532 3211 RTSortShell(pRawDesc->pPartDescs, pRawDesc->cPartDescs, sizeof(pRawDesc->pPartDescs[0]), vmdkRawDescPartComp, NULL); 3533 3534 3212 /* 3535 3213 * Check that we don't have overlapping descriptors. If we do, that's an … … 3546 3224 paPartDescs[i].pvPartitionData ? " (data)" : ""); 3547 3225 offLast -= 1; 3548 3549 3226 if (i + 1 < pRawDesc->cPartDescs && offLast >= paPartDescs[i + 1].offStartInVDisk) 3550 3227 return vdIfError(pImage->pIfError, VERR_FILESYSTEM_CORRUPT /*?*/, RT_SRC_POS, … … 3559 3236 paPartDescs[i].pvPartitionData ? " (data)" : "", cbSize); 3560 3237 } 3561 3562 3238 return VINF_SUCCESS; 3563 3239 } 3564 3565 3566 3240 #ifdef RT_OS_LINUX 3567 3241 /** … … 3586 3260 size_t const cchDir = RTPathEnsureTrailingSeparator(pszBlockDevDir, cbBlockDevDir); 3587 3261 AssertReturn(cchDir > 0, VERR_BUFFER_OVERFLOW); 3588 3589 3262 RTDIR hDir = NIL_RTDIR; 3590 3263 int rc = RTDirOpen(&hDir, pszBlockDevDir); … … 3604 3277 rc = RTStrCopy(&pszBlockDevDir[cchDir], cbBlockDevDir - cchDir, Entry.szName); 3605 3278 AssertContinue(RT_SUCCESS(rc)); /* should not happen! 
*/ 3606 3607 3279 dev_t uThisDevNo = ~uDevToLocate; 3608 3280 rc = RTLinuxSysFsReadDevNumFile(&uThisDevNo, "%s/dev", pszBlockDevDir); … … 3634 3306 } 3635 3307 #endif /* RT_OS_LINUX */ 3636 3637 3308 #ifdef RT_OS_FREEBSD 3638 3639 3640 3309 /** 3641 3310 * Reads the config data from the provider and returns offset and size … … 3650 3319 gconfig *pConfEntry; 3651 3320 int rc = VERR_NOT_FOUND; 3652 3653 3321 /* 3654 3322 * Required parameters are located in the list containing key/value pairs. … … 3681 3349 return rc; 3682 3350 } 3683 3684 3685 3351 /** 3686 3352 * Searches the partition specified by name and calculates its size and absolute offset. … … 3701 3367 AssertReturn(pcbAbsoluteOffset, VERR_INVALID_PARAMETER); 3702 3368 AssertReturn(pcbSize, VERR_INVALID_PARAMETER); 3703 3704 3369 ggeom *pParentGeom; 3705 3370 int rc = VERR_NOT_FOUND; … … 3714 3379 if (RT_FAILURE(rc)) 3715 3380 return rc; 3716 3717 3381 gprovider *pProvider; 3718 3382 /* … … 3726 3390 return vmdkReadPartitionsParamsFromProvider(pProvider, pcbAbsoluteOffset, pcbSize); 3727 3391 } 3728 3729 3392 /* 3730 3393 * No provider found. Go over the parent geom again … … 3736 3399 * provider 3737 3400 */ 3738 3739 3401 LIST_FOREACH(pProvider, &pParentGeom->lg_provider, lg_provider) 3740 3402 { … … 3744 3406 if (RT_FAILURE(rc)) 3745 3407 return rc; 3746 3747 3408 uint64_t cbProviderOffset = 0; 3748 3409 uint64_t cbProviderSize = 0; … … 3755 3416 } 3756 3417 } 3757 3758 3418 return VERR_NOT_FOUND; 3759 3419 } 3760 3420 #endif 3761 3762 3763 3421 /** 3764 3422 * Attempts to verify the raw partition path. … … 3770 3428 { 3771 3429 RT_NOREF(pImage, pPartDesc, idxPartition, pszRawDrive, hRawDrive, cbSector, hVol); 3772 3773 3430 /* 3774 3431 * Try open the raw partition device. … … 3780 3437 N_("VMDK: Image path: '%s'. Failed to open partition #%u on '%s' via '%s' (%Rrc)"), 3781 3438 pImage->pszFilename, idxPartition, pszRawDrive, pPartDesc->pszRawDevice, rc); 3782 3783 3439 /* 3784 3440 * Compare the partition UUID if we can get it. … … 3786 3442 #ifdef RT_OS_WINDOWS 3787 3443 DWORD cbReturned; 3788 3789 3444 /* 1. Get the device numbers for both handles, they should have the same disk. */ 3790 3445 STORAGE_DEVICE_NUMBER DevNum1; … … 3795 3450 N_("VMDK: Image path: '%s'. IOCTL_STORAGE_GET_DEVICE_NUMBER failed on '%s': %u"), 3796 3451 pImage->pszFilename, pszRawDrive, GetLastError()); 3797 3798 3452 STORAGE_DEVICE_NUMBER DevNum2; 3799 3453 RT_ZERO(DevNum2); … … 3887 3541 rc = VERR_NO_TMP_MEMORY; 3888 3542 } 3889 3890 3543 #elif defined(RT_OS_LINUX) 3891 3544 RT_NOREF(hVol); 3892 3893 3545 /* Stat the two devices first to get their device numbers. (We probably 3894 3546 could make some assumptions here about the major & minor number assignments … … 3911 3563 { 3912 3564 rc = vmdkFindSysBlockDevPath(pImage, szSysPath, sizeof(szSysPath), StDrive.st_rdev, pszRawDrive); 3913 3914 3565 /* Now, scan the directories under that again for a partition device 3915 3566 matching the hRawPart device's number: */ 3916 3567 if (RT_SUCCESS(rc)) 3917 3568 rc = vmdkFindSysBlockDevPath(pImage, szSysPath, sizeof(szSysPath), StPart.st_rdev, pPartDesc->pszRawDevice); 3918 3919 3569 /* Having found the /sys/block/device/partition/ path, we can finally 3920 3570 read the partition attributes and compare with hVol. */ … … 3929 3579 pImage->pszFilename, idxPartition, pPartDesc->pszRawDevice, pszRawDrive, iLnxPartition, idxPartition); 3930 3580 /* else: ignore failure? 
*/ 3931 3932 3581 /* start offset: */ 3933 3582 uint32_t const cbLnxSector = 512; /* It's hardcoded in the Linux kernel */ … … 3943 3592 /* else: ignore failure? */ 3944 3593 } 3945 3946 3594 /* the size: */ 3947 3595 if (RT_SUCCESS(rc)) … … 3960 3608 /* else: We've got nothing to work on, so only do content comparison. */ 3961 3609 } 3962 3963 3610 #elif defined(RT_OS_FREEBSD) 3964 3611 char szDriveDevName[256]; … … 3991 3638 rc = vdIfError(pImage->pIfError, VERR_GENERAL_FAILURE, RT_SRC_POS, 3992 3639 N_("VMDK: Image path: '%s'. 'PART' class not found in the GEOM tree"), pImage->pszFilename); 3993 3994 3995 3640 if (RT_SUCCESS(rc)) 3996 3641 { … … 4015 3660 pImage->pszFilename, pPartDesc->pszRawDevice, pszRawDrive, rc); 4016 3661 } 4017 4018 3662 geom_deletetree(&geomMesh); 4019 3663 } … … 4022 3666 N_("VMDK: Image path: '%s'. geom_gettree failed: %d"), pImage->pszFilename, err); 4023 3667 } 4024 4025 3668 #elif defined(RT_OS_SOLARIS) 4026 3669 RT_NOREF(hVol); 4027 4028 3670 dk_cinfo dkiDriveInfo; 4029 3671 dk_cinfo dkiPartInfo; … … 4073 3715 * using another way. If there is an error, it returns errno which will be handled below. 4074 3716 */ 4075 4076 3717 uint32_t numPartition = (uint32_t)dkiPartInfo.dki_partition; 4077 3718 if (numPartition > NDKMAP) … … 4108 3749 N_("VMDK: Image path: '%s'. Partition #%u path ('%s') verification failed on '%s': Start offset %RI64, expected %RU64"), 4109 3750 pImage->pszFilename, idxPartition, pPartDesc->pszRawDevice, pszRawDrive, cbOffset, pPartDesc->offStartInVDisk); 4110 4111 3751 if (RT_SUCCESS(rc) && cbSize != pPartDesc->cbData) 4112 3752 rc = vdIfError(pImage->pIfError, VERR_MISMATCH, RT_SRC_POS, … … 4114 3754 pImage->pszFilename, idxPartition, pPartDesc->pszRawDevice, pszRawDrive, cbSize, pPartDesc->cbData); 4115 3755 } 4116 4117 3756 #else 4118 3757 RT_NOREF(hVol); /* PORTME */ … … 4132 3771 { 4133 3772 uint8_t *pbSector2 = pbSector1 + cbToCompare; 4134 4135 3773 /* Do the comparing, we repeat if it fails and the data might be volatile. */ 4136 3774 uint64_t uPrevCrc1 = 0; … … 4148 3786 { 4149 3787 rc = VERR_MISMATCH; 4150 4151 3788 /* Do data stability checks before repeating: */ 4152 3789 uint64_t const uCrc1 = RTCrc64(pbSector1, cbToCompare); … … 4181 3818 offMissmatch++; 4182 3819 int cbSample = (int)RT_MIN(cbToCompare - offMissmatch, 16); 4183 4184 3820 if (cStable > 0) 4185 3821 rc = vdIfError(pImage->pIfError, rc, RT_SRC_POS, … … 4195 3831 } 4196 3832 } 4197 4198 3833 RTMemTmpFree(pbSector1); 4199 3834 } … … 4206 3841 return rc; 4207 3842 } 4208 4209 3843 #ifdef RT_OS_WINDOWS 4210 3844 /** … … 4228 3862 } 4229 3863 #endif /* RT_OS_WINDOWS */ 4230 4231 3864 /** 4232 3865 * Worker for vmdkMakeRawDescriptor that adds partition descriptors when the … … 4245 3878 { 4246 3879 *phVolToRelease = NIL_RTDVMVOLUME; 4247 4248 3880 /* Check sanity/understanding. */ 4249 3881 Assert(fPartitions); 4250 3882 Assert((fPartitions & fPartitionsReadOnly) == fPartitionsReadOnly); /* RO should be a sub-set */ 4251 4252 3883 /* 4253 3884 * Allocate on descriptor for each volume up front. 4254 3885 */ 4255 3886 uint32_t const cVolumes = RTDvmMapGetValidVolumes(hVolMgr); 4256 4257 3887 PVDISKRAWPARTDESC paPartDescs = NULL; 4258 3888 int rc = vmdkRawDescAppendPartDesc(pImage, pRawDesc, cVolumes, &paPartDescs); 4259 3889 AssertRCReturn(rc, rc); 4260 4261 3890 /* 4262 3891 * Enumerate the partitions (volumes) on the disk and create descriptors for each of them. 
… … 4281 3910 Assert(cRefs != UINT32_MAX); RT_NOREF(cRefs); 4282 3911 *phVolToRelease = hVol = hVolNext; 4283 4284 3912 /* 4285 3913 * Depending on the fPartitions selector and associated read-only mask, … … 4288 3916 */ 4289 3917 paPartDescs[i].cbData = RTDvmVolumeGetSize(hVol); 4290 4291 3918 uint64_t offVolumeEndIgnored = 0; 4292 3919 rc = RTDvmVolumeQueryRange(hVol, &paPartDescs[i].offStartInVDisk, &offVolumeEndIgnored); … … 4296 3923 pImage->pszFilename, i, pszRawDrive, rc); 4297 3924 Assert(paPartDescs[i].cbData == offVolumeEndIgnored + 1 - paPartDescs[i].offStartInVDisk); 4298 4299 3925 /* Note! The index must match IHostDrivePartition::number. */ 4300 3926 uint32_t idxPartition = RTDvmVolumeGetIndex(hVol, RTDVMVOLIDX_HOST); … … 4305 3931 if (fPartitionsReadOnly & RT_BIT_32(idxPartition)) 4306 3932 paPartDescs[i].uFlags |= VDISKRAW_READONLY; 4307 4308 3933 if (!fRelative) 4309 3934 { … … 4326 3951 */ 4327 3952 paPartDescs[i].offStartInDevice = 0; 4328 4329 3953 #if defined(RT_OS_DARWIN) || defined(RT_OS_FREEBSD) 4330 3954 /* /dev/rdisk1 -> /dev/rdisk1s2 (s=slice) */ … … 4340 3964 if (pRawDesc->enmPartitioningType == VDISKPARTTYPE_MBR) 4341 3965 { 4342 /* 3966 /* 4343 3967 * MBR partitions have device nodes in form /dev/(r)dsk/cXtYdZpK 4344 3968 * where X is the controller, … … 4380 4004 #endif 4381 4005 AssertPtrReturn(paPartDescs[i].pszRawDevice, VERR_NO_STR_MEMORY); 4382 4383 4006 rc = vmdkRawDescVerifyPartitionPath(pImage, &paPartDescs[i], idxPartition, pszRawDrive, hRawDrive, cbSector, hVol); 4384 4007 AssertRCReturn(rc, rc); … … 4392 4015 } 4393 4016 } /* for each volume */ 4394 4395 4017 RTDvmVolumeRelease(hVol); 4396 4018 *phVolToRelease = NIL_RTDVMVOLUME; 4397 4398 4019 /* 4399 4020 * Check that we found all the partitions the user selected. … … 4410 4031 pImage->pszFilename, pszRawDrive, szLeft); 4411 4032 } 4412 4413 4033 return VINF_SUCCESS; 4414 4034 } 4415 4416 4035 /** 4417 4036 * Worker for vmdkMakeRawDescriptor that adds partition descriptors with copies … … 4444 4063 pImage->pszFilename, pszRawDrive, rc); 4445 4064 AssertReturn(cLocations > 0 && cLocations < _16M, VERR_INTERNAL_ERROR_5); 4446 4447 4065 /* We can allocate the partition descriptors here to save an intentation level. */ 4448 4066 PVDISKRAWPARTDESC paPartDescs = NULL; 4449 4067 rc = vmdkRawDescAppendPartDesc(pImage, pRawDesc, (uint32_t)cLocations, &paPartDescs); 4450 4068 AssertRCReturn(rc, rc); 4451 4452 4069 /* Allocate the result table and repeat the location table query: */ 4453 4070 PRTDVMTABLELOCATION paLocations = (PRTDVMTABLELOCATION)RTMemAllocZ(sizeof(paLocations[0]) * cLocations); … … 4529 4146 return rc; 4530 4147 } 4531 4532 4148 /** 4533 4149 * Opens the volume manager for the raw drive when in selected-partition mode. … … 4545 4161 { 4546 4162 *phVolMgr = NIL_RTDVM; 4547 4548 4163 RTVFSFILE hVfsFile = NIL_RTVFSFILE; 4549 4164 int rc = RTVfsFileFromRTFile(hRawDrive, RTFILE_O_READ | RTFILE_O_OPEN | RTFILE_O_DENY_NONE, true /*fLeaveOpen*/, &hVfsFile); … … 4552 4167 N_("VMDK: Image path: '%s'. RTVfsFileFromRTFile failed for '%s' handle (%Rrc)"), 4553 4168 pImage->pszFilename, pszRawDrive, rc); 4554 4555 4169 RTDVM hVolMgr = NIL_RTDVM; 4556 4170 rc = RTDvmCreate(&hVolMgr, hVfsFile, cbSector, 0 /*fFlags*/); 4557 4558 4171 RTVfsFileRelease(hVfsFile); 4559 4560 4172 if (RT_FAILURE(rc)) 4561 4173 return vdIfError(pImage->pIfError, rc, RT_SRC_POS, 4562 4174 N_("VMDK: Image path: '%s'. 
Failed to create volume manager instance for '%s' (%Rrc)"), 4563 4175 pImage->pszFilename, pszRawDrive, rc); 4564 4565 4176 rc = RTDvmMapOpen(hVolMgr); 4566 4177 if (RT_SUCCESS(rc)) … … 4573 4184 pImage->pszFilename, pszRawDrive, rc); 4574 4185 } 4575 4576 4186 /** 4577 4187 * Opens the raw drive device and get the sizes for it. … … 4597 4207 N_("VMDK: Image path: '%s'. Failed to open the raw drive '%s' for reading (%Rrc)"), 4598 4208 pImage->pszFilename, pszRawDrive, rc); 4599 4600 4209 /* 4601 4210 * Get the sector size. … … 4646 4255 return rc; 4647 4256 } 4648 4649 4257 /** 4650 4258 * Reads the raw disk configuration, leaving initalization and cleanup to the … … 4663 4271 return vdIfError(pImage->pIfError, VERR_INVALID_PARAMETER, RT_SRC_POS, 4664 4272 N_("VMDK: Image path: '%s'. Getting config interface failed"), pImage->pszFilename); 4665 4666 4273 /* 4667 4274 * RawDrive = path … … 4672 4279 N_("VMDK: Image path: '%s'. Getting 'RawDrive' configuration failed (%Rrc)"), pImage->pszFilename, rc); 4673 4280 AssertPtrReturn(*ppszRawDrive, VERR_INTERNAL_ERROR_3); 4674 4675 4281 /* 4676 4282 * Partitions=n[r][,...] … … 4678 4284 uint32_t const cMaxPartitionBits = sizeof(*pfPartitions) * 8 /* ASSUMES 8 bits per char */; 4679 4285 *pfPartitions = *pfPartitionsReadOnly = 0; 4680 4681 4286 rc = VDCFGQueryStringAlloc(pImgCfg, "Partitions", ppszFreeMe); 4682 4287 if (RT_SUCCESS(rc)) … … 4712 4317 pImage->pszFilename, psz); 4713 4318 } 4714 4715 4319 RTStrFree(*ppszFreeMe); 4716 4320 *ppszFreeMe = NULL; … … 4719 4323 return vdIfError(pImage->pIfError, rc, RT_SRC_POS, 4720 4324 N_("VMDK: Image path: '%s'. Getting 'Partitions' configuration failed (%Rrc)"), pImage->pszFilename, rc); 4721 4722 4325 /* 4723 4326 * BootSector=base64 … … 4739 4342 N_("VMDK: Image path: '%s'. Custom bootsector for '%s' is way too big: %zu bytes, max 4MB"), 4740 4343 pImage->pszFilename, *ppszRawDrive, cbBootSector); 4741 4742 4344 /* Refuse the boot sector if whole-drive. This used to be done quietly, 4743 4345 however, bird disagrees and thinks the user should be told that what … … 4748 4350 N_("VMDK: Image path: '%s'. Custom bootsector for '%s' is not supported for whole-drive configurations, only when selecting partitions"), 4749 4351 pImage->pszFilename, *ppszRawDrive); 4750 4751 4352 *pcbBootSector = (size_t)cbBootSector; 4752 4353 *ppvBootSector = RTMemAlloc((size_t)cbBootSector); … … 4755 4356 N_("VMDK: Image path: '%s'. Failed to allocate %zd bytes for the custom bootsector for '%s'"), 4756 4357 pImage->pszFilename, cbBootSector, *ppszRawDrive); 4757 4758 4358 rc = RTBase64Decode(*ppszFreeMe, *ppvBootSector, cbBootSector, NULL /*pcbActual*/, NULL /*ppszEnd*/); 4759 4359 if (RT_FAILURE(rc)) … … 4761 4361 N_("VMDK: Image path: '%s'. Base64 decoding of the custom boot sector for '%s' failed (%Rrc)"), 4762 4362 pImage->pszFilename, *ppszRawDrive, rc); 4763 4764 4363 RTStrFree(*ppszFreeMe); 4765 4364 *ppszFreeMe = NULL; … … 4768 4367 return vdIfError(pImage->pIfError, rc, RT_SRC_POS, 4769 4368 N_("VMDK: Image path: '%s'. Getting 'BootSector' configuration failed (%Rrc)"), pImage->pszFilename, rc); 4770 4771 4369 /* 4772 4370 * Relative=0/1 … … 4796 4394 *pfRelative = false; 4797 4395 #endif 4798 4799 4396 return VINF_SUCCESS; 4800 4397 } 4801 4802 4398 /** 4803 4399 * Creates a raw drive (nee disk) descriptor. … … 4818 4414 /* Make sure it's NULL. */ 4819 4415 *ppRaw = NULL; 4820 4821 4416 /* 4822 4417 * Read the configuration. 
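The configuration reader in this hunk documents the selected-partition syntax as Partitions=n[r][,...], one decimal partition number per entry with an optional 'r' suffix marking it read-only, stored as two bitmasks. A rough stand-alone parser illustrating that syntax (not the in-tree parser, which reports failures through the VD error interface) might look like this:

#include <stdint.h>
#include <stdbool.h>
#include <stdlib.h>

/* Parses a value such as "1,3r,5" into two bitmasks: one bit per selected
 * partition number, plus the subset flagged read-only with 'r'.
 * Returns false on syntax errors or numbers that do not fit the mask. */
static bool parsePartitionsValue(const char *psz, uint32_t *pfPartitions,
                                 uint32_t *pfPartitionsReadOnly)
{
    uint32_t const cMaxBits = sizeof(*pfPartitions) * 8;
    *pfPartitions = *pfPartitionsReadOnly = 0;
    while (*psz)
    {
        char *pszNext = NULL;
        unsigned long uPart = strtoul(psz, &pszNext, 10);
        if (pszNext == psz || uPart >= cMaxBits)
            return false;                       /* not a number / out of range */
        *pfPartitions |= (uint32_t)1 << uPart;
        if (*pszNext == 'r')                    /* optional read-only suffix */
        {
            *pfPartitionsReadOnly |= (uint32_t)1 << uPart;
            pszNext++;
        }
        if (*pszNext == ',')
            pszNext++;
        else if (*pszNext != '\0')
            return false;                       /* unexpected character */
        psz = pszNext;
    }
    return true;
}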
… … 4870 4465 //pRawDesc->cPartDescs = 0; 4871 4466 //pRawDesc->pPartDescs = NULL; 4872 4873 4467 /* We need to parse the partition map to complete the descriptor: */ 4874 4468 RTDVM hVolMgr = NIL_RTDVM; … … 4882 4476 pRawDesc->enmPartitioningType = enmFormatType == RTDVMFORMATTYPE_MBR 4883 4477 ? VDISKPARTTYPE_MBR : VDISKPARTTYPE_GPT; 4884 4885 4478 /* Add copies of the partition tables: */ 4886 4479 rc = vmdkRawDescDoCopyPartitionTables(pImage, hVolMgr, pRawDesc, pszRawDrive, hRawDrive, … … 4894 4487 fPartitions, fPartitionsReadOnly, fRelative, &hVolRelease); 4895 4488 RTDvmVolumeRelease(hVolRelease); 4896 4897 4489 /* Finally, sort the partition and check consistency (overlaps, etc): */ 4898 4490 if (RT_SUCCESS(rc)) … … 4938 4530 return rc; 4939 4531 } 4940 4941 4532 /** 4942 4533 * Internal: create VMDK images for raw disk/partition access. … … 4947 4538 int rc = VINF_SUCCESS; 4948 4539 PVMDKEXTENT pExtent; 4949 4950 4540 if (pRaw->uFlags & VDISKRAW_DISK) 4951 4541 { … … 4962 4552 if (RT_FAILURE(rc)) 4963 4553 return vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: could not create new file '%s'"), pImage->pszFilename); 4964 4965 4554 /* Set up basename for extent description. Cannot use StrDup. */ 4966 4555 size_t cbBasename = strlen(pRaw->pszRawDisk) + 1; … … 4979 4568 pExtent->enmAccess = (pRaw->uFlags & VDISKRAW_READONLY) ? VMDKACCESS_READONLY : VMDKACCESS_READWRITE; 4980 4569 pExtent->fMetaDirty = false; 4981 4982 4570 /* Open flat image, the raw disk. */ 4983 4571 rc = vmdkFileOpen(pImage, &pExtent->pFile, pExtent->pszBasename, pExtent->pszFullname, … … 4992 4580 * file, write the partition information to a flat extent and 4993 4581 * open all the (flat) raw disk partitions. */ 4994 4995 4582 /* First pass over the partition data areas to determine how many 4996 4583 * extents we need. One data area can require up to 2 extents, as … … 5004 4591 return vdIfError(pImage->pIfError, VERR_INVALID_PARAMETER, RT_SRC_POS, 5005 4592 N_("VMDK: incorrect partition data area ordering set up by the caller in '%s'"), pImage->pszFilename); 5006 5007 4593 if (uStart < pPart->offStartInVDisk) 5008 4594 cExtents++; … … 5013 4599 if (uStart != cbSize) 5014 4600 cExtents++; 5015 5016 4601 rc = vmdkCreateExtents(pImage, cExtents); 5017 4602 if (RT_FAILURE(rc)) 5018 4603 return vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: could not create new extent list in '%s'"), pImage->pszFilename); 5019 5020 4604 /* Create raw partition descriptor file. */ 5021 4605 rc = vmdkFileOpen(pImage, &pImage->pFile, NULL, pImage->pszFilename, … … 5024 4608 if (RT_FAILURE(rc)) 5025 4609 return vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: could not create new file '%s'"), pImage->pszFilename); 5026 5027 4610 /* Create base filename for the partition table extent. */ 5028 4611 /** @todo remove fixed buffer without creating memory leaks. */ … … 5039 4622 pszBaseBase, pszSuff); 5040 4623 RTStrFree(pszBaseBase); 5041 5042 4624 /* Second pass over the partitions, now define all extents. */ 5043 4625 uint64_t uPartOffset = 0; … … 5048 4630 PVDISKRAWPARTDESC pPart = &pRaw->pPartDescs[i]; 5049 4631 pExtent = &pImage->pExtents[cExtents++]; 5050 5051 4632 if (uStart < pPart->offStartInVDisk) 5052 4633 { … … 5062 4643 } 5063 4644 uStart = pPart->offStartInVDisk + pPart->cbData; 5064 5065 4645 if (pPart->pvPartitionData) 5066 4646 { … … 5072 4652 memcpy(pszBasename, pszPartition, cbBasename); 5073 4653 pExtent->pszBasename = pszBasename; 5074 5075 4654 /* Set up full name for partition extent. 
*/ 5076 4655 char *pszDirname = RTStrDup(pImage->pszFilename); … … 5088 4667 pExtent->enmAccess = VMDKACCESS_READWRITE; 5089 4668 pExtent->fMetaDirty = false; 5090 5091 4669 /* Create partition table flat image. */ 5092 4670 rc = vmdkFileOpen(pImage, &pExtent->pFile, pExtent->pszBasename, pExtent->pszFullname, … … 5123 4701 pExtent->enmAccess = (pPart->uFlags & VDISKRAW_READONLY) ? VMDKACCESS_READONLY : VMDKACCESS_READWRITE; 5124 4702 pExtent->fMetaDirty = false; 5125 5126 4703 /* Open flat image, the raw partition. */ 5127 4704 rc = vmdkFileOpen(pImage, &pExtent->pFile, pExtent->pszBasename, pExtent->pszFullname, … … 5156 4733 } 5157 4734 } 5158 5159 4735 rc = vmdkDescBaseSetStr(pImage, &pImage->Descriptor, "createType", 5160 4736 (pRaw->uFlags & VDISKRAW_DISK) ? … … 5164 4740 return rc; 5165 4741 } 5166 5167 4742 /** 5168 4743 * Internal: create a regular (i.e. file-backed) VMDK image. … … 5176 4751 uint64_t cbOffset = 0; 5177 4752 uint64_t cbRemaining = cbSize; 5178 5179 4753 if (uImageFlags & VD_VMDK_IMAGE_FLAGS_SPLIT_2G) 5180 4754 { … … 5188 4762 if (RT_FAILURE(rc)) 5189 4763 return vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: could not create new extent list in '%s'"), pImage->pszFilename); 5190 5191 4764 /* Basename strings needed for constructing the extent names. */ 5192 4765 char *pszBasenameSubstr = RTPathFilename(pImage->pszFilename); 5193 4766 AssertPtr(pszBasenameSubstr); 5194 4767 size_t cbBasenameSubstr = strlen(pszBasenameSubstr) + 1; 5195 5196 4768 /* Create separate descriptor file if necessary. */ 5197 4769 if (cExtents != 1 || (uImageFlags & VD_IMAGE_FLAGS_FIXED)) … … 5205 4777 else 5206 4778 pImage->pFile = NULL; 5207 5208 4779 /* Set up all extents. */ 5209 4780 for (unsigned i = 0; i < cExtents; i++) … … 5211 4782 PVMDKEXTENT pExtent = &pImage->pExtents[i]; 5212 4783 uint64_t cbExtent = cbRemaining; 5213 5214 4784 /* Set up fullname/basename for extent description. Cannot use StrDup 5215 4785 * for basename, as it is not guaranteed that the memory can be freed … … 5268 4838 return VERR_NO_STR_MEMORY; 5269 4839 pExtent->pszFullname = pszFullname; 5270 5271 4840 /* Create file for extent. */ 5272 4841 rc = vmdkFileOpen(pImage, &pExtent->pFile, pExtent->pszBasename, pExtent->pszFullname, … … 5284 4853 return vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: could not set size of new file '%s'"), pExtent->pszFullname); 5285 4854 } 5286 5287 4855 /* Place descriptor file information (where integrated). 
*/ 5288 4856 if (cExtents == 1 && !(uImageFlags & VD_IMAGE_FLAGS_FIXED)) … … 5294 4862 pImage->pDescData = NULL; 5295 4863 } 5296 5297 4864 if (!(uImageFlags & VD_IMAGE_FLAGS_FIXED)) 5298 4865 { … … 5322 4889 pExtent->enmType = VMDKETYPE_FLAT; 5323 4890 } 5324 5325 4891 pExtent->enmAccess = VMDKACCESS_READWRITE; 5326 4892 pExtent->fUncleanShutdown = true; … … 5328 4894 pExtent->uSectorOffset = 0; 5329 4895 pExtent->fMetaDirty = true; 5330 5331 4896 if (!(uImageFlags & VD_IMAGE_FLAGS_FIXED)) 5332 4897 { … … 5340 4905 return vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: could not create new grain directory in '%s'"), pExtent->pszFullname); 5341 4906 } 5342 5343 4907 cbOffset += cbExtent; 5344 5345 4908 if (RT_SUCCESS(rc)) 5346 4909 vdIfProgress(pIfProgress, uPercentStart + cbOffset * uPercentSpan / cbSize); 5347 5348 4910 cbRemaining -= cbExtent; 5349 4911 } 5350 5351 4912 if (pImage->uImageFlags & VD_VMDK_IMAGE_FLAGS_ESX) 5352 4913 { … … 5357 4918 return vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: could not set controller type to lsilogic in '%s'"), pImage->pszFilename); 5358 4919 } 5359 5360 4920 const char *pszDescType = NULL; 5361 4921 if (uImageFlags & VD_IMAGE_FLAGS_FIXED) … … 5383 4943 return rc; 5384 4944 } 5385 5386 4945 /** 5387 4946 * Internal: Create a real stream optimized VMDK using only linear writes. … … 5392 4951 if (RT_FAILURE(rc)) 5393 4952 return vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: could not create new extent list in '%s'"), pImage->pszFilename); 5394 5395 4953 /* Basename strings needed for constructing the extent names. */ 5396 4954 const char *pszBasenameSubstr = RTPathFilename(pImage->pszFilename); 5397 4955 AssertPtr(pszBasenameSubstr); 5398 4956 size_t cbBasenameSubstr = strlen(pszBasenameSubstr) + 1; 5399 5400 4957 /* No separate descriptor file. */ 5401 4958 pImage->pFile = NULL; 5402 5403 4959 /* Set up all extents. */ 5404 4960 PVMDKEXTENT pExtent = &pImage->pExtents[0]; 5405 5406 4961 /* Set up fullname/basename for extent description. Cannot use StrDup 5407 4962 * for basename, as it is not guaranteed that the memory can be freed … … 5413 4968 memcpy(pszBasename, pszBasenameSubstr, cbBasenameSubstr); 5414 4969 pExtent->pszBasename = pszBasename; 5415 5416 4970 char *pszBasedirectory = RTStrDup(pImage->pszFilename); 5417 4971 RTPathStripFilename(pszBasedirectory); … … 5421 4975 return VERR_NO_STR_MEMORY; 5422 4976 pExtent->pszFullname = pszFullname; 5423 5424 4977 /* Create file for extent. Make it write only, no reading allowed. */ 5425 4978 rc = vmdkFileOpen(pImage, &pExtent->pFile, pExtent->pszBasename, pExtent->pszFullname, … … 5429 4982 if (RT_FAILURE(rc)) 5430 4983 return vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: could not create new file '%s'"), pExtent->pszFullname); 5431 5432 4984 /* Place descriptor file information. 
*/ 5433 4985 pExtent->uDescriptorSector = 1; … … 5436 4988 pExtent->pDescData = pImage->pDescData; 5437 4989 pImage->pDescData = NULL; 5438 5439 4990 uint64_t cSectorsPerGDE, cSectorsPerGD; 5440 4991 pExtent->enmType = VMDKETYPE_HOSTED_SPARSE; … … 5446 4997 pExtent->cGDEntries = (pExtent->cSectors + cSectorsPerGDE - 1) / cSectorsPerGDE; 5447 4998 cSectorsPerGD = (pExtent->cGDEntries + (512 / sizeof(uint32_t) - 1)) / (512 / sizeof(uint32_t)); 5448 5449 4999 /* The spec says version is 1 for all VMDKs, but the vast 5450 5000 * majority of streamOptimized VMDKs actually contain … … 5453 5003 pExtent->uCompression = VMDK_COMPRESSION_DEFLATE; 5454 5004 pExtent->fFooter = true; 5455 5456 5005 pExtent->enmAccess = VMDKACCESS_READONLY; 5457 5006 pExtent->fUncleanShutdown = false; … … 5459 5008 pExtent->uSectorOffset = 0; 5460 5009 pExtent->fMetaDirty = true; 5461 5462 5010 /* Create grain directory, without preallocating it straight away. It will 5463 5011 * be constructed on the fly when writing out the data and written when … … 5468 5016 if (RT_FAILURE(rc)) 5469 5017 return vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: could not create new grain directory in '%s'"), pExtent->pszFullname); 5470 5471 5018 rc = vmdkDescBaseSetStr(pImage, &pImage->Descriptor, "createType", 5472 5019 "streamOptimized"); 5473 5020 if (RT_FAILURE(rc)) 5474 5021 rc = vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: could not set the image type in '%s'"), pImage->pszFilename); 5475 5476 return rc; 5477 } 5478 5022 return rc; 5023 } 5479 5024 /** 5480 5025 * Initializes the UUID fields in the DDB. … … 5512 5057 rc = vdIfError(pImage->pIfError, rc, RT_SRC_POS, 5513 5058 N_("VMDK: error storing image UUID in new descriptor in '%s'"), pImage->pszFilename); 5514 5515 return rc; 5516 } 5517 5059 return rc; 5060 } 5518 5061 /** 5519 5062 * Internal: The actual code for creating any VMDK variant currently in … … 5528 5071 { 5529 5072 pImage->uImageFlags = uImageFlags; 5530 5531 5073 pImage->pIfError = VDIfErrorGet(pImage->pVDIfsDisk); 5532 5074 pImage->pIfIo = VDIfIoIntGet(pImage->pVDIfsImage); 5533 5075 AssertPtrReturn(pImage->pIfIo, VERR_INVALID_PARAMETER); 5534 5535 5076 int rc = vmdkCreateDescriptor(pImage, pImage->pDescData, pImage->cbDescAlloc, 5536 5077 &pImage->Descriptor); … … 5544 5085 if (RT_FAILURE(rc)) 5545 5086 return vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: could get raw descriptor for '%s'"), pImage->pszFilename); 5546 5547 5087 rc = vmdkCreateRawImage(pImage, pRaw, cbSize); 5548 5088 vmdkRawDescFree(pRaw); … … 5560 5100 uPercentSpan * 95 / 100); 5561 5101 } 5562 5563 5102 if (RT_SUCCESS(rc)) 5564 5103 { 5565 5104 vdIfProgress(pIfProgress, uPercentStart + uPercentSpan * 98 / 100); 5566 5567 5105 pImage->cbSize = cbSize; 5568 5569 5106 for (unsigned i = 0; i < pImage->cExtents; i++) 5570 5107 { 5571 5108 PVMDKEXTENT pExtent = &pImage->pExtents[i]; 5572 5573 5109 rc = vmdkDescExtInsert(pImage, &pImage->Descriptor, pExtent->enmAccess, 5574 5110 pExtent->cNominalSectors, pExtent->enmType, … … 5580 5116 } 5581 5117 } 5582 5583 5118 if (RT_SUCCESS(rc)) 5584 5119 vmdkDescExtRemoveDummy(pImage, &pImage->Descriptor); 5585 5586 5120 if ( RT_SUCCESS(rc) 5587 5121 && pPCHSGeometry->cCylinders != 0 … … 5589 5123 && pPCHSGeometry->cSectors != 0) 5590 5124 rc = vmdkDescSetPCHSGeometry(pImage, pPCHSGeometry); 5591 5592 5125 if ( RT_SUCCESS(rc) 5593 5126 && pLCHSGeometry->cCylinders != 0 … … 5595 5128 && pLCHSGeometry->cSectors != 0) 5596 5129 rc = vmdkDescSetLCHSGeometry(pImage, pLCHSGeometry); 5597 
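Earlier in this hunk the sparse-extent setup rounds the sector count up to whole grain-directory entries and then rounds the directory itself up to whole 512-byte sectors. A small sketch of that arithmetic follows; the grain size and entries-per-table values are assumed typical defaults, whereas the code above derives them from the extent parameters:

#include <stdint.h>
#include <stdio.h>

/* Round-up division helper. */
#define DIV_ROUND_UP(a, b) (((a) + (b) - 1) / (b))

int main(void)
{
    /* Assumed typical hosted-sparse defaults: 64 KiB grains (128 sectors) and
     * 512 entries per grain table. */
    uint64_t const cSectors         = (UINT64_C(2) * 1024 * 1024 * 1024) / 512; /* 2 GiB extent */
    uint64_t const cSectorsPerGrain = 128;
    uint64_t const cGTEntries       = 512;

    uint64_t const cSectorsPerGDE = cGTEntries * cSectorsPerGrain;          /* sectors covered by one GD entry  */
    uint64_t const cGDEntries     = DIV_ROUND_UP(cSectors, cSectorsPerGDE); /* size of the grain directory      */
    uint64_t const cSectorsPerGD  = DIV_ROUND_UP(cGDEntries, 512 / sizeof(uint32_t)); /* GD on-disk size, sectors */

    printf("GD entries: %llu, GD sectors: %llu\n",
           (unsigned long long)cGDEntries, (unsigned long long)cSectorsPerGD);
    return 0;
}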
5598 5130 pImage->LCHSGeometry = *pLCHSGeometry; 5599 5131 pImage->PCHSGeometry = *pPCHSGeometry; 5600 5601 5132 pImage->ImageUuid = *pUuid; 5602 5133 RTUuidClear(&pImage->ParentUuid); 5603 5134 RTUuidClear(&pImage->ModificationUuid); 5604 5135 RTUuidClear(&pImage->ParentModificationUuid); 5605 5606 5136 if (RT_SUCCESS(rc)) 5607 5137 rc = vmdkCreateImageDdbUuidsInit(pImage); 5608 5609 5138 if (RT_SUCCESS(rc)) 5610 5139 rc = vmdkAllocateGrainTableCache(pImage); 5611 5612 5140 if (RT_SUCCESS(rc)) 5613 5141 { … … 5616 5144 rc = vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: cannot set image comment in '%s'"), pImage->pszFilename); 5617 5145 } 5618 5619 5146 if (RT_SUCCESS(rc)) 5620 5147 { 5621 5148 vdIfProgress(pIfProgress, uPercentStart + uPercentSpan * 99 / 100); 5622 5623 5149 if (pImage->uImageFlags & VD_VMDK_IMAGE_FLAGS_STREAM_OPTIMIZED) 5624 5150 { … … 5645 5171 else 5646 5172 rc = vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: could not create new descriptor in '%s'"), pImage->pszFilename); 5647 5648 5649 5173 if (RT_SUCCESS(rc)) 5650 5174 { … … 5652 5176 pImage->RegionList.fFlags = 0; 5653 5177 pImage->RegionList.cRegions = 1; 5654 5655 5178 pRegion->offRegion = 0; /* Disk start. */ 5656 5179 pRegion->cbBlock = 512; … … 5660 5183 pRegion->cbMetadata = 0; 5661 5184 pRegion->cRegionBlocksOrBytes = pImage->cbSize; 5662 5663 5185 vdIfProgress(pIfProgress, uPercentStart + uPercentSpan); 5664 5186 } … … 5667 5189 return rc; 5668 5190 } 5669 5670 5191 /** 5671 5192 * Internal: Update image comment. … … 5680 5201 return VERR_NO_MEMORY; 5681 5202 } 5682 5683 5203 int rc = vmdkDescDDBSetStr(pImage, &pImage->Descriptor, 5684 5204 "ddb.comment", pszCommentEncoded); … … 5689 5209 return VINF_SUCCESS; 5690 5210 } 5691 5692 5211 /** 5693 5212 * Internal. Clear the grain table buffer for real stream optimized writing. … … 5700 5219 VMDK_GT_CACHELINE_SIZE * sizeof(uint32_t)); 5701 5220 } 5702 5703 5221 /** 5704 5222 * Internal. Flush the grain table buffer for real stream optimized writing. … … 5709 5227 int rc = VINF_SUCCESS; 5710 5228 uint32_t cCacheLines = RT_ALIGN(pExtent->cGTEntries, VMDK_GT_CACHELINE_SIZE) / VMDK_GT_CACHELINE_SIZE; 5711 5712 5229 /* VMware does not write out completely empty grain tables in the case 5713 5230 * of streamOptimized images, which according to my interpretation of … … 5731 5248 if (fAllZero) 5732 5249 return VINF_SUCCESS; 5733 5734 5250 uint64_t uFileOffset = pExtent->uAppendPosition; 5735 5251 if (!uFileOffset) … … 5737 5253 /* Align to sector, as the previous write could have been any size. */ 5738 5254 uFileOffset = RT_ALIGN_64(uFileOffset, 512); 5739 5740 5255 /* Grain table marker. */ 5741 5256 uint8_t aMarker[512]; … … 5748 5263 AssertRC(rc); 5749 5264 uFileOffset += 512; 5750 5751 5265 if (!pExtent->pGD || pExtent->pGD[uGDEntry]) 5752 5266 return VERR_INTERNAL_ERROR; 5753 5754 5267 pExtent->pGD[uGDEntry] = VMDK_BYTE2SECTOR(uFileOffset); 5755 5756 5268 for (uint32_t i = 0; i < cCacheLines; i++) 5757 5269 { … … 5761 5273 for (uint32_t j = 0; j < VMDK_GT_CACHELINE_SIZE; j++, pGTTmp++) 5762 5274 *pGTTmp = RT_H2LE_U32(*pGTTmp); 5763 5764 5275 rc = vdIfIoIntFileWriteSync(pImage->pIfIo, pExtent->pFile->pStorage, uFileOffset, 5765 5276 &pImage->pGTCache->aGTCache[i].aGTData[0], … … 5773 5284 return rc; 5774 5285 } 5775 5776 5286 /** 5777 5287 * Internal. Free all allocated space for representing an image, and optionally … … 5781 5291 { 5782 5292 int rc = VINF_SUCCESS; 5783 5784 5293 /* Freeing a never allocated image (e.g. 
because the open failed) is 5785 5294 * not signalled as an error. After all nothing bad happens. */ … … 5807 5316 pImage->pExtents[i].fMetaDirty = true; 5808 5317 } 5809 5810 5318 /* From now on it's not safe to append any more data. */ 5811 5319 pImage->pExtents[i].uAppendPosition = 0; … … 5813 5321 } 5814 5322 } 5815 5816 5323 if (pImage->uImageFlags & VD_VMDK_IMAGE_FLAGS_STREAM_OPTIMIZED) 5817 5324 { … … 5832 5339 AssertRC(rc); 5833 5340 } 5834 5835 5341 uint64_t uFileOffset = pExtent->uAppendPosition; 5836 5342 if (!uFileOffset) 5837 5343 return VERR_INTERNAL_ERROR; 5838 5344 uFileOffset = RT_ALIGN_64(uFileOffset, 512); 5839 5840 5345 /* From now on it's not safe to append any more data. */ 5841 5346 pExtent->uAppendPosition = 0; 5842 5843 5347 /* Grain directory marker. */ 5844 5348 uint8_t aMarker[512]; … … 5851 5355 AssertRC(rc); 5852 5356 uFileOffset += 512; 5853 5854 5357 /* Write grain directory in little endian style. The array will 5855 5358 * not be used after this, so convert in place. */ … … 5861 5364 pExtent->cGDEntries * sizeof(uint32_t)); 5862 5365 AssertRC(rc); 5863 5864 5366 pExtent->uSectorGD = VMDK_BYTE2SECTOR(uFileOffset); 5865 5367 pExtent->uSectorRGD = VMDK_BYTE2SECTOR(uFileOffset); … … 5867 5369 + pExtent->cGDEntries * sizeof(uint32_t), 5868 5370 512); 5869 5870 5371 /* Footer marker. */ 5871 5372 memset(pMarker, '\0', sizeof(aMarker)); … … 5875 5376 uFileOffset, aMarker, sizeof(aMarker)); 5876 5377 AssertRC(rc); 5877 5878 5378 uFileOffset += 512; 5879 5379 rc = vmdkWriteMetaSparseExtent(pImage, pExtent, uFileOffset, NULL); 5880 5380 AssertRC(rc); 5881 5882 5381 uFileOffset += 512; 5883 5382 /* End-of-stream marker. */ … … 5890 5389 else if (!fDelete && fFlush) 5891 5390 vmdkFlushImage(pImage, NULL); 5892 5893 5391 if (pImage->pExtents != NULL) 5894 5392 { … … 5912 5410 if (RT_SUCCESS(rc)) 5913 5411 rc = rc2; /* Propogate any error when closing the file. */ 5914 5915 5412 if (pImage->pGTCache) 5916 5413 { … … 5924 5421 } 5925 5422 } 5926 5927 5423 LogFlowFunc(("returns %Rrc\n", rc)); 5928 5424 return rc; 5929 5425 } 5930 5931 5426 /** 5932 5427 * Internal. Flush image data (and metadata) to disk. … … 5936 5431 PVMDKEXTENT pExtent; 5937 5432 int rc = VINF_SUCCESS; 5938 5939 5433 /* Update descriptor if changed. */ 5940 5434 if (pImage->Descriptor.fDirty) 5941 5435 rc = vmdkWriteDescriptor(pImage, pIoCtx); 5942 5943 5436 if (RT_SUCCESS(rc)) 5944 5437 { … … 5976 5469 } 5977 5470 } 5978 5979 5471 if (RT_FAILURE(rc)) 5980 5472 break; 5981 5982 5473 switch (pExtent->enmType) 5983 5474 { … … 6001 5492 } 6002 5493 } 6003 6004 return rc; 6005 } 6006 5494 return rc; 5495 } 6007 5496 /** 6008 5497 * Internal. Find extent corresponding to the sector number in the disk. … … 6013 5502 PVMDKEXTENT pExtent = NULL; 6014 5503 int rc = VINF_SUCCESS; 6015 6016 5504 for (unsigned i = 0; i < pImage->cExtents; i++) 6017 5505 { … … 6024 5512 offSector -= pImage->pExtents[i].cNominalSectors; 6025 5513 } 6026 6027 5514 if (pExtent) 6028 5515 *ppExtent = pExtent; 6029 5516 else 6030 5517 rc = VERR_IO_SECTOR_NOT_FOUND; 6031 6032 return rc; 6033 } 6034 5518 return rc; 5519 } 6035 5520 /** 6036 5521 * Internal. Hash function for placing the grain table hash entries. … … 6043 5528 return (uSector + uExtent) % pCache->cEntries; 6044 5529 } 6045 6046 5530 /** 6047 5531 * Internal. 
Get sector number in the extent file from the relative sector … … 6058 5542 uint32_t aGTDataTmp[VMDK_GT_CACHELINE_SIZE]; 6059 5543 int rc; 6060 6061 5544 /* For newly created and readonly/sequentially opened streamOptimized 6062 5545 * images this must be a no-op, as the grain directory is not there. */ … … 6070 5553 return VINF_SUCCESS; 6071 5554 } 6072 6073 5555 uGDIndex = uSector / pExtent->cSectorsPerGDE; 6074 5556 if (uGDIndex >= pExtent->cGDEntries) … … 6082 5564 return VINF_SUCCESS; 6083 5565 } 6084 6085 5566 uGTBlock = uSector / (pExtent->cSectorsPerGrain * VMDK_GT_CACHELINE_SIZE); 6086 5567 uGTHash = vmdkGTCacheHash(pCache, uGTBlock, pExtent->uExtent); … … 6111 5592 return VINF_SUCCESS; 6112 5593 } 6113 6114 5594 /** 6115 5595 * Internal. Writes the grain and also if necessary the grain tables. … … 6126 5606 const void *pData; 6127 5607 int rc; 6128 6129 5608 /* Very strict requirements: always write at least one full grain, with 6130 5609 * proper alignment. Everything else would require reading of already … … 6139 5618 || uSector + VMDK_BYTE2SECTOR(cbWrite) > pExtent->cNominalSectors) 6140 5619 return VERR_INVALID_PARAMETER; 6141 6142 5620 /* Clip write range to at most the rest of the grain. */ 6143 5621 cbWrite = RT_MIN(cbWrite, VMDK_SECTOR2BYTE(pExtent->cSectorsPerGrain - uSector % pExtent->cSectorsPerGrain)); 6144 6145 5622 /* Do not allow to go back. */ 6146 5623 uGrain = uSector / pExtent->cSectorsPerGrain; … … 6151 5628 if (uGrain < pExtent->uLastGrainAccess) 6152 5629 return VERR_VD_VMDK_INVALID_WRITE; 6153 6154 5630 /* Zero byte write optimization. Since we don't tell VBoxHDD that we need 6155 5631 * to allocate something, we also need to detect the situation ourself. */ … … 6157 5633 && vdIfIoIntIoCtxIsZero(pImage->pIfIo, pIoCtx, cbWrite, true /* fAdvance */)) 6158 5634 return VINF_SUCCESS; 6159 6160 5635 if (uGDEntry != uLastGDEntry) 6161 5636 { … … 6171 5646 } 6172 5647 } 6173 6174 5648 uint64_t uFileOffset; 6175 5649 uFileOffset = pExtent->uAppendPosition; … … 6178 5652 /* Align to sector, as the previous write could have been any size. */ 6179 5653 uFileOffset = RT_ALIGN_64(uFileOffset, 512); 6180 6181 5654 /* Paranoia check: extent type, grain table buffer presence and 6182 5655 * grain table buffer space. Also grain table entry must be clear. */ … … 6186 5659 || pImage->pGTCache->aGTCache[uCacheLine].aGTData[uCacheEntry]) 6187 5660 return VERR_INTERNAL_ERROR; 6188 6189 5661 /* Update grain table entry. */ 6190 5662 pImage->pGTCache->aGTCache[uCacheLine].aGTData[uCacheEntry] = VMDK_BYTE2SECTOR(uFileOffset); 6191 6192 5663 if (cbWrite != VMDK_SECTOR2BYTE(pExtent->cSectorsPerGrain)) 6193 5664 { … … 6202 5673 unsigned cSegments = 1; 6203 5674 size_t cbSeg = 0; 6204 6205 5675 cbSeg = vdIfIoIntIoCtxSegArrayCreate(pImage->pIfIo, pIoCtx, &Segment, 6206 5676 &cSegments, VMDK_SECTOR2BYTE(pExtent->cSectorsPerGrain)); … … 6219 5689 pExtent->uLastGrainAccess = uGrain; 6220 5690 pExtent->uAppendPosition += cbGrain; 6221 6222 return rc; 6223 } 6224 5691 return rc; 5692 } 6225 5693 /** 6226 5694 * Internal: Updates the grain table during grain allocation. 
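The lookup path above maps a virtual sector to a grain directory index, then to a cache-line sized block of the grain table, and finally hashes that block into the grain table cache with (block + extent) modulo the cache size. A toy illustration of those index calculations follows; the cache-line size and cache size are placeholders, not the VMDK_GT_CACHELINE_SIZE and cache dimensions used by the real code:

#include <stdint.h>
#include <stdio.h>

#define GT_CACHELINE_SIZE 128   /* placeholder for VMDK_GT_CACHELINE_SIZE */
#define GT_CACHE_ENTRIES  256   /* hypothetical number of cache slots     */

int main(void)
{
    uint64_t const cSectorsPerGrain = 128;   /* assumed 64 KiB grains          */
    uint64_t const cGTEntries       = 512;   /* assumed entries per grain table */
    uint64_t const cSectorsPerGDE   = cGTEntries * cSectorsPerGrain;

    uint64_t const uSector = UINT64_C(1000000);  /* sector relative to the extent */
    unsigned const uExtent = 0;

    /* Which grain directory entry (i.e. which grain table) covers this sector? */
    uint64_t const uGDIndex = uSector / cSectorsPerGDE;
    /* Which cache-line sized block of that grain table does it fall into? */
    uint64_t const uGTBlock = uSector / (cSectorsPerGrain * GT_CACHELINE_SIZE);
    /* Simple hash used to place the block in the grain table cache. */
    uint64_t const uGTHash  = (uGTBlock + uExtent) % GT_CACHE_ENTRIES;

    printf("GD index %llu, GT block %llu, cache slot %llu\n",
           (unsigned long long)uGDIndex, (unsigned long long)uGTBlock,
           (unsigned long long)uGTHash);
    return 0;
}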
… … 6236 5704 uint64_t uSector = pGrainAlloc->uSector; 6237 5705 PVMDKGTCACHEENTRY pGTCacheEntry; 6238 6239 5706 LogFlowFunc(("pImage=%#p pExtent=%#p pCache=%#p pIoCtx=%#p pGrainAlloc=%#p\n", 6240 5707 pImage, pExtent, pCache, pIoCtx, pGrainAlloc)); 6241 6242 5708 uGTSector = pGrainAlloc->uGTSector; 6243 5709 uRGTSector = pGrainAlloc->uRGTSector; 6244 5710 LogFlow(("uGTSector=%llu uRGTSector=%llu\n", uGTSector, uRGTSector)); 6245 6246 5711 /* Update the grain table (and the cache). */ 6247 5712 uGTBlock = uSector / (pExtent->cSectorsPerGrain * VMDK_GT_CACHELINE_SIZE); … … 6306 5771 return vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: cannot write updated backup grain table in '%s'"), pExtent->pszFullname); 6307 5772 } 6308 6309 5773 LogFlowFunc(("leaving rc=%Rrc\n", rc)); 6310 5774 return rc; 6311 5775 } 6312 6313 5776 /** 6314 5777 * Internal - complete the grain allocation by updating disk grain table if required. … … 6320 5783 PVMDKIMAGE pImage = (PVMDKIMAGE)pBackendData; 6321 5784 PVMDKGRAINALLOCASYNC pGrainAlloc = (PVMDKGRAINALLOCASYNC)pvUser; 6322 6323 5785 LogFlowFunc(("pBackendData=%#p pIoCtx=%#p pvUser=%#p rcReq=%Rrc\n", 6324 5786 pBackendData, pIoCtx, pvUser, rcReq)); 6325 6326 5787 pGrainAlloc->cIoXfersPending--; 6327 5788 if (!pGrainAlloc->cIoXfersPending && pGrainAlloc->fGTUpdateNeeded) 6328 5789 rc = vmdkAllocGrainGTUpdate(pImage, pGrainAlloc->pExtent, pIoCtx, pGrainAlloc); 6329 6330 5790 if (!pGrainAlloc->cIoXfersPending) 6331 5791 { … … 6333 5793 RTMemFree(pGrainAlloc); 6334 5794 } 6335 6336 5795 LogFlowFunc(("Leaving rc=%Rrc\n", rc)); 6337 5796 return rc; 6338 5797 } 6339 6340 5798 /** 6341 5799 * Internal. Allocates a new grain table (if necessary). … … 6349 5807 PVMDKGRAINALLOCASYNC pGrainAlloc = NULL; 6350 5808 int rc; 6351 6352 5809 LogFlowFunc(("pCache=%#p pExtent=%#p pIoCtx=%#p uSector=%llu cbWrite=%llu\n", 6353 5810 pCache, pExtent, pIoCtx, uSector, cbWrite)); 6354 6355 5811 pGrainAlloc = (PVMDKGRAINALLOCASYNC)RTMemAllocZ(sizeof(VMDKGRAINALLOCASYNC)); 6356 5812 if (!pGrainAlloc) 6357 5813 return VERR_NO_MEMORY; 6358 6359 5814 pGrainAlloc->pExtent = pExtent; 6360 5815 pGrainAlloc->uSector = uSector; 6361 6362 5816 uGDIndex = uSector / pExtent->cSectorsPerGDE; 6363 5817 if (uGDIndex >= pExtent->cGDEntries) … … 6374 5828 { 6375 5829 LogFlow(("Allocating new grain table\n")); 6376 6377 5830 /* There is no grain table referenced by this grain directory 6378 5831 * entry. So there is absolutely no data in this area. Allocate … … 6385 5838 } 6386 5839 Assert(!(uFileOffset % 512)); 6387 6388 5840 uFileOffset = RT_ALIGN_64(uFileOffset, 512); 6389 5841 uGTSector = VMDK_BYTE2SECTOR(uFileOffset); 6390 6391 5842 /* Normally the grain table is preallocated for hosted sparse extents 6392 5843 * that support more than 32 bit sector numbers. So this shouldn't … … 6397 5848 return VERR_VD_VMDK_INVALID_HEADER; 6398 5849 } 6399 6400 5850 /* Write grain table by writing the required number of grain table 6401 5851 * cache chunks. 
Allocate memory dynamically here or we flood the … … 6403 5853 size_t cbGTDataTmp = pExtent->cGTEntries * sizeof(uint32_t); 6404 5854 uint32_t *paGTDataTmp = (uint32_t *)RTMemTmpAllocZ(cbGTDataTmp); 6405 6406 5855 if (!paGTDataTmp) 6407 5856 { … … 6409 5858 return VERR_NO_MEMORY; 6410 5859 } 6411 6412 5860 memset(paGTDataTmp, '\0', cbGTDataTmp); 6413 5861 rc = vdIfIoIntFileWriteMeta(pImage->pIfIo, pExtent->pFile->pStorage, … … 6425 5873 pExtent->uAppendPosition = RT_ALIGN_64( pExtent->uAppendPosition 6426 5874 + cbGTDataTmp, 512); 6427 6428 5875 if (pExtent->pRGD) 6429 5876 { … … 6434 5881 Assert(!(uFileOffset % 512)); 6435 5882 uRGTSector = VMDK_BYTE2SECTOR(uFileOffset); 6436 6437 5883 /* Normally the redundant grain table is preallocated for hosted 6438 5884 * sparse extents that support more than 32 bit sector numbers. So … … 6443 5889 return VERR_VD_VMDK_INVALID_HEADER; 6444 5890 } 6445 6446 5891 /* Write grain table by writing the required number of grain table 6447 5892 * cache chunks. Allocate memory dynamically here or we flood the … … 6458 5903 return vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: cannot write backup grain table allocation in '%s'"), pExtent->pszFullname); 6459 5904 } 6460 6461 5905 pExtent->uAppendPosition = pExtent->uAppendPosition + cbGTDataTmp; 6462 5906 } 6463 6464 5907 RTMemTmpFree(paGTDataTmp); 6465 6466 5908 /* Update the grain directory on disk (doing it before writing the 6467 5909 * grain table will result in a garbled extent if the operation is … … 6489 5931 return vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: cannot write backup grain directory entry in '%s'"), pExtent->pszFullname); 6490 5932 } 6491 6492 5933 /* As the final step update the in-memory copy of the GDs. */ 6493 5934 pExtent->pGD[uGDIndex] = uGTSector; … … 6495 5936 pExtent->pRGD[uGDIndex] = uRGTSector; 6496 5937 } 6497 6498 5938 LogFlow(("uGTSector=%llu uRGTSector=%llu\n", uGTSector, uRGTSector)); 6499 5939 pGrainAlloc->uGTSector = uGTSector; 6500 5940 pGrainAlloc->uRGTSector = uRGTSector; 6501 6502 5941 uFileOffset = pExtent->uAppendPosition; 6503 5942 if (!uFileOffset) 6504 5943 return VERR_INTERNAL_ERROR; 6505 5944 Assert(!(uFileOffset % 512)); 6506 6507 5945 pGrainAlloc->uGrainOffset = uFileOffset; 6508 6509 5946 if (pImage->uImageFlags & VD_VMDK_IMAGE_FLAGS_STREAM_OPTIMIZED) 6510 5947 { … … 6512 5949 ("Accesses to stream optimized images must be synchronous\n"), 6513 5950 VERR_INVALID_STATE); 6514 6515 5951 if (cbWrite != VMDK_SECTOR2BYTE(pExtent->cSectorsPerGrain)) 6516 5952 return vdIfError(pImage->pIfError, VERR_INTERNAL_ERROR, RT_SRC_POS, N_("VMDK: not enough data for a compressed data block in '%s'"), pExtent->pszFullname); 6517 6518 5953 /* Invalidate cache, just in case some code incorrectly allows mixing 6519 5954 * of reads and writes. Normally shouldn't be needed. */ 6520 5955 pExtent->uGrainSectorAbs = 0; 6521 6522 5956 /* Write compressed data block and the markers. 
*/ 6523 5957 uint32_t cbGrain = 0; … … 6525 5959 RTSGSEG Segment; 6526 5960 unsigned cSegments = 1; 6527 6528 5961 cbSeg = vdIfIoIntIoCtxSegArrayCreate(pImage->pIfIo, pIoCtx, &Segment, 6529 5962 &cSegments, cbWrite); 6530 5963 Assert(cbSeg == cbWrite); 6531 6532 5964 rc = vmdkFileDeflateSync(pImage, pExtent, uFileOffset, 6533 5965 Segment.pvSeg, cbWrite, uSector, &cbGrain); … … 6550 5982 else if (RT_FAILURE(rc)) 6551 5983 return vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: cannot write allocated data block in '%s'"), pExtent->pszFullname); 6552 6553 5984 pExtent->uAppendPosition += cbWrite; 6554 5985 } 6555 6556 5986 rc = vmdkAllocGrainGTUpdate(pImage, pExtent, pIoCtx, pGrainAlloc); 6557 6558 5987 if (!pGrainAlloc->cIoXfersPending) 6559 5988 { … … 6561 5990 RTMemFree(pGrainAlloc); 6562 5991 } 6563 6564 5992 LogFlowFunc(("leaving rc=%Rrc\n", rc)); 6565 6566 return rc; 6567 } 6568 5993 return rc; 5994 } 6569 5995 /** 6570 5996 * Internal. Reads the contents by sequentially going over the compressed … … 6576 6002 { 6577 6003 int rc; 6578 6579 6004 LogFlowFunc(("pImage=%#p pExtent=%#p uSector=%llu pIoCtx=%#p cbRead=%llu\n", 6580 6005 pImage, pExtent, uSector, pIoCtx, cbRead)); 6581 6582 6006 AssertMsgReturn(vdIfIoIntIoCtxIsSynchronous(pImage->pIfIo, pIoCtx), 6583 6007 ("Async I/O not supported for sequential stream optimized images\n"), 6584 6008 VERR_INVALID_STATE); 6585 6586 6009 /* Do not allow to go back. */ 6587 6010 uint32_t uGrain = uSector / pExtent->cSectorsPerGrain; … … 6589 6012 return VERR_VD_VMDK_INVALID_STATE; 6590 6013 pExtent->uLastGrainAccess = uGrain; 6591 6592 6014 /* After a previous error do not attempt to recover, as it would need 6593 6015 * seeking (in the general case backwards which is forbidden). */ 6594 6016 if (!pExtent->uGrainSectorAbs) 6595 6017 return VERR_VD_VMDK_INVALID_STATE; 6596 6597 6018 /* Check if we need to read something from the image or if what we have 6598 6019 * in the buffer is good to fulfill the request. */ … … 6601 6022 uint32_t uGrainSectorAbs = pExtent->uGrainSectorAbs 6602 6023 + VMDK_BYTE2SECTOR(pExtent->cbGrainStreamRead); 6603 6604 6024 /* Get the marker from the next data block - and skip everything which 6605 6025 * is not a compressed grain. If it's a compressed grain which is for … … 6616 6036 Marker.uSector = RT_LE2H_U64(Marker.uSector); 6617 6037 Marker.cbSize = RT_LE2H_U32(Marker.cbSize); 6618 6619 6038 if (Marker.cbSize == 0) 6620 6039 { … … 6695 6114 } 6696 6115 } while (Marker.uType != VMDK_MARKER_EOS); 6697 6698 6116 pExtent->uGrainSectorAbs = uGrainSectorAbs; 6699 6700 6117 if (!pExtent->cbGrainStreamRead && Marker.uType == VMDK_MARKER_EOS) 6701 6118 { … … 6706 6123 } 6707 6124 } 6708 6709 6125 if (pExtent->uGrain > uSector / pExtent->cSectorsPerGrain) 6710 6126 { … … 6714 6130 return VERR_VD_BLOCK_FREE; 6715 6131 } 6716 6717 6132 uint32_t uSectorInGrain = uSector % pExtent->cSectorsPerGrain; 6718 6133 vdIfIoIntIoCtxCopyTo(pImage->pIfIo, pIoCtx, … … 6722 6137 return VINF_SUCCESS; 6723 6138 } 6724 6725 6139 /** 6726 6140 * Replaces a fragment of a string with the specified string. 
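The string helper documented just above is used by the rename path to swap the old base name for the new one inside descriptor lines. A self-contained sketch of such a first-occurrence replacement (not the in-tree vmdkStrReplace, whose exact signature is not shown in this hunk), driven by an illustrative descriptor-style extent line:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Returns a newly allocated copy of pszWhere with the first occurrence of
 * pszWhat replaced by pszByWhat, or NULL if pszWhat does not occur or the
 * allocation fails.  The caller frees the result. */
static char *strReplaceFirst(const char *pszWhere, const char *pszWhat, const char *pszByWhat)
{
    const char *pszHit = strstr(pszWhere, pszWhat);
    if (!pszHit)
        return NULL;

    size_t const cchPrefix = (size_t)(pszHit - pszWhere);
    size_t const cchWhat   = strlen(pszWhat);
    size_t const cchBy     = strlen(pszByWhat);
    size_t const cchSuffix = strlen(pszHit + cchWhat);

    char *pszNew = (char *)malloc(cchPrefix + cchBy + cchSuffix + 1);
    if (!pszNew)
        return NULL;
    memcpy(pszNew, pszWhere, cchPrefix);
    memcpy(pszNew + cchPrefix, pszByWhat, cchBy);
    memcpy(pszNew + cchPrefix + cchBy, pszHit + cchWhat, cchSuffix + 1); /* copies the NUL too */
    return pszNew;
}

int main(void)
{
    char *psz = strReplaceFirst("RW 2048 FLAT \"old-base-flat.vmdk\" 0",
                                "old-base", "new-base");
    if (psz)
    {
        puts(psz);
        free(psz);
    }
    return 0;
}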
… … 6791 6205 return pszNewStr; 6792 6206 } 6793 6794 6795 6207 /** @copydoc VDIMAGEBACKEND::pfnProbe */ 6796 6208 static DECLCALLBACK(int) vmdkProbe(const char *pszFilename, PVDINTERFACE pVDIfsDisk, … … 6800 6212 LogFlowFunc(("pszFilename=\"%s\" pVDIfsDisk=%#p pVDIfsImage=%#p penmType=%#p\n", 6801 6213 pszFilename, pVDIfsDisk, pVDIfsImage, penmType)); 6802 6803 6214 AssertReturn((VALID_PTR(pszFilename) && *pszFilename), VERR_INVALID_PARAMETER); 6804 6805 6215 int rc = VINF_SUCCESS; 6806 6216 PVMDKIMAGE pImage = (PVMDKIMAGE)RTMemAllocZ(RT_UOFFSETOF(VMDKIMAGE, RegionList.aRegions[1])); … … 6820 6230 vmdkFreeImage(pImage, false, false /*fFlush*/); 6821 6231 RTMemFree(pImage); 6822 6823 6232 if (RT_SUCCESS(rc)) 6824 6233 *penmType = VDTYPE_HDD; … … 6826 6235 else 6827 6236 rc = VERR_NO_MEMORY; 6828 6829 6237 LogFlowFunc(("returns %Rrc\n", rc)); 6830 6238 return rc; 6831 6239 } 6832 6833 6240 /** @copydoc VDIMAGEBACKEND::pfnOpen */ 6834 6241 static DECLCALLBACK(int) vmdkOpen(const char *pszFilename, unsigned uOpenFlags, … … 6837 6244 { 6838 6245 RT_NOREF1(enmType); /**< @todo r=klaus make use of the type info. */ 6839 6840 6246 LogFlowFunc(("pszFilename=\"%s\" uOpenFlags=%#x pVDIfsDisk=%#p pVDIfsImage=%#p enmType=%u ppBackendData=%#p\n", 6841 6247 pszFilename, uOpenFlags, pVDIfsDisk, pVDIfsImage, enmType, ppBackendData)); 6842 6248 int rc; 6843 6844 6249 /* Check open flags. All valid flags are supported. */ 6845 6250 AssertReturn(!(uOpenFlags & ~VD_OPEN_FLAGS_MASK), VERR_INVALID_PARAMETER); 6846 6251 AssertReturn((VALID_PTR(pszFilename) && *pszFilename), VERR_INVALID_PARAMETER); 6847 6848 6252 PVMDKIMAGE pImage = (PVMDKIMAGE)RTMemAllocZ(RT_UOFFSETOF(VMDKIMAGE, RegionList.aRegions[1])); 6849 6253 if (RT_LIKELY(pImage)) … … 6857 6261 pImage->pVDIfsDisk = pVDIfsDisk; 6858 6262 pImage->pVDIfsImage = pVDIfsImage; 6859 6860 6263 rc = vmdkOpenImage(pImage, uOpenFlags); 6861 6264 if (RT_SUCCESS(rc)) … … 6866 6269 else 6867 6270 rc = VERR_NO_MEMORY; 6868 6869 6271 LogFlowFunc(("returns %Rrc (pBackendData=%#p)\n", rc, *ppBackendData)); 6870 6272 return rc; 6871 6273 } 6872 6873 6274 /** @copydoc VDIMAGEBACKEND::pfnCreate */ 6874 6275 static DECLCALLBACK(int) vmdkCreate(const char *pszFilename, uint64_t cbSize, … … 6884 6285 pszFilename, cbSize, uImageFlags, pszComment, pPCHSGeometry, pLCHSGeometry, pUuid, uOpenFlags, uPercentStart, uPercentSpan, pVDIfsDisk, pVDIfsImage, pVDIfsOperation, enmType, ppBackendData)); 6885 6286 int rc; 6886 6887 6287 /* Check the VD container type and image flags. */ 6888 6288 if ( enmType != VDTYPE_HDD 6889 6289 || (uImageFlags & ~VD_VMDK_IMAGE_FLAGS_MASK) != 0) 6890 6290 return VERR_VD_INVALID_TYPE; 6891 6892 6291 /* Check size. Maximum 256TB-64K for sparse images, otherwise unlimited. */ 6893 6292 if ( !(uImageFlags & VD_VMDK_IMAGE_FLAGS_RAWDISK) … … 6895 6294 || (!(uImageFlags & VD_IMAGE_FLAGS_FIXED) && cbSize >= _1T * 256 - _64K))) 6896 6295 return VERR_VD_INVALID_SIZE; 6897 6898 6296 /* Check image flags for invalid combinations. */ 6899 6297 if ( (uImageFlags & VD_VMDK_IMAGE_FLAGS_STREAM_OPTIMIZED) 6900 6298 && (uImageFlags & ~(VD_VMDK_IMAGE_FLAGS_STREAM_OPTIMIZED | VD_IMAGE_FLAGS_DIFF))) 6901 6299 return VERR_INVALID_PARAMETER; 6902 6903 6300 /* Check open flags. All valid flags are supported. 
*/ 6904 6301 AssertReturn(!(uOpenFlags & ~VD_OPEN_FLAGS_MASK), VERR_INVALID_PARAMETER); … … 6910 6307 && !(uImageFlags & VD_IMAGE_FLAGS_FIXED)), 6911 6308 VERR_INVALID_PARAMETER); 6912 6913 6309 PVMDKIMAGE pImage = (PVMDKIMAGE)RTMemAllocZ(RT_UOFFSETOF(VMDKIMAGE, RegionList.aRegions[1])); 6914 6310 if (RT_LIKELY(pImage)) 6915 6311 { 6916 6312 PVDINTERFACEPROGRESS pIfProgress = VDIfProgressGet(pVDIfsOperation); 6917 6918 6313 pImage->pszFilename = pszFilename; 6919 6314 pImage->pFile = NULL; … … 6946 6341 rc = vmdkOpenImage(pImage, uOpenFlags); 6947 6342 } 6948 6949 6343 if (RT_SUCCESS(rc)) 6950 6344 *ppBackendData = pImage; 6951 6345 } 6952 6953 6346 if (RT_FAILURE(rc)) 6954 6347 RTMemFree(pImage->pDescData); … … 6956 6349 else 6957 6350 rc = VERR_NO_MEMORY; 6958 6959 6351 if (RT_FAILURE(rc)) 6960 6352 RTMemFree(pImage); … … 6962 6354 else 6963 6355 rc = VERR_NO_MEMORY; 6964 6965 6356 LogFlowFunc(("returns %Rrc (pBackendData=%#p)\n", rc, *ppBackendData)); 6966 6357 return rc; 6967 6358 } 6968 6969 6359 /** 6970 6360 * Prepares the state for renaming a VMDK image, setting up the state and allocating … … 6979 6369 { 6980 6370 AssertReturn(RTPathFilename(pszFilename) != NULL, VERR_INVALID_PARAMETER); 6981 6982 6371 int rc = VINF_SUCCESS; 6983 6984 6372 memset(&pRenameState->DescriptorCopy, 0, sizeof(pRenameState->DescriptorCopy)); 6985 6986 6373 /* 6987 6374 * Allocate an array to store both old and new names of renamed files … … 7009 6396 pRenameState->fEmbeddedDesc = true; 7010 6397 } 7011 7012 6398 /* Save the descriptor content. */ 7013 6399 pRenameState->DescriptorCopy.cLines = pImage->Descriptor.cLines; … … 7021 6407 } 7022 6408 } 7023 7024 6409 if (RT_SUCCESS(rc)) 7025 6410 { … … 7028 6413 AssertReturn(pRenameState->pszNewBaseName, VERR_NO_STR_MEMORY); 7029 6414 RTPathStripSuffix(pRenameState->pszNewBaseName); 7030 7031 6415 pRenameState->pszOldBaseName = RTStrDup(RTPathFilename(pImage->pszFilename)); 7032 6416 AssertReturn(pRenameState->pszOldBaseName, VERR_NO_STR_MEMORY); 7033 6417 RTPathStripSuffix(pRenameState->pszOldBaseName); 7034 7035 6418 /* Prepare both old and new full names used for string replacement. 7036 6419 Note! Must abspath the stuff here, so the strstr weirdness later in … … 7040 6423 AssertReturn(pRenameState->pszNewFullName, VERR_NO_STR_MEMORY); 7041 6424 RTPathStripSuffix(pRenameState->pszNewFullName); 7042 7043 6425 pRenameState->pszOldFullName = RTPathAbsDup(pImage->pszFilename); 7044 6426 AssertReturn(pRenameState->pszOldFullName, VERR_NO_STR_MEMORY); 7045 6427 RTPathStripSuffix(pRenameState->pszOldFullName); 7046 7047 6428 /* Save the old name for easy access to the old descriptor file. */ 7048 6429 pRenameState->pszOldDescName = RTStrDup(pImage->pszFilename); 7049 6430 AssertReturn(pRenameState->pszOldDescName, VERR_NO_STR_MEMORY); 7050 7051 6431 /* Save old image name. */ 7052 6432 pRenameState->pszOldImageName = pImage->pszFilename; … … 7055 6435 else 7056 6436 rc = VERR_NO_TMP_MEMORY; 7057 7058 return rc; 7059 } 7060 6437 return rc; 6438 } 7061 6439 /** 7062 6440 * Destroys the given rename state, freeing all allocated memory. … … 7102 6480 RTStrFree(pRenameState->pszNewFullName); 7103 6481 } 7104 7105 6482 /** 7106 6483 * Rolls back the rename operation to the original state. … … 7113 6490 { 7114 6491 int rc = VINF_SUCCESS; 7115 7116 6492 if (!pRenameState->fImageFreed) 7117 6493 { … … 7122 6498 vmdkFreeImage(pImage, false, true /*fFlush*/); 7123 6499 } 7124 7125 6500 /* Rename files back. 
*/ 7126 6501 for (unsigned i = 0; i <= pRenameState->cExtents; i++) … … 7161 6536 pImage->pszFilename = pRenameState->pszOldImageName; 7162 6537 rc = vmdkOpenImage(pImage, pImage->uOpenFlags); 7163 7164 return rc; 7165 } 7166 6538 return rc; 6539 } 7167 6540 /** 7168 6541 * Rename worker doing the real work. … … 7177 6550 int rc = VINF_SUCCESS; 7178 6551 unsigned i, line; 7179 7180 6552 /* Update the descriptor with modified extent names. */ 7181 6553 for (i = 0, line = pImage->Descriptor.uFirstExtent; … … 7194 6566 pImage->Descriptor.aLines[line] = pRenameState->apszNewLines[i]; 7195 6567 } 7196 7197 6568 if (RT_SUCCESS(rc)) 7198 6569 { … … 7201 6572 /* Flush the descriptor now, in case it is embedded. */ 7202 6573 vmdkFlushImage(pImage, NULL); 7203 7204 6574 /* Close and rename/move extents. */ 7205 6575 for (i = 0; i < pRenameState->cExtents; i++) … … 7219 6589 if (RT_FAILURE(rc)) 7220 6590 break;; 7221 7222 6591 /* Rename the extent file. */ 7223 6592 rc = vdIfIoIntFileMove(pImage->pIfIo, pExtent->pszFullname, pRenameState->apszNewName[i], 0); … … 7227 6596 pRenameState->apszOldName[i] = RTStrDup(pExtent->pszFullname); 7228 6597 } 7229 7230 6598 if (RT_SUCCESS(rc)) 7231 6599 { … … 7235 6603 { 7236 6604 pRenameState->fImageFreed = true; 7237 7238 6605 /* Last elements of new/old name arrays are intended for 7239 6606 * storing descriptor's names. … … 7250 6617 } 7251 6618 } 7252 7253 6619 /* Update pImage with the new information. */ 7254 6620 pImage->pszFilename = pszFilename; 7255 7256 6621 /* Open the new image. */ 7257 6622 rc = vmdkOpenImage(pImage, pImage->uOpenFlags); … … 7259 6624 } 7260 6625 } 7261 7262 return rc; 7263 } 7264 6626 return rc; 6627 } 7265 6628 /** @copydoc VDIMAGEBACKEND::pfnRename */ 7266 6629 static DECLCALLBACK(int) vmdkRename(void *pBackendData, const char *pszFilename) 7267 6630 { 7268 6631 LogFlowFunc(("pBackendData=%#p pszFilename=%#p\n", pBackendData, pszFilename)); 7269 7270 6632 PVMDKIMAGE pImage = (PVMDKIMAGE)pBackendData; 7271 6633 VMDKRENAMESTATE RenameState; 7272 7273 6634 memset(&RenameState, 0, sizeof(RenameState)); 7274 7275 6635 /* Check arguments. */ 7276 6636 AssertReturn(( pImage … … 7278 6638 && *pszFilename 7279 6639 && !(pImage->uImageFlags & VD_VMDK_IMAGE_FLAGS_RAWDISK)), VERR_INVALID_PARAMETER); 7280 7281 6640 int rc = vmdkRenameStatePrepare(pImage, &RenameState, pszFilename); 7282 6641 if (RT_SUCCESS(rc)) 7283 6642 { 7284 6643 /* --- Up to this point we have not done any damage yet. --- */ 7285 7286 6644 rc = vmdkRenameWorker(pImage, &RenameState, pszFilename); 7287 6645 /* Roll back all changes in case of failure. 
*/ … … 7292 6650 } 7293 6651 } 7294 7295 6652 vmdkRenameStateDestroy(&RenameState); 7296 6653 LogFlowFunc(("returns %Rrc\n", rc)); 7297 6654 return rc; 7298 6655 } 7299 7300 6656 /** @copydoc VDIMAGEBACKEND::pfnClose */ 7301 6657 static DECLCALLBACK(int) vmdkClose(void *pBackendData, bool fDelete) … … 7303 6659 LogFlowFunc(("pBackendData=%#p fDelete=%d\n", pBackendData, fDelete)); 7304 6660 PVMDKIMAGE pImage = (PVMDKIMAGE)pBackendData; 7305 7306 6661 int rc = vmdkFreeImage(pImage, fDelete, true /*fFlush*/); 7307 6662 RTMemFree(pImage); 7308 7309 6663 LogFlowFunc(("returns %Rrc\n", rc)); 7310 6664 return rc; 7311 6665 } 7312 7313 6666 /** @copydoc VDIMAGEBACKEND::pfnRead */ 7314 6667 static DECLCALLBACK(int) vmdkRead(void *pBackendData, uint64_t uOffset, size_t cbToRead, … … 7318 6671 pBackendData, uOffset, pIoCtx, cbToRead, pcbActuallyRead)); 7319 6672 PVMDKIMAGE pImage = (PVMDKIMAGE)pBackendData; 7320 7321 6673 AssertPtr(pImage); 7322 6674 Assert(uOffset % 512 == 0); … … 7324 6676 AssertReturn((VALID_PTR(pIoCtx) && cbToRead), VERR_INVALID_PARAMETER); 7325 6677 AssertReturn(uOffset + cbToRead <= pImage->cbSize, VERR_INVALID_PARAMETER); 7326 7327 6678 /* Find the extent and check access permissions as defined in the extent descriptor. */ 7328 6679 PVMDKEXTENT pExtent; … … 7335 6686 /* Clip read range to remain in this extent. */ 7336 6687 cbToRead = RT_MIN(cbToRead, VMDK_SECTOR2BYTE(pExtent->uSectorOffset + pExtent->cNominalSectors - uSectorExtentRel)); 7337 7338 6688 /* Handle the read according to the current extent type. */ 7339 6689 switch (pExtent->enmType) … … 7342 6692 { 7343 6693 uint64_t uSectorExtentAbs; 7344 7345 6694 rc = vmdkGetSector(pImage, pIoCtx, pExtent, uSectorExtentRel, &uSectorExtentAbs); 7346 6695 if (RT_FAILURE(rc)) … … 7366 6715 AssertMsg(vdIfIoIntIoCtxIsSynchronous(pImage->pIfIo, pIoCtx), 7367 6716 ("Async I/O is not supported for stream optimized VMDK's\n")); 7368 7369 6717 uint32_t uSectorInGrain = uSectorExtentRel % pExtent->cSectorsPerGrain; 7370 6718 uSectorExtentAbs -= uSectorInGrain; … … 7407 6755 { 7408 6756 size_t cbSet; 7409 7410 6757 cbSet = vdIfIoIntIoCtxSet(pImage->pIfIo, pIoCtx, 0, cbToRead); 7411 6758 Assert(cbSet == cbToRead); … … 7418 6765 else if (RT_SUCCESS(rc)) 7419 6766 rc = VERR_VD_VMDK_INVALID_STATE; 7420 7421 6767 LogFlowFunc(("returns %Rrc\n", rc)); 7422 6768 return rc; 7423 6769 } 7424 7425 6770 /** @copydoc VDIMAGEBACKEND::pfnWrite */ 7426 6771 static DECLCALLBACK(int) vmdkWrite(void *pBackendData, uint64_t uOffset, size_t cbToWrite, … … 7432 6777 PVMDKIMAGE pImage = (PVMDKIMAGE)pBackendData; 7433 6778 int rc; 7434 7435 6779 AssertPtr(pImage); 7436 6780 Assert(uOffset % 512 == 0); 7437 6781 Assert(cbToWrite % 512 == 0); 7438 6782 AssertReturn((VALID_PTR(pIoCtx) && cbToWrite), VERR_INVALID_PARAMETER); 7439 7440 6783 if (!(pImage->uOpenFlags & VD_OPEN_FLAGS_READONLY)) 7441 6784 { … … 7443 6786 uint64_t uSectorExtentRel; 7444 6787 uint64_t uSectorExtentAbs; 7445 7446 6788 /* No size check here, will do that later when the extent is located. 7447 6789 * There are sparse images out there which according to the spec are … … 7450 6792 * grain boundaries, and with the nominal size not being a multiple of the 7451 6793 * grain size), this would prevent writing to the last grain. 
*/ 7452 7453 6794 rc = vmdkFindExtent(pImage, VMDK_BYTE2SECTOR(uOffset), 7454 6795 &pExtent, &uSectorExtentRel); … … 7548 6889 } 7549 6890 } 7550 7551 6891 if (pcbWriteProcess) 7552 6892 *pcbWriteProcess = cbToWrite; … … 7555 6895 else 7556 6896 rc = VERR_VD_IMAGE_READ_ONLY; 7557 7558 6897 LogFlowFunc(("returns %Rrc\n", rc)); 7559 6898 return rc; 7560 6899 } 7561 7562 6900 /** @copydoc VDIMAGEBACKEND::pfnFlush */ 7563 6901 static DECLCALLBACK(int) vmdkFlush(void *pBackendData, PVDIOCTX pIoCtx) 7564 6902 { 7565 6903 PVMDKIMAGE pImage = (PVMDKIMAGE)pBackendData; 7566 7567 6904 return vmdkFlushImage(pImage, pIoCtx); 7568 6905 } 7569 7570 6906 /** @copydoc VDIMAGEBACKEND::pfnGetVersion */ 7571 6907 static DECLCALLBACK(unsigned) vmdkGetVersion(void *pBackendData) … … 7573 6909 LogFlowFunc(("pBackendData=%#p\n", pBackendData)); 7574 6910 PVMDKIMAGE pImage = (PVMDKIMAGE)pBackendData; 7575 7576 6911 AssertPtrReturn(pImage, 0); 7577 7578 6912 return VMDK_IMAGE_VERSION; 7579 6913 } 7580 7581 6914 /** @copydoc VDIMAGEBACKEND::pfnGetFileSize */ 7582 6915 static DECLCALLBACK(uint64_t) vmdkGetFileSize(void *pBackendData) … … 7585 6918 PVMDKIMAGE pImage = (PVMDKIMAGE)pBackendData; 7586 6919 uint64_t cb = 0; 7587 7588 6920 AssertPtrReturn(pImage, 0); 7589 7590 6921 if (pImage->pFile != NULL) 7591 6922 { … … 7605 6936 } 7606 6937 } 7607 7608 6938 LogFlowFunc(("returns %lld\n", cb)); 7609 6939 return cb; 7610 6940 } 7611 7612 6941 /** @copydoc VDIMAGEBACKEND::pfnGetPCHSGeometry */ 7613 6942 static DECLCALLBACK(int) vmdkGetPCHSGeometry(void *pBackendData, PVDGEOMETRY pPCHSGeometry) … … 7616 6945 PVMDKIMAGE pImage = (PVMDKIMAGE)pBackendData; 7617 6946 int rc = VINF_SUCCESS; 7618 7619 6947 AssertPtrReturn(pImage, VERR_VD_NOT_OPENED); 7620 7621 6948 if (pImage->PCHSGeometry.cCylinders) 7622 6949 *pPCHSGeometry = pImage->PCHSGeometry; 7623 6950 else 7624 6951 rc = VERR_VD_GEOMETRY_NOT_SET; 7625 7626 6952 LogFlowFunc(("returns %Rrc (PCHS=%u/%u/%u)\n", rc, pPCHSGeometry->cCylinders, pPCHSGeometry->cHeads, pPCHSGeometry->cSectors)); 7627 6953 return rc; 7628 6954 } 7629 7630 6955 /** @copydoc VDIMAGEBACKEND::pfnSetPCHSGeometry */ 7631 6956 static DECLCALLBACK(int) vmdkSetPCHSGeometry(void *pBackendData, PCVDGEOMETRY pPCHSGeometry) … … 7635 6960 PVMDKIMAGE pImage = (PVMDKIMAGE)pBackendData; 7636 6961 int rc = VINF_SUCCESS; 7637 7638 6962 AssertPtrReturn(pImage, VERR_VD_NOT_OPENED); 7639 7640 6963 if (!(pImage->uOpenFlags & VD_OPEN_FLAGS_READONLY)) 7641 6964 { … … 7651 6974 else 7652 6975 rc = VERR_VD_IMAGE_READ_ONLY; 7653 7654 6976 LogFlowFunc(("returns %Rrc\n", rc)); 7655 6977 return rc; 7656 6978 } 7657 7658 6979 /** @copydoc VDIMAGEBACKEND::pfnGetLCHSGeometry */ 7659 6980 static DECLCALLBACK(int) vmdkGetLCHSGeometry(void *pBackendData, PVDGEOMETRY pLCHSGeometry) … … 7662 6983 PVMDKIMAGE pImage = (PVMDKIMAGE)pBackendData; 7663 6984 int rc = VINF_SUCCESS; 7664 7665 6985 AssertPtrReturn(pImage, VERR_VD_NOT_OPENED); 7666 7667 6986 if (pImage->LCHSGeometry.cCylinders) 7668 6987 *pLCHSGeometry = pImage->LCHSGeometry; 7669 6988 else 7670 6989 rc = VERR_VD_GEOMETRY_NOT_SET; 7671 7672 6990 LogFlowFunc(("returns %Rrc (LCHS=%u/%u/%u)\n", rc, pLCHSGeometry->cCylinders, pLCHSGeometry->cHeads, pLCHSGeometry->cSectors)); 7673 6991 return rc; 7674 6992 } 7675 7676 6993 /** @copydoc VDIMAGEBACKEND::pfnSetLCHSGeometry */ 7677 6994 static DECLCALLBACK(int) vmdkSetLCHSGeometry(void *pBackendData, PCVDGEOMETRY pLCHSGeometry) … … 7681 6998 PVMDKIMAGE pImage = (PVMDKIMAGE)pBackendData; 7682 6999 int rc = VINF_SUCCESS; 7683 7684 7000 
AssertPtrReturn(pImage, VERR_VD_NOT_OPENED); 7685 7686 7001 if (!(pImage->uOpenFlags & VD_OPEN_FLAGS_READONLY)) 7687 7002 { … … 7697 7012 else 7698 7013 rc = VERR_VD_IMAGE_READ_ONLY; 7699 7700 7014 LogFlowFunc(("returns %Rrc\n", rc)); 7701 7015 return rc; 7702 7016 } 7703 7704 7017 /** @copydoc VDIMAGEBACKEND::pfnQueryRegions */ 7705 7018 static DECLCALLBACK(int) vmdkQueryRegions(void *pBackendData, PCVDREGIONLIST *ppRegionList) … … 7707 7020 LogFlowFunc(("pBackendData=%#p ppRegionList=%#p\n", pBackendData, ppRegionList)); 7708 7021 PVMDKIMAGE pThis = (PVMDKIMAGE)pBackendData; 7709 7710 7022 AssertPtrReturn(pThis, VERR_VD_NOT_OPENED); 7711 7712 7023 *ppRegionList = &pThis->RegionList; 7713 7024 LogFlowFunc(("returns %Rrc\n", VINF_SUCCESS)); 7714 7025 return VINF_SUCCESS; 7715 7026 } 7716 7717 7027 /** @copydoc VDIMAGEBACKEND::pfnRegionListRelease */ 7718 7028 static DECLCALLBACK(void) vmdkRegionListRelease(void *pBackendData, PCVDREGIONLIST pRegionList) … … 7722 7032 PVMDKIMAGE pThis = (PVMDKIMAGE)pBackendData; 7723 7033 AssertPtr(pThis); RT_NOREF(pThis); 7724 7725 7034 /* Nothing to do here. */ 7726 7035 } 7727 7728 7036 /** @copydoc VDIMAGEBACKEND::pfnGetImageFlags */ 7729 7037 static DECLCALLBACK(unsigned) vmdkGetImageFlags(void *pBackendData) … … 7731 7039 LogFlowFunc(("pBackendData=%#p\n", pBackendData)); 7732 7040 PVMDKIMAGE pImage = (PVMDKIMAGE)pBackendData; 7733 7734 7041 AssertPtrReturn(pImage, 0); 7735 7736 7042 LogFlowFunc(("returns %#x\n", pImage->uImageFlags)); 7737 7043 return pImage->uImageFlags; 7738 7044 } 7739 7740 7045 /** @copydoc VDIMAGEBACKEND::pfnGetOpenFlags */ 7741 7046 static DECLCALLBACK(unsigned) vmdkGetOpenFlags(void *pBackendData) … … 7743 7048 LogFlowFunc(("pBackendData=%#p\n", pBackendData)); 7744 7049 PVMDKIMAGE pImage = (PVMDKIMAGE)pBackendData; 7745 7746 7050 AssertPtrReturn(pImage, 0); 7747 7748 7051 LogFlowFunc(("returns %#x\n", pImage->uOpenFlags)); 7749 7052 return pImage->uOpenFlags; 7750 7053 } 7751 7752 7054 /** @copydoc VDIMAGEBACKEND::pfnSetOpenFlags */ 7753 7055 static DECLCALLBACK(int) vmdkSetOpenFlags(void *pBackendData, unsigned uOpenFlags) … … 7756 7058 PVMDKIMAGE pImage = (PVMDKIMAGE)pBackendData; 7757 7059 int rc; 7758 7759 7060 /* Image must be opened and the new flags must be valid. 
*/ 7760 7061 if (!pImage || (uOpenFlags & ~( VD_OPEN_FLAGS_READONLY | VD_OPEN_FLAGS_INFO … … 7779 7080 } 7780 7081 } 7781 7782 7082 LogFlowFunc(("returns %Rrc\n", rc)); 7783 7083 return rc; 7784 7084 } 7785 7786 7085 /** @copydoc VDIMAGEBACKEND::pfnGetComment */ 7787 7086 static DECLCALLBACK(int) vmdkGetComment(void *pBackendData, char *pszComment, size_t cbComment) … … 7789 7088 LogFlowFunc(("pBackendData=%#p pszComment=%#p cbComment=%zu\n", pBackendData, pszComment, cbComment)); 7790 7089 PVMDKIMAGE pImage = (PVMDKIMAGE)pBackendData; 7791 7792 7090 AssertPtrReturn(pImage, VERR_VD_NOT_OPENED); 7793 7794 7091 char *pszCommentEncoded = NULL; 7795 7092 int rc = vmdkDescDDBGetStr(pImage, &pImage->Descriptor, … … 7800 7097 rc = VINF_SUCCESS; 7801 7098 } 7802 7803 7099 if (RT_SUCCESS(rc)) 7804 7100 { … … 7807 7103 else if (pszComment) 7808 7104 *pszComment = '\0'; 7809 7810 7105 if (pszCommentEncoded) 7811 7106 RTMemTmpFree(pszCommentEncoded); 7812 7107 } 7813 7814 7108 LogFlowFunc(("returns %Rrc comment='%s'\n", rc, pszComment)); 7815 7109 return rc; 7816 7110 } 7817 7818 7111 /** @copydoc VDIMAGEBACKEND::pfnSetComment */ 7819 7112 static DECLCALLBACK(int) vmdkSetComment(void *pBackendData, const char *pszComment) … … 7822 7115 PVMDKIMAGE pImage = (PVMDKIMAGE)pBackendData; 7823 7116 int rc; 7824 7825 7117 AssertPtrReturn(pImage, VERR_VD_NOT_OPENED); 7826 7827 7118 if (!(pImage->uOpenFlags & VD_OPEN_FLAGS_READONLY)) 7828 7119 { … … 7834 7125 else 7835 7126 rc = VERR_VD_IMAGE_READ_ONLY; 7836 7837 7127 LogFlowFunc(("returns %Rrc\n", rc)); 7838 7128 return rc; 7839 7129 } 7840 7841 7130 /** @copydoc VDIMAGEBACKEND::pfnGetUuid */ 7842 7131 static DECLCALLBACK(int) vmdkGetUuid(void *pBackendData, PRTUUID pUuid) … … 7844 7133 LogFlowFunc(("pBackendData=%#p pUuid=%#p\n", pBackendData, pUuid)); 7845 7134 PVMDKIMAGE pImage = (PVMDKIMAGE)pBackendData; 7846 7847 7135 AssertPtrReturn(pImage, VERR_VD_NOT_OPENED); 7848 7849 7136 *pUuid = pImage->ImageUuid; 7850 7851 7137 LogFlowFunc(("returns %Rrc (%RTuuid)\n", VINF_SUCCESS, pUuid)); 7852 7138 return VINF_SUCCESS; 7853 7139 } 7854 7855 7140 /** @copydoc VDIMAGEBACKEND::pfnSetUuid */ 7856 7141 static DECLCALLBACK(int) vmdkSetUuid(void *pBackendData, PCRTUUID pUuid) … … 7859 7144 PVMDKIMAGE pImage = (PVMDKIMAGE)pBackendData; 7860 7145 int rc = VINF_SUCCESS; 7861 7862 7146 AssertPtrReturn(pImage, VERR_VD_NOT_OPENED); 7863 7864 7147 if (!(pImage->uOpenFlags & VD_OPEN_FLAGS_READONLY)) 7865 7148 { … … 7878 7161 else 7879 7162 rc = VERR_VD_IMAGE_READ_ONLY; 7880 7881 7163 LogFlowFunc(("returns %Rrc\n", rc)); 7882 7164 return rc; 7883 7165 } 7884 7885 7166 /** @copydoc VDIMAGEBACKEND::pfnGetModificationUuid */ 7886 7167 static DECLCALLBACK(int) vmdkGetModificationUuid(void *pBackendData, PRTUUID pUuid) … … 7888 7169 LogFlowFunc(("pBackendData=%#p pUuid=%#p\n", pBackendData, pUuid)); 7889 7170 PVMDKIMAGE pImage = (PVMDKIMAGE)pBackendData; 7890 7891 7171 AssertPtrReturn(pImage, VERR_VD_NOT_OPENED); 7892 7893 7172 *pUuid = pImage->ModificationUuid; 7894 7895 7173 LogFlowFunc(("returns %Rrc (%RTuuid)\n", VINF_SUCCESS, pUuid)); 7896 7174 return VINF_SUCCESS; 7897 7175 } 7898 7899 7176 /** @copydoc VDIMAGEBACKEND::pfnSetModificationUuid */ 7900 7177 static DECLCALLBACK(int) vmdkSetModificationUuid(void *pBackendData, PCRTUUID pUuid) … … 7903 7180 PVMDKIMAGE pImage = (PVMDKIMAGE)pBackendData; 7904 7181 int rc = VINF_SUCCESS; 7905 7906 7182 AssertPtrReturn(pImage, VERR_VD_NOT_OPENED); 7907 7908 7183 if (!(pImage->uOpenFlags & VD_OPEN_FLAGS_READONLY)) 7909 7184 { … … 7925 
7200 else 7926 7201 rc = VERR_VD_IMAGE_READ_ONLY; 7927 7928 7202 LogFlowFunc(("returns %Rrc\n", rc)); 7929 7203 return rc; 7930 7204 } 7931 7932 7205 /** @copydoc VDIMAGEBACKEND::pfnGetParentUuid */ 7933 7206 static DECLCALLBACK(int) vmdkGetParentUuid(void *pBackendData, PRTUUID pUuid) … … 7935 7208 LogFlowFunc(("pBackendData=%#p pUuid=%#p\n", pBackendData, pUuid)); 7936 7209 PVMDKIMAGE pImage = (PVMDKIMAGE)pBackendData; 7937 7938 7210 AssertPtrReturn(pImage, VERR_VD_NOT_OPENED); 7939 7940 7211 *pUuid = pImage->ParentUuid; 7941 7942 7212 LogFlowFunc(("returns %Rrc (%RTuuid)\n", VINF_SUCCESS, pUuid)); 7943 7213 return VINF_SUCCESS; 7944 7214 } 7945 7946 7215 /** @copydoc VDIMAGEBACKEND::pfnSetParentUuid */ 7947 7216 static DECLCALLBACK(int) vmdkSetParentUuid(void *pBackendData, PCRTUUID pUuid) … … 7950 7219 PVMDKIMAGE pImage = (PVMDKIMAGE)pBackendData; 7951 7220 int rc = VINF_SUCCESS; 7952 7953 7221 AssertPtrReturn(pImage, VERR_VD_NOT_OPENED); 7954 7955 7222 if (!(pImage->uOpenFlags & VD_OPEN_FLAGS_READONLY)) 7956 7223 { … … 7969 7236 else 7970 7237 rc = VERR_VD_IMAGE_READ_ONLY; 7971 7972 7238 LogFlowFunc(("returns %Rrc\n", rc)); 7973 7239 return rc; 7974 7240 } 7975 7976 7241 /** @copydoc VDIMAGEBACKEND::pfnGetParentModificationUuid */ 7977 7242 static DECLCALLBACK(int) vmdkGetParentModificationUuid(void *pBackendData, PRTUUID pUuid) … … 7979 7244 LogFlowFunc(("pBackendData=%#p pUuid=%#p\n", pBackendData, pUuid)); 7980 7245 PVMDKIMAGE pImage = (PVMDKIMAGE)pBackendData; 7981 7982 7246 AssertPtrReturn(pImage, VERR_VD_NOT_OPENED); 7983 7984 7247 *pUuid = pImage->ParentModificationUuid; 7985 7986 7248 LogFlowFunc(("returns %Rrc (%RTuuid)\n", VINF_SUCCESS, pUuid)); 7987 7249 return VINF_SUCCESS; 7988 7250 } 7989 7990 7251 /** @copydoc VDIMAGEBACKEND::pfnSetParentModificationUuid */ 7991 7252 static DECLCALLBACK(int) vmdkSetParentModificationUuid(void *pBackendData, PCRTUUID pUuid) … … 7994 7255 PVMDKIMAGE pImage = (PVMDKIMAGE)pBackendData; 7995 7256 int rc = VINF_SUCCESS; 7996 7997 7257 AssertPtrReturn(pImage, VERR_VD_NOT_OPENED); 7998 7999 7258 if (!(pImage->uOpenFlags & VD_OPEN_FLAGS_READONLY)) 8000 7259 { … … 8012 7271 else 8013 7272 rc = VERR_VD_IMAGE_READ_ONLY; 8014 8015 7273 LogFlowFunc(("returns %Rrc\n", rc)); 8016 7274 return rc; 8017 7275 } 8018 8019 7276 /** @copydoc VDIMAGEBACKEND::pfnDump */ 8020 7277 static DECLCALLBACK(void) vmdkDump(void *pBackendData) 8021 7278 { 8022 7279 PVMDKIMAGE pImage = (PVMDKIMAGE)pBackendData; 8023 8024 7280 AssertPtrReturnVoid(pImage); 8025 7281 vdIfErrorMessage(pImage->pIfError, "Header: Geometry PCHS=%u/%u/%u LCHS=%u/%u/%u cbSector=%llu\n", … … 8032 7288 vdIfErrorMessage(pImage->pIfError, "Header: uuidParentModification={%RTuuid}\n", &pImage->ParentModificationUuid); 8033 7289 } 8034 8035 8036 8037 7290 const VDIMAGEBACKEND g_VmdkBackend = 8038 7291 {