- Timestamp:
  Sep 12, 2016 5:45:32 PM
- File:
  1 edited
trunk/src/VBox/Storage/VMDK.cpp
Changes between r63802 and r63805, by function:

vmdkCreateRegularImage
  - The PFNVDPROGRESS pfnProgress / void *pvUser parameter pair is replaced by a single
    PVDINTERFACEPROGRESS pIfProgress parameter:

        static int vmdkCreateRegularImage(PVMDKIMAGE pImage, uint64_t cbSize,
                                          unsigned uImageFlags, PVDINTERFACEPROGRESS pIfProgress,
                                          unsigned uPercentStart, unsigned uPercentSpan)

  - vdIfIoIntFileSetAllocationSize() now receives pIfProgress->pfnProgress and
    pIfProgress->Core.pvUser instead of the loose pfnProgress/pvUser pair.
  - The manual progress callback (a standalone sketch of this calling pattern follows
    after these entries)

        if (RT_SUCCESS(rc) && pfnProgress)
            pfnProgress(pvUser, uPercentStart + cbOffset * uPercentSpan / cbSize);

    becomes

        if (RT_SUCCESS(rc))
            vdIfProgress(pIfProgress, uPercentStart + cbOffset * uPercentSpan / cbSize);

vmdkCreateStreamImage
  - The unused uImageFlags, pfnProgress, pvUser, uPercentStart and uPercentSpan parameters
    (previously silenced with RT_NOREF5) are dropped:

        static int vmdkCreateStreamImage(PVMDKIMAGE pImage, uint64_t cbSize)

  - A failure to set the "streamOptimized" image type no longer returns immediately; the
    vdIfError() status is stored in rc and returned at the end of the function.

vmdkCreateImageDdbUuidsInit (new)
  - New helper, "Initializes the UUID fields in the DDB": it stores VMDK_DDB_IMAGE_UUID,
    VMDK_DDB_PARENT_UUID, VMDK_DDB_MODIFICATION_UUID and VMDK_DDB_PARENT_MODIFICATION_UUID
    via vmdkDescDDBSetUuid() in nested RT_SUCCESS checks, wrapping each failure in the
    matching vdIfError() message.
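The same pfnProgress/pvUser to pIfProgress conversion recurs throughout the changeset. Below
is a minimal standalone sketch of the pattern; the types and the helper are simplified
stand-ins, not the real VBox/IPRT definitions of PFNVDPROGRESS, VDINTERFACEPROGRESS and
vdIfProgress().

    #include <stdio.h>

    typedef int (*PFNPROGRESS)(void *pvUser, unsigned uPercent);    /* stand-in for PFNVDPROGRESS */

    typedef struct PROGRESSIF                                       /* stand-in for VDINTERFACEPROGRESS */
    {
        void        *pvUser;
        PFNPROGRESS  pfnProgress;
    } PROGRESSIF;

    /* Helper in the spirit of vdIfProgress(): a missing interface is simply a no-op. */
    static int ifProgress(PROGRESSIF *pIfProgress, unsigned uPercent)
    {
        if (pIfProgress && pIfProgress->pfnProgress)
            return pIfProgress->pfnProgress(pIfProgress->pvUser, uPercent);
        return 0;
    }

    static int printProgress(void *pvUser, unsigned uPercent)
    {
        printf("%s: %u%%\n", (const char *)pvUser, uPercent);
        return 0;
    }

    int main(void)
    {
        PROGRESSIF If = { (void *)"create", printProgress };

        /* Old shape: every call site carries the pair and checks it for NULL itself. */
        if (If.pfnProgress)
            If.pfnProgress(If.pvUser, 50);

        /* New shape: hand the interface around and let the helper do the check. */
        ifProgress(&If, 98);
        ifProgress(NULL, 99);   /* no progress interface registered -> silently skipped */
        return 0;
    }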
vmdkCreateImage
  - Takes PVDINTERFACEPROGRESS pIfProgress instead of the pfnProgress/pvUser pair.
  - The goto out error handling is removed. Descriptor creation, the raw / streamOptimized /
    regular extent creation branches (vmdkCreateStreamImage() is now called with only pImage
    and cbSize, vmdkCreateRegularImage() with pIfProgress), insertion of the extents into the
    descriptor, vmdkDescExtRemoveDummy(), the PCHS/LCHS geometry setup,
    vmdkAllocateGrainTableCache(), vmdkSetImageComment() and the final streamOptimized
    header/descriptor write or vmdkFlushImage() are all chained with RT_SUCCESS(rc) checks
    instead of jumping to a label (sketched below).
  - The four inline vmdkDescDDBSetUuid() calls are replaced by RTUuidClear() of the parent,
    modification and parent modification UUIDs followed by a single call to the new
    vmdkCreateImageDdbUuidsInit() helper.
  - Progress is reported with vdIfProgress() at 98 %, 99 % and uPercentStart + uPercentSpan;
    on failure the image is still freed with vmdkFreeImage(pImage, rc != VERR_ALREADY_EXISTS).

vmdkSetImageComment
  - pszCommentEncoded is initialized to NULL and the buffer is freed only when it was
    actually allocated ("if (pszCommentEncoded)" instead of "if (pszComment)").

vmdkFlushImage
  - The goto out paths are removed: the dirty descriptor is written first, and only on
    success does the per-extent loop write dirty sparse-extent metadata and flush the extent
    files, breaking out of the loop on the first failure instead of jumping to a label.
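Most of the remaining hunks apply the same restructuring: the shared out: label is dropped
and each step runs only while rc is still a success. Below is a minimal standalone sketch of
that shape, using simplified stand-ins for the IPRT status macros rather than their real
definitions.

    #include <stdio.h>

    #define VINF_SUCCESS      0
    #define VERR_STEP_FAILED  (-1)
    #define RT_SUCCESS(rc)    ((rc) >= 0)    /* simplified stand-in for the IPRT macro */
    #define RT_FAILURE(rc)    ((rc) < 0)     /* simplified stand-in for the IPRT macro */

    static int step(const char *pszName, int fFail)
    {
        printf("running: %s\n", pszName);
        return fFail ? VERR_STEP_FAILED : VINF_SUCCESS;
    }

    /* Each step only runs while rc is still a success; cleanup happens once at the end. */
    static int createImage(int fFailExtents)
    {
        int rc = step("create descriptor", 0);
        if (RT_SUCCESS(rc))
        {
            rc = step("create extents", fFailExtents);
            if (RT_SUCCESS(rc))
                rc = step("write header", 0);
        }

        if (RT_FAILURE(rc))
            printf("cleanup after failure (rc=%d)\n", rc);
        return rc;
    }

    int main(void)
    {
        createImage(0);   /* all steps run */
        createImage(1);   /* later steps are skipped, cleanup runs once */
        return 0;
    }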
vmdkProbe
  - The hand-rolled filename check (which also rejected names containing a double quote) is
    replaced by AssertReturn(VALID_PTR(pszFilename) && *pszFilename, VERR_INVALID_PARAMETER).
  - The goto out paths are removed: the temporary VMDKIMAGE is allocated (RT_LIKELY),
    initialized, test-opened with VD_OPEN_FLAGS_INFO | VD_OPEN_FLAGS_READONLY, freed and
    released in one structured branch; *penmType = VDTYPE_HDD is still set only on success.

vmdkOpen
  - RT_NOREF1(enmType) (with the existing "@todo r=klaus make use of the type info" note)
    moves to the top of the function.
  - The open-flag and filename checks become AssertReturn() statements (the double-quote
    filename check is dropped here as well), and the goto-based cleanup is replaced by a
    structured allocation / vmdkOpenImage() / RTMemFree() chain.

vmdkCreate
  - The progress callback pair is no longer extracted up front; VDIfProgressGet() is called
    once and the resulting PVDINTERFACEPROGRESS is passed straight to vmdkCreateImage().
  - The parameter checks are converted to early returns: an invalid container type or image
    flag yields VERR_VD_INVALID_TYPE, an invalid size VERR_VD_INVALID_SIZE, an invalid
    streamOptimized flag combination VERR_INVALID_PARAMETER, and the open-flag, filename and
    geometry-pointer checks become AssertReturn() statements.
  - The goto out cleanup is replaced by nested RT_LIKELY/RT_SUCCESS branches; pDescData and
    pImage are freed on failure, and the optional read-only reopen after creation is kept.
vmdkRename
  - cExtents is now initialized from pImage->cExtents at its declaration, and the argument
    check (image present, not a raw disk image, valid new filename) is converted to a single
    AssertReturn().

vmdkClose
  - Minor cleanup: rc is initialized directly from the vmdkFreeImage() call.

vmdkRead
  - Parameter validation moves to AssertReturn(): pIoCtx and cbToRead must be valid and
    uOffset + cbToRead must stay within pImage->cbSize.
  - The goto out paths are removed. vmdkFindExtent() and the VMDKACCESS_NOACCESS check now
    guard a single switch over the extent type; a failing vmdkGetSector() or
    vmdkFileInflateSync() breaks out of the switch instead of jumping to a label, the
    VMDKETYPE_HOSTED_SPARSE and VMDKETYPE_ZERO cases get their own blocks, and
    *pcbActuallyRead is set inside the success branch. An extent with VMDKACCESS_NOACCESS
    still yields VERR_VD_VMDK_INVALID_STATE.
  - Reads are still clipped first to the remainder of the extent and then, for sparse
    extents, to the remainder of the current grain (see the arithmetic sketch below).

vmdkWrite
  - The same treatment as vmdkRead: AssertReturn() for pIoCtx/cbToWrite and for
    uOffset + cbToWrite <= pImage->cbSize, the read-only check becomes the outer branch
    returning VERR_VD_IMAGE_READ_ONLY, and the access-permission check yields
    VERR_VD_VMDK_INVALID_STATE without a goto.
  - In the VMDKETYPE_HOSTED_SPARSE case the clip-to-grain logic now runs after a successful
    vmdkGetSector(); unallocated grains are either allocated via vmdkAllocGrain(), reported
    as VERR_VD_BLOCK_FREE (honouring VD_WRITE_NO_ALLOC and filling *pcbPreRead /
    *pcbPostRead), or appended via vmdkStreamAllocGrain() for streamOptimized images, and a
    partial write to an already written streamOptimized grain still fails with
    VERR_VD_VMDK_INVALID_STATE.
  - The VMDKETYPE_VMFS / VMDKETYPE_FLAT / VMDKETYPE_ZERO cases keep clipping the write to
    the extent; *pcbWriteProcess is set inside the success branch.
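For reference, the clipping arithmetic both paths rely on, as a small standalone example
with illustrative sector and grain sizes; the macros below are simplified stand-ins for
VMDK_SECTOR2BYTE() and RT_MIN(), not the real definitions.

    #include <stdint.h>
    #include <stdio.h>

    #define SECTOR_SIZE        512u
    #define SECTORS_PER_GRAIN  128u                          /* illustrative 64 KiB grain */
    #define SECTOR2BYTE(s)     ((uint64_t)(s) * SECTOR_SIZE) /* stand-in for VMDK_SECTOR2BYTE */
    #define MIN2(a, b)         ((a) < (b) ? (a) : (b))       /* stand-in for RT_MIN */

    int main(void)
    {
        uint64_t uSectorExtentRel = 130;           /* sector offset of the access inside the extent */
        uint64_t cbToRead         = 1024 * 1024;   /* requested size in bytes */

        /* Sector 130 is 2 sectors into its grain, so only 126 sectors remain before the
         * grain boundary; the request is clipped to 126 * 512 = 64512 bytes. */
        uint64_t cSectorsLeft = SECTORS_PER_GRAIN - uSectorExtentRel % SECTORS_PER_GRAIN;
        cbToRead = MIN2(cbToRead, SECTOR2BYTE(cSectorsLeft));

        printf("clipped to %llu bytes (%llu sectors left in the grain)\n",
               (unsigned long long)cbToRead, (unsigned long long)cSectorsLeft);
        return 0;
    }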
*/ 5837 cbToWrite = RT_MIN(cbToWrite, VMDK_SECTOR2BYTE(pExtent->uSectorOffset + pExtent->cNominalSectors - uSectorExtentRel)); 5838 break; 5839 } 5840 5841 if (pcbWriteProcess) 5842 *pcbWriteProcess = cbToWrite; 5843 5844 out: 5751 5752 if (pcbWriteProcess) 5753 *pcbWriteProcess = cbToWrite; 5754 } 5755 } 5756 else 5757 rc = VERR_VD_IMAGE_READ_ONLY; 5758 5845 5759 LogFlowFunc(("returns %Rrc\n", rc)); 5846 5760 return rc; … … 5861 5775 PVMDKIMAGE pImage = (PVMDKIMAGE)pBackendData; 5862 5776 5863 AssertPtr(pImage); 5864 5865 if (pImage) 5866 return VMDK_IMAGE_VERSION; 5867 else 5868 return 0; 5777 AssertPtrReturn(pImage, 0); 5778 5779 return VMDK_IMAGE_VERSION; 5869 5780 } 5870 5781 … … 5875 5786 PVMDKIMAGE pImage = (PVMDKIMAGE)pBackendData; 5876 5787 5877 AssertPtr(pImage); 5878 5879 if (pImage) 5880 return 512; 5881 else 5882 return 0; 5788 AssertPtrReturn(pImage, 0); 5789 5790 return 512; 5883 5791 } 5884 5792 … … 5889 5797 PVMDKIMAGE pImage = (PVMDKIMAGE)pBackendData; 5890 5798 5891 AssertPtr(pImage); 5892 5893 if (pImage) 5894 return pImage->cbSize; 5895 else 5896 return 0; 5799 AssertPtrReturn(pImage, 0); 5800 5801 return pImage->cbSize; 5897 5802 } 5898 5803 … … 5904 5809 uint64_t cb = 0; 5905 5810 5906 AssertPtr (pImage);5907 5908 if (pImage )5811 AssertPtrReturn(pImage, 0); 5812 5813 if (pImage->pFile != NULL) 5909 5814 { 5910 5815 uint64_t cbFile; 5911 if (pImage->pFile != NULL) 5912 { 5913 int rc = vdIfIoIntFileGetSize(pImage->pIfIo, pImage->pFile->pStorage, &cbFile); 5816 int rc = vdIfIoIntFileGetSize(pImage->pIfIo, pImage->pFile->pStorage, &cbFile); 5817 if (RT_SUCCESS(rc)) 5818 cb += cbFile; 5819 } 5820 for (unsigned i = 0; i < pImage->cExtents; i++) 5821 { 5822 if (pImage->pExtents[i].pFile != NULL) 5823 { 5824 uint64_t cbFile; 5825 int rc = vdIfIoIntFileGetSize(pImage->pIfIo, pImage->pExtents[i].pFile->pStorage, &cbFile); 5914 5826 if (RT_SUCCESS(rc)) 5915 5827 cb += cbFile; 5916 5828 } 5917 for (unsigned i = 0; i < pImage->cExtents; i++)5918 {5919 if (pImage->pExtents[i].pFile != NULL)5920 {5921 int rc = vdIfIoIntFileGetSize(pImage->pIfIo, pImage->pExtents[i].pFile->pStorage, &cbFile);5922 if (RT_SUCCESS(rc))5923 cb += cbFile;5924 }5925 }5926 5829 } 5927 5830 … … 5935 5838 LogFlowFunc(("pBackendData=%#p pPCHSGeometry=%#p\n", pBackendData, pPCHSGeometry)); 5936 5839 PVMDKIMAGE pImage = (PVMDKIMAGE)pBackendData; 5937 int rc; 5938 5939 AssertPtr(pImage); 5940 5941 if (pImage) 5942 { 5943 if (pImage->PCHSGeometry.cCylinders) 5944 { 5945 *pPCHSGeometry = pImage->PCHSGeometry; 5946 rc = VINF_SUCCESS; 5947 } 5948 else 5949 rc = VERR_VD_GEOMETRY_NOT_SET; 5950 } 5840 int rc = VINF_SUCCESS; 5841 5842 AssertPtrReturn(pImage, VERR_VD_NOT_OPENED); 5843 5844 if (pImage->PCHSGeometry.cCylinders) 5845 *pPCHSGeometry = pImage->PCHSGeometry; 5951 5846 else 5952 rc = VERR_VD_ NOT_OPENED;5847 rc = VERR_VD_GEOMETRY_NOT_SET; 5953 5848 5954 5849 LogFlowFunc(("returns %Rrc (PCHS=%u/%u/%u)\n", rc, pPCHSGeometry->cCylinders, pPCHSGeometry->cHeads, pPCHSGeometry->cSectors)); … … 5959 5854 static DECLCALLBACK(int) vmdkSetPCHSGeometry(void *pBackendData, PCVDGEOMETRY pPCHSGeometry) 5960 5855 { 5961 LogFlowFunc(("pBackendData=%#p pPCHSGeometry=%#p PCHS=%u/%u/%u\n", pBackendData, pPCHSGeometry, pPCHSGeometry->cCylinders, pPCHSGeometry->cHeads, pPCHSGeometry->cSectors)); 5856 LogFlowFunc(("pBackendData=%#p pPCHSGeometry=%#p PCHS=%u/%u/%u\n", 5857 pBackendData, pPCHSGeometry, pPCHSGeometry->cCylinders, pPCHSGeometry->cHeads, pPCHSGeometry->cSectors)); 5962 5858 PVMDKIMAGE pImage = (PVMDKIMAGE)pBackendData; 
5963 int rc ;5964 5965 AssertPtr (pImage);5966 5967 if ( pImage)5968 { 5969 if ( pImage->uOpenFlags & VD_OPEN_FLAGS_READONLY)5970 { 5971 rc = VERR_VD_IMAGE_READ_ONLY;5972 goto out;5973 }5974 if (pImage->uOpenFlags & VD_VMDK_IMAGE_FLAGS_STREAM_OPTIMIZED)5975 {5859 int rc = VINF_SUCCESS; 5860 5861 AssertPtrReturn(pImage, VERR_VD_NOT_OPENED); 5862 5863 if (!(pImage->uOpenFlags & VD_OPEN_FLAGS_READONLY)) 5864 { 5865 if (!(pImage->uOpenFlags & VD_VMDK_IMAGE_FLAGS_STREAM_OPTIMIZED)) 5866 { 5867 rc = vmdkDescSetPCHSGeometry(pImage, pPCHSGeometry); 5868 if (RT_SUCCESS(rc)) 5869 pImage->PCHSGeometry = *pPCHSGeometry; 5870 } 5871 else 5976 5872 rc = VERR_NOT_SUPPORTED; 5977 goto out;5978 }5979 rc = vmdkDescSetPCHSGeometry(pImage, pPCHSGeometry);5980 if (RT_FAILURE(rc))5981 goto out;5982 5983 pImage->PCHSGeometry = *pPCHSGeometry;5984 rc = VINF_SUCCESS;5985 5873 } 5986 5874 else 5987 rc = VERR_VD_NOT_OPENED; 5988 5989 out: 5875 rc = VERR_VD_IMAGE_READ_ONLY; 5876 5990 5877 LogFlowFunc(("returns %Rrc\n", rc)); 5991 5878 return rc; … … 5995 5882 static DECLCALLBACK(int) vmdkGetLCHSGeometry(void *pBackendData, PVDGEOMETRY pLCHSGeometry) 5996 5883 { 5997 5884 LogFlowFunc(("pBackendData=%#p pLCHSGeometry=%#p\n", pBackendData, pLCHSGeometry)); 5998 5885 PVMDKIMAGE pImage = (PVMDKIMAGE)pBackendData; 5999 int rc; 6000 6001 AssertPtr(pImage); 6002 6003 if (pImage) 6004 { 6005 if (pImage->LCHSGeometry.cCylinders) 6006 { 6007 *pLCHSGeometry = pImage->LCHSGeometry; 6008 rc = VINF_SUCCESS; 6009 } 6010 else 6011 rc = VERR_VD_GEOMETRY_NOT_SET; 6012 } 5886 int rc = VINF_SUCCESS; 5887 5888 AssertPtrReturn(pImage, VERR_VD_NOT_OPENED); 5889 5890 if (pImage->LCHSGeometry.cCylinders) 5891 *pLCHSGeometry = pImage->LCHSGeometry; 6013 5892 else 6014 rc = VERR_VD_ NOT_OPENED;5893 rc = VERR_VD_GEOMETRY_NOT_SET; 6015 5894 6016 5895 LogFlowFunc(("returns %Rrc (LCHS=%u/%u/%u)\n", rc, pLCHSGeometry->cCylinders, pLCHSGeometry->cHeads, pLCHSGeometry->cSectors)); … … 6021 5900 static DECLCALLBACK(int) vmdkSetLCHSGeometry(void *pBackendData, PCVDGEOMETRY pLCHSGeometry) 6022 5901 { 6023 LogFlowFunc(("pBackendData=%#p pLCHSGeometry=%#p LCHS=%u/%u/%u\n", pBackendData, pLCHSGeometry, pLCHSGeometry->cCylinders, pLCHSGeometry->cHeads, pLCHSGeometry->cSectors)); 5902 LogFlowFunc(("pBackendData=%#p pLCHSGeometry=%#p LCHS=%u/%u/%u\n", 5903 pBackendData, pLCHSGeometry, pLCHSGeometry->cCylinders, pLCHSGeometry->cHeads, pLCHSGeometry->cSectors)); 6024 5904 PVMDKIMAGE pImage = (PVMDKIMAGE)pBackendData; 6025 int rc ;6026 6027 AssertPtr (pImage);6028 6029 if ( pImage)6030 { 6031 if ( pImage->uOpenFlags & VD_OPEN_FLAGS_READONLY)6032 { 6033 rc = VERR_VD_IMAGE_READ_ONLY;6034 goto out;6035 }6036 if (pImage->uOpenFlags & VD_VMDK_IMAGE_FLAGS_STREAM_OPTIMIZED)6037 {5905 int rc = VINF_SUCCESS; 5906 5907 AssertPtrReturn(pImage, VERR_VD_NOT_OPENED); 5908 5909 if (!(pImage->uOpenFlags & VD_OPEN_FLAGS_READONLY)) 5910 { 5911 if (!(pImage->uOpenFlags & VD_VMDK_IMAGE_FLAGS_STREAM_OPTIMIZED)) 5912 { 5913 rc = vmdkDescSetLCHSGeometry(pImage, pLCHSGeometry); 5914 if (RT_SUCCESS(rc)) 5915 pImage->LCHSGeometry = *pLCHSGeometry; 5916 } 5917 else 6038 5918 rc = VERR_NOT_SUPPORTED; 6039 goto out;6040 }6041 rc = vmdkDescSetLCHSGeometry(pImage, pLCHSGeometry);6042 if (RT_FAILURE(rc))6043 goto out;6044 6045 pImage->LCHSGeometry = *pLCHSGeometry;6046 rc = VINF_SUCCESS;6047 5919 } 6048 5920 else 6049 rc = VERR_VD_NOT_OPENED; 6050 6051 out: 5921 rc = VERR_VD_IMAGE_READ_ONLY; 5922 6052 5923 LogFlowFunc(("returns %Rrc\n", rc)); 6053 5924 return rc; … … 6059 5930 
LogFlowFunc(("pBackendData=%#p\n", pBackendData)); 6060 5931 PVMDKIMAGE pImage = (PVMDKIMAGE)pBackendData; 6061 unsigned uImageFlags; 6062 6063 AssertPtr(pImage); 6064 6065 if (pImage) 6066 uImageFlags = pImage->uImageFlags; 6067 else 6068 uImageFlags = 0; 6069 6070 LogFlowFunc(("returns %#x\n", uImageFlags)); 6071 return uImageFlags; 5932 5933 AssertPtrReturn(pImage, 0); 5934 5935 LogFlowFunc(("returns %#x\n", pImage->uImageFlags)); 5936 return pImage->uImageFlags; 6072 5937 } 6073 5938 … … 6077 5942 LogFlowFunc(("pBackendData=%#p\n", pBackendData)); 6078 5943 PVMDKIMAGE pImage = (PVMDKIMAGE)pBackendData; 6079 unsigned uOpenFlags; 6080 6081 AssertPtr(pImage); 6082 6083 if (pImage) 6084 uOpenFlags = pImage->uOpenFlags; 6085 else 6086 uOpenFlags = 0; 6087 6088 LogFlowFunc(("returns %#x\n", uOpenFlags)); 6089 return uOpenFlags; 5944 5945 AssertPtrReturn(pImage, 0); 5946 5947 LogFlowFunc(("returns %#x\n", pImage->uOpenFlags)); 5948 return pImage->uOpenFlags; 6090 5949 } 6091 5950 … … 6101 5960 | VD_OPEN_FLAGS_ASYNC_IO | VD_OPEN_FLAGS_SHAREABLE 6102 5961 | VD_OPEN_FLAGS_SEQUENTIAL | VD_OPEN_FLAGS_SKIP_CONSISTENCY_CHECKS))) 6103 {6104 5962 rc = VERR_INVALID_PARAMETER; 6105 goto out; 6106 } 6107 6108 /* StreamOptimized images need special treatment: reopen is prohibited. */ 6109 if (pImage->uImageFlags & VD_VMDK_IMAGE_FLAGS_STREAM_OPTIMIZED) 6110 { 6111 if (pImage->uOpenFlags == uOpenFlags) 6112 rc = VINF_SUCCESS; 5963 else 5964 { 5965 /* StreamOptimized images need special treatment: reopen is prohibited. */ 5966 if (pImage->uImageFlags & VD_VMDK_IMAGE_FLAGS_STREAM_OPTIMIZED) 5967 { 5968 if (pImage->uOpenFlags == uOpenFlags) 5969 rc = VINF_SUCCESS; 5970 else 5971 rc = VERR_INVALID_PARAMETER; 5972 } 6113 5973 else 6114 rc = VERR_INVALID_PARAMETER; 6115 } 6116 else 6117 { 6118 /* Implement this operation via reopening the image. */ 6119 vmdkFreeImage(pImage, false); 6120 rc = vmdkOpenImage(pImage, uOpenFlags); 6121 } 6122 6123 out: 5974 { 5975 /* Implement this operation via reopening the image. 
*/ 5976 vmdkFreeImage(pImage, false); 5977 rc = vmdkOpenImage(pImage, uOpenFlags); 5978 } 5979 } 5980 6124 5981 LogFlowFunc(("returns %Rrc\n", rc)); 6125 5982 return rc; … … 6127 5984 6128 5985 /** @copydoc VDIMAGEBACKEND::pfnGetComment */ 6129 static DECLCALLBACK(int) vmdkGetComment(void *pBackendData, char *pszComment, 6130 size_t cbComment) 5986 static DECLCALLBACK(int) vmdkGetComment(void *pBackendData, char *pszComment, size_t cbComment) 6131 5987 { 6132 5988 LogFlowFunc(("pBackendData=%#p pszComment=%#p cbComment=%zu\n", pBackendData, pszComment, cbComment)); 6133 5989 PVMDKIMAGE pImage = (PVMDKIMAGE)pBackendData; 6134 int rc; 6135 6136 AssertPtr(pImage); 6137 6138 i f (pImage)6139 {6140 char *pszCommentEncoded = NULL;6141 rc = vmdkDescDDBGetStr(pImage, &pImage->Descriptor,6142 "ddb.comment", &pszCommentEncoded);6143 if (rc == VERR_VD_VMDK_VALUE_NOT_FOUND)6144 pszCommentEncoded = NULL;6145 else if (RT_FAILURE(rc)) 6146 goto out;6147 5990 5991 AssertPtrReturn(pImage, VERR_VD_NOT_OPENED); 5992 5993 char *pszCommentEncoded = NULL; 5994 int rc = vmdkDescDDBGetStr(pImage, &pImage->Descriptor, 5995 "ddb.comment", &pszCommentEncoded); 5996 if (rc == VERR_VD_VMDK_VALUE_NOT_FOUND) 5997 { 5998 pszCommentEncoded = NULL; 5999 rc = VINF_SUCCESS; 6000 } 6001 6002 if (RT_SUCCESS(rc)) 6003 { 6148 6004 if (pszComment && pszCommentEncoded) 6149 6005 rc = vmdkDecodeString(pszCommentEncoded, pszComment, cbComment); 6150 else 6151 { 6152 if (pszComment) 6006 else if (pszComment) 6153 6007 *pszComment = '\0'; 6154 rc = VINF_SUCCESS; 6155 } 6156 RTMemTmpFree(pszCommentEncoded); 6157 } 6158 else 6159 rc = VERR_VD_NOT_OPENED; 6160 6161 out: 6008 6009 if (pszCommentEncoded) 6010 RTMemTmpFree(pszCommentEncoded); 6011 } 6012 6162 6013 LogFlowFunc(("returns %Rrc comment='%s'\n", rc, pszComment)); 6163 6014 return rc; … … 6171 6022 int rc; 6172 6023 6173 AssertPtr(pImage); 6174 6175 if (pImage) 6176 { 6177 if (pImage->uOpenFlags & VD_OPEN_FLAGS_READONLY) 6178 { 6179 rc = VERR_VD_IMAGE_READ_ONLY; 6180 goto out; 6181 } 6182 if (pImage->uOpenFlags & VD_VMDK_IMAGE_FLAGS_STREAM_OPTIMIZED) 6183 { 6024 AssertPtrReturn(pImage, VERR_VD_NOT_OPENED); 6025 6026 if (!(pImage->uOpenFlags & VD_OPEN_FLAGS_READONLY)) 6027 { 6028 if (!(pImage->uOpenFlags & VD_VMDK_IMAGE_FLAGS_STREAM_OPTIMIZED)) 6029 rc = vmdkSetImageComment(pImage, pszComment); 6030 else 6184 6031 rc = VERR_NOT_SUPPORTED; 6185 goto out;6186 }6187 6188 rc = vmdkSetImageComment(pImage, pszComment);6189 6032 } 6190 6033 else 6191 rc = VERR_VD_NOT_OPENED; 6192 6193 out: 6034 rc = VERR_VD_IMAGE_READ_ONLY; 6035 6194 6036 LogFlowFunc(("returns %Rrc\n", rc)); 6195 6037 return rc; … … 6201 6043 LogFlowFunc(("pBackendData=%#p pUuid=%#p\n", pBackendData, pUuid)); 6202 6044 PVMDKIMAGE pImage = (PVMDKIMAGE)pBackendData; 6203 int rc; 6204 6205 AssertPtr(pImage); 6206 6207 if (pImage) 6208 { 6209 *pUuid = pImage->ImageUuid; 6210 rc = VINF_SUCCESS; 6211 } 6212 else 6213 rc = VERR_VD_NOT_OPENED; 6214 6215 LogFlowFunc(("returns %Rrc (%RTuuid)\n", rc, pUuid)); 6216 return rc; 6045 6046 AssertPtrReturn(pImage, VERR_VD_NOT_OPENED); 6047 6048 *pUuid = pImage->ImageUuid; 6049 6050 LogFlowFunc(("returns %Rrc (%RTuuid)\n", VINF_SUCCESS, pUuid)); 6051 return VINF_SUCCESS; 6217 6052 } 6218 6053 … … 6222 6057 LogFlowFunc(("pBackendData=%#p Uuid=%RTuuid\n", pBackendData, pUuid)); 6223 6058 PVMDKIMAGE pImage = (PVMDKIMAGE)pBackendData; 6224 int rc; 6225 6226 LogFlowFunc(("%RTuuid\n", pUuid)); 6227 AssertPtr(pImage); 6228 6229 if (pImage) 6230 { 6231 if (!(pImage->uOpenFlags & 
VD_OPEN_FLAGS_READONLY)) 6232 { 6233 if (!(pImage->uOpenFlags & VD_VMDK_IMAGE_FLAGS_STREAM_OPTIMIZED)) 6234 { 6235 pImage->ImageUuid = *pUuid; 6236 rc = vmdkDescDDBSetUuid(pImage, &pImage->Descriptor, 6237 VMDK_DDB_IMAGE_UUID, pUuid); 6238 if (RT_FAILURE(rc)) 6239 return vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: error storing image UUID in descriptor in '%s'"), pImage->pszFilename); 6240 rc = VINF_SUCCESS; 6241 } 6242 else 6243 rc = VERR_NOT_SUPPORTED; 6059 int rc = VINF_SUCCESS; 6060 6061 AssertPtrReturn(pImage, VERR_VD_NOT_OPENED); 6062 6063 if (!(pImage->uOpenFlags & VD_OPEN_FLAGS_READONLY)) 6064 { 6065 if (!(pImage->uOpenFlags & VD_VMDK_IMAGE_FLAGS_STREAM_OPTIMIZED)) 6066 { 6067 pImage->ImageUuid = *pUuid; 6068 rc = vmdkDescDDBSetUuid(pImage, &pImage->Descriptor, 6069 VMDK_DDB_IMAGE_UUID, pUuid); 6070 if (RT_FAILURE(rc)) 6071 rc = vdIfError(pImage->pIfError, rc, RT_SRC_POS, 6072 N_("VMDK: error storing image UUID in descriptor in '%s'"), pImage->pszFilename); 6244 6073 } 6245 6074 else 6246 rc = VERR_ VD_IMAGE_READ_ONLY;6075 rc = VERR_NOT_SUPPORTED; 6247 6076 } 6248 6077 else 6249 rc = VERR_VD_ NOT_OPENED;6078 rc = VERR_VD_IMAGE_READ_ONLY; 6250 6079 6251 6080 LogFlowFunc(("returns %Rrc\n", rc)); … … 6258 6087 LogFlowFunc(("pBackendData=%#p pUuid=%#p\n", pBackendData, pUuid)); 6259 6088 PVMDKIMAGE pImage = (PVMDKIMAGE)pBackendData; 6260 int rc; 6261 6262 AssertPtr(pImage); 6263 6264 if (pImage) 6265 { 6266 *pUuid = pImage->ModificationUuid; 6267 rc = VINF_SUCCESS; 6268 } 6269 else 6270 rc = VERR_VD_NOT_OPENED; 6271 6272 LogFlowFunc(("returns %Rrc (%RTuuid)\n", rc, pUuid)); 6273 return rc; 6089 6090 AssertPtrReturn(pImage, VERR_VD_NOT_OPENED); 6091 6092 *pUuid = pImage->ModificationUuid; 6093 6094 LogFlowFunc(("returns %Rrc (%RTuuid)\n", VINF_SUCCESS, pUuid)); 6095 return VINF_SUCCESS; 6274 6096 } 6275 6097 … … 6279 6101 LogFlowFunc(("pBackendData=%#p Uuid=%RTuuid\n", pBackendData, pUuid)); 6280 6102 PVMDKIMAGE pImage = (PVMDKIMAGE)pBackendData; 6281 int rc; 6282 6283 AssertPtr(pImage); 6284 6285 if (pImage) 6286 { 6287 if (!(pImage->uOpenFlags & VD_OPEN_FLAGS_READONLY)) 6288 { 6289 if (!(pImage->uOpenFlags & VD_VMDK_IMAGE_FLAGS_STREAM_OPTIMIZED)) 6103 int rc = VINF_SUCCESS; 6104 6105 AssertPtrReturn(pImage, VERR_VD_NOT_OPENED); 6106 6107 if (!(pImage->uOpenFlags & VD_OPEN_FLAGS_READONLY)) 6108 { 6109 if (!(pImage->uOpenFlags & VD_VMDK_IMAGE_FLAGS_STREAM_OPTIMIZED)) 6110 { 6111 /* Only touch the modification uuid if it changed. */ 6112 if (RTUuidCompare(&pImage->ModificationUuid, pUuid)) 6290 6113 { 6291 /* Only touch the modification uuid if it changed. 
…
     LogFlowFunc(("pBackendData=%#p Uuid=%RTuuid\n", pBackendData, pUuid));
     PVMDKIMAGE pImage = (PVMDKIMAGE)pBackendData;
-    int rc;
-
-    AssertPtr(pImage);
-
-    if (pImage)
-    {
-        if (!(pImage->uOpenFlags & VD_OPEN_FLAGS_READONLY))
-        {
-            if (!(pImage->uOpenFlags & VD_VMDK_IMAGE_FLAGS_STREAM_OPTIMIZED))
+    int rc = VINF_SUCCESS;
+
+    AssertPtrReturn(pImage, VERR_VD_NOT_OPENED);
+
+    if (!(pImage->uOpenFlags & VD_OPEN_FLAGS_READONLY))
+    {
+        if (!(pImage->uOpenFlags & VD_VMDK_IMAGE_FLAGS_STREAM_OPTIMIZED))
+        {
+            /* Only touch the modification uuid if it changed. */
+            if (RTUuidCompare(&pImage->ModificationUuid, pUuid))
             {
-                /* Only touch the modification uuid if it changed. */
-                if (RTUuidCompare(&pImage->ModificationUuid, pUuid))
-                {
-                    pImage->ModificationUuid = *pUuid;
-                    rc = vmdkDescDDBSetUuid(pImage, &pImage->Descriptor,
-                                            VMDK_DDB_MODIFICATION_UUID, pUuid);
-                    if (RT_FAILURE(rc))
-                        return vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: error storing modification UUID in descriptor in '%s'"), pImage->pszFilename);
-                }
-                rc = VINF_SUCCESS;
+                pImage->ModificationUuid = *pUuid;
+                rc = vmdkDescDDBSetUuid(pImage, &pImage->Descriptor,
+                                        VMDK_DDB_MODIFICATION_UUID, pUuid);
+                if (RT_FAILURE(rc))
+                    rc = vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: error storing modification UUID in descriptor in '%s'"), pImage->pszFilename);
             }
-            else
-                rc = VERR_NOT_SUPPORTED;
-        }
-        else
-            rc = VERR_VD_IMAGE_READ_ONLY;
+        }
+        else
+            rc = VERR_NOT_SUPPORTED;
     }
     else
-        rc = VERR_VD_NOT_OPENED;
+        rc = VERR_VD_IMAGE_READ_ONLY;
 
     LogFlowFunc(("returns %Rrc\n", rc));
…
     LogFlowFunc(("pBackendData=%#p pUuid=%#p\n", pBackendData, pUuid));
     PVMDKIMAGE pImage = (PVMDKIMAGE)pBackendData;
-    int rc;
-
-    AssertPtr(pImage);
-
-    if (pImage)
-    {
-        *pUuid = pImage->ParentUuid;
-        rc = VINF_SUCCESS;
-    }
-    else
-        rc = VERR_VD_NOT_OPENED;
-
-    LogFlowFunc(("returns %Rrc (%RTuuid)\n", rc, pUuid));
-    return rc;
+
+    AssertPtrReturn(pImage, VERR_VD_NOT_OPENED);
+
+    *pUuid = pImage->ParentUuid;
+
+    LogFlowFunc(("returns %Rrc (%RTuuid)\n", VINF_SUCCESS, pUuid));
+    return VINF_SUCCESS;
 }
…
     LogFlowFunc(("pBackendData=%#p Uuid=%RTuuid\n", pBackendData, pUuid));
     PVMDKIMAGE pImage = (PVMDKIMAGE)pBackendData;
-    int rc;
-
-    AssertPtr(pImage);
-
-    if (pImage)
-    {
-        if (!(pImage->uOpenFlags & VD_OPEN_FLAGS_READONLY))
-        {
-            if (!(pImage->uOpenFlags & VD_VMDK_IMAGE_FLAGS_STREAM_OPTIMIZED))
-            {
-                pImage->ParentUuid = *pUuid;
-                rc = vmdkDescDDBSetUuid(pImage, &pImage->Descriptor,
-                                        VMDK_DDB_PARENT_UUID, pUuid);
-                if (RT_FAILURE(rc))
-                    return vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: error storing parent image UUID in descriptor in '%s'"), pImage->pszFilename);
-                rc = VINF_SUCCESS;
-            }
-            else
-                rc = VERR_NOT_SUPPORTED;
+    int rc = VINF_SUCCESS;
+
+    AssertPtrReturn(pImage, VERR_VD_NOT_OPENED);
+
+    if (!(pImage->uOpenFlags & VD_OPEN_FLAGS_READONLY))
+    {
+        if (!(pImage->uOpenFlags & VD_VMDK_IMAGE_FLAGS_STREAM_OPTIMIZED))
+        {
+            pImage->ParentUuid = *pUuid;
+            rc = vmdkDescDDBSetUuid(pImage, &pImage->Descriptor,
+                                    VMDK_DDB_PARENT_UUID, pUuid);
+            if (RT_FAILURE(rc))
+                rc = vdIfError(pImage->pIfError, rc, RT_SRC_POS,
+                               N_("VMDK: error storing parent image UUID in descriptor in '%s'"), pImage->pszFilename);
         }
         else
-            rc = VERR_VD_IMAGE_READ_ONLY;
+            rc = VERR_NOT_SUPPORTED;
     }
     else
-        rc = VERR_VD_NOT_OPENED;
+        rc = VERR_VD_IMAGE_READ_ONLY;
 
     LogFlowFunc(("returns %Rrc\n", rc));
…
     LogFlowFunc(("pBackendData=%#p pUuid=%#p\n", pBackendData, pUuid));
     PVMDKIMAGE pImage = (PVMDKIMAGE)pBackendData;
-    int rc;
-
-    AssertPtr(pImage);
-
-    if (pImage)
-    {
-        *pUuid = pImage->ParentModificationUuid;
-        rc = VINF_SUCCESS;
-    }
-    else
-        rc = VERR_VD_NOT_OPENED;
-
-    LogFlowFunc(("returns %Rrc (%RTuuid)\n", rc, pUuid));
-    return rc;
+
+    AssertPtrReturn(pImage, VERR_VD_NOT_OPENED);
+
+    *pUuid = pImage->ParentModificationUuid;
+
+    LogFlowFunc(("returns %Rrc (%RTuuid)\n", VINF_SUCCESS, pUuid));
+    return VINF_SUCCESS;
 }
…
     LogFlowFunc(("pBackendData=%#p Uuid=%RTuuid\n", pBackendData, pUuid));
     PVMDKIMAGE pImage = (PVMDKIMAGE)pBackendData;
-    int rc;
-
-    AssertPtr(pImage);
-
-    if (pImage)
-    {
-        if (!(pImage->uOpenFlags & VD_OPEN_FLAGS_READONLY))
-        {
-            if (!(pImage->uOpenFlags & VD_VMDK_IMAGE_FLAGS_STREAM_OPTIMIZED))
-            {
-                pImage->ParentModificationUuid = *pUuid;
-                rc = vmdkDescDDBSetUuid(pImage, &pImage->Descriptor,
-                                        VMDK_DDB_PARENT_MODIFICATION_UUID, pUuid);
-                if (RT_FAILURE(rc))
-                    return vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: error storing parent image UUID in descriptor in '%s'"), pImage->pszFilename);
-                rc = VINF_SUCCESS;
-            }
-            else
-                rc = VERR_NOT_SUPPORTED;
+    int rc = VINF_SUCCESS;
+
+    AssertPtrReturn(pImage, VERR_VD_NOT_OPENED);
+
+    if (!(pImage->uOpenFlags & VD_OPEN_FLAGS_READONLY))
+    {
+        if (!(pImage->uOpenFlags & VD_VMDK_IMAGE_FLAGS_STREAM_OPTIMIZED))
+        {
+            pImage->ParentModificationUuid = *pUuid;
+            rc = vmdkDescDDBSetUuid(pImage, &pImage->Descriptor,
+                                    VMDK_DDB_PARENT_MODIFICATION_UUID, pUuid);
+            if (RT_FAILURE(rc))
+                rc = vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: error storing parent image UUID in descriptor in '%s'"), pImage->pszFilename);
         }
         else
-            rc = VERR_VD_IMAGE_READ_ONLY;
+            rc = VERR_NOT_SUPPORTED;
     }
     else
-        rc = VERR_VD_NOT_OPENED;
+        rc = VERR_VD_IMAGE_READ_ONLY;
 
     LogFlowFunc(("returns %Rrc\n", rc));
…
     PVMDKIMAGE pImage = (PVMDKIMAGE)pBackendData;
 
-    AssertPtr(pImage);
-    if (pImage)
-    {
-        vdIfErrorMessage(pImage->pIfError, "Header: Geometry PCHS=%u/%u/%u LCHS=%u/%u/%u cbSector=%llu\n",
-                         pImage->PCHSGeometry.cCylinders, pImage->PCHSGeometry.cHeads, pImage->PCHSGeometry.cSectors,
-                         pImage->LCHSGeometry.cCylinders, pImage->LCHSGeometry.cHeads, pImage->LCHSGeometry.cSectors,
-                         VMDK_BYTE2SECTOR(pImage->cbSize));
-        vdIfErrorMessage(pImage->pIfError, "Header: uuidCreation={%RTuuid}\n", &pImage->ImageUuid);
-        vdIfErrorMessage(pImage->pIfError, "Header: uuidModification={%RTuuid}\n", &pImage->ModificationUuid);
-        vdIfErrorMessage(pImage->pIfError, "Header: uuidParent={%RTuuid}\n", &pImage->ParentUuid);
-        vdIfErrorMessage(pImage->pIfError, "Header: uuidParentModification={%RTuuid}\n", &pImage->ParentModificationUuid);
-    }
+    AssertPtrReturnVoid(pImage);
+    vdIfErrorMessage(pImage->pIfError, "Header: Geometry PCHS=%u/%u/%u LCHS=%u/%u/%u cbSector=%llu\n",
+                     pImage->PCHSGeometry.cCylinders, pImage->PCHSGeometry.cHeads, pImage->PCHSGeometry.cSectors,
+                     pImage->LCHSGeometry.cCylinders, pImage->LCHSGeometry.cHeads, pImage->LCHSGeometry.cSectors,
+                     VMDK_BYTE2SECTOR(pImage->cbSize));
+    vdIfErrorMessage(pImage->pIfError, "Header: uuidCreation={%RTuuid}\n", &pImage->ImageUuid);
+    vdIfErrorMessage(pImage->pIfError, "Header: uuidModification={%RTuuid}\n", &pImage->ModificationUuid);
+    vdIfErrorMessage(pImage->pIfError, "Header: uuidParent={%RTuuid}\n", &pImage->ParentUuid);
+    vdIfErrorMessage(pImage->pIfError, "Header: uuidParentModification={%RTuuid}\n", &pImage->ParentModificationUuid);
 }
 
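One behavioural detail kept intact by the modification-UUID setter hunk above is the "Only touch the modification uuid if it changed" guard: the descriptor is only rewritten when the incoming UUID actually differs from the cached one. The standalone sketch below mirrors that guard only; it is not VirtualBox code, memcmp stands in for the RTUuidCompare call used in the real sources, and writeDescriptor is a hypothetical persistence hook invented for the example.

/*
 * Standalone sketch (not VirtualBox code): skip the descriptor update when
 * the new UUID equals the cached one.
 */
#include <stdio.h>
#include <string.h>

typedef struct SKETCHUUID
{
    unsigned char au8[16];
} SKETCHUUID;

/* Hypothetical persistence hook; the real code rewrites the VMDK descriptor. */
static int writeDescriptor(const SKETCHUUID *pUuid)
{
    (void)pUuid;
    printf("descriptor rewritten\n");
    return 0;
}

static int setModificationUuid(SKETCHUUID *pCached, const SKETCHUUID *pNew)
{
    int rc = 0;
    /* Only touch the cached value (and the descriptor) if it actually changed. */
    if (memcmp(pCached->au8, pNew->au8, sizeof(pCached->au8)) != 0)
    {
        *pCached = *pNew;
        rc = writeDescriptor(pCached);
    }
    return rc;
}

int main(void)
{
    SKETCHUUID Cached   = { { 0 } };
    SKETCHUUID Incoming = { { 1 } };
    setModificationUuid(&Cached, &Incoming);  /* differs: descriptor rewritten */
    setModificationUuid(&Cached, &Incoming);  /* identical: no write */
    return 0;
}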